Core/Common: Address Feedback.

master
Fernando Sahmkow 2020-06-27 18:20:06 +07:00
parent e486c66850
commit 2f8947583f
21 changed files with 58 additions and 58 deletions

@@ -54,9 +54,7 @@ Fiber::Fiber(std::function<void(void*)>&& entry_point_func, void* start_paramete
     impl->handle = CreateFiber(default_stack_size, &FiberStartFunc, this);
 }
 
-Fiber::Fiber() {
-    impl = std::make_unique<FiberImpl>();
-}
+Fiber::Fiber() : impl{std::make_unique<FiberImpl>()} {}
 
 Fiber::~Fiber() {
     if (released) {
@@ -116,8 +114,8 @@ std::shared_ptr<Fiber> Fiber::ThreadToFiber() {
 
 struct Fiber::FiberImpl {
     alignas(64) std::array<u8, default_stack_size> stack;
-    u8* stack_limit;
     alignas(64) std::array<u8, default_stack_size> rewind_stack;
+    u8* stack_limit;
     u8* rewind_stack_limit;
     boost::context::detail::fcontext_t context;
     boost::context::detail::fcontext_t rewind_context;
@@ -168,9 +166,7 @@ void Fiber::SetRewindPoint(std::function<void(void*)>&& rewind_func, void* start
     rewind_parameter = start_parameter;
 }
 
-Fiber::Fiber() {
-    impl = std::make_unique<FiberImpl>();
-}
+Fiber::Fiber() : impl{std::make_unique<FiberImpl>()} {}
 
 Fiber::~Fiber() {
     if (released) {
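
Note: both Fiber backends now initialize impl in the constructor's member initializer list instead of assigning it in the body. A minimal sketch of the same pattern, using a hypothetical Widget type:

    #include <memory>

    struct WidgetImpl {
        int value{};
    };

    struct Widget {
        // Direct initialization: the unique_ptr is never default-constructed
        // and then reassigned in the constructor body.
        Widget() : impl{std::make_unique<WidgetImpl>()} {}

    private:
        std::unique_ptr<WidgetImpl> impl;
    };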

@@ -20,7 +20,7 @@
 
 namespace {
 
-void thread_pause() {
+void ThreadPause() {
 #if __x86_64__
     _mm_pause();
 #elif __aarch64__ && _MSC_VER
@@ -30,13 +30,13 @@ void thread_pause() {
 #endif
 }
 
-} // namespace
+} // Anonymous namespace
 
 namespace Common {
 
 void SpinLock::lock() {
     while (lck.test_and_set(std::memory_order_acquire)) {
-        thread_pause();
+        ThreadPause();
     }
 }
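
Only lock() appears in this hunk; the release side is not part of the change. A typical counterpart, assuming lck is a std::atomic_flag, would look like:

    void SpinLock::unlock() {
        // Clear the flag with release ordering so a spinning thread's
        // test_and_set(acquire) can succeed and observe prior writes.
        lck.clear(std::memory_order_release);
    }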

@@ -8,6 +8,11 @@
 
 namespace Common {
 
+/**
+ * SpinLock class
+ * A lock, similar to a mutex, that forces a thread to spin-wait instead of calling the
+ * supervisor. Should only be used on short sequences of code.
+ */
 class SpinLock {
 public:
     void lock();
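
Because SpinLock exposes lock()/unlock() (the BasicLockable requirements), it composes with the standard RAII guards. A small usage sketch with hypothetical names, assuming the usual common/ include root:

    #include <mutex>

    #include "common/spin_lock.h"

    namespace {
    Common::SpinLock counter_lock;
    int shared_counter = 0;
    } // Anonymous namespace

    void BumpCounter() {
        // The guard calls lock() here and unlock() at the end of the scope.
        std::scoped_lock lock{counter_lock};
        ++shared_counter;
    }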

@@ -3,6 +3,7 @@
 // Refer to the license.txt file included.
 
 #include <chrono>
+#include <mutex>
 #include <thread>
 
 #ifdef _MSC_VER
@@ -52,7 +53,7 @@ NativeClock::NativeClock(u64 emulated_cpu_frequency, u64 emulated_clock_frequenc
 }
 
 u64 NativeClock::GetRTSC() {
-    rtsc_serialize.lock();
+    std::scoped_lock scope{rtsc_serialize};
     _mm_mfence();
     const u64 current_measure = __rdtsc();
     u64 diff = current_measure - last_measure;
@@ -61,7 +62,6 @@ u64 NativeClock::GetRTSC() {
         last_measure = current_measure;
     }
     accumulated_ticks += diff;
-    rtsc_serialize.unlock();
    /// The clock cannot be more precise than the guest timer, remove the lower bits
     return accumulated_ticks & inaccuracy_mask;
 }
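
The manual lock()/unlock() pair in GetRTSC() is replaced with a scope guard, so the lock is released on every exit path. The general shape, as a reduced sketch with hypothetical names and a plain std::mutex standing in for the spin lock:

    #include <cstdint>
    #include <mutex>

    class AccumulatorSketch {
    public:
        std::uint64_t Add(std::uint64_t diff) {
            // Held for the rest of the function and released on all paths,
            // which is what dropping the explicit unlock() relies on.
            std::scoped_lock scope{serialize};
            accumulated += diff;
            return accumulated;
        }

    private:
        std::mutex serialize;
        std::uint64_t accumulated{};
    };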

@@ -148,7 +148,7 @@ public:
      */
     virtual void SetTPIDR_EL0(u64 value) = 0;
 
-    virtual void ChangeProcessorId(std::size_t new_core_id) = 0;
+    virtual void ChangeProcessorID(std::size_t new_core_id) = 0;
 
     virtual void SaveContext(ThreadContext32& ctx) = 0;
     virtual void SaveContext(ThreadContext64& ctx) = 0;

@@ -23,7 +23,7 @@ public:
     CPUInterruptHandler(CPUInterruptHandler&&) = default;
     CPUInterruptHandler& operator=(CPUInterruptHandler&&) = default;
 
-    constexpr bool IsInterrupted() const {
+    bool IsInterrupted() const {
         return is_interrupted;
     }

@@ -107,7 +107,7 @@ public:
     u64 GetTicksRemaining() override {
         if (parent.uses_wall_clock) {
             if (!parent.interrupt_handlers[parent.core_index].IsInterrupted()) {
-                return 1000U;
+                return minimum_run_cycles;
             }
             return 0U;
         }
@@ -116,6 +116,7 @@ public:
 
     ARM_Dynarmic_32& parent;
     std::size_t num_interpreted_instructions{};
+    static constexpr u64 minimum_run_cycles = 1000U;
 };
 
 std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable& page_table,
@@ -214,7 +215,7 @@ void ARM_Dynarmic_32::SetTPIDR_EL0(u64 value) {
     cp15->uprw = static_cast<u32>(value);
 }
 
-void ARM_Dynarmic_32::ChangeProcessorId(std::size_t new_core_id) {
+void ARM_Dynarmic_32::ChangeProcessorID(std::size_t new_core_id) {
     jit->ChangeProcessorID(new_core_id);
 }

@@ -47,7 +47,7 @@ public:
     void SetTlsAddress(VAddr address) override;
     void SetTPIDR_EL0(u64 value) override;
     u64 GetTPIDR_EL0() const override;
-    void ChangeProcessorId(std::size_t new_core_id) override;
+    void ChangeProcessorID(std::size_t new_core_id) override;
     void SaveContext(ThreadContext32& ctx) override;
     void SaveContext(ThreadContext64& ctx) override {}

@@ -144,7 +144,7 @@ public:
     u64 GetTicksRemaining() override {
         if (parent.uses_wall_clock) {
             if (!parent.interrupt_handlers[parent.core_index].IsInterrupted()) {
-                return 1000U;
+                return minimum_run_cycles;
             }
             return 0U;
         }
@@ -159,6 +159,7 @@ public:
     std::size_t num_interpreted_instructions = 0;
     u64 tpidrro_el0 = 0;
     u64 tpidr_el0 = 0;
+    static constexpr u64 minimum_run_cycles = 1000U;
 };
 
 std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable& page_table,
@@ -271,7 +272,7 @@ void ARM_Dynarmic_64::SetTPIDR_EL0(u64 value) {
     cb->tpidr_el0 = value;
 }
 
-void ARM_Dynarmic_64::ChangeProcessorId(std::size_t new_core_id) {
+void ARM_Dynarmic_64::ChangeProcessorID(std::size_t new_core_id) {
     jit->ChangeProcessorID(new_core_id);
 }

@@ -45,7 +45,7 @@ public:
     void SetTlsAddress(VAddr address) override;
     void SetTPIDR_EL0(u64 value) override;
    u64 GetTPIDR_EL0() const override;
-    void ChangeProcessorId(std::size_t new_core_id) override;
+    void ChangeProcessorID(std::size_t new_core_id) override;
     void SaveContext(ThreadContext32& ctx) override {}
     void SaveContext(ThreadContext64& ctx) override;

@@ -159,7 +159,7 @@ void ARM_Unicorn::SetTPIDR_EL0(u64 value) {
     CHECKED(uc_reg_write(uc, UC_ARM64_REG_TPIDR_EL0, &value));
 }
 
-void ARM_Unicorn::ChangeProcessorId(std::size_t new_core_id) {
+void ARM_Unicorn::ChangeProcessorID(std::size_t new_core_id) {
     core_index = new_core_id;
 }

@@ -36,7 +36,7 @@ public:
     void SetTlsAddress(VAddr address) override;
     void SetTPIDR_EL0(u64 value) override;
     u64 GetTPIDR_EL0() const override;
-    void ChangeProcessorId(std::size_t new_core_id) override;
+    void ChangeProcessorID(std::size_t new_core_id) override;
     void PrepareReschedule() override;
     void ClearExclusiveState() override;
     void ExecuteInstructions(std::size_t num_instructions);

@@ -443,7 +443,7 @@ bool System::IsPoweredOn() const {
 }
 
 void System::PrepareReschedule() {
-    // impl->CurrentPhysicalCore().Stop();
+    // Deprecated, does nothing, kept for backward compatibility.
 }
 
 void System::PrepareReschedule(const u32 core_index) {

@@ -138,13 +138,13 @@ public:
     /**
      * Run the OS and Application
-     * This function will start emulation and run the competent devices
+     * This function will start emulation and run the relevant devices
      */
     ResultStatus Run();
 
     /**
      * Pause the OS and Application
-     * This function will pause emulation and stop the competent devices
+     * This function will pause emulation and stop the relevant devices
      */
     ResultStatus Pause();

@@ -45,9 +45,9 @@ CoreTiming::CoreTiming() {
 CoreTiming::~CoreTiming() = default;
 
 void CoreTiming::ThreadEntry(CoreTiming& instance) {
-    std::string name = "yuzu:HostTiming";
-    MicroProfileOnThreadCreate(name.c_str());
-    Common::SetCurrentThreadName(name.c_str());
+    constexpr char name[] = "yuzu:HostTiming";
+    MicroProfileOnThreadCreate(name);
+    Common::SetCurrentThreadName(name);
     Common::SetCurrentThreadPriority(Common::ThreadPriority::VeryHigh);
     instance.on_thread_init();
     instance.ThreadLoop();
@@ -108,18 +108,19 @@ bool CoreTiming::HasPendingEvents() const {
 
 void CoreTiming::ScheduleEvent(s64 ns_into_future, const std::shared_ptr<EventType>& event_type,
                                u64 userdata) {
-    basic_lock.lock();
-    const u64 timeout = static_cast<u64>(GetGlobalTimeNs().count() + ns_into_future);
+    {
+        std::scoped_lock scope{basic_lock};
+        const u64 timeout = static_cast<u64>(GetGlobalTimeNs().count() + ns_into_future);
 
-    event_queue.emplace_back(Event{timeout, event_fifo_id++, userdata, event_type});
-    std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
-    basic_lock.unlock();
+        event_queue.emplace_back(Event{timeout, event_fifo_id++, userdata, event_type});
+        std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
+    }
 
     event.Set();
 }
 
 void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u64 userdata) {
-    basic_lock.lock();
+    std::scoped_lock scope{basic_lock};
     const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
         return e.type.lock().get() == event_type.get() && e.userdata == userdata;
     });
@@ -129,7 +130,6 @@ void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u
         event_queue.erase(itr, event_queue.end());
         std::make_heap(event_queue.begin(), event_queue.end(), std::greater<>());
     }
-    basic_lock.unlock();
 }
 
 void CoreTiming::AddTicks(u64 ticks) {
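
Worth noting in ScheduleEvent: the new inner braces limit the scoped_lock to the queue mutation, so event.Set() runs after the lock is released and the woken timing thread does not immediately contend on basic_lock. A reduced sketch of that pattern with hypothetical names:

    #include <condition_variable>
    #include <mutex>
    #include <queue>

    std::mutex queue_mutex;
    std::queue<int> work_queue;
    std::condition_variable wakeup;

    void Push(int item) {
        {
            // Mutate the shared queue under the lock...
            std::scoped_lock lock{queue_mutex};
            work_queue.push(item);
        } // ...release it here, then notify outside the critical section.
        wakeup.notify_one();
    }
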
@@ -187,8 +187,8 @@ void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
 }
 
 std::optional<s64> CoreTiming::Advance() {
-    advance_lock.lock();
-    basic_lock.lock();
+    std::scoped_lock advance_scope{advance_lock};
+    std::scoped_lock basic_scope{basic_lock};
     global_timer = GetGlobalTimeNs().count();
 
     while (!event_queue.empty() && event_queue.front().time <= global_timer) {
@@ -207,12 +207,8 @@ std::optional<s64> CoreTiming::Advance() {
 
     if (!event_queue.empty()) {
         const s64 next_time = event_queue.front().time - global_timer;
-        basic_lock.unlock();
-        advance_lock.unlock();
         return next_time;
     } else {
-        basic_lock.unlock();
-        advance_lock.unlock();
        return std::nullopt;
     }
 }
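
Advance() now uses two separate scoped_lock objects, which keeps the original acquisition order (advance_lock, then basic_lock) and releases them in reverse order on every return path. If the two were always taken together, a single variadic std::scoped_lock could also acquire both with deadlock avoidance; a sketch under that assumption, with plain std::mutex standing in for the actual lock types:

    #include <mutex>

    std::mutex advance_lock;
    std::mutex basic_lock;

    void AdvanceSketch() {
        // Locks both mutexes atomically with respect to deadlock and
        // unlocks them automatically at scope exit.
        std::scoped_lock scope{advance_lock, basic_lock};
        // ... process queued events ...
    }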

@@ -472,16 +472,12 @@ const Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() const {
 }
 
 void KernelCore::InvalidateAllInstructionCaches() {
-    if (!IsMulticore()) {
-        auto& threads = GlobalScheduler().GetThreadList();
-        for (auto& thread : threads) {
-            if (!thread->IsHLEThread()) {
-                auto& arm_interface = thread->ArmInterface();
-                arm_interface.ClearInstructionCache();
-            }
-        }
-    } else {
-        UNIMPLEMENTED_MSG("Cache Invalidation unimplemented for multicore");
+    auto& threads = GlobalScheduler().GetThreadList();
+    for (auto& thread : threads) {
+        if (!thread->IsHLEThread()) {
+            auto& arm_interface = thread->ArmInterface();
+            arm_interface.ClearInstructionCache();
+        }
     }
 }

@@ -37,6 +37,10 @@ void PhysicalCore::Shutdown() {
     scheduler.Shutdown();
 }
 
+bool PhysicalCore::IsInterrupted() const {
+    return interrupt_handler.IsInterrupted();
+}
+
 void PhysicalCore::Interrupt() {
     guard->lock();
     interrupt_handler.SetInterrupt(true);

@@ -7,8 +7,6 @@
 #include <cstddef>
 #include <memory>
 
-#include "core/arm/cpu_interrupt_handler.h"
-
 namespace Common {
 class SpinLock;
 }
@@ -19,6 +17,7 @@ class Scheduler;
 namespace Core {
 class ARM_Interface;
+class CPUInterruptHandler;
 class ExclusiveMonitor;
 class System;
 } // namespace Core
@@ -45,9 +44,7 @@ public:
     void ClearInterrupt();
 
     /// Check if this core is interrupted
-    bool IsInterrupted() const {
-        return interrupt_handler.IsInterrupted();
-    }
+    bool IsInterrupted() const;
 
     // Shutdown this physical core.
     void Shutdown();
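
Moving IsInterrupted() out of line is what allows the header to drop the cpu_interrupt_handler.h include and forward-declare CPUInterruptHandler instead: only the .cpp, where the type is complete, needs to call into it. Schematically, with hypothetical names:

    // widget.h -- a forward declaration is enough for a reference member
    // and for declaring (not defining) member functions that use it.
    class Helper;

    class Widget {
    public:
        explicit Widget(Helper& helper_) : helper{helper_} {}
        bool IsReady() const; // defined in widget.cpp, where Helper is complete

    private:
        Helper& helper;
    };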

@@ -658,7 +658,7 @@ void Scheduler::Reload() {
         cpu_core.LoadContext(thread->GetContext64());
         cpu_core.SetTlsAddress(thread->GetTLSAddress());
         cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
-        cpu_core.ChangeProcessorId(this->core_id);
+        cpu_core.ChangeProcessorID(this->core_id);
         cpu_core.ClearExclusiveState();
     }
 }
@@ -691,7 +691,7 @@ void Scheduler::SwitchContextStep2() {
         cpu_core.LoadContext(new_thread->GetContext64());
         cpu_core.SetTlsAddress(new_thread->GetTLSAddress());
         cpu_core.SetTPIDR_EL0(new_thread->GetTPIDR_EL0());
-        cpu_core.ChangeProcessorId(this->core_id);
+        cpu_core.ChangeProcessorID(this->core_id);
         cpu_core.ClearExclusiveState();
     }
 }

@@ -240,6 +240,10 @@ public:
         return switch_fiber;
     }
 
+    const std::shared_ptr<Common::Fiber>& ControlContext() const {
+        return switch_fiber;
+    }
+
 private:
     friend class GlobalScheduler;

@@ -68,7 +68,7 @@ static void ThreadStart1(u32 id, TestControl1& test_control) {
  * doing all the work required.
  */
 TEST_CASE("Fibers::Setup", "[common]") {
-    constexpr u32 num_threads = 7;
+    constexpr std::size_t num_threads = 7;
     TestControl1 test_control{};
     test_control.thread_fibers.resize(num_threads);
     test_control.work_fibers.resize(num_threads);