@@ -28,9 +28,9 @@ static void IncrementScheduledCount(Kernel::KThread* thread) {
 }
 
 KScheduler::KScheduler(KernelCore& kernel_) : kernel{kernel_} {
-    m_idle_stack = std::make_shared<Common::Fiber>([this] {
+    m_switch_fiber = std::make_shared<Common::Fiber>([this] {
         while (true) {
-            ScheduleImplOffStack();
+            ScheduleImplFiber();
         }
     });
 
@@ -60,9 +60,9 @@ void KScheduler::DisableScheduling(KernelCore& kernel) {
 void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
     ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 1);
 
-    auto* scheduler = kernel.CurrentScheduler();
+    auto* scheduler{kernel.CurrentScheduler()};
 
-    if (!scheduler) {
+    if (!scheduler || kernel.IsPhantomModeForSingleCore()) {
         // HACK: we cannot schedule from this thread, it is not a core thread
         RescheduleCores(kernel, cores_needing_scheduling);
         if (GetCurrentThread(kernel).GetDisableDispatchCount() == 1) {
@@ -125,9 +125,9 @@ void KScheduler::RescheduleCurrentCoreImpl() {
     }
 }
 
-void KScheduler::Initialize(KThread* idle_thread) {
+void KScheduler::Initialize(KThread* main_thread, KThread* idle_thread, s32 core_id) {
     // Set core ID/idle thread/interrupt task manager.
-    m_core_id = GetCurrentCoreId(kernel);
+    m_core_id = core_id;
     m_idle_thread = idle_thread;
     // m_state.idle_thread_stack = m_idle_thread->GetStackTop();
     // m_state.interrupt_task_manager = &kernel.GetInterruptTaskManager();
@@ -142,10 +142,10 @@ void KScheduler::Initialize(KThread* idle_thread) {
     // Bind interrupt handler.
     // kernel.GetInterruptManager().BindHandler(
     //     GetSchedulerInterruptHandler(kernel), KInterruptName::Scheduler, m_core_id,
-    //     KInterruptController::PriorityLevel_Scheduler, false, false);
+    //     KInterruptController::PriorityLevel::Scheduler, false, false);
 
     // Set the current thread.
-    m_current_thread = GetCurrentThreadPointer(kernel);
+    m_current_thread = main_thread;
 }
 
 void KScheduler::Activate() {
@@ -156,6 +156,10 @@ void KScheduler::Activate() {
     RescheduleCurrentCore();
 }
 
+void KScheduler::OnThreadStart() {
+    GetCurrentThread(kernel).EnableDispatch();
+}
+
 u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
     if (KThread* prev_highest_thread = m_state.highest_priority_thread;
         prev_highest_thread != highest_thread) [[likely]] {
@@ -372,37 +376,30 @@ void KScheduler::ScheduleImpl() {
     }
 
     // The highest priority thread is not the same as the current thread.
-    // Switch to the idle thread stack and continue executing from there.
-    m_idle_cur_thread = cur_thread;
-    m_idle_highest_priority_thread = highest_priority_thread;
-    Common::Fiber::YieldTo(cur_thread->host_context, *m_idle_stack);
+    // Jump to the switcher and continue executing from there.
+    m_switch_cur_thread = cur_thread;
+    m_switch_highest_priority_thread = highest_priority_thread;
+    m_switch_from_schedule = true;
+    Common::Fiber::YieldTo(cur_thread->host_context, *m_switch_fiber);
 
     // Returning from ScheduleImpl occurs after this thread has been scheduled again.
 }
 
-void KScheduler::ScheduleImplOffStack() {
-    KThread* const cur_thread{m_idle_cur_thread};
-    KThread* highest_priority_thread{m_idle_highest_priority_thread};
+void KScheduler::ScheduleImplFiber() {
+    KThread* const cur_thread{m_switch_cur_thread};
+    KThread* highest_priority_thread{m_switch_highest_priority_thread};
 
-    // Get a reference to the current thread's stack parameters.
-    auto& sp{cur_thread->GetStackParameters()};
+    // If we're not coming from scheduling (i.e., we came from SC preemption),
+    // we should restart the scheduling loop directly. Not accurate to HOS.
+    if (!m_switch_from_schedule) {
+        goto retry;
+    }
+
+    // Mark that we are not coming from scheduling anymore.
+    m_switch_from_schedule = false;
 
     // Save the original thread context.
-    {
-        auto& physical_core = kernel.System().CurrentPhysicalCore();
-        auto& cpu_core = physical_core.ArmInterface();
-        cpu_core.SaveContext(cur_thread->GetContext32());
-        cpu_core.SaveContext(cur_thread->GetContext64());
-        // Save the TPIDR_EL0 system register in case it was modified.
-        cur_thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
-        cpu_core.ClearExclusiveState();
-    }
-
-    // Check if the thread is terminated by checking the DPC flags.
-    if ((sp.dpc_flags & static_cast<u32>(DpcFlag::Terminated)) == 0) {
-        // The thread isn't terminated, so we want to unlock it.
-        sp.m_lock.store(false, std::memory_order_seq_cst);
-    }
+    Unload(cur_thread);
 
     // The current thread's context has been entirely taken care of.
     // Now we want to loop until we successfully switch the thread context.
@@ -411,45 +408,24 @@ void KScheduler::ScheduleImplOffStack() {
         // Check if the highest priority thread is null.
         if (!highest_priority_thread) {
             // The next thread is nullptr!
-            // Switch to nullptr. This will actually switch to the idle thread.
-            SwitchThread(nullptr);
-            // We've switched to the idle thread, so we want to process interrupt tasks until we
-            // schedule a non-idle thread.
-            while (!m_state.interrupt_task_runnable) {
-                // Check if we need scheduling.
-                if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) {
-                    goto retry;
-                }
-
-                // Clear the previous thread.
-                m_state.prev_thread = nullptr;
-
-                // Wait for an interrupt before checking again.
-                kernel.System().GetCpuManager().WaitForAndHandleInterrupt();
-            }
-
-            // Execute any pending interrupt tasks.
-            // m_state.interrupt_task_manager->DoTasks();
-
-            // Clear the interrupt task thread as runnable.
-            m_state.interrupt_task_runnable = false;
-
-            // Retry the scheduling loop.
-            goto retry;
-        } else {
-            // We want to try to lock the highest priority thread's context.
-            // Try to take it.
-            bool expected{false};
-            while (!highest_priority_thread->stack_parameters.m_lock.compare_exchange_strong(
-                expected, true, std::memory_order_seq_cst)) {
-                // The highest priority thread's context is already locked.
-                // Check if we need scheduling. If we don't, we can retry directly.
-                if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) {
-                    // If we do, another core is interfering, and we must start again.
-                    goto retry;
-                }
-                expected = false;
-            }
-        }
+
+            // Switch to the idle thread. Note: HOS treats idling as a special case for
+            // performance. This is not *required* for yuzu's purposes, and for singlecore
+            // compatibility, we can just move the logic that would go here into the execution
+            // of the idle thread. If we ever remove singlecore, we should implement this
+            // accurately to HOS.
+            highest_priority_thread = m_idle_thread;
+        }
+
+        // We want to try to lock the highest priority thread's context.
+        // Try to take it.
+        while (!highest_priority_thread->context_guard.try_lock()) {
+            // The highest priority thread's context is already locked.
+            // Check if we need scheduling. If we don't, we can retry directly.
+            if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) {
+                // If we do, another core is interfering, and we must start again.
+                goto retry;
+            }
+        }
 
         // It's time to switch the thread.
@@ -461,13 +437,11 @@ void KScheduler::ScheduleImplOffStack() {
         if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) {
             // Our switch failed.
            // We should unlock the thread context, and then retry.
-            highest_priority_thread->stack_parameters.m_lock.store(false,
-                                                                   std::memory_order_seq_cst);
+            highest_priority_thread->context_guard.unlock();
             goto retry;
         } else {
             break;
         }
 
     retry:
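Note on the locking change in the two hunks above: the old ScheduleImplOffStack guarded a thread's context with a hand-rolled atomic flag (stack_parameters.m_lock, acquired with compare_exchange_strong and released with store), while the new code takes KThread::context_guard with try_lock and releases it with unlock. A minimal stand-alone sketch of the two acquire/release patterns, with std::atomic<bool> and std::mutex standing in for the kernel's own types (illustration only, not part of the patch):

    #include <atomic>
    #include <mutex>

    std::atomic<bool> m_lock{false}; // stand-in for stack_parameters.m_lock (old side)
    std::mutex context_guard;        // stand-in for KThread::context_guard (new side)

    void acquire_old() {
        bool expected{false};
        while (!m_lock.compare_exchange_strong(expected, true, std::memory_order_seq_cst)) {
            // On failure, compare_exchange_strong wrote the observed value into 'expected'.
            expected = false;
        }
    }

    void release_old() {
        m_lock.store(false, std::memory_order_seq_cst);
    }

    void acquire_new() {
        while (!context_guard.try_lock()) {
            // The real loop also re-checks m_state.needs_scheduling and bails out to 'retry'.
        }
    }

    void release_new() {
        context_guard.unlock();
    }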
@@ -480,18 +454,35 @@ void KScheduler::ScheduleImplOffStack() {
     }
 
     // Reload the guest thread context.
-    {
-        auto& cpu_core = kernel.System().CurrentArmInterface();
-        cpu_core.LoadContext(highest_priority_thread->GetContext32());
-        cpu_core.LoadContext(highest_priority_thread->GetContext64());
-        cpu_core.SetTlsAddress(highest_priority_thread->GetTLSAddress());
-        cpu_core.SetTPIDR_EL0(highest_priority_thread->GetTPIDR_EL0());
-        cpu_core.LoadWatchpointArray(highest_priority_thread->GetOwnerProcess()->GetWatchpoints());
-        cpu_core.ClearExclusiveState();
-    }
+    Reload(highest_priority_thread);
 
     // Reload the host thread.
-    Common::Fiber::YieldTo(m_idle_stack, *highest_priority_thread->host_context);
+    Common::Fiber::YieldTo(m_switch_fiber, *highest_priority_thread->host_context);
 }
 
+void KScheduler::Unload(KThread* thread) {
+    auto& cpu_core = kernel.System().ArmInterface(m_core_id);
+    cpu_core.SaveContext(thread->GetContext32());
+    cpu_core.SaveContext(thread->GetContext64());
+    // Save the TPIDR_EL0 system register in case it was modified.
+    thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
+    cpu_core.ClearExclusiveState();
+
+    // Check if the thread is terminated by checking the DPC flags.
+    if ((thread->GetStackParameters().dpc_flags & static_cast<u32>(DpcFlag::Terminated)) == 0) {
+        // The thread isn't terminated, so we want to unlock it.
+        thread->context_guard.unlock();
+    }
+}
+
+void KScheduler::Reload(KThread* thread) {
+    auto& cpu_core = kernel.System().ArmInterface(m_core_id);
+    cpu_core.LoadContext(thread->GetContext32());
+    cpu_core.LoadContext(thread->GetContext64());
+    cpu_core.SetTlsAddress(thread->GetTLSAddress());
+    cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
+    cpu_core.LoadWatchpointArray(thread->GetOwnerProcess()->GetWatchpoints());
+    cpu_core.ClearExclusiveState();
+}
+
 void KScheduler::ClearPreviousThread(KernelCore& kernel, KThread* thread) {
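Note on the Unload/Reload helpers added above: Unload saves the outgoing thread's guest CPU context and releases its context_guard (unless the thread is terminated), and Reload restores the incoming thread's context before the switch fiber yields to it. A simplified stand-alone model of that ordering, with plain structs standing in for Kernel::KThread and the ARM core (illustration only, not part of the patch):

    #include <cstdio>

    struct Context { int pc{}; };                  // stand-in for the guest CPU context
    struct Thread { Context ctx; bool locked{}; }; // stand-in for Kernel::KThread

    static Context g_core;                         // stand-in for the core's register state

    // Save the outgoing thread's context, then release its lock (simplified Unload).
    void Unload(Thread& t) {
        t.ctx = g_core;
        t.locked = false;
    }

    // Restore the incoming thread's context onto the core (simplified Reload).
    void Reload(Thread& t) {
        g_core = t.ctx;
    }

    int main() {
        Thread a{{1}, true};
        Thread b{{2}, true};
        g_core = a.ctx;  // the core is currently running 'a'

        Unload(a);       // 1) save and unlock the outgoing thread
        Reload(b);       // 2) load the chosen thread; the fiber then yields to it
        std::printf("core now at pc=%d\n", g_core.pc);
        return 0;
    }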