@@ -11,11 +11,15 @@
#include <utility>

#include "common/assert.h"
#include "common/bit_util.h"
#include "common/fiber.h"
#include "common/logging/log.h"
#include "core/arm/arm_interface.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/cpu_manager.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/time_manager.h"
@@ -27,78 +31,108 @@ GlobalScheduler::GlobalScheduler(KernelCore& kernel) : kernel{kernel} {}
GlobalScheduler::~GlobalScheduler() = default;

void GlobalScheduler::AddThread(std::shared_ptr<Thread> thread) {
    global_list_guard.lock();
    thread_list.push_back(std::move(thread));
    global_list_guard.unlock();
}

void GlobalScheduler::RemoveThread(std::shared_ptr<Thread> thread) {
    global_list_guard.lock();
    thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
                      thread_list.end());
    global_list_guard.unlock();
}

void GlobalScheduler::UnloadThread(std::size_t core) {
    Scheduler& sched = kernel.Scheduler(core);
    sched.UnloadThread();
}
void GlobalScheduler::SelectThread(std::size_t core) {
u32 GlobalScheduler::SelectThreads() {
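    // SelectThreads() replaces the per-core SelectThread(): it computes the next
    // thread for every core in a single pass and returns a bitmask with one bit set
    // per core whose selection changed and therefore needs a context switch.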
    const auto update_thread = [](Thread* thread, Scheduler& sched) {
        sched.guard.lock();
        if (thread != sched.selected_thread.get()) {
            if (thread == nullptr) {
                ++sched.idle_selection_count;
            }
            sched.selected_thread = SharedFrom(thread);
        }
        sched.is_context_switch_pending = sched.selected_thread != sched.current_thread;
        const bool reschedule_pending = sched.selected_thread != sched.current_thread;
        sched.is_context_switch_pending = reschedule_pending;
        std::atomic_thread_fence(std::memory_order_seq_cst);
        sched.guard.unlock();
        return reschedule_pending;
    };
    Scheduler& sched = kernel.Scheduler(core);
    Thread* current_thread = nullptr;
    if (!is_reselection_pending.load()) {
        return 0;
    }
    std::array<Thread*, Core::Hardware::NUM_CPU_CORES> top_threads{};
    u32 idle_cores{};
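    // top_threads caches the front of each core's schedule queue; idle_cores is a
    // bitmask with bit n set while core n still has no thread to run.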
    // Step 1: Get top thread in schedule queue.
    current_thread = scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front();
    if (current_thread) {
        update_thread(current_thread, sched);
        return;
    }
    // Step 2: Try selecting a suggested thread.
    Thread* winner = nullptr;
    std::set<s32> sug_cores;
    for (auto thread : suggested_queue[core]) {
        s32 this_core = thread->GetProcessorID();
        Thread* thread_on_core = nullptr;
        if (this_core >= 0) {
            thread_on_core = scheduled_queue[this_core].front();
    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
        Thread* top_thread =
            scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front();
        if (top_thread != nullptr) {
            // TODO(Blinkhawk): Implement Thread Pinning
        } else {
            idle_cores |= (1ul << core);
        }
        if (this_core < 0 || thread != thread_on_core) {
            winner = thread;
            break;
        }
        sug_cores.insert(this_core);
        top_threads[core] = top_thread;
    }
    // if we got a suggested thread, select it, else do a second pass.
    if (winner && winner->GetPriority() > 2) {
        if (winner->IsRunning()) {
            UnloadThread(static_cast<u32>(winner->GetProcessorID()));
        }
        TransferToCore(winner->GetPriority(), static_cast<s32>(core), winner);
        update_thread(winner, sched);
        return;
    }
    // Step 3: Select a suggested thread from another core
    for (auto& src_core : sug_cores) {
        auto it = scheduled_queue[src_core].begin();
        it++;
        if (it != scheduled_queue[src_core].end()) {
            Thread* thread_on_core = scheduled_queue[src_core].front();
            Thread* to_change = *it;
            if (thread_on_core->IsRunning() || to_change->IsRunning()) {
                UnloadThread(static_cast<u32>(src_core));
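    // Visit each idle core in turn: CountTrailingZeroes32 yields the lowest-numbered
    // idle core, and its bit is cleared again at the bottom of the loop.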
    while (idle_cores != 0) {
        u32 core_id = Common::CountTrailingZeroes32(idle_cores);
        if (!suggested_queue[core_id].empty()) {
            std::array<s32, Core::Hardware::NUM_CPU_CORES> migration_candidates{};
            std::size_t num_candidates = 0;
            auto iter = suggested_queue[core_id].begin();
            Thread* suggested = nullptr;
            // Step 2: Try selecting a suggested thread.
            while (iter != suggested_queue[core_id].end()) {
                suggested = *iter;
                iter++;
                s32 suggested_core_id = suggested->GetProcessorID();
                Thread* top_thread =
                    suggested_core_id >= 0 ? top_threads[suggested_core_id] : nullptr;
                if (top_thread != suggested) {
                    if (top_thread != nullptr &&
                        top_thread->GetPriority() < THREADPRIO_MAX_CORE_MIGRATION) {
                        // A thread of too-high priority tops that core, so cancel the
                        // core migration.
                        suggested = nullptr;
                        break;
                    }
                    TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id),
                                   suggested);
                    break;
                }
                // The suggestion is already the top thread of its own core; record the
                // core as a migration candidate for Step 3 and keep looking.
                suggested = nullptr;
                migration_candidates[num_candidates++] = suggested_core_id;
            }
            TransferToCore(thread_on_core->GetPriority(), static_cast<s32>(core), thread_on_core);
            current_thread = thread_on_core;
            break;
            // Step 3: Select a suggested thread from another core
            if (suggested == nullptr) {
                for (std::size_t i = 0; i < num_candidates; i++) {
                    s32 candidate_core = migration_candidates[i];
                    suggested = top_threads[candidate_core];
                    auto it = scheduled_queue[candidate_core].begin();
                    it++;
                    Thread* next = it != scheduled_queue[candidate_core].end() ? *it : nullptr;
                    if (next != nullptr) {
                        TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id),
                                       suggested);
                        top_threads[candidate_core] = next;
                        break;
                    } else {
                        // The candidate core has no second thread to promote in its
                        // place, so its top thread cannot be taken; keep looking.
                        suggested = nullptr;
                    }
                }
            }
            top_threads[core_id] = suggested;
        }
        idle_cores &= ~(1ul << core_id);
    }
    u32 cores_needing_context_switch{};
    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
        Scheduler& sched = kernel.Scheduler(core);
        if (update_thread(top_threads[core], sched)) {
            cores_needing_context_switch |= (1ul << core);
        }
    }
    update_thread(current_thread, sched);
    return cores_needing_context_switch;
}
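// Typical caller: GlobalScheduler::Unlock() (later in this diff) passes the returned
// mask straight to EnableInterruptAndSchedule().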
bool GlobalScheduler::YieldThread(Thread* yielding_thread) {
@@ -153,9 +187,6 @@ bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
    if (winner != nullptr) {
        if (winner != yielding_thread) {
            if (winner->IsRunning()) {
                UnloadThread(static_cast<u32>(winner->GetProcessorID()));
            }
            TransferToCore(winner->GetPriority(), s32(core_id), winner);
        }
    } else {
@@ -195,9 +226,6 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
    }
    if (winner != nullptr) {
        if (winner != yielding_thread) {
            if (winner->IsRunning()) {
                UnloadThread(static_cast<u32>(winner->GetProcessorID()));
            }
            TransferToCore(winner->GetPriority(), static_cast<s32>(core_id), winner);
        }
    } else {
@@ -213,7 +241,9 @@ void GlobalScheduler::PreemptThreads() {
        const u32 priority = preemption_priorities[core_id];
        if (scheduled_queue[core_id].size(priority) > 0) {
            scheduled_queue[core_id].front(priority)->IncrementYieldCount();
            if (scheduled_queue[core_id].size(priority) > 1) {
                scheduled_queue[core_id].front(priority)->IncrementYieldCount();
            }
            scheduled_queue[core_id].yield(priority);
            if (scheduled_queue[core_id].size(priority) > 1) {
                scheduled_queue[core_id].front(priority)->IncrementYieldCount();
@@ -247,9 +277,6 @@ void GlobalScheduler::PreemptThreads() {
        }
        if (winner != nullptr) {
            if (winner->IsRunning()) {
                UnloadThread(static_cast<u32>(winner->GetProcessorID()));
            }
            TransferToCore(winner->GetPriority(), s32(core_id), winner);
            current_thread =
                winner->GetPriority() <= current_thread->GetPriority() ? winner : current_thread;
@@ -280,9 +307,6 @@ void GlobalScheduler::PreemptThreads() {
        }
        if (winner != nullptr) {
            if (winner->IsRunning()) {
                UnloadThread(static_cast<u32>(winner->GetProcessorID()));
            }
            TransferToCore(winner->GetPriority(), s32(core_id), winner);
            current_thread = winner;
        }
@@ -292,6 +316,28 @@ void GlobalScheduler::PreemptThreads() {
}
}
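// Interrupts every core flagged in cores_pending_reschedule so it re-runs its
// scheduler, except the caller's own core, which instead performs the context
// switch directly once the loop is done.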
void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule,
                                                 Core::EmuThreadHandle global_thread) {
    u32 current_core = global_thread.host_handle;
    bool must_context_switch = global_thread.guest_handle != InvalidHandle &&
                               (current_core < Core::Hardware::NUM_CPU_CORES);
    while (cores_pending_reschedule != 0) {
        u32 core = Common::CountTrailingZeroes32(cores_pending_reschedule);
        ASSERT(core < Core::Hardware::NUM_CPU_CORES);
        if (!must_context_switch || core != current_core) {
            auto& phys_core = kernel.PhysicalCore(core);
            phys_core.Interrupt();
        } else {
            must_context_switch = true;
        }
        cores_pending_reschedule &= ~(1ul << core);
    }
    if (must_context_switch) {
        auto& core_scheduler = kernel.CurrentScheduler();
        core_scheduler.TryDoContextSwitch();
    }
}
void GlobalScheduler::Suggest(u32 priority, std::size_t core, Thread* thread) {
    suggested_queue[core].add(thread, priority);
}
@@ -349,6 +395,108 @@ bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread,
}
}
void GlobalScheduler::AdjustSchedulingOnStatus(Thread* thread, u32 old_flags) {
    if (old_flags == thread->scheduling_state) {
        return;
    }

    if (static_cast<ThreadSchedStatus>(old_flags & static_cast<u32>(ThreadSchedMasks::LowMask)) ==
        ThreadSchedStatus::Runnable) {
        // In this case the thread was running, now it's pausing/exiting
        if (thread->processor_id >= 0) {
            Unschedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
        }

        for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
            if (core != static_cast<u32>(thread->processor_id) &&
                ((thread->affinity_mask >> core) & 1) != 0) {
                Unsuggest(thread->current_priority, core, thread);
            }
        }
    } else if (thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable) {
        // The thread is now set to running from being stopped
        if (thread->processor_id >= 0) {
            Schedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
        }

        for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
            if (core != static_cast<u32>(thread->processor_id) &&
                ((thread->affinity_mask >> core) & 1) != 0) {
                Suggest(thread->current_priority, core, thread);
            }
        }
    }

    SetReselectionPending();
}
void GlobalScheduler::AdjustSchedulingOnPriority(Thread* thread, u32 old_priority) {
    if (thread->GetSchedulingStatus() != ThreadSchedStatus::Runnable) {
        return;
    }
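    // Re-queue the thread in two phases: first remove it everywhere at the old
    // priority, then re-insert it everywhere at its current priority.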
    if (thread->processor_id >= 0) {
        Unschedule(old_priority, static_cast<u32>(thread->processor_id), thread);
    }

    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
        if (core != static_cast<u32>(thread->processor_id) &&
            ((thread->affinity_mask >> core) & 1) != 0) {
            Unsuggest(old_priority, core, thread);
        }
    }

    if (thread->processor_id >= 0) {
        // TODO(Blinkhawk): compare it with current thread running on current core, instead of
        // checking running
        if (thread->IsRunning()) {
            SchedulePrepend(thread->current_priority, static_cast<u32>(thread->processor_id),
                            thread);
        } else {
            Schedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
        }
    }

    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
        if (core != static_cast<u32>(thread->processor_id) &&
            ((thread->affinity_mask >> core) & 1) != 0) {
            Suggest(thread->current_priority, core, thread);
        }
    }

    thread->IncrementYieldCount();
    SetReselectionPending();
}
void GlobalScheduler::AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinity_mask,
                                                 s32 old_core) {
    if (thread->GetSchedulingStatus() != ThreadSchedStatus::Runnable ||
        thread->current_priority >= THREADPRIO_COUNT) {
        return;
    }

    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
        if (((old_affinity_mask >> core) & 1) != 0) {
            if (core == static_cast<u32>(old_core)) {
                Unschedule(thread->current_priority, core, thread);
            } else {
                Unsuggest(thread->current_priority, core, thread);
            }
        }
    }

    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
        if (((thread->affinity_mask >> core) & 1) != 0) {
            if (core == static_cast<u32>(thread->processor_id)) {
                Schedule(thread->current_priority, core, thread);
            } else {
                Suggest(thread->current_priority, core, thread);
            }
        }
    }

    thread->IncrementYieldCount();
    SetReselectionPending();
}
void GlobalScheduler::Shutdown() {
    for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
        scheduled_queue[core].clear();
@@ -374,13 +522,12 @@ void GlobalScheduler::Unlock() {
        ASSERT(scope_lock > 0);
        return;
    }
    for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
        SelectThread(i);
    }
    u32 cores_pending_reschedule = SelectThreads();
    Core::EmuThreadHandle leaving_thread = current_owner;
    current_owner = Core::EmuThreadHandle::InvalidHandle();
    scope_lock = 1;
    inner_lock.unlock();
    // TODO(Blinkhawk): Setup the interrupts and change context on current core.
    EnableInterruptAndSchedule(cores_pending_reschedule, leaving_thread);
}
Scheduler::Scheduler(Core::System& system, std::size_t core_id)
@@ -393,56 +540,83 @@ bool Scheduler::HaveReadyThreads() const {
}
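// With this change, a core that has no guest thread loaded reports its idle
// thread as the current thread instead of nullptr.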
Thread* Scheduler::GetCurrentThread() const {
    return current_thread.get();
    if (current_thread) {
        return current_thread.get();
    }
    return idle_thread.get();
}
Thread* Scheduler::GetSelectedThread() const {
    return selected_thread.get();
}

void Scheduler::SelectThreads() {
    system.GlobalScheduler().SelectThread(core_id);
}

u64 Scheduler::GetLastContextSwitchTicks() const {
    return last_context_switch_time;
}
void Scheduler::TryDoContextSwitch() {
    auto& phys_core = system.Kernel().CurrentPhysicalCore();
    if (phys_core.IsInterrupted()) {
        phys_core.ClearInterrupt();
    }
    guard.lock();
    if (is_context_switch_pending) {
        SwitchContext();
    } else {
        guard.unlock();
    }
}
void Scheduler::UnloadThread() {
    Thread* const previous_thread = GetCurrentThread();
    Process* const previous_process = system.Kernel().CurrentProcess();
void Scheduler::OnThreadStart() {
    SwitchContextStep2();
}
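// Context switching is now split in two steps: SwitchContext() saves the outgoing
// thread and yields its host fiber, while SwitchContextStep2() finishes loading the
// incoming thread once its fiber resumes (or when it first starts, via OnThreadStart).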
    UpdateLastContextSwitchTime(previous_thread, previous_process);
void Scheduler::SwitchContextStep2() {
    Thread* previous_thread = current_thread.get();
    Thread* new_thread = selected_thread.get();

    // Save context for previous thread
    if (previous_thread) {
        system.ArmInterface(core_id).SaveContext(previous_thread->GetContext32());
        system.ArmInterface(core_id).SaveContext(previous_thread->GetContext64());
        // Save the TPIDR_EL0 system register in case it was modified.
        previous_thread->SetTPIDR_EL0(system.ArmInterface(core_id).GetTPIDR_EL0());
    // Load context of new thread
    Process* const previous_process =
        previous_thread != nullptr ? previous_thread->GetOwnerProcess() : nullptr;

        if (previous_thread->GetStatus() == ThreadStatus::Running) {
            // This is only the case when a reschedule is triggered without the current thread
            // yielding execution (i.e. an event triggered, system core time-sliced, etc)
            previous_thread->SetStatus(ThreadStatus::Ready);
    if (new_thread) {
        new_thread->context_guard.lock();
        ASSERT_MSG(new_thread->GetProcessorID() == s32(this->core_id),
                   "Thread must be assigned to this core.");
        ASSERT_MSG(new_thread->GetStatus() == ThreadStatus::Ready,
                   "Thread must be ready to become running.");

        // Cancel any outstanding wakeup events for this thread
        current_thread = SharedFrom(new_thread);
        new_thread->SetStatus(ThreadStatus::Running);
        new_thread->SetIsRunning(true);

        auto* const thread_owner_process = current_thread->GetOwnerProcess();
        if (previous_process != thread_owner_process && thread_owner_process != nullptr) {
            system.Kernel().MakeCurrentProcess(thread_owner_process);
        }
        previous_thread->SetIsRunning(false);

        if (!new_thread->IsHLEThread()) {
            auto& cpu_core = system.ArmInterface(core_id);
            cpu_core.LoadContext(new_thread->GetContext32());
            cpu_core.LoadContext(new_thread->GetContext64());
            cpu_core.SetTlsAddress(new_thread->GetTLSAddress());
            cpu_core.SetTPIDR_EL0(new_thread->GetTPIDR_EL0());
        }
    } else {
        current_thread = nullptr;
        // Note: We do not reset the current process and current page table when idling because
        // technically we haven't changed processes, our threads are just paused.
    }
    current_thread = nullptr;
    guard.unlock();
}
void Scheduler::SwitchContext() {
    Thread* const previous_thread = GetCurrentThread();
    Thread* const new_thread = GetSelectedThread();
    Thread* previous_thread = current_thread.get();
    Thread* new_thread = selected_thread.get();

    is_context_switch_pending = false;
    if (new_thread == previous_thread) {
        guard.unlock();
        return;
    }
@@ -452,51 +626,44 @@ void Scheduler::SwitchContext() {
    // Save context for previous thread
    if (previous_thread) {
        system.ArmInterface(core_id).SaveContext(previous_thread->GetContext32());
        system.ArmInterface(core_id).SaveContext(previous_thread->GetContext64());
        // Save the TPIDR_EL0 system register in case it was modified.
        previous_thread->SetTPIDR_EL0(system.ArmInterface(core_id).GetTPIDR_EL0());
        if (!previous_thread->IsHLEThread()) {
            auto& cpu_core = system.ArmInterface(core_id);
            cpu_core.SaveContext(previous_thread->GetContext32());
            cpu_core.SaveContext(previous_thread->GetContext64());
            // Save the TPIDR_EL0 system register in case it was modified.
            previous_thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
        }
        if (previous_thread->GetStatus() == ThreadStatus::Running) {
            // This is only the case when a reschedule is triggered without the current thread
            // yielding execution (i.e. an event triggered, system core time-sliced, etc)
            previous_thread->SetStatus(ThreadStatus::Ready);
        }
        previous_thread->SetIsRunning(false);
        previous_thread->context_guard.unlock();
    }
    // Load context of new thread
    if (new_thread) {
        ASSERT_MSG(new_thread->GetProcessorID() == s32(this->core_id),
                   "Thread must be assigned to this core.");
        ASSERT_MSG(new_thread->GetStatus() == ThreadStatus::Ready,
                   "Thread must be ready to become running.");
        // Cancel any outstanding wakeup events for this thread
        new_thread->CancelWakeupTimer();
        current_thread = SharedFrom(new_thread);
        new_thread->SetStatus(ThreadStatus::Running);
        new_thread->SetIsRunning(true);
        auto* const thread_owner_process = current_thread->GetOwnerProcess();
        if (previous_process != thread_owner_process) {
            system.Kernel().MakeCurrentProcess(thread_owner_process);
        }
        system.ArmInterface(core_id).LoadContext(new_thread->GetContext32());
        system.ArmInterface(core_id).LoadContext(new_thread->GetContext64());
        system.ArmInterface(core_id).SetTlsAddress(new_thread->GetTLSAddress());
        system.ArmInterface(core_id).SetTPIDR_EL0(new_thread->GetTPIDR_EL0());
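    // Every guest thread and each core's idle loop is backed by a host fiber; the
    // actual switch happens by yielding from the outgoing fiber to the incoming one.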
    std::shared_ptr<Common::Fiber> old_context;
    if (previous_thread != nullptr) {
        old_context = previous_thread->GetHostContext();
    } else {
        current_thread = nullptr;
        // Note: We do not reset the current process and current page table when idling because
        // technically we haven't changed processes, our threads are just paused.
        old_context = idle_thread->GetHostContext();
    }
    std::shared_ptr<Common::Fiber> next_context;
    if (new_thread != nullptr) {
        next_context = new_thread->GetHostContext();
    } else {
        next_context = idle_thread->GetHostContext();
    }
    Common::Fiber::YieldTo(old_context, next_context);
    // When this thread wakes up again, it may have been rescheduled onto another
    // core, so re-query the current scheduler before finishing the switch.
    auto& next_scheduler = system.Kernel().CurrentScheduler();
    next_scheduler.SwitchContextStep2();
}
void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
    const u64 prev_switch_ticks = last_context_switch_time;
    const u64 most_recent_switch_ticks = system.CoreTiming().GetTicks();
    const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
    const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;

    if (thread != nullptr) {
@@ -510,6 +677,16 @@ void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
    last_context_switch_time = most_recent_switch_ticks;
}
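// Initialize() creates this core's idle thread: a kernel-side HLE thread that runs
// the CpuManager-provided idle loop whenever no guest thread is selected.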
void Scheduler::Initialize() {
    std::string name = "Idle Thread Id:" + std::to_string(core_id);
    std::function<void(void*)> init_func = system.GetCpuManager().GetIdleThreadStartFunc();
    void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
    ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
    auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
                                     nullptr, std::move(init_func), init_func_parameter);
    idle_thread = std::move(thread_res).Unwrap();
}
void Scheduler::Shutdown() {
    current_thread = nullptr;
    selected_thread = nullptr;