diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index 0acf70a0a..943ff996e 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -104,6 +104,7 @@ add_library(common STATIC
detached_tasks.h
bit_cast.h
bit_field.h
+ bit_set.h
bit_util.h
cityhash.cpp
cityhash.h
@@ -140,7 +141,6 @@ add_library(common STATIC
microprofile.h
microprofileui.h
misc.cpp
- multi_level_queue.h
page_table.cpp
page_table.h
param_package.cpp
diff --git a/src/common/bit_set.h b/src/common/bit_set.h
new file mode 100644
index 000000000..9235ad412
--- /dev/null
+++ b/src/common/bit_set.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include <array>
+#include <bit>
+
+#include "common/alignment.h"
+#include "common/bit_util.h"
+#include "common/common_types.h"
+
+namespace Common {
+
+namespace impl {
+
+template <typename Storage, size_t N>
+class BitSet {
+
+public:
+ constexpr BitSet() = default;
+
+ constexpr void SetBit(size_t i) {
+ this->words[i / FlagsPerWord] |= GetBitMask(i % FlagsPerWord);
+ }
+
+ constexpr void ClearBit(size_t i) {
+ this->words[i / FlagsPerWord] &= ~GetBitMask(i % FlagsPerWord);
+ }
+
+ constexpr size_t CountLeadingZero() const {
+ for (size_t i = 0; i < NumWords; i++) {
+ if (this->words[i]) {
+ return FlagsPerWord * i + CountLeadingZeroImpl(this->words[i]);
+ }
+ }
+ return FlagsPerWord * NumWords;
+ }
+
+ constexpr size_t GetNextSet(size_t n) const {
+ for (size_t i = (n + 1) / FlagsPerWord; i < NumWords; i++) {
+ Storage word = this->words[i];
+ if (!IsAligned(n + 1, FlagsPerWord)) {
+ word &= GetBitMask(n % FlagsPerWord) - 1;
+ }
+ if (word) {
+ return FlagsPerWord * i + CountLeadingZeroImpl(word);
+ }
+ }
+ return FlagsPerWord * NumWords;
+ }
+
+private:
+ static_assert(std::is_unsigned_v<Storage>);
+ static_assert(sizeof(Storage) <= sizeof(u64));
+
+ static constexpr size_t FlagsPerWord = BitSize<Storage>();
+ static constexpr size_t NumWords = AlignUp(N, FlagsPerWord) / FlagsPerWord;
+
+ static constexpr auto CountLeadingZeroImpl(Storage word) {
+ return std::countl_zero(static_cast<u64>(word)) -
+ (BitSize<u64>() - FlagsPerWord);
+ }
+
+ static constexpr Storage GetBitMask(size_t bit) {
+ return Storage(1) << (FlagsPerWord - 1 - bit);
+ }
+
+ std::array<Storage, NumWords> words{};
+};
+
+} // namespace impl
+
+template <size_t N>
+using BitSet8 = impl::BitSet<u8, N>;
+
+template <size_t N>
+using BitSet16 = impl::BitSet<u16, N>;
+
+template <size_t N>
+using BitSet32 = impl::BitSet<u32, N>;
+
+template <size_t N>
+using BitSet64 = impl::BitSet<u64, N>;
+
+} // namespace Common
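A minimal usage sketch of the new Common::BitSet (hypothetical caller, not part of this change): indices are counted from the most-significant bit of each word, so CountLeadingZero() returns the lowest set index and GetNextSet(n) returns the next set index after n.

```cpp
// Hypothetical caller, for illustration only; assumes the BitSet64 alias above.
#include <cstddef>

#include "common/bit_set.h"

std::size_t BitSetExample() {
    Common::BitSet64<64> bits{};
    bits.SetBit(3);
    bits.SetBit(10);
    const std::size_t first = bits.CountLeadingZero(); // 3: lowest set index
    return bits.GetNextSet(first);                     // 10: next set index after 3
}
```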
diff --git a/src/common/multi_level_queue.h b/src/common/multi_level_queue.h
deleted file mode 100644
index 4b305bf40..000000000
--- a/src/common/multi_level_queue.h
+++ /dev/null
@@ -1,345 +0,0 @@
-// Copyright 2019 TuxSH
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <array>
-#include <iterator>
-#include <list>
-#include <utility>
-
-#include "common/bit_util.h"
-#include "common/common_types.h"
-
-namespace Common {
-
-/**
- * A MultiLevelQueue is a type of priority queue which has the following characteristics:
- * - iteratable through each of its elements.
- * - back can be obtained.
- * - O(1) add, lookup (both front and back)
- * - discrete priorities and a max of 64 priorities (limited domain)
- * This type of priority queue is normaly used for managing threads within an scheduler
- */
-template <typename T, std::size_t Depth>
-class MultiLevelQueue {
-public:
- using value_type = T;
- using reference = value_type&;
- using const_reference = const value_type&;
- using pointer = value_type*;
- using const_pointer = const value_type*;
-
- using difference_type = typename std::pointer_traits<pointer>::difference_type;
- using size_type = std::size_t;
-
- template <bool is_constant>
- class iterator_impl {
- public:
- using iterator_category = std::bidirectional_iterator_tag;
- using value_type = T;
- using pointer = std::conditional_t<is_constant, const T*, T*>;
- using reference = std::conditional_t<is_constant, const T&, T&>;
- using difference_type = typename std::pointer_traits<pointer>::difference_type;
-
- friend bool operator==(const iterator_impl& lhs, const iterator_impl& rhs) {
- if (lhs.IsEnd() && rhs.IsEnd())
- return true;
- return std::tie(lhs.current_priority, lhs.it) == std::tie(rhs.current_priority, rhs.it);
- }
-
- friend bool operator!=(const iterator_impl& lhs, const iterator_impl& rhs) {
- return !operator==(lhs, rhs);
- }
-
- reference operator*() const {
- return *it;
- }
-
- pointer operator->() const {
- return it.operator->();
- }
-
- iterator_impl& operator++() {
- if (IsEnd()) {
- return *this;
- }
-
- ++it;
-
- if (it == GetEndItForPrio()) {
- u64 prios = mlq.used_priorities;
- prios &= ~((1ULL << (current_priority + 1)) - 1);
- if (prios == 0) {
- current_priority = static_cast<u32>(mlq.depth());
- } else {
- current_priority = CountTrailingZeroes64(prios);
- it = GetBeginItForPrio();
- }
- }
- return *this;
- }
-
- iterator_impl& operator--() {
- if (IsEnd()) {
- if (mlq.used_priorities != 0) {
- current_priority = 63 - CountLeadingZeroes64(mlq.used_priorities);
- it = GetEndItForPrio();
- --it;
- }
- } else if (it == GetBeginItForPrio()) {
- u64 prios = mlq.used_priorities;
- prios &= (1ULL << current_priority) - 1;
- if (prios != 0) {
- current_priority = CountTrailingZeroes64(prios);
- it = GetEndItForPrio();
- --it;
- }
- } else {
- --it;
- }
- return *this;
- }
-
- iterator_impl operator++(int) {
- const iterator_impl v{*this};
- ++(*this);
- return v;
- }
-
- iterator_impl operator--(int) {
- const iterator_impl v{*this};
- --(*this);
- return v;
- }
-
- // allow implicit const->non-const
- iterator_impl(const iterator_impl& other)
- : mlq(other.mlq), it(other.it), current_priority(other.current_priority) {}
-
- iterator_impl(const iterator_impl& other)
- : mlq(other.mlq), it(other.it), current_priority(other.current_priority) {}
-
- iterator_impl& operator=(const iterator_impl& other) {
- mlq = other.mlq;
- it = other.it;
- current_priority = other.current_priority;
- return *this;
- }
-
- friend class iterator_impl;
- iterator_impl() = default;
-
- private:
- friend class MultiLevelQueue;
- using container_ref =
- std::conditional_t<is_constant, const MultiLevelQueue&, MultiLevelQueue&>;
- using list_iterator = std::conditional_t<is_constant, typename std::list<T>::const_iterator,
- typename std::list<T>::iterator>;
-
- explicit iterator_impl(container_ref mlq, list_iterator it, u32 current_priority)
- : mlq(mlq), it(it), current_priority(current_priority) {}
- explicit iterator_impl(container_ref mlq, u32 current_priority)
- : mlq(mlq), it(), current_priority(current_priority) {}
-
- bool IsEnd() const {
- return current_priority == mlq.depth();
- }
-
- list_iterator GetBeginItForPrio() const {
- return mlq.levels[current_priority].begin();
- }
-
- list_iterator GetEndItForPrio() const {
- return mlq.levels[current_priority].end();
- }
-
- container_ref mlq;
- list_iterator it;
- u32 current_priority;
- };
-
- using iterator = iterator_impl<false>;
- using const_iterator = iterator_impl<true>;
-
- void add(const T& element, u32 priority, bool send_back = true) {
- if (send_back)
- levels[priority].push_back(element);
- else
- levels[priority].push_front(element);
- used_priorities |= 1ULL << priority;
- }
-
- void remove(const T& element, u32 priority) {
- auto it = ListIterateTo(levels[priority], element);
- if (it == levels[priority].end())
- return;
- levels[priority].erase(it);
- if (levels[priority].empty()) {
- used_priorities &= ~(1ULL << priority);
- }
- }
-
- void adjust(const T& element, u32 old_priority, u32 new_priority, bool adjust_front = false) {
- remove(element, old_priority);
- add(element, new_priority, !adjust_front);
- }
- void adjust(const_iterator it, u32 old_priority, u32 new_priority, bool adjust_front = false) {
- adjust(*it, old_priority, new_priority, adjust_front);
- }
-
- void transfer_to_front(const T& element, u32 priority, MultiLevelQueue& other) {
- ListSplice(other.levels[priority], other.levels[priority].begin(), levels[priority],
- ListIterateTo(levels[priority], element));
-
- other.used_priorities |= 1ULL << priority;
-
- if (levels[priority].empty()) {
- used_priorities &= ~(1ULL << priority);
- }
- }
-
- void transfer_to_front(const_iterator it, u32 priority, MultiLevelQueue& other) {
- transfer_to_front(*it, priority, other);
- }
-
- void transfer_to_back(const T& element, u32 priority, MultiLevelQueue& other) {
- ListSplice(other.levels[priority], other.levels[priority].end(), levels[priority],
- ListIterateTo(levels[priority], element));
-
- other.used_priorities |= 1ULL << priority;
-
- if (levels[priority].empty()) {
- used_priorities &= ~(1ULL << priority);
- }
- }
-
- void transfer_to_back(const_iterator it, u32 priority, MultiLevelQueue& other) {
- transfer_to_back(*it, priority, other);
- }
-
- void yield(u32 priority, std::size_t n = 1) {
- ListShiftForward(levels[priority], n);
- }
-
- [[nodiscard]] std::size_t depth() const {
- return Depth;
- }
-
- [[nodiscard]] std::size_t size(u32 priority) const {
- return levels[priority].size();
- }
-
- [[nodiscard]] std::size_t size() const {
- u64 priorities = used_priorities;
- std::size_t size = 0;
- while (priorities != 0) {
- const u64 current_priority = CountTrailingZeroes64(priorities);
- size += levels[current_priority].size();
- priorities &= ~(1ULL << current_priority);
- }
- return size;
- }
-
- [[nodiscard]] bool empty() const {
- return used_priorities == 0;
- }
-
- [[nodiscard]] bool empty(u32 priority) const {
- return (used_priorities & (1ULL << priority)) == 0;
- }
-
- [[nodiscard]] u32 highest_priority_set(u32 max_priority = 0) const {
- const u64 priorities =
- max_priority == 0 ? used_priorities : (used_priorities & ~((1ULL << max_priority) - 1));
- return priorities == 0 ? Depth : static_cast(CountTrailingZeroes64(priorities));
- }
-
- [[nodiscard]] u32 lowest_priority_set(u32 min_priority = Depth - 1) const {
- const u64 priorities = min_priority >= Depth - 1
- ? used_priorities
- : (used_priorities & ((1ULL << (min_priority + 1)) - 1));
- return priorities == 0 ? Depth : 63 - CountLeadingZeroes64(priorities);
- }
-
- [[nodiscard]] const_iterator cbegin(u32 max_prio = 0) const {
- const u32 priority = highest_priority_set(max_prio);
- return priority == Depth ? cend()
- : const_iterator{*this, levels[priority].cbegin(), priority};
- }
- [[nodiscard]] const_iterator begin(u32 max_prio = 0) const {
- return cbegin(max_prio);
- }
- [[nodiscard]] iterator begin(u32 max_prio = 0) {
- const u32 priority = highest_priority_set(max_prio);
- return priority == Depth ? end() : iterator{*this, levels[priority].begin(), priority};
- }
-
- [[nodiscard]] const_iterator cend(u32 min_prio = Depth - 1) const {
- return min_prio == Depth - 1 ? const_iterator{*this, Depth} : cbegin(min_prio + 1);
- }
- [[nodiscard]] const_iterator end(u32 min_prio = Depth - 1) const {
- return cend(min_prio);
- }
- [[nodiscard]] iterator end(u32 min_prio = Depth - 1) {
- return min_prio == Depth - 1 ? iterator{*this, Depth} : begin(min_prio + 1);
- }
-
- [[nodiscard]] T& front(u32 max_priority = 0) {
- const u32 priority = highest_priority_set(max_priority);
- return levels[priority == Depth ? 0 : priority].front();
- }
- [[nodiscard]] const T& front(u32 max_priority = 0) const {
- const u32 priority = highest_priority_set(max_priority);
- return levels[priority == Depth ? 0 : priority].front();
- }
-
- [[nodiscard]] T& back(u32 min_priority = Depth - 1) {
- const u32 priority = lowest_priority_set(min_priority); // intended
- return levels[priority == Depth ? 63 : priority].back();
- }
- [[nodiscard]] const T& back(u32 min_priority = Depth - 1) const {
- const u32 priority = lowest_priority_set(min_priority); // intended
- return levels[priority == Depth ? 63 : priority].back();
- }
-
- void clear() {
- used_priorities = 0;
- for (std::size_t i = 0; i < Depth; i++) {
- levels[i].clear();
- }
- }
-
-private:
- using const_list_iterator = typename std::list<T>::const_iterator;
-
- static void ListShiftForward(std::list<T>& list, const std::size_t shift = 1) {
- if (shift >= list.size()) {
- return;
- }
-
- const auto begin_range = list.begin();
- const auto end_range = std::next(begin_range, shift);
- list.splice(list.end(), list, begin_range, end_range);
- }
-
- static void ListSplice(std::list<T>& in_list, const_list_iterator position,
- std::list<T>& out_list, const_list_iterator element) {
- in_list.splice(position, out_list, element);
- }
-
- [[nodiscard]] static const_list_iterator ListIterateTo(const std::list<T>& list,
- const T& element) {
- auto it = list.cbegin();
- while (it != list.cend() && *it != element) {
- ++it;
- }
- return it;
- }
-
- std::array<std::list<T>, Depth> levels;
- u64 used_priorities = 0;
-};
-
-} // namespace Common
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 949748178..2dad18e4d 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -149,10 +149,19 @@ add_library(core STATIC
hle/kernel/code_set.cpp
hle/kernel/code_set.h
hle/kernel/errors.h
+ hle/kernel/global_scheduler_context.cpp
+ hle/kernel/global_scheduler_context.h
hle/kernel/handle_table.cpp
hle/kernel/handle_table.h
hle/kernel/hle_ipc.cpp
hle/kernel/hle_ipc.h
+ hle/kernel/k_affinity_mask.h
+ hle/kernel/k_priority_queue.h
+ hle/kernel/k_scheduler.cpp
+ hle/kernel/k_scheduler.h
+ hle/kernel/k_scheduler_lock.h
+ hle/kernel/k_scoped_lock.h
+ hle/kernel/k_scoped_scheduler_lock_and_sleep.h
hle/kernel/kernel.cpp
hle/kernel/kernel.h
hle/kernel/memory/address_space_info.cpp
@@ -187,8 +196,6 @@ add_library(core STATIC
hle/kernel/readable_event.h
hle/kernel/resource_limit.cpp
hle/kernel/resource_limit.h
- hle/kernel/scheduler.cpp
- hle/kernel/scheduler.h
hle/kernel/server_port.cpp
hle/kernel/server_port.h
hle/kernel/server_session.cpp
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index 193fd7d62..e9c74b1a6 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -294,6 +294,9 @@ void ARM_Dynarmic_32::InvalidateCacheRange(VAddr addr, std::size_t size) {
}
void ARM_Dynarmic_32::ClearExclusiveState() {
+ if (!jit) {
+ return;
+ }
jit->ClearExclusiveState();
}
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index 0f0585d0f..7a4eb88a2 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -15,8 +15,8 @@
#include "core/core.h"
#include "core/core_timing.h"
#include "core/hardware_properties.h"
+#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/svc.h"
#include "core/memory.h"
#include "core/settings.h"
@@ -330,6 +330,9 @@ void ARM_Dynarmic_64::InvalidateCacheRange(VAddr addr, std::size_t size) {
}
void ARM_Dynarmic_64::ClearExclusiveState() {
+ if (!jit) {
+ return;
+ }
jit->ClearExclusiveState();
}
diff --git a/src/core/core.cpp b/src/core/core.cpp
index 7e3c54618..0961c0819 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -27,10 +27,10 @@
#include "core/file_sys/vfs_real.h"
#include "core/hardware_interrupt_manager.h"
#include "core/hle/kernel/client_port.h"
+#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/service/am/applets/applets.h"
#include "core/hle/service/apm/controller.h"
@@ -507,14 +507,6 @@ std::size_t System::CurrentCoreIndex() const {
return core;
}
-Kernel::Scheduler& System::CurrentScheduler() {
- return impl->kernel.CurrentScheduler();
-}
-
-const Kernel::Scheduler& System::CurrentScheduler() const {
- return impl->kernel.CurrentScheduler();
-}
-
Kernel::PhysicalCore& System::CurrentPhysicalCore() {
return impl->kernel.CurrentPhysicalCore();
}
@@ -523,22 +515,14 @@ const Kernel::PhysicalCore& System::CurrentPhysicalCore() const {
return impl->kernel.CurrentPhysicalCore();
}
-Kernel::Scheduler& System::Scheduler(std::size_t core_index) {
- return impl->kernel.Scheduler(core_index);
-}
-
-const Kernel::Scheduler& System::Scheduler(std::size_t core_index) const {
- return impl->kernel.Scheduler(core_index);
+/// Gets the global scheduler
+Kernel::GlobalSchedulerContext& System::GlobalSchedulerContext() {
+ return impl->kernel.GlobalSchedulerContext();
}
/// Gets the global scheduler
-Kernel::GlobalScheduler& System::GlobalScheduler() {
- return impl->kernel.GlobalScheduler();
-}
-
-/// Gets the global scheduler
-const Kernel::GlobalScheduler& System::GlobalScheduler() const {
- return impl->kernel.GlobalScheduler();
+const Kernel::GlobalSchedulerContext& System::GlobalSchedulerContext() const {
+ return impl->kernel.GlobalSchedulerContext();
}
Kernel::Process* System::CurrentProcess() {
diff --git a/src/core/core.h b/src/core/core.h
index 29b8fb92a..579a774e4 100644
--- a/src/core/core.h
+++ b/src/core/core.h
@@ -26,11 +26,11 @@ class VfsFilesystem;
} // namespace FileSys
namespace Kernel {
-class GlobalScheduler;
+class GlobalSchedulerContext;
class KernelCore;
class PhysicalCore;
class Process;
-class Scheduler;
+class KScheduler;
} // namespace Kernel
namespace Loader {
@@ -213,12 +213,6 @@ public:
/// Gets the index of the currently running CPU core
[[nodiscard]] std::size_t CurrentCoreIndex() const;
- /// Gets the scheduler for the CPU core that is currently running
- [[nodiscard]] Kernel::Scheduler& CurrentScheduler();
-
- /// Gets the scheduler for the CPU core that is currently running
- [[nodiscard]] const Kernel::Scheduler& CurrentScheduler() const;
-
/// Gets the physical core for the CPU core that is currently running
[[nodiscard]] Kernel::PhysicalCore& CurrentPhysicalCore();
@@ -261,17 +255,11 @@ public:
/// Gets an immutable reference to the renderer.
[[nodiscard]] const VideoCore::RendererBase& Renderer() const;
- /// Gets the scheduler for the CPU core with the specified index
- [[nodiscard]] Kernel::Scheduler& Scheduler(std::size_t core_index);
-
- /// Gets the scheduler for the CPU core with the specified index
- [[nodiscard]] const Kernel::Scheduler& Scheduler(std::size_t core_index) const;
+ /// Gets the global scheduler
+ [[nodiscard]] Kernel::GlobalSchedulerContext& GlobalSchedulerContext();
/// Gets the global scheduler
- [[nodiscard]] Kernel::GlobalScheduler& GlobalScheduler();
-
- /// Gets the global scheduler
- [[nodiscard]] const Kernel::GlobalScheduler& GlobalScheduler() const;
+ [[nodiscard]] const Kernel::GlobalSchedulerContext& GlobalSchedulerContext() const;
/// Gets the manager for the guest device memory
[[nodiscard]] Core::DeviceMemory& DeviceMemory();
diff --git a/src/core/cpu_manager.cpp b/src/core/cpu_manager.cpp
index 0cff985e9..373395047 100644
--- a/src/core/cpu_manager.cpp
+++ b/src/core/cpu_manager.cpp
@@ -10,9 +10,9 @@
#include "core/core.h"
#include "core/core_timing.h"
#include "core/cpu_manager.h"
+#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h"
-#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/thread.h"
#include "video_core/gpu.h"
@@ -109,11 +109,8 @@ void* CpuManager::GetStartFuncParamater() {
void CpuManager::MultiCoreRunGuestThread() {
auto& kernel = system.Kernel();
- {
- auto& sched = kernel.CurrentScheduler();
- sched.OnThreadStart();
- }
- auto* thread = kernel.CurrentScheduler().GetCurrentThread();
+ kernel.CurrentScheduler()->OnThreadStart();
+ auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
auto& host_context = thread->GetHostContext();
host_context->SetRewindPoint(GuestRewindFunction, this);
MultiCoreRunGuestLoop();
@@ -130,8 +127,8 @@ void CpuManager::MultiCoreRunGuestLoop() {
physical_core = &kernel.CurrentPhysicalCore();
}
system.ExitDynarmicProfile();
- auto& scheduler = kernel.CurrentScheduler();
- scheduler.TryDoContextSwitch();
+ physical_core->ArmInterface().ClearExclusiveState();
+ kernel.CurrentScheduler()->RescheduleCurrentCore();
}
}
@@ -140,25 +137,21 @@ void CpuManager::MultiCoreRunIdleThread() {
while (true) {
auto& physical_core = kernel.CurrentPhysicalCore();
physical_core.Idle();
- auto& scheduler = kernel.CurrentScheduler();
- scheduler.TryDoContextSwitch();
+ kernel.CurrentScheduler()->RescheduleCurrentCore();
}
}
void CpuManager::MultiCoreRunSuspendThread() {
auto& kernel = system.Kernel();
- {
- auto& sched = kernel.CurrentScheduler();
- sched.OnThreadStart();
- }
+ kernel.CurrentScheduler()->OnThreadStart();
while (true) {
auto core = kernel.GetCurrentHostThreadID();
- auto& scheduler = kernel.CurrentScheduler();
+ auto& scheduler = *kernel.CurrentScheduler();
Kernel::Thread* current_thread = scheduler.GetCurrentThread();
Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[core].host_context);
ASSERT(scheduler.ContextSwitchPending());
ASSERT(core == kernel.GetCurrentHostThreadID());
- scheduler.TryDoContextSwitch();
+ scheduler.RescheduleCurrentCore();
}
}
@@ -206,11 +199,8 @@ void CpuManager::MultiCorePause(bool paused) {
void CpuManager::SingleCoreRunGuestThread() {
auto& kernel = system.Kernel();
- {
- auto& sched = kernel.CurrentScheduler();
- sched.OnThreadStart();
- }
- auto* thread = kernel.CurrentScheduler().GetCurrentThread();
+ kernel.CurrentScheduler()->OnThreadStart();
+ auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
auto& host_context = thread->GetHostContext();
host_context->SetRewindPoint(GuestRewindFunction, this);
SingleCoreRunGuestLoop();
@@ -218,7 +208,7 @@ void CpuManager::SingleCoreRunGuestThread() {
void CpuManager::SingleCoreRunGuestLoop() {
auto& kernel = system.Kernel();
- auto* thread = kernel.CurrentScheduler().GetCurrentThread();
+ auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
while (true) {
auto* physical_core = &kernel.CurrentPhysicalCore();
system.EnterDynarmicProfile();
@@ -230,9 +220,10 @@ void CpuManager::SingleCoreRunGuestLoop() {
thread->SetPhantomMode(true);
system.CoreTiming().Advance();
thread->SetPhantomMode(false);
+ physical_core->ArmInterface().ClearExclusiveState();
PreemptSingleCore();
auto& scheduler = kernel.Scheduler(current_core);
- scheduler.TryDoContextSwitch();
+ scheduler.RescheduleCurrentCore();
}
}
@@ -244,51 +235,53 @@ void CpuManager::SingleCoreRunIdleThread() {
system.CoreTiming().AddTicks(1000U);
idle_count++;
auto& scheduler = physical_core.Scheduler();
- scheduler.TryDoContextSwitch();
+ scheduler.RescheduleCurrentCore();
}
}
void CpuManager::SingleCoreRunSuspendThread() {
auto& kernel = system.Kernel();
- {
- auto& sched = kernel.CurrentScheduler();
- sched.OnThreadStart();
- }
+ kernel.CurrentScheduler()->OnThreadStart();
while (true) {
auto core = kernel.GetCurrentHostThreadID();
- auto& scheduler = kernel.CurrentScheduler();
+ auto& scheduler = *kernel.CurrentScheduler();
Kernel::Thread* current_thread = scheduler.GetCurrentThread();
Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[0].host_context);
ASSERT(scheduler.ContextSwitchPending());
ASSERT(core == kernel.GetCurrentHostThreadID());
- scheduler.TryDoContextSwitch();
+ scheduler.RescheduleCurrentCore();
}
}
void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
- std::size_t old_core = current_core;
- auto& scheduler = system.Kernel().Scheduler(old_core);
- Kernel::Thread* current_thread = scheduler.GetCurrentThread();
- if (idle_count >= 4 || from_running_enviroment) {
- if (!from_running_enviroment) {
- system.CoreTiming().Idle();
+ {
+ auto& scheduler = system.Kernel().Scheduler(current_core);
+ Kernel::Thread* current_thread = scheduler.GetCurrentThread();
+ if (idle_count >= 4 || from_running_enviroment) {
+ if (!from_running_enviroment) {
+ system.CoreTiming().Idle();
+ idle_count = 0;
+ }
+ current_thread->SetPhantomMode(true);
+ system.CoreTiming().Advance();
+ current_thread->SetPhantomMode(false);
+ }
+ current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES);
+ system.CoreTiming().ResetTicks();
+ scheduler.Unload(scheduler.GetCurrentThread());
+
+ auto& next_scheduler = system.Kernel().Scheduler(current_core);
+ Common::Fiber::YieldTo(current_thread->GetHostContext(), next_scheduler.ControlContext());
+ }
+
+ // May have changed scheduler
+ {
+ auto& scheduler = system.Kernel().Scheduler(current_core);
+ scheduler.Reload(scheduler.GetCurrentThread());
+ auto* current_thread2 = scheduler.GetCurrentThread();
+ if (!current_thread2->IsIdleThread()) {
idle_count = 0;
}
- current_thread->SetPhantomMode(true);
- system.CoreTiming().Advance();
- current_thread->SetPhantomMode(false);
- }
- current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES);
- system.CoreTiming().ResetTicks();
- scheduler.Unload();
- auto& next_scheduler = system.Kernel().Scheduler(current_core);
- Common::Fiber::YieldTo(current_thread->GetHostContext(), next_scheduler.ControlContext());
- /// May have changed scheduler
- auto& current_scheduler = system.Kernel().Scheduler(current_core);
- current_scheduler.Reload();
- auto* currrent_thread2 = current_scheduler.GetCurrentThread();
- if (!currrent_thread2->IsIdleThread()) {
- idle_count = 0;
}
}
@@ -369,8 +362,7 @@ void CpuManager::RunThread(std::size_t core) {
return;
}
- auto& scheduler = system.Kernel().CurrentScheduler();
- Kernel::Thread* current_thread = scheduler.GetCurrentThread();
+ auto current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
data.is_running = true;
Common::Fiber::YieldTo(data.host_context, current_thread->GetHostContext());
data.is_running = false;
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
index 048acd30e..20ffa7d47 100644
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ b/src/core/hle/kernel/address_arbiter.cpp
@@ -12,8 +12,9 @@
#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"
#include "core/hle/result.h"
@@ -58,7 +59,7 @@ ResultCode AddressArbiter::SignalToAddress(VAddr address, SignalType type, s32 v
}
ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
- SchedulerLock lock(system.Kernel());
+ KScopedSchedulerLock lock(system.Kernel());
const std::vector<std::shared_ptr<Thread>> waiting_threads =
GetThreadsWaitingOnAddress(address);
WakeThreads(waiting_threads, num_to_wake);
@@ -67,7 +68,7 @@ ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value,
s32 num_to_wake) {
- SchedulerLock lock(system.Kernel());
+ KScopedSchedulerLock lock(system.Kernel());
auto& memory = system.Memory();
// Ensure that we can write to the address.
@@ -92,7 +93,7 @@ ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32
ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
s32 num_to_wake) {
- SchedulerLock lock(system.Kernel());
+ KScopedSchedulerLock lock(system.Kernel());
auto& memory = system.Memory();
// Ensure that we can write to the address.
@@ -153,11 +154,11 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6
bool should_decrement) {
auto& memory = system.Memory();
auto& kernel = system.Kernel();
- Thread* current_thread = system.CurrentScheduler().GetCurrentThread();
+ Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
Handle event_handle = InvalidHandle;
{
- SchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
+ KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
if (current_thread->IsPendingTermination()) {
lock.CancelSleep();
@@ -210,7 +211,7 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6
}
{
- SchedulerLock lock(kernel);
+ KScopedSchedulerLock lock(kernel);
if (current_thread->IsWaitingForArbitration()) {
RemoveThread(SharedFrom(current_thread));
current_thread->WaitForArbitration(false);
@@ -223,11 +224,11 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6
ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) {
auto& memory = system.Memory();
auto& kernel = system.Kernel();
- Thread* current_thread = system.CurrentScheduler().GetCurrentThread();
+ Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
Handle event_handle = InvalidHandle;
{
- SchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
+ KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
if (current_thread->IsPendingTermination()) {
lock.CancelSleep();
@@ -265,7 +266,7 @@ ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 t
}
{
- SchedulerLock lock(kernel);
+ KScopedSchedulerLock lock(kernel);
if (current_thread->IsWaitingForArbitration()) {
RemoveThread(SharedFrom(current_thread));
current_thread->WaitForArbitration(false);
diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp
new file mode 100644
index 000000000..a133e8ed0
--- /dev/null
+++ b/src/core/hle/kernel/global_scheduler_context.cpp
@@ -0,0 +1,52 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <mutex>
+
+#include "common/assert.h"
+#include "core/core.h"
+#include "core/hle/kernel/global_scheduler_context.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/kernel.h"
+
+namespace Kernel {
+
+GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel)
+ : kernel{kernel}, scheduler_lock{kernel} {}
+
+GlobalSchedulerContext::~GlobalSchedulerContext() = default;
+
+void GlobalSchedulerContext::AddThread(std::shared_ptr<Thread> thread) {
+ std::scoped_lock lock{global_list_guard};
+ thread_list.push_back(std::move(thread));
+}
+
+void GlobalSchedulerContext::RemoveThread(std::shared_ptr<Thread> thread) {
+ std::scoped_lock lock{global_list_guard};
+ thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
+ thread_list.end());
+}
+
+void GlobalSchedulerContext::PreemptThreads() {
+ // The priority levels at which the global scheduler preempts threads every 10 ms. They are
+ // ordered from Core 0 to Core 3.
+ static constexpr std::array preemption_priorities{
+ 59,
+ 59,
+ 59,
+ 63,
+ };
+
+ ASSERT(IsLocked());
+ for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
+ const u32 priority = preemption_priorities[core_id];
+ kernel.Scheduler(core_id).RotateScheduledQueue(core_id, priority);
+ }
+}
+
+bool GlobalSchedulerContext::IsLocked() const {
+ return scheduler_lock.IsLockedByCurrentThread();
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/global_scheduler_context.h b/src/core/hle/kernel/global_scheduler_context.h
new file mode 100644
index 000000000..5c7b89290
--- /dev/null
+++ b/src/core/hle/kernel/global_scheduler_context.h
@@ -0,0 +1,81 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <atomic>
+#include <vector>
+
+#include "common/common_types.h"
+#include "common/spin_lock.h"
+#include "core/hardware_properties.h"
+#include "core/hle/kernel/k_priority_queue.h"
+#include "core/hle/kernel/k_scheduler_lock.h"
+#include "core/hle/kernel/thread.h"
+
+namespace Kernel {
+
+class KernelCore;
+class SchedulerLock;
+
+using KSchedulerPriorityQueue =
+ KPriorityQueue<Thread, Core::Hardware::NUM_CPU_CORES, THREADPRIO_LOWEST, THREADPRIO_HIGHEST>;
+constexpr s32 HighestCoreMigrationAllowedPriority = 2;
+
+class GlobalSchedulerContext final {
+ friend class KScheduler;
+
+public:
+ using LockType = KAbstractSchedulerLock<KScheduler>;
+
+ explicit GlobalSchedulerContext(KernelCore& kernel);
+ ~GlobalSchedulerContext();
+
+ /// Adds a new thread to the scheduler
+ void AddThread(std::shared_ptr<Thread> thread);
+
+ /// Removes a thread from the scheduler
+ void RemoveThread(std::shared_ptr<Thread> thread);
+
+ /// Returns a list of all threads managed by the scheduler
+ [[nodiscard]] const std::vector<std::shared_ptr<Thread>>& GetThreadList() const {
+ return thread_list;
+ }
+
+ /**
+ * Rotates the scheduling queues of threads at a preemption priority and then does
+ * some core rebalancing. Preemption priorities can be found in the array
+ * 'preemption_priorities'.
+ *
+ * @note This operation happens every 10ms.
+ */
+ void PreemptThreads();
+
+ /// Returns true if the global scheduler lock is acquired
+ bool IsLocked() const;
+
+ [[nodiscard]] LockType& SchedulerLock() {
+ return scheduler_lock;
+ }
+
+ [[nodiscard]] const LockType& SchedulerLock() const {
+ return scheduler_lock;
+ }
+
+private:
+ friend class KScopedSchedulerLock;
+ friend class KScopedSchedulerLockAndSleep;
+
+ KernelCore& kernel;
+
+ std::atomic_bool scheduler_update_needed{};
+ KSchedulerPriorityQueue priority_queue;
+ LockType scheduler_lock;
+
+ /// Lists all thread ids that aren't deleted/etc.
+ std::vector<std::shared_ptr<Thread>> thread_list;
+ Common::SpinLock global_list_guard{};
+};
+
+} // namespace Kernel
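For illustration, a minimal sketch (hypothetical caller, not part of this change) of the locking pattern the rest of this diff adopts: kernel routines take the scheduler lock through the KScopedSchedulerLock RAII wrapper from k_scheduler.h, which serializes against this GlobalSchedulerContext for the duration of the critical section.

```cpp
// Hypothetical caller, for illustration only; mirrors the usage visible in the
// address_arbiter.cpp hunks above.
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"

void WakeSomething(Kernel::KernelCore& kernel) {
    // Acquires GlobalSchedulerContext::SchedulerLock() for this scope.
    Kernel::KScopedSchedulerLock lock(kernel);
    // ... modify thread state / wake waiting threads while the lock is held ...
} // lock released here; a reschedule may be requested by the last holder
```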
diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp
index 3e745c18b..40988b0fd 100644
--- a/src/core/hle/kernel/handle_table.cpp
+++ b/src/core/hle/kernel/handle_table.cpp
@@ -8,9 +8,9 @@
#include "core/core.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/thread.h"
namespace Kernel {
@@ -105,7 +105,7 @@ bool HandleTable::IsValid(Handle handle) const {
std::shared_ptr