Merge pull request #6266 from bunnei/kautoobject-refactor

Kernel Rework: Migrate kernel objects to KAutoObject
master
bunnei 2021-05-07 23:30:17 +07:00 committed by GitHub
commit faa067f175
181 changed files with 4837 additions and 2856 deletions

@@ -108,6 +108,14 @@ __declspec(dllimport) void __stdcall DebugBreak(void);
     } \
     }
 
+#define YUZU_NON_COPYABLE(cls) \
+    cls(const cls&) = delete; \
+    cls& operator=(const cls&) = delete
+
+#define YUZU_NON_MOVEABLE(cls) \
+    cls(cls&&) = delete; \
+    cls& operator=(cls&&) = delete
+
 #define R_SUCCEEDED(res) (res.IsSuccess())
 
 /// Evaluates an expression that returns a result, and returns the result if it would fail.
@@ -128,4 +136,19 @@ namespace Common {
     return u32(a) | u32(b) << 8 | u32(c) << 16 | u32(d) << 24;
 }
 
+// std::size() does not support zero-size C arrays. We're fixing that.
+template <class C>
+constexpr auto Size(const C& c) -> decltype(c.size()) {
+    return std::size(c);
+}
+
+template <class C>
+constexpr std::size_t Size(const C& c) {
+    if constexpr (sizeof(C) == 0) {
+        return 0;
+    } else {
+        return std::size(c);
+    }
+}
+
 } // namespace Common
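For illustration only (not part of the change set above): the second Common::Size overload exists because std::size() has no valid overload for zero-length C arrays, which some compilers accept as an extension. A minimal sketch, assuming the helper lands in common/common_funcs.h as the hunks above suggest; the array name is hypothetical:

#include "common/common_funcs.h"

constexpr int filled[3] = {1, 2, 3};
static_assert(Common::Size(filled) == 3); // plain arrays fall through to std::size()

// A zero-length array (a GCC/Clang extension) has no std::size() overload; the
// sizeof(C) == 0 branch of the second Size() overload returns 0 for that case.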

@@ -509,7 +509,6 @@ private:
 private:
     static constexpr TypedStorage<Derived> DerivedStorage = {};
-    static_assert(GetParent(GetNode(GetPointer(DerivedStorage))) == GetPointer(DerivedStorage));
 };
 
 template <auto T, class Derived = impl::GetParentType<T>>

@@ -133,27 +133,27 @@ template <auto MemberPtr>
 using GetMemberType = typename GetMemberPointerTraits<decltype(MemberPtr)>::Member;
 
 template <auto MemberPtr, typename RealParentType = GetParentType<MemberPtr>>
-static inline std::ptrdiff_t OffsetOf = [] {
+constexpr std::ptrdiff_t OffsetOf() {
     using DeducedParentType = GetParentType<MemberPtr>;
     using MemberType = GetMemberType<MemberPtr>;
     static_assert(std::is_base_of<DeducedParentType, RealParentType>::value ||
                   std::is_same<RealParentType, DeducedParentType>::value);
     return OffsetOfCalculator<RealParentType, MemberType>::OffsetOf(MemberPtr);
-}();
+};
 
 } // namespace impl
 
 template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
 constexpr RealParentType& GetParentReference(impl::GetMemberType<MemberPtr>* member) {
-    std::ptrdiff_t Offset = impl::OffsetOf<MemberPtr, RealParentType>;
+    std::ptrdiff_t Offset = impl::OffsetOf<MemberPtr, RealParentType>();
     return *static_cast<RealParentType*>(
         static_cast<void*>(static_cast<uint8_t*>(static_cast<void*>(member)) - Offset));
 }
 
 template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
 constexpr RealParentType const& GetParentReference(impl::GetMemberType<MemberPtr> const* member) {
-    std::ptrdiff_t Offset = impl::OffsetOf<MemberPtr, RealParentType>;
+    std::ptrdiff_t Offset = impl::OffsetOf<MemberPtr, RealParentType>();
     return *static_cast<const RealParentType*>(static_cast<const void*>(
         static_cast<const uint8_t*>(static_cast<const void*>(member)) - Offset));
 }
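For illustration only: GetParentReference() recovers the object that contains a given member, using the member pointer to compute the byte offset; the change above merely turns impl::OffsetOf into a constexpr function, so callers now write OffsetOf<...>() with parentheses. A minimal sketch with hypothetical types, assuming this helper lives in common/parent_of_member.h under namespace Common as elsewhere in common/:

#include "common/parent_of_member.h"

struct Node { int value{}; };
struct Owner {
    int id{};
    Node node{};
};

void Example() {
    Owner owner{};
    Node* member = &owner.node;
    // Recover the enclosing Owner purely from the pointer to its 'node' member.
    Owner& recovered = Common::GetParentReference<&Owner::node>(member);
    // &recovered == &owner holds here.
    (void)recovered;
}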

@@ -144,31 +144,40 @@ add_library(core STATIC
     hle/kernel/board/nintendo/nx/k_system_control.cpp
     hle/kernel/board/nintendo/nx/k_system_control.h
     hle/kernel/board/nintendo/nx/secure_monitor.h
-    hle/kernel/client_port.cpp
-    hle/kernel/client_port.h
-    hle/kernel/client_session.cpp
-    hle/kernel/client_session.h
     hle/kernel/code_set.cpp
     hle/kernel/code_set.h
     hle/kernel/svc_results.h
     hle/kernel/global_scheduler_context.cpp
     hle/kernel/global_scheduler_context.h
-    hle/kernel/handle_table.cpp
-    hle/kernel/handle_table.h
     hle/kernel/hle_ipc.cpp
     hle/kernel/hle_ipc.h
+    hle/kernel/init/init_slab_setup.cpp
+    hle/kernel/init/init_slab_setup.h
     hle/kernel/k_address_arbiter.cpp
     hle/kernel/k_address_arbiter.h
     hle/kernel/k_address_space_info.cpp
     hle/kernel/k_address_space_info.h
+    hle/kernel/k_auto_object.cpp
+    hle/kernel/k_auto_object.h
+    hle/kernel/k_auto_object_container.cpp
+    hle/kernel/k_auto_object_container.h
     hle/kernel/k_affinity_mask.h
+    hle/kernel/k_class_token.cpp
+    hle/kernel/k_class_token.h
+    hle/kernel/k_client_port.cpp
+    hle/kernel/k_client_port.h
+    hle/kernel/k_client_session.cpp
+    hle/kernel/k_client_session.h
     hle/kernel/k_condition_variable.cpp
     hle/kernel/k_condition_variable.h
     hle/kernel/k_event.cpp
     hle/kernel/k_event.h
+    hle/kernel/k_handle_table.cpp
+    hle/kernel/k_handle_table.h
     hle/kernel/k_light_condition_variable.h
     hle/kernel/k_light_lock.cpp
     hle/kernel/k_light_lock.h
+    hle/kernel/k_linked_list.h
     hle/kernel/k_memory_block.h
     hle/kernel/k_memory_block_manager.cpp
     hle/kernel/k_memory_block_manager.h
@@ -185,7 +194,11 @@ add_library(core STATIC
     hle/kernel/k_page_linked_list.h
     hle/kernel/k_page_table.cpp
     hle/kernel/k_page_table.h
+    hle/kernel/k_port.cpp
+    hle/kernel/k_port.h
     hle/kernel/k_priority_queue.h
+    hle/kernel/k_process.cpp
+    hle/kernel/k_process.h
     hle/kernel/k_readable_event.cpp
     hle/kernel/k_readable_event.h
     hle/kernel/k_resource_limit.cpp
@@ -196,6 +209,12 @@ add_library(core STATIC
     hle/kernel/k_scoped_lock.h
     hle/kernel/k_scoped_resource_reservation.h
    hle/kernel/k_scoped_scheduler_lock_and_sleep.h
+    hle/kernel/k_server_port.cpp
+    hle/kernel/k_server_port.h
+    hle/kernel/k_server_session.cpp
+    hle/kernel/k_server_session.h
+    hle/kernel/k_session.cpp
+    hle/kernel/k_session.h
     hle/kernel/k_shared_memory.cpp
     hle/kernel/k_shared_memory.h
     hle/kernel/k_slab_heap.h
@@ -208,28 +227,21 @@ add_library(core STATIC
     hle/kernel/k_thread.h
     hle/kernel/k_thread_queue.h
     hle/kernel/k_trace.h
+    hle/kernel/k_transfer_memory.cpp
+    hle/kernel/k_transfer_memory.h
     hle/kernel/k_writable_event.cpp
     hle/kernel/k_writable_event.h
     hle/kernel/kernel.cpp
     hle/kernel/kernel.h
     hle/kernel/memory_types.h
-    hle/kernel/object.cpp
-    hle/kernel/object.h
     hle/kernel/physical_core.cpp
     hle/kernel/physical_core.h
     hle/kernel/physical_memory.h
-    hle/kernel/process.cpp
-    hle/kernel/process.h
     hle/kernel/process_capability.cpp
     hle/kernel/process_capability.h
-    hle/kernel/server_port.cpp
-    hle/kernel/server_port.h
-    hle/kernel/server_session.cpp
-    hle/kernel/server_session.h
     hle/kernel/service_thread.cpp
     hle/kernel/service_thread.h
-    hle/kernel/session.cpp
-    hle/kernel/session.h
+    hle/kernel/slab_helpers.h
     hle/kernel/svc.cpp
     hle/kernel/svc.h
     hle/kernel/svc_common.h
@@ -237,8 +249,6 @@ add_library(core STATIC
     hle/kernel/svc_wrap.h
     hle/kernel/time_manager.cpp
     hle/kernel/time_manager.h
-    hle/kernel/transfer_memory.cpp
-    hle/kernel/transfer_memory.h
     hle/lock.cpp
     hle/lock.h
     hle/result.h

@@ -16,8 +16,8 @@
 #include "core/core.h"
 #include "core/core_timing.h"
 #include "core/hardware_properties.h"
+#include "core/hle/kernel/k_process.h"
 #include "core/hle/kernel/k_scheduler.h"
-#include "core/hle/kernel/process.h"
 #include "core/hle/kernel/svc.h"
 #include "core/memory.h"

@@ -27,12 +27,12 @@
 #include "core/file_sys/vfs_concat.h"
 #include "core/file_sys/vfs_real.h"
 #include "core/hardware_interrupt_manager.h"
-#include "core/hle/kernel/client_port.h"
+#include "core/hle/kernel/k_client_port.h"
+#include "core/hle/kernel/k_process.h"
 #include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/physical_core.h"
-#include "core/hle/kernel/process.h"
 #include "core/hle/service/am/applets/applets.h"
 #include "core/hle/service/apm/controller.h"
 #include "core/hle/service/filesystem/filesystem.h"
@@ -166,9 +166,9 @@ struct System::Impl {
         cpu_manager.SetAsyncGpu(is_async_gpu);
         core_timing.SetMulticore(is_multicore);
 
+        core_timing.Initialize([&system]() { system.RegisterHostThread(); });
         kernel.Initialize();
         cpu_manager.Initialize();
-        core_timing.Initialize([&system]() { system.RegisterHostThread(); });
 
         const auto current_time = std::chrono::duration_cast<std::chrono::seconds>(
             std::chrono::system_clock::now().time_since_epoch());
@@ -233,8 +233,11 @@ struct System::Impl {
         }
         telemetry_session->AddInitialInfo(*app_loader, fs_controller, *content_provider);
 
-        auto main_process =
-            Kernel::Process::Create(system, "main", Kernel::Process::ProcessType::Userland);
+        auto main_process = Kernel::KProcess::Create(system.Kernel());
+        ASSERT(Kernel::KProcess::Initialize(main_process, system, "main",
+                                            Kernel::KProcess::ProcessType::Userland)
+                   .IsSuccess());
+        main_process->Open();
         const auto [load_result, load_parameters] = app_loader->Load(*main_process, system);
         if (load_result != Loader::ResultStatus::Success) {
             LOG_CRITICAL(Core, "Failed to load ROM (Error {})!", load_result);
@@ -244,7 +247,7 @@ struct System::Impl {
                 static_cast<u32>(load_result));
         }
         AddGlueRegistrationForProcess(*app_loader, *main_process);
-        kernel.MakeCurrentProcess(main_process.get());
+        kernel.MakeCurrentProcess(main_process);
         kernel.InitializeCores();
 
         // Initialize cheat engine
@@ -311,6 +314,7 @@ struct System::Impl {
         gpu_core.reset();
         perf_stats.reset();
         kernel.Shutdown();
+        memory.Reset();
         applet_manager.ClearAll();
 
         LOG_DEBUG(Core, "Shutdown OK");
@@ -322,7 +326,7 @@ struct System::Impl {
         return app_loader->ReadTitle(out);
     }
 
-    void AddGlueRegistrationForProcess(Loader::AppLoader& loader, Kernel::Process& process) {
+    void AddGlueRegistrationForProcess(Loader::AppLoader& loader, Kernel::KProcess& process) {
        std::vector<u8> nacp_data;
        FileSys::NACP nacp;
        if (loader.ReadControlData(nacp) == Loader::ResultStatus::Success) {
@@ -513,7 +517,7 @@ const Kernel::GlobalSchedulerContext& System::GlobalSchedulerContext() const {
     return impl->kernel.GlobalSchedulerContext();
 }
 
-Kernel::Process* System::CurrentProcess() {
+Kernel::KProcess* System::CurrentProcess() {
     return impl->kernel.CurrentProcess();
 }
@@ -525,7 +529,7 @@ const Core::DeviceMemory& System::DeviceMemory() const {
     return *impl->device_memory;
 }
 
-const Kernel::Process* System::CurrentProcess() const {
+const Kernel::KProcess* System::CurrentProcess() const {
     return impl->kernel.CurrentProcess();
 }

@@ -12,7 +12,6 @@
 #include "common/common_types.h"
 #include "core/file_sys/vfs_types.h"
-#include "core/hle/kernel/object.h"
 
 namespace Core::Frontend {
 class EmuWindow;
@@ -29,7 +28,7 @@ namespace Kernel {
 class GlobalSchedulerContext;
 class KernelCore;
 class PhysicalCore;
-class Process;
+class KProcess;
 class KScheduler;
 } // namespace Kernel
@@ -264,10 +263,10 @@ public:
     [[nodiscard]] const Core::DeviceMemory& DeviceMemory() const;
 
     /// Provides a pointer to the current process
-    [[nodiscard]] Kernel::Process* CurrentProcess();
+    [[nodiscard]] Kernel::KProcess* CurrentProcess();
 
     /// Provides a constant pointer to the current process.
-    [[nodiscard]] const Kernel::Process* CurrentProcess() const;
+    [[nodiscard]] const Kernel::KProcess* CurrentProcess() const;
 
     /// Provides a reference to the core timing instance.
     [[nodiscard]] Timing::CoreTiming& CoreTiming();

@@ -13,7 +13,7 @@
 #include "core/file_sys/patch_manager.h"
 #include "core/file_sys/registered_cache.h"
 #include "core/file_sys/romfs_factory.h"
-#include "core/hle/kernel/process.h"
+#include "core/hle/kernel/k_process.h"
 #include "core/hle/service/filesystem/filesystem.h"
 #include "core/loader/loader.h"

@@ -9,7 +9,7 @@
 #include "core/core.h"
 #include "core/file_sys/savedata_factory.h"
 #include "core/file_sys/vfs.h"
-#include "core/hle/kernel/process.h"
+#include "core/hle/kernel/k_process.h"
 
 namespace FileSys {

@@ -13,12 +13,9 @@
 #include "common/assert.h"
 #include "common/common_types.h"
 #include "core/hle/ipc.h"
-#include "core/hle/kernel/client_port.h"
-#include "core/hle/kernel/client_session.h"
 #include "core/hle/kernel/hle_ipc.h"
-#include "core/hle/kernel/object.h"
-#include "core/hle/kernel/server_session.h"
-#include "core/hle/kernel/session.h"
+#include "core/hle/kernel/k_client_port.h"
+#include "core/hle/kernel/k_session.h"
 #include "core/hle/result.h"
 
 namespace IPC {
@@ -137,9 +134,11 @@ public:
         if (context->Session()->IsDomain()) {
             context->AddDomainObject(std::move(iface));
         } else {
-            auto [client, server] = Kernel::Session::Create(kernel, iface->GetServiceName());
-            context->AddMoveObject(std::move(client));
-            iface->ClientConnected(std::move(server));
+            auto* session = Kernel::KSession::Create(kernel);
+            session->Initialize(nullptr, iface->GetServiceName());
+
+            context->AddMoveObject(&session->GetClientSession());
+            iface->ClientConnected(&session->GetServerSession());
         }
     }
@@ -215,10 +214,16 @@ public:
     void PushRaw(const T& value);
 
     template <typename... O>
-    void PushMoveObjects(std::shared_ptr<O>... pointers);
+    void PushMoveObjects(O*... pointers);
 
     template <typename... O>
-    void PushCopyObjects(std::shared_ptr<O>... pointers);
+    void PushMoveObjects(O&... pointers);
+
+    template <typename... O>
+    void PushCopyObjects(O*... pointers);
+
+    template <typename... O>
+    void PushCopyObjects(O&... pointers);
 
 private:
     u32 normal_params_size{};
@@ -301,18 +306,34 @@ void ResponseBuilder::Push(const First& first_value, const Other&... other_value
 }
 
 template <typename... O>
-inline void ResponseBuilder::PushCopyObjects(std::shared_ptr<O>... pointers) {
+inline void ResponseBuilder::PushCopyObjects(O*... pointers) {
     auto objects = {pointers...};
     for (auto& object : objects) {
-        context->AddCopyObject(std::move(object));
+        context->AddCopyObject(object);
     }
 }
 
 template <typename... O>
-inline void ResponseBuilder::PushMoveObjects(std::shared_ptr<O>... pointers) {
+inline void ResponseBuilder::PushCopyObjects(O&... pointers) {
+    auto objects = {&pointers...};
+    for (auto& object : objects) {
+        context->AddCopyObject(object);
+    }
+}
+
+template <typename... O>
+inline void ResponseBuilder::PushMoveObjects(O*... pointers) {
     auto objects = {pointers...};
     for (auto& object : objects) {
-        context->AddMoveObject(std::move(object));
+        context->AddMoveObject(object);
+    }
+}
+
+template <typename... O>
+inline void ResponseBuilder::PushMoveObjects(O&... pointers) {
+    auto objects = {&pointers...};
+    for (auto& object : objects) {
+        context->AddMoveObject(object);
     }
 }
@@ -359,12 +380,6 @@ public:
     template <typename T>
     T PopRaw();
 
-    template <typename T>
-    std::shared_ptr<T> GetMoveObject(std::size_t index);
-
-    template <typename T>
-    std::shared_ptr<T> GetCopyObject(std::size_t index);
-
     template <class T>
     std::shared_ptr<T> PopIpcInterface() {
         ASSERT(context->Session()->IsDomain());
@@ -469,14 +484,4 @@ void RequestParser::Pop(First& first_value, Other&... other_values) {
     Pop(other_values...);
 }
 
-template <typename T>
-std::shared_ptr<T> RequestParser::GetMoveObject(std::size_t index) {
-    return context->GetMoveObject<T>(index);
-}
-
-template <typename T>
-std::shared_ptr<T> RequestParser::GetCopyObject(std::size_t index) {
-    return context->GetCopyObject<T>(index);
-}
-
 } // namespace IPC
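For illustration only: with the overloads above, response builders hand kernel objects back as raw pointers or references rather than shared_ptr. A hypothetical handler, assuming the usual ResponseBuilder calling convention (the parameter counts here are illustrative):

void Respond(Kernel::HLERequestContext& ctx, Kernel::KSession* session) {
    IPC::ResponseBuilder rb{ctx, 2, 0, 1};
    rb.Push(RESULT_SUCCESS);
    rb.PushMoveObjects(session->GetClientSession());   // reference overload
    // equivalently: rb.PushMoveObjects(&session->GetClientSession());
}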

@ -1,47 +0,0 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/server_port.h"
#include "core/hle/kernel/session.h"
#include "core/hle/kernel/svc_results.h"
namespace Kernel {
ClientPort::ClientPort(KernelCore& kernel) : Object{kernel} {}
ClientPort::~ClientPort() = default;
std::shared_ptr<ServerPort> ClientPort::GetServerPort() const {
return server_port;
}
ResultVal<std::shared_ptr<ClientSession>> ClientPort::Connect() {
if (active_sessions >= max_sessions) {
return ResultMaxConnectionsReached;
}
active_sessions++;
auto [client, server] = Kernel::Session::Create(kernel, name);
if (server_port->HasHLEHandler()) {
server_port->GetHLEHandler()->ClientConnected(std::move(server));
} else {
server_port->AppendPendingSession(std::move(server));
}
return MakeResult(std::move(client));
}
void ClientPort::ConnectionClosed() {
if (active_sessions == 0) {
return;
}
--active_sessions;
}
} // namespace Kernel

@ -1,63 +0,0 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <string>
#include "common/common_types.h"
#include "core/hle/kernel/object.h"
#include "core/hle/result.h"
namespace Kernel {
class ClientSession;
class KernelCore;
class ServerPort;
class ClientPort final : public Object {
public:
explicit ClientPort(KernelCore& kernel);
~ClientPort() override;
friend class ServerPort;
std::string GetTypeName() const override {
return "ClientPort";
}
std::string GetName() const override {
return name;
}
static constexpr HandleType HANDLE_TYPE = HandleType::ClientPort;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
std::shared_ptr<ServerPort> GetServerPort() const;
/**
* Creates a new Session pair, adds the created ServerSession to the associated ServerPort's
* list of pending sessions, and signals the ServerPort, causing any threads
* waiting on it to awake.
* @returns ClientSession The client endpoint of the created Session pair, or error code.
*/
ResultVal<std::shared_ptr<ClientSession>> Connect();
/**
* Signifies that a previously active connection has been closed,
* decreasing the total number of active connections to this port.
*/
void ConnectionClosed();
void Finalize() override {}
private:
std::shared_ptr<ServerPort> server_port; ///< ServerPort associated with this client port.
u32 max_sessions = 0; ///< Maximum number of simultaneous sessions the port can have
u32 active_sessions = 0; ///< Number of currently open sessions to this port
std::string name; ///< Name of client port (optional)
};
} // namespace Kernel

@ -1,53 +0,0 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"
#include "core/hle/kernel/svc_results.h"
#include "core/hle/result.h"
namespace Kernel {
ClientSession::ClientSession(KernelCore& kernel) : KSynchronizationObject{kernel} {}
ClientSession::~ClientSession() {
// This destructor will be called automatically when the last ClientSession handle is closed by
// the emulated application.
if (parent->Server()) {
parent->Server()->ClientDisconnected();
}
}
bool ClientSession::IsSignaled() const {
UNIMPLEMENTED();
return true;
}
ResultVal<std::shared_ptr<ClientSession>> ClientSession::Create(KernelCore& kernel,
std::shared_ptr<Session> parent,
std::string name) {
std::shared_ptr<ClientSession> client_session{std::make_shared<ClientSession>(kernel)};
client_session->name = std::move(name);
client_session->parent = std::move(parent);
return MakeResult(std::move(client_session));
}
ResultCode ClientSession::SendSyncRequest(std::shared_ptr<KThread> thread,
Core::Memory::Memory& memory,
Core::Timing::CoreTiming& core_timing) {
// Keep ServerSession alive until we're done working with it.
if (!parent->Server()) {
return ResultSessionClosedByRemote;
}
// Signal the server session that new data is available
return parent->Server()->HandleSyncRequest(std::move(thread), memory, core_timing);
}
} // namespace Kernel

@ -1,68 +0,0 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <string>
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/result.h"
union ResultCode;
namespace Core::Memory {
class Memory;
}
namespace Core::Timing {
class CoreTiming;
}
namespace Kernel {
class KernelCore;
class Session;
class KThread;
class ClientSession final : public KSynchronizationObject {
public:
explicit ClientSession(KernelCore& kernel);
~ClientSession() override;
friend class Session;
std::string GetTypeName() const override {
return "ClientSession";
}
std::string GetName() const override {
return name;
}
static constexpr HandleType HANDLE_TYPE = HandleType::ClientSession;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
ResultCode SendSyncRequest(std::shared_ptr<KThread> thread, Core::Memory::Memory& memory,
Core::Timing::CoreTiming& core_timing);
bool IsSignaled() const override;
void Finalize() override {}
private:
static ResultVal<std::shared_ptr<ClientSession>> Create(KernelCore& kernel,
std::shared_ptr<Session> parent,
std::string name = "Unknown");
/// The parent session, which links to the server endpoint.
std::shared_ptr<Session> parent;
/// Name of the client session (optional)
std::string name;
};
} // namespace Kernel

@@ -17,12 +17,12 @@ GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel)
 
 GlobalSchedulerContext::~GlobalSchedulerContext() = default;
 
-void GlobalSchedulerContext::AddThread(std::shared_ptr<KThread> thread) {
+void GlobalSchedulerContext::AddThread(KThread* thread) {
     std::scoped_lock lock{global_list_guard};
-    thread_list.push_back(std::move(thread));
+    thread_list.push_back(thread);
 }
 
-void GlobalSchedulerContext::RemoveThread(std::shared_ptr<KThread> thread) {
+void GlobalSchedulerContext::RemoveThread(KThread* thread) {
     std::scoped_lock lock{global_list_guard};
     thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
                       thread_list.end());

@@ -38,13 +38,13 @@ public:
     ~GlobalSchedulerContext();
 
     /// Adds a new thread to the scheduler
-    void AddThread(std::shared_ptr<KThread> thread);
+    void AddThread(KThread* thread);
 
     /// Removes a thread from the scheduler
-    void RemoveThread(std::shared_ptr<KThread> thread);
+    void RemoveThread(KThread* thread);
 
     /// Returns a list of all threads managed by the scheduler
-    [[nodiscard]] const std::vector<std::shared_ptr<KThread>>& GetThreadList() const {
+    [[nodiscard]] const std::vector<KThread*>& GetThreadList() const {
         return thread_list;
     }
@@ -79,7 +79,7 @@ private:
     LockType scheduler_lock;
 
     /// Lists all thread ids that aren't deleted/etc.
-    std::vector<std::shared_ptr<KThread>> thread_list;
+    std::vector<KThread*> thread_list;
 
     Common::SpinLock global_list_guard{};
 };

@ -1,131 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <utility>
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/svc_results.h"
namespace Kernel {
namespace {
constexpr u16 GetSlot(Handle handle) {
return static_cast<u16>(handle >> 15);
}
constexpr u16 GetGeneration(Handle handle) {
return static_cast<u16>(handle & 0x7FFF);
}
} // Anonymous namespace
HandleTable::HandleTable(KernelCore& kernel) : kernel{kernel} {
Clear();
}
HandleTable::~HandleTable() = default;
ResultCode HandleTable::SetSize(s32 handle_table_size) {
if (static_cast<u32>(handle_table_size) > MAX_COUNT) {
LOG_ERROR(Kernel, "Handle table size {} is greater than {}", handle_table_size, MAX_COUNT);
return ResultOutOfMemory;
}
// Values less than or equal to zero indicate to use the maximum allowable
// size for the handle table in the actual kernel, so we ignore the given
// value in that case, since we assume this by default unless this function
// is called.
if (handle_table_size > 0) {
table_size = static_cast<u16>(handle_table_size);
}
return RESULT_SUCCESS;
}
ResultVal<Handle> HandleTable::Create(std::shared_ptr<Object> obj) {
DEBUG_ASSERT(obj != nullptr);
const u16 slot = next_free_slot;
if (slot >= table_size) {
LOG_ERROR(Kernel, "Unable to allocate Handle, too many slots in use.");
return ResultHandleTableFull;
}
next_free_slot = generations[slot];
const u16 generation = next_generation++;
// Overflow count so it fits in the 15 bits dedicated to the generation in the handle.
// Horizon OS uses zero to represent an invalid handle, so skip to 1.
if (next_generation >= (1 << 15)) {
next_generation = 1;
}
generations[slot] = generation;
objects[slot] = std::move(obj);
Handle handle = generation | (slot << 15);
return MakeResult<Handle>(handle);
}
ResultVal<Handle> HandleTable::Duplicate(Handle handle) {
std::shared_ptr<Object> object = GetGeneric(handle);
if (object == nullptr) {
LOG_ERROR(Kernel, "Tried to duplicate invalid handle: {:08X}", handle);
return ResultInvalidHandle;
}
return Create(std::move(object));
}
ResultCode HandleTable::Close(Handle handle) {
if (!IsValid(handle)) {
LOG_ERROR(Kernel, "Handle is not valid! handle={:08X}", handle);
return ResultInvalidHandle;
}
const u16 slot = GetSlot(handle);
if (objects[slot].use_count() == 1) {
objects[slot]->Finalize();
}
objects[slot] = nullptr;
generations[slot] = next_free_slot;
next_free_slot = slot;
return RESULT_SUCCESS;
}
bool HandleTable::IsValid(Handle handle) const {
const std::size_t slot = GetSlot(handle);
const u16 generation = GetGeneration(handle);
return slot < table_size && objects[slot] != nullptr && generations[slot] == generation;
}
std::shared_ptr<Object> HandleTable::GetGeneric(Handle handle) const {
if (handle == CurrentThread) {
return SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
} else if (handle == CurrentProcess) {
return SharedFrom(kernel.CurrentProcess());
}
if (!IsValid(handle)) {
return nullptr;
}
return objects[GetSlot(handle)];
}
void HandleTable::Clear() {
for (u16 i = 0; i < table_size; ++i) {
generations[i] = static_cast<u16>(i + 1);
objects[i] = nullptr;
}
next_free_slot = 0;
}
} // namespace Kernel

@ -1,144 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include <cstddef>
#include <memory>
#include "common/common_types.h"
#include "core/hle/kernel/object.h"
#include "core/hle/result.h"
namespace Kernel {
class KernelCore;
enum KernelHandle : Handle {
InvalidHandle = 0,
CurrentThread = 0xFFFF8000,
CurrentProcess = 0xFFFF8001,
};
/**
* This class allows the creation of Handles, which are references to objects that can be tested
* for validity and looked up. Here they are used to pass references to kernel objects to/from the
* emulated process. it has been designed so that it follows the same handle format and has
* approximately the same restrictions as the handle manager in the CTR-OS.
*
* Handles contain two sub-fields: a slot index (bits 31:15) and a generation value (bits 14:0).
* The slot index is used to index into the arrays in this class to access the data corresponding
* to the Handle.
*
* To prevent accidental use of a freed Handle whose slot has already been reused, a global counter
* is kept and incremented every time a Handle is created. This is the Handle's "generation". The
* value of the counter is stored into the Handle as well as in the handle table (in the
* "generations" array). When looking up a handle, the Handle's generation must match with the
* value stored on the class, otherwise the Handle is considered invalid.
*
* To find free slots when allocating a Handle without needing to scan the entire object array, the
* generations field of unallocated slots is re-purposed as a linked list of indices to free slots.
* When a Handle is created, an index is popped off the list and used for the new Handle. When it
* is destroyed, it is again pushed onto the list to be re-used by the next allocation. It is
* likely that this allocation strategy differs from the one used in CTR-OS, but this hasn't been
* verified and isn't likely to cause any problems.
*/
class HandleTable final : NonCopyable {
public:
/// This is the maximum limit of handles allowed per process in Horizon
static constexpr std::size_t MAX_COUNT = 1024;
explicit HandleTable(KernelCore& kernel);
~HandleTable();
/**
* Sets the number of handles that may be in use at one time
* for this handle table.
*
* @param handle_table_size The desired size to limit the handle table to.
*
* @returns an error code indicating if initialization was successful.
* If initialization was not successful, then ERR_OUT_OF_MEMORY
* will be returned.
*
* @pre handle_table_size must be within the range [0, 1024]
*/
ResultCode SetSize(s32 handle_table_size);
/**
* Allocates a handle for the given object.
* @return The created Handle or one of the following errors:
* - `ERR_HANDLE_TABLE_FULL`: the maximum number of handles has been exceeded.
*/
ResultVal<Handle> Create(std::shared_ptr<Object> obj);
/**
* Returns a new handle that points to the same object as the passed in handle.
* @return The duplicated Handle or one of the following errors:
* - `ERR_INVALID_HANDLE`: an invalid handle was passed in.
* - Any errors returned by `Create()`.
*/
ResultVal<Handle> Duplicate(Handle handle);
/**
* Closes a handle, removing it from the table and decreasing the object's ref-count.
* @return `RESULT_SUCCESS` or one of the following errors:
* - `ERR_INVALID_HANDLE`: an invalid handle was passed in.
*/
ResultCode Close(Handle handle);
/// Checks if a handle is valid and points to an existing object.
bool IsValid(Handle handle) const;
/**
* Looks up a handle.
* @return Pointer to the looked-up object, or `nullptr` if the handle is not valid.
*/
std::shared_ptr<Object> GetGeneric(Handle handle) const;
/**
* Looks up a handle while verifying its type.
* @return Pointer to the looked-up object, or `nullptr` if the handle is not valid or its
* type differs from the requested one.
*/
template <class T>
std::shared_ptr<T> Get(Handle handle) const {
return DynamicObjectCast<T>(GetGeneric(handle));
}
/// Closes all handles held in this table.
void Clear();
private:
/// Stores the Object referenced by the handle or null if the slot is empty.
std::array<std::shared_ptr<Object>, MAX_COUNT> objects;
/**
* The value of `next_generation` when the handle was created, used to check for validity. For
* empty slots, contains the index of the next free slot in the list.
*/
std::array<u16, MAX_COUNT> generations;
/**
* The limited size of the handle table. This can be specified by process
* capabilities in order to restrict the overall number of handles that
* can be created in a process instance
*/
u16 table_size = static_cast<u16>(MAX_COUNT);
/**
* Global counter of the number of created handles. Stored in `generations` when a handle is
* created, and wraps around to 1 when it hits 0x8000.
*/
u16 next_generation = 1;
/// Head of the free slots linked list.
u16 next_free_slot = 0;
/// Underlying kernel instance that this handle table operates under.
KernelCore& kernel;
};
} // namespace Kernel
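For reference, the handle layout described in the removed header above packs the slot index into bits 31:15 and the generation into bits 14:0, which is what GetSlot()/GetGeneration() in the removed .cpp undo. A tiny sketch with a made-up helper and values, using the project's Handle/u16 aliases from common_types.h:

constexpr Handle Encode(u16 slot, u16 generation) {
    return static_cast<Handle>(generation) | (static_cast<Handle>(slot) << 15);
}
static_assert(Encode(3, 0x0001) == 0x00018001); // slot 3, generation 1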

@@ -14,17 +14,16 @@
 #include "common/common_types.h"
 #include "common/logging/log.h"
 #include "core/hle/ipc_helpers.h"
-#include "core/hle/kernel/handle_table.h"
 #include "core/hle/kernel/hle_ipc.h"
+#include "core/hle/kernel/k_handle_table.h"
+#include "core/hle/kernel/k_process.h"
 #include "core/hle/kernel/k_readable_event.h"
 #include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
+#include "core/hle/kernel/k_server_session.h"
 #include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/k_writable_event.h"
 #include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/object.h"
-#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/server_session.h"
 #include "core/hle/kernel/svc_results.h"
 #include "core/hle/kernel/time_manager.h"
 #include "core/memory.h"
@@ -35,28 +34,23 @@ SessionRequestHandler::SessionRequestHandler() = default;
 SessionRequestHandler::~SessionRequestHandler() = default;
 
-void SessionRequestHandler::ClientConnected(std::shared_ptr<ServerSession> server_session) {
-    server_session->SetHleHandler(shared_from_this());
-    connected_sessions.push_back(std::move(server_session));
+void SessionRequestHandler::ClientConnected(KServerSession* session) {
+    session->SetHleHandler(shared_from_this());
 }
 
-void SessionRequestHandler::ClientDisconnected(
-    const std::shared_ptr<ServerSession>& server_session) {
-    server_session->SetHleHandler(nullptr);
-    boost::range::remove_erase(connected_sessions, server_session);
+void SessionRequestHandler::ClientDisconnected(KServerSession* session) {
+    session->SetHleHandler(nullptr);
 }
 
-HLERequestContext::HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory,
-                                     std::shared_ptr<ServerSession> server_session,
-                                     std::shared_ptr<KThread> thread)
-    : server_session(std::move(server_session)),
-      thread(std::move(thread)), kernel{kernel}, memory{memory} {
+HLERequestContext::HLERequestContext(KernelCore& kernel_, Core::Memory::Memory& memory_,
+                                     KServerSession* server_session_, KThread* thread_)
    : server_session(server_session_), thread(thread_), kernel{kernel_}, memory{memory_} {
     cmd_buf[0] = 0;
 }
 
 HLERequestContext::~HLERequestContext() = default;
 
-void HLERequestContext::ParseCommandBuffer(const HandleTable& handle_table, u32_le* src_cmdbuf,
+void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32_le* src_cmdbuf,
                                            bool incoming) {
     IPC::RequestParser rp(src_cmdbuf);
     command_header = rp.PopRaw<IPC::CommandHeader>();
@@ -77,12 +71,12 @@ void HLERequestContext::ParseCommandBuffer(const HandleTable& handle_table, u32_
         for (u32 handle = 0; handle < handle_descriptor_header->num_handles_to_copy; ++handle) {
             const u32 copy_handle{rp.Pop<Handle>()};
             copy_handles.push_back(copy_handle);
-            copy_objects.push_back(handle_table.GetGeneric(copy_handle));
+            copy_objects.push_back(handle_table.GetObject(copy_handle).GetPointerUnsafe());
         }
 
         for (u32 handle = 0; handle < handle_descriptor_header->num_handles_to_move; ++handle) {
             const u32 move_handle{rp.Pop<Handle>()};
             move_handles.push_back(move_handle);
-            move_objects.push_back(handle_table.GetGeneric(move_handle));
+            move_objects.push_back(handle_table.GetObject(move_handle).GetPointerUnsafe());
         }
     } else {
         // For responses we just ignore the handles, they're empty and will be populated when
@@ -169,7 +163,7 @@ void HLERequestContext::ParseCommandBuffer(const HandleTable& handle_table, u32_
     rp.Skip(1, false); // The command is actually an u64, but we don't use the high part.
 }
 
-ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const HandleTable& handle_table,
+ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const KHandleTable& handle_table,
                                                                 u32_le* src_cmdbuf) {
     ParseCommandBuffer(handle_table, src_cmdbuf, true);
     if (command_header->type == IPC::CommandType::Close) {
@@ -223,12 +217,12 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(KThread& thread) {
         // for specific values in each of these descriptors.
         for (auto& object : copy_objects) {
             ASSERT(object != nullptr);
-            dst_cmdbuf[current_offset++] = handle_table.Create(object).Unwrap();
+            R_TRY(handle_table.Add(&dst_cmdbuf[current_offset++], object));
         }
 
         for (auto& object : move_objects) {
             ASSERT(object != nullptr);
-            dst_cmdbuf[current_offset++] = handle_table.Create(object).Unwrap();
+            R_TRY(handle_table.Add(&dst_cmdbuf[current_offset++], object));
         }
     }

@@ -16,7 +16,8 @@
 #include "common/concepts.h"
 #include "common/swap.h"
 #include "core/hle/ipc.h"
-#include "core/hle/kernel/object.h"
+#include "core/hle/kernel/k_auto_object.h"
+#include "core/hle/kernel/svc_common.h"
 
 union ResultCode;
@@ -35,13 +36,14 @@ class ServiceFrameworkBase;
 namespace Kernel {
 class Domain;
-class HandleTable;
 class HLERequestContext;
 class KernelCore;
-class Process;
-class ServerSession;
+class KHandleTable;
+class KProcess;
+class KServerSession;
 class KThread;
 class KReadableEvent;
+class KSession;
 class KWritableEvent;
 enum class ThreadWakeupReason;
@@ -71,20 +73,14 @@ public:
      * associated ServerSession alive for the duration of the connection.
     * @param server_session Owning pointer to the ServerSession associated with the connection.
     */
-    void ClientConnected(std::shared_ptr<ServerSession> server_session);
+    void ClientConnected(KServerSession* session);
 
    /**
     * Signals that a client has just disconnected from this HLE handler and releases the
     * associated ServerSession.
     * @param server_session ServerSession associated with the connection.
    */
-    void ClientDisconnected(const std::shared_ptr<ServerSession>& server_session);
-
-protected:
-    /// List of sessions that are connected to this handler.
-    /// A ServerSession whose server endpoint is an HLE implementation is kept alive by this list
-    /// for the duration of the connection.
-    std::vector<std::shared_ptr<ServerSession>> connected_sessions;
+    void ClientDisconnected(KServerSession* session);
 };
 
 /**
@@ -109,8 +105,7 @@ protected:
 class HLERequestContext {
 public:
     explicit HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory,
-                               std::shared_ptr<ServerSession> session,
-                               std::shared_ptr<KThread> thread);
+                               KServerSession* session, KThread* thread);
     ~HLERequestContext();
 
     /// Returns a pointer to the IPC command buffer for this request.
@@ -122,12 +117,12 @@ public:
     * Returns the session through which this request was made. This can be used as a map key to
     * access per-client data on services.
     */
-    const std::shared_ptr<Kernel::ServerSession>& Session() const {
+    Kernel::KServerSession* Session() {
         return server_session;
     }
 
     /// Populates this context with data from the requesting process/thread.
-    ResultCode PopulateFromIncomingCommandBuffer(const HandleTable& handle_table,
+    ResultCode PopulateFromIncomingCommandBuffer(const KHandleTable& handle_table,
                                                  u32_le* src_cmdbuf);
 
     /// Writes data from this context back to the requesting process/thread.
@@ -218,22 +213,12 @@ public:
         return move_handles.at(index);
     }
 
-    template <typename T>
-    std::shared_ptr<T> GetCopyObject(std::size_t index) {
-        return DynamicObjectCast<T>(copy_objects.at(index));
+    void AddMoveObject(KAutoObject* object) {
+        move_objects.emplace_back(object);
     }
 
-    template <typename T>
-    std::shared_ptr<T> GetMoveObject(std::size_t index) {
-        return DynamicObjectCast<T>(move_objects.at(index));
-    }
-
-    void AddMoveObject(std::shared_ptr<Object> object) {
-        move_objects.emplace_back(std::move(object));
-    }
-
-    void AddCopyObject(std::shared_ptr<Object> object) {
-        copy_objects.emplace_back(std::move(object));
+    void AddCopyObject(KAutoObject* object) {
+        copy_objects.emplace_back(object);
     }
 
     void AddDomainObject(std::shared_ptr<SessionRequestHandler> object) {
@@ -276,10 +261,6 @@ public:
         return *thread;
     }
 
-    const KThread& GetThread() const {
-        return *thread;
-    }
-
     bool IsThreadWaiting() const {
         return is_thread_waiting;
     }
@@ -287,16 +268,17 @@ public:
 private:
     friend class IPC::ResponseBuilder;
 
-    void ParseCommandBuffer(const HandleTable& handle_table, u32_le* src_cmdbuf, bool incoming);
+    void ParseCommandBuffer(const KHandleTable& handle_table, u32_le* src_cmdbuf, bool incoming);
 
     std::array<u32, IPC::COMMAND_BUFFER_LENGTH> cmd_buf;
-    std::shared_ptr<Kernel::ServerSession> server_session;
-    std::shared_ptr<KThread> thread;
+    Kernel::KServerSession* server_session{};
+    KThread* thread;
     // TODO(yuriks): Check common usage of this and optimize size accordingly
     boost::container::small_vector<Handle, 8> move_handles;
     boost::container::small_vector<Handle, 8> copy_handles;
-    boost::container::small_vector<std::shared_ptr<Object>, 8> move_objects;
-    boost::container::small_vector<std::shared_ptr<Object>, 8> copy_objects;
+    boost::container::small_vector<KAutoObject*, 8> move_objects;
+    boost::container::small_vector<KAutoObject*, 8> copy_objects;
     boost::container::small_vector<std::shared_ptr<SessionRequestHandler>, 8> domain_objects;
 
     std::optional<IPC::CommandHeader> command_header;

@ -0,0 +1,192 @@
// Copyright 2021 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/core.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/init/init_slab_setup.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_port.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_system_control.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_transfer_memory.h"
#include "core/hle/kernel/memory_types.h"
#include "core/memory.h"
namespace Kernel::Init {
#define SLAB_COUNT(CLASS) kernel.SlabResourceCounts().num_##CLASS
#define FOREACH_SLAB_TYPE(HANDLER, ...) \
HANDLER(KProcess, (SLAB_COUNT(KProcess)), ##__VA_ARGS__) \
HANDLER(KThread, (SLAB_COUNT(KThread)), ##__VA_ARGS__) \
HANDLER(KEvent, (SLAB_COUNT(KEvent)), ##__VA_ARGS__) \
HANDLER(KPort, (SLAB_COUNT(KPort)), ##__VA_ARGS__) \
HANDLER(KSharedMemory, (SLAB_COUNT(KSharedMemory)), ##__VA_ARGS__) \
HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ##__VA_ARGS__) \
HANDLER(KSession, (SLAB_COUNT(KSession)), ##__VA_ARGS__) \
HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__)
namespace {
#define DEFINE_SLAB_TYPE_ENUM_MEMBER(NAME, COUNT, ...) KSlabType_##NAME,
enum KSlabType : u32 {
FOREACH_SLAB_TYPE(DEFINE_SLAB_TYPE_ENUM_MEMBER) KSlabType_Count,
};
#undef DEFINE_SLAB_TYPE_ENUM_MEMBER
// Constexpr counts.
constexpr size_t SlabCountKProcess = 80;
constexpr size_t SlabCountKThread = 800;
constexpr size_t SlabCountKEvent = 700;
constexpr size_t SlabCountKInterruptEvent = 100;
constexpr size_t SlabCountKPort = 256 + 0x20; // Extra 0x20 ports over Nintendo for homebrew.
constexpr size_t SlabCountKSharedMemory = 80;
constexpr size_t SlabCountKTransferMemory = 200;
constexpr size_t SlabCountKCodeMemory = 10;
constexpr size_t SlabCountKDeviceAddressSpace = 300;
constexpr size_t SlabCountKSession = 933;
constexpr size_t SlabCountKLightSession = 100;
constexpr size_t SlabCountKObjectName = 7;
constexpr size_t SlabCountKResourceLimit = 5;
constexpr size_t SlabCountKDebug = Core::Hardware::NUM_CPU_CORES;
constexpr size_t SlabCountKAlpha = 1;
constexpr size_t SlabCountKBeta = 6;
constexpr size_t SlabCountExtraKThread = 160;
template <typename T>
VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAddr address,
size_t num_objects) {
const size_t size = Common::AlignUp(sizeof(T) * num_objects, alignof(void*));
VAddr start = Common::AlignUp(address, alignof(T));
if (size > 0) {
const KMemoryRegion* region = memory_layout.FindVirtual(start + size - 1);
ASSERT(region != nullptr);
ASSERT(region->IsDerivedFrom(KMemoryRegionType_KernelSlab));
T::InitializeSlabHeap(system.Kernel(), system.Memory().GetKernelBuffer(start, size), size);
}
return start + size;
}
} // namespace
KSlabResourceCounts KSlabResourceCounts::CreateDefault() {
return {
.num_KProcess = SlabCountKProcess,
.num_KThread = SlabCountKThread,
.num_KEvent = SlabCountKEvent,
.num_KInterruptEvent = SlabCountKInterruptEvent,
.num_KPort = SlabCountKPort,
.num_KSharedMemory = SlabCountKSharedMemory,
.num_KTransferMemory = SlabCountKTransferMemory,
.num_KCodeMemory = SlabCountKCodeMemory,
.num_KDeviceAddressSpace = SlabCountKDeviceAddressSpace,
.num_KSession = SlabCountKSession,
.num_KLightSession = SlabCountKLightSession,
.num_KObjectName = SlabCountKObjectName,
.num_KResourceLimit = SlabCountKResourceLimit,
.num_KDebug = SlabCountKDebug,
.num_KAlpha = SlabCountKAlpha,
.num_KBeta = SlabCountKBeta,
};
}
void InitializeSlabResourceCounts(KernelCore& kernel) {
kernel.SlabResourceCounts() = KSlabResourceCounts::CreateDefault();
if (KSystemControl::Init::ShouldIncreaseThreadResourceLimit()) {
kernel.SlabResourceCounts().num_KThread += SlabCountExtraKThread;
}
}
size_t CalculateTotalSlabHeapSize(const KernelCore& kernel) {
size_t size = 0;
#define ADD_SLAB_SIZE(NAME, COUNT, ...) \
{ \
size += alignof(NAME); \
size += Common::AlignUp(sizeof(NAME) * (COUNT), alignof(void*)); \
};
// Add the size required for each slab.
FOREACH_SLAB_TYPE(ADD_SLAB_SIZE)
#undef ADD_SLAB_SIZE
// Add the reserved size.
size += KernelSlabHeapGapsSize;
return size;
}
void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
auto& kernel = system.Kernel();
// Get the start of the slab region, since that's where we'll be working.
VAddr address = memory_layout.GetSlabRegionAddress();
// Initialize slab type array to be in sorted order.
std::array<KSlabType, KSlabType_Count> slab_types;
for (size_t i = 0; i < slab_types.size(); i++) {
slab_types[i] = static_cast<KSlabType>(i);
}
// N shuffles the slab type array with the following simple algorithm.
for (size_t i = 0; i < slab_types.size(); i++) {
const size_t rnd = KSystemControl::GenerateRandomRange(0, slab_types.size() - 1);
std::swap(slab_types[i], slab_types[rnd]);
}
// Create an array to represent the gaps between the slabs.
const size_t total_gap_size = KernelSlabHeapGapsSize;
std::array<size_t, slab_types.size()> slab_gaps;
for (size_t i = 0; i < slab_gaps.size(); i++) {
// Note: This is an off-by-one error from Nintendo's intention, because GenerateRandomRange
// is inclusive. However, Nintendo also has the off-by-one error, and it's "harmless", so we
// will include it ourselves.
slab_gaps[i] = KSystemControl::GenerateRandomRange(0, total_gap_size);
}
// Sort the array, so that we can treat differences between values as offsets to the starts of
// slabs.
for (size_t i = 1; i < slab_gaps.size(); i++) {
for (size_t j = i; j > 0 && slab_gaps[j - 1] > slab_gaps[j]; j--) {
std::swap(slab_gaps[j], slab_gaps[j - 1]);
}
}
for (size_t i = 0; i < slab_types.size(); i++) {
// Add the random gap to the address.
address += (i == 0) ? slab_gaps[0] : slab_gaps[i] - slab_gaps[i - 1];
#define INITIALIZE_SLAB_HEAP(NAME, COUNT, ...) \
case KSlabType_##NAME: \
address = InitializeSlabHeap<NAME>(system, memory_layout, address, COUNT); \
break;
// Initialize the slabheap.
switch (slab_types[i]) {
// For each of the slab types, we want to initialize that heap.
FOREACH_SLAB_TYPE(INITIALIZE_SLAB_HEAP)
// If we somehow get an invalid type, abort.
default:
UNREACHABLE();
}
}
}
} // namespace Kernel::Init
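A small worked example of the gap placement above (all numbers made up): if GenerateRandomRange() produced the samples {0x300, 0x120, 0x250}, the insertion sort reorders them to {0x120, 0x250, 0x300}, and the loop then advances the address by 0x120 before slab 0, by 0x250 - 0x120 = 0x130 before slab 1, and by 0x300 - 0x250 = 0x50 before slab 2. The differences between sorted samples become the random gaps between slabs, and the total padding spent equals the largest sample, which never exceeds total_gap_size.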

@ -0,0 +1,43 @@
// Copyright 2021 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
namespace Core {
class System;
} // namespace Core
namespace Kernel {
class KernelCore;
class KMemoryLayout;
} // namespace Kernel
namespace Kernel::Init {
struct KSlabResourceCounts {
static KSlabResourceCounts CreateDefault();
size_t num_KProcess;
size_t num_KThread;
size_t num_KEvent;
size_t num_KInterruptEvent;
size_t num_KPort;
size_t num_KSharedMemory;
size_t num_KTransferMemory;
size_t num_KCodeMemory;
size_t num_KDeviceAddressSpace;
size_t num_KSession;
size_t num_KLightSession;
size_t num_KObjectName;
size_t num_KResourceLimit;
size_t num_KDebug;
size_t num_KAlpha;
size_t num_KBeta;
};
void InitializeSlabResourceCounts(KernelCore& kernel);
size_t CalculateTotalSlabHeapSize(const KernelCore& kernel);
void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout);
} // namespace Kernel::Init

@ -0,0 +1,14 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "core/hle/kernel/k_auto_object.h"
namespace Kernel {
KAutoObject* KAutoObject::Create(KAutoObject* obj) {
obj->m_ref_count = 1;
return obj;
}
} // namespace Kernel

@ -0,0 +1,306 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <atomic>
#include <string>
#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/intrusive_red_black_tree.h"
#include "core/hle/kernel/k_class_token.h"
namespace Kernel {
class KernelCore;
class KProcess;
#define KERNEL_AUTOOBJECT_TRAITS(CLASS, BASE_CLASS) \
YUZU_NON_COPYABLE(CLASS); \
YUZU_NON_MOVEABLE(CLASS); \
\
private: \
friend class ::Kernel::KClassTokenGenerator; \
static constexpr inline auto ObjectType = ::Kernel::KClassTokenGenerator::ObjectType::CLASS; \
static constexpr inline const char* const TypeName = #CLASS; \
static constexpr inline ClassTokenType ClassToken() { \
return ::Kernel::ClassToken<CLASS>; \
} \
\
public: \
using BaseClass = BASE_CLASS; \
static constexpr TypeObj GetStaticTypeObj() { \
constexpr ClassTokenType Token = ClassToken(); \
return TypeObj(TypeName, Token); \
} \
static constexpr const char* GetStaticTypeName() { \
return TypeName; \
} \
virtual TypeObj GetTypeObj() const { \
return GetStaticTypeObj(); \
} \
virtual const char* GetTypeName() const { \
return GetStaticTypeName(); \
} \
\
private: \
constexpr bool operator!=(const TypeObj& rhs)
class KAutoObject {
protected:
class TypeObj {
public:
constexpr explicit TypeObj(const char* n, ClassTokenType tok)
: m_name(n), m_class_token(tok) {}
constexpr const char* GetName() const {
return m_name;
}
constexpr ClassTokenType GetClassToken() const {
return m_class_token;
}
constexpr bool operator==(const TypeObj& rhs) const {
return this->GetClassToken() == rhs.GetClassToken();
}
constexpr bool operator!=(const TypeObj& rhs) const {
return this->GetClassToken() != rhs.GetClassToken();
}
constexpr bool IsDerivedFrom(const TypeObj& rhs) const {
return (this->GetClassToken() | rhs.GetClassToken()) == this->GetClassToken();
}
private:
const char* m_name;
ClassTokenType m_class_token;
};
private:
KERNEL_AUTOOBJECT_TRAITS(KAutoObject, KAutoObject);
public:
explicit KAutoObject(KernelCore& kernel_) : kernel(kernel_) {}
virtual ~KAutoObject() = default;
static KAutoObject* Create(KAutoObject* ptr);
// Destroy is responsible for destroying the auto object's resources when ref_count hits zero.
virtual void Destroy() {
UNIMPLEMENTED();
}
// Finalize is responsible for cleaning up resources, but does not destroy the object.
virtual void Finalize() {}
virtual KProcess* GetOwner() const {
return nullptr;
}
u32 GetReferenceCount() const {
return m_ref_count.load();
}
bool IsDerivedFrom(const TypeObj& rhs) const {
return this->GetTypeObj().IsDerivedFrom(rhs);
}
bool IsDerivedFrom(const KAutoObject& rhs) const {
return this->IsDerivedFrom(rhs.GetTypeObj());
}
template <typename Derived>
Derived DynamicCast() {
static_assert(std::is_pointer_v<Derived>);
using DerivedType = std::remove_pointer_t<Derived>;
if (this->IsDerivedFrom(DerivedType::GetStaticTypeObj())) {
return static_cast<Derived>(this);
} else {
return nullptr;
}
}
template <typename Derived>
const Derived DynamicCast() const {
static_assert(std::is_pointer_v<Derived>);
using DerivedType = std::remove_pointer_t<Derived>;
if (this->IsDerivedFrom(DerivedType::GetStaticTypeObj())) {
return static_cast<Derived>(this);
} else {
return nullptr;
}
}
bool Open() {
// Atomically increment the reference count, only if it's positive.
u32 cur_ref_count = m_ref_count.load(std::memory_order_acquire);
do {
if (cur_ref_count == 0) {
return false;
}
ASSERT(cur_ref_count < cur_ref_count + 1);
} while (!m_ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count + 1,
std::memory_order_relaxed));
return true;
}
void Close() {
// Atomically decrement the reference count, not allowing it to become negative.
u32 cur_ref_count = m_ref_count.load(std::memory_order_acquire);
do {
ASSERT(cur_ref_count > 0);
} while (!m_ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count - 1,
std::memory_order_relaxed));
// If ref count hits zero, destroy the object.
if (cur_ref_count - 1 == 0) {
this->Destroy();
}
}
protected:
KernelCore& kernel;
std::string name;
private:
std::atomic<u32> m_ref_count{};
};
class KAutoObjectWithListContainer;
class KAutoObjectWithList : public KAutoObject {
public:
explicit KAutoObjectWithList(KernelCore& kernel_) : KAutoObject(kernel_), kernel(kernel_) {}
static int Compare(const KAutoObjectWithList& lhs, const KAutoObjectWithList& rhs) {
const u64 lid = lhs.GetId();
const u64 rid = rhs.GetId();
if (lid < rid) {
return -1;
} else if (lid > rid) {
return 1;
} else {
return 0;
}
}
public:
virtual u64 GetId() const {
return reinterpret_cast<u64>(this);
}
virtual const std::string& GetName() const {
return name;
}
private:
friend class KAutoObjectWithListContainer;
private:
Common::IntrusiveRedBlackTreeNode list_node;
protected:
KernelCore& kernel;
};
template <typename T>
class KScopedAutoObject {
YUZU_NON_COPYABLE(KScopedAutoObject);
public:
constexpr KScopedAutoObject() = default;
constexpr KScopedAutoObject(T* o) : m_obj(o) {
if (m_obj != nullptr) {
m_obj->Open();
}
}
~KScopedAutoObject() {
if (m_obj != nullptr) {
m_obj->Close();
}
m_obj = nullptr;
}
template <typename U>
requires(std::derived_from<T, U> ||
std::derived_from<U, T>) constexpr KScopedAutoObject(KScopedAutoObject<U>&& rhs) {
if constexpr (std::derived_from<U, T>) {
// Upcast.
m_obj = rhs.m_obj;
rhs.m_obj = nullptr;
} else {
// Downcast.
T* derived = nullptr;
if (rhs.m_obj != nullptr) {
derived = rhs.m_obj->template DynamicCast<T*>();
if (derived == nullptr) {
rhs.m_obj->Close();
}
}
m_obj = derived;
rhs.m_obj = nullptr;
}
}
constexpr KScopedAutoObject<T>& operator=(KScopedAutoObject<T>&& rhs) {
rhs.Swap(*this);
return *this;
}
constexpr T* operator->() {
return m_obj;
}
constexpr T& operator*() {
return *m_obj;
}
constexpr void Reset(T* o) {
KScopedAutoObject(o).Swap(*this);
}
constexpr T* GetPointerUnsafe() {
return m_obj;
}
constexpr T* GetPointerUnsafe() const {
return m_obj;
}
constexpr T* ReleasePointerUnsafe() {
T* ret = m_obj;
m_obj = nullptr;
return ret;
}
constexpr bool IsNull() const {
return m_obj == nullptr;
}
constexpr bool IsNotNull() const {
return m_obj != nullptr;
}
private:
template <typename U>
friend class KScopedAutoObject;
private:
T* m_obj{};
private:
constexpr void Swap(KScopedAutoObject& rhs) noexcept {
std::swap(m_obj, rhs.m_obj);
}
};
} // namespace Kernel
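A hedged usage sketch of the helpers above (not part of the patch); it only assumes that some KAutoObject* is available and that KThread is one of the registered auto object types:
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_thread.h"
// Hypothetical caller illustrating the ownership pattern.
void InspectObject(Kernel::KAutoObject* obj) {
    // KScopedAutoObject opens a reference on construction and closes it on destruction.
    Kernel::KScopedAutoObject<Kernel::KAutoObject> guard(obj);
    if (guard.IsNull()) {
        return;
    }
    // DynamicCast walks the class-token hierarchy instead of using C++ RTTI.
    if (auto* thread = guard->DynamicCast<Kernel::KThread*>(); thread != nullptr) {
        // ... use thread while the guard keeps the object alive ...
    }
} // guard closes its reference here; Destroy() runs if the count reaches zero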

@ -0,0 +1,28 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "core/hle/kernel/k_auto_object_container.h"
namespace Kernel {
void KAutoObjectWithListContainer::Register(KAutoObjectWithList* obj) {
KScopedLightLock lk(m_lock);
m_object_list.insert(*obj);
}
void KAutoObjectWithListContainer::Unregister(KAutoObjectWithList* obj) {
KScopedLightLock lk(m_lock);
m_object_list.erase(m_object_list.iterator_to(*obj));
}
size_t KAutoObjectWithListContainer::GetOwnedCount(KProcess* owner) {
KScopedLightLock lk(m_lock);
return std::count_if(m_object_list.begin(), m_object_list.end(),
[&](const auto& obj) { return obj.GetOwner() == owner; });
}
} // namespace Kernel

@ -0,0 +1,70 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <atomic>
#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/intrusive_red_black_tree.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_light_lock.h"
namespace Kernel {
class KernelCore;
class KProcess;
class KAutoObjectWithListContainer {
YUZU_NON_COPYABLE(KAutoObjectWithListContainer);
YUZU_NON_MOVEABLE(KAutoObjectWithListContainer);
public:
using ListType = Common::IntrusiveRedBlackTreeMemberTraits<
&KAutoObjectWithList::list_node>::TreeType<KAutoObjectWithList>;
public:
class ListAccessor : public KScopedLightLock {
public:
explicit ListAccessor(KAutoObjectWithListContainer* container)
: KScopedLightLock(container->m_lock), m_list(container->m_object_list) {}
explicit ListAccessor(KAutoObjectWithListContainer& container)
: KScopedLightLock(container.m_lock), m_list(container.m_object_list) {}
typename ListType::iterator begin() const {
return m_list.begin();
}
typename ListType::iterator end() const {
return m_list.end();
}
typename ListType::iterator find(typename ListType::const_reference ref) const {
return m_list.find(ref);
}
private:
ListType& m_list;
};
friend class ListAccessor;
public:
KAutoObjectWithListContainer(KernelCore& kernel) : m_lock(kernel), m_object_list() {}
void Initialize() {}
void Finalize() {}
void Register(KAutoObjectWithList* obj);
void Unregister(KAutoObjectWithList* obj);
size_t GetOwnedCount(KProcess* owner);
private:
KLightLock m_lock;
ListType m_object_list;
};
} // namespace Kernel
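A hedged sketch of how the accessor above is meant to be used (the container reference is assumed to come from the kernel; not part of the patch):
#include "core/hle/kernel/k_auto_object_container.h"
// Hypothetical walk over all registered objects while holding the container's lock.
void DumpRegisteredObjects(Kernel::KAutoObjectWithListContainer& container) {
    // ListAccessor acquires the KLightLock for as long as the accessor lives.
    Kernel::KAutoObjectWithListContainer::ListAccessor accessor(container);
    for (auto it = accessor.begin(); it != accessor.end(); ++it) {
        // Entries are KAutoObjectWithList nodes ordered by GetId().
        const u64 id = it->GetId();
        (void)id; // ... log or inspect the object here ...
    }
}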

@ -0,0 +1,133 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_class_token.h"
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_port.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_readable_event.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_server_port.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_transfer_memory.h"
#include "core/hle/kernel/k_writable_event.h"
namespace Kernel {
// Ensure that we generate correct class tokens for all types.
// Ensure that the absolute token values are correct.
static_assert(ClassToken<KAutoObject> == 0b00000000'00000000);
static_assert(ClassToken<KSynchronizationObject> == 0b00000000'00000001);
static_assert(ClassToken<KReadableEvent> == 0b00000000'00000011);
// static_assert(ClassToken<KInterruptEvent> == 0b00000111'00000011);
// static_assert(ClassToken<KDebug> == 0b00001011'00000001);
static_assert(ClassToken<KThread> == 0b00010011'00000001);
static_assert(ClassToken<KServerPort> == 0b00100011'00000001);
static_assert(ClassToken<KServerSession> == 0b01000011'00000001);
static_assert(ClassToken<KClientPort> == 0b10000011'00000001);
static_assert(ClassToken<KClientSession> == 0b00001101'00000000);
static_assert(ClassToken<KProcess> == 0b00010101'00000001);
static_assert(ClassToken<KResourceLimit> == 0b00100101'00000000);
// static_assert(ClassToken<KLightSession> == 0b01000101'00000000);
static_assert(ClassToken<KPort> == 0b10000101'00000000);
static_assert(ClassToken<KSession> == 0b00011001'00000000);
static_assert(ClassToken<KSharedMemory> == 0b00101001'00000000);
static_assert(ClassToken<KEvent> == 0b01001001'00000000);
static_assert(ClassToken<KWritableEvent> == 0b10001001'00000000);
// static_assert(ClassToken<KLightClientSession> == 0b00110001'00000000);
// static_assert(ClassToken<KLightServerSession> == 0b01010001'00000000);
static_assert(ClassToken<KTransferMemory> == 0b10010001'00000000);
// static_assert(ClassToken<KDeviceAddressSpace> == 0b01100001'00000000);
// static_assert(ClassToken<KSessionRequest> == 0b10100001'00000000);
// static_assert(ClassToken<KCodeMemory> == 0b11000001'00000000);
// Ensure that the token hierarchy is correct.
// Base classes
static_assert(ClassToken<KAutoObject> == (0b00000000));
static_assert(ClassToken<KSynchronizationObject> == (0b00000001 | ClassToken<KAutoObject>));
static_assert(ClassToken<KReadableEvent> == (0b00000010 | ClassToken<KSynchronizationObject>));
// Final classes
// static_assert(ClassToken<KInterruptEvent> == ((0b00000111 << 8) | ClassToken<KReadableEvent>));
// static_assert(ClassToken<KDebug> == ((0b00001011 << 8) | ClassToken<KSynchronizationObject>));
static_assert(ClassToken<KThread> == ((0b00010011 << 8) | ClassToken<KSynchronizationObject>));
static_assert(ClassToken<KServerPort> == ((0b00100011 << 8) | ClassToken<KSynchronizationObject>));
static_assert(ClassToken<KServerSession> ==
((0b01000011 << 8) | ClassToken<KSynchronizationObject>));
static_assert(ClassToken<KClientPort> == ((0b10000011 << 8) | ClassToken<KSynchronizationObject>));
static_assert(ClassToken<KClientSession> == ((0b00001101 << 8) | ClassToken<KAutoObject>));
static_assert(ClassToken<KProcess> == ((0b00010101 << 8) | ClassToken<KSynchronizationObject>));
static_assert(ClassToken<KResourceLimit> == ((0b00100101 << 8) | ClassToken<KAutoObject>));
// static_assert(ClassToken<KLightSession> == ((0b01000101 << 8) | ClassToken<KAutoObject>));
static_assert(ClassToken<KPort> == ((0b10000101 << 8) | ClassToken<KAutoObject>));
static_assert(ClassToken<KSession> == ((0b00011001 << 8) | ClassToken<KAutoObject>));
static_assert(ClassToken<KSharedMemory> == ((0b00101001 << 8) | ClassToken<KAutoObject>));
static_assert(ClassToken<KEvent> == ((0b01001001 << 8) | ClassToken<KAutoObject>));
static_assert(ClassToken<KWritableEvent> == ((0b10001001 << 8) | ClassToken<KAutoObject>));
// static_assert(ClassToken<KLightClientSession> == ((0b00110001 << 8) | ClassToken<KAutoObject>));
// static_assert(ClassToken<KLightServerSession> == ((0b01010001 << 8) | ClassToken<KAutoObject>));
static_assert(ClassToken<KTransferMemory> == ((0b10010001 << 8) | ClassToken<KAutoObject>));
// static_assert(ClassToken<KDeviceAddressSpace> == ((0b01100001 << 8) | ClassToken<KAutoObject>));
// static_assert(ClassToken<KSessionRequest> == ((0b10100001 << 8) | ClassToken<KAutoObject>));
// static_assert(ClassToken<KCodeMemory> == ((0b11000001 << 8) | ClassToken<KAutoObject>));
// Ensure that the token hierarchy reflects the class hierarchy.
// Base classes.
static_assert(!std::is_final<KSynchronizationObject>::value &&
std::is_base_of<KAutoObject, KSynchronizationObject>::value);
static_assert(!std::is_final<KReadableEvent>::value &&
std::is_base_of<KSynchronizationObject, KReadableEvent>::value);
// Final classes
// static_assert(std::is_final<KInterruptEvent>::value &&
// std::is_base_of<KReadableEvent, KInterruptEvent>::value);
// static_assert(std::is_final<KDebug>::value &&
// std::is_base_of<KSynchronizationObject, KDebug>::value);
static_assert(std::is_final<KThread>::value &&
std::is_base_of<KSynchronizationObject, KThread>::value);
static_assert(std::is_final<KServerPort>::value &&
std::is_base_of<KSynchronizationObject, KServerPort>::value);
static_assert(std::is_final<KServerSession>::value &&
std::is_base_of<KSynchronizationObject, KServerSession>::value);
static_assert(std::is_final<KClientPort>::value &&
std::is_base_of<KSynchronizationObject, KClientPort>::value);
static_assert(std::is_final<KClientSession>::value &&
std::is_base_of<KAutoObject, KClientSession>::value);
static_assert(std::is_final<KProcess>::value &&
std::is_base_of<KSynchronizationObject, KProcess>::value);
static_assert(std::is_final<KResourceLimit>::value &&
std::is_base_of<KAutoObject, KResourceLimit>::value);
// static_assert(std::is_final<KLightSession>::value &&
// std::is_base_of<KAutoObject, KLightSession>::value);
static_assert(std::is_final<KPort>::value && std::is_base_of<KAutoObject, KPort>::value);
static_assert(std::is_final<KSession>::value && std::is_base_of<KAutoObject, KSession>::value);
static_assert(std::is_final<KSharedMemory>::value &&
std::is_base_of<KAutoObject, KSharedMemory>::value);
static_assert(std::is_final<KEvent>::value && std::is_base_of<KAutoObject, KEvent>::value);
static_assert(std::is_final<KWritableEvent>::value &&
std::is_base_of<KAutoObject, KWritableEvent>::value);
// static_assert(std::is_final<KLightClientSession>::value &&
// std::is_base_of<KAutoObject, KLightClientSession>::value);
// static_assert(std::is_final<KLightServerSession>::value &&
// std::is_base_of<KAutoObject, KLightServerSession>::value);
static_assert(std::is_final<KTransferMemory>::value &&
std::is_base_of<KAutoObject, KTransferMemory>::value);
// static_assert(std::is_final<KDeviceAddressSpace>::value &&
// std::is_base_of<KAutoObject, KDeviceAddressSpace>::value);
// static_assert(std::is_final<KSessionRequest>::value &&
// std::is_base_of<KAutoObject, KSessionRequest>::value);
// static_assert(std::is_final<KCodeMemory>::value &&
// std::is_base_of<KAutoObject, KCodeMemory>::value);
} // namespace Kernel
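The hierarchy asserts above all reduce to one bitwise rule: a derived class's token contains every bit of its base's token, so OR-ing in the base token leaves a derived token unchanged. A hedged illustration (not part of the patch, written as if inside namespace Kernel) using tokens already asserted above:
// KThread derives from KSynchronizationObject, so its token already contains the base bits.
static_assert((ClassToken<KThread> | ClassToken<KSynchronizationObject>) ==
              ClassToken<KThread>);
// KEvent derives only from KAutoObject, so the KSynchronizationObject bit changes its token.
static_assert((ClassToken<KEvent> | ClassToken<KSynchronizationObject>) !=
              ClassToken<KEvent>);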

@ -0,0 +1,131 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <atomic>
#include "common/assert.h"
#include "common/bit_util.h"
#include "common/common_types.h"
namespace Kernel {
class KAutoObject;
class KClassTokenGenerator {
public:
using TokenBaseType = u16;
public:
static constexpr size_t BaseClassBits = 8;
static constexpr size_t FinalClassBits = (sizeof(TokenBaseType) * CHAR_BIT) - BaseClassBits;
// One bit per base class.
static constexpr size_t NumBaseClasses = BaseClassBits;
// Final classes are combinations of three of the final-class bits.
static constexpr size_t NumFinalClasses = [] {
TokenBaseType index = 0;
for (size_t i = 0; i < FinalClassBits; i++) {
for (size_t j = i + 1; j < FinalClassBits; j++) {
for (size_t k = j + 1; k < FinalClassBits; k++) {
index++;
}
}
}
return index;
}();
private:
template <TokenBaseType Index>
static constexpr inline TokenBaseType BaseClassToken = 1U << Index;
template <TokenBaseType Index>
static constexpr inline TokenBaseType FinalClassToken = [] {
TokenBaseType index = 0;
for (size_t i = 0; i < FinalClassBits; i++) {
for (size_t j = i + 1; j < FinalClassBits; j++) {
for (size_t k = j + 1; k < FinalClassBits; k++) {
if ((index++) == Index) {
return static_cast<TokenBaseType>(((1ULL << i) | (1ULL << j) | (1ULL << k))
<< BaseClassBits);
}
}
}
}
}();
template <typename T>
static constexpr inline TokenBaseType GetClassToken() {
static_assert(std::is_base_of<KAutoObject, T>::value);
if constexpr (std::is_same<T, KAutoObject>::value) {
static_assert(T::ObjectType == ObjectType::KAutoObject);
return 0;
} else if constexpr (!std::is_final<T>::value) {
static_assert(ObjectType::BaseClassesStart <= T::ObjectType &&
T::ObjectType < ObjectType::BaseClassesEnd);
constexpr auto ClassIndex = static_cast<TokenBaseType>(T::ObjectType) -
static_cast<TokenBaseType>(ObjectType::BaseClassesStart);
return BaseClassToken<ClassIndex> | GetClassToken<typename T::BaseClass>();
} else if constexpr (ObjectType::FinalClassesStart <= T::ObjectType &&
T::ObjectType < ObjectType::FinalClassesEnd) {
constexpr auto ClassIndex = static_cast<TokenBaseType>(T::ObjectType) -
static_cast<TokenBaseType>(ObjectType::FinalClassesStart);
return FinalClassToken<ClassIndex> | GetClassToken<typename T::BaseClass>();
} else {
static_assert(!std::is_same<T, T>::value, "GetClassToken: Invalid Type");
}
};
public:
enum class ObjectType {
KAutoObject,
BaseClassesStart,
KSynchronizationObject = BaseClassesStart,
KReadableEvent,
BaseClassesEnd,
FinalClassesStart = BaseClassesEnd,
KInterruptEvent = FinalClassesStart,
KDebug,
KThread,
KServerPort,
KServerSession,
KClientPort,
KClientSession,
KProcess,
KResourceLimit,
KLightSession,
KPort,
KSession,
KSharedMemory,
KEvent,
KWritableEvent,
KLightClientSession,
KLightServerSession,
KTransferMemory,
KDeviceAddressSpace,
KSessionRequest,
KCodeMemory,
// NOTE: True order for these has not been determined yet.
KAlpha,
KBeta,
FinalClassesEnd = FinalClassesStart + NumFinalClasses,
};
template <typename T>
static constexpr inline TokenBaseType ClassToken = GetClassToken<T>();
};
using ClassTokenType = KClassTokenGenerator::TokenBaseType;
template <typename T>
static constexpr inline ClassTokenType ClassToken = KClassTokenGenerator::ClassToken<T>;
} // namespace Kernel
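With TokenBaseType = u16 and BaseClassBits = 8, FinalClassBits is also 8, and the nested loops above simply count the 3-bit combinations of those 8 bits. A hedged check (illustrative only, not part of the patch):
// C(8, 3) = (8 * 7 * 6) / (3 * 2 * 1) = 56 distinct final-class tokens are available,
// comfortably more than the final classes listed in ObjectType.
static_assert(Kernel::KClassTokenGenerator::NumFinalClasses == 56);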

@ -0,0 +1,125 @@
// Copyright 2021 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/scope_exit.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_port.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/svc_results.h"
namespace Kernel {
KClientPort::KClientPort(KernelCore& kernel) : KSynchronizationObject{kernel} {}
KClientPort::~KClientPort() = default;
void KClientPort::Initialize(KPort* parent_, s32 max_sessions_, std::string&& name_) {
// Set member variables.
num_sessions = 0;
peak_sessions = 0;
parent = parent_;
max_sessions = max_sessions_;
name = std::move(name_);
}
void KClientPort::OnSessionFinalized() {
KScopedSchedulerLock sl{kernel};
const auto prev = num_sessions--;
if (prev == max_sessions) {
this->NotifyAvailable();
}
}
void KClientPort::OnServerClosed() {}
bool KClientPort::IsLight() const {
return this->GetParent()->IsLight();
}
bool KClientPort::IsServerClosed() const {
return this->GetParent()->IsServerClosed();
}
void KClientPort::Destroy() {
// Note with our parent that we're closed.
parent->OnClientClosed();
// Close our reference to our parent.
parent->Close();
}
bool KClientPort::IsSignaled() const {
return num_sessions < max_sessions;
}
ResultCode KClientPort::CreateSession(KClientSession** out) {
// Reserve a new session from the resource limit.
KScopedResourceReservation session_reservation(kernel.CurrentProcess()->GetResourceLimit(),
LimitableResource::Sessions);
R_UNLESS(session_reservation.Succeeded(), ResultLimitReached);
// Update the session counts.
{
// Atomically increment the number of sessions.
s32 new_sessions;
{
const auto max = max_sessions;
auto cur_sessions = num_sessions.load(std::memory_order_acquire);
do {
R_UNLESS(cur_sessions < max, ResultOutOfSessions);
new_sessions = cur_sessions + 1;
} while (!num_sessions.compare_exchange_weak(cur_sessions, new_sessions,
std::memory_order_relaxed));
}
// Atomically update the peak session tracking.
{
auto peak = peak_sessions.load(std::memory_order_acquire);
do {
if (peak >= new_sessions) {
break;
}
} while (!peak_sessions.compare_exchange_weak(peak, new_sessions,
std::memory_order_relaxed));
}
}
// Create a new session.
KSession* session = KSession::Create(kernel);
if (session == nullptr) {
// Decrement the session count.
const auto prev = num_sessions--;
if (prev == max_sessions) {
this->NotifyAvailable();
}
return ResultOutOfResource;
}
// Initialize the session.
session->Initialize(this, parent->GetName());
// Commit the session reservation.
session_reservation.Commit();
// Register the session.
KSession::Register(kernel, session);
auto session_guard = SCOPE_GUARD({
session->GetClientSession().Close();
session->GetServerSession().Close();
});
// Enqueue the session with our parent.
R_TRY(parent->EnqueueSession(std::addressof(session->GetServerSession())));
// We succeeded, so set the output.
session_guard.Cancel();
*out = std::addressof(session->GetClientSession());
return RESULT_SUCCESS;
}
} // namespace Kernel
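CreateSession above relies on two small lock-free loops: a bounded increment of num_sessions and a monotonic update of peak_sessions. A standalone sketch of the same pattern with assumed names (not part of the patch):
#include <atomic>
// Hypothetical helper: increment `sessions` only while below `max_sessions`,
// then raise `peak` if the new value exceeds it.
bool TryOpenSession(std::atomic<int>& sessions, std::atomic<int>& peak, int max_sessions) {
    int cur = sessions.load(std::memory_order_acquire);
    int next;
    do {
        if (cur >= max_sessions) {
            return false; // corresponds to ResultOutOfSessions above
        }
        next = cur + 1;
    } while (!sessions.compare_exchange_weak(cur, next, std::memory_order_relaxed));
    // Monotonic peak update: retry only while our value is still the new maximum.
    int prev_peak = peak.load(std::memory_order_acquire);
    while (prev_peak < next &&
           !peak.compare_exchange_weak(prev_peak, next, std::memory_order_relaxed)) {
    }
    return true;
}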

@ -0,0 +1,61 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <string>
#include "common/common_types.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/result.h"
namespace Kernel {
class KClientSession;
class KernelCore;
class KPort;
class KClientPort final : public KSynchronizationObject {
KERNEL_AUTOOBJECT_TRAITS(KClientPort, KSynchronizationObject);
public:
explicit KClientPort(KernelCore& kernel);
virtual ~KClientPort() override;
void Initialize(KPort* parent_, s32 max_sessions_, std::string&& name_);
void OnSessionFinalized();
void OnServerClosed();
const KPort* GetParent() const {
return parent;
}
s32 GetNumSessions() const {
return num_sessions;
}
s32 GetPeakSessions() const {
return peak_sessions;
}
s32 GetMaxSessions() const {
return max_sessions;
}
bool IsLight() const;
bool IsServerClosed() const;
// Overridden virtual functions.
virtual void Destroy() override;
virtual bool IsSignaled() const override;
ResultCode CreateSession(KClientSession** out);
private:
std::atomic<s32> num_sessions{};
std::atomic<s32> peak_sessions{};
s32 max_sessions{};
KPort* parent{};
};
} // namespace Kernel

@ -0,0 +1,31 @@
// Copyright 2021 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/svc_results.h"
#include "core/hle/result.h"
namespace Kernel {
KClientSession::KClientSession(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {}
KClientSession::~KClientSession() = default;
void KClientSession::Destroy() {
parent->OnClientClosed();
parent->Close();
}
void KClientSession::OnServerClosed() {}
ResultCode KClientSession::SendSyncRequest(KThread* thread, Core::Memory::Memory& memory,
Core::Timing::CoreTiming& core_timing) {
// Signal the server session that new data is available
return parent->GetServerSession().HandleSyncRequest(thread, memory, core_timing);
}
} // namespace Kernel

@ -0,0 +1,61 @@
// Copyright 2021 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <string>
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
union ResultCode;
namespace Core::Memory {
class Memory;
}
namespace Core::Timing {
class CoreTiming;
}
namespace Kernel {
class KernelCore;
class KSession;
class KThread;
class KClientSession final
: public KAutoObjectWithSlabHeapAndContainer<KClientSession, KAutoObjectWithList> {
KERNEL_AUTOOBJECT_TRAITS(KClientSession, KAutoObject);
public:
explicit KClientSession(KernelCore& kernel);
virtual ~KClientSession();
void Initialize(KSession* parent_, std::string&& name_) {
// Set member variables.
parent = parent_;
name = std::move(name_);
}
virtual void Destroy() override;
static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
KSession* GetParent() const {
return parent;
}
ResultCode SendSyncRequest(KThread* thread, Core::Memory::Memory& memory,
Core::Timing::CoreTiming& core_timing);
void OnServerClosed();
private:
KSession* parent{};
};
} // namespace Kernel

@ -7,12 +7,13 @@
#include "core/arm/exclusive_monitor.h" #include "core/arm/exclusive_monitor.h"
#include "core/core.h" #include "core/core.h"
#include "core/hle/kernel/k_condition_variable.h" #include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/k_linked_list.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_synchronization_object.h" #include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h" #include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/svc_common.h" #include "core/hle/kernel/svc_common.h"
#include "core/hle/kernel/svc_results.h" #include "core/hle/kernel/svc_results.h"
#include "core/memory.h" #include "core/memory.h"
@ -107,8 +108,8 @@ ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 val
// Wait for the address. // Wait for the address.
{ {
std::shared_ptr<KThread> owner_thread; KScopedAutoObject<KThread> owner_thread;
ASSERT(!owner_thread); ASSERT(owner_thread.IsNull());
{ {
KScopedSchedulerLock sl(kernel); KScopedSchedulerLock sl(kernel);
cur_thread->SetSyncedObject(nullptr, RESULT_SUCCESS); cur_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
@ -126,8 +127,10 @@ ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 val
R_UNLESS(test_tag == (handle | Svc::HandleWaitMask), RESULT_SUCCESS); R_UNLESS(test_tag == (handle | Svc::HandleWaitMask), RESULT_SUCCESS);
// Get the lock owner thread. // Get the lock owner thread.
owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<KThread>(handle); owner_thread =
R_UNLESS(owner_thread, ResultInvalidHandle); kernel.CurrentProcess()->GetHandleTable().GetObjectWithoutPseudoHandle<KThread>(
handle);
R_UNLESS(owner_thread.IsNotNull(), ResultInvalidHandle);
// Update the lock. // Update the lock.
cur_thread->SetAddressKey(addr, value); cur_thread->SetAddressKey(addr, value);
@ -137,7 +140,7 @@ ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 val
cur_thread->SetMutexWaitAddressForDebugging(addr); cur_thread->SetMutexWaitAddressForDebugging(addr);
} }
} }
ASSERT(owner_thread); ASSERT(owner_thread.IsNotNull());
} }
// Remove the thread as a waiter from the lock owner. // Remove the thread as a waiter from the lock owner.
@ -176,19 +179,22 @@ KThread* KConditionVariable::SignalImpl(KThread* thread) {
KThread* thread_to_close = nullptr; KThread* thread_to_close = nullptr;
if (can_access) { if (can_access) {
if (prev_tag == InvalidHandle) { if (prev_tag == Svc::InvalidHandle) {
// If nobody held the lock previously, we're all good. // If nobody held the lock previously, we're all good.
thread->SetSyncedObject(nullptr, RESULT_SUCCESS); thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
thread->Wakeup(); thread->Wakeup();
} else { } else {
// Get the previous owner. // Get the previous owner.
auto owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<KThread>( KThread* owner_thread = kernel.CurrentProcess()
prev_tag & ~Svc::HandleWaitMask); ->GetHandleTable()
.GetObjectWithoutPseudoHandle<KThread>(
static_cast<Handle>(prev_tag & ~Svc::HandleWaitMask))
.ReleasePointerUnsafe();
if (owner_thread) { if (owner_thread) {
// Add the thread as a waiter on the owner. // Add the thread as a waiter on the owner.
owner_thread->AddWaiter(thread); owner_thread->AddWaiter(thread);
thread_to_close = owner_thread.get(); thread_to_close = owner_thread;
} else { } else {
// The lock was tagged with a thread that doesn't exist. // The lock was tagged with a thread that doesn't exist.
thread->SetSyncedObject(nullptr, ResultInvalidState); thread->SetSyncedObject(nullptr, ResultInvalidState);
@ -208,9 +214,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
// Prepare for signaling. // Prepare for signaling.
constexpr int MaxThreads = 16; constexpr int MaxThreads = 16;
// TODO(bunnei): This should just be Thread once we implement KAutoObject instead of using KLinkedList<KThread> thread_list{kernel};
// std::shared_ptr.
std::vector<std::shared_ptr<KThread>> thread_list;
std::array<KThread*, MaxThreads> thread_array; std::array<KThread*, MaxThreads> thread_array;
s32 num_to_close{}; s32 num_to_close{};
@ -228,7 +232,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
if (num_to_close < MaxThreads) { if (num_to_close < MaxThreads) {
thread_array[num_to_close++] = thread; thread_array[num_to_close++] = thread;
} else { } else {
thread_list.push_back(SharedFrom(thread)); thread_list.push_back(*thread);
} }
} }
@ -250,8 +254,9 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
} }
// Close threads in the list. // Close threads in the list.
for (auto it = thread_list.begin(); it != thread_list.end(); it = thread_list.erase(it)) { for (auto it = thread_list.begin(); it != thread_list.end();
(*it)->Close(); it = thread_list.erase(kernel, it)) {
(*it).Close();
} }
} }

@ -3,30 +3,53 @@
// Refer to the license.txt file included. // Refer to the license.txt file included.
#include "core/hle/kernel/k_event.h" #include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_readable_event.h" #include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_writable_event.h" #include "core/hle/kernel/k_resource_limit.h"
namespace Kernel { namespace Kernel {
KEvent::KEvent(KernelCore& kernel, std::string&& name) : Object{kernel, std::move(name)} {} KEvent::KEvent(KernelCore& kernel)
: KAutoObjectWithSlabHeapAndContainer{kernel}, readable_event{kernel}, writable_event{kernel} {}
KEvent::~KEvent() = default; KEvent::~KEvent() = default;
std::shared_ptr<KEvent> KEvent::Create(KernelCore& kernel, std::string&& name) { void KEvent::Initialize(std::string&& name_) {
return std::make_shared<KEvent>(kernel, std::move(name)); // Increment reference count.
} // Because reference count is one on creation, this will result
// in a reference count of two. Thus, when both readable and
// writable events are closed this object will be destroyed.
Open();
void KEvent::Initialize() {
// Create our sub events. // Create our sub events.
readable_event = std::make_shared<KReadableEvent>(kernel, GetName() + ":Readable"); KAutoObject::Create(std::addressof(readable_event));
writable_event = std::make_shared<KWritableEvent>(kernel, GetName() + ":Writable"); KAutoObject::Create(std::addressof(writable_event));
// Initialize our sub sessions. // Initialize our sub sessions.
readable_event->Initialize(this); readable_event.Initialize(this, name_ + ":Readable");
writable_event->Initialize(this); writable_event.Initialize(this, name_ + ":Writable");
// Set our owner process.
owner = kernel.CurrentProcess();
if (owner) {
owner->Open();
}
// Mark initialized. // Mark initialized.
name = std::move(name_);
initialized = true; initialized = true;
} }
void KEvent::Finalize() {
KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObjectWithList>::Finalize();
}
void KEvent::PostDestroy(uintptr_t arg) {
// Release the event count resource the owner process holds.
KProcess* owner = reinterpret_cast<KProcess*>(arg);
if (owner) {
owner->GetResourceLimit()->Release(LimitableResource::Events, 1);
owner->Close();
}
}
} // namespace Kernel } // namespace Kernel

@ -4,53 +4,54 @@
#pragma once #pragma once
#include "core/hle/kernel/object.h" #include "core/hle/kernel/k_readable_event.h"
#include "core/hle/kernel/k_writable_event.h"
#include "core/hle/kernel/slab_helpers.h"
namespace Kernel { namespace Kernel {
class KernelCore; class KernelCore;
class KReadableEvent; class KReadableEvent;
class KWritableEvent; class KWritableEvent;
class KProcess;
class KEvent final : public KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObjectWithList> {
KERNEL_AUTOOBJECT_TRAITS(KEvent, KAutoObject);
class KEvent final : public Object {
public: public:
explicit KEvent(KernelCore& kernel, std::string&& name); explicit KEvent(KernelCore& kernel);
~KEvent() override; virtual ~KEvent();
static std::shared_ptr<KEvent> Create(KernelCore& kernel, std::string&& name); void Initialize(std::string&& name);
void Initialize(); virtual void Finalize() override;
void Finalize() override {} virtual bool IsInitialized() const override {
return initialized;
std::string GetTypeName() const override {
return "KEvent";
} }
static constexpr HandleType HANDLE_TYPE = HandleType::Event; virtual uintptr_t GetPostDestroyArgument() const override {
HandleType GetHandleType() const override { return reinterpret_cast<uintptr_t>(owner);
return HANDLE_TYPE;
} }
std::shared_ptr<KReadableEvent>& GetReadableEvent() { static void PostDestroy(uintptr_t arg);
virtual KProcess* GetOwner() const override {
return owner;
}
KReadableEvent& GetReadableEvent() {
return readable_event; return readable_event;
} }
std::shared_ptr<KWritableEvent>& GetWritableEvent() { KWritableEvent& GetWritableEvent() {
return writable_event;
}
const std::shared_ptr<KReadableEvent>& GetReadableEvent() const {
return readable_event;
}
const std::shared_ptr<KWritableEvent>& GetWritableEvent() const {
return writable_event; return writable_event;
} }
private: private:
std::shared_ptr<KReadableEvent> readable_event; KReadableEvent readable_event;
std::shared_ptr<KWritableEvent> writable_event; KWritableEvent writable_event;
KProcess* owner{};
bool initialized{}; bool initialized{};
}; };

@ -0,0 +1,135 @@
// Copyright 2021 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "core/hle/kernel/k_handle_table.h"
namespace Kernel {
KHandleTable::KHandleTable(KernelCore& kernel_) : kernel{kernel_} {}
KHandleTable::~KHandleTable() = default;
ResultCode KHandleTable::Finalize() {
// Get the table and clear our record of it.
u16 saved_table_size = 0;
{
KScopedSpinLock lk(m_lock);
std::swap(m_table_size, saved_table_size);
}
// Close and free all entries.
for (size_t i = 0; i < saved_table_size; i++) {
if (KAutoObject* obj = m_objects[i]; obj != nullptr) {
obj->Close();
}
}
return RESULT_SUCCESS;
}
bool KHandleTable::Remove(Handle handle) {
// Don't allow removal of a pseudo-handle.
if (Svc::IsPseudoHandle(handle)) {
return false;
}
// Handles must not have reserved bits set.
const auto handle_pack = HandlePack(handle);
if (handle_pack.reserved != 0) {
return false;
}
// Find the object and free the entry.
KAutoObject* obj = nullptr;
{
KScopedSpinLock lk(m_lock);
if (this->IsValidHandle(handle)) {
const auto index = handle_pack.index;
obj = m_objects[index];
this->FreeEntry(index);
} else {
return false;
}
}
// Close the object.
obj->Close();
return true;
}
ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
KScopedSpinLock lk(m_lock);
// Never exceed our capacity.
R_UNLESS(m_count < m_table_size, ResultOutOfHandles);
// Allocate entry, set output handle.
{
const auto linear_id = this->AllocateLinearId();
const auto index = this->AllocateEntry();
m_entry_infos[index].info = {.linear_id = linear_id, .type = type};
m_objects[index] = obj;
obj->Open();
*out_handle = EncodeHandle(static_cast<u16>(index), linear_id);
}
return RESULT_SUCCESS;
}
ResultCode KHandleTable::Reserve(Handle* out_handle) {
KScopedSpinLock lk(m_lock);
// Never exceed our capacity.
R_UNLESS(m_count < m_table_size, ResultOutOfHandles);
*out_handle = EncodeHandle(static_cast<u16>(this->AllocateEntry()), this->AllocateLinearId());
return RESULT_SUCCESS;
}
void KHandleTable::Unreserve(Handle handle) {
KScopedSpinLock lk(m_lock);
// Unpack the handle.
const auto handle_pack = HandlePack(handle);
const auto index = handle_pack.index;
const auto linear_id = handle_pack.linear_id;
const auto reserved = handle_pack.reserved;
ASSERT(reserved == 0);
ASSERT(linear_id != 0);
if (index < m_table_size) {
// NOTE: This code does not check the linear id.
ASSERT(m_objects[index] == nullptr);
this->FreeEntry(index);
}
}
void KHandleTable::Register(Handle handle, KAutoObject* obj, u16 type) {
KScopedSpinLock lk(m_lock);
// Unpack the handle.
const auto handle_pack = HandlePack(handle);
const auto index = handle_pack.index;
const auto linear_id = handle_pack.linear_id;
const auto reserved = handle_pack.reserved;
ASSERT(reserved == 0);
ASSERT(linear_id != 0);
if (index < m_table_size) {
// Set the entry.
ASSERT(m_objects[index] == nullptr);
m_entry_infos[index].info = {.linear_id = static_cast<u16>(linear_id), .type = type};
m_objects[index] = obj;
obj->Open();
}
}
} // namespace Kernel
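Taken together with the accompanying header, the table is used roughly as follows. This is a hedged sketch with assumed setup (the kernel and object references are hypothetical), not part of the patch:
#include "core/hle/kernel/k_handle_table.h"
// Hypothetical end-to-end use of a handle table.
void HandleTableExample(Kernel::KernelCore& kernel, Kernel::KAutoObject* obj) {
    Kernel::KHandleTable table{kernel};
    if (table.Initialize(128).IsError()) {
        return;
    }
    Handle handle{};
    // Add() stores the object, opens a reference on the table's behalf, and encodes a handle.
    if (table.Add(&handle, obj).IsError()) {
        return;
    }
    // GetObject() returns a KScopedAutoObject that holds its own reference while in scope.
    if (auto found = table.GetObject(handle); found.IsNotNull()) {
        // ... use *found ...
    }
    // Remove() frees the entry and closes the reference the table held.
    table.Remove(handle);
    table.Finalize();
}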

@ -0,0 +1,310 @@
// Copyright 2021 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include "common/assert.h"
#include "common/bit_field.h"
#include "common/bit_util.h"
#include "common/common_types.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_spin_lock.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_common.h"
#include "core/hle/kernel/svc_results.h"
#include "core/hle/result.h"
namespace Kernel {
class KernelCore;
class KHandleTable {
YUZU_NON_COPYABLE(KHandleTable);
YUZU_NON_MOVEABLE(KHandleTable);
public:
static constexpr size_t MaxTableSize = 1024;
public:
explicit KHandleTable(KernelCore& kernel_);
~KHandleTable();
ResultCode Initialize(s32 size) {
R_UNLESS(size <= static_cast<s32>(MaxTableSize), ResultOutOfMemory);
// Initialize all fields.
m_max_count = 0;
m_table_size = static_cast<u16>((size <= 0) ? MaxTableSize : size);
m_next_linear_id = MinLinearId;
m_count = 0;
m_free_head_index = -1;
// Free all entries.
for (s32 i = 0; i < static_cast<s32>(m_table_size); ++i) {
m_objects[i] = nullptr;
m_entry_infos[i].next_free_index = i - 1;
m_free_head_index = i;
}
return RESULT_SUCCESS;
}
size_t GetTableSize() const {
return m_table_size;
}
size_t GetCount() const {
return m_count;
}
size_t GetMaxCount() const {
return m_max_count;
}
ResultCode Finalize();
bool Remove(Handle handle);
template <typename T = KAutoObject>
KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const {
// Lock and look up in table.
KScopedSpinLock lk(m_lock);
if constexpr (std::is_same_v<T, KAutoObject>) {
return this->GetObjectImpl(handle);
} else {
if (auto* obj = this->GetObjectImpl(handle); obj != nullptr) {
return obj->DynamicCast<T*>();
} else {
return nullptr;
}
}
}
template <typename T = KAutoObject>
KScopedAutoObject<T> GetObject(Handle handle) const {
// Handle pseudo-handles.
if constexpr (std::derived_from<KProcess, T>) {
if (handle == Svc::PseudoHandle::CurrentProcess) {
auto* const cur_process = kernel.CurrentProcess();
ASSERT(cur_process != nullptr);
return cur_process;
}
} else if constexpr (std::derived_from<KThread, T>) {
if (handle == Svc::PseudoHandle::CurrentThread) {
auto* const cur_thread = GetCurrentThreadPointer(kernel);
ASSERT(cur_thread != nullptr);
return cur_thread;
}
}
return this->template GetObjectWithoutPseudoHandle<T>(handle);
}
ResultCode Reserve(Handle* out_handle);
void Unreserve(Handle handle);
template <typename T>
ResultCode Add(Handle* out_handle, T* obj) {
static_assert(std::is_base_of_v<KAutoObject, T>);
return this->Add(out_handle, obj, obj->GetTypeObj().GetClassToken());
}
template <typename T>
void Register(Handle handle, T* obj) {
static_assert(std::is_base_of_v<KAutoObject, T>);
return this->Register(handle, obj, obj->GetTypeObj().GetClassToken());
}
template <typename T>
bool GetMultipleObjects(T** out, const Handle* handles, size_t num_handles) const {
// Try to convert and open all the handles.
size_t num_opened;
{
// Lock the table.
KScopedSpinLock lk(m_lock);
for (num_opened = 0; num_opened < num_handles; num_opened++) {
// Get the current handle.
const auto cur_handle = handles[num_opened];
// Get the object for the current handle.
KAutoObject* cur_object = this->GetObjectImpl(cur_handle);
if (cur_object == nullptr) {
break;
}
// Cast the current object to the desired type.
T* cur_t = cur_object->DynamicCast<T*>();
if (cur_t == nullptr) {
break;
}
// Open a reference to the current object.
cur_t->Open();
out[num_opened] = cur_t;
}
}
// If we converted every object, succeed.
if (num_opened == num_handles) {
return true;
}
// If we didn't convert every object, close the ones we opened.
for (size_t i = 0; i < num_opened; i++) {
out[i]->Close();
}
return false;
}
private:
ResultCode Add(Handle* out_handle, KAutoObject* obj, u16 type);
void Register(Handle handle, KAutoObject* obj, u16 type);
s32 AllocateEntry() {
ASSERT(m_count < m_table_size);
const auto index = m_free_head_index;
m_free_head_index = m_entry_infos[index].GetNextFreeIndex();
m_max_count = std::max(m_max_count, ++m_count);
return index;
}
void FreeEntry(s32 index) {
ASSERT(m_count > 0);
m_objects[index] = nullptr;
m_entry_infos[index].next_free_index = m_free_head_index;
m_free_head_index = index;
--m_count;
}
u16 AllocateLinearId() {
const u16 id = m_next_linear_id++;
if (m_next_linear_id > MaxLinearId) {
m_next_linear_id = MinLinearId;
}
return id;
}
bool IsValidHandle(Handle handle) const {
// Unpack the handle.
const auto handle_pack = HandlePack(handle);
const auto raw_value = handle_pack.raw;
const auto index = handle_pack.index;
const auto linear_id = handle_pack.linear_id;
const auto reserved = handle_pack.reserved;
ASSERT(reserved == 0);
// Validate our indexing information.
if (raw_value == 0) {
return false;
}
if (linear_id == 0) {
return false;
}
if (index >= m_table_size) {
return false;
}
// Check that there's an object, and our serial id is correct.
if (m_objects[index] == nullptr) {
return false;
}
if (m_entry_infos[index].GetLinearId() != linear_id) {
return false;
}
return true;
}
KAutoObject* GetObjectImpl(Handle handle) const {
// Handles must not have reserved bits set.
const auto handle_pack = HandlePack(handle);
if (handle_pack.reserved != 0) {
return nullptr;
}
if (this->IsValidHandle(handle)) {
return m_objects[handle_pack.index];
} else {
return nullptr;
}
}
KAutoObject* GetObjectByIndexImpl(Handle* out_handle, size_t index) const {
// Index must be in bounds.
if (index >= m_table_size) {
return nullptr;
}
// Ensure entry has an object.
if (KAutoObject* obj = m_objects[index]; obj != nullptr) {
*out_handle = EncodeHandle(static_cast<u16>(index), m_entry_infos[index].GetLinearId());
return obj;
} else {
return nullptr;
}
}
private:
union HandlePack {
HandlePack() = default;
HandlePack(Handle handle) : raw{static_cast<u32>(handle)} {}
u32 raw;
BitField<0, 15, u32> index;
BitField<15, 15, u32> linear_id;
BitField<30, 2, u32> reserved;
};
static constexpr u16 MinLinearId = 1;
static constexpr u16 MaxLinearId = 0x7FFF;
static constexpr Handle EncodeHandle(u16 index, u16 linear_id) {
HandlePack handle{};
handle.index.Assign(index);
handle.linear_id.Assign(linear_id);
handle.reserved.Assign(0);
return handle.raw;
}
union EntryInfo {
struct {
u16 linear_id;
u16 type;
} info;
s32 next_free_index;
constexpr u16 GetLinearId() const {
return info.linear_id;
}
constexpr u16 GetType() const {
return info.type;
}
constexpr s32 GetNextFreeIndex() const {
return next_free_index;
}
};
private:
std::array<EntryInfo, MaxTableSize> m_entry_infos{};
std::array<KAutoObject*, MaxTableSize> m_objects{};
s32 m_free_head_index{-1};
u16 m_table_size{};
u16 m_max_count{};
u16 m_next_linear_id{MinLinearId};
u16 m_count{};
mutable KSpinLock m_lock;
KernelCore& kernel;
};
} // namespace Kernel
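The HandlePack layout above splits a 32-bit handle into a 15-bit table index, a 15-bit linear id, and 2 reserved bits. A hedged standalone illustration of that packing (the helper and values are assumptions, not part of the patch):
#include <cstdint>
// Mirrors the bit layout of HandlePack: index in bits 0-14, linear id in bits 15-29.
constexpr uint32_t EncodeHandleExample(uint16_t index, uint16_t linear_id) {
    return static_cast<uint32_t>(index & 0x7FFF) |
           (static_cast<uint32_t>(linear_id & 0x7FFF) << 15);
}
// Index 3 with linear id 1 encodes to 0x8003; the reserved bits stay zero.
static_assert(EncodeHandleExample(3, 1) == 0x8003);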

@ -0,0 +1,238 @@
// Copyright 2021 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <boost/intrusive/list.hpp>
#include "common/assert.h"
#include "core/hle/kernel/slab_helpers.h"
namespace Kernel {
class KernelCore;
class KLinkedListNode : public boost::intrusive::list_base_hook<>,
public KSlabAllocated<KLinkedListNode> {
public:
KLinkedListNode() = default;
void Initialize(void* it) {
m_item = it;
}
void* GetItem() const {
return m_item;
}
private:
void* m_item = nullptr;
};
template <typename T>
class KLinkedList : private boost::intrusive::list<KLinkedListNode> {
private:
using BaseList = boost::intrusive::list<KLinkedListNode>;
public:
template <bool Const>
class Iterator;
using value_type = T;
using size_type = size_t;
using difference_type = ptrdiff_t;
using pointer = value_type*;
using const_pointer = const value_type*;
using reference = value_type&;
using const_reference = const value_type&;
using iterator = Iterator<false>;
using const_iterator = Iterator<true>;
using reverse_iterator = std::reverse_iterator<iterator>;
using const_reverse_iterator = std::reverse_iterator<const_iterator>;
template <bool Const>
class Iterator {
private:
using BaseIterator = BaseList::iterator;
friend class KLinkedList;
public:
using iterator_category = std::bidirectional_iterator_tag;
using value_type = typename KLinkedList::value_type;
using difference_type = typename KLinkedList::difference_type;
using pointer = std::conditional_t<Const, KLinkedList::const_pointer, KLinkedList::pointer>;
using reference =
std::conditional_t<Const, KLinkedList::const_reference, KLinkedList::reference>;
public:
explicit Iterator(BaseIterator it) : m_base_it(it) {}
pointer GetItem() const {
return static_cast<pointer>(m_base_it->GetItem());
}
bool operator==(const Iterator& rhs) const {
return m_base_it == rhs.m_base_it;
}
bool operator!=(const Iterator& rhs) const {
return !(*this == rhs);
}
pointer operator->() const {
return this->GetItem();
}
reference operator*() const {
return *this->GetItem();
}
Iterator& operator++() {
++m_base_it;
return *this;
}
Iterator& operator--() {
--m_base_it;
return *this;
}
Iterator operator++(int) {
const Iterator it{*this};
++(*this);
return it;
}
Iterator operator--(int) {
const Iterator it{*this};
--(*this);
return it;
}
operator Iterator<true>() const {
return Iterator<true>(m_base_it);
}
private:
BaseIterator m_base_it;
};
public:
constexpr KLinkedList(KernelCore& kernel_) : BaseList(), kernel{kernel_} {}
~KLinkedList() {
// Erase all elements.
for (auto it = this->begin(); it != this->end(); it = this->erase(kernel, it)) {
}
// Ensure we succeeded.
ASSERT(this->empty());
}
// Iterator accessors.
iterator begin() {
return iterator(BaseList::begin());
}
const_iterator begin() const {
return const_iterator(BaseList::begin());
}
iterator end() {
return iterator(BaseList::end());
}
const_iterator end() const {
return const_iterator(BaseList::end());
}
const_iterator cbegin() const {
return this->begin();
}
const_iterator cend() const {
return this->end();
}
reverse_iterator rbegin() {
return reverse_iterator(this->end());
}
const_reverse_iterator rbegin() const {
return const_reverse_iterator(this->end());
}
reverse_iterator rend() {
return reverse_iterator(this->begin());
}
const_reverse_iterator rend() const {
return const_reverse_iterator(this->begin());
}
const_reverse_iterator crbegin() const {
return this->rbegin();
}
const_reverse_iterator crend() const {
return this->rend();
}
// Content management.
using BaseList::empty;
using BaseList::size;
reference back() {
return *(--this->end());
}
const_reference back() const {
return *(--this->end());
}
reference front() {
return *this->begin();
}
const_reference front() const {
return *this->begin();
}
iterator insert(const_iterator pos, reference ref) {
KLinkedListNode* node = KLinkedListNode::Allocate(kernel);
ASSERT(node != nullptr);
node->Initialize(std::addressof(ref));
return iterator(BaseList::insert(pos.m_base_it, *node));
}
void push_back(reference ref) {
this->insert(this->end(), ref);
}
void push_front(reference ref) {
this->insert(this->begin(), ref);
}
void pop_back() {
this->erase(--this->end());
}
void pop_front() {
this->erase(this->begin());
}
iterator erase(KernelCore& kernel, const iterator pos) {
KLinkedListNode* freed_node = std::addressof(*pos.m_base_it);
iterator ret = iterator(BaseList::erase(pos.m_base_it));
KLinkedListNode::Free(kernel, freed_node);
return ret;
}
private:
KernelCore& kernel;
};
} // namespace Kernel
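A hedged usage sketch of the list above (the kernel and thread references are assumed to exist elsewhere; not part of the patch). The key property is that the list never owns the elements; only the KLinkedListNode wrappers come from the slab:
#include "core/hle/kernel/k_linked_list.h"
#include "core/hle/kernel/k_thread.h"
// Hypothetical helper tracking two threads in a slab-backed list.
void TrackThreads(Kernel::KernelCore& kernel, Kernel::KThread& a, Kernel::KThread& b) {
    Kernel::KLinkedList<Kernel::KThread> list{kernel};
    // push_back() allocates a KLinkedListNode from the slab and points it at the element.
    list.push_back(a);
    list.push_back(b);
    // erase() returns the next iterator and releases the node back to the slab.
    for (auto it = list.begin(); it != list.end(); it = list.erase(kernel, it)) {
        // ... inspect *it here ...
    }
}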

@ -134,6 +134,10 @@ enum class KMemoryPermission : u8 {
}; };
DECLARE_ENUM_FLAG_OPERATORS(KMemoryPermission); DECLARE_ENUM_FLAG_OPERATORS(KMemoryPermission);
constexpr KMemoryPermission ConvertToKMemoryPermission(Svc::MemoryPermission perm) {
return static_cast<KMemoryPermission>(perm);
}
enum class KMemoryAttribute : u8 { enum class KMemoryAttribute : u8 {
None = 0x00, None = 0x00,
Mask = 0x7F, Mask = 0x7F,

@ -11,11 +11,11 @@
#include "core/hle/kernel/k_memory_block_manager.h" #include "core/hle/kernel/k_memory_block_manager.h"
#include "core/hle/kernel/k_page_linked_list.h" #include "core/hle/kernel/k_page_linked_list.h"
#include "core/hle/kernel/k_page_table.h" #include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h" #include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h" #include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_system_control.h" #include "core/hle/kernel/k_system_control.h"
#include "core/hle/kernel/kernel.h" #include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/svc_results.h" #include "core/hle/kernel/svc_results.h"
#include "core/memory.h" #include "core/memory.h"
@ -420,7 +420,7 @@ ResultCode KPageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
remaining_size); remaining_size);
if (!memory_reservation.Succeeded()) { if (!memory_reservation.Succeeded()) {
LOG_ERROR(Kernel, "Could not reserve remaining {:X} bytes", remaining_size); LOG_ERROR(Kernel, "Could not reserve remaining {:X} bytes", remaining_size);
return ResultResourceLimitedExceeded; return ResultLimitReached;
} }
KPageLinkedList page_linked_list; KPageLinkedList page_linked_list;
@ -578,7 +578,7 @@ ResultCode KPageTable::Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size) {
AddRegionToPages(dst_addr, num_pages, dst_pages); AddRegionToPages(dst_addr, num_pages, dst_pages);
if (!dst_pages.IsEqual(src_pages)) { if (!dst_pages.IsEqual(src_pages)) {
return ResultInvalidMemoryRange; return ResultInvalidMemoryRegion;
} }
{ {
@ -641,6 +641,45 @@ ResultCode KPageTable::MapPages(VAddr addr, KPageLinkedList& page_linked_list, K
return RESULT_SUCCESS; return RESULT_SUCCESS;
} }
ResultCode KPageTable::UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list) {
VAddr cur_addr{addr};
for (const auto& node : page_linked_list.Nodes()) {
const std::size_t num_pages{(addr - cur_addr) / PageSize};
if (const auto result{
Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap)};
result.IsError()) {
return result;
}
cur_addr += node.GetNumPages() * PageSize;
}
return RESULT_SUCCESS;
}
ResultCode KPageTable::UnmapPages(VAddr addr, KPageLinkedList& page_linked_list,
KMemoryState state) {
std::lock_guard lock{page_table_lock};
const std::size_t num_pages{page_linked_list.GetNumPages()};
const std::size_t size{num_pages * PageSize};
if (!CanContain(addr, size, state)) {
return ResultInvalidCurrentMemory;
}
if (IsRegionMapped(addr, num_pages * PageSize)) {
return ResultInvalidCurrentMemory;
}
CASCADE_CODE(UnmapPages(addr, page_linked_list));
block_manager->Update(addr, num_pages, state, KMemoryPermission::None);
return RESULT_SUCCESS;
}
ResultCode KPageTable::SetCodeMemoryPermission(VAddr addr, std::size_t size, ResultCode KPageTable::SetCodeMemoryPermission(VAddr addr, std::size_t size,
KMemoryPermission perm) { KMemoryPermission perm) {
@ -790,7 +829,7 @@ ResultVal<VAddr> KPageTable::SetHeapSize(std::size_t size) {
if (!memory_reservation.Succeeded()) { if (!memory_reservation.Succeeded()) {
LOG_ERROR(Kernel, "Could not reserve heap extension of size {:X} bytes", delta); LOG_ERROR(Kernel, "Could not reserve heap extension of size {:X} bytes", delta);
return ResultResourceLimitedExceeded; return ResultLimitReached;
} }
KPageLinkedList page_linked_list; KPageLinkedList page_linked_list;
@ -1067,7 +1106,7 @@ constexpr std::size_t KPageTable::GetRegionSize(KMemoryState state) const {
} }
} }
constexpr bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) const { bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) const {
const VAddr end{addr + size}; const VAddr end{addr + size};
const VAddr last{end - 1}; const VAddr last{end - 1};
const VAddr region_start{GetRegionAddress(state)}; const VAddr region_start{GetRegionAddress(state)};

@ -40,6 +40,7 @@ public:
ResultCode Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size); ResultCode Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size);
ResultCode MapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state, ResultCode MapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state,
KMemoryPermission perm); KMemoryPermission perm);
ResultCode UnmapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state);
ResultCode SetCodeMemoryPermission(VAddr addr, std::size_t size, KMemoryPermission perm); ResultCode SetCodeMemoryPermission(VAddr addr, std::size_t size, KMemoryPermission perm);
KMemoryInfo QueryInfo(VAddr addr); KMemoryInfo QueryInfo(VAddr addr);
ResultCode ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm); ResultCode ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm);
@ -63,6 +64,8 @@ public:
return page_table_impl; return page_table_impl;
} }
bool CanContain(VAddr addr, std::size_t size, KMemoryState state) const;
private: private:
enum class OperationType : u32 { enum class OperationType : u32 {
Map, Map,
@ -79,6 +82,7 @@ private:
ResultCode InitializeMemoryLayout(VAddr start, VAddr end); ResultCode InitializeMemoryLayout(VAddr start, VAddr end);
ResultCode MapPages(VAddr addr, const KPageLinkedList& page_linked_list, ResultCode MapPages(VAddr addr, const KPageLinkedList& page_linked_list,
KMemoryPermission perm); KMemoryPermission perm);
ResultCode UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list);
void MapPhysicalMemory(KPageLinkedList& page_linked_list, VAddr start, VAddr end); void MapPhysicalMemory(KPageLinkedList& page_linked_list, VAddr start, VAddr end);
bool IsRegionMapped(VAddr address, u64 size); bool IsRegionMapped(VAddr address, u64 size);
bool IsRegionContiguous(VAddr addr, u64 size) const; bool IsRegionContiguous(VAddr addr, u64 size) const;
@ -92,7 +96,6 @@ private:
OperationType operation, PAddr map_addr = 0); OperationType operation, PAddr map_addr = 0);
constexpr VAddr GetRegionAddress(KMemoryState state) const; constexpr VAddr GetRegionAddress(KMemoryState state) const;
constexpr std::size_t GetRegionSize(KMemoryState state) const; constexpr std::size_t GetRegionSize(KMemoryState state) const;
constexpr bool CanContain(VAddr addr, std::size_t size, KMemoryState state) const;
constexpr ResultCode CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, constexpr ResultCode CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask, KMemoryState state, KMemoryPermission perm_mask,
@ -216,8 +219,6 @@ public:
constexpr PAddr GetPhysicalAddr(VAddr addr) { constexpr PAddr GetPhysicalAddr(VAddr addr) {
return page_table_impl.backing_addr[addr >> PageBits] + addr; return page_table_impl.backing_addr[addr >> PageBits] + addr;
} }
private:
constexpr bool Contains(VAddr addr) const { constexpr bool Contains(VAddr addr) const {
return address_space_start <= addr && addr <= address_space_end - 1; return address_space_start <= addr && addr <= address_space_end - 1;
} }
@ -225,6 +226,8 @@ private:
return address_space_start <= addr && addr < addr + size && return address_space_start <= addr && addr < addr + size &&
addr + size - 1 <= address_space_end - 1; addr + size - 1 <= address_space_end - 1;
} }
private:
constexpr bool IsKernel() const { constexpr bool IsKernel() const {
return is_kernel; return is_kernel;
} }

@ -0,0 +1,68 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_port.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/svc_results.h"
namespace Kernel {
KPort::KPort(KernelCore& kernel)
: KAutoObjectWithSlabHeapAndContainer{kernel}, server{kernel}, client{kernel} {}
KPort::~KPort() = default;
void KPort::Initialize(s32 max_sessions_, bool is_light_, const std::string& name_) {
// Open a new reference count to the initialized port.
Open();
// Create and initialize our server/client pair.
KAutoObject::Create(std::addressof(server));
KAutoObject::Create(std::addressof(client));
server.Initialize(this, name_ + ":Server");
client.Initialize(this, max_sessions_, name_ + ":Client");
// Set our member variables.
is_light = is_light_;
name = name_;
state = State::Normal;
}
void KPort::OnClientClosed() {
KScopedSchedulerLock sl{kernel};
if (state == State::Normal) {
state = State::ClientClosed;
}
}
void KPort::OnServerClosed() {
KScopedSchedulerLock sl{kernel};
if (state == State::Normal) {
state = State::ServerClosed;
}
}
bool KPort::IsServerClosed() const {
KScopedSchedulerLock sl{kernel};
return state == State::ServerClosed;
}
ResultCode KPort::EnqueueSession(KServerSession* session) {
KScopedSchedulerLock sl{kernel};
R_UNLESS(state == State::Normal, ResultPortClosed);
if (server.HasHLEHandler()) {
server.GetHLEHandler()->ClientConnected(session);
} else {
server.EnqueueSession(session);
}
return RESULT_SUCCESS;
}
} // namespace Kernel

@ -0,0 +1,69 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <string>
#include "common/common_types.h"
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_server_port.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
namespace Kernel {
class KServerSession;
class KPort final : public KAutoObjectWithSlabHeapAndContainer<KPort, KAutoObjectWithList> {
KERNEL_AUTOOBJECT_TRAITS(KPort, KAutoObject);
public:
explicit KPort(KernelCore& kernel);
virtual ~KPort();
static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
void Initialize(s32 max_sessions_, bool is_light_, const std::string& name_);
void OnClientClosed();
void OnServerClosed();
bool IsLight() const {
return is_light;
}
bool IsServerClosed() const;
ResultCode EnqueueSession(KServerSession* session);
KClientPort& GetClientPort() {
return client;
}
KServerPort& GetServerPort() {
return server;
}
const KClientPort& GetClientPort() const {
return client;
}
const KServerPort& GetServerPort() const {
return server;
}
private:
enum class State : u8 {
Invalid = 0,
Normal = 1,
ClientClosed = 2,
ServerClosed = 3,
};
private:
KServerPort server;
KClientPort client;
State state{State::Invalid};
bool is_light{};
};
} // namespace Kernel

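For orientation only (not part of this change): a minimal sketch of how a caller might wire up the new KPort surface. The Create helper from slab_helpers.h, the "sm:" port name, and the handler/session parameters are assumptions, not taken from this diff.

// Hypothetical usage sketch, assumed to live in namespace Kernel with the headers above included.
ResultCode RegisterNamedPort(Core::System& system, KServerPort::HLEHandler handler,
                             KServerSession* session) {
    // KPort::Create is assumed to come from KAutoObjectWithSlabHeapAndContainer.
    KPort* port = KPort::Create(system.Kernel());
    port->Initialize(/*max_sessions_=*/64, /*is_light_=*/false, "sm:");

    // Install an HLE handler on the server end; EnqueueSession then forwards
    // incoming sessions to it instead of queueing them for AcceptSession().
    port->GetServerPort().SetHleHandler(std::move(handler));
    return port->EnqueueSession(session);
}
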
@ -17,13 +17,14 @@
#include "core/hle/kernel/code_set.h" #include "core/hle/kernel/code_set.h"
#include "core/hle/kernel/k_memory_block_manager.h" #include "core/hle/kernel/k_memory_block_manager.h"
#include "core/hle/kernel/k_page_table.h" #include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h" #include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h" #include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_slab_heap.h" #include "core/hle/kernel/k_slab_heap.h"
#include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h" #include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/svc_results.h" #include "core/hle/kernel/svc_results.h"
#include "core/hle/lock.h" #include "core/hle/lock.h"
#include "core/memory.h" #include "core/memory.h"
@ -37,17 +38,20 @@ namespace {
* @param owner_process The parent process for the main thread * @param owner_process The parent process for the main thread
* @param priority The priority to give the main thread * @param priority The priority to give the main thread
*/ */
void SetupMainThread(Core::System& system, Process& owner_process, u32 priority, VAddr stack_top) { void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority, VAddr stack_top) {
const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart(); const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart();
ASSERT(owner_process.GetResourceLimit()->Reserve(LimitableResource::Threads, 1)); ASSERT(owner_process.GetResourceLimit()->Reserve(LimitableResource::Threads, 1));
auto thread_res =
KThread::CreateUserThread(system, ThreadType::User, "main", entry_point, priority, 0,
owner_process.GetIdealCoreId(), stack_top, &owner_process);
std::shared_ptr<KThread> thread = std::move(thread_res).Unwrap(); KThread* thread = KThread::Create(system.Kernel());
ASSERT(KThread::InitializeUserThread(system, thread, entry_point, 0, stack_top, priority,
owner_process.GetIdealCoreId(), &owner_process)
.IsSuccess());
// Register 1 must be a handle to the main thread // Register 1 must be a handle to the main thread
const Handle thread_handle = owner_process.GetHandleTable().Create(thread).Unwrap(); Handle thread_handle{};
owner_process.GetHandleTable().Add(&thread_handle, thread);
thread->SetName("main");
thread->GetContext32().cpu_registers[0] = 0; thread->GetContext32().cpu_registers[0] = 0;
thread->GetContext64().cpu_registers[0] = 0; thread->GetContext64().cpu_registers[0] = 0;
thread->GetContext32().cpu_registers[1] = thread_handle; thread->GetContext32().cpu_registers[1] = thread_handle;
@ -114,10 +118,10 @@ private:
std::bitset<num_slot_entries> is_slot_used; std::bitset<num_slot_entries> is_slot_used;
}; };
std::shared_ptr<Process> Process::Create(Core::System& system, std::string name, ProcessType type) { ResultCode KProcess::Initialize(KProcess* process, Core::System& system, std::string name,
ProcessType type) {
auto& kernel = system.Kernel(); auto& kernel = system.Kernel();
std::shared_ptr<Process> process = std::make_shared<Process>(system);
process->name = std::move(name); process->name = std::move(name);
process->resource_limit = kernel.GetSystemResourceLimit(); process->resource_limit = kernel.GetSystemResourceLimit();
@ -126,6 +130,7 @@ std::shared_ptr<Process> Process::Create(Core::System& system, std::string name,
process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID() process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID()
: kernel.CreateNewUserProcessID(); : kernel.CreateNewUserProcessID();
process->capabilities.InitializeForMetadatalessProcess(); process->capabilities.InitializeForMetadatalessProcess();
process->is_initialized = true;
std::mt19937 rng(Settings::values.rng_seed.GetValue().value_or(std::time(nullptr))); std::mt19937 rng(Settings::values.rng_seed.GetValue().value_or(std::time(nullptr)));
std::uniform_int_distribution<u64> distribution; std::uniform_int_distribution<u64> distribution;
@ -133,14 +138,18 @@ std::shared_ptr<Process> Process::Create(Core::System& system, std::string name,
[&] { return distribution(rng); }); [&] { return distribution(rng); });
kernel.AppendNewProcess(process); kernel.AppendNewProcess(process);
return process;
// Open a reference to the resource limit.
process->resource_limit->Open();
return RESULT_SUCCESS;
} }
std::shared_ptr<KResourceLimit> Process::GetResourceLimit() const { KResourceLimit* KProcess::GetResourceLimit() const {
return resource_limit; return resource_limit;
} }
void Process::IncrementThreadCount() { void KProcess::IncrementThreadCount() {
ASSERT(num_threads >= 0); ASSERT(num_threads >= 0);
num_created_threads++; num_created_threads++;
@ -149,7 +158,7 @@ void Process::IncrementThreadCount() {
} }
} }
void Process::DecrementThreadCount() { void KProcess::DecrementThreadCount() {
ASSERT(num_threads > 0); ASSERT(num_threads > 0);
if (const auto count = --num_threads; count == 0) { if (const auto count = --num_threads; count == 0) {
@ -157,31 +166,34 @@ void Process::DecrementThreadCount() {
} }
} }
u64 Process::GetTotalPhysicalMemoryAvailable() const { u64 KProcess::GetTotalPhysicalMemoryAvailable() const {
const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) + const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) +
page_table->GetTotalHeapSize() + GetSystemResourceSize() + image_size + page_table->GetTotalHeapSize() + GetSystemResourceSize() + image_size +
main_thread_stack_size}; main_thread_stack_size};
ASSERT(capacity == kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application)); if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application);
capacity != pool_size) {
LOG_WARNING(Kernel, "capacity {} != application pool size {}", capacity, pool_size);
}
if (capacity < memory_usage_capacity) { if (capacity < memory_usage_capacity) {
return capacity; return capacity;
} }
return memory_usage_capacity; return memory_usage_capacity;
} }
u64 Process::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const { u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const {
return GetTotalPhysicalMemoryAvailable() - GetSystemResourceSize(); return GetTotalPhysicalMemoryAvailable() - GetSystemResourceSize();
} }
u64 Process::GetTotalPhysicalMemoryUsed() const { u64 KProcess::GetTotalPhysicalMemoryUsed() const {
return image_size + main_thread_stack_size + page_table->GetTotalHeapSize() + return image_size + main_thread_stack_size + page_table->GetTotalHeapSize() +
GetSystemResourceSize(); GetSystemResourceSize();
} }
u64 Process::GetTotalPhysicalMemoryUsedWithoutSystemResource() const { u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() const {
return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage(); return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage();
} }
bool Process::ReleaseUserException(KThread* thread) { bool KProcess::ReleaseUserException(KThread* thread) {
KScopedSchedulerLock sl{kernel}; KScopedSchedulerLock sl{kernel};
if (exception_thread == thread) { if (exception_thread == thread) {
@ -206,7 +218,7 @@ bool Process::ReleaseUserException(KThread* thread) {
} }
} }
void Process::PinCurrentThread() { void KProcess::PinCurrentThread() {
ASSERT(kernel.GlobalSchedulerContext().IsLocked()); ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Get the current thread. // Get the current thread.
@ -221,7 +233,7 @@ void Process::PinCurrentThread() {
KScheduler::SetSchedulerUpdateNeeded(kernel); KScheduler::SetSchedulerUpdateNeeded(kernel);
} }
void Process::UnpinCurrentThread() { void KProcess::UnpinCurrentThread() {
ASSERT(kernel.GlobalSchedulerContext().IsLocked()); ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Get the current thread. // Get the current thread.
@ -236,15 +248,39 @@ void Process::UnpinCurrentThread() {
KScheduler::SetSchedulerUpdateNeeded(kernel); KScheduler::SetSchedulerUpdateNeeded(kernel);
} }
void Process::RegisterThread(const KThread* thread) { ResultCode KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
[[maybe_unused]] size_t size) {
// Lock ourselves, to prevent concurrent access.
KScopedLightLock lk(state_lock);
// TODO(bunnei): Manage KSharedMemoryInfo list here.
// Open a reference to the shared memory.
shmem->Open();
return RESULT_SUCCESS;
}
void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
[[maybe_unused]] size_t size) {
// Lock ourselves, to prevent concurrent access.
KScopedLightLock lk(state_lock);
// TODO(bunnei): Manage KSharedMemoryInfo list here.
// Close a reference to the shared memory.
shmem->Close();
}
void KProcess::RegisterThread(const KThread* thread) {
thread_list.push_back(thread); thread_list.push_back(thread);
} }
void Process::UnregisterThread(const KThread* thread) { void KProcess::UnregisterThread(const KThread* thread) {
thread_list.remove(thread); thread_list.remove(thread);
} }
ResultCode Process::Reset() { ResultCode KProcess::Reset() {
// Lock the process and the scheduler. // Lock the process and the scheduler.
KScopedLightLock lk(state_lock); KScopedLightLock lk(state_lock);
KScopedSchedulerLock sl{kernel}; KScopedSchedulerLock sl{kernel};
@ -258,7 +294,7 @@ ResultCode Process::Reset() {
return RESULT_SUCCESS; return RESULT_SUCCESS;
} }
ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, ResultCode KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
std::size_t code_size) { std::size_t code_size) {
program_id = metadata.GetTitleID(); program_id = metadata.GetTitleID();
ideal_core = metadata.GetMainThreadCore(); ideal_core = metadata.GetMainThreadCore();
@ -271,7 +307,7 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
if (!memory_reservation.Succeeded()) { if (!memory_reservation.Succeeded()) {
LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes", LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes",
code_size + system_resource_size); code_size + system_resource_size);
return ResultResourceLimitedExceeded; return ResultLimitReached;
} }
// Initialize process address space // Initialize process address space
if (const ResultCode result{ if (const ResultCode result{
@ -318,10 +354,10 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
tls_region_address = CreateTLSRegion(); tls_region_address = CreateTLSRegion();
memory_reservation.Commit(); memory_reservation.Commit();
return handle_table.SetSize(capabilities.GetHandleTableSize()); return handle_table.Initialize(capabilities.GetHandleTableSize());
} }
void Process::Run(s32 main_thread_priority, u64 stack_size) { void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
AllocateMainThreadStack(stack_size); AllocateMainThreadStack(stack_size);
resource_limit->Reserve(LimitableResource::Threads, 1); resource_limit->Reserve(LimitableResource::Threads, 1);
resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size); resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size);
@ -331,18 +367,18 @@ void Process::Run(s32 main_thread_priority, u64 stack_size) {
ChangeStatus(ProcessStatus::Running); ChangeStatus(ProcessStatus::Running);
SetupMainThread(system, *this, main_thread_priority, main_thread_stack_top); SetupMainThread(kernel.System(), *this, main_thread_priority, main_thread_stack_top);
} }
void Process::PrepareForTermination() { void KProcess::PrepareForTermination() {
ChangeStatus(ProcessStatus::Exiting); ChangeStatus(ProcessStatus::Exiting);
const auto stop_threads = [this](const std::vector<std::shared_ptr<KThread>>& thread_list) { const auto stop_threads = [this](const std::vector<KThread*>& thread_list) {
for (auto& thread : thread_list) { for (auto& thread : thread_list) {
if (thread->GetOwnerProcess() != this) if (thread->GetOwnerProcess() != this)
continue; continue;
if (thread.get() == kernel.CurrentScheduler()->GetCurrentThread()) if (thread == kernel.CurrentScheduler()->GetCurrentThread())
continue; continue;
// TODO(Subv): When are the other running/ready threads terminated? // TODO(Subv): When are the other running/ready threads terminated?
@ -353,7 +389,7 @@ void Process::PrepareForTermination() {
} }
}; };
stop_threads(system.GlobalSchedulerContext().GetThreadList()); stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList());
FreeTLSRegion(tls_region_address); FreeTLSRegion(tls_region_address);
tls_region_address = 0; tls_region_address = 0;
@ -366,6 +402,16 @@ void Process::PrepareForTermination() {
ChangeStatus(ProcessStatus::Exited); ChangeStatus(ProcessStatus::Exited);
} }
void KProcess::Finalize() {
// Release memory to the resource limit.
if (resource_limit != nullptr) {
resource_limit->Close();
}
// Perform inherited finalization.
KAutoObjectWithSlabHeapAndContainer<KProcess, KSynchronizationObject>::Finalize();
}
/** /**
* Attempts to find a TLS page that contains a free slot for * Attempts to find a TLS page that contains a free slot for
* use by a thread. * use by a thread.
@ -379,8 +425,8 @@ static auto FindTLSPageWithAvailableSlots(std::vector<TLSPage>& tls_pages) {
[](const auto& page) { return page.HasAvailableSlots(); }); [](const auto& page) { return page.HasAvailableSlots(); });
} }
VAddr Process::CreateTLSRegion() { VAddr KProcess::CreateTLSRegion() {
KScopedSchedulerLock lock(system.Kernel()); KScopedSchedulerLock lock(kernel);
if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)}; if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)};
tls_page_iter != tls_pages.cend()) { tls_page_iter != tls_pages.cend()) {
return *tls_page_iter->ReserveSlot(); return *tls_page_iter->ReserveSlot();
@ -391,7 +437,7 @@ VAddr Process::CreateTLSRegion() {
const VAddr start{page_table->GetKernelMapRegionStart()}; const VAddr start{page_table->GetKernelMapRegionStart()};
const VAddr size{page_table->GetKernelMapRegionEnd() - start}; const VAddr size{page_table->GetKernelMapRegionEnd() - start};
const PAddr tls_map_addr{system.DeviceMemory().GetPhysicalAddr(tls_page_ptr)}; const PAddr tls_map_addr{kernel.System().DeviceMemory().GetPhysicalAddr(tls_page_ptr)};
const VAddr tls_page_addr{page_table const VAddr tls_page_addr{page_table
->AllocateAndMapMemory(1, PageSize, true, start, size / PageSize, ->AllocateAndMapMemory(1, PageSize, true, start, size / PageSize,
KMemoryState::ThreadLocal, KMemoryState::ThreadLocal,
@ -410,8 +456,8 @@ VAddr Process::CreateTLSRegion() {
return *reserve_result; return *reserve_result;
} }
void Process::FreeTLSRegion(VAddr tls_address) { void KProcess::FreeTLSRegion(VAddr tls_address) {
KScopedSchedulerLock lock(system.Kernel()); KScopedSchedulerLock lock(kernel);
const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE); const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE);
auto iter = auto iter =
std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) { std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) {
@ -425,33 +471,34 @@ void Process::FreeTLSRegion(VAddr tls_address) {
iter->ReleaseSlot(tls_address); iter->ReleaseSlot(tls_address);
} }
void Process::LoadModule(CodeSet code_set, VAddr base_addr) { void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
std::lock_guard lock{HLE::g_hle_lock}; std::lock_guard lock{HLE::g_hle_lock};
const auto ReprotectSegment = [&](const CodeSet::Segment& segment, const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
KMemoryPermission permission) { KMemoryPermission permission) {
page_table->SetCodeMemoryPermission(segment.addr + base_addr, segment.size, permission); page_table->SetCodeMemoryPermission(segment.addr + base_addr, segment.size, permission);
}; };
system.Memory().WriteBlock(*this, base_addr, code_set.memory.data(), code_set.memory.size()); kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(),
code_set.memory.size());
ReprotectSegment(code_set.CodeSegment(), KMemoryPermission::ReadAndExecute); ReprotectSegment(code_set.CodeSegment(), KMemoryPermission::ReadAndExecute);
ReprotectSegment(code_set.RODataSegment(), KMemoryPermission::Read); ReprotectSegment(code_set.RODataSegment(), KMemoryPermission::Read);
ReprotectSegment(code_set.DataSegment(), KMemoryPermission::ReadAndWrite); ReprotectSegment(code_set.DataSegment(), KMemoryPermission::ReadAndWrite);
} }
bool Process::IsSignaled() const { bool KProcess::IsSignaled() const {
ASSERT(kernel.GlobalSchedulerContext().IsLocked()); ASSERT(kernel.GlobalSchedulerContext().IsLocked());
return is_signaled; return is_signaled;
} }
Process::Process(Core::System& system) KProcess::KProcess(KernelCore& kernel)
: KSynchronizationObject{system.Kernel()}, page_table{std::make_unique<KPageTable>(system)}, : KAutoObjectWithSlabHeapAndContainer{kernel},
handle_table{system.Kernel()}, address_arbiter{system}, condition_var{system}, page_table{std::make_unique<KPageTable>(kernel.System())}, handle_table{kernel},
state_lock{system.Kernel()}, system{system} {} address_arbiter{kernel.System()}, condition_var{kernel.System()}, state_lock{kernel} {}
Process::~Process() = default; KProcess::~KProcess() = default;
void Process::ChangeStatus(ProcessStatus new_status) { void KProcess::ChangeStatus(ProcessStatus new_status) {
if (status == new_status) { if (status == new_status) {
return; return;
} }
@ -461,7 +508,7 @@ void Process::ChangeStatus(ProcessStatus new_status) {
NotifyAvailable(); NotifyAvailable();
} }
ResultCode Process::AllocateMainThreadStack(std::size_t stack_size) { ResultCode KProcess::AllocateMainThreadStack(std::size_t stack_size) {
ASSERT(stack_size); ASSERT(stack_size);
// The kernel always ensures that the given stack size is page aligned. // The kernel always ensures that the given stack size is page aligned.

@ -11,11 +11,13 @@
#include <unordered_map> #include <unordered_map>
#include <vector> #include <vector>
#include "common/common_types.h" #include "common/common_types.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_address_arbiter.h" #include "core/hle/kernel/k_address_arbiter.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_condition_variable.h" #include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_synchronization_object.h" #include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/process_capability.h" #include "core/hle/kernel/process_capability.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h" #include "core/hle/result.h"
namespace Core { namespace Core {
@ -60,10 +62,13 @@ enum class ProcessStatus {
DebugBreak, DebugBreak,
}; };
class Process final : public KSynchronizationObject { class KProcess final
: public KAutoObjectWithSlabHeapAndContainer<KProcess, KSynchronizationObject> {
KERNEL_AUTOOBJECT_TRAITS(KProcess, KSynchronizationObject);
public: public:
explicit Process(Core::System& system); explicit KProcess(KernelCore& kernel);
~Process() override; ~KProcess() override;
enum : u64 { enum : u64 {
/// Lowest allowed process ID for a kernel initial process. /// Lowest allowed process ID for a kernel initial process.
@ -85,21 +90,9 @@ public:
static constexpr std::size_t RANDOM_ENTROPY_SIZE = 4; static constexpr std::size_t RANDOM_ENTROPY_SIZE = 4;
static std::shared_ptr<Process> Create(Core::System& system, std::string name, static ResultCode Initialize(KProcess* process, Core::System& system, std::string name,
ProcessType type); ProcessType type);
std::string GetTypeName() const override {
return "Process";
}
std::string GetName() const override {
return name;
}
static constexpr HandleType HANDLE_TYPE = HandleType::Process;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
/// Gets a reference to the process' page table. /// Gets a reference to the process' page table.
KPageTable& PageTable() { KPageTable& PageTable() {
return *page_table; return *page_table;
@ -111,12 +104,12 @@ public:
} }
/// Gets a reference to the process' handle table. /// Gets a reference to the process' handle table.
HandleTable& GetHandleTable() { KHandleTable& GetHandleTable() {
return handle_table; return handle_table;
} }
/// Gets a const reference to the process' handle table. /// Gets a const reference to the process' handle table.
const HandleTable& GetHandleTable() const { const KHandleTable& GetHandleTable() const {
return handle_table; return handle_table;
} }
@ -167,7 +160,7 @@ public:
} }
/// Gets the resource limit descriptor for this process /// Gets the resource limit descriptor for this process
std::shared_ptr<KResourceLimit> GetResourceLimit() const; KResourceLimit* GetResourceLimit() const;
/// Gets the ideal CPU core ID for this process /// Gets the ideal CPU core ID for this process
u8 GetIdealCoreId() const { u8 GetIdealCoreId() const {
@ -338,9 +331,19 @@ public:
void LoadModule(CodeSet code_set, VAddr base_addr); void LoadModule(CodeSet code_set, VAddr base_addr);
bool IsSignaled() const override; virtual bool IsInitialized() const override {
return is_initialized;
}
void Finalize() override {} static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
virtual void Finalize();
virtual u64 GetId() const override final {
return GetProcessID();
}
virtual bool IsSignaled() const override;
void PinCurrentThread(); void PinCurrentThread();
void UnpinCurrentThread(); void UnpinCurrentThread();
@ -349,6 +352,9 @@ public:
return state_lock; return state_lock;
} }
ResultCode AddSharedMemory(KSharedMemory* shmem, VAddr address, size_t size);
void RemoveSharedMemory(KSharedMemory* shmem, VAddr address, size_t size);
/////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////
// Thread-local storage management // Thread-local storage management
@ -399,7 +405,7 @@ private:
u32 system_resource_size = 0; u32 system_resource_size = 0;
/// Resource limit descriptor for this process /// Resource limit descriptor for this process
std::shared_ptr<KResourceLimit> resource_limit; KResourceLimit* resource_limit{};
/// The ideal CPU core for this process, threads are scheduled on this core by default. /// The ideal CPU core for this process, threads are scheduled on this core by default.
u8 ideal_core = 0; u8 ideal_core = 0;
@ -423,7 +429,7 @@ private:
u64 total_process_running_time_ticks = 0; u64 total_process_running_time_ticks = 0;
/// Per-process handle table for storing created object handles in. /// Per-process handle table for storing created object handles in.
HandleTable handle_table; KHandleTable handle_table;
/// Per-process address arbiter. /// Per-process address arbiter.
KAddressArbiter address_arbiter; KAddressArbiter address_arbiter;
@ -454,14 +460,12 @@ private:
/// Process total image size /// Process total image size
std::size_t image_size{}; std::size_t image_size{};
/// Name of this process
std::string name;
/// Schedule count of this process /// Schedule count of this process
s64 schedule_count{}; s64 schedule_count{};
bool is_signaled{}; bool is_signaled{};
bool is_suspended{}; bool is_suspended{};
bool is_initialized{};
std::atomic<s32> num_created_threads{}; std::atomic<s32> num_created_threads{};
std::atomic<u16> num_threads{}; std::atomic<u16> num_threads{};
@ -474,9 +478,6 @@ private:
KThread* exception_thread{}; KThread* exception_thread{};
KLightLock state_lock; KLightLock state_lock;
/// System context
Core::System& system;
}; };
} // namespace Kernel } // namespace Kernel

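As a point of reference for the header changes above, process creation moves from the old shared_ptr factory to the two-phase KAutoObject pattern. A hedged sketch of a call site follows; KProcess::Create, ProcessType::Userland, and the priority/stack values are assumptions rather than part of this diff.

// Hypothetical sketch of the new creation flow.
ResultCode MakeApplicationProcess(Core::System& system, const FileSys::ProgramMetadata& metadata,
                                  std::size_t code_size) {
    KProcess* process = KProcess::Create(system.Kernel());
    ASSERT(KProcess::Initialize(process, system, "main", ProcessType::Userland).IsSuccess());

    // Loading and starting then go through the renamed KProcess interface shown above.
    if (const ResultCode result = process->LoadFromMetadata(metadata, code_size);
        result.IsError()) {
        return result;
    }
    process->Run(/*main_thread_priority=*/44, /*stack_size=*/0x100000); // values illustrative
    return RESULT_SUCCESS;
}
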
@ -2,21 +2,18 @@
// Licensed under GPLv2 or any later version // Licensed under GPLv2 or any later version
// Refer to the license.txt file included. // Refer to the license.txt file included.
#include <algorithm>
#include "common/assert.h" #include "common/assert.h"
#include "common/common_funcs.h" #include "core/hle/kernel/k_event.h"
#include "common/logging/log.h"
#include "core/hle/kernel/k_readable_event.h" #include "core/hle/kernel/k_readable_event.h"
#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h" #include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/svc_results.h" #include "core/hle/kernel/svc_results.h"
namespace Kernel { namespace Kernel {
KReadableEvent::KReadableEvent(KernelCore& kernel, std::string&& name) KReadableEvent::KReadableEvent(KernelCore& kernel) : KSynchronizationObject{kernel} {}
: KSynchronizationObject{kernel, std::move(name)} {}
KReadableEvent::~KReadableEvent() = default; KReadableEvent::~KReadableEvent() = default;
bool KReadableEvent::IsSignaled() const { bool KReadableEvent::IsSignaled() const {
@ -25,6 +22,12 @@ bool KReadableEvent::IsSignaled() const {
return is_signaled; return is_signaled;
} }
void KReadableEvent::Destroy() {
if (parent) {
parent->Close();
}
}
ResultCode KReadableEvent::Signal() { ResultCode KReadableEvent::Signal() {
KScopedSchedulerLock lk{kernel}; KScopedSchedulerLock lk{kernel};

@ -4,8 +4,9 @@
#pragma once #pragma once
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_synchronization_object.h" #include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/object.h" #include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h" #include "core/hle/result.h"
namespace Kernel { namespace Kernel {
@ -13,31 +14,25 @@ namespace Kernel {
class KernelCore; class KernelCore;
class KEvent; class KEvent;
class KReadableEvent final : public KSynchronizationObject { class KReadableEvent : public KSynchronizationObject {
KERNEL_AUTOOBJECT_TRAITS(KReadableEvent, KSynchronizationObject);
public: public:
explicit KReadableEvent(KernelCore& kernel, std::string&& name); explicit KReadableEvent(KernelCore& kernel);
~KReadableEvent() override; ~KReadableEvent() override;
std::string GetTypeName() const override { void Initialize(KEvent* parent_, std::string&& name_) {
return "KReadableEvent"; is_signaled = false;
} parent = parent_;
name = std::move(name_);
static constexpr HandleType HANDLE_TYPE = HandleType::ReadableEvent;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
} }
KEvent* GetParent() const { KEvent* GetParent() const {
return parent; return parent;
} }
void Initialize(KEvent* parent_) { virtual bool IsSignaled() const override;
is_signaled = false; virtual void Destroy() override;
parent = parent_;
}
bool IsSignaled() const override;
void Finalize() override {}
ResultCode Signal(); ResultCode Signal();
ResultCode Clear(); ResultCode Clear();

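For context on the new Initialize path (a sketch under assumptions; KEvent's own setup code is not shown in this hunk), the parent event now wires up its readable end and name explicitly instead of passing the name through the constructor:

// Hypothetical sketch of what a parent KEvent might do; the event name is illustrative.
void WireUpReadableEnd(KEvent* parent_event, KReadableEvent& readable) {
    // Initialize() replaces the old name-taking constructor: it clears the signaled
    // state, records the parent, and takes ownership of the name string.
    readable.Initialize(parent_event, "HidEvent:Readable");

    // Signal()/Clear() behave as before, taking the scheduler lock internally.
    if (readable.Signal().IsSuccess()) {
        readable.Clear();
    }
}
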
@ -10,10 +10,16 @@
namespace Kernel { namespace Kernel {
constexpr s64 DefaultTimeout = 10000000000; // 10 seconds constexpr s64 DefaultTimeout = 10000000000; // 10 seconds
KResourceLimit::KResourceLimit(KernelCore& kernel, const Core::Timing::CoreTiming& core_timing_) KResourceLimit::KResourceLimit(KernelCore& kernel)
: Object{kernel}, lock{kernel}, cond_var{kernel}, core_timing(core_timing_) {} : KAutoObjectWithSlabHeapAndContainer{kernel}, lock{kernel}, cond_var{kernel} {}
KResourceLimit::~KResourceLimit() = default; KResourceLimit::~KResourceLimit() = default;
void KResourceLimit::Initialize(const Core::Timing::CoreTiming* core_timing_) {
core_timing = core_timing_;
}
void KResourceLimit::Finalize() {}
s64 KResourceLimit::GetLimitValue(LimitableResource which) const { s64 KResourceLimit::GetLimitValue(LimitableResource which) const {
const auto index = static_cast<std::size_t>(which); const auto index = static_cast<std::size_t>(which);
s64 value{}; s64 value{};
@ -78,7 +84,7 @@ ResultCode KResourceLimit::SetLimitValue(LimitableResource which, s64 value) {
} }
bool KResourceLimit::Reserve(LimitableResource which, s64 value) { bool KResourceLimit::Reserve(LimitableResource which, s64 value) {
return Reserve(which, value, core_timing.GetGlobalTimeNs().count() + DefaultTimeout); return Reserve(which, value, core_timing->GetGlobalTimeNs().count() + DefaultTimeout);
} }
bool KResourceLimit::Reserve(LimitableResource which, s64 value, s64 timeout) { bool KResourceLimit::Reserve(LimitableResource which, s64 value, s64 timeout) {
@ -109,7 +115,7 @@ bool KResourceLimit::Reserve(LimitableResource which, s64 value, s64 timeout) {
} }
if (current_hints[index] + value <= limit_values[index] && if (current_hints[index] + value <= limit_values[index] &&
(timeout < 0 || core_timing.GetGlobalTimeNs().count() < timeout)) { (timeout < 0 || core_timing->GetGlobalTimeNs().count() < timeout)) {
waiter_count++; waiter_count++;
cond_var.Wait(&lock, timeout); cond_var.Wait(&lock, timeout);
waiter_count--; waiter_count--;

@ -8,7 +8,6 @@
#include "common/common_types.h" #include "common/common_types.h"
#include "core/hle/kernel/k_light_condition_variable.h" #include "core/hle/kernel/k_light_condition_variable.h"
#include "core/hle/kernel/k_light_lock.h" #include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/object.h"
union ResultCode; union ResultCode;
@ -32,10 +31,16 @@ constexpr bool IsValidResourceType(LimitableResource type) {
return type < LimitableResource::Count; return type < LimitableResource::Count;
} }
class KResourceLimit final : public Object { class KResourceLimit final
: public KAutoObjectWithSlabHeapAndContainer<KResourceLimit, KAutoObjectWithList> {
KERNEL_AUTOOBJECT_TRAITS(KResourceLimit, KAutoObject);
public: public:
explicit KResourceLimit(KernelCore& kernel, const Core::Timing::CoreTiming& core_timing_); explicit KResourceLimit(KernelCore& kernel);
~KResourceLimit(); virtual ~KResourceLimit();
void Initialize(const Core::Timing::CoreTiming* core_timing_);
virtual void Finalize() override;
s64 GetLimitValue(LimitableResource which) const; s64 GetLimitValue(LimitableResource which) const;
s64 GetCurrentValue(LimitableResource which) const; s64 GetCurrentValue(LimitableResource which) const;
@ -49,19 +54,7 @@ public:
void Release(LimitableResource which, s64 value); void Release(LimitableResource which, s64 value);
void Release(LimitableResource which, s64 value, s64 hint); void Release(LimitableResource which, s64 value, s64 hint);
std::string GetTypeName() const override { static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
return "KResourceLimit";
}
std::string GetName() const override {
return GetTypeName();
}
static constexpr HandleType HANDLE_TYPE = HandleType::ResourceLimit;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
virtual void Finalize() override {}
private: private:
using ResourceArray = std::array<s64, static_cast<std::size_t>(LimitableResource::Count)>; using ResourceArray = std::array<s64, static_cast<std::size_t>(LimitableResource::Count)>;
@ -72,6 +65,6 @@ private:
mutable KLightLock lock; mutable KLightLock lock;
s32 waiter_count{}; s32 waiter_count{};
KLightConditionVariable cond_var; KLightConditionVariable cond_var;
const Core::Timing::CoreTiming& core_timing; const Core::Timing::CoreTiming* core_timing{};
}; };
} // namespace Kernel } // namespace Kernel

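A brief orientation sketch for the resource-limit changes: the CoreTiming dependency now arrives through Initialize() after slab construction rather than through the constructor. KResourceLimit::Create and the concrete limit values below are assumptions, not taken from this diff.

// Hypothetical setup order for a resource limit under the new API.
void SetUpSystemResourceLimit(Core::System& system) {
    KResourceLimit* limit = KResourceLimit::Create(system.Kernel());
    limit->Initialize(std::addressof(system.CoreTiming()));

    // Reserve() now reads the clock through the stored CoreTiming pointer when it
    // computes the DefaultTimeout deadline.
    limit->SetLimitValue(LimitableResource::Threads, 800); // value illustrative
    const bool reserved = limit->Reserve(LimitableResource::Threads, 1);
    ASSERT(reserved);
}
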
@ -15,12 +15,12 @@
#include "core/core.h" #include "core/core.h"
#include "core/core_timing.h" #include "core/core_timing.h"
#include "core/cpu_manager.h" #include "core/cpu_manager.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h" #include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h" #include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/time_manager.h" #include "core/hle/kernel/time_manager.h"
namespace Kernel { namespace Kernel {
@ -71,7 +71,7 @@ u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
} }
if (state.should_count_idle) { if (state.should_count_idle) {
if (highest_thread != nullptr) { if (highest_thread != nullptr) {
if (Process* process = highest_thread->GetOwnerProcess(); process != nullptr) { if (KProcess* process = highest_thread->GetOwnerProcess(); process != nullptr) {
process->SetRunningThread(core_id, highest_thread, state.idle_count); process->SetRunningThread(core_id, highest_thread, state.idle_count);
} }
} else { } else {
@ -104,7 +104,7 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
if (top_thread != nullptr) { if (top_thread != nullptr) {
// If the thread has no waiters, we need to check if the process has a thread pinned. // If the thread has no waiters, we need to check if the process has a thread pinned.
if (top_thread->GetNumKernelWaiters() == 0) { if (top_thread->GetNumKernelWaiters() == 0) {
if (Process* parent = top_thread->GetOwnerProcess(); parent != nullptr) { if (KProcess* parent = top_thread->GetOwnerProcess(); parent != nullptr) {
if (KThread* pinned = parent->GetPinnedThread(static_cast<s32>(core_id)); if (KThread* pinned = parent->GetPinnedThread(static_cast<s32>(core_id));
pinned != nullptr && pinned != top_thread) { pinned != nullptr && pinned != top_thread) {
// We prefer our parent's pinned thread if possible. However, we also don't // We prefer our parent's pinned thread if possible. However, we also don't
@ -411,7 +411,7 @@ void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) {
// Get the current thread and process. // Get the current thread and process.
KThread& cur_thread = Kernel::GetCurrentThread(kernel); KThread& cur_thread = Kernel::GetCurrentThread(kernel);
Process& cur_process = *kernel.CurrentProcess(); KProcess& cur_process = *kernel.CurrentProcess();
// If the thread's yield count matches, there's nothing for us to do. // If the thread's yield count matches, there's nothing for us to do.
if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) { if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
@ -450,7 +450,7 @@ void KScheduler::YieldWithCoreMigration(KernelCore& kernel) {
// Get the current thread and process. // Get the current thread and process.
KThread& cur_thread = Kernel::GetCurrentThread(kernel); KThread& cur_thread = Kernel::GetCurrentThread(kernel);
Process& cur_process = *kernel.CurrentProcess(); KProcess& cur_process = *kernel.CurrentProcess();
// If the thread's yield count matches, there's nothing for us to do. // If the thread's yield count matches, there's nothing for us to do.
if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) { if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
@ -538,7 +538,7 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) {
// Get the current thread and process. // Get the current thread and process.
KThread& cur_thread = Kernel::GetCurrentThread(kernel); KThread& cur_thread = Kernel::GetCurrentThread(kernel);
Process& cur_process = *kernel.CurrentProcess(); KProcess& cur_process = *kernel.CurrentProcess();
// If the thread's yield count matches, there's nothing for us to do. // If the thread's yield count matches, there's nothing for us to do.
if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) { if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
@ -617,7 +617,12 @@ KScheduler::KScheduler(Core::System& system, s32 core_id) : system(system), core
state.highest_priority_thread = nullptr; state.highest_priority_thread = nullptr;
} }
KScheduler::~KScheduler() = default; KScheduler::~KScheduler() {
if (idle_thread) {
idle_thread->Close();
idle_thread = nullptr;
}
}
KThread* KScheduler::GetCurrentThread() const { KThread* KScheduler::GetCurrentThread() const {
if (auto result = current_thread.load(); result) { if (auto result = current_thread.load(); result) {
@ -719,7 +724,7 @@ void KScheduler::ScheduleImpl() {
current_thread.store(next_thread); current_thread.store(next_thread);
Process* const previous_process = system.Kernel().CurrentProcess(); KProcess* const previous_process = system.Kernel().CurrentProcess();
UpdateLastContextSwitchTime(previous_thread, previous_process); UpdateLastContextSwitchTime(previous_thread, previous_process);
@ -775,7 +780,7 @@ void KScheduler::SwitchToCurrent() {
} }
} }
void KScheduler::UpdateLastContextSwitchTime(KThread* thread, Process* process) { void KScheduler::UpdateLastContextSwitchTime(KThread* thread, KProcess* process) {
const u64 prev_switch_ticks = last_context_switch_time; const u64 prev_switch_ticks = last_context_switch_time;
const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks(); const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks; const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
@ -792,13 +797,9 @@ void KScheduler::UpdateLastContextSwitchTime(KThread* thread, Process* process)
} }
void KScheduler::Initialize() { void KScheduler::Initialize() {
std::string name = "Idle Thread Id:" + std::to_string(core_id); idle_thread = KThread::Create(system.Kernel());
std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc(); ASSERT(KThread::InitializeIdleThread(system, idle_thread, core_id).IsSuccess());
void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater(); idle_thread->SetName(fmt::format("IdleThread:{}", core_id));
auto thread_res = KThread::CreateThread(
system, ThreadType::Main, name, 0, KThread::IdleThreadPriority, 0,
static_cast<u32>(core_id), 0, nullptr, std::move(init_func), init_func_parameter);
idle_thread = thread_res.Unwrap().get();
} }
KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel) KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel)

@ -24,7 +24,7 @@ class System;
namespace Kernel { namespace Kernel {
class KernelCore; class KernelCore;
class Process; class KProcess;
class SchedulerLock; class SchedulerLock;
class KThread; class KThread;
@ -165,7 +165,7 @@ private:
* most recent tick count retrieved. No special arithmetic is * most recent tick count retrieved. No special arithmetic is
* applied to it. * applied to it.
*/ */
void UpdateLastContextSwitchTime(KThread* thread, Process* process); void UpdateLastContextSwitchTime(KThread* thread, KProcess* process);
static void OnSwitch(void* this_scheduler); static void OnSwitch(void* this_scheduler);
void SwitchToCurrent(); void SwitchToCurrent();
@ -173,12 +173,12 @@ private:
KThread* prev_thread{}; KThread* prev_thread{};
std::atomic<KThread*> current_thread{}; std::atomic<KThread*> current_thread{};
KThread* idle_thread; KThread* idle_thread{};
std::shared_ptr<Common::Fiber> switch_fiber{}; std::shared_ptr<Common::Fiber> switch_fiber{};
struct SchedulingState { struct SchedulingState {
std::atomic<bool> needs_scheduling; std::atomic<bool> needs_scheduling{};
bool interrupt_task_thread_runnable{}; bool interrupt_task_thread_runnable{};
bool should_count_idle{}; bool should_count_idle{};
u64 idle_count{}; u64 idle_count{};

@ -8,15 +8,14 @@
#pragma once #pragma once
#include "common/common_types.h" #include "common/common_types.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h" #include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/process.h"
namespace Kernel { namespace Kernel {
class KScopedResourceReservation { class KScopedResourceReservation {
public: public:
explicit KScopedResourceReservation(std::shared_ptr<KResourceLimit> l, LimitableResource r, explicit KScopedResourceReservation(KResourceLimit* l, LimitableResource r, s64 v, s64 timeout)
s64 v, s64 timeout)
: resource_limit(std::move(l)), value(v), resource(r) { : resource_limit(std::move(l)), value(v), resource(r) {
if (resource_limit && value) { if (resource_limit && value) {
success = resource_limit->Reserve(resource, value, timeout); success = resource_limit->Reserve(resource, value, timeout);
@ -25,8 +24,7 @@ public:
} }
} }
explicit KScopedResourceReservation(std::shared_ptr<KResourceLimit> l, LimitableResource r, explicit KScopedResourceReservation(KResourceLimit* l, LimitableResource r, s64 v = 1)
s64 v = 1)
: resource_limit(std::move(l)), value(v), resource(r) { : resource_limit(std::move(l)), value(v), resource(r) {
if (resource_limit && value) { if (resource_limit && value) {
success = resource_limit->Reserve(resource, value); success = resource_limit->Reserve(resource, value);
@ -35,10 +33,10 @@ public:
} }
} }
explicit KScopedResourceReservation(const Process* p, LimitableResource r, s64 v, s64 t) explicit KScopedResourceReservation(const KProcess* p, LimitableResource r, s64 v, s64 t)
: KScopedResourceReservation(p->GetResourceLimit(), r, v, t) {} : KScopedResourceReservation(p->GetResourceLimit(), r, v, t) {}
explicit KScopedResourceReservation(const Process* p, LimitableResource r, s64 v = 1) explicit KScopedResourceReservation(const KProcess* p, LimitableResource r, s64 v = 1)
: KScopedResourceReservation(p->GetResourceLimit(), r, v) {} : KScopedResourceReservation(p->GetResourceLimit(), r, v) {}
~KScopedResourceReservation() noexcept { ~KScopedResourceReservation() noexcept {
@ -58,7 +56,7 @@ public:
} }
private: private:
std::shared_ptr<KResourceLimit> resource_limit; KResourceLimit* resource_limit{};
s64 value; s64 value;
LimitableResource resource; LimitableResource resource;
bool success; bool success;

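Since the guard now holds a raw KResourceLimit* rather than a shared_ptr, call sites keep the same RAII shape seen elsewhere in this commit (Succeeded/Commit). A sketch, with the object-creation body elided:

// Hypothetical sketch of the reserve/commit pattern with the raw-pointer API.
ResultCode CreateSomethingFor(KProcess* process) {
    KScopedResourceReservation reservation(process, LimitableResource::Threads);
    if (!reservation.Succeeded()) {
        // The reservation never took effect, so the destructor releases nothing.
        return ResultLimitReached;
    }

    // ... allocate and initialize the object here ...

    // Commit on success; otherwise the destructor releases the reserved count.
    reservation.Commit();
    return RESULT_SUCCESS;
}
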
@ -8,7 +8,7 @@
#pragma once #pragma once
#include "common/common_types.h" #include "common/common_types.h"
#include "core/hle/kernel/handle_table.h" #include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h" #include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/time_manager.h" #include "core/hle/kernel/time_manager.h"

@ -0,0 +1,104 @@
// Copyright 2021 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <tuple>
#include "common/assert.h"
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_port.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_server_port.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/svc_results.h"
namespace Kernel {
KServerPort::KServerPort(KernelCore& kernel) : KSynchronizationObject{kernel} {}
KServerPort::~KServerPort() = default;
void KServerPort::Initialize(KPort* parent_, std::string&& name_) {
// Set member variables.
parent = parent_;
name = std::move(name_);
}
bool KServerPort::IsLight() const {
return this->GetParent()->IsLight();
}
void KServerPort::CleanupSessions() {
// Ensure our preconditions are met.
if (this->IsLight()) {
UNIMPLEMENTED();
}
// Cleanup the session list.
while (true) {
// Get the last session in the list
KServerSession* session = nullptr;
{
KScopedSchedulerLock sl{kernel};
if (!session_list.empty()) {
session = std::addressof(session_list.front());
session_list.pop_front();
}
}
// Close the session.
if (session != nullptr) {
session->Close();
} else {
break;
}
}
}
void KServerPort::Destroy() {
// Note with our parent that we're closed.
parent->OnServerClosed();
// Perform necessary cleanup of our session lists.
this->CleanupSessions();
// Close our reference to our parent.
parent->Close();
}
bool KServerPort::IsSignaled() const {
if (this->IsLight()) {
UNIMPLEMENTED();
return false;
} else {
return !session_list.empty();
}
}
void KServerPort::EnqueueSession(KServerSession* session) {
ASSERT(!this->IsLight());
KScopedSchedulerLock sl{kernel};
// Add the session to our queue.
session_list.push_back(*session);
if (session_list.size() == 1) {
this->NotifyAvailable();
}
}
KServerSession* KServerPort::AcceptSession() {
ASSERT(!this->IsLight());
KScopedSchedulerLock sl{kernel};
// Return the first session in the list.
if (session_list.empty()) {
return nullptr;
}
KServerSession* session = std::addressof(session_list.front());
session_list.pop_front();
return session;
}
} // namespace Kernel

@ -0,0 +1,80 @@
// Copyright 2021 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <boost/intrusive/list.hpp>
#include "common/common_types.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/result.h"
namespace Kernel {
class KernelCore;
class KPort;
class SessionRequestHandler;
class KServerPort final : public KSynchronizationObject {
KERNEL_AUTOOBJECT_TRAITS(KServerPort, KSynchronizationObject);
private:
using SessionList = boost::intrusive::list<KServerSession>;
public:
explicit KServerPort(KernelCore& kernel);
virtual ~KServerPort() override;
using HLEHandler = std::shared_ptr<SessionRequestHandler>;
void Initialize(KPort* parent_, std::string&& name_);
/// Whether or not this server port has an HLE handler available.
bool HasHLEHandler() const {
return hle_handler != nullptr;
}
/// Gets the HLE handler for this port.
HLEHandler GetHLEHandler() const {
return hle_handler;
}
/**
* Sets the HLE handler template for the port. ServerSessions created by connecting to this port
* will inherit a reference to this handler.
*/
void SetHleHandler(HLEHandler hle_handler_) {
hle_handler = std::move(hle_handler_);
}
void EnqueueSession(KServerSession* pending_session);
KServerSession* AcceptSession();
const KPort* GetParent() const {
return parent;
}
bool IsLight() const;
// Overridden virtual functions.
virtual void Destroy() override;
virtual bool IsSignaled() const override;
private:
void CleanupSessions();
private:
SessionList session_list;
HLEHandler hle_handler;
KPort* parent{};
};
} // namespace Kernel

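To show how the pieces above are meant to interact (a non-authoritative sketch; the loop that drives it is assumed), draining a signaled server port could look like this:

// Hypothetical sketch: drain pending sessions from a signaled KServerPort.
void DrainServerPort(KServerPort& server_port) {
    // IsSignaled() stays true while session_list is non-empty, so keep accepting
    // until AcceptSession() hands back nullptr.
    while (KServerSession* session = server_port.AcceptSession()) {
        // Forward to an installed HLE handler; what happens without one is out of
        // scope for this sketch.
        if (server_port.HasHLEHandler()) {
            server_port.GetHLEHandler()->ClientConnected(session);
        }
    }
}
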
@ -10,49 +10,39 @@
#include "common/logging/log.h" #include "common/logging/log.h"
#include "core/core_timing.h" #include "core/core_timing.h"
#include "core/hle/ipc_helpers.h" #include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/hle_ipc.h" #include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h" #include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"
#include "core/memory.h" #include "core/memory.h"
namespace Kernel { namespace Kernel {
ServerSession::ServerSession(KernelCore& kernel) : KSynchronizationObject{kernel} {} KServerSession::KServerSession(KernelCore& kernel) : KSynchronizationObject{kernel} {}
ServerSession::~ServerSession() { KServerSession::~KServerSession() {
kernel.ReleaseServiceThread(service_thread); kernel.ReleaseServiceThread(service_thread);
} }
ResultVal<std::shared_ptr<ServerSession>> ServerSession::Create(KernelCore& kernel, void KServerSession::Initialize(KSession* parent_, std::string&& name_) {
std::shared_ptr<Session> parent, // Set member variables.
std::string name) { parent = parent_;
std::shared_ptr<ServerSession> session{std::make_shared<ServerSession>(kernel)}; name = std::move(name_);
service_thread = kernel.CreateServiceThread(name);
session->name = std::move(name);
session->parent = std::move(parent);
session->service_thread = kernel.CreateServiceThread(session->name);
return MakeResult(std::move(session));
} }
bool ServerSession::IsSignaled() const { void KServerSession::Destroy() {
// Closed sessions should never wait, an error will be returned from svcReplyAndReceive. parent->OnServerClosed();
if (!parent->Client()) {
return true;
}
// Wait if we have no pending requests, or if we're currently handling a request. parent->Close();
return !pending_requesting_threads.empty() && currently_handling == nullptr;
} }
void ServerSession::ClientDisconnected() { void KServerSession::OnClientClosed() {
// We keep a shared pointer to the hle handler to keep it alive throughout // We keep a shared pointer to the hle handler to keep it alive throughout
// the call to ClientDisconnected, as ClientDisconnected invalidates the // the call to ClientDisconnected, as ClientDisconnected invalidates the
// hle_handler member itself during the course of the function executing. // hle_handler member itself during the course of the function executing.
@ -60,24 +50,31 @@ void ServerSession::ClientDisconnected() {
if (handler) { if (handler) {
// Note that after this returns, this server session's hle_handler is // Note that after this returns, this server session's hle_handler is
// invalidated (set to null). // invalidated (set to null).
handler->ClientDisconnected(SharedFrom(this)); handler->ClientDisconnected(this);
} }
// Clean up the list of client threads with pending requests, they are unneeded now that the
// client endpoint is closed.
pending_requesting_threads.clear();
currently_handling = nullptr;
} }
void ServerSession::AppendDomainRequestHandler(std::shared_ptr<SessionRequestHandler> handler) { bool KServerSession::IsSignaled() const {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// If the client is closed, we're always signaled.
if (parent->IsClientClosed()) {
return true;
}
// Otherwise, we're signaled if we have a request and aren't handling one.
return false;
}
void KServerSession::AppendDomainRequestHandler(std::shared_ptr<SessionRequestHandler> handler) {
domain_request_handlers.push_back(std::move(handler)); domain_request_handlers.push_back(std::move(handler));
} }
std::size_t ServerSession::NumDomainRequestHandlers() const { std::size_t KServerSession::NumDomainRequestHandlers() const {
return domain_request_handlers.size(); return domain_request_handlers.size();
} }
ResultCode ServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& context) { ResultCode KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& context) {
if (!context.HasDomainMessageHeader()) { if (!context.HasDomainMessageHeader()) {
return RESULT_SUCCESS; return RESULT_SUCCESS;
} }
@ -116,23 +113,21 @@ ResultCode ServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& con
return RESULT_SUCCESS; return RESULT_SUCCESS;
} }
ResultCode ServerSession::QueueSyncRequest(std::shared_ptr<KThread> thread, ResultCode KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory) {
Core::Memory::Memory& memory) {
u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))}; u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))};
auto context = auto context = std::make_shared<HLERequestContext>(kernel, memory, this, thread);
std::make_shared<HLERequestContext>(kernel, memory, SharedFrom(this), std::move(thread));
context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf); context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf);
if (auto strong_ptr = service_thread.lock()) { if (auto strong_ptr = service_thread.lock()) {
strong_ptr->QueueSyncRequest(*this, std::move(context)); strong_ptr->QueueSyncRequest(*parent, std::move(context));
return RESULT_SUCCESS; return RESULT_SUCCESS;
} }
return RESULT_SUCCESS; return RESULT_SUCCESS;
} }
ResultCode ServerSession::CompleteSyncRequest(HLERequestContext& context) { ResultCode KServerSession::CompleteSyncRequest(HLERequestContext& context) {
ResultCode result = RESULT_SUCCESS; ResultCode result = RESULT_SUCCESS;
// If the session has been converted to a domain, handle the domain request // If the session has been converted to a domain, handle the domain request
if (IsDomain() && context.HasDomainMessageHeader()) { if (IsDomain() && context.HasDomainMessageHeader()) {
@ -161,10 +156,9 @@ ResultCode ServerSession::CompleteSyncRequest(HLERequestContext& context) {
return result; return result;
} }
ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<KThread> thread, ResultCode KServerSession::HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory,
Core::Memory::Memory& memory,
Core::Timing::CoreTiming& core_timing) { Core::Timing::CoreTiming& core_timing) {
return QueueSyncRequest(std::move(thread), memory); return QueueSyncRequest(thread, memory);
} }
} // namespace Kernel } // namespace Kernel
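For reference, the QueueSyncRequest path above dispatches through std::weak_ptr::lock(), taking a temporary strong reference to the service thread only for the duration of the enqueue. A minimal, self-contained sketch of that pattern follows; the ServiceThread and request types here are hypothetical stand-ins, not yuzu's real classes.

#include <iostream>
#include <memory>

struct ServiceThread {
    void QueueSyncRequest(int request_id) {
        std::cout << "queued request " << request_id << '\n';
    }
};

struct ServerSessionSketch {
    // Non-owning reference: the session must not keep the service thread alive.
    std::weak_ptr<ServiceThread> service_thread;

    bool QueueSyncRequest(int request_id) {
        // lock() yields a shared_ptr only while the thread still exists,
        // so a destroyed service thread is handled gracefully.
        if (auto strong_ptr = service_thread.lock()) {
            strong_ptr->QueueSyncRequest(request_id);
            return true;
        }
        return false;
    }
};

int main() {
    auto thread = std::make_shared<ServiceThread>();
    ServerSessionSketch session{thread};
    session.QueueSyncRequest(1); // dispatched
    thread.reset();
    session.QueueSyncRequest(2); // silently dropped, no dangling access
}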
@ -9,6 +9,8 @@
#include <utility> #include <utility>
#include <vector> #include <vector>
#include <boost/intrusive/list.hpp>
#include "common/threadsafe_queue.h" #include "common/threadsafe_queue.h"
#include "core/hle/kernel/k_synchronization_object.h" #include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/service_thread.h" #include "core/hle/kernel/service_thread.h"
@ -27,55 +29,35 @@ namespace Kernel {
class HLERequestContext; class HLERequestContext;
class KernelCore; class KernelCore;
class Session; class KSession;
class SessionRequestHandler; class SessionRequestHandler;
class KThread; class KThread;
/** class KServerSession final : public KSynchronizationObject,
* Kernel object representing the server endpoint of an IPC session. Sessions are the basic CTR-OS public boost::intrusive::list_base_hook<> {
* primitive for communication between different processes, and are used to implement service calls KERNEL_AUTOOBJECT_TRAITS(KServerSession, KSynchronizationObject);
* to the various system services.
*
* To make a service call, the client must write the command header and parameters to the buffer
* located at offset 0x80 of the TLS (Thread-Local Storage) area, then execute a SendSyncRequest
* SVC call with its ClientSession handle. The kernel will read the command header, using it to
* marshall the parameters to the process at the server endpoint of the session.
* After the server replies to the request, the response is marshalled back to the caller's
* TLS buffer and control is transferred back to it.
*/
class ServerSession final : public KSynchronizationObject {
friend class ServiceThread; friend class ServiceThread;
public: public:
explicit ServerSession(KernelCore& kernel); explicit KServerSession(KernelCore& kernel);
~ServerSession() override; virtual ~KServerSession() override;
friend class Session; virtual void Destroy() override;
static ResultVal<std::shared_ptr<ServerSession>> Create(KernelCore& kernel, void Initialize(KSession* parent_, std::string&& name_);
std::shared_ptr<Session> parent,
std::string name = "Unknown");
std::string GetTypeName() const override { KSession* GetParent() {
return "ServerSession"; return parent;
} }
std::string GetName() const override { const KSession* GetParent() const {
return name; return parent;
} }
static constexpr HandleType HANDLE_TYPE = HandleType::ServerSession; virtual bool IsSignaled() const override;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
Session* GetParent() { void OnClientClosed();
return parent.get();
}
const Session* GetParent() const {
return parent.get();
}
/** /**
* Sets the HLE handler for the session. This handler will be called to service IPC requests * Sets the HLE handler for the session. This handler will be called to service IPC requests
@ -95,12 +77,9 @@ public:
* *
* @returns ResultCode from the operation. * @returns ResultCode from the operation.
*/ */
ResultCode HandleSyncRequest(std::shared_ptr<KThread> thread, Core::Memory::Memory& memory, ResultCode HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory,
Core::Timing::CoreTiming& core_timing); Core::Timing::CoreTiming& core_timing);
/// Called when a client disconnection occurs.
void ClientDisconnected();
/// Adds a new domain request handler to the collection of request handlers within /// Adds a new domain request handler to the collection of request handlers within
/// this ServerSession instance. /// this ServerSession instance.
void AppendDomainRequestHandler(std::shared_ptr<SessionRequestHandler> handler); void AppendDomainRequestHandler(std::shared_ptr<SessionRequestHandler> handler);
@ -124,13 +103,9 @@ public:
convert_to_domain = true; convert_to_domain = true;
} }
bool IsSignaled() const override;
void Finalize() override {}
private: private:
/// Queues a sync request from the emulated application. /// Queues a sync request from the emulated application.
ResultCode QueueSyncRequest(std::shared_ptr<KThread> thread, Core::Memory::Memory& memory); ResultCode QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory);
/// Completes a sync request from the emulated application. /// Completes a sync request from the emulated application.
ResultCode CompleteSyncRequest(HLERequestContext& context); ResultCode CompleteSyncRequest(HLERequestContext& context);
@ -139,33 +114,20 @@ private:
/// object handle. /// object handle.
ResultCode HandleDomainSyncRequest(Kernel::HLERequestContext& context); ResultCode HandleDomainSyncRequest(Kernel::HLERequestContext& context);
/// The parent session, which links to the client endpoint.
std::shared_ptr<Session> parent;
/// This session's HLE request handler (applicable when not a domain) /// This session's HLE request handler (applicable when not a domain)
std::shared_ptr<SessionRequestHandler> hle_handler; std::shared_ptr<SessionRequestHandler> hle_handler;
/// This is the list of domain request handlers (after conversion to a domain) /// This is the list of domain request handlers (after conversion to a domain)
std::vector<std::shared_ptr<SessionRequestHandler>> domain_request_handlers; std::vector<std::shared_ptr<SessionRequestHandler>> domain_request_handlers;
/// List of threads that are pending a response after a sync request. This list is processed in
/// a LIFO manner, thus, the last request will be dispatched first.
/// TODO(Subv): Verify if this is indeed processed in LIFO using a hardware test.
std::vector<std::shared_ptr<KThread>> pending_requesting_threads;
/// Thread whose request is currently being handled. A request is considered "handled" when a
/// response is sent via svcReplyAndReceive.
/// TODO(Subv): Find a better name for this.
std::shared_ptr<KThread> currently_handling;
/// When set to True, converts the session to a domain at the end of the command /// When set to True, converts the session to a domain at the end of the command
bool convert_to_domain{}; bool convert_to_domain{};
/// The name of this session (optional)
std::string name;
/// Thread to dispatch service requests /// Thread to dispatch service requests
std::weak_ptr<ServiceThread> service_thread; std::weak_ptr<ServiceThread> service_thread;
/// KSession that owns this KServerSession
KSession* parent{};
}; };
} // namespace Kernel } // namespace Kernel
@ -0,0 +1,85 @@
// Copyright 2021 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/assert.h"
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_session.h"
namespace Kernel {
KSession::KSession(KernelCore& kernel)
: KAutoObjectWithSlabHeapAndContainer{kernel}, server{kernel}, client{kernel} {}
KSession::~KSession() = default;
void KSession::Initialize(KClientPort* port_, const std::string& name_) {
// Increment reference count.
// Because reference count is one on creation, this will result
// in a reference count of two. Thus, when both server and client are closed
// this object will be destroyed.
Open();
// Create our sub sessions.
KAutoObject::Create(std::addressof(server));
KAutoObject::Create(std::addressof(client));
// Initialize our sub sessions.
server.Initialize(this, name_ + ":Server");
client.Initialize(this, name_ + ":Client");
// Set state and name.
SetState(State::Normal);
name = name_;
// Set our owner process.
process = kernel.CurrentProcess();
process->Open();
// Set our port.
port = port_;
if (port != nullptr) {
port->Open();
}
// Mark initialized.
initialized = true;
}
void KSession::Finalize() {
if (port == nullptr) {
return;
}
port->OnSessionFinalized();
port->Close();
}
void KSession::OnServerClosed() {
if (GetState() != State::Normal) {
return;
}
SetState(State::ServerClosed);
client.OnServerClosed();
}
void KSession::OnClientClosed() {
if (GetState() != State::Normal) {
return;
}
SetState(State::ClientClosed);
server.OnClientClosed();
}
void KSession::PostDestroy(uintptr_t arg) {
// Release the session count resource the owner process holds.
KProcess* owner = reinterpret_cast<KProcess*>(arg);
owner->GetResourceLimit()->Release(LimitableResource::Sessions, 1);
owner->Close();
}
} // namespace Kernel
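The comment in KSession::Initialize above explains why the session opens an extra reference on itself: with one reference per endpoint, the object is destroyed only after both the client and the server sides have closed. A small sketch of that counting scheme, assuming a simplified Open/Close interface rather than yuzu's KAutoObject machinery:

#include <atomic>
#include <cstdio>

class RefCounted {
public:
    void Open() { ++ref_count; }
    void Close() {
        // The object starts with one reference; when the count hits zero it destroys itself.
        if (--ref_count == 0) {
            Destroy();
        }
    }
    virtual ~RefCounted() = default;

private:
    virtual void Destroy() {
        std::puts("session destroyed");
        delete this;
    }
    std::atomic<int> ref_count{1};
};

int main() {
    auto* session = new RefCounted(); // count == 1 on creation
    session->Open();                  // count == 2: one per endpoint
    session->Close();                 // client endpoint closed, count == 1
    session->Close();                 // server endpoint closed, count == 0 -> destroyed
}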
@ -0,0 +1,96 @@
// Copyright 2021 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <atomic>
#include <string>
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/slab_helpers.h"
namespace Kernel {
class KSession final : public KAutoObjectWithSlabHeapAndContainer<KSession, KAutoObjectWithList> {
KERNEL_AUTOOBJECT_TRAITS(KSession, KAutoObject);
public:
explicit KSession(KernelCore& kernel);
virtual ~KSession() override;
void Initialize(KClientPort* port_, const std::string& name_);
virtual void Finalize() override;
virtual bool IsInitialized() const override {
return initialized;
}
virtual uintptr_t GetPostDestroyArgument() const override {
return reinterpret_cast<uintptr_t>(process);
}
static void PostDestroy(uintptr_t arg);
void OnServerClosed();
void OnClientClosed();
bool IsServerClosed() const {
return this->GetState() != State::Normal;
}
bool IsClientClosed() const {
return this->GetState() != State::Normal;
}
KClientSession& GetClientSession() {
return client;
}
KServerSession& GetServerSession() {
return server;
}
const KClientSession& GetClientSession() const {
return client;
}
const KServerSession& GetServerSession() const {
return server;
}
const KClientPort* GetParent() const {
return port;
}
private:
enum class State : u8 {
Invalid = 0,
Normal = 1,
ClientClosed = 2,
ServerClosed = 3,
};
private:
void SetState(State state) {
atomic_state = static_cast<u8>(state);
}
State GetState() const {
return static_cast<State>(atomic_state.load(std::memory_order_relaxed));
}
private:
KServerSession server;
KClientSession client;
std::atomic<std::underlying_type_t<State>> atomic_state{
static_cast<std::underlying_type_t<State>>(State::Invalid)};
KClientPort* port{};
KProcess* process{};
bool initialized{};
};
} // namespace Kernel
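KSession tracks which endpoint has closed with a small state machine stored in a std::atomic of the enum's underlying type; relaxed loads are enough because transitions are assumed to happen under the kernel's own locking. A sketch of the same idea, with the enum values mirroring the header above and the check-then-set sequence likewise assumed to be externally serialized:

#include <atomic>
#include <cstdint>
#include <type_traits>

enum class SessionState : std::uint8_t {
    Invalid = 0,
    Normal = 1,
    ClientClosed = 2,
    ServerClosed = 3,
};

class SessionStateTracker {
public:
    void OnClientClosed() {
        if (Get() == SessionState::Normal) {
            Set(SessionState::ClientClosed);
        }
    }
    void OnServerClosed() {
        if (Get() == SessionState::Normal) {
            Set(SessionState::ServerClosed);
        }
    }
    bool IsClientClosed() const { return Get() != SessionState::Normal; }

private:
    void Set(SessionState s) {
        state.store(static_cast<std::uint8_t>(s), std::memory_order_relaxed);
    }
    SessionState Get() const {
        return static_cast<SessionState>(state.load(std::memory_order_relaxed));
    }

    // Simplified: starts in Normal rather than Invalid.
    std::atomic<std::underlying_type_t<SessionState>> state{
        static_cast<std::uint8_t>(SessionState::Normal)};
};

int main() {
    SessionStateTracker tracker;
    tracker.OnClientClosed();
    return tracker.IsClientClosed() ? 0 : 1;
}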
@ -8,50 +8,74 @@
#include "core/hle/kernel/k_scoped_resource_reservation.h" #include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_shared_memory.h" #include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/kernel.h" #include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
namespace Kernel { namespace Kernel {
KSharedMemory::KSharedMemory(KernelCore& kernel, Core::DeviceMemory& device_memory) KSharedMemory::KSharedMemory(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {}
: Object{kernel}, device_memory{device_memory} {}
KSharedMemory::~KSharedMemory() { KSharedMemory::~KSharedMemory() {
kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemory, size); kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemory, size);
} }
std::shared_ptr<KSharedMemory> KSharedMemory::Create( ResultCode KSharedMemory::Initialize(KernelCore& kernel_, Core::DeviceMemory& device_memory_,
KernelCore& kernel, Core::DeviceMemory& device_memory, Process* owner_process, KProcess* owner_process_, KPageLinkedList&& page_list_,
KPageLinkedList&& page_list, KMemoryPermission owner_permission, Svc::MemoryPermission owner_permission_,
KMemoryPermission user_permission, PAddr physical_address, std::size_t size, std::string name) { Svc::MemoryPermission user_permission_,
PAddr physical_address_, std::size_t size_,
std::string name_) {
// Set members.
owner_process = owner_process_;
device_memory = &device_memory_;
page_list = std::move(page_list_);
owner_permission = owner_permission_;
user_permission = user_permission_;
physical_address = physical_address_;
size = size_;
name = name_;
const auto resource_limit = kernel.GetSystemResourceLimit(); // Get the resource limit.
KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory, KResourceLimit* reslimit = kernel.GetSystemResourceLimit();
size);
ASSERT(memory_reservation.Succeeded());
std::shared_ptr<KSharedMemory> shared_memory{ // Reserve memory for ourselves.
std::make_shared<KSharedMemory>(kernel, device_memory)}; KScopedResourceReservation memory_reservation(reslimit, LimitableResource::PhysicalMemory,
size_);
shared_memory->owner_process = owner_process; R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
shared_memory->page_list = std::move(page_list);
shared_memory->owner_permission = owner_permission;
shared_memory->user_permission = user_permission;
shared_memory->physical_address = physical_address;
shared_memory->size = size;
shared_memory->name = name;
// Commit our reservation.
memory_reservation.Commit(); memory_reservation.Commit();
return shared_memory;
// Set our resource limit.
resource_limit = reslimit;
resource_limit->Open();
// Mark initialized.
is_initialized = true;
// Clear all pages in the memory.
std::memset(device_memory_.GetPointer(physical_address_), 0, size_);
return RESULT_SUCCESS;
} }
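KSharedMemory::Initialize above reserves physical memory through KScopedResourceReservation: the reservation rolls back automatically if initialization bails out early, and is kept only once Commit() is called. A minimal sketch of that RAII pattern against a hypothetical resource-limit type (not yuzu's KResourceLimit):

#include <cstddef>
#include <cstdio>

struct ResourceLimit {
    std::size_t used{};
    bool Reserve(std::size_t amount) { used += amount; return true; }
    void Release(std::size_t amount) { used -= amount; }
};

class ScopedReservation {
public:
    ScopedReservation(ResourceLimit& limit_, std::size_t amount_)
        : limit{limit_}, amount{amount_}, succeeded{limit_.Reserve(amount_)} {}

    // If the caller never commits (e.g. a later step failed), roll the reservation back.
    ~ScopedReservation() {
        if (succeeded && !committed) {
            limit.Release(amount);
        }
    }

    bool Succeeded() const { return succeeded; }
    void Commit() { committed = true; }

private:
    ResourceLimit& limit;
    std::size_t amount;
    bool succeeded;
    bool committed{false};
};

int main() {
    ResourceLimit limit;
    {
        ScopedReservation reservation(limit, 0x1000);
        if (!reservation.Succeeded()) {
            return 1;
        }
        // ... initialization steps that might fail go here ...
        reservation.Commit(); // keep the reservation on success
    }
    std::printf("still reserved: %zu bytes\n", limit.used);
}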
ResultCode KSharedMemory::Map(Process& target_process, VAddr address, std::size_t size, void KSharedMemory::Finalize() {
KMemoryPermission permissions) { // Release the memory reservation.
resource_limit->Release(LimitableResource::PhysicalMemory, size);
resource_limit->Close();
// Perform inherited finalization.
KAutoObjectWithSlabHeapAndContainer<KSharedMemory, KAutoObjectWithList>::Finalize();
}
ResultCode KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t size,
Svc::MemoryPermission permissions) {
const u64 page_count{(size + PageSize - 1) / PageSize}; const u64 page_count{(size + PageSize - 1) / PageSize};
if (page_list.GetNumPages() != page_count) { if (page_list.GetNumPages() != page_count) {
UNIMPLEMENTED_MSG("Page count does not match"); UNIMPLEMENTED_MSG("Page count does not match");
} }
const KMemoryPermission expected = const Svc::MemoryPermission expected =
&target_process == owner_process ? owner_permission : user_permission; &target_process == owner_process ? owner_permission : user_permission;
if (permissions != expected) { if (permissions != expected) {
@ -59,7 +83,17 @@ ResultCode KSharedMemory::Map(Process& target_process, VAddr address, std::size_
} }
return target_process.PageTable().MapPages(address, page_list, KMemoryState::Shared, return target_process.PageTable().MapPages(address, page_list, KMemoryState::Shared,
permissions); ConvertToKMemoryPermission(permissions));
}
ResultCode KSharedMemory::Unmap(KProcess& target_process, VAddr address, std::size_t size) {
const u64 page_count{(size + PageSize - 1) / PageSize};
if (page_list.GetNumPages() != page_count) {
UNIMPLEMENTED_MSG("Page count does not match");
}
return target_process.PageTable().UnmapPages(address, page_list, KMemoryState::Shared);
} }
} // namespace Kernel } // namespace Kernel
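Both Map and Unmap above validate the caller's size by rounding the byte count up to whole pages and comparing against the block's page list; Map additionally selects the expected permission depending on whether the mapping process is the owner. A short sketch of those two checks, where PageSize and the permission enum are simplified stand-ins:

#include <cstdint>
#include <cstdio>

constexpr std::uint64_t PageSize = 0x1000;

enum class MemoryPermission { None, Read, ReadWrite };

// Round a byte size up to a whole number of pages.
constexpr std::uint64_t PageCount(std::uint64_t size) {
    return (size + PageSize - 1) / PageSize;
}

bool CanMap(bool caller_is_owner, MemoryPermission requested,
            MemoryPermission owner_perm, MemoryPermission user_perm,
            std::uint64_t size, std::uint64_t block_pages) {
    if (PageCount(size) != block_pages) {
        return false; // size does not cover the same number of pages as the block
    }
    const MemoryPermission expected = caller_is_owner ? owner_perm : user_perm;
    return requested == expected;
}

int main() {
    std::printf("%llu\n", static_cast<unsigned long long>(PageCount(0x1001))); // 2 pages
    std::printf("%d\n", CanMap(false, MemoryPermission::Read,
                               MemoryPermission::None, MemoryPermission::Read,
                               0x2000, 2)); // 1
}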
@ -11,37 +11,27 @@
#include "core/device_memory.h" #include "core/device_memory.h"
#include "core/hle/kernel/k_memory_block.h" #include "core/hle/kernel/k_memory_block.h"
#include "core/hle/kernel/k_page_linked_list.h" #include "core/hle/kernel/k_page_linked_list.h"
#include "core/hle/kernel/object.h" #include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/process.h" #include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h" #include "core/hle/result.h"
namespace Kernel { namespace Kernel {
class KernelCore; class KernelCore;
class KSharedMemory final : public Object { class KSharedMemory final
: public KAutoObjectWithSlabHeapAndContainer<KSharedMemory, KAutoObjectWithList> {
KERNEL_AUTOOBJECT_TRAITS(KSharedMemory, KAutoObject);
public: public:
explicit KSharedMemory(KernelCore& kernel, Core::DeviceMemory& device_memory); explicit KSharedMemory(KernelCore& kernel);
~KSharedMemory() override; ~KSharedMemory() override;
static std::shared_ptr<KSharedMemory> Create( ResultCode Initialize(KernelCore& kernel_, Core::DeviceMemory& device_memory_,
KernelCore& kernel, Core::DeviceMemory& device_memory, Process* owner_process, KProcess* owner_process_, KPageLinkedList&& page_list_,
KPageLinkedList&& page_list, KMemoryPermission owner_permission, Svc::MemoryPermission owner_permission_,
KMemoryPermission user_permission, PAddr physical_address, std::size_t size, Svc::MemoryPermission user_permission_, PAddr physical_address_,
std::string name); std::size_t size_, std::string name_);
std::string GetTypeName() const override {
return "SharedMemory";
}
std::string GetName() const override {
return name;
}
static constexpr HandleType HANDLE_TYPE = HandleType::SharedMemory;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
/** /**
* Maps a shared memory block to an address in the target process' address space * Maps a shared memory block to an address in the target process' address space
@ -50,8 +40,16 @@ public:
* @param size Size of the shared memory block to map * @param size Size of the shared memory block to map
* @param permissions Memory block map permissions (specified by SVC field) * @param permissions Memory block map permissions (specified by SVC field)
*/ */
ResultCode Map(Process& target_process, VAddr address, std::size_t size, ResultCode Map(KProcess& target_process, VAddr address, std::size_t size,
KMemoryPermission permissions); Svc::MemoryPermission permissions);
/**
* Unmaps a shared memory block from an address in the target process' address space
* @param target_process Process on which to unmap the memory block
* @param address Address in system memory to unmap shared memory block
* @param size Size of the shared memory block to unmap
*/
ResultCode Unmap(KProcess& target_process, VAddr address, std::size_t size);
/** /**
* Gets a pointer to the shared memory block * Gets a pointer to the shared memory block
@ -59,7 +57,7 @@ public:
* @return A pointer to the shared memory block from the specified offset * @return A pointer to the shared memory block from the specified offset
*/ */
u8* GetPointer(std::size_t offset = 0) { u8* GetPointer(std::size_t offset = 0) {
return device_memory.GetPointer(physical_address + offset); return device_memory->GetPointer(physical_address + offset);
} }
/** /**
@ -68,20 +66,26 @@ public:
* @return A pointer to the shared memory block from the specified offset * @return A pointer to the shared memory block from the specified offset
*/ */
const u8* GetPointer(std::size_t offset = 0) const { const u8* GetPointer(std::size_t offset = 0) const {
return device_memory.GetPointer(physical_address + offset); return device_memory->GetPointer(physical_address + offset);
} }
void Finalize() override {} virtual void Finalize() override;
virtual bool IsInitialized() const override {
return is_initialized;
}
static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
private: private:
Core::DeviceMemory& device_memory; Core::DeviceMemory* device_memory;
Process* owner_process{}; KProcess* owner_process{};
KPageLinkedList page_list; KPageLinkedList page_list;
KMemoryPermission owner_permission{}; Svc::MemoryPermission owner_permission{};
KMemoryPermission user_permission{}; Svc::MemoryPermission user_permission{};
PAddr physical_address{}; PAddr physical_address{};
std::size_t size{}; std::size_t size{};
std::string name; KResourceLimit* resource_limit{};
bool is_initialized{};
}; };
} // namespace Kernel } // namespace Kernel
@ -97,6 +97,7 @@ public:
void FreeImpl(void* obj) { void FreeImpl(void* obj) {
// Don't allow freeing an object that wasn't allocated from this heap // Don't allow freeing an object that wasn't allocated from this heap
ASSERT(Contains(reinterpret_cast<uintptr_t>(obj))); ASSERT(Contains(reinterpret_cast<uintptr_t>(obj)));
impl.Free(obj); impl.Free(obj);
} }
@ -148,6 +149,14 @@ public:
return obj; return obj;
} }
T* AllocateWithKernel(KernelCore& kernel) {
T* obj = static_cast<T*>(AllocateImpl());
if (obj != nullptr) {
new (obj) T(kernel);
}
return obj;
}
void Free(T* obj) { void Free(T* obj) {
FreeImpl(obj); FreeImpl(obj);
} }
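AllocateWithKernel above takes raw storage from the slab and constructs the object in place with placement new, so the heap never goes through operator new/delete. The compact sketch below shows that allocate-construct pairing over a fixed buffer; it is not yuzu's KSlabHeap, and unlike the real heap (where destruction happens through the KAutoObject Destroy path) it calls the destructor explicitly in Free just to illustrate the lifetime symmetry.

#include <array>
#include <cstddef>
#include <new>

struct KernelRef { int id{7}; };

struct Widget {
    explicit Widget(KernelRef& k) : kernel{&k} {}
    KernelRef* kernel;
};

class TinySlab {
public:
    Widget* AllocateWithKernel(KernelRef& kernel) {
        void* raw = AllocateImpl();
        if (raw == nullptr) {
            return nullptr;
        }
        return new (raw) Widget(kernel); // construct in place, no heap allocation
    }

    void Free(Widget* obj) {
        obj->~Widget();   // destroy explicitly; the storage is reused, not deleted
        in_use = false;
    }

private:
    void* AllocateImpl() {
        if (in_use) {
            return nullptr;
        }
        in_use = true;
        return storage.data();
    }

    alignas(Widget) std::array<std::byte, sizeof(Widget)> storage{};
    bool in_use{false};
};

int main() {
    KernelRef kernel;
    TinySlab slab;
    Widget* w = slab.AllocateWithKernel(kernel);
    slab.Free(w);
}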
@ -13,6 +13,11 @@
namespace Kernel { namespace Kernel {
void KSynchronizationObject::Finalize() {
this->OnFinalizeSynchronizationObject();
KAutoObject::Finalize();
}
ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index, ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
KSynchronizationObject** objects, const s32 num_objects, KSynchronizationObject** objects, const s32 num_objects,
s64 timeout) { s64 timeout) {
@ -130,10 +135,7 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
return wait_result; return wait_result;
} }
KSynchronizationObject::KSynchronizationObject(KernelCore& kernel) : Object{kernel} {} KSynchronizationObject::KSynchronizationObject(KernelCore& kernel) : KAutoObjectWithList{kernel} {}
KSynchronizationObject::KSynchronizationObject(KernelCore& kernel, std::string&& name)
: Object{kernel, std::move(name)} {}
KSynchronizationObject::~KSynchronizationObject() = default; KSynchronizationObject::~KSynchronizationObject() = default;
@ -6,7 +6,7 @@
#include <vector> #include <vector>
#include "core/hle/kernel/object.h" #include "core/hle/kernel/k_auto_object.h"
#include "core/hle/result.h" #include "core/hle/result.h"
namespace Kernel { namespace Kernel {
@ -16,7 +16,9 @@ class Synchronization;
class KThread; class KThread;
/// Class that represents a Kernel object that a thread can be waiting on /// Class that represents a Kernel object that a thread can be waiting on
class KSynchronizationObject : public Object { class KSynchronizationObject : public KAutoObjectWithList {
KERNEL_AUTOOBJECT_TRAITS(KSynchronizationObject, KAutoObject);
public: public:
struct ThreadListNode { struct ThreadListNode {
ThreadListNode* next{}; ThreadListNode* next{};
@ -27,15 +29,18 @@ public:
KSynchronizationObject** objects, const s32 num_objects, KSynchronizationObject** objects, const s32 num_objects,
s64 timeout); s64 timeout);
virtual void Finalize() override;
[[nodiscard]] virtual bool IsSignaled() const = 0; [[nodiscard]] virtual bool IsSignaled() const = 0;
[[nodiscard]] std::vector<KThread*> GetWaitingThreadsForDebugging() const; [[nodiscard]] std::vector<KThread*> GetWaitingThreadsForDebugging() const;
protected: protected:
explicit KSynchronizationObject(KernelCore& kernel); explicit KSynchronizationObject(KernelCore& kernel);
explicit KSynchronizationObject(KernelCore& kernel, std::string&& name);
virtual ~KSynchronizationObject(); virtual ~KSynchronizationObject();
virtual void OnFinalizeSynchronizationObject() {}
void NotifyAvailable(ResultCode result); void NotifyAvailable(ResultCode result);
void NotifyAvailable() { void NotifyAvailable() {
return this->NotifyAvailable(RESULT_SUCCESS); return this->NotifyAvailable(RESULT_SUCCESS);
@ -46,14 +51,4 @@ private:
ThreadListNode* thread_list_tail{}; ThreadListNode* thread_list_tail{};
}; };
// Specialization of DynamicObjectCast for KSynchronizationObjects
template <>
inline std::shared_ptr<KSynchronizationObject> DynamicObjectCast<KSynchronizationObject>(
std::shared_ptr<Object> object) {
if (object != nullptr && object->IsWaitable()) {
return std::static_pointer_cast<KSynchronizationObject>(object);
}
return nullptr;
}
} // namespace Kernel } // namespace Kernel
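KSynchronizationObject::Finalize above is a thin template-method style hook: derived classes override OnFinalizeSynchronizationObject() for their own teardown, and the base then chains into KAutoObject::Finalize(). A sketch of that call ordering with hypothetical class names:

#include <cstdio>

class AutoObjectSketch {
public:
    virtual ~AutoObjectSketch() = default;
    virtual void Finalize() { std::puts("base finalize"); }
};

class SyncObjectSketch : public AutoObjectSketch {
public:
    void Finalize() override {
        // Derived-specific cleanup first, then the inherited finalization.
        this->OnFinalizeSynchronizationObject();
        AutoObjectSketch::Finalize();
    }

protected:
    virtual void OnFinalizeSynchronizationObject() {}
};

class EventSketch final : public SyncObjectSketch {
protected:
    void OnFinalizeSynchronizationObject() override {
        std::puts("event-specific cleanup");
    }
};

int main() {
    EventSketch event;
    event.Finalize(); // prints "event-specific cleanup" then "base finalize"
}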
@ -18,17 +18,16 @@
#include "core/core.h" #include "core/core.h"
#include "core/cpu_manager.h" #include "core/cpu_manager.h"
#include "core/hardware_properties.h" #include "core/hardware_properties.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_condition_variable.h" #include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_memory_layout.h" #include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h" #include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_queue.h" #include "core/hle/kernel/k_thread_queue.h"
#include "core/hle/kernel/kernel.h" #include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/svc_results.h" #include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/time_manager.h" #include "core/hle/kernel/time_manager.h"
#include "core/hle/result.h" #include "core/hle/result.h"
@ -62,11 +61,11 @@ static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context,
namespace Kernel { namespace Kernel {
KThread::KThread(KernelCore& kernel) KThread::KThread(KernelCore& kernel)
: KSynchronizationObject{kernel}, activity_pause_lock{kernel} {} : KAutoObjectWithSlabHeapAndContainer{kernel}, activity_pause_lock{kernel} {}
KThread::~KThread() = default; KThread::~KThread() = default;
ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio, ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio,
s32 virt_core, Process* owner, ThreadType type) { s32 virt_core, KProcess* owner, ThreadType type) {
// Assert parameters are valid. // Assert parameters are valid.
ASSERT((type == ThreadType::Main) || ASSERT((type == ThreadType::Main) ||
(Svc::HighestThreadPriority <= prio && prio <= Svc::LowestThreadPriority)); (Svc::HighestThreadPriority <= prio && prio <= Svc::LowestThreadPriority));
@ -177,6 +176,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
// Set parent, if relevant. // Set parent, if relevant.
if (owner != nullptr) { if (owner != nullptr) {
parent = owner; parent = owner;
parent->Open();
parent->IncrementThreadCount(); parent->IncrementThreadCount();
} }
@ -209,14 +209,56 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
} }
ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg, ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
VAddr user_stack_top, s32 prio, s32 core, Process* owner, VAddr user_stack_top, s32 prio, s32 core, KProcess* owner,
ThreadType type) { ThreadType type, std::function<void(void*)>&& init_func,
void* init_func_parameter) {
// Initialize the thread. // Initialize the thread.
R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type)); R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));
// Initialize host context.
thread->host_context =
std::make_shared<Common::Fiber>(std::move(init_func), init_func_parameter);
return RESULT_SUCCESS; return RESULT_SUCCESS;
} }
ResultCode KThread::InitializeDummyThread(KThread* thread) {
return thread->Initialize({}, {}, {}, DefaultThreadPriority, 3, {}, ThreadType::Main);
}
ResultCode KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main,
Core::CpuManager::GetIdleThreadStartFunc(),
system.GetCpuManager().GetStartFuncParamater());
}
ResultCode KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread,
KThreadFunction func, uintptr_t arg,
s32 virt_core) {
return InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr, ThreadType::HighPriority,
Core::CpuManager::GetSuspendThreadStartFunc(),
system.GetCpuManager().GetStartFuncParamater());
}
ResultCode KThread::InitializeUserThread(Core::System& system, KThread* thread,
KThreadFunction func, uintptr_t arg, VAddr user_stack_top,
s32 prio, s32 virt_core, KProcess* owner) {
system.Kernel().GlobalSchedulerContext().AddThread(thread);
return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
ThreadType::User, Core::CpuManager::GetGuestThreadStartFunc(),
system.GetCpuManager().GetStartFuncParamater());
}
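The static Initialize* helpers above replace the old CreateThread factories with a two-phase pattern: the caller allocates the object first (via KThread::Create elsewhere in this change), then one of these named helpers configures it for a particular role and reports failure through a ResultCode instead of Unwrap(). A rough sketch of that shape, using a hypothetical Result enum and unique_ptr ownership purely for illustration:

#include <cstdio>
#include <memory>

enum class Result { Success, InvalidPriority };

class ThreadSketch {
public:
    // Phase 1: allocation only; the object is not usable until initialized.
    static std::unique_ptr<ThreadSketch> Create() { return std::make_unique<ThreadSketch>(); }

    // Phase 2: named initializers for each thread flavour.
    static Result InitializeIdleThread(ThreadSketch* thread, int core) {
        return thread->Initialize(/*priority=*/63, core);
    }
    static Result InitializeUserThread(ThreadSketch* thread, int priority, int core) {
        return thread->Initialize(priority, core);
    }

private:
    Result Initialize(int priority_, int core_) {
        if (priority_ < 0 || priority_ > 63) {
            return Result::InvalidPriority;
        }
        priority = priority_;
        core = core_;
        return Result::Success;
    }

    int priority{};
    int core{};
};

int main() {
    auto thread = ThreadSketch::Create();
    if (ThreadSketch::InitializeUserThread(thread.get(), 44, 0) != Result::Success) {
        std::puts("initialization failed");
        return 1;
    }
    std::puts("thread ready");
}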
void KThread::PostDestroy(uintptr_t arg) {
KProcess* owner = reinterpret_cast<KProcess*>(arg & ~1ULL);
const bool resource_limit_release_hint = (arg & 1);
const s64 hint_value = (resource_limit_release_hint ? 0 : 1);
if (owner != nullptr) {
owner->GetResourceLimit()->Release(LimitableResource::Threads, 1, hint_value);
owner->Close();
}
}
void KThread::Finalize() { void KThread::Finalize() {
// If the thread has an owner process, unregister it. // If the thread has an owner process, unregister it.
if (parent != nullptr) { if (parent != nullptr) {
@ -246,8 +288,10 @@ void KThread::Finalize() {
// Decrement the parent process's thread count. // Decrement the parent process's thread count.
if (parent != nullptr) { if (parent != nullptr) {
parent->DecrementThreadCount(); parent->DecrementThreadCount();
parent->GetResourceLimit()->Release(LimitableResource::Threads, 1);
} }
// Perform inherited finalization.
KAutoObjectWithSlabHeapAndContainer<KThread, KSynchronizationObject>::Finalize();
} }
bool KThread::IsSignaled() const { bool KThread::IsSignaled() const {
@ -294,6 +338,9 @@ void KThread::StartTermination() {
// Register terminated dpc flag. // Register terminated dpc flag.
RegisterDpc(DpcFlag::Terminated); RegisterDpc(DpcFlag::Terminated);
// Close the thread.
this->Close();
} }
void KThread::Pin() { void KThread::Pin() {
@ -932,7 +979,7 @@ void KThread::Exit() {
// Release the thread resource hint from parent. // Release the thread resource hint from parent.
if (parent != nullptr) { if (parent != nullptr) {
// TODO(bunnei): Hint that the resource is about to be released. parent->GetResourceLimit()->Release(Kernel::LimitableResource::Threads, 0, 1);
resource_limit_release_hint = true; resource_limit_release_hint = true;
} }
@ -995,56 +1042,6 @@ std::shared_ptr<Common::Fiber>& KThread::GetHostContext() {
return host_context; return host_context;
} }
ResultVal<std::shared_ptr<KThread>> KThread::CreateThread(Core::System& system,
ThreadType type_flags, std::string name,
VAddr entry_point, u32 priority, u64 arg,
s32 processor_id, VAddr stack_top,
Process* owner_process) {
auto& kernel = system.Kernel();
std::shared_ptr<KThread> thread = std::make_shared<KThread>(kernel);
if (const auto result =
thread->InitializeThread(thread.get(), entry_point, arg, stack_top, priority,
processor_id, owner_process, type_flags);
result.IsError()) {
return result;
}
thread->name = name;
auto& scheduler = kernel.GlobalSchedulerContext();
scheduler.AddThread(thread);
return MakeResult<std::shared_ptr<KThread>>(std::move(thread));
}
ResultVal<std::shared_ptr<KThread>> KThread::CreateThread(
Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point, u32 priority,
u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process,
std::function<void(void*)>&& thread_start_func, void* thread_start_parameter) {
auto thread_result = CreateThread(system, type_flags, name, entry_point, priority, arg,
processor_id, stack_top, owner_process);
if (thread_result.Succeeded()) {
(*thread_result)->host_context =
std::make_shared<Common::Fiber>(std::move(thread_start_func), thread_start_parameter);
}
return thread_result;
}
ResultVal<std::shared_ptr<KThread>> KThread::CreateUserThread(
Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point, u32 priority,
u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process) {
std::function<void(void*)> init_func = Core::CpuManager::GetGuestThreadStartFunc();
void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
return CreateThread(system, type_flags, name, entry_point, priority, arg, processor_id,
stack_top, owner_process, std::move(init_func), init_func_parameter);
}
KThread* GetCurrentThreadPointer(KernelCore& kernel) { KThread* GetCurrentThreadPointer(KernelCore& kernel) {
return kernel.GetCurrentEmuThread(); return kernel.GetCurrentEmuThread();
} }
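KThread::PostDestroy and GetPostDestroyArgument (in the header below) pack two values into one uintptr_t: the owner process pointer and a one-bit release hint stored in the pointer's low bit, which is safe because the pointer has alignment greater than one byte. A standalone sketch of that tagging, using a plain struct in place of KProcess:

#include <cassert>
#include <cstdint>

struct OwnerProcess {
    int released_with_hint{0};
    int released_without_hint{0};
};

// Pack the owner pointer and a boolean hint into a single argument.
std::uintptr_t PackPostDestroyArg(OwnerProcess* owner, bool release_hint) {
    return reinterpret_cast<std::uintptr_t>(owner) | (release_hint ? 1u : 0u);
}

void PostDestroy(std::uintptr_t arg) {
    auto* owner = reinterpret_cast<OwnerProcess*>(arg & ~std::uintptr_t{1});
    const bool release_hint = (arg & 1) != 0;
    if (owner != nullptr) {
        if (release_hint) {
            ++owner->released_with_hint;
        } else {
            ++owner->released_without_hint;
        }
    }
}

int main() {
    OwnerProcess process;
    assert(reinterpret_cast<std::uintptr_t>(&process) % 2 == 0); // low bit is free to use
    PostDestroy(PackPostDestroyArg(&process, true));
    PostDestroy(PackPostDestroyArg(&process, false));
    assert(process.released_with_hint == 1 && process.released_without_hint == 1);
}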
@ -19,7 +19,7 @@
#include "core/hle/kernel/k_light_lock.h" #include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_spin_lock.h" #include "core/hle/kernel/k_spin_lock.h"
#include "core/hle/kernel/k_synchronization_object.h" #include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/object.h" #include "core/hle/kernel/slab_helpers.h"
#include "core/hle/kernel/svc_common.h" #include "core/hle/kernel/svc_common.h"
#include "core/hle/kernel/svc_types.h" #include "core/hle/kernel/svc_types.h"
#include "core/hle/result.h" #include "core/hle/result.h"
@ -37,7 +37,7 @@ namespace Kernel {
class GlobalSchedulerContext; class GlobalSchedulerContext;
class KernelCore; class KernelCore;
class Process; class KProcess;
class KScheduler; class KScheduler;
class KThreadQueue; class KThreadQueue;
@ -99,9 +99,13 @@ enum class ThreadWaitReasonForDebugging : u32 {
[[nodiscard]] KThread& GetCurrentThread(KernelCore& kernel); [[nodiscard]] KThread& GetCurrentThread(KernelCore& kernel);
[[nodiscard]] s32 GetCurrentCoreId(KernelCore& kernel); [[nodiscard]] s32 GetCurrentCoreId(KernelCore& kernel);
class KThread final : public KSynchronizationObject, public boost::intrusive::list_base_hook<> { class KThread final : public KAutoObjectWithSlabHeapAndContainer<KThread, KSynchronizationObject>,
public boost::intrusive::list_base_hook<> {
KERNEL_AUTOOBJECT_TRAITS(KThread, KSynchronizationObject);
private:
friend class KScheduler; friend class KScheduler;
friend class Process; friend class KProcess;
public: public:
static constexpr s32 DefaultThreadPriority = 44; static constexpr s32 DefaultThreadPriority = 44;
@ -115,74 +119,10 @@ public:
using ThreadContext64 = Core::ARM_Interface::ThreadContext64; using ThreadContext64 = Core::ARM_Interface::ThreadContext64;
using WaiterList = boost::intrusive::list<KThread>; using WaiterList = boost::intrusive::list<KThread>;
/**
* Creates and returns a new thread.
* @param system The instance of the whole system
* @param name The friendly name desired for the thread
* @param entry_point The address at which the thread should start execution
* @param priority The thread's priority
* @param arg User data to pass to the thread
* @param processor_id The ID(s) of the processors on which the thread is desired to be run
* @param stack_top The address of the thread's stack top
* @param owner_process The parent process for the thread, if null, it's a kernel thread
* @return A shared pointer to the newly created thread
*/
[[nodiscard]] static ResultVal<std::shared_ptr<KThread>> CreateThread(
Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point,
u32 priority, u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process);
/**
* Creates and returns a new thread, with a specified entry point.
* @param system The instance of the whole system
* @param name The friendly name desired for the thread
* @param entry_point The address at which the thread should start execution
* @param priority The thread's priority
* @param arg User data to pass to the thread
* @param processor_id The ID(s) of the processors on which the thread is desired to be run
* @param stack_top The address of the thread's stack top
* @param owner_process The parent process for the thread, if null, it's a kernel thread
* @param thread_start_func The function where the host context will start.
* @param thread_start_parameter The parameter which will passed to host context on init
* @return A shared pointer to the newly created thread
*/
[[nodiscard]] static ResultVal<std::shared_ptr<KThread>> CreateThread(
Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point,
u32 priority, u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process,
std::function<void(void*)>&& thread_start_func, void* thread_start_parameter);
/**
* Creates and returns a new thread for the emulated "user" process.
* @param system The instance of the whole system
* @param name The friendly name desired for the thread
* @param entry_point The address at which the thread should start execution
* @param priority The thread's priority
* @param arg User data to pass to the thread
* @param processor_id The ID(s) of the processors on which the thread is desired to be run
* @param stack_top The address of the thread's stack top
* @param owner_process The parent process for the thread, if null, it's a kernel thread
* @return A shared pointer to the newly created thread
*/
[[nodiscard]] static ResultVal<std::shared_ptr<KThread>> CreateUserThread(
Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point,
u32 priority, u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process);
[[nodiscard]] std::string GetName() const override {
return name;
}
void SetName(std::string new_name) { void SetName(std::string new_name) {
name = std::move(new_name); name = std::move(new_name);
} }
[[nodiscard]] std::string GetTypeName() const override {
return "Thread";
}
static constexpr HandleType HANDLE_TYPE = HandleType::Thread;
[[nodiscard]] HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
/** /**
* Gets the thread's current priority * Gets the thread's current priority
* @return The current thread's priority * @return The current thread's priority
@ -257,10 +197,6 @@ public:
void Suspend(); void Suspend();
void Finalize() override;
bool IsSignaled() const override;
void SetSyncedObject(KSynchronizationObject* obj, ResultCode wait_res) { void SetSyncedObject(KSynchronizationObject* obj, ResultCode wait_res) {
synced_object = obj; synced_object = obj;
wait_result = wait_res; wait_result = wait_res;
@ -354,11 +290,11 @@ public:
current_core_id = core; current_core_id = core;
} }
[[nodiscard]] Process* GetOwnerProcess() { [[nodiscard]] KProcess* GetOwnerProcess() {
return parent; return parent;
} }
[[nodiscard]] const Process* GetOwnerProcess() const { [[nodiscard]] const KProcess* GetOwnerProcess() const {
return parent; return parent;
} }
@ -422,6 +358,40 @@ public:
return termination_requested || GetRawState() == ThreadState::Terminated; return termination_requested || GetRawState() == ThreadState::Terminated;
} }
[[nodiscard]] virtual u64 GetId() const override final {
return this->GetThreadID();
}
[[nodiscard]] virtual bool IsInitialized() const override {
return initialized;
}
[[nodiscard]] virtual uintptr_t GetPostDestroyArgument() const override {
return reinterpret_cast<uintptr_t>(parent) | (resource_limit_release_hint ? 1 : 0);
}
virtual void Finalize() override;
[[nodiscard]] virtual bool IsSignaled() const override;
static void PostDestroy(uintptr_t arg);
[[nodiscard]] static ResultCode InitializeDummyThread(KThread* thread);
[[nodiscard]] static ResultCode InitializeIdleThread(Core::System& system, KThread* thread,
s32 virt_core);
[[nodiscard]] static ResultCode InitializeHighPriorityThread(Core::System& system,
KThread* thread,
KThreadFunction func,
uintptr_t arg, s32 virt_core);
[[nodiscard]] static ResultCode InitializeUserThread(Core::System& system, KThread* thread,
KThreadFunction func, uintptr_t arg,
VAddr user_stack_top, s32 prio,
s32 virt_core, KProcess* owner);
public:
struct StackParameters { struct StackParameters {
u8 svc_permission[0x10]; u8 svc_permission[0x10];
std::atomic<u8> dpc_flags; std::atomic<u8> dpc_flags;
@ -671,11 +641,13 @@ private:
void StartTermination(); void StartTermination();
[[nodiscard]] ResultCode Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, [[nodiscard]] ResultCode Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top,
s32 prio, s32 virt_core, Process* owner, ThreadType type); s32 prio, s32 virt_core, KProcess* owner, ThreadType type);
[[nodiscard]] static ResultCode InitializeThread(KThread* thread, KThreadFunction func, [[nodiscard]] static ResultCode InitializeThread(KThread* thread, KThreadFunction func,
uintptr_t arg, VAddr user_stack_top, s32 prio, uintptr_t arg, VAddr user_stack_top, s32 prio,
s32 core, Process* owner, ThreadType type); s32 core, KProcess* owner, ThreadType type,
std::function<void(void*)>&& init_func,
void* init_func_parameter);
static void RestorePriority(KernelCore& kernel, KThread* thread); static void RestorePriority(KernelCore& kernel, KThread* thread);
@ -697,7 +669,7 @@ private:
std::atomic<s64> cpu_time{}; std::atomic<s64> cpu_time{};
KSynchronizationObject* synced_object{}; KSynchronizationObject* synced_object{};
VAddr address_key{}; VAddr address_key{};
Process* parent{}; KProcess* parent{};
VAddr kernel_stack_top{}; VAddr kernel_stack_top{};
u32* light_ipc_data{}; u32* light_ipc_data{};
VAddr tls_address{}; VAddr tls_address{};
@ -742,7 +714,6 @@ private:
VAddr mutex_wait_address_for_debugging{}; VAddr mutex_wait_address_for_debugging{};
ThreadWaitReasonForDebugging wait_reason_for_debugging{}; ThreadWaitReasonForDebugging wait_reason_for_debugging{};
ThreadType thread_type_for_debugging{}; ThreadType thread_type_for_debugging{};
std::string name;
public: public:
using ConditionVariableThreadTreeType = ConditionVariableThreadTree; using ConditionVariableThreadTreeType = ConditionVariableThreadTree;
@ -0,0 +1,45 @@
// Copyright 2021 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_transfer_memory.h"
#include "core/hle/kernel/kernel.h"
namespace Kernel {
KTransferMemory::KTransferMemory(KernelCore& kernel)
: KAutoObjectWithSlabHeapAndContainer{kernel} {}
KTransferMemory::~KTransferMemory() = default;
ResultCode KTransferMemory::Initialize(VAddr address_, std::size_t size_,
Svc::MemoryPermission owner_perm_) {
// Set members.
owner = kernel.CurrentProcess();
// TODO(bunnei): Lock for transfer memory
// Set remaining tracking members.
owner->Open();
owner_perm = owner_perm_;
address = address_;
size = size_;
is_initialized = true;
return RESULT_SUCCESS;
}
void KTransferMemory::Finalize() {
// Perform inherited finalization.
KAutoObjectWithSlabHeapAndContainer<KTransferMemory, KAutoObjectWithList>::Finalize();
}
void KTransferMemory::PostDestroy(uintptr_t arg) {
KProcess* owner = reinterpret_cast<KProcess*>(arg);
owner->GetResourceLimit()->Release(LimitableResource::TransferMemory, 1);
owner->Close();
}
} // namespace Kernel
@ -0,0 +1,66 @@
// Copyright 2021 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/kernel/svc_types.h"
#include "core/hle/result.h"
union ResultCode;
namespace Core::Memory {
class Memory;
}
namespace Kernel {
class KernelCore;
class KProcess;
class KTransferMemory final
: public KAutoObjectWithSlabHeapAndContainer<KTransferMemory, KAutoObjectWithList> {
KERNEL_AUTOOBJECT_TRAITS(KTransferMemory, KAutoObject);
public:
explicit KTransferMemory(KernelCore& kernel);
virtual ~KTransferMemory() override;
ResultCode Initialize(VAddr address_, std::size_t size_, Svc::MemoryPermission owner_perm_);
virtual void Finalize() override;
virtual bool IsInitialized() const override {
return is_initialized;
}
virtual uintptr_t GetPostDestroyArgument() const override {
return reinterpret_cast<uintptr_t>(owner);
}
static void PostDestroy(uintptr_t arg);
KProcess* GetOwner() const {
return owner;
}
VAddr GetSourceAddress() const {
return address;
}
size_t GetSize() const {
return is_initialized ? size * PageSize : 0;
}
private:
KProcess* owner{};
VAddr address{};
Svc::MemoryPermission owner_perm{};
size_t size{};
bool is_initialized{};
};
} // namespace Kernel
@ -8,20 +8,28 @@
namespace Kernel { namespace Kernel {
KWritableEvent::KWritableEvent(KernelCore& kernel, std::string&& name) KWritableEvent::KWritableEvent(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {}
: Object{kernel, std::move(name)} {}
KWritableEvent::~KWritableEvent() = default; KWritableEvent::~KWritableEvent() = default;
void KWritableEvent::Initialize(KEvent* parent_) { void KWritableEvent::Initialize(KEvent* parent_, std::string&& name_) {
parent = parent_; parent = parent_;
name = std::move(name_);
parent->GetReadableEvent().Open();
} }
ResultCode KWritableEvent::Signal() { ResultCode KWritableEvent::Signal() {
return parent->GetReadableEvent()->Signal(); return parent->GetReadableEvent().Signal();
} }
ResultCode KWritableEvent::Clear() { ResultCode KWritableEvent::Clear() {
return parent->GetReadableEvent()->Clear(); return parent->GetReadableEvent().Clear();
}
void KWritableEvent::Destroy() {
// Close our references.
parent->GetReadableEvent().Close();
parent->Close();
} }
} // namespace Kernel } // namespace Kernel
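KWritableEvent is the signalling half of an event pair: Signal and Clear simply forward to the parent's readable end, and Destroy drops the references taken during Initialize. A small sketch of that pair relationship with simplified types and no kernel reference counting:

#include <cstdio>

class ReadableEventSketch {
public:
    void Signal() { signaled = true; }
    void Clear() { signaled = false; }
    bool IsSignaled() const { return signaled; }

private:
    bool signaled{false};
};

class EventSketch; // owns both ends

class WritableEventSketch {
public:
    void Initialize(EventSketch* parent_) { parent = parent_; }
    void Signal();
    void Clear();

private:
    EventSketch* parent{};
};

class EventSketch {
public:
    EventSketch() { writable.Initialize(this); }
    ReadableEventSketch& GetReadableEvent() { return readable; }
    WritableEventSketch& GetWritableEvent() { return writable; }

private:
    ReadableEventSketch readable;
    WritableEventSketch writable;
};

// The writable end has no state of its own; it just forwards to the readable end.
void WritableEventSketch::Signal() { parent->GetReadableEvent().Signal(); }
void WritableEventSketch::Clear() { parent->GetReadableEvent().Clear(); }

int main() {
    EventSketch event;
    event.GetWritableEvent().Signal();
    std::printf("signaled: %d\n", event.GetReadableEvent().IsSignaled()); // 1
}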
@ -4,7 +4,8 @@
#pragma once #pragma once
#include "core/hle/kernel/object.h" #include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h" #include "core/hle/result.h"
namespace Kernel { namespace Kernel {
@ -12,24 +13,19 @@ namespace Kernel {
class KernelCore; class KernelCore;
class KEvent; class KEvent;
class KWritableEvent final : public Object { class KWritableEvent final
: public KAutoObjectWithSlabHeapAndContainer<KWritableEvent, KAutoObjectWithList> {
KERNEL_AUTOOBJECT_TRAITS(KWritableEvent, KAutoObject);
public: public:
explicit KWritableEvent(KernelCore& kernel, std::string&& name); explicit KWritableEvent(KernelCore& kernel);
~KWritableEvent() override; ~KWritableEvent() override;
std::string GetTypeName() const override { virtual void Destroy() override;
return "KWritableEvent";
}
static constexpr HandleType HANDLE_TYPE = HandleType::WritableEvent; static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
void Initialize(KEvent* parent_);
void Finalize() override {}
void Initialize(KEvent* parent_, std::string&& name_);
ResultCode Signal(); ResultCode Signal();
ResultCode Clear(); ResultCode Clear();
@ -26,10 +26,12 @@
#include "core/cpu_manager.h" #include "core/cpu_manager.h"
#include "core/device_memory.h" #include "core/device_memory.h"
#include "core/hardware_properties.h" #include "core/hardware_properties.h"
#include "core/hle/kernel/client_port.h" #include "core/hle/kernel/init/init_slab_setup.h"
#include "core/hle/kernel/handle_table.h" #include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_memory_layout.h" #include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h" #include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h" #include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_shared_memory.h" #include "core/hle/kernel/k_shared_memory.h"
@ -37,7 +39,6 @@
#include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h" #include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h" #include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/service_thread.h" #include "core/hle/kernel/service_thread.h"
#include "core/hle/kernel/svc_results.h" #include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/time_manager.h" #include "core/hle/kernel/time_manager.h"
@ -51,7 +52,7 @@ namespace Kernel {
struct KernelCore::Impl { struct KernelCore::Impl {
explicit Impl(Core::System& system, KernelCore& kernel) explicit Impl(Core::System& system, KernelCore& kernel)
: time_manager{system}, global_handle_table{kernel}, system{system} {} : time_manager{system}, object_list_container{kernel}, system{system} {}
void SetMulticore(bool is_multicore) { void SetMulticore(bool is_multicore) {
this->is_multicore = is_multicore; this->is_multicore = is_multicore;
@ -59,8 +60,7 @@ struct KernelCore::Impl {
void Initialize(KernelCore& kernel) { void Initialize(KernelCore& kernel) {
global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel); global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel);
global_handle_table = std::make_unique<Kernel::KHandleTable>(kernel);
RegisterHostThread();
service_thread_manager = service_thread_manager =
std::make_unique<Common::ThreadWorker>(1, "yuzu:ServiceThreadManager"); std::make_unique<Common::ThreadWorker>(1, "yuzu:ServiceThreadManager");
@ -69,14 +69,20 @@ struct KernelCore::Impl {
InitializePhysicalCores(); InitializePhysicalCores();
// Derive the initial memory layout from the emulated board // Derive the initial memory layout from the emulated board
Init::InitializeSlabResourceCounts(kernel);
KMemoryLayout memory_layout; KMemoryLayout memory_layout;
DeriveInitialMemoryLayout(memory_layout); DeriveInitialMemoryLayout(memory_layout);
InitializeMemoryLayout(memory_layout); Init::InitializeSlabHeaps(system, memory_layout);
// Initialize kernel memory and resources.
InitializeSystemResourceLimit(kernel, system.CoreTiming(), memory_layout); InitializeSystemResourceLimit(kernel, system.CoreTiming(), memory_layout);
InitializeSlabHeaps(); InitializeMemoryLayout(memory_layout);
InitializePageSlab();
InitializeSchedulers(); InitializeSchedulers();
InitializeSuspendThreads(); InitializeSuspendThreads();
InitializePreemption(kernel); InitializePreemption(kernel);
RegisterHostThread();
} }
void InitializeCores() { void InitializeCores() {
@ -93,34 +99,49 @@ struct KernelCore::Impl {
service_threads.clear(); service_threads.clear();
next_object_id = 0; next_object_id = 0;
next_kernel_process_id = Process::InitialKIPIDMin; next_kernel_process_id = KProcess::InitialKIPIDMin;
next_user_process_id = Process::ProcessIDMin; next_user_process_id = KProcess::ProcessIDMin;
next_thread_id = 1; next_thread_id = 1;
for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
if (suspend_threads[i]) { if (suspend_threads[core_id]) {
suspend_threads[i].reset(); suspend_threads[core_id]->Close();
suspend_threads[core_id] = nullptr;
} }
schedulers[core_id].reset();
} }
cores.clear(); cores.clear();
if (current_process) {
current_process->Close();
current_process = nullptr; current_process = nullptr;
}
global_handle_table.Clear(); global_handle_table.reset();
preemption_event = nullptr; preemption_event = nullptr;
for (auto& iter : named_ports) {
iter.second->Close();
}
named_ports.clear(); named_ports.clear();
exclusive_monitor.reset(); exclusive_monitor.reset();
hid_shared_mem = nullptr; // Cleanup persistent kernel objects
font_shared_mem = nullptr; auto CleanupObject = [](KAutoObject* obj) {
irs_shared_mem = nullptr; if (obj) {
time_shared_mem = nullptr; obj->Close();
obj = nullptr;
system_resource_limit = nullptr; }
};
CleanupObject(hid_shared_mem);
CleanupObject(font_shared_mem);
CleanupObject(irs_shared_mem);
CleanupObject(time_shared_mem);
CleanupObject(system_resource_limit);
// Next host thread ID to use, 0-3 IDs represent core threads, >3 represent others // Next host thread ID to use, 0-3 IDs represent core threads, >3 represent others
next_host_thread_id = Core::Hardware::NUM_CPU_CORES; next_host_thread_id = Core::Hardware::NUM_CPU_CORES;
@ -145,7 +166,9 @@ struct KernelCore::Impl {
void InitializeSystemResourceLimit(KernelCore& kernel, void InitializeSystemResourceLimit(KernelCore& kernel,
const Core::Timing::CoreTiming& core_timing, const Core::Timing::CoreTiming& core_timing,
const KMemoryLayout& memory_layout) { const KMemoryLayout& memory_layout) {
system_resource_limit = std::make_shared<KResourceLimit>(kernel, core_timing); system_resource_limit = KResourceLimit::Create(system.Kernel());
system_resource_limit->Initialize(&core_timing);
const auto [total_size, kernel_size] = memory_layout.GetTotalAndKernelMemorySizes(); const auto [total_size, kernel_size] = memory_layout.GetTotalAndKernelMemorySizes();
// If setting the default system values fails, then something seriously wrong has occurred. // If setting the default system values fails, then something seriously wrong has occurred.
@ -189,19 +212,16 @@ struct KernelCore::Impl {
} }
void InitializeSuspendThreads() { void InitializeSuspendThreads() {
for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
std::string name = "Suspend Thread Id:" + std::to_string(i); suspend_threads[core_id] = KThread::Create(system.Kernel());
std::function<void(void*)> init_func = Core::CpuManager::GetSuspendThreadStartFunc(); ASSERT(KThread::InitializeHighPriorityThread(system, suspend_threads[core_id], {}, {},
void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater(); core_id)
auto thread_res = KThread::CreateThread( .IsSuccess());
system, ThreadType::HighPriority, std::move(name), 0, 0, 0, static_cast<u32>(i), 0, suspend_threads[core_id]->SetName(fmt::format("SuspendThread:{}", core_id));
nullptr, std::move(init_func), init_func_parameter);
suspend_threads[i] = std::move(thread_res).Unwrap();
} }
} }
void MakeCurrentProcess(Process* process) { void MakeCurrentProcess(KProcess* process) {
current_process = process; current_process = process;
if (process == nullptr) { if (process == nullptr) {
return; return;
@ -232,11 +252,15 @@ struct KernelCore::Impl {
// Gets the dummy KThread for the caller, allocating a new one if this is the first time // Gets the dummy KThread for the caller, allocating a new one if this is the first time
KThread* GetHostDummyThread() { KThread* GetHostDummyThread() {
const thread_local auto thread = auto make_thread = [this]() {
KThread::CreateThread( std::unique_ptr<KThread> thread = std::make_unique<KThread>(system.Kernel());
system, ThreadType::Main, fmt::format("DummyThread:{}", GetHostThreadId()), 0, KAutoObject::Create(thread.get());
KThread::DefaultThreadPriority, 0, static_cast<u32>(3), 0, nullptr) ASSERT(KThread::InitializeDummyThread(thread.get()).IsSuccess());
.Unwrap(); thread->SetName(fmt::format("DummyThread:{}", GetHostThreadId()));
return std::move(thread);
};
thread_local auto thread = make_thread();
return thread.get(); return thread.get();
} }
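GetHostDummyThread above builds one dummy KThread per host thread: a lambda creates and initializes the object, and a thread_local variable caches the result so later calls from the same host thread reuse it. A minimal sketch of that thread_local memoization, with purely illustrative names:

#include <cstdio>
#include <memory>
#include <thread>

struct DummyThread {
    explicit DummyThread(unsigned host_id) {
        std::printf("created dummy for host thread %u\n", host_id);
    }
};

DummyThread* GetHostDummyThread(unsigned host_id) {
    auto make_thread = [host_id]() {
        // Runs at most once per host thread, on first use.
        return std::make_unique<DummyThread>(host_id);
    };
    thread_local auto thread = make_thread();
    return thread.get();
}

int main() {
    GetHostDummyThread(0);
    GetHostDummyThread(0); // same host thread: reuses the cached object, nothing printed
    std::thread([] { GetHostDummyThread(1); }).join(); // new host thread: creates another
}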
@ -371,7 +395,8 @@ struct KernelCore::Impl {
const size_t resource_region_size = memory_layout.GetResourceRegionSizeForInit(); const size_t resource_region_size = memory_layout.GetResourceRegionSizeForInit();
// Determine the size of the slab region. // Determine the size of the slab region.
const size_t slab_region_size = Common::AlignUp(KernelSlabHeapSize, PageSize); const size_t slab_region_size =
Common::AlignUp(Init::CalculateTotalSlabHeapSize(system.Kernel()), PageSize);
ASSERT(slab_region_size <= resource_region_size); ASSERT(slab_region_size <= resource_region_size);
// Setup the slab region. // Setup the slab region.
@ -569,25 +594,30 @@ struct KernelCore::Impl {
const PAddr irs_phys_addr{system_pool.GetAddress() + hid_size + font_size}; const PAddr irs_phys_addr{system_pool.GetAddress() + hid_size + font_size};
const PAddr time_phys_addr{system_pool.GetAddress() + hid_size + font_size + irs_size}; const PAddr time_phys_addr{system_pool.GetAddress() + hid_size + font_size + irs_size};
hid_shared_mem = Kernel::KSharedMemory::Create( hid_shared_mem = KSharedMemory::Create(system.Kernel());
system.Kernel(), system.DeviceMemory(), nullptr, {hid_phys_addr, hid_size / PageSize}, font_shared_mem = KSharedMemory::Create(system.Kernel());
KMemoryPermission::None, KMemoryPermission::Read, hid_phys_addr, hid_size, irs_shared_mem = KSharedMemory::Create(system.Kernel());
"HID:SharedMemory"); time_shared_mem = KSharedMemory::Create(system.Kernel());
font_shared_mem = Kernel::KSharedMemory::Create(
system.Kernel(), system.DeviceMemory(), nullptr, {font_phys_addr, font_size / PageSize}, hid_shared_mem->Initialize(system.Kernel(), system.DeviceMemory(), nullptr,
KMemoryPermission::None, KMemoryPermission::Read, font_phys_addr, font_size, {hid_phys_addr, hid_size / PageSize},
"Font:SharedMemory"); Svc::MemoryPermission::None, Svc::MemoryPermission::Read,
irs_shared_mem = Kernel::KSharedMemory::Create( hid_phys_addr, hid_size, "HID:SharedMemory");
system.Kernel(), system.DeviceMemory(), nullptr, {irs_phys_addr, irs_size / PageSize}, font_shared_mem->Initialize(system.Kernel(), system.DeviceMemory(), nullptr,
KMemoryPermission::None, KMemoryPermission::Read, irs_phys_addr, irs_size, {font_phys_addr, font_size / PageSize},
"IRS:SharedMemory"); Svc::MemoryPermission::None, Svc::MemoryPermission::Read,
time_shared_mem = Kernel::KSharedMemory::Create( font_phys_addr, font_size, "Font:SharedMemory");
system.Kernel(), system.DeviceMemory(), nullptr, {time_phys_addr, time_size / PageSize}, irs_shared_mem->Initialize(system.Kernel(), system.DeviceMemory(), nullptr,
KMemoryPermission::None, KMemoryPermission::Read, time_phys_addr, time_size, {irs_phys_addr, irs_size / PageSize},
"Time:SharedMemory"); Svc::MemoryPermission::None, Svc::MemoryPermission::Read,
irs_phys_addr, irs_size, "IRS:SharedMemory");
time_shared_mem->Initialize(system.Kernel(), system.DeviceMemory(), nullptr,
{time_phys_addr, time_size / PageSize},
Svc::MemoryPermission::None, Svc::MemoryPermission::Read,
time_phys_addr, time_size, "Time:SharedMemory");
} }
void InitializeSlabHeaps() { void InitializePageSlab() {
// Allocate slab heaps // Allocate slab heaps
user_slab_heap_pages = std::make_unique<KSlabHeap<Page>>(); user_slab_heap_pages = std::make_unique<KSlabHeap<Page>>();
@ -596,30 +626,33 @@ struct KernelCore::Impl {
// Reserve slab heaps // Reserve slab heaps
ASSERT( ASSERT(
system_resource_limit->Reserve(LimitableResource::PhysicalMemory, user_slab_heap_size)); system_resource_limit->Reserve(LimitableResource::PhysicalMemory, user_slab_heap_size));
// Initialize slab heaps // Initialize slab heap
user_slab_heap_pages->Initialize( user_slab_heap_pages->Initialize(
system.DeviceMemory().GetPointer(Core::DramMemoryMap::SlabHeapBase), system.DeviceMemory().GetPointer(Core::DramMemoryMap::SlabHeapBase),
user_slab_heap_size); user_slab_heap_size);
} }
std::atomic<u32> next_object_id{0}; std::atomic<u32> next_object_id{0};
std::atomic<u64> next_kernel_process_id{Process::InitialKIPIDMin}; std::atomic<u64> next_kernel_process_id{KProcess::InitialKIPIDMin};
std::atomic<u64> next_user_process_id{Process::ProcessIDMin}; std::atomic<u64> next_user_process_id{KProcess::ProcessIDMin};
std::atomic<u64> next_thread_id{1}; std::atomic<u64> next_thread_id{1};
// Lists all processes that exist in the current session. // Lists all processes that exist in the current session.
std::vector<std::shared_ptr<Process>> process_list; std::vector<KProcess*> process_list;
Process* current_process = nullptr; KProcess* current_process{};
std::unique_ptr<Kernel::GlobalSchedulerContext> global_scheduler_context; std::unique_ptr<Kernel::GlobalSchedulerContext> global_scheduler_context;
Kernel::TimeManager time_manager; Kernel::TimeManager time_manager;
std::shared_ptr<KResourceLimit> system_resource_limit; Init::KSlabResourceCounts slab_resource_counts{};
KResourceLimit* system_resource_limit{};
std::shared_ptr<Core::Timing::EventType> preemption_event; std::shared_ptr<Core::Timing::EventType> preemption_event;
// This is the kernel's handle table or supervisor handle table which // This is the kernel's handle table or supervisor handle table which
// stores all the objects in place. // stores all the objects in place.
HandleTable global_handle_table; std::unique_ptr<KHandleTable> global_handle_table;
KAutoObjectWithListContainer object_list_container;
/// Map of named ports managed by the kernel, which can be retrieved using /// Map of named ports managed by the kernel, which can be retrieved using
/// the ConnectToPort SVC. /// the ConnectToPort SVC.
@ -636,10 +669,10 @@ struct KernelCore::Impl {
std::unique_ptr<KSlabHeap<Page>> user_slab_heap_pages; std::unique_ptr<KSlabHeap<Page>> user_slab_heap_pages;
// Shared memory for services // Shared memory for services
std::shared_ptr<Kernel::KSharedMemory> hid_shared_mem; Kernel::KSharedMemory* hid_shared_mem{};
std::shared_ptr<Kernel::KSharedMemory> font_shared_mem; Kernel::KSharedMemory* font_shared_mem{};
std::shared_ptr<Kernel::KSharedMemory> irs_shared_mem; Kernel::KSharedMemory* irs_shared_mem{};
std::shared_ptr<Kernel::KSharedMemory> time_shared_mem; Kernel::KSharedMemory* time_shared_mem{};
// Threads used for services // Threads used for services
std::unordered_set<std::shared_ptr<Kernel::ServiceThread>> service_threads; std::unordered_set<std::shared_ptr<Kernel::ServiceThread>> service_threads;
@ -648,7 +681,7 @@ struct KernelCore::Impl {
// the release of itself // the release of itself
std::unique_ptr<Common::ThreadWorker> service_thread_manager; std::unique_ptr<Common::ThreadWorker> service_thread_manager;
std::array<std::shared_ptr<KThread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{}; std::array<KThread*, Core::Hardware::NUM_CPU_CORES> suspend_threads;
std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{}; std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{}; std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
@ -663,15 +696,14 @@ struct KernelCore::Impl {
}; };
KernelCore::KernelCore(Core::System& system) : impl{std::make_unique<Impl>(system, *this)} {} KernelCore::KernelCore(Core::System& system) : impl{std::make_unique<Impl>(system, *this)} {}
KernelCore::~KernelCore() { KernelCore::~KernelCore() = default;
Shutdown();
}
void KernelCore::SetMulticore(bool is_multicore) { void KernelCore::SetMulticore(bool is_multicore) {
impl->SetMulticore(is_multicore); impl->SetMulticore(is_multicore);
} }
void KernelCore::Initialize() { void KernelCore::Initialize() {
slab_heap_container = std::make_unique<SlabHeapContainer>();
impl->Initialize(*this); impl->Initialize(*this);
} }
@ -683,31 +715,35 @@ void KernelCore::Shutdown() {
impl->Shutdown(); impl->Shutdown();
} }
std::shared_ptr<KResourceLimit> KernelCore::GetSystemResourceLimit() const { const KResourceLimit* KernelCore::GetSystemResourceLimit() const {
return impl->system_resource_limit; return impl->system_resource_limit;
} }
std::shared_ptr<KThread> KernelCore::RetrieveThreadFromGlobalHandleTable(Handle handle) const { KResourceLimit* KernelCore::GetSystemResourceLimit() {
return impl->global_handle_table.Get<KThread>(handle); return impl->system_resource_limit;
} }
void KernelCore::AppendNewProcess(std::shared_ptr<Process> process) { KScopedAutoObject<KThread> KernelCore::RetrieveThreadFromGlobalHandleTable(Handle handle) const {
impl->process_list.push_back(std::move(process)); return impl->global_handle_table->GetObject<KThread>(handle);
} }
void KernelCore::MakeCurrentProcess(Process* process) { void KernelCore::AppendNewProcess(KProcess* process) {
impl->process_list.push_back(process);
}
void KernelCore::MakeCurrentProcess(KProcess* process) {
impl->MakeCurrentProcess(process); impl->MakeCurrentProcess(process);
} }
Process* KernelCore::CurrentProcess() { KProcess* KernelCore::CurrentProcess() {
return impl->current_process; return impl->current_process;
} }
const Process* KernelCore::CurrentProcess() const { const KProcess* KernelCore::CurrentProcess() const {
return impl->current_process; return impl->current_process;
} }
const std::vector<std::shared_ptr<Process>>& KernelCore::GetProcessList() const { const std::vector<KProcess*>& KernelCore::GetProcessList() const {
return impl->process_list; return impl->process_list;
} }
@ -781,6 +817,14 @@ const Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() const {
return *impl->exclusive_monitor; return *impl->exclusive_monitor;
} }
KAutoObjectWithListContainer& KernelCore::ObjectListContainer() {
return impl->object_list_container;
}
const KAutoObjectWithListContainer& KernelCore::ObjectListContainer() const {
return impl->object_list_container;
}
void KernelCore::InvalidateAllInstructionCaches() { void KernelCore::InvalidateAllInstructionCaches() {
for (auto& physical_core : impl->cores) { for (auto& physical_core : impl->cores) {
physical_core.ArmInterface().ClearInstructionCache(); physical_core.ArmInterface().ClearInstructionCache();
@ -800,8 +844,9 @@ void KernelCore::PrepareReschedule(std::size_t id) {
// TODO: Reimplement, this // TODO: Reimplement, this
} }
void KernelCore::AddNamedPort(std::string name, std::shared_ptr<ClientPort> port) { void KernelCore::AddNamedPort(std::string name, KClientPort* port) {
impl->named_ports.emplace(std::move(name), std::move(port)); port->Open();
impl->named_ports.emplace(std::move(name), port);
} }
KernelCore::NamedPortTable::iterator KernelCore::FindNamedPort(const std::string& name) { KernelCore::NamedPortTable::iterator KernelCore::FindNamedPort(const std::string& name) {
@ -833,12 +878,12 @@ u64 KernelCore::CreateNewUserProcessID() {
return impl->next_user_process_id++; return impl->next_user_process_id++;
} }
Kernel::HandleTable& KernelCore::GlobalHandleTable() { KHandleTable& KernelCore::GlobalHandleTable() {
return impl->global_handle_table; return *impl->global_handle_table;
} }
const Kernel::HandleTable& KernelCore::GlobalHandleTable() const { const KHandleTable& KernelCore::GlobalHandleTable() const {
return impl->global_handle_table; return *impl->global_handle_table;
} }
void KernelCore::RegisterCoreThread(std::size_t core_id) { void KernelCore::RegisterCoreThread(std::size_t core_id) {
@ -910,9 +955,9 @@ void KernelCore::Suspend(bool in_suspention) {
{ {
KScopedSchedulerLock lock(*this); KScopedSchedulerLock lock(*this);
const auto state = should_suspend ? ThreadState::Runnable : ThreadState::Waiting; const auto state = should_suspend ? ThreadState::Runnable : ThreadState::Waiting;
for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
impl->suspend_threads[i]->SetState(state); impl->suspend_threads[core_id]->SetState(state);
impl->suspend_threads[i]->SetWaitReasonForDebugging( impl->suspend_threads[core_id]->SetWaitReasonForDebugging(
ThreadWaitReasonForDebugging::Suspended); ThreadWaitReasonForDebugging::Suspended);
} }
} }
@ -952,6 +997,14 @@ void KernelCore::ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> servi
}); });
} }
Init::KSlabResourceCounts& KernelCore::SlabResourceCounts() {
return impl->slab_resource_counts;
}
const Init::KSlabResourceCounts& KernelCore::SlabResourceCounts() const {
return impl->slab_resource_counts;
}
bool KernelCore::IsPhantomModeForSingleCore() const { bool KernelCore::IsPhantomModeForSingleCore() const {
return impl->IsPhantomModeForSingleCore(); return impl->IsPhantomModeForSingleCore();
} }
@ -960,4 +1013,12 @@ void KernelCore::SetIsPhantomModeForSingleCore(bool value) {
impl->SetIsPhantomModeForSingleCore(value); impl->SetIsPhantomModeForSingleCore(value);
} }
Core::System& KernelCore::System() {
return impl->system;
}
const Core::System& KernelCore::System() const {
return impl->system;
}
} // namespace Kernel } // namespace Kernel
@ -11,8 +11,10 @@
#include <vector> #include <vector>
#include "core/arm/cpu_interrupt_handler.h" #include "core/arm/cpu_interrupt_handler.h"
#include "core/hardware_properties.h" #include "core/hardware_properties.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_slab_heap.h"
#include "core/hle/kernel/memory_types.h" #include "core/hle/kernel/memory_types.h"
#include "core/hle/kernel/object.h" #include "core/hle/kernel/svc_common.h"
namespace Core { namespace Core {
class CPUInterruptHandler; class CPUInterruptHandler;
@ -27,20 +29,32 @@ struct EventType;
namespace Kernel { namespace Kernel {
class ClientPort; class KClientPort;
class GlobalSchedulerContext; class GlobalSchedulerContext;
class HandleTable; class KAutoObjectWithListContainer;
class KClientSession;
class KEvent;
class KHandleTable;
class KLinkedListNode;
class KMemoryManager; class KMemoryManager;
class KPort;
class KProcess;
class KResourceLimit; class KResourceLimit;
class KScheduler; class KScheduler;
class KSession;
class KSharedMemory; class KSharedMemory;
class KThread; class KThread;
class KTransferMemory;
class KWritableEvent;
class PhysicalCore; class PhysicalCore;
class Process;
class ServiceThread; class ServiceThread;
class Synchronization; class Synchronization;
class TimeManager; class TimeManager;
namespace Init {
struct KSlabResourceCounts;
}
template <typename T> template <typename T>
class KSlabHeap; class KSlabHeap;
@ -51,7 +65,7 @@ constexpr EmuThreadHandle EmuThreadHandleReserved{1ULL << 63};
/// Represents a single instance of the kernel. /// Represents a single instance of the kernel.
class KernelCore { class KernelCore {
private: private:
using NamedPortTable = std::unordered_map<std::string, std::shared_ptr<ClientPort>>; using NamedPortTable = std::unordered_map<std::string, KClientPort*>;
public: public:
/// Constructs an instance of the kernel using the given System /// Constructs an instance of the kernel using the given System
@ -83,25 +97,28 @@ public:
void Shutdown(); void Shutdown();
/// Retrieves a shared pointer to the system resource limit instance. /// Retrieves a shared pointer to the system resource limit instance.
std::shared_ptr<KResourceLimit> GetSystemResourceLimit() const; const KResourceLimit* GetSystemResourceLimit() const;
/// Retrieves a pointer to the system resource limit instance.
KResourceLimit* GetSystemResourceLimit();
/// Retrieves a shared pointer to a Thread instance within the thread wakeup handle table. /// Retrieves a shared pointer to a Thread instance within the thread wakeup handle table.
std::shared_ptr<KThread> RetrieveThreadFromGlobalHandleTable(Handle handle) const; KScopedAutoObject<KThread> RetrieveThreadFromGlobalHandleTable(Handle handle) const;
/// Adds the given shared pointer to an internal list of active processes. /// Adds the given shared pointer to an internal list of active processes.
void AppendNewProcess(std::shared_ptr<Process> process); void AppendNewProcess(KProcess* process);
/// Makes the given process the new current process. /// Makes the given process the new current process.
void MakeCurrentProcess(Process* process); void MakeCurrentProcess(KProcess* process);
/// Retrieves a pointer to the current process. /// Retrieves a pointer to the current process.
Process* CurrentProcess(); KProcess* CurrentProcess();
/// Retrieves a const pointer to the current process. /// Retrieves a const pointer to the current process.
const Process* CurrentProcess() const; const KProcess* CurrentProcess() const;
/// Retrieves the list of processes. /// Retrieves the list of processes.
const std::vector<std::shared_ptr<Process>>& GetProcessList() const; const std::vector<KProcess*>& GetProcessList() const;
/// Gets the sole instance of the global scheduler /// Gets the sole instance of the global scheduler
Kernel::GlobalSchedulerContext& GlobalSchedulerContext(); Kernel::GlobalSchedulerContext& GlobalSchedulerContext();
@ -143,6 +160,10 @@ public:
const Core::ExclusiveMonitor& GetExclusiveMonitor() const; const Core::ExclusiveMonitor& GetExclusiveMonitor() const;
KAutoObjectWithListContainer& ObjectListContainer();
const KAutoObjectWithListContainer& ObjectListContainer() const;
std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Interrupts(); std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Interrupts();
const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Interrupts() const; const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Interrupts() const;
@ -152,7 +173,7 @@ public:
void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size); void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size);
/// Adds a port to the named port table /// Adds a port to the named port table
void AddNamedPort(std::string name, std::shared_ptr<ClientPort> port); void AddNamedPort(std::string name, KClientPort* port);
/// Finds a port within the named port table with the given name. /// Finds a port within the named port table with the given name.
NamedPortTable::iterator FindNamedPort(const std::string& name); NamedPortTable::iterator FindNamedPort(const std::string& name);
@ -225,9 +246,10 @@ public:
/** /**
* Creates an HLE service thread, which is used to execute service routines asynchronously. * Creates an HLE service thread, which is used to execute service routines asynchronously.
* While these are allocated per ServerSession, these need to be owned and managed outside of * While these are allocated per ServerSession, these need to be owned and managed outside
* ServerSession to avoid a circular dependency. * of ServerSession to avoid a circular dependency.
* @param name String name for the ServerSession creating this thread, used for debug purposes. * @param name String name for the ServerSession creating this thread, used for debug
* purposes.
* @returns A weak pointer to the newly created service thread. * @returns A weak pointer to the newly created service thread.
*/ */
std::weak_ptr<Kernel::ServiceThread> CreateServiceThread(const std::string& name); std::weak_ptr<Kernel::ServiceThread> CreateServiceThread(const std::string& name);
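The comment above is the rationale for handing out std::weak_ptr: the kernel owns every service thread centrally, and each ServerSession keeps only a weak reference, which is what breaks the ServerSession <-> ServiceThread ownership cycle. Below is a minimal, self-contained sketch of that ownership shape; Manager and Worker are hypothetical stand-ins, not yuzu types.

    #include <cstdio>
    #include <memory>
    #include <string>
    #include <utility>
    #include <vector>

    struct Worker {
        std::string name;
        void Post(const char* msg) { std::printf("[%s] %s\n", name.c_str(), msg); }
    };

    // Central owner of the workers (playing the role KernelCore plays for service threads).
    struct Manager {
        std::vector<std::shared_ptr<Worker>> workers;

        std::weak_ptr<Worker> CreateWorker(std::string name) {
            workers.push_back(std::make_shared<Worker>(Worker{std::move(name)}));
            return workers.back(); // callers only ever receive a weak reference
        }
    };

    int main() {
        Manager manager;
        std::weak_ptr<Worker> handle = manager.CreateWorker("session_thread");

        // A session uses the worker only while the central owner still keeps it alive.
        if (auto worker = handle.lock()) {
            worker->Post("queued request");
        }
    }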
@ -243,9 +265,45 @@ public:
bool IsPhantomModeForSingleCore() const; bool IsPhantomModeForSingleCore() const;
void SetIsPhantomModeForSingleCore(bool value); void SetIsPhantomModeForSingleCore(bool value);
Core::System& System();
const Core::System& System() const;
/// Gets the slab heap for the specified kernel object type.
template <typename T>
KSlabHeap<T>& SlabHeap() {
if constexpr (std::is_same_v<T, KClientSession>) {
return slab_heap_container->client_session;
} else if constexpr (std::is_same_v<T, KEvent>) {
return slab_heap_container->event;
} else if constexpr (std::is_same_v<T, KLinkedListNode>) {
return slab_heap_container->linked_list_node;
} else if constexpr (std::is_same_v<T, KPort>) {
return slab_heap_container->port;
} else if constexpr (std::is_same_v<T, KProcess>) {
return slab_heap_container->process;
} else if constexpr (std::is_same_v<T, KResourceLimit>) {
return slab_heap_container->resource_limit;
} else if constexpr (std::is_same_v<T, KSession>) {
return slab_heap_container->session;
} else if constexpr (std::is_same_v<T, KSharedMemory>) {
return slab_heap_container->shared_memory;
} else if constexpr (std::is_same_v<T, KThread>) {
return slab_heap_container->thread;
} else if constexpr (std::is_same_v<T, KTransferMemory>) {
return slab_heap_container->transfer_memory;
} else if constexpr (std::is_same_v<T, KWritableEvent>) {
return slab_heap_container->writeable_event;
}
}
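The accessor above resolves a kernel object type to its dedicated slab heap member entirely at compile time through an if constexpr chain over std::is_same_v, so there is no runtime lookup or virtual dispatch. A minimal, self-contained sketch of the same compile-time dispatch pattern follows; TinyPool, Foo and Bar are hypothetical stand-ins rather than yuzu types.

    #include <cassert>
    #include <deque>
    #include <type_traits>

    struct Foo { int value{}; };
    struct Bar { float value{}; };

    // A trivial per-type pool; KSlabHeap<T> plays this role in the kernel.
    template <typename T>
    struct TinyPool {
        std::deque<T> storage;
        T* Allocate() { return &storage.emplace_back(); }
    };

    struct PoolContainer {
        TinyPool<Foo> foo_pool;
        TinyPool<Bar> bar_pool;

        // Maps the requested type to the matching member at compile time.
        template <typename T>
        TinyPool<T>& Pool() {
            if constexpr (std::is_same_v<T, Foo>) {
                return foo_pool;
            } else if constexpr (std::is_same_v<T, Bar>) {
                return bar_pool;
            }
        }
    };

    int main() {
        PoolContainer container;
        Foo* foo = container.Pool<Foo>().Allocate();
        Bar* bar = container.Pool<Bar>().Allocate();
        assert(foo != nullptr && bar != nullptr);
    }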
/// Gets the current slab resource counts.
Init::KSlabResourceCounts& SlabResourceCounts();
/// Gets the current slab resource counts.
const Init::KSlabResourceCounts& SlabResourceCounts() const;
private: private:
friend class Object; friend class KProcess;
friend class Process;
friend class KThread; friend class KThread;
/// Creates a new object ID, incrementing the internal object ID counter. /// Creates a new object ID, incrementing the internal object ID counter.
@ -261,14 +319,33 @@ private:
u64 CreateNewThreadID(); u64 CreateNewThreadID();
/// Provides a reference to the global handle table. /// Provides a reference to the global handle table.
Kernel::HandleTable& GlobalHandleTable(); KHandleTable& GlobalHandleTable();
/// Provides a const reference to the global handle table. /// Provides a const reference to the global handle table.
const Kernel::HandleTable& GlobalHandleTable() const; const KHandleTable& GlobalHandleTable() const;
struct Impl; struct Impl;
std::unique_ptr<Impl> impl; std::unique_ptr<Impl> impl;
bool exception_exited{}; bool exception_exited{};
private:
/// Helper to encapsulate all slab heaps in a single heap allocated container
struct SlabHeapContainer {
KSlabHeap<KClientSession> client_session;
KSlabHeap<KEvent> event;
KSlabHeap<KLinkedListNode> linked_list_node;
KSlabHeap<KPort> port;
KSlabHeap<KProcess> process;
KSlabHeap<KResourceLimit> resource_limit;
KSlabHeap<KSession> session;
KSlabHeap<KSharedMemory> shared_memory;
KSlabHeap<KThread> thread;
KSlabHeap<KTransferMemory> transfer_memory;
KSlabHeap<KWritableEvent> writeable_event;
};
std::unique_ptr<SlabHeapContainer> slab_heap_container;
}; };
} // namespace Kernel } // namespace Kernel
@ -1,42 +0,0 @@
// Copyright 2018 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/assert.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object.h"
namespace Kernel {
Object::Object(KernelCore& kernel_)
: kernel{kernel_}, object_id{kernel_.CreateNewObjectID()}, name{"[UNKNOWN KERNEL OBJECT]"} {}
Object::Object(KernelCore& kernel_, std::string&& name_)
: kernel{kernel_}, object_id{kernel_.CreateNewObjectID()}, name{std::move(name_)} {}
Object::~Object() = default;
bool Object::IsWaitable() const {
switch (GetHandleType()) {
case HandleType::ReadableEvent:
case HandleType::Thread:
case HandleType::Process:
case HandleType::ServerPort:
case HandleType::ServerSession:
return true;
case HandleType::Unknown:
case HandleType::Event:
case HandleType::WritableEvent:
case HandleType::SharedMemory:
case HandleType::TransferMemory:
case HandleType::ResourceLimit:
case HandleType::ClientPort:
case HandleType::ClientSession:
case HandleType::Session:
return false;
}
UNREACHABLE();
return false;
}
} // namespace Kernel
@ -1,96 +0,0 @@
// Copyright 2018 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <atomic>
#include <memory>
#include <string>
#include "common/common_types.h"
namespace Kernel {
class KernelCore;
using Handle = u32;
enum class HandleType : u32 {
Unknown,
Event,
WritableEvent,
ReadableEvent,
SharedMemory,
TransferMemory,
Thread,
Process,
ResourceLimit,
ClientPort,
ServerPort,
ClientSession,
ServerSession,
Session,
};
class Object : NonCopyable, public std::enable_shared_from_this<Object> {
public:
explicit Object(KernelCore& kernel_);
explicit Object(KernelCore& kernel_, std::string&& name_);
virtual ~Object();
/// Returns a unique identifier for the object. For debugging purposes only.
u32 GetObjectId() const {
return object_id.load(std::memory_order_relaxed);
}
virtual std::string GetTypeName() const {
return "[BAD KERNEL OBJECT TYPE]";
}
virtual std::string GetName() const {
return name;
}
virtual HandleType GetHandleType() const = 0;
void Close() {
// TODO(bunnei): This is a placeholder to decrement the reference count, which we will use
// when we implement KAutoObject instead of using shared_ptr.
}
/**
* Check if a thread can wait on the object
* @return True if a thread can wait on the object, otherwise false
*/
bool IsWaitable() const;
virtual void Finalize() = 0;
protected:
/// The kernel instance this object was created under.
KernelCore& kernel;
private:
std::atomic<u32> object_id{0};
std::string name;
};
template <typename T>
std::shared_ptr<T> SharedFrom(T* raw) {
if (raw == nullptr)
return nullptr;
return std::static_pointer_cast<T>(raw->shared_from_this());
}
/**
* Attempts to downcast the given Object pointer to a pointer to T.
* @return Derived pointer to the object, or `nullptr` if `object` isn't of type T.
*/
template <typename T>
inline std::shared_ptr<T> DynamicObjectCast(std::shared_ptr<Object> object) {
if (object != nullptr && object->GetHandleType() == T::HANDLE_TYPE) {
return std::static_pointer_cast<T>(object);
}
return nullptr;
}
} // namespace Kernel
@ -6,7 +6,7 @@
#include "common/bit_util.h" #include "common/bit_util.h"
#include "common/logging/log.h" #include "common/logging/log.h"
#include "core/hle/kernel/handle_table.h" #include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_page_table.h" #include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/process_capability.h" #include "core/hle/kernel/process_capability.h"
#include "core/hle/kernel/svc_results.h" #include "core/hle/kernel/svc_results.h"
@ -99,7 +99,7 @@ void ProcessCapabilities::InitializeForMetadatalessProcess() {
interrupt_capabilities.set(); interrupt_capabilities.set();
// Allow using the maximum possible amount of handles // Allow using the maximum possible amount of handles
handle_table_size = static_cast<s32>(HandleTable::MAX_COUNT); handle_table_size = static_cast<s32>(KHandleTable::MaxTableSize);
// Allow all debugging capabilities. // Allow all debugging capabilities.
is_debuggable = true; is_debuggable = true;
@ -159,7 +159,7 @@ ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& s
const auto type = GetCapabilityType(flag); const auto type = GetCapabilityType(flag);
if (type == CapabilityType::Unset) { if (type == CapabilityType::Unset) {
return ResultInvalidCapabilityDescriptor; return ResultInvalidArgument;
} }
// Bail early on ignorable entries, as one would expect, // Bail early on ignorable entries, as one would expect,
@ -202,7 +202,7 @@ ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& s
} }
LOG_ERROR(Kernel, "Invalid capability type! type={}", type); LOG_ERROR(Kernel, "Invalid capability type! type={}", type);
return ResultInvalidCapabilityDescriptor; return ResultInvalidArgument;
} }
void ProcessCapabilities::Clear() { void ProcessCapabilities::Clear() {
@ -225,7 +225,7 @@ ResultCode ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
if (priority_mask != 0 || core_mask != 0) { if (priority_mask != 0 || core_mask != 0) {
LOG_ERROR(Kernel, "Core or priority mask are not zero! priority_mask={}, core_mask={}", LOG_ERROR(Kernel, "Core or priority mask are not zero! priority_mask={}, core_mask={}",
priority_mask, core_mask); priority_mask, core_mask);
return ResultInvalidCapabilityDescriptor; return ResultInvalidArgument;
} }
const u32 core_num_min = (flags >> 16) & 0xFF; const u32 core_num_min = (flags >> 16) & 0xFF;
@ -329,7 +329,7 @@ ResultCode ProcessCapabilities::HandleProgramTypeFlags(u32 flags) {
const u32 reserved = flags >> 17; const u32 reserved = flags >> 17;
if (reserved != 0) { if (reserved != 0) {
LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved); LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
return ResultReservedValue; return ResultReservedUsed;
} }
program_type = static_cast<ProgramType>((flags >> 14) & 0b111); program_type = static_cast<ProgramType>((flags >> 14) & 0b111);
@ -349,7 +349,7 @@ ResultCode ProcessCapabilities::HandleKernelVersionFlags(u32 flags) {
LOG_ERROR(Kernel, LOG_ERROR(Kernel,
"Kernel version is non zero or flags are too small! major_version={}, flags={}", "Kernel version is non zero or flags are too small! major_version={}, flags={}",
major_version, flags); major_version, flags);
return ResultInvalidCapabilityDescriptor; return ResultInvalidArgument;
} }
kernel_version = flags; kernel_version = flags;
@ -360,7 +360,7 @@ ResultCode ProcessCapabilities::HandleHandleTableFlags(u32 flags) {
const u32 reserved = flags >> 26; const u32 reserved = flags >> 26;
if (reserved != 0) { if (reserved != 0) {
LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved); LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
return ResultReservedValue; return ResultReservedUsed;
} }
handle_table_size = static_cast<s32>((flags >> 16) & 0x3FF); handle_table_size = static_cast<s32>((flags >> 16) & 0x3FF);
@ -371,7 +371,7 @@ ResultCode ProcessCapabilities::HandleDebugFlags(u32 flags) {
const u32 reserved = flags >> 19; const u32 reserved = flags >> 19;
if (reserved != 0) { if (reserved != 0) {
LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved); LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
return ResultReservedValue; return ResultReservedUsed;
} }
is_debuggable = (flags & 0x20000) != 0; is_debuggable = (flags & 0x20000) != 0;
@ -1,54 +0,0 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <tuple>
#include "common/assert.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/server_port.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/svc_results.h"
namespace Kernel {
ServerPort::ServerPort(KernelCore& kernel) : KSynchronizationObject{kernel} {}
ServerPort::~ServerPort() = default;
ResultVal<std::shared_ptr<ServerSession>> ServerPort::Accept() {
if (pending_sessions.empty()) {
return ResultNotFound;
}
auto session = std::move(pending_sessions.back());
pending_sessions.pop_back();
return MakeResult(std::move(session));
}
void ServerPort::AppendPendingSession(std::shared_ptr<ServerSession> pending_session) {
pending_sessions.push_back(std::move(pending_session));
if (pending_sessions.size() == 1) {
NotifyAvailable();
}
}
bool ServerPort::IsSignaled() const {
return !pending_sessions.empty();
}
ServerPort::PortPair ServerPort::CreatePortPair(KernelCore& kernel, u32 max_sessions,
std::string name) {
std::shared_ptr<ServerPort> server_port = std::make_shared<ServerPort>(kernel);
std::shared_ptr<ClientPort> client_port = std::make_shared<ClientPort>(kernel);
server_port->name = name + "_Server";
client_port->name = name + "_Client";
client_port->server_port = server_port;
client_port->max_sessions = max_sessions;
client_port->active_sessions = 0;
return std::make_pair(std::move(server_port), std::move(client_port));
}
} // namespace Kernel
@ -1,98 +0,0 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "common/common_types.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/object.h"
#include "core/hle/result.h"
namespace Kernel {
class ClientPort;
class KernelCore;
class ServerSession;
class SessionRequestHandler;
class ServerPort final : public KSynchronizationObject {
public:
explicit ServerPort(KernelCore& kernel);
~ServerPort() override;
using HLEHandler = std::shared_ptr<SessionRequestHandler>;
using PortPair = std::pair<std::shared_ptr<ServerPort>, std::shared_ptr<ClientPort>>;
/**
* Creates a pair of ServerPort and an associated ClientPort.
*
* @param kernel The kernel instance to create the port pair under.
* @param max_sessions Maximum number of sessions to the port
* @param name Optional name of the ports
* @return The created port tuple
*/
static PortPair CreatePortPair(KernelCore& kernel, u32 max_sessions,
std::string name = "UnknownPort");
std::string GetTypeName() const override {
return "ServerPort";
}
std::string GetName() const override {
return name;
}
static constexpr HandleType HANDLE_TYPE = HandleType::ServerPort;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
/**
* Accepts a pending incoming connection on this port. If there are no pending sessions, will
* return ERR_NO_PENDING_SESSIONS.
*/
ResultVal<std::shared_ptr<ServerSession>> Accept();
/// Whether or not this server port has an HLE handler available.
bool HasHLEHandler() const {
return hle_handler != nullptr;
}
/// Gets the HLE handler for this port.
HLEHandler GetHLEHandler() const {
return hle_handler;
}
/**
* Sets the HLE handler template for the port. ServerSessions created by connecting to this port
* will inherit a reference to this handler.
*/
void SetHleHandler(HLEHandler hle_handler_) {
hle_handler = std::move(hle_handler_);
}
/// Appends a ServerSession to the collection of ServerSessions
/// waiting to be accepted by this port.
void AppendPendingSession(std::shared_ptr<ServerSession> pending_session);
bool IsSignaled() const override;
void Finalize() override {}
private:
/// ServerSessions waiting to be accepted by the port
std::vector<std::shared_ptr<ServerSession>> pending_sessions;
/// This session's HLE request handler template (optional)
/// ServerSessions created from this port inherit a reference to this handler.
HLEHandler hle_handler;
/// Name of the port (optional)
std::string name;
};
} // namespace Kernel
@ -13,8 +13,8 @@
#include "common/scope_exit.h" #include "common/scope_exit.h"
#include "common/thread.h" #include "common/thread.h"
#include "core/core.h" #include "core/core.h"
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/kernel.h" #include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/service_thread.h" #include "core/hle/kernel/service_thread.h"
#include "core/hle/lock.h" #include "core/hle/lock.h"
#include "video_core/renderer_base.h" #include "video_core/renderer_base.h"
@ -26,7 +26,7 @@ public:
explicit Impl(KernelCore& kernel, std::size_t num_threads, const std::string& name); explicit Impl(KernelCore& kernel, std::size_t num_threads, const std::string& name);
~Impl(); ~Impl();
void QueueSyncRequest(ServerSession& session, std::shared_ptr<HLERequestContext>&& context); void QueueSyncRequest(KSession& session, std::shared_ptr<HLERequestContext>&& context);
private: private:
std::vector<std::thread> threads; std::vector<std::thread> threads;
@ -69,18 +69,27 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std
}); });
} }
void ServiceThread::Impl::QueueSyncRequest(ServerSession& session, void ServiceThread::Impl::QueueSyncRequest(KSession& session,
std::shared_ptr<HLERequestContext>&& context) { std::shared_ptr<HLERequestContext>&& context) {
{ {
std::unique_lock lock{queue_mutex}; std::unique_lock lock{queue_mutex};
// ServerSession owns the service thread, so we cannot capture a strong pointer here in the // Open a reference to the session to ensure it is not closed while the service request
// event that the ServerSession is terminated. // completes asynchronously.
std::weak_ptr<ServerSession> weak_ptr{SharedFrom(&session)}; session.Open();
requests.emplace([weak_ptr, context{std::move(context)}]() {
if (auto strong_ptr = weak_ptr.lock()) { requests.emplace([session_ptr{&session}, context{std::move(context)}]() {
strong_ptr->CompleteSyncRequest(*context); // Close the reference.
SCOPE_EXIT({ session_ptr->Close(); });
// If the session has been closed, we are done.
if (session_ptr->IsServerClosed()) {
return;
} }
// Complete the service request.
KScopedAutoObject server_session{&session_ptr->GetServerSession()};
server_session->CompleteSyncRequest(*context);
}); });
} }
condition.notify_one(); condition.notify_one();
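On the new side above, the session is opened (session.Open()) before the request is queued and closed again inside the worker lambda through a scope guard, which guarantees the KSession outlives any request still waiting in the queue. The snippet below is a small standalone illustration of that open-before-queue / close-in-worker idea under simplified assumptions: a plain atomic reference count and std::async stand in for KAutoObject and the service thread pool, and all names are hypothetical.

    #include <atomic>
    #include <cstdio>
    #include <future>

    // Minimal intrusive reference count in the spirit of KAutoObject::Open/Close.
    struct RefCounted {
        std::atomic<int> refs{1};
        void Open() { refs.fetch_add(1, std::memory_order_relaxed); }
        void Close() {
            if (refs.fetch_sub(1, std::memory_order_acq_rel) == 1) {
                std::puts("last reference dropped");
            }
        }
    };

    int main() {
        RefCounted session;
        session.Open(); // keep the object alive for the queued work

        auto pending = std::async(std::launch::async, [ptr = &session] {
            std::puts("handling request");
            ptr->Close(); // release the queue's reference once the request completes
        });
        pending.wait();

        session.Close(); // release the creator's reference
    }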
@ -102,7 +111,7 @@ ServiceThread::ServiceThread(KernelCore& kernel, std::size_t num_threads, const
ServiceThread::~ServiceThread() = default; ServiceThread::~ServiceThread() = default;
void ServiceThread::QueueSyncRequest(ServerSession& session, void ServiceThread::QueueSyncRequest(KSession& session,
std::shared_ptr<HLERequestContext>&& context) { std::shared_ptr<HLERequestContext>&& context) {
impl->QueueSyncRequest(session, std::move(context)); impl->QueueSyncRequest(session, std::move(context));
} }
@ -11,14 +11,14 @@ namespace Kernel {
class HLERequestContext; class HLERequestContext;
class KernelCore; class KernelCore;
class ServerSession; class KSession;
class ServiceThread final { class ServiceThread final {
public: public:
explicit ServiceThread(KernelCore& kernel, std::size_t num_threads, const std::string& name); explicit ServiceThread(KernelCore& kernel, std::size_t num_threads, const std::string& name);
~ServiceThread(); ~ServiceThread();
void QueueSyncRequest(ServerSession& session, std::shared_ptr<HLERequestContext>&& context); void QueueSyncRequest(KSession& session, std::shared_ptr<HLERequestContext>&& context);
private: private:
class Impl; class Impl;
@ -1,41 +0,0 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/assert.h"
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"
namespace Kernel {
Session::Session(KernelCore& kernel) : KSynchronizationObject{kernel} {}
Session::~Session() {
// Release reserved resource when the Session pair was created.
kernel.GetSystemResourceLimit()->Release(LimitableResource::Sessions, 1);
}
Session::SessionPair Session::Create(KernelCore& kernel, std::string name) {
// Reserve a new session from the resource limit.
KScopedResourceReservation session_reservation(kernel.GetSystemResourceLimit(),
LimitableResource::Sessions);
ASSERT(session_reservation.Succeeded());
auto session{std::make_shared<Session>(kernel)};
auto client_session{Kernel::ClientSession::Create(kernel, session, name + "_Client").Unwrap()};
auto server_session{Kernel::ServerSession::Create(kernel, session, name + "_Server").Unwrap()};
session->name = std::move(name);
session->client = client_session;
session->server = server_session;
session_reservation.Commit();
return std::make_pair(std::move(client_session), std::move(server_session));
}
bool Session::IsSignaled() const {
UNIMPLEMENTED();
return true;
}
} // namespace Kernel
@ -1,64 +0,0 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <string>
#include <utility>
#include "core/hle/kernel/k_synchronization_object.h"
namespace Kernel {
class ClientSession;
class ServerSession;
/**
* Parent structure to link the client and server endpoints of a session with their associated
* client port.
*/
class Session final : public KSynchronizationObject {
public:
explicit Session(KernelCore& kernel);
~Session() override;
using SessionPair = std::pair<std::shared_ptr<ClientSession>, std::shared_ptr<ServerSession>>;
static SessionPair Create(KernelCore& kernel, std::string name = "Unknown");
std::string GetName() const override {
return name;
}
static constexpr HandleType HANDLE_TYPE = HandleType::Session;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
bool IsSignaled() const override;
void Finalize() override {}
std::shared_ptr<ClientSession> Client() {
if (auto result{client.lock()}) {
return result;
}
return {};
}
std::shared_ptr<ServerSession> Server() {
if (auto result{server.lock()}) {
return result;
}
return {};
}
private:
std::string name;
std::weak_ptr<ClientSession> client;
std::weak_ptr<ServerSession> server;
};
} // namespace Kernel
@ -0,0 +1,148 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <atomic>
#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/intrusive_red_black_tree.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_auto_object_container.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_slab_heap.h"
#include "core/hle/kernel/kernel.h"
namespace Kernel {
template <class Derived>
class KSlabAllocated {
public:
constexpr KSlabAllocated() = default;
size_t GetSlabIndex(KernelCore& kernel) const {
return kernel.SlabHeap<Derived>().GetIndex(static_cast<const Derived*>(this));
}
public:
static void InitializeSlabHeap(KernelCore& kernel, void* memory, size_t memory_size) {
kernel.SlabHeap<Derived>().Initialize(memory, memory_size);
}
static Derived* Allocate(KernelCore& kernel) {
return kernel.SlabHeap<Derived>().Allocate();
}
static void Free(KernelCore& kernel, Derived* obj) {
kernel.SlabHeap<Derived>().Free(obj);
}
static size_t GetObjectSize(KernelCore& kernel) {
return kernel.SlabHeap<Derived>().GetObjectSize();
}
static size_t GetSlabHeapSize(KernelCore& kernel) {
return kernel.SlabHeap<Derived>().GetSlabHeapSize();
}
static size_t GetPeakIndex(KernelCore& kernel) {
return kernel.SlabHeap<Derived>().GetPeakIndex();
}
static uintptr_t GetSlabHeapAddress(KernelCore& kernel) {
return kernel.SlabHeap<Derived>().GetSlabHeapAddress();
}
static size_t GetNumRemaining(KernelCore& kernel) {
return kernel.SlabHeap<Derived>().GetNumRemaining();
}
};
template <typename Derived, typename Base>
class KAutoObjectWithSlabHeapAndContainer : public Base {
static_assert(std::is_base_of<KAutoObjectWithList, Base>::value);
private:
static Derived* Allocate(KernelCore& kernel) {
return kernel.SlabHeap<Derived>().AllocateWithKernel(kernel);
}
static void Free(KernelCore& kernel, Derived* obj) {
kernel.SlabHeap<Derived>().Free(obj);
}
public:
KAutoObjectWithSlabHeapAndContainer(KernelCore& kernel_) : Base(kernel_), kernel(kernel_) {}
virtual ~KAutoObjectWithSlabHeapAndContainer() {}
virtual void Destroy() override {
const bool is_initialized = this->IsInitialized();
uintptr_t arg = 0;
if (is_initialized) {
kernel.ObjectListContainer().Unregister(this);
arg = this->GetPostDestroyArgument();
this->Finalize();
}
Free(kernel, static_cast<Derived*>(this));
if (is_initialized) {
Derived::PostDestroy(arg);
}
}
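Destroy() above pins down the teardown order for slab-backed objects: unregister from the global container, capture the post-destroy argument, run Finalize(), return the memory to the slab, and only then call the static PostDestroy() hook. A compact standalone sketch of that ordering is shown below; Widget and its members are hypothetical stand-ins for illustration only.

    #include <cstdio>

    struct Widget {
        bool initialized = true;

        void Finalize() { std::puts("finalize: release internal resources"); }
        static void PostDestroy(int arg) { std::printf("post-destroy hook, arg=%d\n", arg); }
        int GetPostDestroyArgument() const { return 42; }

        // Mirrors the ordering used by KAutoObjectWithSlabHeapAndContainer::Destroy().
        void Destroy() {
            const bool was_initialized = initialized;
            int arg = 0;
            if (was_initialized) {
                std::puts("unregister from the object list container");
                arg = GetPostDestroyArgument();
                Finalize();
            }
            std::puts("return memory to the slab heap");
            if (was_initialized) {
                PostDestroy(arg); // runs only after the memory has been released
            }
        }
    };

    int main() {
        Widget widget;
        widget.Destroy();
    }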
virtual bool IsInitialized() const {
return true;
}
virtual uintptr_t GetPostDestroyArgument() const {
return 0;
}
size_t GetSlabIndex() const {
return SlabHeap<Derived>(kernel).GetObjectIndex(static_cast<const Derived*>(this));
}
public:
static void InitializeSlabHeap(KernelCore& kernel, void* memory, size_t memory_size) {
kernel.SlabHeap<Derived>().Initialize(memory, memory_size);
kernel.ObjectListContainer().Initialize();
}
static Derived* Create(KernelCore& kernel) {
Derived* obj = Allocate(kernel);
if (obj != nullptr) {
KAutoObject::Create(obj);
}
return obj;
}
static void Register(KernelCore& kernel, Derived* obj) {
return kernel.ObjectListContainer().Register(obj);
}
static size_t GetObjectSize(KernelCore& kernel) {
return kernel.SlabHeap<Derived>().GetObjectSize();
}
static size_t GetSlabHeapSize(KernelCore& kernel) {
return kernel.SlabHeap<Derived>().GetSlabHeapSize();
}
static size_t GetPeakIndex(KernelCore& kernel) {
return kernel.SlabHeap<Derived>().GetPeakIndex();
}
static uintptr_t GetSlabHeapAddress(KernelCore& kernel) {
return kernel.SlabHeap<Derived>().GetSlabHeapAddress();
}
static size_t GetNumRemaining(KernelCore& kernel) {
return kernel.SlabHeap<Derived>().GetNumRemaining();
}
protected:
KernelCore& kernel;
};
} // namespace Kernel
File diff suppressed because it is too large
@ -6,9 +6,24 @@
#include "common/common_types.h" #include "common/common_types.h"
namespace Kernel {
using Handle = u32;
}
namespace Kernel::Svc { namespace Kernel::Svc {
constexpr s32 ArgumentHandleCountMax = 0x40; constexpr s32 ArgumentHandleCountMax = 0x40;
constexpr u32 HandleWaitMask{1u << 30}; constexpr u32 HandleWaitMask{1u << 30};
constexpr inline Handle InvalidHandle = Handle(0);
enum PseudoHandle : Handle {
CurrentThread = 0xFFFF8000,
CurrentProcess = 0xFFFF8001,
};
constexpr bool IsPseudoHandle(Handle handle) {
return handle == PseudoHandle::CurrentProcess || handle == PseudoHandle::CurrentThread;
}
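Pseudo-handles are fixed sentinel values that always denote the calling thread or process rather than an entry in a handle table, and because the check is constexpr it can be exercised at compile time. A quick hypothetical usage sketch, mirroring the constants above in a standalone translation unit:

    #include <cstdint>

    using Handle = uint32_t;

    enum PseudoHandle : Handle {
        CurrentThread = 0xFFFF8000,
        CurrentProcess = 0xFFFF8001,
    };

    constexpr bool IsPseudoHandle(Handle handle) {
        return handle == PseudoHandle::CurrentProcess || handle == PseudoHandle::CurrentThread;
    }

    // The classification can be verified entirely at compile time.
    static_assert(IsPseudoHandle(CurrentThread));
    static_assert(IsPseudoHandle(CurrentProcess));
    static_assert(!IsPseudoHandle(0));

    int main() {}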
} // namespace Kernel::Svc } // namespace Kernel::Svc
@ -10,18 +10,18 @@ namespace Kernel {
// Confirmed Switch kernel error codes // Confirmed Switch kernel error codes
constexpr ResultCode ResultMaxConnectionsReached{ErrorModule::Kernel, 7}; constexpr ResultCode ResultOutOfSessions{ErrorModule::Kernel, 7};
constexpr ResultCode ResultInvalidCapabilityDescriptor{ErrorModule::Kernel, 14}; constexpr ResultCode ResultInvalidArgument{ErrorModule::Kernel, 14};
constexpr ResultCode ResultNoSynchronizationObject{ErrorModule::Kernel, 57}; constexpr ResultCode ResultNoSynchronizationObject{ErrorModule::Kernel, 57};
constexpr ResultCode ResultTerminationRequested{ErrorModule::Kernel, 59}; constexpr ResultCode ResultTerminationRequested{ErrorModule::Kernel, 59};
constexpr ResultCode ResultInvalidSize{ErrorModule::Kernel, 101}; constexpr ResultCode ResultInvalidSize{ErrorModule::Kernel, 101};
constexpr ResultCode ResultInvalidAddress{ErrorModule::Kernel, 102}; constexpr ResultCode ResultInvalidAddress{ErrorModule::Kernel, 102};
constexpr ResultCode ResultOutOfResource{ErrorModule::Kernel, 103}; constexpr ResultCode ResultOutOfResource{ErrorModule::Kernel, 103};
constexpr ResultCode ResultOutOfMemory{ErrorModule::Kernel, 104}; constexpr ResultCode ResultOutOfMemory{ErrorModule::Kernel, 104};
constexpr ResultCode ResultHandleTableFull{ErrorModule::Kernel, 105}; constexpr ResultCode ResultOutOfHandles{ErrorModule::Kernel, 105};
constexpr ResultCode ResultInvalidCurrentMemory{ErrorModule::Kernel, 106}; constexpr ResultCode ResultInvalidCurrentMemory{ErrorModule::Kernel, 106};
constexpr ResultCode ResultInvalidMemoryPermissions{ErrorModule::Kernel, 108}; constexpr ResultCode ResultInvalidNewMemoryPermission{ErrorModule::Kernel, 108};
constexpr ResultCode ResultInvalidMemoryRange{ErrorModule::Kernel, 110}; constexpr ResultCode ResultInvalidMemoryRegion{ErrorModule::Kernel, 110};
constexpr ResultCode ResultInvalidPriority{ErrorModule::Kernel, 112}; constexpr ResultCode ResultInvalidPriority{ErrorModule::Kernel, 112};
constexpr ResultCode ResultInvalidCoreId{ErrorModule::Kernel, 113}; constexpr ResultCode ResultInvalidCoreId{ErrorModule::Kernel, 113};
constexpr ResultCode ResultInvalidHandle{ErrorModule::Kernel, 114}; constexpr ResultCode ResultInvalidHandle{ErrorModule::Kernel, 114};
@ -33,9 +33,11 @@ constexpr ResultCode ResultOutOfRange{ErrorModule::Kernel, 119};
constexpr ResultCode ResultInvalidEnumValue{ErrorModule::Kernel, 120}; constexpr ResultCode ResultInvalidEnumValue{ErrorModule::Kernel, 120};
constexpr ResultCode ResultNotFound{ErrorModule::Kernel, 121}; constexpr ResultCode ResultNotFound{ErrorModule::Kernel, 121};
constexpr ResultCode ResultBusy{ErrorModule::Kernel, 122}; constexpr ResultCode ResultBusy{ErrorModule::Kernel, 122};
constexpr ResultCode ResultSessionClosedByRemote{ErrorModule::Kernel, 123}; constexpr ResultCode ResultSessionClosed{ErrorModule::Kernel, 123};
constexpr ResultCode ResultInvalidState{ErrorModule::Kernel, 125}; constexpr ResultCode ResultInvalidState{ErrorModule::Kernel, 125};
constexpr ResultCode ResultReservedValue{ErrorModule::Kernel, 126}; constexpr ResultCode ResultReservedUsed{ErrorModule::Kernel, 126};
constexpr ResultCode ResultResourceLimitedExceeded{ErrorModule::Kernel, 132}; constexpr ResultCode ResultPortClosed{ErrorModule::Kernel, 131};
constexpr ResultCode ResultLimitReached{ErrorModule::Kernel, 132};
constexpr ResultCode ResultInvalidId{ErrorModule::Kernel, 519};
} // namespace Kernel } // namespace Kernel
@ -154,15 +154,28 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval); FuncReturn(system, retval);
} }
// Used by GetResourceLimitLimitValue.
template <ResultCode func(Core::System&, u64*, Handle, LimitableResource)>
void SvcWrap64(Core::System& system) {
u64 param_1 = 0;
const u32 retval = func(system, &param_1, static_cast<Handle>(Param(system, 1)),
static_cast<LimitableResource>(Param(system, 2)))
.raw;
system.CurrentArmInterface().SetReg(1, param_1);
FuncReturn(system, retval);
}
template <ResultCode func(Core::System&, u32, u64)> template <ResultCode func(Core::System&, u32, u64)>
void SvcWrap64(Core::System& system) { void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1)).raw); FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1)).raw);
} }
template <ResultCode func(Core::System&, u32, u32, u64)> // Used by SetResourceLimitLimitValue
template <ResultCode func(Core::System&, Handle, LimitableResource, u64)>
void SvcWrap64(Core::System& system) { void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), FuncReturn(system, func(system, static_cast<Handle>(Param(system, 0)),
static_cast<u32>(Param(system, 1)), Param(system, 2)) static_cast<LimitableResource>(Param(system, 1)), Param(system, 2))
.raw); .raw);
} }
@ -219,10 +232,11 @@ void SvcWrap64(Core::System& system) {
func(system, Param(system, 0), Param(system, 1), static_cast<u32>(Param(system, 2))).raw); func(system, Param(system, 0), Param(system, 1), static_cast<u32>(Param(system, 2))).raw);
} }
template <ResultCode func(Core::System&, u32, u64, u64, u32)> // Used by MapSharedMemory
template <ResultCode func(Core::System&, Handle, u64, u64, Svc::MemoryPermission)>
void SvcWrap64(Core::System& system) { void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1), FuncReturn(system, func(system, static_cast<Handle>(Param(system, 0)), Param(system, 1),
Param(system, 2), static_cast<u32>(Param(system, 3))) Param(system, 2), static_cast<Svc::MemoryPermission>(Param(system, 3)))
.raw); .raw);
} }
@ -252,11 +266,13 @@ void SvcWrap64(Core::System& system) {
.raw); .raw);
} }
template <ResultCode func(Core::System&, u64*, u64, u64, u64)> // Used by GetInfo
template <ResultCode func(Core::System&, u64*, u64, Handle, u64)>
void SvcWrap64(Core::System& system) { void SvcWrap64(Core::System& system) {
u64 param_1 = 0; u64 param_1 = 0;
const u32 retval = const u32 retval = func(system, &param_1, Param(system, 1),
func(system, &param_1, Param(system, 1), Param(system, 2), Param(system, 3)).raw; static_cast<Handle>(Param(system, 2)), Param(system, 3))
.raw;
system.CurrentArmInterface().SetReg(1, param_1); system.CurrentArmInterface().SetReg(1, param_1);
FuncReturn(system, retval); FuncReturn(system, retval);
@ -273,11 +289,12 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval); FuncReturn(system, retval);
} }
template <ResultCode func(Core::System&, u32*, u64, u64, u32)> // Used by CreateTransferMemory
template <ResultCode func(Core::System&, Handle*, u64, u64, Svc::MemoryPermission)>
void SvcWrap64(Core::System& system) { void SvcWrap64(Core::System& system) {
u32 param_1 = 0; u32 param_1 = 0;
const u32 retval = func(system, &param_1, Param(system, 1), Param(system, 2), const u32 retval = func(system, &param_1, Param(system, 1), Param(system, 2),
static_cast<u32>(Param(system, 3))) static_cast<Svc::MemoryPermission>(Param(system, 3)))
.raw; .raw;
system.CurrentArmInterface().SetReg(1, param_1); system.CurrentArmInterface().SetReg(1, param_1);
@ -537,6 +554,16 @@ void SvcWrap32(Core::System& system) {
FuncReturn(system, retval); FuncReturn(system, retval);
} }
// Used by MapSharedMemory32
template <ResultCode func(Core::System&, Handle, u32, u32, Svc::MemoryPermission)>
void SvcWrap32(Core::System& system) {
const u32 retval = func(system, static_cast<Handle>(Param(system, 0)),
static_cast<u32>(Param(system, 1)), static_cast<u32>(Param(system, 2)),
static_cast<Svc::MemoryPermission>(Param(system, 3)))
.raw;
FuncReturn(system, retval);
}
// Used by SetThreadCoreMask32 // Used by SetThreadCoreMask32
template <ResultCode func(Core::System&, Handle, s32, u32, u32)> template <ResultCode func(Core::System&, Handle, s32, u32, u32)>
void SvcWrap32(Core::System& system) { void SvcWrap32(Core::System& system) {
@ -586,11 +613,12 @@ void SvcWrap32(Core::System& system) {
} }
// Used by CreateTransferMemory32 // Used by CreateTransferMemory32
template <ResultCode func(Core::System&, Handle*, u32, u32, u32)> template <ResultCode func(Core::System&, Handle*, u32, u32, Svc::MemoryPermission)>
void SvcWrap32(Core::System& system) { void SvcWrap32(Core::System& system) {
Handle handle = 0; Handle handle = 0;
const u32 retval = const u32 retval = func(system, &handle, Param32(system, 1), Param32(system, 2),
func(system, &handle, Param32(system, 1), Param32(system, 2), Param32(system, 3)).raw; static_cast<Svc::MemoryPermission>(Param32(system, 3)))
.raw;
system.CurrentArmInterface().SetReg(1, handle); system.CurrentArmInterface().SetReg(1, handle);
FuncReturn(system, retval); FuncReturn(system, retval);
} }
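These wrappers bridge raw guest registers to typed SVC handlers: each specialization reads positional parameters, casts them to the handler's argument types, invokes the handler, and writes the result code and any out-parameters back into registers. Below is a minimal standalone sketch of that shape under simplified assumptions; Context, ResultCode and GetMagicValue are hypothetical names, and a plain array replaces the emulated register file.

    #include <array>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    using u32 = uint32_t;
    using u64 = uint64_t;

    // A fake register file standing in for the guest CPU context.
    struct Context {
        std::array<u64, 8> regs{};
        u64 Param(std::size_t i) const { return regs[i]; }
        void SetReg(std::size_t i, u64 v) { regs[i] = v; }
    };

    struct ResultCode { u32 raw; };

    // Example typed handler: returns a value through an out-parameter.
    ResultCode GetMagicValue(Context&, u64* out, u32 which) {
        *out = (which == 0) ? 0x1234 : 0x5678;
        return ResultCode{0};
    }

    // Wrapper in the spirit of SvcWrap64: unpack registers, call, write results back.
    template <ResultCode func(Context&, u64*, u32)>
    void SvcWrap(Context& ctx) {
        u64 out = 0;
        const u32 retval = func(ctx, &out, static_cast<u32>(ctx.Param(1))).raw;
        ctx.SetReg(1, out);    // out-parameter goes back to register 1
        ctx.SetReg(0, retval); // result code goes back to register 0
    }

    int main() {
        Context ctx;
        ctx.regs[1] = 0; // the guest passes "which" in register 1
        SvcWrap<GetMagicValue>(ctx);
        std::printf("result=%u value=0x%llx\n", static_cast<unsigned>(ctx.regs[0]),
                    static_cast<unsigned long long>(ctx.regs[1]));
    }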
@ -6,7 +6,6 @@
#include "core/core.h" #include "core/core.h"
#include "core/core_timing.h" #include "core/core_timing.h"
#include "core/core_timing_util.h" #include "core/core_timing_util.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h" #include "core/hle/kernel/kernel.h"
@ -15,14 +14,10 @@
namespace Kernel { namespace Kernel {
TimeManager::TimeManager(Core::System& system_) : system{system_} { TimeManager::TimeManager(Core::System& system_) : system{system_} {
time_manager_event_type = Core::Timing::CreateEvent( time_manager_event_type =
"Kernel::TimeManagerCallback", Core::Timing::CreateEvent("Kernel::TimeManagerCallback",
[this](std::uintptr_t thread_handle, std::chrono::nanoseconds) { [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) {
std::shared_ptr<KThread> thread; KThread* thread = reinterpret_cast<KThread*>(thread_handle);
{
std::lock_guard lock{mutex};
thread = SharedFrom<KThread>(reinterpret_cast<KThread*>(thread_handle));
}
thread->Wakeup(); thread->Wakeup();
}); });
} }
@ -8,8 +8,6 @@
#include <mutex> #include <mutex>
#include <unordered_map> #include <unordered_map>
#include "core/hle/kernel/object.h"
namespace Core { namespace Core {
class System; class System;
} // namespace Core } // namespace Core
@ -16,8 +16,8 @@
#include "core/file_sys/control_metadata.h" #include "core/file_sys/control_metadata.h"
#include "core/file_sys/patch_manager.h" #include "core/file_sys/patch_manager.h"
#include "core/hle/ipc_helpers.h" #include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/kernel.h" #include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/service/acc/acc.h" #include "core/hle/service/acc/acc.h"
#include "core/hle/service/acc/acc_aa.h" #include "core/hle/service/acc/acc_aa.h"
#include "core/hle/service/acc/acc_su.h" #include "core/hle/service/acc/acc_su.h"
@ -15,11 +15,11 @@
#include "core/file_sys/savedata_factory.h" #include "core/file_sys/savedata_factory.h"
#include "core/hle/ipc_helpers.h" #include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/k_event.h" #include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_readable_event.h" #include "core/hle/kernel/k_readable_event.h"
#include "core/hle/kernel/k_transfer_memory.h"
#include "core/hle/kernel/k_writable_event.h" #include "core/hle/kernel/k_writable_event.h"
#include "core/hle/kernel/kernel.h" #include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/transfer_memory.h"
#include "core/hle/service/acc/profile_manager.h" #include "core/hle/service/acc/profile_manager.h"
#include "core/hle/service/am/am.h" #include "core/hle/service/am/am.h"
#include "core/hle/service/am/applet_ae.h" #include "core/hle/service/am/applet_ae.h"
@ -42,6 +42,7 @@
#include "core/hle/service/set/set.h" #include "core/hle/service/set/set.h"
#include "core/hle/service/sm/sm.h" #include "core/hle/service/sm/sm.h"
#include "core/hle/service/vi/vi.h" #include "core/hle/service/vi/vi.h"
#include "core/memory.h"
namespace Service::AM { namespace Service::AM {
@ -253,7 +254,8 @@ IDebugFunctions::IDebugFunctions(Core::System& system_)
IDebugFunctions::~IDebugFunctions() = default; IDebugFunctions::~IDebugFunctions() = default;
ISelfController::ISelfController(Core::System& system_, NVFlinger::NVFlinger& nvflinger_) ISelfController::ISelfController(Core::System& system_, NVFlinger::NVFlinger& nvflinger_)
: ServiceFramework{system_, "ISelfController"}, nvflinger{nvflinger_} { : ServiceFramework{system_, "ISelfController"}, nvflinger{nvflinger_},
launchable_event{system.Kernel()}, accumulated_suspended_tick_changed_event{system.Kernel()} {
// clang-format off // clang-format off
static const FunctionInfo functions[] = { static const FunctionInfo functions[] = {
{0, &ISelfController::Exit, "Exit"}, {0, &ISelfController::Exit, "Exit"},
@ -306,19 +308,20 @@ ISelfController::ISelfController(Core::System& system_, NVFlinger::NVFlinger& nv
RegisterHandlers(functions); RegisterHandlers(functions);
auto& kernel = system.Kernel(); Kernel::KAutoObject::Create(std::addressof(launchable_event));
launchable_event = Kernel::KEvent::Create(kernel, "ISelfController:LaunchableEvent");
launchable_event->Initialize(); launchable_event.Initialize("ISelfController:LaunchableEvent");
// This event is created by AM on the first time GetAccumulatedSuspendedTickChangedEvent() is // This event is created by AM on the first time GetAccumulatedSuspendedTickChangedEvent() is
// called. Yuzu can just create it unconditionally, since it doesn't need to support multiple // called. Yuzu can just create it unconditionally, since it doesn't need to support multiple
// ISelfControllers. The event is signaled on creation, and on transition from suspended -> not // ISelfControllers. The event is signaled on creation, and on transition from suspended -> not
// suspended if the event has previously been created by a call to // suspended if the event has previously been created by a call to
// GetAccumulatedSuspendedTickChangedEvent. // GetAccumulatedSuspendedTickChangedEvent.
accumulated_suspended_tick_changed_event =
Kernel::KEvent::Create(kernel, "ISelfController:AccumulatedSuspendedTickChangedEvent"); Kernel::KAutoObject::Create(std::addressof(accumulated_suspended_tick_changed_event));
accumulated_suspended_tick_changed_event->Initialize(); accumulated_suspended_tick_changed_event.Initialize(
accumulated_suspended_tick_changed_event->GetWritableEvent()->Signal(); "ISelfController:AccumulatedSuspendedTickChangedEvent");
accumulated_suspended_tick_changed_event.GetWritableEvent().Signal();
} }
ISelfController::~ISelfController() = default; ISelfController::~ISelfController() = default;
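Throughout this commit, services stop holding std::shared_ptr<Kernel::KEvent> members created through Kernel::KEvent::Create and instead hold KEvent values constructed against the kernel, registered via Kernel::KAutoObject::Create, and then named with Initialize. A minimal sketch of that pattern, using only the KEvent/KAutoObject calls visible in the hunk above; the service name and event label are illustrative, not part of the commit:

// Sketch only: mirrors the post-refactor construction used by ISelfController above.
#include <memory>

#include "core/core.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/kernel.h"

class ExampleService {
public:
    explicit ExampleService(Core::System& system_)
        : system{system_}, example_event{system_.Kernel()} {
        // Register the event with the kernel's auto-object machinery, then name it.
        Kernel::KAutoObject::Create(std::addressof(example_event));
        example_event.Initialize("ExampleService:ExampleEvent");
    }

    void Notify() {
        // The writable side is signalled; the readable side is what gets handed to guests.
        example_event.GetWritableEvent().Signal();
    }

private:
    Core::System& system;
    Kernel::KEvent example_event; // value member instead of std::shared_ptr<Kernel::KEvent>
};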
@ -377,11 +380,11 @@ void ISelfController::LeaveFatalSection(Kernel::HLERequestContext& ctx) {
void ISelfController::GetLibraryAppletLaunchableEvent(Kernel::HLERequestContext& ctx) { void ISelfController::GetLibraryAppletLaunchableEvent(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_AM, "(STUBBED) called"); LOG_WARNING(Service_AM, "(STUBBED) called");
launchable_event->GetWritableEvent()->Signal(); launchable_event.GetWritableEvent().Signal();
IPC::ResponseBuilder rb{ctx, 2, 1}; IPC::ResponseBuilder rb{ctx, 2, 1};
rb.Push(RESULT_SUCCESS); rb.Push(RESULT_SUCCESS);
rb.PushCopyObjects(launchable_event->GetReadableEvent()); rb.PushCopyObjects(launchable_event.GetReadableEvent());
} }
void ISelfController::SetScreenShotPermission(Kernel::HLERequestContext& ctx) { void ISelfController::SetScreenShotPermission(Kernel::HLERequestContext& ctx) {
@ -560,7 +563,7 @@ void ISelfController::GetAccumulatedSuspendedTickChangedEvent(Kernel::HLERequest
IPC::ResponseBuilder rb{ctx, 2, 1}; IPC::ResponseBuilder rb{ctx, 2, 1};
rb.Push(RESULT_SUCCESS); rb.Push(RESULT_SUCCESS);
rb.PushCopyObjects(accumulated_suspended_tick_changed_event->GetReadableEvent()); rb.PushCopyObjects(accumulated_suspended_tick_changed_event.GetReadableEvent());
} }
void ISelfController::SetAlbumImageTakenNotificationEnabled(Kernel::HLERequestContext& ctx) { void ISelfController::SetAlbumImageTakenNotificationEnabled(Kernel::HLERequestContext& ctx) {
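The reply shape in the event getters above repeats across this diff: a ResponseBuilder sized for the normal response words plus one copy handle, with the handle now pushed as a KReadableEvent reference rather than a shared_ptr. A sketch of one such handler, assuming the HLERequestContext/ResponseBuilder interfaces already used above; the handler and member names are illustrative:

// Sketch of the reply pattern used by the event getters in this commit.
void ExampleService::GetExampleEvent(Kernel::HLERequestContext& ctx) {
    LOG_DEBUG(Service_AM, "called");

    // Two normal response words plus one copy handle, matching the calls above.
    IPC::ResponseBuilder rb{ctx, 2, 1};
    rb.Push(RESULT_SUCCESS);
    rb.PushCopyObjects(example_event.GetReadableEvent());
}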
@ -578,39 +581,40 @@ void ISelfController::SetAlbumImageTakenNotificationEnabled(Kernel::HLERequestCo
rb.Push(RESULT_SUCCESS); rb.Push(RESULT_SUCCESS);
} }
AppletMessageQueue::AppletMessageQueue(Kernel::KernelCore& kernel) { AppletMessageQueue::AppletMessageQueue(Kernel::KernelCore& kernel)
on_new_message = Kernel::KEvent::Create(kernel, "AMMessageQueue:OnMessageReceived"); : on_new_message{kernel}, on_operation_mode_changed{kernel} {
on_new_message->Initialize();
on_operation_mode_changed = Kernel::KAutoObject::Create(std::addressof(on_new_message));
Kernel::KEvent::Create(kernel, "AMMessageQueue:OperationModeChanged"); Kernel::KAutoObject::Create(std::addressof(on_operation_mode_changed));
on_operation_mode_changed->Initialize();
on_new_message.Initialize("AMMessageQueue:OnMessageReceived");
on_operation_mode_changed.Initialize("AMMessageQueue:OperationModeChanged");
} }
AppletMessageQueue::~AppletMessageQueue() = default; AppletMessageQueue::~AppletMessageQueue() = default;
const std::shared_ptr<Kernel::KReadableEvent>& AppletMessageQueue::GetMessageReceiveEvent() const { Kernel::KReadableEvent& AppletMessageQueue::GetMessageReceiveEvent() {
return on_new_message->GetReadableEvent(); return on_new_message.GetReadableEvent();
} }
const std::shared_ptr<Kernel::KReadableEvent>& AppletMessageQueue::GetOperationModeChangedEvent() Kernel::KReadableEvent& AppletMessageQueue::GetOperationModeChangedEvent() {
const { return on_operation_mode_changed.GetReadableEvent();
return on_operation_mode_changed->GetReadableEvent();
} }
void AppletMessageQueue::PushMessage(AppletMessage msg) { void AppletMessageQueue::PushMessage(AppletMessage msg) {
messages.push(msg); messages.push(msg);
on_new_message->GetWritableEvent()->Signal(); on_new_message.GetWritableEvent().Signal();
} }
AppletMessageQueue::AppletMessage AppletMessageQueue::PopMessage() { AppletMessageQueue::AppletMessage AppletMessageQueue::PopMessage() {
if (messages.empty()) { if (messages.empty()) {
on_new_message->GetWritableEvent()->Clear(); on_new_message.GetWritableEvent().Clear();
return AppletMessage::NoMessage; return AppletMessage::NoMessage;
} }
auto msg = messages.front(); auto msg = messages.front();
messages.pop(); messages.pop();
if (messages.empty()) { if (messages.empty()) {
on_new_message->GetWritableEvent()->Clear(); on_new_message.GetWritableEvent().Clear();
} }
return msg; return msg;
} }
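Because the old and new columns are interleaved above, the new AppletMessageQueue constructor is hard to read in one piece. Reassembled from the right-hand (new) side of the hunk only, nothing added beyond what the diff shows:

AppletMessageQueue::AppletMessageQueue(Kernel::KernelCore& kernel)
    : on_new_message{kernel}, on_operation_mode_changed{kernel} {
    Kernel::KAutoObject::Create(std::addressof(on_new_message));
    Kernel::KAutoObject::Create(std::addressof(on_operation_mode_changed));

    on_new_message.Initialize("AMMessageQueue:OnMessageReceived");
    on_operation_mode_changed.Initialize("AMMessageQueue:OperationModeChanged");
}

PushMessage then signals the writable side of on_new_message, and PopMessage clears it once the queue drains, so the readable event observed by the guest tracks whether messages are pending.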
@ -630,7 +634,7 @@ void AppletMessageQueue::FocusStateChanged() {
void AppletMessageQueue::OperationModeChanged() { void AppletMessageQueue::OperationModeChanged() {
PushMessage(AppletMessage::OperationModeChanged); PushMessage(AppletMessage::OperationModeChanged);
PushMessage(AppletMessage::PerformanceModeChanged); PushMessage(AppletMessage::PerformanceModeChanged);
on_operation_mode_changed->GetWritableEvent()->Signal(); on_operation_mode_changed.GetWritableEvent().Signal();
} }
ICommonStateGetter::ICommonStateGetter(Core::System& system_, ICommonStateGetter::ICommonStateGetter(Core::System& system_,
@ -927,11 +931,9 @@ private:
void GetAppletStateChangedEvent(Kernel::HLERequestContext& ctx) { void GetAppletStateChangedEvent(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_AM, "called"); LOG_DEBUG(Service_AM, "called");
const auto event = applet->GetBroker().GetStateChangedEvent();
IPC::ResponseBuilder rb{ctx, 2, 1}; IPC::ResponseBuilder rb{ctx, 2, 1};
rb.Push(RESULT_SUCCESS); rb.Push(RESULT_SUCCESS);
rb.PushCopyObjects(event); rb.PushCopyObjects(applet->GetBroker().GetStateChangedEvent());
} }
void IsCompleted(Kernel::HLERequestContext& ctx) { void IsCompleted(Kernel::HLERequestContext& ctx) {
@ -1213,16 +1215,16 @@ void ILibraryAppletCreator::CreateTransferMemoryStorage(Kernel::HLERequestContex
} }
auto transfer_mem = auto transfer_mem =
system.CurrentProcess()->GetHandleTable().Get<Kernel::TransferMemory>(handle); system.CurrentProcess()->GetHandleTable().GetObject<Kernel::KTransferMemory>(handle);
if (transfer_mem == nullptr) { if (transfer_mem.IsNull()) {
LOG_ERROR(Service_AM, "transfer_mem is a nullptr for handle={:08X}", handle); LOG_ERROR(Service_AM, "transfer_mem is a nullptr for handle={:08X}", handle);
IPC::ResponseBuilder rb{ctx, 2}; IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_UNKNOWN); rb.Push(RESULT_UNKNOWN);
return; return;
} }
const u8* const mem_begin = transfer_mem->GetPointer(); const u8* const mem_begin = system.Memory().GetPointer(transfer_mem->GetSourceAddress());
const u8* const mem_end = mem_begin + transfer_mem->GetSize(); const u8* const mem_end = mem_begin + transfer_mem->GetSize();
std::vector<u8> memory{mem_begin, mem_end}; std::vector<u8> memory{mem_begin, mem_end};
@ -1247,16 +1249,16 @@ void ILibraryAppletCreator::CreateHandleStorage(Kernel::HLERequestContext& ctx)
} }
auto transfer_mem = auto transfer_mem =
system.CurrentProcess()->GetHandleTable().Get<Kernel::TransferMemory>(handle); system.CurrentProcess()->GetHandleTable().GetObject<Kernel::KTransferMemory>(handle);
if (transfer_mem == nullptr) { if (transfer_mem.IsNull()) {
LOG_ERROR(Service_AM, "transfer_mem is a nullptr for handle={:08X}", handle); LOG_ERROR(Service_AM, "transfer_mem is a nullptr for handle={:08X}", handle);
IPC::ResponseBuilder rb{ctx, 2}; IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_UNKNOWN); rb.Push(RESULT_UNKNOWN);
return; return;
} }
const u8* const mem_begin = transfer_mem->GetPointer(); const u8* const mem_begin = system.Memory().GetPointer(transfer_mem->GetSourceAddress());
const u8* const mem_end = mem_begin + transfer_mem->GetSize(); const u8* const mem_end = mem_begin + transfer_mem->GetSize();
std::vector<u8> memory{mem_begin, mem_end}; std::vector<u8> memory{mem_begin, mem_end};
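Both storage-creation paths above switch from HandleTable::Get<Kernel::TransferMemory> (a shared_ptr checked against nullptr and read through the object's own GetPointer) to GetObject<Kernel::KTransferMemory> (checked with IsNull and read out of guest memory at the object's source address). A sketch of the new lookup in isolation, assuming only the handle-table and memory accessors shown above; the free-function wrapper is illustrative:

// Sketch: resolve a transfer-memory handle and copy its bytes, post-refactor style.
std::vector<u8> ReadTransferMemory(Core::System& system, Kernel::Handle handle) {
    auto transfer_mem =
        system.CurrentProcess()->GetHandleTable().GetObject<Kernel::KTransferMemory>(handle);
    if (transfer_mem.IsNull()) {
        return {}; // handle did not resolve to a KTransferMemory
    }

    // The bytes now live in guest memory at the object's source address rather than
    // behind a pointer owned by the kernel object itself.
    const u8* const mem_begin = system.Memory().GetPointer(transfer_mem->GetSourceAddress());
    const u8* const mem_end = mem_begin + transfer_mem->GetSize();
    return std::vector<u8>(mem_begin, mem_end);
}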
@ -1266,7 +1268,9 @@ void ILibraryAppletCreator::CreateHandleStorage(Kernel::HLERequestContext& ctx)
} }
IApplicationFunctions::IApplicationFunctions(Core::System& system_) IApplicationFunctions::IApplicationFunctions(Core::System& system_)
: ServiceFramework{system_, "IApplicationFunctions"} { : ServiceFramework{system_, "IApplicationFunctions"}, gpu_error_detected_event{system.Kernel()},
friend_invitation_storage_channel_event{system.Kernel()},
health_warning_disappeared_system_event{system.Kernel()} {
// clang-format off // clang-format off
static const FunctionInfo functions[] = { static const FunctionInfo functions[] = {
{1, &IApplicationFunctions::PopLaunchParameter, "PopLaunchParameter"}, {1, &IApplicationFunctions::PopLaunchParameter, "PopLaunchParameter"},
@ -1334,16 +1338,15 @@ IApplicationFunctions::IApplicationFunctions(Core::System& system_)
RegisterHandlers(functions); RegisterHandlers(functions);
auto& kernel = system.Kernel(); Kernel::KAutoObject::Create(std::addressof(gpu_error_detected_event));
gpu_error_detected_event = Kernel::KAutoObject::Create(std::addressof(friend_invitation_storage_channel_event));
Kernel::KEvent::Create(kernel, "IApplicationFunctions:GpuErrorDetectedSystemEvent"); Kernel::KAutoObject::Create(std::addressof(health_warning_disappeared_system_event));
gpu_error_detected_event->Initialize();
friend_invitation_storage_channel_event = gpu_error_detected_event.Initialize("IApplicationFunctions:GpuErrorDetectedSystemEvent");
Kernel::KEvent::Create(kernel, "IApplicationFunctions:FriendInvitationStorageChannelEvent"); friend_invitation_storage_channel_event.Initialize(
friend_invitation_storage_channel_event->Initialize(); "IApplicationFunctions:FriendInvitationStorageChannelEvent");
health_warning_disappeared_system_event = health_warning_disappeared_system_event.Initialize(
Kernel::KEvent::Create(kernel, "IApplicationFunctions:HealthWarningDisappearedSystemEvent"); "IApplicationFunctions:HealthWarningDisappearedSystemEvent");
health_warning_disappeared_system_event->Initialize();
} }
IApplicationFunctions::~IApplicationFunctions() = default; IApplicationFunctions::~IApplicationFunctions() = default;
@ -1740,7 +1743,7 @@ void IApplicationFunctions::GetGpuErrorDetectedSystemEvent(Kernel::HLERequestCon
IPC::ResponseBuilder rb{ctx, 2, 1}; IPC::ResponseBuilder rb{ctx, 2, 1};
rb.Push(RESULT_SUCCESS); rb.Push(RESULT_SUCCESS);
rb.PushCopyObjects(gpu_error_detected_event->GetReadableEvent()); rb.PushCopyObjects(gpu_error_detected_event.GetReadableEvent());
} }
void IApplicationFunctions::GetFriendInvitationStorageChannelEvent(Kernel::HLERequestContext& ctx) { void IApplicationFunctions::GetFriendInvitationStorageChannelEvent(Kernel::HLERequestContext& ctx) {
@ -1748,7 +1751,7 @@ void IApplicationFunctions::GetFriendInvitationStorageChannelEvent(Kernel::HLERe
IPC::ResponseBuilder rb{ctx, 2, 1}; IPC::ResponseBuilder rb{ctx, 2, 1};
rb.Push(RESULT_SUCCESS); rb.Push(RESULT_SUCCESS);
rb.PushCopyObjects(friend_invitation_storage_channel_event->GetReadableEvent()); rb.PushCopyObjects(friend_invitation_storage_channel_event.GetReadableEvent());
} }
void IApplicationFunctions::TryPopFromFriendInvitationStorageChannel( void IApplicationFunctions::TryPopFromFriendInvitationStorageChannel(
@ -1764,7 +1767,7 @@ void IApplicationFunctions::GetHealthWarningDisappearedSystemEvent(Kernel::HLERe
IPC::ResponseBuilder rb{ctx, 2, 1}; IPC::ResponseBuilder rb{ctx, 2, 1};
rb.Push(RESULT_SUCCESS); rb.Push(RESULT_SUCCESS);
rb.PushCopyObjects(health_warning_disappeared_system_event->GetReadableEvent()); rb.PushCopyObjects(health_warning_disappeared_system_event.GetReadableEvent());
} }
void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger& nvflinger, void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger& nvflinger,
@ -1782,7 +1785,8 @@ void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger
} }
IHomeMenuFunctions::IHomeMenuFunctions(Core::System& system_) IHomeMenuFunctions::IHomeMenuFunctions(Core::System& system_)
: ServiceFramework{system_, "IHomeMenuFunctions"} { : ServiceFramework{system_, "IHomeMenuFunctions"}, pop_from_general_channel_event{
system.Kernel()} {
// clang-format off // clang-format off
static const FunctionInfo functions[] = { static const FunctionInfo functions[] = {
{10, &IHomeMenuFunctions::RequestToGetForeground, "RequestToGetForeground"}, {10, &IHomeMenuFunctions::RequestToGetForeground, "RequestToGetForeground"},
@ -1803,9 +1807,8 @@ IHomeMenuFunctions::IHomeMenuFunctions(Core::System& system_)
RegisterHandlers(functions); RegisterHandlers(functions);
pop_from_general_channel_event = Kernel::KAutoObject::Create(std::addressof(pop_from_general_channel_event));
Kernel::KEvent::Create(system.Kernel(), "IHomeMenuFunctions:PopFromGeneralChannelEvent"); pop_from_general_channel_event.Initialize("IHomeMenuFunctions:PopFromGeneralChannelEvent");
pop_from_general_channel_event->Initialize();
} }
IHomeMenuFunctions::~IHomeMenuFunctions() = default; IHomeMenuFunctions::~IHomeMenuFunctions() = default;
@ -1822,7 +1825,7 @@ void IHomeMenuFunctions::GetPopFromGeneralChannelEvent(Kernel::HLERequestContext
IPC::ResponseBuilder rb{ctx, 2, 1}; IPC::ResponseBuilder rb{ctx, 2, 1};
rb.Push(RESULT_SUCCESS); rb.Push(RESULT_SUCCESS);
rb.PushCopyObjects(pop_from_general_channel_event->GetReadableEvent()); rb.PushCopyObjects(pop_from_general_channel_event.GetReadableEvent());
} }
IGlobalStateController::IGlobalStateController(Core::System& system_) IGlobalStateController::IGlobalStateController(Core::System& system_)

@ -8,12 +8,12 @@
#include <memory> #include <memory>
#include <queue> #include <queue>
#include "core/hle/kernel/k_event.h"
#include "core/hle/service/service.h" #include "core/hle/service/service.h"
namespace Kernel { namespace Kernel {
class KernelCore; class KernelCore;
class KEvent; class KTransferMemory;
class TransferMemory;
} // namespace Kernel } // namespace Kernel
namespace Service::NVFlinger { namespace Service::NVFlinger {
@ -56,8 +56,8 @@ public:
explicit AppletMessageQueue(Kernel::KernelCore& kernel); explicit AppletMessageQueue(Kernel::KernelCore& kernel);
~AppletMessageQueue(); ~AppletMessageQueue();
const std::shared_ptr<Kernel::KReadableEvent>& GetMessageReceiveEvent() const; Kernel::KReadableEvent& GetMessageReceiveEvent();
const std::shared_ptr<Kernel::KReadableEvent>& GetOperationModeChangedEvent() const; Kernel::KReadableEvent& GetOperationModeChangedEvent();
void PushMessage(AppletMessage msg); void PushMessage(AppletMessage msg);
AppletMessage PopMessage(); AppletMessage PopMessage();
std::size_t GetMessageCount() const; std::size_t GetMessageCount() const;
@ -67,8 +67,8 @@ public:
private: private:
std::queue<AppletMessage> messages; std::queue<AppletMessage> messages;
std::shared_ptr<Kernel::KEvent> on_new_message; Kernel::KEvent on_new_message;
std::shared_ptr<Kernel::KEvent> on_operation_mode_changed; Kernel::KEvent on_operation_mode_changed;
}; };
class IWindowController final : public ServiceFramework<IWindowController> { class IWindowController final : public ServiceFramework<IWindowController> {
@ -156,8 +156,8 @@ private:
}; };
NVFlinger::NVFlinger& nvflinger; NVFlinger::NVFlinger& nvflinger;
std::shared_ptr<Kernel::KEvent> launchable_event; Kernel::KEvent launchable_event;
std::shared_ptr<Kernel::KEvent> accumulated_suspended_tick_changed_event; Kernel::KEvent accumulated_suspended_tick_changed_event;
u32 idle_time_detection_extension = 0; u32 idle_time_detection_extension = 0;
u64 num_fatal_sections_entered = 0; u64 num_fatal_sections_entered = 0;
@ -300,9 +300,9 @@ private:
bool launch_popped_application_specific = false; bool launch_popped_application_specific = false;
bool launch_popped_account_preselect = false; bool launch_popped_account_preselect = false;
s32 previous_program_index{-1}; s32 previous_program_index{-1};
std::shared_ptr<Kernel::KEvent> gpu_error_detected_event; Kernel::KEvent gpu_error_detected_event;
std::shared_ptr<Kernel::KEvent> friend_invitation_storage_channel_event; Kernel::KEvent friend_invitation_storage_channel_event;
std::shared_ptr<Kernel::KEvent> health_warning_disappeared_system_event; Kernel::KEvent health_warning_disappeared_system_event;
}; };
class IHomeMenuFunctions final : public ServiceFramework<IHomeMenuFunctions> { class IHomeMenuFunctions final : public ServiceFramework<IHomeMenuFunctions> {
@ -314,7 +314,7 @@ private:
void RequestToGetForeground(Kernel::HLERequestContext& ctx); void RequestToGetForeground(Kernel::HLERequestContext& ctx);
void GetPopFromGeneralChannelEvent(Kernel::HLERequestContext& ctx); void GetPopFromGeneralChannelEvent(Kernel::HLERequestContext& ctx);
std::shared_ptr<Kernel::KEvent> pop_from_general_channel_event; Kernel::KEvent pop_from_general_channel_event;
}; };
class IGlobalStateController final : public ServiceFramework<IGlobalStateController> { class IGlobalStateController final : public ServiceFramework<IGlobalStateController> {
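On the header side the same migration appears as value members and reference-returning accessors: k_event.h must now be included for the complete Kernel::KEvent type, while KTransferMemory only needs a forward declaration. A minimal sketch of such a declaration, assuming the types used in the hunks above; the class and member names are illustrative:

// Sketch of the post-refactor header shape; ExampleQueue is not part of the commit.
#include <queue>

#include "core/hle/kernel/k_event.h"

namespace Kernel {
class KernelCore;
class KReadableEvent;
} // namespace Kernel

class ExampleQueue {
public:
    explicit ExampleQueue(Kernel::KernelCore& kernel);

    // A reference into the owned event, not const std::shared_ptr<Kernel::KReadableEvent>&.
    Kernel::KReadableEvent& GetReceiveEvent();

private:
    Kernel::KEvent on_new_message; // complete type required, hence the include above
};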

@ -12,10 +12,8 @@
#include "core/frontend/applets/profile_select.h" #include "core/frontend/applets/profile_select.h"
#include "core/frontend/applets/software_keyboard.h" #include "core/frontend/applets/software_keyboard.h"
#include "core/frontend/applets/web_browser.h" #include "core/frontend/applets/web_browser.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_readable_event.h" #include "core/hle/kernel/k_readable_event.h"
#include "core/hle/kernel/k_writable_event.h" #include "core/hle/kernel/k_writable_event.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/service/am/am.h" #include "core/hle/service/am/am.h"
#include "core/hle/service/am/applet_ae.h" #include "core/hle/service/am/applet_ae.h"
#include "core/hle/service/am/applet_oe.h" #include "core/hle/service/am/applet_oe.h"
@ -31,16 +29,16 @@
namespace Service::AM::Applets { namespace Service::AM::Applets {
AppletDataBroker::AppletDataBroker(Core::System& system_, LibraryAppletMode applet_mode_) AppletDataBroker::AppletDataBroker(Core::System& system_, LibraryAppletMode applet_mode_)
: system{system_}, applet_mode{applet_mode_} { : system{system_}, applet_mode{applet_mode_}, state_changed_event{system.Kernel()},
state_changed_event = pop_out_data_event{system.Kernel()}, pop_interactive_out_data_event{system.Kernel()} {
Kernel::KEvent::Create(system.Kernel(), "ILibraryAppletAccessor:StateChangedEvent");
state_changed_event->Initialize(); Kernel::KAutoObject::Create(std::addressof(state_changed_event));
pop_out_data_event = Kernel::KAutoObject::Create(std::addressof(pop_out_data_event));
Kernel::KEvent::Create(system.Kernel(), "ILibraryAppletAccessor:PopDataOutEvent"); Kernel::KAutoObject::Create(std::addressof(pop_interactive_out_data_event));
pop_out_data_event->Initialize();
pop_interactive_out_data_event = Kernel::KEvent::Create( state_changed_event.Initialize("ILibraryAppletAccessor:StateChangedEvent");
system.Kernel(), "ILibraryAppletAccessor:PopInteractiveDataOutEvent"); pop_out_data_event.Initialize("ILibraryAppletAccessor:PopDataOutEvent");
pop_interactive_out_data_event->Initialize(); pop_interactive_out_data_event.Initialize("ILibraryAppletAccessor:PopInteractiveDataOutEvent");
} }
AppletDataBroker::~AppletDataBroker() = default; AppletDataBroker::~AppletDataBroker() = default;
@ -67,7 +65,7 @@ std::shared_ptr<IStorage> AppletDataBroker::PopNormalDataToGame() {
auto out = std::move(out_channel.front()); auto out = std::move(out_channel.front());
out_channel.pop_front(); out_channel.pop_front();
pop_out_data_event->GetWritableEvent()->Clear(); pop_out_data_event.GetWritableEvent().Clear();
return out; return out;
} }
@ -86,7 +84,7 @@ std::shared_ptr<IStorage> AppletDataBroker::PopInteractiveDataToGame() {
auto out = std::move(out_interactive_channel.front()); auto out = std::move(out_interactive_channel.front());
out_interactive_channel.pop_front(); out_interactive_channel.pop_front();
pop_interactive_out_data_event->GetWritableEvent()->Clear(); pop_interactive_out_data_event.GetWritableEvent().Clear();
return out; return out;
} }
@ -105,7 +103,7 @@ void AppletDataBroker::PushNormalDataFromGame(std::shared_ptr<IStorage>&& storag
void AppletDataBroker::PushNormalDataFromApplet(std::shared_ptr<IStorage>&& storage) { void AppletDataBroker::PushNormalDataFromApplet(std::shared_ptr<IStorage>&& storage) {
out_channel.emplace_back(std::move(storage)); out_channel.emplace_back(std::move(storage));
pop_out_data_event->GetWritableEvent()->Signal(); pop_out_data_event.GetWritableEvent().Signal();
} }
void AppletDataBroker::PushInteractiveDataFromGame(std::shared_ptr<IStorage>&& storage) { void AppletDataBroker::PushInteractiveDataFromGame(std::shared_ptr<IStorage>&& storage) {
@ -114,11 +112,11 @@ void AppletDataBroker::PushInteractiveDataFromGame(std::shared_ptr<IStorage>&& s
void AppletDataBroker::PushInteractiveDataFromApplet(std::shared_ptr<IStorage>&& storage) { void AppletDataBroker::PushInteractiveDataFromApplet(std::shared_ptr<IStorage>&& storage) {
out_interactive_channel.emplace_back(std::move(storage)); out_interactive_channel.emplace_back(std::move(storage));
pop_interactive_out_data_event->GetWritableEvent()->Signal(); pop_interactive_out_data_event.GetWritableEvent().Signal();
} }
void AppletDataBroker::SignalStateChanged() const { void AppletDataBroker::SignalStateChanged() {
state_changed_event->GetWritableEvent()->Signal(); state_changed_event.GetWritableEvent().Signal();
switch (applet_mode) { switch (applet_mode) {
case LibraryAppletMode::AllForeground: case LibraryAppletMode::AllForeground:
@ -142,16 +140,16 @@ void AppletDataBroker::SignalStateChanged() const {
} }
} }
std::shared_ptr<Kernel::KReadableEvent> AppletDataBroker::GetNormalDataEvent() const { Kernel::KReadableEvent& AppletDataBroker::GetNormalDataEvent() {
return pop_out_data_event->GetReadableEvent(); return pop_out_data_event.GetReadableEvent();
} }
std::shared_ptr<Kernel::KReadableEvent> AppletDataBroker::GetInteractiveDataEvent() const { Kernel::KReadableEvent& AppletDataBroker::GetInteractiveDataEvent() {
return pop_interactive_out_data_event->GetReadableEvent(); return pop_interactive_out_data_event.GetReadableEvent();
} }
std::shared_ptr<Kernel::KReadableEvent> AppletDataBroker::GetStateChangedEvent() const { Kernel::KReadableEvent& AppletDataBroker::GetStateChangedEvent() {
return state_changed_event->GetReadableEvent(); return state_changed_event.GetReadableEvent();
} }
Applet::Applet(Core::System& system_, LibraryAppletMode applet_mode_) Applet::Applet(Core::System& system_, LibraryAppletMode applet_mode_)

@ -8,7 +8,7 @@
#include <queue> #include <queue>
#include "common/swap.h" #include "common/swap.h"
#include "core/hle/kernel/object.h" #include "core/hle/kernel/k_event.h"
union ResultCode; union ResultCode;
@ -95,11 +95,11 @@ public:
void PushInteractiveDataFromGame(std::shared_ptr<IStorage>&& storage); void PushInteractiveDataFromGame(std::shared_ptr<IStorage>&& storage);
void PushInteractiveDataFromApplet(std::shared_ptr<IStorage>&& storage); void PushInteractiveDataFromApplet(std::shared_ptr<IStorage>&& storage);
void SignalStateChanged() const; void SignalStateChanged();
std::shared_ptr<Kernel::KReadableEvent> GetNormalDataEvent() const; Kernel::KReadableEvent& GetNormalDataEvent();
std::shared_ptr<Kernel::KReadableEvent> GetInteractiveDataEvent() const; Kernel::KReadableEvent& GetInteractiveDataEvent();
std::shared_ptr<Kernel::KReadableEvent> GetStateChangedEvent() const; Kernel::KReadableEvent& GetStateChangedEvent();
private: private:
Core::System& system; Core::System& system;
@ -119,13 +119,13 @@ private:
// PopInteractiveDataToGame and PushInteractiveDataFromApplet // PopInteractiveDataToGame and PushInteractiveDataFromApplet
std::deque<std::shared_ptr<IStorage>> out_interactive_channel; std::deque<std::shared_ptr<IStorage>> out_interactive_channel;
std::shared_ptr<Kernel::KEvent> state_changed_event; Kernel::KEvent state_changed_event;
// Signaled on PushNormalDataFromApplet // Signaled on PushNormalDataFromApplet
std::shared_ptr<Kernel::KEvent> pop_out_data_event; Kernel::KEvent pop_out_data_event;
// Signaled on PushInteractiveDataFromApplet // Signaled on PushInteractiveDataFromApplet
std::shared_ptr<Kernel::KEvent> pop_interactive_out_data_event; Kernel::KEvent pop_interactive_out_data_event;
}; };
class Applet { class Applet {

@ -9,7 +9,7 @@
#include "common/string_util.h" #include "common/string_util.h"
#include "core/core.h" #include "core/core.h"
#include "core/frontend/applets/error.h" #include "core/frontend/applets/error.h"
#include "core/hle/kernel/process.h" #include "core/hle/kernel/k_process.h"
#include "core/hle/service/am/am.h" #include "core/hle/service/am/am.h"
#include "core/hle/service/am/applets/error.h" #include "core/hle/service/am/applets/error.h"
#include "core/reporter.h" #include "core/reporter.h"

@ -9,7 +9,7 @@
#include "common/logging/log.h" #include "common/logging/log.h"
#include "core/core.h" #include "core/core.h"
#include "core/frontend/applets/general_frontend.h" #include "core/frontend/applets/general_frontend.h"
#include "core/hle/kernel/process.h" #include "core/hle/kernel/k_process.h"
#include "core/hle/result.h" #include "core/hle/result.h"
#include "core/hle/service/am/am.h" #include "core/hle/service/am/am.h"
#include "core/hle/service/am/applets/general_backend.h" #include "core/hle/service/am/applets/general_backend.h"

@ -17,7 +17,7 @@
#include "core/file_sys/system_archive/system_archive.h" #include "core/file_sys/system_archive/system_archive.h"
#include "core/file_sys/vfs_vector.h" #include "core/file_sys/vfs_vector.h"
#include "core/frontend/applets/web_browser.h" #include "core/frontend/applets/web_browser.h"
#include "core/hle/kernel/process.h" #include "core/hle/kernel/k_process.h"
#include "core/hle/result.h" #include "core/hle/result.h"
#include "core/hle/service/am/am.h" #include "core/hle/service/am/am.h"
#include "core/hle/service/am/applets/web_browser.h" #include "core/hle/service/am/applets/web_browser.h"

@ -16,10 +16,9 @@
#include "core/file_sys/patch_manager.h" #include "core/file_sys/patch_manager.h"
#include "core/file_sys/registered_cache.h" #include "core/file_sys/registered_cache.h"
#include "core/hle/ipc_helpers.h" #include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/k_event.h" #include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_readable_event.h" #include "core/hle/kernel/k_readable_event.h"
#include "core/hle/kernel/kernel.h" #include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/service/aoc/aoc_u.h" #include "core/hle/service/aoc/aoc_u.h"
#include "core/loader/loader.h" #include "core/loader/loader.h"
@ -50,7 +49,7 @@ static std::vector<u64> AccumulateAOCTitleIDs(Core::System& system) {
class IPurchaseEventManager final : public ServiceFramework<IPurchaseEventManager> { class IPurchaseEventManager final : public ServiceFramework<IPurchaseEventManager> {
public: public:
explicit IPurchaseEventManager(Core::System& system_) explicit IPurchaseEventManager(Core::System& system_)
: ServiceFramework{system_, "IPurchaseEventManager"} { : ServiceFramework{system_, "IPurchaseEventManager"}, purchased_event{system.Kernel()} {
// clang-format off // clang-format off
static const FunctionInfo functions[] = { static const FunctionInfo functions[] = {
{0, &IPurchaseEventManager::SetDefaultDeliveryTarget, "SetDefaultDeliveryTarget"}, {0, &IPurchaseEventManager::SetDefaultDeliveryTarget, "SetDefaultDeliveryTarget"},
@ -63,9 +62,8 @@ public:
RegisterHandlers(functions); RegisterHandlers(functions);
purchased_event = Kernel::KAutoObject::Create(std::addressof(purchased_event));
Kernel::KEvent::Create(system.Kernel(), "IPurchaseEventManager:PurchasedEvent"); purchased_event.Initialize("IPurchaseEventManager:PurchasedEvent");
purchased_event->Initialize();
} }
private: private:
@ -98,14 +96,15 @@ private:
IPC::ResponseBuilder rb{ctx, 2, 1}; IPC::ResponseBuilder rb{ctx, 2, 1};
rb.Push(RESULT_SUCCESS); rb.Push(RESULT_SUCCESS);
rb.PushCopyObjects(purchased_event->GetReadableEvent()); rb.PushCopyObjects(purchased_event.GetReadableEvent());
} }
std::shared_ptr<Kernel::KEvent> purchased_event; Kernel::KEvent purchased_event;
}; };
AOC_U::AOC_U(Core::System& system_) AOC_U::AOC_U(Core::System& system_)
: ServiceFramework{system_, "aoc:u"}, add_on_content{AccumulateAOCTitleIDs(system)} { : ServiceFramework{system_, "aoc:u"}, add_on_content{AccumulateAOCTitleIDs(system)},
aoc_change_event{system.Kernel()} {
// clang-format off // clang-format off
static const FunctionInfo functions[] = { static const FunctionInfo functions[] = {
{0, nullptr, "CountAddOnContentByApplicationId"}, {0, nullptr, "CountAddOnContentByApplicationId"},
@ -127,9 +126,8 @@ AOC_U::AOC_U(Core::System& system_)
RegisterHandlers(functions); RegisterHandlers(functions);
auto& kernel = system.Kernel(); Kernel::KAutoObject::Create(std::addressof(aoc_change_event));
aoc_change_event = Kernel::KEvent::Create(kernel, "GetAddOnContentListChanged:Event"); aoc_change_event.Initialize("GetAddOnContentListChanged:Event");
aoc_change_event->Initialize();
} }
AOC_U::~AOC_U() = default; AOC_U::~AOC_U() = default;
@ -256,7 +254,7 @@ void AOC_U::GetAddOnContentListChangedEvent(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2, 1}; IPC::ResponseBuilder rb{ctx, 2, 1};
rb.Push(RESULT_SUCCESS); rb.Push(RESULT_SUCCESS);
rb.PushCopyObjects(aoc_change_event->GetReadableEvent()); rb.PushCopyObjects(aoc_change_event.GetReadableEvent());
} }
void AOC_U::CreateEcPurchasedEventManager(Kernel::HLERequestContext& ctx) { void AOC_U::CreateEcPurchasedEventManager(Kernel::HLERequestContext& ctx) {

@ -4,6 +4,7 @@
#pragma once #pragma once
#include "core/hle/kernel/k_event.h"
#include "core/hle/service/service.h" #include "core/hle/service/service.h"
namespace Core { namespace Core {
@ -31,7 +32,7 @@ private:
void CreatePermanentEcPurchasedEventManager(Kernel::HLERequestContext& ctx); void CreatePermanentEcPurchasedEventManager(Kernel::HLERequestContext& ctx);
std::vector<u64> add_on_content; std::vector<u64> add_on_content;
std::shared_ptr<Kernel::KEvent> aoc_change_event; Kernel::KEvent aoc_change_event;
}; };
/// Registers all AOC services with the specified service manager. /// Registers all AOC services with the specified service manager.

@ -43,9 +43,9 @@ class IAudioOut final : public ServiceFramework<IAudioOut> {
public: public:
IAudioOut(Core::System& system_, AudoutParams audio_params_, AudioCore::AudioOut& audio_core_, IAudioOut(Core::System& system_, AudoutParams audio_params_, AudioCore::AudioOut& audio_core_,
std::string&& device_name_, std::string&& unique_name) std::string&& device_name_, std::string&& unique_name)
: ServiceFramework{system_, "IAudioOut"}, audio_core{audio_core_}, : ServiceFramework{system_, "IAudioOut"}, audio_core{audio_core_}, device_name{std::move(
device_name{std::move(device_name_)}, audio_params{audio_params_}, main_memory{ device_name_)},
system.Memory()} { audio_params{audio_params_}, buffer_event{system.Kernel()}, main_memory{system.Memory()} {
// clang-format off // clang-format off
static const FunctionInfo functions[] = { static const FunctionInfo functions[] = {
{0, &IAudioOut::GetAudioOutState, "GetAudioOutState"}, {0, &IAudioOut::GetAudioOutState, "GetAudioOutState"},
@ -67,13 +67,13 @@ public:
RegisterHandlers(functions); RegisterHandlers(functions);
// This is the event handle used to check if the audio buffer was released // This is the event handle used to check if the audio buffer was released
buffer_event = Kernel::KEvent::Create(system.Kernel(), "IAudioOutBufferReleased"); Kernel::KAutoObject::Create(std::addressof(buffer_event));
buffer_event->Initialize(); buffer_event.Initialize("IAudioOutBufferReleased");
stream = audio_core.OpenStream(system.CoreTiming(), audio_params.sample_rate, stream = audio_core.OpenStream(system.CoreTiming(), audio_params.sample_rate,
audio_params.channel_count, std::move(unique_name), [this] { audio_params.channel_count, std::move(unique_name), [this] {
const auto guard = LockService(); const auto guard = LockService();
buffer_event->GetWritableEvent()->Signal(); buffer_event.GetWritableEvent().Signal();
}); });
} }
@ -126,7 +126,7 @@ private:
IPC::ResponseBuilder rb{ctx, 2, 1}; IPC::ResponseBuilder rb{ctx, 2, 1};
rb.Push(RESULT_SUCCESS); rb.Push(RESULT_SUCCESS);
rb.PushCopyObjects(buffer_event->GetReadableEvent()); rb.PushCopyObjects(buffer_event.GetReadableEvent());
} }
void AppendAudioOutBufferImpl(Kernel::HLERequestContext& ctx) { void AppendAudioOutBufferImpl(Kernel::HLERequestContext& ctx) {
@ -220,7 +220,7 @@ private:
[[maybe_unused]] AudoutParams audio_params{}; [[maybe_unused]] AudoutParams audio_params{};
/// This is the event handle used to check if the audio buffer was released /// This is the event handle used to check if the audio buffer was released
std::shared_ptr<Kernel::KEvent> buffer_event; Kernel::KEvent buffer_event;
Core::Memory::Memory& main_memory; Core::Memory::Memory& main_memory;
}; };
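The interleaving also obscures how IAudioOut wires its release notification: the stream-release callback takes the service lock and then signals the writable side of the event, whose readable side the getter above pushes back to the guest. Reassembled from the new side of the hunks above; only the surrounding declarations are assumed:

// Reassembled new-side sketch of the IAudioOut wiring shown above.
Kernel::KAutoObject::Create(std::addressof(buffer_event));
buffer_event.Initialize("IAudioOutBufferReleased");

stream = audio_core.OpenStream(system.CoreTiming(), audio_params.sample_rate,
                               audio_params.channel_count, std::move(unique_name), [this] {
                                   // The callback takes the service lock before signalling
                                   // the buffer-released event.
                                   const auto guard = LockService();
                                   buffer_event.GetWritableEvent().Signal();
                               });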

@ -30,7 +30,7 @@ public:
explicit IAudioRenderer(Core::System& system_, explicit IAudioRenderer(Core::System& system_,
const AudioCommon::AudioRendererParameter& audren_params, const AudioCommon::AudioRendererParameter& audren_params,
const std::size_t instance_number) const std::size_t instance_number)
: ServiceFramework{system_, "IAudioRenderer"} { : ServiceFramework{system_, "IAudioRenderer"}, system_event{system.Kernel()} {
// clang-format off // clang-format off
static const FunctionInfo functions[] = { static const FunctionInfo functions[] = {
{0, &IAudioRenderer::GetSampleRate, "GetSampleRate"}, {0, &IAudioRenderer::GetSampleRate, "GetSampleRate"},
@ -49,13 +49,13 @@ public:
// clang-format on // clang-format on
RegisterHandlers(functions); RegisterHandlers(functions);
system_event = Kernel::KEvent::Create(system.Kernel(), "IAudioRenderer:SystemEvent"); Kernel::KAutoObject::Create(std::addressof(system_event));
system_event->Initialize(); system_event.Initialize("IAudioRenderer:SystemEvent");
renderer = std::make_unique<AudioCore::AudioRenderer>( renderer = std::make_unique<AudioCore::AudioRenderer>(
system.CoreTiming(), system.Memory(), audren_params, system.CoreTiming(), system.Memory(), audren_params,
[this]() { [this]() {
const auto guard = LockService(); const auto guard = LockService();
system_event->GetWritableEvent()->Signal(); system_event.GetWritableEvent().Signal();
}, },
instance_number); instance_number);
} }
@ -128,7 +128,7 @@ private:
IPC::ResponseBuilder rb{ctx, 2, 1}; IPC::ResponseBuilder rb{ctx, 2, 1};
rb.Push(RESULT_SUCCESS); rb.Push(RESULT_SUCCESS);
rb.PushCopyObjects(system_event->GetReadableEvent()); rb.PushCopyObjects(system_event.GetReadableEvent());
} }
void SetRenderingTimeLimit(Kernel::HLERequestContext& ctx) { void SetRenderingTimeLimit(Kernel::HLERequestContext& ctx) {
@ -162,7 +162,7 @@ private:
rb.Push(ERR_NOT_SUPPORTED); rb.Push(ERR_NOT_SUPPORTED);
} }
std::shared_ptr<Kernel::KEvent> system_event; Kernel::KEvent system_event;
std::unique_ptr<AudioCore::AudioRenderer> renderer; std::unique_ptr<AudioCore::AudioRenderer> renderer;
u32 rendering_time_limit_percent = 100; u32 rendering_time_limit_percent = 100;
}; };
@ -170,7 +170,9 @@ private:
class IAudioDevice final : public ServiceFramework<IAudioDevice> { class IAudioDevice final : public ServiceFramework<IAudioDevice> {
public: public:
explicit IAudioDevice(Core::System& system_, u32_le revision_num) explicit IAudioDevice(Core::System& system_, u32_le revision_num)
: ServiceFramework{system_, "IAudioDevice"}, revision{revision_num} { : ServiceFramework{system_, "IAudioDevice"}, revision{revision_num},
buffer_event{system.Kernel()}, audio_input_device_switch_event{system.Kernel()},
audio_output_device_switch_event{system.Kernel()} {
static const FunctionInfo functions[] = { static const FunctionInfo functions[] = {
{0, &IAudioDevice::ListAudioDeviceName, "ListAudioDeviceName"}, {0, &IAudioDevice::ListAudioDeviceName, "ListAudioDeviceName"},
{1, &IAudioDevice::SetAudioDeviceOutputVolume, "SetAudioDeviceOutputVolume"}, {1, &IAudioDevice::SetAudioDeviceOutputVolume, "SetAudioDeviceOutputVolume"},
@ -188,20 +190,17 @@ public:
}; };
RegisterHandlers(functions); RegisterHandlers(functions);
auto& kernel = system.Kernel(); Kernel::KAutoObject::Create(std::addressof(buffer_event));
buffer_event = Kernel::KEvent::Create(kernel, "IAudioOutBufferReleasedEvent"); buffer_event.Initialize("IAudioOutBufferReleasedEvent");
buffer_event->Initialize();
// Should be similar to audio_output_device_switch_event // Should be similar to audio_output_device_switch_event
audio_input_device_switch_event = Kernel::KAutoObject::Create(std::addressof(audio_input_device_switch_event));
Kernel::KEvent::Create(kernel, "IAudioDevice:AudioInputDeviceSwitchedEvent"); audio_input_device_switch_event.Initialize("IAudioDevice:AudioInputDeviceSwitchedEvent");
audio_input_device_switch_event->Initialize();
// Should only be signalled when an audio output device has been changed, example: speaker // Should only be signalled when an audio output device has been changed, example: speaker
// to headset // to headset
audio_output_device_switch_event = Kernel::KAutoObject::Create(std::addressof(audio_output_device_switch_event));
Kernel::KEvent::Create(kernel, "IAudioDevice:AudioOutputDeviceSwitchedEvent"); audio_output_device_switch_event.Initialize("IAudioDevice:AudioOutputDeviceSwitchedEvent");
audio_output_device_switch_event->Initialize();
} }
private: private:
@ -290,11 +289,11 @@ private:
void QueryAudioDeviceSystemEvent(Kernel::HLERequestContext& ctx) { void QueryAudioDeviceSystemEvent(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_Audio, "(STUBBED) called"); LOG_WARNING(Service_Audio, "(STUBBED) called");
buffer_event->GetWritableEvent()->Signal(); buffer_event.GetWritableEvent().Signal();
IPC::ResponseBuilder rb{ctx, 2, 1}; IPC::ResponseBuilder rb{ctx, 2, 1};
rb.Push(RESULT_SUCCESS); rb.Push(RESULT_SUCCESS);
rb.PushCopyObjects(buffer_event->GetReadableEvent()); rb.PushCopyObjects(buffer_event.GetReadableEvent());
} }
void GetActiveChannelCount(Kernel::HLERequestContext& ctx) { void GetActiveChannelCount(Kernel::HLERequestContext& ctx) {
@ -311,7 +310,7 @@ private:
IPC::ResponseBuilder rb{ctx, 2, 1}; IPC::ResponseBuilder rb{ctx, 2, 1};
rb.Push(RESULT_SUCCESS); rb.Push(RESULT_SUCCESS);
rb.PushCopyObjects(audio_input_device_switch_event->GetReadableEvent()); rb.PushCopyObjects(audio_input_device_switch_event.GetReadableEvent());
} }
void QueryAudioDeviceOutputEvent(Kernel::HLERequestContext& ctx) { void QueryAudioDeviceOutputEvent(Kernel::HLERequestContext& ctx) {
@ -319,13 +318,13 @@ private:
IPC::ResponseBuilder rb{ctx, 2, 1}; IPC::ResponseBuilder rb{ctx, 2, 1};
rb.Push(RESULT_SUCCESS); rb.Push(RESULT_SUCCESS);
rb.PushCopyObjects(audio_output_device_switch_event->GetReadableEvent()); rb.PushCopyObjects(audio_output_device_switch_event.GetReadableEvent());
} }
u32_le revision = 0; u32_le revision = 0;
std::shared_ptr<Kernel::KEvent> buffer_event; Kernel::KEvent buffer_event;
std::shared_ptr<Kernel::KEvent> audio_input_device_switch_event; Kernel::KEvent audio_input_device_switch_event;
std::shared_ptr<Kernel::KEvent> audio_output_device_switch_event; Kernel::KEvent audio_output_device_switch_event;
}; // namespace Audio }; // namespace Audio
