commit b8f5c71f2d
@@ -1 +1 @@
-Subproject commit f6ae9e1c3311b747b7b91fd903c62bf40b3b9c88
+Subproject commit 57b987c185ae6677861cbf781f08ed1649b0543e
@@ -0,0 +1,52 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#ifdef _WIN32
#include <windows.h>
#else
#include <stdio.h>
#include <sys/mman.h>
#include <sys/types.h>
#if defined __APPLE__ || defined __FreeBSD__ || defined __OpenBSD__
#include <sys/sysctl.h>
#elif defined __HAIKU__
#include <OS.h>
#else
#include <sys/sysinfo.h>
#endif
#endif

#include "common/assert.h"
#include "common/virtual_buffer.h"

namespace Common {

void* AllocateMemoryPages(std::size_t size) {
#ifdef _WIN32
    void* base{VirtualAlloc(nullptr, size, MEM_COMMIT, PAGE_READWRITE)};
#else
    void* base{mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0)};

    if (base == MAP_FAILED) {
        base = nullptr;
    }
#endif

    ASSERT(base);

    return base;
}

void FreeMemoryPages(void* base, std::size_t size) {
    if (!base) {
        return;
    }
#ifdef _WIN32
    ASSERT(VirtualFree(base, 0, MEM_RELEASE));
#else
    ASSERT(munmap(base, size) == 0);
#endif
}

} // namespace Common
@@ -0,0 +1,58 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/common_funcs.h"

namespace Common {

void* AllocateMemoryPages(std::size_t size);
void FreeMemoryPages(void* base, std::size_t size);

template <typename T>
class VirtualBuffer final : NonCopyable {
public:
    constexpr VirtualBuffer() = default;
    explicit VirtualBuffer(std::size_t count) : alloc_size{count * sizeof(T)} {
        base_ptr = reinterpret_cast<T*>(AllocateMemoryPages(alloc_size));
    }

    ~VirtualBuffer() {
        FreeMemoryPages(base_ptr, alloc_size);
    }

    void resize(std::size_t count) {
        FreeMemoryPages(base_ptr, alloc_size);

        alloc_size = count * sizeof(T);
        base_ptr = reinterpret_cast<T*>(AllocateMemoryPages(alloc_size));
    }

    constexpr const T& operator[](std::size_t index) const {
        return base_ptr[index];
    }

    constexpr T& operator[](std::size_t index) {
        return base_ptr[index];
    }

    constexpr T* data() {
        return base_ptr;
    }

    constexpr const T* data() const {
        return base_ptr;
    }

    constexpr std::size_t size() const {
        return alloc_size / sizeof(T);
    }

private:
    std::size_t alloc_size{};
    T* base_ptr{};
};

} // namespace Common
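A minimal usage sketch for Common::VirtualBuffer (not part of the commit; the element type and sizes are illustrative):

// Hypothetical usage, assuming the two files above are built in the same target.
#include "common/virtual_buffer.h"

void ExampleVirtualBuffer() {
    // Reserve 64 KiB of zero-initialized, page-backed storage.
    Common::VirtualBuffer<u8> buffer(0x10000);
    buffer[0] = 0xFF;      // operator[] is unchecked, like a raw array
    const u8* raw = buffer.data();
    (void)raw;
    // Note: resize() frees and re-allocates the backing pages, so prior
    // contents are discarded rather than preserved.
    buffer.resize(0x20000);
}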
@@ -0,0 +1,15 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "core/core.h"
#include "core/device_memory.h"
#include "core/memory.h"

namespace Core {

DeviceMemory::DeviceMemory(System& system) : buffer{DramMemoryMap::Size}, system{system} {}

DeviceMemory::~DeviceMemory() = default;

} // namespace Core
@@ -0,0 +1,51 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/virtual_buffer.h"

namespace Core {

class System;

namespace DramMemoryMap {
enum : u64 {
    Base = 0x80000000ULL,
    Size = 0x100000000ULL,
    End = Base + Size,
    KernelReserveBase = Base + 0x60000,
    SlabHeapBase = KernelReserveBase + 0x85000,
    SlabHeapSize = 0xa21000,
    SlabHeapEnd = SlabHeapBase + SlabHeapSize,
};
} // namespace DramMemoryMap

class DeviceMemory : NonCopyable {
public:
    explicit DeviceMemory(Core::System& system);
    ~DeviceMemory();

    template <typename T>
    PAddr GetPhysicalAddr(const T* ptr) const {
        return (reinterpret_cast<uintptr_t>(ptr) - reinterpret_cast<uintptr_t>(buffer.data())) +
               DramMemoryMap::Base;
    }

    u8* GetPointer(PAddr addr) {
        return buffer.data() + (addr - DramMemoryMap::Base);
    }

    const u8* GetPointer(PAddr addr) const {
        return buffer.data() + (addr - DramMemoryMap::Base);
    }

private:
    Common::VirtualBuffer<u8> buffer;
    Core::System& system;
};

} // namespace Core
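A minimal round-trip sketch for DeviceMemory (not part of the commit; it assumes an already-constructed instance). GetPointer() translates an emulated physical address into a host pointer inside the backing VirtualBuffer, and GetPhysicalAddr() is its inverse:

// Hypothetical usage, assuming the header above.
#include "core/device_memory.h"

void ExampleDeviceMemory(Core::DeviceMemory& device_memory) {
    const PAddr paddr{Core::DramMemoryMap::KernelReserveBase};
    u8* host_ptr = device_memory.GetPointer(paddr);
    // The two accessors invert each other over the DRAM window.
    ASSERT(device_memory.GetPhysicalAddr(host_ptr) == paddr);
}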
@@ -0,0 +1,118 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphère, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphère-NX.

#include <array>

#include "common/assert.h"
#include "core/hle/kernel/memory/address_space_info.h"

namespace Kernel::Memory {

namespace {

enum : u64 {
    Size_1_MB = 0x100000,
    Size_2_MB = 2 * Size_1_MB,
    Size_128_MB = 128 * Size_1_MB,
    Size_1_GB = 0x40000000,
    Size_2_GB = 2 * Size_1_GB,
    Size_4_GB = 4 * Size_1_GB,
    Size_6_GB = 6 * Size_1_GB,
    Size_64_GB = 64 * Size_1_GB,
    Size_512_GB = 512 * Size_1_GB,
    Invalid = std::numeric_limits<u64>::max(),
};

// clang-format off
constexpr std::array<AddressSpaceInfo, 13> AddressSpaceInfos{{
    { 32 /*bit_width*/, Size_2_MB /*addr*/, Size_1_GB - Size_2_MB /*size*/, AddressSpaceInfo::Type::Is32Bit, },
    { 32 /*bit_width*/, Size_1_GB /*addr*/, Size_4_GB - Size_1_GB /*size*/, AddressSpaceInfo::Type::Small64Bit, },
    { 32 /*bit_width*/, Invalid /*addr*/, Size_1_GB /*size*/, AddressSpaceInfo::Type::Heap, },
    { 32 /*bit_width*/, Invalid /*addr*/, Size_1_GB /*size*/, AddressSpaceInfo::Type::Alias, },
    { 36 /*bit_width*/, Size_128_MB /*addr*/, Size_2_GB - Size_128_MB /*size*/, AddressSpaceInfo::Type::Is32Bit, },
    { 36 /*bit_width*/, Size_2_GB /*addr*/, Size_64_GB - Size_2_GB /*size*/, AddressSpaceInfo::Type::Small64Bit, },
    { 36 /*bit_width*/, Invalid /*addr*/, Size_6_GB /*size*/, AddressSpaceInfo::Type::Heap, },
    { 36 /*bit_width*/, Invalid /*addr*/, Size_6_GB /*size*/, AddressSpaceInfo::Type::Alias, },
    { 39 /*bit_width*/, Size_128_MB /*addr*/, Size_512_GB - Size_128_MB /*size*/, AddressSpaceInfo::Type::Large64Bit, },
    { 39 /*bit_width*/, Invalid /*addr*/, Size_64_GB /*size*/, AddressSpaceInfo::Type::Is32Bit },
    { 39 /*bit_width*/, Invalid /*addr*/, Size_6_GB /*size*/, AddressSpaceInfo::Type::Heap, },
    { 39 /*bit_width*/, Invalid /*addr*/, Size_64_GB /*size*/, AddressSpaceInfo::Type::Alias, },
    { 39 /*bit_width*/, Invalid /*addr*/, Size_2_GB /*size*/, AddressSpaceInfo::Type::Stack, },
}};
// clang-format on

constexpr bool IsAllowedIndexForAddress(std::size_t index) {
    return index < std::size(AddressSpaceInfos) && AddressSpaceInfos[index].GetAddress() != Invalid;
}

constexpr std::size_t
    AddressSpaceIndices32Bit[static_cast<std::size_t>(AddressSpaceInfo::Type::Count)]{
        0, 1, 0, 2, 0, 3,
    };

constexpr std::size_t
    AddressSpaceIndices36Bit[static_cast<std::size_t>(AddressSpaceInfo::Type::Count)]{
        4, 5, 4, 6, 4, 7,
    };

constexpr std::size_t
    AddressSpaceIndices39Bit[static_cast<std::size_t>(AddressSpaceInfo::Type::Count)]{
        9, 8, 8, 10, 12, 11,
    };

constexpr bool IsAllowed32BitType(AddressSpaceInfo::Type type) {
    return type < AddressSpaceInfo::Type::Count && type != AddressSpaceInfo::Type::Large64Bit &&
           type != AddressSpaceInfo::Type::Stack;
}

constexpr bool IsAllowed36BitType(AddressSpaceInfo::Type type) {
    return type < AddressSpaceInfo::Type::Count && type != AddressSpaceInfo::Type::Large64Bit &&
           type != AddressSpaceInfo::Type::Stack;
}

constexpr bool IsAllowed39BitType(AddressSpaceInfo::Type type) {
    return type < AddressSpaceInfo::Type::Count && type != AddressSpaceInfo::Type::Small64Bit;
}

} // namespace

u64 AddressSpaceInfo::GetAddressSpaceStart(std::size_t width, AddressSpaceInfo::Type type) {
    const std::size_t index{static_cast<std::size_t>(type)};
    switch (width) {
    case 32:
        ASSERT(IsAllowed32BitType(type));
        ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices32Bit[index]));
        return AddressSpaceInfos[AddressSpaceIndices32Bit[index]].GetAddress();
    case 36:
        ASSERT(IsAllowed36BitType(type));
        ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices36Bit[index]));
        return AddressSpaceInfos[AddressSpaceIndices36Bit[index]].GetAddress();
    case 39:
        ASSERT(IsAllowed39BitType(type));
        ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices39Bit[index]));
        return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].GetAddress();
    }
    UNREACHABLE();
}

std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, AddressSpaceInfo::Type type) {
    const std::size_t index{static_cast<std::size_t>(type)};
    switch (width) {
    case 32:
        ASSERT(IsAllowed32BitType(type));
        return AddressSpaceInfos[AddressSpaceIndices32Bit[index]].GetSize();
    case 36:
        ASSERT(IsAllowed36BitType(type));
        return AddressSpaceInfos[AddressSpaceIndices36Bit[index]].GetSize();
    case 39:
        ASSERT(IsAllowed39BitType(type));
        return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].GetSize();
    }
    UNREACHABLE();
}

} // namespace Kernel::Memory
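A lookup sketch for the table above (not part of the commit). The per-width index arrays map a Type to a row; regions with an Invalid base only have a meaningful size, since their placement is decided at map time:

// Hypothetical usage, assuming the headers above.
#include "core/hle/kernel/memory/address_space_info.h"

void ExampleAddressSpaceInfo() {
    using Kernel::Memory::AddressSpaceInfo;
    // For a 39-bit process, Type::Large64Bit resolves to row index 8:
    // base 128 MiB, size 512 GiB - 128 MiB.
    const u64 start{
        AddressSpaceInfo::GetAddressSpaceStart(39, AddressSpaceInfo::Type::Large64Bit)};
    const std::size_t size{
        AddressSpaceInfo::GetAddressSpaceSize(39, AddressSpaceInfo::Type::Large64Bit)};
    // Heap/stack/alias rows carry Invalid addresses, so only their size is queried.
    const std::size_t heap_size{
        AddressSpaceInfo::GetAddressSpaceSize(39, AddressSpaceInfo::Type::Heap)};
    (void)start;
    (void)size;
    (void)heap_size;
}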
@@ -0,0 +1,54 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphère, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphère-NX.

#pragma once

#include "common/common_funcs.h"
#include "common/common_types.h"

namespace Kernel::Memory {

class AddressSpaceInfo final : NonCopyable {
public:
    enum class Type : u32 {
        Is32Bit = 0,
        Small64Bit = 1,
        Large64Bit = 2,
        Heap = 3,
        Stack = 4,
        Alias = 5,
        Count,
    };

private:
    std::size_t bit_width{};
    std::size_t addr{};
    std::size_t size{};
    Type type{};

public:
    static u64 GetAddressSpaceStart(std::size_t width, Type type);
    static std::size_t GetAddressSpaceSize(std::size_t width, Type type);

    constexpr AddressSpaceInfo(std::size_t bit_width, std::size_t addr, std::size_t size, Type type)
        : bit_width{bit_width}, addr{addr}, size{size}, type{type} {}

    constexpr std::size_t GetWidth() const {
        return bit_width;
    }
    constexpr std::size_t GetAddress() const {
        return addr;
    }
    constexpr std::size_t GetSize() const {
        return size;
    }
    constexpr Type GetType() const {
        return type;
    }
};

} // namespace Kernel::Memory
@@ -0,0 +1,318 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphère, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphère-NX.

#pragma once

#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "core/hle/kernel/memory/memory_types.h"
#include "core/hle/kernel/svc_types.h"

namespace Kernel::Memory {

enum class MemoryState : u32 {
    None = 0,
    Mask = 0xFFFFFFFF, // TODO(bunnei): This should probably be 0xFF
    All = ~None,

    FlagCanReprotect = (1 << 8),
    FlagCanDebug = (1 << 9),
    FlagCanUseIpc = (1 << 10),
    FlagCanUseNonDeviceIpc = (1 << 11),
    FlagCanUseNonSecureIpc = (1 << 12),
    FlagMapped = (1 << 13),
    FlagCode = (1 << 14),
    FlagCanAlias = (1 << 15),
    FlagCanCodeAlias = (1 << 16),
    FlagCanTransfer = (1 << 17),
    FlagCanQueryPhysical = (1 << 18),
    FlagCanDeviceMap = (1 << 19),
    FlagCanAlignedDeviceMap = (1 << 20),
    FlagCanIpcUserBuffer = (1 << 21),
    FlagReferenceCounted = (1 << 22),
    FlagCanMapProcess = (1 << 23),
    FlagCanChangeAttribute = (1 << 24),
    FlagCanCodeMemory = (1 << 25),

    FlagsData = FlagCanReprotect | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc |
                FlagMapped | FlagCanAlias | FlagCanTransfer | FlagCanQueryPhysical |
                FlagCanDeviceMap | FlagCanAlignedDeviceMap | FlagCanIpcUserBuffer |
                FlagReferenceCounted | FlagCanChangeAttribute,

    FlagsCode = FlagCanDebug | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc |
                FlagMapped | FlagCode | FlagCanQueryPhysical | FlagCanDeviceMap |
                FlagCanAlignedDeviceMap | FlagReferenceCounted,

    FlagsMisc = FlagMapped | FlagReferenceCounted | FlagCanQueryPhysical | FlagCanDeviceMap,

    Free = static_cast<u32>(Svc::MemoryState::Free),
    Io = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped,
    Static = static_cast<u32>(Svc::MemoryState::Static) | FlagMapped | FlagCanQueryPhysical,
    Code = static_cast<u32>(Svc::MemoryState::Code) | FlagsCode | FlagCanMapProcess,
    CodeData = static_cast<u32>(Svc::MemoryState::CodeData) | FlagsData | FlagCanMapProcess |
               FlagCanCodeMemory,
    Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | FlagReferenceCounted,
    Normal = static_cast<u32>(Svc::MemoryState::Normal) | FlagsData | FlagCanCodeMemory,

    AliasCode = static_cast<u32>(Svc::MemoryState::AliasCode) | FlagsCode | FlagCanMapProcess |
                FlagCanCodeAlias,
    AliasCodeData = static_cast<u32>(Svc::MemoryState::AliasCodeData) | FlagsData |
                    FlagCanMapProcess | FlagCanCodeAlias | FlagCanCodeMemory,

    Ipc = static_cast<u32>(Svc::MemoryState::Ipc) | FlagsMisc | FlagCanAlignedDeviceMap |
          FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,

    Stack = static_cast<u32>(Svc::MemoryState::Stack) | FlagsMisc | FlagCanAlignedDeviceMap |
            FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,

    ThreadLocal =
        static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagReferenceCounted,

    Transfered = static_cast<u32>(Svc::MemoryState::Transfered) | FlagsMisc |
                 FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc |
                 FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,

    SharedTransfered = static_cast<u32>(Svc::MemoryState::SharedTransfered) | FlagsMisc |
                       FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,

    SharedCode = static_cast<u32>(Svc::MemoryState::SharedCode) | FlagMapped |
                 FlagReferenceCounted | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,

    Inaccessible = static_cast<u32>(Svc::MemoryState::Inaccessible),

    NonSecureIpc = static_cast<u32>(Svc::MemoryState::NonSecureIpc) | FlagsMisc |
                   FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,

    NonDeviceIpc =
        static_cast<u32>(Svc::MemoryState::NonDeviceIpc) | FlagsMisc | FlagCanUseNonDeviceIpc,

    Kernel = static_cast<u32>(Svc::MemoryState::Kernel) | FlagMapped,

    GeneratedCode = static_cast<u32>(Svc::MemoryState::GeneratedCode) | FlagMapped |
                    FlagReferenceCounted | FlagCanDebug,
    CodeOut = static_cast<u32>(Svc::MemoryState::CodeOut) | FlagMapped | FlagReferenceCounted,
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryState);

static_assert(static_cast<u32>(MemoryState::Free) == 0x00000000);
static_assert(static_cast<u32>(MemoryState::Io) == 0x00002001);
static_assert(static_cast<u32>(MemoryState::Static) == 0x00042002);
static_assert(static_cast<u32>(MemoryState::Code) == 0x00DC7E03);
static_assert(static_cast<u32>(MemoryState::CodeData) == 0x03FEBD04);
static_assert(static_cast<u32>(MemoryState::Normal) == 0x037EBD05);
static_assert(static_cast<u32>(MemoryState::Shared) == 0x00402006);
static_assert(static_cast<u32>(MemoryState::AliasCode) == 0x00DD7E08);
static_assert(static_cast<u32>(MemoryState::AliasCodeData) == 0x03FFBD09);
static_assert(static_cast<u32>(MemoryState::Ipc) == 0x005C3C0A);
static_assert(static_cast<u32>(MemoryState::Stack) == 0x005C3C0B);
static_assert(static_cast<u32>(MemoryState::ThreadLocal) == 0x0040200C);
static_assert(static_cast<u32>(MemoryState::Transfered) == 0x015C3C0D);
static_assert(static_cast<u32>(MemoryState::SharedTransfered) == 0x005C380E);
static_assert(static_cast<u32>(MemoryState::SharedCode) == 0x0040380F);
static_assert(static_cast<u32>(MemoryState::Inaccessible) == 0x00000010);
static_assert(static_cast<u32>(MemoryState::NonSecureIpc) == 0x005C3811);
static_assert(static_cast<u32>(MemoryState::NonDeviceIpc) == 0x004C2812);
static_assert(static_cast<u32>(MemoryState::Kernel) == 0x00002013);
static_assert(static_cast<u32>(MemoryState::GeneratedCode) == 0x00402214);
static_assert(static_cast<u32>(MemoryState::CodeOut) == 0x00402015);

enum class MemoryPermission : u8 {
    None = 0,
    Mask = static_cast<u8>(~None),

    Read = 1 << 0,
    Write = 1 << 1,
    Execute = 1 << 2,

    ReadAndWrite = Read | Write,
    ReadAndExecute = Read | Execute,

    UserMask = static_cast<u8>(Svc::MemoryPermission::Read | Svc::MemoryPermission::Write |
                               Svc::MemoryPermission::Execute),
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryPermission);

enum class MemoryAttribute : u8 {
    None = 0x00,
    Mask = 0x7F,
    All = Mask,
    DontCareMask = 0x80,

    Locked = static_cast<u8>(Svc::MemoryAttribute::Locked),
    IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked),
    DeviceShared = static_cast<u8>(Svc::MemoryAttribute::DeviceShared),
    Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached),

    IpcAndDeviceMapped = IpcLocked | DeviceShared,
    LockedAndIpcLocked = Locked | IpcLocked,
    DeviceSharedAndUncached = DeviceShared | Uncached
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryAttribute);

static_assert((static_cast<u8>(MemoryAttribute::Mask) &
               static_cast<u8>(MemoryAttribute::DontCareMask)) == 0);

struct MemoryInfo {
    VAddr addr{};
    std::size_t size{};
    MemoryState state{};
    MemoryPermission perm{};
    MemoryAttribute attribute{};
    MemoryPermission original_perm{};
    u16 ipc_lock_count{};
    u16 device_use_count{};

    constexpr Svc::MemoryInfo GetSvcMemoryInfo() const {
        return {
            addr,
            size,
            static_cast<Svc::MemoryState>(state & MemoryState::Mask),
            static_cast<Svc::MemoryAttribute>(attribute & MemoryAttribute::Mask),
            static_cast<Svc::MemoryPermission>(perm & MemoryPermission::UserMask),
            ipc_lock_count,
            device_use_count,
        };
    }

    constexpr VAddr GetAddress() const {
        return addr;
    }
    constexpr std::size_t GetSize() const {
        return size;
    }
    constexpr std::size_t GetNumPages() const {
        return GetSize() / PageSize;
    }
    constexpr VAddr GetEndAddress() const {
        return GetAddress() + GetSize();
    }
    constexpr VAddr GetLastAddress() const {
        return GetEndAddress() - 1;
    }
};

class MemoryBlock final {
    friend class MemoryBlockManager;

private:
    VAddr addr{};
    std::size_t num_pages{};
    MemoryState state{MemoryState::None};
    u16 ipc_lock_count{};
    u16 device_use_count{};
    MemoryPermission perm{MemoryPermission::None};
    MemoryPermission original_perm{MemoryPermission::None};
    MemoryAttribute attribute{MemoryAttribute::None};

public:
    static constexpr int Compare(const MemoryBlock& lhs, const MemoryBlock& rhs) {
        if (lhs.GetAddress() < rhs.GetAddress()) {
            return -1;
        } else if (lhs.GetAddress() <= rhs.GetLastAddress()) {
            return 0;
        } else {
            return 1;
        }
    }

public:
    constexpr MemoryBlock() = default;
    constexpr MemoryBlock(VAddr addr, std::size_t num_pages, MemoryState state,
                          MemoryPermission perm, MemoryAttribute attribute)
        : addr{addr}, num_pages(num_pages), state{state}, perm{perm}, attribute{attribute} {}

    constexpr VAddr GetAddress() const {
        return addr;
    }

    constexpr std::size_t GetNumPages() const {
        return num_pages;
    }

    constexpr std::size_t GetSize() const {
        return GetNumPages() * PageSize;
    }

    constexpr VAddr GetEndAddress() const {
        return GetAddress() + GetSize();
    }

    constexpr VAddr GetLastAddress() const {
        return GetEndAddress() - 1;
    }

    constexpr MemoryInfo GetMemoryInfo() const {
        return {
            GetAddress(), GetSize(), state, perm,
            attribute, original_perm, ipc_lock_count, device_use_count,
        };
    }

private:
    constexpr bool HasProperties(MemoryState s, MemoryPermission p, MemoryAttribute a) const {
        constexpr MemoryAttribute AttributeIgnoreMask{MemoryAttribute::DontCareMask |
                                                      MemoryAttribute::IpcLocked |
                                                      MemoryAttribute::DeviceShared};
        return state == s && perm == p &&
               (attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask);
    }

    constexpr bool HasSameProperties(const MemoryBlock& rhs) const {
        return state == rhs.state && perm == rhs.perm && original_perm == rhs.original_perm &&
               attribute == rhs.attribute && ipc_lock_count == rhs.ipc_lock_count &&
               device_use_count == rhs.device_use_count;
    }

    constexpr bool Contains(VAddr start) const {
        return GetAddress() <= start && start <= GetEndAddress();
    }

    constexpr void Add(std::size_t count) {
        ASSERT(count > 0);
        ASSERT(GetAddress() + count * PageSize - 1 < GetEndAddress() + count * PageSize - 1);

        num_pages += count;
    }

    constexpr void Update(MemoryState new_state, MemoryPermission new_perm,
                          MemoryAttribute new_attribute) {
        ASSERT(original_perm == MemoryPermission::None);
        ASSERT((attribute & MemoryAttribute::IpcLocked) == MemoryAttribute::None);

        state = new_state;
        perm = new_perm;

        // TODO(bunnei): Is this right?
        attribute = static_cast<MemoryAttribute>(
            new_attribute /*| (attribute & (MemoryAttribute::IpcLocked | MemoryAttribute::DeviceShared))*/);
    }

    constexpr MemoryBlock Split(VAddr split_addr) {
        ASSERT(GetAddress() < split_addr);
        ASSERT(Contains(split_addr));
        ASSERT(Common::IsAligned(split_addr, PageSize));

        MemoryBlock block;
        block.addr = addr;
        block.num_pages = (split_addr - GetAddress()) / PageSize;
        block.state = state;
        block.ipc_lock_count = ipc_lock_count;
        block.device_use_count = device_use_count;
        block.perm = perm;
        block.original_perm = original_perm;
        block.attribute = attribute;

        addr = split_addr;
        num_pages -= block.num_pages;

        return block;
    }
};
static_assert(std::is_trivially_destructible<MemoryBlock>::value);

} // namespace Kernel::Memory
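Split() cuts a block at a page-aligned address: the returned block keeps the front pages and *this becomes the tail. Because the member is private (only MemoryBlockManager may call it), here is a standalone sketch of just its arithmetic, not part of the commit; addresses are illustrative and PageSize mirrors memory_types.h:

#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr std::size_t PageSize = 0x1000;

int main() {
    // A block covering [0x10000000, 0x10010000): 16 pages.
    std::uint64_t addr = 0x10000000;
    std::size_t num_pages = 16;

    // Split at 0x10004000: the returned block keeps the front 4 pages...
    const std::uint64_t split_addr = 0x10004000;
    const std::size_t front_pages = (split_addr - addr) / PageSize;

    // ...and the original block becomes the tail.
    addr = split_addr;
    num_pages -= front_pages;

    assert(front_pages == 4 && num_pages == 12);
    // The two halves tile the original range with no gap or overlap.
    assert(0x10000000 + front_pages * PageSize == addr);
    return 0;
}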
@@ -0,0 +1,190 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "core/hle/kernel/memory/memory_block_manager.h"
#include "core/hle/kernel/memory/memory_types.h"

namespace Kernel::Memory {

MemoryBlockManager::MemoryBlockManager(VAddr start_addr, VAddr end_addr)
    : start_addr{start_addr}, end_addr{end_addr} {
    const u64 num_pages{(end_addr - start_addr) / PageSize};
    memory_block_tree.emplace_back(start_addr, num_pages, MemoryState::Free, MemoryPermission::None,
                                   MemoryAttribute::None);
}

MemoryBlockManager::iterator MemoryBlockManager::FindIterator(VAddr addr) {
    auto node{memory_block_tree.begin()};
    while (node != end()) {
        const VAddr end_addr{node->GetNumPages() * PageSize + node->GetAddress()};
        if (node->GetAddress() <= addr && end_addr - 1 >= addr) {
            return node;
        }
        node = std::next(node);
    }
    return end();
}

VAddr MemoryBlockManager::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
                                       std::size_t num_pages, std::size_t align, std::size_t offset,
                                       std::size_t guard_pages) {
    if (num_pages == 0) {
        return {};
    }

    const VAddr region_end{region_start + region_num_pages * PageSize};
    const VAddr region_last{region_end - 1};
    for (auto it{FindIterator(region_start)}; it != memory_block_tree.cend(); it++) {
        const auto info{it->GetMemoryInfo()};
        if (region_last < info.GetAddress()) {
            break;
        }

        if (info.state != MemoryState::Free) {
            continue;
        }

        VAddr area{(info.GetAddress() <= region_start) ? region_start : info.GetAddress()};
        area += guard_pages * PageSize;

        const VAddr offset_area{Common::AlignDown(area, align) + offset};
        area = (area <= offset_area) ? offset_area : offset_area + align;

        const VAddr area_end{area + num_pages * PageSize + guard_pages * PageSize};
        const VAddr area_last{area_end - 1};

        if (info.GetAddress() <= area && area < area_last && area_last <= region_last &&
            area_last <= info.GetLastAddress()) {
            return area;
        }
    }

    return {};
}

void MemoryBlockManager::Update(VAddr addr, std::size_t num_pages, MemoryState prev_state,
                                MemoryPermission prev_perm, MemoryAttribute prev_attribute,
                                MemoryState state, MemoryPermission perm,
                                MemoryAttribute attribute) {
    const std::size_t prev_count{memory_block_tree.size()};
    const VAddr end_addr{addr + num_pages * PageSize};
    iterator node{memory_block_tree.begin()};

    prev_attribute |= MemoryAttribute::IpcAndDeviceMapped;

    while (node != memory_block_tree.end()) {
        MemoryBlock* block{&(*node)};
        iterator next_node{std::next(node)};
        const VAddr cur_addr{block->GetAddress()};
        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};

        if (addr < cur_end_addr && cur_addr < end_addr) {
            if (!block->HasProperties(prev_state, prev_perm, prev_attribute)) {
                node = next_node;
                continue;
            }

            iterator new_node{node};
            if (addr > cur_addr) {
                memory_block_tree.insert(node, block->Split(addr));
            }

            if (end_addr < cur_end_addr) {
                new_node = memory_block_tree.insert(node, block->Split(end_addr));
            }

            new_node->Update(state, perm, attribute);

            MergeAdjacent(new_node, next_node);
        }

        if (cur_end_addr - 1 >= end_addr - 1) {
            break;
        }

        node = next_node;
    }
}

void MemoryBlockManager::Update(VAddr addr, std::size_t num_pages, MemoryState state,
                                MemoryPermission perm, MemoryAttribute attribute) {
    const std::size_t prev_count{memory_block_tree.size()};
    const VAddr end_addr{addr + num_pages * PageSize};
    iterator node{memory_block_tree.begin()};

    while (node != memory_block_tree.end()) {
        MemoryBlock* block{&(*node)};
        iterator next_node{std::next(node)};
        const VAddr cur_addr{block->GetAddress()};
        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};

        if (addr < cur_end_addr && cur_addr < end_addr) {
            iterator new_node{node};

            if (addr > cur_addr) {
                memory_block_tree.insert(node, block->Split(addr));
            }

            if (end_addr < cur_end_addr) {
                new_node = memory_block_tree.insert(node, block->Split(end_addr));
            }

            new_node->Update(state, perm, attribute);

            MergeAdjacent(new_node, next_node);
        }

        if (cur_end_addr - 1 >= end_addr - 1) {
            break;
        }

        node = next_node;
    }
}

void MemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& func) {
    const_iterator it{FindIterator(start)};
    MemoryInfo info{};
    do {
        info = it->GetMemoryInfo();
        func(info);
        it = std::next(it);
    } while (info.addr + info.size - 1 < end - 1 && it != cend());
}

void MemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) {
    MemoryBlock* block{&(*it)};

    auto EraseIt = [&](const iterator it_to_erase) {
        if (next_it == it_to_erase) {
            next_it = std::next(next_it);
        }
        memory_block_tree.erase(it_to_erase);
    };

    if (it != memory_block_tree.begin()) {
        MemoryBlock* prev{&(*std::prev(it))};

        if (block->HasSameProperties(*prev)) {
            const iterator prev_it{std::prev(it)};

            prev->Add(block->GetNumPages());
            EraseIt(it);

            it = prev_it;
            block = prev;
        }
    }

    if (it != cend()) {
        const MemoryBlock* const next{&(*std::next(it))};

        if (block->HasSameProperties(*next)) {
            block->Add(next->GetNumPages());
            EraseIt(std::next(it));
        }
    }
}

} // namespace Kernel::Memory
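The candidate-address computation inside FindFreeArea() deserves a closer look: it finds the smallest address >= area that sits `offset` bytes past an `align`-aligned boundary. A standalone sketch, not part of the commit, with illustrative values and an AlignDown helper mirroring Common::AlignDown:

#include <cassert>
#include <cstdint>

constexpr std::uint64_t AlignDown(std::uint64_t value, std::uint64_t align) {
    return value - value % align; // mirrors Common::AlignDown
}

int main() {
    const std::uint64_t align = 0x200000; // 2 MiB alignment
    const std::uint64_t offset = 0x1000;  // one page past each boundary
    const std::uint64_t area = 0x10123456;

    // The two lines below are the same arithmetic as in FindFreeArea().
    const std::uint64_t offset_area = AlignDown(area, align) + offset;
    const std::uint64_t candidate = (area <= offset_area) ? offset_area : offset_area + align;

    assert(candidate >= area);           // never moves backwards
    assert(candidate % align == offset); // lands on the requested phase
    return 0;
}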
@@ -0,0 +1,64 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <functional>
#include <list>
#include <memory>

#include "common/common_types.h"
#include "core/hle/kernel/memory/memory_block.h"

namespace Kernel::Memory {

class MemoryBlockManager final {
public:
    using MemoryBlockTree = std::list<MemoryBlock>;
    using iterator = MemoryBlockTree::iterator;
    using const_iterator = MemoryBlockTree::const_iterator;

public:
    MemoryBlockManager(VAddr start_addr, VAddr end_addr);

    iterator end() {
        return memory_block_tree.end();
    }
    const_iterator end() const {
        return memory_block_tree.end();
    }
    const_iterator cend() const {
        return memory_block_tree.cend();
    }

    iterator FindIterator(VAddr addr);

    VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages,
                       std::size_t align, std::size_t offset, std::size_t guard_pages);

    void Update(VAddr addr, std::size_t num_pages, MemoryState prev_state,
                MemoryPermission prev_perm, MemoryAttribute prev_attribute, MemoryState state,
                MemoryPermission perm, MemoryAttribute attribute);

    void Update(VAddr addr, std::size_t num_pages, MemoryState state,
                MemoryPermission perm = MemoryPermission::None,
                MemoryAttribute attribute = MemoryAttribute::None);

    using IterateFunc = std::function<void(const MemoryInfo&)>;
    void IterateForRange(VAddr start, VAddr end, IterateFunc&& func);

    MemoryBlock& FindBlock(VAddr addr) {
        return *FindIterator(addr);
    }

private:
    void MergeAdjacent(iterator it, iterator& next_it);

    const VAddr start_addr;
    const VAddr end_addr;

    MemoryBlockTree memory_block_tree;
};

} // namespace Kernel::Memory
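A minimal usage sketch for MemoryBlockManager (not part of the commit; addresses and sizes are illustrative):

// Hypothetical usage, assuming the kernel memory headers above.
#include "core/hle/kernel/memory/memory_block_manager.h"

void ExampleBlockManager() {
    using namespace Kernel::Memory;

    // Manage a 16 MiB address range: it starts as one Free block.
    MemoryBlockManager manager(0x10000000, 0x11000000);

    // Mark 4 pages as Normal/ReadAndWrite; the manager splits the Free block
    // around the range and re-merges neighbors with identical properties.
    manager.Update(0x10100000, 4, MemoryState::Normal, MemoryPermission::ReadAndWrite);

    // Walk the blocks overlapping a range.
    manager.IterateForRange(0x10000000, 0x10200000, [](const MemoryInfo& info) {
        // e.g. inspect info.state / info.perm here
        (void)info;
    });
}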
@@ -0,0 +1,73 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/common_types.h"

namespace Kernel::Memory {

class MemoryRegion final {
    friend class MemoryLayout;

public:
    constexpr PAddr StartAddress() const {
        return start_address;
    }

    constexpr PAddr EndAddress() const {
        return end_address;
    }

private:
    constexpr MemoryRegion() = default;
    constexpr MemoryRegion(PAddr start_address, PAddr end_address)
        : start_address{start_address}, end_address{end_address} {}

    const PAddr start_address{};
    const PAddr end_address{};
};

class MemoryLayout final {
public:
    constexpr const MemoryRegion& Application() const {
        return application;
    }

    constexpr const MemoryRegion& Applet() const {
        return applet;
    }

    constexpr const MemoryRegion& System() const {
        return system;
    }

    static constexpr MemoryLayout GetDefaultLayout() {
        constexpr std::size_t application_size{0xcd500000};
        constexpr std::size_t applet_size{0x1fb00000};
        constexpr PAddr application_start_address{Core::DramMemoryMap::End - application_size};
        constexpr PAddr application_end_address{Core::DramMemoryMap::End};
        constexpr PAddr applet_start_address{application_start_address - applet_size};
        constexpr PAddr applet_end_address{applet_start_address + applet_size};
        constexpr PAddr system_start_address{Core::DramMemoryMap::SlabHeapEnd};
        constexpr PAddr system_end_address{applet_start_address};
        return {application_start_address, application_end_address, applet_start_address,
                applet_end_address, system_start_address, system_end_address};
    }

private:
    constexpr MemoryLayout(PAddr application_start_address, PAddr application_end_address,
                           PAddr applet_start_address, PAddr applet_end_address,
                           PAddr system_start_address, PAddr system_end_address)
        : application{application_start_address, application_end_address},
          applet{applet_start_address, applet_end_address},
          system{system_start_address, system_end_address} {}

    const MemoryRegion application;
    const MemoryRegion applet;
    const MemoryRegion system;

    const PAddr start_address{};
};

} // namespace Kernel::Memory
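A standalone check of the GetDefaultLayout() arithmetic, not part of the commit; the constants are copied from the DramMemoryMap enum in device_memory.h above:

#include <cstdint>

int main() {
    constexpr std::uint64_t dram_end = 0x180000000ULL;     // Base + Size
    constexpr std::uint64_t slab_heap_end = 0x80B06000ULL; // Base + 0x60000 + 0x85000 + 0xa21000

    // Application and applet pools are carved off the top of DRAM.
    constexpr std::uint64_t application_start = dram_end - 0xcd500000ULL;
    constexpr std::uint64_t applet_start = application_start - 0x1fb00000ULL;

    static_assert(application_start == 0xB2B00000ULL);
    static_assert(applet_start == 0x93000000ULL);
    // The system region fills the gap between the slab heap and the applet pool.
    static_assert(slab_heap_end < applet_start);
    return 0;
}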
@@ -0,0 +1,176 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>

#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "common/scope_exit.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/memory/memory_manager.h"
#include "core/hle/kernel/memory/page_linked_list.h"

namespace Kernel::Memory {

std::size_t MemoryManager::Impl::Initialize(Pool new_pool, u64 start_address, u64 end_address) {
    const auto size{end_address - start_address};

    // Calculate metadata sizes
    const auto ref_count_size{(size / PageSize) * sizeof(u16)};
    const auto optimize_map_size{(Common::AlignUp((size / PageSize), 64) / 64) * sizeof(u64)};
    const auto manager_size{Common::AlignUp(optimize_map_size + ref_count_size, PageSize)};
    const auto page_heap_size{PageHeap::CalculateMetadataOverheadSize(size)};
    const auto total_metadata_size{manager_size + page_heap_size};
    ASSERT(manager_size <= total_metadata_size);
    ASSERT(Common::IsAligned(total_metadata_size, PageSize));

    // Setup region
    pool = new_pool;

    // Initialize the manager's KPageHeap
    heap.Initialize(start_address, size, page_heap_size);

    // Free the memory to the heap
    heap.Free(start_address, size / PageSize);

    // Update the heap's used size
    heap.UpdateUsedSize();

    return total_metadata_size;
}

void MemoryManager::InitializeManager(Pool pool, u64 start_address, u64 end_address) {
    ASSERT(pool < Pool::Count);
    managers[static_cast<std::size_t>(pool)].Initialize(pool, start_address, end_address);
}

VAddr MemoryManager::AllocateContinuous(std::size_t num_pages, std::size_t align_pages, Pool pool,
                                        Direction dir) {
    // Early return if we're allocating no pages
    if (num_pages == 0) {
        return {};
    }

    // Lock the pool that we're allocating from
    const auto pool_index{static_cast<std::size_t>(pool)};
    std::lock_guard lock{pool_locks[pool_index]};

    // Choose a heap based on our page size request
    const s32 heap_index{PageHeap::GetAlignedBlockIndex(num_pages, align_pages)};

    // Loop, trying to iterate from each block
    // TODO (bunnei): Support multiple managers
    Impl& chosen_manager{managers[pool_index]};
    VAddr allocated_block{chosen_manager.AllocateBlock(heap_index)};

    // If we failed to allocate, quit now
    if (!allocated_block) {
        return {};
    }

    // If we allocated more than we need, free some
    const auto allocated_pages{PageHeap::GetBlockNumPages(heap_index)};
    if (allocated_pages > num_pages) {
        chosen_manager.Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
    }

    return allocated_block;
}

ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
                                   Direction dir) {
    ASSERT(page_list.GetNumPages() == 0);

    // Early return if we're allocating no pages
    if (num_pages == 0) {
        return RESULT_SUCCESS;
    }

    // Lock the pool that we're allocating from
    const auto pool_index{static_cast<std::size_t>(pool)};
    std::lock_guard lock{pool_locks[pool_index]};

    // Choose a heap based on our page size request
    const s32 heap_index{PageHeap::GetBlockIndex(num_pages)};
    if (heap_index < 0) {
        return ERR_OUT_OF_MEMORY;
    }

    // TODO (bunnei): Support multiple managers
    Impl& chosen_manager{managers[pool_index]};

    // Ensure that we don't leave anything un-freed
    auto group_guard = detail::ScopeExit([&] {
        for (const auto& it : page_list.Nodes()) {
            const auto num_pages{std::min(
                it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)};
            chosen_manager.Free(it.GetAddress(), num_pages);
        }
    });

    // Keep allocating until we've allocated all our pages
    for (s32 index{heap_index}; index >= 0 && num_pages > 0; index--) {
        const auto pages_per_alloc{PageHeap::GetBlockNumPages(index)};

        while (num_pages >= pages_per_alloc) {
            // Allocate a block
            VAddr allocated_block{chosen_manager.AllocateBlock(index)};
            if (!allocated_block) {
                break;
            }

            // Safely add it to our group
            {
                auto block_guard = detail::ScopeExit(
                    [&] { chosen_manager.Free(allocated_block, pages_per_alloc); });

                if (const ResultCode result{page_list.AddBlock(allocated_block, pages_per_alloc)};
                    result.IsError()) {
                    return result;
                }

                block_guard.Cancel();
            }

            num_pages -= pages_per_alloc;
        }
    }

    // Only succeed if we allocated as many pages as we wanted
    ASSERT(num_pages >= 0);
    if (num_pages) {
        return ERR_OUT_OF_MEMORY;
    }

    // We succeeded!
    group_guard.Cancel();
    return RESULT_SUCCESS;
}

ResultCode MemoryManager::Free(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
                               Direction dir) {
    // Early return if we're freeing no pages
    if (!num_pages) {
        return RESULT_SUCCESS;
    }

    // Lock the pool that we're freeing from
    const auto pool_index{static_cast<std::size_t>(pool)};
    std::lock_guard lock{pool_locks[pool_index]};

    // TODO (bunnei): Support multiple managers
    Impl& chosen_manager{managers[pool_index]};

    // Free all of the pages
    for (const auto& it : page_list.Nodes()) {
        const auto num_pages{std::min(
            it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)};
        chosen_manager.Free(it.GetAddress(), num_pages);
    }

    return RESULT_SUCCESS;
}

} // namespace Kernel::Memory
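Allocate() leans on a cancel-on-success scope guard so that any early error return automatically frees partially-allocated blocks. A standalone sketch of that pattern, not part of the commit; it stands in for common/scope_exit.h with a plain RAII helper, so the real header's interface may differ:

#include <utility>

template <typename F>
class ScopeExit {
public:
    explicit ScopeExit(F&& f) : func{std::forward<F>(f)} {}
    ~ScopeExit() {
        if (active) {
            func(); // runs on every exit path unless cancelled
        }
    }
    void Cancel() {
        active = false;
    }

private:
    F func;
    bool active{true};
};

bool AllocateAll() {
    auto guard = ScopeExit([&] { /* free any partially-allocated blocks */ });

    // ... allocate blocks, returning early (so the guard fires) on failure ...

    guard.Cancel(); // success: keep the allocations
    return true;
}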
@@ -0,0 +1,97 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <mutex>

#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hle/kernel/memory/page_heap.h"
#include "core/hle/result.h"

namespace Kernel::Memory {

class PageLinkedList;

class MemoryManager final : NonCopyable {
public:
    enum class Pool : u32 {
        Application = 0,
        Applet = 1,
        System = 2,
        SystemNonSecure = 3,

        Count,

        Shift = 4,
        Mask = (0xF << Shift),
    };

    enum class Direction : u32 {
        FromFront = 0,
        FromBack = 1,

        Shift = 0,
        Mask = (0xF << Shift),
    };

    MemoryManager() = default;

    constexpr std::size_t GetSize(Pool pool) const {
        return managers[static_cast<std::size_t>(pool)].GetSize();
    }

    void InitializeManager(Pool pool, u64 start_address, u64 end_address);
    VAddr AllocateContinuous(std::size_t num_pages, std::size_t align_pages, Pool pool,
                             Direction dir = Direction::FromFront);
    ResultCode Allocate(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
                        Direction dir = Direction::FromFront);
    ResultCode Free(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
                    Direction dir = Direction::FromFront);

    static constexpr std::size_t MaxManagerCount = 10;

private:
    class Impl final : NonCopyable {
    private:
        using RefCount = u16;

    private:
        PageHeap heap;
        Pool pool{};

    public:
        Impl() = default;

        std::size_t Initialize(Pool new_pool, u64 start_address, u64 end_address);

        VAddr AllocateBlock(s32 index) {
            return heap.AllocateBlock(index);
        }

        void Free(VAddr addr, std::size_t num_pages) {
            heap.Free(addr, num_pages);
        }

        constexpr std::size_t GetSize() const {
            return heap.GetSize();
        }

        constexpr VAddr GetAddress() const {
            return heap.GetAddress();
        }

        constexpr VAddr GetEndAddress() const {
            return heap.GetEndAddress();
        }
    };

private:
    std::array<std::mutex, static_cast<std::size_t>(Pool::Count)> pool_locks;
    std::array<Impl, MaxManagerCount> managers;
};

} // namespace Kernel::Memory
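A hypothetical usage sketch for MemoryManager, not part of the commit. It assumes page_linked_list.h's PageLinkedList is default-constructible (as its use in memory_manager.cpp suggests), and the addresses are illustrative:

#include "core/hle/kernel/memory/memory_manager.h"
#include "core/hle/kernel/memory/page_linked_list.h"

void ExampleMemoryManager(Kernel::Memory::MemoryManager& manager) {
    using Kernel::Memory::MemoryManager;

    // Back the Application pool with a DRAM range.
    manager.InitializeManager(MemoryManager::Pool::Application, 0xB2B00000ULL, 0x180000000ULL);

    // Allocate 16 pages; they may be satisfied by several block sizes.
    Kernel::Memory::PageLinkedList page_list;
    const ResultCode result{
        manager.Allocate(page_list, 16, MemoryManager::Pool::Application)};
    if (result.IsSuccess()) {
        // ... map the pages, then return them to the pool when done ...
        manager.Free(page_list, 16, MemoryManager::Pool::Application);
    }
}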
@@ -0,0 +1,18 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>

#include "common/common_types.h"

namespace Kernel::Memory {

constexpr std::size_t PageBits{12};
constexpr std::size_t PageSize{1 << PageBits};

using Page = std::array<u8, PageSize>;

} // namespace Kernel::Memory
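A quick standalone check of these constants, not part of the commit: with a 4 KiB page, the low PageBits bits of an address are the page offset and the rest select the page:

#include <cstddef>
#include <cstdint>

constexpr std::size_t PageBits{12};
constexpr std::size_t PageSize{1 << PageBits};
static_assert(PageSize == 0x1000);

constexpr std::uint64_t PageIndex(std::uint64_t addr) {
    return addr >> PageBits;
}
constexpr std::uint64_t PageOffset(std::uint64_t addr) {
    return addr & (PageSize - 1);
}
static_assert(PageIndex(0x80001234) == 0x80001);
static_assert(PageOffset(0x80001234) == 0x234);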
@@ -0,0 +1,119 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphère, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphère-NX.

#include "core/core.h"
#include "core/hle/kernel/memory/page_heap.h"
#include "core/memory.h"

namespace Kernel::Memory {

void PageHeap::Initialize(VAddr address, std::size_t size, std::size_t metadata_size) {
    // Check our assumptions
    ASSERT(Common::IsAligned(address, PageSize));
    ASSERT(Common::IsAligned(size, PageSize));

    // Set our members
    heap_address = address;
    heap_size = size;

    // Setup bitmaps
    metadata.resize(metadata_size / sizeof(u64));
    u64* cur_bitmap_storage{metadata.data()};
    for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) {
        const std::size_t cur_block_shift{MemoryBlockPageShifts[i]};
        const std::size_t next_block_shift{
            (i != MemoryBlockPageShifts.size() - 1) ? MemoryBlockPageShifts[i + 1] : 0};
        cur_bitmap_storage = blocks[i].Initialize(heap_address, heap_size, cur_block_shift,
                                                  next_block_shift, cur_bitmap_storage);
    }
}

VAddr PageHeap::AllocateBlock(s32 index) {
    const std::size_t needed_size{blocks[index].GetSize()};

    for (s32 i{index}; i < static_cast<s32>(MemoryBlockPageShifts.size()); i++) {
        if (const VAddr addr{blocks[i].PopBlock()}; addr) {
            if (const std::size_t allocated_size{blocks[i].GetSize()};
                allocated_size > needed_size) {
                Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
            }
            return addr;
        }
    }

    return 0;
}

void PageHeap::FreeBlock(VAddr block, s32 index) {
    do {
        block = blocks[index++].PushBlock(block);
    } while (block != 0);
}

void PageHeap::Free(VAddr addr, std::size_t num_pages) {
    // Freeing no pages is a no-op
    if (num_pages == 0) {
        return;
    }

    // Find the largest block size that we can free, and free as many as possible
    s32 big_index{static_cast<s32>(MemoryBlockPageShifts.size()) - 1};
    const VAddr start{addr};
    const VAddr end{(num_pages * PageSize) + addr};
    VAddr before_start{start};
    VAddr before_end{start};
    VAddr after_start{end};
    VAddr after_end{end};
    while (big_index >= 0) {
        const std::size_t block_size{blocks[big_index].GetSize()};
        const VAddr big_start{Common::AlignUp(start, block_size)};
        const VAddr big_end{Common::AlignDown(end, block_size)};
        if (big_start < big_end) {
            // Free as many big blocks as we can
            for (auto block{big_start}; block < big_end; block += block_size) {
                FreeBlock(block, big_index);
            }
            before_end = big_start;
            after_start = big_end;
            break;
        }
        big_index--;
    }
    ASSERT(big_index >= 0);

    // Free space before the big blocks
    for (s32 i{big_index - 1}; i >= 0; i--) {
        const std::size_t block_size{blocks[i].GetSize()};
        while (before_start + block_size <= before_end) {
            before_end -= block_size;
            FreeBlock(before_end, i);
        }
    }

    // Free space after the big blocks
    for (s32 i{big_index - 1}; i >= 0; i--) {
        const std::size_t block_size{blocks[i].GetSize()};
        while (after_start + block_size <= after_end) {
            FreeBlock(after_start, i);
            after_start += block_size;
        }
    }
}

std::size_t PageHeap::CalculateMetadataOverheadSize(std::size_t region_size) {
    std::size_t overhead_size = 0;
    for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) {
        const std::size_t cur_block_shift{MemoryBlockPageShifts[i]};
        const std::size_t next_block_shift{
            (i != MemoryBlockPageShifts.size() - 1) ? MemoryBlockPageShifts[i + 1] : 0};
        overhead_size += PageHeap::Block::CalculateMetadataOverheadSize(
            region_size, cur_block_shift, next_block_shift);
    }
    return Common::AlignUp(overhead_size, PageSize);
}

} // namespace Kernel::Memory
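A standalone sketch of the block-index selection this heap uses, not part of the commit. The shift table mirrors page_heap.h below (4 KiB, 64 KiB, 2 MiB, 4 MiB, 32 MiB, 512 MiB, 1 GiB), so the block page counts are 1, 16, 512, 1024, 8192, 131072, and 262144:

#include <array>
#include <cassert>
#include <cstddef>

constexpr std::size_t PageSize = 0x1000;
constexpr std::array<std::size_t, 7> MemoryBlockPageShifts{0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E};

// Largest block size that fits entirely inside num_pages, mirroring
// PageHeap::GetBlockIndex above.
constexpr int GetBlockIndex(std::size_t num_pages) {
    for (int i = static_cast<int>(MemoryBlockPageShifts.size()) - 1; i >= 0; i--) {
        if (num_pages >= (std::size_t{1} << MemoryBlockPageShifts[i]) / PageSize) {
            return i;
        }
    }
    return -1;
}

int main() {
    assert(GetBlockIndex(1) == 0);   // one page -> 4 KiB block
    assert(GetBlockIndex(16) == 1);  // 16 pages -> 64 KiB block
    assert(GetBlockIndex(513) == 2); // just past 512 pages -> 2 MiB block
    return 0;
}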
@ -0,0 +1,370 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphère, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphère-NX.

#pragma once

#include <array>
#include <vector>

#include "common/alignment.h"
#include "common/assert.h"
#include "common/bit_util.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hle/kernel/memory/memory_types.h"

namespace Kernel::Memory {

class PageHeap final : NonCopyable {
public:
    static constexpr s32 GetAlignedBlockIndex(std::size_t num_pages, std::size_t align_pages) {
        const auto target_pages{std::max(num_pages, align_pages)};
        for (std::size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
            if (target_pages <=
                (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
                return static_cast<s32>(i);
            }
        }
        return -1;
    }

    static constexpr s32 GetBlockIndex(std::size_t num_pages) {
        for (s32 i{static_cast<s32>(NumMemoryBlockPageShifts) - 1}; i >= 0; i--) {
            if (num_pages >= (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
                return i;
            }
        }
        return -1;
    }

    static constexpr std::size_t GetBlockSize(std::size_t index) {
        return static_cast<std::size_t>(1) << MemoryBlockPageShifts[index];
    }

    static constexpr std::size_t GetBlockNumPages(std::size_t index) {
        return GetBlockSize(index) / PageSize;
    }

private:
    static constexpr std::size_t NumMemoryBlockPageShifts{7};
    static constexpr std::array<std::size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{
        0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E,
    };

    class Block final : NonCopyable {
    private:
        class Bitmap final : NonCopyable {
        public:
            static constexpr std::size_t MaxDepth{4};

        private:
            std::array<u64*, MaxDepth> bit_storages{};
            std::size_t num_bits{};
            std::size_t used_depths{};

        public:
            constexpr Bitmap() = default;

            constexpr std::size_t GetNumBits() const {
                return num_bits;
            }
            constexpr s32 GetHighestDepthIndex() const {
                return static_cast<s32>(used_depths) - 1;
            }

            constexpr u64* Initialize(u64* storage, std::size_t size) {
                // Initially, everything is un-set
                num_bits = 0;

                // Calculate the needed bitmap depth
                used_depths = static_cast<std::size_t>(GetRequiredDepth(size));
                ASSERT(used_depths <= MaxDepth);

                // Set the bitmap pointers
                for (s32 depth{GetHighestDepthIndex()}; depth >= 0; depth--) {
                    bit_storages[depth] = storage;
                    size = Common::AlignUp(size, 64) / 64;
                    storage += size;
                }

                return storage;
            }

            s64 FindFreeBlock() const {
                uintptr_t offset{};
                s32 depth{};

                do {
                    const u64 v{bit_storages[depth][offset]};
                    if (v == 0) {
                        // Non-zero depth indicates that a previous level had a free block
                        ASSERT(depth == 0);
                        return -1;
                    }
                    offset = offset * 64 + Common::CountTrailingZeroes64(v);
                    ++depth;
                } while (depth < static_cast<s32>(used_depths));

                return static_cast<s64>(offset);
            }

            constexpr void SetBit(std::size_t offset) {
                SetBit(GetHighestDepthIndex(), offset);
                num_bits++;
            }

            constexpr void ClearBit(std::size_t offset) {
                ClearBit(GetHighestDepthIndex(), offset);
                num_bits--;
            }

            constexpr bool ClearRange(std::size_t offset, std::size_t count) {
                const s32 depth{GetHighestDepthIndex()};
                const auto bit_ind{offset / 64};
                u64* bits{bit_storages[depth]};
                if (count < 64) {
                    const auto shift{offset % 64};
                    ASSERT(shift + count <= 64);
                    // Check that all the bits are set
                    const u64 mask{((1ULL << count) - 1) << shift};
                    u64 v{bits[bit_ind]};
                    if ((v & mask) != mask) {
                        return false;
                    }

                    // Clear the bits
                    v &= ~mask;
                    bits[bit_ind] = v;
                    if (v == 0) {
                        ClearBit(depth - 1, bit_ind);
                    }
                } else {
                    ASSERT(offset % 64 == 0);
                    ASSERT(count % 64 == 0);
                    // Check that all the bits are set
                    std::size_t remaining{count};
                    std::size_t i = 0;
                    do {
                        if (bits[bit_ind + i++] != ~u64(0)) {
                            return false;
                        }
                        remaining -= 64;
                    } while (remaining > 0);

                    // Clear the bits
                    remaining = count;
                    i = 0;
                    do {
                        bits[bit_ind + i] = 0;
                        ClearBit(depth - 1, bit_ind + i);
                        i++;
                        remaining -= 64;
                    } while (remaining > 0);
                }

                num_bits -= count;
                return true;
            }

        private:
            constexpr void SetBit(s32 depth, std::size_t offset) {
                while (depth >= 0) {
                    const auto ind{offset / 64};
                    const auto which{offset % 64};
                    const u64 mask{1ULL << which};

                    u64* bit{std::addressof(bit_storages[depth][ind])};
                    const u64 v{*bit};
                    ASSERT((v & mask) == 0);
                    *bit = v | mask;
                    if (v) {
                        break;
                    }
                    offset = ind;
                    depth--;
                }
            }

            constexpr void ClearBit(s32 depth, std::size_t offset) {
                while (depth >= 0) {
                    const auto ind{offset / 64};
                    const auto which{offset % 64};
                    const u64 mask{1ULL << which};

                    u64* bit{std::addressof(bit_storages[depth][ind])};
                    u64 v{*bit};
                    ASSERT((v & mask) != 0);
                    v &= ~mask;
                    *bit = v;
                    if (v) {
                        break;
                    }
                    offset = ind;
                    depth--;
                }
            }

        private:
            static constexpr s32 GetRequiredDepth(std::size_t region_size) {
                s32 depth = 0;
                while (true) {
                    region_size /= 64;
                    depth++;
                    if (region_size == 0) {
                        return depth;
                    }
                }
            }

        public:
            static constexpr std::size_t CalculateMetadataOverheadSize(std::size_t region_size) {
                std::size_t overhead_bits = 0;
                for (s32 depth{GetRequiredDepth(region_size) - 1}; depth >= 0; depth--) {
                    region_size = Common::AlignUp(region_size, 64) / 64;
                    overhead_bits += region_size;
                }
                return overhead_bits * sizeof(u64);
            }
        };

    private:
        Bitmap bitmap;
        VAddr heap_address{};
        uintptr_t end_offset{};
        std::size_t block_shift{};
        std::size_t next_block_shift{};

    public:
        constexpr Block() = default;

        constexpr std::size_t GetShift() const {
            return block_shift;
        }
        constexpr std::size_t GetNextShift() const {
            return next_block_shift;
        }
        constexpr std::size_t GetSize() const {
            return static_cast<std::size_t>(1) << GetShift();
        }
        constexpr std::size_t GetNumPages() const {
            return GetSize() / PageSize;
        }
        constexpr std::size_t GetNumFreeBlocks() const {
            return bitmap.GetNumBits();
        }
        constexpr std::size_t GetNumFreePages() const {
            return GetNumFreeBlocks() * GetNumPages();
        }

        constexpr u64* Initialize(VAddr addr, std::size_t size, std::size_t bs, std::size_t nbs,
                                  u64* bit_storage) {
            // Set shifts
            block_shift = bs;
            next_block_shift = nbs;

            // Align up the address
            VAddr end{addr + size};
            const auto align{(next_block_shift != 0) ? (1ULL << next_block_shift)
                                                     : (1ULL << block_shift)};
            addr = Common::AlignDown(addr, align);
            end = Common::AlignUp(end, align);

            heap_address = addr;
            end_offset = (end - addr) / (1ULL << block_shift);
            return bitmap.Initialize(bit_storage, end_offset);
        }

        constexpr VAddr PushBlock(VAddr address) {
            // Set the bit for the free block
            std::size_t offset{(address - heap_address) >> GetShift()};
            bitmap.SetBit(offset);

            // If we have a next shift, try to clear the blocks below and return the address
            if (GetNextShift()) {
                const auto diff{1ULL << (GetNextShift() - GetShift())};
                offset = Common::AlignDown(offset, diff);
                if (bitmap.ClearRange(offset, diff)) {
                    return heap_address + (offset << GetShift());
                }
            }

            // We couldn't coalesce, or we're already as big as possible
            return 0;
        }

        VAddr PopBlock() {
            // Find a free block
            const s64 soffset{bitmap.FindFreeBlock()};
            if (soffset < 0) {
                return 0;
            }
            const auto offset{static_cast<std::size_t>(soffset)};

            // Update our tracking and return it
            bitmap.ClearBit(offset);
            return heap_address + (offset << GetShift());
        }

    public:
        static constexpr std::size_t CalculateMetadataOverheadSize(std::size_t region_size,
                                                                   std::size_t cur_block_shift,
                                                                   std::size_t next_block_shift) {
            const auto cur_block_size{(1ULL << cur_block_shift)};
            const auto next_block_size{(1ULL << next_block_shift)};
            const auto align{(next_block_shift != 0) ? next_block_size : cur_block_size};
            return Bitmap::CalculateMetadataOverheadSize(
                (align * 2 + Common::AlignUp(region_size, align)) / cur_block_size);
        }
    };

public:
    PageHeap() = default;

    constexpr VAddr GetAddress() const {
        return heap_address;
    }
    constexpr std::size_t GetSize() const {
        return heap_size;
    }
    constexpr VAddr GetEndAddress() const {
        return GetAddress() + GetSize();
    }
    constexpr std::size_t GetPageOffset(VAddr block) const {
        return (block - GetAddress()) / PageSize;
    }

    void Initialize(VAddr heap_address, std::size_t heap_size, std::size_t metadata_size);
    VAddr AllocateBlock(s32 index);
    void Free(VAddr addr, std::size_t num_pages);

    void UpdateUsedSize() {
        used_size = heap_size - (GetNumFreePages() * PageSize);
    }

    static std::size_t CalculateMetadataOverheadSize(std::size_t region_size);

private:
    constexpr std::size_t GetNumFreePages() const {
        std::size_t num_free{};

        for (const auto& block : blocks) {
            num_free += block.GetNumFreePages();
        }

        return num_free;
    }

    void FreeBlock(VAddr block, s32 index);

    VAddr heap_address{};
    std::size_t heap_size{};
    std::size_t used_size{};
    std::array<Block, NumMemoryBlockPageShifts> blocks{};
    std::vector<u64> metadata;
};

} // namespace Kernel::Memory
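// ---------------------------------------------------------------------------
// Editor's sketch (not part of this commit): a two-level version of the
// Bitmap::FindFreeBlock walk above. The top word summarizes which of the 64
// leaf words are non-zero, so locating a set bit costs one count-trailing-
// zeroes per level instead of a linear scan. std::countr_zero (C++20) stands
// in for Common::CountTrailingZeroes64; the values are invented.
#include <array>
#include <bit>
#include <cstdint>
#include <cstdio>

int main() {
    std::array<std::uint64_t, 64> leaves{}; // depth-1 storage
    leaves[5] = 1ULL << 17;                 // one free block: index 5 * 64 + 17
    std::uint64_t top = 0;                  // depth-0 summary word
    for (std::size_t i = 0; i < leaves.size(); ++i) {
        if (leaves[i] != 0) {
            top |= 1ULL << i;
        }
    }

    // Walk down exactly as FindFreeBlock does: offset = offset * 64 + ctz(word).
    std::uint64_t offset = std::countr_zero(top);            // 5
    offset = offset * 64 + std::countr_zero(leaves[offset]); // 5 * 64 + 17 = 337
    std::printf("first free block index: %llu\n", static_cast<unsigned long long>(offset));
}
// ---------------------------------------------------------------------------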
@ -0,0 +1,93 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <list>

#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hle/kernel/memory/memory_types.h"
#include "core/hle/result.h"

namespace Kernel::Memory {

class PageLinkedList final {
public:
    class Node final {
    public:
        constexpr Node(u64 addr, std::size_t num_pages) : addr{addr}, num_pages{num_pages} {}

        constexpr u64 GetAddress() const {
            return addr;
        }

        constexpr std::size_t GetNumPages() const {
            return num_pages;
        }

    private:
        u64 addr{};
        std::size_t num_pages{};
    };

public:
    PageLinkedList() = default;
    PageLinkedList(u64 address, u64 num_pages) {
        ASSERT(AddBlock(address, num_pages).IsSuccess());
    }

    constexpr std::list<Node>& Nodes() {
        return nodes;
    }

    constexpr const std::list<Node>& Nodes() const {
        return nodes;
    }

    std::size_t GetNumPages() const {
        std::size_t num_pages = 0;
        for (const Node& node : nodes) {
            num_pages += node.GetNumPages();
        }
        return num_pages;
    }

    bool IsEqual(PageLinkedList& other) const {
        auto this_node = nodes.begin();
        auto other_node = other.nodes.begin();
        while (this_node != nodes.end() && other_node != other.nodes.end()) {
            if (this_node->GetAddress() != other_node->GetAddress() ||
                this_node->GetNumPages() != other_node->GetNumPages()) {
                return false;
            }
            this_node = std::next(this_node);
            other_node = std::next(other_node);
        }

        return this_node == nodes.end() && other_node == other.nodes.end();
    }

    ResultCode AddBlock(u64 address, u64 num_pages) {
        if (!num_pages) {
            return RESULT_SUCCESS;
        }
        if (!nodes.empty()) {
            const auto node = nodes.back();
            if (node.GetAddress() + node.GetNumPages() * PageSize == address) {
                address = node.GetAddress();
                num_pages += node.GetNumPages();
                nodes.pop_back();
            }
        }
        nodes.push_back({address, num_pages});
        return RESULT_SUCCESS;
    }

private:
    std::list<Node> nodes;
};

} // namespace Kernel::Memory
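// ---------------------------------------------------------------------------
// Editor's sketch (not part of this commit): demonstrates the coalescing rule
// in PageLinkedList::AddBlock above. A block that begins exactly where the
// previous one ends extends that node instead of appending a new one. A
// 4 KiB PageSize is assumed, as in memory_types.h.
#include <cstdint>
#include <cstdio>
#include <list>
#include <utility>

int main() {
    constexpr std::uint64_t page_size = 0x1000;
    std::list<std::pair<std::uint64_t, std::uint64_t>> nodes; // {addr, num_pages}

    auto add_block = [&](std::uint64_t addr, std::uint64_t num_pages) {
        if (!nodes.empty()) {
            auto& last = nodes.back();
            if (last.first + last.second * page_size == addr) {
                last.second += num_pages; // contiguous: extend the last node
                return;
            }
        }
        nodes.push_back({addr, num_pages});
    };

    add_block(0x1000, 2);
    add_block(0x3000, 1); // contiguous with the previous block -> merged
    add_block(0x8000, 4); // gap -> new node
    std::printf("nodes: %zu\n", nodes.size()); // prints 2
}
// ---------------------------------------------------------------------------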
File diff suppressed because it is too large
@ -0,0 +1,276 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <list>
#include <memory>
#include <mutex>

#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/page_table.h"
#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/memory/memory_block.h"
#include "core/hle/kernel/memory/memory_manager.h"

namespace Core {
class System;
}

namespace Kernel::Memory {

class MemoryBlockManager;

class PageTable final : NonCopyable {
public:
    explicit PageTable(Core::System& system);

    ResultCode InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
                                    VAddr code_addr, std::size_t code_size,
                                    Memory::MemoryManager::Pool pool);
    ResultCode MapProcessCode(VAddr addr, std::size_t pages_count, MemoryState state,
                              MemoryPermission perm);
    ResultCode MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
    ResultCode UnmapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
    ResultCode MapPhysicalMemory(VAddr addr, std::size_t size);
    ResultCode UnmapPhysicalMemory(VAddr addr, std::size_t size);
    ResultCode UnmapMemory(VAddr addr, std::size_t size);
    ResultCode Map(VAddr dst_addr, VAddr src_addr, std::size_t size);
    ResultCode Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size);
    ResultCode MapPages(VAddr addr, PageLinkedList& page_linked_list, MemoryState state,
                        MemoryPermission perm);
    ResultCode SetCodeMemoryPermission(VAddr addr, std::size_t size, MemoryPermission perm);
    MemoryInfo QueryInfo(VAddr addr);
    ResultCode ReserveTransferMemory(VAddr addr, std::size_t size, MemoryPermission perm);
    ResultCode ResetTransferMemory(VAddr addr, std::size_t size);
    ResultCode SetMemoryAttribute(VAddr addr, std::size_t size, MemoryAttribute mask,
                                  MemoryAttribute value);
    ResultCode SetHeapCapacity(std::size_t new_heap_capacity);
    ResultVal<VAddr> SetHeapSize(std::size_t size);
    ResultVal<VAddr> AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
                                          bool is_map_only, VAddr region_start,
                                          std::size_t region_num_pages, MemoryState state,
                                          MemoryPermission perm, PAddr map_addr = 0);

    Common::PageTable& PageTableImpl() {
        return page_table_impl;
    }

    const Common::PageTable& PageTableImpl() const {
        return page_table_impl;
    }

private:
    enum class OperationType : u32 {
        Map,
        MapGroup,
        Unmap,
        ChangePermissions,
        ChangePermissionsAndRefresh,
    };

    static constexpr MemoryAttribute DefaultMemoryIgnoreAttr =
        MemoryAttribute::DontCareMask | MemoryAttribute::IpcLocked | MemoryAttribute::DeviceShared;

    ResultCode InitializeMemoryLayout(VAddr start, VAddr end);
    ResultCode MapPages(VAddr addr, const PageLinkedList& page_linked_list, MemoryPermission perm);
    void MapPhysicalMemory(PageLinkedList& page_linked_list, VAddr start, VAddr end);
    bool IsRegionMapped(VAddr address, u64 size);
    bool IsRegionContiguous(VAddr addr, u64 size) const;
    void AddRegionToPages(VAddr start, std::size_t num_pages, PageLinkedList& page_linked_list);
    MemoryInfo QueryInfoImpl(VAddr addr);
    VAddr AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, u64 needed_num_pages,
                                std::size_t align);
    ResultCode Operate(VAddr addr, std::size_t num_pages, const PageLinkedList& page_group,
                       OperationType operation);
    ResultCode Operate(VAddr addr, std::size_t num_pages, MemoryPermission perm,
                       OperationType operation, PAddr map_addr = 0);
    constexpr VAddr GetRegionAddress(MemoryState state) const;
    constexpr std::size_t GetRegionSize(MemoryState state) const;
    constexpr bool CanContain(VAddr addr, std::size_t size, MemoryState state) const;

    constexpr ResultCode CheckMemoryState(const MemoryInfo& info, MemoryState state_mask,
                                          MemoryState state, MemoryPermission perm_mask,
                                          MemoryPermission perm, MemoryAttribute attr_mask,
                                          MemoryAttribute attr) const;
    ResultCode CheckMemoryState(MemoryState* out_state, MemoryPermission* out_perm,
                                MemoryAttribute* out_attr, VAddr addr, std::size_t size,
                                MemoryState state_mask, MemoryState state,
                                MemoryPermission perm_mask, MemoryPermission perm,
                                MemoryAttribute attr_mask, MemoryAttribute attr,
                                MemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr);
    ResultCode CheckMemoryState(VAddr addr, std::size_t size, MemoryState state_mask,
                                MemoryState state, MemoryPermission perm_mask,
                                MemoryPermission perm, MemoryAttribute attr_mask,
                                MemoryAttribute attr,
                                MemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) {
        return CheckMemoryState(nullptr, nullptr, nullptr, addr, size, state_mask, state, perm_mask,
                                perm, attr_mask, attr, ignore_attr);
    }

    std::recursive_mutex page_table_lock;
    std::unique_ptr<MemoryBlockManager> block_manager;

public:
    constexpr VAddr GetAddressSpaceStart() const {
        return address_space_start;
    }
    constexpr VAddr GetAddressSpaceEnd() const {
        return address_space_end;
    }
    constexpr std::size_t GetAddressSpaceSize() const {
        return address_space_end - address_space_start;
    }
    constexpr VAddr GetHeapRegionStart() const {
        return heap_region_start;
    }
    constexpr VAddr GetHeapRegionEnd() const {
        return heap_region_end;
    }
    constexpr std::size_t GetHeapRegionSize() const {
        return heap_region_end - heap_region_start;
    }
    constexpr VAddr GetAliasRegionStart() const {
        return alias_region_start;
    }
    constexpr VAddr GetAliasRegionEnd() const {
        return alias_region_end;
    }
    constexpr std::size_t GetAliasRegionSize() const {
        return alias_region_end - alias_region_start;
    }
    constexpr VAddr GetStackRegionStart() const {
        return stack_region_start;
    }
    constexpr VAddr GetStackRegionEnd() const {
        return stack_region_end;
    }
    constexpr std::size_t GetStackRegionSize() const {
        return stack_region_end - stack_region_start;
    }
    constexpr VAddr GetKernelMapRegionStart() const {
        return kernel_map_region_start;
    }
    constexpr VAddr GetKernelMapRegionEnd() const {
        return kernel_map_region_end;
    }
    constexpr VAddr GetCodeRegionStart() const {
        return code_region_start;
    }
    constexpr VAddr GetCodeRegionEnd() const {
        return code_region_end;
    }
    constexpr VAddr GetAliasCodeRegionStart() const {
        return alias_code_region_start;
    }
    constexpr VAddr GetAliasCodeRegionSize() const {
        return alias_code_region_end - alias_code_region_start;
    }
    constexpr std::size_t GetAddressSpaceWidth() const {
        return address_space_width;
    }
    constexpr std::size_t GetHeapSize() {
        return current_heap_addr - heap_region_start;
    }
    constexpr std::size_t GetTotalHeapSize() {
        return GetHeapSize() + physical_memory_usage;
    }
    constexpr bool IsInsideAddressSpace(VAddr address, std::size_t size) const {
        return address_space_start <= address && address + size - 1 <= address_space_end - 1;
    }
    constexpr bool IsOutsideAliasRegion(VAddr address, std::size_t size) const {
        return alias_region_start > address || address + size - 1 > alias_region_end - 1;
    }
    constexpr bool IsOutsideStackRegion(VAddr address, std::size_t size) const {
        return stack_region_start > address || address + size - 1 > stack_region_end - 1;
    }
    constexpr bool IsInvalidRegion(VAddr address, std::size_t size) const {
        return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1;
    }
    constexpr bool IsInsideHeapRegion(VAddr address, std::size_t size) const {
        return address + size > heap_region_start && heap_region_end > address;
    }
    constexpr bool IsInsideAliasRegion(VAddr address, std::size_t size) const {
        return address + size > alias_region_start && alias_region_end > address;
    }
    constexpr bool IsOutsideASLRRegion(VAddr address, std::size_t size) const {
        if (IsInvalidRegion(address, size)) {
            return true;
        }
        if (IsInsideHeapRegion(address, size)) {
            return true;
        }
        if (IsInsideAliasRegion(address, size)) {
            return true;
        }
        return {};
    }
    constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const {
        return !IsOutsideASLRRegion(address, size);
    }
    constexpr PAddr GetPhysicalAddr(VAddr addr) {
        return page_table_impl.backing_addr[addr >> Memory::PageBits] + addr;
    }

private:
    constexpr bool Contains(VAddr addr) const {
        return address_space_start <= addr && addr <= address_space_end - 1;
    }
    constexpr bool Contains(VAddr addr, std::size_t size) const {
        return address_space_start <= addr && addr < addr + size &&
               addr + size - 1 <= address_space_end - 1;
    }
    constexpr bool IsKernel() const {
        return is_kernel;
    }
    constexpr bool IsAslrEnabled() const {
        return is_aslr_enabled;
    }

    constexpr std::size_t GetNumGuardPages() const {
        return IsKernel() ? 1 : 4;
    }

    constexpr bool ContainsPages(VAddr addr, std::size_t num_pages) const {
        return (address_space_start <= addr) &&
               (num_pages <= (address_space_end - address_space_start) / PageSize) &&
               (addr + num_pages * PageSize - 1 <= address_space_end - 1);
    }

private:
    VAddr address_space_start{};
    VAddr address_space_end{};
    VAddr heap_region_start{};
    VAddr heap_region_end{};
    VAddr current_heap_end{};
    VAddr alias_region_start{};
    VAddr alias_region_end{};
    VAddr stack_region_start{};
    VAddr stack_region_end{};
    VAddr kernel_map_region_start{};
    VAddr kernel_map_region_end{};
    VAddr code_region_start{};
    VAddr code_region_end{};
    VAddr alias_code_region_start{};
    VAddr alias_code_region_end{};
    VAddr current_heap_addr{};

    std::size_t heap_capacity{};
    std::size_t physical_memory_usage{};
    std::size_t max_heap_size{};
    std::size_t max_physical_memory_size{};
    std::size_t address_space_width{};

    bool is_kernel{};
    bool is_aslr_enabled{};

    MemoryManager::Pool memory_pool{MemoryManager::Pool::Application};

    Common::PageTable page_table_impl;

    Core::System& system;
};

} // namespace Kernel::Memory
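// ---------------------------------------------------------------------------
// Editor's sketch (not part of this commit): the "addr < addr + size" term in
// PageTable::Contains above is an overflow guard. Without it, a size chosen
// so that addr + size wraps past zero can land back inside the address space
// and slip through the remaining bounds checks. Region bounds are invented.
#include <cstdint>
#include <cstdio>

int main() {
    constexpr std::uint64_t space_start = 0x8000000;
    constexpr std::uint64_t space_end = 0x10000000;

    auto contains = [&](std::uint64_t addr, std::uint64_t size) {
        return space_start <= addr && addr < addr + size &&
               addr + size - 1 <= space_end - 1;
    };

    const std::uint64_t addr = 0x9000000;
    // addr + wrap_size wraps to 0x1000, so the last-byte check alone would pass.
    const std::uint64_t wrap_size = 0x1000 - addr;
    std::printf("normal range:   %d\n", contains(addr, 0x1000));    // 1
    std::printf("wrapping range: %d\n", contains(addr, wrap_size)); // 0: caught by the guard
}
// ---------------------------------------------------------------------------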
@ -0,0 +1,164 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphère, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphère-NX.

#pragma once

#include <atomic>

#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/common_types.h"

namespace Kernel::Memory {

namespace impl {

class SlabHeapImpl final : NonCopyable {
public:
    struct Node {
        Node* next{};
    };

    constexpr SlabHeapImpl() = default;

    void Initialize(std::size_t size) {
        ASSERT(head == nullptr);
        obj_size = size;
    }

    constexpr std::size_t GetObjectSize() const {
        return obj_size;
    }

    Node* GetHead() const {
        return head;
    }

    void* Allocate() {
        Node* ret = head.load();

        do {
            if (ret == nullptr) {
                break;
            }
        } while (!head.compare_exchange_weak(ret, ret->next));

        return ret;
    }

    void Free(void* obj) {
        Node* node = reinterpret_cast<Node*>(obj);

        Node* cur_head = head.load();
        do {
            node->next = cur_head;
        } while (!head.compare_exchange_weak(cur_head, node));
    }

private:
    std::atomic<Node*> head{};
    std::size_t obj_size{};
};

} // namespace impl

class SlabHeapBase : NonCopyable {
public:
    constexpr SlabHeapBase() = default;

    constexpr bool Contains(uintptr_t addr) const {
        return start <= addr && addr < end;
    }

    constexpr std::size_t GetSlabHeapSize() const {
        return (end - start) / GetObjectSize();
    }

    constexpr std::size_t GetObjectSize() const {
        return impl.GetObjectSize();
    }

    constexpr uintptr_t GetSlabHeapAddress() const {
        return start;
    }

    std::size_t GetObjectIndexImpl(const void* obj) const {
        return (reinterpret_cast<uintptr_t>(obj) - start) / GetObjectSize();
    }

    std::size_t GetPeakIndex() const {
        return GetObjectIndexImpl(reinterpret_cast<const void*>(peak));
    }

    void* AllocateImpl() {
        return impl.Allocate();
    }

    void FreeImpl(void* obj) {
        // Don't allow freeing an object that wasn't allocated from this heap
        ASSERT(Contains(reinterpret_cast<uintptr_t>(obj)));
        impl.Free(obj);
    }

    void InitializeImpl(std::size_t obj_size, void* memory, std::size_t memory_size) {
        // Ensure we don't initialize a slab using null memory
        ASSERT(memory != nullptr);

        // Initialize the base allocator
        impl.Initialize(obj_size);

        // Set our tracking variables
        const std::size_t num_obj = (memory_size / obj_size);
        start = reinterpret_cast<uintptr_t>(memory);
        end = start + num_obj * obj_size;
        peak = start;

        // Free the objects
        u8* cur = reinterpret_cast<u8*>(end);

        for (std::size_t i{}; i < num_obj; i++) {
            cur -= obj_size;
            impl.Free(cur);
        }
    }

private:
    using Impl = impl::SlabHeapImpl;

    Impl impl;
    uintptr_t peak{};
    uintptr_t start{};
    uintptr_t end{};
};

template <typename T>
class SlabHeap final : public SlabHeapBase {
public:
    constexpr SlabHeap() : SlabHeapBase() {}

    void Initialize(void* memory, std::size_t memory_size) {
        InitializeImpl(sizeof(T), memory, memory_size);
    }

    T* Allocate() {
        T* obj = reinterpret_cast<T*>(AllocateImpl());
        if (obj != nullptr) {
            new (obj) T();
        }
        return obj;
    }

    void Free(T* obj) {
        FreeImpl(obj);
    }

    constexpr std::size_t GetObjectIndex(const T* obj) const {
        return GetObjectIndexImpl(obj);
    }
};

} // namespace Kernel::Memory
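// ---------------------------------------------------------------------------
// Editor's sketch (not part of this commit): minimal usage of the SlabHeap
// template above. The heap is handed a raw byte buffer up front and serves
// constructed T objects from it; Allocate/Free are the lock-free Treiber-
// stack pop/push implemented by SlabHeapImpl's compare_exchange loops. The
// Session struct is invented, and the include path is assumed since the diff
// does not show filenames.
#include <cstdio>
#include <vector>

#include "core/hle/kernel/memory/slab_heap.h" // assumed path

struct Session {
    int id{};
};

int main() {
    // Backing storage for 16 Session objects (normally kernel-reserved pages).
    std::vector<unsigned char> storage(16 * sizeof(Session));

    Kernel::Memory::SlabHeap<Session> heap;
    heap.Initialize(storage.data(), storage.size());

    Session* a = heap.Allocate(); // pops a free node and placement-news a Session
    Session* b = heap.Allocate();
    std::printf("indices: %zu %zu\n", heap.GetObjectIndex(a), heap.GetObjectIndex(b));

    heap.Free(b); // pushes the node back onto the atomic free list
    heap.Free(a);
}
// ---------------------------------------------------------------------------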
@ -0,0 +1,41 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <limits>
#include <random>

#include "core/hle/kernel/memory/system_control.h"

namespace Kernel::Memory::SystemControl {

u64 GenerateRandomU64ForInit() {
    static std::random_device device;
    static std::mt19937 gen(device());
    static std::uniform_int_distribution<u64> distribution(1, std::numeric_limits<u64>::max());
    return distribution(gen);
}

template <typename F>
u64 GenerateUniformRange(u64 min, u64 max, F f) {
    /* Handle the case where the difference is too large to represent. */
    if (max == std::numeric_limits<u64>::max() && min == std::numeric_limits<u64>::min()) {
        return f();
    }

    /* Iterate until we get a value in range. */
    const u64 range_size = ((max + 1) - min);
    const u64 effective_max = (std::numeric_limits<u64>::max() / range_size) * range_size;
    while (true) {
        if (const u64 rnd = f(); rnd < effective_max) {
            return min + (rnd % range_size);
        }
    }
}

u64 GenerateRandomRange(u64 min, u64 max) {
    return GenerateUniformRange(min, max, GenerateRandomU64ForInit);
}

} // namespace Kernel::Memory::SystemControl
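// ---------------------------------------------------------------------------
// Editor's sketch (not part of this commit): the rejection step in
// GenerateUniformRange above, scaled down so the numbers are easy to follow.
// With min = 0, max = 9 (range_size = 10) and a 256-value source,
// effective_max = (256 / 10) * 10 = 250: draws in [250, 255] are rejected,
// because mapping them through "% 10" would make results 0..5 slightly more
// likely than 6..9.
#include <cstdint>
#include <cstdio>

int main() {
    constexpr std::uint32_t source_values = 256; // pretend f() yields 0..255
    constexpr std::uint32_t range_size = 10;     // target range [0, 9]
    constexpr std::uint32_t effective_max = (source_values / range_size) * range_size;

    std::uint32_t counts[range_size] = {};
    for (std::uint32_t rnd = 0; rnd < source_values; ++rnd) {
        if (rnd < effective_max) { // accept: unbiased
            ++counts[rnd % range_size];
        } // else reject and redraw, as the while (true) loop above does
    }
    for (std::uint32_t i = 0; i < range_size; ++i) {
        std::printf("%u: %u\n", i, counts[i]); // every bucket prints 25
    }
}
// ---------------------------------------------------------------------------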
@ -0,0 +1,18 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/common_types.h"

namespace Kernel::Memory::SystemControl {

u64 GenerateRandomU64ForInit();

template <typename F>
u64 GenerateUniformRange(u64 min, u64 max, F f);

u64 GenerateRandomRange(u64 min, u64 max);

} // namespace Kernel::Memory::SystemControl
@ -0,0 +1,68 @@
// Copyright 2020 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/common_funcs.h"
#include "common/common_types.h"

namespace Kernel::Svc {

enum class MemoryState : u32 {
    Free = 0x00,
    Io = 0x01,
    Static = 0x02,
    Code = 0x03,
    CodeData = 0x04,
    Normal = 0x05,
    Shared = 0x06,
    Alias = 0x07,
    AliasCode = 0x08,
    AliasCodeData = 0x09,
    Ipc = 0x0A,
    Stack = 0x0B,
    ThreadLocal = 0x0C,
    Transfered = 0x0D,
    SharedTransfered = 0x0E,
    SharedCode = 0x0F,
    Inaccessible = 0x10,
    NonSecureIpc = 0x11,
    NonDeviceIpc = 0x12,
    Kernel = 0x13,
    GeneratedCode = 0x14,
    CodeOut = 0x15,
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryState);

enum class MemoryAttribute : u32 {
    Locked = (1 << 0),
    IpcLocked = (1 << 1),
    DeviceShared = (1 << 2),
    Uncached = (1 << 3),
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryAttribute);

enum class MemoryPermission : u32 {
    None = (0 << 0),
    Read = (1 << 0),
    Write = (1 << 1),
    Execute = (1 << 2),
    ReadWrite = Read | Write,
    ReadExecute = Read | Execute,
    DontCare = (1 << 28),
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryPermission);

struct MemoryInfo {
    u64 addr{};
    u64 size{};
    MemoryState state{};
    MemoryAttribute attr{};
    MemoryPermission perm{};
    u32 ipc_refcount{};
    u32 device_refcount{};
    u32 padding{};
};

} // namespace Kernel::Svc
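// ---------------------------------------------------------------------------
// Editor's sketch (not part of this commit): typical use of the operators that
// DECLARE_ENUM_FLAG_OPERATORS generates for the enums above. Permission tests
// read like C-style flag checks while the enum stays strongly typed. The
// include path is assumed since the diff does not show filenames.
#include <cstdio>

#include "core/hle/kernel/svc_types.h" // assumed path

bool CanWrite(Kernel::Svc::MemoryPermission perm) {
    using Kernel::Svc::MemoryPermission;
    return (perm & MemoryPermission::Write) != MemoryPermission::None;
}

int main() {
    using Kernel::Svc::MemoryPermission;
    std::printf("ReadWrite writable:   %d\n", CanWrite(MemoryPermission::ReadWrite));   // 1
    std::printf("ReadExecute writable: %d\n", CanWrite(MemoryPermission::ReadExecute)); // 0
}
// ---------------------------------------------------------------------------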
File diff suppressed because it is too large
@ -1,796 +0,0 @@
// Copyright 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <map>
#include <memory>
#include <tuple>
#include <vector>
#include "common/common_types.h"
#include "common/memory_hook.h"
#include "common/page_table.h"
#include "core/hle/kernel/physical_memory.h"
#include "core/hle/result.h"
#include "core/memory.h"

namespace Core {
class System;
}

namespace FileSys {
enum class ProgramAddressSpaceType : u8;
}

namespace Kernel {

enum class VMAType : u8 {
    /// VMA represents an unmapped region of the address space.
    Free,
    /// VMA is backed by a ref-counted allocated memory block.
    AllocatedMemoryBlock,
    /// VMA is backed by a raw, unmanaged pointer.
    BackingMemory,
    /// VMA is mapped to MMIO registers at a fixed PAddr.
    MMIO,
    // TODO(yuriks): Implement MemoryAlias to support MAP/UNMAP
};

/// Permissions for mapped memory blocks
enum class VMAPermission : u8 {
    None = 0,
    Read = 1,
    Write = 2,
    Execute = 4,

    ReadWrite = Read | Write,
    ReadExecute = Read | Execute,
    WriteExecute = Write | Execute,
    ReadWriteExecute = Read | Write | Execute,

    // Used as a wildcard when checking permissions across memory ranges
    All = 0xFF,
};

constexpr VMAPermission operator|(VMAPermission lhs, VMAPermission rhs) {
    return static_cast<VMAPermission>(u32(lhs) | u32(rhs));
}

constexpr VMAPermission operator&(VMAPermission lhs, VMAPermission rhs) {
    return static_cast<VMAPermission>(u32(lhs) & u32(rhs));
}

constexpr VMAPermission operator^(VMAPermission lhs, VMAPermission rhs) {
    return static_cast<VMAPermission>(u32(lhs) ^ u32(rhs));
}

constexpr VMAPermission operator~(VMAPermission permission) {
    return static_cast<VMAPermission>(~u32(permission));
}

constexpr VMAPermission& operator|=(VMAPermission& lhs, VMAPermission rhs) {
    lhs = lhs | rhs;
    return lhs;
}

constexpr VMAPermission& operator&=(VMAPermission& lhs, VMAPermission rhs) {
    lhs = lhs & rhs;
    return lhs;
}

constexpr VMAPermission& operator^=(VMAPermission& lhs, VMAPermission rhs) {
    lhs = lhs ^ rhs;
    return lhs;
}

/// Attribute flags that can be applied to a VMA
enum class MemoryAttribute : u32 {
    Mask = 0xFF,

    /// No particular qualities
    None = 0,
    /// Memory locked/borrowed for use. e.g. This would be used by transfer memory.
    Locked = 1,
    /// Memory locked for use by IPC-related internals.
    LockedForIPC = 2,
    /// Mapped as part of the device address space.
    DeviceMapped = 4,
    /// Uncached memory
    Uncached = 8,

    IpcAndDeviceMapped = LockedForIPC | DeviceMapped,
};

constexpr MemoryAttribute operator|(MemoryAttribute lhs, MemoryAttribute rhs) {
    return static_cast<MemoryAttribute>(u32(lhs) | u32(rhs));
}

constexpr MemoryAttribute operator&(MemoryAttribute lhs, MemoryAttribute rhs) {
    return static_cast<MemoryAttribute>(u32(lhs) & u32(rhs));
}

constexpr MemoryAttribute operator^(MemoryAttribute lhs, MemoryAttribute rhs) {
    return static_cast<MemoryAttribute>(u32(lhs) ^ u32(rhs));
}

constexpr MemoryAttribute operator~(MemoryAttribute attribute) {
    return static_cast<MemoryAttribute>(~u32(attribute));
}

constexpr MemoryAttribute& operator|=(MemoryAttribute& lhs, MemoryAttribute rhs) {
    lhs = lhs | rhs;
    return lhs;
}

constexpr MemoryAttribute& operator&=(MemoryAttribute& lhs, MemoryAttribute rhs) {
    lhs = lhs & rhs;
    return lhs;
}

constexpr MemoryAttribute& operator^=(MemoryAttribute& lhs, MemoryAttribute rhs) {
    lhs = lhs ^ rhs;
    return lhs;
}

constexpr u32 ToSvcMemoryAttribute(MemoryAttribute attribute) {
    return static_cast<u32>(attribute & MemoryAttribute::Mask);
}

// clang-format off
/// Represents memory states and any relevant flags, as used by the kernel.
/// svcQueryMemory interprets these by masking away all but the first eight
/// bits when storing memory state into a MemoryInfo instance.
enum class MemoryState : u32 {
    Mask = 0xFF,
    FlagProtect = 1U << 8,
    FlagDebug = 1U << 9,
    FlagIPC0 = 1U << 10,
    FlagIPC3 = 1U << 11,
    FlagIPC1 = 1U << 12,
    FlagMapped = 1U << 13,
    FlagCode = 1U << 14,
    FlagAlias = 1U << 15,
    FlagModule = 1U << 16,
    FlagTransfer = 1U << 17,
    FlagQueryPhysicalAddressAllowed = 1U << 18,
    FlagSharedDevice = 1U << 19,
    FlagSharedDeviceAligned = 1U << 20,
    FlagIPCBuffer = 1U << 21,
    FlagMemoryPoolAllocated = 1U << 22,
    FlagMapProcess = 1U << 23,
    FlagUncached = 1U << 24,
    FlagCodeMemory = 1U << 25,

    // Wildcard used in range checking to indicate all states.
    All = 0xFFFFFFFF,

    // Convenience flag sets to reduce repetition
    IPCFlags = FlagIPC0 | FlagIPC3 | FlagIPC1,

    CodeFlags = FlagDebug | IPCFlags | FlagMapped | FlagCode | FlagQueryPhysicalAddressAllowed |
                FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated,

    DataFlags = FlagProtect | IPCFlags | FlagMapped | FlagAlias | FlagTransfer |
                FlagQueryPhysicalAddressAllowed | FlagSharedDevice | FlagSharedDeviceAligned |
                FlagMemoryPoolAllocated | FlagIPCBuffer | FlagUncached,

    Unmapped = 0x00,
    Io = 0x01 | FlagMapped,
    Normal = 0x02 | FlagMapped | FlagQueryPhysicalAddressAllowed,
    Code = 0x03 | CodeFlags | FlagMapProcess,
    CodeData = 0x04 | DataFlags | FlagMapProcess | FlagCodeMemory,
    Heap = 0x05 | DataFlags | FlagCodeMemory,
    Shared = 0x06 | FlagMapped | FlagMemoryPoolAllocated,
    ModuleCode = 0x08 | CodeFlags | FlagModule | FlagMapProcess,
    ModuleCodeData = 0x09 | DataFlags | FlagModule | FlagMapProcess | FlagCodeMemory,

    IpcBuffer0 = 0x0A | FlagMapped | FlagQueryPhysicalAddressAllowed | FlagMemoryPoolAllocated |
                 IPCFlags | FlagSharedDevice | FlagSharedDeviceAligned,

    Stack = 0x0B | FlagMapped | IPCFlags | FlagQueryPhysicalAddressAllowed |
            FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated,

    ThreadLocal = 0x0C | FlagMapped | FlagMemoryPoolAllocated,

    TransferMemoryIsolated = 0x0D | IPCFlags | FlagMapped | FlagQueryPhysicalAddressAllowed |
                             FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated |
                             FlagUncached,

    TransferMemory = 0x0E | FlagIPC3 | FlagIPC1 | FlagMapped | FlagQueryPhysicalAddressAllowed |
                     FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated,

    ProcessMemory = 0x0F | FlagIPC3 | FlagIPC1 | FlagMapped | FlagMemoryPoolAllocated,

    // Used to signify an inaccessible or invalid memory region with memory queries
    Inaccessible = 0x10,

    IpcBuffer1 = 0x11 | FlagIPC3 | FlagIPC1 | FlagMapped | FlagQueryPhysicalAddressAllowed |
                 FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated,

    IpcBuffer3 = 0x12 | FlagIPC3 | FlagMapped | FlagQueryPhysicalAddressAllowed |
                 FlagSharedDeviceAligned | FlagMemoryPoolAllocated,

    KernelStack = 0x13 | FlagMapped,
};
// clang-format on

constexpr MemoryState operator|(MemoryState lhs, MemoryState rhs) {
    return static_cast<MemoryState>(u32(lhs) | u32(rhs));
}

constexpr MemoryState operator&(MemoryState lhs, MemoryState rhs) {
    return static_cast<MemoryState>(u32(lhs) & u32(rhs));
}

constexpr MemoryState operator^(MemoryState lhs, MemoryState rhs) {
    return static_cast<MemoryState>(u32(lhs) ^ u32(rhs));
}

constexpr MemoryState operator~(MemoryState lhs) {
    return static_cast<MemoryState>(~u32(lhs));
}

constexpr MemoryState& operator|=(MemoryState& lhs, MemoryState rhs) {
    lhs = lhs | rhs;
    return lhs;
}

constexpr MemoryState& operator&=(MemoryState& lhs, MemoryState rhs) {
    lhs = lhs & rhs;
    return lhs;
}

constexpr MemoryState& operator^=(MemoryState& lhs, MemoryState rhs) {
    lhs = lhs ^ rhs;
    return lhs;
}

constexpr u32 ToSvcMemoryState(MemoryState state) {
    return static_cast<u32>(state & MemoryState::Mask);
}
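// ---------------------------------------------------------------------------
// Editor's sketch (not part of this commit): what the masking helper above
// (removed along with this file) did. The kernel-internal flags all live
// above bit 7, so masking with MemoryState::Mask leaves exactly the
// SVC-visible state byte. Assumes the definitions in this removed header.
static_assert(Kernel::ToSvcMemoryState(Kernel::MemoryState::Heap) == 0x05);
static_assert(Kernel::ToSvcMemoryState(Kernel::MemoryState::KernelStack) == 0x13);
// ---------------------------------------------------------------------------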
struct MemoryInfo {
    u64 base_address;
    u64 size;
    u32 state;
    u32 attributes;
    u32 permission;
    u32 ipc_ref_count;
    u32 device_ref_count;
};
static_assert(sizeof(MemoryInfo) == 0x28, "MemoryInfo has incorrect size.");

struct PageInfo {
    u32 flags;
};

/**
 * Represents a VMA in an address space. A VMA is a contiguous region of virtual addressing space
 * with homogeneous attributes across its extents. In this particular implementation each VMA is
 * also backed by a single host memory allocation.
 */
struct VirtualMemoryArea {
    /// Gets the starting (base) address of this VMA.
    VAddr StartAddress() const {
        return base;
    }

    /// Gets the ending address of this VMA.
    VAddr EndAddress() const {
        return base + size - 1;
    }

    /// Virtual base address of the region.
    VAddr base = 0;
    /// Size of the region.
    u64 size = 0;

    VMAType type = VMAType::Free;
    VMAPermission permissions = VMAPermission::None;
    MemoryState state = MemoryState::Unmapped;
    MemoryAttribute attribute = MemoryAttribute::None;

    // Settings for type = AllocatedMemoryBlock
    /// Memory block backing this VMA.
    std::shared_ptr<PhysicalMemory> backing_block = nullptr;
    /// Offset into the backing_memory the mapping starts from.
    std::size_t offset = 0;

    // Settings for type = BackingMemory
    /// Pointer backing this VMA. It will not be destroyed or freed when the VMA is removed.
    u8* backing_memory = nullptr;

    // Settings for type = MMIO
    /// Physical address of the register area this VMA maps to.
    PAddr paddr = 0;
    Common::MemoryHookPointer mmio_handler = nullptr;

    /// Tests if this area can be merged to the right with `next`.
    bool CanBeMergedWith(const VirtualMemoryArea& next) const;
};

/**
 * Manages a process' virtual addressing space. This class maintains a list of allocated and free
 * regions in the address space, along with their attributes, and allows kernel clients to
 * manipulate it, adjusting the page table to match.
 *
 * This is similar in idea and purpose to the VM manager present in operating system kernels, with
 * the main difference being that it doesn't have to support swapping or memory mapping of files.
 * The implementation is also simplified by not having to allocate page frames. See these articles
|
 * - http://duartes.org/gustavo/blog/post/how-the-kernel-manages-your-memory/
 * - http://duartes.org/gustavo/blog/post/page-cache-the-affair-between-memory-and-files/
 */
class VMManager final {
    using VMAMap = std::map<VAddr, VirtualMemoryArea>;

public:
    using VMAHandle = VMAMap::const_iterator;

    explicit VMManager(Core::System& system);
    ~VMManager();

    /// Clears the address space map, re-initializing with a single free area.
    void Reset(FileSys::ProgramAddressSpaceType type);

    /// Finds the VMA that contains the given address, or `vma_map.end()` if none does.
|
|
||||||
VMAHandle FindVMA(VAddr target) const;
|
|
||||||
|
|
||||||
/// Indicates whether or not the given handle is within the VMA map.
|
|
||||||
bool IsValidHandle(VMAHandle handle) const;
|
|
||||||
|
|
||||||
// TODO(yuriks): Should these functions actually return the handle?
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Maps part of a ref-counted block of memory at a given address.
|
|
||||||
*
|
|
||||||
* @param target The guest address to start the mapping at.
|
|
||||||
* @param block The block to be mapped.
|
|
||||||
* @param offset Offset into `block` to map from.
|
|
||||||
* @param size Size of the mapping.
|
|
||||||
* @param state MemoryState tag to attach to the VMA.
|
|
||||||
*/
|
|
||||||
ResultVal<VMAHandle> MapMemoryBlock(VAddr target, std::shared_ptr<PhysicalMemory> block,
|
|
||||||
std::size_t offset, u64 size, MemoryState state,
|
|
||||||
VMAPermission perm = VMAPermission::ReadWrite);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Maps an unmanaged host memory pointer at a given address.
|
|
||||||
*
|
|
||||||
* @param target The guest address to start the mapping at.
|
|
||||||
* @param memory The memory to be mapped.
|
|
||||||
* @param size Size of the mapping.
|
|
||||||
* @param state MemoryState tag to attach to the VMA.
|
|
||||||
*/
|
|
||||||
ResultVal<VMAHandle> MapBackingMemory(VAddr target, u8* memory, u64 size, MemoryState state);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Finds the first free memory region of the given size within
|
|
||||||
* the user-addressable ASLR memory region.
|
|
||||||
*
|
|
||||||
* @param size The size of the desired region in bytes.
|
|
||||||
*
|
|
||||||
* @returns If successful, the base address of the free region with
|
|
||||||
* the given size.
|
|
||||||
*/
|
|
||||||
ResultVal<VAddr> FindFreeRegion(u64 size) const;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Finds the first free address range that can hold a region of the desired size
|
|
||||||
*
|
|
||||||
* @param begin The starting address of the range.
|
|
||||||
* This is treated as an inclusive beginning address.
|
|
||||||
*
|
|
||||||
* @param end The ending address of the range.
|
|
||||||
* This is treated as an exclusive ending address.
|
|
||||||
*
|
|
||||||
* @param size The size of the free region to attempt to locate,
|
|
||||||
* in bytes.
|
|
||||||
*
|
|
||||||
* @returns If successful, the base address of the free region with
|
|
||||||
* the given size.
|
|
||||||
*
|
|
||||||
* @returns If unsuccessful, a result containing an error code.
|
|
||||||
*
|
|
||||||
* @pre The starting address must be less than the ending address.
|
|
||||||
* @pre The size must not exceed the address range itself.
|
|
||||||
*/
|
|
||||||
ResultVal<VAddr> FindFreeRegion(VAddr begin, VAddr end, u64 size) const;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Maps a memory-mapped IO region at a given address.
|
|
||||||
*
|
|
||||||
* @param target The guest address to start the mapping at.
|
|
||||||
* @param paddr The physical address where the registers are present.
|
|
||||||
* @param size Size of the mapping.
|
|
||||||
* @param state MemoryState tag to attach to the VMA.
|
|
||||||
* @param mmio_handler The handler that will implement read and write for this MMIO region.
|
|
||||||
*/
|
|
||||||
ResultVal<VMAHandle> MapMMIO(VAddr target, PAddr paddr, u64 size, MemoryState state,
|
|
||||||
Common::MemoryHookPointer mmio_handler);
|
|
||||||
|
|
||||||
/// Unmaps a range of addresses, splitting VMAs as necessary.
|
|
||||||
ResultCode UnmapRange(VAddr target, u64 size);
|
|
||||||
|
|
||||||
/// Changes the permissions of the given VMA.
|
|
||||||
VMAHandle Reprotect(VMAHandle vma, VMAPermission new_perms);
|
|
||||||
|
|
||||||
/// Changes the permissions of a range of addresses, splitting VMAs as necessary.
|
|
||||||
ResultCode ReprotectRange(VAddr target, u64 size, VMAPermission new_perms);
|
|
||||||
|
|
||||||
ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state);
|
|
||||||
|
|
||||||
/// Attempts to allocate a heap with the given size.
|
|
||||||
///
|
|
||||||
/// @param size The size of the heap to allocate in bytes.
|
|
||||||
///
|
|
||||||
/// @note If a heap is currently allocated, and this is called
|
|
||||||
/// with a size that is equal to the size of the current heap,
|
|
||||||
/// then this function will do nothing and return the current
|
|
||||||
/// heap's starting address, as there's no need to perform
|
|
||||||
/// any additional heap allocation work.
|
|
||||||
///
|
|
||||||
/// @note If a heap is currently allocated, and this is called
|
|
||||||
/// with a size less than the current heap's size, then
|
|
||||||
/// this function will attempt to shrink the heap.
|
|
||||||
///
|
|
||||||
/// @note If a heap is currently allocated, and this is called
|
|
||||||
/// with a size larger than the current heap's size, then
|
|
||||||
/// this function will attempt to extend the size of the heap.
|
|
||||||
///
|
|
||||||
/// @returns A result indicating either success or failure.
|
|
||||||
/// <p>
|
|
||||||
/// If successful, this function will return a result
|
|
||||||
/// containing the starting address to the allocated heap.
|
|
||||||
/// <p>
|
|
||||||
/// If unsuccessful, this function will return a result
|
|
||||||
/// containing an error code.
|
|
||||||
///
|
|
||||||
/// @pre The given size must lie within the allowable heap
|
|
||||||
/// memory region managed by this VMManager instance.
|
|
||||||
/// Failure to abide by this will result in ERR_OUT_OF_MEMORY
|
|
||||||
/// being returned as the result.
|
|
||||||
///
|
|
||||||
ResultVal<VAddr> SetHeapSize(u64 size);
|
|
||||||
|
|
||||||
/// Maps memory at a given address.
|
|
||||||
///
|
|
||||||
/// @param target The virtual address to map memory at.
|
|
||||||
/// @param size The amount of memory to map.
|
|
||||||
///
|
|
||||||
/// @note The destination address must lie within the Map region.
|
|
||||||
///
|
|
||||||
/// @note This function requires that SystemResourceSize be non-zero,
|
|
||||||
/// however, this is just because if it were not then the
|
|
||||||
/// resulting page tables could be exploited on hardware by
|
|
||||||
/// a malicious program. SystemResource usage does not need
|
|
||||||
/// to be explicitly checked or updated here.
|
|
||||||
ResultCode MapPhysicalMemory(VAddr target, u64 size);

    /// Unmaps memory at a given address.
    ///
    /// @param target The virtual address to unmap memory at.
    /// @param size   The amount of memory to unmap.
    ///
    /// @note The destination address must lie within the Map region.
    ///
    /// @note This function requires that SystemResourceSize be non-zero,
    ///       but only because, if it were not, the resulting page tables
    ///       could be exploited on hardware by a malicious program.
    ///       SystemResource usage does not need to be explicitly checked
    ///       or updated here.
    ResultCode UnmapPhysicalMemory(VAddr target, u64 size);
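
    // Illustrative pairing sketch (hypothetical caller; `target` and the size are assumed
    // values, not part of this interface):
    //
    //     const VAddr target = vm_manager.GetMapRegionBaseAddress();
    //     if (vm_manager.MapPhysicalMemory(target, 0x200000) == RESULT_SUCCESS) {
    //         // ... use [target, target + 0x200000) ...
    //         vm_manager.UnmapPhysicalMemory(target, 0x200000);
    //     }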

    /// Maps a region of memory as code memory.
    ///
    /// @param dst_address The base address of the region to create the aliasing memory region.
    /// @param src_address The base address of the region to be aliased.
    /// @param size        The total amount of memory to map in bytes.
    ///
    /// @pre Both memory regions lie within the actual addressable address space.
    ///
    /// @post After this function finishes execution, assuming success, the address range
    ///       [dst_address, dst_address + size) will alias the memory region
    ///       [src_address, src_address + size).
    ///       <p>
    ///       What this also entails is as follows:
    ///       1. The aliased region gains the Locked memory attribute.
    ///       2. The aliased region becomes read-only.
    ///       3. The aliasing region becomes read-only.
    ///       4. The aliasing region is created with a memory state of MemoryState::CodeModule.
    ///
    ResultCode MapCodeMemory(VAddr dst_address, VAddr src_address, u64 size);
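
    // Illustrative sketch (hypothetical `dst`, `src`, and `module_size`; not part of this
    // interface): aliasing a loaded module image into the code region:
    //
    //     if (vm_manager.MapCodeMemory(dst, src, module_size) == RESULT_SUCCESS) {
    //         // [dst, dst + module_size) now aliases [src, src + module_size) as
    //         // read-only CodeModule memory, and the source range is Locked.
    //     }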

    /// Unmaps a region of memory designated as code module memory.
    ///
    /// @param dst_address The base address of the memory region aliasing the source memory region.
    /// @param src_address The base address of the memory region being aliased.
    /// @param size        The size of the memory region to unmap in bytes.
    ///
    /// @pre Both memory ranges lie within the actual addressable address space.
    ///
    /// @pre The memory region being unmapped has previously been mapped
    ///      by a call to MapCodeMemory.
    ///
    /// @post After execution of the function, if successful, the aliasing memory region
    ///       will be unmapped and the aliased region will have various traits about it
    ///       restored to what they were prior to the original mapping call preceding
    ///       this function call.
    ///       <p>
    ///       What this also entails is as follows:
    ///       1. The state of the memory region will now indicate a general heap region.
    ///       2. All memory attributes for the memory region are cleared.
    ///       3. Memory permissions for the region are restored to user read/write.
    ///
    ResultCode UnmapCodeMemory(VAddr dst_address, VAddr src_address, u64 size);

    /// Queries the memory manager for information about the given address.
    ///
    /// @param address The address to query information for.
    ///
    /// @return A MemoryInfo instance containing information about the given address.
    ///
    MemoryInfo QueryMemory(VAddr address) const;
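
    // Illustrative sketch (hypothetical caller; assumes MemoryInfo exposes base_address
    // and size fields): walking the address space one block at a time, in the style of
    // svcQueryMemory:
    //
    //     VAddr addr = vm_manager.GetAddressSpaceBaseAddress();
    //     while (addr < vm_manager.GetAddressSpaceEndAddress()) {
    //         const MemoryInfo info = vm_manager.QueryMemory(addr);
    //         addr = info.base_address + info.size;
    //     }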

    /// Sets an attribute across the given address range.
    ///
    /// @param address   The starting address.
    /// @param size      The size of the range to set the attribute on.
    /// @param mask      The attribute mask.
    /// @param attribute The attribute to set across the given address range.
    ///
    /// @returns RESULT_SUCCESS if successful.
    /// @returns ERR_INVALID_ADDRESS_STATE if the attribute could not be set.
    ///
    ResultCode SetMemoryAttribute(VAddr address, u64 size, MemoryAttribute mask,
                                  MemoryAttribute attribute);
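
    // Illustrative sketch (enumerator names such as MemoryAttribute::Uncached are assumed
    // for illustration, mirroring svcSetMemoryAttribute): only the bits selected by `mask`
    // are replaced with the corresponding bits of `attribute`:
    //
    //     vm_manager.SetMemoryAttribute(addr, size, MemoryAttribute::Uncached,
    //                                   MemoryAttribute::Uncached); // Set uncached.
    //     vm_manager.SetMemoryAttribute(addr, size, MemoryAttribute::Uncached,
    //                                   MemoryAttribute::None);     // Clear uncached.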

    /**
     * Scans all VMAs and updates the page table range of any that use the given vector as backing
     * memory. This should be called after any operation that causes reallocation of the vector.
     */
    void RefreshMemoryBlockMappings(const PhysicalMemory* block);

    /// Dumps the address space layout to the log, for debugging.
    void LogLayout() const;

    /// Gets the total memory usage, used by svcGetInfo.
    u64 GetTotalPhysicalMemoryAvailable() const;

    /// Gets the address space base address.
    VAddr GetAddressSpaceBaseAddress() const;

    /// Gets the address space end address.
    VAddr GetAddressSpaceEndAddress() const;

    /// Gets the total address space size in bytes.
    u64 GetAddressSpaceSize() const;

    /// Gets the address space width in bits.
    u64 GetAddressSpaceWidth() const;

    /// Determines whether or not the given address range lies within the address space.
    bool IsWithinAddressSpace(VAddr address, u64 size) const;

    /// Gets the base address of the ASLR region.
    VAddr GetASLRRegionBaseAddress() const;

    /// Gets the end address of the ASLR region.
    VAddr GetASLRRegionEndAddress() const;

    /// Gets the total size of the ASLR region in bytes.
    u64 GetASLRRegionSize() const;

    /// Determines whether or not the specified address range is within the ASLR region.
    bool IsWithinASLRRegion(VAddr address, u64 size) const;

    /// Gets the base address of the code region.
    VAddr GetCodeRegionBaseAddress() const;

    /// Gets the end address of the code region.
    VAddr GetCodeRegionEndAddress() const;

    /// Gets the total size of the code region in bytes.
    u64 GetCodeRegionSize() const;

    /// Determines whether or not the specified range is within the code region.
    bool IsWithinCodeRegion(VAddr address, u64 size) const;

    /// Gets the base address of the heap region.
    VAddr GetHeapRegionBaseAddress() const;

    /// Gets the end address of the heap region.
    VAddr GetHeapRegionEndAddress() const;

    /// Gets the total size of the heap region in bytes.
    u64 GetHeapRegionSize() const;

    /// Gets the total size of the current heap in bytes.
    ///
    /// @note This is the currently allocated heap size, not the size
    ///       of the region it's allowed to exist within.
    ///
    u64 GetCurrentHeapSize() const;

    /// Determines whether or not the specified range is within the heap region.
    bool IsWithinHeapRegion(VAddr address, u64 size) const;

    /// Gets the base address of the map region.
    VAddr GetMapRegionBaseAddress() const;

    /// Gets the end address of the map region.
    VAddr GetMapRegionEndAddress() const;

    /// Gets the total size of the map region in bytes.
    u64 GetMapRegionSize() const;

    /// Determines whether or not the specified range is within the map region.
    bool IsWithinMapRegion(VAddr address, u64 size) const;

    /// Gets the base address of the stack region.
    VAddr GetStackRegionBaseAddress() const;

    /// Gets the end address of the stack region.
    VAddr GetStackRegionEndAddress() const;

    /// Gets the total size of the stack region in bytes.
    u64 GetStackRegionSize() const;

    /// Determines whether or not the given address range is within the stack region.
    bool IsWithinStackRegion(VAddr address, u64 size) const;

    /// Gets the base address of the TLS IO region.
    VAddr GetTLSIORegionBaseAddress() const;

    /// Gets the end address of the TLS IO region.
    VAddr GetTLSIORegionEndAddress() const;

    /// Gets the total size of the TLS IO region in bytes.
    u64 GetTLSIORegionSize() const;

    /// Determines if the given address range is within the TLS IO region.
    bool IsWithinTLSIORegion(VAddr address, u64 size) const;

    /// Each VMManager has its own page table, which is set as the main one when the owning
    /// process is scheduled.
    Common::PageTable page_table{Memory::PAGE_BITS};

    using CheckResults = ResultVal<std::tuple<MemoryState, VMAPermission, MemoryAttribute>>;

    /// Checks if an address range adheres to the specified states, permissions, and attributes.
    ///
    /// @param address         The starting address of the address range.
    /// @param size            The size of the address range.
    /// @param state_mask      The memory state mask.
    /// @param state           The state to compare the individual VMA states against,
    ///                        which is done in the form of: (vma.state & state_mask) != state.
    /// @param permission_mask The memory permissions mask.
    /// @param permissions     The permission to compare the individual VMA permissions against,
    ///                        which is done in the form of:
    ///                        (vma.permission & permission_mask) != permission.
    /// @param attribute_mask  The memory attribute mask.
    /// @param attribute       The memory attributes to compare the individual VMA attributes
    ///                        against, which is done in the form of:
    ///                        (vma.attributes & attribute_mask) != attribute.
    /// @param ignore_mask     The memory attributes to ignore during the check.
    ///
    /// @returns If successful, returns a tuple containing the memory attributes
    ///          (with ignored bits specified by ignore_mask unset), memory permissions, and
    ///          memory state across the memory range.
    /// @returns If not successful, returns ERR_INVALID_ADDRESS_STATE.
    ///
    CheckResults CheckRangeState(VAddr address, u64 size, MemoryState state_mask, MemoryState state,
                                 VMAPermission permission_mask, VMAPermission permissions,
                                 MemoryAttribute attribute_mask, MemoryAttribute attribute,
                                 MemoryAttribute ignore_mask) const;
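
    // Illustrative sketch (enumerator names such as MemoryState::Heap, VMAPermission::ReadWrite,
    // MemoryAttribute::Mask, and MemoryAttribute::None are assumed for illustration): requiring
    // a range to be read/write heap memory with no attributes set:
    //
    //     const auto results = CheckRangeState(
    //         address, size, MemoryState::Heap, MemoryState::Heap, VMAPermission::ReadWrite,
    //         VMAPermission::ReadWrite, MemoryAttribute::Mask, MemoryAttribute::None,
    //         MemoryAttribute::None);
    //     if (results.Failed()) {
    //         return results.Code(); // ERR_INVALID_ADDRESS_STATE
    //     }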

private:
    using VMAIter = VMAMap::iterator;

    /// Converts a VMAHandle to a mutable VMAIter.
    VMAIter StripIterConstness(const VMAHandle& iter);

    /// Unmaps the given VMA.
    VMAIter Unmap(VMAIter vma);

    /**
     * Carves a VMA of a specific size at the specified address by splitting Free VMAs while doing
     * the appropriate error checking.
     */
    ResultVal<VMAIter> CarveVMA(VAddr base, u64 size);

    /**
     * Splits the edges of the given range of non-Free VMAs so that there is a VMA split at each
     * end of the range.
     */
    ResultVal<VMAIter> CarveVMARange(VAddr base, u64 size);

    /**
     * Splits a VMA in two, at the specified offset.
     * @returns The right side of the split, with the original iterator becoming the left side.
     */
    VMAIter SplitVMA(VMAIter vma, u64 offset_in_vma);

    /**
     * Checks for and merges the specified VMA with adjacent ones if possible.
     * @returns The merged VMA, or the original if no merging was possible.
     */
    VMAIter MergeAdjacent(VMAIter vma);

    /**
     * Merges two adjacent VMAs.
     */
    void MergeAdjacentVMA(VirtualMemoryArea& left, const VirtualMemoryArea& right);

    /// Updates the pages corresponding to this VMA so they match the VMA's attributes.
    void UpdatePageTableForVMA(const VirtualMemoryArea& vma);

    /// Initializes memory region ranges to adhere to a given address space type.
    void InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType type);

    /// Clears the underlying map and page table.
    void Clear();

    /// Clears out the VMA map, unmapping any previously mapped ranges.
    void ClearVMAMap();

    /// Clears out the page table.
    void ClearPageTable();

    /// Gets the amount of memory currently mapped (state != Unmapped) in a range.
    ResultVal<std::size_t> SizeOfAllocatedVMAsInRange(VAddr address, std::size_t size) const;

    /// Gets the amount of memory unmappable by UnmapPhysicalMemory in a range.
    ResultVal<std::size_t> SizeOfUnmappablePhysicalMemoryInRange(VAddr address,
                                                                 std::size_t size) const;

    /**
     * A map covering the entirety of the managed address space, keyed by the `base` field of each
     * VMA. It must always be modified by splitting or merging VMAs, so that the invariant
     * `elem.base + elem.size == next.base` is preserved, and mergeable regions must always be
     * merged when possible so that no two similar and adjacent regions exist that have not been
     * merged.
     */
    VMAMap vma_map;

    u32 address_space_width = 0;
    VAddr address_space_base = 0;
    VAddr address_space_end = 0;

    VAddr aslr_region_base = 0;
    VAddr aslr_region_end = 0;

    VAddr code_region_base = 0;
    VAddr code_region_end = 0;

    VAddr heap_region_base = 0;
    VAddr heap_region_end = 0;

    VAddr map_region_base = 0;
    VAddr map_region_end = 0;

    VAddr stack_region_base = 0;
    VAddr stack_region_end = 0;

    VAddr tls_io_region_base = 0;
    VAddr tls_io_region_end = 0;

    // Memory used to back the allocations in the regular heap. A single vector is used to cover
    // the entire virtual address space extents that bound the allocations, including any holes.
    // This makes deallocation and reallocation of holes fast and keeps process memory contiguous
    // in the emulator address space, allowing Memory::GetPointer to be reasonably safe.
    std::shared_ptr<PhysicalMemory> heap_memory;

    // The end of the currently allocated heap. This is not an inclusive
    // end of the range. This is essentially 'base_address + current_size'.
    VAddr heap_end = 0;

    // The current amount of memory mapped via MapPhysicalMemory.
    // This is used here (and in Nintendo's kernel) only for debugging, and does not impact
    // any behavior.
    u64 physical_memory_mapped = 0;

    Core::System& system;
};

} // namespace Kernel