@@ -2,221 +2,336 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 
 #include "core/hle/kernel/k_memory_block_manager.h"
-#include "core/hle/kernel/memory_types.h"
 
 namespace Kernel {
 
-KMemoryBlockManager::KMemoryBlockManager(VAddr start_addr_, VAddr end_addr_)
-    : start_addr{start_addr_}, end_addr{end_addr_} {
-    const u64 num_pages{(end_addr - start_addr) / PageSize};
-    memory_block_tree.emplace_back(start_addr, num_pages, KMemoryState::Free,
-                                   KMemoryPermission::None, KMemoryAttribute::None);
-}
+KMemoryBlockManager::KMemoryBlockManager() = default;
+
+Result KMemoryBlockManager::Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager) {
+    // Allocate a block to encapsulate the address space, insert it into the tree.
+    KMemoryBlock* start_block = slab_manager->Allocate();
+    R_UNLESS(start_block != nullptr, ResultOutOfResource);
+
+    // Set our start and end.
+    m_start_address = st;
+    m_end_address = nd;
+    ASSERT(Common::IsAligned(m_start_address, PageSize));
+    ASSERT(Common::IsAligned(m_end_address, PageSize));
+
+    // Initialize and insert the block.
+    start_block->Initialize(m_start_address, (m_end_address - m_start_address) / PageSize,
+                            KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None);
+    m_memory_block_tree.insert(*start_block);
+
+    return ResultSuccess;
+}
 
-KMemoryBlockManager::iterator KMemoryBlockManager::FindIterator(VAddr addr) {
-    auto node{memory_block_tree.begin()};
-    while (node != end()) {
-        const VAddr node_end_addr{node->GetNumPages() * PageSize + node->GetAddress()};
-        if (node->GetAddress() <= addr && node_end_addr - 1 >= addr) {
-            return node;
-        }
-        node = std::next(node);
-    }
-    return end();
-}
+void KMemoryBlockManager::Finalize(KMemoryBlockSlabManager* slab_manager,
+                                   HostUnmapCallback&& host_unmap_callback) {
+    // Erase every block until we have none left.
+    auto it = m_memory_block_tree.begin();
+    while (it != m_memory_block_tree.end()) {
+        KMemoryBlock* block = std::addressof(*it);
+        it = m_memory_block_tree.erase(it);
+        slab_manager->Free(block);
+        host_unmap_callback(block->GetAddress(), block->GetSize());
+    }
+
+    ASSERT(m_memory_block_tree.empty());
+}
 
-VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
-                                        std::size_t num_pages, std::size_t align,
-                                        std::size_t offset, std::size_t guard_pages) {
-    if (num_pages == 0) {
-        return {};
-    }
-
-    const VAddr region_end{region_start + region_num_pages * PageSize};
-    const VAddr region_last{region_end - 1};
-    for (auto it{FindIterator(region_start)}; it != memory_block_tree.cend(); it++) {
-        const auto info{it->GetMemoryInfo()};
-        if (region_last < info.GetAddress()) {
-            break;
-        }
-
-        if (info.state != KMemoryState::Free) {
-            continue;
-        }
-
-        VAddr area{(info.GetAddress() <= region_start) ? region_start : info.GetAddress()};
-        area += guard_pages * PageSize;
-
-        const VAddr offset_area{Common::AlignDown(area, align) + offset};
-        area = (area <= offset_area) ? offset_area : offset_area + align;
-
-        const VAddr area_end{area + num_pages * PageSize + guard_pages * PageSize};
-        const VAddr area_last{area_end - 1};
-
-        if (info.GetAddress() <= area && area < area_last && area_last <= region_last &&
-            area_last <= info.GetLastAddress()) {
-            return area;
-        }
-    }
-
-    return {};
-}
+VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, size_t region_num_pages,
+                                        size_t num_pages, size_t alignment, size_t offset,
+                                        size_t guard_pages) const {
+    if (num_pages > 0) {
+        const VAddr region_end = region_start + region_num_pages * PageSize;
+        const VAddr region_last = region_end - 1;
+        for (const_iterator it = this->FindIterator(region_start); it != m_memory_block_tree.cend();
+             it++) {
+            const KMemoryInfo info = it->GetMemoryInfo();
+            if (region_last < info.GetAddress()) {
+                break;
+            }
+            if (info.m_state != KMemoryState::Free) {
+                continue;
+            }
+
+            VAddr area = (info.GetAddress() <= region_start) ? region_start : info.GetAddress();
+            area += guard_pages * PageSize;
+
+            const VAddr offset_area = Common::AlignDown(area, alignment) + offset;
+            area = (area <= offset_area) ? offset_area : offset_area + alignment;
+
+            const VAddr area_end = area + num_pages * PageSize + guard_pages * PageSize;
+            const VAddr area_last = area_end - 1;
+
+            if (info.GetAddress() <= area && area < area_last && area_last <= region_last &&
+                area_last <= info.GetLastAddress()) {
+                return area;
+            }
+        }
+    }
+
+    return {};
+}
 
-void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state,
-                                 KMemoryPermission prev_perm, KMemoryAttribute prev_attribute,
-                                 KMemoryState state, KMemoryPermission perm,
-                                 KMemoryAttribute attribute) {
-    const VAddr update_end_addr{addr + num_pages * PageSize};
-    iterator node{memory_block_tree.begin()};
-
-    prev_attribute |= KMemoryAttribute::IpcAndDeviceMapped;
-
-    while (node != memory_block_tree.end()) {
-        KMemoryBlock* block{&(*node)};
-        iterator next_node{std::next(node)};
-        const VAddr cur_addr{block->GetAddress()};
-        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
-
-        if (addr < cur_end_addr && cur_addr < update_end_addr) {
-            if (!block->HasProperties(prev_state, prev_perm, prev_attribute)) {
-                node = next_node;
-                continue;
-            }
-
-            iterator new_node{node};
-            if (addr > cur_addr) {
-                memory_block_tree.insert(node, block->Split(addr));
-            }
-
-            if (update_end_addr < cur_end_addr) {
-                new_node = memory_block_tree.insert(node, block->Split(update_end_addr));
-            }
-
-            new_node->Update(state, perm, attribute);
-
-            MergeAdjacent(new_node, next_node);
-        }
-
-        if (cur_end_addr - 1 >= update_end_addr - 1) {
-            break;
-        }
-
-        node = next_node;
-    }
-}
+void KMemoryBlockManager::CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator,
+                                            VAddr address, size_t num_pages) {
+    // Find the iterator now that we've updated.
+    iterator it = this->FindIterator(address);
+    if (address != m_start_address) {
+        it--;
+    }
+
+    // Coalesce blocks that we can.
+    while (true) {
+        iterator prev = it++;
+        if (it == m_memory_block_tree.end()) {
+            break;
+        }
+
+        if (prev->CanMergeWith(*it)) {
+            KMemoryBlock* block = std::addressof(*it);
+            m_memory_block_tree.erase(it);
+            prev->Add(*block);
+            allocator->Free(block);
+            it = prev;
+        }
+
+        if (address + num_pages * PageSize < it->GetMemoryInfo().GetEndAddress()) {
+            break;
+        }
+    }
+}
+
+void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
+                                 size_t num_pages, KMemoryState state, KMemoryPermission perm,
+                                 KMemoryAttribute attr,
+                                 KMemoryBlockDisableMergeAttribute set_disable_attr,
+                                 KMemoryBlockDisableMergeAttribute clear_disable_attr) {
+    // Ensure for auditing that we never end up with an invalid tree.
+    KScopedMemoryBlockManagerAuditor auditor(this);
+    ASSERT(Common::IsAligned(address, PageSize));
+    ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
+           KMemoryAttribute::None);
+
+    VAddr cur_address = address;
+    size_t remaining_pages = num_pages;
+    iterator it = this->FindIterator(address);
+
+    while (remaining_pages > 0) {
+        const size_t remaining_size = remaining_pages * PageSize;
+        KMemoryInfo cur_info = it->GetMemoryInfo();
+        if (it->HasProperties(state, perm, attr)) {
+            // If we already have the right properties, just advance.
+            if (cur_address + remaining_size < cur_info.GetEndAddress()) {
+                remaining_pages = 0;
+                cur_address += remaining_size;
+            } else {
+                remaining_pages =
+                    (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
+                cur_address = cur_info.GetEndAddress();
+            }
+        } else {
+            // If we need to, create a new block before and insert it.
+            if (cur_info.GetAddress() != cur_address) {
+                KMemoryBlock* new_block = allocator->Allocate();
+
+                it->Split(new_block, cur_address);
+                it = m_memory_block_tree.insert(*new_block);
+                it++;
+
+                cur_info = it->GetMemoryInfo();
+                cur_address = cur_info.GetAddress();
+            }
+
+            // If we need to, create a new block after and insert it.
+            if (cur_info.GetSize() > remaining_size) {
+                KMemoryBlock* new_block = allocator->Allocate();
+
+                it->Split(new_block, cur_address + remaining_size);
+                it = m_memory_block_tree.insert(*new_block);
+
+                cur_info = it->GetMemoryInfo();
+            }
+
+            // Update block state.
+            it->Update(state, perm, attr, cur_address == address, static_cast<u8>(set_disable_attr),
+                       static_cast<u8>(clear_disable_attr));
+            cur_address += cur_info.GetSize();
+            remaining_pages -= cur_info.GetNumPages();
+        }
+        it++;
+    }
+
+    this->CoalesceForUpdate(allocator, address, num_pages);
+}
+
+void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator,
+                                        VAddr address, size_t num_pages, KMemoryState test_state,
+                                        KMemoryPermission test_perm, KMemoryAttribute test_attr,
+                                        KMemoryState state, KMemoryPermission perm,
+                                        KMemoryAttribute attr) {
+    // Ensure for auditing that we never end up with an invalid tree.
+    KScopedMemoryBlockManagerAuditor auditor(this);
+    ASSERT(Common::IsAligned(address, PageSize));
+    ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
+           KMemoryAttribute::None);
+
+    VAddr cur_address = address;
+    size_t remaining_pages = num_pages;
+    iterator it = this->FindIterator(address);
+
+    while (remaining_pages > 0) {
+        const size_t remaining_size = remaining_pages * PageSize;
+        KMemoryInfo cur_info = it->GetMemoryInfo();
+        if (it->HasProperties(test_state, test_perm, test_attr) &&
+            !it->HasProperties(state, perm, attr)) {
+            // If we need to, create a new block before and insert it.
+            if (cur_info.GetAddress() != cur_address) {
+                KMemoryBlock* new_block = allocator->Allocate();
+
+                it->Split(new_block, cur_address);
+                it = m_memory_block_tree.insert(*new_block);
+                it++;
+
+                cur_info = it->GetMemoryInfo();
+                cur_address = cur_info.GetAddress();
+            }
+
+            // If we need to, create a new block after and insert it.
+            if (cur_info.GetSize() > remaining_size) {
+                KMemoryBlock* new_block = allocator->Allocate();
+
+                it->Split(new_block, cur_address + remaining_size);
+                it = m_memory_block_tree.insert(*new_block);
+
+                cur_info = it->GetMemoryInfo();
+            }
+
+            // Update block state.
+            it->Update(state, perm, attr, false, 0, 0);
+            cur_address += cur_info.GetSize();
+            remaining_pages -= cur_info.GetNumPages();
+        } else {
+            // If we already have the right properties, just advance.
+            if (cur_address + remaining_size < cur_info.GetEndAddress()) {
+                remaining_pages = 0;
+                cur_address += remaining_size;
+            } else {
+                remaining_pages =
+                    (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
+                cur_address = cur_info.GetEndAddress();
+            }
+        }
+        it++;
+    }
+
+    this->CoalesceForUpdate(allocator, address, num_pages);
+}
 
-void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState state,
-                                 KMemoryPermission perm, KMemoryAttribute attribute) {
-    const VAddr update_end_addr{addr + num_pages * PageSize};
-    iterator node{memory_block_tree.begin()};
-
-    while (node != memory_block_tree.end()) {
-        KMemoryBlock* block{&(*node)};
-        iterator next_node{std::next(node)};
-        const VAddr cur_addr{block->GetAddress()};
-        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
-
-        if (addr < cur_end_addr && cur_addr < update_end_addr) {
-            iterator new_node{node};
-
-            if (addr > cur_addr) {
-                memory_block_tree.insert(node, block->Split(addr));
-            }
-
-            if (update_end_addr < cur_end_addr) {
-                new_node = memory_block_tree.insert(node, block->Split(update_end_addr));
-            }
-
-            new_node->Update(state, perm, attribute);
-
-            MergeAdjacent(new_node, next_node);
-        }
-
-        if (cur_end_addr - 1 >= update_end_addr - 1) {
-            break;
-        }
-
-        node = next_node;
-    }
-}
-
-void KMemoryBlockManager::UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func,
-                                     KMemoryPermission perm) {
-    const VAddr update_end_addr{addr + num_pages * PageSize};
-    iterator node{memory_block_tree.begin()};
-
-    while (node != memory_block_tree.end()) {
-        KMemoryBlock* block{&(*node)};
-        iterator next_node{std::next(node)};
-        const VAddr cur_addr{block->GetAddress()};
-        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
-
-        if (addr < cur_end_addr && cur_addr < update_end_addr) {
-            iterator new_node{node};
-
-            if (addr > cur_addr) {
-                memory_block_tree.insert(node, block->Split(addr));
-            }
-
-            if (update_end_addr < cur_end_addr) {
-                new_node = memory_block_tree.insert(node, block->Split(update_end_addr));
-            }
-
-            lock_func(new_node, perm);
-
-            MergeAdjacent(new_node, next_node);
-        }
-
-        if (cur_end_addr - 1 >= update_end_addr - 1) {
-            break;
-        }
-
-        node = next_node;
-    }
-}
+void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
+                                     size_t num_pages, MemoryBlockLockFunction lock_func,
+                                     KMemoryPermission perm) {
+    // Ensure for auditing that we never end up with an invalid tree.
+    KScopedMemoryBlockManagerAuditor auditor(this);
+    ASSERT(Common::IsAligned(address, PageSize));
+
+    VAddr cur_address = address;
+    size_t remaining_pages = num_pages;
+    iterator it = this->FindIterator(address);
+
+    const VAddr end_address = address + (num_pages * PageSize);
+
+    while (remaining_pages > 0) {
+        const size_t remaining_size = remaining_pages * PageSize;
+        KMemoryInfo cur_info = it->GetMemoryInfo();
+
+        // If we need to, create a new block before and insert it.
+        if (cur_info.m_address != cur_address) {
+            KMemoryBlock* new_block = allocator->Allocate();
+
+            it->Split(new_block, cur_address);
+            it = m_memory_block_tree.insert(*new_block);
+            it++;
+
+            cur_info = it->GetMemoryInfo();
+            cur_address = cur_info.GetAddress();
+        }
+
+        if (cur_info.GetSize() > remaining_size) {
+            // If we need to, create a new block after and insert it.
+            KMemoryBlock* new_block = allocator->Allocate();
+
+            it->Split(new_block, cur_address + remaining_size);
+            it = m_memory_block_tree.insert(*new_block);
+
+            cur_info = it->GetMemoryInfo();
+        }
+
+        // Call the locked update function.
+        (std::addressof(*it)->*lock_func)(perm, cur_info.GetAddress() == address,
+                                          cur_info.GetEndAddress() == end_address);
+        cur_address += cur_info.GetSize();
+        remaining_pages -= cur_info.GetNumPages();
+        it++;
+    }
+
+    this->CoalesceForUpdate(allocator, address, num_pages);
+}
 
-void KMemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& func) {
-    const_iterator it{FindIterator(start)};
-    KMemoryInfo info{};
-    do {
-        info = it->GetMemoryInfo();
-        func(info);
-        it = std::next(it);
-    } while (info.addr + info.size - 1 < end - 1 && it != cend());
-}
-
-void KMemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) {
-    KMemoryBlock* block{&(*it)};
-
-    auto EraseIt = [&](const iterator it_to_erase) {
-        if (next_it == it_to_erase) {
-            next_it = std::next(next_it);
-        }
-        memory_block_tree.erase(it_to_erase);
-    };
-
-    if (it != memory_block_tree.begin()) {
-        KMemoryBlock* prev{&(*std::prev(it))};
-
-        if (block->HasSameProperties(*prev)) {
-            const iterator prev_it{std::prev(it)};
-
-            prev->Add(block->GetNumPages());
-            EraseIt(it);
-
-            it = prev_it;
-            block = prev;
-        }
-    }
-
-    if (it != cend()) {
-        const KMemoryBlock* const next{&(*std::next(it))};
-
-        if (block->HasSameProperties(*next)) {
-            block->Add(next->GetNumPages());
-            EraseIt(std::next(it));
-        }
-    }
-}
+// Debug.
+bool KMemoryBlockManager::CheckState() const {
+    // Loop over every block, ensuring that we are sorted and coalesced.
+    auto it = m_memory_block_tree.cbegin();
+    auto prev = it++;
+    while (it != m_memory_block_tree.cend()) {
+        const KMemoryInfo prev_info = prev->GetMemoryInfo();
+        const KMemoryInfo cur_info = it->GetMemoryInfo();
+
+        // Sequential blocks which can be merged should be merged.
+        if (prev->CanMergeWith(*it)) {
+            return false;
+        }
+
+        // Sequential blocks should be sequential.
+        if (prev_info.GetEndAddress() != cur_info.GetAddress()) {
+            return false;
+        }
+
+        // If the block is ipc locked, it must have a count.
+        if ((cur_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None &&
+            cur_info.m_ipc_lock_count == 0) {
+            return false;
+        }
+
+        // If the block is device shared, it must have a count.
+        if ((cur_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None &&
+            cur_info.m_device_use_count == 0) {
+            return false;
+        }
+
+        // Advance the iterator.
+        prev = it++;
+    }
+
+    // Our loop will miss checking the last block, potentially, so check it.
+    if (prev != m_memory_block_tree.cend()) {
+        const KMemoryInfo prev_info = prev->GetMemoryInfo();
+
+        // If the block is ipc locked, it must have a count.
+        if ((prev_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None &&
+            prev_info.m_ipc_lock_count == 0) {
+            return false;
+        }
+
+        // If the block is device shared, it must have a count.
+        if ((prev_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None &&
+            prev_info.m_device_use_count == 0) {
+            return false;
+        }
+    }
+
+    return true;
+}
 
 } // namespace Kernel
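For orientation, the rework above amounts to: split the block tree at the update boundaries, rewrite the covered blocks, then merge neighbours that end up with identical properties (CoalesceForUpdate). Block storage is drawn from a KMemoryBlockManagerUpdateAllocator, which is presumably filled before the update starts; that would explain why allocator->Allocate() is never checked for failure in the new code. Below is a minimal, self-contained C++ sketch of that split/update/coalesce bookkeeping over a hypothetical std::map-based BlockMap; the names (Block, BlockMap, SplitAt) are illustrative only and are not yuzu's KMemoryBlock/KMemoryBlockTree API.

// Illustrative sketch only: a simplified stand-in for the kernel's block tree.
#include <cstdint>
#include <iostream>
#include <iterator>
#include <map>

constexpr std::uint64_t PageSize = 0x1000;

struct Block {
    std::uint64_t num_pages{};
    int state{}; // stand-in for state/permission/attribute
};

using BlockMap = std::map<std::uint64_t, Block>; // base address -> block

// Split the block containing 'addr' so that 'addr' becomes a block boundary.
// Assumes 'addr' falls within the managed range.
void SplitAt(BlockMap& blocks, std::uint64_t addr) {
    auto it = std::prev(blocks.upper_bound(addr));
    const std::uint64_t base = it->first;
    const std::uint64_t block_end = base + it->second.num_pages * PageSize;
    if (base == addr || addr == block_end) {
        return; // already a boundary
    }
    const std::uint64_t first_pages = (addr - base) / PageSize;
    const Block tail{it->second.num_pages - first_pages, it->second.state};
    it->second.num_pages = first_pages;
    blocks.emplace(addr, tail);
}

// Set the state of [addr, addr + num_pages * PageSize), then merge equal neighbours,
// mirroring the Update + CoalesceForUpdate sequence in the diff above.
void Update(BlockMap& blocks, std::uint64_t addr, std::uint64_t num_pages, int state) {
    const std::uint64_t end = addr + num_pages * PageSize;
    SplitAt(blocks, addr);
    SplitAt(blocks, end);
    for (auto it = blocks.find(addr); it != blocks.end() && it->first < end; ++it) {
        it->second.state = state;
    }
    // Coalesce: merge adjacent blocks whose properties are now identical.
    for (auto it = std::next(blocks.begin()); it != blocks.end();) {
        auto prev = std::prev(it);
        if (prev->second.state == it->second.state) {
            prev->second.num_pages += it->second.num_pages;
            it = blocks.erase(it);
        } else {
            ++it;
        }
    }
}

int main() {
    BlockMap blocks{{0, Block{16, 0}}};  // one free block covering 16 pages
    Update(blocks, 4 * PageSize, 4, 1);  // carve out and mark 4 pages -> 3 blocks
    Update(blocks, 4 * PageSize, 4, 0);  // revert -> blocks coalesce back to 1
    for (const auto& [base, block] : blocks) {
        std::cout << std::hex << base << std::dec << ": " << block.num_pages
                  << " pages, state " << block.state << '\n';
    }
}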