@@ -36,10 +36,11 @@ struct Memory::Impl {
     }
 
     void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) {
-        ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
-        ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
+        ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
+        ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", base);
         ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}", target);
-        MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory);
+        MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, target,
+                 Common::PageType::Memory);
 
         if (Settings::IsFastmemEnabled()) {
             system.DeviceMemory().buffer.Map(base, target - DramMemoryMap::Base, size);
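The alignment asserts rely on the usual power-of-two identity: a value is page-aligned exactly when its low YUZU_PAGEBITS bits are clear. A minimal sketch of that check, with constant values assumed to match the 4 KiB pages these renamed constants describe:

```cpp
#include <cstdint>

// Assumed values for illustration; the real constants live in the yuzu tree.
constexpr std::uint64_t YUZU_PAGEBITS = 12;
constexpr std::uint64_t YUZU_PAGESIZE = 1ULL << YUZU_PAGEBITS; // 0x1000
constexpr std::uint64_t YUZU_PAGEMASK = YUZU_PAGESIZE - 1;     // 0x0FFF

// A value is page-aligned iff its low YUZU_PAGEBITS bits are zero.
constexpr bool IsPageAligned(std::uint64_t value) {
    return (value & YUZU_PAGEMASK) == 0;
}

static_assert(IsPageAligned(0x10000));  // exact page boundary
static_assert(!IsPageAligned(0x10001)); // one byte past a boundary
```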
@@ -47,9 +48,10 @@ struct Memory::Impl {
     }
 
     void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
-        ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
-        ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
-        MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, 0, Common::PageType::Unmapped);
+        ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
+        ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", base);
+        MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0,
+                 Common::PageType::Unmapped);
 
         if (Settings::IsFastmemEnabled()) {
             system.DeviceMemory().buffer.Unmap(base, size);
@@ -57,7 +59,7 @@ struct Memory::Impl {
     }
 
     [[nodiscard]] u8* GetPointerFromRasterizerCachedMemory(VAddr vaddr) const {
-        const PAddr paddr{current_page_table->backing_addr[vaddr >> PAGE_BITS]};
+        const PAddr paddr{current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};
 
         if (!paddr) {
             return {};
@@ -67,7 +69,7 @@ struct Memory::Impl {
     }
 
     [[nodiscard]] u8* GetPointerFromDebugMemory(VAddr vaddr) const {
-        const PAddr paddr{current_page_table->backing_addr[vaddr >> PAGE_BITS]};
+        const PAddr paddr{current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};
 
         if (paddr == 0) {
             return {};
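Both getters depend on how backing_addr is populated in MapPages further down: each entry stores the page's physical base minus its virtual base, so adding back the full vaddr (offset bits included) recovers the physical address in one add. A condensed sketch of that lookup, using stand-in types for the real yuzu typedefs:

```cpp
#include <cstdint>
#include <vector>

using VAddr = std::uint64_t; // stand-ins for the real yuzu typedefs
using PAddr = std::uint64_t;
constexpr std::uint64_t YUZU_PAGEBITS = 12; // assumed 4 KiB pages

// backing_addr[page] holds (physical_base_of_page - virtual_base_of_page),
// or 0 for pages with no physical backing.
PAddr VirtualToPhysical(const std::vector<PAddr>& backing_addr, VAddr vaddr) {
    const PAddr biased{backing_addr[vaddr >> YUZU_PAGEBITS]};
    if (!biased) {
        return 0; // unmapped, as in the null checks above
    }
    return biased + vaddr; // page base and page offset recovered together
}
```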
@@ -176,13 +178,14 @@ struct Memory::Impl {
                    auto on_unmapped, auto on_memory, auto on_rasterizer, auto increment) {
         const auto& page_table = process.PageTable().PageTableImpl();
         std::size_t remaining_size = size;
-        std::size_t page_index = addr >> PAGE_BITS;
-        std::size_t page_offset = addr & PAGE_MASK;
+        std::size_t page_index = addr >> YUZU_PAGEBITS;
+        std::size_t page_offset = addr & YUZU_PAGEMASK;
 
         while (remaining_size) {
             const std::size_t copy_amount =
-                std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
-            const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
+                std::min(static_cast<std::size_t>(YUZU_PAGESIZE) - page_offset, remaining_size);
+            const auto current_vaddr =
+                static_cast<VAddr>((page_index << YUZU_PAGEBITS) + page_offset);
 
             const auto [pointer, type] = page_table.pointers[page_index].PointerType();
             switch (type) {
@@ -192,7 +195,7 @@ struct Memory::Impl {
             }
             case Common::PageType::Memory: {
                 DEBUG_ASSERT(pointer);
-                u8* mem_ptr = pointer + page_offset + (page_index << PAGE_BITS);
+                u8* mem_ptr = pointer + page_offset + (page_index << YUZU_PAGEBITS);
                 on_memory(copy_amount, mem_ptr);
                 break;
             }
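The walk splits an arbitrary [addr, addr + size) range into chunks that never cross a page boundary: only the first chunk can start mid-page, and copy_amount is clamped to whichever is smaller, the rest of the current page or the rest of the request. A standalone sketch of just that arithmetic, with the per-type dispatch elided:

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>

constexpr std::uint64_t YUZU_PAGEBITS = 12; // assumed 4 KiB pages
constexpr std::uint64_t YUZU_PAGESIZE = 1ULL << YUZU_PAGEBITS;
constexpr std::uint64_t YUZU_PAGEMASK = YUZU_PAGESIZE - 1;

// Calls on_chunk(page_index, page_offset, copy_amount) once per chunk.
template <typename OnChunk>
void WalkPages(std::uint64_t addr, std::size_t size, OnChunk&& on_chunk) {
    std::size_t remaining_size = size;
    std::size_t page_index = addr >> YUZU_PAGEBITS;
    std::size_t page_offset = addr & YUZU_PAGEMASK;

    while (remaining_size > 0) {
        // Clamp to the end of the current page or the end of the request.
        const std::size_t copy_amount =
            std::min(static_cast<std::size_t>(YUZU_PAGESIZE) - page_offset, remaining_size);
        on_chunk(page_index, page_offset, copy_amount);

        ++page_index;
        page_offset = 0; // every chunk after the first starts page-aligned
        remaining_size -= copy_amount;
    }
}
```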
@@ -339,10 +342,10 @@ struct Memory::Impl {
 
         // Iterate over a contiguous CPU address space, marking/unmarking the region.
         // The region is at a granularity of CPU pages.
-        const u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
-        for (u64 i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
+        const u64 num_pages = ((vaddr + size - 1) >> YUZU_PAGEBITS) - (vaddr >> YUZU_PAGEBITS) + 1;
+        for (u64 i = 0; i < num_pages; ++i, vaddr += YUZU_PAGESIZE) {
             const Common::PageType page_type{
-                current_page_table->pointers[vaddr >> PAGE_BITS].Type()};
+                current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Type()};
             if (debug) {
                 // Switch page type to debug if now debug
                 switch (page_type) {
@@ -354,7 +357,7 @@ struct Memory::Impl {
                     // Page is already marked.
                     break;
                 case Common::PageType::Memory:
-                    current_page_table->pointers[vaddr >> PAGE_BITS].Store(
+                    current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
                         nullptr, Common::PageType::DebugMemory);
                     break;
                 default:
@@ -371,9 +374,9 @@ struct Memory::Impl {
                     // Don't mess with already non-debug or rasterizer memory.
                     break;
                 case Common::PageType::DebugMemory: {
-                    u8* const pointer{GetPointerFromDebugMemory(vaddr & ~PAGE_MASK)};
-                    current_page_table->pointers[vaddr >> PAGE_BITS].Store(
-                        pointer - (vaddr & ~PAGE_MASK), Common::PageType::Memory);
+                    u8* const pointer{GetPointerFromDebugMemory(vaddr & ~YUZU_PAGEMASK)};
+                    current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
+                        pointer - (vaddr & ~YUZU_PAGEMASK), Common::PageType::Memory);
                     break;
                 }
                 default:
@@ -398,10 +401,10 @@ struct Memory::Impl {
         // address space, marking the region as un/cached. The region is marked un/cached at a
         // granularity of CPU pages, hence why we iterate on a CPU page basis (note: GPU page size
         // is different). This assumes the specified GPU address region is contiguous as well.
-        const u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
-        for (u64 i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
+        const u64 num_pages = ((vaddr + size - 1) >> YUZU_PAGEBITS) - (vaddr >> YUZU_PAGEBITS) + 1;
+        for (u64 i = 0; i < num_pages; ++i, vaddr += YUZU_PAGESIZE) {
             const Common::PageType page_type{
-                current_page_table->pointers[vaddr >> PAGE_BITS].Type()};
+                current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Type()};
             if (cached) {
                 // Switch page type to cached if now cached
                 switch (page_type) {
@@ -411,7 +414,7 @@ struct Memory::Impl {
                     break;
                 case Common::PageType::DebugMemory:
                 case Common::PageType::Memory:
-                    current_page_table->pointers[vaddr >> PAGE_BITS].Store(
+                    current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
                         nullptr, Common::PageType::RasterizerCachedMemory);
                     break;
                 case Common::PageType::RasterizerCachedMemory:
@@ -434,16 +437,16 @@ struct Memory::Impl {
                     // that this area is already unmarked as cached.
                     break;
                 case Common::PageType::RasterizerCachedMemory: {
-                    u8* const pointer{GetPointerFromRasterizerCachedMemory(vaddr & ~PAGE_MASK)};
+                    u8* const pointer{GetPointerFromRasterizerCachedMemory(vaddr & ~YUZU_PAGEMASK)};
                     if (pointer == nullptr) {
                         // It's possible that this function has been called while updating the
                         // pagetable after unmapping a VMA. In that case the underlying VMA will no
                         // longer exist, and we should just leave the pagetable entry blank.
-                        current_page_table->pointers[vaddr >> PAGE_BITS].Store(
+                        current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
                             nullptr, Common::PageType::Unmapped);
                     } else {
-                        current_page_table->pointers[vaddr >> PAGE_BITS].Store(
-                            pointer - (vaddr & ~PAGE_MASK), Common::PageType::Memory);
+                        current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
+                            pointer - (vaddr & ~YUZU_PAGEMASK), Common::PageType::Memory);
                     }
                     break;
                 }
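Here `vaddr & ~YUZU_PAGEMASK` clears the offset bits, giving the virtual base of the page; subtracting that base from the page's host pointer re-creates the biased pointer MapPages originally stored, so later lookups can again add the raw vaddr directly. A self-contained sketch of just the re-biasing step, with a hypothetical helper name and stand-in types:

```cpp
#include <cstdint>

using u8 = std::uint8_t; // stand-ins for the real yuzu typedefs
using VAddr = std::uint64_t;
constexpr VAddr YUZU_PAGEMASK = 0xFFF; // assumed 4 KiB pages

// Hypothetical helper mirroring the un-cache path above: given the host
// pointer to a page's first byte, rebuild the biased entry that MapPages
// stores, so that (entry + vaddr) is the host address of any byte in the page.
u8* RebiasForTable(u8* host_page_start, VAddr vaddr) {
    const VAddr page_base = vaddr & ~YUZU_PAGEMASK; // clear the offset bits
    return host_page_start - page_base;
}
```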
@@ -465,8 +468,8 @@ struct Memory::Impl {
      */
     void MapPages(Common::PageTable& page_table, VAddr base, u64 size, PAddr target,
                   Common::PageType type) {
-        LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", target, base * PAGE_SIZE,
-                  (base + size) * PAGE_SIZE);
+        LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", target, base * YUZU_PAGESIZE,
+                  (base + size) * YUZU_PAGESIZE);
 
         // During boot, current_page_table might not be set yet, in which case we need not flush
         if (system.IsPoweredOn()) {
@@ -474,7 +477,7 @@ struct Memory::Impl {
             for (u64 i = 0; i < size; i++) {
                 const auto page = base + i;
                 if (page_table.pointers[page].Type() == Common::PageType::RasterizerCachedMemory) {
-                    gpu.FlushAndInvalidateRegion(page << PAGE_BITS, PAGE_SIZE);
+                    gpu.FlushAndInvalidateRegion(page << YUZU_PAGEBITS, YUZU_PAGESIZE);
                 }
             }
         }
@ -485,7 +488,7 @@ struct Memory::Impl {
if ( ! target ) {
if ( ! target ) {
ASSERT_MSG ( type ! = Common : : PageType : : Memory ,
ASSERT_MSG ( type ! = Common : : PageType : : Memory ,
" Mapping memory page without a pointer @ {:016x} " , base * PAGE_ SIZE) ;
" Mapping memory page without a pointer @ {:016x} " , base * YUZU_ PAGESIZE) ;
while ( base ! = end ) {
while ( base ! = end ) {
page_table . pointers [ base ] . Store ( nullptr , type ) ;
page_table . pointers [ base ] . Store ( nullptr , type ) ;
@ -496,14 +499,14 @@ struct Memory::Impl {
} else {
} else {
while ( base ! = end ) {
while ( base ! = end ) {
page_table . pointers [ base ] . Store (
page_table . pointers [ base ] . Store (
system . DeviceMemory ( ) . GetPointer ( target ) - ( base < < PAGE_ BITS) , type ) ;
system . DeviceMemory ( ) . GetPointer ( target ) - ( base < < YUZU_ PAGEBITS) , type ) ;
page_table . backing_addr [ base ] = target - ( base < < PAGE_ BITS) ;
page_table . backing_addr [ base ] = target - ( base < < YUZU_ PAGEBITS) ;
ASSERT_MSG ( page_table . pointers [ base ] . Pointer ( ) ,
ASSERT_MSG ( page_table . pointers [ base ] . Pointer ( ) ,
" memory mapping base yield a nullptr within the table " ) ;
" memory mapping base yield a nullptr within the table " ) ;
base + = 1 ;
base + = 1 ;
target + = PAGE_ SIZE;
target + = YUZU_ PAGESIZE;
}
}
}
}
}
}
@@ -518,7 +521,7 @@ struct Memory::Impl {
         }
 
         // Avoid adding any extra logic to this fast-path block
-        const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> PAGE_BITS].Raw();
+        const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Raw();
         if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
             return &pointer[vaddr];
         }
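The fast path works only because of the pointer biasing set up in MapPages: ExtractPointer unpacks the host-pointer half of the packed PageInfo word, and indexing it with the raw vaddr needs no separate offset math. A sketch of the packed-entry idea; the bit layout below is an illustrative assumption, the real Common::PageTable::PageInfo differs in detail:

```cpp
#include <cstdint>

using u8 = std::uint8_t; // stand-ins for the real yuzu typedefs
using VAddr = std::uint64_t;

// Illustrative packing: low 2 bits hold the page type, the remaining bits
// hold the biased host pointer (pointers are at least 4-byte aligned).
constexpr std::uintptr_t kTypeMask = 0x3;

u8* ExtractPointer(std::uintptr_t raw) {
    return reinterpret_cast<u8*>(raw & ~kTypeMask);
}

u8* FastLookup(std::uintptr_t raw_entry, VAddr vaddr) {
    if (u8* const pointer = ExtractPointer(raw_entry)) {
        return &pointer[vaddr]; // biased base + full vaddr = host address
    }
    return nullptr; // slow path: unmapped, rasterizer-cached, or debug page
}
```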
@@ -657,7 +660,7 @@ void Memory::UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
 bool Memory::IsValidVirtualAddress(const VAddr vaddr) const {
     const Kernel::KProcess& process = *system.CurrentProcess();
     const auto& page_table = process.PageTable().PageTableImpl();
-    const size_t page = vaddr >> PAGE_BITS;
+    const size_t page = vaddr >> YUZU_PAGEBITS;
     if (page >= page_table.pointers.size()) {
         return false;
     }
@@ -668,9 +671,9 @@ bool Memory::IsValidVirtualAddress(const VAddr vaddr) const {
 
 bool Memory::IsValidVirtualAddressRange(VAddr base, u64 size) const {
     VAddr end = base + size;
-    VAddr page = Common::AlignDown(base, PAGE_SIZE);
+    VAddr page = Common::AlignDown(base, YUZU_PAGESIZE);
 
-    for (; page < end; page += PAGE_SIZE) {
+    for (; page < end; page += YUZU_PAGESIZE) {
         if (!IsValidVirtualAddress(page)) {
             return false;
         }
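For a power-of-two page size, aligning down is just a mask: rounding base down to its page start guarantees the loop visits every page the range touches, including a partially covered first page. A minimal sketch, assuming Common::AlignDown behaves this way for power-of-two alignments:

```cpp
#include <cstdint>

// Assumed behavior of Common::AlignDown for power-of-two alignments.
constexpr std::uint64_t AlignDown(std::uint64_t value, std::uint64_t align) {
    return value & ~(align - 1);
}

static_assert(AlignDown(0x1234, 0x1000) == 0x1000); // mid-page rounds down
static_assert(AlignDown(0x2000, 0x1000) == 0x2000); // aligned value stays put
```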