-rw-r--r--  src/core/hle/kernel/k_page_table.cpp          | 881
-rw-r--r--  src/core/hle/kernel/k_page_table.h            | 285
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvmap.cpp  |   3
3 files changed, 624 insertions(+), 545 deletions(-)
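Most of this diff renames KPageTable's data members to an m_ prefix and replaces std::size_t with size_t, but it also passes through the ASLR region-placement logic in InitializeForProcess. The sketch below is a minimal, self-contained illustration of that placement scheme under assumed sizes — RandomOffset, Region, and every constant here are invented for the example, not yuzu's real API: each region draws a random 2 MiB-aligned offset within the free space, and whichever of two regions draws the later offset is shifted up by the other's size so the two can never overlap.

// Standalone sketch of the ASLR placement scheme visible in
// KPageTable::InitializeForProcess below. The RNG and all names are
// placeholders, not yuzu's real interfaces.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <random>

constexpr std::size_t RegionAlignment = 2 * 1024 * 1024; // 2_MiB in the diff

struct Region {
    std::uint64_t start;
    std::uint64_t end;
};

// Stand-in for KSystemControl::GenerateRandomRange(0, remaining / alignment):
// a random, 2 MiB-aligned offset in [0, remaining_size].
std::uint64_t RandomOffset(std::mt19937_64& rng, std::size_t remaining_size) {
    std::uniform_int_distribution<std::uint64_t> dist(0, remaining_size / RegionAlignment);
    return dist(rng) * RegionAlignment;
}

int main() {
    std::mt19937_64 rng{0x1234};
    const std::uint64_t alloc_start = 0x8000000;           // assumed base
    const std::size_t alias_size = 64 * RegionAlignment;   // assumed sizes
    const std::size_t heap_size = 32 * RegionAlignment;
    const std::size_t remaining = 256 * RegionAlignment;   // alloc_size - needed_size

    const std::uint64_t alias_rnd = RandomOffset(rng, remaining);
    const std::uint64_t heap_rnd = RandomOffset(rng, remaining);

    Region alias{alloc_start + alias_rnd, alloc_start + alias_rnd + alias_size};
    Region heap{alloc_start + heap_rnd, alloc_start + heap_rnd + heap_size};

    // Whichever region drew the later offset is slid up by the other's size,
    // so the two cannot overlap even when the random offsets collide.
    if (alias_rnd <= heap_rnd) {
        heap.start += alias_size;
        heap.end += alias_size;
    } else {
        alias.start += heap_size;
        alias.end += heap_size;
    }

    std::printf("alias: [%llx, %llx)\nheap:  [%llx, %llx)\n",
                (unsigned long long)alias.start, (unsigned long long)alias.end,
                (unsigned long long)heap.start, (unsigned long long)heap.end);
    return 0;
}

The same shift-by-size trick is repeated in the diff for the stack and kernel-map regions against every earlier region, which is why the overlap ASSERTs near the end of InitializeForProcess can demand strict separation.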
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp index 2cf46af0a..fcffc0b88 100644 --- a/src/core/hle/kernel/k_page_table.cpp +++ b/src/core/hle/kernel/k_page_table.cpp @@ -25,7 +25,7 @@ namespace { using namespace Common::Literals; -constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) { +constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) { switch (as_type) { case FileSys::ProgramAddressSpaceType::Is32Bit: case FileSys::ProgramAddressSpaceType::Is32BitNoMap: @@ -43,28 +43,29 @@ constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceT } // namespace KPageTable::KPageTable(Core::System& system_) - : general_lock{system_.Kernel()}, map_physical_memory_lock{system_.Kernel()}, system{system_} {} + : m_general_lock{system_.Kernel()}, + m_map_physical_memory_lock{system_.Kernel()}, m_system{system_} {} KPageTable::~KPageTable() = default; Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, - VAddr code_addr, std::size_t code_size, + VAddr code_addr, size_t code_size, KMemoryBlockSlabManager* mem_block_slab_manager, KMemoryManager::Pool pool) { const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) { - return KAddressSpaceInfo::GetAddressSpaceStart(address_space_width, type); + return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type); }; const auto GetSpaceSize = [this](KAddressSpaceInfo::Type type) { - return KAddressSpaceInfo::GetAddressSpaceSize(address_space_width, type); + return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type); }; // Set our width and heap/alias sizes - address_space_width = GetAddressSpaceWidthFromType(as_type); + m_address_space_width = GetAddressSpaceWidthFromType(as_type); const VAddr start = 0; - const VAddr end{1ULL << address_space_width}; - std::size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)}; - std::size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)}; + const VAddr end{1ULL << m_address_space_width}; + size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)}; + size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)}; ASSERT(code_addr < code_addr + code_size); ASSERT(code_addr + code_size - 1 <= end - 1); @@ -76,67 +77,68 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type } // Set code regions and determine remaining - constexpr std::size_t RegionAlignment{2_MiB}; + constexpr size_t RegionAlignment{2_MiB}; VAddr process_code_start{}; VAddr process_code_end{}; - std::size_t stack_region_size{}; - std::size_t kernel_map_region_size{}; + size_t stack_region_size{}; + size_t kernel_map_region_size{}; - if (address_space_width == 39) { + if (m_address_space_width == 39) { alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias); heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap); stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Stack); kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type::MapSmall); - code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit); - code_region_end = code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit); - alias_code_region_start = code_region_start; - alias_code_region_end = code_region_end; + m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit); + m_code_region_end = m_code_region_start + 
GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit); + m_alias_code_region_start = m_code_region_start; + m_alias_code_region_end = m_code_region_end; process_code_start = Common::AlignDown(code_addr, RegionAlignment); process_code_end = Common::AlignUp(code_addr + code_size, RegionAlignment); } else { stack_region_size = 0; kernel_map_region_size = 0; - code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall); - code_region_end = code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall); - stack_region_start = code_region_start; - alias_code_region_start = code_region_start; - alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) + - GetSpaceSize(KAddressSpaceInfo::Type::MapLarge); - stack_region_end = code_region_end; - kernel_map_region_start = code_region_start; - kernel_map_region_end = code_region_end; - process_code_start = code_region_start; - process_code_end = code_region_end; + m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall); + m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall); + m_stack_region_start = m_code_region_start; + m_alias_code_region_start = m_code_region_start; + m_alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) + + GetSpaceSize(KAddressSpaceInfo::Type::MapLarge); + m_stack_region_end = m_code_region_end; + m_kernel_map_region_start = m_code_region_start; + m_kernel_map_region_end = m_code_region_end; + process_code_start = m_code_region_start; + process_code_end = m_code_region_end; } // Set other basic fields - is_aslr_enabled = enable_aslr; - address_space_start = start; - address_space_end = end; - is_kernel = false; - memory_block_slab_manager = mem_block_slab_manager; + m_enable_aslr = enable_aslr; + m_enable_device_address_space_merge = false; + m_address_space_start = start; + m_address_space_end = end; + m_is_kernel = false; + m_memory_block_slab_manager = mem_block_slab_manager; // Determine the region we can place our undetermineds in VAddr alloc_start{}; - std::size_t alloc_size{}; - if ((process_code_start - code_region_start) >= (end - process_code_end)) { - alloc_start = code_region_start; - alloc_size = process_code_start - code_region_start; + size_t alloc_size{}; + if ((process_code_start - m_code_region_start) >= (end - process_code_end)) { + alloc_start = m_code_region_start; + alloc_size = process_code_start - m_code_region_start; } else { alloc_start = process_code_end; alloc_size = end - process_code_end; } - const std::size_t needed_size{ + const size_t needed_size{ (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size)}; if (alloc_size < needed_size) { ASSERT(false); return ResultOutOfMemory; } - const std::size_t remaining_size{alloc_size - needed_size}; + const size_t remaining_size{alloc_size - needed_size}; // Determine random placements for each region - std::size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{}; + size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{}; if (enable_aslr) { alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * RegionAlignment; @@ -149,124 +151,124 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type } // Setup heap and alias regions - alias_region_start = alloc_start + alias_rnd; - alias_region_end = alias_region_start + alias_region_size; - heap_region_start = alloc_start + heap_rnd; - heap_region_end = heap_region_start + heap_region_size; + m_alias_region_start = alloc_start + 
alias_rnd; + m_alias_region_end = m_alias_region_start + alias_region_size; + m_heap_region_start = alloc_start + heap_rnd; + m_heap_region_end = m_heap_region_start + heap_region_size; if (alias_rnd <= heap_rnd) { - heap_region_start += alias_region_size; - heap_region_end += alias_region_size; + m_heap_region_start += alias_region_size; + m_heap_region_end += alias_region_size; } else { - alias_region_start += heap_region_size; - alias_region_end += heap_region_size; + m_alias_region_start += heap_region_size; + m_alias_region_end += heap_region_size; } // Setup stack region if (stack_region_size) { - stack_region_start = alloc_start + stack_rnd; - stack_region_end = stack_region_start + stack_region_size; + m_stack_region_start = alloc_start + stack_rnd; + m_stack_region_end = m_stack_region_start + stack_region_size; if (alias_rnd < stack_rnd) { - stack_region_start += alias_region_size; - stack_region_end += alias_region_size; + m_stack_region_start += alias_region_size; + m_stack_region_end += alias_region_size; } else { - alias_region_start += stack_region_size; - alias_region_end += stack_region_size; + m_alias_region_start += stack_region_size; + m_alias_region_end += stack_region_size; } if (heap_rnd < stack_rnd) { - stack_region_start += heap_region_size; - stack_region_end += heap_region_size; + m_stack_region_start += heap_region_size; + m_stack_region_end += heap_region_size; } else { - heap_region_start += stack_region_size; - heap_region_end += stack_region_size; + m_heap_region_start += stack_region_size; + m_heap_region_end += stack_region_size; } } // Setup kernel map region if (kernel_map_region_size) { - kernel_map_region_start = alloc_start + kmap_rnd; - kernel_map_region_end = kernel_map_region_start + kernel_map_region_size; + m_kernel_map_region_start = alloc_start + kmap_rnd; + m_kernel_map_region_end = m_kernel_map_region_start + kernel_map_region_size; if (alias_rnd < kmap_rnd) { - kernel_map_region_start += alias_region_size; - kernel_map_region_end += alias_region_size; + m_kernel_map_region_start += alias_region_size; + m_kernel_map_region_end += alias_region_size; } else { - alias_region_start += kernel_map_region_size; - alias_region_end += kernel_map_region_size; + m_alias_region_start += kernel_map_region_size; + m_alias_region_end += kernel_map_region_size; } if (heap_rnd < kmap_rnd) { - kernel_map_region_start += heap_region_size; - kernel_map_region_end += heap_region_size; + m_kernel_map_region_start += heap_region_size; + m_kernel_map_region_end += heap_region_size; } else { - heap_region_start += kernel_map_region_size; - heap_region_end += kernel_map_region_size; + m_heap_region_start += kernel_map_region_size; + m_heap_region_end += kernel_map_region_size; } if (stack_region_size) { if (stack_rnd < kmap_rnd) { - kernel_map_region_start += stack_region_size; - kernel_map_region_end += stack_region_size; + m_kernel_map_region_start += stack_region_size; + m_kernel_map_region_end += stack_region_size; } else { - stack_region_start += kernel_map_region_size; - stack_region_end += kernel_map_region_size; + m_stack_region_start += kernel_map_region_size; + m_stack_region_end += kernel_map_region_size; } } } // Set heap members - current_heap_end = heap_region_start; - max_heap_size = 0; - max_physical_memory_size = 0; + m_current_heap_end = m_heap_region_start; + m_max_heap_size = 0; + m_max_physical_memory_size = 0; // Ensure that we regions inside our address space auto IsInAddressSpace = [&](VAddr addr) { - return address_space_start <= addr && addr 
<= address_space_end; + return m_address_space_start <= addr && addr <= m_address_space_end; }; - ASSERT(IsInAddressSpace(alias_region_start)); - ASSERT(IsInAddressSpace(alias_region_end)); - ASSERT(IsInAddressSpace(heap_region_start)); - ASSERT(IsInAddressSpace(heap_region_end)); - ASSERT(IsInAddressSpace(stack_region_start)); - ASSERT(IsInAddressSpace(stack_region_end)); - ASSERT(IsInAddressSpace(kernel_map_region_start)); - ASSERT(IsInAddressSpace(kernel_map_region_end)); + ASSERT(IsInAddressSpace(m_alias_region_start)); + ASSERT(IsInAddressSpace(m_alias_region_end)); + ASSERT(IsInAddressSpace(m_heap_region_start)); + ASSERT(IsInAddressSpace(m_heap_region_end)); + ASSERT(IsInAddressSpace(m_stack_region_start)); + ASSERT(IsInAddressSpace(m_stack_region_end)); + ASSERT(IsInAddressSpace(m_kernel_map_region_start)); + ASSERT(IsInAddressSpace(m_kernel_map_region_end)); // Ensure that we selected regions that don't overlap - const VAddr alias_start{alias_region_start}; - const VAddr alias_last{alias_region_end - 1}; - const VAddr heap_start{heap_region_start}; - const VAddr heap_last{heap_region_end - 1}; - const VAddr stack_start{stack_region_start}; - const VAddr stack_last{stack_region_end - 1}; - const VAddr kmap_start{kernel_map_region_start}; - const VAddr kmap_last{kernel_map_region_end - 1}; + const VAddr alias_start{m_alias_region_start}; + const VAddr alias_last{m_alias_region_end - 1}; + const VAddr heap_start{m_heap_region_start}; + const VAddr heap_last{m_heap_region_end - 1}; + const VAddr stack_start{m_stack_region_start}; + const VAddr stack_last{m_stack_region_end - 1}; + const VAddr kmap_start{m_kernel_map_region_start}; + const VAddr kmap_last{m_kernel_map_region_end - 1}; ASSERT(alias_last < heap_start || heap_last < alias_start); ASSERT(alias_last < stack_start || stack_last < alias_start); ASSERT(alias_last < kmap_start || kmap_last < alias_start); ASSERT(heap_last < stack_start || stack_last < heap_start); ASSERT(heap_last < kmap_start || kmap_last < heap_start); - current_heap_end = heap_region_start; - max_heap_size = 0; - mapped_physical_memory_size = 0; - memory_pool = pool; + m_current_heap_end = m_heap_region_start; + m_max_heap_size = 0; + m_mapped_physical_memory_size = 0; + m_memory_pool = pool; - page_table_impl.Resize(address_space_width, PageBits); + m_page_table_impl.Resize(m_address_space_width, PageBits); - return memory_block_manager.Initialize(address_space_start, address_space_end, - memory_block_slab_manager); + return m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end, + m_memory_block_slab_manager); } void KPageTable::Finalize() { - memory_block_manager.Finalize(memory_block_slab_manager, [&](VAddr addr, u64 size) { - system.Memory().UnmapRegion(page_table_impl, addr, size); + m_memory_block_manager.Finalize(m_memory_block_slab_manager, [&](VAddr addr, u64 size) { + m_system.Memory().UnmapRegion(m_page_table_impl, addr, size); }); } -Result KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryState state, +Result KPageTable::MapProcessCode(VAddr addr, size_t num_pages, KMemoryState state, KMemoryPermission perm) { const u64 size{num_pages * PageSize}; @@ -274,7 +276,7 @@ Result KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryStat R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Verify that the destination memory is unmapped. 
R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free, @@ -284,43 +286,43 @@ Result KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryStat // Create an update allocator. Result allocator_result{ResultSuccess}; KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - memory_block_slab_manager); + m_memory_block_slab_manager); // Allocate and open. KPageGroup pg; - R_TRY(system.Kernel().MemoryManager().AllocateAndOpen( + R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen( &pg, num_pages, - KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, allocation_option))); + KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, m_allocation_option))); R_TRY(Operate(addr, num_pages, pg, OperationType::MapGroup)); // Update the blocks. - memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, - KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, - KMemoryBlockDisableMergeAttribute::None); + m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, + KMemoryBlockDisableMergeAttribute::None); return ResultSuccess; } -Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) { +Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size) { // Validate the mapping request. R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode), ResultInvalidMemoryRegion); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Verify that the source memory is normal heap. KMemoryState src_state{}; KMemoryPermission src_perm{}; - std::size_t num_src_allocator_blocks{}; + size_t num_src_allocator_blocks{}; R_TRY(this->CheckMemoryState(&src_state, &src_perm, nullptr, &num_src_allocator_blocks, src_address, size, KMemoryState::All, KMemoryState::Normal, KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None)); // Verify that the destination memory is unmapped. - std::size_t num_dst_allocator_blocks{}; + size_t num_dst_allocator_blocks{}; R_TRY(this->CheckMemoryState(&num_dst_allocator_blocks, dst_address, size, KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::None, @@ -328,20 +330,22 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size // Create an update allocator for the source. Result src_allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator src_allocator( - std::addressof(src_allocator_result), memory_block_slab_manager, num_src_allocator_blocks); + KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), + m_memory_block_slab_manager, + num_src_allocator_blocks); R_TRY(src_allocator_result); // Create an update allocator for the destination. Result dst_allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator dst_allocator( - std::addressof(dst_allocator_result), memory_block_slab_manager, num_dst_allocator_blocks); + KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), + m_memory_block_slab_manager, + num_dst_allocator_blocks); R_TRY(dst_allocator_result); // Map the code memory. { // Determine the number of pages being operated on. 
- const std::size_t num_pages = size / PageSize; + const size_t num_pages = size / PageSize; // Create page groups for the memory being mapped. KPageGroup pg; @@ -366,37 +370,37 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size unprot_guard.Cancel(); // Apply the memory block updates. - memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, - src_state, new_perm, KMemoryAttribute::Locked, - KMemoryBlockDisableMergeAttribute::Locked, - KMemoryBlockDisableMergeAttribute::None); - memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, - KMemoryState::AliasCode, new_perm, KMemoryAttribute::None, - KMemoryBlockDisableMergeAttribute::Normal, - KMemoryBlockDisableMergeAttribute::None); + m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, + src_state, new_perm, KMemoryAttribute::Locked, + KMemoryBlockDisableMergeAttribute::Locked, + KMemoryBlockDisableMergeAttribute::None); + m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, + KMemoryState::AliasCode, new_perm, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::Normal, + KMemoryBlockDisableMergeAttribute::None); } return ResultSuccess; } -Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size, +Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size, ICacheInvalidationStrategy icache_invalidation_strategy) { // Validate the mapping request. R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode), ResultInvalidMemoryRegion); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Verify that the source memory is locked normal heap. - std::size_t num_src_allocator_blocks{}; + size_t num_src_allocator_blocks{}; R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size, KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::Locked)); // Verify that the destination memory is aliasable code. - std::size_t num_dst_allocator_blocks{}; + size_t num_dst_allocator_blocks{}; R_TRY(this->CheckMemoryStateContiguous( std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias, KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None, @@ -405,7 +409,7 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si // Determine whether any pages being unmapped are code. bool any_code_pages = false; { - KMemoryBlockManager::const_iterator it = memory_block_manager.FindIterator(dst_address); + KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(dst_address); while (true) { // Get the memory info. const KMemoryInfo info = it->GetMemoryInfo(); @@ -431,9 +435,9 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si SCOPE_EXIT({ if (reprotected_pages && any_code_pages) { if (icache_invalidation_strategy == ICacheInvalidationStrategy::InvalidateRange) { - system.InvalidateCpuInstructionCacheRange(dst_address, size); + m_system.InvalidateCpuInstructionCacheRange(dst_address, size); } else { - system.InvalidateCpuInstructionCaches(); + m_system.InvalidateCpuInstructionCaches(); } } }); @@ -441,19 +445,19 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si // Unmap. { // Determine the number of pages being operated on. 
- const std::size_t num_pages = size / PageSize; + const size_t num_pages = size / PageSize; // Create an update allocator for the source. Result src_allocator_result{ResultSuccess}; KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), - memory_block_slab_manager, + m_memory_block_slab_manager, num_src_allocator_blocks); R_TRY(src_allocator_result); // Create an update allocator for the destination. Result dst_allocator_result{ResultSuccess}; KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), - memory_block_slab_manager, + m_memory_block_slab_manager, num_dst_allocator_blocks); R_TRY(dst_allocator_result); @@ -465,14 +469,14 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si OperationType::ChangePermissions)); // Apply the memory block updates. - memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, - KMemoryState::None, KMemoryPermission::None, - KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, - KMemoryBlockDisableMergeAttribute::Normal); - memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, - KMemoryState::Normal, KMemoryPermission::UserReadWrite, - KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, - KMemoryBlockDisableMergeAttribute::Locked); + m_memory_block_manager.Update( + std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None, + KMemoryPermission::None, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal); + m_memory_block_manager.Update( + std::addressof(src_allocator), src_address, num_pages, KMemoryState::Normal, + KMemoryPermission::UserReadWrite, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked); // Note that we reprotected pages. reprotected_pages = true; @@ -481,9 +485,8 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si return ResultSuccess; } -VAddr KPageTable::FindFreeArea(VAddr region_start, std::size_t region_num_pages, - std::size_t num_pages, std::size_t alignment, std::size_t offset, - std::size_t guard_pages) { +VAddr KPageTable::FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages, + size_t alignment, size_t offset, size_t guard_pages) { VAddr address = 0; if (num_pages <= region_num_pages) { @@ -492,8 +495,8 @@ VAddr KPageTable::FindFreeArea(VAddr region_start, std::size_t region_num_pages, } // Find the first free area. if (address == 0) { - address = memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages, - alignment, offset, guard_pages); + address = m_memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages, + alignment, offset, guard_pages); } } @@ -511,7 +514,8 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) { // Begin traversal. Common::PageTable::TraversalContext context; Common::PageTable::TraversalEntry next_entry; - R_UNLESS(page_table_impl.BeginTraversal(next_entry, context, addr), ResultInvalidCurrentMemory); + R_UNLESS(m_page_table_impl.BeginTraversal(next_entry, context, addr), + ResultInvalidCurrentMemory); // Prepare tracking variables. PAddr cur_addr = next_entry.phys_addr; @@ -519,9 +523,9 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) { size_t tot_size = cur_size; // Iterate, adding to group as we go. 
- const auto& memory_layout = system.Kernel().MemoryLayout(); + const auto& memory_layout = m_system.Kernel().MemoryLayout(); while (tot_size < size) { - R_UNLESS(page_table_impl.ContinueTraversal(next_entry, context), + R_UNLESS(m_page_table_impl.ContinueTraversal(next_entry, context), ResultInvalidCurrentMemory); if (next_entry.phys_addr != (cur_addr + cur_size)) { @@ -557,7 +561,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu const size_t size = num_pages * PageSize; const auto& pg = pg_ll.Nodes(); - const auto& memory_layout = system.Kernel().MemoryLayout(); + const auto& memory_layout = m_system.Kernel().MemoryLayout(); // Empty groups are necessarily invalid. if (pg.empty()) { @@ -584,7 +588,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu // Begin traversal. Common::PageTable::TraversalContext context; Common::PageTable::TraversalEntry next_entry; - if (!page_table_impl.BeginTraversal(next_entry, context, addr)) { + if (!m_page_table_impl.BeginTraversal(next_entry, context, addr)) { return false; } @@ -595,7 +599,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu // Iterate, comparing expected to actual. while (tot_size < size) { - if (!page_table_impl.ContinueTraversal(next_entry, context)) { + if (!m_page_table_impl.ContinueTraversal(next_entry, context)) { return false; } @@ -641,11 +645,11 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize); } -Result KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table, +Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table, VAddr src_addr) { - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); - const std::size_t num_pages{size / PageSize}; + const size_t num_pages{size / PageSize}; // Check that the memory is mapped in the destination process. size_t num_allocator_blocks; @@ -663,48 +667,48 @@ Result KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTab // Create an update allocator. Result allocator_result{ResultSuccess}; KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - memory_block_slab_manager, num_allocator_blocks); + m_memory_block_slab_manager, num_allocator_blocks); R_TRY(allocator_result); CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap)); // Apply the memory block update. - memory_block_manager.Update(std::addressof(allocator), dst_addr, num_pages, KMemoryState::Free, - KMemoryPermission::None, KMemoryAttribute::None, - KMemoryBlockDisableMergeAttribute::None, - KMemoryBlockDisableMergeAttribute::Normal); + m_memory_block_manager.Update(std::addressof(allocator), dst_addr, num_pages, + KMemoryState::Free, KMemoryPermission::None, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::Normal); - system.InvalidateCpuInstructionCaches(); + m_system.InvalidateCpuInstructionCaches(); return ResultSuccess; } -Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { +Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { // Lock the physical memory lock. - KScopedLightLock map_phys_mem_lk(map_physical_memory_lock); + KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock); // Calculate the last address for convenience. 
const VAddr last_address = address + size - 1; // Define iteration variables. VAddr cur_address; - std::size_t mapped_size; + size_t mapped_size; // The entire mapping process can be retried. while (true) { // Check if the memory is already mapped. { // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Iterate over the memory. cur_address = address; mapped_size = 0; - auto it = memory_block_manager.FindIterator(cur_address); + auto it = m_memory_block_manager.FindIterator(cur_address); while (true) { // Check that the iterator is valid. - ASSERT(it != memory_block_manager.end()); + ASSERT(it != m_memory_block_manager.end()); // Get the memory info. const KMemoryInfo info = it->GetMemoryInfo(); @@ -735,20 +739,20 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { { // Reserve the memory from the process resource limit. KScopedResourceReservation memory_reservation( - system.Kernel().CurrentProcess()->GetResourceLimit(), + m_system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory, size - mapped_size); R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); // Allocate pages for the new memory. KPageGroup pg; - R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess( + R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess( &pg, (size - mapped_size) / PageSize, - KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0)); + KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0)); // Map the memory. { // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); size_t num_allocator_blocks = 0; @@ -758,10 +762,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { size_t checked_mapped_size = 0; cur_address = address; - auto it = memory_block_manager.FindIterator(cur_address); + auto it = m_memory_block_manager.FindIterator(cur_address); while (true) { // Check that the iterator is valid. - ASSERT(it != memory_block_manager.end()); + ASSERT(it != m_memory_block_manager.end()); // Get the memory info. const KMemoryInfo info = it->GetMemoryInfo(); @@ -805,7 +809,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); Result allocator_result{ResultSuccess}; KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - memory_block_slab_manager, + m_memory_block_slab_manager, num_allocator_blocks); R_TRY(allocator_result); @@ -818,10 +822,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { // Iterate, unmapping the pages. cur_address = address; - auto it = memory_block_manager.FindIterator(cur_address); + auto it = m_memory_block_manager.FindIterator(cur_address); while (true) { // Check that the iterator is valid. - ASSERT(it != memory_block_manager.end()); + ASSERT(it != m_memory_block_manager.end()); // Get the memory info. const KMemoryInfo info = it->GetMemoryInfo(); @@ -857,10 +861,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { PAddr pg_phys_addr = pg_it->GetAddress(); size_t pg_pages = pg_it->GetNumPages(); - auto it = memory_block_manager.FindIterator(cur_address); + auto it = m_memory_block_manager.FindIterator(cur_address); while (true) { // Check that the iterator is valid. - ASSERT(it != memory_block_manager.end()); + ASSERT(it != m_memory_block_manager.end()); // Get the memory info. 
const KMemoryInfo info = it->GetMemoryInfo(); @@ -913,10 +917,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { memory_reservation.Commit(); // Increase our tracked mapped size. - mapped_physical_memory_size += (size - mapped_size); + m_mapped_physical_memory_size += (size - mapped_size); // Update the relevant memory blocks. - memory_block_manager.UpdateIfMatch( + m_memory_block_manager.UpdateIfMatch( std::addressof(allocator), address, size / PageSize, KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal, KMemoryPermission::UserReadWrite, KMemoryAttribute::None); @@ -930,20 +934,20 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { } } -Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { +Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { // Lock the physical memory lock. - KScopedLightLock map_phys_mem_lk(map_physical_memory_lock); + KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Calculate the last address for convenience. const VAddr last_address = address + size - 1; // Define iteration variables. VAddr cur_address = 0; - std::size_t mapped_size = 0; - std::size_t num_allocator_blocks = 0; + size_t mapped_size = 0; + size_t num_allocator_blocks = 0; // Check if the memory is mapped. { @@ -951,10 +955,10 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { cur_address = address; mapped_size = 0; - auto it = memory_block_manager.FindIterator(cur_address); + auto it = m_memory_block_manager.FindIterator(cur_address); while (true) { // Check that the iterator is valid. - ASSERT(it != memory_block_manager.end()); + ASSERT(it != m_memory_block_manager.end()); // Get the memory info. const KMemoryInfo info = it->GetMemoryInfo(); @@ -1053,7 +1057,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); Result allocator_result{ResultSuccess}; KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - memory_block_slab_manager, num_allocator_blocks); + m_memory_block_slab_manager, num_allocator_blocks); R_TRY(allocator_result); // Reset the current tracking address, and make sure we clean up on failure. @@ -1064,7 +1068,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { cur_address = address; // Iterate over the memory we unmapped. - auto it = memory_block_manager.FindIterator(cur_address); + auto it = m_memory_block_manager.FindIterator(cur_address); auto pg_it = pg.Nodes().begin(); PAddr pg_phys_addr = pg_it->GetAddress(); size_t pg_pages = pg_it->GetNumPages(); @@ -1119,10 +1123,10 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { }); // Iterate over the memory, unmapping as we go. - auto it = memory_block_manager.FindIterator(cur_address); + auto it = m_memory_block_manager.FindIterator(cur_address); while (true) { // Check that the iterator is valid. - ASSERT(it != memory_block_manager.end()); + ASSERT(it != m_memory_block_manager.end()); // Get the memory info. const KMemoryInfo info = it->GetMemoryInfo(); @@ -1149,20 +1153,20 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { } // Release the memory resource. 
- mapped_physical_memory_size -= mapped_size; - auto process{system.Kernel().CurrentProcess()}; + m_mapped_physical_memory_size -= mapped_size; + auto process{m_system.Kernel().CurrentProcess()}; process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size); // Update memory blocks. - memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, - KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None, - KMemoryBlockDisableMergeAttribute::None, - KMemoryBlockDisableMergeAttribute::None); + m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, + KMemoryState::Free, KMemoryPermission::None, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::None); // TODO(bunnei): This is a workaround until the next set of changes, where we add reference // counting for mapped pages. Until then, we must manually close the reference to the page // group. - system.Kernel().MemoryManager().Close(pg); + m_system.Kernel().MemoryManager().Close(pg); // We succeeded. remap_guard.Cancel(); @@ -1170,9 +1174,9 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { return ResultSuccess; } -Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, std::size_t size) { +Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size) { // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Validate that the source address's state is valid. KMemoryState src_state; @@ -1192,19 +1196,21 @@ Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, std::size_t s // Create an update allocator for the source. Result src_allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator src_allocator( - std::addressof(src_allocator_result), memory_block_slab_manager, num_src_allocator_blocks); + KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), + m_memory_block_slab_manager, + num_src_allocator_blocks); R_TRY(src_allocator_result); // Create an update allocator for the destination. Result dst_allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator dst_allocator( - std::addressof(dst_allocator_result), memory_block_slab_manager, num_dst_allocator_blocks); + KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), + m_memory_block_slab_manager, + num_dst_allocator_blocks); R_TRY(dst_allocator_result); // Map the memory. KPageGroup page_linked_list; - const std::size_t num_pages{size / PageSize}; + const size_t num_pages{size / PageSize}; const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>( KMemoryPermission::KernelRead | KMemoryPermission::NotMapped); const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked; @@ -1223,21 +1229,21 @@ Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, std::size_t s } // Apply the memory block updates. 
- memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, - new_src_perm, new_src_attr, - KMemoryBlockDisableMergeAttribute::Locked, - KMemoryBlockDisableMergeAttribute::None); - memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, - KMemoryState::Stack, KMemoryPermission::UserReadWrite, - KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, - KMemoryBlockDisableMergeAttribute::None); + m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, + new_src_perm, new_src_attr, + KMemoryBlockDisableMergeAttribute::Locked, + KMemoryBlockDisableMergeAttribute::None); + m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, + KMemoryState::Stack, KMemoryPermission::UserReadWrite, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, + KMemoryBlockDisableMergeAttribute::None); return ResultSuccess; } -Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, std::size_t size) { +Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size) { // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Validate that the source address's state is valid. KMemoryState src_state; @@ -1258,19 +1264,21 @@ Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, std::size_t // Create an update allocator for the source. Result src_allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator src_allocator( - std::addressof(src_allocator_result), memory_block_slab_manager, num_src_allocator_blocks); + KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), + m_memory_block_slab_manager, + num_src_allocator_blocks); R_TRY(src_allocator_result); // Create an update allocator for the destination. Result dst_allocator_result{ResultSuccess}; - KMemoryBlockManagerUpdateAllocator dst_allocator( - std::addressof(dst_allocator_result), memory_block_slab_manager, num_dst_allocator_blocks); + KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), + m_memory_block_slab_manager, + num_dst_allocator_blocks); R_TRY(dst_allocator_result); KPageGroup src_pages; KPageGroup dst_pages; - const std::size_t num_pages{size / PageSize}; + const size_t num_pages{size / PageSize}; AddRegionToPages(src_address, num_pages, src_pages); AddRegionToPages(dst_address, num_pages, dst_pages); @@ -1290,14 +1298,14 @@ Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, std::size_t } // Apply the memory block updates. 
- memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, - KMemoryPermission::UserReadWrite, KMemoryAttribute::None, - KMemoryBlockDisableMergeAttribute::None, - KMemoryBlockDisableMergeAttribute::Locked); - memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, - KMemoryState::None, KMemoryPermission::None, KMemoryAttribute::None, - KMemoryBlockDisableMergeAttribute::None, - KMemoryBlockDisableMergeAttribute::Normal); + m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, + KMemoryPermission::UserReadWrite, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::Locked); + m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, + KMemoryState::None, KMemoryPermission::None, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::Normal); return ResultSuccess; } @@ -1312,7 +1320,7 @@ Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list, if (const auto result{ Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())}; result.IsError()) { - const std::size_t num_pages{(addr - cur_addr) / PageSize}; + const size_t num_pages{(addr - cur_addr) / PageSize}; ASSERT(Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap) .IsSuccess()); @@ -1329,12 +1337,12 @@ Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list, Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state, KMemoryPermission perm) { // Check that the map is in range. - const std::size_t num_pages{page_linked_list.GetNumPages()}; - const std::size_t size{num_pages * PageSize}; + const size_t num_pages{page_linked_list.GetNumPages()}; + const size_t size{num_pages * PageSize}; R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Check the memory state. R_TRY(this->CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free, @@ -1344,23 +1352,22 @@ Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemory // Create an update allocator. Result allocator_result{ResultSuccess}; KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - memory_block_slab_manager); + m_memory_block_slab_manager); // Map the pages. R_TRY(MapPages(address, page_linked_list, perm)); // Update the blocks. 
- memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm, - KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, - KMemoryBlockDisableMergeAttribute::None); + m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, + KMemoryBlockDisableMergeAttribute::None); return ResultSuccess; } -Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, - PAddr phys_addr, bool is_pa_valid, VAddr region_start, - std::size_t region_num_pages, KMemoryState state, - KMemoryPermission perm) { +Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr, + bool is_pa_valid, VAddr region_start, size_t region_num_pages, + KMemoryState state, KMemoryPermission perm) { ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize); // Ensure this is a valid map request. @@ -1369,7 +1376,7 @@ Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Find a random address to map at. VAddr addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0, @@ -1385,7 +1392,7 @@ Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t // Create an update allocator. Result allocator_result{ResultSuccess}; KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - memory_block_slab_manager); + m_memory_block_slab_manager); // Perform mapping operation. if (is_pa_valid) { @@ -1395,9 +1402,9 @@ Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t } // Update the blocks. - memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, - KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, - KMemoryBlockDisableMergeAttribute::None); + m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, + KMemoryBlockDisableMergeAttribute::None); // We successfully mapped the pages. *out_addr = addr; @@ -1424,12 +1431,12 @@ Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) { Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state) { // Check that the unmap is in range. - const std::size_t num_pages{page_linked_list.GetNumPages()}; - const std::size_t size{num_pages * PageSize}; + const size_t num_pages{page_linked_list.GetNumPages()}; + const size_t size{num_pages * PageSize}; R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Check the memory state. size_t num_allocator_blocks; @@ -1441,31 +1448,31 @@ Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemo // Create an update allocator. Result allocator_result{ResultSuccess}; KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - memory_block_slab_manager, num_allocator_blocks); + m_memory_block_slab_manager, num_allocator_blocks); R_TRY(allocator_result); // Perform the unmap. R_TRY(UnmapPages(address, page_linked_list)); // Update the blocks. 
- memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, - KMemoryPermission::None, KMemoryAttribute::None, - KMemoryBlockDisableMergeAttribute::None, - KMemoryBlockDisableMergeAttribute::Normal); + m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, + KMemoryPermission::None, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::Normal); return ResultSuccess; } -Result KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state) { +Result KPageTable::UnmapPages(VAddr address, size_t num_pages, KMemoryState state) { // Check that the unmap is in range. - const std::size_t size = num_pages * PageSize; + const size_t size = num_pages * PageSize; R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Check the memory state. - std::size_t num_allocator_blocks{}; + size_t num_allocator_blocks{}; R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, KMemoryState::All, state, KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All, @@ -1474,17 +1481,17 @@ Result KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryState // Create an update allocator. Result allocator_result{ResultSuccess}; KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), - memory_block_slab_manager, num_allocator_blocks); + m_memory_block_slab_manager, num_allocator_blocks); R_TRY(allocator_result); // Perform the unmap. R_TRY(Operate(address, num_pages, KMemoryPermission::None, OperationType::Unmap)); // Update the blocks. - memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, - KMemoryPermission::None, KMemoryAttribute::None, - KMemoryBlockDisableMergeAttribute::None, - KMemoryBlockDisableMergeAttribute::Normal); + m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, + KMemoryPermission::None, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::Normal); return ResultSuccess; } @@ -1501,7 +1508,7 @@ Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t n R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Check if state allows us to create the group. R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted, @@ -1514,12 +1521,12 @@ Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t n return ResultSuccess; } -Result KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size, +Result KPageTable::SetProcessMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm) { const size_t num_pages = size / PageSize; // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Verify we can change the memory permission. KMemoryState old_state; @@ -1559,7 +1566,7 @@ Result KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size, // Create an update allocator. 
     Result allocator_result{ResultSuccess};
     KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 memory_block_slab_manager, num_allocator_blocks);
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
     R_TRY(allocator_result);
 
     // Perform mapping operation.
@@ -1568,29 +1575,29 @@ Result KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
     R_TRY(Operate(addr, num_pages, new_perm, operation));
 
     // Update the blocks.
-    memory_block_manager.Update(std::addressof(allocator), addr, num_pages, new_state, new_perm,
-                                KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
-                                KMemoryBlockDisableMergeAttribute::None);
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, new_state, new_perm,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::None);
 
     // Ensure cache coherency, if we're setting pages as executable.
     if (is_x) {
-        system.InvalidateCpuInstructionCacheRange(addr, size);
+        m_system.InvalidateCpuInstructionCacheRange(addr, size);
     }
 
     return ResultSuccess;
 }
 
 KMemoryInfo KPageTable::QueryInfoImpl(VAddr addr) {
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
-    return memory_block_manager.FindBlock(addr)->GetMemoryInfo();
+    return m_memory_block_manager.FindBlock(addr)->GetMemoryInfo();
 }
 
 KMemoryInfo KPageTable::QueryInfo(VAddr addr) {
     if (!Contains(addr, 1)) {
         return {
-            .m_address = address_space_end,
-            .m_size = 0 - address_space_end,
+            .m_address = m_address_space_end,
+            .m_size = 0 - m_address_space_end,
             .m_state = static_cast<KMemoryState>(Svc::MemoryState::Inaccessible),
             .m_device_disable_merge_left_count = 0,
             .m_device_disable_merge_right_count = 0,
@@ -1607,12 +1614,11 @@ KMemoryInfo KPageTable::QueryInfo(VAddr addr) {
     return QueryInfoImpl(addr);
 }
 
-Result KPageTable::SetMemoryPermission(VAddr addr, std::size_t size,
-                                       Svc::MemoryPermission svc_perm) {
+Result KPageTable::SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm) {
     const size_t num_pages = size / PageSize;
 
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Verify we can change the memory permission.
     KMemoryState old_state;
@@ -1631,27 +1637,27 @@ Result KPageTable::SetMemoryPermission(VAddr addr, std::size_t size,
     // Create an update allocator.
     Result allocator_result{ResultSuccess};
     KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 memory_block_slab_manager, num_allocator_blocks);
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
     R_TRY(allocator_result);
 
     // Perform mapping operation.
     R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
 
     // Update the blocks.
-    memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
-                                KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
-                                KMemoryBlockDisableMergeAttribute::None);
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::None);
 
     return ResultSuccess;
 }
 
-Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr) {
+Result KPageTable::SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr) {
     const size_t num_pages = size / PageSize;
     ASSERT((static_cast<KMemoryAttribute>(mask) | KMemoryAttribute::SetMask) ==
            KMemoryAttribute::SetMask);
 
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Verify we can change the memory attribute.
     KMemoryState old_state;
@@ -1669,7 +1675,7 @@ Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u3
     // Create an update allocator.
     Result allocator_result{ResultSuccess};
     KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 memory_block_slab_manager, num_allocator_blocks);
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
     R_TRY(allocator_result);
 
     // Determine the new attribute.
@@ -1681,124 +1687,125 @@ Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u3
     this->Operate(addr, num_pages, old_perm, OperationType::ChangePermissionsAndRefresh);
 
     // Update the blocks.
-    memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, old_perm,
-                                new_attr, KMemoryBlockDisableMergeAttribute::None,
-                                KMemoryBlockDisableMergeAttribute::None);
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, old_perm,
+                                  new_attr, KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::None);
 
     return ResultSuccess;
 }
 
-Result KPageTable::SetMaxHeapSize(std::size_t size) {
+Result KPageTable::SetMaxHeapSize(size_t size) {
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Only process page tables are allowed to set heap size.
     ASSERT(!this->IsKernel());
 
-    max_heap_size = size;
+    m_max_heap_size = size;
 
     return ResultSuccess;
 }
 
-Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
+Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
     // Lock the physical memory mutex.
-    KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
+    KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
 
     // Try to perform a reduction in heap, instead of an extension.
     VAddr cur_address{};
-    std::size_t allocation_size{};
+    size_t allocation_size{};
     {
         // Lock the table.
-        KScopedLightLock lk(general_lock);
+        KScopedLightLock lk(m_general_lock);
 
         // Validate that setting heap size is possible at all.
-        R_UNLESS(!is_kernel, ResultOutOfMemory);
-        R_UNLESS(size <= static_cast<std::size_t>(heap_region_end - heap_region_start),
+        R_UNLESS(!m_is_kernel, ResultOutOfMemory);
+        R_UNLESS(size <= static_cast<size_t>(m_heap_region_end - m_heap_region_start),
                  ResultOutOfMemory);
-        R_UNLESS(size <= max_heap_size, ResultOutOfMemory);
+        R_UNLESS(size <= m_max_heap_size, ResultOutOfMemory);
 
         if (size < GetHeapSize()) {
            // The size being requested is less than the current size, so we need to free the end of
            // the heap.
 
            // Validate memory state.
-            std::size_t num_allocator_blocks;
+            size_t num_allocator_blocks;
            R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks),
-                                         heap_region_start + size, GetHeapSize() - size,
+                                         m_heap_region_start + size, GetHeapSize() - size,
                                          KMemoryState::All, KMemoryState::Normal,
                                          KMemoryPermission::All, KMemoryPermission::UserReadWrite,
                                          KMemoryAttribute::All, KMemoryAttribute::None));
 
            // Create an update allocator.
            Result allocator_result{ResultSuccess};
-            KMemoryBlockManagerUpdateAllocator allocator(
-                std::addressof(allocator_result), memory_block_slab_manager, num_allocator_blocks);
+            KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                         m_memory_block_slab_manager,
+                                                         num_allocator_blocks);
            R_TRY(allocator_result);
 
            // Unmap the end of the heap.
            const auto num_pages = (GetHeapSize() - size) / PageSize;
-            R_TRY(Operate(heap_region_start + size, num_pages, KMemoryPermission::None,
+            R_TRY(Operate(m_heap_region_start + size, num_pages, KMemoryPermission::None,
                           OperationType::Unmap));
 
            // Release the memory from the resource limit.
-            system.Kernel().CurrentProcess()->GetResourceLimit()->Release(
+            m_system.Kernel().CurrentProcess()->GetResourceLimit()->Release(
                LimitableResource::PhysicalMemory, num_pages * PageSize);
 
            // Apply the memory block update.
-            memory_block_manager.Update(std::addressof(allocator), heap_region_start + size,
-                                        num_pages, KMemoryState::Free, KMemoryPermission::None,
-                                        KMemoryAttribute::None,
-                                        KMemoryBlockDisableMergeAttribute::None,
-                                        size == 0 ? KMemoryBlockDisableMergeAttribute::Normal
-                                                  : KMemoryBlockDisableMergeAttribute::None);
+            m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size,
+                                          num_pages, KMemoryState::Free, KMemoryPermission::None,
+                                          KMemoryAttribute::None,
+                                          KMemoryBlockDisableMergeAttribute::None,
+                                          size == 0 ? KMemoryBlockDisableMergeAttribute::Normal
+                                                    : KMemoryBlockDisableMergeAttribute::None);
 
            // Update the current heap end.
-            current_heap_end = heap_region_start + size;
+            m_current_heap_end = m_heap_region_start + size;
 
            // Set the output.
-            *out = heap_region_start;
+            *out = m_heap_region_start;
 
            return ResultSuccess;
        } else if (size == GetHeapSize()) {
            // The size requested is exactly the current size.
-            *out = heap_region_start;
+            *out = m_heap_region_start;
 
            return ResultSuccess;
        } else {
            // We have to allocate memory. Determine how much to allocate and where while the table
            // is locked.
-            cur_address = current_heap_end;
+            cur_address = m_current_heap_end;
            allocation_size = size - GetHeapSize();
        }
    }
 
    // Reserve memory for the heap extension.
    KScopedResourceReservation memory_reservation(
-        system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
+        m_system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
        allocation_size);
    R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
 
    // Allocate pages for the heap extension.
    KPageGroup pg;
-    R_TRY(system.Kernel().MemoryManager().AllocateAndOpen(
+    R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
        &pg, allocation_size / PageSize,
-        KMemoryManager::EncodeOption(memory_pool, allocation_option)));
+        KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));
 
    // Clear all the newly allocated pages.
    for (const auto& it : pg.Nodes()) {
-        std::memset(system.DeviceMemory().GetPointer<void>(it.GetAddress()), heap_fill_value,
+        std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value,
                    it.GetSize());
    }
 
    // Map the pages.
    {
        // Lock the table.
-        KScopedLightLock lk(general_lock);
+        KScopedLightLock lk(m_general_lock);
 
        // Ensure that the heap hasn't changed since we began executing.
-        ASSERT(cur_address == current_heap_end);
+        ASSERT(cur_address == m_current_heap_end);
 
        // Check the memory state.
-        std::size_t num_allocator_blocks{};
-        R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), current_heap_end,
+        size_t num_allocator_blocks{};
+        R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), m_current_heap_end,
                                      allocation_size, KMemoryState::All, KMemoryState::Free,
                                      KMemoryPermission::None, KMemoryPermission::None,
                                      KMemoryAttribute::None, KMemoryAttribute::None));
@@ -1806,16 +1813,16 @@ Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
        // Create an update allocator.
        Result allocator_result{ResultSuccess};
        KMemoryBlockManagerUpdateAllocator allocator(
-            std::addressof(allocator_result), memory_block_slab_manager, num_allocator_blocks);
+            std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
        R_TRY(allocator_result);
 
        // Map the pages.
        const auto num_pages = allocation_size / PageSize;
-        R_TRY(Operate(current_heap_end, num_pages, pg, OperationType::MapGroup));
+        R_TRY(Operate(m_current_heap_end, num_pages, pg, OperationType::MapGroup));
 
        // Clear all the newly allocated pages.
-        for (std::size_t cur_page = 0; cur_page < num_pages; ++cur_page) {
-            std::memset(system.Memory().GetPointer(current_heap_end + (cur_page * PageSize)), 0,
+        for (size_t cur_page = 0; cur_page < num_pages; ++cur_page) {
+            std::memset(m_system.Memory().GetPointer(m_current_heap_end + (cur_page * PageSize)), 0,
                         PageSize);
        }
 
@@ -1823,27 +1830,27 @@ Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
        memory_reservation.Commit();
 
        // Apply the memory block update.
-        memory_block_manager.Update(
-            std::addressof(allocator), current_heap_end, num_pages, KMemoryState::Normal,
+        m_memory_block_manager.Update(
+            std::addressof(allocator), m_current_heap_end, num_pages, KMemoryState::Normal,
            KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
-            heap_region_start == current_heap_end ? KMemoryBlockDisableMergeAttribute::Normal
-                                                  : KMemoryBlockDisableMergeAttribute::None,
+            m_heap_region_start == m_current_heap_end ? KMemoryBlockDisableMergeAttribute::Normal
+                                                      : KMemoryBlockDisableMergeAttribute::None,
            KMemoryBlockDisableMergeAttribute::None);
 
        // Update the current heap end.
-        current_heap_end = heap_region_start + size;
+        m_current_heap_end = m_heap_region_start + size;
 
        // Set the output.
-        *out = heap_region_start;
+        *out = m_heap_region_start;
 
        return ResultSuccess;
    }
}
 
-ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
+ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_t align,
                                                   bool is_map_only, VAddr region_start,
-                                                  std::size_t region_num_pages, KMemoryState state,
+                                                  size_t region_num_pages, KMemoryState state,
                                                   KMemoryPermission perm, PAddr map_addr) {
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     if (!CanContain(region_start, region_num_pages * PageSize, state)) {
         return ResultInvalidCurrentMemory;
@@ -1862,33 +1869,98 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages,
     // Create an update allocator.
     Result allocator_result{ResultSuccess};
     KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 memory_block_slab_manager);
+                                                 m_memory_block_slab_manager);
 
     if (is_map_only) {
         R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
     } else {
         KPageGroup page_group;
-        R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess(
+        R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess(
             &page_group, needed_num_pages,
-            KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0));
+            KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0));
         R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup));
     }
 
     // Update the blocks.
-    memory_block_manager.Update(std::addressof(allocator), addr, needed_num_pages, state, perm,
-                                KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
-                                KMemoryBlockDisableMergeAttribute::None);
+    m_memory_block_manager.Update(std::addressof(allocator), addr, needed_num_pages, state, perm,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+                                  KMemoryBlockDisableMergeAttribute::None);
 
     return addr;
 }
 
-Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, std::size_t size) {
+Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm,
+                                                bool is_aligned) {
+    // Lightly validate the range before doing anything else.
+    const size_t num_pages = size / PageSize;
+    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Check the memory state.
+    const auto test_state =
+        (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap);
+    size_t num_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, test_state,
+                                 test_state, perm, perm,
+                                 KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked,
+                                 KMemoryAttribute::None, KMemoryAttribute::DeviceShared));
+
+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // Update the memory blocks.
+    m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
+                                      &KMemoryBlock::ShareToDevice, KMemoryPermission::None);
+
+    return ResultSuccess;
+}
+
+Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size) {
+    // Lightly validate the range before doing anything else.
+    const size_t num_pages = size / PageSize;
+    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Check the memory state.
+    size_t num_allocator_blocks;
+    R_TRY(this->CheckMemoryStateContiguous(
+        std::addressof(num_allocator_blocks), address, size,
+        KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap,
+        KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap,
+        KMemoryPermission::None, KMemoryPermission::None,
+        KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
+
+    // Create an update allocator.
+    Result allocator_result{ResultSuccess};
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // Update the memory blocks.
+    const KMemoryBlockManager::MemoryBlockLockFunction lock_func =
+        m_enable_device_address_space_merge
+            ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare
+            : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight;
+    m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func,
+                                      KMemoryPermission::None);
+
+    return ResultSuccess;
+}
+
+Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, size_t size) {
     // Lightly validate the range before doing anything else.
     const size_t num_pages = size / PageSize;
     R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
 
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Check the memory state.
     size_t num_allocator_blocks;
@@ -1900,17 +1972,17 @@ Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, std::size_t size)
     // Create an update allocator.
     Result allocator_result{ResultSuccess};
     KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 memory_block_slab_manager, num_allocator_blocks);
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
     R_TRY(allocator_result);
 
     // Update the memory blocks.
-    memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
-                                    &KMemoryBlock::UnshareToDevice, KMemoryPermission::None);
+    m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
+                                      &KMemoryBlock::UnshareToDevice, KMemoryPermission::None);
 
     return ResultSuccess;
 }
 
-Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size) {
+Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size) {
     return this->LockMemoryAndOpen(
         out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
         KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
@@ -1920,7 +1992,7 @@ Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t si
         KMemoryAttribute::Locked);
 }
 
-Result KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg) {
+Result KPageTable::UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg) {
     return this->UnlockMemory(
         addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
         KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All,
@@ -1928,9 +2000,9 @@ Result KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size, const KPage
 }
 
 bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const {
-    auto start_ptr = system.DeviceMemory().GetPointer<u8>(addr);
+    auto start_ptr = m_system.DeviceMemory().GetPointer<u8>(addr);
     for (u64 offset{}; offset < size; offset += PageSize) {
-        if (start_ptr != system.DeviceMemory().GetPointer<u8>(addr + offset)) {
+        if (start_ptr != m_system.DeviceMemory().GetPointer<u8>(addr + offset)) {
             return false;
         }
         start_ptr += PageSize;
@@ -1938,8 +2010,7 @@ bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const {
     return true;
 }
 
-void KPageTable::AddRegionToPages(VAddr start, std::size_t num_pages,
-                                  KPageGroup& page_linked_list) {
+void KPageTable::AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list) {
     VAddr addr{start};
     while (addr < start + (num_pages * PageSize)) {
         const PAddr paddr{GetPhysicalAddr(addr)};
@@ -1949,16 +2020,16 @@ void KPageTable::AddRegionToPages(VAddr start, std::size_t num_pages,
     }
 }
 
-VAddr KPageTable::AllocateVirtualMemory(VAddr start, std::size_t region_num_pages,
-                                        u64 needed_num_pages, std::size_t align) {
-    if (is_aslr_enabled) {
+VAddr KPageTable::AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages,
+                                        size_t align) {
+    if (m_enable_aslr) {
         UNIMPLEMENTED();
     }
-    return memory_block_manager.FindFreeArea(start, region_num_pages, needed_num_pages, align, 0,
-                                             IsKernel() ? 1 : 4);
+    return m_memory_block_manager.FindFreeArea(start, region_num_pages, needed_num_pages, align, 0,
+                                               IsKernel() ? 1 : 4);
 }
 
-Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group,
+Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group,
                            OperationType operation) {
     ASSERT(this->IsLockedByCurrentThread());
 
@@ -1967,11 +2038,11 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup&
     ASSERT(num_pages == page_group.GetNumPages());
 
     for (const auto& node : page_group.Nodes()) {
-        const std::size_t size{node.GetNumPages() * PageSize};
+        const size_t size{node.GetNumPages() * PageSize};
 
         switch (operation) {
         case OperationType::MapGroup:
-            system.Memory().MapMemoryRegion(page_table_impl, addr, size, node.GetAddress());
+            m_system.Memory().MapMemoryRegion(m_page_table_impl, addr, size, node.GetAddress());
             break;
         default:
             ASSERT(false);
@@ -1983,7 +2054,7 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup&
     return ResultSuccess;
 }
 
-Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
+Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm,
                            OperationType operation, PAddr map_addr) {
     ASSERT(this->IsLockedByCurrentThread());
 
@@ -1993,12 +2064,12 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission
     switch (operation) {
     case OperationType::Unmap:
-        system.Memory().UnmapRegion(page_table_impl, addr, num_pages * PageSize);
+        m_system.Memory().UnmapRegion(m_page_table_impl, addr, num_pages * PageSize);
         break;
     case OperationType::Map: {
         ASSERT(map_addr);
         ASSERT(Common::IsAligned(map_addr, PageSize));
-        system.Memory().MapMemoryRegion(page_table_impl, addr, num_pages * PageSize, map_addr);
+        m_system.Memory().MapMemoryRegion(m_page_table_impl, addr, num_pages * PageSize, map_addr);
         break;
     }
     case OperationType::ChangePermissions:
@@ -2014,18 +2085,18 @@ VAddr KPageTable::GetRegionAddress(KMemoryState state) const {
     switch (state) {
     case KMemoryState::Free:
     case KMemoryState::Kernel:
-        return address_space_start;
+        return m_address_space_start;
     case KMemoryState::Normal:
-        return heap_region_start;
+        return m_heap_region_start;
     case KMemoryState::Ipc:
     case KMemoryState::NonSecureIpc:
     case KMemoryState::NonDeviceIpc:
-        return alias_region_start;
+        return m_alias_region_start;
     case KMemoryState::Stack:
-        return stack_region_start;
+        return m_stack_region_start;
     case KMemoryState::Static:
     case KMemoryState::ThreadLocal:
-        return kernel_map_region_start;
+        return m_kernel_map_region_start;
     case KMemoryState::Io:
     case KMemoryState::Shared:
     case KMemoryState::AliasCode:
@@ -2036,31 +2107,31 @@ VAddr KPageTable::GetRegionAddress(KMemoryState state) const {
     case KMemoryState::GeneratedCode:
     case KMemoryState::CodeOut:
     case KMemoryState::Coverage:
-        return alias_code_region_start;
+        return m_alias_code_region_start;
     case KMemoryState::Code:
     case KMemoryState::CodeData:
-        return code_region_start;
+        return m_code_region_start;
     default:
         UNREACHABLE();
     }
 }
 
-std::size_t KPageTable::GetRegionSize(KMemoryState state) const {
+size_t KPageTable::GetRegionSize(KMemoryState state) const {
     switch (state) {
     case KMemoryState::Free:
     case KMemoryState::Kernel:
-        return address_space_end - address_space_start;
+        return m_address_space_end - m_address_space_start;
     case KMemoryState::Normal:
-        return heap_region_end - heap_region_start;
+        return m_heap_region_end - m_heap_region_start;
     case KMemoryState::Ipc:
     case KMemoryState::NonSecureIpc:
     case KMemoryState::NonDeviceIpc:
-        return alias_region_end - alias_region_start;
+        return m_alias_region_end - m_alias_region_start;
     case KMemoryState::Stack:
-        return stack_region_end - stack_region_start;
+        return m_stack_region_end - m_stack_region_start;
     case KMemoryState::Static:
     case KMemoryState::ThreadLocal:
-        return kernel_map_region_end - kernel_map_region_start;
+        return m_kernel_map_region_end - m_kernel_map_region_start;
     case KMemoryState::Io:
     case KMemoryState::Shared:
     case KMemoryState::AliasCode:
@@ -2071,16 +2142,16 @@ std::size_t KPageTable::GetRegionSize(KMemoryState state) const {
     case KMemoryState::GeneratedCode:
     case KMemoryState::CodeOut:
     case KMemoryState::Coverage:
-        return alias_code_region_end - alias_code_region_start;
+        return m_alias_code_region_end - m_alias_code_region_start;
     case KMemoryState::Code:
     case KMemoryState::CodeData:
-        return code_region_end - code_region_start;
+        return m_code_region_end - m_code_region_start;
     default:
         UNREACHABLE();
     }
 }
 
-bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) const {
+bool KPageTable::CanContain(VAddr addr, size_t size, KMemoryState state) const {
     const VAddr end = addr + size;
     const VAddr last = end - 1;
 
@@ -2089,10 +2160,10 @@ bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) co
     const bool is_in_region =
         region_start <= addr && addr < end && last <= region_start + region_size - 1;
-    const bool is_in_heap = !(end <= heap_region_start || heap_region_end <= addr ||
-                              heap_region_start == heap_region_end);
-    const bool is_in_alias = !(end <= alias_region_start || alias_region_end <= addr ||
-                               alias_region_start == alias_region_end);
+    const bool is_in_heap = !(end <= m_heap_region_start || m_heap_region_end <= addr ||
+                              m_heap_region_start == m_heap_region_end);
+    const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr ||
+                               m_alias_region_start == m_alias_region_end);
     switch (state) {
     case KMemoryState::Free:
     case KMemoryState::Kernel:
@@ -2138,16 +2209,16 @@ Result KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_
     return ResultSuccess;
 }
 
-Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr,
-                                              std::size_t size, KMemoryState state_mask,
-                                              KMemoryState state, KMemoryPermission perm_mask,
-                                              KMemoryPermission perm, KMemoryAttribute attr_mask,
+Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size,
+                                              KMemoryState state_mask, KMemoryState state,
+                                              KMemoryPermission perm_mask, KMemoryPermission perm,
+                                              KMemoryAttribute attr_mask,
                                               KMemoryAttribute attr) const {
     ASSERT(this->IsLockedByCurrentThread());
 
     // Get information about the first block.
     const VAddr last_addr = addr + size - 1;
-    KMemoryBlockManager::const_iterator it = memory_block_manager.FindIterator(addr);
+    KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
     KMemoryInfo info = it->GetMemoryInfo();
 
     // If the start address isn't aligned, we need a block.
@@ -2165,7 +2236,7 @@ Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VA
         // Advance our iterator.
         it++;
-        ASSERT(it != memory_block_manager.cend());
+        ASSERT(it != m_memory_block_manager.cend());
         info = it->GetMemoryInfo();
     }
 
@@ -2181,8 +2252,8 @@ Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VA
 }
 
 Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
-                                    KMemoryAttribute* out_attr, std::size_t* out_blocks_needed,
-                                    VAddr addr, std::size_t size, KMemoryState state_mask,
+                                    KMemoryAttribute* out_attr, size_t* out_blocks_needed,
+                                    VAddr addr, size_t size, KMemoryState state_mask,
                                     KMemoryState state, KMemoryPermission perm_mask,
                                     KMemoryPermission perm, KMemoryAttribute attr_mask,
                                     KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
@@ -2190,7 +2261,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
     // Get information about the first block.
     const VAddr last_addr = addr + size - 1;
-    KMemoryBlockManager::const_iterator it = memory_block_manager.FindIterator(addr);
+    KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
     KMemoryInfo info = it->GetMemoryInfo();
 
     // If the start address isn't aligned, we need a block.
@@ -2218,7 +2289,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
         // Advance our iterator.
         it++;
-        ASSERT(it != memory_block_manager.cend());
+        ASSERT(it != m_memory_block_manager.cend());
         info = it->GetMemoryInfo();
     }
 
@@ -2257,7 +2328,7 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr
     R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
 
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Check that the output page group is empty, if it exists.
     if (out_pg) {
@@ -2288,7 +2359,7 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr
     // Create an update allocator.
     Result allocator_result{ResultSuccess};
     KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 memory_block_slab_manager, num_allocator_blocks);
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
     R_TRY(allocator_result);
 
     // Decide on new perm and attr.
@@ -2301,9 +2372,9 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr
     }
 
     // Apply the memory block updates.
-    memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
-                                new_attr, KMemoryBlockDisableMergeAttribute::Locked,
-                                KMemoryBlockDisableMergeAttribute::None);
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
+                                  new_attr, KMemoryBlockDisableMergeAttribute::Locked,
+                                  KMemoryBlockDisableMergeAttribute::None);
 
     return ResultSuccess;
 }
 
@@ -2322,7 +2393,7 @@ Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask
     R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
 
     // Lock the table.
-    KScopedLightLock lk(general_lock);
+    KScopedLightLock lk(m_general_lock);
 
     // Check the state.
     KMemoryState old_state{};
@@ -2347,7 +2418,7 @@ Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask
     // Create an update allocator.
     Result allocator_result{ResultSuccess};
     KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 memory_block_slab_manager, num_allocator_blocks);
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
     R_TRY(allocator_result);
 
     // Update permission, if we need to.
@@ -2356,9 +2427,9 @@ Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask
     }
 
     // Apply the memory block updates.
-    memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
-                                new_attr, KMemoryBlockDisableMergeAttribute::None,
-                                KMemoryBlockDisableMergeAttribute::Locked);
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
+                                  new_attr, KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::Locked);
 
     return ResultSuccess;
 }
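The SetHeapSize rework above preserves the original three-way contract: shrinking unmaps the tail and releases it back to the process resource limit, an unchanged size returns immediately, and growing reserves, allocates, zero-fills, and maps new pages. In every successful case the function reports the heap base (m_heap_region_start) rather than the new end. A minimal caller sketch under that contract; the page_table reference and the Common::Literals suffix are assumptions for illustration, not part of this change:

    // Hypothetical caller; only the SetHeapSize behavior itself is from the diff.
    VAddr heap_base{};

    // Grow: reserves against the resource limit and maps zero-filled pages.
    ASSERT(page_table.SetHeapSize(std::addressof(heap_base), 8_MiB).IsSuccess());

    // Same size: no mapping work is performed, the heap base is still reported.
    ASSERT(page_table.SetHeapSize(std::addressof(heap_base), 8_MiB).IsSuccess());

    // Shrink: unmaps the tail and releases the pages to the resource limit.
    ASSERT(page_table.SetHeapSize(std::addressof(heap_base), 4_MiB).IsSuccess());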
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index fa11a0fe3..225854319 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -36,60 +36,66 @@ public:
     ~KPageTable();
 
     Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
-                                VAddr code_addr, std::size_t code_size,
+                                VAddr code_addr, size_t code_size,
                                 KMemoryBlockSlabManager* mem_block_slab_manager,
                                 KMemoryManager::Pool pool);
     void Finalize();
-    Result MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state,
+    Result MapProcessCode(VAddr addr, size_t pages_count, KMemoryState state,
                           KMemoryPermission perm);
-    Result MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size);
-    Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
+    Result MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size);
+    Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size,
                            ICacheInvalidationStrategy icache_invalidation_strategy);
-    Result UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
+    Result UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table,
                               VAddr src_addr);
-    Result MapPhysicalMemory(VAddr addr, std::size_t size);
-    Result UnmapPhysicalMemory(VAddr addr, std::size_t size);
-    Result MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
-    Result UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
+    Result MapPhysicalMemory(VAddr addr, size_t size);
+    Result UnmapPhysicalMemory(VAddr addr, size_t size);
+    Result MapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
+    Result UnmapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
     Result MapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state,
                     KMemoryPermission perm);
-    Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr,
+    Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
                     KMemoryState state, KMemoryPermission perm) {
         return this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
                               this->GetRegionAddress(state),
                               this->GetRegionSize(state) / PageSize, state, perm);
     }
     Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state);
-    Result UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state);
-    Result SetProcessMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission svc_perm);
+    Result UnmapPages(VAddr address, size_t num_pages, KMemoryState state);
+    Result SetProcessMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm);
     KMemoryInfo QueryInfo(VAddr addr);
-    Result SetMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission perm);
-    Result SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr);
-    Result SetMaxHeapSize(std::size_t size);
-    Result SetHeapSize(VAddr* out, std::size_t size);
-    ResultVal<VAddr> AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
-                                          bool is_map_only, VAddr region_start,
-                                          std::size_t region_num_pages, KMemoryState state,
-                                          KMemoryPermission perm, PAddr map_addr = 0);
-    Result UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
-    Result LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size);
-    Result UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg);
+    Result SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission perm);
+    Result SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr);
+    Result SetMaxHeapSize(size_t size);
+    Result SetHeapSize(VAddr* out, size_t size);
+    ResultVal<VAddr> AllocateAndMapMemory(size_t needed_num_pages, size_t align, bool is_map_only,
+                                          VAddr region_start, size_t region_num_pages,
+                                          KMemoryState state, KMemoryPermission perm,
+                                          PAddr map_addr = 0);
+
+    Result LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm,
+                                        bool is_aligned);
+    Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size);
+
+    Result UnlockForDeviceAddressSpace(VAddr addr, size_t size);
+
+    Result LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size);
+    Result UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg);
     Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
                                 KMemoryState state_mask, KMemoryState state,
                                 KMemoryPermission perm_mask, KMemoryPermission perm,
                                 KMemoryAttribute attr_mask, KMemoryAttribute attr);
 
     Common::PageTable& PageTableImpl() {
-        return page_table_impl;
+        return m_page_table_impl;
     }
 
     const Common::PageTable& PageTableImpl() const {
-        return page_table_impl;
+        return m_page_table_impl;
     }
 
-    bool CanContain(VAddr addr, std::size_t size, KMemoryState state) const;
+    bool CanContain(VAddr addr, size_t size, KMemoryState state) const;
 
 private:
     enum class OperationType : u32 {
@@ -104,30 +110,30 @@ private:
         KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
 
     Result MapPages(VAddr addr, const KPageGroup& page_linked_list, KMemoryPermission perm);
-    Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr,
-                    bool is_pa_valid, VAddr region_start, std::size_t region_num_pages,
+    Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
+                    bool is_pa_valid, VAddr region_start, size_t region_num_pages,
                     KMemoryState state, KMemoryPermission perm);
     Result UnmapPages(VAddr addr, const KPageGroup& page_linked_list);
     bool IsRegionContiguous(VAddr addr, u64 size) const;
-    void AddRegionToPages(VAddr start, std::size_t num_pages, KPageGroup& page_linked_list);
+    void AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list);
     KMemoryInfo QueryInfoImpl(VAddr addr);
-    VAddr AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, u64 needed_num_pages,
-                                std::size_t align);
-    Result Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group,
+    VAddr AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages,
+                                size_t align);
+    Result Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group,
                    OperationType operation);
-    Result Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
-                   OperationType operation, PAddr map_addr = 0);
+    Result Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation,
+                   PAddr map_addr = 0);
     VAddr GetRegionAddress(KMemoryState state) const;
-    std::size_t GetRegionSize(KMemoryState state) const;
+    size_t GetRegionSize(KMemoryState state) const;
 
-    VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages,
-                       std::size_t alignment, std::size_t offset, std::size_t guard_pages);
+    VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
+                       size_t alignment, size_t offset, size_t guard_pages);
 
-    Result CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, std::size_t size,
+    Result CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size,
                                       KMemoryState state_mask, KMemoryState state,
                                       KMemoryPermission perm_mask, KMemoryPermission perm,
                                       KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
-    Result CheckMemoryStateContiguous(VAddr addr, std::size_t size, KMemoryState state_mask,
+    Result CheckMemoryStateContiguous(VAddr addr, size_t size, KMemoryState state_mask,
                                       KMemoryState state, KMemoryPermission perm_mask,
                                       KMemoryPermission perm, KMemoryAttribute attr_mask,
                                       KMemoryAttribute attr) const {
@@ -139,12 +145,12 @@ private:
                             KMemoryPermission perm_mask, KMemoryPermission perm,
                             KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
     Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
-                            KMemoryAttribute* out_attr, std::size_t* out_blocks_needed, VAddr addr,
-                            std::size_t size, KMemoryState state_mask, KMemoryState state,
+                            KMemoryAttribute* out_attr, size_t* out_blocks_needed, VAddr addr,
+                            size_t size, KMemoryState state_mask, KMemoryState state,
                             KMemoryPermission perm_mask, KMemoryPermission perm,
                             KMemoryAttribute attr_mask, KMemoryAttribute attr,
                             KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
-    Result CheckMemoryState(std::size_t* out_blocks_needed, VAddr addr, std::size_t size,
+    Result CheckMemoryState(size_t* out_blocks_needed, VAddr addr, size_t size,
                             KMemoryState state_mask, KMemoryState state,
                             KMemoryPermission perm_mask, KMemoryPermission perm,
                             KMemoryAttribute attr_mask, KMemoryAttribute attr,
@@ -152,8 +158,8 @@ private:
         return CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
                                 state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr);
     }
-    Result CheckMemoryState(VAddr addr, std::size_t size, KMemoryState state_mask,
-                            KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
+    Result CheckMemoryState(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
+                            KMemoryPermission perm_mask, KMemoryPermission perm,
                             KMemoryAttribute attr_mask, KMemoryAttribute attr,
                             KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
         return this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
@@ -175,13 +181,13 @@ private:
     bool IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages);
 
     bool IsLockedByCurrentThread() const {
-        return general_lock.IsLockedByCurrentThread();
+        return m_general_lock.IsLockedByCurrentThread();
     }
 
     bool IsHeapPhysicalAddress(const KMemoryLayout& layout, PAddr phys_addr) {
         ASSERT(this->IsLockedByCurrentThread());
 
-        return layout.IsHeapPhysicalAddress(cached_physical_heap_region, phys_addr);
+        return layout.IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr);
     }
 
     bool GetPhysicalAddressLocked(PAddr* out, VAddr virt_addr) const {
@@ -192,93 +198,93 @@ private:
         return *out != 0;
     }
 
-    mutable KLightLock general_lock;
-    mutable KLightLock map_physical_memory_lock;
+    mutable KLightLock m_general_lock;
+    mutable KLightLock m_map_physical_memory_lock;
 
 public:
     constexpr VAddr GetAddressSpaceStart() const {
-        return address_space_start;
+        return m_address_space_start;
     }
     constexpr VAddr GetAddressSpaceEnd() const {
-        return address_space_end;
+        return m_address_space_end;
     }
-    constexpr std::size_t GetAddressSpaceSize() const {
-        return address_space_end - address_space_start;
+    constexpr size_t GetAddressSpaceSize() const {
+        return m_address_space_end - m_address_space_start;
     }
     constexpr VAddr GetHeapRegionStart() const {
-        return heap_region_start;
+        return m_heap_region_start;
     }
     constexpr VAddr GetHeapRegionEnd() const {
-        return heap_region_end;
+        return m_heap_region_end;
    }
-    constexpr std::size_t GetHeapRegionSize() const {
-        return heap_region_end - heap_region_start;
+    constexpr size_t GetHeapRegionSize() const {
+        return m_heap_region_end - m_heap_region_start;
     }
     constexpr VAddr GetAliasRegionStart() const {
-        return alias_region_start;
+        return m_alias_region_start;
     }
     constexpr VAddr GetAliasRegionEnd() const {
-        return alias_region_end;
+        return m_alias_region_end;
     }
-    constexpr std::size_t GetAliasRegionSize() const {
-        return alias_region_end - alias_region_start;
+    constexpr size_t GetAliasRegionSize() const {
+        return m_alias_region_end - m_alias_region_start;
     }
     constexpr VAddr GetStackRegionStart() const {
-        return stack_region_start;
+        return m_stack_region_start;
     }
     constexpr VAddr GetStackRegionEnd() const {
-        return stack_region_end;
+        return m_stack_region_end;
     }
-    constexpr std::size_t GetStackRegionSize() const {
-        return stack_region_end - stack_region_start;
+    constexpr size_t GetStackRegionSize() const {
+        return m_stack_region_end - m_stack_region_start;
     }
     constexpr VAddr GetKernelMapRegionStart() const {
-        return kernel_map_region_start;
+        return m_kernel_map_region_start;
     }
     constexpr VAddr GetKernelMapRegionEnd() const {
-        return kernel_map_region_end;
+        return m_kernel_map_region_end;
     }
     constexpr VAddr GetCodeRegionStart() const {
-        return code_region_start;
+        return m_code_region_start;
     }
     constexpr VAddr GetCodeRegionEnd() const {
-        return code_region_end;
+        return m_code_region_end;
     }
     constexpr VAddr GetAliasCodeRegionStart() const {
-        return alias_code_region_start;
+        return m_alias_code_region_start;
     }
     constexpr VAddr GetAliasCodeRegionSize() const {
-        return alias_code_region_end - alias_code_region_start;
+        return m_alias_code_region_end - m_alias_code_region_start;
     }
-    std::size_t GetNormalMemorySize() {
-        KScopedLightLock lk(general_lock);
-        return GetHeapSize() + mapped_physical_memory_size;
+    size_t GetNormalMemorySize() {
+        KScopedLightLock lk(m_general_lock);
+        return GetHeapSize() + m_mapped_physical_memory_size;
     }
-    constexpr std::size_t GetAddressSpaceWidth() const {
-        return address_space_width;
+    constexpr size_t GetAddressSpaceWidth() const {
+        return m_address_space_width;
     }
-    constexpr std::size_t GetHeapSize() const {
-        return current_heap_end - heap_region_start;
+    constexpr size_t GetHeapSize() const {
+        return m_current_heap_end - m_heap_region_start;
     }
-    constexpr bool IsInsideAddressSpace(VAddr address, std::size_t size) const {
-        return address_space_start <= address && address + size - 1 <= address_space_end - 1;
+    constexpr bool IsInsideAddressSpace(VAddr address, size_t size) const {
+        return m_address_space_start <= address && address + size - 1 <= m_address_space_end - 1;
     }
-    constexpr bool IsOutsideAliasRegion(VAddr address, std::size_t size) const {
-        return alias_region_start > address || address + size - 1 > alias_region_end - 1;
+    constexpr bool IsOutsideAliasRegion(VAddr address, size_t size) const {
+        return m_alias_region_start > address || address + size - 1 > m_alias_region_end - 1;
     }
-    constexpr bool IsOutsideStackRegion(VAddr address, std::size_t size) const {
-        return stack_region_start > address || address + size - 1 > stack_region_end - 1;
+    constexpr bool IsOutsideStackRegion(VAddr address, size_t size) const {
+        return m_stack_region_start > address || address + size - 1 > m_stack_region_end - 1;
     }
-    constexpr bool IsInvalidRegion(VAddr address, std::size_t size) const {
+    constexpr bool IsInvalidRegion(VAddr address, size_t size) const {
         return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1;
     }
-    constexpr bool IsInsideHeapRegion(VAddr address, std::size_t size) const {
-        return address + size > heap_region_start && heap_region_end > address;
+    constexpr bool IsInsideHeapRegion(VAddr address, size_t size) const {
+        return address + size > m_heap_region_start && m_heap_region_end > address;
     }
-    constexpr bool IsInsideAliasRegion(VAddr address, std::size_t size) const {
-        return address + size > alias_region_start && alias_region_end > address;
+    constexpr bool IsInsideAliasRegion(VAddr address, size_t size) const {
+        return address + size > m_alias_region_start && m_alias_region_end > address;
     }
-    constexpr bool IsOutsideASLRRegion(VAddr address, std::size_t size) const {
+    constexpr bool IsOutsideASLRRegion(VAddr address, size_t size) const {
         if (IsInvalidRegion(address, size)) {
             return true;
         }
@@ -290,77 +296,78 @@ public:
         }
         return {};
     }
-    constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const {
+    constexpr bool IsInsideASLRRegion(VAddr address, size_t size) const {
         return !IsOutsideASLRRegion(address, size);
     }
-    constexpr std::size_t GetNumGuardPages() const {
+    constexpr size_t GetNumGuardPages() const {
         return IsKernel() ? 1 : 4;
     }
     PAddr GetPhysicalAddr(VAddr addr) const {
-        const auto backing_addr = page_table_impl.backing_addr[addr >> PageBits];
+        const auto backing_addr = m_page_table_impl.backing_addr[addr >> PageBits];
         ASSERT(backing_addr);
         return backing_addr + addr;
     }
     constexpr bool Contains(VAddr addr) const {
-        return address_space_start <= addr && addr <= address_space_end - 1;
+        return m_address_space_start <= addr && addr <= m_address_space_end - 1;
     }
-    constexpr bool Contains(VAddr addr, std::size_t size) const {
-        return address_space_start <= addr && addr < addr + size &&
-               addr + size - 1 <= address_space_end - 1;
+    constexpr bool Contains(VAddr addr, size_t size) const {
+        return m_address_space_start <= addr && addr < addr + size &&
+               addr + size - 1 <= m_address_space_end - 1;
     }
 
 private:
     constexpr bool IsKernel() const {
-        return is_kernel;
+        return m_is_kernel;
     }
     constexpr bool IsAslrEnabled() const {
-        return is_aslr_enabled;
+        return m_enable_aslr;
     }
-    constexpr bool ContainsPages(VAddr addr, std::size_t num_pages) const {
-        return (address_space_start <= addr) &&
-               (num_pages <= (address_space_end - address_space_start) / PageSize) &&
-               (addr + num_pages * PageSize - 1 <= address_space_end - 1);
+    constexpr bool ContainsPages(VAddr addr, size_t num_pages) const {
+        return (m_address_space_start <= addr) &&
+               (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) &&
+               (addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
     }
 
 private:
-    VAddr address_space_start{};
-    VAddr address_space_end{};
-    VAddr heap_region_start{};
-    VAddr heap_region_end{};
-    VAddr current_heap_end{};
-    VAddr alias_region_start{};
-    VAddr alias_region_end{};
-    VAddr stack_region_start{};
-    VAddr stack_region_end{};
-    VAddr kernel_map_region_start{};
-    VAddr kernel_map_region_end{};
-    VAddr code_region_start{};
-    VAddr code_region_end{};
-    VAddr alias_code_region_start{};
-    VAddr alias_code_region_end{};
-
-    std::size_t mapped_physical_memory_size{};
-    std::size_t max_heap_size{};
-    std::size_t max_physical_memory_size{};
-    std::size_t address_space_width{};
-
-    KMemoryBlockManager memory_block_manager;
-
-    bool is_kernel{};
-    bool is_aslr_enabled{};
-
-    KMemoryBlockSlabManager* memory_block_slab_manager{};
-
-    u32 heap_fill_value{};
-    const KMemoryRegion* cached_physical_heap_region{};
-
-    KMemoryManager::Pool memory_pool{KMemoryManager::Pool::Application};
-    KMemoryManager::Direction allocation_option{KMemoryManager::Direction::FromFront};
-
-    Common::PageTable page_table_impl;
-
-    Core::System& system;
+    VAddr m_address_space_start{};
+    VAddr m_address_space_end{};
+    VAddr m_heap_region_start{};
+    VAddr m_heap_region_end{};
+    VAddr m_current_heap_end{};
+    VAddr m_alias_region_start{};
+    VAddr m_alias_region_end{};
+    VAddr m_stack_region_start{};
+    VAddr m_stack_region_end{};
+    VAddr m_kernel_map_region_start{};
+    VAddr m_kernel_map_region_end{};
+    VAddr m_code_region_start{};
+    VAddr m_code_region_end{};
+    VAddr m_alias_code_region_start{};
+    VAddr m_alias_code_region_end{};
+
+    size_t m_mapped_physical_memory_size{};
+    size_t m_max_heap_size{};
+    size_t m_max_physical_memory_size{};
+    size_t m_address_space_width{};
+
+    KMemoryBlockManager m_memory_block_manager;
+
+    bool m_is_kernel{};
+    bool m_enable_aslr{};
+    bool m_enable_device_address_space_merge{};
+
+    KMemoryBlockSlabManager* m_memory_block_slab_manager{};
+
+    u32 m_heap_fill_value{};
+    const KMemoryRegion* m_cached_physical_heap_region{};
+
+    KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application};
+    KMemoryManager::Direction m_allocation_option{KMemoryManager::Direction::FromFront};
+
+    Common::PageTable m_page_table_impl;
+
+    Core::System& m_system;
 };
 
 } // namespace Kernel
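One detail the renamed predicates above make easy to miss: every range check in this header compares inclusive last addresses (address + size - 1 <= end - 1) and first requires addr < addr + size, rather than using the naive one-past-the-end sum, so a range whose end wraps the 64-bit address space cannot pass. A standalone sketch of the difference; the helper names are illustrative only and assume nothing beyond fixed-width unsigned arithmetic:

    #include <cstdint>

    using VAddr = std::uint64_t;

    // Naive form: addr + size wraps for addr = 0xFFFF'FFFF'FFFF'F000, size = 0x2000,
    // so the comparison against end silently accepts an out-of-range request.
    constexpr bool ContainsNaive(VAddr start, VAddr end, VAddr addr, std::uint64_t size) {
        return start <= addr && addr + size <= end;
    }

    // Form used by KPageTable::Contains above: reject wrapping ranges up front,
    // then compare inclusive last addresses, which cannot overflow.
    constexpr bool ContainsSafe(VAddr start, VAddr end, VAddr addr, std::uint64_t size) {
        return start <= addr && addr < addr + size && addr + size - 1 <= end - 1;
    }

    static_assert(ContainsNaive(0, 0x8000'0000, 0xFFFF'FFFF'FFFF'F000ULL, 0x2000));
    static_assert(!ContainsSafe(0, 0x8000'0000, 0xFFFF'FFFF'FFFF'F000ULL, 0x2000));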
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp
index ddf273b5e..b60679021 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp
@@ -128,7 +128,8 @@ NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output)
     }
     ASSERT(system.CurrentProcess()
                ->PageTable()
-               .LockForDeviceAddressSpace(handle_description->address, handle_description->size)
+               .LockForMapDeviceAddressSpace(handle_description->address, handle_description->size,
+                                             Kernel::KMemoryPermission::None, true)
                .IsSuccess());
     std::memcpy(output.data(), &params, sizeof(params));
     return result;
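The nvmap hunk above is the only call-site change: IocAlloc now pins the handle's backing range through LockForMapDeviceAddressSpace with KMemoryPermission::None and is_aligned set to true. The matching teardown would presumably pair LockForUnmapDeviceAddressSpace with UnlockForDeviceAddressSpace, which this diff adds to KPageTable but does not yet call; the free-path sketch below is therefore an assumption about intended usage, not established code:

    // Hypothetical IocFree counterpart; mirrors the IocAlloc call shown above.
    auto& page_table = system.CurrentProcess()->PageTable();

    // Step the range toward unmap while it is still device-shared...
    ASSERT(page_table
               .LockForUnmapDeviceAddressSpace(handle_description->address,
                                               handle_description->size)
               .IsSuccess());

    // ...then drop the device share taken at allocation time.
    ASSERT(page_table
               .UnlockForDeviceAddressSpace(handle_description->address,
                                            handle_description->size)
               .IsSuccess());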