From 1a16d055dfbeee20a402379d6d4f3e96f8715648 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Fri, 18 Feb 2022 23:42:27 -0800
Subject: core: hle: kernel: KPageTable: Improve Un/MapPhysicalMemory.

- Improves the implementations of MapPhysicalMemory and UnmapPhysicalMemory to
  more closely reflect latest HOS.
---
 src/common/page_table.cpp | 58 +++++++++++++++++++++++++++++++++++++++++++++--
 src/common/page_table.h   | 24 ++++++++++++++++----
 2 files changed, 76 insertions(+), 6 deletions(-)

(limited to 'src/common')

diff --git a/src/common/page_table.cpp b/src/common/page_table.cpp
index 9fffd816f..75897eeae 100644
--- a/src/common/page_table.cpp
+++ b/src/common/page_table.cpp
@@ -10,11 +10,65 @@ PageTable::PageTable() = default;
 
 PageTable::~PageTable() noexcept = default;
 
-void PageTable::Resize(size_t address_space_width_in_bits, size_t page_size_in_bits) {
-    const size_t num_page_table_entries{1ULL << (address_space_width_in_bits - page_size_in_bits)};
+bool PageTable::BeginTraversal(TraversalEntry* out_entry, TraversalContext* out_context,
+                               u64 address) const {
+    // Setup invalid defaults.
+    out_entry->phys_addr = 0;
+    out_entry->block_size = page_size;
+    out_context->next_page = 0;
+
+    // Validate that we can read the actual entry.
+    const auto page = address / page_size;
+    if (page >= backing_addr.size()) {
+        return false;
+    }
+
+    // Validate that the entry is mapped.
+    const auto phys_addr = backing_addr[page];
+    if (phys_addr == 0) {
+        return false;
+    }
+
+    // Populate the results.
+    out_entry->phys_addr = phys_addr + address;
+    out_context->next_page = page + 1;
+    out_context->next_offset = address + page_size;
+
+    return true;
+}
+
+bool PageTable::ContinueTraversal(TraversalEntry* out_entry, TraversalContext* context) const {
+    // Setup invalid defaults.
+    out_entry->phys_addr = 0;
+    out_entry->block_size = page_size;
+
+    // Validate that we can read the actual entry.
+    const auto page = context->next_page;
+    if (page >= backing_addr.size()) {
+        return false;
+    }
+
+    // Validate that the entry is mapped.
+    const auto phys_addr = backing_addr[page];
+    if (phys_addr == 0) {
+        return false;
+    }
+
+    // Populate the results.
+    out_entry->phys_addr = phys_addr + context->next_offset;
+    context->next_page = page + 1;
+    context->next_offset += page_size;
+
+    return true;
+}
+
+void PageTable::Resize(std::size_t address_space_width_in_bits, std::size_t page_size_in_bits) {
+    const std::size_t num_page_table_entries{1ULL
+                                             << (address_space_width_in_bits - page_size_in_bits)};
     pointers.resize(num_page_table_entries);
     backing_addr.resize(num_page_table_entries);
     current_address_space_width_in_bits = address_space_width_in_bits;
+    page_size = 1ULL << page_size_in_bits;
 }
 
 } // namespace Common
diff --git a/src/common/page_table.h b/src/common/page_table.h
index 8267e8b4d..fe254d7ae 100644
--- a/src/common/page_table.h
+++ b/src/common/page_table.h
@@ -27,6 +27,16 @@ enum class PageType : u8 {
  * mimics the way a real CPU page table works.
  */
 struct PageTable {
+    struct TraversalEntry {
+        u64 phys_addr{};
+        std::size_t block_size{};
+    };
+
+    struct TraversalContext {
+        u64 next_page{};
+        u64 next_offset{};
+    };
+
     /// Number of bits reserved for attribute tagging.
     /// This can be at most the guaranteed alignment of the pointers in the page table.
     static constexpr int ATTRIBUTE_BITS = 2;
@@ -89,6 +99,10 @@ struct PageTable {
     PageTable(PageTable&&) noexcept = default;
     PageTable& operator=(PageTable&&) noexcept = default;
 
+    bool BeginTraversal(TraversalEntry* out_entry, TraversalContext* out_context,
+                        u64 address) const;
+    bool ContinueTraversal(TraversalEntry* out_entry, TraversalContext* context) const;
+
     /**
      * Resizes the page table to be able to accommodate enough pages within
      * a given address space.
@@ -96,9 +110,9 @@ struct PageTable {
      * @param address_space_width_in_bits The address size width in bits.
      * @param page_size_in_bits The page size in bits.
      */
-    void Resize(size_t address_space_width_in_bits, size_t page_size_in_bits);
+    void Resize(std::size_t address_space_width_in_bits, std::size_t page_size_in_bits);
 
-    size_t GetAddressSpaceBits() const {
+    std::size_t GetAddressSpaceBits() const {
         return current_address_space_width_in_bits;
     }
 
@@ -110,9 +124,11 @@ struct PageTable {
 
     VirtualBuffer<u64> backing_addr;
 
-    size_t current_address_space_width_in_bits;
+    std::size_t current_address_space_width_in_bits{};
+
+    u8* fastmem_arena{};
 
-    u8* fastmem_arena;
+    std::size_t page_size{};
 };
 
 } // namespace Common
--
cgit v1.2.3
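
Usage note (not part of the patch): the new pair is a cursor-style API. BeginTraversal resolves the first mapped page at a virtual address and primes a TraversalContext; ContinueTraversal then advances one page at a time from that context, returning false as soon as it reaches an unmapped or out-of-range page. The sketch below is illustrative only; WalkMappedRange is a hypothetical caller, and it assumes the yuzu tree's "common/page_table.h" and "common/common_types.h" plus a PageTable that has already been resized and populated.

    #include "common/common_types.h" // assumed source of u64 in the yuzu tree
    #include "common/page_table.h"

    // Hypothetical helper: visit every mapped page backing
    // [virt_addr, virt_addr + size).
    void WalkMappedRange(const Common::PageTable& page_table, u64 virt_addr, u64 size) {
        Common::PageTable::TraversalEntry entry{};
        Common::PageTable::TraversalContext context{};

        // Resolve the first page; fails if it is unmapped or outside the table.
        if (!page_table.BeginTraversal(&entry, &context, virt_addr)) {
            return;
        }

        u64 remaining = size;
        while (true) {
            // entry.phys_addr is the translated address of the current page and
            // entry.block_size is the table's page size.
            // ... use [entry.phys_addr, entry.phys_addr + entry.block_size) here ...

            if (remaining <= entry.block_size) {
                break;
            }
            remaining -= entry.block_size;

            // Advance to the next page recorded in the context.
            if (!page_table.ContinueTraversal(&entry, &context)) {
                break;
            }
        }
    }

The split into Begin/Continue lets callers such as KPageTable walk long mappings without recomputing the starting page on every step, which is the pattern the MapPhysicalMemory/UnmapPhysicalMemory rework relies on.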