Diffstat
-rw-r--r--  src/common/page_table.cpp                  |  10
-rw-r--r--  src/common/page_table.h                    |  68
-rw-r--r--  src/common/virtual_buffer.h                |  10
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_32.cpp  |   1
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_64.cpp  |   1
-rw-r--r--  src/core/hle/kernel/memory/page_table.cpp  |   2
-rw-r--r--  src/core/memory.cpp                        | 187
7 files changed, 132 insertions, 147 deletions
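In short, the commit drops the separate PageTable::attributes vector and instead tags each page pointer with its PageType in the low two bits of a single atomic word (the new PageInfo entries), which is also why dynarmic is told to mask those bits off via page_table_pointer_mask_bits. A minimal, self-contained sketch of the tagging arithmetic this relies on (illustrative only, not code from the commit):

#include <cassert>
#include <cstdint>

// Reserve the two low bits of each entry for the page type; this requires the
// stored host pointers to be at least 4-byte aligned, which page-sized
// allocations comfortably guarantee.
constexpr int ATTRIBUTE_BITS = 2;
constexpr std::uintptr_t pointer_mask = ~std::uintptr_t{0} << ATTRIBUTE_BITS;   // ...FFFFFFFC
constexpr std::uintptr_t type_mask = (std::uintptr_t{1} << ATTRIBUTE_BITS) - 1; // 0x3

int main() {
    const std::uintptr_t pointer = 0x7f3245600000; // hypothetical aligned host pointer
    const std::uintptr_t type = 2;                 // e.g. PageType::RasterizerCachedMemory

    const std::uintptr_t raw = pointer | type;     // pack, as PageInfo::Store does
    assert((raw & pointer_mask) == pointer);       // unpack, as ExtractPointer does
    assert((raw & type_mask) == type);             // unpack, as ExtractType does
    return 0;
}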
diff --git a/src/common/page_table.cpp b/src/common/page_table.cpp
index bccea0894..8fd8620fd 100644
--- a/src/common/page_table.cpp
+++ b/src/common/page_table.cpp
@@ -10,16 +10,10 @@ PageTable::PageTable() = default;
 
 PageTable::~PageTable() noexcept = default;
 
-void PageTable::Resize(std::size_t address_space_width_in_bits, std::size_t page_size_in_bits,
-                       bool has_attribute) {
-    const std::size_t num_page_table_entries{1ULL
-                                             << (address_space_width_in_bits - page_size_in_bits)};
+void PageTable::Resize(size_t address_space_width_in_bits, size_t page_size_in_bits) {
+    const size_t num_page_table_entries{1ULL << (address_space_width_in_bits - page_size_in_bits)};
     pointers.resize(num_page_table_entries);
     backing_addr.resize(num_page_table_entries);
-
-    if (has_attribute) {
-        attributes.resize(num_page_table_entries);
-    }
 }
 
 } // namespace Common
diff --git a/src/common/page_table.h b/src/common/page_table.h
index 9754fabf9..8d4ee9249 100644
--- a/src/common/page_table.h
+++ b/src/common/page_table.h
@@ -4,6 +4,7 @@
 
 #pragma once
 
+#include <atomic>
 #include <tuple>
 
 #include "common/common_types.h"
@@ -20,10 +21,6 @@ enum class PageType : u8 {
     /// Page is mapped to regular memory, but also needs to check for rasterizer cache flushing and
     /// invalidation
     RasterizerCachedMemory,
-    /// Page is mapped to a I/O region. Writing and reading to this page is handled by functions.
-    Special,
-    /// Page is allocated for use.
-    Allocated,
 };
 
 struct SpecialRegion {
@@ -48,6 +45,59 @@ struct SpecialRegion {
  * mimics the way a real CPU page table works.
  */
 struct PageTable {
+    /// Number of bits reserved for attribute tagging.
+    /// This can be at most the guaranteed alignment of the pointers in the page table.
+    static constexpr int ATTRIBUTE_BITS = 2;
+
+    /**
+     * Pair of host pointer and page type attribute.
+     * This uses the lower bits of a given pointer to store the attribute tag.
+     * Writing and reading the pointer attribute pair is guaranteed to be atomic for the same method
+     * call. In other words, they are guaranteed to be synchronized at all times.
+     */
+    class PageInfo {
+    public:
+        /// Returns the page pointer
+        [[nodiscard]] u8* Pointer() const noexcept {
+            return ExtractPointer(raw.load(std::memory_order_relaxed));
+        }
+
+        /// Returns the page type attribute
+        [[nodiscard]] PageType Type() const noexcept {
+            return ExtractType(raw.load(std::memory_order_relaxed));
+        }
+
+        /// Returns the page pointer and attribute pair, extracted from the same atomic read
+        [[nodiscard]] std::pair<u8*, PageType> PointerType() const noexcept {
+            const uintptr_t non_atomic_raw = raw.load(std::memory_order_relaxed);
+            return {ExtractPointer(non_atomic_raw), ExtractType(non_atomic_raw)};
+        }
+
+        /// Returns the raw representation of the page information.
+        /// Use ExtractPointer and ExtractType to unpack the value.
+        [[nodiscard]] uintptr_t Raw() const noexcept {
+            return raw.load(std::memory_order_relaxed);
+        }
+
+        /// Write a page pointer and type pair atomically
+        void Store(u8* pointer, PageType type) noexcept {
+            raw.store(reinterpret_cast<uintptr_t>(pointer) | static_cast<uintptr_t>(type));
+        }
+
+        /// Unpack a pointer from a page info raw representation
+        [[nodiscard]] static u8* ExtractPointer(uintptr_t raw) noexcept {
+            return reinterpret_cast<u8*>(raw & (~uintptr_t{0} << ATTRIBUTE_BITS));
+        }
+
+        /// Unpack a page type from a page info raw representation
+        [[nodiscard]] static PageType ExtractType(uintptr_t raw) noexcept {
+            return static_cast<PageType>(raw & ((uintptr_t{1} << ATTRIBUTE_BITS) - 1));
+        }
+
+    private:
+        std::atomic<uintptr_t> raw;
+    };
+
     PageTable();
     ~PageTable() noexcept;
 
@@ -63,20 +113,16 @@ struct PageTable {
      *
      * @param address_space_width_in_bits The address size width in bits.
      * @param page_size_in_bits The page size in bits.
-     * @param has_attribute Whether or not this page has any backing attributes.
      */
-    void Resize(std::size_t address_space_width_in_bits, std::size_t page_size_in_bits,
-                bool has_attribute);
+    void Resize(size_t address_space_width_in_bits, size_t page_size_in_bits);
 
     /**
      * Vector of memory pointers backing each page. An entry can only be non-null if the
-     * corresponding entry in the `attributes` vector is of type `Memory`.
+     * corresponding attribute element is of type `Memory`.
      */
-    VirtualBuffer<u8*> pointers;
+    VirtualBuffer<PageInfo> pointers;
 
     VirtualBuffer<u64> backing_addr;
-
-    VirtualBuffer<PageType> attributes;
 };
 
 } // namespace Common
diff --git a/src/common/virtual_buffer.h b/src/common/virtual_buffer.h
index 91d430036..fb1a6f81f 100644
--- a/src/common/virtual_buffer.h
+++ b/src/common/virtual_buffer.h
@@ -15,10 +15,12 @@ void FreeMemoryPages(void* base, std::size_t size) noexcept;
 template <typename T>
 class VirtualBuffer final {
 public:
-    static_assert(
-        std::is_trivially_constructible_v<T>,
-        "T must be trivially constructible, as non-trivial constructors will not be executed "
-        "with the current allocator");
+    // TODO: Uncomment this and change Common::PageTable::PageInfo to be trivially constructible
+    // using std::atomic_ref once libc++ has support for it
+    // static_assert(
+    //     std::is_trivially_constructible_v<T>,
+    //     "T must be trivially constructible, as non-trivial constructors will not be executed "
+    //     "with the current allocator");
 
     constexpr VirtualBuffer() = default;
     explicit VirtualBuffer(std::size_t count) : alloc_size{count * sizeof(T)} {
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index e9c74b1a6..8aaf11eee 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -133,6 +133,7 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable&
     config.page_table = reinterpret_cast<std::array<std::uint8_t*, NUM_PAGE_TABLE_ENTRIES>*>(
         page_table.pointers.data());
     config.absolute_offset_page_table = true;
+    config.page_table_pointer_mask_bits = Common::PageTable::ATTRIBUTE_BITS;
     config.detect_misaligned_access_via_page_table = 16 | 32 | 64 | 128;
     config.only_detect_misalignment_via_page_table_on_page_boundary = true;
 
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index 7a4eb88a2..d2e1dc724 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -152,6 +152,7 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable&
     // Memory
     config.page_table = reinterpret_cast<void**>(page_table.pointers.data());
     config.page_table_address_space_bits = address_space_bits;
+    config.page_table_pointer_mask_bits = Common::PageTable::ATTRIBUTE_BITS;
     config.silently_mirror_page_table = false;
     config.absolute_offset_page_table = true;
     config.detect_misaligned_access_via_page_table = 16 | 32 | 64 | 128;
diff --git a/src/core/hle/kernel/memory/page_table.cpp b/src/core/hle/kernel/memory/page_table.cpp
index f53a7be82..f3e8bc333 100644
--- a/src/core/hle/kernel/memory/page_table.cpp
+++ b/src/core/hle/kernel/memory/page_table.cpp
@@ -265,7 +265,7 @@ ResultCode PageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_t
     physical_memory_usage = 0;
     memory_pool = pool;
 
-    page_table_impl.Resize(address_space_width, PageBits, true);
+    page_table_impl.Resize(address_space_width, PageBits);
 
     return InitializeMemoryLayout(start, end);
 }
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 54a848936..f209c4949 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -4,7 +4,6 @@
 
 #include <algorithm>
 #include <cstring>
-#include <mutex>
 #include <optional>
 #include <utility>
 
@@ -68,21 +67,8 @@ struct Memory::Impl {
 
     bool IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) const {
         const auto& page_table = process.PageTable().PageTableImpl();
-
-        const u8* const page_pointer = page_table.pointers[vaddr >> PAGE_BITS];
-        if (page_pointer != nullptr) {
-            return true;
-        }
-
-        if (page_table.attributes[vaddr >> PAGE_BITS] == Common::PageType::RasterizerCachedMemory) {
-            return true;
-        }
-
-        if (page_table.attributes[vaddr >> PAGE_BITS] != Common::PageType::Special) {
-            return false;
-        }
-
-        return false;
+        const auto [pointer, type] = page_table.pointers[vaddr >> PAGE_BITS].PointerType();
+        return pointer != nullptr || type == Common::PageType::RasterizerCachedMemory;
     }
 
     bool IsValidVirtualAddress(VAddr vaddr) const {
@@ -100,17 +86,15 @@ struct Memory::Impl {
     }
 
     u8* GetPointer(const VAddr vaddr) const {
-        u8* const page_pointer{current_page_table->pointers[vaddr >> PAGE_BITS]};
-        if (page_pointer) {
-            return page_pointer + vaddr;
+        const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> PAGE_BITS].Raw();
+        if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
+            return pointer + vaddr;
         }
-
-        if (current_page_table->attributes[vaddr >> PAGE_BITS] ==
-            Common::PageType::RasterizerCachedMemory) {
+        const auto type = Common::PageTable::PageInfo::ExtractType(raw_pointer);
+        if (type == Common::PageType::RasterizerCachedMemory) {
             return GetPointerFromRasterizerCachedMemory(vaddr);
         }
-
-        return {};
+        return nullptr;
     }
 
     u8 Read8(const VAddr addr) {
@@ -222,7 +206,8 @@ struct Memory::Impl {
                 std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
             const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
 
-            switch (page_table.attributes[page_index]) {
+            const auto [pointer, type] = page_table.pointers[page_index].PointerType();
+            switch (type) {
             case Common::PageType::Unmapped: {
                 LOG_ERROR(HW_Memory,
                           "Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
@@ -231,10 +216,8 @@ struct Memory::Impl {
                 break;
             }
             case Common::PageType::Memory: {
-                DEBUG_ASSERT(page_table.pointers[page_index]);
-
-                const u8* const src_ptr =
-                    page_table.pointers[page_index] + page_offset + (page_index << PAGE_BITS);
+                DEBUG_ASSERT(pointer);
+                const u8* const src_ptr = pointer + page_offset + (page_index << PAGE_BITS);
                 std::memcpy(dest_buffer, src_ptr, copy_amount);
                 break;
             }
@@ -268,7 +251,8 @@ struct Memory::Impl {
                 std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
             const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
 
-            switch (page_table.attributes[page_index]) {
+            const auto [pointer, type] = page_table.pointers[page_index].PointerType();
+            switch (type) {
             case Common::PageType::Unmapped: {
                 LOG_ERROR(HW_Memory,
                           "Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
@@ -277,10 +261,8 @@ struct Memory::Impl {
                 break;
            }
            case Common::PageType::Memory: {
-                DEBUG_ASSERT(page_table.pointers[page_index]);
-
-                const u8* const src_ptr =
-                    page_table.pointers[page_index] + page_offset + (page_index << PAGE_BITS);
+                DEBUG_ASSERT(pointer);
+                const u8* const src_ptr = pointer + page_offset + (page_index << PAGE_BITS);
                 std::memcpy(dest_buffer, src_ptr, copy_amount);
                 break;
             }
@@ -320,7 +302,8 @@ struct Memory::Impl {
                 std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
             const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
 
-            switch (page_table.attributes[page_index]) {
+            const auto [pointer, type] = page_table.pointers[page_index].PointerType();
+            switch (type) {
             case Common::PageType::Unmapped: {
                 LOG_ERROR(HW_Memory,
                           "Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
@@ -328,10 +311,8 @@ struct Memory::Impl {
                 break;
             }
             case Common::PageType::Memory: {
-                DEBUG_ASSERT(page_table.pointers[page_index]);
-
-                u8* const dest_ptr =
-                    page_table.pointers[page_index] + page_offset + (page_index << PAGE_BITS);
+                DEBUG_ASSERT(pointer);
+                u8* const dest_ptr = pointer + page_offset + (page_index << PAGE_BITS);
                 std::memcpy(dest_ptr, src_buffer, copy_amount);
                 break;
             }
@@ -364,7 +345,8 @@ struct Memory::Impl {
                 std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
             const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
 
-            switch (page_table.attributes[page_index]) {
+            const auto [pointer, type] = page_table.pointers[page_index].PointerType();
+            switch (type) {
             case Common::PageType::Unmapped: {
                 LOG_ERROR(HW_Memory,
                           "Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
@@ -372,10 +354,8 @@ struct Memory::Impl {
                 break;
             }
             case Common::PageType::Memory: {
-                DEBUG_ASSERT(page_table.pointers[page_index]);
-
-                u8* const dest_ptr =
-                    page_table.pointers[page_index] + page_offset + (page_index << PAGE_BITS);
+                DEBUG_ASSERT(pointer);
+                u8* const dest_ptr = pointer + page_offset + (page_index << PAGE_BITS);
                 std::memcpy(dest_ptr, src_buffer, copy_amount);
                 break;
             }
@@ -414,7 +394,8 @@ struct Memory::Impl {
                 std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
             const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
 
-            switch (page_table.attributes[page_index]) {
+            const auto [pointer, type] = page_table.pointers[page_index].PointerType();
+            switch (type) {
             case Common::PageType::Unmapped: {
                 LOG_ERROR(HW_Memory,
                           "Unmapped ZeroBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
@@ -422,10 +403,8 @@ struct Memory::Impl {
                 break;
             }
             case Common::PageType::Memory: {
-                DEBUG_ASSERT(page_table.pointers[page_index]);
-
-                u8* dest_ptr =
-                    page_table.pointers[page_index] + page_offset + (page_index << PAGE_BITS);
+                DEBUG_ASSERT(pointer);
+                u8* const dest_ptr = pointer + page_offset + (page_index << PAGE_BITS);
                 std::memset(dest_ptr, 0, copy_amount);
                 break;
             }
@@ -461,7 +440,8 @@ struct Memory::Impl {
                 std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
             const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
 
-            switch (page_table.attributes[page_index]) {
+            const auto [pointer, type] = page_table.pointers[page_index].PointerType();
+            switch (type) {
             case Common::PageType::Unmapped: {
                 LOG_ERROR(HW_Memory,
                           "Unmapped CopyBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
@@ -470,9 +450,8 @@ struct Memory::Impl {
                 break;
             }
             case Common::PageType::Memory: {
-                DEBUG_ASSERT(page_table.pointers[page_index]);
-                const u8* src_ptr =
-                    page_table.pointers[page_index] + page_offset + (page_index << PAGE_BITS);
+                DEBUG_ASSERT(pointer);
+                const u8* src_ptr = pointer + page_offset + (page_index << PAGE_BITS);
                 WriteBlock(process, dest_addr, src_ptr, copy_amount);
                 break;
             }
@@ -498,34 +477,19 @@ struct Memory::Impl {
         return CopyBlock(*system.CurrentProcess(), dest_addr, src_addr, size);
     }
 
-    struct PageEntry {
-        u8* const pointer;
-        const Common::PageType attribute;
-    };
-
-    PageEntry SafePageEntry(std::size_t base) const {
-        std::lock_guard lock{rasterizer_cache_guard};
-        return {
-            .pointer = current_page_table->pointers[base],
-            .attribute = current_page_table->attributes[base],
-        };
-    }
-
     void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
-        std::lock_guard lock{rasterizer_cache_guard};
         if (vaddr == 0) {
            return;
        }
-
         // Iterate over a contiguous CPU address space, which corresponds to the specified GPU
         // address space, marking the region as un/cached. The region is marked un/cached at a
         // granularity of CPU pages, hence why we iterate on a CPU page basis (note: GPU page size
         // is different). This assumes the specified GPU address region is contiguous as well.
 
-        u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
-        for (unsigned i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
-            Common::PageType& page_type{current_page_table->attributes[vaddr >> PAGE_BITS]};
-
+        const u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
+        for (u64 i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
+            const Common::PageType page_type{
+                current_page_table->pointers[vaddr >> PAGE_BITS].Type()};
             if (cached) {
                 // Switch page type to cached if now cached
                 switch (page_type) {
@@ -534,8 +498,8 @@ struct Memory::Impl {
                     // space, for example, a system module need not have a VRAM mapping.
                     break;
                 case Common::PageType::Memory:
-                    page_type = Common::PageType::RasterizerCachedMemory;
-                    current_page_table->pointers[vaddr >> PAGE_BITS] = nullptr;
+                    current_page_table->pointers[vaddr >> PAGE_BITS].Store(
+                        nullptr, Common::PageType::RasterizerCachedMemory);
                     break;
                 case Common::PageType::RasterizerCachedMemory:
                     // There can be more than one GPU region mapped per CPU region, so it's common
@@ -556,16 +520,16 @@ struct Memory::Impl {
                     // that this area is already unmarked as cached.
                     break;
                 case Common::PageType::RasterizerCachedMemory: {
-                    u8* pointer{GetPointerFromRasterizerCachedMemory(vaddr & ~PAGE_MASK)};
+                    u8* const pointer{GetPointerFromRasterizerCachedMemory(vaddr & ~PAGE_MASK)};
                     if (pointer == nullptr) {
                         // It's possible that this function has been called while updating the
                         // pagetable after unmapping a VMA. In that case the underlying VMA will no
                         // longer exist, and we should just leave the pagetable entry blank.
-                        page_type = Common::PageType::Unmapped;
+                        current_page_table->pointers[vaddr >> PAGE_BITS].Store(
+                            nullptr, Common::PageType::Unmapped);
                     } else {
-                        current_page_table->pointers[vaddr >> PAGE_BITS] =
-                            pointer - (vaddr & ~PAGE_MASK);
-                        page_type = Common::PageType::Memory;
+                        current_page_table->pointers[vaddr >> PAGE_BITS].Store(
+                            pointer - (vaddr & ~PAGE_MASK), Common::PageType::Memory);
                     }
                     break;
                 }
@@ -595,7 +559,7 @@ struct Memory::Impl {
         auto& gpu = system.GPU();
         for (u64 i = 0; i < size; i++) {
             const auto page = base + i;
-            if (page_table.attributes[page] == Common::PageType::RasterizerCachedMemory) {
+            if (page_table.pointers[page].Type() == Common::PageType::RasterizerCachedMemory) {
                 gpu.FlushAndInvalidateRegion(page << PAGE_BITS, PAGE_SIZE);
             }
         }
@@ -610,20 +574,18 @@ struct Memory::Impl {
                        "Mapping memory page without a pointer @ {:016x}", base * PAGE_SIZE);
 
             while (base != end) {
-                page_table.attributes[base] = type;
-                page_table.pointers[base] = nullptr;
+                page_table.pointers[base].Store(nullptr, type);
                 page_table.backing_addr[base] = 0;
 
                 base += 1;
             }
         } else {
             while (base != end) {
-                page_table.pointers[base] =
-                    system.DeviceMemory().GetPointer(target) - (base << PAGE_BITS);
-                page_table.attributes[base] = type;
+                page_table.pointers[base].Store(
+                    system.DeviceMemory().GetPointer(target) - (base << PAGE_BITS), type);
                 page_table.backing_addr[base] = target - (base << PAGE_BITS);
 
-                ASSERT_MSG(page_table.pointers[base],
+                ASSERT_MSG(page_table.pointers[base].Pointer(),
                            "memory mapping base yield a nullptr within the table");
 
                 base += 1;
@@ -646,21 +608,13 @@ struct Memory::Impl {
     template <typename T>
     T Read(const VAddr vaddr) {
         // Avoid adding any extra logic to this fast-path block
-        if (const u8* const pointer = current_page_table->pointers[vaddr >> PAGE_BITS]) {
+        const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> PAGE_BITS].Raw();
+        if (const u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
             T value;
             std::memcpy(&value, &pointer[vaddr], sizeof(T));
             return value;
         }
-
-        // Otherwise, we need to grab the page with a lock, in case it is currently being modified
-        const auto entry = SafePageEntry(vaddr >> PAGE_BITS);
-        if (entry.pointer) {
-            T value;
-            std::memcpy(&value, &entry.pointer[vaddr], sizeof(T));
-            return value;
-        }
-
-        switch (entry.attribute) {
+        switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) {
         case Common::PageType::Unmapped:
             LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:08X}", sizeof(T) * 8, vaddr);
             return 0;
@@ -692,20 +646,12 @@ struct Memory::Impl {
     template <typename T>
     void Write(const VAddr vaddr, const T data) {
         // Avoid adding any extra logic to this fast-path block
-        if (u8* const pointer = current_page_table->pointers[vaddr >> PAGE_BITS]) {
+        const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> PAGE_BITS].Raw();
+        if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
             std::memcpy(&pointer[vaddr], &data, sizeof(T));
             return;
         }
-
-        // Otherwise, we need to grab the page with a lock, in case it is currently being modified
-        const auto entry = SafePageEntry(vaddr >> PAGE_BITS);
-        if (entry.pointer) {
-            // Memory was mapped, we are done
-            std::memcpy(&entry.pointer[vaddr], &data, sizeof(T));
-            return;
-        }
-
-        switch (entry.attribute) {
+        switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) {
         case Common::PageType::Unmapped:
             LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8,
                       static_cast<u32>(data), vaddr);
@@ -726,15 +672,13 @@ struct Memory::Impl {
 
     template <typename T>
     bool WriteExclusive(const VAddr vaddr, const T data, const T expected) {
-        u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
-        if (page_pointer != nullptr) {
+        const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> PAGE_BITS].Raw();
+        if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
             // NOTE: Avoid adding any extra logic to this fast-path block
-            auto* pointer = reinterpret_cast<volatile T*>(&page_pointer[vaddr]);
-            return Common::AtomicCompareAndSwap(pointer, data, expected);
+            const auto volatile_pointer = reinterpret_cast<volatile T*>(&pointer[vaddr]);
+            return Common::AtomicCompareAndSwap(volatile_pointer, data, expected);
         }
-
-        const Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
-        switch (type) {
+        switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) {
         case Common::PageType::Unmapped:
             LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8,
                       static_cast<u32>(data), vaddr);
@@ -755,15 +699,13 @@ struct Memory::Impl {
     }
 
     bool WriteExclusive128(const VAddr vaddr, const u128 data, const u128 expected) {
-        u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
-        if (page_pointer != nullptr) {
+        const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> PAGE_BITS].Raw();
+        if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
             // NOTE: Avoid adding any extra logic to this fast-path block
-            auto* pointer = reinterpret_cast<volatile u64*>(&page_pointer[vaddr]);
-            return Common::AtomicCompareAndSwap(pointer, data, expected);
+            const auto volatile_pointer = reinterpret_cast<volatile u64*>(&pointer[vaddr]);
+            return Common::AtomicCompareAndSwap(volatile_pointer, data, expected);
         }
-
-        const Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
-        switch (type) {
+        switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) {
         case Common::PageType::Unmapped:
             LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}{:016X}", sizeof(data) * 8,
                       static_cast<u64>(data[1]), static_cast<u64>(data[0]), vaddr);
@@ -783,7 +725,6 @@ struct Memory::Impl {
         return true;
    }
 
-    mutable std::mutex rasterizer_cache_guard;
     Common::PageTable* current_page_table = nullptr;
     Core::System& system;
 };
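The memory.cpp side of the change follows from the same idea: every access now does one relaxed load of the packed word, so the pointer and the type can never be observed out of sync, which is why the rasterizer_cache_guard mutex and SafePageEntry disappear. Below is a condensed, self-contained sketch of that read pattern; the names TryFastRead and page are illustrative and not the commit's actual API.

#include <atomic>
#include <cstdint>
#include <cstring>

enum class PageType : std::uint8_t { Unmapped = 0, Memory = 1, RasterizerCachedMemory = 2 };

struct PageInfo {
    std::atomic<std::uintptr_t> raw{0};

    // One atomic store publishes the pointer and its type together.
    void Store(std::uint8_t* pointer, PageType type) noexcept {
        raw.store(reinterpret_cast<std::uintptr_t>(pointer) | static_cast<std::uintptr_t>(type));
    }
};

// Mirrors the Read<T> fast path above: a single relaxed load yields a raw word,
// and both the host pointer and the page type are unpacked from that same word,
// so no lock is needed to keep the pair consistent.
template <typename T>
bool TryFastRead(const PageInfo& info, std::uintptr_t vaddr, T& out) {
    const std::uintptr_t raw = info.raw.load(std::memory_order_relaxed);
    if (const auto* pointer = reinterpret_cast<const std::uint8_t*>(raw & ~std::uintptr_t{3})) {
        std::memcpy(&out, pointer + vaddr, sizeof(T)); // mapped host memory: fast path
        return true;
    }
    // Otherwise the caller falls back on the unpacked type, as the switch statements above do.
    return static_cast<PageType>(raw & 3) == PageType::RasterizerCachedMemory;
}

int main() {
    alignas(4096) static std::uint8_t page[0x1000] = {};
    page[0x20] = 0x2a;

    PageInfo entry;
    // In the real table the stored pointer is pre-biased by the page's virtual base;
    // this single page is mapped at virtual address 0, so no bias is needed here.
    entry.Store(page, PageType::Memory);

    std::uint8_t value = 0;
    return TryFastRead(entry, 0x20, value) && value == 0x2a ? 0 : 1;
}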