From 359f22b808f54de9f19e40a8f93604ee7e4bac80 Mon Sep 17 00:00:00 2001
From: Fernando Sahmkow
Date: Sun, 6 Feb 2022 18:51:07 +0100
Subject: MemoryManager: Finish up the initial implementation.

---
 src/video_core/memory_manager.cpp | 177 ++++++++++++++++++++++++++++----------
 src/video_core/memory_manager.h   |  11 ++-
 2 files changed, 138 insertions(+), 50 deletions(-)

diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index 836ece136..e1a8b5391 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -15,8 +15,6 @@
 #include "video_core/rasterizer_interface.h"
 #include "video_core/renderer_base.h"
 
-#pragma optimize("", off)
-
 namespace Tegra {
 
 std::atomic<size_t> MemoryManager::unique_identifier_generator{};
@@ -42,7 +40,7 @@ MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64
 
     big_entries.resize(big_page_table_size / 32, 0);
     big_page_table_cpu.resize(big_page_table_size);
-    big_page_table_physical.resize(big_page_table_size);
+    big_page_continous.resize(big_page_table_size / continous_bits, 0);
     entries.resize(page_table_size / 32, 0);
 }
 
@@ -80,6 +78,19 @@ void MemoryManager::SetEntry(size_t position, MemoryManager::EntryType entry) {
     }
 }
 
+inline bool MemoryManager::IsBigPageContinous(size_t big_page_index) const {
+    const u64 entry_mask = big_page_continous[big_page_index / continous_bits];
+    const size_t sub_index = big_page_index % continous_bits;
+    return ((entry_mask >> sub_index) & 0x1ULL) != 0;
+}
+
+inline void MemoryManager::SetBigPageContinous(size_t big_page_index, bool value) {
+    const u64 continous_mask = big_page_continous[big_page_index / continous_bits];
+    const size_t sub_index = big_page_index % continous_bits;
+    big_page_continous[big_page_index / continous_bits] =
+        (~(1ULL << sub_index) & continous_mask) | (value ? 1ULL << sub_index : 0);
+}
+
 template <MemoryManager::EntryType entry_type>
 GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr,
                                     size_t size) {
@@ -121,9 +132,19 @@ GPUVAddr MemoryManager::BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr
             const auto index = PageEntryIndex<true>(current_gpu_addr);
             const u32 sub_value = static_cast<u32>(current_cpu_addr >> cpu_page_bits);
             big_page_table_cpu[index] = sub_value;
-            const PAddr phys_address =
-                device_memory.GetPhysicalAddr(memory.GetPointer(current_cpu_addr));
-            big_page_table_physical[index] = static_cast<u32>(phys_address);
+            const bool is_continous = ([&] {
+                uintptr_t base_ptr{
+                    reinterpret_cast<uintptr_t>(memory.GetPointer(current_cpu_addr))};
+                for (VAddr start_cpu = current_cpu_addr + page_size;
+                     start_cpu < current_cpu_addr + big_page_size; start_cpu += page_size) {
+                    base_ptr += page_size;
+                    if (base_ptr != reinterpret_cast<uintptr_t>(memory.GetPointer(start_cpu))) {
+                        return false;
+                    }
+                }
+                return true;
+            })();
+            SetBigPageContinous(index, is_continous);
         }
         remaining_size -= big_page_size;
     }
@@ -248,12 +269,17 @@ const u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) const {
     return memory.GetPointer(*address);
 }
 
+#ifdef _MSC_VER // no need for gcc / clang but msvc's compiler is more conservative with inlining.
+#pragma inline_recursion(on)
+#endif
+
 template <bool is_big_pages, typename FuncMapped, typename FuncReserved, typename FuncUnmapped>
 inline void MemoryManager::MemoryOperation(GPUVAddr gpu_src_addr, std::size_t size,
                                            FuncMapped&& func_mapped, FuncReserved&& func_reserved,
                                            FuncUnmapped&& func_unmapped) const {
+    static constexpr bool BOOL_BREAK_MAPPED =
+        std::is_same_v<decltype(func_mapped(0, 0, 0)), bool>;
+    static constexpr bool BOOL_BREAK_RESERVED =
+        std::is_same_v<decltype(func_reserved(0, 0, 0)), bool>;
+    static constexpr bool BOOL_BREAK_UNMAPPED =
+        std::is_same_v<decltype(func_unmapped(0, 0, 0)), bool>;
     u64 used_page_size;
     u64 used_page_mask;
    u64 used_page_bits;
@@ -276,11 +302,31 @@ inline void MemoryManager::MemoryOperation(GPUVAddr gpu_src_addr, std::size_t si
             std::min(static_cast<std::size_t>(used_page_size) - page_offset, remaining_size)};
         auto entry = GetEntry<is_big_pages>(current_address);
         if (entry == EntryType::Mapped) [[likely]] {
-            func_mapped(page_index, page_offset, copy_amount);
+            if constexpr (BOOL_BREAK_MAPPED) {
+                if (func_mapped(page_index, page_offset, copy_amount)) {
+                    return;
+                }
+            } else {
+                func_mapped(page_index, page_offset, copy_amount);
+            }
+
         } else if (entry == EntryType::Reserved) {
-            func_reserved(page_index, page_offset, copy_amount);
+            if constexpr (BOOL_BREAK_RESERVED) {
+                if (func_reserved(page_index, page_offset, copy_amount)) {
+                    return;
+                }
+            } else {
+                func_reserved(page_index, page_offset, copy_amount);
+            }
+
         } else [[unlikely]] {
-            func_unmapped(page_index, page_offset, copy_amount);
+            if constexpr (BOOL_BREAK_UNMAPPED) {
+                if (func_unmapped(page_index, page_offset, copy_amount)) {
+                    return;
+                }
+            } else {
+                func_unmapped(page_index, page_offset, copy_amount);
+            }
         }
         page_index++;
         page_offset = 0;
@@ -303,7 +349,8 @@ void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer,
         if constexpr (is_safe) {
             rasterizer->FlushRegion(cpu_addr_base, copy_amount);
         }
-        memory.ReadBlockUnsafe(cpu_addr_base, dest_buffer, copy_amount);
+        u8* physical = memory.GetPointer(cpu_addr_base);
+        std::memcpy(dest_buffer, physical, copy_amount);
         dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
     };
     auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
@@ -312,9 +359,12 @@ void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer,
         if constexpr (is_safe) {
             rasterizer->FlushRegion(cpu_addr_base, copy_amount);
         }
-        memory.ReadBlockUnsafe(cpu_addr_base, dest_buffer, copy_amount);
-        // u8* physical = device_memory.GetPointer(big_page_table_physical[page_index] + offset);
-        // std::memcpy(dest_buffer, physical, copy_amount);
+        if (!IsBigPageContinous(page_index)) {
+            memory.ReadBlockUnsafe(cpu_addr_base, dest_buffer, copy_amount);
+        } else {
+            u8* physical = memory.GetPointer(cpu_addr_base);
+            std::memcpy(dest_buffer, physical, copy_amount);
+        }
         dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
     };
     auto read_short_pages = [&](std::size_t page_index, std::size_t offset,
@@ -347,7 +397,8 @@ void MemoryManager::WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffe
         if constexpr (is_safe) {
             rasterizer->InvalidateRegion(cpu_addr_base, copy_amount);
         }
-        memory.WriteBlockUnsafe(cpu_addr_base, src_buffer, copy_amount);
+        u8* physical = memory.GetPointer(cpu_addr_base);
+        std::memcpy(physical, src_buffer, copy_amount);
         src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
     };
     auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
@@ -356,10 +407,12 @@ void MemoryManager::WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffe
         if constexpr (is_safe) {
             rasterizer->InvalidateRegion(cpu_addr_base, copy_amount);
         }
-        memory.WriteBlockUnsafe(cpu_addr_base, src_buffer, copy_amount);
-        /*u8* physical =
-            device_memory.GetPointer(big_page_table_physical[page_index] << cpu_page_bits) + offset;
-        std::memcpy(physical, src_buffer, copy_amount);*/
+        if (!IsBigPageContinous(page_index)) {
+            memory.WriteBlockUnsafe(cpu_addr_base, src_buffer, copy_amount);
+        } else {
+            u8* physical = memory.GetPointer(cpu_addr_base);
+            std::memcpy(physical, src_buffer, copy_amount);
+        }
         src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
     };
     auto write_short_pages = [&](std::size_t page_index, std::size_t offset,
@@ -413,48 +466,80 @@ void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std
 }
 
 bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) const {
-    const auto cpu_addr{GpuToCpuAddress(gpu_addr)};
-    if (!cpu_addr) {
+    if (GetEntry<true>(gpu_addr) == EntryType::Mapped) [[likely]] {
+        size_t page_index = gpu_addr >> big_page_bits;
+        if (IsBigPageContinous(page_index)) [[likely]] {
+            const std::size_t page{(gpu_addr & big_page_mask) + size};
+            return page <= big_page_size;
+        }
+        const std::size_t page{(gpu_addr & Core::Memory::YUZU_PAGEMASK) + size};
+        return page <= Core::Memory::YUZU_PAGESIZE;
+    }
+    if (GetEntry<false>(gpu_addr) != EntryType::Mapped) {
         return false;
     }
-    const std::size_t page{(*cpu_addr & Core::Memory::YUZU_PAGEMASK) + size};
+    const std::size_t page{(gpu_addr & Core::Memory::YUZU_PAGEMASK) + size};
     return page <= Core::Memory::YUZU_PAGESIZE;
 }
 
 bool MemoryManager::IsContinousRange(GPUVAddr gpu_addr, std::size_t size) const {
-    size_t page_index{gpu_addr >> big_page_bits};
-    const size_t page_last{(gpu_addr + size + page_size - 1) >> page_bits};
     std::optional<VAddr> old_page_addr{};
-    while (page_index != page_last) {
-        const auto page_addr{GpuToCpuAddress(page_index << page_bits)};
-        if (!page_addr || *page_addr == 0) {
-            return false;
-        }
-        if (old_page_addr) {
-            if (*old_page_addr + page_size != *page_addr) {
-                return false;
-            }
-        }
-        old_page_addr = page_addr;
-        ++page_index;
-    }
-    return true;
+    bool result{true};
+    auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset,
+                    [[maybe_unused]] std::size_t copy_amount) {
+        result = false;
+        return true;
+    };
+    auto short_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+        const VAddr cpu_addr_base =
+            (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+        if (old_page_addr && *old_page_addr != cpu_addr_base) {
+            result = false;
+            return true;
+        }
+        old_page_addr = {cpu_addr_base + copy_amount};
+        return false;
+    };
+    auto big_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+        const VAddr cpu_addr_base =
+            (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
+        if (old_page_addr && *old_page_addr != cpu_addr_base) {
+            result = false;
+            return true;
+        }
+        old_page_addr = {cpu_addr_base + copy_amount};
+        return false;
+    };
+    auto check_short_pages = [&](std::size_t page_index, std::size_t offset,
+                                 std::size_t copy_amount) {
+        GPUVAddr base = (page_index << big_page_bits) + offset;
+        MemoryOperation<false>(base, copy_amount, short_check, fail, fail);
+        return !result;
+    };
+    MemoryOperation<true>(gpu_addr, size, big_check, fail, check_short_pages);
+    return result;
 }
 
 bool MemoryManager::IsFullyMappedRange(GPUVAddr gpu_addr, std::size_t size) const {
-    size_t page_index{gpu_addr >> page_bits};
-    const size_t page_last{(gpu_addr + size + page_size - 1) >> page_bits};
-    while (page_index < page_last) {
-        if (GetEntry(page_index << page_bits) == EntryType::Free) {
-            return false;
-        }
-        ++page_index;
-    }
-    return true;
+    std::optional<VAddr> old_page_addr{};
+    bool result{true};
+    auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset,
+                    [[maybe_unused]] std::size_t copy_amount) {
+        result = false;
+        return true;
+    };
+    auto pass = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset,
+                    [[maybe_unused]] std::size_t copy_amount) { return false; };
+    auto check_short_pages = [&](std::size_t page_index, std::size_t offset,
+                                 std::size_t copy_amount) {
+        GPUVAddr base = (page_index << big_page_bits) + offset;
+        MemoryOperation<false>(base, copy_amount, pass, pass, fail);
+        return !result;
+    };
+    MemoryOperation<true>(gpu_addr, size, pass, fail, check_short_pages);
+    return result;
 }
 
-#pragma inline_recursion(on)
-
 std::vector<std::pair<GPUVAddr, std::size_t>> MemoryManager::GetSubmappedRange(
     GPUVAddr gpu_addr, std::size_t size) const {
     std::vector<std::pair<GPUVAddr, std::size_t>> result{};
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h
index 9c388a06e..8f8877a92 100644
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ -105,9 +105,6 @@ public:
     void FlushRegion(GPUVAddr gpu_addr, size_t size) const;
 
 private:
-    [[nodiscard]] std::optional<GPUVAddr> FindFreeRange(std::size_t size, std::size_t align,
-                                                        bool start_32bit_address = false) const;
-
     template <bool is_big_pages, typename FuncMapped, typename FuncReserved, typename FuncUnmapped>
     inline void MemoryOperation(GPUVAddr gpu_src_addr, std::size_t size, FuncMapped&& func_mapped,
                                 FuncReserved&& func_reserved, FuncUnmapped&& func_unmapped) const;
@@ -127,6 +124,9 @@ private:
         }
     }
 
+    inline bool IsBigPageContinous(size_t big_page_index) const;
+    inline void SetBigPageContinous(size_t big_page_index, bool value);
+
     Core::System& system;
     Core::Memory::Memory& memory;
     Core::DeviceMemory& device_memory;
@@ -169,7 +169,10 @@ private:
     Common::MultiLevelPageTable<u32> page_table;
     Common::VirtualBuffer<u32> big_page_table_cpu;
-    Common::VirtualBuffer<u32> big_page_table_physical;
+
+    std::vector<u64> big_page_continous;
+
+    constexpr static size_t continous_bits = 64;
 
     const size_t unique_identifier;
-- 
cgit v1.2.3
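
Note: the heart of this change is the per-big-page continuity bitmap queried by
IsBigPageContinous() and updated by SetBigPageContinous(): one bit per big page,
set when all host pages backing it are contiguous, so ReadBlock/WriteBlock can
service the whole big page with a single memcpy. The standalone sketch below
reproduces that bookkeeping outside of MemoryManager for illustration; the class
name BigPageContinuity and its constructor are hypothetical and not part of the
yuzu sources, but the bit layout (64 big pages per u64 word, matching
continous_bits, spelling included) follows the patch.

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // One bit per big page: set when every small page backing the big page is
    // contiguous in host memory, so a single memcpy can service the big page.
    class BigPageContinuity {
    public:
        explicit BigPageContinuity(std::size_t num_big_pages)
            : big_page_continous((num_big_pages + continous_bits - 1) / continous_bits, 0) {}

        bool IsBigPageContinous(std::size_t big_page_index) const {
            const std::uint64_t entry_mask = big_page_continous[big_page_index / continous_bits];
            const std::size_t sub_index = big_page_index % continous_bits;
            return ((entry_mask >> sub_index) & 1ULL) != 0;
        }

        void SetBigPageContinous(std::size_t big_page_index, bool value) {
            const std::size_t word = big_page_index / continous_bits;
            const std::size_t sub_index = big_page_index % continous_bits;
            // Clear the bit, then set it again only if the big page is contiguous.
            big_page_continous[word] = (big_page_continous[word] & ~(1ULL << sub_index)) |
                                       (value ? 1ULL << sub_index : 0);
        }

    private:
        static constexpr std::size_t continous_bits = 64; // big pages per u64 word
        std::vector<std::uint64_t> big_page_continous;
    };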
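Note: MemoryOperation now dispatches on the visitor's return type: a visitor
returning bool can abort the page walk early (how IsContinousRange and
IsFullyMappedRange bail out on the first failure), while a void visitor always
runs to completion. The reduced sketch below demonstrates the same
if-constexpr dispatch under that assumption; VisitPages and the example
visitors are hypothetical names, not part of the yuzu sources.

    #include <cstddef>
    #include <type_traits>

    // Calls func once per page; if func returns bool and yields true, the walk
    // stops early. Mirrors the BOOL_BREAK_* dispatch in MemoryOperation.
    template <typename Func>
    void VisitPages(std::size_t page_count, Func&& func) {
        static constexpr bool kBoolBreak =
            std::is_same_v<decltype(func(std::size_t{})), bool>;
        for (std::size_t page = 0; page < page_count; ++page) {
            if constexpr (kBoolBreak) {
                if (func(page)) {
                    return; // visitor requested an early stop
                }
            } else {
                func(page);
            }
        }
    }

    // Usage: a void visitor runs over every page; a bool visitor can break.
    // VisitPages(8, [](std::size_t p) { /* touch page p */ });
    // VisitPages(8, [](std::size_t p) { return p == 3; }); // stops at page 3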