author     Narr the Reg <juangerman-13@hotmail.com>   2024-01-01 20:56:16 +0100
committer  GitHub <noreply@github.com>                2024-01-01 20:56:16 +0100
commit     f0f92edbd0a78abda819251ddc325da4acc14216 (patch)
tree       6a23c1be26148c4137a6f67ebdf926a3f82ce47f /src/common
parent     Merge pull request #12501 from liamwhite/ips (diff)
parent     heap_tracker: use linear-time mapping eviction (diff)
Diffstat (limited to 'src/common')
-rw-r--r--  src/common/CMakeLists.txt      2
-rw-r--r--  src/common/heap_tracker.cpp    281
-rw-r--r--  src/common/heap_tracker.h      98
-rw-r--r--  src/common/host_memory.cpp     10
-rw-r--r--  src/common/host_memory.h       11
5 files changed, 395 insertions, 7 deletions
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index b58a7073f..8c57d47c6 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -64,6 +64,8 @@ add_library(common STATIC
     fs/path_util.cpp
     fs/path_util.h
     hash.h
+    heap_tracker.cpp
+    heap_tracker.h
     hex_util.cpp
     hex_util.h
     host_memory.cpp
diff --git a/src/common/heap_tracker.cpp b/src/common/heap_tracker.cpp
new file mode 100644
index 000000000..683208795
--- /dev/null
+++ b/src/common/heap_tracker.cpp
@@ -0,0 +1,281 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <fstream>
+#include <vector>
+
+#include "common/heap_tracker.h"
+#include "common/logging/log.h"
+
+namespace Common {
+
+namespace {
+
+s64 GetMaxPermissibleResidentMapCount() {
+    // Default value.
+    s64 value = 65530;
+
+    // Try to read how many mappings we can make.
+    std::ifstream s("/proc/sys/vm/max_map_count");
+    s >> value;
+
+    // Print, for debug.
+    LOG_INFO(HW_Memory, "Current maximum map count: {}", value);
+
+    // Allow 20000 maps for other code and to account for split inaccuracy.
+    return std::max<s64>(value - 20000, 0);
+}
+
+} // namespace
+
+HeapTracker::HeapTracker(Common::HostMemory& buffer)
+    : m_buffer(buffer), m_max_resident_map_count(GetMaxPermissibleResidentMapCount()) {}
+HeapTracker::~HeapTracker() = default;
+
+void HeapTracker::Map(size_t virtual_offset, size_t host_offset, size_t length,
+                      MemoryPermission perm, bool is_separate_heap) {
+    // When mapping other memory, map pages immediately.
+    if (!is_separate_heap) {
+        m_buffer.Map(virtual_offset, host_offset, length, perm, false);
+        return;
+    }
+
+    {
+        // We are mapping part of a separate heap.
+        std::scoped_lock lk{m_lock};
+
+        auto* const map = new SeparateHeapMap{
+            .vaddr = virtual_offset,
+            .paddr = host_offset,
+            .size = length,
+            .tick = m_tick++,
+            .perm = perm,
+            .is_resident = false,
+        };
+
+        // Insert into mappings.
+        m_map_count++;
+        m_mappings.insert(*map);
+    }
+
+    // Finally, map.
+    this->DeferredMapSeparateHeap(virtual_offset);
+}
+
+void HeapTracker::Unmap(size_t virtual_offset, size_t size, bool is_separate_heap) {
+    // If this is a separate heap...
+    if (is_separate_heap) {
+        std::scoped_lock lk{m_lock};
+
+        const SeparateHeapMap key{
+            .vaddr = virtual_offset,
+        };
+
+        // Split at the boundaries of the region we are removing.
+        this->SplitHeapMapLocked(virtual_offset);
+        this->SplitHeapMapLocked(virtual_offset + size);
+
+        // Erase all mappings in range.
+        auto it = m_mappings.find(key);
+        while (it != m_mappings.end() && it->vaddr < virtual_offset + size) {
+            // Get underlying item.
+            auto* const item = std::addressof(*it);
+
+            // If resident, erase from resident map.
+            if (item->is_resident) {
+                ASSERT(--m_resident_map_count >= 0);
+                m_resident_mappings.erase(m_resident_mappings.iterator_to(*item));
+            }
+
+            // Erase from map.
+            ASSERT(--m_map_count >= 0);
+            it = m_mappings.erase(it);
+
+            // Free the item.
+            delete item;
+        }
+    }
+
+    // Unmap pages.
+    m_buffer.Unmap(virtual_offset, size, false);
+}
+
+void HeapTracker::Protect(size_t virtual_offset, size_t size, MemoryPermission perm) {
+    // Ensure no rebuild occurs while reprotecting.
+    std::shared_lock lk{m_rebuild_lock};
+
+    // Split at the boundaries of the region we are reprotecting.
+    this->SplitHeapMap(virtual_offset, size);
+
+    // Declare tracking variables.
+    const VAddr end = virtual_offset + size;
+    VAddr cur = virtual_offset;
+
+    while (cur < end) {
+        VAddr next = cur;
+        bool should_protect = false;
+
+        {
+            std::scoped_lock lk2{m_lock};
+
+            const SeparateHeapMap key{
+                .vaddr = next,
+            };
+
+            // Try to get the next mapping corresponding to this address.
+            const auto it = m_mappings.nfind(key);
+
+            if (it == m_mappings.end()) {
+                // There are no separate heap mappings remaining.
+                next = end;
+                should_protect = true;
+            } else if (it->vaddr == cur) {
+                // We are in range.
+                // Update permission bits.
+                it->perm = perm;
+
+                // Determine next address and whether we should protect.
+                next = cur + it->size;
+                should_protect = it->is_resident;
+            } else /* if (it->vaddr > cur) */ {
+                // We weren't in range, but there is a block coming up that will be.
+                next = it->vaddr;
+                should_protect = true;
+            }
+        }
+
+        // Clamp to end.
+        next = std::min(next, end);
+
+        // Reprotect, if we need to.
+        if (should_protect) {
+            m_buffer.Protect(cur, next - cur, perm);
+        }
+
+        // Advance.
+        cur = next;
+    }
+}
+
+bool HeapTracker::DeferredMapSeparateHeap(u8* fault_address) {
+    if (m_buffer.IsInVirtualRange(fault_address)) {
+        return this->DeferredMapSeparateHeap(fault_address - m_buffer.VirtualBasePointer());
+    }
+
+    return false;
+}
+
+bool HeapTracker::DeferredMapSeparateHeap(size_t virtual_offset) {
+    bool rebuild_required = false;
+
+    {
+        std::scoped_lock lk{m_lock};
+
+        // Check to ensure this was a non-resident separate heap mapping.
+        const auto it = this->GetNearestHeapMapLocked(virtual_offset);
+        if (it == m_mappings.end() || it->is_resident) {
+            return false;
+        }
+
+        // Update tick before possible rebuild.
+        it->tick = m_tick++;
+
+        // Check if we need to rebuild.
+        if (m_resident_map_count > m_max_resident_map_count) {
+            rebuild_required = true;
+        }
+
+        // Map the area.
+        m_buffer.Map(it->vaddr, it->paddr, it->size, it->perm, false);
+
+        // This map is now resident.
+        it->is_resident = true;
+        m_resident_map_count++;
+        m_resident_mappings.insert(*it);
+    }
+
+    if (rebuild_required) {
+        // A rebuild was required, so perform it now.
+        this->RebuildSeparateHeapAddressSpace();
+    }
+
+    return true;
+}
+
+void HeapTracker::RebuildSeparateHeapAddressSpace() {
+    std::scoped_lock lk{m_rebuild_lock, m_lock};
+
+    ASSERT(!m_resident_mappings.empty());
+
+    // Dump half of the mappings.
+    //
+    // Despite being worse in theory, this has proven to be better in practice than more
+    // regularly dumping a smaller amount, because it significantly reduces average case
+    // lock contention.
+    const size_t desired_count = std::min(m_resident_map_count, m_max_resident_map_count) / 2;
+    const size_t evict_count = m_resident_map_count - desired_count;
+    auto it = m_resident_mappings.begin();
+
+    for (size_t i = 0; i < evict_count && it != m_resident_mappings.end(); i++) {
+        // Unmark and unmap.
+        it->is_resident = false;
+        m_buffer.Unmap(it->vaddr, it->size, false);
+
+        // Advance.
+        ASSERT(--m_resident_map_count >= 0);
+        it = m_resident_mappings.erase(it);
+    }
+}
+
+void HeapTracker::SplitHeapMap(VAddr offset, size_t size) {
+    std::scoped_lock lk{m_lock};
+
+    this->SplitHeapMapLocked(offset);
+    this->SplitHeapMapLocked(offset + size);
+}
+
+void HeapTracker::SplitHeapMapLocked(VAddr offset) {
+    const auto it = this->GetNearestHeapMapLocked(offset);
+    if (it == m_mappings.end() || it->vaddr == offset) {
+        // Not contained or no split required.
+        return;
+    }
+
+    // Cache the original values.
+    auto* const left = std::addressof(*it);
+    const size_t orig_size = left->size;
+
+    // Adjust the left map.
+    const size_t left_size = offset - left->vaddr;
+    left->size = left_size;
+
+    // Create the new right map.
+    auto* const right = new SeparateHeapMap{
+        .vaddr = left->vaddr + left_size,
+        .paddr = left->paddr + left_size,
+        .size = orig_size - left_size,
+        .tick = left->tick,
+        .perm = left->perm,
+        .is_resident = left->is_resident,
+    };
+
+    // Insert the new right map.
+    m_map_count++;
+    m_mappings.insert(*right);
+
+    // If resident, also insert into resident map.
+    if (right->is_resident) {
+        m_resident_map_count++;
+        m_resident_mappings.insert(*right);
+    }
+}
+
+HeapTracker::AddrTree::iterator HeapTracker::GetNearestHeapMapLocked(VAddr offset) {
+    const SeparateHeapMap key{
+        .vaddr = offset,
+    };
+
+    return m_mappings.find(key);
+}
+
+} // namespace Common
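
The tracker above defers mapping of separate-heap pages until they are first touched, then evicts the oldest-touched mappings once the resident count approaches the kernel's vm.max_map_count limit. A minimal usage sketch of that flow (not part of this commit; the helper name, offsets, and sizes are illustrative, and Common::MemoryPermission::ReadWrite is assumed from host_memory.h):

// Hypothetical caller driving HeapTracker's deferred-mapping path.
#include "common/heap_tracker.h"
#include "common/host_memory.h"

void ExampleSeparateHeapUse(Common::HostMemory& memory) {
    Common::HeapTracker tracker{memory};

    // Register a 2 MiB separate-heap region. Map() only records the mapping
    // (plus one immediate DeferredMapSeparateHeap() call); the backing pages
    // become resident lazily as they are faulted in.
    constexpr size_t virtual_offset = 0x10'0000;
    constexpr size_t host_offset = 0;
    constexpr size_t length = 2 * 1024 * 1024;
    tracker.Map(virtual_offset, host_offset, length, Common::MemoryPermission::ReadWrite,
                /*is_separate_heap=*/true);

    // Permission changes and unmapping also go through the tracker so that
    // its resident/non-resident bookkeeping stays consistent.
    tracker.Protect(virtual_offset, length, Common::MemoryPermission::Read);
    tracker.Unmap(virtual_offset, length, /*is_separate_heap=*/true);
}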
diff --git a/src/common/heap_tracker.h b/src/common/heap_tracker.h
new file mode 100644
index 000000000..ee5b0bf43
--- /dev/null
+++ b/src/common/heap_tracker.h
@@ -0,0 +1,98 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <atomic>
+#include <mutex>
+#include <set>
+#include <shared_mutex>
+
+#include "common/host_memory.h"
+#include "common/intrusive_red_black_tree.h"
+
+namespace Common {
+
+struct SeparateHeapMap {
+    Common::IntrusiveRedBlackTreeNode addr_node{};
+    Common::IntrusiveRedBlackTreeNode tick_node{};
+    VAddr vaddr{};
+    PAddr paddr{};
+    size_t size{};
+    size_t tick{};
+    MemoryPermission perm{};
+    bool is_resident{};
+};
+
+struct SeparateHeapMapAddrComparator {
+    static constexpr int Compare(const SeparateHeapMap& lhs, const SeparateHeapMap& rhs) {
+        if (lhs.vaddr < rhs.vaddr) {
+            return -1;
+        } else if (lhs.vaddr <= (rhs.vaddr + rhs.size - 1)) {
+            return 0;
+        } else {
+            return 1;
+        }
+    }
+};
+
+struct SeparateHeapMapTickComparator {
+    static constexpr int Compare(const SeparateHeapMap& lhs, const SeparateHeapMap& rhs) {
+        if (lhs.tick < rhs.tick) {
+            return -1;
+        } else if (lhs.tick > rhs.tick) {
+            return 1;
+        } else {
+            return SeparateHeapMapAddrComparator::Compare(lhs, rhs);
+        }
+    }
+};
+
+class HeapTracker {
+public:
+    explicit HeapTracker(Common::HostMemory& buffer);
+    ~HeapTracker();
+
+    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perm,
+             bool is_separate_heap);
+    void Unmap(size_t virtual_offset, size_t size, bool is_separate_heap);
+    void Protect(size_t virtual_offset, size_t length, MemoryPermission perm);
+    u8* VirtualBasePointer() {
+        return m_buffer.VirtualBasePointer();
+    }
+
+    bool DeferredMapSeparateHeap(u8* fault_address);
+    bool DeferredMapSeparateHeap(size_t virtual_offset);
+
+private:
+    using AddrTreeTraits =
+        Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&SeparateHeapMap::addr_node>;
+    using AddrTree = AddrTreeTraits::TreeType<SeparateHeapMapAddrComparator>;
+
+    using TickTreeTraits =
+        Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&SeparateHeapMap::tick_node>;
+    using TickTree = TickTreeTraits::TreeType<SeparateHeapMapTickComparator>;
+
+    AddrTree m_mappings{};
+    TickTree m_resident_mappings{};
+
+private:
+    void SplitHeapMap(VAddr offset, size_t size);
+    void SplitHeapMapLocked(VAddr offset);
+
+    AddrTree::iterator GetNearestHeapMapLocked(VAddr offset);
+
+    void RebuildSeparateHeapAddressSpace();
+
+private:
+    Common::HostMemory& m_buffer;
+    const s64 m_max_resident_map_count;
+
+    std::shared_mutex m_rebuild_lock{};
+    std::mutex m_lock{};
+    s64 m_map_count{};
+    s64 m_resident_map_count{};
+    size_t m_tick{};
+};
+
+} // namespace Common
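
SeparateHeapMapAddrComparator returns 0 whenever the probe's vaddr falls anywhere inside a stored mapping's [vaddr, vaddr + size) range, so find() with a point-sized key locates the mapping that contains an address. A rough standalone illustration of that containment-style lookup using std::set instead of the intrusive tree (all names here are illustrative):

#include <cstddef>
#include <cstdint>
#include <set>

struct Range {
    std::uintptr_t vaddr;
    std::size_t size;
};

// Two ranges compare "equivalent" when they overlap, so set::find() with a
// single-byte probe returns the stored range containing that address.
struct RangeLess {
    bool operator()(const Range& lhs, const Range& rhs) const {
        // lhs orders strictly below rhs only when lhs ends before rhs begins.
        return lhs.vaddr + (lhs.size ? lhs.size - 1 : 0) < rhs.vaddr;
    }
};

int main() {
    std::set<Range, RangeLess> ranges;
    ranges.insert({0x1000, 0x2000}); // covers [0x1000, 0x3000)

    const auto it = ranges.find(Range{0x1800, 1}); // probe: a single byte
    return it != ranges.end() ? 0 : 1;             // exits 0: 0x1800 is inside
}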
diff --git a/src/common/host_memory.cpp b/src/common/host_memory.cpp
index e540375b8..860c39e6a 100644
--- a/src/common/host_memory.cpp
+++ b/src/common/host_memory.cpp
@@ -679,7 +679,7 @@ HostMemory::HostMemory(HostMemory&&) noexcept = default;
 HostMemory& HostMemory::operator=(HostMemory&&) noexcept = default;
 
 void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length,
-                     MemoryPermission perms) {
+                     MemoryPermission perms, bool separate_heap) {
     ASSERT(virtual_offset % PageAlignment == 0);
     ASSERT(host_offset % PageAlignment == 0);
     ASSERT(length % PageAlignment == 0);
@@ -691,7 +691,7 @@ void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length,
     impl->Map(virtual_offset + virtual_base_offset, host_offset, length, perms);
 }
 
-void HostMemory::Unmap(size_t virtual_offset, size_t length) {
+void HostMemory::Unmap(size_t virtual_offset, size_t length, bool separate_heap) {
     ASSERT(virtual_offset % PageAlignment == 0);
     ASSERT(length % PageAlignment == 0);
     ASSERT(virtual_offset + length <= virtual_size);
@@ -701,14 +701,16 @@ void HostMemory::Unmap(size_t virtual_offset, size_t length) {
     impl->Unmap(virtual_offset + virtual_base_offset, length);
 }
 
-void HostMemory::Protect(size_t virtual_offset, size_t length, bool read, bool write,
-                         bool execute) {
+void HostMemory::Protect(size_t virtual_offset, size_t length, MemoryPermission perm) {
     ASSERT(virtual_offset % PageAlignment == 0);
     ASSERT(length % PageAlignment == 0);
     ASSERT(virtual_offset + length <= virtual_size);
     if (length == 0 || !virtual_base || !impl) {
         return;
     }
+    const bool read = True(perm & MemoryPermission::Read);
+    const bool write = True(perm & MemoryPermission::Write);
+    const bool execute = True(perm & MemoryPermission::Execute);
     impl->Protect(virtual_offset + virtual_base_offset, length, read, write, execute);
 }
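
Protect() now receives the same MemoryPermission flags used by Map() and breaks them back into booleans with True(). A self-contained sketch of that flag test, with illustrative enum values standing in for the real definitions in host_memory.h and common_funcs.h:

#include <cstdint>

// Illustrative flag enum; the real MemoryPermission lives in common/host_memory.h.
enum class MemoryPermission : std::uint32_t {
    Read = 1 << 0,
    Write = 1 << 1,
    Execute = 1 << 2,
};

constexpr MemoryPermission operator&(MemoryPermission a, MemoryPermission b) {
    return static_cast<MemoryPermission>(static_cast<std::uint32_t>(a) &
                                         static_cast<std::uint32_t>(b));
}

// True() is satisfied when any bit of the flag value is set.
constexpr bool True(MemoryPermission p) {
    return static_cast<std::uint32_t>(p) != 0;
}

static_assert(True(MemoryPermission::Read & MemoryPermission::Read));
static_assert(!True(MemoryPermission::Read & MemoryPermission::Write));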
diff --git a/src/common/host_memory.h b/src/common/host_memory.h
index 747c5850c..72fbb05af 100644
--- a/src/common/host_memory.h
+++ b/src/common/host_memory.h
@@ -40,11 +40,12 @@ public:
     HostMemory(HostMemory&& other) noexcept;
     HostMemory& operator=(HostMemory&& other) noexcept;
 
-    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perms);
+    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perms,
+             bool separate_heap);
 
-    void Unmap(size_t virtual_offset, size_t length);
+    void Unmap(size_t virtual_offset, size_t length, bool separate_heap);
 
-    void Protect(size_t virtual_offset, size_t length, bool read, bool write, bool execute = false);
+    void Protect(size_t virtual_offset, size_t length, MemoryPermission perms);
 
     void EnableDirectMappedAddress();
@@ -64,6 +65,10 @@ public:
         return virtual_base;
     }
 
+    bool IsInVirtualRange(void* address) const noexcept {
+        return address >= virtual_base && address < virtual_base + virtual_size;
+    }
+
 private:
     size_t backing_size{};
     size_t virtual_size{};
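
IsInVirtualRange() gives a host fault handler a cheap way to decide whether a faulting pointer belongs to the emulated address space before attempting a deferred map. A sketch of such a handler (not part of this commit; the global tracker pointer and the signal wiring are assumptions):

#include <csignal>

#include "common/common_types.h"
#include "common/heap_tracker.h"

namespace {

Common::HeapTracker* g_heap_tracker{}; // hypothetical, installed at startup

// Intended to be registered with sigaction() and SA_SIGINFO.
void SegfaultHandler(int, siginfo_t* info, void*) {
    u8* const fault_address = static_cast<u8*>(info->si_addr);

    // DeferredMapSeparateHeap(u8*) checks HostMemory::IsInVirtualRange()
    // before translating the pointer to a guest offset, so faults outside the
    // emulated range simply return false here.
    if (g_heap_tracker != nullptr && g_heap_tracker->DeferredMapSeparateHeap(fault_address)) {
        return; // The faulting access is retried now that the page is mapped.
    }

    std::signal(SIGSEGV, SIG_DFL); // Not ours: restore default handling.
}

} // namespace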