author     bunnei <bunneidev@gmail.com>    2018-11-14 03:52:18 +0100
committer  GitHub <noreply@github.com>     2018-11-14 03:52:18 +0100
commit     70f189d7af3eea0123963f781e6a579c8f4725bd (patch)
tree       82e03c9e4df7286c1944f40d4abe2f3a0d876426
parent     Merge pull request #1682 from lioncash/audio (diff)
parent     vm_manager: Unstub GetTotalHeapUsage() (diff)
-rw-r--r--  src/core/hle/kernel/process.cpp     76
-rw-r--r--  src/core/hle/kernel/process.h       11
-rw-r--r--  src/core/hle/kernel/vm_manager.cpp  82
-rw-r--r--  src/core/hle/kernel/vm_manager.h    15
4 files changed, 98 insertions, 86 deletions
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index 420218d59..e78e3a950 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -5,11 +5,9 @@
#include <algorithm>
#include <memory>
#include "common/assert.h"
-#include "common/common_funcs.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/file_sys/program_metadata.h"
-#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/resource_limit.h"
@@ -241,83 +239,15 @@ void Process::LoadModule(CodeSet module_, VAddr base_addr) {
}
ResultVal<VAddr> Process::HeapAllocate(VAddr target, u64 size, VMAPermission perms) {
- if (target < vm_manager.GetHeapRegionBaseAddress() ||
- target + size > vm_manager.GetHeapRegionEndAddress() || target + size < target) {
- return ERR_INVALID_ADDRESS;
- }
-
- if (heap_memory == nullptr) {
- // Initialize heap
- heap_memory = std::make_shared<std::vector<u8>>();
- heap_start = heap_end = target;
- } else {
- vm_manager.UnmapRange(heap_start, heap_end - heap_start);
- }
-
- // If necessary, expand backing vector to cover new heap extents.
- if (target < heap_start) {
- heap_memory->insert(begin(*heap_memory), heap_start - target, 0);
- heap_start = target;
- vm_manager.RefreshMemoryBlockMappings(heap_memory.get());
- }
- if (target + size > heap_end) {
- heap_memory->insert(end(*heap_memory), (target + size) - heap_end, 0);
- heap_end = target + size;
- vm_manager.RefreshMemoryBlockMappings(heap_memory.get());
- }
- ASSERT(heap_end - heap_start == heap_memory->size());
-
- CASCADE_RESULT(auto vma, vm_manager.MapMemoryBlock(target, heap_memory, target - heap_start,
- size, MemoryState::Heap));
- vm_manager.Reprotect(vma, perms);
-
- heap_used = size;
-
- return MakeResult<VAddr>(heap_end - size);
+ return vm_manager.HeapAllocate(target, size, perms);
}
ResultCode Process::HeapFree(VAddr target, u32 size) {
- if (target < vm_manager.GetHeapRegionBaseAddress() ||
- target + size > vm_manager.GetHeapRegionEndAddress() || target + size < target) {
- return ERR_INVALID_ADDRESS;
- }
-
- if (size == 0) {
- return RESULT_SUCCESS;
- }
-
- ResultCode result = vm_manager.UnmapRange(target, size);
- if (result.IsError())
- return result;
-
- heap_used -= size;
-
- return RESULT_SUCCESS;
+ return vm_manager.HeapFree(target, size);
}
ResultCode Process::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
- auto vma = vm_manager.FindVMA(src_addr);
-
- ASSERT_MSG(vma != vm_manager.vma_map.end(), "Invalid memory address");
- ASSERT_MSG(vma->second.backing_block, "Backing block doesn't exist for address");
-
- // The returned VMA might be a bigger one encompassing the desired address.
- auto vma_offset = src_addr - vma->first;
- ASSERT_MSG(vma_offset + size <= vma->second.size,
- "Shared memory exceeds bounds of mapped block");
-
- const std::shared_ptr<std::vector<u8>>& backing_block = vma->second.backing_block;
- std::size_t backing_block_offset = vma->second.offset + vma_offset;
-
- CASCADE_RESULT(auto new_vma,
- vm_manager.MapMemoryBlock(dst_addr, backing_block, backing_block_offset, size,
- MemoryState::Mapped));
- // Protect mirror with permissions from old region
- vm_manager.Reprotect(new_vma, vma->second.permissions);
- // Remove permissions from old region
- vm_manager.Reprotect(vma, VMAPermission::None);
-
- return RESULT_SUCCESS;
+ return vm_manager.MirrorMemory(dst_addr, src_addr, size);
}
ResultCode Process::UnmapMemory(VAddr dst_addr, VAddr /*src_addr*/, u64 size) {
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index 8d2616c79..f79f6d7a5 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -292,17 +292,6 @@ private:
u32 allowed_thread_priority_mask = 0xFFFFFFFF;
u32 is_virtual_address_memory_enabled = 0;
- // Memory used to back the allocations in the regular heap. A single vector is used to cover
- // the entire virtual address space extents that bound the allocations, including any holes.
- // This makes deallocation and reallocation of holes fast and keeps process memory contiguous
- // in the emulator address space, allowing Memory::GetPointer to be reasonably safe.
- std::shared_ptr<std::vector<u8>> heap_memory;
-
- // The left/right bounds of the address space covered by heap_memory.
- VAddr heap_start = 0;
- VAddr heap_end = 0;
- u64 heap_used = 0;
-
/// The Thread Local Storage area is allocated as processes create threads,
/// each TLS area is 0x200 bytes, so one page (0x1000) is split up in 8 parts, and each part
/// holds the TLS for a specific thread. This vector contains which parts are in use for each
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index 1a92c8f70..ec7fd6150 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -243,6 +243,85 @@ ResultCode VMManager::ReprotectRange(VAddr target, u64 size, VMAPermission new_p
return RESULT_SUCCESS;
}
+ResultVal<VAddr> VMManager::HeapAllocate(VAddr target, u64 size, VMAPermission perms) {
+ if (target < GetHeapRegionBaseAddress() || target + size > GetHeapRegionEndAddress() ||
+ target + size < target) {
+ return ERR_INVALID_ADDRESS;
+ }
+
+ if (heap_memory == nullptr) {
+ // Initialize heap
+ heap_memory = std::make_shared<std::vector<u8>>();
+ heap_start = heap_end = target;
+ } else {
+ UnmapRange(heap_start, heap_end - heap_start);
+ }
+
+ // If necessary, expand backing vector to cover new heap extents.
+ if (target < heap_start) {
+ heap_memory->insert(begin(*heap_memory), heap_start - target, 0);
+ heap_start = target;
+ RefreshMemoryBlockMappings(heap_memory.get());
+ }
+ if (target + size > heap_end) {
+ heap_memory->insert(end(*heap_memory), (target + size) - heap_end, 0);
+ heap_end = target + size;
+ RefreshMemoryBlockMappings(heap_memory.get());
+ }
+ ASSERT(heap_end - heap_start == heap_memory->size());
+
+ CASCADE_RESULT(auto vma, MapMemoryBlock(target, heap_memory, target - heap_start, size,
+ MemoryState::Heap));
+ Reprotect(vma, perms);
+
+ heap_used = size;
+
+ return MakeResult<VAddr>(heap_end - size);
+}
+
+ResultCode VMManager::HeapFree(VAddr target, u64 size) {
+ if (target < GetHeapRegionBaseAddress() || target + size > GetHeapRegionEndAddress() ||
+ target + size < target) {
+ return ERR_INVALID_ADDRESS;
+ }
+
+ if (size == 0) {
+ return RESULT_SUCCESS;
+ }
+
+ const ResultCode result = UnmapRange(target, size);
+ if (result.IsError()) {
+ return result;
+ }
+
+ heap_used -= size;
+ return RESULT_SUCCESS;
+}
+
+ResultCode VMManager::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
+ const auto vma = FindVMA(src_addr);
+
+ ASSERT_MSG(vma != vma_map.end(), "Invalid memory address");
+ ASSERT_MSG(vma->second.backing_block, "Backing block doesn't exist for address");
+
+ // The returned VMA might be a bigger one encompassing the desired address.
+ const auto vma_offset = src_addr - vma->first;
+ ASSERT_MSG(vma_offset + size <= vma->second.size,
+ "Shared memory exceeds bounds of mapped block");
+
+ const std::shared_ptr<std::vector<u8>>& backing_block = vma->second.backing_block;
+ const std::size_t backing_block_offset = vma->second.offset + vma_offset;
+
+ CASCADE_RESULT(auto new_vma, MapMemoryBlock(dst_addr, backing_block, backing_block_offset, size,
+ MemoryState::Mapped));
+ // Protect mirror with permissions from old region
+ Reprotect(new_vma, vma->second.permissions);
+ // Remove permissions from old region
+ Reprotect(vma, VMAPermission::None);
+
+ return RESULT_SUCCESS;
+}
+
void VMManager::RefreshMemoryBlockMappings(const std::vector<u8>* block) {
// If this ever proves to have a noticeable performance impact, allow users of the function to
// specify a specific range of addresses to limit the scan to.
@@ -495,8 +574,7 @@ u64 VMManager::GetTotalMemoryUsage() const {
}
u64 VMManager::GetTotalHeapUsage() const {
- LOG_WARNING(Kernel, "(STUBBED) called");
- return 0x0;
+ return heap_used;
}
VAddr VMManager::GetAddressSpaceBaseAddress() const {
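
A side note on the scheme HeapAllocate() relies on above: the heap is backed by one contiguous std::vector that spans the whole allocated extent, and that vector is grown at the front or the back whenever a requested range falls outside the current [heap_start, heap_end) bounds. The following is a minimal standalone sketch of just that expansion step; HeapExtent, Cover(), and the addresses used are illustrative stand-ins, not yuzu code.

// heap_extent_sketch.cpp -- illustrative only; simplified stand-ins for the
// heap_memory/heap_start/heap_end members shown in the diff above.
#include <cassert>
#include <cstdint>
#include <memory>
#include <vector>

using VAddr = std::uint64_t;

struct HeapExtent {
    std::shared_ptr<std::vector<std::uint8_t>> memory; // single backing block for the whole heap
    VAddr start = 0; // lowest covered address
    VAddr end = 0;   // one past the highest covered address

    // Grow the backing vector so it covers [target, target + size), zero-filling any
    // newly covered bytes; mirrors the expansion logic in HeapAllocate() above.
    void Cover(VAddr target, std::uint64_t size) {
        if (!memory) {
            memory = std::make_shared<std::vector<std::uint8_t>>();
            start = end = target;
        }
        if (target < start) {
            memory->insert(memory->begin(), start - target, 0); // extend downwards
            start = target;
        }
        if (target + size > end) {
            memory->insert(memory->end(), (target + size) - end, 0); // extend upwards
            end = target + size;
        }
        assert(end - start == memory->size());
    }
};

int main() {
    HeapExtent heap;
    heap.Cover(0x1000, 0x2000); // first request establishes the extent [0x1000, 0x3000)
    heap.Cover(0x0800, 0x0400); // a lower request grows the vector at the front
    assert(heap.start == 0x0800 && heap.end == 0x3000);
    assert(heap.memory->size() == 0x2800);
}
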
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index 2447cbb8f..248cc46dc 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -186,6 +186,11 @@ public:
/// Changes the permissions of a range of addresses, splitting VMAs as necessary.
ResultCode ReprotectRange(VAddr target, u64 size, VMAPermission new_perms);
+ ResultVal<VAddr> HeapAllocate(VAddr target, u64 size, VMAPermission perms);
+ ResultCode HeapFree(VAddr target, u64 size);
+
+ ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size);
+
/**
* Scans all VMAs and updates the page table range of any that use the given vector as backing
* memory. This should be called after any operation that causes reallocation of the vector.
@@ -343,5 +348,15 @@ private:
VAddr tls_io_region_base = 0;
VAddr tls_io_region_end = 0;
+
+ // Memory used to back the allocations in the regular heap. A single vector is used to cover
+ // the entire virtual address space extents that bound the allocations, including any holes.
+ // This makes deallocation and reallocation of holes fast and keeps process memory contiguous
+ // in the emulator address space, allowing Memory::GetPointer to be reasonably safe.
+ std::shared_ptr<std::vector<u8>> heap_memory;
+ // The left/right bounds of the address space covered by heap_memory.
+ VAddr heap_start = 0;
+ VAddr heap_end = 0;
+ u64 heap_used = 0;
};
} // namespace Kernel
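
Usage note (illustrative, not part of the commit): with the interface now on VMManager, a caller in the HLE kernel layer would drive the heap roughly as below. HeapUsageExample, the 2 MiB size, and the assumption of an already-initialized VMManager whose heap region contains heap_base are hypothetical; only the member functions come from the header change above, and the fragment compiles only inside the yuzu source tree.

// Illustrative call sequence against the relocated interface.
#include "common/assert.h"
#include "core/hle/kernel/vm_manager.h"

namespace Kernel {

void HeapUsageExample(VMManager& vm_manager, VAddr heap_base) {
    // Map 2 MiB of heap with read/write permissions.
    const auto allocation = vm_manager.HeapAllocate(heap_base, 0x200000, VMAPermission::ReadWrite);
    if (!allocation.Succeeded()) {
        return;
    }

    // GetTotalHeapUsage() is no longer stubbed; it now reports heap_used.
    ASSERT(vm_manager.GetTotalHeapUsage() == 0x200000);

    // Freeing the range unmaps it and shrinks the reported usage back to zero.
    if (vm_manager.HeapFree(heap_base, 0x200000).IsSuccess()) {
        ASSERT(vm_manager.GetTotalHeapUsage() == 0);
    }
}

} // namespace Kernel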