Diffstat (limited to 'src/core/hle/kernel/vm_manager.cpp')
-rw-r--r--  src/core/hle/kernel/vm_manager.cpp | 28 ++++++++++++++--------------
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index d8b389b65..0f89dd9f3 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -175,9 +175,9 @@ VMManager::VMAIter VMManager::Unmap(VMAIter vma_handle) {
 ResultCode VMManager::UnmapRange(VAddr target, u64 size) {
     CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size));
-    VAddr target_end = target + size;
+    const VAddr target_end = target + size;
-    VMAIter end = vma_map.end();
+    const VMAIter end = vma_map.end();
     // The comparison against the end of the range must be done using addresses since VMAs can be
     // merged during this process, causing invalidation of the iterators.
     while (vma != end && vma->second.base < target_end) {
@@ -207,9 +207,9 @@ VMManager::VMAHandle VMManager::Reprotect(VMAHandle vma_handle, VMAPermission ne
 ResultCode VMManager::ReprotectRange(VAddr target, u64 size, VMAPermission new_perms) {
     CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size));
-    VAddr target_end = target + size;
+    const VAddr target_end = target + size;
-    VMAIter end = vma_map.end();
+    const VMAIter end = vma_map.end();
     // The comparison against the end of the range must be done using addresses since VMAs can be
     // merged during this process, causing invalidation of the iterators.
     while (vma != end && vma->second.base < target_end) {
@@ -258,14 +258,14 @@ ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u64 size) {
         return ERR_INVALID_ADDRESS;
     }
-    VirtualMemoryArea& vma = vma_handle->second;
+    const VirtualMemoryArea& vma = vma_handle->second;
     if (vma.type != VMAType::Free) {
         // Region is already allocated
         return ERR_INVALID_ADDRESS_STATE;
     }
-    VAddr start_in_vma = base - vma.base;
-    VAddr end_in_vma = start_in_vma + size;
+    const VAddr start_in_vma = base - vma.base;
+    const VAddr end_in_vma = start_in_vma + size;
     if (end_in_vma > vma.size) {
         // Requested allocation doesn't fit inside VMA
@@ -288,13 +288,13 @@ ResultVal<VMManager::VMAIter> VMManager::CarveVMARange(VAddr target, u64 size) {
     ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: 0x{:016X}", size);
     ASSERT_MSG((target & Memory::PAGE_MASK) == 0, "non-page aligned base: 0x{:016X}", target);
-    VAddr target_end = target + size;
+    const VAddr target_end = target + size;
     ASSERT(target_end >= target);
     ASSERT(target_end <= MAX_ADDRESS);
     ASSERT(size > 0);
     VMAIter begin_vma = StripIterConstness(FindVMA(target));
-    VMAIter i_end = vma_map.lower_bound(target_end);
+    const VMAIter i_end = vma_map.lower_bound(target_end);
     for (auto i = begin_vma; i != i_end; ++i) {
         if (i->second.type == VMAType::Free) {
             return ERR_INVALID_ADDRESS_STATE;
@@ -346,7 +346,7 @@ VMManager::VMAIter VMManager::SplitVMA(VMAIter vma_handle, u64 offset_in_vma) {
 }
 VMManager::VMAIter VMManager::MergeAdjacent(VMAIter iter) {
-    VMAIter next_vma = std::next(iter);
+    const VMAIter next_vma = std::next(iter);
     if (next_vma != vma_map.end() && iter->second.CanBeMergedWith(next_vma->second)) {
         iter->second.size += next_vma->second.size;
         vma_map.erase(next_vma);
@@ -382,22 +382,22 @@ void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
     }
 }
-u64 VMManager::GetTotalMemoryUsage() {
+u64 VMManager::GetTotalMemoryUsage() const {
     LOG_WARNING(Kernel, "(STUBBED) called");
     return 0xF8000000;
 }
-u64 VMManager::GetTotalHeapUsage() {
+u64 VMManager::GetTotalHeapUsage() const {
     LOG_WARNING(Kernel, "(STUBBED) called");
     return 0x0;
 }
-VAddr VMManager::GetAddressSpaceBaseAddr() {
+VAddr VMManager::GetAddressSpaceBaseAddr() const {
     LOG_WARNING(Kernel, "(STUBBED) called");
     return 0x8000000;
 }
-u64 VMManager::GetAddressSpaceSize() {
+u64 VMManager::GetAddressSpaceSize() const {
     LOG_WARNING(Kernel, "(STUBBED) called");
     return MAX_ADDRESS;
 }
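
The in-diff comment ("the comparison against the end of the range must be done using addresses since VMAs can be merged during this process, causing invalidation of the iterators") is the part worth illustrating. Below is a minimal standalone sketch, not yuzu code: Region, the addresses, and the merge rule are hypothetical stand-ins for vma_map and VirtualMemoryArea. It shows why the loop bound is an address (target_end) rather than an iterator into the middle of the map, and why caching vma_map.end() as const stays safe: std::map::erase invalidates only iterators to the erased element, never end().

// Illustrative sketch only: a simplified stand-in for vma_map, not the real
// VMManager types. Region, the addresses, and the merge rule are assumptions.
#include <cstdint>
#include <iostream>
#include <iterator>
#include <map>

struct Region {
    std::uint64_t base;
    std::uint64_t size;
};

int main() {
    // Three adjacent, already-carved regions keyed by base address.
    std::map<std::uint64_t, Region> regions{
        {0x1000, Region{0x1000, 0x1000}},
        {0x2000, Region{0x2000, 0x1000}},
        {0x3000, Region{0x3000, 0x1000}},
    };

    const std::uint64_t target = 0x1000;
    const std::uint64_t target_end = 0x3000; // half-open range [target, target_end)
    const auto end = regions.end();          // end() is never invalidated by erase()

    auto it = regions.find(target);
    while (it != end && it->second.base < target_end) {
        const auto next = std::next(it);
        if (next != end && next->second.base < target_end) {
            // Merge the neighbour, then erase it; after the erase 'next' dangles,
            // which is why no iterator bounding the range is kept across iterations.
            it->second.size += next->second.size;
            regions.erase(next);
        } else {
            ++it;
        }
    }

    // Expected output: 0x1000 size=0x2000, then 0x3000 size=0x1000.
    for (const auto& entry : regions) {
        std::cout << std::hex << "0x" << entry.second.base
                  << " size=0x" << entry.second.size << '\n';
    }
    return 0;
}

The sketch compiles as standalone C++11. The real UnmapRange/ReprotectRange first carve the requested range via CarveVMARange, which is omitted here.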