Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--   src/core/hle/kernel/init/init_slab_setup.cpp    |   9
-rw-r--r--   src/core/hle/kernel/initial_process.h            |   4
-rw-r--r--   src/core/hle/kernel/k_memory_block.h             |  52
-rw-r--r--   src/core/hle/kernel/k_memory_block_manager.cpp   |  70
-rw-r--r--   src/core/hle/kernel/k_memory_block_manager.h     |   6
-rw-r--r--   src/core/hle/kernel/k_memory_layout.h            |   6
-rw-r--r--   src/core/hle/kernel/k_memory_manager.cpp         |   9
-rw-r--r--   src/core/hle/kernel/k_memory_region_type.h       | 105
-rw-r--r--   src/core/hle/kernel/k_page_group.h               |  11
-rw-r--r--   src/core/hle/kernel/k_page_table.cpp             | 314
-rw-r--r--   src/core/hle/kernel/k_page_table.h               |  33
-rw-r--r--   src/core/hle/kernel/k_process.cpp                |   2
-rw-r--r--   src/core/hle/kernel/kernel.cpp                   |  23
-rw-r--r--   src/core/hle/kernel/svc/svc_memory.cpp           |  10
-rw-r--r--   src/core/hle/kernel/svc_types.h                  |   1
15 files changed, 432 insertions, 223 deletions
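
The headline addition in this set is a PermissionLocked memory attribute and its companion state flag. As an illustrative cross-check only (the constants are taken from the k_memory_block.h hunks below; nothing here is part of the change itself), the new FlagCanPermissionLock bit is exactly what moves the CodeData and AliasCodeData state values asserted below:

    // FlagCanPermissionLock is (1 << 27) in the updated KMemoryState enum.
    constexpr unsigned int FlagCanPermissionLock = 1U << 27;
    static_assert((0x07FEBD04 | FlagCanPermissionLock) == 0x0FFEBD04); // CodeData
    static_assert((0x07FFBD09 | FlagCanPermissionLock) == 0x0FFFBD09); // AliasCodeData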
diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp
index 1f2db673c..a0e20bbbb 100644
--- a/src/core/hle/kernel/init/init_slab_setup.cpp
+++ b/src/core/hle/kernel/init/init_slab_setup.cpp
@@ -106,7 +106,7 @@ static_assert(KernelPageBufferAdditionalSize ==
 /// memory.
 static KPhysicalAddress TranslateSlabAddrToPhysical(KMemoryLayout& memory_layout, KVirtualAddress slab_addr) {
-    slab_addr -= GetInteger(memory_layout.GetSlabRegionAddress());
+    slab_addr -= memory_layout.GetSlabRegion().GetAddress();
     return GetInteger(slab_addr) + Core::DramMemoryMap::SlabHeapBase;
 }
@@ -196,7 +196,12 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
     auto& kernel = system.Kernel();
     // Get the start of the slab region, since that's where we'll be working.
-    KVirtualAddress address = memory_layout.GetSlabRegionAddress();
+    const KMemoryRegion& slab_region = memory_layout.GetSlabRegion();
+    KVirtualAddress address = slab_region.GetAddress();
+
+    // Clear the slab region.
+    // TODO: implement access to kernel VAs.
+    // std::memset(device_ptr, 0, slab_region.GetSize());
     // Initialize slab type array to be in sorted order.
     std::array<KSlabType, KSlabType_Count> slab_types;
diff --git a/src/core/hle/kernel/initial_process.h b/src/core/hle/kernel/initial_process.h
index 82195f4f7..2c95269fc 100644
--- a/src/core/hle/kernel/initial_process.h
+++ b/src/core/hle/kernel/initial_process.h
@@ -19,4 +19,8 @@ static inline KPhysicalAddress GetInitialProcessBinaryPhysicalAddress() {
                              MainMemoryAddress);
 }
+static inline size_t GetInitialProcessBinarySize() {
+    return InitialProcessBinarySizeMax;
+}
+
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_memory_block.h b/src/core/hle/kernel/k_memory_block.h
index 41a29da24..ef3f61321 100644
--- a/src/core/hle/kernel/k_memory_block.h
+++ b/src/core/hle/kernel/k_memory_block.h
@@ -36,6 +36,7 @@ enum class KMemoryState : u32 {
     FlagCanChangeAttribute = (1 << 24),
     FlagCanCodeMemory = (1 << 25),
     FlagLinearMapped = (1 << 26),
+    FlagCanPermissionLock = (1 << 27),
     FlagsData = FlagCanReprotect | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc | FlagMapped | FlagCanAlias | FlagCanTransfer | FlagCanQueryPhysical |
@@ -50,12 +51,16 @@ enum class KMemoryState : u32 {
     FlagLinearMapped,
     Free = static_cast<u32>(Svc::MemoryState::Free),
-    Io = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped | FlagCanDeviceMap | FlagCanAlignedDeviceMap,
+
+    IoMemory = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped | FlagCanDeviceMap | FlagCanAlignedDeviceMap,
+    IoRegister = static_cast<u32>(Svc::MemoryState::Io) | FlagCanDeviceMap | FlagCanAlignedDeviceMap,
+
     Static = static_cast<u32>(Svc::MemoryState::Static) | FlagMapped | FlagCanQueryPhysical,
     Code = static_cast<u32>(Svc::MemoryState::Code) | FlagsCode | FlagCanMapProcess,
     CodeData = static_cast<u32>(Svc::MemoryState::CodeData) | FlagsData | FlagCanMapProcess |
-        FlagCanCodeMemory,
+        FlagCanCodeMemory | FlagCanPermissionLock,
     Normal = static_cast<u32>(Svc::MemoryState::Normal) | FlagsData | FlagCanCodeMemory,
     Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | FlagReferenceCounted | FlagLinearMapped,
@@ -65,7 +70,8 @@ enum class KMemoryState : u32 {
     AliasCode = static_cast<u32>(Svc::MemoryState::AliasCode) | FlagsCode | FlagCanMapProcess | FlagCanCodeAlias,
     AliasCodeData = static_cast<u32>(Svc::MemoryState::AliasCodeData) | FlagsData |
-        FlagCanMapProcess | FlagCanCodeAlias | FlagCanCodeMemory,
+        FlagCanMapProcess | FlagCanCodeAlias | FlagCanCodeMemory | FlagCanPermissionLock,
     Ipc = static_cast<u32>(Svc::MemoryState::Ipc) | FlagsMisc | FlagCanAlignedDeviceMap | FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
@@ -73,7 +79,7 @@ enum class KMemoryState : u32 {
     Stack = static_cast<u32>(Svc::MemoryState::Stack) | FlagsMisc | FlagCanAlignedDeviceMap | FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
-    ThreadLocal = static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagLinearMapped,
+    ThreadLocal = static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagLinearMapped,
     Transfered = static_cast<u32>(Svc::MemoryState::Transfered) | FlagsMisc | FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc |
@@ -94,7 +100,7 @@ enum class KMemoryState : u32 {
     NonDeviceIpc = static_cast<u32>(Svc::MemoryState::NonDeviceIpc) | FlagsMisc | FlagCanUseNonDeviceIpc,
-    Kernel = static_cast<u32>(Svc::MemoryState::Kernel) | FlagMapped,
+    Kernel = static_cast<u32>(Svc::MemoryState::Kernel),
     GeneratedCode = static_cast<u32>(Svc::MemoryState::GeneratedCode) | FlagMapped | FlagReferenceCounted | FlagCanDebug | FlagLinearMapped,
@@ -105,34 +111,36 @@ enum class KMemoryState : u32 {
     Insecure = static_cast<u32>(Svc::MemoryState::Insecure) | FlagMapped | FlagReferenceCounted | FlagLinearMapped | FlagCanChangeAttribute | FlagCanDeviceMap |
-        FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
+        FlagCanAlignedDeviceMap | FlagCanQueryPhysical | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
 };
 DECLARE_ENUM_FLAG_OPERATORS(KMemoryState);
 static_assert(static_cast<u32>(KMemoryState::Free) == 0x00000000);
-static_assert(static_cast<u32>(KMemoryState::Io) == 0x00182001);
+static_assert(static_cast<u32>(KMemoryState::IoMemory) == 0x00182001);
+static_assert(static_cast<u32>(KMemoryState::IoRegister) == 0x00180001);
 static_assert(static_cast<u32>(KMemoryState::Static) == 0x00042002);
 static_assert(static_cast<u32>(KMemoryState::Code) == 0x04DC7E03);
-static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x07FEBD04);
+static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x0FFEBD04);
 static_assert(static_cast<u32>(KMemoryState::Normal) == 0x077EBD05);
 static_assert(static_cast<u32>(KMemoryState::Shared) == 0x04402006);
 static_assert(static_cast<u32>(KMemoryState::AliasCode) == 0x04DD7E08);
-static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x07FFBD09);
+static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x0FFFBD09);
 static_assert(static_cast<u32>(KMemoryState::Ipc) == 0x045C3C0A);
 static_assert(static_cast<u32>(KMemoryState::Stack) == 0x045C3C0B);
-static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0400200C);
+static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0400000C);
 static_assert(static_cast<u32>(KMemoryState::Transfered) == 0x055C3C0D);
 static_assert(static_cast<u32>(KMemoryState::SharedTransfered) == 0x045C380E);
 static_assert(static_cast<u32>(KMemoryState::SharedCode) == 0x0440380F);
 static_assert(static_cast<u32>(KMemoryState::Inaccessible) == 0x00000010);
 static_assert(static_cast<u32>(KMemoryState::NonSecureIpc) == 0x045C3811);
 static_assert(static_cast<u32>(KMemoryState::NonDeviceIpc) == 0x044C2812);
-static_assert(static_cast<u32>(KMemoryState::Kernel) == 0x00002013);
+static_assert(static_cast<u32>(KMemoryState::Kernel) == 0x00000013);
 static_assert(static_cast<u32>(KMemoryState::GeneratedCode) == 0x04402214);
 static_assert(static_cast<u32>(KMemoryState::CodeOut) == 0x04402015);
 static_assert(static_cast<u32>(KMemoryState::Coverage) == 0x00002016);
-static_assert(static_cast<u32>(KMemoryState::Insecure) == 0x05583817);
+static_assert(static_cast<u32>(KMemoryState::Insecure) == 0x055C3817);
 enum class KMemoryPermission : u8 {
     None = 0,
@@ -182,8 +190,9 @@ enum class KMemoryAttribute : u8 {
     IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked),
     DeviceShared = static_cast<u8>(Svc::MemoryAttribute::DeviceShared),
     Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached),
+    PermissionLocked = static_cast<u8>(Svc::MemoryAttribute::PermissionLocked),
-    SetMask = Uncached,
+    SetMask = Uncached | PermissionLocked,
 };
 DECLARE_ENUM_FLAG_OPERATORS(KMemoryAttribute);
@@ -261,6 +270,10 @@ struct KMemoryInfo {
         return m_state;
     }
+    constexpr Svc::MemoryState GetSvcState() const {
+        return static_cast<Svc::MemoryState>(m_state & KMemoryState::Mask);
+    }
+
     constexpr KMemoryPermission GetPermission() const {
         return m_permission;
     }
@@ -326,6 +339,10 @@ public:
         return this->GetEndAddress() - 1;
     }
+    constexpr KMemoryState GetState() const {
+        return m_memory_state;
+    }
+
     constexpr u16 GetIpcLockCount() const {
         return m_ipc_lock_count;
     }
@@ -443,6 +460,13 @@ public:
         }
     }
+    constexpr void UpdateAttribute(KMemoryAttribute mask, KMemoryAttribute attr) {
+        ASSERT(False(mask & KMemoryAttribute::IpcLocked));
+        ASSERT(False(mask & KMemoryAttribute::DeviceShared));
+
+        m_attribute = (m_attribute & ~mask) | attr;
+    }
+
     constexpr void Split(KMemoryBlock* block, KProcessAddress addr) {
         ASSERT(this->GetAddress() < addr);
         ASSERT(this->Contains(addr));
diff --git a/src/core/hle/kernel/k_memory_block_manager.cpp b/src/core/hle/kernel/k_memory_block_manager.cpp
index ab75f550e..58a1e7216 100644
--- a/src/core/hle/kernel/k_memory_block_manager.cpp
+++ b/src/core/hle/kernel/k_memory_block_manager.cpp
@@ -160,8 +160,8 @@ void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator,
         }
         // Update block state.
-        it->Update(state, perm, attr, cur_address == address, static_cast<u8>(set_disable_attr), static_cast<u8>(clear_disable_attr));
+        it->Update(state, perm, attr, it->GetAddress() == address, static_cast<u8>(set_disable_attr), static_cast<u8>(clear_disable_attr));
         cur_address += cur_info.GetSize();
         remaining_pages -= cur_info.GetNumPages();
     }
@@ -175,7 +175,9 @@ void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllo
                                         KProcessAddress address, size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm, KMemoryAttribute test_attr, KMemoryState state,
-                                        KMemoryPermission perm, KMemoryAttribute attr) {
+                                        KMemoryPermission perm, KMemoryAttribute attr,
+                                        KMemoryBlockDisableMergeAttribute set_disable_attr,
+                                        KMemoryBlockDisableMergeAttribute clear_disable_attr) {
     // Ensure for auditing that we never end up with an invalid tree.
     KScopedMemoryBlockManagerAuditor auditor(this);
     ASSERT(Common::IsAligned(GetInteger(address), PageSize));
@@ -214,7 +216,8 @@ void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllo
             }
             // Update block state.
-            it->Update(state, perm, attr, false, 0, 0);
+            it->Update(state, perm, attr, false, static_cast<u8>(set_disable_attr),
+                       static_cast<u8>(clear_disable_attr));
             cur_address += cur_info.GetSize();
             remaining_pages -= cur_info.GetNumPages();
         } else {
@@ -284,6 +287,65 @@ void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocat
     this->CoalesceForUpdate(allocator, address, num_pages);
 }
+void KMemoryBlockManager::UpdateAttribute(KMemoryBlockManagerUpdateAllocator* allocator,
+                                          KProcessAddress address, size_t num_pages,
+                                          KMemoryAttribute mask, KMemoryAttribute attr) {
+    // Ensure for auditing that we never end up with an invalid tree.
+    KScopedMemoryBlockManagerAuditor auditor(this);
+    ASSERT(Common::IsAligned(GetInteger(address), PageSize));
+
+    KProcessAddress cur_address = address;
+    size_t remaining_pages = num_pages;
+    iterator it = this->FindIterator(address);
+
+    while (remaining_pages > 0) {
+        const size_t remaining_size = remaining_pages * PageSize;
+        KMemoryInfo cur_info = it->GetMemoryInfo();
+
+        if ((it->GetAttribute() & mask) != attr) {
+            // If we need to, create a new block before and insert it.
+            if (cur_info.GetAddress() != GetInteger(cur_address)) {
+                KMemoryBlock* new_block = allocator->Allocate();
+
+                it->Split(new_block, cur_address);
+                it = m_memory_block_tree.insert(*new_block);
+                it++;
+
+                cur_info = it->GetMemoryInfo();
+                cur_address = cur_info.GetAddress();
+            }
+
+            // If we need to, create a new block after and insert it.
+            if (cur_info.GetSize() > remaining_size) {
+                KMemoryBlock* new_block = allocator->Allocate();
+
+                it->Split(new_block, cur_address + remaining_size);
+                it = m_memory_block_tree.insert(*new_block);
+
+                cur_info = it->GetMemoryInfo();
+            }
+
+            // Update block state.
+            it->UpdateAttribute(mask, attr);
+            cur_address += cur_info.GetSize();
+            remaining_pages -= cur_info.GetNumPages();
+        } else {
+            // If we already have the right attributes, just advance.
+            if (cur_address + remaining_size < cur_info.GetEndAddress()) {
+                remaining_pages = 0;
+                cur_address += remaining_size;
+            } else {
+                remaining_pages = (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
+                cur_address = cur_info.GetEndAddress();
+            }
+        }
+        it++;
+    }
+
+    this->CoalesceForUpdate(allocator, address, num_pages);
+}
+
 // Debug.
 bool KMemoryBlockManager::CheckState() const {
     // Loop over every block, ensuring that we are sorted and coalesced.
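
The new UpdateAttribute walk mirrors Update: blocks straddling the range are split with the preallocated update allocator, blocks that already carry the requested bits are skipped, and the per-block change is the masked merge performed by KMemoryBlock::UpdateAttribute above. A minimal standalone restatement of that merge, with illustrative values (the attribute bit encodings come from svc_types.h in this change, nothing else here is from the kernel sources):

    #include <cstdint>

    // Mirrors m_attribute = (m_attribute & ~mask) | attr from KMemoryBlock::UpdateAttribute.
    constexpr std::uint8_t ApplyAttribute(std::uint8_t current, std::uint8_t mask, std::uint8_t attr) {
        return static_cast<std::uint8_t>((current & ~mask) | attr);
    }
    // Setting PermissionLocked (1 << 4) leaves an existing Uncached bit (1 << 3) untouched.
    static_assert(ApplyAttribute(0x08, 0x10, 0x10) == 0x18);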
diff --git a/src/core/hle/kernel/k_memory_block_manager.h b/src/core/hle/kernel/k_memory_block_manager.h
index 96496e990..cb7b6f430 100644
--- a/src/core/hle/kernel/k_memory_block_manager.h
+++ b/src/core/hle/kernel/k_memory_block_manager.h
@@ -115,7 +115,11 @@ public:
     void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
                        size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm,
                        KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm,
-                       KMemoryAttribute attr);
+                       KMemoryAttribute attr, KMemoryBlockDisableMergeAttribute set_disable_attr,
+                       KMemoryBlockDisableMergeAttribute clear_disable_attr);
+
+    void UpdateAttribute(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
+                         size_t num_pages, KMemoryAttribute mask, KMemoryAttribute attr);
     iterator FindIterator(KProcessAddress address) const {
         return m_memory_block_tree.find(KMemoryBlock(
diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h
index 54a71df56..c8122644f 100644
--- a/src/core/hle/kernel/k_memory_layout.h
+++ b/src/core/hle/kernel/k_memory_layout.h
@@ -137,11 +137,9 @@ public:
         return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscExceptionStack);
     }
-    KVirtualAddress GetSlabRegionAddress() const {
-        return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelSlab)).GetAddress();
+    const KMemoryRegion& GetSlabRegion() const {
+        return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelSlab));
     }
-
     const KMemoryRegion& GetDeviceRegion(KMemoryRegionType type) const {
         return Dereference(GetPhysicalMemoryRegionTree().FindFirstDerived(type));
     }
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index 74d8169e0..637558e10 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -119,7 +119,8 @@ void KMemoryManager::Initialize(KVirtualAddress management_region, size_t manage
     // Free each region to its corresponding heap.
     size_t reserved_sizes[MaxManagerCount] = {};
     const KPhysicalAddress ini_start = GetInitialProcessBinaryPhysicalAddress();
-    const KPhysicalAddress ini_end = ini_start + InitialProcessBinarySizeMax;
+    const size_t ini_size = GetInitialProcessBinarySize();
+    const KPhysicalAddress ini_end = ini_start + ini_size;
     const KPhysicalAddress ini_last = ini_end - 1;
     for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
         if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
@@ -137,13 +138,13 @@ void KMemoryManager::Initialize(KVirtualAddress management_region, size_t manage
             }
             // Open/reserve the ini memory.
-            manager.OpenFirst(ini_start, InitialProcessBinarySizeMax / PageSize);
-            reserved_sizes[it.GetAttributes()] += InitialProcessBinarySizeMax;
+            manager.OpenFirst(ini_start, ini_size / PageSize);
+            reserved_sizes[it.GetAttributes()] += ini_size;
             // Free memory after the ini to the heap.
             if (ini_last != cur_last) {
                 ASSERT(cur_end != 0);
-                manager.Free(ini_end, cur_end - ini_end);
+                manager.Free(ini_end, (cur_end - ini_end) / PageSize);
             }
         } else {
             // Ensure there's no partial overlap with the ini image.
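
The k_memory_manager.cpp hunk above also fixes a unit mismatch: the neighbouring OpenFirst call is already given a page count, and the Free call now divides the leftover byte span by PageSize as well. A small arithmetic sanity check (the addresses are made up for illustration; PageSize is the usual 4 KiB):

    #include <cstddef>

    constexpr std::size_t PageSize = 0x1000;
    constexpr std::size_t ini_end = 0x81000000, cur_end = 0x88000000;
    // The span handed back to the heap is (cur_end - ini_end) bytes, i.e. this many pages:
    static_assert((cur_end - ini_end) / PageSize == 0x7000);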
diff --git a/src/core/hle/kernel/k_memory_region_type.h b/src/core/hle/kernel/k_memory_region_type.h
index e5630c1ac..bcbf450f0 100644
--- a/src/core/hle/kernel/k_memory_region_type.h
+++ b/src/core/hle/kernel/k_memory_region_type.h
@@ -190,9 +190,15 @@ static_assert(KMemoryRegionType_DramKernelInitPt.GetValue() ==
 constexpr inline auto KMemoryRegionType_DramKernelSecureAppletMemory =
     KMemoryRegionType_DramKernelBase.DeriveSparse(1, 3, 0).SetAttribute(KMemoryRegionAttr_LinearMapped);
+constexpr inline const auto KMemoryRegionType_DramKernelSecureUnknown =
+    KMemoryRegionType_DramKernelBase.DeriveSparse(1, 3, 1).SetAttribute(KMemoryRegionAttr_LinearMapped);
 static_assert(KMemoryRegionType_DramKernelSecureAppletMemory.GetValue() ==
               (0x18E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped));
+static_assert(KMemoryRegionType_DramKernelSecureUnknown.GetValue() ==
+              (0x28E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped));
 constexpr inline auto KMemoryRegionType_DramReservedEarly =
     KMemoryRegionType_DramReservedBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap);
@@ -217,16 +223,18 @@ constexpr inline auto KMemoryRegionType_DramPoolPartition =
 static_assert(KMemoryRegionType_DramPoolPartition.GetValue() ==
               (0x26 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
-constexpr inline auto KMemoryRegionType_DramPoolManagement =
-    KMemoryRegionType_DramPoolPartition.DeriveTransition(0, 2).DeriveTransition().SetAttribute(KMemoryRegionAttr_CarveoutProtected);
-constexpr inline auto KMemoryRegionType_DramUserPool =
-    KMemoryRegionType_DramPoolPartition.DeriveTransition(1, 2).DeriveTransition();
+// UNUSED: .Derive(4, 1);
+// UNUSED: .Derive(4, 2);
+constexpr inline const auto KMemoryRegionType_DramPoolManagement =
+    KMemoryRegionType_DramPoolPartition.Derive(4, 0).SetAttribute(KMemoryRegionAttr_CarveoutProtected);
+constexpr inline const auto KMemoryRegionType_DramUserPool =
+    KMemoryRegionType_DramPoolPartition.Derive(4, 3);
 static_assert(KMemoryRegionType_DramPoolManagement.GetValue() ==
-              (0x166 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected));
+              (0xE6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected));
 static_assert(KMemoryRegionType_DramUserPool.GetValue() ==
-              (0x1A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
+              (0x266 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
 constexpr inline auto KMemoryRegionType_DramApplicationPool =
     KMemoryRegionType_DramUserPool.Derive(4, 0);
@@ -237,60 +245,63 @@ constexpr inline auto KMemoryRegionType_DramSystemNonSecurePool =
 constexpr inline auto KMemoryRegionType_DramSystemPool =
     KMemoryRegionType_DramUserPool.Derive(4, 3).SetAttribute(KMemoryRegionAttr_CarveoutProtected);
 static_assert(KMemoryRegionType_DramApplicationPool.GetValue() ==
-              (0x7A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
+              (0xE66 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
 static_assert(KMemoryRegionType_DramAppletPool.GetValue() ==
-              (0xBA6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
+              (0x1666 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
 static_assert(KMemoryRegionType_DramSystemNonSecurePool.GetValue() ==
-              (0xDA6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
+              (0x1A66 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
 static_assert(KMemoryRegionType_DramSystemPool.GetValue() ==
-              (0x13A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected));
+              (0x2666 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected));
 constexpr inline auto KMemoryRegionType_VirtualDramHeapBase =
-    KMemoryRegionType_Dram.DeriveSparse(1, 3, 0);
+    KMemoryRegionType_Dram.DeriveSparse(1, 4, 0);
 constexpr inline auto KMemoryRegionType_VirtualDramKernelPtHeap =
-    KMemoryRegionType_Dram.DeriveSparse(1, 3, 1);
+    KMemoryRegionType_Dram.DeriveSparse(1, 4, 1);
 constexpr inline auto KMemoryRegionType_VirtualDramKernelTraceBuffer =
-    KMemoryRegionType_Dram.DeriveSparse(1, 3, 2);
+    KMemoryRegionType_Dram.DeriveSparse(1, 4, 2);
 static_assert(KMemoryRegionType_VirtualDramHeapBase.GetValue() == 0x1A);
 static_assert(KMemoryRegionType_VirtualDramKernelPtHeap.GetValue() == 0x2A);
 static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A);
-// UNUSED: .DeriveSparse(2, 2, 0);
-constexpr inline auto KMemoryRegionType_VirtualDramUnknownDebug =
-    KMemoryRegionType_Dram.DeriveSparse(2, 2, 1);
-static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x52));
-
-constexpr inline auto KMemoryRegionType_VirtualDramKernelSecureAppletMemory =
-    KMemoryRegionType_Dram.DeriveSparse(3, 1, 0);
-static_assert(KMemoryRegionType_VirtualDramKernelSecureAppletMemory.GetValue() == (0x62));
-
-constexpr inline auto KMemoryRegionType_VirtualDramKernelInitPt =
-    KMemoryRegionType_VirtualDramHeapBase.Derive(3, 0);
-constexpr inline auto KMemoryRegionType_VirtualDramPoolManagement =
-    KMemoryRegionType_VirtualDramHeapBase.Derive(3, 1);
-constexpr inline auto KMemoryRegionType_VirtualDramUserPool =
-    KMemoryRegionType_VirtualDramHeapBase.Derive(3, 2);
-static_assert(KMemoryRegionType_VirtualDramKernelInitPt.GetValue() == 0x19A);
-static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x29A);
-static_assert(KMemoryRegionType_VirtualDramUserPool.GetValue() == 0x31A);
+// UNUSED: .Derive(4, 2);
+constexpr inline const auto KMemoryRegionType_VirtualDramUnknownDebug =
+    KMemoryRegionType_Dram.Advance(2).Derive(4, 0);
+constexpr inline const auto KMemoryRegionType_VirtualDramKernelSecureAppletMemory =
+    KMemoryRegionType_Dram.Advance(2).Derive(4, 1);
+constexpr inline const auto KMemoryRegionType_VirtualDramKernelSecureUnknown =
+    KMemoryRegionType_Dram.Advance(2).Derive(4, 3);
+static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x32));
+static_assert(KMemoryRegionType_VirtualDramKernelSecureAppletMemory.GetValue() == (0x52));
+static_assert(KMemoryRegionType_VirtualDramKernelSecureUnknown.GetValue() == (0x92));
+
+// UNUSED: .Derive(4, 3);
+constexpr inline const auto KMemoryRegionType_VirtualDramKernelInitPt =
+    KMemoryRegionType_VirtualDramHeapBase.Derive(4, 0);
+constexpr inline const auto KMemoryRegionType_VirtualDramPoolManagement =
+    KMemoryRegionType_VirtualDramHeapBase.Derive(4, 1);
+constexpr inline const auto KMemoryRegionType_VirtualDramUserPool =
+    KMemoryRegionType_VirtualDramHeapBase.Derive(4, 2);
+static_assert(KMemoryRegionType_VirtualDramKernelInitPt.GetValue() == 0x31A);
+static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x51A);
+static_assert(KMemoryRegionType_VirtualDramUserPool.GetValue() == 0x61A);
 // NOTE: For unknown reason, the pools are derived out-of-order here.
 // It's worth eventually trying to understand why Nintendo made this choice.
 // UNUSED: .Derive(6, 0);
 // UNUSED: .Derive(6, 1);
-constexpr inline auto KMemoryRegionType_VirtualDramAppletPool =
-    KMemoryRegionType_VirtualDramUserPool.Derive(6, 2);
-constexpr inline auto KMemoryRegionType_VirtualDramApplicationPool =
-    KMemoryRegionType_VirtualDramUserPool.Derive(6, 3);
-constexpr inline auto KMemoryRegionType_VirtualDramSystemNonSecurePool =
-    KMemoryRegionType_VirtualDramUserPool.Derive(6, 4);
-constexpr inline auto KMemoryRegionType_VirtualDramSystemPool =
-    KMemoryRegionType_VirtualDramUserPool.Derive(6, 5);
-static_assert(KMemoryRegionType_VirtualDramAppletPool.GetValue() == 0x1B1A);
-static_assert(KMemoryRegionType_VirtualDramApplicationPool.GetValue() == 0x271A);
-static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x2B1A);
-static_assert(KMemoryRegionType_VirtualDramSystemPool.GetValue() == 0x331A);
+constexpr inline const auto KMemoryRegionType_VirtualDramApplicationPool =
+    KMemoryRegionType_VirtualDramUserPool.Derive(4, 0);
+constexpr inline const auto KMemoryRegionType_VirtualDramAppletPool =
+    KMemoryRegionType_VirtualDramUserPool.Derive(4, 1);
+constexpr inline const auto KMemoryRegionType_VirtualDramSystemNonSecurePool =
+    KMemoryRegionType_VirtualDramUserPool.Derive(4, 2);
+constexpr inline const auto KMemoryRegionType_VirtualDramSystemPool =
+    KMemoryRegionType_VirtualDramUserPool.Derive(4, 3);
+static_assert(KMemoryRegionType_VirtualDramApplicationPool.GetValue() == 0x361A);
+static_assert(KMemoryRegionType_VirtualDramAppletPool.GetValue() == 0x561A);
+static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x661A);
+static_assert(KMemoryRegionType_VirtualDramSystemPool.GetValue() == 0x961A);
 constexpr inline auto KMemoryRegionType_ArchDeviceBase =
     KMemoryRegionType_Kernel.DeriveTransition(0, 1).SetSparseOnly();
@@ -354,12 +365,14 @@ constexpr inline auto KMemoryRegionType_KernelTemp =
 static_assert(KMemoryRegionType_KernelTemp.GetValue() == 0x31);
 constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) {
-    if (KMemoryRegionType_KernelTraceBuffer.IsAncestorOf(type_id)) {
-        return KMemoryRegionType_VirtualDramKernelTraceBuffer;
-    } else if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) {
+    if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) {
         return KMemoryRegionType_VirtualDramKernelPtHeap;
     } else if (KMemoryRegionType_DramKernelSecureAppletMemory.IsAncestorOf(type_id)) {
         return KMemoryRegionType_VirtualDramKernelSecureAppletMemory;
+    } else if (KMemoryRegionType_DramKernelSecureUnknown.IsAncestorOf(type_id)) {
+        return KMemoryRegionType_VirtualDramKernelSecureUnknown;
+    } else if (KMemoryRegionType_KernelTraceBuffer.IsAncestorOf(type_id)) {
+        return KMemoryRegionType_VirtualDramKernelTraceBuffer;
     } else if ((type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id) {
         return KMemoryRegionType_VirtualDramUnknownDebug;
     } else {
diff --git a/src/core/hle/kernel/k_page_group.h b/src/core/hle/kernel/k_page_group.h
index b32909f05..de9d63a8d 100644
--- a/src/core/hle/kernel/k_page_group.h
+++ b/src/core/hle/kernel/k_page_group.h
@@ -183,12 +183,17 @@ private:
 class KScopedPageGroup {
 public:
-    explicit KScopedPageGroup(const KPageGroup* gp) : m_pg(gp) {
+    explicit KScopedPageGroup(const KPageGroup* gp, bool not_first = true) : m_pg(gp) {
         if (m_pg) {
-            m_pg->Open();
+            if (not_first) {
+                m_pg->Open();
+            } else {
+                m_pg->OpenFirst();
+            }
         }
     }
-    explicit KScopedPageGroup(const KPageGroup& gp) : KScopedPageGroup(std::addressof(gp)) {}
+    explicit KScopedPageGroup(const KPageGroup& gp, bool not_first = true)
+        : KScopedPageGroup(std::addressof(gp), not_first) {}
     ~KScopedPageGroup() {
         if (m_pg) {
             m_pg->Close();
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 0b0cef984..217ccbae3 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -505,7 +505,7 @@ Result KPageTable::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress
     R_TRY(this->CheckMemoryStateContiguous(
         std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias,
         KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None,
-        KMemoryAttribute::All, KMemoryAttribute::None));
+        KMemoryAttribute::All & ~KMemoryAttribute::PermissionLocked, KMemoryAttribute::None));
     // Determine whether any pages being unmapped are code.
     bool any_code_pages = false;
@@ -1724,29 +1724,43 @@ Result KPageTable::MapPhysicalMemory(KProcessAddress address, size_t size) {
                 PageSize;
             // While we have pages to map, map them.
-            while (map_pages > 0) {
-                // Check if we're at the end of the physical block.
-                if (pg_pages == 0) {
-                    // Ensure there are more pages to map.
-                    ASSERT(pg_it != pg.end());
-
-                    // Advance our physical block.
-                    ++pg_it;
-                    pg_phys_addr = pg_it->GetAddress();
-                    pg_pages = pg_it->GetNumPages();
+            {
+                // Create a page group for the current mapping range.
+                KPageGroup cur_pg(m_kernel, m_block_info_manager);
+                {
+                    ON_RESULT_FAILURE_2 {
+                        cur_pg.OpenFirst();
+                        cur_pg.Close();
+                    };
+
+                    size_t remain_pages = map_pages;
+                    while (remain_pages > 0) {
+                        // Check if we're at the end of the physical block.
+                        if (pg_pages == 0) {
+                            // Ensure there are more pages to map.
+                            ASSERT(pg_it != pg.end());
+
+                            // Advance our physical block.
+                            ++pg_it;
+                            pg_phys_addr = pg_it->GetAddress();
+                            pg_pages = pg_it->GetNumPages();
+                        }
+
+                        // Add whatever we can to the current block.
+                        const size_t cur_pages = std::min(pg_pages, remain_pages);
+                        R_TRY(cur_pg.AddBlock(pg_phys_addr + ((pg_pages - cur_pages) * PageSize), cur_pages));
+
+                        // Advance.
+                        remain_pages -= cur_pages;
+                        pg_pages -= cur_pages;
+                    }
                 }
-                // Map whatever we can.
-                const size_t cur_pages = std::min(pg_pages, map_pages);
-                R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite, OperationType::MapFirst, pg_phys_addr));
-
-                // Advance.
-                cur_address += cur_pages * PageSize;
-                map_pages -= cur_pages;
-
-                pg_phys_addr += cur_pages * PageSize;
-                pg_pages -= cur_pages;
+                // Map the pages.
+                R_TRY(this->Operate(cur_address, map_pages, cur_pg, OperationType::MapFirstGroup));
             }
         }
@@ -1770,7 +1784,11 @@ Result KPageTable::MapPhysicalMemory(KProcessAddress address, size_t size) {
             m_memory_block_manager.UpdateIfMatch(
                 std::addressof(allocator), address, size / PageSize, KMemoryState::Free,
                 KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal,
-                KMemoryPermission::UserReadWrite, KMemoryAttribute::None);
+                KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+                address == this->GetAliasRegionStart()
+                    ? KMemoryBlockDisableMergeAttribute::Normal
+                    : KMemoryBlockDisableMergeAttribute::None,
+                KMemoryBlockDisableMergeAttribute::None);
             R_SUCCEED();
         }
@@ -1868,6 +1886,13 @@ Result KPageTable::UnmapPhysicalMemory(KProcessAddress address, size_t size) {
         // Iterate over the memory, unmapping as we go.
         auto it = m_memory_block_manager.FindIterator(cur_address);
+
+        const auto clear_merge_attr =
+            (it->GetState() == KMemoryState::Normal &&
+             it->GetAddress() == this->GetAliasRegionStart() && it->GetAddress() == address)
+                ? KMemoryBlockDisableMergeAttribute::Normal
+                : KMemoryBlockDisableMergeAttribute::None;
+
         while (true) {
             // Check that the iterator is valid.
             ASSERT(it != m_memory_block_manager.end());
@@ -1905,7 +1930,7 @@ Result KPageTable::UnmapPhysicalMemory(KProcessAddress address, size_t size) {
     m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize,
                                   KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None,
                                   KMemoryBlockDisableMergeAttribute::None,
-                                  KMemoryBlockDisableMergeAttribute::None);
+                                  clear_merge_attr);
     // We succeeded.
     R_SUCCEED();
@@ -2379,8 +2404,7 @@ Result KPageTable::MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
     KScopedPageTableUpdater updater(this);
     // Perform mapping operation.
-    const KPageProperties properties = {perm, state == KMemoryState::Io, false, DisableMergeAttribute::DisableHead};
+    const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
     R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
     // Update the blocks.
@@ -2422,8 +2446,7 @@ Result KPageTable::MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMem
     KScopedPageTableUpdater updater(this);
     // Perform mapping operation.
-    const KPageProperties properties = {perm, state == KMemoryState::Io, false, DisableMergeAttribute::DisableHead};
+    const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
     R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
     // Update the blocks.
@@ -2652,11 +2675,18 @@ Result KPageTable::SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mas
     size_t num_allocator_blocks;
     constexpr auto AttributeTestMask = ~(KMemoryAttribute::SetMask | KMemoryAttribute::DeviceShared);
-    R_TRY(this->CheckMemoryState(
-        std::addressof(old_state), std::addressof(old_perm), std::addressof(old_attr),
-        std::addressof(num_allocator_blocks), addr, size, KMemoryState::FlagCanChangeAttribute,
-        KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None,
-        AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask));
+    const KMemoryState state_test_mask =
+        static_cast<KMemoryState>(((mask & static_cast<u32>(KMemoryAttribute::Uncached))
+                                       ? static_cast<u32>(KMemoryState::FlagCanChangeAttribute)
+                                       : 0) |
+                                  ((mask & static_cast<u32>(KMemoryAttribute::PermissionLocked))
+                                       ? static_cast<u32>(KMemoryState::FlagCanPermissionLock)
+                                       : 0));
+    R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
+                                 std::addressof(old_attr), std::addressof(num_allocator_blocks),
+                                 addr, size, state_test_mask, state_test_mask,
+                                 KMemoryPermission::None, KMemoryPermission::None,
+                                 AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask));
     // Create an update allocator.
     Result allocator_result{ResultSuccess};
@@ -2664,18 +2694,17 @@ Result KPageTable::SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mas
                                                  m_memory_block_slab_manager, num_allocator_blocks);
     R_TRY(allocator_result);
-    // Determine the new attribute.
-    const KMemoryAttribute new_attr =
-        static_cast<KMemoryAttribute>(((old_attr & static_cast<KMemoryAttribute>(~mask)) | static_cast<KMemoryAttribute>(attr & mask)));
-
-    // Perform operation.
-    this->Operate(addr, num_pages, old_perm, OperationType::ChangePermissionsAndRefresh);
+    // If we need to, perform a change attribute operation.
+    if (True(KMemoryAttribute::Uncached & static_cast<KMemoryAttribute>(mask))) {
+        // Perform operation.
+        R_TRY(this->Operate(addr, num_pages, old_perm, OperationType::ChangePermissionsAndRefreshAndFlush, 0));
+    }
     // Update the blocks.
-    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, old_perm,
-                                  new_attr, KMemoryBlockDisableMergeAttribute::None,
-                                  KMemoryBlockDisableMergeAttribute::None);
+    m_memory_block_manager.UpdateAttribute(std::addressof(allocator), addr, num_pages,
+                                           static_cast<KMemoryAttribute>(mask),
+                                           static_cast<KMemoryAttribute>(attr));
     R_SUCCEED();
 }
@@ -2863,7 +2892,8 @@ Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress
                                 &KMemoryBlock::ShareToDevice, KMemoryPermission::None);
     // Set whether the locked memory was io.
-    *out_is_io = old_state == KMemoryState::Io;
+    *out_is_io =
+        static_cast<Svc::MemoryState>(old_state & KMemoryState::Mask) == Svc::MemoryState::Io;
     R_SUCCEED();
 }
@@ -3021,9 +3051,10 @@ Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, const KPageGr
     ASSERT(num_pages == page_group.GetNumPages());
     switch (operation) {
-    case OperationType::MapGroup: {
+    case OperationType::MapGroup:
+    case OperationType::MapFirstGroup: {
         // We want to maintain a new reference to every page in the group.
-        KScopedPageGroup spg(page_group);
+        KScopedPageGroup spg(page_group, operation != OperationType::MapFirstGroup);
         for (const auto& node : page_group) {
             const size_t size{node.GetNumPages() * PageSize};
@@ -3065,7 +3096,6 @@ Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, KMemoryPermis
         m_memory->UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize);
         break;
     }
-    case OperationType::MapFirst:
     case OperationType::Map: {
         ASSERT(map_addr);
         ASSERT(Common::IsAligned(GetInteger(map_addr), PageSize));
@@ -3073,11 +3103,7 @@ Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, KMemoryPermis
         // Open references to pages, if we should.
         if (IsHeapPhysicalAddress(m_kernel.MemoryLayout(), map_addr)) {
-            if (operation == OperationType::MapFirst) {
-                m_kernel.MemoryManager().OpenFirst(map_addr, num_pages);
-            } else {
-                m_kernel.MemoryManager().Open(map_addr, num_pages);
-            }
+            m_kernel.MemoryManager().Open(map_addr, num_pages);
         }
         break;
     }
@@ -3087,6 +3113,7 @@ Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, KMemoryPermis
     }
     case OperationType::ChangePermissions:
     case OperationType::ChangePermissionsAndRefresh:
+    case OperationType::ChangePermissionsAndRefreshAndFlush:
         break;
     default:
         ASSERT(false);
@@ -3106,79 +3133,79 @@ void KPageTable::FinalizeUpdate(PageLinkedList* page_list) {
     }
 }
-KProcessAddress KPageTable::GetRegionAddress(KMemoryState state) const {
+KProcessAddress KPageTable::GetRegionAddress(Svc::MemoryState state) const {
     switch (state) {
-    case KMemoryState::Free:
-    case KMemoryState::Kernel:
+    case Svc::MemoryState::Free:
+    case Svc::MemoryState::Kernel:
         return m_address_space_start;
-    case KMemoryState::Normal:
+    case Svc::MemoryState::Normal:
         return m_heap_region_start;
-    case KMemoryState::Ipc:
-    case KMemoryState::NonSecureIpc:
-    case KMemoryState::NonDeviceIpc:
+    case Svc::MemoryState::Ipc:
+    case Svc::MemoryState::NonSecureIpc:
+    case Svc::MemoryState::NonDeviceIpc:
         return m_alias_region_start;
-    case KMemoryState::Stack:
+    case Svc::MemoryState::Stack:
        return m_stack_region_start;
-    case KMemoryState::Static:
-    case KMemoryState::ThreadLocal:
+    case Svc::MemoryState::Static:
+    case Svc::MemoryState::ThreadLocal:
        return m_kernel_map_region_start;
-    case KMemoryState::Io:
-    case KMemoryState::Shared:
-    case KMemoryState::AliasCode:
-    case KMemoryState::AliasCodeData:
-    case KMemoryState::Transfered:
-    case KMemoryState::SharedTransfered:
-    case KMemoryState::SharedCode:
-    case KMemoryState::GeneratedCode:
-    case KMemoryState::CodeOut:
-    case KMemoryState::Coverage:
-    case KMemoryState::Insecure:
+    case Svc::MemoryState::Io:
+    case Svc::MemoryState::Shared:
+    case Svc::MemoryState::AliasCode:
+    case Svc::MemoryState::AliasCodeData:
+    case Svc::MemoryState::Transfered:
+    case Svc::MemoryState::SharedTransfered:
+    case Svc::MemoryState::SharedCode:
+    case Svc::MemoryState::GeneratedCode:
+    case Svc::MemoryState::CodeOut:
+    case Svc::MemoryState::Coverage:
+    case Svc::MemoryState::Insecure:
         return m_alias_code_region_start;
-    case KMemoryState::Code:
-    case KMemoryState::CodeData:
+    case Svc::MemoryState::Code:
+    case Svc::MemoryState::CodeData:
         return m_code_region_start;
     default:
         UNREACHABLE();
     }
 }
-size_t KPageTable::GetRegionSize(KMemoryState state) const {
+size_t KPageTable::GetRegionSize(Svc::MemoryState state) const {
     switch (state) {
-    case KMemoryState::Free:
-    case KMemoryState::Kernel:
+    case Svc::MemoryState::Free:
+    case Svc::MemoryState::Kernel:
        return m_address_space_end - m_address_space_start;
-    case KMemoryState::Normal:
+    case Svc::MemoryState::Normal:
        return m_heap_region_end - m_heap_region_start;
-    case KMemoryState::Ipc:
-    case KMemoryState::NonSecureIpc:
-    case KMemoryState::NonDeviceIpc:
+    case Svc::MemoryState::Ipc:
+    case Svc::MemoryState::NonSecureIpc:
+    case Svc::MemoryState::NonDeviceIpc:
        return m_alias_region_end - m_alias_region_start;
-    case KMemoryState::Stack:
+    case Svc::MemoryState::Stack:
        return m_stack_region_end - m_stack_region_start;
-    case KMemoryState::Static:
-    case KMemoryState::ThreadLocal:
+    case Svc::MemoryState::Static:
+    case Svc::MemoryState::ThreadLocal:
        return m_kernel_map_region_end - m_kernel_map_region_start;
-    case KMemoryState::Io:
-    case KMemoryState::Shared:
-    case KMemoryState::AliasCode:
-    case KMemoryState::AliasCodeData:
-    case KMemoryState::Transfered:
-    case KMemoryState::SharedTransfered:
-    case KMemoryState::SharedCode:
-    case KMemoryState::GeneratedCode:
-    case KMemoryState::CodeOut:
-    case KMemoryState::Coverage:
-    case KMemoryState::Insecure:
+    case Svc::MemoryState::Io:
+    case Svc::MemoryState::Shared:
+    case Svc::MemoryState::AliasCode:
+    case Svc::MemoryState::AliasCodeData:
+    case Svc::MemoryState::Transfered:
+    case Svc::MemoryState::SharedTransfered:
+    case Svc::MemoryState::SharedCode:
+    case Svc::MemoryState::GeneratedCode:
+    case Svc::MemoryState::CodeOut:
+    case Svc::MemoryState::Coverage:
+    case Svc::MemoryState::Insecure:
         return m_alias_code_region_end - m_alias_code_region_start;
-    case KMemoryState::Code:
-    case KMemoryState::CodeData:
+    case Svc::MemoryState::Code:
+    case Svc::MemoryState::CodeData:
         return m_code_region_end - m_code_region_start;
     default:
         UNREACHABLE();
     }
 }
-bool KPageTable::CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
+bool KPageTable::CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const {
     const KProcessAddress end = addr + size;
     const KProcessAddress last = end - 1;
@@ -3192,32 +3219,32 @@ bool KPageTable::CanContain(KProcessAddress addr, size_t size, KMemoryState stat
     const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr || m_alias_region_start == m_alias_region_end);
     switch (state) {
-    case KMemoryState::Free:
-    case KMemoryState::Kernel:
+    case Svc::MemoryState::Free:
+    case Svc::MemoryState::Kernel:
         return is_in_region;
-    case KMemoryState::Io:
-    case KMemoryState::Static:
-    case KMemoryState::Code:
-    case KMemoryState::CodeData:
-    case KMemoryState::Shared:
-    case KMemoryState::AliasCode:
-    case KMemoryState::AliasCodeData:
-    case KMemoryState::Stack:
-    case KMemoryState::ThreadLocal:
-    case KMemoryState::Transfered:
-    case KMemoryState::SharedTransfered:
-    case KMemoryState::SharedCode:
-    case KMemoryState::GeneratedCode:
-    case KMemoryState::CodeOut:
-    case KMemoryState::Coverage:
-    case KMemoryState::Insecure:
+    case Svc::MemoryState::Io:
+    case Svc::MemoryState::Static:
+    case Svc::MemoryState::Code:
+    case Svc::MemoryState::CodeData:
+    case Svc::MemoryState::Shared:
+    case Svc::MemoryState::AliasCode:
+    case Svc::MemoryState::AliasCodeData:
+    case Svc::MemoryState::Stack:
+    case Svc::MemoryState::ThreadLocal:
+    case Svc::MemoryState::Transfered:
+    case Svc::MemoryState::SharedTransfered:
+    case Svc::MemoryState::SharedCode:
+    case Svc::MemoryState::GeneratedCode:
+    case Svc::MemoryState::CodeOut:
+    case Svc::MemoryState::Coverage:
+    case Svc::MemoryState::Insecure:
         return is_in_region && !is_in_heap && !is_in_alias;
-    case KMemoryState::Normal:
+    case Svc::MemoryState::Normal:
         ASSERT(is_in_heap);
         return is_in_region && !is_in_alias;
-    case KMemoryState::Ipc:
-    case KMemoryState::NonSecureIpc:
-    case KMemoryState::NonDeviceIpc:
+    case Svc::MemoryState::Ipc:
+    case Svc::MemoryState::NonSecureIpc:
+    case Svc::MemoryState::NonDeviceIpc:
         ASSERT(is_in_alias);
         return is_in_region && !is_in_heap;
     default:
@@ -3281,21 +3308,16 @@ Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, KProces
 Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
                                     KMemoryAttribute* out_attr, size_t* out_blocks_needed,
-                                    KProcessAddress addr, size_t size, KMemoryState state_mask,
+                                    KMemoryBlockManager::const_iterator it,
+                                    KProcessAddress last_addr, KMemoryState state_mask,
                                     KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
                                     KMemoryAttribute attr_mask, KMemoryAttribute attr,
                                     KMemoryAttribute ignore_attr) const {
     ASSERT(this->IsLockedByCurrentThread());
     // Get information about the first block.
-    const KProcessAddress last_addr = addr + size - 1;
-    KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
     KMemoryInfo info = it->GetMemoryInfo();
-    // If the start address isn't aligned, we need a block.
-    const size_t blocks_for_start_align =
-        (Common::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0;
-
     // Validate all blocks in the range have correct state.
     const KMemoryState first_state = info.m_state;
     const KMemoryPermission first_perm = info.m_permission;
@@ -3321,10 +3343,6 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
         info = it->GetMemoryInfo();
     }
-    // If the end address isn't aligned, we need a block.
-    const size_t blocks_for_end_align =
-        (Common::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0;
-
     // Write output state.
     if (out_state != nullptr) {
         *out_state = first_state;
@@ -3335,9 +3353,39 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
     if (out_attr != nullptr) {
         *out_attr = static_cast<KMemoryAttribute>(first_attr & ~ignore_attr);
     }
+
+    // If the end address isn't aligned, we need a block.
     if (out_blocks_needed != nullptr) {
-        *out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
+        const size_t blocks_for_end_align =
+            (Common::AlignDown(GetInteger(last_addr), PageSize) + PageSize != info.GetEndAddress()) ? 1 : 0;
+        *out_blocks_needed = blocks_for_end_align;
+    }
+
+    R_SUCCEED();
+}
+
+Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
+                                    KMemoryAttribute* out_attr, size_t* out_blocks_needed,
+                                    KProcessAddress addr, size_t size, KMemoryState state_mask,
+                                    KMemoryState state, KMemoryPermission perm_mask,
+                                    KMemoryPermission perm, KMemoryAttribute attr_mask,
+                                    KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
+    ASSERT(this->IsLockedByCurrentThread());
+
+    // Check memory state.
+    const KProcessAddress last_addr = addr + size - 1;
+    KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
+    R_TRY(this->CheckMemoryState(out_state, out_perm, out_attr, out_blocks_needed, it, last_addr,
+                                 state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr));
+
+    // If the start address isn't aligned, we need a block.
+    if (out_blocks_needed != nullptr &&
+        Common::AlignDown(GetInteger(addr), PageSize) != it->GetAddress()) {
+        ++(*out_blocks_needed);
     }
+
     R_SUCCEED();
 }
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 7da675f27..3d64b6fb0 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -126,8 +126,6 @@ public:
         return m_block_info_manager;
     }
-    bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const;
-
     Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
                     KPhysicalAddress phys_addr, KProcessAddress region_start,
                     size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
@@ -162,6 +160,21 @@ public:
     void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size, const KPageGroup& pg);
+    KProcessAddress GetRegionAddress(Svc::MemoryState state) const;
+    size_t GetRegionSize(Svc::MemoryState state) const;
+    bool CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const;
+
+    KProcessAddress GetRegionAddress(KMemoryState state) const {
+        return this->GetRegionAddress(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
+    }
+    size_t GetRegionSize(KMemoryState state) const {
+        return this->GetRegionSize(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
+    }
+    bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
+        return this->CanContain(addr, size,
+                                static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
+    }
+
 protected:
     struct PageLinkedList {
     private:
@@ -204,12 +217,13 @@ protected:
 private:
     enum class OperationType : u32 {
         Map = 0,
-        MapFirst = 1,
-        MapGroup = 2,
+        MapGroup = 1,
+        MapFirstGroup = 2,
         Unmap = 3,
         ChangePermissions = 4,
         ChangePermissionsAndRefresh = 5,
-        Separate = 6,
+        ChangePermissionsAndRefreshAndFlush = 6,
+        Separate = 7,
     };
     static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr =
@@ -228,8 +242,6 @@ private:
     Result Operate(KProcessAddress addr, size_t num_pages, KMemoryPermission perm,
                    OperationType operation, KPhysicalAddress map_addr = 0);
     void FinalizeUpdate(PageLinkedList* page_list);
-    KProcessAddress GetRegionAddress(KMemoryState state) const;
-    size_t GetRegionSize(KMemoryState state) const;
     KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
                                  size_t num_pages, size_t alignment, size_t offset,
@@ -252,6 +264,13 @@ private:
                                 KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
     Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
                             KMemoryAttribute* out_attr, size_t* out_blocks_needed,
+                            KMemoryBlockManager::const_iterator it, KProcessAddress last_addr,
+                            KMemoryState state_mask, KMemoryState state,
+                            KMemoryPermission perm_mask, KMemoryPermission perm,
+                            KMemoryAttribute attr_mask, KMemoryAttribute attr,
+                            KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
+    Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
+                            KMemoryAttribute* out_attr, size_t* out_blocks_needed,
                             KProcessAddress addr, size_t size, KMemoryState state_mask,
                             KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
                             KMemoryAttribute attr_mask, KMemoryAttribute attr,
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 4a099286b..7fa34d693 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -149,7 +149,7 @@ u64 KProcess::GetTotalPhysicalMemoryUsed() {
 }
 u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() {
-    return this->GetTotalPhysicalMemoryUsed() - this->GetSystemResourceUsage();
+    return this->GetTotalPhysicalMemoryUsed() - this->GetSystemResourceSize();
 }
 bool KProcess::ReleaseUserException(KThread* thread) {
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index cb025c3d6..24433d32b 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -623,14 +623,33 @@ struct KernelCore::Impl {
         ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
             GetInteger(slab_start_phys_addr), slab_region_size, KMemoryRegionType_DramKernelSlab));
+        // Insert a physical region for the secure applet memory.
+        const auto secure_applet_end_phys_addr =
+            slab_end_phys_addr + KSystemControl::SecureAppletMemorySize;
+        if constexpr (KSystemControl::SecureAppletMemorySize > 0) {
+            ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
+                GetInteger(slab_end_phys_addr), KSystemControl::SecureAppletMemorySize,
+                KMemoryRegionType_DramKernelSecureAppletMemory));
+        }
+
+        // Insert a physical region for the unknown debug2 region.
+        constexpr size_t SecureUnknownRegionSize = 0;
+        const size_t secure_unknown_size = SecureUnknownRegionSize;
+        const auto secure_unknown_end_phys_addr = secure_applet_end_phys_addr + secure_unknown_size;
+        if constexpr (SecureUnknownRegionSize > 0) {
+            ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
+                GetInteger(secure_applet_end_phys_addr), secure_unknown_size,
+                KMemoryRegionType_DramKernelSecureUnknown));
+        }
+
         // Determine size available for kernel page table heaps, requiring > 8 MB.
         const KPhysicalAddress resource_end_phys_addr = slab_start_phys_addr + resource_region_size;
-        const size_t page_table_heap_size = resource_end_phys_addr - slab_end_phys_addr;
+        const size_t page_table_heap_size = resource_end_phys_addr - secure_unknown_end_phys_addr;
         ASSERT(page_table_heap_size / 4_MiB > 2);
         // Insert a physical region for the kernel page table heap region
         ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
-            GetInteger(slab_end_phys_addr), page_table_heap_size,
+            GetInteger(secure_unknown_end_phys_addr), page_table_heap_size,
             KMemoryRegionType_DramKernelPtHeap));
         // All DRAM regions that we haven't tagged by this point will be mapped under the linear
diff --git a/src/core/hle/kernel/svc/svc_memory.cpp b/src/core/hle/kernel/svc/svc_memory.cpp
index 2cab74127..97f1210de 100644
--- a/src/core/hle/kernel/svc/svc_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_memory.cpp
@@ -76,7 +76,7 @@ Result MapUnmapMemorySanityChecks(const KPageTable& manager, u64 dst_addr, u64 s
 } // namespace
 Result SetMemoryPermission(Core::System& system, u64 address, u64 size, MemoryPermission perm) {
-    LOG_DEBUG(Kernel_SVC, "called, address=0x{:016X}, size=0x{:X}, perm=0x{:08X", address, size,
+    LOG_DEBUG(Kernel_SVC, "called, address=0x{:016X}, size=0x{:X}, perm=0x{:08X}", address, size,
               perm);
     // Validate address / size.
@@ -108,10 +108,16 @@ Result SetMemoryAttribute(Core::System& system, u64 address, u64 size, u32 mask,
     R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
     // Validate the attribute and mask.
-    constexpr u32 SupportedMask = static_cast<u32>(MemoryAttribute::Uncached);
+    constexpr u32 SupportedMask =
+        static_cast<u32>(MemoryAttribute::Uncached | MemoryAttribute::PermissionLocked);
     R_UNLESS((mask | attr) == mask, ResultInvalidCombination);
     R_UNLESS((mask | attr | SupportedMask) == SupportedMask, ResultInvalidCombination);
+    // Check that permission locked is either being set or not masked.
+    R_UNLESS((static_cast<Svc::MemoryAttribute>(mask) & Svc::MemoryAttribute::PermissionLocked) ==
+                 (static_cast<Svc::MemoryAttribute>(attr) & Svc::MemoryAttribute::PermissionLocked),
+             ResultInvalidCombination);
+
     // Validate that the region is in range for the current process.
     auto& page_table{GetCurrentProcess(system.Kernel()).GetPageTable()};
     R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h
index 7f380ca4f..251e6013c 100644
--- a/src/core/hle/kernel/svc_types.h
+++ b/src/core/hle/kernel/svc_types.h
@@ -46,6 +46,7 @@ enum class MemoryAttribute : u32 {
     IpcLocked = (1 << 1),
     DeviceShared = (1 << 2),
     Uncached = (1 << 3),
+    PermissionLocked = (1 << 4),
 };
 DECLARE_ENUM_FLAG_OPERATORS(MemoryAttribute);
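
The added guard in svc_memory.cpp means PermissionLocked can be set through SetMemoryAttribute but never cleared. A standalone restatement of that condition as a predicate (the helper name is invented for illustration; the bit value matches svc_types.h above):

    #include <cstdint>

    // mask and attr must agree on the PermissionLocked bit: setting it (1, 1) or leaving it
    // alone (0, 0) is accepted, while attempting to clear it (mask set, attr clear) is rejected.
    constexpr bool PermissionLockArgsValid(std::uint32_t mask, std::uint32_t attr) {
        constexpr std::uint32_t PermissionLocked = 1U << 4;
        return (mask & PermissionLocked) == (attr & PermissionLocked);
    }
    static_assert(PermissionLockArgsValid(1U << 4, 1U << 4));
    static_assert(PermissionLockArgsValid(0, 0));
    static_assert(!PermissionLockArgsValid(1U << 4, 0));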