From 0746a2084ed78777b3d8ecf7c426a792fba06509 Mon Sep 17 00:00:00 2001 From: Liam Date: Wed, 22 Feb 2023 21:28:06 -0500 Subject: kernel: add InfoType::IoRegionHint --- src/core/hle/kernel/svc_types.h | 1 + 1 file changed, 1 insertion(+) diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h index 542c13461..39355d9c4 100644 --- a/src/core/hle/kernel/svc_types.h +++ b/src/core/hle/kernel/svc_types.h @@ -151,6 +151,7 @@ enum class InfoType : u32 { FreeThreadCount = 24, ThreadTickCount = 25, IsSvcPermitted = 26, + IoRegionHint = 27, MesosphereMeta = 65000, MesosphereCurrentProcess = 65001, -- cgit v1.2.3 From 62711fec0275877f56d0448d78096e1403108109 Mon Sep 17 00:00:00 2001 From: Liam Date: Wed, 22 Feb 2023 21:30:47 -0500 Subject: kernel: simplify KAbstractSchedulerLock::Lock --- src/core/hle/kernel/k_scheduler_lock.h | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h index 129d60472..13463717f 100644 --- a/src/core/hle/kernel/k_scheduler_lock.h +++ b/src/core/hle/kernel/k_scheduler_lock.h @@ -31,22 +31,23 @@ public: } if (IsLockedByCurrentThread()) { - // If we already own the lock, we can just increment the count. + // If we already own the lock, the lock count should be > 0. + // For debug, ensure this is true. ASSERT(lock_count > 0); - lock_count++; } else { // Otherwise, we want to disable scheduling and acquire the spinlock. SchedulerType::DisableScheduling(kernel); spin_lock.Lock(); - // For debug, ensure that our state is valid. ASSERT(lock_count == 0); ASSERT(owner_thread == nullptr); - // Increment count, take ownership. - lock_count = 1; + // Take ownership of the lock. owner_thread = GetCurrentThreadPointer(kernel); } + + // Increment the lock count. 
+ lock_count++; } void Unlock() { -- cgit v1.2.3 From 4165ac06806017cfcb8da547ae84dee554e465c3 Mon Sep 17 00:00:00 2001 From: Liam Date: Wed, 22 Feb 2023 21:33:43 -0500 Subject: kernel: adjust pool allocations --- .../kernel/board/nintendo/nx/k_system_control.cpp | 21 +++++++++++++++------ src/core/hle/kernel/init/init_slab_setup.cpp | 2 +- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp index c10b7bf30..5b8a248c8 100644 --- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp +++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp @@ -14,9 +14,12 @@ namespace Kernel::Board::Nintendo::Nx { namespace impl { -constexpr const std::size_t RequiredNonSecureSystemMemorySizeVi = 0x2238 * 4 * 1024; -constexpr const std::size_t RequiredNonSecureSystemMemorySizeNvservices = 0x710 * 4 * 1024; -constexpr const std::size_t RequiredNonSecureSystemMemorySizeMisc = 0x80 * 4 * 1024; +using namespace Common::Literals; + +constexpr const std::size_t RequiredNonSecureSystemMemorySizeVi = 0x2280 * 4_KiB; +constexpr const std::size_t RequiredNonSecureSystemMemorySizeViFatal = 0x200 * 4_KiB; +constexpr const std::size_t RequiredNonSecureSystemMemorySizeNvservices = 0x704 * 4_KiB; +constexpr const std::size_t RequiredNonSecureSystemMemorySizeMisc = 0x80 * 4_KiB; } // namespace impl @@ -24,6 +27,9 @@ constexpr const std::size_t RequiredNonSecureSystemMemorySize = impl::RequiredNonSecureSystemMemorySizeVi + impl::RequiredNonSecureSystemMemorySizeNvservices + impl::RequiredNonSecureSystemMemorySizeMisc; +constexpr const std::size_t RequiredNonSecureSystemMemorySizeWithFatal = + RequiredNonSecureSystemMemorySize + impl::RequiredNonSecureSystemMemorySizeViFatal; + namespace { using namespace Common::Literals; @@ -120,10 +126,13 @@ size_t KSystemControl::Init::GetAppletPoolSize() { size_t KSystemControl::Init::GetMinimumNonSecureSystemPoolSize() { // Verify that our minimum is at least as large as Nintendo's. 
- constexpr size_t MinimumSize = RequiredNonSecureSystemMemorySize; - static_assert(MinimumSize >= 0x29C8000); + constexpr size_t MinimumSizeWithFatal = RequiredNonSecureSystemMemorySizeWithFatal; + static_assert(MinimumSizeWithFatal >= 0x2C04000); + + constexpr size_t MinimumSizeWithoutFatal = RequiredNonSecureSystemMemorySize; + static_assert(MinimumSizeWithoutFatal >= 0x2A00000); - return MinimumSize; + return MinimumSizeWithFatal; } namespace { diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp index abdb5639f..be52405c6 100644 --- a/src/core/hle/kernel/init/init_slab_setup.cpp +++ b/src/core/hle/kernel/init/init_slab_setup.cpp @@ -131,7 +131,7 @@ VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAd } size_t CalculateSlabHeapGapSize() { - constexpr size_t KernelSlabHeapGapSize = 2_MiB - 320_KiB; + constexpr size_t KernelSlabHeapGapSize = 2_MiB - 356_KiB; static_assert(KernelSlabHeapGapSize <= KernelSlabHeapGapsSizeMax); return KernelSlabHeapGapSize; } -- cgit v1.2.3 From 9f9b64cda2079a1aebf2c4a12fc20c24891c23c9 Mon Sep 17 00:00:00 2001 From: Liam Date: Wed, 22 Feb 2023 21:40:53 -0500 Subject: kernel: document previous location of interrupt disables in arbiter/condvar --- src/core/hle/kernel/k_address_arbiter.cpp | 8 ++++++-- src/core/hle/kernel/k_condition_variable.cpp | 4 +++- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp index a442a3b98..fb86451ea 100644 --- a/src/core/hle/kernel/k_address_arbiter.cpp +++ b/src/core/hle/kernel/k_address_arbiter.cpp @@ -29,7 +29,9 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu auto& monitor = system.Monitor(); const auto current_core = system.Kernel().CurrentPhysicalCoreIndex(); - // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. + // NOTE: If scheduler lock is not held here, interrupt disable is required. + // KScopedInterruptDisable di; + // TODO(bunnei): We should call CanAccessAtomic(..) here. // Load the value from the address. @@ -59,7 +61,9 @@ bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 auto& monitor = system.Monitor(); const auto current_core = system.Kernel().CurrentPhysicalCoreIndex(); - // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. + // NOTE: If scheduler lock is not held here, interrupt disable is required. + // KScopedInterruptDisable di; + // TODO(bunnei): We should call CanAccessAtomic(..) here. // Load the value from the address. diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp index 3f0be1c3f..50a805296 100644 --- a/src/core/hle/kernel/k_condition_variable.cpp +++ b/src/core/hle/kernel/k_condition_variable.cpp @@ -198,7 +198,9 @@ void KConditionVariable::SignalImpl(KThread* thread) { u32 prev_tag{}; bool can_access{}; { - // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. + // NOTE: If scheduler lock is not held here, interrupt disable is required. + // KScopedInterruptDisable di; + // TODO(bunnei): We should call CanAccessAtomic(..) here. 
can_access = true; if (can_access) [[likely]] { -- cgit v1.2.3 From 367e89f984e635ae6680e6c640fe3d1259fb692e Mon Sep 17 00:00:00 2001 From: Liam Date: Wed, 22 Feb 2023 21:46:06 -0500 Subject: kernel: barrier memory before condition variable write --- src/core/hle/kernel/k_condition_variable.cpp | 30 ++++++++++++++-------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp index 50a805296..c6a088942 100644 --- a/src/core/hle/kernel/k_condition_variable.cpp +++ b/src/core/hle/kernel/k_condition_variable.cpp @@ -112,7 +112,7 @@ Result KConditionVariable::SignalToAddress(VAddr addr) { // Remove waiter thread. s32 num_waiters{}; - KThread* next_owner_thread = + KThread* const next_owner_thread = owner_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr); // Determine the next tag. @@ -122,25 +122,25 @@ Result KConditionVariable::SignalToAddress(VAddr addr) { if (num_waiters > 1) { next_value |= Svc::HandleWaitMask; } + } - // Write the value to userspace. - Result result{ResultSuccess}; - if (WriteToUser(system, addr, std::addressof(next_value))) [[likely]] { - result = ResultSuccess; - } else { - result = ResultInvalidCurrentMemory; - } + // Synchronize memory before proceeding. + std::atomic_thread_fence(std::memory_order_seq_cst); - // Signal the next owner thread. - next_owner_thread->EndWait(result); - return result; + // Write the value to userspace. + Result result{ResultSuccess}; + if (WriteToUser(system, addr, std::addressof(next_value))) [[likely]] { + result = ResultSuccess; } else { - // Just write the value to userspace. - R_UNLESS(WriteToUser(system, addr, std::addressof(next_value)), - ResultInvalidCurrentMemory); + result = ResultInvalidCurrentMemory; + } - return ResultSuccess; + // If necessary, signal the next owner thread. 
+ if (next_owner_thread != nullptr) { + next_owner_thread->EndWait(result); } + + R_RETURN(result); } } -- cgit v1.2.3 From 96bd7ea42d1bb1188fd099c52569fe7ee0d92a92 Mon Sep 17 00:00:00 2001 From: Liam Date: Wed, 22 Feb 2023 22:03:15 -0500 Subject: kernel: simplify AddressSpaceInfo, update values --- src/core/hle/kernel/k_address_space_info.cpp | 79 +++++----------------------- 1 file changed, 13 insertions(+), 66 deletions(-) diff --git a/src/core/hle/kernel/k_address_space_info.cpp b/src/core/hle/kernel/k_address_space_info.cpp index 3e612a207..97972ebae 100644 --- a/src/core/hle/kernel/k_address_space_info.cpp +++ b/src/core/hle/kernel/k_address_space_info.cpp @@ -23,86 +23,33 @@ constexpr std::array AddressSpaceInfos{{ { .bit_width = 32, .address = Size_Invalid, .size = 1_GiB , .type = KAddressSpaceInfo::Type::Heap, }, { .bit_width = 36, .address = 128_MiB , .size = 2_GiB - 128_MiB, .type = KAddressSpaceInfo::Type::MapSmall, }, { .bit_width = 36, .address = 2_GiB , .size = 64_GiB - 2_GiB , .type = KAddressSpaceInfo::Type::MapLarge, }, - { .bit_width = 36, .address = Size_Invalid, .size = 6_GiB , .type = KAddressSpaceInfo::Type::Heap, }, + { .bit_width = 36, .address = Size_Invalid, .size = 8_GiB , .type = KAddressSpaceInfo::Type::Heap, }, { .bit_width = 36, .address = Size_Invalid, .size = 6_GiB , .type = KAddressSpaceInfo::Type::Alias, }, { .bit_width = 39, .address = 128_MiB , .size = 512_GiB - 128_MiB, .type = KAddressSpaceInfo::Type::Map39Bit, }, { .bit_width = 39, .address = Size_Invalid, .size = 64_GiB , .type = KAddressSpaceInfo::Type::MapSmall }, - { .bit_width = 39, .address = Size_Invalid, .size = 6_GiB , .type = KAddressSpaceInfo::Type::Heap, }, + { .bit_width = 39, .address = Size_Invalid, .size = 8_GiB , .type = KAddressSpaceInfo::Type::Heap, }, { .bit_width = 39, .address = Size_Invalid, .size = 64_GiB , .type = KAddressSpaceInfo::Type::Alias, }, { .bit_width = 39, .address = Size_Invalid, .size = 2_GiB , .type = KAddressSpaceInfo::Type::Stack, }, }}; // clang-format on -constexpr bool IsAllowedIndexForAddress(std::size_t index) { - return index < AddressSpaceInfos.size() && AddressSpaceInfos[index].address != Size_Invalid; -} - -using IndexArray = - std::array(KAddressSpaceInfo::Type::Count)>; - -constexpr IndexArray AddressSpaceIndices32Bit{ - 0, 1, 0, 2, 0, 3, -}; - -constexpr IndexArray AddressSpaceIndices36Bit{ - 4, 5, 4, 6, 4, 7, -}; - -constexpr IndexArray AddressSpaceIndices39Bit{ - 9, 8, 8, 10, 12, 11, -}; - -constexpr bool IsAllowed32BitType(KAddressSpaceInfo::Type type) { - return type < KAddressSpaceInfo::Type::Count && type != KAddressSpaceInfo::Type::Map39Bit && - type != KAddressSpaceInfo::Type::Stack; -} - -constexpr bool IsAllowed36BitType(KAddressSpaceInfo::Type type) { - return type < KAddressSpaceInfo::Type::Count && type != KAddressSpaceInfo::Type::Map39Bit && - type != KAddressSpaceInfo::Type::Stack; -} - -constexpr bool IsAllowed39BitType(KAddressSpaceInfo::Type type) { - return type < KAddressSpaceInfo::Type::Count && type != KAddressSpaceInfo::Type::MapLarge; +const KAddressSpaceInfo& GetAddressSpaceInfo(size_t width, KAddressSpaceInfo::Type type) { + for (auto& info : AddressSpaceInfos) { + if (info.bit_width == width && info.type == type) { + return info; + } + } + UNREACHABLE_MSG("Could not find AddressSpaceInfo"); } } // namespace -u64 KAddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) { - const std::size_t index{static_cast(type)}; - switch (width) { - case 32: - ASSERT(IsAllowed32BitType(type)); - 
ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices32Bit[index])); - return AddressSpaceInfos[AddressSpaceIndices32Bit[index]].address; - case 36: - ASSERT(IsAllowed36BitType(type)); - ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices36Bit[index])); - return AddressSpaceInfos[AddressSpaceIndices36Bit[index]].address; - case 39: - ASSERT(IsAllowed39BitType(type)); - ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices39Bit[index])); - return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].address; - } - ASSERT(false); - return 0; +uintptr_t KAddressSpaceInfo::GetAddressSpaceStart(size_t width, KAddressSpaceInfo::Type type) { + return GetAddressSpaceInfo(width, type).address; } -std::size_t KAddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type) { - const std::size_t index{static_cast(type)}; - switch (width) { - case 32: - ASSERT(IsAllowed32BitType(type)); - return AddressSpaceInfos[AddressSpaceIndices32Bit[index]].size; - case 36: - ASSERT(IsAllowed36BitType(type)); - return AddressSpaceInfos[AddressSpaceIndices36Bit[index]].size; - case 39: - ASSERT(IsAllowed39BitType(type)); - return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].size; - } - ASSERT(false); - return 0; +size_t KAddressSpaceInfo::GetAddressSpaceSize(size_t width, KAddressSpaceInfo::Type type) { + return GetAddressSpaceInfo(width, type).size; } } // namespace Kernel -- cgit v1.2.3 From c4ba088a5df13ff4b4d8853216231d690f2c79c0 Mon Sep 17 00:00:00 2001 From: Liam Date: Thu, 23 Feb 2023 15:49:42 -0500 Subject: kernel: refactor priority inheritance to represent locks as C++ objects --- src/core/hle/kernel/init/init_slab_setup.cpp | 6 +- src/core/hle/kernel/k_condition_variable.cpp | 16 +- src/core/hle/kernel/k_light_lock.cpp | 6 +- src/core/hle/kernel/k_process.cpp | 4 +- src/core/hle/kernel/k_thread.cpp | 269 ++++++++++++++++++--------- src/core/hle/kernel/k_thread.h | 165 +++++++++++++--- src/core/hle/kernel/kernel.cpp | 93 +++++++++ src/core/hle/kernel/kernel.h | 67 +------ 8 files changed, 436 insertions(+), 190 deletions(-) diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp index be52405c6..5e4090e2b 100644 --- a/src/core/hle/kernel/init/init_slab_setup.cpp +++ b/src/core/hle/kernel/init/init_slab_setup.cpp @@ -33,6 +33,9 @@ namespace Kernel::Init { +// For macro convenience. +using KThreadLockInfo = KThread::LockWithPriorityInheritanceInfo; + #define SLAB_COUNT(CLASS) kernel.SlabResourceCounts().num_##CLASS #define FOREACH_SLAB_TYPE(HANDLER, ...) \ @@ -54,7 +57,8 @@ namespace Kernel::Init { HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__) \ HANDLER(KEventInfo, (SLAB_COUNT(KThread) + SLAB_COUNT(KDebug)), ##__VA_ARGS__) \ HANDLER(KDebug, (SLAB_COUNT(KDebug)), ##__VA_ARGS__) \ - HANDLER(KSecureSystemResource, (SLAB_COUNT(KProcess)), ##__VA_ARGS__) + HANDLER(KSecureSystemResource, (SLAB_COUNT(KProcess)), ##__VA_ARGS__) \ + HANDLER(KThreadLockInfo, (SLAB_COUNT(KThread)), ##__VA_ARGS__) namespace { diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp index c6a088942..8dae78397 100644 --- a/src/core/hle/kernel/k_condition_variable.cpp +++ b/src/core/hle/kernel/k_condition_variable.cpp @@ -111,15 +111,15 @@ Result KConditionVariable::SignalToAddress(VAddr addr) { KScopedSchedulerLock sl(kernel); // Remove waiter thread. 
- s32 num_waiters{}; + bool has_waiters{}; KThread* const next_owner_thread = - owner_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr); + owner_thread->RemoveWaiterByKey(std::addressof(has_waiters), addr); // Determine the next tag. u32 next_value{}; if (next_owner_thread != nullptr) { next_value = next_owner_thread->GetAddressKeyValue(); - if (num_waiters > 1) { + if (has_waiters) { next_value |= Svc::HandleWaitMask; } } @@ -247,9 +247,11 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) { (it->GetConditionVariableKey() == cv_key)) { KThread* target_thread = std::addressof(*it); - this->SignalImpl(target_thread); it = thread_tree.erase(it); target_thread->ClearConditionVariable(); + + this->SignalImpl(target_thread); + ++num_waiters; } @@ -279,16 +281,16 @@ Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) { // Update the value and process for the next owner. { // Remove waiter thread. - s32 num_waiters{}; + bool has_waiters{}; KThread* next_owner_thread = - cur_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr); + cur_thread->RemoveWaiterByKey(std::addressof(has_waiters), addr); // Update for the next owner thread. u32 next_value{}; if (next_owner_thread != nullptr) { // Get the next tag value. next_value = next_owner_thread->GetAddressKeyValue(); - if (num_waiters > 1) { + if (has_waiters) { next_value |= Svc::HandleWaitMask; } diff --git a/src/core/hle/kernel/k_light_lock.cpp b/src/core/hle/kernel/k_light_lock.cpp index d791acbe3..b922a67a5 100644 --- a/src/core/hle/kernel/k_light_lock.cpp +++ b/src/core/hle/kernel/k_light_lock.cpp @@ -90,15 +90,15 @@ void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) { KScopedSchedulerLock sl(kernel); // Get the next owner. - s32 num_waiters; + bool has_waiters; KThread* next_owner = owner_thread->RemoveWaiterByKey( - std::addressof(num_waiters), reinterpret_cast(std::addressof(tag))); + std::addressof(has_waiters), reinterpret_cast(std::addressof(tag))); // Pass the lock to the next owner. uintptr_t next_tag = 0; if (next_owner != nullptr) { next_tag = - reinterpret_cast(next_owner) | static_cast(num_waiters > 1); + reinterpret_cast(next_owner) | static_cast(has_waiters); next_owner->EndWait(ResultSuccess); diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp index d9c1a0eb3..514f20ef4 100644 --- a/src/core/hle/kernel/k_process.cpp +++ b/src/core/hle/kernel/k_process.cpp @@ -156,9 +156,9 @@ bool KProcess::ReleaseUserException(KThread* thread) { exception_thread = nullptr; // Remove waiter thread. - s32 num_waiters{}; + bool has_waiters{}; if (KThread* next = thread->RemoveWaiterByKey( - std::addressof(num_waiters), + std::addressof(has_waiters), reinterpret_cast(std::addressof(exception_thread))); next != nullptr) { next->EndWait(ResultSuccess); diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp index 599d05947..2831df733 100644 --- a/src/core/hle/kernel/k_thread.cpp +++ b/src/core/hle/kernel/k_thread.cpp @@ -191,7 +191,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack light_ipc_data = nullptr; // We're not waiting for a lock, and we haven't disabled migration. - lock_owner = nullptr; + waiting_lock_info = nullptr; num_core_migration_disables = 0; // We have no waiters, but we do have an entrypoint. @@ -341,25 +341,39 @@ void KThread::Finalize() { // Release any waiters. 
{ - ASSERT(lock_owner == nullptr); + ASSERT(waiting_lock_info == nullptr); KScopedSchedulerLock sl{kernel}; - auto it = waiter_list.begin(); - while (it != waiter_list.end()) { - // Get the thread. - KThread* const waiter = std::addressof(*it); + // Check that we have no kernel waiters. + ASSERT(num_kernel_waiters == 0); - // The thread shouldn't be a kernel waiter. - ASSERT(!waiter->GetAddressKeyIsKernel()); + auto it = held_lock_info_list.begin(); + while (it != held_lock_info_list.end()) { + // Get the lock info. + auto* const lock_info = std::addressof(*it); - // Clear the lock owner. - waiter->SetLockOwner(nullptr); + // The lock shouldn't have a kernel waiter. + ASSERT(!lock_info->GetIsKernelAddressKey()); - // Erase the waiter from our list. - it = waiter_list.erase(it); + // Remove all waiters. + while (lock_info->GetWaiterCount() != 0) { + // Get the front waiter. + KThread* const waiter = lock_info->GetHighestPriorityWaiter(); - // Cancel the thread's wait. - waiter->CancelWait(ResultInvalidState, true); + // Remove it from the lock. + if (lock_info->RemoveWaiter(waiter)) { + ASSERT(lock_info->GetWaiterCount() == 0); + } + + // Cancel the thread's wait. + waiter->CancelWait(ResultInvalidState, true); + } + + // Remove the held lock from our list. + it = held_lock_info_list.erase(it); + + // Free the lock info. + LockWithPriorityInheritanceInfo::Free(kernel, lock_info); } } @@ -708,6 +722,24 @@ void KThread::SetBasePriority(s32 value) { RestorePriority(kernel, this); } +KThread* KThread::GetLockOwner() const { + return waiting_lock_info != nullptr ? waiting_lock_info->GetOwner() : nullptr; +} + +void KThread::IncreaseBasePriority(s32 priority_) { + ASSERT(Svc::HighestThreadPriority <= priority_ && priority_ <= Svc::LowestThreadPriority); + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); + ASSERT(!this->GetStackParameters().is_pinned); + + // Set our base priority. + if (base_priority > priority_) { + base_priority = priority_; + + // Perform a priority restoration. + RestorePriority(kernel, this); + } +} + void KThread::RequestSuspend(SuspendType type) { KScopedSchedulerLock sl{kernel}; @@ -891,51 +923,87 @@ Result KThread::GetThreadContext3(std::vector& out) { R_SUCCEED(); } -void KThread::AddWaiterImpl(KThread* thread) { - ASSERT(kernel.GlobalSchedulerContext().IsLocked()); +void KThread::AddHeldLock(LockWithPriorityInheritanceInfo* lock_info) { + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); + + // Set ourselves as the lock's owner. + lock_info->SetOwner(this); - // Find the right spot to insert the waiter. - auto it = waiter_list.begin(); - while (it != waiter_list.end()) { - if (it->GetPriority() > thread->GetPriority()) { - break; + // Add the lock to our held list. + held_lock_info_list.push_front(*lock_info); +} + +KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(VAddr address_key_) { + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); + + // Try to find an existing held lock. + for (auto& held_lock : held_lock_info_list) { + if (held_lock.GetAddressKey() == address_key_) { + return std::addressof(held_lock); } - it++; } + return nullptr; +} + +void KThread::AddWaiterImpl(KThread* thread) { + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); + ASSERT(thread->GetConditionVariableTree() == nullptr); + + // Get the thread's address key. 
+ const auto address_key_ = thread->GetAddressKey(); + const auto is_kernel_address_key_ = thread->GetIsKernelAddressKey(); + // Keep track of how many kernel waiters we have. - if (thread->GetAddressKeyIsKernel()) { + if (is_kernel_address_key_) { ASSERT((num_kernel_waiters++) >= 0); KScheduler::SetSchedulerUpdateNeeded(kernel); } - // Insert the waiter. - waiter_list.insert(it, *thread); - thread->SetLockOwner(this); + // Get the relevant lock info. + auto* lock_info = this->FindHeldLock(address_key_); + if (lock_info == nullptr) { + // Create a new lock for the address key. + lock_info = + LockWithPriorityInheritanceInfo::Create(kernel, address_key_, is_kernel_address_key_); + + // Add the new lock to our list. + this->AddHeldLock(lock_info); + } + + // Add the thread as waiter to the lock info. + lock_info->AddWaiter(thread); } void KThread::RemoveWaiterImpl(KThread* thread) { - ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); // Keep track of how many kernel waiters we have. - if (thread->GetAddressKeyIsKernel()) { + if (thread->GetIsKernelAddressKey()) { ASSERT((num_kernel_waiters--) > 0); KScheduler::SetSchedulerUpdateNeeded(kernel); } + // Get the info for the lock the thread is waiting on. + auto* lock_info = thread->GetWaitingLockInfo(); + ASSERT(lock_info->GetOwner() == this); + // Remove the waiter. - waiter_list.erase(waiter_list.iterator_to(*thread)); - thread->SetLockOwner(nullptr); + if (lock_info->RemoveWaiter(thread)) { + held_lock_info_list.erase(held_lock_info_list.iterator_to(*lock_info)); + LockWithPriorityInheritanceInfo::Free(kernel, lock_info); + } } -void KThread::RestorePriority(KernelCore& kernel_ctx, KThread* thread) { - ASSERT(kernel_ctx.GlobalSchedulerContext().IsLocked()); +void KThread::RestorePriority(KernelCore& kernel, KThread* thread) { + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); - while (true) { + while (thread != nullptr) { // We want to inherit priority where possible. s32 new_priority = thread->GetBasePriority(); - if (thread->HasWaiters()) { - new_priority = std::min(new_priority, thread->waiter_list.front().GetPriority()); + for (const auto& held_lock : thread->held_lock_info_list) { + new_priority = + std::min(new_priority, held_lock.GetHighestPriorityWaiter()->GetPriority()); } // If the priority we would inherit is not different from ours, don't do anything. @@ -943,9 +1011,18 @@ void KThread::RestorePriority(KernelCore& kernel_ctx, KThread* thread) { return; } + // Get the owner of whatever lock this thread is waiting on. + KThread* const lock_owner = thread->GetLockOwner(); + + // If the thread is waiting on some lock, remove it as a waiter to prevent violating red + // black tree invariants. + if (lock_owner != nullptr) { + lock_owner->RemoveWaiterImpl(thread); + } + // Ensure we don't violate condition variable red black tree invariants. if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) { - BeforeUpdatePriority(kernel_ctx, cv_tree, thread); + BeforeUpdatePriority(kernel, cv_tree, thread); } // Change the priority. @@ -954,73 +1031,99 @@ void KThread::RestorePriority(KernelCore& kernel_ctx, KThread* thread) { // Restore the condition variable, if relevant. if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) { - AfterUpdatePriority(kernel_ctx, cv_tree, thread); + AfterUpdatePriority(kernel, cv_tree, thread); } - // Update the scheduler. 
- KScheduler::OnThreadPriorityChanged(kernel_ctx, thread, old_priority); - - // Keep the lock owner up to date. - KThread* lock_owner = thread->GetLockOwner(); - if (lock_owner == nullptr) { - return; + // If we removed the thread from some lock's waiting list, add it back. + if (lock_owner != nullptr) { + lock_owner->AddWaiterImpl(thread); } - // Update the thread in the lock owner's sorted list, and continue inheriting. - lock_owner->RemoveWaiterImpl(thread); - lock_owner->AddWaiterImpl(thread); + // Update the scheduler. + KScheduler::OnThreadPriorityChanged(kernel, thread, old_priority); + + // Continue inheriting priority. thread = lock_owner; } } void KThread::AddWaiter(KThread* thread) { - AddWaiterImpl(thread); - RestorePriority(kernel, this); + this->AddWaiterImpl(thread); + + // If the thread has a higher priority than us, we should inherit. + if (thread->GetPriority() < this->GetPriority()) { + RestorePriority(kernel, this); + } } void KThread::RemoveWaiter(KThread* thread) { - RemoveWaiterImpl(thread); - RestorePriority(kernel, this); + this->RemoveWaiterImpl(thread); + + // If our priority is the same as the thread's (and we've inherited), we may need to restore to + // lower priority. + if (this->GetPriority() == thread->GetPriority() && + this->GetPriority() < this->GetBasePriority()) { + RestorePriority(kernel, this); + } } -KThread* KThread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) { - ASSERT(kernel.GlobalSchedulerContext().IsLocked()); +KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key) { + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); - s32 num_waiters{}; - KThread* next_lock_owner{}; - auto it = waiter_list.begin(); - while (it != waiter_list.end()) { - if (it->GetAddressKey() == key) { - KThread* thread = std::addressof(*it); - - // Keep track of how many kernel waiters we have. - if (thread->GetAddressKeyIsKernel()) { - ASSERT((num_kernel_waiters--) > 0); - KScheduler::SetSchedulerUpdateNeeded(kernel); - } - it = waiter_list.erase(it); + // Get the relevant lock info. + auto* lock_info = this->FindHeldLock(key); + if (lock_info == nullptr) { + *out_has_waiters = false; + return nullptr; + } - // Update the next lock owner. - if (next_lock_owner == nullptr) { - next_lock_owner = thread; - next_lock_owner->SetLockOwner(nullptr); - } else { - next_lock_owner->AddWaiterImpl(thread); - } - num_waiters++; - } else { - it++; + // Remove the lock info from our held list. + held_lock_info_list.erase(held_lock_info_list.iterator_to(*lock_info)); + + // Keep track of how many kernel waiters we have. + if (lock_info->GetIsKernelAddressKey()) { + num_kernel_waiters -= lock_info->GetWaiterCount(); + ASSERT(num_kernel_waiters >= 0); + KScheduler::SetSchedulerUpdateNeeded(kernel); + } + + ASSERT(lock_info->GetWaiterCount() > 0); + + // Remove the highest priority waiter from the lock to be the next owner. + KThread* next_lock_owner = lock_info->GetHighestPriorityWaiter(); + if (lock_info->RemoveWaiter(next_lock_owner)) { + // The new owner was the only waiter. + *out_has_waiters = false; + + // Free the lock info, since it has no waiters. + LockWithPriorityInheritanceInfo::Free(kernel, lock_info); + } else { + // There are additional waiters on the lock. + *out_has_waiters = true; + + // Add the lock to the new owner's held list. + next_lock_owner->AddHeldLock(lock_info); + + // Keep track of any kernel waiters for the new owner. 
+ if (lock_info->GetIsKernelAddressKey()) { + next_lock_owner->num_kernel_waiters += lock_info->GetWaiterCount(); + ASSERT(next_lock_owner->num_kernel_waiters > 0); + + // NOTE: No need to set scheduler update needed, because we will have already done so + // when removing earlier. } } - // Do priority updates, if we have a next owner. - if (next_lock_owner) { + // If our priority is the same as the next owner's (and we've inherited), we may need to restore + // to lower priority. + if (this->GetPriority() == next_lock_owner->GetPriority() && + this->GetPriority() < this->GetBasePriority()) { RestorePriority(kernel, this); - RestorePriority(kernel, next_lock_owner); + // NOTE: No need to restore priority on the next lock owner, because it was already the + // highest priority waiter on the lock. } - // Return output. - *out_num_waiters = num_waiters; + // Return the next lock owner. return next_lock_owner; } @@ -1137,9 +1240,7 @@ ThreadState KThread::RequestTerminate() { } // Change the thread's priority to be higher than any system thread's. - if (this->GetBasePriority() >= Svc::SystemThreadPriorityHighest) { - this->SetBasePriority(TerminatingThreadPriority); - } + this->IncreaseBasePriority(TerminatingThreadPriority); // If the thread is runnable, send a termination interrupt to other cores. if (this->GetState() == ThreadState::Runnable) { diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h index a04de21bc..e09dcbea0 100644 --- a/src/core/hle/kernel/k_thread.h +++ b/src/core/hle/kernel/k_thread.h @@ -339,13 +339,7 @@ public: void SetInterruptFlag(); void ClearInterruptFlag(); - [[nodiscard]] KThread* GetLockOwner() const { - return lock_owner; - } - - void SetLockOwner(KThread* owner) { - lock_owner = owner; - } + KThread* GetLockOwner() const; [[nodiscard]] const KAffinityMask& GetAffinityMask() const { return physical_affinity_mask; @@ -601,7 +595,7 @@ public: [[nodiscard]] Result GetThreadContext3(std::vector& out); - [[nodiscard]] KThread* RemoveWaiterByKey(s32* out_num_waiters, VAddr key); + [[nodiscard]] KThread* RemoveWaiterByKey(bool* out_has_waiters, VAddr key); [[nodiscard]] VAddr GetAddressKey() const { return address_key; @@ -611,8 +605,8 @@ public: return address_key_value; } - [[nodiscard]] bool GetAddressKeyIsKernel() const { - return address_key_is_kernel; + [[nodiscard]] bool GetIsKernelAddressKey() const { + return is_kernel_address_key; } //! NB: intentional deviation from official kernel. @@ -621,20 +615,17 @@ public: // to cope with arbitrary host pointers making their way // into things. 
-    void SetUserAddressKey(VAddr key) {
-        address_key = key;
-        address_key_is_kernel = false;
-    }
-
     void SetUserAddressKey(VAddr key, u32 val) {
+        ASSERT(waiting_lock_info == nullptr);
         address_key = key;
         address_key_value = val;
-        address_key_is_kernel = false;
+        is_kernel_address_key = false;
     }
 
     void SetKernelAddressKey(VAddr key) {
+        ASSERT(waiting_lock_info == nullptr);
         address_key = key;
-        address_key_is_kernel = true;
+        is_kernel_address_key = true;
     }
 
     void ClearWaitQueue() {
@@ -646,10 +637,6 @@ public:
     void EndWait(Result wait_result_);
     void CancelWait(Result wait_result_, bool cancel_timer_task);
 
-    [[nodiscard]] bool HasWaiters() const {
-        return !waiter_list.empty();
-    }
-
     [[nodiscard]] s32 GetNumKernelWaiters() const {
         return num_kernel_waiters;
     }
@@ -722,13 +709,14 @@ private:
     };
 
     void AddWaiterImpl(KThread* thread);
-
     void RemoveWaiterImpl(KThread* thread);
+
+    static void RestorePriority(KernelCore& kernel, KThread* thread);
 
     void StartTermination();
-
     void FinishTermination();
 
+    void IncreaseBasePriority(s32 priority);
+
     [[nodiscard]] Result Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top,
                                     s32 prio, s32 virt_core, KProcess* owner, ThreadType type);
 
@@ -737,8 +725,6 @@ private:
                                                  s32 core, KProcess* owner, ThreadType type,
                                                  std::function<void()>&& init_func);
 
-    static void RestorePriority(KernelCore& kernel_ctx, KThread* thread);
-
     // For core KThread implementation
     ThreadContext32 thread_context_32{};
     ThreadContext64 thread_context_64{};
@@ -749,6 +735,127 @@ private:
             &KThread::condvar_arbiter_tree_node>;
     using ConditionVariableThreadTree =
         ConditionVariableThreadTreeTraits::TreeType<ConditionVariableComparator>;
+
+private:
+    struct LockWithPriorityInheritanceComparator {
+        struct RedBlackKeyType {
+            s32 m_priority;
+
+            constexpr s32 GetPriority() const {
+                return m_priority;
+            }
+        };
+
+        template <typename T>
+            requires(std::same_as<T, KThread> || std::same_as<T, RedBlackKeyType>)
+        static constexpr int Compare(const T& lhs, const KThread& rhs) {
+            if (lhs.GetPriority() < rhs.GetPriority()) {
+                // Sort by priority.
+                return -1;
+            } else {
+                return 1;
+            }
+        }
+    };
+    static_assert(std::same_as<Common::RedBlackKeyType<LockWithPriorityInheritanceComparator, void>,
+                               LockWithPriorityInheritanceComparator::RedBlackKeyType>);
+
+    using LockWithPriorityInheritanceThreadTreeTraits =
+        Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<
+            &KThread::condvar_arbiter_tree_node>;
+    using LockWithPriorityInheritanceThreadTree =
+        ConditionVariableThreadTreeTraits::TreeType<LockWithPriorityInheritanceComparator>;
+
+public:
+    class LockWithPriorityInheritanceInfo : public KSlabAllocated<LockWithPriorityInheritanceInfo>,
+                                            public boost::intrusive::list_base_hook<> {
+    public:
+        explicit LockWithPriorityInheritanceInfo(KernelCore&) {}
+
+        static LockWithPriorityInheritanceInfo* Create(KernelCore& kernel, VAddr address_key,
+                                                       bool is_kernel_address_key) {
+            // Create a new lock info.
+            auto* new_lock = LockWithPriorityInheritanceInfo::Allocate(kernel);
+            ASSERT(new_lock != nullptr);
+
+            // Set the new lock's address key.
+            new_lock->m_address_key = address_key;
+            new_lock->m_is_kernel_address_key = is_kernel_address_key;
+
+            return new_lock;
+        }
+
+        void SetOwner(KThread* new_owner) {
+            // Set new owner.
+            m_owner = new_owner;
+        }
+
+        void AddWaiter(KThread* waiter) {
+            // Insert the waiter.
+            m_tree.insert(*waiter);
+            m_waiter_count++;
+
+            waiter->SetWaitingLockInfo(this);
+        }
+
+        [[nodiscard]] bool RemoveWaiter(KThread* waiter) {
+            m_tree.erase(m_tree.iterator_to(*waiter));
+
+            waiter->SetWaitingLockInfo(nullptr);
+
+            return (--m_waiter_count) == 0;
+        }
+
+        KThread* GetHighestPriorityWaiter() {
+            return std::addressof(m_tree.front());
+        }
+        const KThread* GetHighestPriorityWaiter() const {
+            return std::addressof(m_tree.front());
+        }
+
+        LockWithPriorityInheritanceThreadTree& GetThreadTree() {
+            return m_tree;
+        }
+        const LockWithPriorityInheritanceThreadTree& GetThreadTree() const {
+            return m_tree;
+        }
+
+        VAddr GetAddressKey() const {
+            return m_address_key;
+        }
+        bool GetIsKernelAddressKey() const {
+            return m_is_kernel_address_key;
+        }
+        KThread* GetOwner() const {
+            return m_owner;
+        }
+        u32 GetWaiterCount() const {
+            return m_waiter_count;
+        }
+
+    private:
+        LockWithPriorityInheritanceThreadTree m_tree{};
+        VAddr m_address_key{};
+        KThread* m_owner{};
+        u32 m_waiter_count{};
+        bool m_is_kernel_address_key{};
+    };
+
+    void SetWaitingLockInfo(LockWithPriorityInheritanceInfo* lock) {
+        waiting_lock_info = lock;
+    }
+
+    LockWithPriorityInheritanceInfo* GetWaitingLockInfo() {
+        return waiting_lock_info;
+    }
+
+    void AddHeldLock(LockWithPriorityInheritanceInfo* lock_info);
+    LockWithPriorityInheritanceInfo* FindHeldLock(VAddr address_key);
+
+private:
+    using LockWithPriorityInheritanceInfoList =
+        boost::intrusive::list<LockWithPriorityInheritanceInfo>;
+
     ConditionVariableThreadTree* condvar_tree{};
     u64 condvar_key{};
     u64 virtual_affinity_mask{};
@@ -765,9 +872,9 @@ private:
     s64 last_scheduled_tick{};
     std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{};
     KThreadQueue* wait_queue{};
-    WaiterList waiter_list{};
+    LockWithPriorityInheritanceInfoList held_lock_info_list{};
+    LockWithPriorityInheritanceInfo* waiting_lock_info{};
     WaiterList pinned_waiter_list{};
-    KThread* lock_owner{};
     u32 address_key_value{};
     u32 suspend_request_flags{};
     u32 suspend_allowed_flags{};
@@ -791,7 +898,7 @@ private:
     bool debug_attached{};
     s8 priority_inheritance_count{};
     bool resource_limit_release_hint{};
-    bool address_key_is_kernel{};
+    bool is_kernel_address_key{};
     StackParameters stack_parameters{};
     Common::SpinLock context_guard{};
 
@@ -814,6 +921,7 @@ public:
 
     void SetConditionVariable(ConditionVariableThreadTree* tree, VAddr address, u64 cv_key,
                               u32 value) {
+        ASSERT(waiting_lock_info == nullptr);
         condvar_tree = tree;
         condvar_key = cv_key;
         address_key = address;
@@ -829,6 +937,7 @@ public:
     }
 
     void SetAddressArbiter(ConditionVariableThreadTree* tree, u64 address) {
+        ASSERT(waiting_lock_info == nullptr);
         condvar_tree = tree;
         condvar_key = address;
     }
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index ce94d3605..ef7057ff7 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -1318,4 +1318,97 @@ const Core::System& KernelCore::System() const {
     return impl->system;
 }
 
+struct KernelCore::SlabHeapContainer {
+    KSlabHeap<KClientSession> client_session;
+    KSlabHeap<KEvent> event;
+    KSlabHeap<KLinkedListNode> linked_list_node;
+    KSlabHeap<KPort> port;
+    KSlabHeap<KProcess> process;
+    KSlabHeap<KResourceLimit> resource_limit;
+    KSlabHeap<KSession> session;
+    KSlabHeap<KSharedMemory> shared_memory;
+    KSlabHeap<KSharedMemoryInfo> shared_memory_info;
+    KSlabHeap<KThread> thread;
+    KSlabHeap<KTransferMemory> transfer_memory;
+    KSlabHeap<KCodeMemory> code_memory;
+    KSlabHeap<KDeviceAddressSpace> device_address_space;
+    KSlabHeap<KPageBuffer> page_buffer;
+    KSlabHeap<KThreadLocalPage> thread_local_page;
+    KSlabHeap<KObjectName> object_name;
+    KSlabHeap<KSessionRequest> session_request;
+    KSlabHeap<KSecureSystemResource> secure_system_resource;
+    KSlabHeap<KThread::LockWithPriorityInheritanceInfo> lock_info;
+    KSlabHeap<KEventInfo> event_info;
+    KSlabHeap<KDebug> debug;
+};
+
+template <typename T>
+KSlabHeap<T>& KernelCore::SlabHeap() {
+    if constexpr (std::is_same_v<T, KClientSession>) {
+        return slab_heap_container->client_session;
+    } else if constexpr (std::is_same_v<T, KEvent>) {
+        return slab_heap_container->event;
+    } else if constexpr (std::is_same_v<T, KLinkedListNode>) {
+        return slab_heap_container->linked_list_node;
+    } else if constexpr (std::is_same_v<T, KPort>) {
+        return slab_heap_container->port;
+    } else if constexpr (std::is_same_v<T, KProcess>) {
+        return slab_heap_container->process;
+    } else if constexpr (std::is_same_v<T, KResourceLimit>) {
+        return slab_heap_container->resource_limit;
+    } else if constexpr (std::is_same_v<T, KSession>) {
+        return slab_heap_container->session;
+    } else if constexpr (std::is_same_v<T, KSharedMemory>) {
+        return slab_heap_container->shared_memory;
+    } else if constexpr (std::is_same_v<T, KSharedMemoryInfo>) {
+        return slab_heap_container->shared_memory_info;
+    } else if constexpr (std::is_same_v<T, KThread>) {
+        return slab_heap_container->thread;
+    } else if constexpr (std::is_same_v<T, KTransferMemory>) {
+        return slab_heap_container->transfer_memory;
+    } else if constexpr (std::is_same_v<T, KCodeMemory>) {
+        return slab_heap_container->code_memory;
+    } else if constexpr (std::is_same_v<T, KDeviceAddressSpace>) {
+        return slab_heap_container->device_address_space;
+    } else if constexpr (std::is_same_v<T, KPageBuffer>) {
+        return slab_heap_container->page_buffer;
+    } else if constexpr (std::is_same_v<T, KThreadLocalPage>) {
+        return slab_heap_container->thread_local_page;
+    } else if constexpr (std::is_same_v<T, KObjectName>) {
+        return slab_heap_container->object_name;
+    } else if constexpr (std::is_same_v<T, KSessionRequest>) {
+        return slab_heap_container->session_request;
+    } else if constexpr (std::is_same_v<T, KSecureSystemResource>) {
+        return slab_heap_container->secure_system_resource;
+    } else if constexpr (std::is_same_v<T, KThread::LockWithPriorityInheritanceInfo>) {
+        return slab_heap_container->lock_info;
+    } else if constexpr (std::is_same_v<T, KEventInfo>) {
+        return slab_heap_container->event_info;
+    } else if constexpr (std::is_same_v<T, KDebug>) {
+        return slab_heap_container->debug;
+    }
+}
+
+template KSlabHeap<KClientSession>& KernelCore::SlabHeap();
+template KSlabHeap<KEvent>& KernelCore::SlabHeap();
+template KSlabHeap<KLinkedListNode>& KernelCore::SlabHeap();
+template KSlabHeap<KPort>& KernelCore::SlabHeap();
+template KSlabHeap<KProcess>& KernelCore::SlabHeap();
+template KSlabHeap<KResourceLimit>& KernelCore::SlabHeap();
+template KSlabHeap<KSession>& KernelCore::SlabHeap();
+template KSlabHeap<KSharedMemory>& KernelCore::SlabHeap();
+template KSlabHeap<KSharedMemoryInfo>& KernelCore::SlabHeap();
+template KSlabHeap<KThread>& KernelCore::SlabHeap();
+template KSlabHeap<KTransferMemory>& KernelCore::SlabHeap();
+template KSlabHeap<KCodeMemory>& KernelCore::SlabHeap();
+template KSlabHeap<KDeviceAddressSpace>& KernelCore::SlabHeap();
+template KSlabHeap<KPageBuffer>& KernelCore::SlabHeap();
+template KSlabHeap<KThreadLocalPage>& KernelCore::SlabHeap();
+template KSlabHeap<KObjectName>& KernelCore::SlabHeap();
+template KSlabHeap<KSessionRequest>& KernelCore::SlabHeap();
+template KSlabHeap<KSecureSystemResource>& KernelCore::SlabHeap();
+template KSlabHeap<KThread::LockWithPriorityInheritanceInfo>& KernelCore::SlabHeap();
+template KSlabHeap<KEventInfo>& KernelCore::SlabHeap();
+template KSlabHeap<KDebug>& KernelCore::SlabHeap();
+
 } // namespace Kernel
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 4449f6949..1b380a07b 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -305,49 +305,7 @@ public:
 
     /// Gets the slab heap for the specified kernel object type.
     template <typename T>
-    KSlabHeap<T>& SlabHeap() {
-        if constexpr (std::is_same_v<T, KClientSession>) {
-            return slab_heap_container->client_session;
-        } else if constexpr (std::is_same_v<T, KEvent>) {
-            return slab_heap_container->event;
-        } else if constexpr (std::is_same_v<T, KLinkedListNode>) {
-            return slab_heap_container->linked_list_node;
-        } else if constexpr (std::is_same_v<T, KPort>) {
-            return slab_heap_container->port;
-        } else if constexpr (std::is_same_v<T, KProcess>) {
-            return slab_heap_container->process;
-        } else if constexpr (std::is_same_v<T, KResourceLimit>) {
-            return slab_heap_container->resource_limit;
-        } else if constexpr (std::is_same_v<T, KSession>) {
-            return slab_heap_container->session;
-        } else if constexpr (std::is_same_v<T, KSharedMemory>) {
-            return slab_heap_container->shared_memory;
-        } else if constexpr (std::is_same_v<T, KSharedMemoryInfo>) {
-            return slab_heap_container->shared_memory_info;
-        } else if constexpr (std::is_same_v<T, KThread>) {
-            return slab_heap_container->thread;
-        } else if constexpr (std::is_same_v<T, KTransferMemory>) {
-            return slab_heap_container->transfer_memory;
-        } else if constexpr (std::is_same_v<T, KCodeMemory>) {
-            return slab_heap_container->code_memory;
-        } else if constexpr (std::is_same_v<T, KDeviceAddressSpace>) {
-            return slab_heap_container->device_address_space;
-        } else if constexpr (std::is_same_v<T, KPageBuffer>) {
-            return slab_heap_container->page_buffer;
-        } else if constexpr (std::is_same_v<T, KThreadLocalPage>) {
-            return slab_heap_container->thread_local_page;
-        } else if constexpr (std::is_same_v<T, KObjectName>) {
-            return slab_heap_container->object_name;
-        } else if constexpr (std::is_same_v<T, KSessionRequest>) {
-            return slab_heap_container->session_request;
-        } else if constexpr (std::is_same_v<T, KSecureSystemResource>) {
-            return slab_heap_container->secure_system_resource;
-        } else if constexpr (std::is_same_v<T, KEventInfo>) {
-            return slab_heap_container->event_info;
-        } else if constexpr (std::is_same_v<T, KDebug>) {
-            return slab_heap_container->debug;
-        }
-    }
+    KSlabHeap<T>& SlabHeap();
 
     /// Gets the current slab resource counts.
     Init::KSlabResourceCounts& SlabResourceCounts();
@@ -393,28 +351,7 @@ private:
 private:
     /// Helper to encapsulate all slab heaps in a single heap allocated container
-    struct SlabHeapContainer {
-        KSlabHeap<KClientSession> client_session;
-        KSlabHeap<KEvent> event;
-        KSlabHeap<KLinkedListNode> linked_list_node;
-        KSlabHeap<KPort> port;
-        KSlabHeap<KProcess> process;
-        KSlabHeap<KResourceLimit> resource_limit;
-        KSlabHeap<KSession> session;
-        KSlabHeap<KSharedMemory> shared_memory;
-        KSlabHeap<KSharedMemoryInfo> shared_memory_info;
-        KSlabHeap<KThread> thread;
-        KSlabHeap<KTransferMemory> transfer_memory;
-        KSlabHeap<KCodeMemory> code_memory;
-        KSlabHeap<KDeviceAddressSpace> device_address_space;
-        KSlabHeap<KPageBuffer> page_buffer;
-        KSlabHeap<KThreadLocalPage> thread_local_page;
-        KSlabHeap<KObjectName> object_name;
-        KSlabHeap<KSessionRequest> session_request;
-        KSlabHeap<KSecureSystemResource> secure_system_resource;
-        KSlabHeap<KEventInfo> event_info;
-        KSlabHeap<KDebug> debug;
-    };
+    struct SlabHeapContainer;
 
     std::unique_ptr<SlabHeapContainer> slab_heap_container;
 };
--
cgit v1.2.3


From 97f7f7bad59cdd42bf5f504089e5cecd441da3ce Mon Sep 17 00:00:00 2001
From: Liam
Date: Thu, 23 Feb 2023 20:32:03 -0500
Subject: kernel: be more careful about kernel address keys

---
 src/core/hle/kernel/k_condition_variable.cpp |  4 ++--
 src/core/hle/kernel/k_light_lock.cpp         |  2 +-
 src/core/hle/kernel/k_process.cpp            |  2 +-
 src/core/hle/kernel/k_thread.cpp             | 12 +++++++-----
 src/core/hle/kernel/k_thread.h               | 14 ++++++++++++--
 5 files changed, 23 insertions(+), 11 deletions(-)

diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index 8dae78397..f40cf92b1 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -113,7 +113,7 @@ Result KConditionVariable::SignalToAddress(VAddr addr) {
         // Remove waiter thread.
bool has_waiters{}; KThread* const next_owner_thread = - owner_thread->RemoveWaiterByKey(std::addressof(has_waiters), addr); + owner_thread->RemoveUserWaiterByKey(std::addressof(has_waiters), addr); // Determine the next tag. u32 next_value{}; @@ -283,7 +283,7 @@ Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) { // Remove waiter thread. bool has_waiters{}; KThread* next_owner_thread = - cur_thread->RemoveWaiterByKey(std::addressof(has_waiters), addr); + cur_thread->RemoveUserWaiterByKey(std::addressof(has_waiters), addr); // Update for the next owner thread. u32 next_value{}; diff --git a/src/core/hle/kernel/k_light_lock.cpp b/src/core/hle/kernel/k_light_lock.cpp index b922a67a5..14cb615da 100644 --- a/src/core/hle/kernel/k_light_lock.cpp +++ b/src/core/hle/kernel/k_light_lock.cpp @@ -91,7 +91,7 @@ void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) { // Get the next owner. bool has_waiters; - KThread* next_owner = owner_thread->RemoveWaiterByKey( + KThread* next_owner = owner_thread->RemoveKernelWaiterByKey( std::addressof(has_waiters), reinterpret_cast(std::addressof(tag))); // Pass the lock to the next owner. diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp index 514f20ef4..d44f6e921 100644 --- a/src/core/hle/kernel/k_process.cpp +++ b/src/core/hle/kernel/k_process.cpp @@ -157,7 +157,7 @@ bool KProcess::ReleaseUserException(KThread* thread) { // Remove waiter thread. bool has_waiters{}; - if (KThread* next = thread->RemoveWaiterByKey( + if (KThread* next = thread->RemoveKernelWaiterByKey( std::addressof(has_waiters), reinterpret_cast(std::addressof(exception_thread))); next != nullptr) { diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp index 2831df733..8c403f5fd 100644 --- a/src/core/hle/kernel/k_thread.cpp +++ b/src/core/hle/kernel/k_thread.cpp @@ -933,12 +933,14 @@ void KThread::AddHeldLock(LockWithPriorityInheritanceInfo* lock_info) { held_lock_info_list.push_front(*lock_info); } -KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(VAddr address_key_) { +KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(VAddr address_key_, + bool is_kernel_address_key_) { ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); // Try to find an existing held lock. for (auto& held_lock : held_lock_info_list) { - if (held_lock.GetAddressKey() == address_key_) { + if (held_lock.GetAddressKey() == address_key_ && + held_lock.GetIsKernelAddressKey() == is_kernel_address_key_) { return std::addressof(held_lock); } } @@ -961,7 +963,7 @@ void KThread::AddWaiterImpl(KThread* thread) { } // Get the relevant lock info. - auto* lock_info = this->FindHeldLock(address_key_); + auto* lock_info = this->FindHeldLock(address_key_, is_kernel_address_key_); if (lock_info == nullptr) { // Create a new lock for the address key. lock_info = @@ -1067,11 +1069,11 @@ void KThread::RemoveWaiter(KThread* thread) { } } -KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key) { +KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_kernel_address_key_) { ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); // Get the relevant lock info. 
- auto* lock_info = this->FindHeldLock(key); + auto* lock_info = this->FindHeldLock(key, is_kernel_address_key_); if (lock_info == nullptr) { *out_has_waiters = false; return nullptr; diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h index e09dcbea0..bd125f5f1 100644 --- a/src/core/hle/kernel/k_thread.h +++ b/src/core/hle/kernel/k_thread.h @@ -595,7 +595,13 @@ public: [[nodiscard]] Result GetThreadContext3(std::vector& out); - [[nodiscard]] KThread* RemoveWaiterByKey(bool* out_has_waiters, VAddr key); + [[nodiscard]] KThread* RemoveUserWaiterByKey(bool* out_has_waiters, VAddr key) { + return this->RemoveWaiterByKey(out_has_waiters, key, false); + } + + [[nodiscard]] KThread* RemoveKernelWaiterByKey(bool* out_has_waiters, VAddr key) { + return this->RemoveWaiterByKey(out_has_waiters, key, true); + } [[nodiscard]] VAddr GetAddressKey() const { return address_key; @@ -666,6 +672,9 @@ public: } private: + [[nodiscard]] KThread* RemoveWaiterByKey(bool* out_has_waiters, VAddr key, + bool is_kernel_address_key); + static constexpr size_t PriorityInheritanceCountMax = 10; union SyncObjectBuffer { std::array sync_objects{}; @@ -850,7 +859,7 @@ public: } void AddHeldLock(LockWithPriorityInheritanceInfo* lock_info); - LockWithPriorityInheritanceInfo* FindHeldLock(VAddr address_key); + LockWithPriorityInheritanceInfo* FindHeldLock(VAddr address_key, bool is_kernel_address_key); private: using LockWithPriorityInheritanceInfoList = @@ -926,6 +935,7 @@ public: condvar_key = cv_key; address_key = address; address_key_value = value; + is_kernel_address_key = false; } void ClearConditionVariable() { -- cgit v1.2.3
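The priority-inheritance commits above replace KThread's flat waiter list with per-address-key LockWithPriorityInheritanceInfo objects: a thread keeps a list of the locks it owns, each lock keeps a priority-ordered tree of its waiters, and RestorePriority walks the chain of lock owners. The standalone C++ sketch below illustrates that bookkeeping only; the type and member names (Thread, LockInfo, waiting_on, held_locks) are simplified assumptions for illustration, not the yuzu API.

// Illustrative sketch of the held-lock/waiter bookkeeping (assumed, simplified names).
#include <algorithm>
#include <cstdint>
#include <list>
#include <map>

struct Thread;

// One lock record per contended address key, owned by exactly one thread.
struct LockInfo {
    std::uintptr_t address_key{};
    Thread* owner{};
    std::multimap<int, Thread*> waiters; // keyed by priority; lowest value = highest priority
};

struct Thread {
    int base_priority{};
    int priority{};
    std::list<LockInfo*> held_locks; // locks this thread currently owns
    LockInfo* waiting_on{};          // lock this thread is blocked on, if any
};

// Recompute a thread's effective priority from its base priority and the
// highest-priority waiter across all locks it holds, then continue up the
// chain of lock owners (the same idea as KThread::RestorePriority above).
inline void RestorePriority(Thread* t) {
    while (t != nullptr) {
        int new_priority = t->base_priority;
        for (const LockInfo* lock : t->held_locks) {
            if (!lock->waiters.empty()) {
                new_priority = std::min(new_priority, lock->waiters.begin()->first);
            }
        }
        if (new_priority == t->priority) {
            return; // nothing changed; the inheritance chain is already consistent
        }
        t->priority = new_priority;
        // Whoever owns the lock this thread is waiting on may now need to inherit too.
        t = (t->waiting_on != nullptr) ? t->waiting_on->owner : nullptr;
    }
}

The real implementation additionally removes the thread from its owner's waiter tree before changing its priority and re-inserts it afterwards, so the red-black tree ordering invariants are preserved.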