Diffstat (limited to 'src/core')
-rw-r--r-- | src/core/CMakeLists.txt                        |  1
-rw-r--r-- | src/core/hle/kernel/address_arbiter.cpp        | 10
-rw-r--r-- | src/core/hle/kernel/global_scheduler_context.h | 12
-rw-r--r-- | src/core/hle/kernel/k_scheduler.cpp            | 15
-rw-r--r-- | src/core/hle/kernel/k_scheduler.h              | 10
-rw-r--r-- | src/core/hle/kernel/k_scoped_lock.h            | 39
-rw-r--r-- | src/core/hle/kernel/kernel.cpp                 |  4
-rw-r--r-- | src/core/hle/kernel/mutex.cpp                  |  6
-rw-r--r-- | src/core/hle/kernel/process.cpp                |  8
-rw-r--r-- | src/core/hle/kernel/readable_event.cpp         |  2
-rw-r--r-- | src/core/hle/kernel/server_session.cpp         |  2
-rw-r--r-- | src/core/hle/kernel/svc.cpp                    |  8
-rw-r--r-- | src/core/hle/kernel/synchronization.cpp        |  4
-rw-r--r-- | src/core/hle/kernel/thread.cpp                 | 17
-rw-r--r-- | src/core/hle/kernel/time_manager.cpp           |  2
15 files changed, 92 insertions, 48 deletions
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 5f6dce52a..eb1fbcb61 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -159,6 +159,7 @@ add_library(core STATIC
     hle/kernel/k_scheduler.cpp
     hle/kernel/k_scheduler.h
     hle/kernel/k_scheduler_lock.h
+    hle/kernel/k_scoped_lock.h
     hle/kernel/k_scoped_scheduler_lock_and_sleep.h
     hle/kernel/kernel.cpp
     hle/kernel/kernel.h
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
index ac4913173..20ffa7d47 100644
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ b/src/core/hle/kernel/address_arbiter.cpp
@@ -59,7 +59,7 @@ ResultCode AddressArbiter::SignalToAddress(VAddr address, SignalType type, s32 v
 }
 
 ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
-    SchedulerLock lock(system.Kernel());
+    KScopedSchedulerLock lock(system.Kernel());
     const std::vector<std::shared_ptr<Thread>> waiting_threads =
         GetThreadsWaitingOnAddress(address);
     WakeThreads(waiting_threads, num_to_wake);
@@ -68,7 +68,7 @@ ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
 
 ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value,
                                                               s32 num_to_wake) {
-    SchedulerLock lock(system.Kernel());
+    KScopedSchedulerLock lock(system.Kernel());
     auto& memory = system.Memory();
 
     // Ensure that we can write to the address.
@@ -93,7 +93,7 @@ ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32
 
 ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
                                                                          s32 num_to_wake) {
-    SchedulerLock lock(system.Kernel());
+    KScopedSchedulerLock lock(system.Kernel());
     auto& memory = system.Memory();
 
     // Ensure that we can write to the address.
@@ -211,7 +211,7 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6
     }
 
     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
         if (current_thread->IsWaitingForArbitration()) {
             RemoveThread(SharedFrom(current_thread));
             current_thread->WaitForArbitration(false);
@@ -266,7 +266,7 @@ ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 t
     }
 
     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
         if (current_thread->IsWaitingForArbitration()) {
             RemoveThread(SharedFrom(current_thread));
             current_thread->WaitForArbitration(false);
diff --git a/src/core/hle/kernel/global_scheduler_context.h b/src/core/hle/kernel/global_scheduler_context.h
index 39c383746..c4bc23eed 100644
--- a/src/core/hle/kernel/global_scheduler_context.h
+++ b/src/core/hle/kernel/global_scheduler_context.h
@@ -27,6 +27,8 @@ class GlobalSchedulerContext final {
     friend class KScheduler;
 
 public:
+    using LockType = KAbstractSchedulerLock<KScheduler>;
+
     explicit GlobalSchedulerContext(KernelCore& kernel);
     ~GlobalSchedulerContext();
 
@@ -53,8 +55,16 @@ public:
     /// Returns true if the global scheduler lock is acquired
     bool IsLocked() const;
 
+    LockType& SchedulerLock() {
+        return scheduler_lock;
+    }
+
+    const LockType& SchedulerLock() const {
+        return scheduler_lock;
+    }
+
 private:
-    friend class SchedulerLock;
+    friend class KScopedSchedulerLock;
     friend class KScopedSchedulerLockAndSleep;
 
     KernelCore& kernel;
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 466147498..9645fee22 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -410,7 +410,7 @@ void KScheduler::YieldWithoutCoreMigration() {
 
     /* Perform the yield. */
     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
 
         const auto cur_state = cur_thread.scheduling_state;
         if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
@@ -451,7 +451,7 @@ void KScheduler::YieldWithCoreMigration() {
 
     /* Perform the yield. */
     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
 
         const auto cur_state = cur_thread.scheduling_state;
         if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
@@ -541,7 +541,7 @@ void KScheduler::YieldToAnyThread() {
 
     /* Perform the yield. */
     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
 
         const auto cur_state = cur_thread.scheduling_state;
         if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
@@ -793,12 +793,9 @@ void KScheduler::Initialize() {
     }
 }
 
-SchedulerLock::SchedulerLock(KernelCore& kernel) : kernel{kernel} {
-    kernel.GlobalSchedulerContext().Lock();
-}
+KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel)
+    : KScopedLock(kernel.GlobalSchedulerContext().SchedulerLock()) {}
 
-SchedulerLock::~SchedulerLock() {
-    kernel.GlobalSchedulerContext().Unlock();
-}
+KScopedSchedulerLock::~KScopedSchedulerLock() = default;
 
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 5ba0f3c32..d52ecc0db 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -14,6 +14,7 @@
 #include "core/hle/kernel/global_scheduler_context.h"
 #include "core/hle/kernel/k_priority_queue.h"
 #include "core/hle/kernel/k_scheduler_lock.h"
+#include "core/hle/kernel/k_scoped_lock.h"
 
 namespace Common {
 class Fiber;
@@ -198,13 +199,10 @@ private:
     Common::SpinLock guard{};
 };
 
-class SchedulerLock {
+class KScopedSchedulerLock : KScopedLock<GlobalSchedulerContext::LockType> {
 public:
-    [[nodiscard]] explicit SchedulerLock(KernelCore& kernel);
-    ~SchedulerLock();
-
-protected:
-    KernelCore& kernel;
+    explicit KScopedSchedulerLock(KernelCore& kernel);
+    ~KScopedSchedulerLock();
 };
 
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_scoped_lock.h b/src/core/hle/kernel/k_scoped_lock.h
new file mode 100644
index 000000000..03320859f
--- /dev/null
+++ b/src/core/hle/kernel/k_scoped_lock.h
@@ -0,0 +1,39 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+// This file references various implementation details from Atmosphere, an open-source firmware for
+// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+
+#pragma once
+
+#include "common/common_types.h"
+
+namespace Kernel {
+
+template <typename T>
+concept KLockable = !std::is_reference<T>::value && requires(T & t) {
+    { t.Lock() }
+    ->std::same_as<void>;
+    { t.Unlock() }
+    ->std::same_as<void>;
+};
+
+template <typename T>
+requires KLockable<T> class KScopedLock : NonCopyable {
+
+private:
+    T* lock_ptr;
+
+public:
+    explicit KScopedLock(T* l) : lock_ptr(l) {
+        this->lock_ptr->Lock();
+    }
+    explicit KScopedLock(T& l) : KScopedLock(std::addressof(l)) { /* ... */
+    }
+    ~KScopedLock() {
+        this->lock_ptr->Unlock();
+    }
+};
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index b74e34c40..04cae3a43 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -146,7 +146,7 @@ struct KernelCore::Impl {
         preemption_event = Core::Timing::CreateEvent(
             "PreemptionCallback", [this, &kernel](std::uintptr_t, std::chrono::nanoseconds) {
                 {
-                    SchedulerLock lock(kernel);
+                    KScopedSchedulerLock lock(kernel);
                     global_scheduler_context->PreemptThreads();
                 }
                 const auto time_interval = std::chrono::nanoseconds{
@@ -612,7 +612,7 @@ const Kernel::SharedMemory& KernelCore::GetTimeSharedMem() const {
 void KernelCore::Suspend(bool in_suspention) {
     const bool should_suspend = exception_exited || in_suspention;
     {
-        SchedulerLock lock(*this);
+        KScopedSchedulerLock lock(*this);
         ThreadStatus status = should_suspend ? ThreadStatus::Ready : ThreadStatus::WaitSleep;
         for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
             impl->suspend_threads[i]->SetStatus(status);
diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp
index 6299b1342..4f8075e0e 100644
--- a/src/core/hle/kernel/mutex.cpp
+++ b/src/core/hle/kernel/mutex.cpp
@@ -75,7 +75,7 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
     std::shared_ptr<Thread> current_thread =
         SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
         // The mutex address must be 4-byte aligned
         if ((address % sizeof(u32)) != 0) {
             return ERR_INVALID_ADDRESS;
@@ -114,7 +114,7 @@
     }
 
     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
         auto* owner = current_thread->GetLockOwner();
         if (owner != nullptr) {
             owner->RemoveMutexWaiter(current_thread);
@@ -153,7 +153,7 @@ std::pair<ResultCode, std::shared_ptr<Thread>> Mutex::Unlock(std::shared_ptr<Thr
 
 ResultCode Mutex::Release(VAddr address) {
     auto& kernel = system.Kernel();
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
 
     std::shared_ptr<Thread> current_thread =
         SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index 238c03a13..b905b486a 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -54,7 +54,7 @@ void SetupMainThread(Core::System& system, Process& owner_process, u32 priority,
     auto& kernel = system.Kernel();
     // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
     {
-        SchedulerLock lock{kernel};
+        KScopedSchedulerLock lock{kernel};
         thread->SetStatus(ThreadStatus::Ready);
     }
 }
@@ -213,7 +213,7 @@ void Process::UnregisterThread(const Thread* thread) {
 }
 
 ResultCode Process::ClearSignalState() {
-    SchedulerLock lock(system.Kernel());
+    KScopedSchedulerLock lock(system.Kernel());
     if (status == ProcessStatus::Exited) {
         LOG_ERROR(Kernel, "called on a terminated process instance.");
         return ERR_INVALID_STATE;
@@ -347,7 +347,7 @@ static auto FindTLSPageWithAvailableSlots(std::vector<TLSPage>& tls_pages) {
 }
 
 VAddr Process::CreateTLSRegion() {
-    SchedulerLock lock(system.Kernel());
+    KScopedSchedulerLock lock(system.Kernel());
     if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)};
         tls_page_iter != tls_pages.cend()) {
         return *tls_page_iter->ReserveSlot();
@@ -378,7 +378,7 @@ VAddr Process::CreateTLSRegion() {
 }
 
 void Process::FreeTLSRegion(VAddr tls_address) {
-    SchedulerLock lock(system.Kernel());
+    KScopedSchedulerLock lock(system.Kernel());
     const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE);
     auto iter =
         std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) {
diff --git a/src/core/hle/kernel/readable_event.cpp b/src/core/hle/kernel/readable_event.cpp
index 927f88fed..cea262ce0 100644
--- a/src/core/hle/kernel/readable_event.cpp
+++ b/src/core/hle/kernel/readable_event.cpp
@@ -39,7 +39,7 @@ void ReadableEvent::Clear() {
 }
 
 ResultCode ReadableEvent::Reset() {
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
     if (!is_signaled) {
         LOG_TRACE(Kernel, "Handle is not signaled! object_id={}, object_type={}, object_name={}",
                   GetObjectId(), GetTypeName(), GetName());
diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp
index bf2c90028..78e41b13e 100644
--- a/src/core/hle/kernel/server_session.cpp
+++ b/src/core/hle/kernel/server_session.cpp
@@ -171,7 +171,7 @@ ResultCode ServerSession::CompleteSyncRequest() {
 
     // Some service requests require the thread to block
     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
         if (!context.IsThreadWaiting()) {
             context.GetThread().ResumeFromWait();
             context.GetThread().SetSynchronizationResults(nullptr, result);
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 2760a307c..30d60aeb6 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -345,7 +345,7 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
 
     auto thread = kernel.CurrentScheduler()->GetCurrentThread();
     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
         thread->InvalidateHLECallback();
         thread->SetStatus(ThreadStatus::WaitIPC);
         session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming());
@@ -359,7 +359,7 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
     }
 
     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
         auto* sync_object = thread->GetHLESyncObject();
         sync_object->RemoveWaitingThread(SharedFrom(thread));
     }
@@ -1691,7 +1691,7 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add
     }
 
     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
 
         auto* owner = current_thread->GetLockOwner();
         if (owner != nullptr) {
@@ -1724,7 +1724,7 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_
 
     // Retrieve a list of all threads that are waiting for this condition variable.
     auto& kernel = system.Kernel();
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
     auto* const current_process = kernel.CurrentProcess();
     std::vector<std::shared_ptr<Thread>> waiting_threads =
         current_process->GetConditionVariableThreads(condition_variable_addr);
diff --git a/src/core/hle/kernel/synchronization.cpp b/src/core/hle/kernel/synchronization.cpp
index 6651ad90c..d3f520ea2 100644
--- a/src/core/hle/kernel/synchronization.cpp
+++ b/src/core/hle/kernel/synchronization.cpp
@@ -19,7 +19,7 @@ Synchronization::Synchronization(Core::System& system) : system{system} {}
 
 void Synchronization::SignalObject(SynchronizationObject& obj) const {
     auto& kernel = system.Kernel();
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
     if (obj.IsSignaled()) {
         for (auto thread : obj.GetWaitingThreads()) {
             if (thread->GetSchedulingStatus() == ThreadSchedStatus::Paused) {
@@ -90,7 +90,7 @@ std::pair<ResultCode, Handle> Synchronization::WaitFor(
     }
 
     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
         ResultCode signaling_result = thread->GetSignalingResult();
         SynchronizationObject* signaling_object = thread->GetSignalingObject();
         thread->SetSynchronizationObjects(nullptr);
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 6f89238ca..a4f9e0d97 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -51,7 +51,7 @@ Thread::~Thread() = default;
 
 void Thread::Stop() {
     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
         SetStatus(ThreadStatus::Dead);
         Signal();
         kernel.GlobalHandleTable().Close(global_handle);
@@ -68,7 +68,7 @@ void Thread::Stop() {
 }
 
 void Thread::ResumeFromWait() {
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
     switch (status) {
     case ThreadStatus::Paused:
     case ThreadStatus::WaitSynch:
@@ -100,19 +100,18 @@
 }
 
 void Thread::OnWakeUp() {
-    SchedulerLock lock(kernel);
-
+    KScopedSchedulerLock lock(kernel);
     SetStatus(ThreadStatus::Ready);
 }
 
 ResultCode Thread::Start() {
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
     SetStatus(ThreadStatus::Ready);
     return RESULT_SUCCESS;
 }
 
 void Thread::CancelWait() {
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
     if (GetSchedulingStatus() != ThreadSchedStatus::Paused || !is_waiting_on_sync) {
         is_sync_cancelled = true;
         return;
@@ -228,7 +227,7 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
 }
 
 void Thread::SetPriority(u32 priority) {
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
     ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST,
                "Invalid priority value.");
     nominal_priority = priority;
@@ -365,7 +364,7 @@ bool Thread::InvokeHLECallback(std::shared_ptr<Thread> thread) {
 }
 
 ResultCode Thread::SetActivity(ThreadActivity value) {
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
 
     auto sched_status = GetSchedulingStatus();
 
@@ -435,7 +434,7 @@ void Thread::SetCurrentPriority(u32 new_priority) {
 }
 
 ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
     const auto HighestSetCore = [](u64 mask, u32 max_cores) {
         for (s32 core = static_cast<s32>(max_cores - 1); core >= 0; core--) {
             if (((mask >> core) & 1) != 0) {
diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp
index 8e4769694..aea53cdb0 100644
--- a/src/core/hle/kernel/time_manager.cpp
+++ b/src/core/hle/kernel/time_manager.cpp
@@ -18,7 +18,7 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
     time_manager_event_type = Core::Timing::CreateEvent(
         "Kernel::TimeManagerCallback",
         [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) {
-            const SchedulerLock lock(system.Kernel());
+            const KScopedSchedulerLock lock(system.Kernel());
            const auto proper_handle = static_cast<Handle>(thread_handle);
            if (cancelled_events[proper_handle]) {
                return;
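
Note on the pattern this change introduces: k_scoped_lock.h defines a KLockable concept (any non-reference type exposing void Lock()/Unlock()) and an RAII guard KScopedLock that acquires the lock in its constructor and releases it in its destructor; KScopedSchedulerLock then binds that guard to the global scheduler lock, which is why every former SchedulerLock call site changes only its type name. The following is a minimal, self-contained sketch of the same idiom as a standalone C++20 program; the DummyLock type and main() are hypothetical and exist only for illustration (the real header instead inherits NonCopyable from common/common_types.h and relies on transitive includes).

#include <concepts>
#include <memory>
#include <type_traits>

// Concept: anything with void Lock()/Unlock() can be guarded.
template <typename T>
concept KLockable = !std::is_reference_v<T> && requires(T& t) {
    { t.Lock() } -> std::same_as<void>;
    { t.Unlock() } -> std::same_as<void>;
};

// RAII guard: Lock() on construction, Unlock() on scope exit.
template <typename T>
    requires KLockable<T>
class KScopedLock {
public:
    explicit KScopedLock(T* l) : lock_ptr(l) {
        lock_ptr->Lock();
    }
    explicit KScopedLock(T& l) : KScopedLock(std::addressof(l)) {}
    ~KScopedLock() {
        lock_ptr->Unlock();
    }

    // Non-copyable, mirroring the NonCopyable base used in the real header.
    KScopedLock(const KScopedLock&) = delete;
    KScopedLock& operator=(const KScopedLock&) = delete;

private:
    T* lock_ptr;
};

// Hypothetical lockable type used only to exercise the guard.
struct DummyLock {
    void Lock() {}
    void Unlock() {}
};

int main() {
    DummyLock lock;
    {
        KScopedLock guard(lock); // Lock() here, Unlock() at the closing brace
    }
}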