author | Liam <byteslice@airmail.cc> | 2023-03-07 04:31:50 +0100
committer | Liam <byteslice@airmail.cc> | 2023-03-13 03:06:53 +0100
commit | 7322c99e5fd335aa144620dff7f4d7a71932d35e (patch)
tree | 69d417b94fe2bb0ea1d42b07e36c121a62c597dc
parent | kernel: convert KMemoryLayout, KMemoryRegion*, KPageTableSlabHeap, KPriorityQueue (diff)
-rw-r--r-- | src/core/hle/kernel/k_scheduler_lock.h | 55 |
1 file changed, 24 insertions(+), 31 deletions(-)
```diff
diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h
index 13463717f..caa1404f1 100644
--- a/src/core/hle/kernel/k_scheduler_lock.h
+++ b/src/core/hle/kernel/k_scheduler_lock.h
@@ -14,74 +14,67 @@ namespace Kernel {
 
 class KernelCore;
+class GlobalSchedulerContext;
 
 template <typename SchedulerType>
 class KAbstractSchedulerLock {
 public:
-    explicit KAbstractSchedulerLock(KernelCore& kernel_) : kernel{kernel_} {}
+    explicit KAbstractSchedulerLock(KernelCore& kernel) : m_kernel{kernel} {}
 
     bool IsLockedByCurrentThread() const {
-        return owner_thread == GetCurrentThreadPointer(kernel);
+        return m_owner_thread == GetCurrentThreadPointer(m_kernel);
     }
 
     void Lock() {
-        // If we are shutting down the kernel, none of this is relevant anymore.
-        if (kernel.IsShuttingDown()) {
-            return;
-        }
-
-        if (IsLockedByCurrentThread()) {
+        if (this->IsLockedByCurrentThread()) {
             // If we already own the lock, the lock count should be > 0.
             // For debug, ensure this is true.
-            ASSERT(lock_count > 0);
+            ASSERT(m_lock_count > 0);
         } else {
             // Otherwise, we want to disable scheduling and acquire the spinlock.
-            SchedulerType::DisableScheduling(kernel);
-            spin_lock.Lock();
+            SchedulerType::DisableScheduling(m_kernel);
+            m_spin_lock.Lock();
 
-            ASSERT(lock_count == 0);
-            ASSERT(owner_thread == nullptr);
+            ASSERT(m_lock_count == 0);
+            ASSERT(m_owner_thread == nullptr);
 
             // Take ownership of the lock.
-            owner_thread = GetCurrentThreadPointer(kernel);
+            m_owner_thread = GetCurrentThreadPointer(m_kernel);
         }
 
         // Increment the lock count.
-        lock_count++;
+        m_lock_count++;
     }
 
     void Unlock() {
-        // If we are shutting down the kernel, none of this is relevant anymore.
-        if (kernel.IsShuttingDown()) {
-            return;
-        }
-
-        ASSERT(IsLockedByCurrentThread());
-        ASSERT(lock_count > 0);
+        ASSERT(this->IsLockedByCurrentThread());
+        ASSERT(m_lock_count > 0);
 
         // Release an instance of the lock.
-        if ((--lock_count) == 0) {
+        if ((--m_lock_count) == 0) {
             // Perform a memory barrier here.
             std::atomic_thread_fence(std::memory_order_seq_cst);
 
             // We're no longer going to hold the lock. Take note of what cores need scheduling.
             const u64 cores_needing_scheduling =
-                SchedulerType::UpdateHighestPriorityThreads(kernel);
+                SchedulerType::UpdateHighestPriorityThreads(m_kernel);
 
             // Note that we no longer hold the lock, and unlock the spinlock.
-            owner_thread = nullptr;
-            spin_lock.Unlock();
+            m_owner_thread = nullptr;
+            m_spin_lock.Unlock();
 
             // Enable scheduling, and perform a rescheduling operation.
-            SchedulerType::EnableScheduling(kernel, cores_needing_scheduling);
+            SchedulerType::EnableScheduling(m_kernel, cores_needing_scheduling);
         }
     }
 
 private:
-    KernelCore& kernel;
-    KAlignedSpinLock spin_lock{};
-    s32 lock_count{};
-    std::atomic<KThread*> owner_thread{};
+    friend class GlobalSchedulerContext;
+
+    KernelCore& m_kernel;
+    KAlignedSpinLock m_spin_lock{};
+    s32 m_lock_count{};
+    std::atomic<KThread*> m_owner_thread{};
 };
 
 } // namespace Kernel
```
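For context on the pattern this header implements: KAbstractSchedulerLock is a re-entrant lock that disables scheduling before taking a spinlock, counts nested acquires by the owning thread, and only re-enables scheduling when the outermost holder releases. The sketch below models that same acquire/release flow in plain, compilable C++. It is illustrative only: `FakeScheduler`, `RecursiveSchedulerLock`, and the `std::mutex` standing in for `KAlignedSpinLock` are hypothetical stand-ins, not yuzu's API.

```cpp
#include <atomic>
#include <cassert>
#include <mutex>
#include <thread>

// Hypothetical stand-ins for SchedulerType's static hooks in the diff above.
struct FakeScheduler {
    static void DisableScheduling() {}
    static unsigned long long UpdateHighestPriorityThreads() { return 0; }
    static void EnableScheduling(unsigned long long cores_needing_scheduling) {
        (void)cores_needing_scheduling;
    }
};

class RecursiveSchedulerLock {
public:
    bool IsLockedByCurrentThread() const {
        return m_owner_thread.load() == std::this_thread::get_id();
    }

    void Lock() {
        if (IsLockedByCurrentThread()) {
            // Re-entrant acquire by the owning thread: just bump the count.
            assert(m_lock_count > 0);
        } else {
            // First acquire: disable scheduling, then take the spinlock.
            FakeScheduler::DisableScheduling();
            m_spin_lock.lock();
            assert(m_lock_count == 0);
            m_owner_thread = std::this_thread::get_id();
        }
        m_lock_count++;
    }

    void Unlock() {
        assert(IsLockedByCurrentThread());
        assert(m_lock_count > 0);
        if (--m_lock_count == 0) {
            // Publish all writes made under the lock before releasing it.
            std::atomic_thread_fence(std::memory_order_seq_cst);
            const auto cores = FakeScheduler::UpdateHighestPriorityThreads();
            // Release ownership and the spinlock, then re-enable scheduling.
            m_owner_thread = std::thread::id{};
            m_spin_lock.unlock();
            FakeScheduler::EnableScheduling(cores);
        }
    }

private:
    std::mutex m_spin_lock;                        // stands in for KAlignedSpinLock
    int m_lock_count{};                            // recursion depth
    std::atomic<std::thread::id> m_owner_thread{}; // current owner, if any
};

int main() {
    RecursiveSchedulerLock lock;
    lock.Lock();
    lock.Lock();    // nested acquire by the same thread is allowed
    lock.Unlock();
    lock.Unlock();  // scheduling is re-enabled only on the outermost release
}
```

Note the release ordering mirrored from the diff: a seq_cst fence publishes writes made under the lock, ownership is cleared and the spinlock dropped, and only then is scheduling re-enabled with the set of cores that need a reschedule.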