From 9e29e36a784496f7290c03b6a42e400a164a5b1e Mon Sep 17 00:00:00 2001
From: bunnei
Date: Wed, 2 Dec 2020 18:08:35 -0800
Subject: hle: kernel: Rewrite scheduler implementation based on Mesopshere.

---
 src/core/hle/kernel/k_scheduler.h | 297 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 297 insertions(+)
 create mode 100644 src/core/hle/kernel/k_scheduler.h

diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
new file mode 100644
index 000000000..535ee34b9
--- /dev/null
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -0,0 +1,297 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+// This file references various implementation details from Atmosphere, an open-source firmware for
+// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+
+#pragma once
+
+#include <atomic>
+#include <memory>
+#include <unordered_map>
+#include <vector>
+
+#include "common/common_types.h"
+#include "common/multi_level_queue.h"
+#include "common/scope_exit.h"
+#include "common/spin_lock.h"
+#include "core/core_timing.h"
+#include "core/hardware_properties.h"
+#include "core/hle/kernel/k_priority_queue.h"
+#include "core/hle/kernel/k_scheduler_lock.h"
+#include "core/hle/kernel/thread.h"
+
+namespace Common {
+class Fiber;
+}
+
+namespace Core {
+class ARM_Interface;
+class System;
+} // namespace Core
+
+namespace Kernel {
+
+class KernelCore;
+class Process;
+class SchedulerLock;
+
+using KSchedulerPriorityQueue =
+    KPriorityQueue<Thread, Core::Hardware::NUM_CPU_CORES, THREADPRIO_LOWEST, THREADPRIO_HIGHEST>;
+static constexpr s32 HighestCoreMigrationAllowedPriority = 2;
+
+class GlobalSchedulerContext final {
+    friend class KScheduler;
+
+public:
+    explicit GlobalSchedulerContext(KernelCore& kernel);
+    ~GlobalSchedulerContext();
+
+    /// Adds a new thread to the scheduler
+    void AddThread(std::shared_ptr<Thread> thread);
+
+    /// Removes a thread from the scheduler
+    void RemoveThread(std::shared_ptr<Thread> thread);
+
+    /// Returns a list of all threads managed by the scheduler
+    const std::vector<std::shared_ptr<Thread>>& GetThreadList() const {
+        return thread_list;
+    }
+
+    /**
+     * Rotates the scheduling queues of threads at a preemption priority and then does
+     * some core rebalancing. Preemption priorities can be found in the array
+     * 'preemption_priorities'.
+     *
+     * @note This operation happens every 10ms.
+     */
+    void PreemptThreads();
+
+    u32 CpuCoresCount() const {
+        return Core::Hardware::NUM_CPU_CORES;
+    }
+
+    bool IsLocked() const;
+
+private:
+    friend class SchedulerLock;
+
+    /// Lock the scheduler to the current thread.
+    void Lock();
+
+    /// Unlocks the scheduler, reselects threads, interrupts cores for rescheduling
+    /// and reschedules current core if needed.
+    void Unlock();
+
+    using LockType = KAbstractSchedulerLock<KScheduler>;
+
+    KernelCore& kernel;
+
+    std::atomic_bool scheduler_update_needed{};
+    KSchedulerPriorityQueue priority_queue;
+    LockType scheduler_lock;
+
+    /// Lists all thread ids that aren't deleted/etc.
+    std::vector<std::shared_ptr<Thread>> thread_list;
+    Common::SpinLock global_list_guard{};
+};
+
+class KScheduler final {
+public:
+    explicit KScheduler(Core::System& system, std::size_t core_id);
+    ~KScheduler();
+
+    /// Reschedules to the next available thread (call after current thread is suspended)
+    void RescheduleCurrentCore();
+
+    /// Reschedules cores pending reschedule, to be called on EnableScheduling.
+    static void RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule,
+                                Core::EmuThreadHandle global_thread);
+
+    /// The next two are for SingleCore Only.
+    /// Unload current thread before preempting core.
+    void Unload(Thread* thread);
+
+    /// Reload current thread after core preemption.
+    void Reload(Thread* thread);
+
+    /// Gets the current running thread
+    Thread* GetCurrentThread() const;
+
+    /// Gets the timestamp for the last context switch in ticks.
+    u64 GetLastContextSwitchTicks() const;
+
+    bool ContextSwitchPending() const {
+        return this->state.needs_scheduling;
+    }
+
+    void Initialize();
+
+    void OnThreadStart();
+
+    std::shared_ptr<Common::Fiber>& ControlContext() {
+        return switch_fiber;
+    }
+
+    const std::shared_ptr<Common::Fiber>& ControlContext() const {
+        return switch_fiber;
+    }
+
+    std::size_t CurrentCoreId() const {
+        return core_id;
+    }
+
+    u64 UpdateHighestPriorityThread(Thread* highest_thread);
+
+    /**
+     * Takes a thread and moves it to the back of the it's priority list.
+     *
+     * @note This operation can be redundant and no scheduling is changed if marked as so.
+     */
+    void YieldWithoutCoreMigration();
+
+    /**
+     * Takes a thread and moves it to the back of the it's priority list.
+     * Afterwards, tries to pick a suggested thread from the suggested queue that has worse time or
+     * a better priority than the next thread in the core.
+     *
+     * @note This operation can be redundant and no scheduling is changed if marked as so.
+     */
+    void YieldWithCoreMigration();
+
+    /**
+     * Takes a thread and moves it out of the scheduling queue.
+     * and into the suggested queue. If no thread can be scheduled afterwards in that core,
+     * a suggested thread is obtained instead.
+     *
+     * @note This operation can be redundant and no scheduling is changed if marked as so.
+     */
+    void YieldToAnyThread();
+
+    /// Notify the scheduler a thread's status has changed.
+    static void OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state);
+
+    /// Notify the scheduler a thread's priority has changed.
+    static void OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread,
+                                        u32 old_priority);
+
+    /// Notify the scheduler a thread's core and/or affinity mask has changed.
+    static void OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
+                                            const KAffinityMask& old_affinity, s32 old_core);
+
+private:
+    /**
+     * Takes care of selecting the new scheduled threads in three steps:
+     *
+     * 1. First a thread is selected from the top of the priority queue. If no thread
+     * is obtained then we move to step two, else we are done.
+     *
+     * 2. Second we try to get a suggested thread that's not assigned to any core or
+     * that is not the top thread in that core.
+     *
+     * 3. Third is no suggested thread is found, we do a second pass and pick a running
+     * thread in another core and swap it with its current thread.
+     *
+     * returns the cores needing scheduling.
+     */
+    static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel);
+
+    void RotateScheduledQueue(s32 core_id, s32 priority);
+
+public:
+    static bool CanSchedule(KernelCore& kernel);
+    static bool IsSchedulerUpdateNeeded(const KernelCore& kernel);
+    static void SetSchedulerUpdateNeeded(KernelCore& kernel);
+    static void ClearSchedulerUpdateNeeded(KernelCore& kernel);
+    static void DisableScheduling(KernelCore& kernel);
+    static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling,
+                                 Core::EmuThreadHandle global_thread);
+    static u64 UpdateHighestPriorityThreads(KernelCore& kernel);
+
+private:
+    friend class GlobalSchedulerContext;
+
+    static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel);
+
+    void Schedule() {
+        ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
+        this->ScheduleImpl();
+    }
+
+    /// Switches the CPU's active thread context to that of the specified thread
+    void ScheduleImpl();
+    void SwitchThread(Thread* next_thread);
+
+    /// When a thread wakes up, it must run this through it's new scheduler
+    void SwitchContextStep2();
+
+    /**
+     * Called on every context switch to update the internal timestamp
+     * This also updates the running time ticks for the given thread and
+     * process using the following difference:
+     *
+     * ticks += most_recent_ticks - last_context_switch_ticks
+     *
+     * The internal tick timestamp for the scheduler is simply the
+     * most recent tick count retrieved. No special arithmetic is
+     * applied to it.
+     */
+    void UpdateLastContextSwitchTime(Thread* thread, Process* process);
+
+    static void OnSwitch(void* this_scheduler);
+    void SwitchToCurrent();
+
+private:
+    Thread* current_thread{};
+    Thread* idle_thread{};
+
+    std::shared_ptr<Common::Fiber> switch_fiber{};
+
+    struct SchedulingState {
+        std::atomic<bool> needs_scheduling;
+        bool interrupt_task_thread_runnable{};
+        bool should_count_idle{};
+        u64 idle_count{};
+        Thread* highest_priority_thread{};
+        void* idle_thread_stack{};
+    };
+
+    SchedulingState state;
+
+    Core::System& system;
+    u64 last_context_switch_time{};
+    const std::size_t core_id;
+
+    Common::SpinLock guard{};
+};
+
+class SchedulerLock {
+public:
+    [[nodiscard]] explicit SchedulerLock(KernelCore& kernel);
+    ~SchedulerLock();
+
+protected:
+    KernelCore& kernel;
+};
+
+class SchedulerLockAndSleep : public SchedulerLock {
+public:
+    explicit SchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, Thread* time_task,
+                                   s64 nanoseconds);
+    ~SchedulerLockAndSleep();
+
+    void CancelSleep() {
+        sleep_cancelled = true;
+    }
+
+    void Release();
+
+private:
+    Handle& event_handle;
+    Thread* time_task;
+    s64 nanoseconds;
+    bool sleep_cancelled{};
+};
+
+} // namespace Kernel
--
cgit v1.2.3


From 8d3e06349e12e7de17c334619f1f986792d1de4b Mon Sep 17 00:00:00 2001
From: bunnei
Date: Thu, 3 Dec 2020 16:43:18 -0800
Subject: hle: kernel: Separate KScheduler from GlobalSchedulerContext class.
---
 src/core/hle/kernel/k_scheduler.h | 74 ++--------------------------------------
 1 file changed, 3 insertions(+), 71 deletions(-)

diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 535ee34b9..7f020d96e 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -8,94 +8,27 @@
 #pragma once

 #include <atomic>
-#include <memory>
-#include <unordered_map>
-#include <vector>

 #include "common/common_types.h"
-#include "common/multi_level_queue.h"
-#include "common/scope_exit.h"
 #include "common/spin_lock.h"
-#include "core/core_timing.h"
-#include "core/hardware_properties.h"
+#include "core/hle/kernel/global_scheduler_context.h"
 #include "core/hle/kernel/k_priority_queue.h"
 #include "core/hle/kernel/k_scheduler_lock.h"
-#include "core/hle/kernel/thread.h"

 namespace Common {
 class Fiber;
 }

 namespace Core {
-class ARM_Interface;
 class System;
-} // namespace Core
+}

 namespace Kernel {

 class KernelCore;
 class Process;
 class SchedulerLock;
-
-using KSchedulerPriorityQueue =
-    KPriorityQueue<Thread, Core::Hardware::NUM_CPU_CORES, THREADPRIO_LOWEST, THREADPRIO_HIGHEST>;
-static constexpr s32 HighestCoreMigrationAllowedPriority = 2;
-
-class GlobalSchedulerContext final {
-    friend class KScheduler;
-
-public:
-    explicit GlobalSchedulerContext(KernelCore& kernel);
-    ~GlobalSchedulerContext();
-
-    /// Adds a new thread to the scheduler
-    void AddThread(std::shared_ptr<Thread> thread);
-
-    /// Removes a thread from the scheduler
-    void RemoveThread(std::shared_ptr<Thread> thread);
-
-    /// Returns a list of all threads managed by the scheduler
-    const std::vector<std::shared_ptr<Thread>>& GetThreadList() const {
-        return thread_list;
-    }
-
-    /**
-     * Rotates the scheduling queues of threads at a preemption priority and then does
-     * some core rebalancing. Preemption priorities can be found in the array
-     * 'preemption_priorities'.
-     *
-     * @note This operation happens every 10ms.
-     */
-    void PreemptThreads();
-
-    u32 CpuCoresCount() const {
-        return Core::Hardware::NUM_CPU_CORES;
-    }
-
-    bool IsLocked() const;
-
-private:
-    friend class SchedulerLock;
-
-    /// Lock the scheduler to the current thread.
-    void Lock();
-
-    /// Unlocks the scheduler, reselects threads, interrupts cores for rescheduling
-    /// and reschedules current core if needed.
-    void Unlock();
-
-    using LockType = KAbstractSchedulerLock<KScheduler>;
-
-    KernelCore& kernel;
-
-    std::atomic_bool scheduler_update_needed{};
-    KSchedulerPriorityQueue priority_queue;
-    LockType scheduler_lock;
-
-    /// Lists all thread ids that aren't deleted/etc.
-    std::vector<std::shared_ptr<Thread>> thread_list;
-    Common::SpinLock global_list_guard{};
-};
+class Thread;

 class KScheduler final {
 public:
@@ -221,7 +154,6 @@ private:

     /// Switches the CPU's active thread context to that of the specified thread
     void ScheduleImpl();
-    void SwitchThread(Thread* next_thread);

     /// When a thread wakes up, it must run this through it's new scheduler
     void SwitchContextStep2();
--
cgit v1.2.3


From 4756cb203e8ef09377988eb1b49ca20ef45f4492 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Thu, 3 Dec 2020 21:56:02 -0800
Subject: hle: kernel: Separate KScopedSchedulerLockAndSleep from k_scheduler.
---
 src/core/hle/kernel/k_scheduler.h | 19 -------------------
 1 file changed, 19 deletions(-)

diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 7f020d96e..5ba0f3c32 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -207,23 +207,4 @@ protected:
     KernelCore& kernel;
 };

-class SchedulerLockAndSleep : public SchedulerLock {
-public:
-    explicit SchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, Thread* time_task,
-                                   s64 nanoseconds);
-    ~SchedulerLockAndSleep();
-
-    void CancelSleep() {
-        sleep_cancelled = true;
-    }
-
-    void Release();
-
-private:
-    Handle& event_handle;
-    Thread* time_task;
-    s64 nanoseconds;
-    bool sleep_cancelled{};
-};
-
 } // namespace Kernel
--
cgit v1.2.3


From ccce6cb3be062fc7ae162bed32202538ebc2e3d9 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Thu, 3 Dec 2020 22:26:42 -0800
Subject: hle: kernel: Migrate to KScopedSchedulerLock.

---
 src/core/hle/kernel/k_scheduler.h | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 5ba0f3c32..d52ecc0db 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -14,6 +14,7 @@
 #include "core/hle/kernel/global_scheduler_context.h"
 #include "core/hle/kernel/k_priority_queue.h"
 #include "core/hle/kernel/k_scheduler_lock.h"
+#include "core/hle/kernel/k_scoped_lock.h"

 namespace Common {
 class Fiber;
 }
@@ -198,13 +199,10 @@ private:
     Common::SpinLock guard{};
 };

-class SchedulerLock {
+class KScopedSchedulerLock : KScopedLock<GlobalSchedulerContext::LockType> {
 public:
-    [[nodiscard]] explicit SchedulerLock(KernelCore& kernel);
-    ~SchedulerLock();
-
-protected:
-    KernelCore& kernel;
+    explicit KScopedSchedulerLock(KernelCore& kernel);
+    ~KScopedSchedulerLock();
 };

 } // namespace Kernel
--
cgit v1.2.3


From 960500cfd2558c52597fff69c1bb0ea38d922b6a Mon Sep 17 00:00:00 2001
From: bunnei
Date: Sat, 5 Dec 2020 00:02:30 -0800
Subject: hle: kernel: KScheduler: Various style fixes based on code review feedback.

---
 src/core/hle/kernel/k_scheduler.h | 49 +++++++++++++++++----------------------
 1 file changed, 21 insertions(+), 28 deletions(-)

diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index d52ecc0db..e84abc84c 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -51,32 +51,28 @@ public:
     void Reload(Thread* thread);

     /// Gets the current running thread
-    Thread* GetCurrentThread() const;
+    [[nodiscard]] Thread* GetCurrentThread() const;

     /// Gets the timestamp for the last context switch in ticks.
-    u64 GetLastContextSwitchTicks() const;
+    [[nodiscard]] u64 GetLastContextSwitchTicks() const;

-    bool ContextSwitchPending() const {
-        return this->state.needs_scheduling;
+    [[nodiscard]] bool ContextSwitchPending() const {
+        return state.needs_scheduling.load(std::memory_order_relaxed);
     }

     void Initialize();

     void OnThreadStart();

-    std::shared_ptr<Common::Fiber>& ControlContext() {
+    [[nodiscard]] std::shared_ptr<Common::Fiber>& ControlContext() {
         return switch_fiber;
     }

-    const std::shared_ptr<Common::Fiber>& ControlContext() const {
+    [[nodiscard]] const std::shared_ptr<Common::Fiber>& ControlContext() const {
         return switch_fiber;
     }

-    std::size_t CurrentCoreId() const {
-        return core_id;
-    }
-
-    u64 UpdateHighestPriorityThread(Thread* highest_thread);
+    [[nodiscard]] u64 UpdateHighestPriorityThread(Thread* highest_thread);

     /**
      * Takes a thread and moves it to the back of the it's priority list.
@@ -114,7 +110,18 @@
     static void OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
                                             const KAffinityMask& old_affinity, s32 old_core);

+    static bool CanSchedule(KernelCore& kernel);
+    static bool IsSchedulerUpdateNeeded(const KernelCore& kernel);
+    static void SetSchedulerUpdateNeeded(KernelCore& kernel);
+    static void ClearSchedulerUpdateNeeded(KernelCore& kernel);
+    static void DisableScheduling(KernelCore& kernel);
+    static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling,
+                                 Core::EmuThreadHandle global_thread);
+    [[nodiscard]] static u64 UpdateHighestPriorityThreads(KernelCore& kernel);
+
+private:
+    friend class GlobalSchedulerContext;
+
     /**
      * Takes care of selecting the new scheduled threads in three steps:
      *
@@ -129,24 +136,11 @@
      *
      * returns the cores needing scheduling.
      */
-    static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel);
+    [[nodiscard]] static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel);

-    void RotateScheduledQueue(s32 core_id, s32 priority);
+    [[nodiscard]] static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel);

-public:
-    static bool CanSchedule(KernelCore& kernel);
-    static bool IsSchedulerUpdateNeeded(const KernelCore& kernel);
-    static void SetSchedulerUpdateNeeded(KernelCore& kernel);
-    static void ClearSchedulerUpdateNeeded(KernelCore& kernel);
-    static void DisableScheduling(KernelCore& kernel);
-    static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling,
-                                 Core::EmuThreadHandle global_thread);
-    static u64 UpdateHighestPriorityThreads(KernelCore& kernel);
-
-private:
-    friend class GlobalSchedulerContext;
-
-    static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel);
+    void RotateScheduledQueue(s32 core_id, s32 priority);

     void Schedule() {
         ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
@@ -175,7 +169,6 @@
     static void OnSwitch(void* this_scheduler);
     void SwitchToCurrent();

-private:
     Thread* current_thread{};
     Thread* idle_thread{};
--
cgit v1.2.3
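
The end state of this series has kernel code take the global scheduler lock through the RAII
KScopedSchedulerLock type instead of the old SchedulerLock. As a rough, hypothetical sketch of
that usage pattern (the function below is invented for illustration and is not part of these
patches; only the KScopedSchedulerLock type comes from the diffs above):

    // Hypothetical example: construction acquires the global scheduler lock,
    // destruction releases it and allows any pending reschedule to run.
    void AdjustSchedulerState(KernelCore& kernel) {
        KScopedSchedulerLock lock{kernel};
        // ... modify scheduler-protected state here ...
    }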