author     Liam <byteslice@airmail.cc>    2022-07-06 05:27:25 +0200
committer  Liam <byteslice@airmail.cc>    2022-07-15 04:47:18 +0200
commit     21945ae127480c8332c1110ceada2df4a42a5848
tree       a385c64a14b0d8e8dd71410eaa47575462f8f368 /src/core/hle/kernel
parent     kernel: use KScheduler from mesosphere
Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/global_scheduler_context.cpp |   5
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp              | 173
-rw-r--r--  src/core/hle/kernel/k_scheduler.h                |  24
-rw-r--r--  src/core/hle/kernel/k_thread.cpp                 |   5
-rw-r--r--  src/core/hle/kernel/k_thread.h                   |  24
-rw-r--r--  src/core/hle/kernel/kernel.cpp                   |  19
-rw-r--r--  src/core/hle/kernel/physical_core.cpp            |   1
7 files changed, 120 insertions, 131 deletions
diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp
index 21fd5cb67..65576b8c4 100644
--- a/src/core/hle/kernel/global_scheduler_context.cpp
+++ b/src/core/hle/kernel/global_scheduler_context.cpp
@@ -42,11 +42,6 @@ void GlobalSchedulerContext::PreemptThreads() {
     for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
         const u32 priority = preemption_priorities[core_id];
         KScheduler::RotateScheduledQueue(kernel, core_id, priority);
-
-        // Signal an interrupt occurred. For core 3, this is a certainty, as preemption will result
-        // in the rotator thread being scheduled. For cores 0-2, this is to simulate or system
-        // interrupts that may have occurred.
-        kernel.PhysicalCore(core_id).Interrupt();
    }
 }
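
With the Interrupt() call gone, preemption in PreemptThreads() reduces to rotating each core's scheduled queue at its preemption priority. As a rough standalone illustration of what a rotation at one priority level does (standard C++ only; ThreadId and RotateThreads are hypothetical stand-ins for KScheduler::RotateScheduledQueue, which operates on yuzu's intrusive priority queues):

// Simplified model of priority-queue rotation during preemption.
#include <cstdint>
#include <deque>
#include <iostream>

using ThreadId = std::uint32_t;

// Move the thread at the front of one priority level to the back, giving the
// next thread at the same priority a chance to run.
void RotateThreads(std::deque<ThreadId>& queue) {
    if (queue.size() < 2) {
        return; // Nothing to rotate.
    }
    queue.push_back(queue.front());
    queue.pop_front();
}

int main() {
    std::deque<ThreadId> priority_level{1, 2, 3};
    RotateThreads(priority_level); // Queue is now 2, 3, 1.
    for (const ThreadId id : priority_level) {
        std::cout << id << ' ';
    }
    std::cout << '\n';
}
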
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 13915dbd9..cac96a780 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -28,9 +28,9 @@ static void IncrementScheduledCount(Kernel::KThread* thread) {
 }
 
 KScheduler::KScheduler(KernelCore& kernel_) : kernel{kernel_} {
-    m_idle_stack = std::make_shared<Common::Fiber>([this] {
+    m_switch_fiber = std::make_shared<Common::Fiber>([this] {
         while (true) {
-            ScheduleImplOffStack();
+            ScheduleImplFiber();
         }
     });
 
@@ -60,9 +60,9 @@ void KScheduler::DisableScheduling(KernelCore& kernel) {
 void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
     ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 1);
 
-    auto* scheduler = kernel.CurrentScheduler();
+    auto* scheduler{kernel.CurrentScheduler()};
 
-    if (!scheduler) {
+    if (!scheduler || kernel.IsPhantomModeForSingleCore()) {
         // HACK: we cannot schedule from this thread, it is not a core thread
         RescheduleCores(kernel, cores_needing_scheduling);
         if (GetCurrentThread(kernel).GetDisableDispatchCount() == 1) {
@@ -125,9 +125,9 @@ void KScheduler::RescheduleCurrentCoreImpl() {
     }
 }
 
-void KScheduler::Initialize(KThread* idle_thread) {
+void KScheduler::Initialize(KThread* main_thread, KThread* idle_thread, s32 core_id) {
     // Set core ID/idle thread/interrupt task manager.
-    m_core_id = GetCurrentCoreId(kernel);
+    m_core_id = core_id;
     m_idle_thread = idle_thread;
     // m_state.idle_thread_stack = m_idle_thread->GetStackTop();
     // m_state.interrupt_task_manager = &kernel.GetInterruptTaskManager();
@@ -142,10 +142,10 @@
     // Bind interrupt handler.
     // kernel.GetInterruptManager().BindHandler(
     //     GetSchedulerInterruptHandler(kernel), KInterruptName::Scheduler, m_core_id,
-    //     KInterruptController::PriorityLevel_Scheduler, false, false);
+    //     KInterruptController::PriorityLevel::Scheduler, false, false);
 
     // Set the current thread.
-    m_current_thread = GetCurrentThreadPointer(kernel);
+    m_current_thread = main_thread;
 }
 
 void KScheduler::Activate() {
@@ -156,6 +156,10 @@
     RescheduleCurrentCore();
 }
 
+void KScheduler::OnThreadStart() {
+    GetCurrentThread(kernel).EnableDispatch();
+}
+
 u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
     if (KThread* prev_highest_thread = m_state.highest_priority_thread;
         prev_highest_thread != highest_thread) [[likely]] {
@@ -372,37 +376,30 @@ void KScheduler::ScheduleImpl() {
     }
 
     // The highest priority thread is not the same as the current thread.
-    // Switch to the idle thread stack and continue executing from there.
-    m_idle_cur_thread = cur_thread;
-    m_idle_highest_priority_thread = highest_priority_thread;
-    Common::Fiber::YieldTo(cur_thread->host_context, *m_idle_stack);
+    // Jump to the switcher and continue executing from there.
+    m_switch_cur_thread = cur_thread;
+    m_switch_highest_priority_thread = highest_priority_thread;
+    m_switch_from_schedule = true;
+    Common::Fiber::YieldTo(cur_thread->host_context, *m_switch_fiber);
 
     // Returning from ScheduleImpl occurs after this thread has been scheduled again.
 }
 
-void KScheduler::ScheduleImplOffStack() {
-    KThread* const cur_thread{m_idle_cur_thread};
-    KThread* highest_priority_thread{m_idle_highest_priority_thread};
+void KScheduler::ScheduleImplFiber() {
+    KThread* const cur_thread{m_switch_cur_thread};
+    KThread* highest_priority_thread{m_switch_highest_priority_thread};
 
-    // Get a reference to the current thread's stack parameters.
-    auto& sp{cur_thread->GetStackParameters()};
-
-    // Save the original thread context.
-    {
-        auto& physical_core = kernel.System().CurrentPhysicalCore();
-        auto& cpu_core = physical_core.ArmInterface();
-        cpu_core.SaveContext(cur_thread->GetContext32());
-        cpu_core.SaveContext(cur_thread->GetContext64());
-        // Save the TPIDR_EL0 system register in case it was modified.
-        cur_thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
-        cpu_core.ClearExclusiveState();
+    // If we're not coming from scheduling (i.e., we came from SC preemption),
+    // we should restart the scheduling loop directly. Not accurate to HOS.
+    if (!m_switch_from_schedule) {
+        goto retry;
     }
 
-    // Check if the thread is terminated by checking the DPC flags.
-    if ((sp.dpc_flags & static_cast<u32>(DpcFlag::Terminated)) == 0) {
-        // The thread isn't terminated, so we want to unlock it.
-        sp.m_lock.store(false, std::memory_order_seq_cst);
-    }
+    // Mark that we are not coming from scheduling anymore.
+    m_switch_from_schedule = false;
+
+    // Save the original thread context.
+    Unload(cur_thread);
 
     // The current thread's context has been entirely taken care of.
     // Now we want to loop until we successfully switch the thread context.
@@ -411,62 +408,39 @@
     // Check if the highest priority thread is null.
     if (!highest_priority_thread) {
         // The next thread is nullptr!
-        // Switch to nullptr. This will actually switch to the idle thread.
-        SwitchThread(nullptr);
-
-        // We've switched to the idle thread, so we want to process interrupt tasks until we
-        // schedule a non-idle thread.
-        while (!m_state.interrupt_task_runnable) {
-            // Check if we need scheduling.
-            if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) {
-                goto retry;
-            }
-
-            // Clear the previous thread.
-            m_state.prev_thread = nullptr;
+        // Switch to the idle thread. Note: HOS treats idling as a special case for
+        // performance. This is not *required* for yuzu's purposes, and for singlecore
+        // compatibility, we can just move the logic that would go here into the execution
+        // of the idle thread. If we ever remove singlecore, we should implement this
+        // accurately to HOS.
+        highest_priority_thread = m_idle_thread;
+    }
 
-            // Wait for an interrupt before checking again.
-            kernel.System().GetCpuManager().WaitForAndHandleInterrupt();
+    // We want to try to lock the highest priority thread's context.
+    // Try to take it.
+    while (!highest_priority_thread->context_guard.try_lock()) {
+        // The highest priority thread's context is already locked.
+        // Check if we need scheduling. If we don't, we can retry directly.
+        if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) {
+            // If we do, another core is interfering, and we must start again.
+            goto retry;
         }
+    }
 
-        // Execute any pending interrupt tasks.
-        // m_state.interrupt_task_manager->DoTasks();
-
-        // Clear the interrupt task thread as runnable.
-        m_state.interrupt_task_runnable = false;
+    // It's time to switch the thread.
+    // Switch to the highest priority thread.
+    SwitchThread(highest_priority_thread);
 
-        // Retry the scheduling loop.
+    // Check if we need scheduling. If we do, then we can't complete the switch and should
+    // retry.
+    if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) {
+        // Our switch failed.
+        // We should unlock the thread context, and then retry.
+        highest_priority_thread->context_guard.unlock();
        goto retry;
     } else {
-        // We want to try to lock the highest priority thread's context.
-        // Try to take it.
-        bool expected{false};
-        while (!highest_priority_thread->stack_parameters.m_lock.compare_exchange_strong(
-            expected, true, std::memory_order_seq_cst)) {
-            // The highest priority thread's context is already locked.
-            // Check if we need scheduling. If we don't, we can retry directly.
-            if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) {
-                // If we do, another core is interfering, and we must start again.
-                goto retry;
-            }
-            expected = false;
-        }
-
-        // It's time to switch the thread.
-        // Switch to the highest priority thread.
-        SwitchThread(highest_priority_thread);
-
-        // Check if we need scheduling. If we do, then we can't complete the switch and should
-        // retry.
-        if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) {
-            // Our switch failed.
-            // We should unlock the thread context, and then retry.
-            highest_priority_thread->stack_parameters.m_lock.store(false,
-                                                                   std::memory_order_seq_cst);
-            goto retry;
-        } else {
-            break;
-        }
+        break;
     }
 
 retry:
@@ -480,18 +454,35 @@
     }
 
     // Reload the guest thread context.
-    {
-        auto& cpu_core = kernel.System().CurrentArmInterface();
-        cpu_core.LoadContext(highest_priority_thread->GetContext32());
-        cpu_core.LoadContext(highest_priority_thread->GetContext64());
-        cpu_core.SetTlsAddress(highest_priority_thread->GetTLSAddress());
-        cpu_core.SetTPIDR_EL0(highest_priority_thread->GetTPIDR_EL0());
-        cpu_core.LoadWatchpointArray(highest_priority_thread->GetOwnerProcess()->GetWatchpoints());
-        cpu_core.ClearExclusiveState();
-    }
+    Reload(highest_priority_thread);
 
     // Reload the host thread.
-    Common::Fiber::YieldTo(m_idle_stack, *highest_priority_thread->host_context);
+    Common::Fiber::YieldTo(m_switch_fiber, *highest_priority_thread->host_context);
+}
+
+void KScheduler::Unload(KThread* thread) {
+    auto& cpu_core = kernel.System().ArmInterface(m_core_id);
+    cpu_core.SaveContext(thread->GetContext32());
+    cpu_core.SaveContext(thread->GetContext64());
+    // Save the TPIDR_EL0 system register in case it was modified.
+    thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
+    cpu_core.ClearExclusiveState();
+
+    // Check if the thread is terminated by checking the DPC flags.
+    if ((thread->GetStackParameters().dpc_flags & static_cast<u32>(DpcFlag::Terminated)) == 0) {
+        // The thread isn't terminated, so we want to unlock it.
+        thread->context_guard.unlock();
+    }
+}
+
+void KScheduler::Reload(KThread* thread) {
+    auto& cpu_core = kernel.System().ArmInterface(m_core_id);
+    cpu_core.LoadContext(thread->GetContext32());
+    cpu_core.LoadContext(thread->GetContext64());
+    cpu_core.SetTlsAddress(thread->GetTLSAddress());
+    cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
+    cpu_core.LoadWatchpointArray(thread->GetOwnerProcess()->GetWatchpoints());
+    cpu_core.ClearExclusiveState();
 }
 
 void KScheduler::ClearPreviousThread(KernelCore& kernel, KThread* thread) {
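
The heart of the new ScheduleImplFiber() is an acquire-then-revalidate pattern: lock the candidate thread's context_guard, then re-check needs_scheduling before committing; if another core set the flag, drop the lock and restart from retry. A minimal standalone sketch of the same pattern, with std::mutex and std::atomic standing in for KThread's context_guard and the scheduler state (TrySwitchTo is a hypothetical name, not a yuzu API):

// Model of the lock-then-revalidate switch loop. Illustrative only.
#include <atomic>
#include <iostream>
#include <mutex>

std::atomic<bool> needs_scheduling{false};
std::mutex context_guard; // Protects the candidate thread's context.

bool TrySwitchTo() {
    // Spin until we own the candidate's context, bailing out whenever
    // another core invalidates our scheduling decision.
    while (!context_guard.try_lock()) {
        if (needs_scheduling.load(std::memory_order_seq_cst)) {
            return false; // Another core interfered; restart scheduling.
        }
    }

    // SwitchThread(...) would happen here in the real scheduler.

    // Revalidate after acquiring: if scheduling is needed again, the
    // switch failed and the context must be released.
    if (needs_scheduling.load(std::memory_order_seq_cst)) {
        context_guard.unlock();
        return false;
    }
    return true; // Context is locked; the switch can complete.
}

int main() {
    if (TrySwitchTo()) {
        std::cout << "switch committed\n";
        context_guard.unlock(); // Released later, when the thread unloads.
    } else {
        std::cout << "retry scheduling\n";
    }
}
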
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 8f4eebf6a..91e870933 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -41,8 +41,11 @@ public:
     explicit KScheduler(KernelCore& kernel);
     ~KScheduler();
 
-    void Initialize(KThread* idle_thread);
+    void Initialize(KThread* main_thread, KThread* idle_thread, s32 core_id);
     void Activate();
+    void OnThreadStart();
+    void Unload(KThread* thread);
+    void Reload(KThread* thread);
 
     void SetInterruptTaskRunnable();
     void RequestScheduleOnInterrupt();
@@ -55,6 +58,14 @@ public:
         return m_idle_thread;
     }
 
+    bool IsIdle() const {
+        return m_current_thread.load() == m_idle_thread;
+    }
+
+    std::shared_ptr<Common::Fiber> GetSwitchFiber() {
+        return m_switch_fiber;
+    }
+
     KThread* GetPreviousThread() const {
         return m_state.prev_thread;
     }
@@ -69,7 +80,7 @@ public:
 
     // Static public API.
     static bool CanSchedule(KernelCore& kernel) {
-        return kernel.GetCurrentEmuThread()->GetDisableDispatchCount() == 0;
+        return GetCurrentThread(kernel).GetDisableDispatchCount() == 0;
     }
     static bool IsSchedulerLockedByCurrentThread(KernelCore& kernel) {
         return kernel.GlobalSchedulerContext().scheduler_lock.IsLockedByCurrentThread();
@@ -113,7 +124,7 @@ private:
     // Instanced private API.
     void ScheduleImpl();
-    void ScheduleImplOffStack();
+    void ScheduleImplFiber();
     void SwitchThread(KThread* next_thread);
 
     void Schedule();
@@ -147,9 +158,10 @@ private:
     KThread* m_idle_thread{nullptr};
     std::atomic<KThread*> m_current_thread{nullptr};
 
-    std::shared_ptr<Common::Fiber> m_idle_stack{};
-    KThread* m_idle_cur_thread{};
-    KThread* m_idle_highest_priority_thread{};
+    std::shared_ptr<Common::Fiber> m_switch_fiber{};
+    KThread* m_switch_cur_thread{};
+    KThread* m_switch_highest_priority_thread{};
+    bool m_switch_from_schedule{};
 };
 
 class KScopedSchedulerLock : public KScopedLock<KScheduler::LockType> {
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 9daa589b5..d5d390f04 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -268,7 +268,7 @@ Result KThread::InitializeMainThread(Core::System& system, KThread* thread, s32
 
 Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
     return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main,
-                            abort);
+                            system.GetCpuManager().GetIdleThreadStartFunc());
 }
 
 Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread,
@@ -1204,8 +1204,9 @@ KScopedDisableDispatch::~KScopedDisableDispatch() {
         return;
     }
 
-    // Skip the reschedule if single-core, as dispatch tracking is disabled here.
+    // Skip the reschedule if single-core.
     if (!Settings::values.use_multi_core.GetValue()) {
+        GetCurrentThread(kernel).EnableDispatch();
         return;
     }
 
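
With IsDispatchTrackingDisabled() removed (see the k_thread.h hunk below), disable_count becomes a plain balanced counter on every thread, and KScopedDisableDispatch pairs the increment with the decrement. A minimal standalone model of that RAII shape (ThreadState and ScopedDisableDispatch are illustrative names, not yuzu's types):

// Model of scoped dispatch disabling with a balanced per-thread counter.
#include <cassert>
#include <iostream>

struct ThreadState {
    int disable_count{0};
};

class ScopedDisableDispatch {
public:
    explicit ScopedDisableDispatch(ThreadState& thread) : m_thread{thread} {
        assert(m_thread.disable_count >= 0);
        ++m_thread.disable_count; // Mirrors DisableDispatch().
    }
    ~ScopedDisableDispatch() {
        assert(m_thread.disable_count > 0);
        --m_thread.disable_count; // Mirrors EnableDispatch(); the real
                                  // destructor also reschedules at zero.
    }

private:
    ThreadState& m_thread;
};

int main() {
    ThreadState thread;
    {
        ScopedDisableDispatch guard{thread};
        std::cout << "dispatch disabled: " << thread.disable_count << '\n';
    }
    std::cout << "dispatch enabled: " << thread.disable_count << '\n';
}
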
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index 416a861a9..1fc8f5f3e 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -439,7 +439,6 @@ public:
         bool is_pinned;
         s32 disable_count;
         KThread* cur_thread;
-        std::atomic<bool> m_lock;
     };
 
     [[nodiscard]] StackParameters& GetStackParameters() {
@@ -485,39 +484,16 @@ public:
         return per_core_priority_queue_entry[core];
     }
 
-    [[nodiscard]] bool IsKernelThread() const {
-        return GetActiveCore() == 3;
-    }
-
-    [[nodiscard]] bool IsDispatchTrackingDisabled() const {
-        return is_single_core || IsKernelThread();
-    }
-
     [[nodiscard]] s32 GetDisableDispatchCount() const {
-        if (IsDispatchTrackingDisabled()) {
-            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
-            return 1;
-        }
-
         return this->GetStackParameters().disable_count;
     }
 
     void DisableDispatch() {
-        if (IsDispatchTrackingDisabled()) {
-            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
-            return;
-        }
-
         ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
         this->GetStackParameters().disable_count++;
     }
 
     void EnableDispatch() {
-        if (IsDispatchTrackingDisabled()) {
-            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
-            return;
-        }
-
         ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0);
         this->GetStackParameters().disable_count--;
     }
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 10e1f47f6..926c6dc84 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -64,8 +64,6 @@ struct KernelCore::Impl {
 
         is_phantom_mode_for_singlecore = false;
 
-        InitializePhysicalCores();
-
         // Derive the initial memory layout from the emulated board
         Init::InitializeSlabResourceCounts(kernel);
         DeriveInitialMemoryLayout();
@@ -77,6 +75,7 @@ struct KernelCore::Impl {
         Init::InitializeKPageBufferSlabHeap(system);
         InitializeShutdownThreads();
         InitializePreemption(kernel);
+        InitializePhysicalCores();
         RegisterHostThread();
     }
 
@@ -193,8 +192,21 @@ struct KernelCore::Impl {
         exclusive_monitor =
             Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES);
         for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
+            const s32 core{static_cast<s32>(i)};
+
             schedulers[i] = std::make_unique<Kernel::KScheduler>(system.Kernel());
             cores.emplace_back(i, system, *schedulers[i], interrupts);
+
+            auto* main_thread{Kernel::KThread::Create(system.Kernel())};
+            main_thread->SetName(fmt::format("MainThread:{}", core));
+            main_thread->SetCurrentCore(core);
+            ASSERT(Kernel::KThread::InitializeMainThread(system, main_thread, core).IsSuccess());
+
+            auto* idle_thread{Kernel::KThread::Create(system.Kernel())};
+            idle_thread->SetCurrentCore(core);
+            ASSERT(Kernel::KThread::InitializeIdleThread(system, idle_thread, core).IsSuccess());
+
+            schedulers[i]->Initialize(main_thread, idle_thread, core);
         }
     }
 
@@ -1093,10 +1105,11 @@ void KernelCore::Suspend(bool suspended) {
 }
 
 void KernelCore::ShutdownCores() {
+    KScopedSchedulerLock lk{*this};
+
     for (auto* thread : impl->shutdown_threads) {
         void(thread->Run());
     }
-    InterruptAllPhysicalCores();
 }
 
 bool KernelCore::IsMulticore() const {
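
ShutdownCores() now holds the scheduler lock for the whole loop instead of interrupting every physical core afterward, so queueing the shutdown threads cannot interleave with a concurrent reschedule. A standalone model of that shape, with std::mutex standing in for the global scheduler lock (g_sched_lock, FakeThread, and ShutdownAll are illustrative names):

// Model of serializing shutdown against scheduling with a scoped lock.
#include <iostream>
#include <mutex>
#include <vector>

std::mutex g_sched_lock; // Stand-in for the global scheduler lock.

struct FakeThread {
    int id;
    void Run() { std::cout << "shutdown thread " << id << " queued\n"; }
};

// Hold the lock for the whole loop so no core can reschedule mid-shutdown.
void ShutdownAll(std::vector<FakeThread>& shutdown_threads) {
    std::scoped_lock lk{g_sched_lock};
    for (auto& thread : shutdown_threads) {
        thread.Run();
    }
}

int main() {
    std::vector<FakeThread> threads{{0}, {1}, {2}, {3}};
    ShutdownAll(threads);
}
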
diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp
index a5b16ae2e..6e7dacf97 100644
--- a/src/core/hle/kernel/physical_core.cpp
+++ b/src/core/hle/kernel/physical_core.cpp
@@ -43,6 +43,7 @@ void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) {
 
 void PhysicalCore::Run() {
     arm_interface->Run();
+    arm_interface->ClearExclusiveState();
 }
 
 void PhysicalCore::Idle() {
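
The added ClearExclusiveState() call drops any load-exclusive reservation when the core stops running, so a reservation taken by the outgoing thread cannot leak into whichever thread the core runs next. A toy model of the failure it guards against (MockMonitor is an illustrative stand-in, not yuzu's exclusive monitor API):

// Why exclusive (LDXR/STXR-style) state is cleared at scheduling boundaries.
#include <iostream>

struct MockMonitor {
    bool has_reservation{false};

    void LoadExclusive() { has_reservation = true; }
    bool StoreExclusive() {
        // Succeeds only if the reservation is still valid.
        const bool ok = has_reservation;
        has_reservation = false;
        return ok;
    }
    void ClearExclusiveState() { has_reservation = false; }
};

int main() {
    MockMonitor monitor;
    monitor.LoadExclusive();       // Outgoing thread begins an exclusive sequence.
    monitor.ClearExclusiveState(); // Core exits Run(); the stale reservation is dropped.
    // The next thread's store-exclusive must now fail and retry its own loop,
    // instead of spuriously succeeding on the stale reservation.
    std::cout << (monitor.StoreExclusive() ? "store ok\n" : "store failed\n");
}
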