Diffstat:
-rw-r--r--  src/core/cpu_manager.cpp                          | 152
-rw-r--r--  src/core/cpu_manager.h                            |  11
-rw-r--r--  src/core/hle/kernel/global_scheduler_context.cpp  |   5
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp               | 173
-rw-r--r--  src/core/hle/kernel/k_scheduler.h                 |  24
-rw-r--r--  src/core/hle/kernel/k_thread.cpp                  |   5
-rw-r--r--  src/core/hle/kernel/k_thread.h                    |  24
-rw-r--r--  src/core/hle/kernel/kernel.cpp                    |  19
-rw-r--r--  src/core/hle/kernel/physical_core.cpp             |   1
9 files changed, 225 insertions(+), 189 deletions(-)
diff --git a/src/core/cpu_manager.cpp b/src/core/cpu_manager.cpp
index 428194129..838d6be21 100644
--- a/src/core/cpu_manager.cpp
+++ b/src/core/cpu_manager.cpp
@@ -42,19 +42,19 @@ void CpuManager::Shutdown() {
     }
 }
 
-void CpuManager::GuestActivateFunction() {
+void CpuManager::GuestThreadFunction() {
     if (is_multicore) {
-        MultiCoreGuestActivate();
+        MultiCoreRunGuestThread();
     } else {
-        SingleCoreGuestActivate();
+        SingleCoreRunGuestThread();
     }
 }
 
-void CpuManager::GuestThreadFunction() {
+void CpuManager::IdleThreadFunction() {
     if (is_multicore) {
-        MultiCoreRunGuestThread();
+        MultiCoreRunIdleThread();
     } else {
-        SingleCoreRunGuestThread();
+        SingleCoreRunIdleThread();
     }
 }
 
@@ -62,19 +62,6 @@ void CpuManager::ShutdownThreadFunction() {
     ShutdownThread();
 }
 
-void CpuManager::WaitForAndHandleInterrupt() {
-    auto& kernel = system.Kernel();
-    auto& physical_core = kernel.CurrentPhysicalCore();
-
-    ASSERT(Kernel::GetCurrentThread(kernel).GetDisableDispatchCount() == 1);
-
-    if (!physical_core.IsInterrupted()) {
-        physical_core.Idle();
-    }
-
-    HandleInterrupt();
-}
-
 void CpuManager::HandleInterrupt() {
     auto& kernel = system.Kernel();
     auto core_index = kernel.CurrentPhysicalCoreIndex();
@@ -86,49 +73,121 @@ void CpuManager::HandleInterrupt() {
 /// MultiCore                                                               ///
 ///////////////////////////////////////////////////////////////////////////////
 
-void CpuManager::MultiCoreGuestActivate() {
-    // Similar to the HorizonKernelMain callback in HOS
+void CpuManager::MultiCoreRunGuestThread() {
+    // Similar to UserModeThreadStarter in HOS
     auto& kernel = system.Kernel();
-    auto* scheduler = kernel.CurrentScheduler();
+    kernel.CurrentScheduler()->OnThreadStart();
 
-    scheduler->Activate();
-    UNREACHABLE();
+    while (true) {
+        auto* physical_core = &kernel.CurrentPhysicalCore();
+        while (!physical_core->IsInterrupted()) {
+            physical_core->Run();
+            physical_core = &kernel.CurrentPhysicalCore();
+        }
+
+        HandleInterrupt();
+    }
 }
 
-void CpuManager::MultiCoreRunGuestThread() {
-    // Similar to UserModeThreadStarter in HOS
+void CpuManager::MultiCoreRunIdleThread() {
+    // Not accurate to HOS. Remove this entire method when singlecore is removed.
+    // See notes in KScheduler::ScheduleImpl for more information about why this
+    // is inaccurate.
+
     auto& kernel = system.Kernel();
-    auto* thread = kernel.GetCurrentEmuThread();
-    thread->EnableDispatch();
+    kernel.CurrentScheduler()->OnThreadStart();
+
+    while (true) {
+        auto& physical_core = kernel.CurrentPhysicalCore();
+        if (!physical_core.IsInterrupted()) {
+            physical_core.Idle();
+        }
 
-    MultiCoreRunGuestLoop();
+        HandleInterrupt();
+    }
 }
 
-void CpuManager::MultiCoreRunGuestLoop() {
+///////////////////////////////////////////////////////////////////////////////
+/// SingleCore                                                              ///
+///////////////////////////////////////////////////////////////////////////////
+
+void CpuManager::SingleCoreRunGuestThread() {
     auto& kernel = system.Kernel();
+    kernel.CurrentScheduler()->OnThreadStart();
 
     while (true) {
         auto* physical_core = &kernel.CurrentPhysicalCore();
-        while (!physical_core->IsInterrupted()) {
+        if (!physical_core->IsInterrupted()) {
             physical_core->Run();
             physical_core = &kernel.CurrentPhysicalCore();
         }
 
+        kernel.SetIsPhantomModeForSingleCore(true);
+        system.CoreTiming().Advance();
+        kernel.SetIsPhantomModeForSingleCore(false);
+
+        PreemptSingleCore();
         HandleInterrupt();
     }
 }
 
-///////////////////////////////////////////////////////////////////////////////
-/// SingleCore                                                              ///
-///////////////////////////////////////////////////////////////////////////////
+void CpuManager::SingleCoreRunIdleThread() {
+    auto& kernel = system.Kernel();
+    kernel.CurrentScheduler()->OnThreadStart();
+
+    while (true) {
+        PreemptSingleCore(false);
+        system.CoreTiming().AddTicks(1000U);
+        idle_count++;
+        HandleInterrupt();
+    }
+}
 
-void CpuManager::SingleCoreGuestActivate() {}
+void CpuManager::PreemptSingleCore(bool from_running_environment) {
+    {
+        auto& kernel = system.Kernel();
+        auto& scheduler = kernel.Scheduler(current_core);
+
+        Kernel::KThread* current_thread = scheduler.GetSchedulerCurrentThread();
+        if (idle_count >= 4 || from_running_environment) {
+            if (!from_running_environment) {
+                system.CoreTiming().Idle();
+                idle_count = 0;
+            }
+            kernel.SetIsPhantomModeForSingleCore(true);
+            system.CoreTiming().Advance();
+            kernel.SetIsPhantomModeForSingleCore(false);
+        }
+        current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES);
+        system.CoreTiming().ResetTicks();
+        scheduler.Unload(scheduler.GetSchedulerCurrentThread());
 
-void CpuManager::SingleCoreRunGuestThread() {}
+        auto& next_scheduler = kernel.Scheduler(current_core);
 
-void CpuManager::SingleCoreRunGuestLoop() {}
+        // Disable dispatch. We're about to preempt this thread.
+        Kernel::KScopedDisableDispatch dd{kernel};
+        Common::Fiber::YieldTo(current_thread->GetHostContext(), *next_scheduler.GetSwitchFiber());
+    }
 
-void CpuManager::PreemptSingleCore(bool from_running_enviroment) {}
+    // We've now been scheduled again, and we may have exchanged schedulers.
+    // Reload the scheduler in case it's different.
+    {
+        auto& scheduler = system.Kernel().Scheduler(current_core);
+        scheduler.Reload(scheduler.GetSchedulerCurrentThread());
+        if (!scheduler.IsIdle()) {
+            idle_count = 0;
+        }
+    }
+}
+
+void CpuManager::GuestActivate() {
+    // Similar to the HorizonKernelMain callback in HOS
+    auto& kernel = system.Kernel();
+    auto* scheduler = kernel.CurrentScheduler();
+
+    scheduler->Activate();
+    UNREACHABLE();
+}
 
 void CpuManager::ShutdownThread() {
     auto& kernel = system.Kernel();
@@ -168,20 +227,11 @@ void CpuManager::RunThread(std::size_t core) {
     }
 
     auto& kernel = system.Kernel();
+    auto& scheduler = *kernel.CurrentScheduler();
+    auto* thread = scheduler.GetSchedulerCurrentThread();
+    Kernel::SetCurrentThread(kernel, thread);
 
-    auto* main_thread = Kernel::KThread::Create(kernel);
-    main_thread->SetName(fmt::format("MainThread:{}", core));
-    ASSERT(Kernel::KThread::InitializeMainThread(system, main_thread, static_cast<s32>(core))
-               .IsSuccess());
-
-    auto* idle_thread = Kernel::KThread::Create(kernel);
-    ASSERT(Kernel::KThread::InitializeIdleThread(system, idle_thread, static_cast<s32>(core))
-               .IsSuccess());
-
-    kernel.SetCurrentEmuThread(main_thread);
-    kernel.CurrentScheduler()->Initialize(idle_thread);
-
-    Common::Fiber::YieldTo(data.host_context, *main_thread->GetHostContext());
+    Common::Fiber::YieldTo(data.host_context, *thread->GetHostContext());
 }
 
 } // namespace Core
diff --git a/src/core/cpu_manager.h b/src/core/cpu_manager.h
index 8143424af..835505b92 100644
--- a/src/core/cpu_manager.h
+++ b/src/core/cpu_manager.h
@@ -48,12 +48,11 @@ public:
         gpu_barrier->Sync();
     }
 
-    void WaitForAndHandleInterrupt();
     void Initialize();
     void Shutdown();
 
     std::function<void()> GetGuestActivateFunc() {
-        return [this] { GuestActivateFunction(); };
+        return [this] { GuestActivate(); };
     }
     std::function<void()> GetGuestThreadFunc() {
         return [this] { GuestThreadFunction(); };
@@ -72,21 +71,19 @@ public:
     }
 
 private:
-    void GuestActivateFunction();
     void GuestThreadFunction();
     void IdleThreadFunction();
     void ShutdownThreadFunction();
 
-    void MultiCoreGuestActivate();
     void MultiCoreRunGuestThread();
-    void MultiCoreRunGuestLoop();
+    void MultiCoreRunIdleThread();
 
-    void SingleCoreGuestActivate();
     void SingleCoreRunGuestThread();
-    void SingleCoreRunGuestLoop();
+    void SingleCoreRunIdleThread();
 
     static void ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager, std::size_t core);
 
+    void GuestActivate();
     void HandleInterrupt();
     void ShutdownThread();
     void RunThread(std::size_t core);
diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp
index 21fd5cb67..65576b8c4 100644
--- a/src/core/hle/kernel/global_scheduler_context.cpp
+++ b/src/core/hle/kernel/global_scheduler_context.cpp
@@ -42,11 +42,6 @@ void GlobalSchedulerContext::PreemptThreads() {
     for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
         const u32 priority = preemption_priorities[core_id];
         KScheduler::RotateScheduledQueue(kernel, core_id, priority);
-
-        // Signal an interrupt occurred. For core 3, this is a certainty, as preemption will result
-        // in the rotator thread being scheduled. For cores 0-2, this is to simulate or system
-        // interrupts that may have occurred.
-        kernel.PhysicalCore(core_id).Interrupt();
     }
 }
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 13915dbd9..cac96a780 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -28,9 +28,9 @@ static void IncrementScheduledCount(Kernel::KThread* thread) {
 }
 
 KScheduler::KScheduler(KernelCore& kernel_) : kernel{kernel_} {
-    m_idle_stack = std::make_shared<Common::Fiber>([this] {
+    m_switch_fiber = std::make_shared<Common::Fiber>([this] {
         while (true) {
-            ScheduleImplOffStack();
+            ScheduleImplFiber();
         }
     });
 
@@ -60,9 +60,9 @@ void KScheduler::DisableScheduling(KernelCore& kernel) {
 void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
     ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 1);
 
-    auto* scheduler = kernel.CurrentScheduler();
+    auto* scheduler{kernel.CurrentScheduler()};
 
-    if (!scheduler) {
+    if (!scheduler || kernel.IsPhantomModeForSingleCore()) {
         // HACK: we cannot schedule from this thread, it is not a core thread
         RescheduleCores(kernel, cores_needing_scheduling);
         if (GetCurrentThread(kernel).GetDisableDispatchCount() == 1) {
@@ -125,9 +125,9 @@ void KScheduler::RescheduleCurrentCoreImpl() {
     }
 }
 
-void KScheduler::Initialize(KThread* idle_thread) {
+void KScheduler::Initialize(KThread* main_thread, KThread* idle_thread, s32 core_id) {
     // Set core ID/idle thread/interrupt task manager.
-    m_core_id = GetCurrentCoreId(kernel);
+    m_core_id = core_id;
     m_idle_thread = idle_thread;
     // m_state.idle_thread_stack = m_idle_thread->GetStackTop();
     // m_state.interrupt_task_manager = &kernel.GetInterruptTaskManager();
@@ -142,10 +142,10 @@
     // Bind interrupt handler.
     // kernel.GetInterruptManager().BindHandler(
     //    GetSchedulerInterruptHandler(kernel), KInterruptName::Scheduler, m_core_id,
-    //    KInterruptController::PriorityLevel_Scheduler, false, false);
+    //    KInterruptController::PriorityLevel::Scheduler, false, false);
 
     // Set the current thread.
-    m_current_thread = GetCurrentThreadPointer(kernel);
+    m_current_thread = main_thread;
 }
 
 void KScheduler::Activate() {
@@ -156,6 +156,10 @@
     RescheduleCurrentCore();
 }
 
+void KScheduler::OnThreadStart() {
+    GetCurrentThread(kernel).EnableDispatch();
+}
+
 u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
     if (KThread* prev_highest_thread = m_state.highest_priority_thread;
         prev_highest_thread != highest_thread) [[likely]] {
@@ -372,37 +376,30 @@ void KScheduler::ScheduleImpl() {
     }
 
     // The highest priority thread is not the same as the current thread.
-    // Switch to the idle thread stack and continue executing from there.
-    m_idle_cur_thread = cur_thread;
-    m_idle_highest_priority_thread = highest_priority_thread;
-    Common::Fiber::YieldTo(cur_thread->host_context, *m_idle_stack);
+    // Jump to the switcher and continue executing from there.
+    m_switch_cur_thread = cur_thread;
+    m_switch_highest_priority_thread = highest_priority_thread;
+    m_switch_from_schedule = true;
+    Common::Fiber::YieldTo(cur_thread->host_context, *m_switch_fiber);
 
     // Returning from ScheduleImpl occurs after this thread has been scheduled again.
 }
 
-void KScheduler::ScheduleImplOffStack() {
-    KThread* const cur_thread{m_idle_cur_thread};
-    KThread* highest_priority_thread{m_idle_highest_priority_thread};
+void KScheduler::ScheduleImplFiber() {
+    KThread* const cur_thread{m_switch_cur_thread};
+    KThread* highest_priority_thread{m_switch_highest_priority_thread};
 
-    // Get a reference to the current thread's stack parameters.
-    auto& sp{cur_thread->GetStackParameters()};
-
-    // Save the original thread context.
-    {
-        auto& physical_core = kernel.System().CurrentPhysicalCore();
-        auto& cpu_core = physical_core.ArmInterface();
-        cpu_core.SaveContext(cur_thread->GetContext32());
-        cpu_core.SaveContext(cur_thread->GetContext64());
-        // Save the TPIDR_EL0 system register in case it was modified.
-        cur_thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
-        cpu_core.ClearExclusiveState();
+    // If we're not coming from scheduling (i.e., we came from SC preemption),
+    // we should restart the scheduling loop directly. Not accurate to HOS.
+    if (!m_switch_from_schedule) {
+        goto retry;
     }
 
-    // Check if the thread is terminated by checking the DPC flags.
-    if ((sp.dpc_flags & static_cast<u32>(DpcFlag::Terminated)) == 0) {
-        // The thread isn't terminated, so we want to unlock it.
-        sp.m_lock.store(false, std::memory_order_seq_cst);
-    }
+    // Mark that we are not coming from scheduling anymore.
+    m_switch_from_schedule = false;
+
+    // Save the original thread context.
+    Unload(cur_thread);
 
     // The current thread's context has been entirely taken care of.
     // Now we want to loop until we successfully switch the thread context.
@@ -411,62 +408,39 @@
         // Check if the highest priority thread is null.
         if (!highest_priority_thread) {
             // The next thread is nullptr!
-            // Switch to nullptr. This will actually switch to the idle thread.
-            SwitchThread(nullptr);
-
-            // We've switched to the idle thread, so we want to process interrupt tasks until we
-            // schedule a non-idle thread.
-            while (!m_state.interrupt_task_runnable) {
-                // Check if we need scheduling.
-                if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) {
-                    goto retry;
-                }
 
-                // Clear the previous thread.
-                m_state.prev_thread = nullptr;
+            // Switch to the idle thread. Note: HOS treats idling as a special case for
+            // performance. This is not *required* for yuzu's purposes, and for singlecore
+            // compatibility, we can just move the logic that would go here into the execution
+            // of the idle thread. If we ever remove singlecore, we should implement this
+            // accurately to HOS.
+            highest_priority_thread = m_idle_thread;
+        }
 
-                // Wait for an interrupt before checking again.
-                kernel.System().GetCpuManager().WaitForAndHandleInterrupt();
+        // We want to try to lock the highest priority thread's context.
+        // Try to take it.
+        while (!highest_priority_thread->context_guard.try_lock()) {
+            // The highest priority thread's context is already locked.
+            // Check if we need scheduling. If we don't, we can retry directly.
+            if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) {
+                // If we do, another core is interfering, and we must start again.
+                goto retry;
             }
+        }
 
-            // Execute any pending interrupt tasks.
-            // m_state.interrupt_task_manager->DoTasks();
-
-            // Clear the interrupt task thread as runnable.
-            m_state.interrupt_task_runnable = false;
+        // It's time to switch the thread.
+        // Switch to the highest priority thread.
+        SwitchThread(highest_priority_thread);
 
-            // Retry the scheduling loop.
+        // Check if we need scheduling. If we do, then we can't complete the switch and should
+        // retry.
+        if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) {
+            // Our switch failed.
+            // We should unlock the thread context, and then retry.
+            highest_priority_thread->context_guard.unlock();
             goto retry;
         } else {
-            // We want to try to lock the highest priority thread's context.
-            // Try to take it.
-            bool expected{false};
-            while (!highest_priority_thread->stack_parameters.m_lock.compare_exchange_strong(
-                expected, true, std::memory_order_seq_cst)) {
-                // The highest priority thread's context is already locked.
-                // Check if we need scheduling. If we don't, we can retry directly.
-                if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) {
-                    // If we do, another core is interfering, and we must start again.
-                    goto retry;
-                }
-                expected = false;
-            }
-
-            // It's time to switch the thread.
-            // Switch to the highest priority thread.
-            SwitchThread(highest_priority_thread);
-
-            // Check if we need scheduling. If we do, then we can't complete the switch and should
-            // retry.
-            if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) {
-                // Our switch failed.
-                // We should unlock the thread context, and then retry.
-                highest_priority_thread->stack_parameters.m_lock.store(false,
-                                                                       std::memory_order_seq_cst);
-                goto retry;
-            } else {
-                break;
-            }
+            break;
         }
 
     retry:
@@ -480,18 +454,35 @@
     }
 
     // Reload the guest thread context.
-    {
-        auto& cpu_core = kernel.System().CurrentArmInterface();
-        cpu_core.LoadContext(highest_priority_thread->GetContext32());
-        cpu_core.LoadContext(highest_priority_thread->GetContext64());
-        cpu_core.SetTlsAddress(highest_priority_thread->GetTLSAddress());
-        cpu_core.SetTPIDR_EL0(highest_priority_thread->GetTPIDR_EL0());
-        cpu_core.LoadWatchpointArray(highest_priority_thread->GetOwnerProcess()->GetWatchpoints());
-        cpu_core.ClearExclusiveState();
-    }
+    Reload(highest_priority_thread);
 
     // Reload the host thread.
-    Common::Fiber::YieldTo(m_idle_stack, *highest_priority_thread->host_context);
+    Common::Fiber::YieldTo(m_switch_fiber, *highest_priority_thread->host_context);
+}
+
+void KScheduler::Unload(KThread* thread) {
+    auto& cpu_core = kernel.System().ArmInterface(m_core_id);
+    cpu_core.SaveContext(thread->GetContext32());
+    cpu_core.SaveContext(thread->GetContext64());
+    // Save the TPIDR_EL0 system register in case it was modified.
+    thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
+    cpu_core.ClearExclusiveState();
+
+    // Check if the thread is terminated by checking the DPC flags.
+    if ((thread->GetStackParameters().dpc_flags & static_cast<u32>(DpcFlag::Terminated)) == 0) {
+        // The thread isn't terminated, so we want to unlock it.
+        thread->context_guard.unlock();
+    }
+}
+
+void KScheduler::Reload(KThread* thread) {
+    auto& cpu_core = kernel.System().ArmInterface(m_core_id);
+    cpu_core.LoadContext(thread->GetContext32());
+    cpu_core.LoadContext(thread->GetContext64());
+    cpu_core.SetTlsAddress(thread->GetTLSAddress());
+    cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
+    cpu_core.LoadWatchpointArray(thread->GetOwnerProcess()->GetWatchpoints());
+    cpu_core.ClearExclusiveState();
 }
 
 void KScheduler::ClearPreviousThread(KernelCore& kernel, KThread* thread) {
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 8f4eebf6a..91e870933 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -41,8 +41,11 @@ public:
     explicit KScheduler(KernelCore& kernel);
     ~KScheduler();
 
-    void Initialize(KThread* idle_thread);
+    void Initialize(KThread* main_thread, KThread* idle_thread, s32 core_id);
     void Activate();
+    void OnThreadStart();
+    void Unload(KThread* thread);
+    void Reload(KThread* thread);
 
     void SetInterruptTaskRunnable();
     void RequestScheduleOnInterrupt();
@@ -55,6 +58,14 @@ public:
         return m_idle_thread;
     }
 
+    bool IsIdle() const {
+        return m_current_thread.load() == m_idle_thread;
+    }
+
+    std::shared_ptr<Common::Fiber> GetSwitchFiber() {
+        return m_switch_fiber;
+    }
+
     KThread* GetPreviousThread() const {
         return m_state.prev_thread;
     }
@@ -69,7 +80,7 @@ public:
 
     // Static public API.
     static bool CanSchedule(KernelCore& kernel) {
-        return kernel.GetCurrentEmuThread()->GetDisableDispatchCount() == 0;
+        return GetCurrentThread(kernel).GetDisableDispatchCount() == 0;
     }
     static bool IsSchedulerLockedByCurrentThread(KernelCore& kernel) {
         return kernel.GlobalSchedulerContext().scheduler_lock.IsLockedByCurrentThread();
@@ -113,7 +124,7 @@ private:
 
     // Instanced private API.
     void ScheduleImpl();
-    void ScheduleImplOffStack();
+    void ScheduleImplFiber();
     void SwitchThread(KThread* next_thread);
 
     void Schedule();
@@ -147,9 +158,10 @@ private:
 
     KThread* m_idle_thread{nullptr};
     std::atomic<KThread*> m_current_thread{nullptr};
 
-    std::shared_ptr<Common::Fiber> m_idle_stack{};
-    KThread* m_idle_cur_thread{};
-    KThread* m_idle_highest_priority_thread{};
+    std::shared_ptr<Common::Fiber> m_switch_fiber{};
+    KThread* m_switch_cur_thread{};
+    KThread* m_switch_highest_priority_thread{};
+    bool m_switch_from_schedule{};
 };
 
 class KScopedSchedulerLock : public KScopedLock<KScheduler::LockType> {
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 9daa589b5..d5d390f04 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -268,7 +268,7 @@ Result KThread::InitializeMainThread(Core::System& system, KThread* thread, s32
 
 Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
     return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main,
-                            abort);
+                            system.GetCpuManager().GetIdleThreadStartFunc());
 }
 
 Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread,
@@ -1204,8 +1204,9 @@ KScopedDisableDispatch::~KScopedDisableDispatch() {
         return;
     }
 
-    // Skip the reschedule if single-core, as dispatch tracking is disabled here.
+    // Skip the reschedule if single-core.
     if (!Settings::values.use_multi_core.GetValue()) {
+        GetCurrentThread(kernel).EnableDispatch();
         return;
     }
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index 416a861a9..1fc8f5f3e 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -439,7 +439,6 @@ public:
         bool is_pinned;
         s32 disable_count;
         KThread* cur_thread;
-        std::atomic<bool> m_lock;
     };
 
     [[nodiscard]] StackParameters& GetStackParameters() {
@@ -485,39 +484,16 @@ public:
         return per_core_priority_queue_entry[core];
     }
 
-    [[nodiscard]] bool IsKernelThread() const {
-        return GetActiveCore() == 3;
-    }
-
-    [[nodiscard]] bool IsDispatchTrackingDisabled() const {
-        return is_single_core || IsKernelThread();
-    }
-
     [[nodiscard]] s32 GetDisableDispatchCount() const {
-        if (IsDispatchTrackingDisabled()) {
-            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
-            return 1;
-        }
-
         return this->GetStackParameters().disable_count;
     }
 
     void DisableDispatch() {
-        if (IsDispatchTrackingDisabled()) {
-            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
-            return;
-        }
-
         ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
         this->GetStackParameters().disable_count++;
     }
 
     void EnableDispatch() {
-        if (IsDispatchTrackingDisabled()) {
-            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
-            return;
-        }
-
         ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0);
         this->GetStackParameters().disable_count--;
     }
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 10e1f47f6..926c6dc84 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -64,8 +64,6 @@ struct KernelCore::Impl {
 
         is_phantom_mode_for_singlecore = false;
 
-        InitializePhysicalCores();
-
         // Derive the initial memory layout from the emulated board
         Init::InitializeSlabResourceCounts(kernel);
         DeriveInitialMemoryLayout();
@@ -77,6 +75,7 @@ struct KernelCore::Impl {
         Init::InitializeKPageBufferSlabHeap(system);
         InitializeShutdownThreads();
         InitializePreemption(kernel);
+        InitializePhysicalCores();
 
         RegisterHostThread();
     }
@@ -193,8 +192,21 @@ struct KernelCore::Impl {
         exclusive_monitor =
             Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES);
         for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
+            const s32 core{static_cast<s32>(i)};
+
             schedulers[i] = std::make_unique<Kernel::KScheduler>(system.Kernel());
             cores.emplace_back(i, system, *schedulers[i], interrupts);
+
+            auto* main_thread{Kernel::KThread::Create(system.Kernel())};
+            main_thread->SetName(fmt::format("MainThread:{}", core));
+            main_thread->SetCurrentCore(core);
+            ASSERT(Kernel::KThread::InitializeMainThread(system, main_thread, core).IsSuccess());
+
+            auto* idle_thread{Kernel::KThread::Create(system.Kernel())};
+            idle_thread->SetCurrentCore(core);
+            ASSERT(Kernel::KThread::InitializeIdleThread(system, idle_thread, core).IsSuccess());
+
+            schedulers[i]->Initialize(main_thread, idle_thread, core);
         }
     }
 
@@ -1093,10 +1105,11 @@ void KernelCore::Suspend(bool suspended) {
 }
 
 void KernelCore::ShutdownCores() {
+    KScopedSchedulerLock lk{*this};
+
     for (auto* thread : impl->shutdown_threads) {
         void(thread->Run());
     }
-    InterruptAllPhysicalCores();
 }
 
 bool KernelCore::IsMulticore() const {
diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp
index a5b16ae2e..6e7dacf97 100644
--- a/src/core/hle/kernel/physical_core.cpp
+++ b/src/core/hle/kernel/physical_core.cpp
@@ -43,6 +43,7 @@ void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) {
 
 void PhysicalCore::Run() {
     arm_interface->Run();
+    arm_interface->ClearExclusiveState();
 }
 
 void PhysicalCore::Idle() {
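
Note on the pattern above: the heart of this change is that a reschedule no longer performs the context switch on the outgoing thread's stack. ScheduleImpl instead yields to a dedicated per-core switch fiber (m_switch_fiber), which unloads the outgoing thread, releases its context_guard, picks and locks the next thread, reloads it, and only then yields into it. The following is a minimal, self-contained sketch of that handoff pattern, using POSIX <ucontext.h> in place of yuzu's Common::Fiber; every name in it (SwitcherBody, Worker, kTotalSwitches, and so on) is illustrative only and not part of the yuzu codebase, and the real scheduler's Unload/Reload/locking steps are reduced to comments.

// switch_fiber_sketch.cpp -- illustrative only; not yuzu code.
#include <ucontext.h>
#include <cstddef>
#include <cstdio>

namespace {

constexpr int kNumThreads = 2;
constexpr int kTotalSwitches = 6;
constexpr std::size_t kStackSize = 64 * 1024;

ucontext_t main_ctx;
ucontext_t switcher_ctx;             // plays the role of m_switch_fiber
ucontext_t thread_ctx[kNumThreads];  // the "guest threads'" host contexts
char switcher_stack[kStackSize];
char thread_stacks[kNumThreads][kStackSize];
int current = 0;

// Analogue of ScheduleImplFiber: every reschedule runs here, on a stack the
// scheduler owns, so the outgoing thread's stack is never executed after it
// has been "unloaded".
void SwitcherBody() {
    for (int switches = 0; switches < kTotalSwitches; ++switches) {
        // The real scheduler would Unload() the outgoing thread here, pick
        // the highest-priority candidate (locking its context_guard, with a
        // retry loop if another core interferes), then Reload() it.
        current = (current + 1) % kNumThreads;
        swapcontext(&switcher_ctx, &thread_ctx[current]);
    }
    swapcontext(&switcher_ctx, &main_ctx);  // demo finished; back to main()
}

void Worker(int id) {
    while (true) {
        std::printf("guest thread %d running\n", id);
        // A thread never jumps straight to its successor: it always yields
        // to the switcher first (cf. YieldTo(..., *m_switch_fiber)).
        swapcontext(&thread_ctx[id], &switcher_ctx);
    }
}

void PrepareContext(ucontext_t* ctx, char* stack, std::size_t size) {
    getcontext(ctx);
    ctx->uc_stack.ss_sp = stack;
    ctx->uc_stack.ss_size = size;
    ctx->uc_link = nullptr;  // workers loop forever, so this is never used
}

}  // namespace

int main() {
    PrepareContext(&switcher_ctx, switcher_stack, sizeof(switcher_stack));
    makecontext(&switcher_ctx, SwitcherBody, 0);

    for (int id = 0; id < kNumThreads; ++id) {
        PrepareContext(&thread_ctx[id], thread_stacks[id], sizeof(thread_stacks[id]));
        makecontext(&thread_ctx[id], reinterpret_cast<void (*)()>(Worker), 1, id);
    }

    // Enter the scheduling loop; control returns here when it finishes.
    swapcontext(&main_ctx, &switcher_ctx);
    std::puts("all switches complete");
    return 0;
}

Because every swap passes through the switcher's own stack, the outgoing thread's context can be unlocked (and immediately picked up by another core) without any risk that its stack is still executing, which is what lets ScheduleImplFiber release the guard in Unload() and retry failed switches safely.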