Diffstat (limited to 'src/core/hle/kernel')
 src/core/hle/kernel/k_address_arbiter.cpp    |  4
 src/core/hle/kernel/k_auto_object.h          |  4
 src/core/hle/kernel/k_condition_variable.cpp |  2
 src/core/hle/kernel/k_handle_table.cpp       |  6
 src/core/hle/kernel/k_handle_table.h         |  2
 src/core/hle/kernel/k_process.cpp            |  1
 src/core/hle/kernel/k_scheduler.cpp          | 85
 src/core/hle/kernel/k_scheduler.h            |  2
 src/core/hle/kernel/k_thread.cpp             | 21
 src/core/hle/kernel/k_thread.h               | 36
 src/core/hle/kernel/kernel.cpp               | 57
 src/core/hle/kernel/kernel.h                 |  3
 src/core/hle/kernel/svc.cpp                  |  2

13 files changed, 151 insertions(+), 74 deletions(-)
diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp
index 1b429bc1e..6771ef621 100644
--- a/src/core/hle/kernel/k_address_arbiter.cpp
+++ b/src/core/hle/kernel/k_address_arbiter.cpp
@@ -28,7 +28,7 @@ bool ReadFromUser(Core::System& system, s32* out, VAddr address) {
 
 bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) {
     auto& monitor = system.Monitor();
-    const auto current_core = system.CurrentCoreIndex();
+    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
 
     // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
     // TODO(bunnei): We should call CanAccessAtomic(..) here.
@@ -58,7 +58,7 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu
 
 bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) {
     auto& monitor = system.Monitor();
-    const auto current_core = system.CurrentCoreIndex();
+    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
 
     // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
     // TODO(bunnei): We should call CanAccessAtomic(..) here.
diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h
index e4fcdbc67..165b76747 100644
--- a/src/core/hle/kernel/k_auto_object.h
+++ b/src/core/hle/kernel/k_auto_object.h
@@ -170,6 +170,10 @@ public:
         }
     }
 
+    const std::string& GetName() const {
+        return name;
+    }
+
 private:
     void RegisterWithKernel();
     void UnregisterWithKernel();
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index ef14ad1d2..4174f35fd 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -35,7 +35,7 @@ bool WriteToUser(Core::System& system, VAddr address, const u32* p) {
 bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero,
                       u32 new_orr_mask) {
     auto& monitor = system.Monitor();
-    const auto current_core = system.CurrentCoreIndex();
+    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
 
     // Load the value from the address.
     const auto expected = monitor.ExclusiveRead32(current_core, address);
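Note: the k_handle_table.cpp hunks that follow all add the same guard pair, a KScopedDisableDispatch constructed immediately before the KScopedSpinLock. The ordering is deliberate: destructors run in reverse, so the spin lock is released before dispatch is re-enabled, and a thread can no longer be switched out while it still holds the table lock. A minimal sketch of the idiom, assuming the kernel headers are available; the enclosing function is illustrative, not part of the change:

```cpp
// Illustrative only: the guard ordering added throughout KHandleTable.
void Example(KernelCore& kernel, KSpinLock& lock) {
    KScopedDisableDispatch dd(kernel); // 1. pin this thread to its core
    KScopedSpinLock lk(lock);          // 2. then take the spin lock
    // ... critical section runs without being preempted mid-spin ...
}   // 3. lk unlocks first, then dd may trigger a reschedule
```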
diff --git a/src/core/hle/kernel/k_handle_table.cpp b/src/core/hle/kernel/k_handle_table.cpp
index 6a420d5b0..d720c2dda 100644
--- a/src/core/hle/kernel/k_handle_table.cpp
+++ b/src/core/hle/kernel/k_handle_table.cpp
@@ -13,6 +13,7 @@ ResultCode KHandleTable::Finalize() {
     // Get the table and clear our record of it.
     u16 saved_table_size = 0;
     {
+        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);
 
         std::swap(m_table_size, saved_table_size);
@@ -43,6 +44,7 @@ bool KHandleTable::Remove(Handle handle) {
     // Find the object and free the entry.
     KAutoObject* obj = nullptr;
     {
+        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);
 
         if (this->IsValidHandle(handle)) {
@@ -61,6 +63,7 @@ bool KHandleTable::Remove(Handle handle) {
 }
 
 ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Never exceed our capacity.
@@ -83,6 +86,7 @@ ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
 }
 
 ResultCode KHandleTable::Reserve(Handle* out_handle) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Never exceed our capacity.
@@ -93,6 +97,7 @@ ResultCode KHandleTable::Reserve(Handle* out_handle) {
 }
 
 void KHandleTable::Unreserve(Handle handle) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Unpack the handle.
@@ -111,6 +116,7 @@ void KHandleTable::Unreserve(Handle handle) {
 }
 
 void KHandleTable::Register(Handle handle, KAutoObject* obj, u16 type) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Unpack the handle.
diff --git a/src/core/hle/kernel/k_handle_table.h b/src/core/hle/kernel/k_handle_table.h
index 2ff6aa160..75dcec7df 100644
--- a/src/core/hle/kernel/k_handle_table.h
+++ b/src/core/hle/kernel/k_handle_table.h
@@ -69,6 +69,7 @@ public:
     template <typename T = KAutoObject>
     KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const {
         // Lock and look up in table.
+        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);
 
         if constexpr (std::is_same_v<T, KAutoObject>) {
@@ -123,6 +124,7 @@ public:
         size_t num_opened;
         {
             // Lock the table.
+            KScopedDisableDispatch dd(kernel);
             KScopedSpinLock lk(m_lock);
             for (num_opened = 0; num_opened < num_handles; num_opened++) {
                 // Get the current handle.
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 8ead1a769..3d7e6707e 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -59,6 +59,7 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority
     thread->GetContext64().cpu_registers[0] = 0;
     thread->GetContext32().cpu_registers[1] = thread_handle;
     thread->GetContext64().cpu_registers[1] = thread_handle;
+    thread->DisableDispatch();
 
     auto& kernel = system.Kernel();
     // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 6a7d80d03..6ddbae52c 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -376,20 +376,18 @@ void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) {
 }
 
 void KScheduler::DisableScheduling(KernelCore& kernel) {
-    if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
-        ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0);
-        scheduler->GetCurrentThread()->DisableDispatch();
-    }
+    ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 0);
+    GetCurrentThreadPointer(kernel)->DisableDispatch();
 }
 
 void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
-    if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
-        ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1);
-        if (scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1) {
-            scheduler->GetCurrentThread()->EnableDispatch();
-        }
+    ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 1);
+
+    if (GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() > 1) {
+        GetCurrentThreadPointer(kernel)->EnableDispatch();
+    } else {
+        RescheduleCores(kernel, cores_needing_scheduling);
     }
-    RescheduleCores(kernel, cores_needing_scheduling);
 }
 
 u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
@@ -617,13 +615,17 @@ KScheduler::KScheduler(Core::System& system_, s32 core_id_) : system{system_}, c
     state.highest_priority_thread = nullptr;
 }
 
-KScheduler::~KScheduler() {
+void KScheduler::Finalize() {
     if (idle_thread) {
         idle_thread->Close();
         idle_thread = nullptr;
     }
 }
 
+KScheduler::~KScheduler() {
+    ASSERT(!idle_thread);
+}
+
 KThread* KScheduler::GetCurrentThread() const {
     if (auto result = current_thread.load(); result) {
         return result;
@@ -642,10 +644,12 @@ void KScheduler::RescheduleCurrentCore() {
     if (phys_core.IsInterrupted()) {
         phys_core.ClearInterrupt();
     }
+
     guard.Lock();
     if (state.needs_scheduling.load()) {
         Schedule();
     } else {
+        GetCurrentThread()->EnableDispatch();
         guard.Unlock();
     }
 }
@@ -655,26 +659,33 @@ void KScheduler::OnThreadStart() {
 }
 
 void KScheduler::Unload(KThread* thread) {
+    ASSERT(thread);
+
     LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr");
 
-    if (thread) {
-        if (thread->IsCallingSvc()) {
-            thread->ClearIsCallingSvc();
-        }
-        if (!thread->IsTerminationRequested()) {
-            prev_thread = thread;
-
-            Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
-            cpu_core.SaveContext(thread->GetContext32());
-            cpu_core.SaveContext(thread->GetContext64());
-            // Save the TPIDR_EL0 system register in case it was modified.
-            thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
-            cpu_core.ClearExclusiveState();
-        } else {
-            prev_thread = nullptr;
-        }
-        thread->context_guard.Unlock();
+    if (thread->IsCallingSvc()) {
+        thread->ClearIsCallingSvc();
     }
+
+    auto& physical_core = system.Kernel().PhysicalCore(core_id);
+    if (!physical_core.IsInitialized()) {
+        return;
+    }
+
+    Core::ARM_Interface& cpu_core = physical_core.ArmInterface();
+    cpu_core.SaveContext(thread->GetContext32());
+    cpu_core.SaveContext(thread->GetContext64());
+    // Save the TPIDR_EL0 system register in case it was modified.
+    thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
+    cpu_core.ClearExclusiveState();
+
+    if (!thread->IsTerminationRequested() && thread->GetActiveCore() == core_id) {
+        prev_thread = thread;
+    } else {
+        prev_thread = nullptr;
+    }
+
+    thread->context_guard.Unlock();
 }
 
 void KScheduler::Reload(KThread* thread) {
@@ -683,11 +694,6 @@ void KScheduler::Reload(KThread* thread) {
     if (thread) {
         ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");
 
-        auto* const thread_owner_process = thread->GetOwnerProcess();
-        if (thread_owner_process != nullptr) {
-            system.Kernel().MakeCurrentProcess(thread_owner_process);
-        }
-
         Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
         cpu_core.LoadContext(thread->GetContext32());
         cpu_core.LoadContext(thread->GetContext64());
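Note: in the scheduler hunks above, EnableScheduling() no longer reschedules unconditionally. A nested enable merely decrements the disable count; only the outermost enable reaches RescheduleCores(), with the matching decrement deferred to the reschedule path itself (e.g. the EnableDispatch() call added in RescheduleCurrentCore()). A compact model of that counting discipline, using a hypothetical standalone type rather than the literal implementation:

```cpp
#include <cstdint>

// Hypothetical model of the per-thread dispatch counter after this change.
struct DispatchCounter {
    std::int32_t disable_count = 0; // dispatch is permitted only at zero

    void Disable() {
        ++disable_count; // KScheduler::DisableScheduling()
    }

    // Returns true when the caller must reschedule; the matching decrement
    // is then performed by the reschedule path, not here.
    bool Enable() {
        if (disable_count > 1) {
            --disable_count; // nested scope: just unwind one level
            return false;
        }
        return true; // outermost scope: hand off to RescheduleCores()
    }
};
```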
@@ -705,7 +711,7 @@ void KScheduler::SwitchContextStep2() {
 }
 
 void KScheduler::ScheduleImpl() {
-    KThread* previous_thread = current_thread.load();
+    KThread* previous_thread = GetCurrentThread();
     KThread* next_thread = state.highest_priority_thread;
 
     state.needs_scheduling = false;
@@ -717,10 +723,15 @@
     // If we're not actually switching thread, there's nothing to do.
     if (next_thread == current_thread.load()) {
+        previous_thread->EnableDispatch();
         guard.Unlock();
         return;
     }
 
+    if (next_thread->GetCurrentCore() != core_id) {
+        next_thread->SetCurrentCore(core_id);
+    }
+
     current_thread.store(next_thread);
 
     KProcess* const previous_process = system.Kernel().CurrentProcess();
@@ -731,11 +742,7 @@ void KScheduler::ScheduleImpl() {
     Unload(previous_thread);
 
     std::shared_ptr<Common::Fiber>* old_context;
-    if (previous_thread != nullptr) {
-        old_context = &previous_thread->GetHostContext();
-    } else {
-        old_context = &idle_thread->GetHostContext();
-    }
+    old_context = &previous_thread->GetHostContext();
     guard.Unlock();
 
     Common::Fiber::YieldTo(*old_context, *switch_fiber);
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 12cfae919..516e0cdba 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -33,6 +33,8 @@ public:
     explicit KScheduler(Core::System& system_, s32 core_id_);
     ~KScheduler();
 
+    void Finalize();
+
     /// Reschedules to the next available thread (call after current thread is suspended)
     void RescheduleCurrentCore();
 
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 9f1d3156b..0f6808ade 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -14,6 +14,7 @@
 #include "common/fiber.h"
 #include "common/logging/log.h"
 #include "common/scope_exit.h"
+#include "common/settings.h"
 #include "common/thread_queue_list.h"
 #include "core/core.h"
 #include "core/cpu_manager.h"
@@ -188,7 +189,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
     // Setup the stack parameters.
     StackParameters& sp = GetStackParameters();
     sp.cur_thread = this;
-    sp.disable_count = 1;
+    sp.disable_count = 0;
     SetInExceptionHandler();
 
     // Set thread ID.
@@ -215,9 +216,10 @@ ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uint
     // Initialize the thread.
     R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));
 
-    // Initialize host context.
+    // Initialize emulation parameters.
     thread->host_context =
         std::make_shared<Common::Fiber>(std::move(init_func), init_func_parameter);
+    thread->is_single_core = !Settings::values.use_multi_core.GetValue();
 
     return ResultSuccess;
 }
@@ -970,6 +972,9 @@ ResultCode KThread::Run() {
 
         // Set our state and finish.
         SetState(ThreadState::Runnable);
+
+        DisableDispatch();
+
         return ResultSuccess;
     }
 }
@@ -1054,4 +1059,16 @@ s32 GetCurrentCoreId(KernelCore& kernel) {
     return GetCurrentThread(kernel).GetCurrentCore();
 }
 
+KScopedDisableDispatch::~KScopedDisableDispatch() {
+    if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) {
+        auto scheduler = kernel.CurrentScheduler();
+
+        if (scheduler) {
+            scheduler->RescheduleCurrentCore();
+        }
+    } else {
+        GetCurrentThread(kernel).EnableDispatch();
+    }
+}
+
 } // namespace Kernel
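Note: the KScopedDisableDispatch destructor defined above makes nesting cheap, since only the scope that drops the count back toward zero pays for a reschedule. A usage sketch, assuming the kernel headers are available; NestedExample is a hypothetical caller, while the guard type is the one added in this change:

```cpp
// Illustrative nesting of the RAII guard from k_thread.h / k_thread.cpp.
void NestedExample(KernelCore& kernel) {
    KScopedDisableDispatch outer(kernel); // disable_count: 0 -> 1
    {
        KScopedDisableDispatch inner(kernel); // disable_count: 1 -> 2
        // ... preemption-sensitive work ...
    } // inner dtor: count > 1, so it only decrements (2 -> 1)
} // outer dtor: count <= 1, so it calls RescheduleCurrentCore() instead
```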
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index c77f44ad4..e4c4c877d 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -450,16 +450,39 @@ public:
         sleeping_queue = q;
     }
 
+    [[nodiscard]] bool IsKernelThread() const {
+        return GetActiveCore() == 3;
+    }
+
+    [[nodiscard]] bool IsDispatchTrackingDisabled() const {
+        return is_single_core || IsKernelThread();
+    }
+
     [[nodiscard]] s32 GetDisableDispatchCount() const {
+        if (IsDispatchTrackingDisabled()) {
+            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
+            return 1;
+        }
+
         return this->GetStackParameters().disable_count;
     }
 
     void DisableDispatch() {
+        if (IsDispatchTrackingDisabled()) {
+            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
+            return;
+        }
+
         ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
         this->GetStackParameters().disable_count++;
     }
 
     void EnableDispatch() {
+        if (IsDispatchTrackingDisabled()) {
+            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
+            return;
+        }
+
         ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0);
         this->GetStackParameters().disable_count--;
     }
@@ -708,6 +731,7 @@ private:
 
     // For emulation
     std::shared_ptr<Common::Fiber> host_context{};
+    bool is_single_core{};
 
     // For debugging
     std::vector<KSynchronizationObject*> wait_objects_for_debugging;
@@ -752,4 +776,16 @@ public:
     }
 };
 
+class KScopedDisableDispatch {
+public:
+    [[nodiscard]] explicit KScopedDisableDispatch(KernelCore& kernel_) : kernel{kernel_} {
+        GetCurrentThread(kernel).DisableDispatch();
+    }
+
+    ~KScopedDisableDispatch();
+
+private:
+    KernelCore& kernel;
+};
+
 } // namespace Kernel
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 92fbc5532..8673384ee 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -85,8 +85,9 @@ struct KernelCore::Impl {
     }
 
     void InitializeCores() {
-        for (auto& core : cores) {
-            core.Initialize(current_process->Is64BitProcess());
+        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
+            cores[core_id].Initialize(current_process->Is64BitProcess());
+            system.Memory().SetCurrentPageTable(*current_process, core_id);
         }
     }
 
@@ -131,15 +132,6 @@ struct KernelCore::Impl {
         next_user_process_id = KProcess::ProcessIDMin;
         next_thread_id = 1;
 
-        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
-            if (suspend_threads[core_id]) {
-                suspend_threads[core_id]->Close();
-                suspend_threads[core_id] = nullptr;
-            }
-
-            schedulers[core_id].reset();
-        }
-
         cores.clear();
 
         global_handle_table->Finalize();
@@ -167,6 +159,16 @@ struct KernelCore::Impl {
         CleanupObject(time_shared_mem);
         CleanupObject(system_resource_limit);
 
+        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
+            if (suspend_threads[core_id]) {
+                suspend_threads[core_id]->Close();
+                suspend_threads[core_id] = nullptr;
+            }
+
+            schedulers[core_id]->Finalize();
+            schedulers[core_id].reset();
+        }
+
         // Next host thead ID to use, 0-3 IDs represent core threads, >3 represent others
         next_host_thread_id = Core::Hardware::NUM_CPU_CORES;
 
@@ -257,14 +259,6 @@ struct KernelCore::Impl {
 
     void MakeCurrentProcess(KProcess* process) {
         current_process = process;
-        if (process == nullptr) {
-            return;
-        }
-
-        const u32 core_id = GetCurrentHostThreadID();
-        if (core_id < Core::Hardware::NUM_CPU_CORES) {
-            system.Memory().SetCurrentPageTable(*process, core_id);
-        }
     }
 
     /// Creates a new host thread ID, should only be called by GetHostThreadId
@@ -824,16 +818,20 @@ const Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) const {
     return impl->cores[id];
 }
 
+size_t KernelCore::CurrentPhysicalCoreIndex() const {
+    const u32 core_id = impl->GetCurrentHostThreadID();
+    if (core_id >= Core::Hardware::NUM_CPU_CORES) {
+        return Core::Hardware::NUM_CPU_CORES - 1;
+    }
+    return core_id;
+}
+
 Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() {
-    u32 core_id = impl->GetCurrentHostThreadID();
-    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
-    return impl->cores[core_id];
+    return impl->cores[CurrentPhysicalCoreIndex()];
 }
 
 const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
-    u32 core_id = impl->GetCurrentHostThreadID();
-    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
-    return impl->cores[core_id];
+    return impl->cores[CurrentPhysicalCoreIndex()];
 }
 
 Kernel::KScheduler* KernelCore::CurrentScheduler() {
@@ -1026,6 +1024,9 @@ void KernelCore::Suspend(bool in_suspention) {
             impl->suspend_threads[core_id]->SetState(state);
             impl->suspend_threads[core_id]->SetWaitReasonForDebugging(
                 ThreadWaitReasonForDebugging::Suspended);
+            if (!should_suspend) {
+                impl->suspend_threads[core_id]->DisableDispatch();
+            }
         }
     }
 }
@@ -1040,13 +1041,11 @@ void KernelCore::ExceptionalExit() {
 }
 
 void KernelCore::EnterSVCProfile() {
-    std::size_t core = impl->GetCurrentHostThreadID();
-    impl->svc_ticks[core] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
+    impl->svc_ticks[CurrentPhysicalCoreIndex()] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
 }
 
 void KernelCore::ExitSVCProfile() {
-    std::size_t core = impl->GetCurrentHostThreadID();
-    MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]);
+    MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]);
 }
 
 std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) {
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 3a6db0b1c..57535433b 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -146,6 +146,9 @@ public:
     /// Gets the an instance of the respective physical CPU core.
     const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const;
 
+    /// Gets the current physical core index for the running host thread.
+    std::size_t CurrentPhysicalCoreIndex() const;
+
     /// Gets the sole instance of the Scheduler at the current running core.
     Kernel::KScheduler* CurrentScheduler();
 
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 2eb532472..a90b291da 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -877,7 +877,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
             const u64 thread_ticks = current_thread->GetCpuTime();
 
             out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks);
-        } else if (same_thread && info_sub_id == system.CurrentCoreIndex()) {
+        } else if (same_thread && info_sub_id == system.Kernel().CurrentPhysicalCoreIndex()) {
             out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks;
         }
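Note: the new KernelCore::CurrentPhysicalCoreIndex() replaces the previous hard ASSERT on the host thread ID. Host threads that are not one of the four emulated core threads (service threads, for example) used to trip that assert; they are now clamped to the last core. A self-contained sketch of the mapping, with the constant mirrored from Core::Hardware and ClampToCore as an illustrative name:

```cpp
#include <cstddef>
#include <cstdint>

// Sketch of CurrentPhysicalCoreIndex()'s clamping behaviour.
constexpr std::size_t NUM_CPU_CORES = 4; // Core::Hardware::NUM_CPU_CORES

constexpr std::size_t ClampToCore(std::uint32_t host_thread_id) {
    return host_thread_id >= NUM_CPU_CORES ? NUM_CPU_CORES - 1 : host_thread_id;
}

static_assert(ClampToCore(2) == 2); // emulated core threads map 1:1
static_assert(ClampToCore(7) == 3); // any other host thread -> last core
```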