author    | bunnei <bunneidev@gmail.com> | 2021-01-20 06:05:24 +0100
committer | bunnei <bunneidev@gmail.com> | 2021-01-29 06:42:26 +0100
commit    | c0f5830323ca5d5bdc2e5e494fcaeaf27fffeb6b (patch)
tree      | e138e7d0ecb6a306261e2871fd0da405571deaab
parent    | common: common_funcs: Add useful kernel macro R_SUCCEED_IF. (diff)
-rw-r--r-- | src/core/hle/kernel/k_address_arbiter.cpp              | 16
-rw-r--r-- | src/core/hle/kernel/k_condition_variable.cpp            |  8
-rw-r--r-- | src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h | 14
-rw-r--r-- | src/core/hle/kernel/k_synchronization_object.cpp        |  8
-rw-r--r-- | src/core/hle/kernel/time_manager.cpp                    | 40
-rw-r--r-- | src/core/hle/kernel/time_manager.h                      |  8

6 files changed, 25 insertions(+), 69 deletions(-)
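Taken together, the change removes the intermediate timeout Handle from the wait path: KScopedSchedulerLockAndSleep no longer takes a Handle& out-parameter, and TimeManager keys its CoreTiming events on the waiting KThread pointer itself. A minimal sketch of the resulting caller pattern, condensed from the call sites in the diff below (illustrative only, not a drop-in snippet):

    // Hypothetical wait sequence using only names that appear in this commit.
    KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
    {
        // The constructor locks the scheduler; the destructor schedules the
        // wakeup event against cur_thread when timeout > 0.
        KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};

        // ... queue cur_thread on the waitable object, or slp.CancelSleep() ...
    }
    // Cancel the timer against the thread itself; no Handle bookkeeping remains.
    kernel.TimeManager().UnscheduleTimeEvent(cur_thread);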
diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp
index 282f02257..1685d25bb 100644
--- a/src/core/hle/kernel/k_address_arbiter.cpp
+++ b/src/core/hle/kernel/k_address_arbiter.cpp
@@ -232,10 +232,9 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32
 ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
     // Prepare to wait.
     KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
-    Handle timer = InvalidHandle;
 
     {
-        KScopedSchedulerLockAndSleep slp(kernel, timer, cur_thread, timeout);
+        KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
 
         // Check that the thread isn't terminating.
         if (cur_thread->IsTerminationRequested()) {
@@ -280,10 +279,7 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement
     }
 
     // Cancel the timer wait.
-    if (timer != InvalidHandle) {
-        auto& time_manager = kernel.TimeManager();
-        time_manager.UnscheduleTimeEvent(timer);
-    }
+    kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
 
     // Remove from the address arbiter.
     {
@@ -303,10 +299,9 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement
 ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
     // Prepare to wait.
     KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
-    Handle timer = InvalidHandle;
 
     {
-        KScopedSchedulerLockAndSleep slp(kernel, timer, cur_thread, timeout);
+        KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
 
         // Check that the thread isn't terminating.
         if (cur_thread->IsTerminationRequested()) {
@@ -344,10 +339,7 @@ ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
     }
 
     // Cancel the timer wait.
-    if (timer != InvalidHandle) {
-        auto& time_manager = kernel.TimeManager();
-        time_manager.UnscheduleTimeEvent(timer);
-    }
+    kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
 
     // Remove from the address arbiter.
     {
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index 2fa2d5289..f0ad8b390 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -258,10 +258,9 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
 ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
     // Prepare to wait.
     KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
-    Handle timer = InvalidHandle;
 
     {
-        KScopedSchedulerLockAndSleep slp(kernel, timer, cur_thread, timeout);
+        KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
 
         // Set the synced object.
         cur_thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);
@@ -322,10 +321,7 @@ ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout)
     }
 
     // Cancel the timer wait.
-    if (timer != InvalidHandle) {
-        auto& time_manager = kernel.TimeManager();
-        time_manager.UnscheduleTimeEvent(timer);
-    }
+    kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
 
     // Remove from the condition variable.
     {
diff --git a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
index fac39aeb7..f8189e107 100644
--- a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
+++ b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
@@ -17,19 +17,16 @@ namespace Kernel {
 
 class KScopedSchedulerLockAndSleep {
 public:
-    explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, KThread* t,
-                                          s64 timeout)
-        : kernel(kernel), event_handle(event_handle), thread(t), timeout_tick(timeout) {
-        event_handle = InvalidHandle;
-
+    explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, KThread* t, s64 timeout)
+        : kernel(kernel), thread(t), timeout_tick(timeout) {
         // Lock the scheduler.
         kernel.GlobalSchedulerContext().scheduler_lock.Lock();
     }
 
     ~KScopedSchedulerLockAndSleep() {
         // Register the sleep.
-        if (this->timeout_tick > 0) {
-            kernel.TimeManager().ScheduleTimeEvent(event_handle, this->thread, this->timeout_tick);
+        if (timeout_tick > 0) {
+            kernel.TimeManager().ScheduleTimeEvent(thread, timeout_tick);
         }
 
         // Unlock the scheduler.
@@ -37,12 +34,11 @@ public:
     }
 
     void CancelSleep() {
-        this->timeout_tick = 0;
+        timeout_tick = 0;
     }
 
 private:
     KernelCore& kernel;
-    Handle& event_handle;
     KThread* thread{};
     s64 timeout_tick{};
 };
diff --git a/src/core/hle/kernel/k_synchronization_object.cpp b/src/core/hle/kernel/k_synchronization_object.cpp
index 18e7026f5..a3b34f82f 100644
--- a/src/core/hle/kernel/k_synchronization_object.cpp
+++ b/src/core/hle/kernel/k_synchronization_object.cpp
@@ -21,11 +21,10 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
 
     // Prepare for wait.
     KThread* thread = kernel.CurrentScheduler()->GetCurrentThread();
-    Handle timer = InvalidHandle;
 
     {
         // Setup the scheduling lock and sleep.
-        KScopedSchedulerLockAndSleep slp(kernel, timer, thread, timeout);
+        KScopedSchedulerLockAndSleep slp{kernel, thread, timeout};
 
         // Check if any of the objects are already signaled.
         for (auto i = 0; i < num_objects; ++i) {
@@ -90,10 +89,7 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
     thread->SetWaitObjectsForDebugging({});
 
     // Cancel the timer as needed.
-    if (timer != InvalidHandle) {
-        auto& time_manager = kernel.TimeManager();
-        time_manager.UnscheduleTimeEvent(timer);
-    }
+    kernel.TimeManager().UnscheduleTimeEvent(thread);
 
     // Get the wait result.
     ResultCode wait_result{RESULT_SUCCESS};
diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp
index aaeef3033..fd0630019 100644
--- a/src/core/hle/kernel/time_manager.cpp
+++ b/src/core/hle/kernel/time_manager.cpp
@@ -21,47 +21,27 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
         std::shared_ptr<KThread> thread;
         {
             std::lock_guard lock{mutex};
-            const auto proper_handle = static_cast<Handle>(thread_handle);
-            if (cancelled_events[proper_handle]) {
-                return;
-            }
-            thread = system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle);
-        }
-
-        if (thread) {
-            // Thread can be null if process has exited
-            thread->Wakeup();
+            thread = SharedFrom<KThread>(reinterpret_cast<KThread*>(thread_handle));
         }
+        thread->Wakeup();
     });
 }
 
-void TimeManager::ScheduleTimeEvent(Handle& event_handle, KThread* timetask, s64 nanoseconds) {
+void TimeManager::ScheduleTimeEvent(KThread* thread, s64 nanoseconds) {
     std::lock_guard lock{mutex};
-    event_handle = timetask->GetGlobalHandle();
     if (nanoseconds > 0) {
-        ASSERT(timetask);
-        ASSERT(timetask->GetState() != ThreadState::Runnable);
+        ASSERT(thread);
+        ASSERT(thread->GetState() != ThreadState::Runnable);
         system.CoreTiming().ScheduleEvent(std::chrono::nanoseconds{nanoseconds},
-                                          time_manager_event_type, event_handle);
-    } else {
-        event_handle = InvalidHandle;
-    }
-    cancelled_events[event_handle] = false;
-}
-
-void TimeManager::UnscheduleTimeEvent(Handle event_handle) {
-    std::lock_guard lock{mutex};
-    if (event_handle == InvalidHandle) {
-        return;
+                                          time_manager_event_type,
+                                          reinterpret_cast<uintptr_t>(thread));
     }
-    system.CoreTiming().UnscheduleEvent(time_manager_event_type, event_handle);
-    cancelled_events[event_handle] = true;
 }
 
-void TimeManager::CancelTimeEvent(KThread* time_task) {
+void TimeManager::UnscheduleTimeEvent(KThread* thread) {
     std::lock_guard lock{mutex};
-    const Handle event_handle = time_task->GetGlobalHandle();
-    UnscheduleTimeEvent(event_handle);
+    system.CoreTiming().UnscheduleEvent(time_manager_event_type,
+                                        reinterpret_cast<uintptr_t>(thread));
 }
 
 } // namespace Kernel
diff --git a/src/core/hle/kernel/time_manager.h b/src/core/hle/kernel/time_manager.h
index 7cc702bec..0d7f05f30 100644
--- a/src/core/hle/kernel/time_manager.h
+++ b/src/core/hle/kernel/time_manager.h
@@ -31,18 +31,14 @@ public:
     explicit TimeManager(Core::System& system);
 
     /// Schedule a time event on `timetask` thread that will expire in 'nanoseconds'
-    /// returns a non-invalid handle in `event_handle` if correctly scheduled
-    void ScheduleTimeEvent(Handle& event_handle, KThread* timetask, s64 nanoseconds);
+    void ScheduleTimeEvent(KThread* time_task, s64 nanoseconds);
 
     /// Unschedule an existing time event
-    void UnscheduleTimeEvent(Handle event_handle);
-
-    void CancelTimeEvent(KThread* time_task);
+    void UnscheduleTimeEvent(KThread* thread);
 
 private:
     Core::System& system;
     std::shared_ptr<Core::Timing::EventType> time_manager_event_type;
-    std::unordered_map<Handle, bool> cancelled_events;
     std::mutex mutex;
 };
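One consequence of keying the event on the thread pointer is that ScheduleTimeEvent and UnscheduleTimeEvent must see the same KThread*: the pointer, cast to uintptr_t, is the only user data CoreTiming stores, and the wakeup callback casts it straight back before calling Wakeup(). A condensed sketch of that pairing, using the CoreTiming calls exactly as they appear in the diff above:

    // The thread pointer doubles as the CoreTiming user data / lookup key.
    void TimeManager::ScheduleTimeEvent(KThread* thread, s64 nanoseconds) {
        std::lock_guard lock{mutex};
        if (nanoseconds > 0) {
            system.CoreTiming().ScheduleEvent(std::chrono::nanoseconds{nanoseconds},
                                              time_manager_event_type,
                                              reinterpret_cast<uintptr_t>(thread));
        }
    }

    void TimeManager::UnscheduleTimeEvent(KThread* thread) {
        std::lock_guard lock{mutex};
        // The same uintptr_t key is needed here for CoreTiming to find the pending
        // event; unscheduling a thread with no pending event is expected to be a no-op.
        system.CoreTiming().UnscheduleEvent(time_manager_event_type,
                                            reinterpret_cast<uintptr_t>(thread));
    }

Note that the cancelled_events map and the null-thread check in the callback are gone: the callback now dereferences the pointer unconditionally, which presumably relies on the event being unscheduled before the waiting thread can be destroyed.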