author    ReinUsesLisp <reinuseslisp@airmail.cc>  2020-06-29 02:42:57 +0200
committer ReinUsesLisp <reinuseslisp@airmail.cc>  2020-06-29 02:42:57 +0200
commit    8562b516c0e3fa25a65e440db29cd474c0cbb896
tree      a9f97fd5b63ac6455e0258dea1dc05ea489051aa /src/core/hle
parent    Merge pull request #3955 from FernandoS27/prometheus-2b
Diffstat (limited to 'src/core/hle')
-rw-r--r--  src/core/hle/kernel/scheduler.cpp  21
1 file changed, 10 insertions(+), 11 deletions(-)
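This commit replaces manual lock()/unlock() pairs on the scheduler's guards with std::scoped_lock (hence the new <mutex> include), so each lock is released automatically when the guard object leaves scope, including on early return or exception. The following is a minimal, self-contained sketch of that pattern, not yuzu code; it assumes a plain std::mutex, while the real scheduler members may use a different lock type with the same lock()/unlock() interface.

// Sketch only (not from the yuzu codebase): converting manual lock()/unlock()
// calls to RAII locking with std::scoped_lock, mirroring GlobalScheduler::AddThread.
// "global_list_guard" is assumed to be a std::mutex here.
#include <memory>
#include <mutex>
#include <utility>
#include <vector>

struct Thread {};

class ThreadListSketch {
public:
    void AddThread(std::shared_ptr<Thread> thread) {
        // Before: global_list_guard.lock(); ...; global_list_guard.unlock();
        // After: the mutex is unlocked automatically at the end of this scope,
        // even if push_back throws.
        std::scoped_lock lock{global_list_guard};
        thread_list.push_back(std::move(thread));
    }

private:
    std::mutex global_list_guard;
    std::vector<std::shared_ptr<Thread>> thread_list;
};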
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 2b12c0dbf..7b929781c 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -6,6 +6,7 @@
 // licensed under GPLv2 or later under exception provided by the author.
 
 #include <algorithm>
+#include <mutex>
 #include <set>
 #include <unordered_set>
 #include <utility>
@@ -31,22 +32,20 @@ GlobalScheduler::GlobalScheduler(KernelCore& kernel) : kernel{kernel} {}
 GlobalScheduler::~GlobalScheduler() = default;
 
 void GlobalScheduler::AddThread(std::shared_ptr<Thread> thread) {
-    global_list_guard.lock();
+    std::scoped_lock lock{global_list_guard};
     thread_list.push_back(std::move(thread));
-    global_list_guard.unlock();
 }
 
 void GlobalScheduler::RemoveThread(std::shared_ptr<Thread> thread) {
-    global_list_guard.lock();
+    std::scoped_lock lock{global_list_guard};
     thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
                       thread_list.end());
-    global_list_guard.unlock();
 }
 
 u32 GlobalScheduler::SelectThreads() {
     ASSERT(is_locked);
     const auto update_thread = [](Thread* thread, Scheduler& sched) {
-        sched.guard.lock();
+        std::scoped_lock lock{sched.guard};
         if (thread != sched.selected_thread_set.get()) {
             if (thread == nullptr) {
                 ++sched.idle_selection_count;
@@ -57,7 +56,6 @@ u32 GlobalScheduler::SelectThreads() {
             sched.is_context_switch_pending || (sched.selected_thread_set != sched.current_thread);
         sched.is_context_switch_pending = reschedule_pending;
         std::atomic_thread_fence(std::memory_order_seq_cst);
-        sched.guard.unlock();
         return reschedule_pending;
     };
     if (!is_reselection_pending.load()) {
@@ -757,11 +755,12 @@ void Scheduler::OnSwitch(void* this_scheduler) {
 
 void Scheduler::SwitchToCurrent() {
     while (true) {
-        guard.lock();
-        selected_thread = selected_thread_set;
-        current_thread = selected_thread;
-        is_context_switch_pending = false;
-        guard.unlock();
+        {
+            std::scoped_lock lock{guard};
+            selected_thread = selected_thread_set;
+            current_thread = selected_thread;
+            is_context_switch_pending = false;
+        }
         while (!is_context_switch_pending) {
             if (current_thread != nullptr && !current_thread->IsHLEThread()) {
                 current_thread->context_guard.lock();
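The last hunk also introduces an explicit block so the std::scoped_lock covers only the thread-selection assignments and releases the guard before the inner wait loop runs, matching the placement of the old unlock() call. Below is a small sketch of that scoping pattern; it is not yuzu code and the names are placeholders.

// Sketch only: limiting a scoped_lock's critical section with a nested block.
#include <mutex>

std::mutex guard;
int selected_thread_set = 1;
int current_thread = 0;

void UpdateCurrentThread() {
    {
        // The guard is held only for this assignment and is released at the
        // closing brace, before any longer-running work below.
        std::scoped_lock lock{guard};
        current_thread = selected_thread_set;
    }
    // ... work that must not run while holding guard ...
}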