author     Fernando Sahmkow <fsahmkow27@gmail.com>    2019-09-10 17:04:40 +0200
committer  FernandoS27 <fsahmkow27@gmail.com>         2019-10-15 17:55:14 +0200
commit     b49c0dab8772afb06358e5d19af092226b3a59bb (patch)
tree       00e1cad505f915f5d7d286c5627b8e13f5eb78ff /src
parent     Scheduler: Add protections for Yield bombing (diff)
Diffstat (limited to 'src')
-rw-r--r--   src/core/hle/kernel/kernel.cpp      16
-rw-r--r--   src/core/hle/kernel/scheduler.cpp   10
-rw-r--r--   src/core/hle/kernel/scheduler.h      4
3 files changed, 30 insertions, 0 deletions
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 600d6ec74..7a913520d 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -12,6 +12,7 @@
#include "core/core.h"
#include "core/core_timing.h"
+#include "core/core_timing_util.h"
#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/handle_table.h"
@@ -96,6 +97,7 @@ struct KernelCore::Impl {
InitializeSystemResourceLimit(kernel);
InitializeThreads();
+ InitializePreemption();
}
void Shutdown() {
@@ -111,6 +113,7 @@ struct KernelCore::Impl {
thread_wakeup_callback_handle_table.Clear();
thread_wakeup_event_type = nullptr;
+ preemption_event = nullptr;
named_ports.clear();
}
@@ -133,6 +136,18 @@ struct KernelCore::Impl {
system.CoreTiming().RegisterEvent("ThreadWakeupCallback", ThreadWakeupCallback);
}
+ void InitializePreemption() {
+ preemption_event = system.CoreTiming().RegisterEvent(
+ "PreemptionCallback", [this](u64 userdata, s64 cycles_late) {
+ global_scheduler.PreemptThreads();
+ s64 time_interval = Core::Timing::msToCycles(std::chrono::milliseconds(10));
+ system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
+ });
+
+ s64 time_interval = Core::Timing::msToCycles(std::chrono::milliseconds(10));
+ system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
+ }
+
std::atomic<u32> next_object_id{0};
std::atomic<u64> next_kernel_process_id{Process::InitialKIPIDMin};
std::atomic<u64> next_user_process_id{Process::ProcessIDMin};
@@ -146,6 +161,7 @@ struct KernelCore::Impl {
SharedPtr<ResourceLimit> system_resource_limit;
Core::Timing::EventType* thread_wakeup_event_type = nullptr;
+ Core::Timing::EventType* preemption_event = nullptr;
// TODO(yuriks): This can be removed if Thread objects are explicitly pooled in the future,
// allowing us to simply use a pool index or similar.
Kernel::HandleTable thread_wakeup_callback_handle_table;
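
Note on the kernel.cpp hunks above: InitializePreemption() registers a core-timing callback that calls GlobalScheduler::PreemptThreads() and then re-arms itself, so preemption fires roughly every 10 ms (converted to cycles via msToCycles). Below is a minimal, self-contained sketch of that self-rescheduling pattern; CoreTimingSketch and its members are simplified stand-ins for illustration, not yuzu's Core::Timing API.

// Illustrative sketch only: a tiny event loop where a callback re-schedules
// itself, mirroring how PreemptionCallback above schedules the next 10 ms tick.
#include <cstdint>
#include <functional>
#include <queue>
#include <utility>
#include <vector>

struct Event {
    std::int64_t fire_time;                      // absolute time in ticks
    std::function<void(std::int64_t)> callback;  // receives how late it fired
    bool operator>(const Event& other) const { return fire_time > other.fire_time; }
};

class CoreTimingSketch {
public:
    void ScheduleEvent(std::int64_t ticks_from_now, std::function<void(std::int64_t)> cb) {
        events.push(Event{now + ticks_from_now, std::move(cb)});
    }

    // Advance emulated time, firing every event that becomes due.
    void Advance(std::int64_t ticks) {
        const std::int64_t target = now + ticks;
        while (!events.empty() && events.top().fire_time <= target) {
            Event ev = events.top();
            events.pop();
            now = ev.fire_time;  // "current time" becomes the event's due time
            ev.callback(0);      // cycles_late omitted for brevity
        }
        now = target;
    }

private:
    std::int64_t now = 0;
    std::priority_queue<Event, std::vector<Event>, std::greater<Event>> events;
};

int main() {
    CoreTimingSketch timing;
    constexpr std::int64_t interval = 10;  // stands in for msToCycles(10ms)

    // Like PreemptionCallback: do the work, then schedule the next tick.
    std::function<void(std::int64_t)> tick = [&](std::int64_t /*late*/) {
        // ... this is where PreemptThreads() would run ...
        timing.ScheduleEvent(interval, tick);
    };

    timing.ScheduleEvent(interval, tick);
    timing.Advance(100);  // the callback fires ten times (t = 10, 20, ..., 100)
    return 0;
}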
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 451fd8077..0d45307cd 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -238,6 +238,16 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
return AskForReselectionOrMarkRedundant(yielding_thread, winner);
}
+void GlobalScheduler::PreemptThreads() {
+ for (std::size_t core_id = 0; core_id < NUM_CPU_CORES; core_id++) {
+ const u64 priority = preemption_priorities[core_id];
+ if (scheduled_queue[core_id].size(priority) > 1) {
+ scheduled_queue[core_id].yield(priority);
+ reselection_pending.store(true, std::memory_order_release);
+ }
+ }
+}
+
void GlobalScheduler::Schedule(u32 priority, u32 core, Thread* thread) {
ASSERT_MSG(thread->GetProcessorID() == core, "Thread must be assigned to this core.");
scheduled_queue[core].add(thread, priority);
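
The PreemptThreads() hunk above walks every core, and when two or more threads sit at that core's preemption priority it asks the per-core queue to yield that level and flags a reselection. The sketch below shows the queue semantics this relies on; the container is a simplified stand-in, not Common::MultiLevelQueue, and the exact yield behaviour (rotate the head of the priority level to the back so the next equal-priority thread runs) is an assumption for illustration.

// Illustrative sketch: assumed round-robin semantics for yield(priority).
// MultiLevelQueueSketch is not yuzu's Common::MultiLevelQueue.
#include <array>
#include <cstddef>
#include <deque>
#include <iostream>
#include <string>

constexpr std::size_t priority_count = 64;  // priority levels 0..63

struct MultiLevelQueueSketch {
    std::array<std::deque<std::string>, priority_count> levels;

    std::size_t size(std::size_t priority) const {
        return levels[priority].size();
    }

    // Rotate the head of the given priority level to the tail.
    void yield(std::size_t priority) {
        auto& level = levels[priority];
        if (level.size() > 1) {
            level.push_back(level.front());
            level.pop_front();
        }
    }
};

int main() {
    MultiLevelQueueSketch queue;
    queue.levels[59] = {"thread_a", "thread_b", "thread_c"};

    if (queue.size(59) > 1) {  // same guard PreemptThreads() uses
        queue.yield(59);
    }

    std::cout << queue.levels[59].front() << '\n';  // prints "thread_b"
    return 0;
}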
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index 8fcc86bae..c13a368fd 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -133,6 +133,8 @@ public:
*/
bool YieldThreadAndWaitForLoadBalancing(Thread* thread);
+ void PreemptThreads();
+
u32 CpuCoresCount() const {
return NUM_CPU_CORES;
}
@@ -153,6 +155,8 @@ private:
std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, NUM_CPU_CORES> suggested_queue;
std::atomic<bool> reselection_pending;
+ std::array<u64, NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62};
+
/// Lists all thread ids that aren't deleted/etc.
std::vector<SharedPtr<Thread>> thread_list;
Core::System& system;
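
On the preemption_priorities table added above: thread priorities here are lower-is-higher, so cores 0-2 preempt the level-59 threads while core 3 (typically reserved for system threads) only preempts at the lower level 62. A trivial lookup sketch, with hypothetical names, just to make the per-core mapping concrete:

// Minimal sketch with hypothetical names: which priority level each core
// rotates when the 10 ms preemption tick fires (lower value = higher priority).
#include <array>
#include <cstddef>
#include <cstdint>
#include <iostream>

constexpr std::size_t num_cpu_cores = 4;
constexpr std::array<std::uint64_t, num_cpu_cores> preemption_priorities{59, 59, 59, 62};

int main() {
    for (std::size_t core = 0; core < num_cpu_cores; ++core) {
        std::cout << "core " << core << " preempts at priority "
                  << preemption_priorities[core] << '\n';
    }
    return 0;
}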