Diffstat (limited to 'src/core/hle/kernel/thread.h')
-rw-r--r--    src/core/hle/kernel/thread.h    114
1 file changed, 85 insertions, 29 deletions
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index a75071e9b..11ef29888 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -4,6 +4,7 @@
#pragma once
+#include <array>
#include <functional>
#include <string>
#include <utility>
@@ -12,6 +13,7 @@
#include "common/common_types.h"
#include "common/spin_lock.h"
#include "core/arm/arm_interface.h"
+#include "core/hle/kernel/k_affinity_mask.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/synchronization_object.h"
#include "core/hle/result.h"
@@ -27,10 +29,10 @@ class System;
namespace Kernel {
-class GlobalScheduler;
+class GlobalSchedulerContext;
class KernelCore;
class Process;
-class Scheduler;
+class KScheduler;
enum ThreadPriority : u32 {
THREADPRIO_HIGHEST = 0, ///< Highest thread priority
@@ -345,8 +347,12 @@ public:
void SetStatus(ThreadStatus new_status);
- u64 GetLastRunningTicks() const {
- return last_running_ticks;
+ s64 GetLastScheduledTick() const {
+ return this->last_scheduled_tick;
+ }
+
+ void SetLastScheduledTick(s64 tick) {
+ this->last_scheduled_tick = tick;
}
u64 GetTotalCPUTimeTicks() const {
@@ -361,10 +367,18 @@ public:
return processor_id;
}
+ s32 GetActiveCore() const {
+ return GetProcessorID();
+ }
+
void SetProcessorID(s32 new_core) {
processor_id = new_core;
}
+ void SetActiveCore(s32 new_core) {
+ processor_id = new_core;
+ }
+
Process* GetOwnerProcess() {
return owner_process;
}
@@ -469,7 +483,7 @@ public:
return ideal_core;
}
- u64 GetAffinityMask() const {
+ const KAffinityMask& GetAffinityMask() const {
return affinity_mask;
}
@@ -478,21 +492,12 @@ public:
/// Sleeps this thread for the given amount of nanoseconds.
ResultCode Sleep(s64 nanoseconds);
- /// Yields this thread without rebalancing loads.
- std::pair<ResultCode, bool> YieldSimple();
-
- /// Yields this thread and does a load rebalancing.
- std::pair<ResultCode, bool> YieldAndBalanceLoad();
-
- /// Yields this thread and if the core is left idle, loads are rebalanced
- std::pair<ResultCode, bool> YieldAndWaitForLoadBalancing();
-
- void IncrementYieldCount() {
- yield_count++;
+ s64 GetYieldScheduleCount() const {
+ return this->schedule_count;
}
- u64 GetYieldCount() const {
- return yield_count;
+ void SetYieldScheduleCount(s64 count) {
+ this->schedule_count = count;
}
ThreadSchedStatus GetSchedulingStatus() const {
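The three Yield* entry points removed above are presumably superseded by the schedule_count bookkeeping exposed through the new accessors. A minimal sketch of how a scheduler might use them to detect a redundant yield (one where nothing was rescheduled on the core since the thread last yielded); the helper name and the per-core counter parameter are assumptions for illustration, not part of this diff:

    // Hypothetical helper: if the core's schedule count is unchanged since the
    // value recorded at the thread's last yield, yielding again would not
    // change the running set on that core.
    bool IsRedundantYield(const Kernel::Thread& thread, s64 core_schedule_count) {
        return thread.GetYieldScheduleCount() == core_schedule_count;
    }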
@@ -568,9 +573,59 @@ public:
return has_exited;
}
+ class QueueEntry {
+ public:
+ constexpr QueueEntry() = default;
+
+ constexpr void Initialize() {
+ this->prev = nullptr;
+ this->next = nullptr;
+ }
+
+ constexpr Thread* GetPrev() const {
+ return this->prev;
+ }
+ constexpr Thread* GetNext() const {
+ return this->next;
+ }
+ constexpr void SetPrev(Thread* thread) {
+ this->prev = thread;
+ }
+ constexpr void SetNext(Thread* thread) {
+ this->next = thread;
+ }
+
+ private:
+ Thread* prev{};
+ Thread* next{};
+ };
+
+ QueueEntry& GetPriorityQueueEntry(s32 core) {
+ return this->per_core_priority_queue_entry[core];
+ }
+
+ const QueueEntry& GetPriorityQueueEntry(s32 core) const {
+ return this->per_core_priority_queue_entry[core];
+ }
+
+ s32 GetDisableDispatchCount() const {
+ return disable_count;
+ }
+
+ void DisableDispatch() {
+ ASSERT(GetDisableDispatchCount() >= 0);
+ disable_count++;
+ }
+
+ void EnableDispatch() {
+ ASSERT(GetDisableDispatchCount() > 0);
+ disable_count--;
+ }
+
private:
- friend class GlobalScheduler;
- friend class Scheduler;
+ friend class GlobalSchedulerContext;
+ friend class KScheduler;
+ friend class Process;
void SetSchedulingStatus(ThreadSchedStatus new_status);
void AddSchedulingFlag(ThreadSchedFlags flag);
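The new QueueEntry members make Thread an intrusive doubly linked list node, with one prev/next pair per CPU core, so a scheduler can place a thread on several per-core priority queues without extra allocation. A minimal sketch of maintaining such a list through the accessors added above; the CoreQueue type and PushBack helper are illustrative assumptions, not part of this diff:

    // Hypothetical per-core queue built on the intrusive QueueEntry links.
    struct CoreQueue {
        Kernel::Thread* head{};
        Kernel::Thread* tail{};
    };

    void PushBack(CoreQueue& queue, Kernel::Thread* thread, s32 core) {
        auto& entry = thread->GetPriorityQueueEntry(core);
        entry.SetPrev(queue.tail);
        entry.SetNext(nullptr);
        if (queue.tail != nullptr) {
            // Link the previous tail forward to the newly appended thread.
            queue.tail->GetPriorityQueueEntry(core).SetNext(thread);
        } else {
            queue.head = thread;
        }
        queue.tail = thread;
    }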
@@ -583,12 +638,14 @@ private:
ThreadContext64 context_64{};
std::shared_ptr<Common::Fiber> host_context{};
- u64 thread_id = 0;
-
ThreadStatus status = ThreadStatus::Dormant;
+ u32 scheduling_state = 0;
+
+ u64 thread_id = 0;
VAddr entry_point = 0;
VAddr stack_top = 0;
+ std::atomic_int disable_count = 0;
ThreadType type;
@@ -602,9 +659,8 @@ private:
u32 current_priority = 0;
u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks.
- u64 last_running_ticks = 0; ///< CPU tick when thread was last running
- u64 yield_count = 0; ///< Number of redundant yields carried by this thread.
- ///< a redundant yield is one where no scheduling is changed
+ s64 schedule_count{};
+ s64 last_scheduled_tick{};
s32 processor_id = 0;
@@ -646,16 +702,16 @@ private:
Handle hle_time_event;
SynchronizationObject* hle_object;
- Scheduler* scheduler = nullptr;
+ KScheduler* scheduler = nullptr;
+
+ std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{};
u32 ideal_core{0xFFFFFFFF};
- u64 affinity_mask{0x1};
+ KAffinityMask affinity_mask{};
s32 ideal_core_override = -1;
- u64 affinity_mask_override = 0x1;
u32 affinity_override_count = 0;
- u32 scheduling_state = 0;
u32 pausing_state = 0;
bool is_running = false;
bool is_waiting_on_sync = false;
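DisableDispatch() and EnableDispatch() introduced in this change maintain a nesting reference count (disable_count), with asserts guarding against underflow. A minimal RAII sketch of how callers might pair the two; the guard class is an assumption for illustration, not part of this diff:

    // Hypothetical scope guard that keeps dispatch disabled for its lifetime.
    class ScopedDisableDispatch {
    public:
        explicit ScopedDisableDispatch(Kernel::Thread& thread) : thread{thread} {
            thread.DisableDispatch(); // increments disable_count (asserts it was >= 0)
        }
        ~ScopedDisableDispatch() {
            thread.EnableDispatch(); // decrements disable_count (asserts it was > 0)
        }
        ScopedDisableDispatch(const ScopedDisableDispatch&) = delete;
        ScopedDisableDispatch& operator=(const ScopedDisableDispatch&) = delete;
    private:
        Kernel::Thread& thread;
    };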