// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <atomic>
#include "common/assert.h"
#include "core/hle/kernel/k_interrupt_manager.h"
#include "core/hle/kernel/k_spin_lock.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h"

namespace Kernel {

class KernelCore;

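// Note (editorial assumption inferred from the calls below, not upstream
// documentation): the SchedulerType template parameter is expected to provide
// the following static interface:
//
//   static void DisableScheduling(KernelCore& kernel);
//   static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling);
//   static u64 UpdateHighestPriorityThreads(KernelCore& kernel);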
template <typename SchedulerType>
class KAbstractSchedulerLock {
public:
    explicit KAbstractSchedulerLock(KernelCore& kernel_) : kernel{kernel_} {}

    bool IsLockedByCurrentThread() const {
        return owner_thread == GetCurrentThreadPointer(kernel);
    }

    void Lock() {
        // If we are shutting down the kernel, none of this is relevant anymore.
        if (kernel.IsShuttingDown()) {
            return;
        }

        if (IsLockedByCurrentThread()) {
            // If we already own the lock, we can just increment the count.
            ASSERT(lock_count > 0);
            lock_count++;
        } else {
            // Otherwise, we want to disable scheduling and acquire the spinlock.
            SchedulerType::DisableScheduling(kernel);
            spin_lock.Lock();

            // For debug, ensure that our state is valid.
            ASSERT(lock_count == 0);
            ASSERT(owner_thread == nullptr);

            // Increment count, take ownership.
            lock_count = 1;
            owner_thread = GetCurrentThreadPointer(kernel);
        }
    }

    void Unlock() {
        // If we are shutting down the kernel, none of this is relevant anymore.
        if (kernel.IsShuttingDown()) {
            return;
        }

        ASSERT(IsLockedByCurrentThread());
        ASSERT(lock_count > 0);

        // Release an instance of the lock.
        if ((--lock_count) == 0) {
            // Perform a memory barrier here, so that writes made while holding
            // the lock are visible before ownership is released.
            std::atomic_thread_fence(std::memory_order_seq_cst);

            // We're no longer going to hold the lock. Take note of what cores need scheduling.
            const u64 cores_needing_scheduling =
                SchedulerType::UpdateHighestPriorityThreads(kernel);

            // Note that we no longer hold the lock, and unlock the spinlock.
            owner_thread = nullptr;
            spin_lock.Unlock();

            // Enable scheduling, and perform a rescheduling operation.
            SchedulerType::EnableScheduling(kernel, cores_needing_scheduling);
        }
    }

private:
    KernelCore& kernel;
    KAlignedSpinLock spin_lock{};
    s32 lock_count{};
    std::atomic<KThread*> owner_thread{};
};
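
// Illustrative sketch, not part of the upstream header: a minimal RAII helper
// in the style of yuzu's KScopedSchedulerLock, written against the lock above.
// The name KScopedAbstractSchedulerLock is hypothetical.
template <typename SchedulerType>
class KScopedAbstractSchedulerLock {
public:
    // Acquire the scheduler lock for the duration of the enclosing scope.
    explicit KScopedAbstractSchedulerLock(KAbstractSchedulerLock<SchedulerType>& lock_)
        : lock{lock_} {
        lock.Lock();
    }

    // Release the lock (and reschedule, if this was the outermost hold).
    ~KScopedAbstractSchedulerLock() {
        lock.Unlock();
    }

private:
    KAbstractSchedulerLock<SchedulerType>& lock;
};

// Usage (illustrative): given a lock such as
//   KAbstractSchedulerLock<KScheduler> scheduler_lock{kernel};
// a critical section can be written as
//   {
//       KScopedAbstractSchedulerLock<KScheduler> lk{scheduler_lock};
//       // ... mutate scheduler state ...
//   } // lock released, flagged cores rescheduled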

} // namespace Kernel