// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <atomic>
#include <memory>

#include "common/common_funcs.h"
#include "core/hle/kernel/k_dynamic_page_manager.h"
#include "core/hle/kernel/k_slab_heap.h"

namespace Kernel {

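// KDynamicSlabHeap is an intrusive slab allocator for objects of type T whose
// backing pages are obtained on demand from a KDynamicPageManager. When the
// free list is exhausted, Allocate() carves a fresh page into T-sized nodes.
// If ClearNode is set, the free-list `next` pointer embedded in a node is
// zeroed before the node is handed to the caller.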
template <typename T, bool ClearNode = false>
class KDynamicSlabHeap : protected impl::KSlabHeapImpl {
    YUZU_NON_COPYABLE(KDynamicSlabHeap);
    YUZU_NON_MOVEABLE(KDynamicSlabHeap);

public:
    constexpr KDynamicSlabHeap() = default;

    constexpr VAddr GetAddress() const {
        return m_address;
    }
    constexpr size_t GetSize() const {
        return m_size;
    }
    constexpr size_t GetUsed() const {
        return m_used.load();
    }
    constexpr size_t GetPeak() const {
        return m_peak.load();
    }
    constexpr size_t GetCount() const {
        return m_count.load();
    }

    constexpr bool IsInRange(VAddr addr) const {
        return this->GetAddress() <= addr && addr <= this->GetAddress() + this->GetSize() - 1;
    }

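    // Seeds the free list with at least num_objects nodes (rounded up to a
    // whole number of pages from page_allocator) and records the address
    // range this heap manages.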
    void Initialize(KDynamicPageManager* page_allocator, size_t num_objects) {
        ASSERT(page_allocator != nullptr);

        // Initialize members.
        m_address = page_allocator->GetAddress();
        m_size = page_allocator->GetSize();

        // Initialize the base allocator.
        KSlabHeapImpl::Initialize();

        // Allocate until we have the correct number of objects.
        while (m_count.load() < num_objects) {
            auto* allocated = reinterpret_cast<T*>(page_allocator->Allocate());
            ASSERT(allocated != nullptr);

            for (size_t i = 0; i < sizeof(PageBuffer) / sizeof(T); i++) {
                KSlabHeapImpl::Free(allocated + i);
            }

            m_count += sizeof(PageBuffer) / sizeof(T);
        }
    }

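    // Pops a node from the free list; when the list is empty, falls back to
    // carving a new page from page_allocator. Returns nullptr only if that
    // fallback also fails.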
    T* Allocate(KDynamicPageManager* page_allocator) {
        T* allocated = static_cast<T*>(KSlabHeapImpl::Allocate());

        // If we successfully allocated and we should clear the node, do so.
        if constexpr (ClearNode) {
            if (allocated != nullptr) [[likely]] {
                reinterpret_cast<KSlabHeapImpl::Node*>(allocated)->next = nullptr;
            }
        }

        // If we fail to allocate, try to get a new page from the page allocator.
        if (allocated == nullptr) [[unlikely]] {
            if (page_allocator != nullptr) {
                allocated = reinterpret_cast<T*>(page_allocator->Allocate());
                if (allocated != nullptr) {
                    // If we succeeded in getting a page, free the rest to our slab.
                    for (size_t i = 1; i < sizeof(PageBuffer) / sizeof(T); i++) {
                        KSlabHeapImpl::Free(allocated + i);
                    }
                    m_count += sizeof(PageBuffer) / sizeof(T);
                }
            }
        }

        if (allocated != nullptr) [[likely]] {
            // Construct the object.
            std::construct_at(allocated);

            // Update our tracking.
            const size_t used = ++m_used;
            size_t peak = m_peak.load();
            while (peak < used) {
                if (m_peak.compare_exchange_weak(peak, used, std::memory_order_relaxed)) {
                    break;
                }
            }
        }

        return allocated;
    }

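    // Returns a node to the free list. Note that T's destructor is not
    // invoked here.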
    void Free(T* t) {
        KSlabHeapImpl::Free(t);
        --m_used;
    }

private:
    using PageBuffer = KDynamicPageManager::PageBuffer;

private:
    std::atomic<size_t> m_used{};
    std::atomic<size_t> m_peak{};
    std::atomic<size_t> m_count{};
    VAddr m_address{};
    size_t m_size{};
};
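
// Minimal usage sketch (illustrative only): `page_manager`, `SomeObject`, and
// the object count below are assumptions, not part of this header.
//
//     KDynamicPageManager page_manager;
//     // ... page_manager initialized over a backing virtual range ...
//
//     KDynamicSlabHeap<SomeObject> heap;
//     heap.Initialize(std::addressof(page_manager), 128);
//
//     if (SomeObject* obj = heap.Allocate(std::addressof(page_manager))) {
//         // ... use *obj ...
//         heap.Free(obj);
//     }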

} // namespace Kernel