author    | bunnei <bunneidev@gmail.com> | 2022-03-12 01:29:53 +0100
committer | bunnei <bunneidev@gmail.com> | 2022-03-15 02:14:54 +0100
commit    | 15d9b0418f22637ec848d30dee55d168ee821f6a (patch)
tree      | b0d81616aba414b5e8fa2f5add577bbf1af26832
parent    | core: hle: kernel: Update init_slab_heap, use device memory, and add KThreadLocalPage and KPageBuffer. (diff)
-rw-r--r-- | src/core/hle/kernel/k_slab_heap.h  | 230
-rw-r--r-- | src/core/hle/kernel/slab_helpers.h |   2
2 files changed, 107 insertions, 125 deletions
diff --git a/src/core/hle/kernel/k_slab_heap.h b/src/core/hle/kernel/k_slab_heap.h
index 05c0bec9c..5690cc757 100644
--- a/src/core/hle/kernel/k_slab_heap.h
+++ b/src/core/hle/kernel/k_slab_heap.h
@@ -16,39 +16,34 @@ class KernelCore;
 
 namespace impl {
 
-class KSlabHeapImpl final {
-public:
+class KSlabHeapImpl {
     YUZU_NON_COPYABLE(KSlabHeapImpl);
     YUZU_NON_MOVEABLE(KSlabHeapImpl);
 
+public:
     struct Node {
         Node* next{};
     };
 
+public:
     constexpr KSlabHeapImpl() = default;
-    constexpr ~KSlabHeapImpl() = default;
 
-    void Initialize(std::size_t size) {
-        ASSERT(head == nullptr);
-        obj_size = size;
-    }
-
-    constexpr std::size_t GetObjectSize() const {
-        return obj_size;
+    void Initialize() {
+        ASSERT(m_head == nullptr);
     }
 
     Node* GetHead() const {
-        return head;
+        return m_head;
     }
 
     void* Allocate() {
-        Node* ret = head.load();
+        Node* ret = m_head.load();
 
         do {
            if (ret == nullptr) {
                 break;
             }
-        } while (!head.compare_exchange_weak(ret, ret->next));
+        } while (!m_head.compare_exchange_weak(ret, ret->next));
 
         return ret;
     }
@@ -56,170 +51,157 @@ public:
     void Free(void* obj) {
         Node* node = static_cast<Node*>(obj);
 
-        Node* cur_head = head.load();
+        Node* cur_head = m_head.load();
         do {
             node->next = cur_head;
-        } while (!head.compare_exchange_weak(cur_head, node));
+        } while (!m_head.compare_exchange_weak(cur_head, node));
     }
 
 private:
-    std::atomic<Node*> head{};
-    std::size_t obj_size{};
+    std::atomic<Node*> m_head{};
 };
 
 } // namespace impl
 
-class KSlabHeapBase {
-public:
+template <bool SupportDynamicExpansion>
+class KSlabHeapBase : protected impl::KSlabHeapImpl {
     YUZU_NON_COPYABLE(KSlabHeapBase);
     YUZU_NON_MOVEABLE(KSlabHeapBase);
 
-    constexpr KSlabHeapBase() = default;
-    constexpr ~KSlabHeapBase() = default;
+private:
+    size_t m_obj_size{};
+    uintptr_t m_peak{};
+    uintptr_t m_start{};
+    uintptr_t m_end{};
 
-    constexpr bool Contains(uintptr_t addr) const {
-        return start <= addr && addr < end;
-    }
+private:
+    void UpdatePeakImpl(uintptr_t obj) {
+        static_assert(std::atomic_ref<uintptr_t>::is_always_lock_free);
+        std::atomic_ref<uintptr_t> peak_ref(m_peak);
 
-    constexpr std::size_t GetSlabHeapSize() const {
-        return (end - start) / GetObjectSize();
+        const uintptr_t alloc_peak = obj + this->GetObjectSize();
+        uintptr_t cur_peak = m_peak;
+        do {
+            if (alloc_peak <= cur_peak) {
+                break;
+            }
+        } while (!peak_ref.compare_exchange_strong(cur_peak, alloc_peak));
     }
 
-    constexpr std::size_t GetObjectSize() const {
-        return impl.GetObjectSize();
-    }
+public:
+    constexpr KSlabHeapBase() = default;
 
-    constexpr uintptr_t GetSlabHeapAddress() const {
-        return start;
+    bool Contains(uintptr_t address) const {
+        return m_start <= address && address < m_end;
     }
 
-    std::size_t GetObjectIndexImpl(const void* obj) const {
-        return (reinterpret_cast<uintptr_t>(obj) - start) / GetObjectSize();
+    void Initialize(size_t obj_size, void* memory, size_t memory_size) {
+        // Ensure we don't initialize a slab using null memory.
+        ASSERT(memory != nullptr);
+
+        // Set our object size.
+        m_obj_size = obj_size;
+
+        // Initialize the base allocator.
+        KSlabHeapImpl::Initialize();
+
+        // Set our tracking variables.
+        const size_t num_obj = (memory_size / obj_size);
+        m_start = reinterpret_cast<uintptr_t>(memory);
+        m_end = m_start + num_obj * obj_size;
+        m_peak = m_start;
+
+        // Free the objects.
+        u8* cur = reinterpret_cast<u8*>(m_end);
+
+        for (size_t i = 0; i < num_obj; i++) {
+            cur -= obj_size;
+            KSlabHeapImpl::Free(cur);
+        }
     }
 
-    std::size_t GetPeakIndex() const {
-        return GetObjectIndexImpl(reinterpret_cast<const void*>(peak));
+    size_t GetSlabHeapSize() const {
+        return (m_end - m_start) / this->GetObjectSize();
    }
 
-    void* AllocateImpl() {
-        return impl.Allocate();
+    size_t GetObjectSize() const {
+        return m_obj_size;
     }
 
-    void FreeImpl(void* obj) {
-        // Don't allow freeing an object that wasn't allocated from this heap
-        ASSERT(Contains(reinterpret_cast<uintptr_t>(obj)));
+    void* Allocate() {
+        void* obj = KSlabHeapImpl::Allocate();
 
-        impl.Free(obj);
+        return obj;
     }
 
-    void InitializeImpl(std::size_t obj_size, void* memory, std::size_t memory_size) {
-        // Ensure we don't initialize a slab using null memory
-        ASSERT(memory != nullptr);
-
-        // Initialize the base allocator
-        impl.Initialize(obj_size);
+    void Free(void* obj) {
+        // Don't allow freeing an object that wasn't allocated from this heap.
+        const bool contained = this->Contains(reinterpret_cast<uintptr_t>(obj));
+        ASSERT(contained);
+        KSlabHeapImpl::Free(obj);
+    }
 
-        // Set our tracking variables
-        const std::size_t num_obj = (memory_size / obj_size);
-        start = reinterpret_cast<uintptr_t>(memory);
-        end = start + num_obj * obj_size;
-        peak = start;
+    size_t GetObjectIndex(const void* obj) const {
+        if constexpr (SupportDynamicExpansion) {
+            if (!this->Contains(reinterpret_cast<uintptr_t>(obj))) {
+                return std::numeric_limits<size_t>::max();
+            }
+        }
 
-        // Free the objects
-        u8* cur = reinterpret_cast<u8*>(end);
+        return (reinterpret_cast<uintptr_t>(obj) - m_start) / this->GetObjectSize();
+    }
 
-        for (std::size_t i{}; i < num_obj; i++) {
-            cur -= obj_size;
-            impl.Free(cur);
-        }
+    size_t GetPeakIndex() const {
+        return this->GetObjectIndex(reinterpret_cast<const void*>(m_peak));
     }
 
-private:
-    using Impl = impl::KSlabHeapImpl;
+    uintptr_t GetSlabHeapAddress() const {
+        return m_start;
+    }
 
-    Impl impl;
-    uintptr_t peak{};
-    uintptr_t start{};
-    uintptr_t end{};
+    size_t GetNumRemaining() const {
+        // Only calculate the number of remaining objects under debug configuration.
+        return 0;
+    }
 };
 
 template <typename T>
-class KSlabHeap final : public KSlabHeapBase {
-public:
-    enum class AllocationType {
-        Host,
-        Guest,
-    };
+class KSlabHeap final : public KSlabHeapBase<false> {
+private:
+    using BaseHeap = KSlabHeapBase<false>;
 
-    explicit constexpr KSlabHeap(AllocationType allocation_type_ = AllocationType::Host)
-        : KSlabHeapBase(), allocation_type{allocation_type_} {}
+public:
+    constexpr KSlabHeap() = default;
 
-    void Initialize(void* memory, std::size_t memory_size) {
-        if (allocation_type == AllocationType::Guest) {
-            InitializeImpl(sizeof(T), memory, memory_size);
-        }
+    void Initialize(void* memory, size_t memory_size) {
+        BaseHeap::Initialize(sizeof(T), memory, memory_size);
     }
 
     T* Allocate() {
-        switch (allocation_type) {
-        case AllocationType::Host:
-            // Fallback for cases where we do not yet support allocating guest memory from the slab
-            // heap, such as for kernel memory regions.
-            return new T;
-
-        case AllocationType::Guest:
-            T* obj = static_cast<T*>(AllocateImpl());
-            if (obj != nullptr) {
-                new (obj) T();
-            }
-            return obj;
-        }
+        T* obj = static_cast<T*>(BaseHeap::Allocate());
 
-        UNREACHABLE_MSG("Invalid AllocationType {}", allocation_type);
-        return nullptr;
+        if (obj != nullptr) [[likely]] {
+            std::construct_at(obj);
+        }
+        return obj;
     }
 
-    T* AllocateWithKernel(KernelCore& kernel) {
-        switch (allocation_type) {
-        case AllocationType::Host:
-            // Fallback for cases where we do not yet support allocating guest memory from the slab
-            // heap, such as for kernel memory regions.
-            return new T(kernel);
+    T* Allocate(KernelCore& kernel) {
+        T* obj = static_cast<T*>(BaseHeap::Allocate());
 
-        case AllocationType::Guest:
-            T* obj = static_cast<T*>(AllocateImpl());
-            if (obj != nullptr) {
-                new (obj) T(kernel);
-            }
-            return obj;
+        if (obj != nullptr) [[likely]] {
+            std::construct_at(obj, kernel);
         }
-
-        UNREACHABLE_MSG("Invalid AllocationType {}", allocation_type);
-        return nullptr;
+        return obj;
     }
 
     void Free(T* obj) {
-        switch (allocation_type) {
-        case AllocationType::Host:
-            // Fallback for cases where we do not yet support allocating guest memory from the slab
-            // heap, such as for kernel memory regions.
-            delete obj;
-            return;
-
-        case AllocationType::Guest:
-            FreeImpl(obj);
-            return;
-        }
-
-        UNREACHABLE_MSG("Invalid AllocationType {}", allocation_type);
+        BaseHeap::Free(obj);
     }
 
-    constexpr std::size_t GetObjectIndex(const T* obj) const {
-        return GetObjectIndexImpl(obj);
+    size_t GetObjectIndex(const T* obj) const {
        return BaseHeap::GetObjectIndex(obj);
     }
-
-private:
-    const AllocationType allocation_type;
 };
 
 } // namespace Kernel
diff --git a/src/core/hle/kernel/slab_helpers.h b/src/core/hle/kernel/slab_helpers.h
index f1c11256e..dc1e48fc9 100644
--- a/src/core/hle/kernel/slab_helpers.h
+++ b/src/core/hle/kernel/slab_helpers.h
@@ -59,7 +59,7 @@ class KAutoObjectWithSlabHeapAndContainer : public Base {
 
 private:
     static Derived* Allocate(KernelCore& kernel) {
-        return kernel.SlabHeap<Derived>().AllocateWithKernel(kernel);
+        return kernel.SlabHeap<Derived>().Allocate(kernel);
     }
 
     static void Free(KernelCore& kernel, Derived* obj) {
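
For readers skimming the change: the reworked KSlabHeap hands out fixed-size slots from a caller-provided buffer through a lock-free intrusive free list, constructing objects in place with std::construct_at. Below is a minimal standalone sketch of that technique under the same idea; it is not yuzu code, and the names SimpleSlabHeap, Push, Pop, and Obj are illustrative only.

```cpp
// Sketch of a lock-free free-list slab allocator: Initialize() threads every
// slot of a caller-owned buffer onto an atomic singly linked list, Allocate()
// pops a slot with a CAS loop, and Free() pushes it back.
#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <new>

template <typename T>
class SimpleSlabHeap {
public:
    // Thread every object-sized slot of [memory, memory + memory_size) onto the free list,
    // walking backwards from the end as the commit's Initialize() does.
    void Initialize(void* memory, std::size_t memory_size) {
        assert(memory != nullptr);
        const std::size_t num_obj = memory_size / sizeof(T);
        std::uint8_t* cur = static_cast<std::uint8_t*>(memory) + num_obj * sizeof(T);
        for (std::size_t i = 0; i < num_obj; ++i) {
            cur -= sizeof(T);
            Push(cur);
        }
    }

    // Pop a slot and construct a T in it; returns nullptr when the slab is exhausted.
    T* Allocate() {
        void* slot = Pop();
        return slot != nullptr ? std::construct_at(static_cast<T*>(slot)) : nullptr;
    }

    // Destroy the object and push its slot back onto the free list.
    void Free(T* obj) {
        std::destroy_at(obj);
        Push(obj);
    }

private:
    struct Node {
        Node* next{};
    };
    static_assert(sizeof(T) >= sizeof(Node), "slots must be able to hold a free-list node");

    // Lock-free pop; like any naive Treiber stack, this sketch ignores the ABA problem.
    void* Pop() {
        Node* head = m_head.load();
        do {
            if (head == nullptr) {
                return nullptr;
            }
            // CAS retries if another thread pushed or popped concurrently.
        } while (!m_head.compare_exchange_weak(head, head->next));
        return head;
    }

    // Lock-free push of a slot onto the head of the list.
    void Push(void* slot) {
        Node* node = ::new (slot) Node;
        Node* head = m_head.load();
        do {
            node->next = head;
        } while (!m_head.compare_exchange_weak(head, node));
    }

    std::atomic<Node*> m_head{nullptr};
};

int main() {
    struct Obj {
        std::uint64_t a{};
        std::uint64_t b{};
    };

    // A fixed buffer backing a 64-object slab; alignas keeps slots suitably aligned.
    alignas(Obj) static std::uint8_t buffer[64 * sizeof(Obj)];

    SimpleSlabHeap<Obj> heap;
    heap.Initialize(buffer, sizeof(buffer));

    Obj* obj = heap.Allocate();
    assert(obj != nullptr);
    heap.Free(obj);
}
```

The KSlabHeapBase in the diff additionally tracks a peak allocation watermark with std::atomic_ref and offers an Allocate(KernelCore&) overload for objects that need the kernel instance; those details are omitted from the sketch for brevity.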