Diffstat (limited to 'src/video_core/vulkan_common')
-rw-r--r--  src/video_core/vulkan_common/vulkan_memory_allocator.cpp  232
-rw-r--r--  src/video_core/vulkan_common/vulkan_memory_allocator.h     96
2 files changed, 328 insertions, 0 deletions
diff --git a/src/video_core/vulkan_common/vulkan_memory_allocator.cpp b/src/video_core/vulkan_common/vulkan_memory_allocator.cpp
new file mode 100644
index 000000000..c1cf292af
--- /dev/null
+++ b/src/video_core/vulkan_common/vulkan_memory_allocator.cpp
@@ -0,0 +1,232 @@
+// Copyright 2018 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <algorithm>
+#include <array>
+#include <bit>
+#include <optional>
+#include <span>
+#include <utility>
+#include <vector>
+
+#include "common/alignment.h"
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "common/logging/log.h"
+#include "video_core/vulkan_common/vulkan_device.h"
+#include "video_core/vulkan_common/vulkan_memory_allocator.h"
+#include "video_core/vulkan_common/vulkan_wrapper.h"
+
+namespace Vulkan {
+namespace {
+struct Range {
+ u64 begin;
+ u64 end;
+
+ [[nodiscard]] bool Contains(u64 iterator, u64 size) const noexcept {
+ return iterator < end && begin < iterator + size;
+ }
+};
+
+[[nodiscard]] u64 GetAllocationChunkSize(u64 required_size) {
+ static constexpr std::array sizes{
+ 0x1000ULL << 10, 0x1400ULL << 10, 0x1800ULL << 10, 0x1c00ULL << 10, 0x2000ULL << 10,
+ 0x3200ULL << 10, 0x4000ULL << 10, 0x6000ULL << 10, 0x8000ULL << 10, 0xA000ULL << 10,
+ 0x10000ULL << 10, 0x18000ULL << 10, 0x20000ULL << 10,
+ };
+ static_assert(std::is_sorted(sizes.begin(), sizes.end()));
+
+ const auto it = std::ranges::lower_bound(sizes, required_size);
+ return it != sizes.end() ? *it : Common::AlignUp(required_size, 4ULL << 20);
+}
+} // Anonymous namespace
+
+class MemoryAllocation {
+public:
+ explicit MemoryAllocation(const Device& device_, vk::DeviceMemory memory_,
+ VkMemoryPropertyFlags properties_, u64 allocation_size_, u32 type_)
+ : device{device_}, memory{std::move(memory_)}, properties{properties_},
+ allocation_size{allocation_size_}, shifted_type{ShiftType(type_)} {}
+
+ [[nodiscard]] std::optional<MemoryCommit> Commit(VkDeviceSize size, VkDeviceSize alignment) {
+ const std::optional<u64> alloc = FindFreeRegion(size, alignment);
+ if (!alloc) {
+            // Signal out of memory; the caller will allocate more device memory and retry.
+ return std::nullopt;
+ }
+ const Range range{
+ .begin = *alloc,
+ .end = *alloc + size,
+ };
+ commits.insert(std::ranges::upper_bound(commits, *alloc, {}, &Range::begin), range);
+ return std::make_optional<MemoryCommit>(device, this, *memory, *alloc, *alloc + size);
+ }
+
+ void Free(u64 begin) {
+ const auto it = std::ranges::find(commits, begin, &Range::begin);
+ ASSERT_MSG(it != commits.end(), "Invalid commit");
+ commits.erase(it);
+ }
+
+ [[nodiscard]] std::span<u8> Map() {
+ if (!memory_mapped_span.empty()) {
+ return memory_mapped_span;
+ }
+ u8* const raw_pointer = memory.Map(0, allocation_size);
+ memory_mapped_span = std::span<u8>(raw_pointer, allocation_size);
+ return memory_mapped_span;
+ }
+
+ /// Returns whether this allocation is compatible with the arguments.
+ [[nodiscard]] bool IsCompatible(VkMemoryPropertyFlags wanted_properties, u32 type_mask) const {
+        // All of the wanted property flags have to be present, not just any of them.
+        return (properties & wanted_properties) == wanted_properties &&
+               (type_mask & shifted_type) != 0;
+ }
+
+private:
+ [[nodiscard]] static constexpr u32 ShiftType(u32 type) {
+ return 1U << type;
+ }
+
+ [[nodiscard]] std::optional<u64> FindFreeRegion(u64 size, u64 alignment) noexcept {
+ ASSERT(std::has_single_bit(alignment));
+ const u64 alignment_log2 = std::countr_zero(alignment);
+ std::optional<u64> candidate;
+ u64 iterator = 0;
+ auto commit = commits.begin();
+ while (iterator + size <= allocation_size) {
+ candidate = candidate.value_or(iterator);
+ if (commit == commits.end()) {
+ break;
+ }
+ if (commit->Contains(*candidate, size)) {
+ candidate = std::nullopt;
+ }
+ iterator = Common::AlignUpLog2(commit->end, alignment_log2);
+ ++commit;
+ }
+ return candidate;
+ }
+
+ const Device& device; ///< Vulkan device.
+    const vk::DeviceMemory memory; ///< Vulkan memory allocation handle.
+ const VkMemoryPropertyFlags properties; ///< Vulkan properties.
+ const u64 allocation_size; ///< Size of this allocation.
+ const u32 shifted_type; ///< Stored Vulkan type of this allocation, shifted.
+ std::vector<Range> commits; ///< All commit ranges done from this allocation.
+ std::span<u8> memory_mapped_span; ///< Memory mapped span. Empty if not queried before.
+};
+
+MemoryCommit::MemoryCommit(const Device& device_, MemoryAllocation* allocation_,
+ VkDeviceMemory memory_, u64 begin, u64 end) noexcept
+ : device{&device_}, allocation{allocation_}, memory{memory_}, interval{begin, end} {}
+
+MemoryCommit::~MemoryCommit() {
+ Release();
+}
+
+MemoryCommit& MemoryCommit::operator=(MemoryCommit&& rhs) noexcept {
+ Release();
+ device = rhs.device;
+ allocation = std::exchange(rhs.allocation, nullptr);
+ memory = rhs.memory;
+ interval = rhs.interval;
+ span = std::exchange(rhs.span, std::span<u8>{});
+ return *this;
+}
+
+MemoryCommit::MemoryCommit(MemoryCommit&& rhs) noexcept
+ : device{rhs.device}, allocation{std::exchange(rhs.allocation, nullptr)}, memory{rhs.memory},
+ interval{rhs.interval}, span{std::exchange(rhs.span, std::span<u8>{})} {}
+
+std::span<u8> MemoryCommit::Map() {
+ if (!span.empty()) {
+ return span;
+ }
+ span = allocation->Map().subspan(interval.first, interval.second - interval.first);
+ return span;
+}
+
+void MemoryCommit::Release() {
+ if (allocation) {
+ allocation->Free(interval.first);
+ }
+}
+
+MemoryAllocator::MemoryAllocator(const Device& device_)
+ : device{device_}, properties{device_.GetPhysical().GetMemoryProperties()} {}
+
+MemoryAllocator::~MemoryAllocator() = default;
+
+MemoryCommit MemoryAllocator::Commit(const VkMemoryRequirements& requirements, bool host_visible) {
+ const u64 chunk_size = GetAllocationChunkSize(requirements.size);
+
+    // When a host visible commit is requested, search for a host visible and coherent type;
+    // otherwise search for a fast device local type.
+ const VkMemoryPropertyFlags wanted_properties =
+ host_visible ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT
+ : VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ if (std::optional<MemoryCommit> commit = TryAllocCommit(requirements, wanted_properties)) {
+ return std::move(*commit);
+ }
+ // Commit has failed, allocate more memory.
+ // TODO(Rodrigo): Handle out of memory situations in some way like flushing to guest memory.
+ AllocMemory(wanted_properties, requirements.memoryTypeBits, chunk_size);
+
+    // Commit again; this time it cannot fail since a fresh allocation was made above.
+    // If it still fails, there is a bug.
+ return TryAllocCommit(requirements, wanted_properties).value();
+}
+
+MemoryCommit MemoryAllocator::Commit(const vk::Buffer& buffer, bool host_visible) {
+ auto commit = Commit(device.GetLogical().GetBufferMemoryRequirements(*buffer), host_visible);
+ buffer.BindMemory(commit.Memory(), commit.Offset());
+ return commit;
+}
+
+MemoryCommit MemoryAllocator::Commit(const vk::Image& image, bool host_visible) {
+ auto commit = Commit(device.GetLogical().GetImageMemoryRequirements(*image), host_visible);
+ image.BindMemory(commit.Memory(), commit.Offset());
+ return commit;
+}
+
+void MemoryAllocator::AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 type_mask,
+ u64 size) {
+ const u32 type = [&] {
+ for (u32 type_index = 0; type_index < properties.memoryTypeCount; ++type_index) {
+ const auto flags = properties.memoryTypes[type_index].propertyFlags;
+            if ((type_mask & (1U << type_index)) != 0 &&
+                (flags & wanted_properties) == wanted_properties) {
+                // The memory type is allowed by the mask and has all of the wanted properties.
+                return type_index;
+            }
+ }
+ UNREACHABLE_MSG("Couldn't find a compatible memory type!");
+ return 0U;
+ }();
+ vk::DeviceMemory memory = device.GetLogical().AllocateMemory({
+ .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
+ .pNext = nullptr,
+ .allocationSize = size,
+ .memoryTypeIndex = type,
+ });
+ allocations.push_back(std::make_unique<MemoryAllocation>(device, std::move(memory),
+ wanted_properties, size, type));
+}
+
+std::optional<MemoryCommit> MemoryAllocator::TryAllocCommit(
+ const VkMemoryRequirements& requirements, VkMemoryPropertyFlags wanted_properties) {
+ for (auto& allocation : allocations) {
+ if (!allocation->IsCompatible(wanted_properties, requirements.memoryTypeBits)) {
+ continue;
+ }
+ if (auto commit = allocation->Commit(requirements.size, requirements.alignment)) {
+ return commit;
+ }
+ }
+ return std::nullopt;
+}
+
+} // namespace Vulkan
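For reference, MemoryAllocation::Commit above is a first-fit search: the candidate offset starts at zero, is re-seeded at the aligned end of every commit it collides with, and is returned once no remaining commit overlaps it. The standalone sketch below (hypothetical file and names, not part of this patch) reproduces that search with power-of-two alignment so the behaviour can be checked in isolation:

// first_fit_sketch.cpp -- standalone illustration of the free-region search used by
// MemoryAllocation::FindFreeRegion. Hypothetical helper, not part of the yuzu sources.
#include <bit>
#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

namespace {
struct Range {
    std::uint64_t begin;
    std::uint64_t end;

    bool Contains(std::uint64_t iterator, std::uint64_t size) const noexcept {
        return iterator < end && begin < iterator + size;
    }
};

// Same algorithm as FindFreeRegion: 'commits' must be sorted by 'begin'.
std::optional<std::uint64_t> FindFreeRegion(const std::vector<Range>& commits,
                                            std::uint64_t allocation_size, std::uint64_t size,
                                            std::uint64_t alignment) noexcept {
    assert(std::has_single_bit(alignment));
    std::optional<std::uint64_t> candidate;
    std::uint64_t iterator = 0;
    auto commit = commits.begin();
    while (iterator + size <= allocation_size) {
        candidate = candidate.value_or(iterator);
        if (commit == commits.end()) {
            break; // No commit left that could collide with the candidate.
        }
        if (commit->Contains(*candidate, size)) {
            candidate = std::nullopt; // Collision: restart after this commit.
        }
        // Re-seed the cursor at the aligned end of the current commit.
        iterator = (commit->end + alignment - 1) & ~(alignment - 1);
        ++commit;
    }
    return candidate;
}
} // Anonymous namespace

int main() {
    const std::vector<Range> commits{{0, 0x100}, {0x180, 0x400}};
    // The gap [0x100, 0x180) fits a 0x80-byte request aligned to 0x80.
    assert(FindFreeRegion(commits, 0x1000, 0x80, 0x80) == 0x100);
    // A 0x100-byte request does not fit in the gap, so it lands after the last commit.
    assert(FindFreeRegion(commits, 0x1000, 0x100, 0x100) == 0x400);
    // A request larger than the backing allocation fails.
    assert(!FindFreeRegion(commits, 0x1000, 0x2000, 0x100));
}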
diff --git a/src/video_core/vulkan_common/vulkan_memory_allocator.h b/src/video_core/vulkan_common/vulkan_memory_allocator.h
new file mode 100644
index 000000000..69a6341e1
--- /dev/null
+++ b/src/video_core/vulkan_common/vulkan_memory_allocator.h
@@ -0,0 +1,96 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <memory>
+#include <optional>
+#include <span>
+#include <utility>
+#include <vector>
+#include "common/common_types.h"
+#include "video_core/vulkan_common/vulkan_wrapper.h"
+
+namespace Vulkan {
+
+class Device;
+class MemoryMap;
+class MemoryAllocation;
+
+class MemoryCommit final {
+public:
+ explicit MemoryCommit() noexcept = default;
+ explicit MemoryCommit(const Device& device_, MemoryAllocation* allocation_,
+ VkDeviceMemory memory_, u64 begin, u64 end) noexcept;
+ ~MemoryCommit();
+
+ MemoryCommit& operator=(MemoryCommit&&) noexcept;
+ MemoryCommit(MemoryCommit&&) noexcept;
+
+ MemoryCommit& operator=(const MemoryCommit&) = delete;
+ MemoryCommit(const MemoryCommit&) = delete;
+
+ /// Returns a host visible memory map.
+ /// It will map the backing allocation if it hasn't been mapped before.
+ std::span<u8> Map();
+
+    /// Returns the Vulkan memory handle.
+ VkDeviceMemory Memory() const {
+ return memory;
+ }
+
+ /// Returns the start position of the commit relative to the allocation.
+ VkDeviceSize Offset() const {
+ return static_cast<VkDeviceSize>(interval.first);
+ }
+
+private:
+ void Release();
+
+ const Device* device{}; ///< Vulkan device.
+ MemoryAllocation* allocation{}; ///< Pointer to the large memory allocation.
+    VkDeviceMemory memory{}; ///< Vulkan device memory handle.
+ std::pair<u64, u64> interval{}; ///< Interval where the commit exists.
+ std::span<u8> span; ///< Host visible memory span. Empty if not queried before.
+};
+
+class MemoryAllocator final {
+public:
+ explicit MemoryAllocator(const Device& device_);
+ ~MemoryAllocator();
+
+ MemoryAllocator& operator=(const MemoryAllocator&) = delete;
+ MemoryAllocator(const MemoryAllocator&) = delete;
+
+ /**
+     * Commits memory with the specified requirements.
+ *
+ * @param requirements Requirements returned from a Vulkan call.
+ * @param host_visible Signals the allocator that it *must* use host visible and coherent
+ * memory. When passing false, it will try to allocate device local memory.
+ *
+ * @returns A memory commit.
+ */
+ MemoryCommit Commit(const VkMemoryRequirements& requirements, bool host_visible);
+
+ /// Commits memory required by the buffer and binds it.
+ MemoryCommit Commit(const vk::Buffer& buffer, bool host_visible);
+
+ /// Commits memory required by the image and binds it.
+ MemoryCommit Commit(const vk::Image& image, bool host_visible);
+
+private:
+ /// Allocates a chunk of memory.
+ void AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 type_mask, u64 size);
+
+ /// Tries to allocate a memory commit.
+ std::optional<MemoryCommit> TryAllocCommit(const VkMemoryRequirements& requirements,
+ VkMemoryPropertyFlags wanted_properties);
+
+ const Device& device; ///< Device handler.
+ const VkPhysicalDeviceMemoryProperties properties; ///< Physical device properties.
+ std::vector<std::unique_ptr<MemoryAllocation>> allocations; ///< Current allocations.
+};
+
+} // namespace Vulkan
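For context, the intended usage is one MemoryAllocator per logical device and one MemoryCommit per buffer or image; a commit releases its sub-range when it goes out of scope. The sketch below is illustrative only: MakeStagingBuffer is a hypothetical helper, and vk::Device::CreateBuffer is assumed to be available in the wrapper, as it is used elsewhere in the video core. GetLogical(), Commit() and Map() are taken from this patch.

// Usage sketch (illustrative, not part of this patch): create a host-visible staging
// buffer, bind memory through the allocator, and copy data into the mapped sub-span.
#include <cstring>
#include <span>
#include <utility>

#include "common/common_types.h"
#include "video_core/vulkan_common/vulkan_device.h"
#include "video_core/vulkan_common/vulkan_memory_allocator.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"

namespace Vulkan {

[[nodiscard]] std::pair<vk::Buffer, MemoryCommit> MakeStagingBuffer(
    const Device& device, MemoryAllocator& allocator, std::span<const u8> data) {
    vk::Buffer buffer = device.GetLogical().CreateBuffer({
        .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
        .pNext = nullptr,
        .flags = 0,
        .size = data.size_bytes(),
        .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
        .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
        .queueFamilyIndexCount = 0,
        .pQueueFamilyIndices = nullptr,
    });
    // host_visible=true selects a HOST_VISIBLE | HOST_COHERENT type and binds the buffer.
    MemoryCommit commit = allocator.Commit(buffer, true);
    // Map() lazily maps the backing allocation and returns this commit's sub-span.
    std::memcpy(commit.Map().data(), data.data(), data.size_bytes());
    return {std::move(buffer), std::move(commit)};
}

} // namespace Vulkan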