summary refs log tree commit diff stats
diff options
context:
space:
mode:
m---------  externals/Vulkan-Headers                                  0
-rw-r--r--  src/video_core/CMakeLists.txt                             6
-rw-r--r--  src/video_core/dma_pusher.cpp                            57
-rw-r--r--  src/video_core/dma_pusher.h                               5
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer_cache.cpp    2
-rw-r--r--  src/video_core/renderer_vulkan/vk_memory_manager.cpp    252
-rw-r--r--  src/video_core/renderer_vulkan/vk_memory_manager.h       87
-rw-r--r--  src/video_core/renderer_vulkan/vk_scheduler.cpp          60
-rw-r--r--  src/video_core/renderer_vulkan/vk_scheduler.h            69
9 files changed, 508 insertions(+), 30 deletions(-)
diff --git a/externals/Vulkan-Headers b/externals/Vulkan-Headers
-Subproject 7f02d9bb810f371de0fe833c80004c34f7ff8c5
+Subproject 15e5c4db7500b936ae758236f2e72fc1aec2202
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index 59319f206..6036d6ed3 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -106,8 +106,12 @@ if (ENABLE_VULKAN)
renderer_vulkan/declarations.h
renderer_vulkan/vk_device.cpp
renderer_vulkan/vk_device.h
+ renderer_vulkan/vk_memory_manager.cpp
+ renderer_vulkan/vk_memory_manager.h
renderer_vulkan/vk_resource_manager.cpp
- renderer_vulkan/vk_resource_manager.h)
+ renderer_vulkan/vk_resource_manager.h
+ renderer_vulkan/vk_scheduler.cpp
+ renderer_vulkan/vk_scheduler.h)
target_include_directories(video_core PRIVATE ../../externals/Vulkan-Headers/include)
target_compile_definitions(video_core PRIVATE HAS_VULKAN)
diff --git a/src/video_core/dma_pusher.cpp b/src/video_core/dma_pusher.cpp
index eb9bf1878..669541b4b 100644
--- a/src/video_core/dma_pusher.cpp
+++ b/src/video_core/dma_pusher.cpp
@@ -33,18 +33,36 @@ void DmaPusher::DispatchCalls() {
}
bool DmaPusher::Step() {
- if (dma_get != dma_put) {
- // Push buffer non-empty, read a word
- const auto address = gpu.MemoryManager().GpuToCpuAddress(dma_get);
- ASSERT_MSG(address, "Invalid GPU address");
+ if (!ib_enable || dma_pushbuffer.empty()) {
+ // pushbuffer empty and IB empty or nonexistent - nothing to do
+ return false;
+ }
- const CommandHeader command_header{Memory::Read32(*address)};
+ const CommandList& command_list{dma_pushbuffer.front()};
+ const CommandListHeader& command_list_header{command_list[dma_pushbuffer_subindex++]};
+ GPUVAddr dma_get = command_list_header.addr;
+ GPUVAddr dma_put = dma_get + command_list_header.size * sizeof(u32);
+ bool non_main = command_list_header.is_non_main;
- dma_get += sizeof(u32);
+ if (dma_pushbuffer_subindex >= command_list.size()) {
+ // We've gone through the current list, remove it from the queue
+ dma_pushbuffer.pop();
+ dma_pushbuffer_subindex = 0;
+ }
- if (!non_main) {
- dma_mget = dma_get;
- }
+ if (command_list_header.size == 0) {
+ return true;
+ }
+
+ // Push buffer non-empty, read a word
+ const auto address = gpu.MemoryManager().GpuToCpuAddress(dma_get);
+ ASSERT_MSG(address, "Invalid GPU address");
+
+ command_headers.resize(command_list_header.size);
+
+ Memory::ReadBlock(*address, command_headers.data(), command_list_header.size * sizeof(u32));
+
+ for (const CommandHeader& command_header : command_headers) {
// now, see if we're in the middle of a command
if (dma_state.length_pending) {
@@ -91,22 +109,11 @@ bool DmaPusher::Step() {
break;
}
}
- } else if (ib_enable && !dma_pushbuffer.empty()) {
- // Current pushbuffer empty, but we have more IB entries to read
- const CommandList& command_list{dma_pushbuffer.front()};
- const CommandListHeader& command_list_header{command_list[dma_pushbuffer_subindex++]};
- dma_get = command_list_header.addr;
- dma_put = dma_get + command_list_header.size * sizeof(u32);
- non_main = command_list_header.is_non_main;
-
- if (dma_pushbuffer_subindex >= command_list.size()) {
- // We've gone through the current list, remove it from the queue
- dma_pushbuffer.pop();
- dma_pushbuffer_subindex = 0;
- }
- } else {
- // Otherwise, pushbuffer empty and IB empty or nonexistent - nothing to do
- return {};
+ }
+
+ if (!non_main) {
+ // TODO (degasus): This is dead code, as dma_mget is never read.
+ dma_mget = dma_put;
}
return true;
diff --git a/src/video_core/dma_pusher.h b/src/video_core/dma_pusher.h
index 1097e5c49..27a36348c 100644
--- a/src/video_core/dma_pusher.h
+++ b/src/video_core/dma_pusher.h
@@ -75,6 +75,8 @@ private:
GPU& gpu;
+ std::vector<CommandHeader> command_headers; ///< Buffer for list of commands fetched at once
+
std::queue<CommandList> dma_pushbuffer; ///< Queue of command lists to be processed
std::size_t dma_pushbuffer_subindex{}; ///< Index within a command list within the pushbuffer
@@ -89,11 +91,8 @@ private:
DmaState dma_state{};
bool dma_increment_once{};
- GPUVAddr dma_put{}; ///< pushbuffer current end address
- GPUVAddr dma_get{}; ///< pushbuffer current read address
GPUVAddr dma_mget{}; ///< main pushbuffer last read address
bool ib_enable{true}; ///< IB mode enabled
- bool non_main{}; ///< non-main pushbuffer active
};
} // namespace Tegra
diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp
index 59f671048..74200914e 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp
@@ -423,7 +423,7 @@ void SwizzleFunc(const MortonSwizzleMode& mode, const SurfaceParams& params,
for (u32 i = 0; i < params.depth; i++) {
MortonSwizzle(mode, params.pixel_format, params.MipWidth(mip_level),
params.MipBlockHeight(mip_level), params.MipHeight(mip_level),
- params.MipBlockDepth(mip_level), params.tile_width_spacing, 1,
+ params.MipBlockDepth(mip_level), 1, params.tile_width_spacing,
gl_buffer.data() + offset_gl, gl_size, params.addr + offset);
offset += layer_size;
offset_gl += gl_size;
diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.cpp b/src/video_core/renderer_vulkan/vk_memory_manager.cpp
new file mode 100644
index 000000000..17ee93b91
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_memory_manager.cpp
@@ -0,0 +1,252 @@
+// Copyright 2018 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <algorithm>
+#include <optional>
+#include <tuple>
+#include <vector>
+#include "common/alignment.h"
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "common/logging/log.h"
+#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/vk_device.h"
+#include "video_core/renderer_vulkan/vk_memory_manager.h"
+
+namespace Vulkan {
+
+// TODO(Rodrigo): Fine tune this number
+constexpr u64 ALLOC_CHUNK_SIZE = 64 * 1024 * 1024;
+
+class VKMemoryAllocation final {
+public:
+ explicit VKMemoryAllocation(const VKDevice& device, vk::DeviceMemory memory,
+ vk::MemoryPropertyFlags properties, u64 alloc_size, u32 type)
+ : device{device}, memory{memory}, properties{properties}, alloc_size{alloc_size},
+ shifted_type{ShiftType(type)}, is_mappable{properties &
+ vk::MemoryPropertyFlagBits::eHostVisible} {
+ if (is_mappable) {
+ const auto dev = device.GetLogical();
+ const auto& dld = device.GetDispatchLoader();
+ base_address = static_cast<u8*>(dev.mapMemory(memory, 0, alloc_size, {}, dld));
+ }
+ }
+
+ ~VKMemoryAllocation() {
+ const auto dev = device.GetLogical();
+ const auto& dld = device.GetDispatchLoader();
+ if (is_mappable)
+ dev.unmapMemory(memory, dld);
+ dev.free(memory, nullptr, dld);
+ }
+
+ VKMemoryCommit Commit(vk::DeviceSize commit_size, vk::DeviceSize alignment) {
+ auto found = TryFindFreeSection(free_iterator, alloc_size, static_cast<u64>(commit_size),
+ static_cast<u64>(alignment));
+ if (!found) {
+ found = TryFindFreeSection(0, free_iterator, static_cast<u64>(commit_size),
+ static_cast<u64>(alignment));
+ if (!found) {
+ // Signal out of memory, it'll try to do more allocations.
+ return nullptr;
+ }
+ }
+ u8* address = is_mappable ? base_address + *found : nullptr;
+ auto commit = std::make_unique<VKMemoryCommitImpl>(this, memory, address, *found,
+ *found + commit_size);
+ commits.push_back(commit.get());
+
+ // Last commit's address is highly probable to be free.
+ free_iterator = *found + commit_size;
+
+ return commit;
+ }
+
+ void Free(const VKMemoryCommitImpl* commit) {
+ ASSERT(commit);
+ const auto it =
+ std::find_if(commits.begin(), commits.end(),
+ [&](const auto& stored_commit) { return stored_commit == commit; });
+ if (it == commits.end()) {
+ LOG_CRITICAL(Render_Vulkan, "Freeing unallocated commit!");
+ UNREACHABLE();
+ return;
+ }
+ commits.erase(it);
+ }
+
+ /// Returns whether this allocation is compatible with the arguments.
+ bool IsCompatible(vk::MemoryPropertyFlags wanted_properties, u32 type_mask) const {
+ return (wanted_properties & properties) != vk::MemoryPropertyFlagBits(0) &&
+ (type_mask & shifted_type) != 0;
+ }
+
+private:
+ static constexpr u32 ShiftType(u32 type) {
+ return 1U << type;
+ }
+
+ /// A memory allocator, it may return a free region between "start" and "end" with the solicited
+ /// requeriments.
+ std::optional<u64> TryFindFreeSection(u64 start, u64 end, u64 size, u64 alignment) const {
+ u64 iterator = start;
+ while (iterator + size < end) {
+ const u64 try_left = Common::AlignUp(iterator, alignment);
+ const u64 try_right = try_left + size;
+
+ bool overlap = false;
+ for (const auto& commit : commits) {
+ const auto [commit_left, commit_right] = commit->interval;
+ if (try_left < commit_right && commit_left < try_right) {
+ // There's an overlap, continue the search where the overlapping commit ends.
+ iterator = commit_right;
+ overlap = true;
+ break;
+ }
+ }
+ if (!overlap) {
+ // A free address has been found.
+ return try_left;
+ }
+ }
+ // No free regions where found, return an empty optional.
+ return std::nullopt;
+ }
+
+ const VKDevice& device; ///< Vulkan device.
+ const vk::DeviceMemory memory; ///< Vulkan memory allocation handler.
+ const vk::MemoryPropertyFlags properties; ///< Vulkan properties.
+ const u64 alloc_size; ///< Size of this allocation.
+ const u32 shifted_type; ///< Stored Vulkan type of this allocation, shifted.
+ const bool is_mappable; ///< Whether the allocation is mappable.
+
+ /// Base address of the mapped pointer.
+ u8* base_address{};
+
+ /// Hints where the next free region is likely going to be.
+ u64 free_iterator{};
+
+ /// Stores all commits done from this allocation.
+ std::vector<const VKMemoryCommitImpl*> commits;
+};
+
+VKMemoryManager::VKMemoryManager(const VKDevice& device)
+ : device{device}, props{device.GetPhysical().getMemoryProperties(device.GetDispatchLoader())},
+ is_memory_unified{GetMemoryUnified(props)} {}
+
+VKMemoryManager::~VKMemoryManager() = default;
+
+VKMemoryCommit VKMemoryManager::Commit(const vk::MemoryRequirements& reqs, bool host_visible) {
+ ASSERT(reqs.size < ALLOC_CHUNK_SIZE);
+
+ // When a host visible commit is asked, search for host visible and coherent, otherwise search
+ // for a fast device local type.
+ const vk::MemoryPropertyFlags wanted_properties =
+ host_visible
+ ? vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent
+ : vk::MemoryPropertyFlagBits::eDeviceLocal;
+
+ const auto TryCommit = [&]() -> VKMemoryCommit {
+ for (auto& alloc : allocs) {
+ if (!alloc->IsCompatible(wanted_properties, reqs.memoryTypeBits))
+ continue;
+
+ if (auto commit = alloc->Commit(reqs.size, reqs.alignment); commit) {
+ return commit;
+ }
+ }
+ return {};
+ };
+
+ if (auto commit = TryCommit(); commit) {
+ return commit;
+ }
+
+ // Commit has failed, allocate more memory.
+ if (!AllocMemory(wanted_properties, reqs.memoryTypeBits, ALLOC_CHUNK_SIZE)) {
+ // TODO(Rodrigo): Try to use host memory.
+ LOG_CRITICAL(Render_Vulkan, "Ran out of memory!");
+ UNREACHABLE();
+ }
+
+ // Commit again, this time it won't fail since there's a fresh allocation above. If it does,
+ // there's a bug.
+ auto commit = TryCommit();
+ ASSERT(commit);
+ return commit;
+}
+
+VKMemoryCommit VKMemoryManager::Commit(vk::Buffer buffer, bool host_visible) {
+ const auto dev = device.GetLogical();
+ const auto& dld = device.GetDispatchLoader();
+ const auto requeriments = dev.getBufferMemoryRequirements(buffer, dld);
+ auto commit = Commit(requeriments, host_visible);
+ dev.bindBufferMemory(buffer, commit->GetMemory(), commit->GetOffset(), dld);
+ return commit;
+}
+
+VKMemoryCommit VKMemoryManager::Commit(vk::Image image, bool host_visible) {
+ const auto dev = device.GetLogical();
+ const auto& dld = device.GetDispatchLoader();
+ const auto requeriments = dev.getImageMemoryRequirements(image, dld);
+ auto commit = Commit(requeriments, host_visible);
+ dev.bindImageMemory(image, commit->GetMemory(), commit->GetOffset(), dld);
+ return commit;
+}
+
+bool VKMemoryManager::AllocMemory(vk::MemoryPropertyFlags wanted_properties, u32 type_mask,
+ u64 size) {
+ const u32 type = [&]() {
+ for (u32 type_index = 0; type_index < props.memoryTypeCount; ++type_index) {
+ const auto flags = props.memoryTypes[type_index].propertyFlags;
+ if ((type_mask & (1U << type_index)) && (flags & wanted_properties)) {
+ // The type matches in type and in the wanted properties.
+ return type_index;
+ }
+ }
+ LOG_CRITICAL(Render_Vulkan, "Couldn't find a compatible memory type!");
+ UNREACHABLE();
+ return 0u;
+ }();
+
+ const auto dev = device.GetLogical();
+ const auto& dld = device.GetDispatchLoader();
+
+ // Try to allocate found type.
+ const vk::MemoryAllocateInfo memory_ai(size, type);
+ vk::DeviceMemory memory;
+ if (const vk::Result res = dev.allocateMemory(&memory_ai, nullptr, &memory, dld);
+ res != vk::Result::eSuccess) {
+ LOG_CRITICAL(Render_Vulkan, "Device allocation failed with code {}!", vk::to_string(res));
+ return false;
+ }
+ allocs.push_back(
+ std::make_unique<VKMemoryAllocation>(device, memory, wanted_properties, size, type));
+ return true;
+}
+
+/*static*/ bool VKMemoryManager::GetMemoryUnified(const vk::PhysicalDeviceMemoryProperties& props) {
+ for (u32 heap_index = 0; heap_index < props.memoryHeapCount; ++heap_index) {
+ if (!(props.memoryHeaps[heap_index].flags & vk::MemoryHeapFlagBits::eDeviceLocal)) {
+ // Memory is considered unified when heaps are device local only.
+ return false;
+ }
+ }
+ return true;
+}
+
+VKMemoryCommitImpl::VKMemoryCommitImpl(VKMemoryAllocation* allocation, vk::DeviceMemory memory,
+ u8* data, u64 begin, u64 end)
+ : allocation{allocation}, memory{memory}, data{data}, interval(std::make_pair(begin, end)) {}
+
+VKMemoryCommitImpl::~VKMemoryCommitImpl() {
+ allocation->Free(this);
+}
+
+u8* VKMemoryCommitImpl::GetData() const {
+ ASSERT_MSG(data != nullptr, "Trying to access an unmapped commit.");
+ return data;
+}
+
+} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.h b/src/video_core/renderer_vulkan/vk_memory_manager.h
new file mode 100644
index 000000000..073597b35
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_memory_manager.h
@@ -0,0 +1,87 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <memory>
+#include <utility>
+#include <vector>
+#include "common/common_types.h"
+#include "video_core/renderer_vulkan/declarations.h"
+
+namespace Vulkan {
+
+class VKDevice;
+class VKMemoryAllocation;
+class VKMemoryCommitImpl;
+
+using VKMemoryCommit = std::unique_ptr<VKMemoryCommitImpl>;
+
+class VKMemoryManager final {
+public:
+ explicit VKMemoryManager(const VKDevice& device);
+ ~VKMemoryManager();
+
+ /**
+ * Commits a memory with the specified requeriments.
+ * @param reqs Requeriments returned from a Vulkan call.
+ * @param host_visible Signals the allocator that it *must* use host visible and coherent
+ * memory. When passing false, it will try to allocate device local memory.
+ * @returns A memory commit.
+ */
+ VKMemoryCommit Commit(const vk::MemoryRequirements& reqs, bool host_visible);
+
+ /// Commits memory required by the buffer and binds it.
+ VKMemoryCommit Commit(vk::Buffer buffer, bool host_visible);
+
+ /// Commits memory required by the image and binds it.
+ VKMemoryCommit Commit(vk::Image image, bool host_visible);
+
+ /// Returns true if the memory allocations are done always in host visible and coherent memory.
+ bool IsMemoryUnified() const {
+ return is_memory_unified;
+ }
+
+private:
+ /// Allocates a chunk of memory.
+ bool AllocMemory(vk::MemoryPropertyFlags wanted_properties, u32 type_mask, u64 size);
+
+ /// Returns true if the device uses an unified memory model.
+ static bool GetMemoryUnified(const vk::PhysicalDeviceMemoryProperties& props);
+
+ const VKDevice& device; ///< Device handler.
+ const vk::PhysicalDeviceMemoryProperties props; ///< Physical device properties.
+ const bool is_memory_unified; ///< True if memory model is unified.
+ std::vector<std::unique_ptr<VKMemoryAllocation>> allocs; ///< Current allocations.
+};
+
+class VKMemoryCommitImpl final {
+ friend VKMemoryAllocation;
+
+public:
+ explicit VKMemoryCommitImpl(VKMemoryAllocation* allocation, vk::DeviceMemory memory, u8* data,
+ u64 begin, u64 end);
+ ~VKMemoryCommitImpl();
+
+ /// Returns the writeable memory map. The commit has to be mappable.
+ u8* GetData() const;
+
+ /// Returns the Vulkan memory handler.
+ vk::DeviceMemory GetMemory() const {
+ return memory;
+ }
+
+ /// Returns the start position of the commit relative to the allocation.
+ vk::DeviceSize GetOffset() const {
+ return static_cast<vk::DeviceSize>(interval.first);
+ }
+
+private:
+ std::pair<u64, u64> interval{}; ///< Interval where the commit exists.
+ vk::DeviceMemory memory; ///< Vulkan device memory handler.
+ VKMemoryAllocation* allocation{}; ///< Pointer to the large memory allocation.
+ u8* data{}; ///< Pointer to the host mapped memory, it has the commit offset included.
+};
+
+} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.cpp b/src/video_core/renderer_vulkan/vk_scheduler.cpp
new file mode 100644
index 000000000..f1fea1871
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_scheduler.cpp
@@ -0,0 +1,60 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/assert.h"
+#include "common/logging/log.h"
+#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/vk_device.h"
+#include "video_core/renderer_vulkan/vk_resource_manager.h"
+#include "video_core/renderer_vulkan/vk_scheduler.h"
+
+namespace Vulkan {
+
+VKScheduler::VKScheduler(const VKDevice& device, VKResourceManager& resource_manager)
+ : device{device}, resource_manager{resource_manager} {
+ next_fence = &resource_manager.CommitFence();
+ AllocateNewContext();
+}
+
+VKScheduler::~VKScheduler() = default;
+
+VKExecutionContext VKScheduler::GetExecutionContext() const {
+ return VKExecutionContext(current_fence, current_cmdbuf);
+}
+
+VKExecutionContext VKScheduler::Flush(vk::Semaphore semaphore) {
+ SubmitExecution(semaphore);
+ current_fence->Release();
+ AllocateNewContext();
+ return GetExecutionContext();
+}
+
+VKExecutionContext VKScheduler::Finish(vk::Semaphore semaphore) {
+ SubmitExecution(semaphore);
+ current_fence->Wait();
+ current_fence->Release();
+ AllocateNewContext();
+ return GetExecutionContext();
+}
+
+void VKScheduler::SubmitExecution(vk::Semaphore semaphore) {
+ const auto& dld = device.GetDispatchLoader();
+ current_cmdbuf.end(dld);
+
+ const auto queue = device.GetGraphicsQueue();
+ const vk::SubmitInfo submit_info(0, nullptr, nullptr, 1, &current_cmdbuf, semaphore ? 1u : 0u,
+ &semaphore);
+ queue.submit({submit_info}, *current_fence, dld);
+}
+
+void VKScheduler::AllocateNewContext() {
+ current_fence = next_fence;
+ current_cmdbuf = resource_manager.CommitCommandBuffer(*current_fence);
+ next_fence = &resource_manager.CommitFence();
+
+ const auto& dld = device.GetDispatchLoader();
+ current_cmdbuf.begin({vk::CommandBufferUsageFlagBits::eOneTimeSubmit}, dld);
+}
+
+} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.h b/src/video_core/renderer_vulkan/vk_scheduler.h
new file mode 100644
index 000000000..cfaf5376f
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_scheduler.h
@@ -0,0 +1,69 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "common/common_types.h"
+#include "video_core/renderer_vulkan/declarations.h"
+
+namespace Vulkan {
+
+class VKDevice;
+class VKExecutionContext;
+class VKFence;
+class VKResourceManager;
+
+/// The scheduler abstracts command buffer and fence management with an interface that's able to do
+/// OpenGL-like operations on Vulkan command buffers.
+class VKScheduler {
+public:
+ explicit VKScheduler(const VKDevice& device, VKResourceManager& resource_manager);
+ ~VKScheduler();
+
+ /// Gets the current execution context.
+ [[nodiscard]] VKExecutionContext GetExecutionContext() const;
+
+ /// Sends the current execution context to the GPU. It invalidates the current execution context
+ /// and returns a new one.
+ VKExecutionContext Flush(vk::Semaphore semaphore = nullptr);
+
+ /// Sends the current execution context to the GPU and waits for it to complete. It invalidates
+ /// the current execution context and returns a new one.
+ VKExecutionContext Finish(vk::Semaphore semaphore = nullptr);
+
+private:
+ void SubmitExecution(vk::Semaphore semaphore);
+
+ void AllocateNewContext();
+
+ const VKDevice& device;
+ VKResourceManager& resource_manager;
+ vk::CommandBuffer current_cmdbuf;
+ VKFence* current_fence = nullptr;
+ VKFence* next_fence = nullptr;
+};
+
+class VKExecutionContext {
+ friend class VKScheduler;
+
+public:
+ VKExecutionContext() = default;
+
+ VKFence& GetFence() const {
+ return *fence;
+ }
+
+ vk::CommandBuffer GetCommandBuffer() const {
+ return cmdbuf;
+ }
+
+private:
+ explicit VKExecutionContext(VKFence* fence, vk::CommandBuffer cmdbuf)
+ : fence{fence}, cmdbuf{cmdbuf} {}
+
+ VKFence* fence{};
+ vk::CommandBuffer cmdbuf;
+};
+
+} // namespace Vulkan