From 58b0ae84b56996304b6ad373e4f6cff2cf6bdd41 Mon Sep 17 00:00:00 2001
From: ReinUsesLisp
Date: Thu, 10 Sep 2020 03:43:30 -0300
Subject: renderer_vulkan: Make unconditional use of VK_KHR_timeline_semaphore

This reworks how host<->device synchronization works on the Vulkan
backend. Instead of "protecting" resources with a fence and signalling
these as free when the fence is known to have been signalled by the
GPU, use timeline semaphores.

Vulkan timeline semaphores give us a subset of the functionality of
D3D12 fences. As far as we are concerned, a timeline semaphore is a
value set by the host or the device that either of them can wait on.

Taking advantage of this, we keep a monotonically increasing atomic
value that is bumped on each submission to the graphics queue. Instead
of protecting resources with a fence, we simply store the current
logical tick (the atomic value kept in CPU memory) in the resource.
When we want to know whether a resource is free, we compare its stored
tick against the current GPU tick.

This greatly simplifies resource management code, and the free status
of resources should have fewer false negatives.

To work around bugs in the validation layers, when these are attached
there is a thread waiting on the timeline semaphore.
---
 src/video_core/renderer_vulkan/vk_scheduler.cpp | 77 ++++++++++++++++---------
 1 file changed, 50 insertions(+), 27 deletions(-)

(limited to 'src/video_core/renderer_vulkan/vk_scheduler.cpp')

diff --git a/src/video_core/renderer_vulkan/vk_scheduler.cpp b/src/video_core/renderer_vulkan/vk_scheduler.cpp
index dbbd0961a..1a483dc71 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.cpp
+++ b/src/video_core/renderer_vulkan/vk_scheduler.cpp
@@ -10,9 +10,10 @@
 
 #include "common/microprofile.h"
 #include "common/thread.h"
+#include "video_core/renderer_vulkan/vk_command_pool.h"
 #include "video_core/renderer_vulkan/vk_device.h"
+#include "video_core/renderer_vulkan/vk_master_semaphore.h"
 #include "video_core/renderer_vulkan/vk_query_cache.h"
-#include "video_core/renderer_vulkan/vk_resource_manager.h"
 #include "video_core/renderer_vulkan/vk_scheduler.h"
 #include "video_core/renderer_vulkan/vk_state_tracker.h"
 #include "video_core/renderer_vulkan/wrapper.h"
@@ -35,10 +36,10 @@ void VKScheduler::CommandChunk::ExecuteAll(vk::CommandBuffer cmdbuf) {
     last = nullptr;
 }
 
-VKScheduler::VKScheduler(const VKDevice& device, VKResourceManager& resource_manager,
-                         StateTracker& state_tracker)
-    : device{device}, resource_manager{resource_manager}, state_tracker{state_tracker},
-      next_fence{&resource_manager.CommitFence()} {
+VKScheduler::VKScheduler(const VKDevice& device_, StateTracker& state_tracker_)
+    : device{device_}, state_tracker{state_tracker_},
+      master_semaphore{std::make_unique<MasterSemaphore>(device)},
+      command_pool{std::make_unique<CommandPool>(*master_semaphore, device)} {
     AcquireNewChunk();
     AllocateNewContext();
     worker_thread = std::thread(&VKScheduler::WorkerThread, this);
@@ -50,20 +51,27 @@ VKScheduler::~VKScheduler() {
     worker_thread.join();
 }
 
-void VKScheduler::Flush(bool release_fence, VkSemaphore semaphore) {
+u64 VKScheduler::CurrentTick() const noexcept {
+    return master_semaphore->CurrentTick();
+}
+
+bool VKScheduler::IsFree(u64 tick) const noexcept {
+    return master_semaphore->IsFree(tick);
+}
+
+void VKScheduler::Wait(u64 tick) {
+    master_semaphore->Wait(tick);
+}
+
+void VKScheduler::Flush(VkSemaphore semaphore) {
     SubmitExecution(semaphore);
-    if (release_fence) {
-        current_fence->Release();
-    }
     AllocateNewContext();
 }
 
-void VKScheduler::Finish(bool release_fence, VkSemaphore semaphore) {
+void VKScheduler::Finish(VkSemaphore semaphore) {
+    const u64 presubmit_tick = CurrentTick();
     SubmitExecution(semaphore);
-    current_fence->Wait();
-    if (release_fence) {
-        current_fence->Release();
-    }
+    Wait(presubmit_tick);
     AllocateNewContext();
 }
 
@@ -160,18 +168,38 @@ void VKScheduler::SubmitExecution(VkSemaphore semaphore) {
 
     current_cmdbuf.End();
 
+    const VkSemaphore timeline_semaphore = master_semaphore->Handle();
+    const u32 num_signal_semaphores = semaphore ? 2U : 1U;
+
+    const u64 signal_value = master_semaphore->CurrentTick();
+    const u64 wait_value = signal_value - 1;
+    const VkPipelineStageFlags wait_stage_mask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
+
+    master_semaphore->NextTick();
+
+    const std::array signal_values{signal_value, u64(0)};
+    const std::array signal_semaphores{timeline_semaphore, semaphore};
+
+    const VkTimelineSemaphoreSubmitInfoKHR timeline_si{
+        .sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR,
+        .pNext = nullptr,
+        .waitSemaphoreValueCount = 1,
+        .pWaitSemaphoreValues = &wait_value,
+        .signalSemaphoreValueCount = num_signal_semaphores,
+        .pSignalSemaphoreValues = signal_values.data(),
+    };
     const VkSubmitInfo submit_info{
         .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
-        .pNext = nullptr,
-        .waitSemaphoreCount = 0,
-        .pWaitSemaphores = nullptr,
-        .pWaitDstStageMask = nullptr,
+        .pNext = &timeline_si,
+        .waitSemaphoreCount = 1,
+        .pWaitSemaphores = &timeline_semaphore,
+        .pWaitDstStageMask = &wait_stage_mask,
         .commandBufferCount = 1,
         .pCommandBuffers = current_cmdbuf.address(),
-        .signalSemaphoreCount = semaphore ? 1U : 0U,
-        .pSignalSemaphores = &semaphore,
+        .signalSemaphoreCount = num_signal_semaphores,
+        .pSignalSemaphores = signal_semaphores.data(),
     };
-    switch (const VkResult result = device.GetGraphicsQueue().Submit(submit_info, *current_fence)) {
+    switch (const VkResult result = device.GetGraphicsQueue().Submit(submit_info)) {
     case VK_SUCCESS:
         break;
     case VK_ERROR_DEVICE_LOST:
@@ -183,14 +211,9 @@ void VKScheduler::SubmitExecution(VkSemaphore semaphore) {
 }
 
 void VKScheduler::AllocateNewContext() {
-    ++ticks;
-
     std::unique_lock lock{mutex};
-    current_fence = next_fence;
-    next_fence = &resource_manager.CommitFence();
 
-    current_cmdbuf = vk::CommandBuffer(resource_manager.CommitCommandBuffer(*current_fence),
-                                       device.GetDispatchLoader());
+    current_cmdbuf = vk::CommandBuffer(command_pool->Commit(), device.GetDispatchLoader());
     current_cmdbuf.Begin({
        .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
        .pNext = nullptr,
-- 
cgit v1.2.3
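
The following is a minimal, self-contained sketch of the tick mechanism the commit message describes, written against the raw VK_KHR_timeline_semaphore API rather than yuzu's wrappers. It is not the patch's MasterSemaphore implementation; the TimelineGate name, the constructor that receives already-loaded KHR entry points, and the omitted error handling are assumptions made purely for illustration.

#include <atomic>
#include <cstdint>
#include <limits>
#include <vulkan/vulkan.h>

// Illustrative stand-in for the patch's master semaphore: one timeline
// semaphore plus a host-side atomic "logical tick" counter.
class TimelineGate {
public:
    // The PFN_* entry points are assumed to have been loaded with
    // vkGetDeviceProcAddr by the caller.
    TimelineGate(VkDevice device, PFN_vkGetSemaphoreCounterValueKHR get_value,
                 PFN_vkWaitSemaphoresKHR wait_semaphores)
        : device_{device}, get_value_{get_value}, wait_semaphores_{wait_semaphores} {
        const VkSemaphoreTypeCreateInfoKHR type_ci{
            .sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR,
            .pNext = nullptr,
            .semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE_KHR,
            .initialValue = 0,
        };
        const VkSemaphoreCreateInfo ci{
            .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
            .pNext = &type_ci,
            .flags = 0,
        };
        vkCreateSemaphore(device_, &ci, nullptr, &semaphore_); // error handling elided
    }

    ~TimelineGate() {
        vkDestroySemaphore(device_, semaphore_, nullptr);
    }

    VkSemaphore Handle() const noexcept { return semaphore_; }

    // Logical tick that the next queue submission will signal; kept in CPU memory.
    uint64_t CurrentTick() const noexcept { return current_tick_.load(std::memory_order_acquire); }

    // Advance the logical tick; called once per submission to the graphics queue.
    uint64_t NextTick() noexcept { return current_tick_.fetch_add(1, std::memory_order_acq_rel); }

    // A tick is free once the device-side counter has reached it.
    bool IsFree(uint64_t tick) const {
        uint64_t gpu_tick = 0;
        get_value_(device_, semaphore_, &gpu_tick);
        return gpu_tick >= tick;
    }

    // Block the host until the device signals the requested tick.
    void Wait(uint64_t tick) const {
        const VkSemaphoreWaitInfoKHR wait_info{
            .sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO_KHR,
            .pNext = nullptr,
            .flags = 0,
            .semaphoreCount = 1,
            .pSemaphores = &semaphore_,
            .pValues = &tick,
        };
        wait_semaphores_(device_, &wait_info, std::numeric_limits<uint64_t>::max());
    }

private:
    VkDevice device_ = VK_NULL_HANDLE;
    VkSemaphore semaphore_ = VK_NULL_HANDLE;
    PFN_vkGetSemaphoreCounterValueKHR get_value_ = nullptr;
    PFN_vkWaitSemaphoresKHR wait_semaphores_ = nullptr;
    std::atomic<uint64_t> current_tick_{1};
};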
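
Building on the TimelineGate sketch above, this second sketch illustrates the resource-protection pattern from the commit message: a pooled object records the logical tick of its last use instead of holding a VkFence, and becomes reusable once the device has signalled that tick. StagingBuffer, Protect, and TryReuse are hypothetical names, not code from the patch.

// Hypothetical pooled resource: it no longer owns a VkFence, it only
// remembers the logical tick of the submission that last used it.
struct StagingBuffer {
    VkBuffer handle = VK_NULL_HANDLE;
    uint64_t last_use_tick = 0;
};

// Stamp the buffer with the tick of the submission currently being recorded.
void Protect(StagingBuffer& buffer, const TimelineGate& gate) {
    buffer.last_use_tick = gate.CurrentTick();
}

// The buffer is free when the GPU counter has reached its stored tick;
// a caller with no other buffer available can block on that tick instead.
bool TryReuse(StagingBuffer& buffer, const TimelineGate& gate, bool block) {
    if (gate.IsFree(buffer.last_use_tick)) {
        return true;
    }
    if (!block) {
        return false;
    }
    gate.Wait(buffer.last_use_tick);
    return true;
}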