Diffstat (limited to 'src/video_core')
-rw-r--r--  src/video_core/CMakeLists.txt | 26
-rw-r--r--  src/video_core/buffer_cache/buffer_cache.h | 126
-rw-r--r--  src/video_core/buffer_cache/map_interval.h | 18
-rw-r--r--  src/video_core/dma_pusher.cpp | 31
-rw-r--r--  src/video_core/dma_pusher.h | 1
-rw-r--r--  src/video_core/engines/fermi_2d.cpp | 8
-rw-r--r--  src/video_core/engines/fermi_2d.h | 3
-rw-r--r--  src/video_core/engines/kepler_compute.cpp | 7
-rw-r--r--  src/video_core/engines/kepler_compute.h | 3
-rw-r--r--  src/video_core/engines/kepler_memory.cpp | 7
-rw-r--r--  src/video_core/engines/kepler_memory.h | 3
-rw-r--r--  src/video_core/engines/maxwell_3d.cpp | 90
-rw-r--r--  src/video_core/engines/maxwell_3d.h | 15
-rw-r--r--  src/video_core/engines/maxwell_dma.cpp | 18
-rw-r--r--  src/video_core/engines/maxwell_dma.h | 3
-rw-r--r--  src/video_core/engines/shader_bytecode.h | 7
-rw-r--r--  src/video_core/fence_manager.h | 172
-rw-r--r--  src/video_core/gpu.cpp | 84
-rw-r--r--  src/video_core/gpu.h | 39
-rw-r--r--  src/video_core/gpu_asynch.cpp | 4
-rw-r--r--  src/video_core/gpu_asynch.h | 2
-rw-r--r--  src/video_core/gpu_thread.cpp | 39
-rw-r--r--  src/video_core/gpu_thread.h | 11
-rw-r--r--  src/video_core/memory_manager.cpp | 18
-rw-r--r--  src/video_core/query_cache.h | 46
-rw-r--r--  src/video_core/rasterizer_interface.h | 21
-rw-r--r--  src/video_core/renderer_opengl/gl_buffer_cache.cpp | 5
-rw-r--r--  src/video_core/renderer_opengl/gl_buffer_cache.h | 2
-rw-r--r--  src/video_core/renderer_opengl/gl_fence_manager.cpp | 72
-rw-r--r--  src/video_core/renderer_opengl/gl_fence_manager.h | 53
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.cpp | 164
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.h | 14
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_cache.cpp | 99
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_cache.h | 3
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_decompiler.cpp | 68
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_decompiler.h | 33
-rw-r--r--  src/video_core/renderer_vulkan/fixed_pipeline_state.cpp | 512
-rw-r--r--  src/video_core/renderer_vulkan/fixed_pipeline_state.h | 372
-rw-r--r--  src/video_core/renderer_vulkan/nsight_aftermath_tracker.cpp | 220
-rw-r--r--  src/video_core/renderer_vulkan/nsight_aftermath_tracker.h | 87
-rw-r--r--  src/video_core/renderer_vulkan/renderer_vulkan.cpp | 3
-rw-r--r--  src/video_core/renderer_vulkan/renderer_vulkan.h | 1
-rw-r--r--  src/video_core/renderer_vulkan/vk_blit_screen.h | 1
-rw-r--r--  src/video_core/renderer_vulkan/vk_buffer_cache.cpp | 4
-rw-r--r--  src/video_core/renderer_vulkan/vk_buffer_cache.h | 5
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pass.cpp | 2
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pass.h | 2
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pipeline.cpp | 4
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pipeline.h | 2
-rw-r--r--  src/video_core/renderer_vulkan/vk_descriptor_pool.cpp | 1
-rw-r--r--  src/video_core/renderer_vulkan/vk_descriptor_pool.h | 2
-rw-r--r--  src/video_core/renderer_vulkan/vk_device.cpp | 36
-rw-r--r--  src/video_core/renderer_vulkan/vk_device.h | 19
-rw-r--r--  src/video_core/renderer_vulkan/vk_fence_manager.cpp | 101
-rw-r--r--  src/video_core/renderer_vulkan/vk_fence_manager.h | 74
-rw-r--r--  src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp | 96
-rw-r--r--  src/video_core/renderer_vulkan/vk_graphics_pipeline.h | 3
-rw-r--r--  src/video_core/renderer_vulkan/vk_memory_manager.cpp | 13
-rw-r--r--  src/video_core/renderer_vulkan/vk_memory_manager.h | 13
-rw-r--r--  src/video_core/renderer_vulkan/vk_pipeline_cache.cpp | 118
-rw-r--r--  src/video_core/renderer_vulkan/vk_pipeline_cache.h | 62
-rw-r--r--  src/video_core/renderer_vulkan/vk_query_cache.cpp | 16
-rw-r--r--  src/video_core/renderer_vulkan/vk_query_cache.h | 1
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.cpp | 218
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.h | 17
-rw-r--r--  src/video_core/renderer_vulkan/vk_renderpass_cache.cpp | 53
-rw-r--r--  src/video_core/renderer_vulkan/vk_renderpass_cache.h | 59
-rw-r--r--  src/video_core/renderer_vulkan/vk_sampler_cache.cpp | 3
-rw-r--r--  src/video_core/renderer_vulkan/vk_scheduler.cpp | 11
-rw-r--r--  src/video_core/renderer_vulkan/vk_scheduler.h | 1
-rw-r--r--  src/video_core/renderer_vulkan/vk_shader_decompiler.cpp | 83
-rw-r--r--  src/video_core/renderer_vulkan/vk_shader_decompiler.h | 4
-rw-r--r--  src/video_core/renderer_vulkan/vk_shader_util.cpp | 3
-rw-r--r--  src/video_core/renderer_vulkan/vk_shader_util.h | 1
-rw-r--r--  src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp | 11
-rw-r--r--  src/video_core/renderer_vulkan/vk_staging_buffer_pool.h | 3
-rw-r--r--  src/video_core/renderer_vulkan/vk_stream_buffer.cpp | 76
-rw-r--r--  src/video_core/renderer_vulkan/vk_stream_buffer.h | 5
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.cpp | 3
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.h | 6
-rw-r--r--  src/video_core/renderer_vulkan/vk_update_descriptor.h | 1
-rw-r--r--  src/video_core/renderer_vulkan/wrapper.cpp | 58
-rw-r--r--  src/video_core/renderer_vulkan/wrapper.h | 64
-rw-r--r--  src/video_core/shader/control_flow.cpp | 16
-rw-r--r--  src/video_core/shader/decode.cpp | 30
-rw-r--r--  src/video_core/shader/decode/arithmetic_half.cpp | 51
-rw-r--r--  src/video_core/shader/decode/arithmetic_integer.cpp | 35
-rw-r--r--  src/video_core/shader/decode/image.cpp | 18
-rw-r--r--  src/video_core/shader/decode/memory.cpp | 2
-rw-r--r--  src/video_core/shader/decode/register_set_predicate.cpp | 52
-rw-r--r--  src/video_core/shader/decode/texture.cpp | 203
-rw-r--r--  src/video_core/shader/memory_util.cpp | 77
-rw-r--r--  src/video_core/shader/memory_util.h | 47
-rw-r--r--  src/video_core/shader/node.h | 131
-rw-r--r--  src/video_core/shader/shader_ir.h | 37
-rw-r--r--  src/video_core/shader/track.cpp | 20
-rw-r--r--  src/video_core/texture_cache/surface_base.h | 18
-rw-r--r--  src/video_core/texture_cache/surface_params.cpp | 10
-rw-r--r--  src/video_core/texture_cache/texture_cache.h | 182
-rw-r--r--  src/video_core/textures/decoders.cpp | 3
-rw-r--r--  src/video_core/textures/decoders.h | 5
101 files changed, 3195 insertions, 1507 deletions
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index 258d58eba..ff53282c9 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -23,6 +23,7 @@ add_library(video_core STATIC
engines/shader_bytecode.h
engines/shader_header.h
engines/shader_type.h
+ fence_manager.h
gpu.cpp
gpu.h
gpu_asynch.cpp
@@ -51,6 +52,8 @@ add_library(video_core STATIC
renderer_opengl/gl_buffer_cache.h
renderer_opengl/gl_device.cpp
renderer_opengl/gl_device.h
+ renderer_opengl/gl_fence_manager.cpp
+ renderer_opengl/gl_fence_manager.h
renderer_opengl/gl_framebuffer_cache.cpp
renderer_opengl/gl_framebuffer_cache.h
renderer_opengl/gl_rasterizer.cpp
@@ -121,6 +124,8 @@ add_library(video_core STATIC
shader/decode.cpp
shader/expr.cpp
shader/expr.h
+ shader/memory_util.cpp
+ shader/memory_util.h
shader/node_helper.cpp
shader/node_helper.h
shader/node.h
@@ -160,6 +165,8 @@ if (ENABLE_VULKAN)
renderer_vulkan/fixed_pipeline_state.h
renderer_vulkan/maxwell_to_vk.cpp
renderer_vulkan/maxwell_to_vk.h
+ renderer_vulkan/nsight_aftermath_tracker.cpp
+ renderer_vulkan/nsight_aftermath_tracker.h
renderer_vulkan/renderer_vulkan.h
renderer_vulkan/renderer_vulkan.cpp
renderer_vulkan/vk_blit_screen.cpp
@@ -174,6 +181,8 @@ if (ENABLE_VULKAN)
renderer_vulkan/vk_descriptor_pool.h
renderer_vulkan/vk_device.cpp
renderer_vulkan/vk_device.h
+ renderer_vulkan/vk_fence_manager.cpp
+ renderer_vulkan/vk_fence_manager.h
renderer_vulkan/vk_graphics_pipeline.cpp
renderer_vulkan/vk_graphics_pipeline.h
renderer_vulkan/vk_image.cpp
@@ -213,19 +222,30 @@ if (ENABLE_VULKAN)
renderer_vulkan/wrapper.cpp
renderer_vulkan/wrapper.h
)
-
- target_include_directories(video_core PRIVATE sirit ../../externals/Vulkan-Headers/include)
- target_compile_definitions(video_core PRIVATE HAS_VULKAN)
endif()
create_target_directory_groups(video_core)
target_link_libraries(video_core PUBLIC common core)
target_link_libraries(video_core PRIVATE glad)
+
if (ENABLE_VULKAN)
+ target_include_directories(video_core PRIVATE sirit ../../externals/Vulkan-Headers/include)
+ target_compile_definitions(video_core PRIVATE HAS_VULKAN)
target_link_libraries(video_core PRIVATE sirit)
endif()
+if (ENABLE_NSIGHT_AFTERMATH)
+ if (NOT DEFINED ENV{NSIGHT_AFTERMATH_SDK})
+ message(ERROR "Environment variable NSIGHT_AFTERMATH_SDK has to be provided")
+ endif()
+ if (NOT WIN32)
+ message(ERROR "Nsight Aftermath doesn't support non-Windows platforms")
+ endif()
+ target_compile_definitions(video_core PRIVATE HAS_NSIGHT_AFTERMATH)
+ target_include_directories(video_core PRIVATE "$ENV{NSIGHT_AFTERMATH_SDK}/include")
+endif()
+
if (MSVC)
target_compile_options(video_core PRIVATE /we4267)
else()
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 83e7a1cde..56e570994 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -4,7 +4,7 @@
#pragma once
-#include <array>
+#include <list>
#include <memory>
#include <mutex>
#include <unordered_map>
@@ -18,8 +18,10 @@
#include "common/alignment.h"
#include "common/common_types.h"
+#include "common/logging/log.h"
#include "core/core.h"
#include "core/memory.h"
+#include "core/settings.h"
#include "video_core/buffer_cache/buffer_block.h"
#include "video_core/buffer_cache/map_interval.h"
#include "video_core/memory_manager.h"
@@ -79,14 +81,13 @@ public:
auto map = MapAddress(block, gpu_addr, cpu_addr, size);
if (is_written) {
map->MarkAsModified(true, GetModifiedTicks());
+ if (Settings::IsGPULevelHigh() && Settings::values.use_asynchronous_gpu_emulation) {
+ MarkForAsyncFlush(map);
+ }
if (!map->IsWritten()) {
map->MarkAsWritten(true);
MarkRegionAsWritten(map->GetStart(), map->GetEnd() - 1);
}
- } else {
- if (map->IsWritten()) {
- WriteBarrier();
- }
}
return {ToHandle(block), static_cast<u64>(block->GetOffset(cpu_addr))};
@@ -137,11 +138,22 @@ public:
});
for (auto& object : objects) {
if (object->IsModified() && object->IsRegistered()) {
+ mutex.unlock();
FlushMap(object);
+ mutex.lock();
}
}
}
+ bool MustFlushRegion(VAddr addr, std::size_t size) {
+ std::lock_guard lock{mutex};
+
+ const std::vector<MapInterval> objects = GetMapsInRange(addr, size);
+ return std::any_of(objects.cbegin(), objects.cend(), [](const MapInterval& map) {
+ return map->IsModified() && map->IsRegistered();
+ });
+ }
+
/// Mark the specified region as being invalidated
void InvalidateRegion(VAddr addr, u64 size) {
std::lock_guard lock{mutex};
@@ -154,6 +166,77 @@ public:
}
}
+ void OnCPUWrite(VAddr addr, std::size_t size) {
+ std::lock_guard lock{mutex};
+
+ for (const auto& object : GetMapsInRange(addr, size)) {
+ if (object->IsMemoryMarked() && object->IsRegistered()) {
+ UnmarkMemory(object);
+ object->SetSyncPending(true);
+ marked_for_unregister.emplace_back(object);
+ }
+ }
+ }
+
+ void SyncGuestHost() {
+ std::lock_guard lock{mutex};
+
+ for (const auto& object : marked_for_unregister) {
+ if (object->IsRegistered()) {
+ object->SetSyncPending(false);
+ Unregister(object);
+ }
+ }
+ marked_for_unregister.clear();
+ }
+
+ void CommitAsyncFlushes() {
+ if (uncommitted_flushes) {
+ auto commit_list = std::make_shared<std::list<MapInterval>>();
+ for (auto& map : *uncommitted_flushes) {
+ if (map->IsRegistered() && map->IsModified()) {
+ // TODO(Blinkhawk): Implement backend asynchronous flushing
+ // AsyncFlushMap(map)
+ commit_list->push_back(map);
+ }
+ }
+ if (!commit_list->empty()) {
+ committed_flushes.push_back(commit_list);
+ } else {
+ committed_flushes.emplace_back();
+ }
+ } else {
+ committed_flushes.emplace_back();
+ }
+ uncommitted_flushes.reset();
+ }
+
+ bool ShouldWaitAsyncFlushes() const {
+ return !committed_flushes.empty() && committed_flushes.front() != nullptr;
+ }
+
+ bool HasUncommittedFlushes() const {
+ return uncommitted_flushes != nullptr;
+ }
+
+ void PopAsyncFlushes() {
+ if (committed_flushes.empty()) {
+ return;
+ }
+ auto& flush_list = committed_flushes.front();
+ if (!flush_list) {
+ committed_flushes.pop_front();
+ return;
+ }
+ for (MapInterval& map : *flush_list) {
+ if (map->IsRegistered()) {
+ // TODO(Blinkhawk): Replace this for reading the asynchronous flush
+ FlushMap(map);
+ }
+ }
+ committed_flushes.pop_front();
+ }
+
virtual BufferType GetEmptyBuffer(std::size_t size) = 0;
protected:
@@ -166,8 +249,6 @@ protected:
virtual BufferType ToHandle(const OwnerBuffer& storage) = 0;
- virtual void WriteBarrier() = 0;
-
virtual OwnerBuffer CreateBlock(VAddr cpu_addr, std::size_t size) = 0;
virtual void UploadBlockData(const OwnerBuffer& buffer, std::size_t offset, std::size_t size,
@@ -196,17 +277,30 @@ protected:
const IntervalType interval{new_map->GetStart(), new_map->GetEnd()};
mapped_addresses.insert({interval, new_map});
rasterizer.UpdatePagesCachedCount(cpu_addr, size, 1);
+ new_map->SetMemoryMarked(true);
if (inherit_written) {
MarkRegionAsWritten(new_map->GetStart(), new_map->GetEnd() - 1);
new_map->MarkAsWritten(true);
}
}
- /// Unregisters an object from the cache
- void Unregister(MapInterval& map) {
+ void UnmarkMemory(const MapInterval& map) {
+ if (!map->IsMemoryMarked()) {
+ return;
+ }
const std::size_t size = map->GetEnd() - map->GetStart();
rasterizer.UpdatePagesCachedCount(map->GetStart(), size, -1);
+ map->SetMemoryMarked(false);
+ }
+
+ /// Unregisters an object from the cache
+ void Unregister(const MapInterval& map) {
+ UnmarkMemory(map);
map->MarkAsRegistered(false);
+ if (map->IsSyncPending()) {
+ marked_for_unregister.remove(map);
+ map->SetSyncPending(false);
+ }
if (map->IsWritten()) {
UnmarkRegionAsWritten(map->GetStart(), map->GetEnd() - 1);
}
@@ -264,6 +358,9 @@ private:
MapInterval new_map = CreateMap(new_start, new_end, new_gpu_addr);
if (modified_inheritance) {
new_map->MarkAsModified(true, GetModifiedTicks());
+ if (Settings::IsGPULevelHigh() && Settings::values.use_asynchronous_gpu_emulation) {
+ MarkForAsyncFlush(new_map);
+ }
}
Register(new_map, write_inheritance);
return new_map;
@@ -450,6 +547,13 @@ private:
return false;
}
+ void MarkForAsyncFlush(MapInterval& map) {
+ if (!uncommitted_flushes) {
+ uncommitted_flushes = std::make_shared<std::unordered_set<MapInterval>>();
+ }
+ uncommitted_flushes->insert(map);
+ }
+
VideoCore::RasterizerInterface& rasterizer;
Core::System& system;
@@ -479,6 +583,10 @@ private:
u64 modified_ticks = 0;
std::vector<u8> staging_buffer;
+ std::list<MapInterval> marked_for_unregister;
+
+ std::shared_ptr<std::unordered_set<MapInterval>> uncommitted_flushes{};
+ std::list<std::shared_ptr<std::list<MapInterval>>> committed_flushes;
std::recursive_mutex mutex;
};
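The async-flush bookkeeping added above follows a two-stage pattern: modified maps are first gathered into an uncommitted set, `CommitAsyncFlushes` snapshots that set into a FIFO of per-fence lists (pushing an empty entry when nothing was pending, so every fence still has a slot), and `PopAsyncFlushes` drains one snapshot per released fence. A minimal standalone sketch of that pattern, using a hypothetical integer `Map` in place of `MapInterval` and `printf` in place of `FlushMap`:

```cpp
#include <cstdio>
#include <list>
#include <memory>
#include <unordered_set>

// Hypothetical stand-in for MapInterval: an id we can pretend to flush.
using Map = int;

class AsyncFlushTracker {
public:
    // Stage 1: gather modified maps until the next fence commit.
    void MarkForAsyncFlush(Map map) {
        if (!uncommitted) {
            uncommitted = std::make_shared<std::unordered_set<Map>>();
        }
        uncommitted->insert(map);
    }

    // Stage 2: snapshot the gathered set. An empty (nullptr) slot is pushed
    // when nothing was pending so every fence still has a matching entry.
    void CommitAsyncFlushes() {
        if (uncommitted && !uncommitted->empty()) {
            committed.push_back(std::make_shared<std::list<Map>>(
                uncommitted->begin(), uncommitted->end()));
        } else {
            committed.emplace_back(); // nullptr placeholder
        }
        uncommitted.reset();
    }

    // Stage 3: drain one snapshot when its fence is released.
    void PopAsyncFlushes() {
        if (committed.empty()) {
            return;
        }
        if (const auto& list = committed.front()) {
            for (const Map map : *list) {
                std::printf("flush map %d\n", map); // stands in for FlushMap
            }
        }
        committed.pop_front();
    }

private:
    std::shared_ptr<std::unordered_set<Map>> uncommitted;
    std::list<std::shared_ptr<std::list<Map>>> committed;
};

int main() {
    AsyncFlushTracker tracker;
    tracker.MarkForAsyncFlush(1);
    tracker.MarkForAsyncFlush(2);
    tracker.CommitAsyncFlushes(); // fence A: two maps snapshotted
    tracker.CommitAsyncFlushes(); // fence B: nothing pending
    tracker.PopAsyncFlushes();    // fence A released: flushes maps 1 and 2
    tracker.PopAsyncFlushes();    // fence B released: no-op
}
```

Keeping nullptr placeholders in lockstep with the fence queue is what lets `ShouldWaitAsyncFlushes` answer by inspecting only `committed_flushes.front()`.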
diff --git a/src/video_core/buffer_cache/map_interval.h b/src/video_core/buffer_cache/map_interval.h
index b0956029d..29d8b26f3 100644
--- a/src/video_core/buffer_cache/map_interval.h
+++ b/src/video_core/buffer_cache/map_interval.h
@@ -46,6 +46,22 @@ public:
return is_registered;
}
+ void SetMemoryMarked(bool is_memory_marked_) {
+ is_memory_marked = is_memory_marked_;
+ }
+
+ bool IsMemoryMarked() const {
+ return is_memory_marked;
+ }
+
+ void SetSyncPending(bool is_sync_pending_) {
+ is_sync_pending = is_sync_pending_;
+ }
+
+ bool IsSyncPending() const {
+ return is_sync_pending;
+ }
+
VAddr GetStart() const {
return start;
}
@@ -83,6 +99,8 @@ private:
bool is_written{};
bool is_modified{};
bool is_registered{};
+ bool is_memory_marked{};
+ bool is_sync_pending{};
u64 ticks{};
};
diff --git a/src/video_core/dma_pusher.cpp b/src/video_core/dma_pusher.cpp
index 0b77afc71..16311f05e 100644
--- a/src/video_core/dma_pusher.cpp
+++ b/src/video_core/dma_pusher.cpp
@@ -21,6 +21,7 @@ MICROPROFILE_DEFINE(DispatchCalls, "GPU", "Execute command buffer", MP_RGB(128,
void DmaPusher::DispatchCalls() {
MICROPROFILE_SCOPE(DispatchCalls);
+ gpu.SyncGuestHost();
// On entering GPU code, assume all memory may be touched by the ARM core.
gpu.Maxwell3D().OnMemoryWrite();
@@ -32,6 +33,8 @@ void DmaPusher::DispatchCalls() {
}
}
gpu.FlushCommands();
+ gpu.SyncGuestHost();
+ gpu.OnCommandListEnd();
}
bool DmaPusher::Step() {
@@ -68,16 +71,22 @@ bool DmaPusher::Step() {
gpu.MemoryManager().ReadBlockUnsafe(dma_get, command_headers.data(),
command_list_header.size * sizeof(u32));
- for (const CommandHeader& command_header : command_headers) {
+ for (std::size_t index = 0; index < command_headers.size();) {
+ const CommandHeader& command_header = command_headers[index];
- // now, see if we're in the middle of a command
- if (dma_state.length_pending) {
- // Second word of long non-inc methods command - method count
- dma_state.length_pending = 0;
- dma_state.method_count = command_header.method_count_;
- } else if (dma_state.method_count) {
+ if (dma_state.method_count) {
// Data word of methods command
- CallMethod(command_header.argument);
+ if (dma_state.non_incrementing) {
+ const u32 max_write = static_cast<u32>(
+ std::min<std::size_t>(index + dma_state.method_count, command_headers.size()) -
+ index);
+ CallMultiMethod(&command_header.argument, max_write);
+ dma_state.method_count -= max_write;
+ index += max_write;
+ continue;
+ } else {
+ CallMethod(command_header.argument);
+ }
if (!dma_state.non_incrementing) {
dma_state.method++;
@@ -117,6 +126,7 @@ bool DmaPusher::Step() {
break;
}
}
+ index++;
}
if (!non_main) {
@@ -137,4 +147,9 @@ void DmaPusher::CallMethod(u32 argument) const {
gpu.CallMethod({dma_state.method, argument, dma_state.subchannel, dma_state.method_count});
}
+void DmaPusher::CallMultiMethod(const u32* base_start, u32 num_methods) const {
+ gpu.CallMultiMethod(dma_state.method, dma_state.subchannel, base_start, num_methods,
+ dma_state.method_count);
+}
+
} // namespace Tegra
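The loop rewrite above means a non-incrementing method no longer dispatches its arguments one at a time: the pusher hands the engine a pointer into `command_headers` plus a count clamped to what remains of the fetched block, and carries any shortfall into the next fetch via `method_count`. A small sketch of just that clamping arithmetic, with hypothetical names outside the real `DmaPusher`:

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical consumer standing in for GPU::CallMultiMethod.
static void CallMultiMethod(const std::uint32_t* args, std::uint32_t count) {
    for (std::uint32_t i = 0; i < count; ++i) {
        std::printf("arg[%u] = %u\n", i, args[i]);
    }
}

int main() {
    const std::vector<std::uint32_t> command_headers{10, 11, 12, 13, 14};
    std::uint32_t method_count = 7; // more arguments promised than were fetched
    std::size_t index = 1;          // position of the method's first data word

    while (method_count != 0 && index < command_headers.size()) {
        // Clamp the batch to the end of the fetched block, exactly as
        // DmaPusher::Step does before calling CallMultiMethod.
        const auto max_write = static_cast<std::uint32_t>(
            std::min<std::size_t>(index + method_count, command_headers.size()) - index);
        CallMultiMethod(&command_headers[index], max_write);
        method_count -= max_write;
        index += max_write; // leftover words arrive with the next fetched block
    }
}
```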
diff --git a/src/video_core/dma_pusher.h b/src/video_core/dma_pusher.h
index d6188614a..6cef71306 100644
--- a/src/video_core/dma_pusher.h
+++ b/src/video_core/dma_pusher.h
@@ -75,6 +75,7 @@ private:
void SetState(const CommandHeader& command_header);
void CallMethod(u32 argument) const;
+ void CallMultiMethod(const u32* base_start, u32 num_methods) const;
std::vector<CommandHeader> command_headers; ///< Buffer for list of commands fetched at once
diff --git a/src/video_core/engines/fermi_2d.cpp b/src/video_core/engines/fermi_2d.cpp
index 85d308e26..8a47614d2 100644
--- a/src/video_core/engines/fermi_2d.cpp
+++ b/src/video_core/engines/fermi_2d.cpp
@@ -28,7 +28,13 @@ void Fermi2D::CallMethod(const GPU::MethodCall& method_call) {
}
}
-std::pair<u32, u32> DelimitLine(u32 src_1, u32 src_2, u32 dst_1, u32 dst_2, u32 src_line) {
+void Fermi2D::CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending) {
+ for (std::size_t i = 0; i < amount; i++) {
+ CallMethod({method, base_start[i], 0, methods_pending - static_cast<u32>(i)});
+ }
+}
+
+static std::pair<u32, u32> DelimitLine(u32 src_1, u32 src_2, u32 dst_1, u32 dst_2, u32 src_line) {
const u32 line_a = src_2 - src_1;
const u32 line_b = dst_2 - dst_1;
const u32 excess = std::max<s32>(0, line_a - src_line + src_1);
diff --git a/src/video_core/engines/fermi_2d.h b/src/video_core/engines/fermi_2d.h
index dba342c70..939a5966d 100644
--- a/src/video_core/engines/fermi_2d.h
+++ b/src/video_core/engines/fermi_2d.h
@@ -39,6 +39,9 @@ public:
/// Write the value to the register identified by method.
void CallMethod(const GPU::MethodCall& method_call);
+ /// Write multiple values to the register identified by method.
+ void CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending);
+
enum class Origin : u32 {
Center = 0,
Corner = 1,
diff --git a/src/video_core/engines/kepler_compute.cpp b/src/video_core/engines/kepler_compute.cpp
index 368c75a66..00a12175f 100644
--- a/src/video_core/engines/kepler_compute.cpp
+++ b/src/video_core/engines/kepler_compute.cpp
@@ -51,6 +51,13 @@ void KeplerCompute::CallMethod(const GPU::MethodCall& method_call) {
}
}
+void KeplerCompute::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
+ u32 methods_pending) {
+ for (std::size_t i = 0; i < amount; i++) {
+ CallMethod({method, base_start[i], 0, methods_pending - static_cast<u32>(i)});
+ }
+}
+
Texture::FullTextureInfo KeplerCompute::GetTexture(std::size_t offset) const {
const std::bitset<8> cbuf_mask = launch_description.const_buffer_enable_mask.Value();
ASSERT(cbuf_mask[regs.tex_cb_index]);
diff --git a/src/video_core/engines/kepler_compute.h b/src/video_core/engines/kepler_compute.h
index eeb79c56f..fe55fdfd0 100644
--- a/src/video_core/engines/kepler_compute.h
+++ b/src/video_core/engines/kepler_compute.h
@@ -202,6 +202,9 @@ public:
/// Write the value to the register identified by method.
void CallMethod(const GPU::MethodCall& method_call);
+ /// Write multiple values to the register identified by method.
+ void CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending);
+
Texture::FullTextureInfo GetTexture(std::size_t offset) const;
/// Given a texture handle, returns the TSC and TIC entries.
diff --git a/src/video_core/engines/kepler_memory.cpp b/src/video_core/engines/kepler_memory.cpp
index 597872e43..586ff15dc 100644
--- a/src/video_core/engines/kepler_memory.cpp
+++ b/src/video_core/engines/kepler_memory.cpp
@@ -41,4 +41,11 @@ void KeplerMemory::CallMethod(const GPU::MethodCall& method_call) {
}
}
+void KeplerMemory::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
+ u32 methods_pending) {
+ for (std::size_t i = 0; i < amount; i++) {
+ CallMethod({method, base_start[i], 0, methods_pending - static_cast<u32>(i)});
+ }
+}
+
} // namespace Tegra::Engines
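Fermi2D, KeplerCompute, KeplerMemory and (below) MaxwellDMA all share the same fallback `CallMultiMethod`: the batch is expanded into single `CallMethod` calls, with `methods_pending` counting down so each call still reports how many values remain. A minimal sketch of that expansion, using a hypothetical mirror of `GPU::MethodCall`:

```cpp
#include <cstdint>
#include <cstdio>

// Hypothetical mirror of GPU::MethodCall, for illustration only.
struct MethodCall {
    std::uint32_t method;
    std::uint32_t argument;
    std::uint32_t subchannel;
    std::uint32_t method_count;
};

static void CallMethod(const MethodCall& call) {
    std::printf("method %u arg %u, %u still pending\n", call.method, call.argument,
                call.method_count);
}

// The shared fallback: one CallMethod per value, methods_pending decreasing.
static void CallMultiMethod(std::uint32_t method, const std::uint32_t* base_start,
                            std::uint32_t amount, std::uint32_t methods_pending) {
    for (std::uint32_t i = 0; i < amount; ++i) {
        CallMethod({method, base_start[i], 0, methods_pending - i});
    }
}

int main() {
    const std::uint32_t args[]{7, 8, 9};
    CallMultiMethod(0x6c, args, 3, 3);
}
```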
diff --git a/src/video_core/engines/kepler_memory.h b/src/video_core/engines/kepler_memory.h
index 396fb6e86..bb26fb030 100644
--- a/src/video_core/engines/kepler_memory.h
+++ b/src/video_core/engines/kepler_memory.h
@@ -40,6 +40,9 @@ public:
/// Write the value to the register identified by method.
void CallMethod(const GPU::MethodCall& method_call);
+ /// Write multiple values to the register identified by method.
+ void CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending);
+
struct Regs {
static constexpr size_t NUM_REGS = 0x7F;
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp
index ba63b44b4..7db055ea0 100644
--- a/src/video_core/engines/maxwell_3d.cpp
+++ b/src/video_core/engines/maxwell_3d.cpp
@@ -92,6 +92,10 @@ void Maxwell3D::InitializeRegisterDefaults() {
color_mask.A.Assign(1);
}
+ for (auto& format : regs.vertex_attrib_format) {
+ format.constant.Assign(1);
+ }
+
// NVN games expect these values to be enabled at boot
regs.rasterize_enable = 1;
regs.rt_separate_frag_data = 1;
@@ -180,6 +184,10 @@ void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) {
}
switch (method) {
+ case MAXWELL3D_REG_INDEX(wait_for_idle): {
+ rasterizer.WaitForIdle();
+ break;
+ }
case MAXWELL3D_REG_INDEX(shadow_ram_control): {
shadow_state.shadow_ram_control = static_cast<Regs::ShadowRamControl>(method_call.argument);
break;
@@ -276,6 +284,58 @@ void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) {
}
}
+void Maxwell3D::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
+ u32 methods_pending) {
+ // Methods after 0xE00 are special, they're actually triggers for some microcode that was
+ // uploaded to the GPU during initialization.
+ if (method >= MacroRegistersStart) {
+ // We're trying to execute a macro
+ if (executing_macro == 0) {
+ // A macro call must begin by writing the macro method's register, not its argument.
+ ASSERT_MSG((method % 2) == 0,
+ "Can't start macro execution by writing to the ARGS register");
+ executing_macro = method;
+ }
+
+ for (std::size_t i = 0; i < amount; i++) {
+ macro_params.push_back(base_start[i]);
+ }
+
+ // Call the macro when there are no more parameters in the command buffer
+ if (amount == methods_pending) {
+ CallMacroMethod(executing_macro, macro_params.size(), macro_params.data());
+ macro_params.clear();
+ }
+ return;
+ }
+ switch (method) {
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[0]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[1]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[2]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[3]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[4]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[5]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[6]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[7]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[8]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[9]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[10]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[11]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[12]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[13]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[14]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[15]): {
+ ProcessCBMultiData(method, base_start, amount);
+ break;
+ }
+ default: {
+ for (std::size_t i = 0; i < amount; i++) {
+ CallMethod({method, base_start[i], 0, methods_pending - static_cast<u32>(i)});
+ }
+ }
+ }
+}
+
void Maxwell3D::StepInstance(const MMEDrawMode expected_mode, const u32 count) {
if (mme_draw.current_mode == MMEDrawMode::Undefined) {
if (mme_draw.gl_begin_consume) {
@@ -400,7 +460,11 @@ void Maxwell3D::ProcessQueryGet() {
switch (regs.query.query_get.operation) {
case Regs::QueryOperation::Release:
- StampQueryResult(regs.query.query_sequence, regs.query.query_get.short_query == 0);
+ if (regs.query.query_get.fence == 1) {
+ rasterizer.SignalSemaphore(regs.query.QueryAddress(), regs.query.query_sequence);
+ } else {
+ StampQueryResult(regs.query.query_sequence, regs.query.query_get.short_query == 0);
+ }
break;
case Regs::QueryOperation::Acquire:
// TODO(Blinkhawk): Under this operation, the GPU waits for the CPU to write a value that
@@ -479,7 +543,7 @@ void Maxwell3D::ProcessSyncPoint() {
const u32 increment = regs.sync_info.increment.Value();
[[maybe_unused]] const u32 cache_flush = regs.sync_info.unknown.Value();
if (increment) {
- system.GPU().IncrementSyncPoint(sync_point);
+ rasterizer.SignalSyncPoint(sync_point);
}
}
@@ -562,6 +626,28 @@ void Maxwell3D::StartCBData(u32 method) {
ProcessCBData(regs.const_buffer.cb_data[cb_data_state.id]);
}
+void Maxwell3D::ProcessCBMultiData(u32 method, const u32* start_base, u32 amount) {
+ if (cb_data_state.current != method) {
+ if (cb_data_state.current != null_cb_data) {
+ FinishCBData();
+ }
+ constexpr u32 first_cb_data = MAXWELL3D_REG_INDEX(const_buffer.cb_data[0]);
+ cb_data_state.start_pos = regs.const_buffer.cb_pos;
+ cb_data_state.id = method - first_cb_data;
+ cb_data_state.current = method;
+ cb_data_state.counter = 0;
+ }
+ const std::size_t id = cb_data_state.id;
+ const std::size_t size = amount;
+ std::size_t i = 0;
+ for (; i < size; i++) {
+ cb_data_state.buffer[id][cb_data_state.counter] = start_base[i];
+ cb_data_state.counter++;
+ }
+ // Increment the current buffer position.
+ regs.const_buffer.cb_pos = regs.const_buffer.cb_pos + 4 * amount;
+}
+
void Maxwell3D::FinishCBData() {
// Write the input value to the current const buffer at the current position.
const GPUVAddr buffer_address = regs.const_buffer.BufferAddress();
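For Maxwell3D the interesting multi-write case is the macro path: each value in the batch is appended to `macro_params`, and the macro fires only once the batch covers everything still pending (`amount == methods_pending`). A reduced sketch of that accumulate-then-fire behaviour, with a hypothetical `ExecuteMacro` standing in for `CallMacroMethod`:

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for Maxwell3D::CallMacroMethod.
static void ExecuteMacro(std::uint32_t macro_method, const std::vector<std::uint32_t>& params) {
    std::printf("macro %#x called with %zu parameters\n",
                static_cast<unsigned>(macro_method), params.size());
}

struct MacroAccumulator {
    std::uint32_t executing_macro = 0;
    std::vector<std::uint32_t> macro_params;

    void CallMultiMethod(std::uint32_t method, const std::uint32_t* base_start,
                         std::uint32_t amount, std::uint32_t methods_pending) {
        if (executing_macro == 0) {
            executing_macro = method; // the first write names the macro
        }
        macro_params.insert(macro_params.end(), base_start, base_start + amount);
        // Fire only when this batch is the tail of the pending arguments.
        if (amount == methods_pending) {
            ExecuteMacro(executing_macro, macro_params);
            macro_params.clear();
            executing_macro = 0;
        }
    }
};

int main() {
    MacroAccumulator acc;
    const std::uint32_t first[]{1, 2};
    const std::uint32_t rest[]{3, 4, 5};
    acc.CallMultiMethod(0xE20, first, 2, 5); // three parameters still pending
    acc.CallMultiMethod(0xE20, rest, 3, 3);  // tail of the batch: the macro fires
}
```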
diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h
index 5cf6a4cc3..864924ff3 100644
--- a/src/video_core/engines/maxwell_3d.h
+++ b/src/video_core/engines/maxwell_3d.h
@@ -709,7 +709,9 @@ public:
union {
struct {
- INSERT_UNION_PADDING_WORDS(0x45);
+ INSERT_UNION_PADDING_WORDS(0x44);
+
+ u32 wait_for_idle;
struct {
u32 upload_address;
@@ -1149,7 +1151,7 @@ public:
/// Returns whether the vertex array specified by index is supposed to be
/// accessed per instance or not.
- bool IsInstancingEnabled(u32 index) const {
+ bool IsInstancingEnabled(std::size_t index) const {
return is_instanced[index];
}
} instanced_arrays;
@@ -1179,6 +1181,7 @@ public:
BitField<0, 1, u32> depth_range_0_1;
BitField<3, 1, u32> depth_clamp_near;
BitField<4, 1, u32> depth_clamp_far;
+ BitField<11, 1, u32> depth_clamp_disabled;
} view_volume_clip_control;
INSERT_UNION_PADDING_WORDS(0x1F);
@@ -1259,7 +1262,8 @@ public:
GPUVAddr LimitAddress() const {
return static_cast<GPUVAddr>((static_cast<GPUVAddr>(limit_high) << 32) |
- limit_low);
+ limit_low) +
+ 1;
}
} vertex_array_limit[NumVertexArrays];
@@ -1358,6 +1362,9 @@ public:
/// Write the value to the register identified by method.
void CallMethod(const GPU::MethodCall& method_call);
+ /// Write multiple values to the register identified by method.
+ void CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending);
+
/// Write the value to the register identified by method.
void CallMethodFromMME(const GPU::MethodCall& method_call);
@@ -1511,6 +1518,7 @@ private:
/// Handles a write to the CB_DATA[i] register.
void StartCBData(u32 method);
void ProcessCBData(u32 value);
+ void ProcessCBMultiData(u32 method, const u32* start_base, u32 amount);
void FinishCBData();
/// Handles a write to the CB_BIND register.
@@ -1530,6 +1538,7 @@ private:
static_assert(offsetof(Maxwell3D::Regs, field_name) == position * 4, \
"Field " #field_name " has invalid position")
+ASSERT_REG_POSITION(wait_for_idle, 0x44);
ASSERT_REG_POSITION(macros, 0x45);
ASSERT_REG_POSITION(shadow_ram_control, 0x49);
ASSERT_REG_POSITION(upload, 0x60);
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp
index c2610f992..6630005b0 100644
--- a/src/video_core/engines/maxwell_dma.cpp
+++ b/src/video_core/engines/maxwell_dma.cpp
@@ -36,6 +36,13 @@ void MaxwellDMA::CallMethod(const GPU::MethodCall& method_call) {
#undef MAXWELLDMA_REG_INDEX
}
+void MaxwellDMA::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
+ u32 methods_pending) {
+ for (std::size_t i = 0; i < amount; i++) {
+ CallMethod({method, base_start[i], 0, methods_pending - static_cast<u32>(i)});
+ }
+}
+
void MaxwellDMA::HandleCopy() {
LOG_TRACE(HW_GPU, "Requested a DMA copy");
@@ -104,8 +111,13 @@ void MaxwellDMA::HandleCopy() {
write_buffer.resize(dst_size);
}
- memory_manager.ReadBlock(source, read_buffer.data(), src_size);
- memory_manager.ReadBlock(dest, write_buffer.data(), dst_size);
+ if (Settings::IsGPULevelExtreme()) {
+ memory_manager.ReadBlock(source, read_buffer.data(), src_size);
+ memory_manager.ReadBlock(dest, write_buffer.data(), dst_size);
+ } else {
+ memory_manager.ReadBlockUnsafe(source, read_buffer.data(), src_size);
+ memory_manager.ReadBlockUnsafe(dest, write_buffer.data(), dst_size);
+ }
Texture::UnswizzleSubrect(
regs.x_count, regs.y_count, regs.dst_pitch, regs.src_params.size_x, bytes_per_pixel,
@@ -136,7 +148,7 @@ void MaxwellDMA::HandleCopy() {
write_buffer.resize(dst_size);
}
- if (Settings::values.use_accurate_gpu_emulation) {
+ if (Settings::IsGPULevelExtreme()) {
memory_manager.ReadBlock(source, read_buffer.data(), src_size);
memory_manager.ReadBlock(dest, write_buffer.data(), dst_size);
} else {
diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h
index 4f40d1d1f..c43ed8194 100644
--- a/src/video_core/engines/maxwell_dma.h
+++ b/src/video_core/engines/maxwell_dma.h
@@ -35,6 +35,9 @@ public:
/// Write the value to the register identified by method.
void CallMethod(const GPU::MethodCall& method_call);
+ /// Write multiple values to the register identified by method.
+ void CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending);
+
struct Regs {
static constexpr std::size_t NUM_REGS = 0x1D6;
diff --git a/src/video_core/engines/shader_bytecode.h b/src/video_core/engines/shader_bytecode.h
index 7231597d4..8dae754d4 100644
--- a/src/video_core/engines/shader_bytecode.h
+++ b/src/video_core/engines/shader_bytecode.h
@@ -655,6 +655,7 @@ union Instruction {
}
constexpr Instruction(u64 value) : value{value} {}
+ constexpr Instruction(const Instruction& instr) : value(instr.value) {}
BitField<0, 8, Register> gpr0;
BitField<8, 8, Register> gpr8;
@@ -813,15 +814,17 @@ union Instruction {
} alu_integer;
union {
+ BitField<43, 1, u64> x;
+ } iadd;
+
+ union {
BitField<39, 1, u64> ftz;
BitField<32, 1, u64> saturate;
BitField<49, 2, HalfMerge> merge;
- BitField<43, 1, u64> negate_a;
BitField<44, 1, u64> abs_a;
BitField<47, 2, HalfType> type_a;
- BitField<31, 1, u64> negate_b;
BitField<30, 1, u64> abs_b;
BitField<28, 2, HalfType> type_b;
diff --git a/src/video_core/fence_manager.h b/src/video_core/fence_manager.h
new file mode 100644
index 000000000..8b2a6a42c
--- /dev/null
+++ b/src/video_core/fence_manager.h
@@ -0,0 +1,172 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <algorithm>
+#include <array>
+#include <memory>
+#include <queue>
+
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "core/core.h"
+#include "core/memory.h"
+#include "core/settings.h"
+#include "video_core/gpu.h"
+#include "video_core/memory_manager.h"
+#include "video_core/rasterizer_interface.h"
+
+namespace VideoCommon {
+
+class FenceBase {
+public:
+ FenceBase(u32 payload, bool is_stubbed)
+ : address{}, payload{payload}, is_semaphore{false}, is_stubbed{is_stubbed} {}
+
+ FenceBase(GPUVAddr address, u32 payload, bool is_stubbed)
+ : address{address}, payload{payload}, is_semaphore{true}, is_stubbed{is_stubbed} {}
+
+ GPUVAddr GetAddress() const {
+ return address;
+ }
+
+ u32 GetPayload() const {
+ return payload;
+ }
+
+ bool IsSemaphore() const {
+ return is_semaphore;
+ }
+
+private:
+ GPUVAddr address;
+ u32 payload;
+ bool is_semaphore;
+
+protected:
+ bool is_stubbed;
+};
+
+template <typename TFence, typename TTextureCache, typename TTBufferCache, typename TQueryCache>
+class FenceManager {
+public:
+ void SignalSemaphore(GPUVAddr addr, u32 value) {
+ TryReleasePendingFences();
+ const bool should_flush = ShouldFlush();
+ CommitAsyncFlushes();
+ TFence new_fence = CreateFence(addr, value, !should_flush);
+ fences.push(new_fence);
+ QueueFence(new_fence);
+ if (should_flush) {
+ rasterizer.FlushCommands();
+ }
+ rasterizer.SyncGuestHost();
+ }
+
+ void SignalSyncPoint(u32 value) {
+ TryReleasePendingFences();
+ const bool should_flush = ShouldFlush();
+ CommitAsyncFlushes();
+ TFence new_fence = CreateFence(value, !should_flush);
+ fences.push(new_fence);
+ QueueFence(new_fence);
+ if (should_flush) {
+ rasterizer.FlushCommands();
+ }
+ rasterizer.SyncGuestHost();
+ }
+
+ void WaitPendingFences() {
+ auto& gpu{system.GPU()};
+ auto& memory_manager{gpu.MemoryManager()};
+ while (!fences.empty()) {
+ TFence& current_fence = fences.front();
+ if (ShouldWait()) {
+ WaitFence(current_fence);
+ }
+ PopAsyncFlushes();
+ if (current_fence->IsSemaphore()) {
+ memory_manager.template Write<u32>(current_fence->GetAddress(),
+ current_fence->GetPayload());
+ } else {
+ gpu.IncrementSyncPoint(current_fence->GetPayload());
+ }
+ fences.pop();
+ }
+ }
+
+protected:
+ FenceManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
+ TTextureCache& texture_cache, TTBufferCache& buffer_cache,
+ TQueryCache& query_cache)
+ : system{system}, rasterizer{rasterizer}, texture_cache{texture_cache},
+ buffer_cache{buffer_cache}, query_cache{query_cache} {}
+
+ virtual ~FenceManager() {}
+
+ /// Creates a Sync Point Fence Interface, does not create a backend fence if 'is_stubbed' is
+ /// true
+ virtual TFence CreateFence(u32 value, bool is_stubbed) = 0;
+ /// Creates a Semaphore Fence Interface, does not create a backend fence if 'is_stubbed' is true
+ virtual TFence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) = 0;
+ /// Queues a fence into the backend if the fence isn't stubbed.
+ virtual void QueueFence(TFence& fence) = 0;
+ /// Notifies that the backend fence has been signaled/reached in host GPU.
+ virtual bool IsFenceSignaled(TFence& fence) const = 0;
+ /// Waits until a fence has been signalled by the host GPU.
+ virtual void WaitFence(TFence& fence) = 0;
+
+ Core::System& system;
+ VideoCore::RasterizerInterface& rasterizer;
+ TTextureCache& texture_cache;
+ TTBufferCache& buffer_cache;
+ TQueryCache& query_cache;
+
+private:
+ void TryReleasePendingFences() {
+ auto& gpu{system.GPU()};
+ auto& memory_manager{gpu.MemoryManager()};
+ while (!fences.empty()) {
+ TFence& current_fence = fences.front();
+ if (ShouldWait() && !IsFenceSignaled(current_fence)) {
+ return;
+ }
+ PopAsyncFlushes();
+ if (current_fence->IsSemaphore()) {
+ memory_manager.template Write<u32>(current_fence->GetAddress(),
+ current_fence->GetPayload());
+ } else {
+ gpu.IncrementSyncPoint(current_fence->GetPayload());
+ }
+ fences.pop();
+ }
+ }
+
+ bool ShouldWait() const {
+ return texture_cache.ShouldWaitAsyncFlushes() || buffer_cache.ShouldWaitAsyncFlushes() ||
+ query_cache.ShouldWaitAsyncFlushes();
+ }
+
+ bool ShouldFlush() const {
+ return texture_cache.HasUncommittedFlushes() || buffer_cache.HasUncommittedFlushes() ||
+ query_cache.HasUncommittedFlushes();
+ }
+
+ void PopAsyncFlushes() {
+ texture_cache.PopAsyncFlushes();
+ buffer_cache.PopAsyncFlushes();
+ query_cache.PopAsyncFlushes();
+ }
+
+ void CommitAsyncFlushes() {
+ texture_cache.CommitAsyncFlushes();
+ buffer_cache.CommitAsyncFlushes();
+ query_cache.CommitAsyncFlushes();
+ }
+
+ std::queue<TFence> fences;
+};
+
+} // namespace VideoCommon
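A backend plugs into this template by providing a `TFence` type plus the five virtuals above; the `is_stubbed` flag lets `SignalSemaphore`/`SignalSyncPoint` skip host GPU work entirely when no cache reported uncommitted flushes. A toy sketch of the release loop and the stubbed-fence contract (hypothetical `ToyFence`, not one of the real backends):

```cpp
#include <cstdio>
#include <memory>
#include <queue>

// Hypothetical fence obeying the backend contract described above:
// a stubbed fence never touches the host GPU and always reads as signaled.
struct ToyFence {
    bool is_stubbed;
    bool signaled = false;
};
using Fence = std::shared_ptr<ToyFence>;

// Stand-ins for the QueueFence / IsFenceSignaled virtuals.
static void QueueFence(const Fence& fence) {
    if (!fence->is_stubbed) {
        std::printf("fence submitted to the host GPU\n");
    }
}

static bool IsFenceSignaled(const Fence& fence) {
    return fence->is_stubbed || fence->signaled;
}

int main() {
    std::queue<Fence> fences;
    fences.push(std::make_shared<ToyFence>(ToyFence{true}));  // nothing to flush
    fences.push(std::make_shared<ToyFence>(ToyFence{false})); // real host fence
    QueueFence(fences.front());
    QueueFence(fences.back());
    // TryReleasePendingFences: release in order, stopping at the first fence
    // that has not signaled instead of blocking on it.
    while (!fences.empty() && IsFenceSignaled(fences.front())) {
        std::printf("fence released (stubbed=%d)\n",
                    static_cast<int>(fences.front()->is_stubbed));
        fences.pop();
    }
}
```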
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index a606f4abd..b87fd873d 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -9,6 +9,7 @@
#include "core/core_timing_util.h"
#include "core/frontend/emu_window.h"
#include "core/memory.h"
+#include "core/settings.h"
#include "video_core/engines/fermi_2d.h"
#include "video_core/engines/kepler_compute.h"
#include "video_core/engines/kepler_memory.h"
@@ -125,6 +126,28 @@ bool GPU::CancelSyncptInterrupt(const u32 syncpoint_id, const u32 value) {
return true;
}
+u64 GPU::RequestFlush(VAddr addr, std::size_t size) {
+ std::unique_lock lck{flush_request_mutex};
+ const u64 fence = ++last_flush_fence;
+ flush_requests.emplace_back(fence, addr, size);
+ return fence;
+}
+
+void GPU::TickWork() {
+ std::unique_lock lck{flush_request_mutex};
+ while (!flush_requests.empty()) {
+ auto& request = flush_requests.front();
+ const u64 fence = request.fence;
+ const VAddr addr = request.addr;
+ const std::size_t size = request.size;
+ flush_requests.pop_front();
+ flush_request_mutex.unlock();
+ renderer->Rasterizer().FlushRegion(addr, size);
+ current_flush_fence.store(fence);
+ flush_request_mutex.lock();
+ }
+}
+
u64 GPU::GetTicks() const {
// These values were reverse engineered by fincs from NVN
// The GPU clock is reported in units of 385/625 nanoseconds
@@ -132,7 +155,10 @@ u64 GPU::GetTicks() const {
constexpr u64 gpu_ticks_den = 625;
const u64 cpu_ticks = system.CoreTiming().GetTicks();
- const u64 nanoseconds = Core::Timing::CyclesToNs(cpu_ticks).count();
+ u64 nanoseconds = Core::Timing::CyclesToNs(cpu_ticks).count();
+ if (Settings::values.use_fast_gpu_time) {
+ nanoseconds /= 256;
+ }
const u64 nanoseconds_num = nanoseconds / gpu_ticks_den;
const u64 nanoseconds_rem = nanoseconds % gpu_ticks_den;
return nanoseconds_num * gpu_ticks_num + (nanoseconds_rem * gpu_ticks_num) / gpu_ticks_den;
@@ -142,6 +168,13 @@ void GPU::FlushCommands() {
renderer->Rasterizer().FlushCommands();
}
+void GPU::SyncGuestHost() {
+ renderer->Rasterizer().SyncGuestHost();
+}
+
+void GPU::OnCommandListEnd() {
+ renderer->Rasterizer().ReleaseFences();
+}
// Note that, traditionally, methods are treated as 4-byte addressable locations, and hence
// their numbers are written down multiplied by 4 in Docs. Here we are not multiply by 4.
// So the values you see in docs might be multiplied by 4.
@@ -180,16 +213,32 @@ void GPU::CallMethod(const MethodCall& method_call) {
ASSERT(method_call.subchannel < bound_engines.size());
- if (ExecuteMethodOnEngine(method_call)) {
+ if (ExecuteMethodOnEngine(method_call.method)) {
CallEngineMethod(method_call);
} else {
CallPullerMethod(method_call);
}
}
-bool GPU::ExecuteMethodOnEngine(const MethodCall& method_call) {
- const auto method = static_cast<BufferMethods>(method_call.method);
- return method >= BufferMethods::NonPullerMethods;
+void GPU::CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
+ u32 methods_pending) {
+ LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method, subchannel);
+
+ ASSERT(subchannel < bound_engines.size());
+
+ if (ExecuteMethodOnEngine(method)) {
+ CallEngineMultiMethod(method, subchannel, base_start, amount, methods_pending);
+ } else {
+ for (std::size_t i = 0; i < amount; i++) {
+ CallPullerMethod(
+ {method, base_start[i], subchannel, methods_pending - static_cast<u32>(i)});
+ }
+ }
+}
+
+bool GPU::ExecuteMethodOnEngine(u32 method) {
+ const auto buffer_method = static_cast<BufferMethods>(method);
+ return buffer_method >= BufferMethods::NonPullerMethods;
}
void GPU::CallPullerMethod(const MethodCall& method_call) {
@@ -269,6 +318,31 @@ void GPU::CallEngineMethod(const MethodCall& method_call) {
}
}
+void GPU::CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
+ u32 methods_pending) {
+ const EngineID engine = bound_engines[subchannel];
+
+ switch (engine) {
+ case EngineID::FERMI_TWOD_A:
+ fermi_2d->CallMultiMethod(method, base_start, amount, methods_pending);
+ break;
+ case EngineID::MAXWELL_B:
+ maxwell_3d->CallMultiMethod(method, base_start, amount, methods_pending);
+ break;
+ case EngineID::KEPLER_COMPUTE_B:
+ kepler_compute->CallMultiMethod(method, base_start, amount, methods_pending);
+ break;
+ case EngineID::MAXWELL_DMA_COPY_A:
+ maxwell_dma->CallMultiMethod(method, base_start, amount, methods_pending);
+ break;
+ case EngineID::KEPLER_INLINE_TO_MEMORY_B:
+ kepler_memory->CallMultiMethod(method, base_start, amount, methods_pending);
+ break;
+ default:
+ UNIMPLEMENTED_MSG("Unimplemented engine");
+ }
+}
+
void GPU::ProcessBindMethod(const MethodCall& method_call) {
// Bind the current subchannel to the desired engine id.
LOG_DEBUG(HW_GPU, "Binding subchannel {} to engine {}", method_call.subchannel,
diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h
index 1a2d747be..dd51c95b7 100644
--- a/src/video_core/gpu.h
+++ b/src/video_core/gpu.h
@@ -155,7 +155,27 @@ public:
/// Calls a GPU method.
void CallMethod(const MethodCall& method_call);
+ /// Calls a GPU multivalue method.
+ void CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
+ u32 methods_pending);
+
+ /// Flush all currently written commands to the host GPU for execution.
void FlushCommands();
+ /// Synchronizes CPU writes with host GPU memory.
+ void SyncGuestHost();
+ /// Signal the end of a command list.
+ virtual void OnCommandListEnd();
+
+ /// Request a host GPU memory flush from the CPU.
+ u64 RequestFlush(VAddr addr, std::size_t size);
+
+ /// Obtains current flush request fence id.
+ u64 CurrentFlushRequestFence() const {
+ return current_flush_fence.load(std::memory_order_relaxed);
+ }
+
+ /// Tick pending requests within the GPU.
+ void TickWork();
/// Returns a reference to the Maxwell3D GPU engine.
Engines::Maxwell3D& Maxwell3D();
@@ -293,8 +313,12 @@ private:
/// Calls a GPU engine method.
void CallEngineMethod(const MethodCall& method_call);
+ /// Calls a GPU engine multivalue method.
+ void CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
+ u32 methods_pending);
+
/// Determines where the method should be executed.
- bool ExecuteMethodOnEngine(const MethodCall& method_call);
+ bool ExecuteMethodOnEngine(u32 method);
protected:
std::unique_ptr<Tegra::DmaPusher> dma_pusher;
@@ -325,6 +349,19 @@ private:
std::condition_variable sync_cv;
+ struct FlushRequest {
+ FlushRequest(u64 fence, VAddr addr, std::size_t size)
+ : fence{fence}, addr{addr}, size{size} {}
+ u64 fence;
+ VAddr addr;
+ std::size_t size;
+ };
+
+ std::list<FlushRequest> flush_requests;
+ std::atomic<u64> current_flush_fence{};
+ u64 last_flush_fence{};
+ std::mutex flush_request_mutex;
+
const bool is_async;
};
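`RequestFlush`, `TickWork` and `CurrentFlushRequestFence` form a small cross-thread handshake: the CPU side enqueues a flush request and receives a fence id, the GPU thread drains the queue in `TickWork` (dropping the lock around the actual flush) and publishes each completed id, and the CPU side spins until its id is reached, as `ThreadManager::FlushRegion` does below. A self-contained sketch of that handshake under simplified, hypothetical names:

```cpp
#include <atomic>
#include <cstdint>
#include <cstdio>
#include <deque>
#include <mutex>
#include <thread>

struct FlushQueue {
    // CPU side: post a request and receive the fence id to wait on.
    std::uint64_t RequestFlush(std::uint64_t addr) {
        std::lock_guard lock{mutex};
        const std::uint64_t fence = ++last_fence;
        requests.push_back({fence, addr});
        return fence;
    }

    // GPU thread: flush each region outside the lock, then publish its id.
    void TickWork() {
        std::unique_lock lock{mutex};
        while (!requests.empty()) {
            const Request request = requests.front();
            requests.pop_front();
            lock.unlock();
            std::printf("flushing region at %#llx\n",
                        static_cast<unsigned long long>(request.addr));
            current_fence.store(request.fence, std::memory_order_release);
            lock.lock();
        }
    }

    std::uint64_t CurrentFence() const {
        return current_fence.load(std::memory_order_acquire);
    }

private:
    struct Request {
        std::uint64_t fence;
        std::uint64_t addr;
    };
    std::deque<Request> requests;
    std::atomic<std::uint64_t> current_fence{0};
    std::uint64_t last_fence = 0;
    std::mutex mutex;
};

int main() {
    FlushQueue queue;
    const std::uint64_t fence = queue.RequestFlush(0xDEAD0000);
    std::thread gpu_thread{[&] { queue.TickWork(); }};
    // CPU side: busy-wait until the GPU thread reports our fence, mirroring
    // the loop in ThreadManager::FlushRegion.
    while (fence > queue.CurrentFence()) {
    }
    gpu_thread.join();
}
```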
diff --git a/src/video_core/gpu_asynch.cpp b/src/video_core/gpu_asynch.cpp
index 20e73a37e..53305ab43 100644
--- a/src/video_core/gpu_asynch.cpp
+++ b/src/video_core/gpu_asynch.cpp
@@ -52,4 +52,8 @@ void GPUAsynch::WaitIdle() const {
gpu_thread.WaitIdle();
}
+void GPUAsynch::OnCommandListEnd() {
+ gpu_thread.OnCommandListEnd();
+}
+
} // namespace VideoCommon
diff --git a/src/video_core/gpu_asynch.h b/src/video_core/gpu_asynch.h
index 03fd0eef0..517658612 100644
--- a/src/video_core/gpu_asynch.h
+++ b/src/video_core/gpu_asynch.h
@@ -32,6 +32,8 @@ public:
void FlushAndInvalidateRegion(VAddr addr, u64 size) override;
void WaitIdle() const override;
+ void OnCommandListEnd() override;
+
protected:
void TriggerCpuInterrupt(u32 syncpoint_id, u32 value) const override;
diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp
index 10cda686b..c3bb4fe06 100644
--- a/src/video_core/gpu_thread.cpp
+++ b/src/video_core/gpu_thread.cpp
@@ -6,6 +6,7 @@
#include "common/microprofile.h"
#include "core/core.h"
#include "core/frontend/emu_window.h"
+#include "core/settings.h"
#include "video_core/dma_pusher.h"
#include "video_core/gpu.h"
#include "video_core/gpu_thread.h"
@@ -14,8 +15,9 @@
namespace VideoCommon::GPUThread {
/// Runs the GPU thread
-static void RunThread(VideoCore::RendererBase& renderer, Core::Frontend::GraphicsContext& context,
- Tegra::DmaPusher& dma_pusher, SynchState& state) {
+static void RunThread(Core::System& system, VideoCore::RendererBase& renderer,
+ Core::Frontend::GraphicsContext& context, Tegra::DmaPusher& dma_pusher,
+ SynchState& state) {
MicroProfileOnThreadCreate("GpuThread");
// Wait for first GPU command before acquiring the window context
@@ -37,10 +39,14 @@ static void RunThread(VideoCore::RendererBase& renderer, Core::Frontend::Graphic
dma_pusher.DispatchCalls();
} else if (const auto data = std::get_if<SwapBuffersCommand>(&next.data)) {
renderer.SwapBuffers(data->framebuffer ? &*data->framebuffer : nullptr);
+ } else if (const auto data = std::get_if<OnCommandListEndCommand>(&next.data)) {
+ renderer.Rasterizer().ReleaseFences();
+ } else if (const auto data = std::get_if<GPUTickCommand>(&next.data)) {
+ system.GPU().TickWork();
} else if (const auto data = std::get_if<FlushRegionCommand>(&next.data)) {
renderer.Rasterizer().FlushRegion(data->addr, data->size);
} else if (const auto data = std::get_if<InvalidateRegionCommand>(&next.data)) {
- renderer.Rasterizer().InvalidateRegion(data->addr, data->size);
+ renderer.Rasterizer().OnCPUWrite(data->addr, data->size);
} else if (std::holds_alternative<EndProcessingCommand>(next.data)) {
return;
} else {
@@ -65,8 +71,8 @@ ThreadManager::~ThreadManager() {
void ThreadManager::StartThread(VideoCore::RendererBase& renderer,
Core::Frontend::GraphicsContext& context,
Tegra::DmaPusher& dma_pusher) {
- thread = std::thread{RunThread, std::ref(renderer), std::ref(context), std::ref(dma_pusher),
- std::ref(state)};
+ thread = std::thread{RunThread, std::ref(system), std::ref(renderer),
+ std::ref(context), std::ref(dma_pusher), std::ref(state)};
}
void ThreadManager::SubmitList(Tegra::CommandList&& entries) {
@@ -78,16 +84,29 @@ void ThreadManager::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
}
void ThreadManager::FlushRegion(VAddr addr, u64 size) {
- PushCommand(FlushRegionCommand(addr, size));
+ if (!Settings::IsGPULevelHigh()) {
+ PushCommand(FlushRegionCommand(addr, size));
+ return;
+ }
+ if (!Settings::IsGPULevelExtreme()) {
+ return;
+ }
+ if (system.Renderer().Rasterizer().MustFlushRegion(addr, size)) {
+ auto& gpu = system.GPU();
+ u64 fence = gpu.RequestFlush(addr, size);
+ PushCommand(GPUTickCommand());
+ while (fence > gpu.CurrentFlushRequestFence()) {
+ }
+ }
}
void ThreadManager::InvalidateRegion(VAddr addr, u64 size) {
- system.Renderer().Rasterizer().InvalidateRegion(addr, size);
+ system.Renderer().Rasterizer().OnCPUWrite(addr, size);
}
void ThreadManager::FlushAndInvalidateRegion(VAddr addr, u64 size) {
// Skip flush on asynch mode, as FlushAndInvalidateRegion is not used for anything too important
- InvalidateRegion(addr, size);
+ system.Renderer().Rasterizer().OnCPUWrite(addr, size);
}
void ThreadManager::WaitIdle() const {
@@ -95,6 +114,10 @@ void ThreadManager::WaitIdle() const {
}
}
+void ThreadManager::OnCommandListEnd() {
+ PushCommand(OnCommandListEndCommand());
+}
+
u64 ThreadManager::PushCommand(CommandData&& command_data) {
const u64 fence{++state.last_fence};
state.queue.Push(CommandDataContainer(std::move(command_data), fence));
diff --git a/src/video_core/gpu_thread.h b/src/video_core/gpu_thread.h
index cd74ad330..5a28335d6 100644
--- a/src/video_core/gpu_thread.h
+++ b/src/video_core/gpu_thread.h
@@ -70,9 +70,16 @@ struct FlushAndInvalidateRegionCommand final {
u64 size;
};
+/// Command called within the GPU to schedule actions after a command list ends
+struct OnCommandListEndCommand final {};
+
+/// Command to make the GPU look into pending requests
+struct GPUTickCommand final {};
+
using CommandData =
std::variant<EndProcessingCommand, SubmitListCommand, SwapBuffersCommand, FlushRegionCommand,
- InvalidateRegionCommand, FlushAndInvalidateRegionCommand>;
+ InvalidateRegionCommand, FlushAndInvalidateRegionCommand, OnCommandListEndCommand,
+ GPUTickCommand>;
struct CommandDataContainer {
CommandDataContainer() = default;
@@ -122,6 +129,8 @@ public:
// Wait until the gpu thread is idle.
void WaitIdle() const;
+ void OnCommandListEnd();
+
private:
/// Pushes a command to be executed by the GPU thread
u64 PushCommand(CommandData&& command_data);
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index fd49bc2a9..dbee9f634 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -51,11 +51,8 @@ GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, u64 size) {
const GPUVAddr gpu_addr{FindFreeRegion(address_space_base, aligned_size)};
MapBackingMemory(gpu_addr, system.Memory().GetPointer(cpu_addr), aligned_size, cpu_addr);
- ASSERT(system.CurrentProcess()
- ->PageTable()
- .SetMemoryAttribute(cpu_addr, size, Kernel::Memory::MemoryAttribute::DeviceShared,
- Kernel::Memory::MemoryAttribute::DeviceShared)
- .IsSuccess());
+ ASSERT(
+ system.CurrentProcess()->PageTable().LockForDeviceAddressSpace(cpu_addr, size).IsSuccess());
return gpu_addr;
}
@@ -66,11 +63,8 @@ GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, GPUVAddr gpu_addr, u64 size)
const u64 aligned_size{Common::AlignUp(size, page_size)};
MapBackingMemory(gpu_addr, system.Memory().GetPointer(cpu_addr), aligned_size, cpu_addr);
- ASSERT(system.CurrentProcess()
- ->PageTable()
- .SetMemoryAttribute(cpu_addr, size, Kernel::Memory::MemoryAttribute::DeviceShared,
- Kernel::Memory::MemoryAttribute::DeviceShared)
- .IsSuccess());
+ ASSERT(
+ system.CurrentProcess()->PageTable().LockForDeviceAddressSpace(cpu_addr, size).IsSuccess());
return gpu_addr;
}
@@ -87,9 +81,7 @@ GPUVAddr MemoryManager::UnmapBuffer(GPUVAddr gpu_addr, u64 size) {
UnmapRange(gpu_addr, aligned_size);
ASSERT(system.CurrentProcess()
->PageTable()
- .SetMemoryAttribute(cpu_addr.value(), size,
- Kernel::Memory::MemoryAttribute::DeviceShared,
- Kernel::Memory::MemoryAttribute::None)
+ .UnlockForDeviceAddressSpace(cpu_addr.value(), size)
.IsSuccess());
return gpu_addr;
diff --git a/src/video_core/query_cache.h b/src/video_core/query_cache.h
index 5ea2b01f2..2f75f8801 100644
--- a/src/video_core/query_cache.h
+++ b/src/video_core/query_cache.h
@@ -12,10 +12,12 @@
#include <mutex>
#include <optional>
#include <unordered_map>
+#include <unordered_set>
#include <vector>
#include "common/assert.h"
#include "core/core.h"
+#include "core/settings.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/gpu.h"
#include "video_core/memory_manager.h"
@@ -130,6 +132,9 @@ public:
}
query->BindCounter(Stream(type).Current(), timestamp);
+ if (Settings::values.use_asynchronous_gpu_emulation) {
+ AsyncFlushQuery(cpu_addr);
+ }
}
/// Updates counters from GPU state. Expected to be called once per draw, clear or dispatch.
@@ -170,6 +175,37 @@ public:
return streams[static_cast<std::size_t>(type)];
}
+ void CommitAsyncFlushes() {
+ committed_flushes.push_back(uncommitted_flushes);
+ uncommitted_flushes.reset();
+ }
+
+ bool HasUncommittedFlushes() const {
+ return uncommitted_flushes != nullptr;
+ }
+
+ bool ShouldWaitAsyncFlushes() const {
+ if (committed_flushes.empty()) {
+ return false;
+ }
+ return committed_flushes.front() != nullptr;
+ }
+
+ void PopAsyncFlushes() {
+ if (committed_flushes.empty()) {
+ return;
+ }
+ auto& flush_list = committed_flushes.front();
+ if (!flush_list) {
+ committed_flushes.pop_front();
+ return;
+ }
+ for (VAddr query_address : *flush_list) {
+ FlushAndRemoveRegion(query_address, 4);
+ }
+ committed_flushes.pop_front();
+ }
+
protected:
std::array<QueryPool, VideoCore::NumQueryTypes> query_pools;
@@ -224,6 +260,13 @@ private:
return found != std::end(contents) ? &*found : nullptr;
}
+ void AsyncFlushQuery(VAddr addr) {
+ if (!uncommitted_flushes) {
+ uncommitted_flushes = std::make_shared<std::unordered_set<VAddr>>();
+ }
+ uncommitted_flushes->insert(addr);
+ }
+
static constexpr std::uintptr_t PAGE_SIZE = 4096;
static constexpr unsigned PAGE_SHIFT = 12;
@@ -235,6 +278,9 @@ private:
std::unordered_map<u64, std::vector<CachedQuery>> cached_queries;
std::array<CounterStream, VideoCore::NumQueryTypes> streams;
+
+ std::shared_ptr<std::unordered_set<VAddr>> uncommitted_flushes{};
+ std::list<std::shared_ptr<std::unordered_set<VAddr>>> committed_flushes;
};
template <class QueryCache, class HostCounter>
diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h
index 8ae5b9c4e..3cbdac8e7 100644
--- a/src/video_core/rasterizer_interface.h
+++ b/src/video_core/rasterizer_interface.h
@@ -49,19 +49,40 @@ public:
/// Records a GPU query and caches it
virtual void Query(GPUVAddr gpu_addr, QueryType type, std::optional<u64> timestamp) = 0;
+ /// Signal a GPU based semaphore as a fence
+ virtual void SignalSemaphore(GPUVAddr addr, u32 value) = 0;
+
+ /// Signal a GPU based syncpoint as a fence
+ virtual void SignalSyncPoint(u32 value) = 0;
+
+ /// Release all pending fences.
+ virtual void ReleaseFences() = 0;
+
/// Notify rasterizer that all caches should be flushed to Switch memory
virtual void FlushAll() = 0;
/// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
virtual void FlushRegion(VAddr addr, u64 size) = 0;
+ /// Check if the specified memory area requires flushing to CPU memory.
+ virtual bool MustFlushRegion(VAddr addr, u64 size) = 0;
+
/// Notify rasterizer that any caches of the specified region should be invalidated
virtual void InvalidateRegion(VAddr addr, u64 size) = 0;
+ /// Notify rasterizer that any caches of the specified region are out of sync with the guest
+ virtual void OnCPUWrite(VAddr addr, u64 size) = 0;
+
+ /// Sync memory between guest and host.
+ virtual void SyncGuestHost() = 0;
+
/// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
/// and invalidated
virtual void FlushAndInvalidateRegion(VAddr addr, u64 size) = 0;
+ /// Notify the host renderer to wait for previous primitive and compute operations.
+ virtual void WaitForIdle() = 0;
+
/// Notify the rasterizer to send all written commands to the host GPU.
virtual void FlushCommands() = 0;
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
index cb5792407..d2cab50bd 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
@@ -51,10 +51,6 @@ Buffer OGLBufferCache::CreateBlock(VAddr cpu_addr, std::size_t size) {
return std::make_shared<CachedBufferBlock>(cpu_addr, size);
}
-void OGLBufferCache::WriteBarrier() {
- glMemoryBarrier(GL_ALL_BARRIER_BITS);
-}
-
GLuint OGLBufferCache::ToHandle(const Buffer& buffer) {
return buffer->GetHandle();
}
@@ -72,6 +68,7 @@ void OGLBufferCache::UploadBlockData(const Buffer& buffer, std::size_t offset, s
void OGLBufferCache::DownloadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
u8* data) {
MICROPROFILE_SCOPE(OpenGL_Buffer_Download);
+ glMemoryBarrier(GL_BUFFER_UPDATE_BARRIER_BIT);
glGetNamedBufferSubData(buffer->GetHandle(), static_cast<GLintptr>(offset),
static_cast<GLsizeiptr>(size), data);
}
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.h b/src/video_core/renderer_opengl/gl_buffer_cache.h
index a74817857..a9e86cfc7 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.h
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.h
@@ -59,8 +59,6 @@ protected:
GLuint ToHandle(const Buffer& buffer) override;
- void WriteBarrier() override;
-
void UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
const u8* data) override;
diff --git a/src/video_core/renderer_opengl/gl_fence_manager.cpp b/src/video_core/renderer_opengl/gl_fence_manager.cpp
new file mode 100644
index 000000000..99ddcb3f8
--- /dev/null
+++ b/src/video_core/renderer_opengl/gl_fence_manager.cpp
@@ -0,0 +1,72 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/assert.h"
+
+#include "video_core/renderer_opengl/gl_fence_manager.h"
+
+namespace OpenGL {
+
+GLInnerFence::GLInnerFence(u32 payload, bool is_stubbed)
+ : VideoCommon::FenceBase(payload, is_stubbed), sync_object{} {}
+
+GLInnerFence::GLInnerFence(GPUVAddr address, u32 payload, bool is_stubbed)
+ : VideoCommon::FenceBase(address, payload, is_stubbed), sync_object{} {}
+
+GLInnerFence::~GLInnerFence() = default;
+
+void GLInnerFence::Queue() {
+ if (is_stubbed) {
+ return;
+ }
+ ASSERT(sync_object.handle == 0);
+ sync_object.Create();
+}
+
+bool GLInnerFence::IsSignaled() const {
+ if (is_stubbed) {
+ return true;
+ }
+ ASSERT(sync_object.handle != 0);
+ GLsizei length;
+ GLint sync_status;
+ glGetSynciv(sync_object.handle, GL_SYNC_STATUS, sizeof(GLint), &length, &sync_status);
+ return sync_status == GL_SIGNALED;
+}
+
+void GLInnerFence::Wait() {
+ if (is_stubbed) {
+ return;
+ }
+ ASSERT(sync_object.handle != 0);
+ glClientWaitSync(sync_object.handle, 0, GL_TIMEOUT_IGNORED);
+}
+
+FenceManagerOpenGL::FenceManagerOpenGL(Core::System& system,
+ VideoCore::RasterizerInterface& rasterizer,
+ TextureCacheOpenGL& texture_cache,
+ OGLBufferCache& buffer_cache, QueryCache& query_cache)
+ : GenericFenceManager(system, rasterizer, texture_cache, buffer_cache, query_cache) {}
+
+Fence FenceManagerOpenGL::CreateFence(u32 value, bool is_stubbed) {
+ return std::make_shared<GLInnerFence>(value, is_stubbed);
+}
+
+Fence FenceManagerOpenGL::CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) {
+ return std::make_shared<GLInnerFence>(addr, value, is_stubbed);
+}
+
+void FenceManagerOpenGL::QueueFence(Fence& fence) {
+ fence->Queue();
+}
+
+bool FenceManagerOpenGL::IsFenceSignaled(Fence& fence) const {
+ return fence->IsSignaled();
+}
+
+void FenceManagerOpenGL::WaitFence(Fence& fence) {
+ fence->Wait();
+}
+
+} // namespace OpenGL
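For reference, the intended lifecycle of a non-stubbed fence, written out as a usage sketch (assumes a live GL context and that OGLSync::Create wraps glFenceSync, which is how the asserts above read):

GLInnerFence fence(/*payload=*/1, /*is_stubbed=*/false);
fence.Queue();              // inserts a GLsync after the queued GL commands
if (!fence.IsSignaled()) {  // cheap poll through glGetSynciv
    fence.Wait();           // blocks in glClientWaitSync until signaled
}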
diff --git a/src/video_core/renderer_opengl/gl_fence_manager.h b/src/video_core/renderer_opengl/gl_fence_manager.h
new file mode 100644
index 000000000..c917b3343
--- /dev/null
+++ b/src/video_core/renderer_opengl/gl_fence_manager.h
@@ -0,0 +1,53 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <memory>
+#include <glad/glad.h>
+
+#include "common/common_types.h"
+#include "video_core/fence_manager.h"
+#include "video_core/renderer_opengl/gl_buffer_cache.h"
+#include "video_core/renderer_opengl/gl_query_cache.h"
+#include "video_core/renderer_opengl/gl_resource_manager.h"
+#include "video_core/renderer_opengl/gl_texture_cache.h"
+
+namespace OpenGL {
+
+class GLInnerFence : public VideoCommon::FenceBase {
+public:
+ GLInnerFence(u32 payload, bool is_stubbed);
+ GLInnerFence(GPUVAddr address, u32 payload, bool is_stubbed);
+ ~GLInnerFence();
+
+ void Queue();
+
+ bool IsSignaled() const;
+
+ void Wait();
+
+private:
+ OGLSync sync_object;
+};
+
+using Fence = std::shared_ptr<GLInnerFence>;
+using GenericFenceManager =
+ VideoCommon::FenceManager<Fence, TextureCacheOpenGL, OGLBufferCache, QueryCache>;
+
+class FenceManagerOpenGL final : public GenericFenceManager {
+public:
+ FenceManagerOpenGL(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
+ TextureCacheOpenGL& texture_cache, OGLBufferCache& buffer_cache,
+ QueryCache& query_cache);
+
+protected:
+ Fence CreateFence(u32 value, bool is_stubbed) override;
+ Fence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) override;
+ void QueueFence(Fence& fence) override;
+ bool IsFenceSignaled(Fence& fence) const override;
+ void WaitFence(Fence& fence) override;
+};
+
+} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 175374f0d..8b3b3ce92 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -59,14 +59,12 @@ constexpr std::size_t NumSupportedVertexAttributes = 16;
template <typename Engine, typename Entry>
Tegra::Texture::FullTextureInfo GetTextureInfo(const Engine& engine, const Entry& entry,
ShaderType shader_type, std::size_t index = 0) {
- if (entry.IsBindless()) {
- const Tegra::Texture::TextureHandle tex_handle =
- engine.AccessConstBuffer32(shader_type, entry.GetBuffer(), entry.GetOffset());
+ if (entry.is_bindless) {
+ const auto tex_handle = engine.AccessConstBuffer32(shader_type, entry.buffer, entry.offset);
return engine.GetTextureInfo(tex_handle);
}
const auto& gpu_profile = engine.AccessGuestDriverProfile();
- const u32 offset =
- entry.GetOffset() + static_cast<u32>(index * gpu_profile.GetTextureHandlerSize());
+ const u32 offset = entry.offset + static_cast<u32>(index * gpu_profile.GetTextureHandlerSize());
if constexpr (std::is_same_v<Engine, Tegra::Engines::Maxwell3D>) {
return engine.GetStageTexture(shader_type, offset);
} else {
@@ -99,9 +97,10 @@ RasterizerOpenGL::RasterizerOpenGL(Core::System& system, Core::Frontend::EmuWind
ScreenInfo& info, GLShader::ProgramManager& program_manager,
StateTracker& state_tracker)
: RasterizerAccelerated{system.Memory()}, texture_cache{system, *this, device, state_tracker},
- shader_cache{*this, system, emu_window, device}, query_cache{system, *this}, system{system},
- screen_info{info}, program_manager{program_manager}, state_tracker{state_tracker},
- buffer_cache{*this, system, device, STREAM_BUFFER_SIZE} {
+ shader_cache{*this, system, emu_window, device}, query_cache{system, *this},
+ buffer_cache{*this, system, device, STREAM_BUFFER_SIZE},
+ fence_manager{system, *this, texture_cache, buffer_cache, query_cache}, system{system},
+ screen_info{info}, program_manager{program_manager}, state_tracker{state_tracker} {
CheckExtensions();
}
@@ -185,8 +184,12 @@ void RasterizerOpenGL::SetupVertexBuffer() {
const GPUVAddr start = vertex_array.StartAddress();
const GPUVAddr end = regs.vertex_array_limit[index].LimitAddress();
- ASSERT(end > start);
- const u64 size = end - start + 1;
+ ASSERT(end >= start);
+ const u64 size = end - start;
+ if (size == 0) {
+ glBindVertexBuffer(static_cast<GLuint>(index), 0, 0, vertex_array.stride);
+ continue;
+ }
const auto [vertex_buffer, vertex_buffer_offset] = buffer_cache.UploadMemory(start, size);
glBindVertexBuffer(static_cast<GLuint>(index), vertex_buffer, vertex_buffer_offset,
vertex_array.stride);
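Worked through with illustrative addresses, the new arithmetic treats a limit equal to the start address as an empty array rather than a one-byte one:

#include <cstdint>

// Sketch of the rule above: end >= start must hold, and a size of zero is
// now legal and handled by binding a null buffer.
std::uint64_t VertexRangeSize(std::uint64_t start, std::uint64_t end) {
    // start = 0x1000, end = 0x1000 -> 0     (bind buffer 0, skip the upload)
    // start = 0x1000, end = 0x1400 -> 0x400 (upload as before)
    return end - start;
}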
@@ -310,8 +313,8 @@ std::size_t RasterizerOpenGL::CalculateVertexArraysSize() const {
const GPUVAddr start = regs.vertex_array[index].StartAddress();
const GPUVAddr end = regs.vertex_array_limit[index].LimitAddress();
- ASSERT(end > start);
- size += end - start + 1;
+ size += end - start;
+ ASSERT(end >= start);
}
return size;
@@ -343,7 +346,7 @@ void RasterizerOpenGL::ConfigureFramebuffers() {
texture_cache.GuardRenderTargets(true);
- View depth_surface = texture_cache.GetDepthBufferSurface();
+ View depth_surface = texture_cache.GetDepthBufferSurface(true);
const auto& regs = gpu.regs;
UNIMPLEMENTED_IF(regs.rt_separate_frag_data == 0);
@@ -352,7 +355,7 @@ void RasterizerOpenGL::ConfigureFramebuffers() {
FramebufferCacheKey key;
const auto colors_count = static_cast<std::size_t>(regs.rt_control.count);
for (std::size_t index = 0; index < colors_count; ++index) {
- View color_surface{texture_cache.GetColorBufferSurface(index)};
+ View color_surface{texture_cache.GetColorBufferSurface(index, true)};
if (!color_surface) {
continue;
}
@@ -376,28 +379,52 @@ void RasterizerOpenGL::ConfigureFramebuffers() {
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer_cache.GetFramebuffer(key));
}
-void RasterizerOpenGL::ConfigureClearFramebuffer(bool using_color_fb, bool using_depth_fb,
- bool using_stencil_fb) {
+void RasterizerOpenGL::ConfigureClearFramebuffer(bool using_color, bool using_depth_stencil) {
auto& gpu = system.GPU().Maxwell3D();
const auto& regs = gpu.regs;
texture_cache.GuardRenderTargets(true);
View color_surface;
- if (using_color_fb) {
+
+ if (using_color) {
+ // Determine if we have to preserve the contents.
+ // First, check whether every clear mask is enabled; if any is disabled, the untouched channels must be preserved.
+ bool preserve_contents = !regs.clear_buffers.R || !regs.clear_buffers.G ||
+ !regs.clear_buffers.B || !regs.clear_buffers.A;
const std::size_t index = regs.clear_buffers.RT;
- color_surface = texture_cache.GetColorBufferSurface(index);
+ if (regs.clear_flags.scissor) {
+ // Then we have to confirm that the scissor test clears the whole image.
+ const auto& scissor = regs.scissor_test[0];
+ preserve_contents |= scissor.min_x > 0;
+ preserve_contents |= scissor.min_y > 0;
+ preserve_contents |= scissor.max_x < regs.rt[index].width;
+ preserve_contents |= scissor.max_y < regs.rt[index].height;
+ }
+
+ color_surface = texture_cache.GetColorBufferSurface(index, preserve_contents);
texture_cache.MarkColorBufferInUse(index);
}
+
View depth_surface;
- if (using_depth_fb || using_stencil_fb) {
- depth_surface = texture_cache.GetDepthBufferSurface();
+ if (using_depth_stencil) {
+ bool preserve_contents = false;
+ if (regs.clear_flags.scissor) {
+ // For depth/stencil clears we only have to confirm the scissor test covers the whole image.
+ const auto& scissor = regs.scissor_test[0];
+ preserve_contents |= scissor.min_x > 0;
+ preserve_contents |= scissor.min_y > 0;
+ preserve_contents |= scissor.max_x < regs.zeta_width;
+ preserve_contents |= scissor.max_y < regs.zeta_height;
+ }
+
+ depth_surface = texture_cache.GetDepthBufferSurface(preserve_contents);
texture_cache.MarkDepthBufferInUse();
}
texture_cache.GuardRenderTargets(false);
FramebufferCacheKey key;
- key.colors[0] = color_surface;
- key.zeta = depth_surface;
+ key.colors[0] = std::move(color_surface);
+ key.zeta = std::move(depth_surface);
state_tracker.NotifyFramebuffer();
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer_cache.GetFramebuffer(key));
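The preservation rule used in both branches can be restated as one predicate: contents must survive whenever the clear cannot touch every texel, either because a channel mask is off or because the scissor box does not cover the whole target. A condensed sketch (illustrative types, not yuzu code; the depth/stencil path passes all four masks as true):

struct ScissorBox {
    unsigned min_x, min_y, max_x, max_y;
};

bool MustPreserveContents(bool r, bool g, bool b, bool a, bool scissor_enabled,
                          const ScissorBox& s, unsigned width, unsigned height) {
    bool preserve = !r || !g || !b || !a; // partial channel clear
    if (scissor_enabled) {                // partial area clear
        preserve |= s.min_x > 0 || s.min_y > 0 || s.max_x < width || s.max_y < height;
    }
    return preserve;
}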
@@ -417,8 +444,7 @@ void RasterizerOpenGL::Clear() {
if (regs.clear_buffers.R || regs.clear_buffers.G || regs.clear_buffers.B ||
regs.clear_buffers.A) {
use_color = true;
- }
- if (use_color) {
+
state_tracker.NotifyColorMask0();
glColorMaski(0, regs.clear_buffers.R != 0, regs.clear_buffers.G != 0,
regs.clear_buffers.B != 0, regs.clear_buffers.A != 0);
@@ -456,7 +482,7 @@ void RasterizerOpenGL::Clear() {
UNIMPLEMENTED_IF(regs.clear_flags.viewport);
- ConfigureClearFramebuffer(use_color, use_depth, use_stencil);
+ ConfigureClearFramebuffer(use_color, use_depth || use_stencil);
if (use_color) {
glClearBufferfv(GL_COLOR, 0, regs.clear_color);
@@ -599,6 +625,8 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) {
EndTransformFeedback();
++num_queued_commands;
+
+ system.GPU().TickWork();
}
void RasterizerOpenGL::DispatchCompute(GPUVAddr code_addr) {
@@ -649,6 +677,13 @@ void RasterizerOpenGL::FlushRegion(VAddr addr, u64 size) {
query_cache.FlushRegion(addr, size);
}
+bool RasterizerOpenGL::MustFlushRegion(VAddr addr, u64 size) {
+ if (!Settings::IsGPULevelHigh()) {
+ return buffer_cache.MustFlushRegion(addr, size);
+ }
+ return texture_cache.MustFlushRegion(addr, size) || buffer_cache.MustFlushRegion(addr, size);
+}
+
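MustFlushRegion gates the more expensive texture-cache query behind the GPU accuracy level; below high accuracy only the buffer cache is consulted. Reduced to its decision logic (a sketch, not the real call sites):

bool NeedsFlush(bool gpu_level_high, bool texture_must_flush, bool buffer_must_flush) {
    if (!gpu_level_high) {
        return buffer_must_flush; // skip the texture-cache walk for speed
    }
    return texture_must_flush || buffer_must_flush;
}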
void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size) {
MICROPROFILE_SCOPE(OpenGL_CacheManagement);
if (addr == 0 || size == 0) {
@@ -660,13 +695,68 @@ void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size) {
query_cache.InvalidateRegion(addr, size);
}
+void RasterizerOpenGL::OnCPUWrite(VAddr addr, u64 size) {
+ MICROPROFILE_SCOPE(OpenGL_CacheManagement);
+ if (addr == 0 || size == 0) {
+ return;
+ }
+ texture_cache.OnCPUWrite(addr, size);
+ shader_cache.InvalidateRegion(addr, size);
+ buffer_cache.OnCPUWrite(addr, size);
+ query_cache.InvalidateRegion(addr, size);
+}
+
+void RasterizerOpenGL::SyncGuestHost() {
+ MICROPROFILE_SCOPE(OpenGL_CacheManagement);
+ texture_cache.SyncGuestHost();
+ buffer_cache.SyncGuestHost();
+}
+
+void RasterizerOpenGL::SignalSemaphore(GPUVAddr addr, u32 value) {
+ auto& gpu{system.GPU()};
+ if (!gpu.IsAsync()) {
+ auto& memory_manager{gpu.MemoryManager()};
+ memory_manager.Write<u32>(addr, value);
+ return;
+ }
+ fence_manager.SignalSemaphore(addr, value);
+}
+
+void RasterizerOpenGL::SignalSyncPoint(u32 value) {
+ auto& gpu{system.GPU()};
+ if (!gpu.IsAsync()) {
+ gpu.IncrementSyncPoint(value);
+ return;
+ }
+ fence_manager.SignalSyncPoint(value);
+}
+
+void RasterizerOpenGL::ReleaseFences() {
+ auto& gpu{system.GPU()};
+ if (!gpu.IsAsync()) {
+ return;
+ }
+ fence_manager.WaitPendingFences();
+}
+
void RasterizerOpenGL::FlushAndInvalidateRegion(VAddr addr, u64 size) {
- if (Settings::values.use_accurate_gpu_emulation) {
+ if (Settings::IsGPULevelExtreme()) {
FlushRegion(addr, size);
}
InvalidateRegion(addr, size);
}
+void RasterizerOpenGL::WaitForIdle() {
+ // Place a barrier on everything that is not framebuffer-related.
+ // This is related to another flag that is not currently implemented.
+ glMemoryBarrier(GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT | GL_ELEMENT_ARRAY_BARRIER_BIT |
+ GL_UNIFORM_BARRIER_BIT | GL_TEXTURE_FETCH_BARRIER_BIT |
+ GL_SHADER_IMAGE_ACCESS_BARRIER_BIT | GL_COMMAND_BARRIER_BIT |
+ GL_PIXEL_BUFFER_BARRIER_BIT | GL_TEXTURE_UPDATE_BARRIER_BIT |
+ GL_BUFFER_UPDATE_BARRIER_BIT | GL_TRANSFORM_FEEDBACK_BARRIER_BIT |
+ GL_SHADER_STORAGE_BARRIER_BIT | GL_QUERY_BUFFER_BARRIER_BIT);
+}
+
void RasterizerOpenGL::FlushCommands() {
// Only flush when we have commands queued to OpenGL.
if (num_queued_commands == 0) {
@@ -775,9 +865,9 @@ void RasterizerOpenGL::SetupDrawGlobalMemory(std::size_t stage_index, const Shad
u32 binding = device.GetBaseBindings(stage_index).shader_storage_buffer;
for (const auto& entry : shader->GetEntries().global_memory_entries) {
- const auto addr{cbufs.const_buffers[entry.GetCbufIndex()].address + entry.GetCbufOffset()};
- const auto gpu_addr{memory_manager.Read<u64>(addr)};
- const auto size{memory_manager.Read<u32>(addr + 8)};
+ const GPUVAddr addr{cbufs.const_buffers[entry.cbuf_index].address + entry.cbuf_offset};
+ const GPUVAddr gpu_addr{memory_manager.Read<u64>(addr)};
+ const u32 size{memory_manager.Read<u32>(addr + 8)};
SetupGlobalMemory(binding++, entry, gpu_addr, size);
}
}
@@ -789,7 +879,7 @@ void RasterizerOpenGL::SetupComputeGlobalMemory(const Shader& kernel) {
u32 binding = 0;
for (const auto& entry : kernel->GetEntries().global_memory_entries) {
- const auto addr{cbufs[entry.GetCbufIndex()].Address() + entry.GetCbufOffset()};
+ const auto addr{cbufs[entry.cbuf_index].Address() + entry.cbuf_offset};
const auto gpu_addr{memory_manager.Read<u64>(addr)};
const auto size{memory_manager.Read<u32>(addr + 8)};
SetupGlobalMemory(binding++, entry, gpu_addr, size);
@@ -800,7 +890,7 @@ void RasterizerOpenGL::SetupGlobalMemory(u32 binding, const GlobalMemoryEntry& e
GPUVAddr gpu_addr, std::size_t size) {
const auto alignment{device.GetShaderStorageBufferAlignment()};
const auto [ssbo, buffer_offset] =
- buffer_cache.UploadMemory(gpu_addr, size, alignment, entry.IsWritten());
+ buffer_cache.UploadMemory(gpu_addr, size, alignment, entry.is_written);
glBindBufferRange(GL_SHADER_STORAGE_BUFFER, binding, ssbo, buffer_offset,
static_cast<GLsizeiptr>(size));
}
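The reads at addr and addr + 8 imply the following layout for each global-memory descriptor in the const buffer; this struct is inferred from the access pattern, not a documented format:

#include <cstdint>

struct GlobalMemoryDescriptor {
    std::uint64_t gpu_addr; // fetched with Read<u64>(addr)
    std::uint32_t size;     // fetched with Read<u32>(addr + 8)
};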
@@ -811,7 +901,7 @@ void RasterizerOpenGL::SetupDrawTextures(std::size_t stage_index, const Shader&
u32 binding = device.GetBaseBindings(stage_index).sampler;
for (const auto& entry : shader->GetEntries().samplers) {
const auto shader_type = static_cast<ShaderType>(stage_index);
- for (std::size_t i = 0; i < entry.Size(); ++i) {
+ for (std::size_t i = 0; i < entry.size; ++i) {
const auto texture = GetTextureInfo(maxwell3d, entry, shader_type, i);
SetupTexture(binding++, texture, entry);
}
@@ -823,7 +913,7 @@ void RasterizerOpenGL::SetupComputeTextures(const Shader& kernel) {
const auto& compute = system.GPU().KeplerCompute();
u32 binding = 0;
for (const auto& entry : kernel->GetEntries().samplers) {
- for (std::size_t i = 0; i < entry.Size(); ++i) {
+ for (std::size_t i = 0; i < entry.size; ++i) {
const auto texture = GetTextureInfo(compute, entry, ShaderType::Compute, i);
SetupTexture(binding++, texture, entry);
}
@@ -880,7 +970,7 @@ void RasterizerOpenGL::SetupImage(u32 binding, const Tegra::Texture::TICEntry& t
if (!tic.IsBuffer()) {
view->ApplySwizzle(tic.x_source, tic.y_source, tic.z_source, tic.w_source);
}
- if (entry.IsWritten()) {
+ if (entry.is_written) {
view->MarkAsModified(texture_cache.Tick());
}
glBindImageTexture(binding, view->GetTexture(), 0, GL_TRUE, 0, GL_READ_WRITE,
@@ -941,11 +1031,7 @@ void RasterizerOpenGL::SyncDepthClamp() {
}
flags[Dirty::DepthClampEnabled] = false;
- const auto& state = gpu.regs.view_volume_clip_control;
- UNIMPLEMENTED_IF_MSG(state.depth_clamp_far != state.depth_clamp_near,
- "Unimplemented depth clamp separation!");
-
- oglEnable(GL_DEPTH_CLAMP, state.depth_clamp_far || state.depth_clamp_near);
+ oglEnable(GL_DEPTH_CLAMP, gpu.regs.view_volume_clip_control.depth_clamp_disabled == 0);
}
void RasterizerOpenGL::SyncClipEnabled(u32 clip_mask) {
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index caea174d2..b94c65907 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -23,6 +23,7 @@
#include "video_core/rasterizer_interface.h"
#include "video_core/renderer_opengl/gl_buffer_cache.h"
#include "video_core/renderer_opengl/gl_device.h"
+#include "video_core/renderer_opengl/gl_fence_manager.h"
#include "video_core/renderer_opengl/gl_framebuffer_cache.h"
#include "video_core/renderer_opengl/gl_query_cache.h"
#include "video_core/renderer_opengl/gl_resource_manager.h"
@@ -66,8 +67,15 @@ public:
void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) override;
void FlushAll() override;
void FlushRegion(VAddr addr, u64 size) override;
+ bool MustFlushRegion(VAddr addr, u64 size) override;
void InvalidateRegion(VAddr addr, u64 size) override;
+ void OnCPUWrite(VAddr addr, u64 size) override;
+ void SyncGuestHost() override;
+ void SignalSemaphore(GPUVAddr addr, u32 value) override;
+ void SignalSyncPoint(u32 value) override;
+ void ReleaseFences() override;
void FlushAndInvalidateRegion(VAddr addr, u64 size) override;
+ void WaitForIdle() override;
void FlushCommands() override;
void TickFrame() override;
bool AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src,
@@ -88,7 +96,8 @@ private:
/// Configures the color and depth framebuffer states.
void ConfigureFramebuffers();
- void ConfigureClearFramebuffer(bool using_color_fb, bool using_depth_fb, bool using_stencil_fb);
+ /// Configures the color and depth framebuffer for clearing.
+ void ConfigureClearFramebuffer(bool using_color, bool using_depth_stencil);
/// Configures the current constbuffers to use for the draw command.
void SetupDrawConstBuffers(std::size_t stage_index, const Shader& shader);
@@ -222,6 +231,8 @@ private:
SamplerCacheOpenGL sampler_cache;
FramebufferCacheOpenGL framebuffer_cache;
QueryCache query_cache;
+ OGLBufferCache buffer_cache;
+ FenceManagerOpenGL fence_manager;
Core::System& system;
ScreenInfo& screen_info;
@@ -229,7 +240,6 @@ private:
StateTracker& state_tracker;
static constexpr std::size_t STREAM_BUFFER_SIZE = 128 * 1024 * 1024;
- OGLBufferCache buffer_cache;
GLint vertex_binding = 0;
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp
index 6d2ff20f9..9759a7078 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp
@@ -10,8 +10,6 @@
#include <thread>
#include <unordered_set>
-#include <boost/functional/hash.hpp>
-
#include "common/alignment.h"
#include "common/assert.h"
#include "common/logging/log.h"
@@ -28,76 +26,26 @@
#include "video_core/renderer_opengl/gl_shader_disk_cache.h"
#include "video_core/renderer_opengl/gl_state_tracker.h"
#include "video_core/renderer_opengl/utils.h"
+#include "video_core/shader/memory_util.h"
#include "video_core/shader/registry.h"
#include "video_core/shader/shader_ir.h"
namespace OpenGL {
using Tegra::Engines::ShaderType;
+using VideoCommon::Shader::GetShaderAddress;
+using VideoCommon::Shader::GetShaderCode;
+using VideoCommon::Shader::GetUniqueIdentifier;
+using VideoCommon::Shader::KERNEL_MAIN_OFFSET;
using VideoCommon::Shader::ProgramCode;
using VideoCommon::Shader::Registry;
using VideoCommon::Shader::ShaderIR;
+using VideoCommon::Shader::STAGE_MAIN_OFFSET;
namespace {
-constexpr u32 STAGE_MAIN_OFFSET = 10;
-constexpr u32 KERNEL_MAIN_OFFSET = 0;
-
constexpr VideoCommon::Shader::CompilerSettings COMPILER_SETTINGS{};
-/// Gets the address for the specified shader stage program
-GPUVAddr GetShaderAddress(Core::System& system, Maxwell::ShaderProgram program) {
- const auto& gpu{system.GPU().Maxwell3D()};
- const auto& shader_config{gpu.regs.shader_config[static_cast<std::size_t>(program)]};
- return gpu.regs.code_address.CodeAddress() + shader_config.offset;
-}
-
-/// Gets if the current instruction offset is a scheduler instruction
-constexpr bool IsSchedInstruction(std::size_t offset, std::size_t main_offset) {
- // Sched instructions appear once every 4 instructions.
- constexpr std::size_t SchedPeriod = 4;
- const std::size_t absolute_offset = offset - main_offset;
- return (absolute_offset % SchedPeriod) == 0;
-}
-
-/// Calculates the size of a program stream
-std::size_t CalculateProgramSize(const ProgramCode& program) {
- constexpr std::size_t start_offset = 10;
- // This is the encoded version of BRA that jumps to itself. All Nvidia
- // shaders end with one.
- constexpr u64 self_jumping_branch = 0xE2400FFFFF07000FULL;
- constexpr u64 mask = 0xFFFFFFFFFF7FFFFFULL;
- std::size_t offset = start_offset;
- while (offset < program.size()) {
- const u64 instruction = program[offset];
- if (!IsSchedInstruction(offset, start_offset)) {
- if ((instruction & mask) == self_jumping_branch) {
- // End on Maxwell's "nop" instruction
- break;
- }
- if (instruction == 0) {
- break;
- }
- }
- offset++;
- }
- // The last instruction is included in the program size
- return std::min(offset + 1, program.size());
-}
-
-/// Gets the shader program code from memory for the specified address
-ProgramCode GetShaderCode(Tegra::MemoryManager& memory_manager, const GPUVAddr gpu_addr,
- const u8* host_ptr) {
- ProgramCode code(VideoCommon::Shader::MAX_PROGRAM_LENGTH);
- ASSERT_OR_EXECUTE(host_ptr != nullptr, {
- std::fill(code.begin(), code.end(), 0);
- return code;
- });
- memory_manager.ReadBlockUnsafe(gpu_addr, code.data(), code.size() * sizeof(u64));
- code.resize(CalculateProgramSize(code));
- return code;
-}
-
/// Gets the shader type from a Maxwell program type
constexpr GLenum GetGLShaderType(ShaderType shader_type) {
switch (shader_type) {
@@ -114,17 +62,6 @@ constexpr GLenum GetGLShaderType(ShaderType shader_type) {
}
}
-/// Hashes one (or two) program streams
-u64 GetUniqueIdentifier(ShaderType shader_type, bool is_a, const ProgramCode& code,
- const ProgramCode& code_b = {}) {
- u64 unique_identifier = boost::hash_value(code);
- if (is_a) {
- // VertexA programs include two programs
- boost::hash_combine(unique_identifier, boost::hash_value(code_b));
- }
- return unique_identifier;
-}
-
constexpr const char* GetShaderTypeName(ShaderType shader_type) {
switch (shader_type) {
case ShaderType::Vertex:
@@ -448,7 +385,7 @@ Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) {
// Look up shader in the cache based on address
const auto cpu_addr{memory_manager.GpuToCpuAddress(address)};
- Shader shader{cpu_addr ? TryGet(*cpu_addr) : nullptr};
+ Shader shader{cpu_addr ? TryGet(*cpu_addr) : null_shader};
if (shader) {
return last_shaders[static_cast<std::size_t>(program)] = shader;
}
@@ -456,11 +393,12 @@ Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) {
const auto host_ptr{memory_manager.GetPointer(address)};
// No shader found - create a new one
- ProgramCode code{GetShaderCode(memory_manager, address, host_ptr)};
+ ProgramCode code{GetShaderCode(memory_manager, address, host_ptr, false)};
ProgramCode code_b;
if (program == Maxwell::ShaderProgram::VertexA) {
const GPUVAddr address_b{GetShaderAddress(system, Maxwell::ShaderProgram::VertexB)};
- code_b = GetShaderCode(memory_manager, address_b, memory_manager.GetPointer(address_b));
+ const u8* host_ptr_b = memory_manager.GetPointer(address_b);
+ code_b = GetShaderCode(memory_manager, address_b, host_ptr_b, false);
}
const auto unique_identifier = GetUniqueIdentifier(
@@ -477,7 +415,12 @@ Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) {
const std::size_t size_in_bytes = code.size() * sizeof(u64);
shader = CachedShader::CreateFromCache(params, found->second, size_in_bytes);
}
- Register(shader);
+
+ if (cpu_addr) {
+ Register(shader);
+ } else {
+ null_shader = shader;
+ }
return last_shaders[static_cast<std::size_t>(program)] = shader;
}
@@ -486,14 +429,14 @@ Shader ShaderCacheOpenGL::GetComputeKernel(GPUVAddr code_addr) {
auto& memory_manager{system.GPU().MemoryManager()};
const auto cpu_addr{memory_manager.GpuToCpuAddress(code_addr)};
- auto kernel = cpu_addr ? TryGet(*cpu_addr) : nullptr;
+ auto kernel = cpu_addr ? TryGet(*cpu_addr) : null_kernel;
if (kernel) {
return kernel;
}
const auto host_ptr{memory_manager.GetPointer(code_addr)};
// No kernel found, create a new one
- auto code{GetShaderCode(memory_manager, code_addr, host_ptr)};
+ auto code{GetShaderCode(memory_manager, code_addr, host_ptr, true)};
const auto unique_identifier{GetUniqueIdentifier(ShaderType::Compute, false, code)};
const ShaderParameters params{system, disk_cache, device,
@@ -507,7 +450,11 @@ Shader ShaderCacheOpenGL::GetComputeKernel(GPUVAddr code_addr) {
kernel = CachedShader::CreateFromCache(params, found->second, size_in_bytes);
}
- Register(kernel);
+ if (cpu_addr) {
+ Register(kernel);
+ } else {
+ null_kernel = kernel;
+ }
return kernel;
}
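Both lookups now share the same fallback: a shader fetched from a GPU address with no CPU mapping cannot be keyed into the address-based cache, so it is parked in a dedicated null slot and reused from there. Schematically (an illustrative template, not yuzu code):

#include <optional>

template <typename Cache, typename Shader, typename Addr>
Shader LookupShader(Cache& cache, std::optional<Addr> cpu_addr, const Shader& null_slot) {
    // Mapped address: normal cache hit or miss. Unmapped: use the null slot.
    return cpu_addr ? cache.TryGet(*cpu_addr) : null_slot;
}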
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.h b/src/video_core/renderer_opengl/gl_shader_cache.h
index c836df5bd..91690b470 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.h
+++ b/src/video_core/renderer_opengl/gl_shader_cache.h
@@ -125,6 +125,9 @@ private:
ShaderDiskCacheOpenGL disk_cache;
std::unordered_map<u64, PrecompiledShader> runtime_cache;
+ Shader null_shader{};
+ Shader null_kernel{};
+
std::array<Shader, Maxwell::MaxShaderProgram> last_shaders;
};
diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
index 22242cce9..99fd4ae2c 100644
--- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
@@ -484,7 +484,7 @@ private:
code.AddLine("switch (jmp_to) {{");
for (const auto& pair : ir.GetBasicBlocks()) {
- const auto [address, bb] = pair;
+ const auto& [address, bb] = pair;
code.AddLine("case 0x{:X}U: {{", address);
++code.scope;
@@ -870,13 +870,13 @@ private:
for (const auto& sampler : ir.GetSamplers()) {
const std::string name = GetSampler(sampler);
const std::string description = fmt::format("layout (binding = {}) uniform", binding);
- binding += sampler.IsIndexed() ? sampler.Size() : 1;
+ binding += sampler.is_indexed ? sampler.size : 1;
std::string sampler_type = [&]() {
- if (sampler.IsBuffer()) {
+ if (sampler.is_buffer) {
return "samplerBuffer";
}
- switch (sampler.GetType()) {
+ switch (sampler.type) {
case Tegra::Shader::TextureType::Texture1D:
return "sampler1D";
case Tegra::Shader::TextureType::Texture2D:
@@ -890,17 +890,17 @@ private:
return "sampler2D";
}
}();
- if (sampler.IsArray()) {
+ if (sampler.is_array) {
sampler_type += "Array";
}
- if (sampler.IsShadow()) {
+ if (sampler.is_shadow) {
sampler_type += "Shadow";
}
- if (!sampler.IsIndexed()) {
+ if (!sampler.is_indexed) {
code.AddLine("{} {} {};", description, sampler_type, name);
} else {
- code.AddLine("{} {} {}[{}];", description, sampler_type, name, sampler.Size());
+ code.AddLine("{} {} {}[{}];", description, sampler_type, name, sampler.size);
}
}
if (!ir.GetSamplers().empty()) {
@@ -946,14 +946,14 @@ private:
u32 binding = device.GetBaseBindings(stage).image;
for (const auto& image : ir.GetImages()) {
std::string qualifier = "coherent volatile";
- if (image.IsRead() && !image.IsWritten()) {
+ if (image.is_read && !image.is_written) {
qualifier += " readonly";
- } else if (image.IsWritten() && !image.IsRead()) {
+ } else if (image.is_written && !image.is_read) {
qualifier += " writeonly";
}
- const char* format = image.IsAtomic() ? "r32ui, " : "";
- const char* type_declaration = GetImageTypeDeclaration(image.GetType());
+ const char* format = image.is_atomic ? "r32ui, " : "";
+ const char* type_declaration = GetImageTypeDeclaration(image.type);
code.AddLine("layout ({}binding = {}) {} uniform uimage{} {};", format, binding++,
qualifier, type_declaration, GetImage(image));
}
@@ -1337,8 +1337,8 @@ private:
ASSERT(meta);
const std::size_t count = operation.GetOperandsCount();
- const bool has_array = meta->sampler.IsArray();
- const bool has_shadow = meta->sampler.IsShadow();
+ const bool has_array = meta->sampler.is_array;
+ const bool has_shadow = meta->sampler.is_shadow;
std::string expr = "texture" + function_suffix;
if (!meta->aoffi.empty()) {
@@ -1346,7 +1346,7 @@ private:
} else if (!meta->ptp.empty()) {
expr += "Offsets";
}
- if (!meta->sampler.IsIndexed()) {
+ if (!meta->sampler.is_indexed) {
expr += '(' + GetSampler(meta->sampler) + ", ";
} else {
expr += '(' + GetSampler(meta->sampler) + '[' + Visit(meta->index).AsUint() + "], ";
@@ -1484,8 +1484,8 @@ private:
dy += '(';
for (std::size_t index = 0; index < components; ++index) {
- const auto operand_x{derivates.at(index * 2)};
- const auto operand_y{derivates.at(index * 2 + 1)};
+ const auto& operand_x{derivates.at(index * 2)};
+ const auto& operand_y{derivates.at(index * 2 + 1)};
dx += Visit(operand_x).AsFloat();
dy += Visit(operand_y).AsFloat();
@@ -1870,6 +1870,14 @@ private:
return GenerateBinaryInfix(operation, ">=", Type::Bool, type, type);
}
+ Expression LogicalAddCarry(Operation operation) {
+ const std::string carry = code.GenerateTemporary();
+ code.AddLine("uint {};", carry);
+ code.AddLine("uaddCarry({}, {}, {});", VisitOperand(operation, 0).AsUint(),
+ VisitOperand(operation, 1).AsUint(), carry);
+ return {fmt::format("({} != 0)", carry), Type::Bool};
+ }
+
Expression LogicalFIsNan(Operation operation) {
return GenerateUnary(operation, "isnan", Type::Bool, Type::Float);
}
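The emitted uaddCarry call sets its third argument to a non-zero value exactly when the 32-bit addition wraps. Restated in C++ for clarity (sketch):

#include <cstdint>

bool AddCarry(std::uint32_t a, std::uint32_t b) {
    // Unsigned addition wraps modulo 2^32, so a wrapped sum is smaller than
    // either operand; that is precisely the carry-out condition.
    return static_cast<std::uint32_t>(a + b) < a;
}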
@@ -1974,7 +1982,7 @@ private:
std::string expr = GenerateTexture(
operation, "", {TextureOffset{}, TextureArgument{Type::Float, meta->bias}});
- if (meta->sampler.IsShadow()) {
+ if (meta->sampler.is_shadow) {
expr = "vec4(" + expr + ')';
}
return {expr + GetSwizzle(meta->element), Type::Float};
@@ -1986,7 +1994,7 @@ private:
std::string expr = GenerateTexture(
operation, "Lod", {TextureArgument{Type::Float, meta->lod}, TextureOffset{}});
- if (meta->sampler.IsShadow()) {
+ if (meta->sampler.is_shadow) {
expr = "vec4(" + expr + ')';
}
return {expr + GetSwizzle(meta->element), Type::Float};
@@ -1995,11 +2003,11 @@ private:
Expression TextureGather(Operation operation) {
const auto& meta = std::get<MetaTexture>(operation.GetMeta());
- const auto type = meta.sampler.IsShadow() ? Type::Float : Type::Int;
- const bool separate_dc = meta.sampler.IsShadow();
+ const auto type = meta.sampler.is_shadow ? Type::Float : Type::Int;
+ const bool separate_dc = meta.sampler.is_shadow;
std::vector<TextureIR> ir;
- if (meta.sampler.IsShadow()) {
+ if (meta.sampler.is_shadow) {
ir = {TextureOffset{}};
} else {
ir = {TextureOffset{}, TextureArgument{type, meta.component}};
@@ -2044,7 +2052,7 @@ private:
constexpr std::array constructors = {"int", "ivec2", "ivec3", "ivec4"};
const auto meta = std::get_if<MetaTexture>(&operation.GetMeta());
ASSERT(meta);
- UNIMPLEMENTED_IF(meta->sampler.IsArray());
+ UNIMPLEMENTED_IF(meta->sampler.is_array);
const std::size_t count = operation.GetOperandsCount();
std::string expr = "texelFetch(";
@@ -2065,7 +2073,7 @@ private:
}
expr += ')';
- if (meta->lod && !meta->sampler.IsBuffer()) {
+ if (meta->lod && !meta->sampler.is_buffer) {
expr += ", ";
expr += Visit(meta->lod).AsInt();
}
@@ -2076,12 +2084,10 @@ private:
}
Expression TextureGradient(Operation operation) {
- const auto meta = std::get_if<MetaTexture>(&operation.GetMeta());
- ASSERT(meta);
-
+ const auto& meta = std::get<MetaTexture>(operation.GetMeta());
std::string expr =
GenerateTexture(operation, "Grad", {TextureDerivates{}, TextureOffset{}});
- return {std::move(expr) + GetSwizzle(meta->element), Type::Float};
+ return {std::move(expr) + GetSwizzle(meta.element), Type::Float};
}
Expression ImageLoad(Operation operation) {
@@ -2441,6 +2447,8 @@ private:
&GLSLDecompiler::LogicalNotEqual<Type::Uint>,
&GLSLDecompiler::LogicalGreaterEqual<Type::Uint>,
+ &GLSLDecompiler::LogicalAddCarry,
+
&GLSLDecompiler::Logical2HLessThan<false>,
&GLSLDecompiler::Logical2HEqual<false>,
&GLSLDecompiler::Logical2HLessEqual<false>,
@@ -2598,11 +2606,11 @@ private:
}
std::string GetSampler(const Sampler& sampler) const {
- return AppendSuffix(static_cast<u32>(sampler.GetIndex()), "sampler");
+ return AppendSuffix(sampler.index, "sampler");
}
std::string GetImage(const Image& image) const {
- return AppendSuffix(static_cast<u32>(image.GetIndex()), "image");
+ return AppendSuffix(image.index, "image");
}
std::string AppendSuffix(u32 index, std::string_view name) const {
diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.h b/src/video_core/renderer_opengl/gl_shader_decompiler.h
index e7dbd810c..e8a178764 100644
--- a/src/video_core/renderer_opengl/gl_shader_decompiler.h
+++ b/src/video_core/renderer_opengl/gl_shader_decompiler.h
@@ -33,36 +33,19 @@ public:
}
private:
- u32 index{};
+ u32 index = 0;
};
-class GlobalMemoryEntry {
-public:
- explicit GlobalMemoryEntry(u32 cbuf_index, u32 cbuf_offset, bool is_read, bool is_written)
+struct GlobalMemoryEntry {
+ constexpr explicit GlobalMemoryEntry(u32 cbuf_index, u32 cbuf_offset, bool is_read,
+ bool is_written)
: cbuf_index{cbuf_index}, cbuf_offset{cbuf_offset}, is_read{is_read}, is_written{
is_written} {}
- u32 GetCbufIndex() const {
- return cbuf_index;
- }
-
- u32 GetCbufOffset() const {
- return cbuf_offset;
- }
-
- bool IsRead() const {
- return is_read;
- }
-
- bool IsWritten() const {
- return is_written;
- }
-
-private:
- u32 cbuf_index{};
- u32 cbuf_offset{};
- bool is_read{};
- bool is_written{};
+ u32 cbuf_index = 0;
+ u32 cbuf_offset = 0;
+ bool is_read = false;
+ bool is_written = false;
};
struct ShaderEntries {
diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
index 2bb376555..648b1e71b 100644
--- a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
+++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
@@ -2,10 +2,12 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include <cstring>
#include <tuple>
#include <boost/functional/hash.hpp>
+#include "common/cityhash.h"
#include "common/common_types.h"
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
@@ -13,289 +15,349 @@ namespace Vulkan {
namespace {
-constexpr FixedPipelineState::DepthStencil GetDepthStencilState(const Maxwell& regs) {
- const FixedPipelineState::StencilFace front_stencil(
- regs.stencil_front_op_fail, regs.stencil_front_op_zfail, regs.stencil_front_op_zpass,
- regs.stencil_front_func_func);
- const FixedPipelineState::StencilFace back_stencil =
- regs.stencil_two_side_enable
- ? FixedPipelineState::StencilFace(regs.stencil_back_op_fail, regs.stencil_back_op_zfail,
- regs.stencil_back_op_zpass,
- regs.stencil_back_func_func)
- : front_stencil;
- return FixedPipelineState::DepthStencil(
- regs.depth_test_enable == 1, regs.depth_write_enabled == 1, regs.depth_bounds_enable == 1,
- regs.stencil_enable == 1, regs.depth_test_func, front_stencil, back_stencil);
-}
-
-constexpr FixedPipelineState::InputAssembly GetInputAssemblyState(const Maxwell& regs) {
- return FixedPipelineState::InputAssembly(
- regs.draw.topology, regs.primitive_restart.enabled,
- regs.draw.topology == Maxwell::PrimitiveTopology::Points ? regs.point_size : 0.0f);
-}
-
-constexpr FixedPipelineState::BlendingAttachment GetBlendingAttachmentState(
- const Maxwell& regs, std::size_t render_target) {
- const auto& mask = regs.color_mask[regs.color_mask_common ? 0 : render_target];
- const std::array components = {mask.R != 0, mask.G != 0, mask.B != 0, mask.A != 0};
-
- const FixedPipelineState::BlendingAttachment default_blending(
- false, Maxwell::Blend::Equation::Add, Maxwell::Blend::Factor::One,
- Maxwell::Blend::Factor::Zero, Maxwell::Blend::Equation::Add, Maxwell::Blend::Factor::One,
- Maxwell::Blend::Factor::Zero, components);
- if (render_target >= regs.rt_control.count) {
- return default_blending;
- }
+constexpr std::size_t POINT = 0;
+constexpr std::size_t LINE = 1;
+constexpr std::size_t POLYGON = 2;
+constexpr std::array POLYGON_OFFSET_ENABLE_LUT = {
+ POINT, // Points
+ LINE, // Lines
+ LINE, // LineLoop
+ LINE, // LineStrip
+ POLYGON, // Triangles
+ POLYGON, // TriangleStrip
+ POLYGON, // TriangleFan
+ POLYGON, // Quads
+ POLYGON, // QuadStrip
+ POLYGON, // Polygon
+ LINE, // LinesAdjacency
+ LINE, // LineStripAdjacency
+ POLYGON, // TrianglesAdjacency
+ POLYGON, // TriangleStripAdjacency
+ POLYGON, // Patches
+};
- if (!regs.independent_blend_enable) {
- const auto& src = regs.blend;
- if (!src.enable[render_target]) {
- return default_blending;
- }
- return FixedPipelineState::BlendingAttachment(
- true, src.equation_rgb, src.factor_source_rgb, src.factor_dest_rgb, src.equation_a,
- src.factor_source_a, src.factor_dest_a, components);
- }
+} // Anonymous namespace
- if (!regs.blend.enable[render_target]) {
- return default_blending;
+void FixedPipelineState::DepthStencil::Fill(const Maxwell& regs) noexcept {
+ raw = 0;
+ front.action_stencil_fail.Assign(PackStencilOp(regs.stencil_front_op_fail));
+ front.action_depth_fail.Assign(PackStencilOp(regs.stencil_front_op_zfail));
+ front.action_depth_pass.Assign(PackStencilOp(regs.stencil_front_op_zpass));
+ front.test_func.Assign(PackComparisonOp(regs.stencil_front_func_func));
+ if (regs.stencil_two_side_enable) {
+ back.action_stencil_fail.Assign(PackStencilOp(regs.stencil_back_op_fail));
+ back.action_depth_fail.Assign(PackStencilOp(regs.stencil_back_op_zfail));
+ back.action_depth_pass.Assign(PackStencilOp(regs.stencil_back_op_zpass));
+ back.test_func.Assign(PackComparisonOp(regs.stencil_back_func_func));
+ } else {
+ back.action_stencil_fail.Assign(front.action_stencil_fail);
+ back.action_depth_fail.Assign(front.action_depth_fail);
+ back.action_depth_pass.Assign(front.action_depth_pass);
+ back.test_func.Assign(front.test_func);
}
- const auto& src = regs.independent_blend[render_target];
- return FixedPipelineState::BlendingAttachment(
- true, src.equation_rgb, src.factor_source_rgb, src.factor_dest_rgb, src.equation_a,
- src.factor_source_a, src.factor_dest_a, components);
-}
-
-constexpr FixedPipelineState::ColorBlending GetColorBlendingState(const Maxwell& regs) {
- return FixedPipelineState::ColorBlending(
- {regs.blend_color.r, regs.blend_color.g, regs.blend_color.b, regs.blend_color.a},
- regs.rt_control.count,
- {GetBlendingAttachmentState(regs, 0), GetBlendingAttachmentState(regs, 1),
- GetBlendingAttachmentState(regs, 2), GetBlendingAttachmentState(regs, 3),
- GetBlendingAttachmentState(regs, 4), GetBlendingAttachmentState(regs, 5),
- GetBlendingAttachmentState(regs, 6), GetBlendingAttachmentState(regs, 7)});
-}
-
-constexpr FixedPipelineState::Tessellation GetTessellationState(const Maxwell& regs) {
- return FixedPipelineState::Tessellation(regs.patch_vertices, regs.tess_mode.prim,
- regs.tess_mode.spacing, regs.tess_mode.cw != 0);
+ depth_test_enable.Assign(regs.depth_test_enable);
+ depth_write_enable.Assign(regs.depth_write_enabled);
+ depth_bounds_enable.Assign(regs.depth_bounds_enable);
+ stencil_enable.Assign(regs.stencil_enable);
+ depth_test_func.Assign(PackComparisonOp(regs.depth_test_func));
}
-constexpr std::size_t Point = 0;
-constexpr std::size_t Line = 1;
-constexpr std::size_t Polygon = 2;
-constexpr std::array PolygonOffsetEnableLUT = {
- Point, // Points
- Line, // Lines
- Line, // LineLoop
- Line, // LineStrip
- Polygon, // Triangles
- Polygon, // TriangleStrip
- Polygon, // TriangleFan
- Polygon, // Quads
- Polygon, // QuadStrip
- Polygon, // Polygon
- Line, // LinesAdjacency
- Line, // LineStripAdjacency
- Polygon, // TrianglesAdjacency
- Polygon, // TriangleStripAdjacency
- Polygon, // Patches
-};
-
-constexpr FixedPipelineState::Rasterizer GetRasterizerState(const Maxwell& regs) {
+void FixedPipelineState::Rasterizer::Fill(const Maxwell& regs) noexcept {
+ const auto& clip = regs.view_volume_clip_control;
const std::array enabled_lut = {regs.polygon_offset_point_enable,
regs.polygon_offset_line_enable,
regs.polygon_offset_fill_enable};
- const auto topology = static_cast<std::size_t>(regs.draw.topology.Value());
- const bool depth_bias_enabled = enabled_lut[PolygonOffsetEnableLUT[topology]];
-
- const auto& clip = regs.view_volume_clip_control;
- const bool depth_clamp_enabled = clip.depth_clamp_near == 1 || clip.depth_clamp_far == 1;
+ const u32 topology_index = static_cast<u32>(regs.draw.topology.Value());
- Maxwell::FrontFace front_face = regs.front_face;
+ u32 packed_front_face = PackFrontFace(regs.front_face);
if (regs.screen_y_control.triangle_rast_flip != 0 &&
regs.viewport_transform[0].scale_y > 0.0f) {
- if (front_face == Maxwell::FrontFace::CounterClockWise)
- front_face = Maxwell::FrontFace::ClockWise;
- else if (front_face == Maxwell::FrontFace::ClockWise)
- front_face = Maxwell::FrontFace::CounterClockWise;
+ // Flip front face
+ packed_front_face = 1 - packed_front_face;
}
- const bool gl_ndc = regs.depth_mode == Maxwell::DepthMode::MinusOneToOne;
- return FixedPipelineState::Rasterizer(regs.cull_test_enabled, depth_bias_enabled,
- depth_clamp_enabled, gl_ndc, regs.cull_face, front_face);
+ raw = 0;
+ topology.Assign(topology_index);
+ primitive_restart_enable.Assign(regs.primitive_restart.enabled != 0 ? 1 : 0);
+ cull_enable.Assign(regs.cull_test_enabled != 0 ? 1 : 0);
+ depth_bias_enable.Assign(enabled_lut[POLYGON_OFFSET_ENABLE_LUT[topology_index]] != 0 ? 1 : 0);
+ depth_clamp_disabled.Assign(regs.view_volume_clip_control.depth_clamp_disabled.Value());
+ ndc_minus_one_to_one.Assign(regs.depth_mode == Maxwell::DepthMode::MinusOneToOne ? 1 : 0);
+ cull_face.Assign(PackCullFace(regs.cull_face));
+ front_face.Assign(packed_front_face);
+ polygon_mode.Assign(PackPolygonMode(regs.polygon_mode_front));
+ patch_control_points_minus_one.Assign(regs.patch_vertices - 1);
+ tessellation_primitive.Assign(static_cast<u32>(regs.tess_mode.prim.Value()));
+ tessellation_spacing.Assign(static_cast<u32>(regs.tess_mode.spacing.Value()));
+ tessellation_clockwise.Assign(regs.tess_mode.cw.Value());
+ logic_op_enable.Assign(regs.logic_op.enable != 0 ? 1 : 0);
+ logic_op.Assign(PackLogicOp(regs.logic_op.operation));
+ std::memcpy(&point_size, &regs.point_size, sizeof(point_size)); // TODO: C++20 std::bit_cast
}
-} // Anonymous namespace
-
-std::size_t FixedPipelineState::VertexBinding::Hash() const noexcept {
- return (index << stride) ^ divisor;
+void FixedPipelineState::ColorBlending::Fill(const Maxwell& regs) noexcept {
+ for (std::size_t index = 0; index < std::size(attachments); ++index) {
+ attachments[index].Fill(regs, index);
+ }
}
-bool FixedPipelineState::VertexBinding::operator==(const VertexBinding& rhs) const noexcept {
- return std::tie(index, stride, divisor) == std::tie(rhs.index, rhs.stride, rhs.divisor);
-}
+void FixedPipelineState::BlendingAttachment::Fill(const Maxwell& regs, std::size_t index) {
+ const auto& mask = regs.color_mask[regs.color_mask_common ? 0 : index];
+
+ raw = 0;
+ mask_r.Assign(mask.R);
+ mask_g.Assign(mask.G);
+ mask_b.Assign(mask.B);
+ mask_a.Assign(mask.A);
+
+ // TODO: C++20 Use templated lambda to deduplicate code
+
+ if (!regs.independent_blend_enable) {
+ const auto& src = regs.blend;
+ if (!src.enable[index]) {
+ return;
+ }
+ equation_rgb.Assign(PackBlendEquation(src.equation_rgb));
+ equation_a.Assign(PackBlendEquation(src.equation_a));
+ factor_source_rgb.Assign(PackBlendFactor(src.factor_source_rgb));
+ factor_dest_rgb.Assign(PackBlendFactor(src.factor_dest_rgb));
+ factor_source_a.Assign(PackBlendFactor(src.factor_source_a));
+ factor_dest_a.Assign(PackBlendFactor(src.factor_dest_a));
+ enable.Assign(1);
+ return;
+ }
-std::size_t FixedPipelineState::VertexAttribute::Hash() const noexcept {
- return static_cast<std::size_t>(index) ^ (static_cast<std::size_t>(buffer) << 13) ^
- (static_cast<std::size_t>(type) << 22) ^ (static_cast<std::size_t>(size) << 31) ^
- (static_cast<std::size_t>(offset) << 36);
+ if (!regs.blend.enable[index]) {
+ return;
+ }
+ const auto& src = regs.independent_blend[index];
+ equation_rgb.Assign(PackBlendEquation(src.equation_rgb));
+ equation_a.Assign(PackBlendEquation(src.equation_a));
+ factor_source_rgb.Assign(PackBlendFactor(src.factor_source_rgb));
+ factor_dest_rgb.Assign(PackBlendFactor(src.factor_dest_rgb));
+ factor_source_a.Assign(PackBlendFactor(src.factor_source_a));
+ factor_dest_a.Assign(PackBlendFactor(src.factor_dest_a));
+ enable.Assign(1);
}
-bool FixedPipelineState::VertexAttribute::operator==(const VertexAttribute& rhs) const noexcept {
- return std::tie(index, buffer, type, size, offset) ==
- std::tie(rhs.index, rhs.buffer, rhs.type, rhs.size, rhs.offset);
+void FixedPipelineState::Fill(const Maxwell& regs) {
+ rasterizer.Fill(regs);
+ depth_stencil.Fill(regs);
+ color_blending.Fill(regs);
}
-std::size_t FixedPipelineState::StencilFace::Hash() const noexcept {
- return static_cast<std::size_t>(action_stencil_fail) ^
- (static_cast<std::size_t>(action_depth_fail) << 4) ^
- (static_cast<std::size_t>(action_depth_fail) << 20) ^
- (static_cast<std::size_t>(action_depth_pass) << 36);
+std::size_t FixedPipelineState::Hash() const noexcept {
+ const u64 hash = Common::CityHash64(reinterpret_cast<const char*>(this), sizeof *this);
+ return static_cast<std::size_t>(hash);
}
-bool FixedPipelineState::StencilFace::operator==(const StencilFace& rhs) const noexcept {
- return std::tie(action_stencil_fail, action_depth_fail, action_depth_pass, test_func) ==
- std::tie(rhs.action_stencil_fail, rhs.action_depth_fail, rhs.action_depth_pass,
- rhs.test_func);
+bool FixedPipelineState::operator==(const FixedPipelineState& rhs) const noexcept {
+ return std::memcmp(this, &rhs, sizeof *this) == 0;
}
-std::size_t FixedPipelineState::BlendingAttachment::Hash() const noexcept {
- return static_cast<std::size_t>(enable) ^ (static_cast<std::size_t>(rgb_equation) << 5) ^
- (static_cast<std::size_t>(src_rgb_func) << 10) ^
- (static_cast<std::size_t>(dst_rgb_func) << 15) ^
- (static_cast<std::size_t>(a_equation) << 20) ^
- (static_cast<std::size_t>(src_a_func) << 25) ^
- (static_cast<std::size_t>(dst_a_func) << 30) ^
- (static_cast<std::size_t>(components[0]) << 35) ^
- (static_cast<std::size_t>(components[1]) << 36) ^
- (static_cast<std::size_t>(components[2]) << 37) ^
- (static_cast<std::size_t>(components[3]) << 38);
+u32 FixedPipelineState::PackComparisonOp(Maxwell::ComparisonOp op) noexcept {
+ // OpenGL enums go from 0x200 to 0x207 and the others from 1 to 8
+ // If we subtract 0x200 from the OpenGL enums and 1 from the others, we get a 0-7 range.
+ // Perfect for a hash.
+ const u32 value = static_cast<u32>(op);
+ return value - (value >= 0x200 ? 0x200 : 1);
}
-bool FixedPipelineState::BlendingAttachment::operator==(const BlendingAttachment& rhs) const
- noexcept {
- return std::tie(enable, rgb_equation, src_rgb_func, dst_rgb_func, a_equation, src_a_func,
- dst_a_func, components) ==
- std::tie(rhs.enable, rhs.rgb_equation, rhs.src_rgb_func, rhs.dst_rgb_func,
- rhs.a_equation, rhs.src_a_func, rhs.dst_a_func, rhs.components);
+Maxwell::ComparisonOp FixedPipelineState::UnpackComparisonOp(u32 packed) noexcept {
+ // Read PackComparisonOp for the logic behind this.
+ return static_cast<Maxwell::ComparisonOp>(packed + 1);
}
-std::size_t FixedPipelineState::VertexInput::Hash() const noexcept {
- std::size_t hash = num_bindings ^ (num_attributes << 32);
- for (std::size_t i = 0; i < num_bindings; ++i) {
- boost::hash_combine(hash, bindings[i].Hash());
- }
- for (std::size_t i = 0; i < num_attributes; ++i) {
- boost::hash_combine(hash, attributes[i].Hash());
+u32 FixedPipelineState::PackStencilOp(Maxwell::StencilOp op) noexcept {
+ switch (op) {
+ case Maxwell::StencilOp::Keep:
+ case Maxwell::StencilOp::KeepOGL:
+ return 0;
+ case Maxwell::StencilOp::Zero:
+ case Maxwell::StencilOp::ZeroOGL:
+ return 1;
+ case Maxwell::StencilOp::Replace:
+ case Maxwell::StencilOp::ReplaceOGL:
+ return 2;
+ case Maxwell::StencilOp::Incr:
+ case Maxwell::StencilOp::IncrOGL:
+ return 3;
+ case Maxwell::StencilOp::Decr:
+ case Maxwell::StencilOp::DecrOGL:
+ return 4;
+ case Maxwell::StencilOp::Invert:
+ case Maxwell::StencilOp::InvertOGL:
+ return 5;
+ case Maxwell::StencilOp::IncrWrap:
+ case Maxwell::StencilOp::IncrWrapOGL:
+ return 6;
+ case Maxwell::StencilOp::DecrWrap:
+ case Maxwell::StencilOp::DecrWrapOGL:
+ return 7;
}
- return hash;
+ return 0;
}
-bool FixedPipelineState::VertexInput::operator==(const VertexInput& rhs) const noexcept {
- return std::equal(bindings.begin(), bindings.begin() + num_bindings, rhs.bindings.begin(),
- rhs.bindings.begin() + rhs.num_bindings) &&
- std::equal(attributes.begin(), attributes.begin() + num_attributes,
- rhs.attributes.begin(), rhs.attributes.begin() + rhs.num_attributes);
+Maxwell::StencilOp FixedPipelineState::UnpackStencilOp(u32 packed) noexcept {
+ static constexpr std::array LUT = {Maxwell::StencilOp::Keep, Maxwell::StencilOp::Zero,
+ Maxwell::StencilOp::Replace, Maxwell::StencilOp::Incr,
+ Maxwell::StencilOp::Decr, Maxwell::StencilOp::Invert,
+ Maxwell::StencilOp::IncrWrap, Maxwell::StencilOp::DecrWrap};
+ return LUT[packed];
}
-std::size_t FixedPipelineState::InputAssembly::Hash() const noexcept {
- std::size_t point_size_int = 0;
- std::memcpy(&point_size_int, &point_size, sizeof(point_size));
- return (static_cast<std::size_t>(topology) << 24) ^ (point_size_int << 32) ^
- static_cast<std::size_t>(primitive_restart_enable);
+u32 FixedPipelineState::PackCullFace(Maxwell::CullFace cull) noexcept {
+ // FrontAndBack is 0x408; subtracting 0x406 from it gives 2.
+ // Individual cull faces are 0x404 and 0x405; subtracting 0x404 gives 0 and 1.
+ const u32 value = static_cast<u32>(cull);
+ return value - (value == 0x408 ? 0x406 : 0x404);
}
-bool FixedPipelineState::InputAssembly::operator==(const InputAssembly& rhs) const noexcept {
- return std::tie(topology, primitive_restart_enable, point_size) ==
- std::tie(rhs.topology, rhs.primitive_restart_enable, rhs.point_size);
+Maxwell::CullFace FixedPipelineState::UnpackCullFace(u32 packed) noexcept {
+ static constexpr std::array LUT = {Maxwell::CullFace::Front, Maxwell::CullFace::Back,
+ Maxwell::CullFace::FrontAndBack};
+ return LUT[packed];
}
-std::size_t FixedPipelineState::Tessellation::Hash() const noexcept {
- return static_cast<std::size_t>(patch_control_points) ^
- (static_cast<std::size_t>(primitive) << 6) ^ (static_cast<std::size_t>(spacing) << 8) ^
- (static_cast<std::size_t>(clockwise) << 10);
+u32 FixedPipelineState::PackFrontFace(Maxwell::FrontFace face) noexcept {
+ return static_cast<u32>(face) - 0x900;
}
-bool FixedPipelineState::Tessellation::operator==(const Tessellation& rhs) const noexcept {
- return std::tie(patch_control_points, primitive, spacing, clockwise) ==
- std::tie(rhs.patch_control_points, rhs.primitive, rhs.spacing, rhs.clockwise);
+Maxwell::FrontFace FixedPipelineState::UnpackFrontFace(u32 packed) noexcept {
+ return static_cast<Maxwell::FrontFace>(packed + 0x900);
}
-std::size_t FixedPipelineState::Rasterizer::Hash() const noexcept {
- return static_cast<std::size_t>(cull_enable) ^
- (static_cast<std::size_t>(depth_bias_enable) << 1) ^
- (static_cast<std::size_t>(depth_clamp_enable) << 2) ^
- (static_cast<std::size_t>(ndc_minus_one_to_one) << 3) ^
- (static_cast<std::size_t>(cull_face) << 24) ^
- (static_cast<std::size_t>(front_face) << 48);
+u32 FixedPipelineState::PackPolygonMode(Maxwell::PolygonMode mode) noexcept {
+ return static_cast<u32>(mode) - 0x1B00;
}
-bool FixedPipelineState::Rasterizer::operator==(const Rasterizer& rhs) const noexcept {
- return std::tie(cull_enable, depth_bias_enable, depth_clamp_enable, ndc_minus_one_to_one,
- cull_face, front_face) ==
- std::tie(rhs.cull_enable, rhs.depth_bias_enable, rhs.depth_clamp_enable,
- rhs.ndc_minus_one_to_one, rhs.cull_face, rhs.front_face);
+Maxwell::PolygonMode FixedPipelineState::UnpackPolygonMode(u32 packed) noexcept {
+ return static_cast<Maxwell::PolygonMode>(packed + 0x1B00);
}
-std::size_t FixedPipelineState::DepthStencil::Hash() const noexcept {
- std::size_t hash = static_cast<std::size_t>(depth_test_enable) ^
- (static_cast<std::size_t>(depth_write_enable) << 1) ^
- (static_cast<std::size_t>(depth_bounds_enable) << 2) ^
- (static_cast<std::size_t>(stencil_enable) << 3) ^
- (static_cast<std::size_t>(depth_test_function) << 4);
- boost::hash_combine(hash, front_stencil.Hash());
- boost::hash_combine(hash, back_stencil.Hash());
- return hash;
+u32 FixedPipelineState::PackLogicOp(Maxwell::LogicOperation op) noexcept {
+ return static_cast<u32>(op) - 0x1500;
}
-bool FixedPipelineState::DepthStencil::operator==(const DepthStencil& rhs) const noexcept {
- return std::tie(depth_test_enable, depth_write_enable, depth_bounds_enable, depth_test_function,
- stencil_enable, front_stencil, back_stencil) ==
- std::tie(rhs.depth_test_enable, rhs.depth_write_enable, rhs.depth_bounds_enable,
- rhs.depth_test_function, rhs.stencil_enable, rhs.front_stencil,
- rhs.back_stencil);
+Maxwell::LogicOperation FixedPipelineState::UnpackLogicOp(u32 packed) noexcept {
+ return static_cast<Maxwell::LogicOperation>(packed + 0x1500);
}
-std::size_t FixedPipelineState::ColorBlending::Hash() const noexcept {
- std::size_t hash = attachments_count << 13;
- for (std::size_t rt = 0; rt < static_cast<std::size_t>(attachments_count); ++rt) {
- boost::hash_combine(hash, attachments[rt].Hash());
+u32 FixedPipelineState::PackBlendEquation(Maxwell::Blend::Equation equation) noexcept {
+ switch (equation) {
+ case Maxwell::Blend::Equation::Add:
+ case Maxwell::Blend::Equation::AddGL:
+ return 0;
+ case Maxwell::Blend::Equation::Subtract:
+ case Maxwell::Blend::Equation::SubtractGL:
+ return 1;
+ case Maxwell::Blend::Equation::ReverseSubtract:
+ case Maxwell::Blend::Equation::ReverseSubtractGL:
+ return 2;
+ case Maxwell::Blend::Equation::Min:
+ case Maxwell::Blend::Equation::MinGL:
+ return 3;
+ case Maxwell::Blend::Equation::Max:
+ case Maxwell::Blend::Equation::MaxGL:
+ return 4;
}
- return hash;
+ return 0;
}
-bool FixedPipelineState::ColorBlending::operator==(const ColorBlending& rhs) const noexcept {
- return std::equal(attachments.begin(), attachments.begin() + attachments_count,
- rhs.attachments.begin(), rhs.attachments.begin() + rhs.attachments_count);
+Maxwell::Blend::Equation FixedPipelineState::UnpackBlendEquation(u32 packed) noexcept {
+ static constexpr std::array LUT = {
+ Maxwell::Blend::Equation::Add, Maxwell::Blend::Equation::Subtract,
+ Maxwell::Blend::Equation::ReverseSubtract, Maxwell::Blend::Equation::Min,
+ Maxwell::Blend::Equation::Max};
+ return LUT[packed];
}
-std::size_t FixedPipelineState::Hash() const noexcept {
- std::size_t hash = 0;
- boost::hash_combine(hash, vertex_input.Hash());
- boost::hash_combine(hash, input_assembly.Hash());
- boost::hash_combine(hash, tessellation.Hash());
- boost::hash_combine(hash, rasterizer.Hash());
- boost::hash_combine(hash, depth_stencil.Hash());
- boost::hash_combine(hash, color_blending.Hash());
- return hash;
-}
-
-bool FixedPipelineState::operator==(const FixedPipelineState& rhs) const noexcept {
- return std::tie(vertex_input, input_assembly, tessellation, rasterizer, depth_stencil,
- color_blending) == std::tie(rhs.vertex_input, rhs.input_assembly,
- rhs.tessellation, rhs.rasterizer, rhs.depth_stencil,
- rhs.color_blending);
+u32 FixedPipelineState::PackBlendFactor(Maxwell::Blend::Factor factor) noexcept {
+ switch (factor) {
+ case Maxwell::Blend::Factor::Zero:
+ case Maxwell::Blend::Factor::ZeroGL:
+ return 0;
+ case Maxwell::Blend::Factor::One:
+ case Maxwell::Blend::Factor::OneGL:
+ return 1;
+ case Maxwell::Blend::Factor::SourceColor:
+ case Maxwell::Blend::Factor::SourceColorGL:
+ return 2;
+ case Maxwell::Blend::Factor::OneMinusSourceColor:
+ case Maxwell::Blend::Factor::OneMinusSourceColorGL:
+ return 3;
+ case Maxwell::Blend::Factor::SourceAlpha:
+ case Maxwell::Blend::Factor::SourceAlphaGL:
+ return 4;
+ case Maxwell::Blend::Factor::OneMinusSourceAlpha:
+ case Maxwell::Blend::Factor::OneMinusSourceAlphaGL:
+ return 5;
+ case Maxwell::Blend::Factor::DestAlpha:
+ case Maxwell::Blend::Factor::DestAlphaGL:
+ return 6;
+ case Maxwell::Blend::Factor::OneMinusDestAlpha:
+ case Maxwell::Blend::Factor::OneMinusDestAlphaGL:
+ return 7;
+ case Maxwell::Blend::Factor::DestColor:
+ case Maxwell::Blend::Factor::DestColorGL:
+ return 8;
+ case Maxwell::Blend::Factor::OneMinusDestColor:
+ case Maxwell::Blend::Factor::OneMinusDestColorGL:
+ return 9;
+ case Maxwell::Blend::Factor::SourceAlphaSaturate:
+ case Maxwell::Blend::Factor::SourceAlphaSaturateGL:
+ return 10;
+ case Maxwell::Blend::Factor::Source1Color:
+ case Maxwell::Blend::Factor::Source1ColorGL:
+ return 11;
+ case Maxwell::Blend::Factor::OneMinusSource1Color:
+ case Maxwell::Blend::Factor::OneMinusSource1ColorGL:
+ return 12;
+ case Maxwell::Blend::Factor::Source1Alpha:
+ case Maxwell::Blend::Factor::Source1AlphaGL:
+ return 13;
+ case Maxwell::Blend::Factor::OneMinusSource1Alpha:
+ case Maxwell::Blend::Factor::OneMinusSource1AlphaGL:
+ return 14;
+ case Maxwell::Blend::Factor::ConstantColor:
+ case Maxwell::Blend::Factor::ConstantColorGL:
+ return 15;
+ case Maxwell::Blend::Factor::OneMinusConstantColor:
+ case Maxwell::Blend::Factor::OneMinusConstantColorGL:
+ return 16;
+ case Maxwell::Blend::Factor::ConstantAlpha:
+ case Maxwell::Blend::Factor::ConstantAlphaGL:
+ return 17;
+ case Maxwell::Blend::Factor::OneMinusConstantAlpha:
+ case Maxwell::Blend::Factor::OneMinusConstantAlphaGL:
+ return 18;
+ }
+ return 0;
}
-FixedPipelineState GetFixedPipelineState(const Maxwell& regs) {
- FixedPipelineState fixed_state;
- fixed_state.input_assembly = GetInputAssemblyState(regs);
- fixed_state.tessellation = GetTessellationState(regs);
- fixed_state.rasterizer = GetRasterizerState(regs);
- fixed_state.depth_stencil = GetDepthStencilState(regs);
- fixed_state.color_blending = GetColorBlendingState(regs);
- return fixed_state;
+Maxwell::Blend::Factor FixedPipelineState::UnpackBlendFactor(u32 packed) noexcept {
+ static constexpr std::array LUT = {
+ Maxwell::Blend::Factor::Zero,
+ Maxwell::Blend::Factor::One,
+ Maxwell::Blend::Factor::SourceColor,
+ Maxwell::Blend::Factor::OneMinusSourceColor,
+ Maxwell::Blend::Factor::SourceAlpha,
+ Maxwell::Blend::Factor::OneMinusSourceAlpha,
+ Maxwell::Blend::Factor::DestAlpha,
+ Maxwell::Blend::Factor::OneMinusDestAlpha,
+ Maxwell::Blend::Factor::DestColor,
+ Maxwell::Blend::Factor::OneMinusDestColor,
+ Maxwell::Blend::Factor::SourceAlphaSaturate,
+ Maxwell::Blend::Factor::Source1Color,
+ Maxwell::Blend::Factor::OneMinusSource1Color,
+ Maxwell::Blend::Factor::Source1Alpha,
+ Maxwell::Blend::Factor::OneMinusSource1Alpha,
+ Maxwell::Blend::Factor::ConstantColor,
+ Maxwell::Blend::Factor::OneMinusConstantColor,
+ Maxwell::Blend::Factor::ConstantAlpha,
+ Maxwell::Blend::Factor::OneMinusConstantAlpha,
+ };
+ return LUT[packed];
}
} // namespace Vulkan
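
The Pack/Unpack pairs above are meant to round-trip exactly over the values the pipeline key stores. A minimal sketch of that contract (an assumption-laden check, not part of the commit: it presumes the Maxwell enums keep their hardware register values, e.g. LogicOperation enumerators starting at 0x1500, and uses Clear/Add/SourceAlpha only as sample values):

    #include <cassert>

    // Round-trip sketch. The *GL blend aliases (AddGL, SubtractGL, ...)
    // deliberately collapse onto the same packed index, so unpacking returns
    // the canonical non-GL enumerator.
    void SanityCheckPacking() {
        using FPS = FixedPipelineState;

        const auto op = Maxwell::LogicOperation::Clear; // assumed to be 0x1500
        assert(FPS::UnpackLogicOp(FPS::PackLogicOp(op)) == op);

        const auto eq = Maxwell::Blend::Equation::Add;
        assert(FPS::UnpackBlendEquation(FPS::PackBlendEquation(eq)) == eq);

        const auto factor = Maxwell::Blend::Factor::SourceAlpha;
        assert(FPS::UnpackBlendFactor(FPS::PackBlendFactor(factor)) == factor);
    }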
diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.h b/src/video_core/renderer_vulkan/fixed_pipeline_state.h
index 4c8ba7f90..8652067a7 100644
--- a/src/video_core/renderer_vulkan/fixed_pipeline_state.h
+++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.h
@@ -7,6 +7,7 @@
#include <array>
#include <type_traits>
+#include "common/bit_field.h"
#include "common/common_types.h"
#include "video_core/engines/maxwell_3d.h"
@@ -16,93 +17,48 @@ namespace Vulkan {
using Maxwell = Tegra::Engines::Maxwell3D::Regs;
-// TODO(Rodrigo): Optimize this structure.
-
struct FixedPipelineState {
- using PixelFormat = VideoCore::Surface::PixelFormat;
-
- struct VertexBinding {
- constexpr VertexBinding(u32 index, u32 stride, u32 divisor)
- : index{index}, stride{stride}, divisor{divisor} {}
- VertexBinding() = default;
-
- u32 index;
- u32 stride;
- u32 divisor;
-
- std::size_t Hash() const noexcept;
+ static u32 PackComparisonOp(Maxwell::ComparisonOp op) noexcept;
+ static Maxwell::ComparisonOp UnpackComparisonOp(u32 packed) noexcept;
- bool operator==(const VertexBinding& rhs) const noexcept;
-
- bool operator!=(const VertexBinding& rhs) const noexcept {
- return !operator==(rhs);
- }
- };
+ static u32 PackStencilOp(Maxwell::StencilOp op) noexcept;
+ static Maxwell::StencilOp UnpackStencilOp(u32 packed) noexcept;
- struct VertexAttribute {
- constexpr VertexAttribute(u32 index, u32 buffer, Maxwell::VertexAttribute::Type type,
- Maxwell::VertexAttribute::Size size, u32 offset)
- : index{index}, buffer{buffer}, type{type}, size{size}, offset{offset} {}
- VertexAttribute() = default;
-
- u32 index;
- u32 buffer;
- Maxwell::VertexAttribute::Type type;
- Maxwell::VertexAttribute::Size size;
- u32 offset;
-
- std::size_t Hash() const noexcept;
-
- bool operator==(const VertexAttribute& rhs) const noexcept;
-
- bool operator!=(const VertexAttribute& rhs) const noexcept {
- return !operator==(rhs);
- }
- };
+ static u32 PackCullFace(Maxwell::CullFace cull) noexcept;
+ static Maxwell::CullFace UnpackCullFace(u32 packed) noexcept;
- struct StencilFace {
- constexpr StencilFace(Maxwell::StencilOp action_stencil_fail,
- Maxwell::StencilOp action_depth_fail,
- Maxwell::StencilOp action_depth_pass, Maxwell::ComparisonOp test_func)
- : action_stencil_fail{action_stencil_fail}, action_depth_fail{action_depth_fail},
- action_depth_pass{action_depth_pass}, test_func{test_func} {}
- StencilFace() = default;
+ static u32 PackFrontFace(Maxwell::FrontFace face) noexcept;
+ static Maxwell::FrontFace UnpackFrontFace(u32 packed) noexcept;
- Maxwell::StencilOp action_stencil_fail;
- Maxwell::StencilOp action_depth_fail;
- Maxwell::StencilOp action_depth_pass;
- Maxwell::ComparisonOp test_func;
+ static u32 PackPolygonMode(Maxwell::PolygonMode mode) noexcept;
+ static Maxwell::PolygonMode UnpackPolygonMode(u32 packed) noexcept;
- std::size_t Hash() const noexcept;
+ static u32 PackLogicOp(Maxwell::LogicOperation op) noexcept;
+ static Maxwell::LogicOperation UnpackLogicOp(u32 packed) noexcept;
- bool operator==(const StencilFace& rhs) const noexcept;
+ static u32 PackBlendEquation(Maxwell::Blend::Equation equation) noexcept;
+ static Maxwell::Blend::Equation UnpackBlendEquation(u32 packed) noexcept;
- bool operator!=(const StencilFace& rhs) const noexcept {
- return !operator==(rhs);
- }
- };
+ static u32 PackBlendFactor(Maxwell::Blend::Factor factor) noexcept;
+ static Maxwell::Blend::Factor UnpackBlendFactor(u32 packed) noexcept;
struct BlendingAttachment {
- constexpr BlendingAttachment(bool enable, Maxwell::Blend::Equation rgb_equation,
- Maxwell::Blend::Factor src_rgb_func,
- Maxwell::Blend::Factor dst_rgb_func,
- Maxwell::Blend::Equation a_equation,
- Maxwell::Blend::Factor src_a_func,
- Maxwell::Blend::Factor dst_a_func,
- std::array<bool, 4> components)
- : enable{enable}, rgb_equation{rgb_equation}, src_rgb_func{src_rgb_func},
- dst_rgb_func{dst_rgb_func}, a_equation{a_equation}, src_a_func{src_a_func},
- dst_a_func{dst_a_func}, components{components} {}
- BlendingAttachment() = default;
-
- bool enable;
- Maxwell::Blend::Equation rgb_equation;
- Maxwell::Blend::Factor src_rgb_func;
- Maxwell::Blend::Factor dst_rgb_func;
- Maxwell::Blend::Equation a_equation;
- Maxwell::Blend::Factor src_a_func;
- Maxwell::Blend::Factor dst_a_func;
- std::array<bool, 4> components;
+ union {
+ u32 raw;
+ BitField<0, 1, u32> mask_r;
+ BitField<1, 1, u32> mask_g;
+ BitField<2, 1, u32> mask_b;
+ BitField<3, 1, u32> mask_a;
+ BitField<4, 3, u32> equation_rgb;
+ BitField<7, 3, u32> equation_a;
+ BitField<10, 5, u32> factor_source_rgb;
+ BitField<15, 5, u32> factor_dest_rgb;
+ BitField<20, 5, u32> factor_source_a;
+ BitField<25, 5, u32> factor_dest_a;
+ BitField<30, 1, u32> enable;
+ };
+
+ void Fill(const Maxwell& regs, std::size_t index);
std::size_t Hash() const noexcept;
@@ -111,135 +67,178 @@ struct FixedPipelineState {
bool operator!=(const BlendingAttachment& rhs) const noexcept {
return !operator==(rhs);
}
- };
-
- struct VertexInput {
- std::size_t num_bindings = 0;
- std::size_t num_attributes = 0;
- std::array<VertexBinding, Maxwell::NumVertexArrays> bindings;
- std::array<VertexAttribute, Maxwell::NumVertexAttributes> attributes;
-
- std::size_t Hash() const noexcept;
- bool operator==(const VertexInput& rhs) const noexcept;
+ constexpr std::array<bool, 4> Mask() const noexcept {
+ return {mask_r != 0, mask_g != 0, mask_b != 0, mask_a != 0};
+ }
- bool operator!=(const VertexInput& rhs) const noexcept {
- return !operator==(rhs);
+ Maxwell::Blend::Equation EquationRGB() const noexcept {
+ return UnpackBlendEquation(equation_rgb.Value());
}
- };
- struct InputAssembly {
- constexpr InputAssembly(Maxwell::PrimitiveTopology topology, bool primitive_restart_enable,
- float point_size)
- : topology{topology}, primitive_restart_enable{primitive_restart_enable},
- point_size{point_size} {}
- InputAssembly() = default;
+ Maxwell::Blend::Equation EquationAlpha() const noexcept {
+ return UnpackBlendEquation(equation_a.Value());
+ }
- Maxwell::PrimitiveTopology topology;
- bool primitive_restart_enable;
- float point_size;
+ Maxwell::Blend::Factor SourceRGBFactor() const noexcept {
+ return UnpackBlendFactor(factor_source_rgb.Value());
+ }
- std::size_t Hash() const noexcept;
+ Maxwell::Blend::Factor DestRGBFactor() const noexcept {
+ return UnpackBlendFactor(factor_dest_rgb.Value());
+ }
- bool operator==(const InputAssembly& rhs) const noexcept;
+ Maxwell::Blend::Factor SourceAlphaFactor() const noexcept {
+ return UnpackBlendFactor(factor_source_a.Value());
+ }
- bool operator!=(const InputAssembly& rhs) const noexcept {
- return !operator==(rhs);
+ Maxwell::Blend::Factor DestAlphaFactor() const noexcept {
+ return UnpackBlendFactor(factor_dest_a.Value());
}
};
- struct Tessellation {
- constexpr Tessellation(u32 patch_control_points, Maxwell::TessellationPrimitive primitive,
- Maxwell::TessellationSpacing spacing, bool clockwise)
- : patch_control_points{patch_control_points}, primitive{primitive}, spacing{spacing},
- clockwise{clockwise} {}
- Tessellation() = default;
-
- u32 patch_control_points;
- Maxwell::TessellationPrimitive primitive;
- Maxwell::TessellationSpacing spacing;
- bool clockwise;
-
- std::size_t Hash() const noexcept;
-
- bool operator==(const Tessellation& rhs) const noexcept;
+ struct VertexInput {
+ union Binding {
+ u16 raw;
+ BitField<0, 1, u16> enabled;
+ BitField<1, 12, u16> stride;
+ };
+
+ union Attribute {
+ u32 raw;
+ BitField<0, 1, u32> enabled;
+ BitField<1, 5, u32> buffer;
+ BitField<6, 14, u32> offset;
+ BitField<20, 3, u32> type;
+ BitField<23, 6, u32> size;
+
+ constexpr Maxwell::VertexAttribute::Type Type() const noexcept {
+ return static_cast<Maxwell::VertexAttribute::Type>(type.Value());
+ }
+
+ constexpr Maxwell::VertexAttribute::Size Size() const noexcept {
+ return static_cast<Maxwell::VertexAttribute::Size>(size.Value());
+ }
+ };
+
+ std::array<Binding, Maxwell::NumVertexArrays> bindings;
+ std::array<u32, Maxwell::NumVertexArrays> binding_divisors;
+ std::array<Attribute, Maxwell::NumVertexAttributes> attributes;
+
+ void SetBinding(std::size_t index, bool enabled, u32 stride, u32 divisor) noexcept {
+ auto& binding = bindings[index];
+ binding.raw = 0;
+ binding.enabled.Assign(enabled ? 1 : 0);
+ binding.stride.Assign(static_cast<u16>(stride));
+ binding_divisors[index] = divisor;
+ }
- bool operator!=(const Tessellation& rhs) const noexcept {
- return !operator==(rhs);
+ void SetAttribute(std::size_t index, bool enabled, u32 buffer, u32 offset,
+ Maxwell::VertexAttribute::Type type,
+ Maxwell::VertexAttribute::Size size) noexcept {
+ auto& attribute = attributes[index];
+ attribute.raw = 0;
+ attribute.enabled.Assign(enabled ? 1 : 0);
+ attribute.buffer.Assign(buffer);
+ attribute.offset.Assign(offset);
+ attribute.type.Assign(static_cast<u32>(type));
+ attribute.size.Assign(static_cast<u32>(size));
}
};
struct Rasterizer {
- constexpr Rasterizer(bool cull_enable, bool depth_bias_enable, bool depth_clamp_enable,
- bool ndc_minus_one_to_one, Maxwell::CullFace cull_face,
- Maxwell::FrontFace front_face)
- : cull_enable{cull_enable}, depth_bias_enable{depth_bias_enable},
- depth_clamp_enable{depth_clamp_enable}, ndc_minus_one_to_one{ndc_minus_one_to_one},
- cull_face{cull_face}, front_face{front_face} {}
- Rasterizer() = default;
-
- bool cull_enable;
- bool depth_bias_enable;
- bool depth_clamp_enable;
- bool ndc_minus_one_to_one;
- Maxwell::CullFace cull_face;
- Maxwell::FrontFace front_face;
-
- std::size_t Hash() const noexcept;
+ union {
+ u32 raw;
+ BitField<0, 4, u32> topology;
+ BitField<4, 1, u32> primitive_restart_enable;
+ BitField<5, 1, u32> cull_enable;
+ BitField<6, 1, u32> depth_bias_enable;
+ BitField<7, 1, u32> depth_clamp_disabled;
+ BitField<8, 1, u32> ndc_minus_one_to_one;
+ BitField<9, 2, u32> cull_face;
+ BitField<11, 1, u32> front_face;
+ BitField<12, 2, u32> polygon_mode;
+ BitField<14, 5, u32> patch_control_points_minus_one;
+ BitField<19, 2, u32> tessellation_primitive;
+ BitField<21, 2, u32> tessellation_spacing;
+ BitField<23, 1, u32> tessellation_clockwise;
+ BitField<24, 1, u32> logic_op_enable;
+ BitField<25, 4, u32> logic_op;
+ };
+
+ // TODO(Rodrigo): Move this to push constants
+ u32 point_size;
+
+ void Fill(const Maxwell& regs) noexcept;
+
+ constexpr Maxwell::PrimitiveTopology Topology() const noexcept {
+ return static_cast<Maxwell::PrimitiveTopology>(topology.Value());
+ }
- bool operator==(const Rasterizer& rhs) const noexcept;
+ Maxwell::CullFace CullFace() const noexcept {
+ return UnpackCullFace(cull_face.Value());
+ }
- bool operator!=(const Rasterizer& rhs) const noexcept {
- return !operator==(rhs);
+ Maxwell::FrontFace FrontFace() const noexcept {
+ return UnpackFrontFace(front_face.Value());
}
};
struct DepthStencil {
- constexpr DepthStencil(bool depth_test_enable, bool depth_write_enable,
- bool depth_bounds_enable, bool stencil_enable,
- Maxwell::ComparisonOp depth_test_function, StencilFace front_stencil,
- StencilFace back_stencil)
- : depth_test_enable{depth_test_enable}, depth_write_enable{depth_write_enable},
- depth_bounds_enable{depth_bounds_enable}, stencil_enable{stencil_enable},
- depth_test_function{depth_test_function}, front_stencil{front_stencil},
- back_stencil{back_stencil} {}
- DepthStencil() = default;
-
- bool depth_test_enable;
- bool depth_write_enable;
- bool depth_bounds_enable;
- bool stencil_enable;
- Maxwell::ComparisonOp depth_test_function;
- StencilFace front_stencil;
- StencilFace back_stencil;
-
- std::size_t Hash() const noexcept;
-
- bool operator==(const DepthStencil& rhs) const noexcept;
-
- bool operator!=(const DepthStencil& rhs) const noexcept {
- return !operator==(rhs);
+ template <std::size_t Position>
+ union StencilFace {
+ BitField<Position + 0, 3, u32> action_stencil_fail;
+ BitField<Position + 3, 3, u32> action_depth_fail;
+ BitField<Position + 6, 3, u32> action_depth_pass;
+ BitField<Position + 9, 3, u32> test_func;
+
+ Maxwell::StencilOp ActionStencilFail() const noexcept {
+ return UnpackStencilOp(action_stencil_fail);
+ }
+
+ Maxwell::StencilOp ActionDepthFail() const noexcept {
+ return UnpackStencilOp(action_depth_fail);
+ }
+
+ Maxwell::StencilOp ActionDepthPass() const noexcept {
+ return UnpackStencilOp(action_depth_pass);
+ }
+
+ Maxwell::ComparisonOp TestFunc() const noexcept {
+ return UnpackComparisonOp(test_func);
+ }
+ };
+
+ union {
+ u32 raw;
+ StencilFace<0> front;
+ StencilFace<12> back;
+ BitField<24, 1, u32> depth_test_enable;
+ BitField<25, 1, u32> depth_write_enable;
+ BitField<26, 1, u32> depth_bounds_enable;
+ BitField<27, 1, u32> stencil_enable;
+ BitField<28, 3, u32> depth_test_func;
+ };
+
+ void Fill(const Maxwell& regs) noexcept;
+
+ Maxwell::ComparisonOp DepthTestFunc() const noexcept {
+ return UnpackComparisonOp(depth_test_func);
}
};
struct ColorBlending {
- constexpr ColorBlending(
- std::array<float, 4> blend_constants, std::size_t attachments_count,
- std::array<BlendingAttachment, Maxwell::NumRenderTargets> attachments)
- : attachments_count{attachments_count}, attachments{attachments} {}
- ColorBlending() = default;
-
- std::size_t attachments_count;
std::array<BlendingAttachment, Maxwell::NumRenderTargets> attachments;
- std::size_t Hash() const noexcept;
+ void Fill(const Maxwell& regs) noexcept;
+ };
- bool operator==(const ColorBlending& rhs) const noexcept;
+ VertexInput vertex_input;
+ Rasterizer rasterizer;
+ DepthStencil depth_stencil;
+ ColorBlending color_blending;
- bool operator!=(const ColorBlending& rhs) const noexcept {
- return !operator==(rhs);
- }
- };
+ void Fill(const Maxwell& regs);
std::size_t Hash() const noexcept;
@@ -248,27 +247,10 @@ struct FixedPipelineState {
bool operator!=(const FixedPipelineState& rhs) const noexcept {
return !operator==(rhs);
}
-
- VertexInput vertex_input;
- InputAssembly input_assembly;
- Tessellation tessellation;
- Rasterizer rasterizer;
- DepthStencil depth_stencil;
- ColorBlending color_blending;
};
-static_assert(std::is_trivially_copyable_v<FixedPipelineState::VertexBinding>);
-static_assert(std::is_trivially_copyable_v<FixedPipelineState::VertexAttribute>);
-static_assert(std::is_trivially_copyable_v<FixedPipelineState::StencilFace>);
-static_assert(std::is_trivially_copyable_v<FixedPipelineState::BlendingAttachment>);
-static_assert(std::is_trivially_copyable_v<FixedPipelineState::VertexInput>);
-static_assert(std::is_trivially_copyable_v<FixedPipelineState::InputAssembly>);
-static_assert(std::is_trivially_copyable_v<FixedPipelineState::Tessellation>);
-static_assert(std::is_trivially_copyable_v<FixedPipelineState::Rasterizer>);
-static_assert(std::is_trivially_copyable_v<FixedPipelineState::DepthStencil>);
-static_assert(std::is_trivially_copyable_v<FixedPipelineState::ColorBlending>);
+static_assert(std::has_unique_object_representations_v<FixedPipelineState>);
static_assert(std::is_trivially_copyable_v<FixedPipelineState>);
-
-FixedPipelineState GetFixedPipelineState(const Maxwell& regs);
+static_assert(std::is_trivially_constructible_v<FixedPipelineState>);
} // namespace Vulkan
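
The replacement asserts are what make the new representation useful as a pipeline-cache key: `has_unique_object_representations` guarantees no padding bits and no members with multiple value encodings, so equality and hashing may operate on the raw bytes. A sketch of what `Hash()` and `operator==` can then reduce to (FNV-1a is an assumption here; the committed code may use a different byte hash):

    #include <cstddef>
    #include <cstring>

    // Byte-wise sketch: valid only because the struct has unique object
    // representations, so equal states imply equal byte patterns.
    std::size_t HashBytes(const FixedPipelineState& state) noexcept {
        const auto* bytes = reinterpret_cast<const unsigned char*>(&state);
        std::size_t hash = 0xcbf29ce484222325ULL; // FNV-1a offset basis
        for (std::size_t i = 0; i < sizeof(state); ++i) {
            hash = (hash ^ bytes[i]) * 0x100000001b3ULL;
        }
        return hash;
    }

    bool BytesEqual(const FixedPipelineState& a, const FixedPipelineState& b) noexcept {
        return std::memcmp(&a, &b, sizeof(a)) == 0;
    }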
diff --git a/src/video_core/renderer_vulkan/nsight_aftermath_tracker.cpp b/src/video_core/renderer_vulkan/nsight_aftermath_tracker.cpp
new file mode 100644
index 000000000..435c8c1b8
--- /dev/null
+++ b/src/video_core/renderer_vulkan/nsight_aftermath_tracker.cpp
@@ -0,0 +1,220 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#ifdef HAS_NSIGHT_AFTERMATH
+
+#include <mutex>
+#include <string>
+#include <string_view>
+#include <utility>
+#include <vector>
+
+#include <fmt/format.h>
+
+#define VK_NO_PROTOTYPES
+#include <vulkan/vulkan.h>
+
+#include <GFSDK_Aftermath.h>
+#include <GFSDK_Aftermath_Defines.h>
+#include <GFSDK_Aftermath_GpuCrashDump.h>
+#include <GFSDK_Aftermath_GpuCrashDumpDecoding.h>
+
+#include "common/common_paths.h"
+#include "common/common_types.h"
+#include "common/file_util.h"
+#include "common/logging/log.h"
+#include "common/scope_exit.h"
+
+#include "video_core/renderer_vulkan/nsight_aftermath_tracker.h"
+
+namespace Vulkan {
+
+static constexpr char AFTERMATH_LIB_NAME[] = "GFSDK_Aftermath_Lib.x64.dll";
+
+NsightAftermathTracker::NsightAftermathTracker() = default;
+
+NsightAftermathTracker::~NsightAftermathTracker() {
+ if (initialized) {
+ (void)GFSDK_Aftermath_DisableGpuCrashDumps();
+ }
+}
+
+bool NsightAftermathTracker::Initialize() {
+ if (!dl.Open(AFTERMATH_LIB_NAME)) {
+ LOG_ERROR(Render_Vulkan, "Failed to load Nsight Aftermath DLL");
+ return false;
+ }
+
+ if (!dl.GetSymbol("GFSDK_Aftermath_DisableGpuCrashDumps",
+ &GFSDK_Aftermath_DisableGpuCrashDumps) ||
+ !dl.GetSymbol("GFSDK_Aftermath_EnableGpuCrashDumps",
+ &GFSDK_Aftermath_EnableGpuCrashDumps) ||
+ !dl.GetSymbol("GFSDK_Aftermath_GetShaderDebugInfoIdentifier",
+ &GFSDK_Aftermath_GetShaderDebugInfoIdentifier) ||
+ !dl.GetSymbol("GFSDK_Aftermath_GetShaderHashSpirv", &GFSDK_Aftermath_GetShaderHashSpirv) ||
+ !dl.GetSymbol("GFSDK_Aftermath_GpuCrashDump_CreateDecoder",
+ &GFSDK_Aftermath_GpuCrashDump_CreateDecoder) ||
+ !dl.GetSymbol("GFSDK_Aftermath_GpuCrashDump_DestroyDecoder",
+ &GFSDK_Aftermath_GpuCrashDump_DestroyDecoder) ||
+ !dl.GetSymbol("GFSDK_Aftermath_GpuCrashDump_GenerateJSON",
+ &GFSDK_Aftermath_GpuCrashDump_GenerateJSON) ||
+ !dl.GetSymbol("GFSDK_Aftermath_GpuCrashDump_GetJSON",
+ &GFSDK_Aftermath_GpuCrashDump_GetJSON)) {
+ LOG_ERROR(Render_Vulkan, "Failed to load Nsight Aftermath function pointers");
+ return false;
+ }
+
+ dump_dir = FileUtil::GetUserPath(FileUtil::UserPath::LogDir) + "gpucrash";
+
+ (void)FileUtil::DeleteDirRecursively(dump_dir);
+ if (!FileUtil::CreateDir(dump_dir)) {
+ LOG_ERROR(Render_Vulkan, "Failed to create Nsight Aftermath dump directory");
+ return false;
+ }
+
+ if (!GFSDK_Aftermath_SUCCEED(GFSDK_Aftermath_EnableGpuCrashDumps(
+ GFSDK_Aftermath_Version_API, GFSDK_Aftermath_GpuCrashDumpWatchedApiFlags_Vulkan,
+ GFSDK_Aftermath_GpuCrashDumpFeatureFlags_Default, GpuCrashDumpCallback,
+ ShaderDebugInfoCallback, CrashDumpDescriptionCallback, this))) {
+ LOG_ERROR(Render_Vulkan, "GFSDK_Aftermath_EnableGpuCrashDumps failed");
+ return false;
+ }
+
+ LOG_INFO(Render_Vulkan, "Nsight Aftermath dump directory is \"{}\"", dump_dir);
+
+ initialized = true;
+ return true;
+}
+
+void NsightAftermathTracker::SaveShader(const std::vector<u32>& spirv) const {
+ if (!initialized) {
+ return;
+ }
+
+ std::vector<u32> spirv_copy = spirv;
+ GFSDK_Aftermath_SpirvCode shader;
+ shader.pData = spirv_copy.data();
+ shader.size = static_cast<u32>(spirv_copy.size() * 4);
+
+ std::scoped_lock lock{mutex};
+
+ GFSDK_Aftermath_ShaderHash hash;
+ if (!GFSDK_Aftermath_SUCCEED(
+ GFSDK_Aftermath_GetShaderHashSpirv(GFSDK_Aftermath_Version_API, &shader, &hash))) {
+ LOG_ERROR(Render_Vulkan, "Failed to hash SPIR-V module");
+ return;
+ }
+
+ FileUtil::IOFile file(fmt::format("{}/source_{:016x}.spv", dump_dir, hash.hash), "wb");
+ if (!file.IsOpen()) {
+ LOG_ERROR(Render_Vulkan, "Failed to dump SPIR-V module with hash={:016x}", hash.hash);
+ return;
+ }
+ if (file.WriteArray(spirv.data(), spirv.size()) != spirv.size()) {
+ LOG_ERROR(Render_Vulkan, "Failed to write SPIR-V module with hash={:016x}", hash.hash);
+ return;
+ }
+}
+
+void NsightAftermathTracker::OnGpuCrashDumpCallback(const void* gpu_crash_dump,
+ u32 gpu_crash_dump_size) {
+ std::scoped_lock lock{mutex};
+
+ LOG_CRITICAL(Render_Vulkan, "GPU crash dump callback invoked");
+
+ GFSDK_Aftermath_GpuCrashDump_Decoder decoder;
+ if (!GFSDK_Aftermath_SUCCEED(GFSDK_Aftermath_GpuCrashDump_CreateDecoder(
+ GFSDK_Aftermath_Version_API, gpu_crash_dump, gpu_crash_dump_size, &decoder))) {
+ LOG_ERROR(Render_Vulkan, "Failed to create decoder");
+ return;
+ }
+ SCOPE_EXIT({ GFSDK_Aftermath_GpuCrashDump_DestroyDecoder(decoder); });
+
+ u32 json_size = 0;
+ if (!GFSDK_Aftermath_SUCCEED(GFSDK_Aftermath_GpuCrashDump_GenerateJSON(
+ decoder, GFSDK_Aftermath_GpuCrashDumpDecoderFlags_ALL_INFO,
+ GFSDK_Aftermath_GpuCrashDumpFormatterFlags_NONE, nullptr, nullptr, nullptr, nullptr,
+ this, &json_size))) {
+ LOG_ERROR(Render_Vulkan, "Failed to generate JSON");
+ return;
+ }
+ std::vector<char> json(json_size);
+ if (!GFSDK_Aftermath_SUCCEED(
+ GFSDK_Aftermath_GpuCrashDump_GetJSON(decoder, json_size, json.data()))) {
+ LOG_ERROR(Render_Vulkan, "Failed to query JSON");
+ return;
+ }
+
+ const std::string base_name = [this] {
+ const int id = dump_id++;
+ if (id == 0) {
+ return fmt::format("{}/crash.nv-gpudmp", dump_dir);
+ } else {
+ return fmt::format("{}/crash_{}.nv-gpudmp", dump_dir, id);
+ }
+ }();
+
+ std::string_view dump_view(static_cast<const char*>(gpu_crash_dump), gpu_crash_dump_size);
+ if (FileUtil::WriteStringToFile(false, base_name, dump_view) != gpu_crash_dump_size) {
+ LOG_ERROR(Render_Vulkan, "Failed to write dump file");
+ return;
+ }
+ const std::string_view json_view(json.data(), json.size());
+ if (FileUtil::WriteStringToFile(true, base_name + ".json", json_view) != json.size()) {
+ LOG_ERROR(Render_Vulkan, "Failed to write JSON");
+ return;
+ }
+}
+
+void NsightAftermathTracker::OnShaderDebugInfoCallback(const void* shader_debug_info,
+ u32 shader_debug_info_size) {
+ std::scoped_lock lock{mutex};
+
+ GFSDK_Aftermath_ShaderDebugInfoIdentifier identifier;
+ if (!GFSDK_Aftermath_SUCCEED(GFSDK_Aftermath_GetShaderDebugInfoIdentifier(
+ GFSDK_Aftermath_Version_API, shader_debug_info, shader_debug_info_size, &identifier))) {
+ LOG_ERROR(Render_Vulkan, "GFSDK_Aftermath_GetShaderDebugInfoIdentifier failed");
+ return;
+ }
+
+ const std::string path =
+ fmt::format("{}/shader_{:016x}{:016x}.nvdbg", dump_dir, identifier.id[0], identifier.id[1]);
+ FileUtil::IOFile file(path, "wb");
+ if (!file.IsOpen()) {
+ LOG_ERROR(Render_Vulkan, "Failed to create file {}", path);
+ return;
+ }
+ if (file.WriteBytes(static_cast<const u8*>(shader_debug_info), shader_debug_info_size) !=
+ shader_debug_info_size) {
+ LOG_ERROR(Render_Vulkan, "Failed to write file {}", path);
+ return;
+ }
+}
+
+void NsightAftermathTracker::OnCrashDumpDescriptionCallback(
+ PFN_GFSDK_Aftermath_AddGpuCrashDumpDescription add_description) {
+ add_description(GFSDK_Aftermath_GpuCrashDumpDescriptionKey_ApplicationName, "yuzu");
+}
+
+void NsightAftermathTracker::GpuCrashDumpCallback(const void* gpu_crash_dump,
+ u32 gpu_crash_dump_size, void* user_data) {
+ static_cast<NsightAftermathTracker*>(user_data)->OnGpuCrashDumpCallback(gpu_crash_dump,
+ gpu_crash_dump_size);
+}
+
+void NsightAftermathTracker::ShaderDebugInfoCallback(const void* shader_debug_info,
+ u32 shader_debug_info_size, void* user_data) {
+ static_cast<NsightAftermathTracker*>(user_data)->OnShaderDebugInfoCallback(
+ shader_debug_info, shader_debug_info_size);
+}
+
+void NsightAftermathTracker::CrashDumpDescriptionCallback(
+ PFN_GFSDK_Aftermath_AddGpuCrashDumpDescription add_description, void* user_data) {
+ static_cast<NsightAftermathTracker*>(user_data)->OnCrashDumpDescriptionCallback(
+ add_description);
+}
+
+} // namespace Vulkan
+
+#endif // HAS_NSIGHT_AFTERMATH
diff --git a/src/video_core/renderer_vulkan/nsight_aftermath_tracker.h b/src/video_core/renderer_vulkan/nsight_aftermath_tracker.h
new file mode 100644
index 000000000..afe7ae99e
--- /dev/null
+++ b/src/video_core/renderer_vulkan/nsight_aftermath_tracker.h
@@ -0,0 +1,87 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <mutex>
+#include <string>
+#include <vector>
+
+#define VK_NO_PROTOTYPES
+#include <vulkan/vulkan.h>
+
+#ifdef HAS_NSIGHT_AFTERMATH
+#include <GFSDK_Aftermath_Defines.h>
+#include <GFSDK_Aftermath_GpuCrashDump.h>
+#include <GFSDK_Aftermath_GpuCrashDumpDecoding.h>
+#endif
+
+#include "common/common_types.h"
+#include "common/dynamic_library.h"
+
+namespace Vulkan {
+
+class NsightAftermathTracker {
+public:
+ NsightAftermathTracker();
+ ~NsightAftermathTracker();
+
+ NsightAftermathTracker(const NsightAftermathTracker&) = delete;
+ NsightAftermathTracker& operator=(const NsightAftermathTracker&) = delete;
+
+ // Delete move semantics because the Aftermath callbacks keep a pointer to this object.
+ NsightAftermathTracker(NsightAftermathTracker&&) = delete;
+ NsightAftermathTracker& operator=(NsightAftermathTracker&&) = delete;
+
+ bool Initialize();
+
+ void SaveShader(const std::vector<u32>& spirv) const;
+
+private:
+#ifdef HAS_NSIGHT_AFTERMATH
+ static void GpuCrashDumpCallback(const void* gpu_crash_dump, u32 gpu_crash_dump_size,
+ void* user_data);
+
+ static void ShaderDebugInfoCallback(const void* shader_debug_info, u32 shader_debug_info_size,
+ void* user_data);
+
+ static void CrashDumpDescriptionCallback(
+ PFN_GFSDK_Aftermath_AddGpuCrashDumpDescription add_description, void* user_data);
+
+ void OnGpuCrashDumpCallback(const void* gpu_crash_dump, u32 gpu_crash_dump_size);
+
+ void OnShaderDebugInfoCallback(const void* shader_debug_info, u32 shader_debug_info_size);
+
+ void OnCrashDumpDescriptionCallback(
+ PFN_GFSDK_Aftermath_AddGpuCrashDumpDescription add_description);
+
+ mutable std::mutex mutex;
+
+ std::string dump_dir;
+ int dump_id = 0;
+
+ bool initialized = false;
+
+ Common::DynamicLibrary dl;
+ PFN_GFSDK_Aftermath_DisableGpuCrashDumps GFSDK_Aftermath_DisableGpuCrashDumps;
+ PFN_GFSDK_Aftermath_EnableGpuCrashDumps GFSDK_Aftermath_EnableGpuCrashDumps;
+ PFN_GFSDK_Aftermath_GetShaderDebugInfoIdentifier GFSDK_Aftermath_GetShaderDebugInfoIdentifier;
+ PFN_GFSDK_Aftermath_GetShaderHashSpirv GFSDK_Aftermath_GetShaderHashSpirv;
+ PFN_GFSDK_Aftermath_GpuCrashDump_CreateDecoder GFSDK_Aftermath_GpuCrashDump_CreateDecoder;
+ PFN_GFSDK_Aftermath_GpuCrashDump_DestroyDecoder GFSDK_Aftermath_GpuCrashDump_DestroyDecoder;
+ PFN_GFSDK_Aftermath_GpuCrashDump_GenerateJSON GFSDK_Aftermath_GpuCrashDump_GenerateJSON;
+ PFN_GFSDK_Aftermath_GpuCrashDump_GetJSON GFSDK_Aftermath_GpuCrashDump_GetJSON;
+#endif
+};
+
+#ifndef HAS_NSIGHT_AFTERMATH
+inline NsightAftermathTracker::NsightAftermathTracker() = default;
+inline NsightAftermathTracker::~NsightAftermathTracker() = default;
+inline bool NsightAftermathTracker::Initialize() {
+ return false;
+}
+inline void NsightAftermathTracker::SaveShader(const std::vector<u32>&) const {}
+#endif
+
+} // namespace Vulkan
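
Taken together with the stub block above, the class can be used unconditionally. A hedged usage sketch (`spirv_code` is a placeholder for a compiled module assumed to be in scope):

    // Usage sketch. When HAS_NSIGHT_AFTERMATH is not defined, Initialize()
    // returns false and SaveShader() is a no-op, so no guards are needed.
    Vulkan::NsightAftermathTracker tracker;
    if (tracker.Initialize()) {
        // Register each SPIR-V module before creating its VkShaderModule so a
        // later crash dump can be matched against the dumped sources.
        tracker.SaveShader(spirv_code); // std::vector<u32>, assumed in scope
    }
    // On a device loss the Aftermath callbacks fire and write crash.nv-gpudmp
    // plus a decoded .json into the gpucrash dump directory.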
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
index 04532f8f8..59b441943 100644
--- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp
+++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
@@ -12,15 +12,12 @@
#include <fmt/format.h>
-#include "common/assert.h"
#include "common/dynamic_library.h"
#include "common/logging/log.h"
#include "common/telemetry.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/frontend/emu_window.h"
-#include "core/memory.h"
-#include "core/perf_stats.h"
#include "core/settings.h"
#include "core/telemetry_session.h"
#include "video_core/gpu.h"
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.h b/src/video_core/renderer_vulkan/renderer_vulkan.h
index 18270909b..522b5bff8 100644
--- a/src/video_core/renderer_vulkan/renderer_vulkan.h
+++ b/src/video_core/renderer_vulkan/renderer_vulkan.h
@@ -5,7 +5,6 @@
#pragma once
#include <memory>
-#include <optional>
#include <string>
#include <vector>
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.h b/src/video_core/renderer_vulkan/vk_blit_screen.h
index 5eb544aea..243640fab 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.h
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.h
@@ -4,7 +4,6 @@
#pragma once
-#include <array>
#include <memory>
#include <tuple>
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
index 81e1de2be..5b494da8c 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
@@ -5,11 +5,7 @@
#include <algorithm>
#include <cstring>
#include <memory>
-#include <optional>
-#include <tuple>
-#include "common/assert.h"
-#include "common/bit_util.h"
#include "core/core.h"
#include "video_core/renderer_vulkan/vk_buffer_cache.h"
#include "video_core/renderer_vulkan/vk_device.h"
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h
index 3cd2e2774..a54583e7d 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.h
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h
@@ -5,14 +5,11 @@
#pragma once
#include <memory>
-#include <unordered_map>
-#include <vector>
#include "common/common_types.h"
#include "video_core/buffer_cache/buffer_cache.h"
#include "video_core/rasterizer_cache.h"
#include "video_core/renderer_vulkan/vk_memory_manager.h"
-#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
#include "video_core/renderer_vulkan/vk_stream_buffer.h"
#include "video_core/renderer_vulkan/wrapper.h"
@@ -55,8 +52,6 @@ public:
protected:
VkBuffer ToHandle(const Buffer& buffer) override;
- void WriteBarrier() override {}
-
Buffer CreateBlock(VAddr cpu_addr, std::size_t size) override;
void UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
index 7b0268033..da71e710c 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
@@ -6,7 +6,7 @@
#include <memory>
#include <optional>
#include <utility>
-#include <vector>
+
#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.h b/src/video_core/renderer_vulkan/vk_compute_pass.h
index 26bf834de..230b526bc 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.h
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.h
@@ -6,7 +6,7 @@
#include <optional>
#include <utility>
-#include <vector>
+
#include "common/common_types.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
index 23beafa4f..8e1b46277 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
@@ -2,14 +2,12 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
-#include <memory>
#include <vector>
#include "video_core/renderer_vulkan/vk_compute_pipeline.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
#include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
-#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
@@ -105,6 +103,8 @@ vk::DescriptorUpdateTemplateKHR VKComputePipeline::CreateDescriptorUpdateTemplat
}
vk::ShaderModule VKComputePipeline::CreateShaderModule(const std::vector<u32>& code) const {
+ device.SaveShader(code);
+
VkShaderModuleCreateInfo ci;
ci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
ci.pNext = nullptr;
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.h b/src/video_core/renderer_vulkan/vk_compute_pipeline.h
index 33b9af29e..6e2f22a4a 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.h
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.h
@@ -4,8 +4,6 @@
#pragma once
-#include <memory>
-
#include "common/common_types.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
diff --git a/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp b/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
index e9d528aa6..890fd52cf 100644
--- a/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
@@ -2,7 +2,6 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
-#include <memory>
#include <vector>
#include "common/common_types.h"
diff --git a/src/video_core/renderer_vulkan/vk_descriptor_pool.h b/src/video_core/renderer_vulkan/vk_descriptor_pool.h
index ab40c70f0..9efa66bef 100644
--- a/src/video_core/renderer_vulkan/vk_descriptor_pool.h
+++ b/src/video_core/renderer_vulkan/vk_descriptor_pool.h
@@ -4,10 +4,8 @@
#pragma once
-#include <memory>
#include <vector>
-#include "common/common_types.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/wrapper.h"
diff --git a/src/video_core/renderer_vulkan/vk_device.cpp b/src/video_core/renderer_vulkan/vk_device.cpp
index 52d29e49d..0e4bbca97 100644
--- a/src/video_core/renderer_vulkan/vk_device.cpp
+++ b/src/video_core/renderer_vulkan/vk_device.cpp
@@ -4,11 +4,11 @@
#include <bitset>
#include <chrono>
-#include <cstdlib>
#include <optional>
#include <string_view>
#include <thread>
#include <unordered_set>
+#include <utility>
#include <vector>
#include "common/assert.h"
@@ -167,6 +167,7 @@ bool VKDevice::Create() {
VkPhysicalDeviceFeatures2 features2;
features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
features2.pNext = nullptr;
+ const void* first_next = &features2;
void** next = &features2.pNext;
auto& features = features2.features;
@@ -296,7 +297,19 @@ bool VKDevice::Create() {
LOG_INFO(Render_Vulkan, "Device doesn't support depth range unrestricted");
}
- logical = vk::Device::Create(physical, queue_cis, extensions, features2, dld);
+ VkDeviceDiagnosticsConfigCreateInfoNV diagnostics_nv;
+ if (nv_device_diagnostics_config) {
+ nsight_aftermath_tracker.Initialize();
+
+ diagnostics_nv.sType = VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV;
+ diagnostics_nv.pNext = &features2;
+ diagnostics_nv.flags = VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_SHADER_DEBUG_INFO_BIT_NV |
+ VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_RESOURCE_TRACKING_BIT_NV |
+ VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_AUTOMATIC_CHECKPOINTS_BIT_NV;
+ first_next = &diagnostics_nv;
+ }
+
+ logical = vk::Device::Create(physical, queue_cis, extensions, first_next, dld);
if (!logical) {
LOG_ERROR(Render_Vulkan, "Failed to create logical device");
return false;
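
The hunk above prepends the diagnostics struct to the feature chain instead of appending it, which is why `first_next` tracks the current head. The generic pattern, sketched with core Vulkan types:

    // pNext prepend sketch: the new struct points at the old head, and device
    // creation consumes whichever pointer is the current head.
    VkPhysicalDeviceFeatures2 features2{};
    features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    const void* first_next = &features2;

    VkDeviceDiagnosticsConfigCreateInfoNV diagnostics_nv{};
    diagnostics_nv.sType = VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV;
    diagnostics_nv.pNext = &features2; // old head becomes the tail
    first_next = &diagnostics_nv;      // new head goes into VkDeviceCreateInfo::pNext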
@@ -344,17 +357,12 @@ VkFormat VKDevice::GetSupportedFormat(VkFormat wanted_format, VkFormatFeatureFla
void VKDevice::ReportLoss() const {
LOG_CRITICAL(Render_Vulkan, "Device loss occurred!");
- // Wait some time to let the log flush
- std::this_thread::sleep_for(std::chrono::seconds{1});
-
- if (!nv_device_diagnostic_checkpoints) {
- return;
- }
+ // Wait for the log to flush and for Nsight Aftermath to dump the results
+ std::this_thread::sleep_for(std::chrono::seconds{3});
+}
- [[maybe_unused]] const std::vector data = graphics_queue.GetCheckpointDataNV(dld);
- // Catch here in debug builds (or with optimizations disabled) the last graphics pipeline to be
- // executed. It can be done on a debugger by evaluating the expression:
- // *(VKGraphicsPipeline*)data[0]
+void VKDevice::SaveShader(const std::vector<u32>& spirv) const {
+ nsight_aftermath_tracker.SaveShader(spirv);
}
bool VKDevice::IsOptimalAstcSupported(const VkPhysicalDeviceFeatures& features) const {
@@ -527,8 +535,8 @@ std::vector<const char*> VKDevice::LoadExtensions() {
Test(extension, has_ext_transform_feedback, VK_EXT_TRANSFORM_FEEDBACK_EXTENSION_NAME,
false);
if (Settings::values.renderer_debug) {
- Test(extension, nv_device_diagnostic_checkpoints,
- VK_NV_DEVICE_DIAGNOSTIC_CHECKPOINTS_EXTENSION_NAME, true);
+ Test(extension, nv_device_diagnostics_config,
+ VK_NV_DEVICE_DIAGNOSTICS_CONFIG_EXTENSION_NAME, true);
}
}
diff --git a/src/video_core/renderer_vulkan/vk_device.h b/src/video_core/renderer_vulkan/vk_device.h
index 60d64572a..c8640762d 100644
--- a/src/video_core/renderer_vulkan/vk_device.h
+++ b/src/video_core/renderer_vulkan/vk_device.h
@@ -10,6 +10,7 @@
#include <vector>
#include "common/common_types.h"
+#include "video_core/renderer_vulkan/nsight_aftermath_tracker.h"
#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
@@ -43,6 +44,9 @@ public:
/// Reports a device loss.
void ReportLoss() const;
+ /// Reports a shader to Nsight Aftermath.
+ void SaveShader(const std::vector<u32>& spirv) const;
+
/// Returns the dispatch loader with direct function pointers of the device.
const vk::DeviceDispatch& GetDispatchLoader() const {
return dld;
@@ -78,11 +82,6 @@ public:
return present_family;
}
- /// Returns true if the device is integrated with the host CPU.
- bool IsIntegrated() const {
- return properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
- }
-
/// Returns the current Vulkan API version provided in Vulkan-formatted version numbers.
u32 GetApiVersion() const {
return properties.apiVersion;
@@ -173,11 +172,6 @@ public:
return ext_transform_feedback;
}
- /// Returns true if the device supports VK_NV_device_diagnostic_checkpoints.
- bool IsNvDeviceDiagnosticCheckpoints() const {
- return nv_device_diagnostic_checkpoints;
- }
-
/// Returns the vendor name reported from Vulkan.
std::string_view GetVendorName() const {
return vendor_name;
@@ -233,7 +227,7 @@ private:
bool ext_depth_range_unrestricted{}; ///< Support for VK_EXT_depth_range_unrestricted.
bool ext_shader_viewport_index_layer{}; ///< Support for VK_EXT_shader_viewport_index_layer.
bool ext_transform_feedback{}; ///< Support for VK_EXT_transform_feedback.
- bool nv_device_diagnostic_checkpoints{}; ///< Support for VK_NV_device_diagnostic_checkpoints.
+ bool nv_device_diagnostics_config{}; ///< Support for VK_NV_device_diagnostics_config.
// Telemetry parameters
std::string vendor_name; ///< Device's driver name.
@@ -241,6 +235,9 @@ private:
/// Format properties dictionary.
std::unordered_map<VkFormat, VkFormatProperties> format_properties;
+
+ /// Nsight Aftermath GPU crash tracker
+ NsightAftermathTracker nsight_aftermath_tracker;
};
} // namespace Vulkan
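
`SaveShader` is deliberately routed through the device so every pipeline type follows the same call order. A sketch mirroring the vk_compute_pipeline.cpp hunk above (`BuildModule` is a hypothetical helper):

    // Call-order sketch: register the SPIR-V with Aftermath first, then build
    // the module. SaveShader is a no-op when Aftermath is unavailable.
    vk::ShaderModule BuildModule(const VKDevice& device, const std::vector<u32>& code) {
        device.SaveShader(code);

        VkShaderModuleCreateInfo ci{};
        ci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
        ci.codeSize = code.size() * sizeof(u32);
        ci.pCode = code.data();
        return device.GetLogical().CreateShaderModule(ci);
    }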
diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.cpp b/src/video_core/renderer_vulkan/vk_fence_manager.cpp
new file mode 100644
index 000000000..a02be5487
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_fence_manager.cpp
@@ -0,0 +1,101 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <memory>
+#include <thread>
+
+#include "common/assert.h"
+#include "video_core/renderer_vulkan/vk_buffer_cache.h"
+#include "video_core/renderer_vulkan/vk_device.h"
+#include "video_core/renderer_vulkan/vk_fence_manager.h"
+#include "video_core/renderer_vulkan/vk_scheduler.h"
+#include "video_core/renderer_vulkan/vk_texture_cache.h"
+#include "video_core/renderer_vulkan/wrapper.h"
+
+namespace Vulkan {
+
+InnerFence::InnerFence(const VKDevice& device, VKScheduler& scheduler, u32 payload, bool is_stubbed)
+ : VideoCommon::FenceBase(payload, is_stubbed), device{device}, scheduler{scheduler} {}
+
+InnerFence::InnerFence(const VKDevice& device, VKScheduler& scheduler, GPUVAddr address,
+ u32 payload, bool is_stubbed)
+ : VideoCommon::FenceBase(address, payload, is_stubbed), device{device}, scheduler{scheduler} {}
+
+InnerFence::~InnerFence() = default;
+
+void InnerFence::Queue() {
+ if (is_stubbed) {
+ return;
+ }
+ ASSERT(!event);
+
+ event = device.GetLogical().CreateEvent();
+ ticks = scheduler.Ticks();
+
+ scheduler.RequestOutsideRenderPassOperationContext();
+ scheduler.Record([event = *event](vk::CommandBuffer cmdbuf) {
+ cmdbuf.SetEvent(event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
+ });
+}
+
+bool InnerFence::IsSignaled() const {
+ if (is_stubbed) {
+ return true;
+ }
+ ASSERT(event);
+ return IsEventSignalled();
+}
+
+void InnerFence::Wait() {
+ if (is_stubbed) {
+ return;
+ }
+ ASSERT(event);
+
+ if (ticks >= scheduler.Ticks()) {
+ scheduler.Flush();
+ }
+ while (!IsEventSignalled()) {
+ std::this_thread::yield();
+ }
+}
+
+bool InnerFence::IsEventSignalled() const {
+ switch (const VkResult result = event.GetStatus()) {
+ case VK_EVENT_SET:
+ return true;
+ case VK_EVENT_RESET:
+ return false;
+ default:
+ throw vk::Exception(result);
+ }
+}
+
+VKFenceManager::VKFenceManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
+ const VKDevice& device, VKScheduler& scheduler,
+ VKTextureCache& texture_cache, VKBufferCache& buffer_cache,
+ VKQueryCache& query_cache)
+ : GenericFenceManager(system, rasterizer, texture_cache, buffer_cache, query_cache),
+ device{device}, scheduler{scheduler} {}
+
+Fence VKFenceManager::CreateFence(u32 value, bool is_stubbed) {
+ return std::make_shared<InnerFence>(device, scheduler, value, is_stubbed);
+}
+
+Fence VKFenceManager::CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) {
+ return std::make_shared<InnerFence>(device, scheduler, addr, value, is_stubbed);
+}
+
+void VKFenceManager::QueueFence(Fence& fence) {
+ fence->Queue();
+}
+
+bool VKFenceManager::IsFenceSignaled(Fence& fence) const {
+ return fence->IsSignaled();
+}
+
+void VKFenceManager::WaitFence(Fence& fence) {
+ fence->Wait();
+}
+
+} // namespace Vulkan
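
The fence is a VkEvent set from the GPU timeline: `Queue()` records the set, `IsSignaled()` polls it, and `Wait()` first flushes the scheduler when the recorded set has not been submitted yet. A behavior sketch (fences are normally driven through the VideoCommon::FenceManager base class; this direct construction is for illustration only):

    auto fence = std::make_shared<InnerFence>(device, scheduler, /*payload=*/1u,
                                              /*is_stubbed=*/false);
    fence->Queue();            // records vkCmdSetEvent outside of a render pass
    if (!fence->IsSignaled()) {
        fence->Wait();         // flushes if needed, then yields until VK_EVENT_SET
    }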
diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.h b/src/video_core/renderer_vulkan/vk_fence_manager.h
new file mode 100644
index 000000000..04d07fe6a
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_fence_manager.h
@@ -0,0 +1,74 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <memory>
+
+#include "video_core/fence_manager.h"
+#include "video_core/renderer_vulkan/wrapper.h"
+
+namespace Core {
+class System;
+}
+
+namespace VideoCore {
+class RasterizerInterface;
+}
+
+namespace Vulkan {
+
+class VKBufferCache;
+class VKDevice;
+class VKQueryCache;
+class VKScheduler;
+class VKTextureCache;
+
+class InnerFence : public VideoCommon::FenceBase {
+public:
+ explicit InnerFence(const VKDevice& device, VKScheduler& scheduler, u32 payload,
+ bool is_stubbed);
+ explicit InnerFence(const VKDevice& device, VKScheduler& scheduler, GPUVAddr address,
+ u32 payload, bool is_stubbed);
+ ~InnerFence();
+
+ void Queue();
+
+ bool IsSignaled() const;
+
+ void Wait();
+
+private:
+ bool IsEventSignalled() const;
+
+ const VKDevice& device;
+ VKScheduler& scheduler;
+ vk::Event event;
+ u64 ticks = 0;
+};
+using Fence = std::shared_ptr<InnerFence>;
+
+using GenericFenceManager =
+ VideoCommon::FenceManager<Fence, VKTextureCache, VKBufferCache, VKQueryCache>;
+
+class VKFenceManager final : public GenericFenceManager {
+public:
+ explicit VKFenceManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
+ const VKDevice& device, VKScheduler& scheduler,
+ VKTextureCache& texture_cache, VKBufferCache& buffer_cache,
+ VKQueryCache& query_cache);
+
+protected:
+ Fence CreateFence(u32 value, bool is_stubbed) override;
+ Fence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) override;
+ void QueueFence(Fence& fence) override;
+ bool IsFenceSignaled(Fence& fence) const override;
+ void WaitFence(Fence& fence) override;
+
+private:
+ const VKDevice& device;
+ VKScheduler& scheduler;
+};
+
+} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
index b540b838d..1ac981974 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
@@ -6,7 +6,6 @@
#include <cstring>
#include <vector>
-#include "common/assert.h"
#include "common/common_types.h"
#include "common/microprofile.h"
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
@@ -26,12 +25,13 @@ MICROPROFILE_DECLARE(Vulkan_PipelineCache);
namespace {
-VkStencilOpState GetStencilFaceState(const FixedPipelineState::StencilFace& face) {
+template <class StencilFace>
+VkStencilOpState GetStencilFaceState(const StencilFace& face) {
VkStencilOpState state;
- state.failOp = MaxwellToVK::StencilOp(face.action_stencil_fail);
- state.passOp = MaxwellToVK::StencilOp(face.action_depth_pass);
- state.depthFailOp = MaxwellToVK::StencilOp(face.action_depth_fail);
- state.compareOp = MaxwellToVK::ComparisonOp(face.test_func);
+ state.failOp = MaxwellToVK::StencilOp(face.ActionStencilFail());
+ state.passOp = MaxwellToVK::StencilOp(face.ActionDepthPass());
+ state.depthFailOp = MaxwellToVK::StencilOp(face.ActionDepthFail());
+ state.compareOp = MaxwellToVK::ComparisonOp(face.TestFunc());
state.compareMask = 0;
state.writeMask = 0;
state.reference = 0;
@@ -147,6 +147,8 @@ std::vector<vk::ShaderModule> VKGraphicsPipeline::CreateShaderModules(
continue;
}
+ device.SaveShader(stage->code);
+
ci.codeSize = stage->code.size() * sizeof(u32);
ci.pCode = stage->code.data();
modules.push_back(device.GetLogical().CreateShaderModule(ci));
@@ -157,43 +159,47 @@ std::vector<vk::ShaderModule> VKGraphicsPipeline::CreateShaderModules(
vk::Pipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& renderpass_params,
const SPIRVProgram& program) const {
const auto& vi = fixed_state.vertex_input;
- const auto& ia = fixed_state.input_assembly;
const auto& ds = fixed_state.depth_stencil;
const auto& cd = fixed_state.color_blending;
- const auto& ts = fixed_state.tessellation;
const auto& rs = fixed_state.rasterizer;
std::vector<VkVertexInputBindingDescription> vertex_bindings;
std::vector<VkVertexInputBindingDivisorDescriptionEXT> vertex_binding_divisors;
- for (std::size_t i = 0; i < vi.num_bindings; ++i) {
- const auto& binding = vi.bindings[i];
- const bool instanced = binding.divisor != 0;
+ for (std::size_t index = 0; index < std::size(vi.bindings); ++index) {
+ const auto& binding = vi.bindings[index];
+ if (!binding.enabled) {
+ continue;
+ }
+ const bool instanced = vi.binding_divisors[index] != 0;
const auto rate = instanced ? VK_VERTEX_INPUT_RATE_INSTANCE : VK_VERTEX_INPUT_RATE_VERTEX;
auto& vertex_binding = vertex_bindings.emplace_back();
- vertex_binding.binding = binding.index;
+ vertex_binding.binding = static_cast<u32>(index);
vertex_binding.stride = binding.stride;
vertex_binding.inputRate = rate;
if (instanced) {
auto& binding_divisor = vertex_binding_divisors.emplace_back();
- binding_divisor.binding = binding.index;
- binding_divisor.divisor = binding.divisor;
+ binding_divisor.binding = static_cast<u32>(index);
+ binding_divisor.divisor = vi.binding_divisors[index];
}
}
std::vector<VkVertexInputAttributeDescription> vertex_attributes;
const auto& input_attributes = program[0]->entries.attributes;
- for (std::size_t i = 0; i < vi.num_attributes; ++i) {
- const auto& attribute = vi.attributes[i];
- if (input_attributes.find(attribute.index) == input_attributes.end()) {
+ for (std::size_t index = 0; index < std::size(vi.attributes); ++index) {
+ const auto& attribute = vi.attributes[index];
+ if (!attribute.enabled) {
+ continue;
+ }
+ if (input_attributes.find(static_cast<u32>(index)) == input_attributes.end()) {
// Skip attributes not used by the vertex shaders.
continue;
}
auto& vertex_attribute = vertex_attributes.emplace_back();
- vertex_attribute.location = attribute.index;
+ vertex_attribute.location = static_cast<u32>(index);
vertex_attribute.binding = attribute.buffer;
- vertex_attribute.format = MaxwellToVK::VertexFormat(attribute.type, attribute.size);
+ vertex_attribute.format = MaxwellToVK::VertexFormat(attribute.Type(), attribute.Size());
vertex_attribute.offset = attribute.offset;
}
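
The rewritten loop no longer stores an input rate per binding; it derives it from the divisor, matching Vulkan's convention under VK_EXT_vertex_attribute_divisor. A sketch of that mapping (`MakeBinding` is illustrative):

    // Divisor semantics sketch: 0 means advance per vertex; any non-zero value
    // means advance per instance (the exact divisor travels via the EXT struct).
    VkVertexInputBindingDescription MakeBinding(u32 index, u32 stride, u32 divisor) {
        VkVertexInputBindingDescription desc{};
        desc.binding = index;
        desc.stride = stride;
        desc.inputRate =
            divisor != 0 ? VK_VERTEX_INPUT_RATE_INSTANCE : VK_VERTEX_INPUT_RATE_VERTEX;
        return desc;
    }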
@@ -219,15 +225,15 @@ vk::Pipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& renderpa
input_assembly_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
input_assembly_ci.pNext = nullptr;
input_assembly_ci.flags = 0;
- input_assembly_ci.topology = MaxwellToVK::PrimitiveTopology(device, ia.topology);
+ input_assembly_ci.topology = MaxwellToVK::PrimitiveTopology(device, rs.Topology());
input_assembly_ci.primitiveRestartEnable =
- ia.primitive_restart_enable && SupportsPrimitiveRestart(input_assembly_ci.topology);
+ rs.primitive_restart_enable != 0 && SupportsPrimitiveRestart(input_assembly_ci.topology);
VkPipelineTessellationStateCreateInfo tessellation_ci;
tessellation_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO;
tessellation_ci.pNext = nullptr;
tessellation_ci.flags = 0;
- tessellation_ci.patchControlPoints = ts.patch_control_points;
+ tessellation_ci.patchControlPoints = rs.patch_control_points_minus_one.Value() + 1;
VkPipelineViewportStateCreateInfo viewport_ci;
viewport_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
@@ -242,12 +248,12 @@ vk::Pipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& renderpa
rasterization_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
rasterization_ci.pNext = nullptr;
rasterization_ci.flags = 0;
- rasterization_ci.depthClampEnable = rs.depth_clamp_enable;
+ rasterization_ci.depthClampEnable = rs.depth_clamp_disabled == 0 ? VK_TRUE : VK_FALSE;
rasterization_ci.rasterizerDiscardEnable = VK_FALSE;
rasterization_ci.polygonMode = VK_POLYGON_MODE_FILL;
rasterization_ci.cullMode =
- rs.cull_enable ? MaxwellToVK::CullFace(rs.cull_face) : VK_CULL_MODE_NONE;
- rasterization_ci.frontFace = MaxwellToVK::FrontFace(rs.front_face);
+ rs.cull_enable ? MaxwellToVK::CullFace(rs.CullFace()) : VK_CULL_MODE_NONE;
+ rasterization_ci.frontFace = MaxwellToVK::FrontFace(rs.FrontFace());
rasterization_ci.depthBiasEnable = rs.depth_bias_enable;
rasterization_ci.depthBiasConstantFactor = 0.0f;
rasterization_ci.depthBiasClamp = 0.0f;
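
Note the polarity flip in the hunk above: the packed field records that depth clamping is *disabled* (the register's sense), so the Vulkan enable is its negation:

    // Polarity sketch: guest register stores "clamp disabled"; Vulkan wants
    // "clamp enabled", hence the == 0 comparison.
    const VkBool32 depth_clamp_enable = rs.depth_clamp_disabled == 0 ? VK_TRUE : VK_FALSE;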
@@ -271,40 +277,38 @@ vk::Pipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& renderpa
depth_stencil_ci.flags = 0;
depth_stencil_ci.depthTestEnable = ds.depth_test_enable;
depth_stencil_ci.depthWriteEnable = ds.depth_write_enable;
- depth_stencil_ci.depthCompareOp = ds.depth_test_enable
- ? MaxwellToVK::ComparisonOp(ds.depth_test_function)
- : VK_COMPARE_OP_ALWAYS;
+ depth_stencil_ci.depthCompareOp =
+ ds.depth_test_enable ? MaxwellToVK::ComparisonOp(ds.DepthTestFunc()) : VK_COMPARE_OP_ALWAYS;
depth_stencil_ci.depthBoundsTestEnable = ds.depth_bounds_enable;
depth_stencil_ci.stencilTestEnable = ds.stencil_enable;
- depth_stencil_ci.front = GetStencilFaceState(ds.front_stencil);
- depth_stencil_ci.back = GetStencilFaceState(ds.back_stencil);
+ depth_stencil_ci.front = GetStencilFaceState(ds.front);
+ depth_stencil_ci.back = GetStencilFaceState(ds.back);
depth_stencil_ci.minDepthBounds = 0.0f;
depth_stencil_ci.maxDepthBounds = 0.0f;
std::array<VkPipelineColorBlendAttachmentState, Maxwell::NumRenderTargets> cb_attachments;
- const std::size_t num_attachments =
- std::min(cd.attachments_count, renderpass_params.color_attachments.size());
- for (std::size_t i = 0; i < num_attachments; ++i) {
- static constexpr std::array component_table = {
+ const auto num_attachments = static_cast<std::size_t>(renderpass_params.num_color_attachments);
+ for (std::size_t index = 0; index < num_attachments; ++index) {
+ static constexpr std::array COMPONENT_TABLE = {
VK_COLOR_COMPONENT_R_BIT, VK_COLOR_COMPONENT_G_BIT, VK_COLOR_COMPONENT_B_BIT,
VK_COLOR_COMPONENT_A_BIT};
- const auto& blend = cd.attachments[i];
+ const auto& blend = cd.attachments[index];
VkColorComponentFlags color_components = 0;
- for (std::size_t j = 0; j < component_table.size(); ++j) {
- if (blend.components[j]) {
- color_components |= component_table[j];
+ for (std::size_t i = 0; i < COMPONENT_TABLE.size(); ++i) {
+ if (blend.Mask()[i]) {
+ color_components |= COMPONENT_TABLE[i];
}
}
- VkPipelineColorBlendAttachmentState& attachment = cb_attachments[i];
- attachment.blendEnable = blend.enable;
- attachment.srcColorBlendFactor = MaxwellToVK::BlendFactor(blend.src_rgb_func);
- attachment.dstColorBlendFactor = MaxwellToVK::BlendFactor(blend.dst_rgb_func);
- attachment.colorBlendOp = MaxwellToVK::BlendEquation(blend.rgb_equation);
- attachment.srcAlphaBlendFactor = MaxwellToVK::BlendFactor(blend.src_a_func);
- attachment.dstAlphaBlendFactor = MaxwellToVK::BlendFactor(blend.dst_a_func);
- attachment.alphaBlendOp = MaxwellToVK::BlendEquation(blend.a_equation);
+ VkPipelineColorBlendAttachmentState& attachment = cb_attachments[index];
+ attachment.blendEnable = blend.enable != 0;
+ attachment.srcColorBlendFactor = MaxwellToVK::BlendFactor(blend.SourceRGBFactor());
+ attachment.dstColorBlendFactor = MaxwellToVK::BlendFactor(blend.DestRGBFactor());
+ attachment.colorBlendOp = MaxwellToVK::BlendEquation(blend.EquationRGB());
+ attachment.srcAlphaBlendFactor = MaxwellToVK::BlendFactor(blend.SourceAlphaFactor());
+ attachment.dstAlphaBlendFactor = MaxwellToVK::BlendFactor(blend.DestAlphaFactor());
+ attachment.alphaBlendOp = MaxwellToVK::BlendEquation(blend.EquationAlpha());
attachment.colorWriteMask = color_components;
}
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
index 7aba70960..a1d699a6c 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
@@ -5,16 +5,13 @@
#pragma once
#include <array>
-#include <memory>
#include <optional>
-#include <unordered_map>
#include <vector>
#include "video_core/engines/maxwell_3d.h"
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
#include "video_core/renderer_vulkan/vk_renderpass_cache.h"
-#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
#include "video_core/renderer_vulkan/wrapper.h"
diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.cpp b/src/video_core/renderer_vulkan/vk_memory_manager.cpp
index 6a9e658bf..b4c650a63 100644
--- a/src/video_core/renderer_vulkan/vk_memory_manager.cpp
+++ b/src/video_core/renderer_vulkan/vk_memory_manager.cpp
@@ -118,8 +118,7 @@ private:
};
VKMemoryManager::VKMemoryManager(const VKDevice& device)
- : device{device}, properties{device.GetPhysical().GetMemoryProperties()},
- is_memory_unified{GetMemoryUnified(properties)} {}
+ : device{device}, properties{device.GetPhysical().GetMemoryProperties()} {}
VKMemoryManager::~VKMemoryManager() = default;
@@ -209,16 +208,6 @@ VKMemoryCommit VKMemoryManager::TryAllocCommit(const VkMemoryRequirements& requi
return {};
}
-bool VKMemoryManager::GetMemoryUnified(const VkPhysicalDeviceMemoryProperties& properties) {
- for (u32 heap_index = 0; heap_index < properties.memoryHeapCount; ++heap_index) {
- if (!(properties.memoryHeaps[heap_index].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT)) {
- // Memory is considered unified when heaps are device local only.
- return false;
- }
- }
- return true;
-}
-
VKMemoryCommitImpl::VKMemoryCommitImpl(const VKDevice& device, VKMemoryAllocation* allocation,
const vk::DeviceMemory& memory, u64 begin, u64 end)
: device{device}, memory{memory}, interval{begin, end}, allocation{allocation} {}
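Note: the removed GetMemoryUnified helper classified a device as having unified memory when every reported heap was device-local; with the is_memory_unified flag gone, nothing queries that property anymore. For reference, the same heuristic as a standalone sketch over VkPhysicalDeviceMemoryProperties:

    #include <vulkan/vulkan.h>

    // Heuristic from the removed code: memory is "unified" when every heap is
    // device-local, i.e. there is no separate host-only heap.
    bool IsUnifiedMemory(const VkPhysicalDeviceMemoryProperties& props) {
        for (uint32_t heap = 0; heap < props.memoryHeapCount; ++heap) {
            if ((props.memoryHeaps[heap].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) == 0) {
                return false;
            }
        }
        return true;
    }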
diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.h b/src/video_core/renderer_vulkan/vk_memory_manager.h
index 5b6858e9b..1af88e3d4 100644
--- a/src/video_core/renderer_vulkan/vk_memory_manager.h
+++ b/src/video_core/renderer_vulkan/vk_memory_manager.h
@@ -40,11 +40,6 @@ public:
/// Commits memory required by the image and binds it.
VKMemoryCommit Commit(const vk::Image& image, bool host_visible);
- /// Returns true if the memory allocations are done always in host visible and coherent memory.
- bool IsMemoryUnified() const {
- return is_memory_unified;
- }
-
private:
/// Allocates a chunk of memory.
bool AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 type_mask, u64 size);
@@ -53,12 +48,8 @@ private:
VKMemoryCommit TryAllocCommit(const VkMemoryRequirements& requirements,
VkMemoryPropertyFlags wanted_properties);
- /// Returns true if the device uses an unified memory model.
- static bool GetMemoryUnified(const VkPhysicalDeviceMemoryProperties& properties);
-
- const VKDevice& device; ///< Device handler.
- const VkPhysicalDeviceMemoryProperties properties; ///< Physical device properties.
- const bool is_memory_unified; ///< True if memory model is unified.
+ const VKDevice& device; ///< Device handler.
+ const VkPhysicalDeviceMemoryProperties properties; ///< Physical device properties.
std::vector<std::unique_ptr<VKMemoryAllocation>> allocations; ///< Current allocations.
};
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index 90e3a8edd..fe45ed269 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -22,17 +22,22 @@
#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
#include "video_core/renderer_vulkan/vk_rasterizer.h"
#include "video_core/renderer_vulkan/vk_renderpass_cache.h"
-#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
#include "video_core/renderer_vulkan/wrapper.h"
#include "video_core/shader/compiler_settings.h"
+#include "video_core/shader/memory_util.h"
namespace Vulkan {
MICROPROFILE_DECLARE(Vulkan_PipelineCache);
using Tegra::Engines::ShaderType;
+using VideoCommon::Shader::GetShaderAddress;
+using VideoCommon::Shader::GetShaderCode;
+using VideoCommon::Shader::KERNEL_MAIN_OFFSET;
+using VideoCommon::Shader::ProgramCode;
+using VideoCommon::Shader::STAGE_MAIN_OFFSET;
namespace {
@@ -45,60 +50,6 @@ constexpr VkDescriptorType STORAGE_IMAGE = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
constexpr VideoCommon::Shader::CompilerSettings compiler_settings{
VideoCommon::Shader::CompileDepth::FullDecompile};
-/// Gets the address for the specified shader stage program
-GPUVAddr GetShaderAddress(Core::System& system, Maxwell::ShaderProgram program) {
- const auto& gpu{system.GPU().Maxwell3D()};
- const auto& shader_config{gpu.regs.shader_config[static_cast<std::size_t>(program)]};
- return gpu.regs.code_address.CodeAddress() + shader_config.offset;
-}
-
-/// Gets if the current instruction offset is a scheduler instruction
-constexpr bool IsSchedInstruction(std::size_t offset, std::size_t main_offset) {
- // Sched instructions appear once every 4 instructions.
- constexpr std::size_t SchedPeriod = 4;
- const std::size_t absolute_offset = offset - main_offset;
- return (absolute_offset % SchedPeriod) == 0;
-}
-
-/// Calculates the size of a program stream
-std::size_t CalculateProgramSize(const ProgramCode& program, bool is_compute) {
- const std::size_t start_offset = is_compute ? 0 : 10;
- // This is the encoded version of BRA that jumps to itself. All Nvidia
- // shaders end with one.
- constexpr u64 self_jumping_branch = 0xE2400FFFFF07000FULL;
- constexpr u64 mask = 0xFFFFFFFFFF7FFFFFULL;
- std::size_t offset = start_offset;
- while (offset < program.size()) {
- const u64 instruction = program[offset];
- if (!IsSchedInstruction(offset, start_offset)) {
- if ((instruction & mask) == self_jumping_branch) {
- // End on Maxwell's "nop" instruction
- break;
- }
- if (instruction == 0) {
- break;
- }
- }
- ++offset;
- }
- // The last instruction is included in the program size
- return std::min(offset + 1, program.size());
-}
-
-/// Gets the shader program code from memory for the specified address
-ProgramCode GetShaderCode(Tegra::MemoryManager& memory_manager, const GPUVAddr gpu_addr,
- const u8* host_ptr, bool is_compute) {
- ProgramCode program_code(VideoCommon::Shader::MAX_PROGRAM_LENGTH);
- ASSERT_OR_EXECUTE(host_ptr != nullptr, {
- std::fill(program_code.begin(), program_code.end(), 0);
- return program_code;
- });
- memory_manager.ReadBlockUnsafe(gpu_addr, program_code.data(),
- program_code.size() * sizeof(u64));
- program_code.resize(CalculateProgramSize(program_code, is_compute));
- return program_code;
-}
-
constexpr std::size_t GetStageFromProgram(std::size_t program) {
return program == 0 ? 0 : program - 1;
}
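Note: the three helpers deleted above (GetShaderAddress, CalculateProgramSize, GetShaderCode) moved into video_core/shader/memory_util, as the new using-declarations show. The size calculation walks the code stream, skipping the scheduler word Maxwell emits every fourth instruction slot, until it reaches either a zero word or the self-jumping BRA that terminates every NVIDIA shader. Condensed sketch of that terminator test:

    #include <cstdint>

    using u64 = std::uint64_t;

    // Condensed from the removed CalculateProgramSize. The mask ignores one
    // encoding bit so slight variants of the self-branch still match.
    constexpr u64 SELF_JUMPING_BRANCH = 0xE2400FFFFF07000FULL;
    constexpr u64 BRANCH_MASK = 0xFFFFFFFFFF7FFFFFULL;

    constexpr bool IsProgramEnd(u64 instruction) {
        return (instruction & BRANCH_MASK) == SELF_JUMPING_BRANCH || instruction == 0;
    }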
@@ -133,7 +84,7 @@ void AddBindings(std::vector<VkDescriptorSetLayoutBinding>& bindings, u32& bindi
u32 count = 1;
if constexpr (descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
// Combined image samplers can be arrayed.
- count = container[i].Size();
+ count = container[i].size;
}
VkDescriptorSetLayoutBinding& entry = bindings.emplace_back();
entry.binding = binding++;
@@ -161,6 +112,24 @@ u32 FillDescriptorLayout(const ShaderEntries& entries,
} // Anonymous namespace
+std::size_t GraphicsPipelineCacheKey::Hash() const noexcept {
+ const u64 hash = Common::CityHash64(reinterpret_cast<const char*>(this), sizeof *this);
+ return static_cast<std::size_t>(hash);
+}
+
+bool GraphicsPipelineCacheKey::operator==(const GraphicsPipelineCacheKey& rhs) const noexcept {
+ return std::memcmp(&rhs, this, sizeof *this) == 0;
+}
+
+std::size_t ComputePipelineCacheKey::Hash() const noexcept {
+ const u64 hash = Common::CityHash64(reinterpret_cast<const char*>(this), sizeof *this);
+ return static_cast<std::size_t>(hash);
+}
+
+bool ComputePipelineCacheKey::operator==(const ComputePipelineCacheKey& rhs) const noexcept {
+ return std::memcmp(&rhs, this, sizeof *this) == 0;
+}
+
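Note: both cache keys now hash and compare as raw bytes, which is only sound for trivially copyable types with no padding bits; the header enforces this with static_asserts. The shared pattern, expressed as a hypothetical template helper:

    #include <cstddef>
    #include <type_traits>

    #include "common/cityhash.h"

    // Hypothetical helper naming the invariant behind the byte-wise Hash() and
    // operator== above: the key must have no implicit padding.
    template <typename Key>
    std::size_t HashKey(const Key& key) noexcept {
        static_assert(std::has_unique_object_representations_v<Key>);
        static_assert(std::is_trivially_copyable_v<Key>);
        return static_cast<std::size_t>(
            Common::CityHash64(reinterpret_cast<const char*>(&key), sizeof key));
    }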
CachedShader::CachedShader(Core::System& system, Tegra::Engines::ShaderType stage,
GPUVAddr gpu_addr, VAddr cpu_addr, ProgramCode program_code,
u32 main_offset)
@@ -207,18 +176,22 @@ std::array<Shader, Maxwell::MaxShaderProgram> VKPipelineCache::GetShaders() {
const GPUVAddr program_addr{GetShaderAddress(system, program)};
const std::optional cpu_addr = memory_manager.GpuToCpuAddress(program_addr);
ASSERT(cpu_addr);
- auto shader = cpu_addr ? TryGet(*cpu_addr) : nullptr;
+ auto shader = cpu_addr ? TryGet(*cpu_addr) : null_shader;
if (!shader) {
const auto host_ptr{memory_manager.GetPointer(program_addr)};
// No shader found - create a new one
- constexpr u32 stage_offset = 10;
+ constexpr u32 stage_offset = STAGE_MAIN_OFFSET;
const auto stage = static_cast<Tegra::Engines::ShaderType>(index == 0 ? 0 : index - 1);
- auto code = GetShaderCode(memory_manager, program_addr, host_ptr, false);
+ ProgramCode code = GetShaderCode(memory_manager, program_addr, host_ptr, false);
shader = std::make_shared<CachedShader>(system, stage, program_addr, *cpu_addr,
std::move(code), stage_offset);
- Register(shader);
+ if (cpu_addr) {
+ Register(shader);
+ } else {
+ null_shader = shader;
+ }
}
shaders[index] = std::move(shader);
}
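Note: when GpuToCpuAddress yields no CPU address, the shader cannot be registered in the rasterizer cache, so it is kept alive in null_shader (null_kernel on the compute path below) and reused on the next lookup. The CachedShader constructor still receives *cpu_addr on that path; a defensive variant, which is an assumption and not the patch's code, would substitute a sentinel instead of dereferencing the empty optional:

    // Defensive sketch: avoid dereferencing an empty optional on the
    // null-shader path by falling back to address 0.
    const VAddr cpu_addr_value = cpu_addr.value_or(0);
    shader = std::make_shared<CachedShader>(system, stage, program_addr, cpu_addr_value,
                                            std::move(code), stage_offset);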
@@ -261,17 +234,20 @@ VKComputePipeline& VKPipelineCache::GetComputePipeline(const ComputePipelineCach
const auto cpu_addr = memory_manager.GpuToCpuAddress(program_addr);
ASSERT(cpu_addr);
- auto shader = cpu_addr ? TryGet(*cpu_addr) : nullptr;
+ auto shader = cpu_addr ? TryGet(*cpu_addr) : null_kernel;
if (!shader) {
// No shader found - create a new one
const auto host_ptr = memory_manager.GetPointer(program_addr);
- auto code = GetShaderCode(memory_manager, program_addr, host_ptr, true);
- constexpr u32 kernel_main_offset = 0;
+ ProgramCode code = GetShaderCode(memory_manager, program_addr, host_ptr, true);
shader = std::make_shared<CachedShader>(system, Tegra::Engines::ShaderType::Compute,
program_addr, *cpu_addr, std::move(code),
- kernel_main_offset);
- Register(shader);
+ KERNEL_MAIN_OFFSET);
+ if (cpu_addr) {
+ Register(shader);
+ } else {
+ null_kernel = shader;
+ }
}
Specialization specialization;
@@ -329,12 +305,14 @@ VKPipelineCache::DecompileShaders(const GraphicsPipelineCacheKey& key) {
const auto& gpu = system.GPU().Maxwell3D();
Specialization specialization;
- if (fixed_state.input_assembly.topology == Maxwell::PrimitiveTopology::Points) {
- ASSERT(fixed_state.input_assembly.point_size != 0.0f);
- specialization.point_size = fixed_state.input_assembly.point_size;
+ if (fixed_state.rasterizer.Topology() == Maxwell::PrimitiveTopology::Points) {
+ float point_size;
+ std::memcpy(&point_size, &fixed_state.rasterizer.point_size, sizeof(float));
+ specialization.point_size = point_size;
+ ASSERT(point_size != 0.0f);
}
for (std::size_t i = 0; i < Maxwell::NumVertexAttributes; ++i) {
- specialization.attribute_types[i] = fixed_state.vertex_input.attributes[i].type;
+ specialization.attribute_types[i] = fixed_state.vertex_input.attributes[i].Type();
}
specialization.ndc_minus_one_to_one = fixed_state.rasterizer.ndc_minus_one_to_one;
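Note: point_size is stored in the fixed state as raw 32-bit register contents, so it is reinterpreted with std::memcpy rather than a cast. Assuming the field is a u32 and C++20 is available, std::bit_cast expresses the same reinterpretation:

    #include <bit> // C++20

    // Equivalent to the memcpy above: reinterpret the raw register bits as float.
    const float point_size = std::bit_cast<float>(fixed_state.rasterizer.point_size);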
@@ -383,7 +361,7 @@ void AddEntry(std::vector<VkDescriptorUpdateTemplateEntry>& template_entries, u3
if constexpr (descriptor_type == COMBINED_IMAGE_SAMPLER) {
for (u32 i = 0; i < count; ++i) {
- const u32 num_samplers = container[i].Size();
+ const u32 num_samplers = container[i].size;
VkDescriptorUpdateTemplateEntry& entry = template_entries.emplace_back();
entry.dstBinding = binding;
entry.dstArrayElement = 0;
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.h b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
index 7ccdb7083..0b5796fef 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.h
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
@@ -7,7 +7,6 @@
#include <array>
#include <cstddef>
#include <memory>
-#include <tuple>
#include <type_traits>
#include <unordered_map>
#include <utility>
@@ -22,12 +21,11 @@
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
#include "video_core/renderer_vulkan/vk_graphics_pipeline.h"
#include "video_core/renderer_vulkan/vk_renderpass_cache.h"
-#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
#include "video_core/renderer_vulkan/wrapper.h"
+#include "video_core/shader/memory_util.h"
#include "video_core/shader/registry.h"
#include "video_core/shader/shader_ir.h"
-#include "video_core/surface.h"
namespace Core {
class System;
@@ -47,46 +45,40 @@ class CachedShader;
using Shader = std::shared_ptr<CachedShader>;
using Maxwell = Tegra::Engines::Maxwell3D::Regs;
-using ProgramCode = std::vector<u64>;
-
struct GraphicsPipelineCacheKey {
FixedPipelineState fixed_state;
- std::array<GPUVAddr, Maxwell::MaxShaderProgram> shaders;
RenderPassParams renderpass_params;
+ std::array<GPUVAddr, Maxwell::MaxShaderProgram> shaders;
+    u64 padding; // Must be zeroed by users; required for unique object representations
- std::size_t Hash() const noexcept {
- std::size_t hash = fixed_state.Hash();
- for (const auto& shader : shaders) {
- boost::hash_combine(hash, shader);
- }
- boost::hash_combine(hash, renderpass_params.Hash());
- return hash;
- }
+ std::size_t Hash() const noexcept;
+
+ bool operator==(const GraphicsPipelineCacheKey& rhs) const noexcept;
- bool operator==(const GraphicsPipelineCacheKey& rhs) const noexcept {
- return std::tie(fixed_state, shaders, renderpass_params) ==
- std::tie(rhs.fixed_state, rhs.shaders, rhs.renderpass_params);
+ bool operator!=(const GraphicsPipelineCacheKey& rhs) const noexcept {
+ return !operator==(rhs);
}
};
+static_assert(std::has_unique_object_representations_v<GraphicsPipelineCacheKey>);
+static_assert(std::is_trivially_copyable_v<GraphicsPipelineCacheKey>);
+static_assert(std::is_trivially_constructible_v<GraphicsPipelineCacheKey>);
struct ComputePipelineCacheKey {
- GPUVAddr shader{};
- u32 shared_memory_size{};
- std::array<u32, 3> workgroup_size{};
-
- std::size_t Hash() const noexcept {
- return static_cast<std::size_t>(shader) ^
- ((static_cast<std::size_t>(shared_memory_size) >> 7) << 40) ^
- static_cast<std::size_t>(workgroup_size[0]) ^
- (static_cast<std::size_t>(workgroup_size[1]) << 16) ^
- (static_cast<std::size_t>(workgroup_size[2]) << 24);
- }
+ GPUVAddr shader;
+ u32 shared_memory_size;
+ std::array<u32, 3> workgroup_size;
+
+ std::size_t Hash() const noexcept;
- bool operator==(const ComputePipelineCacheKey& rhs) const noexcept {
- return std::tie(shader, shared_memory_size, workgroup_size) ==
- std::tie(rhs.shader, rhs.shared_memory_size, rhs.workgroup_size);
+ bool operator==(const ComputePipelineCacheKey& rhs) const noexcept;
+
+ bool operator!=(const ComputePipelineCacheKey& rhs) const noexcept {
+ return !operator==(rhs);
}
};
+static_assert(std::has_unique_object_representations_v<ComputePipelineCacheKey>);
+static_assert(std::is_trivially_copyable_v<ComputePipelineCacheKey>);
+static_assert(std::is_trivially_constructible_v<ComputePipelineCacheKey>);
} // namespace Vulkan
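Note: because equality is now memcmp and the hash covers sizeof(*this), every producer of a key must initialize all of its bytes, including the explicit padding member. A usage fragment mirroring RasterizerVulkan::Draw later in this patch, where gpu and texceptions are locals from that function and key.shaders is populated during shader setup (omitted here):

    GraphicsPipelineCacheKey key;
    key.fixed_state.Fill(gpu.regs);
    key.renderpass_params = GetRenderPassParams(texceptions);
    key.padding = 0; // every byte participates in Hash() and operator==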
@@ -113,7 +105,8 @@ namespace Vulkan {
class CachedShader final : public RasterizerCacheObject {
public:
explicit CachedShader(Core::System& system, Tegra::Engines::ShaderType stage, GPUVAddr gpu_addr,
- VAddr cpu_addr, ProgramCode program_code, u32 main_offset);
+ VAddr cpu_addr, VideoCommon::Shader::ProgramCode program_code,
+ u32 main_offset);
~CachedShader();
GPUVAddr GetGpuAddr() const {
@@ -145,7 +138,7 @@ private:
Tegra::Engines::ShaderType stage);
GPUVAddr gpu_addr{};
- ProgramCode program_code;
+ VideoCommon::Shader::ProgramCode program_code;
VideoCommon::Shader::Registry registry;
VideoCommon::Shader::ShaderIR shader_ir;
ShaderEntries entries;
@@ -182,6 +175,9 @@ private:
VKUpdateDescriptorQueue& update_descriptor_queue;
VKRenderPassCache& renderpass_cache;
+ Shader null_shader{};
+ Shader null_kernel{};
+
std::array<Shader, Maxwell::MaxShaderProgram> last_shaders;
GraphicsPipelineCacheKey last_graphics_key;
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp
index 0966c7ff7..bc91c48cc 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp
@@ -4,7 +4,6 @@
#include <algorithm>
#include <cstddef>
-#include <cstdint>
#include <utility>
#include <vector>
@@ -113,8 +112,19 @@ u64 HostCounter::BlockingQuery() const {
if (ticks >= cache.Scheduler().Ticks()) {
cache.Scheduler().Flush();
}
- return cache.Device().GetLogical().GetQueryResult<u64>(
- query.first, query.second, VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
+ u64 data;
+ const VkResult result = cache.Device().GetLogical().GetQueryResults(
+ query.first, query.second, 1, sizeof(data), &data, sizeof(data),
+ VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
+ switch (result) {
+ case VK_SUCCESS:
+ return data;
+ case VK_ERROR_DEVICE_LOST:
+ cache.Device().ReportLoss();
+ [[fallthrough]];
+ default:
+ throw vk::Exception(result);
+ }
}
} // namespace Vulkan
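Note: this query readback and VKScheduler::SubmitExecution (later in this patch) adopt the same device-loss protocol: report the loss first, so the newly added crash tracker can dump its state, then surface the error as an exception. Generic shape of that handling:

    // Sketch of the VK_ERROR_DEVICE_LOST pattern used in this patch.
    void CheckResult(const VKDevice& device, VkResult result) {
        switch (result) {
        case VK_SUCCESS:
            return;
        case VK_ERROR_DEVICE_LOST:
            device.ReportLoss(); // give the crash tracker a chance to dump state
            [[fallthrough]];
        default:
            throw vk::Exception(result);
        }
    }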
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.h b/src/video_core/renderer_vulkan/vk_query_cache.h
index b63784f4b..40119e6d3 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.h
+++ b/src/video_core/renderer_vulkan/vk_query_cache.h
@@ -5,7 +5,6 @@
#pragma once
#include <cstddef>
-#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 857bea19f..8b009fc22 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -9,14 +9,13 @@
#include <vector>
#include <boost/container/static_vector.hpp>
-#include <boost/functional/hash.hpp>
#include "common/alignment.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/microprofile.h"
#include "core/core.h"
-#include "core/memory.h"
+#include "core/settings.h"
#include "video_core/engines/kepler_compute.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
@@ -118,14 +117,13 @@ template <typename Engine, typename Entry>
Tegra::Texture::FullTextureInfo GetTextureInfo(const Engine& engine, const Entry& entry,
std::size_t stage, std::size_t index = 0) {
const auto stage_type = static_cast<Tegra::Engines::ShaderType>(stage);
- if (entry.IsBindless()) {
- const Tegra::Texture::TextureHandle tex_handle =
- engine.AccessConstBuffer32(stage_type, entry.GetBuffer(), entry.GetOffset());
+ if (entry.is_bindless) {
+ const auto tex_handle = engine.AccessConstBuffer32(stage_type, entry.buffer, entry.offset);
return engine.GetTextureInfo(tex_handle);
}
const auto& gpu_profile = engine.AccessGuestDriverProfile();
const u32 entry_offset = static_cast<u32>(index * gpu_profile.GetTextureHandlerSize());
- const u32 offset = entry.GetOffset() + entry_offset;
+ const u32 offset = entry.offset + entry_offset;
if constexpr (std::is_same_v<Engine, Tegra::Engines::Maxwell3D>) {
return engine.GetStageTexture(stage_type, offset);
} else {
@@ -292,14 +290,16 @@ RasterizerVulkan::RasterizerVulkan(Core::System& system, Core::Frontend::EmuWind
staging_pool(device, memory_manager, scheduler), descriptor_pool(device),
update_descriptor_queue(device, scheduler), renderpass_cache(device),
quad_array_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue),
- uint8_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue),
quad_indexed_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue),
+ uint8_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue),
texture_cache(system, *this, device, resource_manager, memory_manager, scheduler,
staging_pool),
pipeline_cache(system, *this, device, scheduler, descriptor_pool, update_descriptor_queue,
renderpass_cache),
buffer_cache(*this, system, device, memory_manager, scheduler, staging_pool),
- sampler_cache(device), query_cache(system, *this, device, scheduler) {
+ sampler_cache(device),
+ fence_manager(system, *this, device, scheduler, texture_cache, buffer_cache, query_cache),
+ query_cache(system, *this, device, scheduler), wfi_event{device.GetLogical().CreateEvent()} {
scheduler.SetQueryCache(query_cache);
}
@@ -313,7 +313,8 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
query_cache.UpdateCounters();
const auto& gpu = system.GPU().Maxwell3D();
- GraphicsPipelineCacheKey key{GetFixedPipelineState(gpu.regs)};
+ GraphicsPipelineCacheKey key;
+ key.fixed_state.Fill(gpu.regs);
buffer_cache.Map(CalculateGraphicsStreamBufferSize(is_indexed));
@@ -331,10 +332,11 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
buffer_cache.Unmap();
- const auto texceptions = UpdateAttachments();
+ const Texceptions texceptions = UpdateAttachments();
SetupImageTransitions(texceptions, color_attachments, zeta_attachment);
key.renderpass_params = GetRenderPassParams(texceptions);
+ key.padding = 0;
auto& pipeline = pipeline_cache.GetGraphicsPipeline(key);
scheduler.BindGraphicsPipeline(pipeline.GetHandle());
@@ -347,11 +349,6 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
buffer_bindings.Bind(scheduler);
- if (device.IsNvDeviceDiagnosticCheckpoints()) {
- scheduler.Record(
- [&pipeline](vk::CommandBuffer cmdbuf) { cmdbuf.SetCheckpointNV(&pipeline); });
- }
-
BeginTransformFeedback();
const auto pipeline_layout = pipeline.GetLayout();
@@ -365,6 +362,8 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
});
EndTransformFeedback();
+
+ system.GPU().TickWork();
}
void RasterizerVulkan::Clear() {
@@ -453,10 +452,12 @@ void RasterizerVulkan::DispatchCompute(GPUVAddr code_addr) {
query_cache.UpdateCounters();
const auto& launch_desc = system.GPU().KeplerCompute().launch_description;
- const ComputePipelineCacheKey key{
- code_addr,
- launch_desc.shared_alloc,
- {launch_desc.block_dim_x, launch_desc.block_dim_y, launch_desc.block_dim_z}};
+ ComputePipelineCacheKey key;
+ key.shader = code_addr;
+ key.shared_memory_size = launch_desc.shared_alloc;
+ key.workgroup_size = {launch_desc.block_dim_x, launch_desc.block_dim_y,
+ launch_desc.block_dim_z};
+
auto& pipeline = pipeline_cache.GetComputePipeline(key);
// Compute dispatches can't be executed inside a renderpass
@@ -478,11 +479,6 @@ void RasterizerVulkan::DispatchCompute(GPUVAddr code_addr) {
TransitionImages(image_views, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT);
- if (device.IsNvDeviceDiagnosticCheckpoints()) {
- scheduler.Record(
- [&pipeline](vk::CommandBuffer cmdbuf) { cmdbuf.SetCheckpointNV(nullptr); });
- }
-
scheduler.Record([grid_x = launch_desc.grid_dim_x, grid_y = launch_desc.grid_dim_y,
grid_z = launch_desc.grid_dim_z, pipeline_handle = pipeline.GetHandle(),
layout = pipeline.GetLayout(),
@@ -514,6 +510,13 @@ void RasterizerVulkan::FlushRegion(VAddr addr, u64 size) {
query_cache.FlushRegion(addr, size);
}
+bool RasterizerVulkan::MustFlushRegion(VAddr addr, u64 size) {
+ if (!Settings::IsGPULevelHigh()) {
+ return buffer_cache.MustFlushRegion(addr, size);
+ }
+ return texture_cache.MustFlushRegion(addr, size) || buffer_cache.MustFlushRegion(addr, size);
+}
+
void RasterizerVulkan::InvalidateRegion(VAddr addr, u64 size) {
if (addr == 0 || size == 0) {
return;
@@ -524,11 +527,72 @@ void RasterizerVulkan::InvalidateRegion(VAddr addr, u64 size) {
query_cache.InvalidateRegion(addr, size);
}
+void RasterizerVulkan::OnCPUWrite(VAddr addr, u64 size) {
+ if (addr == 0 || size == 0) {
+ return;
+ }
+ texture_cache.OnCPUWrite(addr, size);
+ pipeline_cache.InvalidateRegion(addr, size);
+ buffer_cache.OnCPUWrite(addr, size);
+ query_cache.InvalidateRegion(addr, size);
+}
+
+void RasterizerVulkan::SyncGuestHost() {
+ texture_cache.SyncGuestHost();
+ buffer_cache.SyncGuestHost();
+}
+
+void RasterizerVulkan::SignalSemaphore(GPUVAddr addr, u32 value) {
+ auto& gpu{system.GPU()};
+ if (!gpu.IsAsync()) {
+ gpu.MemoryManager().Write<u32>(addr, value);
+ return;
+ }
+ fence_manager.SignalSemaphore(addr, value);
+}
+
+void RasterizerVulkan::SignalSyncPoint(u32 value) {
+ auto& gpu{system.GPU()};
+ if (!gpu.IsAsync()) {
+ gpu.IncrementSyncPoint(value);
+ return;
+ }
+ fence_manager.SignalSyncPoint(value);
+}
+
+void RasterizerVulkan::ReleaseFences() {
+ auto& gpu{system.GPU()};
+ if (!gpu.IsAsync()) {
+ return;
+ }
+ fence_manager.WaitPendingFences();
+}
+
void RasterizerVulkan::FlushAndInvalidateRegion(VAddr addr, u64 size) {
FlushRegion(addr, size);
InvalidateRegion(addr, size);
}
+void RasterizerVulkan::WaitForIdle() {
+    // Wait on everything except pixel (attachment output) operations. This intentionally
+    // includes FRAGMENT_SHADER_BIT because fragment shaders can still write storage buffers.
+ VkPipelineStageFlags flags =
+ VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
+ VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
+ VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
+ VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
+ VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT;
+ if (device.IsExtTransformFeedbackSupported()) {
+ flags |= VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT;
+ }
+
+ scheduler.RequestOutsideRenderPassOperationContext();
+ scheduler.Record([event = *wfi_event, flags](vk::CommandBuffer cmdbuf) {
+ cmdbuf.SetEvent(event, flags);
+ cmdbuf.WaitEvents(event, flags, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, {}, {}, {});
+ });
+}
+
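Note: WaitForIdle stalls within the command stream: it sets an event after all the listed stages complete and immediately waits on it from TOP_OF_PIPE, draining prior GPU work without a CPU round-trip (unlike vkDeviceWaitIdle). A coarser alternative, sketched with the raw Vulkan API where cmdbuf is a plain VkCommandBuffer, would be a single all-commands memory barrier, at the cost of synchronizing every stage instead of the curated set above:

    VkMemoryBarrier barrier{};
    barrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
    barrier.srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT;
    barrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT;
    vkCmdPipelineBarrier(cmdbuf, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
                         VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 1, &barrier,
                         0, nullptr, 0, nullptr);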
void RasterizerVulkan::FlushCommands() {
if (draw_counter > 0) {
draw_counter = 0;
@@ -609,7 +673,7 @@ RasterizerVulkan::Texceptions RasterizerVulkan::UpdateAttachments() {
Texceptions texceptions;
for (std::size_t rt = 0; rt < Maxwell::NumRenderTargets; ++rt) {
if (update_rendertargets) {
- color_attachments[rt] = texture_cache.GetColorBufferSurface(rt);
+ color_attachments[rt] = texture_cache.GetColorBufferSurface(rt, true);
}
if (color_attachments[rt] && WalkAttachmentOverlaps(*color_attachments[rt])) {
texceptions[rt] = true;
@@ -617,7 +681,7 @@ RasterizerVulkan::Texceptions RasterizerVulkan::UpdateAttachments() {
}
if (update_rendertargets) {
- zeta_attachment = texture_cache.GetDepthBufferSurface();
+ zeta_attachment = texture_cache.GetDepthBufferSurface(true);
}
if (zeta_attachment && WalkAttachmentOverlaps(*zeta_attachment)) {
texceptions[ZETA_TEXCEPTION_INDEX] = true;
@@ -645,7 +709,7 @@ std::tuple<VkFramebuffer, VkExtent2D> RasterizerVulkan::ConfigureFramebuffers(
FramebufferCacheKey key{renderpass, std::numeric_limits<u32>::max(),
std::numeric_limits<u32>::max(), std::numeric_limits<u32>::max()};
- const auto try_push = [&](const View& view) {
+ const auto try_push = [&key](const View& view) {
if (!view) {
return false;
}
@@ -656,7 +720,9 @@ std::tuple<VkFramebuffer, VkExtent2D> RasterizerVulkan::ConfigureFramebuffers(
return true;
};
- for (std::size_t index = 0; index < std::size(color_attachments); ++index) {
+ const auto& regs = system.GPU().Maxwell3D().regs;
+ const std::size_t num_attachments = static_cast<std::size_t>(regs.rt_control.count);
+ for (std::size_t index = 0; index < num_attachments; ++index) {
if (try_push(color_attachments[index])) {
texture_cache.MarkColorBufferInUse(index);
}
@@ -807,42 +873,49 @@ void RasterizerVulkan::SetupVertexArrays(FixedPipelineState::VertexInput& vertex
BufferBindings& buffer_bindings) {
const auto& regs = system.GPU().Maxwell3D().regs;
- for (u32 index = 0; index < static_cast<u32>(Maxwell::NumVertexAttributes); ++index) {
+ for (std::size_t index = 0; index < Maxwell::NumVertexAttributes; ++index) {
const auto& attrib = regs.vertex_attrib_format[index];
if (!attrib.IsValid()) {
+ vertex_input.SetAttribute(index, false, 0, 0, {}, {});
continue;
}
- const auto& buffer = regs.vertex_array[attrib.buffer];
+ [[maybe_unused]] const auto& buffer = regs.vertex_array[attrib.buffer];
ASSERT(buffer.IsEnabled());
- vertex_input.attributes[vertex_input.num_attributes++] =
- FixedPipelineState::VertexAttribute(index, attrib.buffer, attrib.type, attrib.size,
- attrib.offset);
+ vertex_input.SetAttribute(index, true, attrib.buffer, attrib.offset, attrib.type.Value(),
+ attrib.size.Value());
}
- for (u32 index = 0; index < static_cast<u32>(Maxwell::NumVertexArrays); ++index) {
+ for (std::size_t index = 0; index < Maxwell::NumVertexArrays; ++index) {
const auto& vertex_array = regs.vertex_array[index];
if (!vertex_array.IsEnabled()) {
+ vertex_input.SetBinding(index, false, 0, 0);
continue;
}
+ vertex_input.SetBinding(
+ index, true, vertex_array.stride,
+ regs.instanced_arrays.IsInstancingEnabled(index) ? vertex_array.divisor : 0);
const GPUVAddr start{vertex_array.StartAddress()};
const GPUVAddr end{regs.vertex_array_limit[index].LimitAddress()};
- ASSERT(end > start);
- const std::size_t size{end - start + 1};
+ ASSERT(end >= start);
+ const std::size_t size{end - start};
+ if (size == 0) {
+ buffer_bindings.AddVertexBinding(DefaultBuffer(), 0);
+ continue;
+ }
const auto [buffer, offset] = buffer_cache.UploadMemory(start, size);
-
- vertex_input.bindings[vertex_input.num_bindings++] = FixedPipelineState::VertexBinding(
- index, vertex_array.stride,
- regs.instanced_arrays.IsInstancingEnabled(index) ? vertex_array.divisor : 0);
buffer_bindings.AddVertexBinding(buffer, offset);
}
}
void RasterizerVulkan::SetupIndexBuffer(BufferBindings& buffer_bindings, DrawParameters& params,
bool is_indexed) {
+ if (params.num_vertices == 0) {
+ return;
+ }
const auto& regs = system.GPU().Maxwell3D().regs;
switch (regs.draw.topology) {
case Maxwell::PrimitiveTopology::Quads: {
@@ -918,7 +991,7 @@ void RasterizerVulkan::SetupGraphicsTextures(const ShaderEntries& entries, std::
MICROPROFILE_SCOPE(Vulkan_Textures);
const auto& gpu = system.GPU().Maxwell3D();
for (const auto& entry : entries.samplers) {
- for (std::size_t i = 0; i < entry.Size(); ++i) {
+ for (std::size_t i = 0; i < entry.size; ++i) {
const auto texture = GetTextureInfo(gpu, entry, stage, i);
SetupTexture(texture, entry);
}
@@ -970,7 +1043,7 @@ void RasterizerVulkan::SetupComputeTextures(const ShaderEntries& entries) {
MICROPROFILE_SCOPE(Vulkan_Textures);
const auto& gpu = system.GPU().KeplerCompute();
for (const auto& entry : entries.samplers) {
- for (std::size_t i = 0; i < entry.Size(); ++i) {
+ for (std::size_t i = 0; i < entry.size; ++i) {
const auto texture = GetTextureInfo(gpu, entry, ComputeShaderIndex, i);
SetupTexture(texture, entry);
}
@@ -990,8 +1063,7 @@ void RasterizerVulkan::SetupConstBuffer(const ConstBufferEntry& entry,
const Tegra::Engines::ConstBufferInfo& buffer) {
if (!buffer.enabled) {
// Set values to zero to unbind buffers
- update_descriptor_queue.AddBuffer(buffer_cache.GetEmptyBuffer(sizeof(float)), 0,
- sizeof(float));
+ update_descriptor_queue.AddBuffer(DefaultBuffer(), 0, DEFAULT_BUFFER_SIZE);
return;
}
@@ -1014,7 +1086,9 @@ void RasterizerVulkan::SetupGlobalBuffer(const GlobalBufferEntry& entry, GPUVAdd
if (size == 0) {
// Sometimes global memory pointers don't have a proper size. Upload a dummy entry
// because Vulkan doesn't like empty buffers.
- constexpr std::size_t dummy_size = 4;
+        // Note: Do *not* use DefaultBuffer() here; storage buffers can be written to, and a
+        // write would corrupt the shared default buffer.
+ static constexpr std::size_t dummy_size = 4;
const auto buffer = buffer_cache.GetEmptyBuffer(dummy_size);
update_descriptor_queue.AddBuffer(buffer, 0, dummy_size);
return;
@@ -1051,7 +1125,7 @@ void RasterizerVulkan::SetupTexture(const Tegra::Texture::FullTextureInfo& textu
void RasterizerVulkan::SetupImage(const Tegra::Texture::TICEntry& tic, const ImageEntry& entry) {
auto view = texture_cache.GetImageSurface(tic, entry);
- if (entry.IsWritten()) {
+ if (entry.is_written) {
view->MarkAsModified(texture_cache.Tick());
}
@@ -1179,7 +1253,7 @@ std::size_t RasterizerVulkan::CalculateVertexArraysSize() const {
const GPUVAddr end{regs.vertex_array_limit[index].LimitAddress()};
DEBUG_ASSERT(end >= start);
- size += (end - start + 1) * regs.vertex_array[index].enable;
+ size += (end - start) * regs.vertex_array[index].enable;
}
return size;
}
@@ -1202,28 +1276,54 @@ std::size_t RasterizerVulkan::CalculateConstBufferSize(
}
RenderPassParams RasterizerVulkan::GetRenderPassParams(Texceptions texceptions) const {
- using namespace VideoCore::Surface;
-
const auto& regs = system.GPU().Maxwell3D().regs;
- RenderPassParams renderpass_params;
+ const std::size_t num_attachments = static_cast<std::size_t>(regs.rt_control.count);
- for (std::size_t rt = 0; rt < static_cast<std::size_t>(regs.rt_control.count); ++rt) {
+ RenderPassParams params;
+ params.color_formats = {};
+ std::size_t color_texceptions = 0;
+
+ std::size_t index = 0;
+ for (std::size_t rt = 0; rt < num_attachments; ++rt) {
const auto& rendertarget = regs.rt[rt];
if (rendertarget.Address() == 0 || rendertarget.format == Tegra::RenderTargetFormat::NONE) {
continue;
}
- renderpass_params.color_attachments.push_back(RenderPassParams::ColorAttachment{
- static_cast<u32>(rt), PixelFormatFromRenderTargetFormat(rendertarget.format),
- texceptions[rt]});
+ params.color_formats[index] = static_cast<u8>(rendertarget.format);
+ color_texceptions |= (texceptions[rt] ? 1ULL : 0ULL) << index;
+ ++index;
}
+ params.num_color_attachments = static_cast<u8>(index);
+ params.texceptions = static_cast<u8>(color_texceptions);
+
+ params.zeta_format = regs.zeta_enable ? static_cast<u8>(regs.zeta.format) : 0;
+ params.zeta_texception = texceptions[ZETA_TEXCEPTION_INDEX];
+ return params;
+}
- renderpass_params.has_zeta = regs.zeta_enable;
- if (renderpass_params.has_zeta) {
- renderpass_params.zeta_pixel_format = PixelFormatFromDepthFormat(regs.zeta.format);
- renderpass_params.zeta_texception = texceptions[ZETA_TEXCEPTION_INDEX];
+VkBuffer RasterizerVulkan::DefaultBuffer() {
+ if (default_buffer) {
+ return *default_buffer;
}
- return renderpass_params;
+ VkBufferCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ ci.size = DEFAULT_BUFFER_SIZE;
+ ci.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
+ VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
+ ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ ci.queueFamilyIndexCount = 0;
+ ci.pQueueFamilyIndices = nullptr;
+ default_buffer = device.GetLogical().CreateBuffer(ci);
+ default_buffer_commit = memory_manager.Commit(default_buffer, false);
+
+ scheduler.RequestOutsideRenderPassOperationContext();
+ scheduler.Record([buffer = *default_buffer](vk::CommandBuffer cmdbuf) {
+ cmdbuf.FillBuffer(buffer, 0, DEFAULT_BUFFER_SIZE, 0);
+ });
+ return *default_buffer;
}
} // namespace Vulkan
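Note: DefaultBuffer lazily creates a small zero-filled buffer (FillBuffer with data 0) that stands in wherever a binding must exist but no guest data does: disabled constant buffers and empty vertex arrays, as seen earlier in this file. Usage fragment from SetupConstBuffer above:

    // Unbound constant buffers point at the shared zeroed buffer instead of
    // allocating a fresh dummy per call.
    update_descriptor_queue.AddBuffer(DefaultBuffer(), 0, DEFAULT_BUFFER_SIZE);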
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index d9108f862..0ed0e48c6 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -14,13 +14,13 @@
#include <boost/functional/hash.hpp>
#include "common/common_types.h"
-#include "video_core/memory_manager.h"
#include "video_core/rasterizer_accelerated.h"
#include "video_core/rasterizer_interface.h"
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
#include "video_core/renderer_vulkan/vk_buffer_cache.h"
#include "video_core/renderer_vulkan/vk_compute_pass.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
+#include "video_core/renderer_vulkan/vk_fence_manager.h"
#include "video_core/renderer_vulkan/vk_memory_manager.h"
#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
#include "video_core/renderer_vulkan/vk_query_cache.h"
@@ -118,8 +118,15 @@ public:
void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) override;
void FlushAll() override;
void FlushRegion(VAddr addr, u64 size) override;
+ bool MustFlushRegion(VAddr addr, u64 size) override;
void InvalidateRegion(VAddr addr, u64 size) override;
+ void OnCPUWrite(VAddr addr, u64 size) override;
+ void SyncGuestHost() override;
+ void SignalSemaphore(GPUVAddr addr, u32 value) override;
+ void SignalSyncPoint(u32 value) override;
+ void ReleaseFences() override;
void FlushAndInvalidateRegion(VAddr addr, u64 size) override;
+ void WaitForIdle() override;
void FlushCommands() override;
void TickFrame() override;
bool AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src,
@@ -148,6 +155,7 @@ private:
using Texceptions = std::bitset<Maxwell::NumRenderTargets + 1>;
static constexpr std::size_t ZETA_TEXCEPTION_INDEX = 8;
+ static constexpr VkDeviceSize DEFAULT_BUFFER_SIZE = 4 * sizeof(float);
void FlushWork();
@@ -240,6 +248,8 @@ private:
RenderPassParams GetRenderPassParams(Texceptions texceptions) const;
+ VkBuffer DefaultBuffer();
+
Core::System& system;
Core::Frontend::EmuWindow& render_window;
VKScreenInfo& screen_info;
@@ -261,8 +271,13 @@ private:
VKPipelineCache pipeline_cache;
VKBufferCache buffer_cache;
VKSamplerCache sampler_cache;
+ VKFenceManager fence_manager;
VKQueryCache query_cache;
+ vk::Buffer default_buffer;
+ VKMemoryCommit default_buffer_commit;
+ vk::Event wfi_event;
+
std::array<View, Maxwell::NumRenderTargets> color_attachments;
View zeta_attachment;
diff --git a/src/video_core/renderer_vulkan/vk_renderpass_cache.cpp b/src/video_core/renderer_vulkan/vk_renderpass_cache.cpp
index 4e5286a69..3f71d005e 100644
--- a/src/video_core/renderer_vulkan/vk_renderpass_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_renderpass_cache.cpp
@@ -2,9 +2,11 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include <cstring>
#include <memory>
#include <vector>
+#include "common/cityhash.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/renderer_vulkan/maxwell_to_vk.h"
#include "video_core/renderer_vulkan/vk_device.h"
@@ -13,6 +15,15 @@
namespace Vulkan {
+std::size_t RenderPassParams::Hash() const noexcept {
+ const u64 hash = Common::CityHash64(reinterpret_cast<const char*>(this), sizeof *this);
+ return static_cast<std::size_t>(hash);
+}
+
+bool RenderPassParams::operator==(const RenderPassParams& rhs) const noexcept {
+ return std::memcmp(&rhs, this, sizeof *this) == 0;
+}
+
VKRenderPassCache::VKRenderPassCache(const VKDevice& device) : device{device} {}
VKRenderPassCache::~VKRenderPassCache() = default;
@@ -27,20 +38,22 @@ VkRenderPass VKRenderPassCache::GetRenderPass(const RenderPassParams& params) {
}
vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& params) const {
+ using namespace VideoCore::Surface;
std::vector<VkAttachmentDescription> descriptors;
std::vector<VkAttachmentReference> color_references;
- for (std::size_t rt = 0; rt < params.color_attachments.size(); ++rt) {
- const auto attachment = params.color_attachments[rt];
- const auto format =
- MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, attachment.pixel_format);
+ const std::size_t num_attachments = static_cast<std::size_t>(params.num_color_attachments);
+ for (std::size_t rt = 0; rt < num_attachments; ++rt) {
+ const auto guest_format = static_cast<Tegra::RenderTargetFormat>(params.color_formats[rt]);
+ const PixelFormat pixel_format = PixelFormatFromRenderTargetFormat(guest_format);
+ const auto format = MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, pixel_format);
ASSERT_MSG(format.attachable, "Trying to attach a non-attachable format with format={}",
- static_cast<u32>(attachment.pixel_format));
+ static_cast<int>(pixel_format));
- // TODO(Rodrigo): Add eMayAlias when it's needed.
- const auto color_layout = attachment.is_texception
- ? VK_IMAGE_LAYOUT_GENERAL
- : VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ // TODO(Rodrigo): Add MAY_ALIAS_BIT when it's needed.
+ const VkImageLayout color_layout = ((params.texceptions >> rt) & 1) != 0
+ ? VK_IMAGE_LAYOUT_GENERAL
+ : VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
VkAttachmentDescription& descriptor = descriptors.emplace_back();
descriptor.flags = VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT;
descriptor.format = format.format;
@@ -58,15 +71,17 @@ vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& param
}
VkAttachmentReference zeta_attachment_ref;
- if (params.has_zeta) {
- const auto format =
- MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, params.zeta_pixel_format);
+ const bool has_zeta = params.zeta_format != 0;
+ if (has_zeta) {
+ const auto guest_format = static_cast<Tegra::DepthFormat>(params.zeta_format);
+ const PixelFormat pixel_format = PixelFormatFromDepthFormat(guest_format);
+ const auto format = MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, pixel_format);
ASSERT_MSG(format.attachable, "Trying to attach a non-attachable format with format={}",
- static_cast<u32>(params.zeta_pixel_format));
+ static_cast<int>(pixel_format));
- const auto zeta_layout = params.zeta_texception
- ? VK_IMAGE_LAYOUT_GENERAL
- : VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+ const VkImageLayout zeta_layout = params.zeta_texception != 0
+ ? VK_IMAGE_LAYOUT_GENERAL
+ : VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
VkAttachmentDescription& descriptor = descriptors.emplace_back();
descriptor.flags = 0;
descriptor.format = format.format;
@@ -78,7 +93,7 @@ vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& param
descriptor.initialLayout = zeta_layout;
descriptor.finalLayout = zeta_layout;
- zeta_attachment_ref.attachment = static_cast<u32>(params.color_attachments.size());
+ zeta_attachment_ref.attachment = static_cast<u32>(num_attachments);
zeta_attachment_ref.layout = zeta_layout;
}
@@ -90,7 +105,7 @@ vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& param
subpass_description.colorAttachmentCount = static_cast<u32>(color_references.size());
subpass_description.pColorAttachments = color_references.data();
subpass_description.pResolveAttachments = nullptr;
- subpass_description.pDepthStencilAttachment = params.has_zeta ? &zeta_attachment_ref : nullptr;
+ subpass_description.pDepthStencilAttachment = has_zeta ? &zeta_attachment_ref : nullptr;
subpass_description.preserveAttachmentCount = 0;
subpass_description.pPreserveAttachments = nullptr;
@@ -101,7 +116,7 @@ vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& param
stage |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
}
- if (params.has_zeta) {
+ if (has_zeta) {
access |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
stage |= VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
diff --git a/src/video_core/renderer_vulkan/vk_renderpass_cache.h b/src/video_core/renderer_vulkan/vk_renderpass_cache.h
index 921b6efb5..8b0fec720 100644
--- a/src/video_core/renderer_vulkan/vk_renderpass_cache.h
+++ b/src/video_core/renderer_vulkan/vk_renderpass_cache.h
@@ -4,8 +4,7 @@
#pragma once
-#include <memory>
-#include <tuple>
+#include <type_traits>
#include <unordered_map>
#include <boost/container/static_vector.hpp>
@@ -19,51 +18,25 @@ namespace Vulkan {
class VKDevice;
-// TODO(Rodrigo): Optimize this structure for faster hashing
-
struct RenderPassParams {
- struct ColorAttachment {
- u32 index = 0;
- VideoCore::Surface::PixelFormat pixel_format = VideoCore::Surface::PixelFormat::Invalid;
- bool is_texception = false;
-
- std::size_t Hash() const noexcept {
- return static_cast<std::size_t>(pixel_format) |
- static_cast<std::size_t>(is_texception) << 6 |
- static_cast<std::size_t>(index) << 7;
- }
-
- bool operator==(const ColorAttachment& rhs) const noexcept {
- return std::tie(index, pixel_format, is_texception) ==
- std::tie(rhs.index, rhs.pixel_format, rhs.is_texception);
- }
- };
-
- boost::container::static_vector<ColorAttachment,
- Tegra::Engines::Maxwell3D::Regs::NumRenderTargets>
- color_attachments{};
- // TODO(Rodrigo): Unify has_zeta into zeta_pixel_format and zeta_component_type.
- VideoCore::Surface::PixelFormat zeta_pixel_format = VideoCore::Surface::PixelFormat::Invalid;
- bool has_zeta = false;
- bool zeta_texception = false;
-
- std::size_t Hash() const noexcept {
- std::size_t hash = 0;
- for (const auto& rt : color_attachments) {
- boost::hash_combine(hash, rt.Hash());
- }
- boost::hash_combine(hash, zeta_pixel_format);
- boost::hash_combine(hash, has_zeta);
- boost::hash_combine(hash, zeta_texception);
- return hash;
- }
+ std::array<u8, Tegra::Engines::Maxwell3D::Regs::NumRenderTargets> color_formats;
+ u8 num_color_attachments;
+ u8 texceptions;
+
+ u8 zeta_format;
+ u8 zeta_texception;
+
+ std::size_t Hash() const noexcept;
+
+ bool operator==(const RenderPassParams& rhs) const noexcept;
- bool operator==(const RenderPassParams& rhs) const {
- return std::tie(color_attachments, zeta_pixel_format, has_zeta, zeta_texception) ==
- std::tie(rhs.color_attachments, rhs.zeta_pixel_format, rhs.has_zeta,
- rhs.zeta_texception);
+ bool operator!=(const RenderPassParams& rhs) const noexcept {
+ return !operator==(rhs);
}
};
+static_assert(std::has_unique_object_representations_v<RenderPassParams>);
+static_assert(std::is_trivially_copyable_v<RenderPassParams>);
+static_assert(std::is_trivially_constructible_v<RenderPassParams>);
} // namespace Vulkan
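Note: RenderPassParams shrank from a static_vector of per-attachment structs to a fixed, tightly packed POD: guest formats travel as raw u8 enum values and texceptions as a bitmask, making the whole struct byte-hashable with the same CityHash/memcmp pattern as the pipeline keys. The render pass cache widens the values back on use, as in CreateRenderPass:

    // Decode step: u8 payload -> guest enum -> PixelFormat, plus one bit of the
    // texception mask per render target.
    const auto guest_format = static_cast<Tegra::RenderTargetFormat>(params.color_formats[rt]);
    const PixelFormat pixel_format = PixelFormatFromRenderTargetFormat(guest_format);
    const bool is_texception = ((params.texceptions >> rt) & 1) != 0;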
diff --git a/src/video_core/renderer_vulkan/vk_sampler_cache.cpp b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
index 07bbcf520..2687d8d95 100644
--- a/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
@@ -2,11 +2,8 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
-#include <cstring>
-#include <optional>
#include <unordered_map>
-#include "common/assert.h"
#include "video_core/renderer_vulkan/maxwell_to_vk.h"
#include "video_core/renderer_vulkan/vk_sampler_cache.h"
#include "video_core/renderer_vulkan/wrapper.h"
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.cpp b/src/video_core/renderer_vulkan/vk_scheduler.cpp
index 900f551b3..82ec9180e 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.cpp
+++ b/src/video_core/renderer_vulkan/vk_scheduler.cpp
@@ -8,7 +8,6 @@
#include <thread>
#include <utility>
-#include "common/assert.h"
#include "common/microprofile.h"
#include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_query_cache.h"
@@ -166,7 +165,15 @@ void VKScheduler::SubmitExecution(VkSemaphore semaphore) {
submit_info.pCommandBuffers = current_cmdbuf.address();
submit_info.signalSemaphoreCount = semaphore ? 1 : 0;
submit_info.pSignalSemaphores = &semaphore;
- device.GetGraphicsQueue().Submit(submit_info, *current_fence);
+ switch (const VkResult result = device.GetGraphicsQueue().Submit(submit_info, *current_fence)) {
+ case VK_SUCCESS:
+ break;
+ case VK_ERROR_DEVICE_LOST:
+ device.ReportLoss();
+ [[fallthrough]];
+ default:
+ vk::Check(result);
+ }
}
void VKScheduler::AllocateNewContext() {
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.h b/src/video_core/renderer_vulkan/vk_scheduler.h
index 82a8adc69..970a65566 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.h
+++ b/src/video_core/renderer_vulkan/vk_scheduler.h
@@ -7,7 +7,6 @@
#include <atomic>
#include <condition_variable>
#include <memory>
-#include <optional>
#include <stack>
#include <thread>
#include <utility>
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
index aaa138f52..18678968c 100644
--- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
+++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
@@ -103,8 +103,8 @@ struct GenericVaryingDescription {
};
spv::Dim GetSamplerDim(const Sampler& sampler) {
- ASSERT(!sampler.IsBuffer());
- switch (sampler.GetType()) {
+ ASSERT(!sampler.is_buffer);
+ switch (sampler.type) {
case Tegra::Shader::TextureType::Texture1D:
return spv::Dim::Dim1D;
case Tegra::Shader::TextureType::Texture2D:
@@ -114,13 +114,13 @@ spv::Dim GetSamplerDim(const Sampler& sampler) {
case Tegra::Shader::TextureType::TextureCube:
return spv::Dim::Cube;
default:
- UNIMPLEMENTED_MSG("Unimplemented sampler type={}", static_cast<u32>(sampler.GetType()));
+ UNIMPLEMENTED_MSG("Unimplemented sampler type={}", static_cast<int>(sampler.type));
return spv::Dim::Dim2D;
}
}
std::pair<spv::Dim, bool> GetImageDim(const Image& image) {
- switch (image.GetType()) {
+ switch (image.type) {
case Tegra::Shader::ImageType::Texture1D:
return {spv::Dim::Dim1D, false};
case Tegra::Shader::ImageType::TextureBuffer:
@@ -134,7 +134,7 @@ std::pair<spv::Dim, bool> GetImageDim(const Image& image) {
case Tegra::Shader::ImageType::Texture3D:
return {spv::Dim::Dim3D, false};
default:
- UNIMPLEMENTED_MSG("Unimplemented image type={}", static_cast<u32>(image.GetType()));
+ UNIMPLEMENTED_MSG("Unimplemented image type={}", static_cast<int>(image.type));
return {spv::Dim::Dim2D, false};
}
}
@@ -879,11 +879,11 @@ private:
u32 DeclareTexelBuffers(u32 binding) {
for (const auto& sampler : ir.GetSamplers()) {
- if (!sampler.IsBuffer()) {
+ if (!sampler.is_buffer) {
continue;
}
- ASSERT(!sampler.IsArray());
- ASSERT(!sampler.IsShadow());
+ ASSERT(!sampler.is_array);
+ ASSERT(!sampler.is_shadow);
constexpr auto dim = spv::Dim::Buffer;
constexpr int depth = 0;
@@ -894,23 +894,23 @@ private:
const Id image_type = TypeImage(t_float, dim, depth, arrayed, ms, sampled, format);
const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, image_type);
const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant);
- AddGlobalVariable(Name(id, fmt::format("sampler_{}", sampler.GetIndex())));
+ AddGlobalVariable(Name(id, fmt::format("sampler_{}", sampler.index)));
Decorate(id, spv::Decoration::Binding, binding++);
Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);
- texel_buffers.emplace(sampler.GetIndex(), TexelBuffer{image_type, id});
+ texel_buffers.emplace(sampler.index, TexelBuffer{image_type, id});
}
return binding;
}
u32 DeclareSamplers(u32 binding) {
for (const auto& sampler : ir.GetSamplers()) {
- if (sampler.IsBuffer()) {
+ if (sampler.is_buffer) {
continue;
}
const auto dim = GetSamplerDim(sampler);
- const int depth = sampler.IsShadow() ? 1 : 0;
- const int arrayed = sampler.IsArray() ? 1 : 0;
+ const int depth = sampler.is_shadow ? 1 : 0;
+ const int arrayed = sampler.is_array ? 1 : 0;
constexpr bool ms = false;
constexpr int sampled = 1;
constexpr auto format = spv::ImageFormat::Unknown;
@@ -918,17 +918,17 @@ private:
const Id sampler_type = TypeSampledImage(image_type);
const Id sampler_pointer_type =
TypePointer(spv::StorageClass::UniformConstant, sampler_type);
- const Id type = sampler.IsIndexed()
- ? TypeArray(sampler_type, Constant(t_uint, sampler.Size()))
+ const Id type = sampler.is_indexed
+ ? TypeArray(sampler_type, Constant(t_uint, sampler.size))
: sampler_type;
const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, type);
const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant);
- AddGlobalVariable(Name(id, fmt::format("sampler_{}", sampler.GetIndex())));
+ AddGlobalVariable(Name(id, fmt::format("sampler_{}", sampler.index)));
Decorate(id, spv::Decoration::Binding, binding++);
Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);
- sampled_images.emplace(sampler.GetIndex(), SampledImage{image_type, sampler_type,
- sampler_pointer_type, id});
+ sampled_images.emplace(
+ sampler.index, SampledImage{image_type, sampler_type, sampler_pointer_type, id});
}
return binding;
}
@@ -943,17 +943,17 @@ private:
const Id image_type = TypeImage(t_uint, dim, depth, arrayed, ms, sampled, format, {});
const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, image_type);
const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant);
- AddGlobalVariable(Name(id, fmt::format("image_{}", image.GetIndex())));
+ AddGlobalVariable(Name(id, fmt::format("image_{}", image.index)));
Decorate(id, spv::Decoration::Binding, binding++);
Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);
- if (image.IsRead() && !image.IsWritten()) {
+ if (image.is_read && !image.is_written) {
Decorate(id, spv::Decoration::NonWritable);
- } else if (image.IsWritten() && !image.IsRead()) {
+ } else if (image.is_written && !image.is_read) {
Decorate(id, spv::Decoration::NonReadable);
}
- images.emplace(static_cast<u32>(image.GetIndex()), StorageImage{image_type, id});
+ images.emplace(image.index, StorageImage{image_type, id});
}
return binding;
}
@@ -1584,6 +1584,15 @@ private:
return {OpCompositeConstruct(t_half, low, high), Type::HalfFloat};
}
+ Expression LogicalAddCarry(Operation operation) {
+ const Id op_a = AsUint(Visit(operation[0]));
+ const Id op_b = AsUint(Visit(operation[1]));
+
+ const Id result = OpIAddCarry(TypeStruct({t_uint, t_uint}), op_a, op_b);
+ const Id carry = OpCompositeExtract(t_uint, result, 1);
+ return {OpINotEqual(t_bool, carry, Constant(t_uint, 0)), Type::Bool};
+ }
+
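Note: LogicalAddCarry lowers the IR's add-with-carry predicate to SPIR-V's OpIAddCarry, which returns a {sum, carry} struct; the carry member is nonzero exactly when the 32-bit unsigned add wraps. Scalar model for intuition:

    #include <cstdint>

    // Carry-out of an unsigned 32-bit add, matching what the SPIR-V computes.
    bool AddCarry(std::uint32_t a, std::uint32_t b) {
        const std::uint32_t sum = a + b; // wraps modulo 2^32
        return sum < a;                  // wrapped iff there was a carry out
    }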
Expression LogicalAssign(Operation operation) {
const Node& dest = operation[0];
const Node& src = operation[1];
@@ -1611,11 +1620,11 @@ private:
Id GetTextureSampler(Operation operation) {
const auto& meta = std::get<MetaTexture>(operation.GetMeta());
- ASSERT(!meta.sampler.IsBuffer());
+ ASSERT(!meta.sampler.is_buffer);
- const auto& entry = sampled_images.at(meta.sampler.GetIndex());
+ const auto& entry = sampled_images.at(meta.sampler.index);
Id sampler = entry.variable;
- if (meta.sampler.IsIndexed()) {
+ if (meta.sampler.is_indexed) {
const Id index = AsInt(Visit(meta.index));
sampler = OpAccessChain(entry.sampler_pointer_type, sampler, index);
}
@@ -1624,8 +1633,8 @@ private:
Id GetTextureImage(Operation operation) {
const auto& meta = std::get<MetaTexture>(operation.GetMeta());
- const u32 index = meta.sampler.GetIndex();
- if (meta.sampler.IsBuffer()) {
+ const u32 index = meta.sampler.index;
+ if (meta.sampler.is_buffer) {
const auto& entry = texel_buffers.at(index);
return OpLoad(entry.image_type, entry.image);
} else {
@@ -1636,7 +1645,7 @@ private:
Id GetImage(Operation operation) {
const auto& meta = std::get<MetaImage>(operation.GetMeta());
- const auto entry = images.at(meta.image.GetIndex());
+ const auto entry = images.at(meta.image.index);
return OpLoad(entry.image_type, entry.image);
}
@@ -1652,7 +1661,7 @@ private:
}
if (const auto meta = std::get_if<MetaTexture>(&operation.GetMeta())) {
// Add array coordinate for textures
- if (meta->sampler.IsArray()) {
+ if (meta->sampler.is_array) {
Id array = AsInt(Visit(meta->array));
if (type == Type::Float) {
array = OpConvertSToF(t_float, array);
@@ -1758,7 +1767,7 @@ private:
operands.push_back(GetOffsetCoordinates(operation));
}
- if (meta.sampler.IsShadow()) {
+ if (meta.sampler.is_shadow) {
const Id dref = AsFloat(Visit(meta.depth_compare));
return {OpImageSampleDrefExplicitLod(t_float, sampler, coords, dref, mask, operands),
Type::Float};
@@ -1773,7 +1782,7 @@ private:
const Id coords = GetCoordinates(operation, Type::Float);
Id texture{};
- if (meta.sampler.IsShadow()) {
+ if (meta.sampler.is_shadow) {
texture = OpImageDrefGather(t_float4, GetTextureSampler(operation), coords,
AsFloat(Visit(meta.depth_compare)));
} else {
@@ -1800,8 +1809,8 @@ private:
}
const Id lod = AsUint(Visit(operation[0]));
- const std::size_t coords_count = [&]() {
- switch (const auto type = meta.sampler.GetType(); type) {
+ const std::size_t coords_count = [&meta] {
+ switch (const auto type = meta.sampler.type) {
case Tegra::Shader::TextureType::Texture1D:
return 1;
case Tegra::Shader::TextureType::Texture2D:
@@ -1810,7 +1819,7 @@ private:
case Tegra::Shader::TextureType::Texture3D:
return 3;
default:
- UNREACHABLE_MSG("Invalid texture type={}", static_cast<u32>(type));
+ UNREACHABLE_MSG("Invalid texture type={}", static_cast<int>(type));
return 2;
}
}();
@@ -1853,7 +1862,7 @@ private:
const Id image = GetTextureImage(operation);
const Id coords = GetCoordinates(operation, Type::Int);
Id fetch;
- if (meta.lod && !meta.sampler.IsBuffer()) {
+ if (meta.lod && !meta.sampler.is_buffer) {
fetch = OpImageFetch(t_float4, image, coords, spv::ImageOperandsMask::Lod,
AsInt(Visit(meta.lod)));
} else {
@@ -2518,6 +2527,8 @@ private:
&SPIRVDecompiler::Binary<&Module::OpINotEqual, Type::Bool, Type::Uint>,
&SPIRVDecompiler::Binary<&Module::OpUGreaterThanEqual, Type::Bool, Type::Uint>,
+ &SPIRVDecompiler::LogicalAddCarry,
+
&SPIRVDecompiler::Binary<&Module::OpFOrdLessThan, Type::Bool2, Type::HalfFloat>,
&SPIRVDecompiler::Binary<&Module::OpFOrdEqual, Type::Bool2, Type::HalfFloat>,
&SPIRVDecompiler::Binary<&Module::OpFOrdLessThanEqual, Type::Bool2, Type::HalfFloat>,
@@ -2969,7 +2980,7 @@ ShaderEntries GenerateShaderEntries(const VideoCommon::Shader::ShaderIR& ir) {
entries.global_buffers.emplace_back(base.cbuf_index, base.cbuf_offset, usage.is_written);
}
for (const auto& sampler : ir.GetSamplers()) {
- if (sampler.IsBuffer()) {
+ if (sampler.is_buffer) {
entries.texel_buffers.emplace_back(sampler);
} else {
entries.samplers.emplace_back(sampler);
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.h b/src/video_core/renderer_vulkan/vk_shader_decompiler.h
index ffea4709e..f4c05ac3c 100644
--- a/src/video_core/renderer_vulkan/vk_shader_decompiler.h
+++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.h
@@ -5,11 +5,7 @@
#pragma once
#include <array>
-#include <bitset>
-#include <memory>
#include <set>
-#include <type_traits>
-#include <utility>
#include <vector>
#include "common/common_types.h"
diff --git a/src/video_core/renderer_vulkan/vk_shader_util.cpp b/src/video_core/renderer_vulkan/vk_shader_util.cpp
index 784839327..112df9c71 100644
--- a/src/video_core/renderer_vulkan/vk_shader_util.cpp
+++ b/src/video_core/renderer_vulkan/vk_shader_util.cpp
@@ -4,8 +4,7 @@
#include <cstring>
#include <memory>
-#include <vector>
-#include "common/alignment.h"
+
#include "common/assert.h"
#include "common/common_types.h"
#include "video_core/renderer_vulkan/vk_device.h"
diff --git a/src/video_core/renderer_vulkan/vk_shader_util.h b/src/video_core/renderer_vulkan/vk_shader_util.h
index be38d6697..d1d3f3cae 100644
--- a/src/video_core/renderer_vulkan/vk_shader_util.h
+++ b/src/video_core/renderer_vulkan/vk_shader_util.h
@@ -4,7 +4,6 @@
#pragma once
-#include <vector>
#include "common/common_types.h"
#include "video_core/renderer_vulkan/wrapper.h"
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
index 94d954d7a..45c180221 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
@@ -39,8 +39,7 @@ VKStagingBufferPool::StagingBuffer& VKStagingBufferPool::StagingBuffer::operator
VKStagingBufferPool::VKStagingBufferPool(const VKDevice& device, VKMemoryManager& memory_manager,
VKScheduler& scheduler)
- : device{device}, memory_manager{memory_manager}, scheduler{scheduler},
- is_device_integrated{device.IsIntegrated()} {}
+ : device{device}, memory_manager{memory_manager}, scheduler{scheduler} {}
VKStagingBufferPool::~VKStagingBufferPool() = default;
@@ -56,9 +55,7 @@ void VKStagingBufferPool::TickFrame() {
current_delete_level = (current_delete_level + 1) % NumLevels;
ReleaseCache(true);
- if (!is_device_integrated) {
- ReleaseCache(false);
- }
+ ReleaseCache(false);
}
VKBuffer* VKStagingBufferPool::TryGetReservedBuffer(std::size_t size, bool host_visible) {
@@ -81,7 +78,7 @@ VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_v
ci.size = 1ULL << log2;
ci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
- VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
+ VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
ci.queueFamilyIndexCount = 0;
ci.pQueueFamilyIndices = nullptr;
@@ -95,7 +92,7 @@ VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_v
}
VKStagingBufferPool::StagingBuffersCache& VKStagingBufferPool::GetCache(bool host_visible) {
- return is_device_integrated || host_visible ? host_staging_buffers : device_staging_buffers;
+ return host_visible ? host_staging_buffers : device_staging_buffers;
}
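[Editor's note] With the integrated-GPU special case removed, every device now keeps both a host-visible and a device-local cache, and TickFrame releases both. The `ci.size = 1ULL << log2` line above rounds each request up to a power of two so a buffer can be reused by any later request in the same bucket. A minimal sketch of that bucketing, assuming a Log2Ceil-style helper rather than yuzu's actual one:

    // Requests of, e.g., 3000 and 4096 bytes land in the same 4 KiB bucket.
    unsigned BucketFor(std::size_t size) {
        unsigned log2 = 0;
        while ((std::size_t{1} << log2) < size) {
            ++log2; // round up to the next power of two
        }
        return log2;
    }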
void VKStagingBufferPool::ReleaseCache(bool host_visible) {
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
index a0840ff8c..3c4901437 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
@@ -5,8 +5,6 @@
#pragma once
#include <climits>
-#include <unordered_map>
-#include <utility>
#include <vector>
#include "common/common_types.h"
@@ -71,7 +69,6 @@ private:
const VKDevice& device;
VKMemoryManager& memory_manager;
VKScheduler& scheduler;
- const bool is_device_integrated;
StagingBuffersCache host_staging_buffers;
StagingBuffersCache device_staging_buffers;
diff --git a/src/video_core/renderer_vulkan/vk_stream_buffer.cpp b/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
index 38a93a01a..868447af2 100644
--- a/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
+++ b/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
@@ -3,6 +3,7 @@
// Refer to the license.txt file included.
#include <algorithm>
+#include <limits>
#include <optional>
#include <tuple>
#include <vector>
@@ -22,22 +23,38 @@ namespace {
constexpr u64 WATCHES_INITIAL_RESERVE = 0x4000;
constexpr u64 WATCHES_RESERVE_CHUNK = 0x1000;
-constexpr u64 STREAM_BUFFER_SIZE = 256 * 1024 * 1024;
+constexpr u64 PREFERRED_STREAM_BUFFER_SIZE = 256 * 1024 * 1024;
-std::optional<u32> FindMemoryType(const VKDevice& device, u32 filter,
- VkMemoryPropertyFlags wanted) {
- const auto properties = device.GetPhysical().GetMemoryProperties();
- for (u32 i = 0; i < properties.memoryTypeCount; i++) {
- if (!(filter & (1 << i))) {
- continue;
- }
- if ((properties.memoryTypes[i].propertyFlags & wanted) == wanted) {
+/// Finds a memory type matching the requested property flags
+std::optional<u32> FindMemoryType(const VkPhysicalDeviceMemoryProperties& properties,
+ VkMemoryPropertyFlags wanted,
+ u32 filter = std::numeric_limits<u32>::max()) {
+ for (u32 i = 0; i < properties.memoryTypeCount; ++i) {
+ const auto flags = properties.memoryTypes[i].propertyFlags;
+ if ((flags & wanted) == wanted && (filter & (1U << i)) != 0) {
return i;
}
}
return std::nullopt;
}
+/// Get the preferred host visible memory type.
+u32 GetMemoryType(const VkPhysicalDeviceMemoryProperties& properties,
+ u32 filter = std::numeric_limits<u32>::max()) {
+ // Prefer device local host visible allocations. Both AMD and Nvidia now provide one.
+ // Otherwise search for a host visible allocation.
+ static constexpr auto HOST_MEMORY =
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
+ static constexpr auto DYNAMIC_MEMORY = HOST_MEMORY | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
+ std::optional preferred_type = FindMemoryType(properties, DYNAMIC_MEMORY);
+ if (!preferred_type) {
+ preferred_type = FindMemoryType(properties, HOST_MEMORY);
+ ASSERT_MSG(preferred_type, "No host visible and coherent memory type found");
+ }
+ return preferred_type.value_or(0);
+}
+
} // Anonymous namespace
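[Editor's note] How the two helpers above compose, mirroring the CreateBuffers change further down (illustrative only; `device` and `requirements` stand in for the caller's context):

    const auto properties = device.GetPhysical().GetMemoryProperties();
    // Try HOST_VISIBLE | HOST_COHERENT | DEVICE_LOCAL first, then fall back to
    // plain HOST_VISIBLE | HOST_COHERENT; 'filter' restricts candidates to the
    // types allowed by VkMemoryRequirements::memoryTypeBits.
    const u32 type_index = GetMemoryType(properties, requirements.memoryTypeBits);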
VKStreamBuffer::VKStreamBuffer(const VKDevice& device, VKScheduler& scheduler,
@@ -51,7 +68,7 @@ VKStreamBuffer::VKStreamBuffer(const VKDevice& device, VKScheduler& scheduler,
VKStreamBuffer::~VKStreamBuffer() = default;
std::tuple<u8*, u64, bool> VKStreamBuffer::Map(u64 size, u64 alignment) {
- ASSERT(size <= STREAM_BUFFER_SIZE);
+ ASSERT(size <= stream_buffer_size);
mapped_size = size;
if (alignment > 0) {
@@ -61,7 +78,7 @@ std::tuple<u8*, u64, bool> VKStreamBuffer::Map(u64 size, u64 alignment) {
WaitPendingOperations(offset);
bool invalidated = false;
- if (offset + size > STREAM_BUFFER_SIZE) {
+ if (offset + size > stream_buffer_size) {
// The buffer would overflow; save the number of used watches and reset the state.
invalidation_mark = current_watch_cursor;
current_watch_cursor = 0;
@@ -98,40 +115,37 @@ void VKStreamBuffer::Unmap(u64 size) {
}
void VKStreamBuffer::CreateBuffers(VkBufferUsageFlags usage) {
+ const auto memory_properties = device.GetPhysical().GetMemoryProperties();
+ const u32 preferred_type = GetMemoryType(memory_properties);
+ const u32 preferred_heap = memory_properties.memoryTypes[preferred_type].heapIndex;
+
+ // Subtract a few bytes from the preferred heap size to avoid running out of memory.
+ const VkDeviceSize heap_size = memory_properties.memoryHeaps[preferred_heap].size;
+ const VkDeviceSize allocable_size = heap_size - 4 * 1024 * 1024;
+
VkBufferCreateInfo buffer_ci;
buffer_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffer_ci.pNext = nullptr;
buffer_ci.flags = 0;
- buffer_ci.size = STREAM_BUFFER_SIZE;
+ buffer_ci.size = std::min(PREFERRED_STREAM_BUFFER_SIZE, allocable_size);
buffer_ci.usage = usage;
buffer_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
buffer_ci.queueFamilyIndexCount = 0;
buffer_ci.pQueueFamilyIndices = nullptr;
- const auto& dev = device.GetLogical();
- buffer = dev.CreateBuffer(buffer_ci);
-
- const auto& dld = device.GetDispatchLoader();
- const auto requirements = dev.GetBufferMemoryRequirements(*buffer);
- // Prefer device local host visible allocations (this should hit AMD's pinned memory).
- auto type =
- FindMemoryType(device, requirements.memoryTypeBits,
- VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
- VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
- if (!type) {
- // Otherwise search for a host visible allocation.
- type = FindMemoryType(device, requirements.memoryTypeBits,
- VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
- VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
- ASSERT_MSG(type, "No host visible and coherent memory type found");
- }
+ buffer = device.GetLogical().CreateBuffer(buffer_ci);
+
+ const auto requirements = device.GetLogical().GetBufferMemoryRequirements(*buffer);
+ const u32 required_flags = requirements.memoryTypeBits;
+ stream_buffer_size = static_cast<u64>(requirements.size);
+
VkMemoryAllocateInfo memory_ai;
memory_ai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memory_ai.pNext = nullptr;
memory_ai.allocationSize = requirements.size;
- memory_ai.memoryTypeIndex = *type;
+ memory_ai.memoryTypeIndex = GetMemoryType(memory_properties, required_flags);
- memory = dev.AllocateMemory(memory_ai);
+ memory = device.GetLogical().AllocateMemory(memory_ai);
buffer.BindMemory(*memory, 0);
}
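[Editor's note] Worked numbers for the clamp above: on devices whose preferred (device-local, host-visible) heap is small, such as the 256 MiB BAR heap common on discrete GPUs, allocable_size = 256 MiB - 4 MiB = 252 MiB, so buffer_ci.size becomes min(256 MiB, 252 MiB) = 252 MiB instead of failing the allocation outright. Note also that stream_buffer_size is taken from requirements.size afterwards, which may be slightly larger than the requested size due to alignment.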
diff --git a/src/video_core/renderer_vulkan/vk_stream_buffer.h b/src/video_core/renderer_vulkan/vk_stream_buffer.h
index 58ce8b973..dfddf7ad6 100644
--- a/src/video_core/renderer_vulkan/vk_stream_buffer.h
+++ b/src/video_core/renderer_vulkan/vk_stream_buffer.h
@@ -56,8 +56,9 @@ private:
const VKDevice& device; ///< Vulkan device manager.
VKScheduler& scheduler; ///< Command scheduler.
- vk::Buffer buffer; ///< Mapped buffer.
- vk::DeviceMemory memory; ///< Memory allocation.
+ vk::Buffer buffer; ///< Mapped buffer.
+ vk::DeviceMemory memory; ///< Memory allocation.
+ u64 stream_buffer_size{}; ///< Stream buffer size.
u64 offset{}; ///< Buffer iterator.
u64 mapped_size{}; ///< Size reserved for the current copy.
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index de4c23120..55f43e61b 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -10,11 +10,9 @@
#include <variant>
#include <vector>
-#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "core/core.h"
-#include "core/memory.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/morton.h"
#include "video_core/renderer_vulkan/maxwell_to_vk.h"
@@ -26,7 +24,6 @@
#include "video_core/renderer_vulkan/vk_texture_cache.h"
#include "video_core/renderer_vulkan/wrapper.h"
#include "video_core/surface.h"
-#include "video_core/textures/convert.h"
namespace Vulkan {
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h
index 115595f28..f211ccb1e 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.h
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.h
@@ -7,19 +7,13 @@
#include <memory>
#include <unordered_map>
-#include "common/assert.h"
#include "common/common_types.h"
-#include "common/logging/log.h"
-#include "common/math_util.h"
-#include "video_core/gpu.h"
-#include "video_core/rasterizer_cache.h"
#include "video_core/renderer_vulkan/vk_image.h"
#include "video_core/renderer_vulkan/vk_memory_manager.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/wrapper.h"
#include "video_core/texture_cache/surface_base.h"
#include "video_core/texture_cache/texture_cache.h"
-#include "video_core/textures/decoders.h"
namespace Core {
class System;
diff --git a/src/video_core/renderer_vulkan/vk_update_descriptor.h b/src/video_core/renderer_vulkan/vk_update_descriptor.h
index 6ba2c9997..cc7e3dff4 100644
--- a/src/video_core/renderer_vulkan/vk_update_descriptor.h
+++ b/src/video_core/renderer_vulkan/vk_update_descriptor.h
@@ -4,7 +4,6 @@
#pragma once
-#include <type_traits>
#include <variant>
#include <boost/container/static_vector.hpp>
diff --git a/src/video_core/renderer_vulkan/wrapper.cpp b/src/video_core/renderer_vulkan/wrapper.cpp
index 9b94dfff1..2ce9b0626 100644
--- a/src/video_core/renderer_vulkan/wrapper.cpp
+++ b/src/video_core/renderer_vulkan/wrapper.cpp
@@ -2,6 +2,7 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include <algorithm>
#include <exception>
#include <memory>
#include <optional>
@@ -16,6 +17,23 @@ namespace Vulkan::vk {
namespace {
+void SortPhysicalDevices(std::vector<VkPhysicalDevice>& devices, const InstanceDispatch& dld) {
+ std::stable_sort(devices.begin(), devices.end(), [&](auto lhs, auto rhs) {
+ // This will call Vulkan more than needed, but these calls are cheap.
+ const auto lhs_properties = vk::PhysicalDevice(lhs, dld).GetProperties();
+ const auto rhs_properties = vk::PhysicalDevice(rhs, dld).GetProperties();
+
+ // Prefer discrete GPUs, Nvidia over AMD, AMD over Intel, Intel over the rest.
+ const bool preferred =
+ (lhs_properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU &&
+ rhs_properties.deviceType != VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU) ||
+ (lhs_properties.vendorID == 0x10DE && rhs_properties.vendorID != 0x10DE) ||
+ (lhs_properties.vendorID == 0x1002 && rhs_properties.vendorID != 0x1002) ||
+ (lhs_properties.vendorID == 0x8086 && rhs_properties.vendorID != 0x8086);
+ return !preferred;
+ });
+}
+
template <typename T>
bool Proc(T& result, const InstanceDispatch& dld, const char* proc_name,
VkInstance instance = nullptr) noexcept {
@@ -61,14 +79,15 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept {
X(vkCmdPipelineBarrier);
X(vkCmdPushConstants);
X(vkCmdSetBlendConstants);
- X(vkCmdSetCheckpointNV);
X(vkCmdSetDepthBias);
X(vkCmdSetDepthBounds);
+ X(vkCmdSetEvent);
X(vkCmdSetScissor);
X(vkCmdSetStencilCompareMask);
X(vkCmdSetStencilReference);
X(vkCmdSetStencilWriteMask);
X(vkCmdSetViewport);
+ X(vkCmdWaitEvents);
X(vkCreateBuffer);
X(vkCreateBufferView);
X(vkCreateCommandPool);
@@ -76,6 +95,7 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept {
X(vkCreateDescriptorPool);
X(vkCreateDescriptorSetLayout);
X(vkCreateDescriptorUpdateTemplateKHR);
+ X(vkCreateEvent);
X(vkCreateFence);
X(vkCreateFramebuffer);
X(vkCreateGraphicsPipelines);
@@ -94,6 +114,7 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept {
X(vkDestroyDescriptorPool);
X(vkDestroyDescriptorSetLayout);
X(vkDestroyDescriptorUpdateTemplateKHR);
+ X(vkDestroyEvent);
X(vkDestroyFence);
X(vkDestroyFramebuffer);
X(vkDestroyImage);
@@ -113,10 +134,10 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept {
X(vkFreeMemory);
X(vkGetBufferMemoryRequirements);
X(vkGetDeviceQueue);
+ X(vkGetEventStatus);
X(vkGetFenceStatus);
X(vkGetImageMemoryRequirements);
X(vkGetQueryPoolResults);
- X(vkGetQueueCheckpointDataNV);
X(vkMapMemory);
X(vkQueueSubmit);
X(vkResetFences);
@@ -271,6 +292,10 @@ void Destroy(VkDevice device, VkDeviceMemory handle, const DeviceDispatch& dld)
dld.vkFreeMemory(device, handle, nullptr);
}
+void Destroy(VkDevice device, VkEvent handle, const DeviceDispatch& dld) noexcept {
+ dld.vkDestroyEvent(device, handle, nullptr);
+}
+
void Destroy(VkDevice device, VkFence handle, const DeviceDispatch& dld) noexcept {
dld.vkDestroyFence(device, handle, nullptr);
}
@@ -383,7 +408,8 @@ std::optional<std::vector<VkPhysicalDevice>> Instance::EnumeratePhysicalDevices(
if (dld->vkEnumeratePhysicalDevices(handle, &num, physical_devices.data()) != VK_SUCCESS) {
return std::nullopt;
}
- return physical_devices;
+ SortPhysicalDevices(physical_devices, *dld);
+ return std::make_optional(std::move(physical_devices));
}
DebugCallback Instance::TryCreateDebugCallback(
@@ -409,17 +435,6 @@ DebugCallback Instance::TryCreateDebugCallback(
return DebugCallback(messenger, handle, *dld);
}
-std::vector<VkCheckpointDataNV> Queue::GetCheckpointDataNV(const DeviceDispatch& dld) const {
- if (!dld.vkGetQueueCheckpointDataNV) {
- return {};
- }
- u32 num;
- dld.vkGetQueueCheckpointDataNV(queue, &num, nullptr);
- std::vector<VkCheckpointDataNV> checkpoints(num);
- dld.vkGetQueueCheckpointDataNV(queue, &num, checkpoints.data());
- return checkpoints;
-}
-
void Buffer::BindMemory(VkDeviceMemory memory, VkDeviceSize offset) const {
Check(dld->vkBindBufferMemory(owner, handle, memory, offset));
}
@@ -469,12 +484,11 @@ std::vector<VkImage> SwapchainKHR::GetImages() const {
}
Device Device::Create(VkPhysicalDevice physical_device, Span<VkDeviceQueueCreateInfo> queues_ci,
- Span<const char*> enabled_extensions,
- const VkPhysicalDeviceFeatures2& enabled_features,
+ Span<const char*> enabled_extensions, const void* next,
DeviceDispatch& dld) noexcept {
VkDeviceCreateInfo ci;
ci.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
- ci.pNext = &enabled_features;
+ ci.pNext = next;
ci.flags = 0;
ci.queueCreateInfoCount = queues_ci.size();
ci.pQueueCreateInfos = queues_ci.data();
@@ -613,6 +627,16 @@ ShaderModule Device::CreateShaderModule(const VkShaderModuleCreateInfo& ci) cons
return ShaderModule(object, handle, *dld);
}
+Event Device::CreateEvent() const {
+ VkEventCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ VkEvent object;
+ Check(dld->vkCreateEvent(handle, &ci, nullptr, &object));
+ return Event(object, handle, *dld);
+}
+
SwapchainKHR Device::CreateSwapchainKHR(const VkSwapchainCreateInfoKHR& ci) const {
VkSwapchainKHR object;
Check(dld->vkCreateSwapchainKHR(handle, &ci, nullptr, &object));
diff --git a/src/video_core/renderer_vulkan/wrapper.h b/src/video_core/renderer_vulkan/wrapper.h
index fb3657819..98937a77a 100644
--- a/src/video_core/renderer_vulkan/wrapper.h
+++ b/src/video_core/renderer_vulkan/wrapper.h
@@ -197,14 +197,15 @@ struct DeviceDispatch : public InstanceDispatch {
PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier;
PFN_vkCmdPushConstants vkCmdPushConstants;
PFN_vkCmdSetBlendConstants vkCmdSetBlendConstants;
- PFN_vkCmdSetCheckpointNV vkCmdSetCheckpointNV;
PFN_vkCmdSetDepthBias vkCmdSetDepthBias;
PFN_vkCmdSetDepthBounds vkCmdSetDepthBounds;
+ PFN_vkCmdSetEvent vkCmdSetEvent;
PFN_vkCmdSetScissor vkCmdSetScissor;
PFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask;
PFN_vkCmdSetStencilReference vkCmdSetStencilReference;
PFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask;
PFN_vkCmdSetViewport vkCmdSetViewport;
+ PFN_vkCmdWaitEvents vkCmdWaitEvents;
PFN_vkCreateBuffer vkCreateBuffer;
PFN_vkCreateBufferView vkCreateBufferView;
PFN_vkCreateCommandPool vkCreateCommandPool;
@@ -212,6 +213,7 @@ struct DeviceDispatch : public InstanceDispatch {
PFN_vkCreateDescriptorPool vkCreateDescriptorPool;
PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout;
PFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR;
+ PFN_vkCreateEvent vkCreateEvent;
PFN_vkCreateFence vkCreateFence;
PFN_vkCreateFramebuffer vkCreateFramebuffer;
PFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines;
@@ -230,6 +232,7 @@ struct DeviceDispatch : public InstanceDispatch {
PFN_vkDestroyDescriptorPool vkDestroyDescriptorPool;
PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout;
PFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR;
+ PFN_vkDestroyEvent vkDestroyEvent;
PFN_vkDestroyFence vkDestroyFence;
PFN_vkDestroyFramebuffer vkDestroyFramebuffer;
PFN_vkDestroyImage vkDestroyImage;
@@ -249,10 +252,10 @@ struct DeviceDispatch : public InstanceDispatch {
PFN_vkFreeMemory vkFreeMemory;
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
PFN_vkGetDeviceQueue vkGetDeviceQueue;
+ PFN_vkGetEventStatus vkGetEventStatus;
PFN_vkGetFenceStatus vkGetFenceStatus;
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
PFN_vkGetQueryPoolResults vkGetQueryPoolResults;
- PFN_vkGetQueueCheckpointDataNV vkGetQueueCheckpointDataNV;
PFN_vkMapMemory vkMapMemory;
PFN_vkQueueSubmit vkQueueSubmit;
PFN_vkResetFences vkResetFences;
@@ -281,6 +284,7 @@ void Destroy(VkDevice, VkDescriptorPool, const DeviceDispatch&) noexcept;
void Destroy(VkDevice, VkDescriptorSetLayout, const DeviceDispatch&) noexcept;
void Destroy(VkDevice, VkDescriptorUpdateTemplateKHR, const DeviceDispatch&) noexcept;
void Destroy(VkDevice, VkDeviceMemory, const DeviceDispatch&) noexcept;
+void Destroy(VkDevice, VkEvent, const DeviceDispatch&) noexcept;
void Destroy(VkDevice, VkFence, const DeviceDispatch&) noexcept;
void Destroy(VkDevice, VkFramebuffer, const DeviceDispatch&) noexcept;
void Destroy(VkDevice, VkImage, const DeviceDispatch&) noexcept;
@@ -567,12 +571,8 @@ public:
/// Construct a queue handle.
constexpr Queue(VkQueue queue, const DeviceDispatch& dld) noexcept : queue{queue}, dld{&dld} {}
- /// Returns the checkpoint data.
- /// @note Returns an empty vector when the function pointer is not present.
- std::vector<VkCheckpointDataNV> GetCheckpointDataNV(const DeviceDispatch& dld) const;
-
- void Submit(Span<VkSubmitInfo> submit_infos, VkFence fence) const {
- Check(dld->vkQueueSubmit(queue, submit_infos.size(), submit_infos.data(), fence));
+ VkResult Submit(Span<VkSubmitInfo> submit_infos, VkFence fence) const noexcept {
+ return dld->vkQueueSubmit(queue, submit_infos.size(), submit_infos.data(), fence);
}
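[Editor's note] Submit no longer wraps the call in Check(), so callers are now responsible for inspecting the VkResult themselves. A usage sketch (the recovery policy shown is an assumption, not taken from these hunks):

    switch (queue.Submit(submit_infos, *fence)) {
    case VK_SUCCESS:
        break;
    case VK_ERROR_DEVICE_LOST:
        // report the device loss and bail out
        break;
    default:
        // treat any other result as fatal
        break;
    }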
VkResult Present(const VkPresentInfoKHR& present_info) const noexcept {
@@ -654,13 +654,21 @@ public:
std::vector<VkImage> GetImages() const;
};
+class Event : public Handle<VkEvent, VkDevice, DeviceDispatch> {
+ using Handle<VkEvent, VkDevice, DeviceDispatch>::Handle;
+
+public:
+ VkResult GetStatus() const noexcept {
+ return dld->vkGetEventStatus(owner, handle);
+ }
+};
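[Editor's note] VkEvent is the finest-grained Vulkan synchronization primitive: the device signals it mid-command-buffer and the host can poll it without blocking. Per the Vulkan spec, vkGetEventStatus returns VK_EVENT_SET or VK_EVENT_RESET, so a non-blocking host-side check with the new wrapper looks like:

    vk::Event event = device.CreateEvent();
    // ... record cmdbuf.SetEvent(*event, stage) and submit ...
    if (event.GetStatus() == VK_EVENT_SET) {
        // the GPU has reached the signal point
    }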
+
class Device : public Handle<VkDevice, NoOwner, DeviceDispatch> {
using Handle<VkDevice, NoOwner, DeviceDispatch>::Handle;
public:
static Device Create(VkPhysicalDevice physical_device, Span<VkDeviceQueueCreateInfo> queues_ci,
- Span<const char*> enabled_extensions,
- const VkPhysicalDeviceFeatures2& enabled_features,
+ Span<const char*> enabled_extensions, const void* next,
DeviceDispatch& dld) noexcept;
Queue GetQueue(u32 family_index) const noexcept;
@@ -702,6 +710,8 @@ public:
ShaderModule CreateShaderModule(const VkShaderModuleCreateInfo& ci) const;
+ Event CreateEvent() const;
+
SwapchainKHR CreateSwapchainKHR(const VkSwapchainCreateInfoKHR& ci) const;
DeviceMemory TryAllocateMemory(const VkMemoryAllocateInfo& ai) const noexcept;
@@ -734,18 +744,11 @@ public:
dld->vkResetQueryPoolEXT(handle, query_pool, first, count);
}
- void GetQueryResults(VkQueryPool query_pool, u32 first, u32 count, std::size_t data_size,
- void* data, VkDeviceSize stride, VkQueryResultFlags flags) const {
- Check(dld->vkGetQueryPoolResults(handle, query_pool, first, count, data_size, data, stride,
- flags));
- }
-
- template <typename T>
- T GetQueryResult(VkQueryPool query_pool, u32 first, VkQueryResultFlags flags) const {
- static_assert(std::is_trivially_copyable_v<T>);
- T value;
- GetQueryResults(query_pool, first, 1, sizeof(T), &value, sizeof(T), flags);
- return value;
+ VkResult GetQueryResults(VkQueryPool query_pool, u32 first, u32 count, std::size_t data_size,
+ void* data, VkDeviceSize stride, VkQueryResultFlags flags) const
+ noexcept {
+ return dld->vkGetQueryPoolResults(handle, query_pool, first, count, data_size, data, stride,
+ flags);
}
};
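[Editor's note] Like Submit above, GetQueryResults now surfaces the VkResult instead of Check()-ing it, which makes non-blocking polling possible: without VK_QUERY_RESULT_WAIT_BIT the call legitimately returns VK_NOT_READY. A polling sketch (variable names assumed):

    u64 value;
    if (dev.GetQueryResults(*query_pool, query, 1, sizeof(value), &value,
                            sizeof(value), VK_QUERY_RESULT_64_BIT) == VK_SUCCESS) {
        // result is available without stalling the host
    }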
@@ -920,10 +923,6 @@ public:
dld->vkCmdPushConstants(handle, layout, flags, offset, size, values);
}
- void SetCheckpointNV(const void* checkpoint_marker) const noexcept {
- dld->vkCmdSetCheckpointNV(handle, checkpoint_marker);
- }
-
void SetViewport(u32 first, Span<VkViewport> viewports) const noexcept {
dld->vkCmdSetViewport(handle, first, viewports.size(), viewports.data());
}
@@ -956,6 +955,19 @@ public:
dld->vkCmdSetDepthBounds(handle, min_depth_bounds, max_depth_bounds);
}
+ void SetEvent(VkEvent event, VkPipelineStageFlags stage_flags) const noexcept {
+ dld->vkCmdSetEvent(handle, event, stage_flags);
+ }
+
+ void WaitEvents(Span<VkEvent> events, VkPipelineStageFlags src_stage_mask,
+ VkPipelineStageFlags dst_stage_mask, Span<VkMemoryBarrier> memory_barriers,
+ Span<VkBufferMemoryBarrier> buffer_barriers,
+ Span<VkImageMemoryBarrier> image_barriers) const noexcept {
+ dld->vkCmdWaitEvents(handle, events.size(), events.data(), src_stage_mask, dst_stage_mask,
+ memory_barriers.size(), memory_barriers.data(), buffer_barriers.size(),
+ buffer_barriers.data(), image_barriers.size(), image_barriers.data());
+ }
+
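[Editor's note] SetEvent/WaitEvents form a split barrier: signal as early as possible, wait as late as possible, and let unrelated commands recorded in between overlap with the synchronized work. A sketch, assuming vk::Span converts from a single element and from {}:

    VkMemoryBarrier barrier{VK_STRUCTURE_TYPE_MEMORY_BARRIER, nullptr,
                            VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT};
    cmdbuf.SetEvent(*event, VK_PIPELINE_STAGE_TRANSFER_BIT);
    // ... independent commands ...
    cmdbuf.WaitEvents(*event, VK_PIPELINE_STAGE_TRANSFER_BIT,
                      VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, barrier, {}, {});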
void BindTransformFeedbackBuffersEXT(u32 first, u32 count, const VkBuffer* buffers,
const VkDeviceSize* offsets,
const VkDeviceSize* sizes) const noexcept {
diff --git a/src/video_core/shader/control_flow.cpp b/src/video_core/shader/control_flow.cpp
index 6d313963a..8d86020f6 100644
--- a/src/video_core/shader/control_flow.cpp
+++ b/src/video_core/shader/control_flow.cpp
@@ -13,6 +13,7 @@
#include "common/common_types.h"
#include "video_core/shader/ast.h"
#include "video_core/shader/control_flow.h"
+#include "video_core/shader/memory_util.h"
#include "video_core/shader/registry.h"
#include "video_core/shader/shader_ir.h"
@@ -115,17 +116,6 @@ Pred GetPredicate(u32 index, bool negated) {
return static_cast<Pred>(static_cast<u64>(index) + (negated ? 8ULL : 0ULL));
}
-/**
- * Returns whether the instruction at the specified offset is a 'sched' instruction.
- * Sched instructions always appear before a sequence of 3 instructions.
- */
-constexpr bool IsSchedInstruction(u32 offset, u32 main_offset) {
- constexpr u32 SchedPeriod = 4;
- u32 absolute_offset = offset - main_offset;
-
- return (absolute_offset % SchedPeriod) == 0;
-}
-
enum class ParseResult : u32 {
ControlCaught,
BlockEnd,
@@ -587,8 +577,6 @@ bool TryQuery(CFGRebuildState& state) {
return true;
}
-} // Anonymous namespace
-
void InsertBranch(ASTManager& mm, const BlockBranchInfo& branch_info) {
const auto get_expr = ([&](const Condition& cond) -> Expr {
Expr result{};
@@ -655,6 +643,8 @@ void DecompileShader(CFGRebuildState& state) {
state.manager->Decompile();
}
+} // Anonymous namespace
+
std::unique_ptr<ShaderCharacteristics> ScanFlow(const ProgramCode& program_code, u32 start_address,
const CompilerSettings& settings,
Registry& registry) {
diff --git a/src/video_core/shader/decode.cpp b/src/video_core/shader/decode.cpp
index 87ac9ac6c..a75a5cc63 100644
--- a/src/video_core/shader/decode.cpp
+++ b/src/video_core/shader/decode.cpp
@@ -13,6 +13,7 @@
#include "video_core/engines/shader_bytecode.h"
#include "video_core/engines/shader_header.h"
#include "video_core/shader/control_flow.h"
+#include "video_core/shader/memory_util.h"
#include "video_core/shader/node_helper.h"
#include "video_core/shader/shader_ir.h"
@@ -23,17 +24,6 @@ using Tegra::Shader::OpCode;
namespace {
-/**
- * Returns whether the instruction at the specified offset is a 'sched' instruction.
- * Sched instructions always appear before a sequence of 3 instructions.
- */
-constexpr bool IsSchedInstruction(u32 offset, u32 main_offset) {
- constexpr u32 SchedPeriod = 4;
- u32 absolute_offset = offset - main_offset;
-
- return (absolute_offset % SchedPeriod) == 0;
-}
-
void DeduceTextureHandlerSize(VideoCore::GuestDriverProfile& gpu_driver,
const std::list<Sampler>& used_samplers) {
if (gpu_driver.IsTextureHandlerSizeKnown() || used_samplers.size() <= 1) {
@@ -42,11 +32,11 @@ void DeduceTextureHandlerSize(VideoCore::GuestDriverProfile& gpu_driver,
u32 count{};
std::vector<u32> bound_offsets;
for (const auto& sampler : used_samplers) {
- if (sampler.IsBindless()) {
+ if (sampler.is_bindless) {
continue;
}
++count;
- bound_offsets.emplace_back(sampler.GetOffset());
+ bound_offsets.emplace_back(sampler.offset);
}
if (count > 1) {
gpu_driver.DeduceTextureHandlerSize(std::move(bound_offsets));
@@ -56,14 +46,14 @@ void DeduceTextureHandlerSize(VideoCore::GuestDriverProfile& gpu_driver,
std::optional<u32> TryDeduceSamplerSize(const Sampler& sampler_to_deduce,
VideoCore::GuestDriverProfile& gpu_driver,
const std::list<Sampler>& used_samplers) {
- const u32 base_offset = sampler_to_deduce.GetOffset();
+ const u32 base_offset = sampler_to_deduce.offset;
u32 max_offset{std::numeric_limits<u32>::max()};
for (const auto& sampler : used_samplers) {
- if (sampler.IsBindless()) {
+ if (sampler.is_bindless) {
continue;
}
- if (sampler.GetOffset() > base_offset) {
- max_offset = std::min(sampler.GetOffset(), max_offset);
+ if (sampler.offset > base_offset) {
+ max_offset = std::min(sampler.offset, max_offset);
}
}
if (max_offset == std::numeric_limits<u32>::max()) {
@@ -363,14 +353,14 @@ void ShaderIR::PostDecode() {
return;
}
for (auto& sampler : used_samplers) {
- if (!sampler.IsIndexed()) {
+ if (!sampler.is_indexed) {
continue;
}
if (const auto size = TryDeduceSamplerSize(sampler, gpu_driver, used_samplers)) {
- sampler.SetSize(*size);
+ sampler.size = *size;
} else {
LOG_CRITICAL(HW_GPU, "Failed to deduce size of indexed sampler");
- sampler.SetSize(1);
+ sampler.size = 1;
}
}
}
diff --git a/src/video_core/shader/decode/arithmetic_half.cpp b/src/video_core/shader/decode/arithmetic_half.cpp
index ee7d9a29d..a276aee44 100644
--- a/src/video_core/shader/decode/arithmetic_half.cpp
+++ b/src/video_core/shader/decode/arithmetic_half.cpp
@@ -19,22 +19,46 @@ u32 ShaderIR::DecodeArithmeticHalf(NodeBlock& bb, u32 pc) {
const Instruction instr = {program_code[pc]};
const auto opcode = OpCode::Decode(instr);
- if (opcode->get().GetId() == OpCode::Id::HADD2_C ||
- opcode->get().GetId() == OpCode::Id::HADD2_R) {
+ bool negate_a = false;
+ bool negate_b = false;
+ bool absolute_a = false;
+ bool absolute_b = false;
+
+ switch (opcode->get().GetId()) {
+ case OpCode::Id::HADD2_R:
if (instr.alu_half.ftz == 0) {
LOG_DEBUG(HW_GPU, "{} without FTZ is not implemented", opcode->get().GetName());
}
+ negate_a = ((instr.value >> 43) & 1) != 0;
+ negate_b = ((instr.value >> 31) & 1) != 0;
+ absolute_a = ((instr.value >> 44) & 1) != 0;
+ absolute_b = ((instr.value >> 30) & 1) != 0;
+ break;
+ case OpCode::Id::HADD2_C:
+ if (instr.alu_half.ftz == 0) {
+ LOG_DEBUG(HW_GPU, "{} without FTZ is not implemented", opcode->get().GetName());
+ }
+ negate_a = ((instr.value >> 43) & 1) != 0;
+ negate_b = ((instr.value >> 56) & 1) != 0;
+ absolute_a = ((instr.value >> 44) & 1) != 0;
+ absolute_b = ((instr.value >> 54) & 1) != 0;
+ break;
+ case OpCode::Id::HMUL2_R:
+ negate_a = ((instr.value >> 43) & 1) != 0;
+ absolute_a = ((instr.value >> 44) & 1) != 0;
+ absolute_b = ((instr.value >> 30) & 1) != 0;
+ break;
+ case OpCode::Id::HMUL2_C:
+ negate_b = ((instr.value >> 31) & 1) != 0;
+ absolute_a = ((instr.value >> 44) & 1) != 0;
+ absolute_b = ((instr.value >> 54) & 1) != 0;
+ break;
}
- const bool negate_a =
- opcode->get().GetId() != OpCode::Id::HMUL2_R && instr.alu_half.negate_a != 0;
- const bool negate_b =
- opcode->get().GetId() != OpCode::Id::HMUL2_C && instr.alu_half.negate_b != 0;
-
Node op_a = UnpackHalfFloat(GetRegister(instr.gpr8), instr.alu_half.type_a);
- op_a = GetOperandAbsNegHalf(op_a, instr.alu_half.abs_a, negate_a);
+ op_a = GetOperandAbsNegHalf(op_a, absolute_a, negate_a);
- auto [type_b, op_b] = [&]() -> std::tuple<HalfType, Node> {
+ auto [type_b, op_b] = [this, instr, opcode]() -> std::pair<HalfType, Node> {
switch (opcode->get().GetId()) {
case OpCode::Id::HADD2_C:
case OpCode::Id::HMUL2_C:
@@ -48,17 +72,16 @@ u32 ShaderIR::DecodeArithmeticHalf(NodeBlock& bb, u32 pc) {
}
}();
op_b = UnpackHalfFloat(op_b, type_b);
- // redeclaration to avoid a bug in clang with reusing local bindings in lambdas
- Node op_b_alt = GetOperandAbsNegHalf(op_b, instr.alu_half.abs_b, negate_b);
+ op_b = GetOperandAbsNegHalf(op_b, absolute_b, negate_b);
- Node value = [&]() {
+ Node value = [this, opcode, op_a, op_b = op_b] {
switch (opcode->get().GetId()) {
case OpCode::Id::HADD2_C:
case OpCode::Id::HADD2_R:
- return Operation(OperationCode::HAdd, PRECISE, op_a, op_b_alt);
+ return Operation(OperationCode::HAdd, PRECISE, op_a, op_b);
case OpCode::Id::HMUL2_C:
case OpCode::Id::HMUL2_R:
- return Operation(OperationCode::HMul, PRECISE, op_a, op_b_alt);
+ return Operation(OperationCode::HMul, PRECISE, op_a, op_b);
default:
UNIMPLEMENTED_MSG("Unhandled half float instruction: {}", opcode->get().GetName());
return Immediate(0);
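[Editor's note] The switch above replaces the shared alu_half bitfield because the constant-buffer (C) and register (R) encodings place the modifier bits at different positions, so decoding every form through one bitfield view mis-read some of them. An equivalent helper for the inline shifts (illustrative only):

    constexpr bool Bit(u64 insn, unsigned pos) {
        return ((insn >> pos) & 1) != 0;
    }
    // HADD2_R: negate_a = Bit(v, 43), negate_b = Bit(v, 31),
    //          absolute_a = Bit(v, 44), absolute_b = Bit(v, 30)
    // HADD2_C: negate_b and absolute_b move to bits 56 and 54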
diff --git a/src/video_core/shader/decode/arithmetic_integer.cpp b/src/video_core/shader/decode/arithmetic_integer.cpp
index 0f4c3103a..a041519b7 100644
--- a/src/video_core/shader/decode/arithmetic_integer.cpp
+++ b/src/video_core/shader/decode/arithmetic_integer.cpp
@@ -35,15 +35,38 @@ u32 ShaderIR::DecodeArithmeticInteger(NodeBlock& bb, u32 pc) {
case OpCode::Id::IADD_C:
case OpCode::Id::IADD_R:
case OpCode::Id::IADD_IMM: {
- UNIMPLEMENTED_IF_MSG(instr.alu.saturate_d, "IADD saturation not implemented");
+ UNIMPLEMENTED_IF_MSG(instr.alu.saturate_d, "IADD.SAT");
+ UNIMPLEMENTED_IF_MSG(instr.iadd.x && instr.generates_cc, "IADD.X Rd.CC");
op_a = GetOperandAbsNegInteger(op_a, false, instr.alu_integer.negate_a, true);
op_b = GetOperandAbsNegInteger(op_b, false, instr.alu_integer.negate_b, true);
- const Node value = Operation(OperationCode::IAdd, PRECISE, op_a, op_b);
+ Node value = Operation(OperationCode::UAdd, op_a, op_b);
- SetInternalFlagsFromInteger(bb, value, instr.generates_cc);
- SetRegister(bb, instr.gpr0, value);
+ if (instr.iadd.x) {
+ Node carry = GetInternalFlag(InternalFlag::Carry);
+ Node x = Operation(OperationCode::Select, std::move(carry), Immediate(1), Immediate(0));
+ value = Operation(OperationCode::UAdd, std::move(value), std::move(x));
+ }
+
+ if (instr.generates_cc) {
+ const Node i0 = Immediate(0);
+
+ Node zero = Operation(OperationCode::LogicalIEqual, value, i0);
+ Node sign = Operation(OperationCode::LogicalILessThan, value, i0);
+ Node carry = Operation(OperationCode::LogicalAddCarry, op_a, op_b);
+
+ Node pos_a = Operation(OperationCode::LogicalIGreaterThan, op_a, i0);
+ Node pos_b = Operation(OperationCode::LogicalIGreaterThan, op_b, i0);
+ Node pos = Operation(OperationCode::LogicalAnd, std::move(pos_a), std::move(pos_b));
+ Node overflow = Operation(OperationCode::LogicalAnd, pos, sign);
+
+ SetInternalFlag(bb, InternalFlag::Zero, std::move(zero));
+ SetInternalFlag(bb, InternalFlag::Sign, std::move(sign));
+ SetInternalFlag(bb, InternalFlag::Carry, std::move(carry));
+ SetInternalFlag(bb, InternalFlag::Overflow, std::move(overflow));
+ }
+ SetRegister(bb, instr.gpr0, std::move(value));
break;
}
case OpCode::Id::IADD3_C:
@@ -249,8 +272,8 @@ u32 ShaderIR::DecodeArithmeticInteger(NodeBlock& bb, u32 pc) {
}
case OpCode::Id::LEA_IMM: {
const bool neg = instr.lea.imm.neg != 0;
- return {Immediate(static_cast<u32>(instr.lea.imm.entry_a)),
- GetOperandAbsNegInteger(GetRegister(instr.gpr8), false, neg, true),
+ return {GetOperandAbsNegInteger(GetRegister(instr.gpr8), false, neg, true),
+ Immediate(static_cast<u32>(instr.lea.imm.entry_a)),
Immediate(static_cast<u32>(instr.lea.imm.entry_b))};
}
case OpCode::Id::LEA_RZ: {
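[Editor's note] Worked examples for the IADD flag block above, treating values as 32-bit: 0x7FFFFFFF + 1 = 0x80000000 sets Sign and Overflow (two positive operands produced a negative result) but not Carry; 0xFFFFFFFF + 1 wraps to 0, setting Zero and Carry but not Overflow, since op_a is negative as a signed value and pos_a fails. As written, the Overflow term only detects positive overflow; negative overflow (for example 0x80000000 + 0x80000000) is left unflagged. For IADD.X, the carry-in is materialized by Select(carry, 1, 0) from the Carry internal flag and folded into the unsigned sum. Separately, the LEA_IMM change swaps the operand order so the register operand comes first and the two immediates follow.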
diff --git a/src/video_core/shader/decode/image.cpp b/src/video_core/shader/decode/image.cpp
index 85ee9aa5e..60b6ad72a 100644
--- a/src/video_core/shader/decode/image.cpp
+++ b/src/video_core/shader/decode/image.cpp
@@ -485,11 +485,10 @@ u32 ShaderIR::DecodeImage(NodeBlock& bb, u32 pc) {
Image& ShaderIR::GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type) {
const auto offset = static_cast<u32>(image.index.Value());
- const auto it =
- std::find_if(std::begin(used_images), std::end(used_images),
- [offset](const Image& entry) { return entry.GetOffset() == offset; });
+ const auto it = std::find_if(std::begin(used_images), std::end(used_images),
+ [offset](const Image& entry) { return entry.offset == offset; });
if (it != std::end(used_images)) {
- ASSERT(!it->IsBindless() && it->GetType() == it->GetType());
+ ASSERT(!it->is_bindless && it->type == type);
return *it;
}
@@ -505,13 +504,12 @@ Image& ShaderIR::GetBindlessImage(Tegra::Shader::Register reg, Tegra::Shader::Im
const auto buffer = std::get<1>(result);
const auto offset = std::get<2>(result);
- const auto it =
- std::find_if(std::begin(used_images), std::end(used_images),
- [buffer = buffer, offset = offset](const Image& entry) {
- return entry.GetBuffer() == buffer && entry.GetOffset() == offset;
- });
+ const auto it = std::find_if(std::begin(used_images), std::end(used_images),
+ [buffer, offset](const Image& entry) {
+ return entry.buffer == buffer && entry.offset == offset;
+ });
if (it != std::end(used_images)) {
- ASSERT(it->IsBindless() && it->GetType() == it->GetType());
+ ASSERT(it->is_bindless && it->type == type);
return *it;
}
diff --git a/src/video_core/shader/decode/memory.cpp b/src/video_core/shader/decode/memory.cpp
index 8112ead3e..9392f065b 100644
--- a/src/video_core/shader/decode/memory.cpp
+++ b/src/video_core/shader/decode/memory.cpp
@@ -479,7 +479,7 @@ std::tuple<Node, Node, GlobalMemoryBase> ShaderIR::TrackGlobalMemory(NodeBlock&
bb.push_back(Comment(fmt::format("Base address is c[0x{:x}][0x{:x}]", index, offset)));
const GlobalMemoryBase descriptor{index, offset};
- const auto& [entry, is_new] = used_global_memory.try_emplace(descriptor);
+ const auto& entry = used_global_memory.try_emplace(descriptor).first;
auto& usage = entry->second;
usage.is_written |= is_write;
usage.is_read |= is_read;
diff --git a/src/video_core/shader/decode/register_set_predicate.cpp b/src/video_core/shader/decode/register_set_predicate.cpp
index 8d54cce34..6116c31aa 100644
--- a/src/video_core/shader/decode/register_set_predicate.cpp
+++ b/src/video_core/shader/decode/register_set_predicate.cpp
@@ -2,6 +2,8 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include <utility>
+
#include "common/assert.h"
#include "common/common_types.h"
#include "video_core/engines/shader_bytecode.h"
@@ -10,20 +12,20 @@
namespace VideoCommon::Shader {
+using std::move;
using Tegra::Shader::Instruction;
using Tegra::Shader::OpCode;
namespace {
-constexpr u64 NUM_PROGRAMMABLE_PREDICATES = 7;
-}
+constexpr u64 NUM_CONDITION_CODES = 4;
+constexpr u64 NUM_PREDICATES = 7;
+} // namespace
u32 ShaderIR::DecodeRegisterSetPredicate(NodeBlock& bb, u32 pc) {
const Instruction instr = {program_code[pc]};
const auto opcode = OpCode::Decode(instr);
- UNIMPLEMENTED_IF(instr.p2r_r2p.mode != Tegra::Shader::R2pMode::Pr);
-
- const Node apply_mask = [&] {
+ Node apply_mask = [this, opcode, instr] {
switch (opcode->get().GetId()) {
case OpCode::Id::R2P_IMM:
case OpCode::Id::P2R_IMM:
@@ -34,39 +36,43 @@ u32 ShaderIR::DecodeRegisterSetPredicate(NodeBlock& bb, u32 pc) {
}
}();
- const auto offset = static_cast<u32>(instr.p2r_r2p.byte) * 8;
+ const u32 offset = static_cast<u32>(instr.p2r_r2p.byte) * 8;
+
+ const bool cc = instr.p2r_r2p.mode == Tegra::Shader::R2pMode::Cc;
+ const u64 num_entries = cc ? NUM_CONDITION_CODES : NUM_PREDICATES;
+ const auto get_entry = [this, cc](u64 entry) {
+ return cc ? GetInternalFlag(static_cast<InternalFlag>(entry)) : GetPredicate(entry);
+ };
switch (opcode->get().GetId()) {
case OpCode::Id::R2P_IMM: {
- const Node mask = GetRegister(instr.gpr8);
+ Node mask = GetRegister(instr.gpr8);
- for (u64 pred = 0; pred < NUM_PROGRAMMABLE_PREDICATES; ++pred) {
- const auto shift = static_cast<u32>(pred);
+ for (u64 entry = 0; entry < num_entries; ++entry) {
+ const u32 shift = static_cast<u32>(entry);
- const Node apply_compare = BitfieldExtract(apply_mask, shift, 1);
- const Node condition =
- Operation(OperationCode::LogicalUNotEqual, apply_compare, Immediate(0));
+ Node apply = BitfieldExtract(apply_mask, shift, 1);
+ Node condition = Operation(OperationCode::LogicalUNotEqual, apply, Immediate(0));
- const Node value_compare = BitfieldExtract(mask, offset + shift, 1);
- const Node value =
- Operation(OperationCode::LogicalUNotEqual, value_compare, Immediate(0));
+ Node compare = BitfieldExtract(mask, offset + shift, 1);
+ Node value = Operation(OperationCode::LogicalUNotEqual, move(compare), Immediate(0));
- const Node code = Operation(OperationCode::LogicalAssign, GetPredicate(pred), value);
- bb.push_back(Conditional(condition, {code}));
+ Node code = Operation(OperationCode::LogicalAssign, get_entry(entry), move(value));
+ bb.push_back(Conditional(condition, {move(code)}));
}
break;
}
case OpCode::Id::P2R_IMM: {
Node value = Immediate(0);
- for (u64 pred = 0; pred < NUM_PROGRAMMABLE_PREDICATES; ++pred) {
- Node bit = Operation(OperationCode::Select, GetPredicate(pred), Immediate(1U << pred),
+ for (u64 entry = 0; entry < num_entries; ++entry) {
+ Node bit = Operation(OperationCode::Select, get_entry(entry), Immediate(1U << entry),
Immediate(0));
- value = Operation(OperationCode::UBitwiseOr, std::move(value), std::move(bit));
+ value = Operation(OperationCode::UBitwiseOr, move(value), move(bit));
}
- value = Operation(OperationCode::UBitwiseAnd, std::move(value), apply_mask);
- value = BitfieldInsert(GetRegister(instr.gpr8), std::move(value), offset, 8);
+ value = Operation(OperationCode::UBitwiseAnd, move(value), apply_mask);
+ value = BitfieldInsert(GetRegister(instr.gpr8), move(value), offset, 8);
- SetRegister(bb, instr.gpr0, std::move(value));
+ SetRegister(bb, instr.gpr0, move(value));
break;
}
default:
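[Editor's note] R2P/P2R previously rejected any mode other than Pr; with the UNIMPLEMENTED_IF removed, mode == Cc maps entries 0-3 onto the internal condition-code flags (Zero, Sign, Carry, Overflow) through get_entry. The byte field scales the bit offset, so for R2P_IMM with byte == 1 the loop above behaves like this scalar sketch (set_entry is hypothetical; the real code builds IR nodes):

    for (u64 entry = 0; entry < num_entries; ++entry) {
        const bool apply = ((apply_mask >> entry) & 1) != 0; // immediate or gpr20 mask
        const bool value = ((mask >> (8 + entry)) & 1) != 0; // byte 1 of gpr8
        if (apply) {
            set_entry(entry, value); // predicate or condition-code flag
        }
    }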
diff --git a/src/video_core/shader/decode/texture.cpp b/src/video_core/shader/decode/texture.cpp
index 6c4a1358b..8f0bb996e 100644
--- a/src/video_core/shader/decode/texture.cpp
+++ b/src/video_core/shader/decode/texture.cpp
@@ -139,15 +139,15 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
}
const Node component = Immediate(static_cast<u32>(instr.tld4s.component));
- const SamplerInfo info{TextureType::Texture2D, false, is_depth_compare};
- const Sampler& sampler = *GetSampler(instr.sampler, info);
+ SamplerInfo info;
+ info.is_shadow = is_depth_compare;
+ const std::optional<Sampler> sampler = GetSampler(instr.sampler, info);
Node4 values;
for (u32 element = 0; element < values.size(); ++element) {
- auto coords_copy = coords;
- MetaTexture meta{sampler, {}, depth_compare, aoffi, {}, {},
- {}, {}, component, element, {}};
- values[element] = Operation(OperationCode::TextureGather, meta, std::move(coords_copy));
+ MetaTexture meta{*sampler, {}, depth_compare, aoffi, {}, {},
+ {}, {}, component, element, {}};
+ values[element] = Operation(OperationCode::TextureGather, meta, coords);
}
if (instr.tld4s.fp16_flag) {
@@ -165,19 +165,20 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
"AOFFI is not implemented");
const bool is_array = instr.txd.is_array != 0;
- u64 base_reg = instr.gpr8.Value();
const auto derivate_reg = instr.gpr20.Value();
const auto texture_type = instr.txd.texture_type.Value();
const auto coord_count = GetCoordCount(texture_type);
- Node index_var{};
- const Sampler* sampler =
- is_bindless ? GetBindlessSampler(base_reg, index_var, {{texture_type, is_array, false}})
- : GetSampler(instr.sampler, {{texture_type, is_array, false}});
+ u64 base_reg = instr.gpr8.Value();
+ Node index_var;
+ SamplerInfo info;
+ info.type = texture_type;
+ info.is_array = is_array;
+ const std::optional<Sampler> sampler = is_bindless
+ ? GetBindlessSampler(base_reg, info, index_var)
+ : GetSampler(instr.sampler, info);
Node4 values;
- if (sampler == nullptr) {
- for (u32 element = 0; element < values.size(); ++element) {
- values[element] = Immediate(0);
- }
+ if (!sampler) {
+ std::generate(values.begin(), values.end(), [this] { return Immediate(0); });
WriteTexInstructionFloat(bb, instr, values);
break;
}
@@ -215,14 +216,12 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
is_bindless = true;
[[fallthrough]];
case OpCode::Id::TXQ: {
- // TODO: The new commits on the texture refactor, change the way samplers work.
- // Sadly, not all texture instructions specify the type of texture their sampler
- // uses. This must be fixed at a later instance.
- Node index_var{};
- const Sampler* sampler =
- is_bindless ? GetBindlessSampler(instr.gpr8, index_var) : GetSampler(instr.sampler);
-
- if (sampler == nullptr) {
+ Node index_var;
+ const std::optional<Sampler> sampler = is_bindless
+ ? GetBindlessSampler(instr.gpr8, {}, index_var)
+ : GetSampler(instr.sampler, {});
+
+ if (!sampler) {
u32 indexer = 0;
for (u32 element = 0; element < 4; ++element) {
if (!instr.txq.IsComponentEnabled(element)) {
@@ -268,13 +267,17 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
UNIMPLEMENTED_IF_MSG(instr.tmml.UsesMiscMode(Tegra::Shader::TextureMiscMode::NDV),
"NDV is not implemented");
- auto texture_type = instr.tmml.texture_type.Value();
+ const auto texture_type = instr.tmml.texture_type.Value();
const bool is_array = instr.tmml.array != 0;
- Node index_var{};
- const Sampler* sampler =
- is_bindless ? GetBindlessSampler(instr.gpr20, index_var) : GetSampler(instr.sampler);
-
- if (sampler == nullptr) {
+ SamplerInfo info;
+ info.type = texture_type;
+ info.is_array = is_array;
+ Node index_var;
+ const std::optional<Sampler> sampler =
+ is_bindless ? GetBindlessSampler(instr.gpr20, info, index_var)
+ : GetSampler(instr.sampler, info);
+
+ if (!sampler) {
u32 indexer = 0;
for (u32 element = 0; element < 2; ++element) {
if (!instr.tmml.IsComponentEnabled(element)) {
@@ -301,12 +304,11 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
coords.push_back(GetRegister(instr.gpr8.Value() + 1));
break;
default:
- UNIMPLEMENTED_MSG("Unhandled texture type {}", static_cast<u32>(texture_type));
+ UNIMPLEMENTED_MSG("Unhandled texture type {}", static_cast<int>(texture_type));
// Fallback to interpreting as a 2D texture for now
coords.push_back(GetRegister(instr.gpr8.Value() + 0));
coords.push_back(GetRegister(instr.gpr8.Value() + 1));
- texture_type = TextureType::Texture2D;
}
u32 indexer = 0;
for (u32 element = 0; element < 2; ++element) {
@@ -355,98 +357,103 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
return pc;
}
-ShaderIR::SamplerInfo ShaderIR::GetSamplerInfo(std::optional<SamplerInfo> sampler_info, u32 offset,
+ShaderIR::SamplerInfo ShaderIR::GetSamplerInfo(SamplerInfo info, u32 offset,
std::optional<u32> buffer) {
- if (sampler_info) {
- return *sampler_info;
+ if (info.IsComplete()) {
+ return info;
}
const auto sampler = buffer ? registry.ObtainBindlessSampler(*buffer, offset)
: registry.ObtainBoundSampler(offset);
if (!sampler) {
LOG_WARNING(HW_GPU, "Unknown sampler info");
- return SamplerInfo{TextureType::Texture2D, false, false, false};
- }
- return SamplerInfo{sampler->texture_type, sampler->is_array != 0, sampler->is_shadow != 0,
- sampler->is_buffer != 0};
+ info.type = info.type.value_or(Tegra::Shader::TextureType::Texture2D);
+ info.is_array = info.is_array.value_or(false);
+ info.is_shadow = info.is_shadow.value_or(false);
+ info.is_buffer = info.is_buffer.value_or(false);
+ return info;
+ }
+ info.type = info.type.value_or(sampler->texture_type);
+ info.is_array = info.is_array.value_or(sampler->is_array != 0);
+ info.is_shadow = info.is_shadow.value_or(sampler->is_shadow != 0);
+ info.is_buffer = info.is_buffer.value_or(sampler->is_buffer != 0);
+ return info;
}
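[Editor's note] GetSamplerInfo now only fills in the fields the caller left unspecified, which implies SamplerInfo became a struct of std::optional fields. Its definition lives outside these hunks; the assumed shape:

    struct SamplerInfo {
        std::optional<Tegra::Shader::TextureType> type;
        std::optional<bool> is_array;
        std::optional<bool> is_shadow;
        std::optional<bool> is_buffer;

        bool IsComplete() const noexcept {
            return type && is_array && is_shadow && is_buffer;
        }
    };

This lets call sites pin down only what the instruction itself encodes (say, is_shadow for TLD4S) and defer the remaining fields to the registry.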
-const Sampler* ShaderIR::GetSampler(const Tegra::Shader::Sampler& sampler,
- std::optional<SamplerInfo> sampler_info) {
+std::optional<Sampler> ShaderIR::GetSampler(Tegra::Shader::Sampler sampler,
+ SamplerInfo sampler_info) {
const auto offset = static_cast<u32>(sampler.index.Value());
const auto info = GetSamplerInfo(sampler_info, offset);
// If this sampler has already been used, return the existing mapping.
- const auto it =
- std::find_if(used_samplers.begin(), used_samplers.end(),
- [offset](const Sampler& entry) { return entry.GetOffset() == offset; });
+ const auto it = std::find_if(used_samplers.begin(), used_samplers.end(),
+ [offset](const Sampler& entry) { return entry.offset == offset; });
if (it != used_samplers.end()) {
- ASSERT(!it->IsBindless() && it->GetType() == info.type && it->IsArray() == info.is_array &&
- it->IsShadow() == info.is_shadow && it->IsBuffer() == info.is_buffer);
- return &*it;
+ ASSERT(!it->is_bindless && it->type == info.type && it->is_array == info.is_array &&
+ it->is_shadow == info.is_shadow && it->is_buffer == info.is_buffer);
+ return *it;
}
// Otherwise create a new mapping for this sampler
const auto next_index = static_cast<u32>(used_samplers.size());
- return &used_samplers.emplace_back(next_index, offset, info.type, info.is_array, info.is_shadow,
- info.is_buffer, false);
+ return used_samplers.emplace_back(next_index, offset, *info.type, *info.is_array,
+ *info.is_shadow, *info.is_buffer, false);
}
-const Sampler* ShaderIR::GetBindlessSampler(Tegra::Shader::Register reg, Node& index_var,
- std::optional<SamplerInfo> sampler_info) {
+std::optional<Sampler> ShaderIR::GetBindlessSampler(Tegra::Shader::Register reg, SamplerInfo info,
+ Node& index_var) {
const Node sampler_register = GetRegister(reg);
const auto [base_node, tracked_sampler_info] =
TrackBindlessSampler(sampler_register, global_code, static_cast<s64>(global_code.size()));
ASSERT(base_node != nullptr);
if (base_node == nullptr) {
- return nullptr;
+ return std::nullopt;
}
if (const auto bindless_sampler_info =
std::get_if<BindlessSamplerNode>(&*tracked_sampler_info)) {
const u32 buffer = bindless_sampler_info->GetIndex();
const u32 offset = bindless_sampler_info->GetOffset();
- const auto info = GetSamplerInfo(sampler_info, offset, buffer);
+ info = GetSamplerInfo(info, offset, buffer);
// If this sampler has already been used, return the existing mapping.
- const auto it =
- std::find_if(used_samplers.begin(), used_samplers.end(),
- [buffer = buffer, offset = offset](const Sampler& entry) {
- return entry.GetBuffer() == buffer && entry.GetOffset() == offset;
- });
+ const auto it = std::find_if(used_samplers.begin(), used_samplers.end(),
+ [buffer = buffer, offset = offset](const Sampler& entry) {
+ return entry.buffer == buffer && entry.offset == offset;
+ });
if (it != used_samplers.end()) {
- ASSERT(it->IsBindless() && it->GetType() == info.type &&
- it->IsArray() == info.is_array && it->IsShadow() == info.is_shadow);
- return &*it;
+ ASSERT(it->is_bindless && it->type == info.type && it->is_array == info.is_array &&
+ it->is_shadow == info.is_shadow);
+ return *it;
}
// Otherwise create a new mapping for this sampler
const auto next_index = static_cast<u32>(used_samplers.size());
- return &used_samplers.emplace_back(next_index, offset, buffer, info.type, info.is_array,
- info.is_shadow, info.is_buffer, false);
- } else if (const auto array_sampler_info =
- std::get_if<ArraySamplerNode>(&*tracked_sampler_info)) {
+ return used_samplers.emplace_back(next_index, offset, buffer, *info.type, *info.is_array,
+ *info.is_shadow, *info.is_buffer, false);
+ }
+ if (const auto array_sampler_info = std::get_if<ArraySamplerNode>(&*tracked_sampler_info)) {
const u32 base_offset = array_sampler_info->GetBaseOffset() / 4;
index_var = GetCustomVariable(array_sampler_info->GetIndexVar());
- const auto info = GetSamplerInfo(sampler_info, base_offset);
+ info = GetSamplerInfo(info, base_offset);
// If this sampler has already been used, return the existing mapping.
const auto it = std::find_if(
used_samplers.begin(), used_samplers.end(),
- [base_offset](const Sampler& entry) { return entry.GetOffset() == base_offset; });
+ [base_offset](const Sampler& entry) { return entry.offset == base_offset; });
if (it != used_samplers.end()) {
- ASSERT(!it->IsBindless() && it->GetType() == info.type &&
- it->IsArray() == info.is_array && it->IsShadow() == info.is_shadow &&
- it->IsBuffer() == info.is_buffer && it->IsIndexed());
- return &*it;
+ ASSERT(!it->is_bindless && it->type == info.type && it->is_array == info.is_array &&
+ it->is_shadow == info.is_shadow && it->is_buffer == info.is_buffer &&
+ it->is_indexed);
+ return *it;
}
uses_indexed_samplers = true;
// Otherwise create a new mapping for this sampler
const auto next_index = static_cast<u32>(used_samplers.size());
- return &used_samplers.emplace_back(next_index, base_offset, info.type, info.is_array,
- info.is_shadow, info.is_buffer, true);
+ return used_samplers.emplace_back(next_index, base_offset, *info.type, *info.is_array,
+ *info.is_shadow, *info.is_buffer, true);
}
- return nullptr;
+ return std::nullopt;
}
void ShaderIR::WriteTexInstructionFloat(NodeBlock& bb, Instruction instr, const Node4& components) {
@@ -531,10 +538,16 @@ Node4 ShaderIR::GetTextureCode(Instruction instr, TextureType texture_type,
ASSERT_MSG(texture_type != TextureType::Texture3D || !is_array || !is_shadow,
"Illegal texture type");
- const SamplerInfo info{texture_type, is_array, is_shadow, false};
+ SamplerInfo info;
+ info.type = texture_type;
+ info.is_array = is_array;
+ info.is_shadow = is_shadow;
+ info.is_buffer = false;
+
Node index_var;
- const Sampler* sampler = is_bindless ? GetBindlessSampler(*bindless_reg, index_var, info)
- : GetSampler(instr.sampler, info);
+ const std::optional<Sampler> sampler = is_bindless
+ ? GetBindlessSampler(*bindless_reg, info, index_var)
+ : GetSampler(instr.sampler, info);
if (!sampler) {
return {Immediate(0), Immediate(0), Immediate(0), Immediate(0)};
}
@@ -593,8 +606,9 @@ Node4 ShaderIR::GetTexCode(Instruction instr, TextureType texture_type,
++parameter_register;
}
- const auto [coord_count, total_coord_count] = ValidateAndGetCoordinateElement(
- texture_type, depth_compare, is_array, lod_bias_enabled, 4, 5);
+ const auto coord_counts = ValidateAndGetCoordinateElement(texture_type, depth_compare, is_array,
+ lod_bias_enabled, 4, 5);
+ const auto coord_count = std::get<0>(coord_counts);
// If arrays are enabled, the index is always stored in the gpr8 field
const u64 array_register = instr.gpr8.Value();
// The first coordinate is in gpr8, or in gpr8 + 1 when arrays are used
@@ -632,8 +646,10 @@ Node4 ShaderIR::GetTexsCode(Instruction instr, TextureType texture_type,
const bool lod_bias_enabled =
(process_mode != TextureProcessMode::None && process_mode != TextureProcessMode::LZ);
- const auto [coord_count, total_coord_count] = ValidateAndGetCoordinateElement(
- texture_type, depth_compare, is_array, lod_bias_enabled, 4, 4);
+ const auto coord_counts = ValidateAndGetCoordinateElement(texture_type, depth_compare, is_array,
+ lod_bias_enabled, 4, 4);
+ const auto coord_count = std::get<0>(coord_counts);
+
// If enabled arrays index is always stored in the gpr8 field
const u64 array_register = instr.gpr8.Value();
// First coordinate index is stored in gpr8 field or (gpr8 + 1) when arrays are used
@@ -682,12 +698,17 @@ Node4 ShaderIR::GetTld4Code(Instruction instr, TextureType texture_type, bool de
u64 parameter_register = instr.gpr20.Value();
- const SamplerInfo info{texture_type, is_array, depth_compare, false};
- Node index_var{};
- const Sampler* sampler = is_bindless ? GetBindlessSampler(parameter_register++, index_var, info)
- : GetSampler(instr.sampler, info);
+ SamplerInfo info;
+ info.type = texture_type;
+ info.is_array = is_array;
+ info.is_shadow = depth_compare;
+
+ Node index_var;
+ const std::optional<Sampler> sampler =
+ is_bindless ? GetBindlessSampler(parameter_register++, info, index_var)
+ : GetSampler(instr.sampler, info);
Node4 values;
- if (sampler == nullptr) {
+ if (!sampler) {
for (u32 element = 0; element < values.size(); ++element) {
values[element] = Immediate(0);
}
@@ -742,12 +763,12 @@ Node4 ShaderIR::GetTldCode(Tegra::Shader::Instruction instr) {
// const Node aoffi_register{is_aoffi ? GetRegister(gpr20_cursor++) : nullptr};
// const Node multisample{is_multisample ? GetRegister(gpr20_cursor++) : nullptr};
- const auto& sampler = *GetSampler(instr.sampler);
+ const std::optional<Sampler> sampler = GetSampler(instr.sampler, {});
Node4 values;
for (u32 element = 0; element < values.size(); ++element) {
auto coords_copy = coords;
- MetaTexture meta{sampler, array_register, {}, {}, {}, {}, {}, lod, {}, element, {}};
+ MetaTexture meta{*sampler, array_register, {}, {}, {}, {}, {}, lod, {}, element, {}};
values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy));
}
@@ -755,7 +776,11 @@ Node4 ShaderIR::GetTldCode(Tegra::Shader::Instruction instr) {
}
Node4 ShaderIR::GetTldsCode(Instruction instr, TextureType texture_type, bool is_array) {
- const Sampler& sampler = *GetSampler(instr.sampler);
+ SamplerInfo info;
+ info.type = texture_type;
+ info.is_array = is_array;
+ info.is_shadow = false;
+ const std::optional<Sampler> sampler = GetSampler(instr.sampler, info);
const std::size_t type_coord_count = GetCoordCount(texture_type);
const bool lod_enabled = instr.tlds.GetTextureProcessMode() == TextureProcessMode::LL;
@@ -783,7 +808,7 @@ Node4 ShaderIR::GetTldsCode(Instruction instr, TextureType texture_type, bool is
Node4 values;
for (u32 element = 0; element < values.size(); ++element) {
auto coords_copy = coords;
- MetaTexture meta{sampler, array, {}, {}, {}, {}, {}, lod, {}, element, {}};
+ MetaTexture meta{*sampler, array, {}, {}, {}, {}, {}, lod, {}, element, {}};
values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy));
}
return values;
diff --git a/src/video_core/shader/memory_util.cpp b/src/video_core/shader/memory_util.cpp
new file mode 100644
index 000000000..074f21691
--- /dev/null
+++ b/src/video_core/shader/memory_util.cpp
@@ -0,0 +1,77 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <algorithm>
+#include <cstddef>
+
+#include <boost/container_hash/hash.hpp>
+
+#include "common/common_types.h"
+#include "core/core.h"
+#include "video_core/engines/maxwell_3d.h"
+#include "video_core/memory_manager.h"
+#include "video_core/shader/memory_util.h"
+#include "video_core/shader/shader_ir.h"
+
+namespace VideoCommon::Shader {
+
+GPUVAddr GetShaderAddress(Core::System& system,
+ Tegra::Engines::Maxwell3D::Regs::ShaderProgram program) {
+ const auto& gpu{system.GPU().Maxwell3D()};
+ const auto& shader_config{gpu.regs.shader_config[static_cast<std::size_t>(program)]};
+ return gpu.regs.code_address.CodeAddress() + shader_config.offset;
+}
+
+bool IsSchedInstruction(std::size_t offset, std::size_t main_offset) {
+ // A sched word appears once every four words: one scheduling word followed by three instructions.
+ constexpr std::size_t SchedPeriod = 4;
+ const std::size_t absolute_offset = offset - main_offset;
+ return (absolute_offset % SchedPeriod) == 0;
+}
+
+std::size_t CalculateProgramSize(const ProgramCode& program, bool is_compute) {
+ // This is the encoded version of BRA that jumps to itself. All Nvidia
+ // shaders end with one.
+ static constexpr u64 SELF_JUMPING_BRANCH = 0xE2400FFFFF07000FULL;
+ static constexpr u64 MASK = 0xFFFFFFFFFF7FFFFFULL;
+
+ const std::size_t start_offset = is_compute ? KERNEL_MAIN_OFFSET : STAGE_MAIN_OFFSET;
+ std::size_t offset = start_offset;
+ while (offset < program.size()) {
+ const u64 instruction = program[offset];
+ if (!IsSchedInstruction(offset, start_offset)) {
+ if ((instruction & MASK) == SELF_JUMPING_BRANCH) {
+ // End on the self-jumping branch Maxwell uses as an end-of-program marker
+ break;
+ }
+ if (instruction == 0) {
+ break;
+ }
+ }
+ ++offset;
+ }
+ // The last instruction is included in the program size
+ return std::min(offset + 1, program.size());
+}
+
+ProgramCode GetShaderCode(Tegra::MemoryManager& memory_manager, GPUVAddr gpu_addr,
+ const u8* host_ptr, bool is_compute) {
+ ProgramCode code(VideoCommon::Shader::MAX_PROGRAM_LENGTH);
+ ASSERT_OR_EXECUTE(host_ptr != nullptr, { return code; });
+ memory_manager.ReadBlockUnsafe(gpu_addr, code.data(), code.size() * sizeof(u64));
+ code.resize(CalculateProgramSize(code, is_compute));
+ return code;
+}
+
+u64 GetUniqueIdentifier(Tegra::Engines::ShaderType shader_type, bool is_a, const ProgramCode& code,
+ const ProgramCode& code_b) {
+ u64 unique_identifier = boost::hash_value(code);
+ if (is_a) {
+ // VertexA programs are paired with a VertexB program, so hash both streams
+ boost::hash_combine(unique_identifier, boost::hash_value(code_b));
+ }
+ return unique_identifier;
+}
+
+} // namespace VideoCommon::Shader
diff --git a/src/video_core/shader/memory_util.h b/src/video_core/shader/memory_util.h
new file mode 100644
index 000000000..be90d24fd
--- /dev/null
+++ b/src/video_core/shader/memory_util.h
@@ -0,0 +1,47 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <cstddef>
+#include <vector>
+
+#include "common/common_types.h"
+#include "video_core/engines/maxwell_3d.h"
+#include "video_core/engines/shader_type.h"
+
+namespace Core {
+class System;
+}
+
+namespace Tegra {
+class MemoryManager;
+}
+
+namespace VideoCommon::Shader {
+
+using ProgramCode = std::vector<u64>;
+
+constexpr u32 STAGE_MAIN_OFFSET = 10;
+constexpr u32 KERNEL_MAIN_OFFSET = 0;
+
+/// Gets the address for the specified shader stage program
+GPUVAddr GetShaderAddress(Core::System& system,
+ Tegra::Engines::Maxwell3D::Regs::ShaderProgram program);
+
+/// Returns whether the instruction at the given offset is a scheduler (sched) instruction
+bool IsSchedInstruction(std::size_t offset, std::size_t main_offset);
+
+/// Calculates the size of a program stream
+std::size_t CalculateProgramSize(const ProgramCode& program, bool is_compute);
+
+/// Gets the shader program code from memory for the specified address
+ProgramCode GetShaderCode(Tegra::MemoryManager& memory_manager, GPUVAddr gpu_addr,
+ const u8* host_ptr, bool is_compute);
+
+/// Hashes one (or two) program streams
+u64 GetUniqueIdentifier(Tegra::Engines::ShaderType shader_type, bool is_a, const ProgramCode& code,
+ const ProgramCode& code_b = {});
+
+} // namespace VideoCommon::Shader
diff --git a/src/video_core/shader/node.h b/src/video_core/shader/node.h
index 3eee961f5..601c822d2 100644
--- a/src/video_core/shader/node.h
+++ b/src/video_core/shader/node.h
@@ -132,6 +132,8 @@ enum class OperationCode {
LogicalUNotEqual, /// (uint a, uint b) -> bool
LogicalUGreaterEqual, /// (uint a, uint b) -> bool
+ LogicalAddCarry, /// (uint a, uint b) -> bool
+
Logical2HLessThan, /// (MetaHalfArithmetic, f16vec2 a, f16vec2 b) -> bool2
Logical2HEqual, /// (MetaHalfArithmetic, f16vec2 a, f16vec2 b) -> bool2
Logical2HLessEqual, /// (MetaHalfArithmetic, f16vec2 a, f16vec2 b) -> bool2
@@ -265,76 +267,30 @@ class ArraySamplerNode;
using TrackSamplerData = std::variant<BindlessSamplerNode, ArraySamplerNode>;
using TrackSampler = std::shared_ptr<TrackSamplerData>;
-class Sampler {
-public:
- /// This constructor is for bound samplers
+struct Sampler {
+ /// Bound samplers constructor
constexpr explicit Sampler(u32 index, u32 offset, Tegra::Shader::TextureType type,
bool is_array, bool is_shadow, bool is_buffer, bool is_indexed)
: index{index}, offset{offset}, type{type}, is_array{is_array}, is_shadow{is_shadow},
is_buffer{is_buffer}, is_indexed{is_indexed} {}
- /// This constructor is for bindless samplers
+ /// Bindless samplers constructor
constexpr explicit Sampler(u32 index, u32 offset, u32 buffer, Tegra::Shader::TextureType type,
bool is_array, bool is_shadow, bool is_buffer, bool is_indexed)
: index{index}, offset{offset}, buffer{buffer}, type{type}, is_array{is_array},
is_shadow{is_shadow}, is_buffer{is_buffer}, is_bindless{true}, is_indexed{is_indexed} {}
- constexpr u32 GetIndex() const {
- return index;
- }
-
- constexpr u32 GetOffset() const {
- return offset;
- }
-
- constexpr u32 GetBuffer() const {
- return buffer;
- }
-
- constexpr Tegra::Shader::TextureType GetType() const {
- return type;
- }
-
- constexpr bool IsArray() const {
- return is_array;
- }
-
- constexpr bool IsShadow() const {
- return is_shadow;
- }
-
- constexpr bool IsBuffer() const {
- return is_buffer;
- }
-
- constexpr bool IsBindless() const {
- return is_bindless;
- }
-
- constexpr bool IsIndexed() const {
- return is_indexed;
- }
-
- constexpr u32 Size() const {
- return size;
- }
-
- constexpr void SetSize(u32 new_size) {
- size = new_size;
- }
-
-private:
- u32 index{}; ///< Emulated index given for the this sampler.
- u32 offset{}; ///< Offset in the const buffer from where the sampler is being read.
- u32 buffer{}; ///< Buffer where the bindless sampler is being read (unused on bound samplers).
- u32 size{1}; ///< Size of the sampler.
+ u32 index = 0; ///< Emulated index given to this sampler.
+ u32 offset = 0; ///< Offset in the const buffer from where the sampler is being read.
+ u32 buffer = 0; ///< Buffer where the bindless sampler is being read (unused on bound samplers).
+ u32 size = 1; ///< Size of the sampler.
Tegra::Shader::TextureType type{}; ///< The type used to sample this texture (Texture2D, etc)
- bool is_array{}; ///< Whether the texture is being sampled as an array texture or not.
- bool is_shadow{}; ///< Whether the texture is being sampled as a depth texture or not.
- bool is_buffer{}; ///< Whether the texture is a texture buffer without sampler.
- bool is_bindless{}; ///< Whether this sampler belongs to a bindless texture or not.
- bool is_indexed{}; ///< Whether this sampler is an indexed array of textures.
+ bool is_array = false; ///< Whether the texture is being sampled as an array texture or not.
+ bool is_shadow = false; ///< Whether the texture is being sampled as a depth texture or not.
+ bool is_buffer = false; ///< Whether the texture is a texture buffer without sampler.
+ bool is_bindless = false; ///< Whether this sampler belongs to a bindless texture or not.
+ bool is_indexed = false; ///< Whether this sampler is an indexed array of textures.
};
/// Represents a tracked bindless sampler into a direct const buffer
@@ -379,13 +335,13 @@ private:
u32 offset;
};
-class Image final {
+struct Image {
public:
- /// This constructor is for bound images
+ /// Bound images constructor
constexpr explicit Image(u32 index, u32 offset, Tegra::Shader::ImageType type)
: index{index}, offset{offset}, type{type} {}
- /// This constructor is for bindless samplers
+ /// Bindless images constructor
constexpr explicit Image(u32 index, u32 offset, u32 buffer, Tegra::Shader::ImageType type)
: index{index}, offset{offset}, buffer{buffer}, type{type}, is_bindless{true} {}
@@ -403,53 +359,20 @@ public:
is_atomic = true;
}
- constexpr u32 GetIndex() const {
- return index;
- }
-
- constexpr u32 GetOffset() const {
- return offset;
- }
-
- constexpr u32 GetBuffer() const {
- return buffer;
- }
-
- constexpr Tegra::Shader::ImageType GetType() const {
- return type;
- }
-
- constexpr bool IsBindless() const {
- return is_bindless;
- }
-
- constexpr bool IsWritten() const {
- return is_written;
- }
-
- constexpr bool IsRead() const {
- return is_read;
- }
-
- constexpr bool IsAtomic() const {
- return is_atomic;
- }
-
-private:
- u32 index{};
- u32 offset{};
- u32 buffer{};
+ u32 index = 0;
+ u32 offset = 0;
+ u32 buffer = 0;
Tegra::Shader::ImageType type{};
- bool is_bindless{};
- bool is_written{};
- bool is_read{};
- bool is_atomic{};
+ bool is_bindless = false;
+ bool is_written = false;
+ bool is_read = false;
+ bool is_atomic = false;
};
struct GlobalMemoryBase {
- u32 cbuf_index{};
- u32 cbuf_offset{};
+ u32 cbuf_index = 0;
+ u32 cbuf_offset = 0;
bool operator<(const GlobalMemoryBase& rhs) const {
return std::tie(cbuf_index, cbuf_offset) < std::tie(rhs.cbuf_index, rhs.cbuf_offset);
@@ -463,7 +386,7 @@ struct MetaArithmetic {
/// Parameters describing a texture sampler
struct MetaTexture {
- const Sampler& sampler;
+ Sampler sampler;
Node array;
Node depth_compare;
std::vector<Node> aoffi;
diff --git a/src/video_core/shader/shader_ir.h b/src/video_core/shader/shader_ir.h
index c6e7bdf50..15ae152f2 100644
--- a/src/video_core/shader/shader_ir.h
+++ b/src/video_core/shader/shader_ir.h
@@ -18,6 +18,7 @@
#include "video_core/engines/shader_header.h"
#include "video_core/shader/ast.h"
#include "video_core/shader/compiler_settings.h"
+#include "video_core/shader/memory_util.h"
#include "video_core/shader/node.h"
#include "video_core/shader/registry.h"
@@ -25,16 +26,13 @@ namespace VideoCommon::Shader {
struct ShaderBlock;
-using ProgramCode = std::vector<u64>;
-
constexpr u32 MAX_PROGRAM_LENGTH = 0x1000;
-class ConstBuffer {
-public:
- explicit ConstBuffer(u32 max_offset, bool is_indirect)
+struct ConstBuffer {
+ constexpr explicit ConstBuffer(u32 max_offset, bool is_indirect)
: max_offset{max_offset}, is_indirect{is_indirect} {}
- ConstBuffer() = default;
+ constexpr ConstBuffer() = default;
void MarkAsUsed(u64 offset) {
max_offset = std::max(max_offset, static_cast<u32>(offset));
@@ -57,8 +55,8 @@ public:
}
private:
- u32 max_offset{};
- bool is_indirect{};
+ u32 max_offset = 0;
+ bool is_indirect = false;
};
struct GlobalMemoryUsage {
@@ -192,10 +190,14 @@ private:
friend class ASTDecoder;
struct SamplerInfo {
- Tegra::Shader::TextureType type;
- bool is_array;
- bool is_shadow;
- bool is_buffer;
+ std::optional<Tegra::Shader::TextureType> type;
+ std::optional<bool> is_array;
+ std::optional<bool> is_shadow;
+ std::optional<bool> is_buffer;
+
+ constexpr bool IsComplete() const noexcept {
+ return type && is_array && is_shadow && is_buffer;
+ }
};
void Decode();
@@ -328,16 +330,15 @@ private:
OperationCode GetPredicateCombiner(Tegra::Shader::PredOperation operation);
/// Queries the missing sampler info from the execution context.
- SamplerInfo GetSamplerInfo(std::optional<SamplerInfo> sampler_info, u32 offset,
+ SamplerInfo GetSamplerInfo(SamplerInfo info, u32 offset,
std::optional<u32> buffer = std::nullopt);
- /// Accesses a texture sampler
- const Sampler* GetSampler(const Tegra::Shader::Sampler& sampler,
- std::optional<SamplerInfo> sampler_info = std::nullopt);
+ /// Accesses a texture sampler.
+ std::optional<Sampler> GetSampler(Tegra::Shader::Sampler sampler, SamplerInfo info);
/// Accesses a texture sampler for a bindless texture.
- const Sampler* GetBindlessSampler(Tegra::Shader::Register reg, Node& index_var,
- std::optional<SamplerInfo> sampler_info = std::nullopt);
+ std::optional<Sampler> GetBindlessSampler(Tegra::Shader::Register reg, SamplerInfo info,
+ Node& index_var);
/// Accesses an image.
Image& GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type);
diff --git a/src/video_core/shader/track.cpp b/src/video_core/shader/track.cpp
index 513e9bf49..eb97bfd41 100644
--- a/src/video_core/shader/track.cpp
+++ b/src/video_core/shader/track.cpp
@@ -153,21 +153,13 @@ std::tuple<Node, u32, u32> ShaderIR::TrackCbuf(Node tracked, const NodeBlock& co
if (gpr->GetIndex() == Tegra::Shader::Register::ZeroIndex) {
return {};
}
- s64 current_cursor = cursor;
- while (current_cursor > 0) {
- // Reduce the cursor in one to avoid infinite loops when the instruction sets the same
- // register that it uses as operand
- const auto [source, new_cursor] = TrackRegister(gpr, code, current_cursor - 1);
- current_cursor = new_cursor;
- if (!source) {
- continue;
- }
- const auto [base_address, index, offset] = TrackCbuf(source, code, current_cursor);
- if (base_address != nullptr) {
- return {base_address, index, offset};
- }
+ // Reduce the cursor by one to avoid infinite loops when the instruction sets the same
+ // register that it uses as an operand
+ const auto [source, new_cursor] = TrackRegister(gpr, code, cursor - 1);
+ if (!source) {
+ return {};
}
- return {};
+ return TrackCbuf(source, code, new_cursor);
}
if (const auto operation = std::get_if<OperationNode>(&*tracked)) {
for (std::size_t i = operation->GetOperandsCount(); i > 0; --i) {
diff --git a/src/video_core/texture_cache/surface_base.h b/src/video_core/texture_cache/surface_base.h
index c5ab21f56..79e10ffbb 100644
--- a/src/video_core/texture_cache/surface_base.h
+++ b/src/video_core/texture_cache/surface_base.h
@@ -192,6 +192,22 @@ public:
index = index_;
}
+ void SetMemoryMarked(bool is_memory_marked_) {
+ is_memory_marked = is_memory_marked_;
+ }
+
+ bool IsMemoryMarked() const {
+ return is_memory_marked;
+ }
+
+ void SetSyncPending(bool is_sync_pending_) {
+ is_sync_pending = is_sync_pending_;
+ }
+
+ bool IsSyncPending() const {
+ return is_sync_pending;
+ }
+
void MarkAsPicked(bool is_picked_) {
is_picked = is_picked_;
}
@@ -303,6 +319,8 @@ private:
bool is_target{};
bool is_registered{};
bool is_picked{};
+ bool is_memory_marked{};
+ bool is_sync_pending{};
u32 index{NO_RT};
u64 modification_tick{};
};
diff --git a/src/video_core/texture_cache/surface_params.cpp b/src/video_core/texture_cache/surface_params.cpp
index 0de499946..884fabffe 100644
--- a/src/video_core/texture_cache/surface_params.cpp
+++ b/src/video_core/texture_cache/surface_params.cpp
@@ -81,7 +81,7 @@ SurfaceParams SurfaceParams::CreateForTexture(const FormatLookupTable& lookup_ta
params.pixel_format = lookup_table.GetPixelFormat(
tic.format, params.srgb_conversion, tic.r_type, tic.g_type, tic.b_type, tic.a_type);
params.type = GetFormatType(params.pixel_format);
- if (entry.IsShadow() && params.type == SurfaceType::ColorTexture) {
+ if (entry.is_shadow && params.type == SurfaceType::ColorTexture) {
switch (params.pixel_format) {
case PixelFormat::R16U:
case PixelFormat::R16F:
@@ -108,7 +108,7 @@ SurfaceParams SurfaceParams::CreateForTexture(const FormatLookupTable& lookup_ta
params.emulated_levels = 1;
params.is_layered = false;
} else {
- params.target = TextureTypeToSurfaceTarget(entry.GetType(), entry.IsArray());
+ params.target = TextureTypeToSurfaceTarget(entry.type, entry.is_array);
params.width = tic.Width();
params.height = tic.Height();
params.depth = tic.Depth();
@@ -138,7 +138,7 @@ SurfaceParams SurfaceParams::CreateForImage(const FormatLookupTable& lookup_tabl
tic.format, params.srgb_conversion, tic.r_type, tic.g_type, tic.b_type, tic.a_type);
params.type = GetFormatType(params.pixel_format);
- params.target = ImageTypeToSurfaceTarget(entry.GetType());
+ params.target = ImageTypeToSurfaceTarget(entry.type);
// TODO: on 1DBuffer we should use the tic info.
if (tic.IsBuffer()) {
params.target = SurfaceTarget::TextureBuffer;
@@ -248,12 +248,12 @@ SurfaceParams SurfaceParams::CreateForFermiCopySurface(
VideoCore::Surface::SurfaceTarget SurfaceParams::ExpectedTarget(
const VideoCommon::Shader::Sampler& entry) {
- return TextureTypeToSurfaceTarget(entry.GetType(), entry.IsArray());
+ return TextureTypeToSurfaceTarget(entry.type, entry.is_array);
}
VideoCore::Surface::SurfaceTarget SurfaceParams::ExpectedTarget(
const VideoCommon::Shader::Image& entry) {
- return ImageTypeToSurfaceTarget(entry.GetType());
+ return ImageTypeToSurfaceTarget(entry.type);
}
bool SurfaceParams::IsLayered() const {
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 69ca08fd1..d6efc34b2 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -6,6 +6,7 @@
#include <algorithm>
#include <array>
+#include <list>
#include <memory>
#include <mutex>
#include <set>
@@ -62,6 +63,30 @@ public:
}
}
+ void OnCPUWrite(VAddr addr, std::size_t size) {
+ std::lock_guard lock{mutex};
+
+ for (const auto& surface : GetSurfacesInRegion(addr, size)) {
+ if (surface->IsMemoryMarked()) {
+ UnmarkMemory(surface);
+ surface->SetSyncPending(true);
+ marked_for_unregister.emplace_back(surface);
+ }
+ }
+ }
+
+ void SyncGuestHost() {
+ std::lock_guard lock{mutex};
+
+ for (const auto& surface : marked_for_unregister) {
+ if (surface->IsRegistered()) {
+ surface->SetSyncPending(false);
+ Unregister(surface);
+ }
+ }
+ marked_for_unregister.clear();
+ }
+
/**
* Guarantees that render targets don't unregister themselves if they
* collide. Protection is currently only done on 3D slices.
@@ -85,10 +110,20 @@ public:
return a->GetModificationTick() < b->GetModificationTick();
});
for (const auto& surface : surfaces) {
+ mutex.unlock();
FlushSurface(surface);
+ mutex.lock();
}
}
+ bool MustFlushRegion(VAddr addr, std::size_t size) {
+ std::lock_guard lock{mutex};
+
+ const auto surfaces = GetSurfacesInRegion(addr, size);
+ return std::any_of(surfaces.cbegin(), surfaces.cend(),
+ [](const TSurface& surface) { return surface->IsModified(); });
+ }
+
TView GetTextureSurface(const Tegra::Texture::TICEntry& tic,
const VideoCommon::Shader::Sampler& entry) {
std::lock_guard lock{mutex};
@@ -108,7 +143,7 @@ public:
}
const auto params{SurfaceParams::CreateForTexture(format_lookup_table, tic, entry)};
- const auto [surface, view] = GetSurface(gpu_addr, *cpu_addr, params, false);
+ const auto [surface, view] = GetSurface(gpu_addr, *cpu_addr, params, true, false);
if (guard_samplers) {
sampled_textures.push_back(surface);
}
@@ -128,7 +163,7 @@ public:
return GetNullSurface(SurfaceParams::ExpectedTarget(entry));
}
const auto params{SurfaceParams::CreateForImage(format_lookup_table, tic, entry)};
- const auto [surface, view] = GetSurface(gpu_addr, *cpu_addr, params, false);
+ const auto [surface, view] = GetSurface(gpu_addr, *cpu_addr, params, true, false);
if (guard_samplers) {
sampled_textures.push_back(surface);
}
@@ -143,7 +178,7 @@ public:
return any_rt;
}
- TView GetDepthBufferSurface() {
+ TView GetDepthBufferSurface(bool preserve_contents) {
std::lock_guard lock{mutex};
auto& maxwell3d = system.GPU().Maxwell3D();
if (!maxwell3d.dirty.flags[VideoCommon::Dirty::ZetaBuffer]) {
@@ -164,7 +199,7 @@ public:
return {};
}
const auto depth_params{SurfaceParams::CreateForDepthBuffer(system)};
- auto surface_view = GetSurface(gpu_addr, *cpu_addr, depth_params, true);
+ auto surface_view = GetSurface(gpu_addr, *cpu_addr, depth_params, preserve_contents, true);
if (depth_buffer.target)
depth_buffer.target->MarkAsRenderTarget(false, NO_RT);
depth_buffer.target = surface_view.first;
@@ -174,7 +209,7 @@ public:
return surface_view.second;
}
- TView GetColorBufferSurface(std::size_t index) {
+ TView GetColorBufferSurface(std::size_t index, bool preserve_contents) {
std::lock_guard lock{mutex};
ASSERT(index < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets);
auto& maxwell3d = system.GPU().Maxwell3D();
@@ -204,10 +239,17 @@ public:
return {};
}
- auto surface_view = GetSurface(gpu_addr, *cpu_addr,
- SurfaceParams::CreateForFramebuffer(system, index), true);
- if (render_targets[index].target)
- render_targets[index].target->MarkAsRenderTarget(false, NO_RT);
+ auto surface_view =
+ GetSurface(gpu_addr, *cpu_addr, SurfaceParams::CreateForFramebuffer(system, index),
+ preserve_contents, true);
+ if (render_targets[index].target) {
+ auto& surface = render_targets[index].target;
+ surface->MarkAsRenderTarget(false, NO_RT);
+ const auto& cr_params = surface->GetSurfaceParams();
+ if (!cr_params.is_tiled && Settings::values.use_asynchronous_gpu_emulation) {
+ AsyncFlushSurface(surface);
+ }
+ }
render_targets[index].target = surface_view.first;
render_targets[index].view = surface_view.second;
if (render_targets[index].target)
@@ -259,9 +301,9 @@ public:
const std::optional<VAddr> src_cpu_addr =
system.GPU().MemoryManager().GpuToCpuAddress(src_gpu_addr);
std::pair<TSurface, TView> dst_surface =
- GetSurface(dst_gpu_addr, *dst_cpu_addr, dst_params, false);
+ GetSurface(dst_gpu_addr, *dst_cpu_addr, dst_params, true, false);
std::pair<TSurface, TView> src_surface =
- GetSurface(src_gpu_addr, *src_cpu_addr, src_params, false);
+ GetSurface(src_gpu_addr, *src_cpu_addr, src_params, true, false);
ImageBlit(src_surface.second, dst_surface.second, copy_config);
dst_surface.first->MarkAsModified(true, Tick());
}
@@ -284,6 +326,34 @@ public:
return ++ticks;
}
+ void CommitAsyncFlushes() {
+ committed_flushes.push_back(uncommitted_flushes);
+ uncommitted_flushes.reset();
+ }
+
+ bool HasUncommittedFlushes() const {
+ return uncommitted_flushes != nullptr;
+ }
+
+ bool ShouldWaitAsyncFlushes() const {
+ return !committed_flushes.empty() && committed_flushes.front() != nullptr;
+ }
+
+ void PopAsyncFlushes() {
+ if (committed_flushes.empty()) {
+ return;
+ }
+ auto& flush_list = committed_flushes.front();
+ if (!flush_list) {
+ committed_flushes.pop_front();
+ return;
+ }
+ for (TSurface& surface : *flush_list) {
+ FlushSurface(surface);
+ }
+ committed_flushes.pop_front();
+ }
+
protected:
explicit TextureCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
bool is_astc_supported)
@@ -345,9 +415,20 @@ protected:
surface->SetCpuAddr(*cpu_addr);
RegisterInnerCache(surface);
surface->MarkAsRegistered(true);
+ surface->SetMemoryMarked(true);
rasterizer.UpdatePagesCachedCount(*cpu_addr, size, 1);
}
+ void UnmarkMemory(TSurface surface) {
+ if (!surface->IsMemoryMarked()) {
+ return;
+ }
+ const std::size_t size = surface->GetSizeInBytes();
+ const VAddr cpu_addr = surface->GetCpuAddr();
+ rasterizer.UpdatePagesCachedCount(cpu_addr, size, -1);
+ surface->SetMemoryMarked(false);
+ }
+
void Unregister(TSurface surface) {
if (guard_render_targets && surface->IsProtected()) {
return;
@@ -355,9 +436,11 @@ protected:
if (!guard_render_targets && surface->IsRenderTarget()) {
ManageRenderTargetUnregister(surface);
}
- const std::size_t size = surface->GetSizeInBytes();
- const VAddr cpu_addr = surface->GetCpuAddr();
- rasterizer.UpdatePagesCachedCount(cpu_addr, size, -1);
+ UnmarkMemory(surface);
+ if (surface->IsSyncPending()) {
+ marked_for_unregister.remove(surface);
+ surface->SetSyncPending(false);
+ }
UnregisterInnerCache(surface);
surface->MarkAsRegistered(false);
ReserveSurface(surface->GetSurfaceParams(), surface);
@@ -417,7 +500,7 @@ private:
**/
RecycleStrategy PickStrategy(std::vector<TSurface>& overlaps, const SurfaceParams& params,
const GPUVAddr gpu_addr, const MatchTopologyResult untopological) {
- if (Settings::values.use_accurate_gpu_emulation) {
+ if (Settings::IsGPULevelExtreme()) {
return RecycleStrategy::Flush;
}
// 3D Textures decision
@@ -450,18 +533,22 @@ private:
* @param overlaps The overlapping surfaces registered in the cache.
* @param params The parameters for the new surface.
* @param gpu_addr The starting address of the new surface.
+ * @param preserve_contents Indicates whether the new surface should be loaded from memory or
+ * left blank.
* @param untopological Indicates to the recycler that the texture has no way to match the
* overlaps due to topological reasons.
**/
std::pair<TSurface, TView> RecycleSurface(std::vector<TSurface>& overlaps,
const SurfaceParams& params, const GPUVAddr gpu_addr,
+ const bool preserve_contents,
const MatchTopologyResult untopological) {
+ const bool do_load = preserve_contents && Settings::IsGPULevelExtreme();
for (auto& surface : overlaps) {
Unregister(surface);
}
switch (PickStrategy(overlaps, params, gpu_addr, untopological)) {
case RecycleStrategy::Ignore: {
- return InitializeSurface(gpu_addr, params, Settings::values.use_accurate_gpu_emulation);
+ return InitializeSurface(gpu_addr, params, do_load);
}
case RecycleStrategy::Flush: {
std::sort(overlaps.begin(), overlaps.end(),
@@ -471,7 +558,7 @@ private:
for (auto& surface : overlaps) {
FlushSurface(surface);
}
- return InitializeSurface(gpu_addr, params);
+ return InitializeSurface(gpu_addr, params, preserve_contents);
}
case RecycleStrategy::BufferCopy: {
auto new_surface = GetUncachedSurface(gpu_addr, params);
@@ -480,7 +567,7 @@ private:
}
default: {
UNIMPLEMENTED_MSG("Unimplemented Texture Cache Recycling Strategy!");
- return InitializeSurface(gpu_addr, params);
+ return InitializeSurface(gpu_addr, params, do_load);
}
}
}
@@ -509,7 +596,7 @@ private:
}
const auto& final_params = new_surface->GetSurfaceParams();
if (cr_params.type != final_params.type) {
- if (Settings::values.use_accurate_gpu_emulation) {
+ if (Settings::IsGPULevelExtreme()) {
BufferCopy(current_surface, new_surface);
}
} else {
@@ -598,7 +685,7 @@ private:
if (passed_tests == 0) {
return {};
// In accurate GPU emulation all tests should pass; otherwise we recycle
- } else if (Settings::values.use_accurate_gpu_emulation && passed_tests != overlaps.size()) {
+ } else if (Settings::IsGPULevelExtreme() && passed_tests != overlaps.size()) {
return {};
}
for (const auto& surface : overlaps) {
@@ -618,11 +705,14 @@ private:
* @param params The parameters on the new surface.
* @param gpu_addr The starting address of the new surface.
* @param cpu_addr The starting address of the new surface on physical memory.
+ * @param preserve_contents Indicates whether the new surface should be loaded from memory or
+ * left blank.
*/
std::optional<std::pair<TSurface, TView>> Manage3DSurfaces(std::vector<TSurface>& overlaps,
const SurfaceParams& params,
const GPUVAddr gpu_addr,
- const VAddr cpu_addr) {
+ const VAddr cpu_addr,
+ bool preserve_contents) {
if (params.target == SurfaceTarget::Texture3D) {
bool failed = false;
if (params.num_levels > 1) {
@@ -668,11 +758,11 @@ private:
for (const auto& surface : overlaps) {
if (!surface->MatchTarget(params.target)) {
if (overlaps.size() == 1 && surface->GetCpuAddr() == cpu_addr) {
- if (Settings::values.use_accurate_gpu_emulation) {
+ if (Settings::IsGPULevelExtreme()) {
return std::nullopt;
}
Unregister(surface);
- return InitializeSurface(gpu_addr, params);
+ return InitializeSurface(gpu_addr, params, preserve_contents);
}
return std::nullopt;
}
@@ -683,7 +773,7 @@ private:
return {{surface, surface->GetMainView()}};
}
}
- return InitializeSurface(gpu_addr, params);
+ return InitializeSurface(gpu_addr, params, preserve_contents);
}
}
@@ -706,10 +796,13 @@ private:
*
* @param gpu_addr The starting address of the candidate surface.
* @param params The parameters on the candidate surface.
+ * @param preserve_contents Indicates whether the new surface should be loaded from memory or
+ * left blank.
* @param is_render Whether or not the surface is a render target.
**/
std::pair<TSurface, TView> GetSurface(const GPUVAddr gpu_addr, const VAddr cpu_addr,
- const SurfaceParams& params, bool is_render) {
+ const SurfaceParams& params, bool preserve_contents,
+ bool is_render) {
// Step 1
// Check the Level 1 cache for a fast structural match. If the candidate surface
// matches at a certain level, we are pretty much done.
@@ -718,7 +811,8 @@ private:
const auto topological_result = current_surface->MatchesTopology(params);
if (topological_result != MatchTopologyResult::FullMatch) {
std::vector<TSurface> overlaps{current_surface};
- return RecycleSurface(overlaps, params, gpu_addr, topological_result);
+ return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+ topological_result);
}
const auto struct_result = current_surface->MatchesStructure(params);
@@ -743,7 +837,7 @@ private:
// If none are found, we are done: we just create the surface and load it.
if (overlaps.empty()) {
- return InitializeSurface(gpu_addr, params);
+ return InitializeSurface(gpu_addr, params, preserve_contents);
}
// Step 3
@@ -753,13 +847,15 @@ private:
for (const auto& surface : overlaps) {
const auto topological_result = surface->MatchesTopology(params);
if (topological_result != MatchTopologyResult::FullMatch) {
- return RecycleSurface(overlaps, params, gpu_addr, topological_result);
+ return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+ topological_result);
}
}
// Check if it's a 3D texture
if (params.block_depth > 0) {
- auto surface = Manage3DSurfaces(overlaps, params, gpu_addr, cpu_addr);
+ auto surface =
+ Manage3DSurfaces(overlaps, params, gpu_addr, cpu_addr, preserve_contents);
if (surface) {
return *surface;
}
@@ -779,7 +875,8 @@ private:
return *view;
}
}
- return RecycleSurface(overlaps, params, gpu_addr, MatchTopologyResult::FullMatch);
+ return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+ MatchTopologyResult::FullMatch);
}
// Now we check if the candidate is a mipmap/layer of the overlap
std::optional<TView> view =
@@ -803,7 +900,7 @@ private:
pair.first->EmplaceView(params, gpu_addr, candidate_size);
if (mirage_view)
return {pair.first, *mirage_view};
- return RecycleSurface(overlaps, params, gpu_addr,
+ return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
MatchTopologyResult::FullMatch);
}
return {current_surface, *view};
@@ -819,7 +916,8 @@ private:
}
}
// We failed all the tests, recycle the overlaps into a new texture.
- return RecycleSurface(overlaps, params, gpu_addr, MatchTopologyResult::FullMatch);
+ return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+ MatchTopologyResult::FullMatch);
}
/**
@@ -977,10 +1075,10 @@ private:
}
std::pair<TSurface, TView> InitializeSurface(GPUVAddr gpu_addr, const SurfaceParams& params,
- bool do_load = true) {
+ bool preserve_contents) {
auto new_surface{GetUncachedSurface(gpu_addr, params)};
Register(new_surface);
- if (do_load) {
+ if (preserve_contents) {
LoadSurface(new_surface);
}
return {new_surface, new_surface->GetMainView()};
@@ -1074,7 +1172,7 @@ private:
/// Returns true if the shader sampler entry is compatible with the TIC texture type.
static bool IsTypeCompatible(Tegra::Texture::TextureType tic_type,
const VideoCommon::Shader::Sampler& entry) {
- const auto shader_type = entry.GetType();
+ const auto shader_type = entry.type;
switch (tic_type) {
case Tegra::Texture::TextureType::Texture1D:
case Tegra::Texture::TextureType::Texture1DArray:
@@ -1095,7 +1193,7 @@ private:
if (shader_type == Tegra::Shader::TextureType::TextureCube) {
return true;
}
- return shader_type == Tegra::Shader::TextureType::Texture2D && entry.IsArray();
+ return shader_type == Tegra::Shader::TextureType::Texture2D && entry.is_array;
}
UNREACHABLE();
return true;
@@ -1106,6 +1204,13 @@ private:
TView view;
};
+ void AsyncFlushSurface(TSurface& surface) {
+ if (!uncommitted_flushes) {
+ uncommitted_flushes = std::make_shared<std::list<TSurface>>();
+ }
+ uncommitted_flushes->push_back(surface);
+ }
+
VideoCore::RasterizerInterface& rasterizer;
FormatLookupTable format_lookup_table;
@@ -1150,6 +1255,11 @@ private:
std::unordered_map<u32, TSurface> invalid_cache;
std::vector<u8> invalid_memory;
+ std::list<TSurface> marked_for_unregister;
+
+ std::shared_ptr<std::list<TSurface>> uncommitted_flushes{};
+ std::list<std::shared_ptr<std::list<TSurface>>> committed_flushes;
+
StagingCache staging_cache;
std::recursive_mutex mutex;
};
diff --git a/src/video_core/textures/decoders.cpp b/src/video_core/textures/decoders.cpp
index 7df5f1452..fae8638ec 100644
--- a/src/video_core/textures/decoders.cpp
+++ b/src/video_core/textures/decoders.cpp
@@ -11,6 +11,7 @@
#include "video_core/textures/texture.h"
namespace Tegra::Texture {
+namespace {
/**
* This table represents the internal swizzle of a gob,
@@ -174,6 +175,8 @@ void SwizzledData(u8* const swizzled_data, u8* const unswizzled_data, const bool
}
}
+} // Anonymous namespace
+
void CopySwizzledData(u32 width, u32 height, u32 depth, u32 bytes_per_pixel,
u32 out_bytes_per_pixel, u8* const swizzled_data, u8* const unswizzled_data,
bool unswizzle, u32 block_height, u32 block_depth, u32 width_spacing) {
diff --git a/src/video_core/textures/decoders.h b/src/video_core/textures/decoders.h
index e5eac3f3b..9f2d6d308 100644
--- a/src/video_core/textures/decoders.h
+++ b/src/video_core/textures/decoders.h
@@ -56,8 +56,7 @@ void UnswizzleSubrect(u32 subrect_width, u32 subrect_height, u32 dest_pitch, u32
u32 bytes_per_pixel, u8* swizzled_data, u8* unswizzled_data, u32 block_height,
u32 offset_x, u32 offset_y);
-void SwizzleKepler(const u32 width, const u32 height, const u32 dst_x, const u32 dst_y,
- const u32 block_height, const std::size_t copy_size, const u8* source_data,
- u8* swizzle_data);
+void SwizzleKepler(u32 width, u32 height, u32 dst_x, u32 dst_y, u32 block_height,
+ std::size_t copy_size, const u8* source_data, u8* swizzle_data);
} // namespace Tegra::Texture