summaryrefslogtreecommitdiffstats
path: root/src/video_core/renderer_vulkan
diff options
context:
space:
mode:
Diffstat (limited to 'src/video_core/renderer_vulkan')
-rw-r--r--src/video_core/renderer_vulkan/fixed_pipeline_state.cpp296
-rw-r--r--src/video_core/renderer_vulkan/fixed_pipeline_state.h282
-rw-r--r--src/video_core/renderer_vulkan/shaders/blit.frag24
-rw-r--r--src/video_core/renderer_vulkan/shaders/blit.vert28
-rw-r--r--src/video_core/renderer_vulkan/shaders/quad_array.comp37
-rw-r--r--src/video_core/renderer_vulkan/shaders/uint8.comp33
-rw-r--r--src/video_core/renderer_vulkan/vk_device.cpp22
-rw-r--r--src/video_core/renderer_vulkan/vk_device.h9
-rw-r--r--src/video_core/renderer_vulkan/vk_image.cpp106
-rw-r--r--src/video_core/renderer_vulkan/vk_image.h84
-rw-r--r--src/video_core/renderer_vulkan/vk_resource_manager.cpp36
-rw-r--r--src/video_core/renderer_vulkan/vk_resource_manager.h16
-rw-r--r--src/video_core/renderer_vulkan/vk_shader_decompiler.cpp107
-rw-r--r--src/video_core/renderer_vulkan/vk_shader_decompiler.h4
-rw-r--r--src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp127
-rw-r--r--src/video_core/renderer_vulkan/vk_staging_buffer_pool.h83
16 files changed, 1232 insertions, 62 deletions
diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
new file mode 100644
index 000000000..5a490f6ef
--- /dev/null
+++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
@@ -0,0 +1,296 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <tuple>
+
+#include <boost/functional/hash.hpp>
+
+#include "common/common_types.h"
+#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
+
+namespace Vulkan {
+
+namespace {
+
+constexpr FixedPipelineState::DepthStencil GetDepthStencilState(const Maxwell& regs) {
+ const FixedPipelineState::StencilFace front_stencil(
+ regs.stencil_front_op_fail, regs.stencil_front_op_zfail, regs.stencil_front_op_zpass,
+ regs.stencil_front_func_func);
+ const FixedPipelineState::StencilFace back_stencil =
+ regs.stencil_two_side_enable
+ ? FixedPipelineState::StencilFace(regs.stencil_back_op_fail, regs.stencil_back_op_zfail,
+ regs.stencil_back_op_zpass,
+ regs.stencil_back_func_func)
+ : front_stencil;
+ return FixedPipelineState::DepthStencil(
+ regs.depth_test_enable == 1, regs.depth_write_enabled == 1, regs.depth_bounds_enable == 1,
+ regs.stencil_enable == 1, regs.depth_test_func, front_stencil, back_stencil);
+}
+
+constexpr FixedPipelineState::InputAssembly GetInputAssemblyState(const Maxwell& regs) {
+ return FixedPipelineState::InputAssembly(
+ regs.draw.topology, regs.primitive_restart.enabled,
+ regs.draw.topology == Maxwell::PrimitiveTopology::Points ? regs.point_size : 0.0f);
+}
+
+constexpr FixedPipelineState::BlendingAttachment GetBlendingAttachmentState(
+ const Maxwell& regs, std::size_t render_target) {
+ const auto& mask = regs.color_mask[regs.color_mask_common ? 0 : render_target];
+ const std::array components = {mask.R != 0, mask.G != 0, mask.B != 0, mask.A != 0};
+
+ const FixedPipelineState::BlendingAttachment default_blending(
+ false, Maxwell::Blend::Equation::Add, Maxwell::Blend::Factor::One,
+ Maxwell::Blend::Factor::Zero, Maxwell::Blend::Equation::Add, Maxwell::Blend::Factor::One,
+ Maxwell::Blend::Factor::Zero, components);
+ if (render_target >= regs.rt_control.count) {
+ return default_blending;
+ }
+
+ if (!regs.independent_blend_enable) {
+ const auto& src = regs.blend;
+ if (!src.enable[render_target]) {
+ return default_blending;
+ }
+ return FixedPipelineState::BlendingAttachment(
+ true, src.equation_rgb, src.factor_source_rgb, src.factor_dest_rgb, src.equation_a,
+ src.factor_source_a, src.factor_dest_a, components);
+ }
+
+ if (!regs.blend.enable[render_target]) {
+ return default_blending;
+ }
+ const auto& src = regs.independent_blend[render_target];
+ return FixedPipelineState::BlendingAttachment(
+ true, src.equation_rgb, src.factor_source_rgb, src.factor_dest_rgb, src.equation_a,
+ src.factor_source_a, src.factor_dest_a, components);
+}
+
+constexpr FixedPipelineState::ColorBlending GetColorBlendingState(const Maxwell& regs) {
+ return FixedPipelineState::ColorBlending(
+ {regs.blend_color.r, regs.blend_color.g, regs.blend_color.b, regs.blend_color.a},
+ regs.rt_control.count,
+ {GetBlendingAttachmentState(regs, 0), GetBlendingAttachmentState(regs, 1),
+ GetBlendingAttachmentState(regs, 2), GetBlendingAttachmentState(regs, 3),
+ GetBlendingAttachmentState(regs, 4), GetBlendingAttachmentState(regs, 5),
+ GetBlendingAttachmentState(regs, 6), GetBlendingAttachmentState(regs, 7)});
+}
+
+constexpr FixedPipelineState::Tessellation GetTessellationState(const Maxwell& regs) {
+ return FixedPipelineState::Tessellation(regs.patch_vertices, regs.tess_mode.prim,
+ regs.tess_mode.spacing, regs.tess_mode.cw != 0);
+}
+
+constexpr std::size_t Point = 0;
+constexpr std::size_t Line = 1;
+constexpr std::size_t Polygon = 2;
+constexpr std::array PolygonOffsetEnableLUT = {
+ Point, // Points
+ Line, // Lines
+ Line, // LineLoop
+ Line, // LineStrip
+ Polygon, // Triangles
+ Polygon, // TriangleStrip
+ Polygon, // TriangleFan
+ Polygon, // Quads
+ Polygon, // QuadStrip
+ Polygon, // Polygon
+ Line, // LinesAdjacency
+ Line, // LineStripAdjacency
+ Polygon, // TrianglesAdjacency
+ Polygon, // TriangleStripAdjacency
+ Polygon, // Patches
+};
+
+constexpr FixedPipelineState::Rasterizer GetRasterizerState(const Maxwell& regs) {
+ const std::array enabled_lut = {regs.polygon_offset_point_enable,
+ regs.polygon_offset_line_enable,
+ regs.polygon_offset_fill_enable};
+ const auto topology = static_cast<std::size_t>(regs.draw.topology.Value());
+ const bool depth_bias_enabled = enabled_lut[PolygonOffsetEnableLUT[topology]];
+
+ Maxwell::Cull::FrontFace front_face = regs.cull.front_face;
+ if (regs.screen_y_control.triangle_rast_flip != 0 &&
+ regs.viewport_transform[0].scale_y > 0.0f) {
+ if (front_face == Maxwell::Cull::FrontFace::CounterClockWise)
+ front_face = Maxwell::Cull::FrontFace::ClockWise;
+ else if (front_face == Maxwell::Cull::FrontFace::ClockWise)
+ front_face = Maxwell::Cull::FrontFace::CounterClockWise;
+ }
+
+ const bool gl_ndc = regs.depth_mode == Maxwell::DepthMode::MinusOneToOne;
+ return FixedPipelineState::Rasterizer(regs.cull.enabled, depth_bias_enabled, gl_ndc,
+ regs.cull.cull_face, front_face);
+}
+
+} // Anonymous namespace
+
+std::size_t FixedPipelineState::VertexBinding::Hash() const noexcept {
+ return (index << stride) ^ divisor;
+}
+
+bool FixedPipelineState::VertexBinding::operator==(const VertexBinding& rhs) const noexcept {
+ return std::tie(index, stride, divisor) == std::tie(rhs.index, rhs.stride, rhs.divisor);
+}
+
+std::size_t FixedPipelineState::VertexAttribute::Hash() const noexcept {
+ return static_cast<std::size_t>(index) ^ (static_cast<std::size_t>(buffer) << 13) ^
+ (static_cast<std::size_t>(type) << 22) ^ (static_cast<std::size_t>(size) << 31) ^
+ (static_cast<std::size_t>(offset) << 36);
+}
+
+bool FixedPipelineState::VertexAttribute::operator==(const VertexAttribute& rhs) const noexcept {
+ return std::tie(index, buffer, type, size, offset) ==
+ std::tie(rhs.index, rhs.buffer, rhs.type, rhs.size, rhs.offset);
+}
+
+std::size_t FixedPipelineState::StencilFace::Hash() const noexcept {
+ return static_cast<std::size_t>(action_stencil_fail) ^
+ (static_cast<std::size_t>(action_depth_fail) << 4) ^
+ (static_cast<std::size_t>(action_depth_fail) << 20) ^
+ (static_cast<std::size_t>(action_depth_pass) << 36);
+}
+
+bool FixedPipelineState::StencilFace::operator==(const StencilFace& rhs) const noexcept {
+ return std::tie(action_stencil_fail, action_depth_fail, action_depth_pass, test_func) ==
+ std::tie(rhs.action_stencil_fail, rhs.action_depth_fail, rhs.action_depth_pass,
+ rhs.test_func);
+}
+
+std::size_t FixedPipelineState::BlendingAttachment::Hash() const noexcept {
+ return static_cast<std::size_t>(enable) ^ (static_cast<std::size_t>(rgb_equation) << 5) ^
+ (static_cast<std::size_t>(src_rgb_func) << 10) ^
+ (static_cast<std::size_t>(dst_rgb_func) << 15) ^
+ (static_cast<std::size_t>(a_equation) << 20) ^
+ (static_cast<std::size_t>(src_a_func) << 25) ^
+ (static_cast<std::size_t>(dst_a_func) << 30) ^
+ (static_cast<std::size_t>(components[0]) << 35) ^
+ (static_cast<std::size_t>(components[1]) << 36) ^
+ (static_cast<std::size_t>(components[2]) << 37) ^
+ (static_cast<std::size_t>(components[3]) << 38);
+}
+
+bool FixedPipelineState::BlendingAttachment::operator==(const BlendingAttachment& rhs) const
+ noexcept {
+ return std::tie(enable, rgb_equation, src_rgb_func, dst_rgb_func, a_equation, src_a_func,
+ dst_a_func, components) ==
+ std::tie(rhs.enable, rhs.rgb_equation, rhs.src_rgb_func, rhs.dst_rgb_func,
+ rhs.a_equation, rhs.src_a_func, rhs.dst_a_func, rhs.components);
+}
+
+std::size_t FixedPipelineState::VertexInput::Hash() const noexcept {
+ std::size_t hash = num_bindings ^ (num_attributes << 32);
+ for (std::size_t i = 0; i < num_bindings; ++i) {
+ boost::hash_combine(hash, bindings[i].Hash());
+ }
+ for (std::size_t i = 0; i < num_attributes; ++i) {
+ boost::hash_combine(hash, attributes[i].Hash());
+ }
+ return hash;
+}
+
+bool FixedPipelineState::VertexInput::operator==(const VertexInput& rhs) const noexcept {
+ return std::equal(bindings.begin(), bindings.begin() + num_bindings, rhs.bindings.begin(),
+ rhs.bindings.begin() + rhs.num_bindings) &&
+ std::equal(attributes.begin(), attributes.begin() + num_attributes,
+ rhs.attributes.begin(), rhs.attributes.begin() + rhs.num_attributes);
+}
+
+std::size_t FixedPipelineState::InputAssembly::Hash() const noexcept {
+ std::size_t point_size_int = 0;
+ std::memcpy(&point_size_int, &point_size, sizeof(point_size));
+ return (static_cast<std::size_t>(topology) << 24) ^ (point_size_int << 32) ^
+ static_cast<std::size_t>(primitive_restart_enable);
+}
+
+bool FixedPipelineState::InputAssembly::operator==(const InputAssembly& rhs) const noexcept {
+ return std::tie(topology, primitive_restart_enable, point_size) ==
+ std::tie(rhs.topology, rhs.primitive_restart_enable, rhs.point_size);
+}
+
+std::size_t FixedPipelineState::Tessellation::Hash() const noexcept {
+ return static_cast<std::size_t>(patch_control_points) ^
+ (static_cast<std::size_t>(primitive) << 6) ^ (static_cast<std::size_t>(spacing) << 8) ^
+ (static_cast<std::size_t>(clockwise) << 10);
+}
+
+bool FixedPipelineState::Tessellation::operator==(const Tessellation& rhs) const noexcept {
+ return std::tie(patch_control_points, primitive, spacing, clockwise) ==
+ std::tie(rhs.patch_control_points, rhs.primitive, rhs.spacing, rhs.clockwise);
+}
+
+std::size_t FixedPipelineState::Rasterizer::Hash() const noexcept {
+ return static_cast<std::size_t>(cull_enable) ^
+ (static_cast<std::size_t>(depth_bias_enable) << 1) ^
+ (static_cast<std::size_t>(ndc_minus_one_to_one) << 2) ^
+ (static_cast<std::size_t>(cull_face) << 24) ^
+ (static_cast<std::size_t>(front_face) << 48);
+}
+
+bool FixedPipelineState::Rasterizer::operator==(const Rasterizer& rhs) const noexcept {
+ return std::tie(cull_enable, depth_bias_enable, ndc_minus_one_to_one, cull_face, front_face) ==
+ std::tie(rhs.cull_enable, rhs.depth_bias_enable, rhs.ndc_minus_one_to_one, rhs.cull_face,
+ rhs.front_face);
+}
+
+std::size_t FixedPipelineState::DepthStencil::Hash() const noexcept {
+ std::size_t hash = static_cast<std::size_t>(depth_test_enable) ^
+ (static_cast<std::size_t>(depth_write_enable) << 1) ^
+ (static_cast<std::size_t>(depth_bounds_enable) << 2) ^
+ (static_cast<std::size_t>(stencil_enable) << 3) ^
+ (static_cast<std::size_t>(depth_test_function) << 4);
+ boost::hash_combine(hash, front_stencil.Hash());
+ boost::hash_combine(hash, back_stencil.Hash());
+ return hash;
+}
+
+bool FixedPipelineState::DepthStencil::operator==(const DepthStencil& rhs) const noexcept {
+ return std::tie(depth_test_enable, depth_write_enable, depth_bounds_enable, depth_test_function,
+ stencil_enable, front_stencil, back_stencil) ==
+ std::tie(rhs.depth_test_enable, rhs.depth_write_enable, rhs.depth_bounds_enable,
+ rhs.depth_test_function, rhs.stencil_enable, rhs.front_stencil,
+ rhs.back_stencil);
+}
+
+std::size_t FixedPipelineState::ColorBlending::Hash() const noexcept {
+ std::size_t hash = attachments_count << 13;
+ for (std::size_t rt = 0; rt < static_cast<std::size_t>(attachments_count); ++rt) {
+ boost::hash_combine(hash, attachments[rt].Hash());
+ }
+ return hash;
+}
+
+bool FixedPipelineState::ColorBlending::operator==(const ColorBlending& rhs) const noexcept {
+ return std::equal(attachments.begin(), attachments.begin() + attachments_count,
+ rhs.attachments.begin(), rhs.attachments.begin() + rhs.attachments_count);
+}
+
+std::size_t FixedPipelineState::Hash() const noexcept {
+ std::size_t hash = 0;
+ boost::hash_combine(hash, vertex_input.Hash());
+ boost::hash_combine(hash, input_assembly.Hash());
+ boost::hash_combine(hash, tessellation.Hash());
+ boost::hash_combine(hash, rasterizer.Hash());
+ boost::hash_combine(hash, depth_stencil.Hash());
+ boost::hash_combine(hash, color_blending.Hash());
+ return hash;
+}
+
+bool FixedPipelineState::operator==(const FixedPipelineState& rhs) const noexcept {
+ return std::tie(vertex_input, input_assembly, tessellation, rasterizer, depth_stencil,
+ color_blending) == std::tie(rhs.vertex_input, rhs.input_assembly,
+ rhs.tessellation, rhs.rasterizer, rhs.depth_stencil,
+ rhs.color_blending);
+}
+
+FixedPipelineState GetFixedPipelineState(const Maxwell& regs) {
+ FixedPipelineState fixed_state;
+ fixed_state.input_assembly = GetInputAssemblyState(regs);
+ fixed_state.tessellation = GetTessellationState(regs);
+ fixed_state.rasterizer = GetRasterizerState(regs);
+ fixed_state.depth_stencil = GetDepthStencilState(regs);
+ fixed_state.color_blending = GetColorBlendingState(regs);
+ return fixed_state;
+}
+
+} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.h b/src/video_core/renderer_vulkan/fixed_pipeline_state.h
new file mode 100644
index 000000000..04152c0d4
--- /dev/null
+++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.h
@@ -0,0 +1,282 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <array>
+#include <type_traits>
+
+#include "common/common_types.h"
+
+#include "video_core/engines/maxwell_3d.h"
+#include "video_core/surface.h"
+
+namespace Vulkan {
+
+using Maxwell = Tegra::Engines::Maxwell3D::Regs;
+
+// TODO(Rodrigo): Optimize this structure.
+
+struct FixedPipelineState {
+ using PixelFormat = VideoCore::Surface::PixelFormat;
+
+ struct VertexBinding {
+ constexpr VertexBinding(u32 index, u32 stride, u32 divisor)
+ : index{index}, stride{stride}, divisor{divisor} {}
+ VertexBinding() = default;
+
+ u32 index;
+ u32 stride;
+ u32 divisor;
+
+ std::size_t Hash() const noexcept;
+
+ bool operator==(const VertexBinding& rhs) const noexcept;
+
+ bool operator!=(const VertexBinding& rhs) const noexcept {
+ return !operator==(rhs);
+ }
+ };
+
+ struct VertexAttribute {
+ constexpr VertexAttribute(u32 index, u32 buffer, Maxwell::VertexAttribute::Type type,
+ Maxwell::VertexAttribute::Size size, u32 offset)
+ : index{index}, buffer{buffer}, type{type}, size{size}, offset{offset} {}
+ VertexAttribute() = default;
+
+ u32 index;
+ u32 buffer;
+ Maxwell::VertexAttribute::Type type;
+ Maxwell::VertexAttribute::Size size;
+ u32 offset;
+
+ std::size_t Hash() const noexcept;
+
+ bool operator==(const VertexAttribute& rhs) const noexcept;
+
+ bool operator!=(const VertexAttribute& rhs) const noexcept {
+ return !operator==(rhs);
+ }
+ };
+
+ struct StencilFace {
+ constexpr StencilFace(Maxwell::StencilOp action_stencil_fail,
+ Maxwell::StencilOp action_depth_fail,
+ Maxwell::StencilOp action_depth_pass, Maxwell::ComparisonOp test_func)
+ : action_stencil_fail{action_stencil_fail}, action_depth_fail{action_depth_fail},
+ action_depth_pass{action_depth_pass}, test_func{test_func} {}
+ StencilFace() = default;
+
+ Maxwell::StencilOp action_stencil_fail;
+ Maxwell::StencilOp action_depth_fail;
+ Maxwell::StencilOp action_depth_pass;
+ Maxwell::ComparisonOp test_func;
+
+ std::size_t Hash() const noexcept;
+
+ bool operator==(const StencilFace& rhs) const noexcept;
+
+ bool operator!=(const StencilFace& rhs) const noexcept {
+ return !operator==(rhs);
+ }
+ };
+
+ struct BlendingAttachment {
+ constexpr BlendingAttachment(bool enable, Maxwell::Blend::Equation rgb_equation,
+ Maxwell::Blend::Factor src_rgb_func,
+ Maxwell::Blend::Factor dst_rgb_func,
+ Maxwell::Blend::Equation a_equation,
+ Maxwell::Blend::Factor src_a_func,
+ Maxwell::Blend::Factor dst_a_func,
+ std::array<bool, 4> components)
+ : enable{enable}, rgb_equation{rgb_equation}, src_rgb_func{src_rgb_func},
+ dst_rgb_func{dst_rgb_func}, a_equation{a_equation}, src_a_func{src_a_func},
+ dst_a_func{dst_a_func}, components{components} {}
+ BlendingAttachment() = default;
+
+ bool enable;
+ Maxwell::Blend::Equation rgb_equation;
+ Maxwell::Blend::Factor src_rgb_func;
+ Maxwell::Blend::Factor dst_rgb_func;
+ Maxwell::Blend::Equation a_equation;
+ Maxwell::Blend::Factor src_a_func;
+ Maxwell::Blend::Factor dst_a_func;
+ std::array<bool, 4> components;
+
+ std::size_t Hash() const noexcept;
+
+ bool operator==(const BlendingAttachment& rhs) const noexcept;
+
+ bool operator!=(const BlendingAttachment& rhs) const noexcept {
+ return !operator==(rhs);
+ }
+ };
+
+ struct VertexInput {
+ std::size_t num_bindings = 0;
+ std::size_t num_attributes = 0;
+ std::array<VertexBinding, Maxwell::NumVertexArrays> bindings;
+ std::array<VertexAttribute, Maxwell::NumVertexAttributes> attributes;
+
+ std::size_t Hash() const noexcept;
+
+ bool operator==(const VertexInput& rhs) const noexcept;
+
+ bool operator!=(const VertexInput& rhs) const noexcept {
+ return !operator==(rhs);
+ }
+ };
+
+ struct InputAssembly {
+ constexpr InputAssembly(Maxwell::PrimitiveTopology topology, bool primitive_restart_enable,
+ float point_size)
+ : topology{topology}, primitive_restart_enable{primitive_restart_enable},
+ point_size{point_size} {}
+ InputAssembly() = default;
+
+ Maxwell::PrimitiveTopology topology;
+ bool primitive_restart_enable;
+ float point_size;
+
+ std::size_t Hash() const noexcept;
+
+ bool operator==(const InputAssembly& rhs) const noexcept;
+
+ bool operator!=(const InputAssembly& rhs) const noexcept {
+ return !operator==(rhs);
+ }
+ };
+
+ struct Tessellation {
+ constexpr Tessellation(u32 patch_control_points, Maxwell::TessellationPrimitive primitive,
+ Maxwell::TessellationSpacing spacing, bool clockwise)
+ : patch_control_points{patch_control_points}, primitive{primitive}, spacing{spacing},
+ clockwise{clockwise} {}
+ Tessellation() = default;
+
+ u32 patch_control_points;
+ Maxwell::TessellationPrimitive primitive;
+ Maxwell::TessellationSpacing spacing;
+ bool clockwise;
+
+ std::size_t Hash() const noexcept;
+
+ bool operator==(const Tessellation& rhs) const noexcept;
+
+ bool operator!=(const Tessellation& rhs) const noexcept {
+ return !operator==(rhs);
+ }
+ };
+
+ struct Rasterizer {
+ constexpr Rasterizer(bool cull_enable, bool depth_bias_enable, bool ndc_minus_one_to_one,
+ Maxwell::Cull::CullFace cull_face, Maxwell::Cull::FrontFace front_face)
+ : cull_enable{cull_enable}, depth_bias_enable{depth_bias_enable},
+ ndc_minus_one_to_one{ndc_minus_one_to_one}, cull_face{cull_face}, front_face{
+ front_face} {}
+ Rasterizer() = default;
+
+ bool cull_enable;
+ bool depth_bias_enable;
+ bool ndc_minus_one_to_one;
+ Maxwell::Cull::CullFace cull_face;
+ Maxwell::Cull::FrontFace front_face;
+
+ std::size_t Hash() const noexcept;
+
+ bool operator==(const Rasterizer& rhs) const noexcept;
+
+ bool operator!=(const Rasterizer& rhs) const noexcept {
+ return !operator==(rhs);
+ }
+ };
+
+ struct DepthStencil {
+ constexpr DepthStencil(bool depth_test_enable, bool depth_write_enable,
+ bool depth_bounds_enable, bool stencil_enable,
+ Maxwell::ComparisonOp depth_test_function, StencilFace front_stencil,
+ StencilFace back_stencil)
+ : depth_test_enable{depth_test_enable}, depth_write_enable{depth_write_enable},
+ depth_bounds_enable{depth_bounds_enable}, stencil_enable{stencil_enable},
+ depth_test_function{depth_test_function}, front_stencil{front_stencil},
+ back_stencil{back_stencil} {}
+ DepthStencil() = default;
+
+ bool depth_test_enable;
+ bool depth_write_enable;
+ bool depth_bounds_enable;
+ bool stencil_enable;
+ Maxwell::ComparisonOp depth_test_function;
+ StencilFace front_stencil;
+ StencilFace back_stencil;
+
+ std::size_t Hash() const noexcept;
+
+ bool operator==(const DepthStencil& rhs) const noexcept;
+
+ bool operator!=(const DepthStencil& rhs) const noexcept {
+ return !operator==(rhs);
+ }
+ };
+
+ struct ColorBlending {
+ constexpr ColorBlending(
+ std::array<float, 4> blend_constants, std::size_t attachments_count,
+ std::array<BlendingAttachment, Maxwell::NumRenderTargets> attachments)
+ : attachments_count{attachments_count}, attachments{attachments} {}
+ ColorBlending() = default;
+
+ std::size_t attachments_count;
+ std::array<BlendingAttachment, Maxwell::NumRenderTargets> attachments;
+
+ std::size_t Hash() const noexcept;
+
+ bool operator==(const ColorBlending& rhs) const noexcept;
+
+ bool operator!=(const ColorBlending& rhs) const noexcept {
+ return !operator==(rhs);
+ }
+ };
+
+ std::size_t Hash() const noexcept;
+
+ bool operator==(const FixedPipelineState& rhs) const noexcept;
+
+ bool operator!=(const FixedPipelineState& rhs) const noexcept {
+ return !operator==(rhs);
+ }
+
+ VertexInput vertex_input;
+ InputAssembly input_assembly;
+ Tessellation tessellation;
+ Rasterizer rasterizer;
+ DepthStencil depth_stencil;
+ ColorBlending color_blending;
+};
+static_assert(std::is_trivially_copyable_v<FixedPipelineState::VertexBinding>);
+static_assert(std::is_trivially_copyable_v<FixedPipelineState::VertexAttribute>);
+static_assert(std::is_trivially_copyable_v<FixedPipelineState::StencilFace>);
+static_assert(std::is_trivially_copyable_v<FixedPipelineState::BlendingAttachment>);
+static_assert(std::is_trivially_copyable_v<FixedPipelineState::VertexInput>);
+static_assert(std::is_trivially_copyable_v<FixedPipelineState::InputAssembly>);
+static_assert(std::is_trivially_copyable_v<FixedPipelineState::Tessellation>);
+static_assert(std::is_trivially_copyable_v<FixedPipelineState::Rasterizer>);
+static_assert(std::is_trivially_copyable_v<FixedPipelineState::DepthStencil>);
+static_assert(std::is_trivially_copyable_v<FixedPipelineState::ColorBlending>);
+static_assert(std::is_trivially_copyable_v<FixedPipelineState>);
+
+FixedPipelineState GetFixedPipelineState(const Maxwell& regs);
+
+} // namespace Vulkan
+
+namespace std {
+
+template <>
+struct hash<Vulkan::FixedPipelineState> {
+ std::size_t operator()(const Vulkan::FixedPipelineState& k) const noexcept {
+ return k.Hash();
+ }
+};
+
+} // namespace std
diff --git a/src/video_core/renderer_vulkan/shaders/blit.frag b/src/video_core/renderer_vulkan/shaders/blit.frag
new file mode 100644
index 000000000..a06ecd24a
--- /dev/null
+++ b/src/video_core/renderer_vulkan/shaders/blit.frag
@@ -0,0 +1,24 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+/*
+ * Build instructions:
+ * $ glslangValidator -V $THIS_FILE -o output.spv
+ * $ spirv-opt -O --strip-debug output.spv -o optimized.spv
+ * $ xxd -i optimized.spv
+ *
+ * Then copy that bytecode to the C++ file
+ */
+
+#version 460 core
+
+layout (location = 0) in vec2 frag_tex_coord;
+
+layout (location = 0) out vec4 color;
+
+layout (binding = 1) uniform sampler2D color_texture;
+
+void main() {
+ color = texture(color_texture, frag_tex_coord);
+}
diff --git a/src/video_core/renderer_vulkan/shaders/blit.vert b/src/video_core/renderer_vulkan/shaders/blit.vert
new file mode 100644
index 000000000..c64d9235a
--- /dev/null
+++ b/src/video_core/renderer_vulkan/shaders/blit.vert
@@ -0,0 +1,28 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+/*
+ * Build instructions:
+ * $ glslangValidator -V $THIS_FILE -o output.spv
+ * $ spirv-opt -O --strip-debug output.spv -o optimized.spv
+ * $ xxd -i optimized.spv
+ *
+ * Then copy that bytecode to the C++ file
+ */
+
+#version 460 core
+
+layout (location = 0) in vec2 vert_position;
+layout (location = 1) in vec2 vert_tex_coord;
+
+layout (location = 0) out vec2 frag_tex_coord;
+
+layout (set = 0, binding = 0) uniform MatrixBlock {
+ mat4 modelview_matrix;
+};
+
+void main() {
+ gl_Position = modelview_matrix * vec4(vert_position, 0.0, 1.0);
+ frag_tex_coord = vert_tex_coord;
+}
diff --git a/src/video_core/renderer_vulkan/shaders/quad_array.comp b/src/video_core/renderer_vulkan/shaders/quad_array.comp
new file mode 100644
index 000000000..5a5703308
--- /dev/null
+++ b/src/video_core/renderer_vulkan/shaders/quad_array.comp
@@ -0,0 +1,37 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+/*
+ * Build instructions:
+ * $ glslangValidator -V $THIS_FILE -o output.spv
+ * $ spirv-opt -O --strip-debug output.spv -o optimized.spv
+ * $ xxd -i optimized.spv
+ *
+ * Then copy that bytecode to the C++ file
+ */
+
+#version 460 core
+
+layout (local_size_x = 1024) in;
+
+layout (std430, set = 0, binding = 0) buffer OutputBuffer {
+ uint output_indexes[];
+};
+
+layout (push_constant) uniform PushConstants {
+ uint first;
+};
+
+void main() {
+ uint primitive = gl_GlobalInvocationID.x;
+ if (primitive * 6 >= output_indexes.length()) {
+ return;
+ }
+
+ const uint quad_map[6] = uint[](0, 1, 2, 0, 2, 3);
+ for (uint vertex = 0; vertex < 6; ++vertex) {
+ uint index = first + primitive * 4 + quad_map[vertex];
+ output_indexes[primitive * 6 + vertex] = index;
+ }
+}
diff --git a/src/video_core/renderer_vulkan/shaders/uint8.comp b/src/video_core/renderer_vulkan/shaders/uint8.comp
new file mode 100644
index 000000000..a320f3ae0
--- /dev/null
+++ b/src/video_core/renderer_vulkan/shaders/uint8.comp
@@ -0,0 +1,33 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+/*
+ * Build instructions:
+ * $ glslangValidator -V $THIS_FILE -o output.spv
+ * $ spirv-opt -O --strip-debug output.spv -o optimized.spv
+ * $ xxd -i optimized.spv
+ *
+ * Then copy that bytecode to the C++ file
+ */
+
+#version 460 core
+#extension GL_EXT_shader_16bit_storage : require
+#extension GL_EXT_shader_8bit_storage : require
+
+layout (local_size_x = 1024) in;
+
+layout (std430, set = 0, binding = 0) readonly buffer InputBuffer {
+ uint8_t input_indexes[];
+};
+
+layout (std430, set = 0, binding = 1) writeonly buffer OutputBuffer {
+ uint16_t output_indexes[];
+};
+
+void main() {
+ uint id = gl_GlobalInvocationID.x;
+ if (id < input_indexes.length()) {
+ output_indexes[id] = uint16_t(input_indexes[id]);
+ }
+}
diff --git a/src/video_core/renderer_vulkan/vk_device.cpp b/src/video_core/renderer_vulkan/vk_device.cpp
index 92854a4b3..939eebe83 100644
--- a/src/video_core/renderer_vulkan/vk_device.cpp
+++ b/src/video_core/renderer_vulkan/vk_device.cpp
@@ -3,12 +3,15 @@
// Refer to the license.txt file included.
#include <bitset>
+#include <chrono>
#include <cstdlib>
#include <optional>
#include <set>
#include <string_view>
+#include <thread>
#include <vector>
#include "common/assert.h"
+#include "core/settings.h"
#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/vk_device.h"
@@ -201,6 +204,22 @@ vk::Format VKDevice::GetSupportedFormat(vk::Format wanted_format,
return wanted_format;
}
+void VKDevice::ReportLoss() const {
+ LOG_CRITICAL(Render_Vulkan, "Device loss occurred!");
+
+ // Wait some time to let the log flush
+ std::this_thread::sleep_for(std::chrono::seconds{1});
+
+ if (!nv_device_diagnostic_checkpoints) {
+ return;
+ }
+
+ [[maybe_unused]] const std::vector data = graphics_queue.getCheckpointDataNV(dld);
+ // Catch here in debug builds (or with optimizations disabled) the last graphics pipeline to be
+ // executed. It can be done on a debugger by evaluating the expression:
+ // *(VKGraphicsPipeline*)data[0]
+}
+
bool VKDevice::IsOptimalAstcSupported(const vk::PhysicalDeviceFeatures& features,
const vk::DispatchLoaderDynamic& dldi) const {
// Disable for now to avoid converting ASTC twice.
@@ -381,6 +400,8 @@ std::vector<const char*> VKDevice::LoadExtensions(const vk::DispatchLoaderDynami
VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME, true);
Test(extension, ext_subgroup_size_control, VK_EXT_SUBGROUP_SIZE_CONTROL_EXTENSION_NAME,
false);
+ Test(extension, nv_device_diagnostic_checkpoints,
+ VK_NV_DEVICE_DIAGNOSTIC_CHECKPOINTS_EXTENSION_NAME, true);
}
if (khr_shader_float16_int8) {
@@ -464,6 +485,7 @@ std::vector<vk::DeviceQueueCreateInfo> VKDevice::GetDeviceQueueCreateInfos() con
std::unordered_map<vk::Format, vk::FormatProperties> VKDevice::GetFormatProperties(
const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDevice physical) {
static constexpr std::array formats{vk::Format::eA8B8G8R8UnormPack32,
+ vk::Format::eA8B8G8R8UintPack32,
vk::Format::eA8B8G8R8SnormPack32,
vk::Format::eA8B8G8R8SrgbPack32,
vk::Format::eB5G6R5UnormPack16,
diff --git a/src/video_core/renderer_vulkan/vk_device.h b/src/video_core/renderer_vulkan/vk_device.h
index a844c52df..72603f9f6 100644
--- a/src/video_core/renderer_vulkan/vk_device.h
+++ b/src/video_core/renderer_vulkan/vk_device.h
@@ -39,6 +39,9 @@ public:
vk::Format GetSupportedFormat(vk::Format wanted_format, vk::FormatFeatureFlags wanted_usage,
FormatType format_type) const;
+ /// Reports a device loss.
+ void ReportLoss() const;
+
/// Returns the dispatch loader with direct function pointers of the device.
const vk::DispatchLoaderDynamic& GetDispatchLoader() const {
return dld;
@@ -159,6 +162,11 @@ public:
return ext_shader_viewport_index_layer;
}
+ /// Returns true if the device supports VK_NV_device_diagnostic_checkpoints.
+ bool IsNvDeviceDiagnosticCheckpoints() const {
+ return nv_device_diagnostic_checkpoints;
+ }
+
/// Returns the vendor name reported from Vulkan.
std::string_view GetVendorName() const {
return vendor_name;
@@ -218,6 +226,7 @@ private:
bool ext_index_type_uint8{}; ///< Support for VK_EXT_index_type_uint8.
bool ext_depth_range_unrestricted{}; ///< Support for VK_EXT_depth_range_unrestricted.
bool ext_shader_viewport_index_layer{}; ///< Support for VK_EXT_shader_viewport_index_layer.
+ bool nv_device_diagnostic_checkpoints{}; ///< Support for VK_NV_device_diagnostic_checkpoints.
// Telemetry parameters
std::string vendor_name; ///< Device's driver name.
diff --git a/src/video_core/renderer_vulkan/vk_image.cpp b/src/video_core/renderer_vulkan/vk_image.cpp
new file mode 100644
index 000000000..4bcbef959
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_image.cpp
@@ -0,0 +1,106 @@
+// Copyright 2018 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <memory>
+#include <vector>
+
+#include "common/assert.h"
+#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/vk_device.h"
+#include "video_core/renderer_vulkan/vk_image.h"
+#include "video_core/renderer_vulkan/vk_scheduler.h"
+
+namespace Vulkan {
+
+VKImage::VKImage(const VKDevice& device, VKScheduler& scheduler,
+ const vk::ImageCreateInfo& image_ci, vk::ImageAspectFlags aspect_mask)
+ : device{device}, scheduler{scheduler}, format{image_ci.format}, aspect_mask{aspect_mask},
+ image_num_layers{image_ci.arrayLayers}, image_num_levels{image_ci.mipLevels} {
+ UNIMPLEMENTED_IF_MSG(image_ci.queueFamilyIndexCount != 0,
+ "Queue family tracking is not implemented");
+
+ const auto dev = device.GetLogical();
+ image = dev.createImageUnique(image_ci, nullptr, device.GetDispatchLoader());
+
+ const u32 num_ranges = image_num_layers * image_num_levels;
+ barriers.resize(num_ranges);
+ subrange_states.resize(num_ranges, {{}, image_ci.initialLayout});
+}
+
+VKImage::~VKImage() = default;
+
+void VKImage::Transition(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels,
+ vk::PipelineStageFlags new_stage_mask, vk::AccessFlags new_access,
+ vk::ImageLayout new_layout) {
+ if (!HasChanged(base_layer, num_layers, base_level, num_levels, new_access, new_layout)) {
+ return;
+ }
+
+ std::size_t cursor = 0;
+ for (u32 layer_it = 0; layer_it < num_layers; ++layer_it) {
+ for (u32 level_it = 0; level_it < num_levels; ++level_it, ++cursor) {
+ const u32 layer = base_layer + layer_it;
+ const u32 level = base_level + level_it;
+ auto& state = GetSubrangeState(layer, level);
+ barriers[cursor] = vk::ImageMemoryBarrier(
+ state.access, new_access, state.layout, new_layout, VK_QUEUE_FAMILY_IGNORED,
+ VK_QUEUE_FAMILY_IGNORED, *image, {aspect_mask, level, 1, layer, 1});
+ state.access = new_access;
+ state.layout = new_layout;
+ }
+ }
+
+ scheduler.RequestOutsideRenderPassOperationContext();
+
+ scheduler.Record([barriers = barriers, cursor](auto cmdbuf, auto& dld) {
+ // TODO(Rodrigo): Implement a way to use the latest stage across subresources.
+ constexpr auto stage_stub = vk::PipelineStageFlagBits::eAllCommands;
+ cmdbuf.pipelineBarrier(stage_stub, stage_stub, {}, 0, nullptr, 0, nullptr,
+ static_cast<u32>(cursor), barriers.data(), dld);
+ });
+}
+
+bool VKImage::HasChanged(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels,
+ vk::AccessFlags new_access, vk::ImageLayout new_layout) noexcept {
+ const bool is_full_range = base_layer == 0 && num_layers == image_num_layers &&
+ base_level == 0 && num_levels == image_num_levels;
+ if (!is_full_range) {
+ state_diverged = true;
+ }
+
+ if (!state_diverged) {
+ auto& state = GetSubrangeState(0, 0);
+ if (state.access != new_access || state.layout != new_layout) {
+ return true;
+ }
+ }
+
+ for (u32 layer_it = 0; layer_it < num_layers; ++layer_it) {
+ for (u32 level_it = 0; level_it < num_levels; ++level_it) {
+ const u32 layer = base_layer + layer_it;
+ const u32 level = base_level + level_it;
+ auto& state = GetSubrangeState(layer, level);
+ if (state.access != new_access || state.layout != new_layout) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+void VKImage::CreatePresentView() {
+ // Image type has to be 2D to be presented.
+ const vk::ImageViewCreateInfo image_view_ci({}, *image, vk::ImageViewType::e2D, format, {},
+ {aspect_mask, 0, 1, 0, 1});
+ const auto dev = device.GetLogical();
+ const auto& dld = device.GetDispatchLoader();
+ present_view = dev.createImageViewUnique(image_view_ci, nullptr, dld);
+}
+
+VKImage::SubrangeState& VKImage::GetSubrangeState(u32 layer, u32 level) noexcept {
+ return subrange_states[static_cast<std::size_t>(layer * image_num_levels) +
+ static_cast<std::size_t>(level)];
+}
+
+} // namespace Vulkan \ No newline at end of file
diff --git a/src/video_core/renderer_vulkan/vk_image.h b/src/video_core/renderer_vulkan/vk_image.h
new file mode 100644
index 000000000..b78242512
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_image.h
@@ -0,0 +1,84 @@
+// Copyright 2018 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <memory>
+#include <vector>
+
+#include "common/common_types.h"
+#include "video_core/renderer_vulkan/declarations.h"
+
+namespace Vulkan {
+
+class VKDevice;
+class VKScheduler;
+
+class VKImage {
+public:
+ explicit VKImage(const VKDevice& device, VKScheduler& scheduler,
+ const vk::ImageCreateInfo& image_ci, vk::ImageAspectFlags aspect_mask);
+ ~VKImage();
+
+ /// Records in the passed command buffer an image transition and updates the state of the image.
+ void Transition(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels,
+ vk::PipelineStageFlags new_stage_mask, vk::AccessFlags new_access,
+ vk::ImageLayout new_layout);
+
+ /// Returns a view compatible with presentation; the image has to be 2D.
+ vk::ImageView GetPresentView() {
+ if (!present_view) {
+ CreatePresentView();
+ }
+ return *present_view;
+ }
+
+ /// Returns the Vulkan image handler.
+ vk::Image GetHandle() const {
+ return *image;
+ }
+
+ /// Returns the Vulkan format for this image.
+ vk::Format GetFormat() const {
+ return format;
+ }
+
+ /// Returns the Vulkan aspect mask.
+ vk::ImageAspectFlags GetAspectMask() const {
+ return aspect_mask;
+ }
+
+private:
+ struct SubrangeState final {
+ vk::AccessFlags access{}; ///< Current access bits.
+ vk::ImageLayout layout = vk::ImageLayout::eUndefined; ///< Current image layout.
+ };
+
+ bool HasChanged(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels,
+ vk::AccessFlags new_access, vk::ImageLayout new_layout) noexcept;
+
+ /// Creates a presentation view.
+ void CreatePresentView();
+
+ /// Returns the subrange state for a layer and level.
+ SubrangeState& GetSubrangeState(u32 layer, u32 level) noexcept;
+
+ const VKDevice& device; ///< Device handler.
+ VKScheduler& scheduler; ///< Device scheduler.
+
+ const vk::Format format; ///< Vulkan format.
+ const vk::ImageAspectFlags aspect_mask; ///< Vulkan aspect mask.
+ const u32 image_num_layers; ///< Number of layers.
+ const u32 image_num_levels; ///< Number of mipmap levels.
+
+ UniqueImage image; ///< Image handle.
+ UniqueImageView present_view; ///< Image view compatible with presentation.
+
+ std::vector<vk::ImageMemoryBarrier> barriers; ///< Pool of barriers.
+ std::vector<SubrangeState> subrange_states; ///< Current subrange state.
+
+ bool state_diverged = false; ///< True when subresources mismatch in layout.
+};
+
+} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_resource_manager.cpp b/src/video_core/renderer_vulkan/vk_resource_manager.cpp
index 13c46e5b8..525b4bb46 100644
--- a/src/video_core/renderer_vulkan/vk_resource_manager.cpp
+++ b/src/video_core/renderer_vulkan/vk_resource_manager.cpp
@@ -72,12 +72,22 @@ VKFence::VKFence(const VKDevice& device, UniqueFence handle)
VKFence::~VKFence() = default;
void VKFence::Wait() {
+ static constexpr u64 timeout = std::numeric_limits<u64>::max();
const auto dev = device.GetLogical();
const auto& dld = device.GetDispatchLoader();
- dev.waitForFences({*handle}, true, std::numeric_limits<u64>::max(), dld);
+ switch (const auto result = dev.waitForFences(1, &*handle, true, timeout, dld)) {
+ case vk::Result::eSuccess:
+ return;
+ case vk::Result::eErrorDeviceLost:
+ device.ReportLoss();
+ [[fallthrough]];
+ default:
+ vk::throwResultException(result, "vk::waitForFences");
+ }
}
void VKFence::Release() {
+ ASSERT(is_owned);
is_owned = false;
}
@@ -133,8 +143,32 @@ void VKFence::Unprotect(VKResource* resource) {
protected_resources.erase(it);
}
+void VKFence::RedirectProtection(VKResource* old_resource, VKResource* new_resource) noexcept {
+ std::replace(std::begin(protected_resources), std::end(protected_resources), old_resource,
+ new_resource);
+}
+
VKFenceWatch::VKFenceWatch() = default;
+VKFenceWatch::VKFenceWatch(VKFence& initial_fence) {
+ Watch(initial_fence);
+}
+
+VKFenceWatch::VKFenceWatch(VKFenceWatch&& rhs) noexcept {
+ fence = std::exchange(rhs.fence, nullptr);
+ if (fence) {
+ fence->RedirectProtection(&rhs, this);
+ }
+}
+
+VKFenceWatch& VKFenceWatch::operator=(VKFenceWatch&& rhs) noexcept {
+ fence = std::exchange(rhs.fence, nullptr);
+ if (fence) {
+ fence->RedirectProtection(&rhs, this);
+ }
+ return *this;
+}
+
VKFenceWatch::~VKFenceWatch() {
if (fence) {
fence->Unprotect(this);
diff --git a/src/video_core/renderer_vulkan/vk_resource_manager.h b/src/video_core/renderer_vulkan/vk_resource_manager.h
index 08ee86fa6..d4cbc95a5 100644
--- a/src/video_core/renderer_vulkan/vk_resource_manager.h
+++ b/src/video_core/renderer_vulkan/vk_resource_manager.h
@@ -65,6 +65,9 @@ public:
/// Removes protection for a resource.
void Unprotect(VKResource* resource);
+ /// Redirects one protected resource to a new address.
+ void RedirectProtection(VKResource* old_resource, VKResource* new_resource) noexcept;
+
/// Retreives the fence.
operator vk::Fence() const {
return *handle;
@@ -97,8 +100,13 @@ private:
class VKFenceWatch final : public VKResource {
public:
explicit VKFenceWatch();
+ VKFenceWatch(VKFence& initial_fence);
+ VKFenceWatch(VKFenceWatch&&) noexcept;
+ VKFenceWatch(const VKFenceWatch&) = delete;
~VKFenceWatch() override;
+ VKFenceWatch& operator=(VKFenceWatch&&) noexcept;
+
/// Waits for the fence to be released.
void Wait();
@@ -116,6 +124,14 @@ public:
void OnFenceRemoval(VKFence* signaling_fence) override;
+ /**
+ * Do not pair this with Watch; use TryWatch instead.
+ * Returns true while a fence is watching this resource (i.e. the watch is in use).
+ */
+ bool IsUsed() const {
+ return fence != nullptr;
+ }
+
private:
VKFence* fence{}; ///< Fence watching this resource. nullptr when the watch is free.
};
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
index 6227bc70b..a8baf91de 100644
--- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
+++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
@@ -543,7 +543,7 @@ private:
}
for (u32 rt = 0; rt < static_cast<u32>(frag_colors.size()); ++rt) {
- if (!IsRenderTargetUsed(rt)) {
+ if (!specialization.enabled_rendertargets[rt]) {
continue;
}
@@ -1555,40 +1555,48 @@ private:
Expression Texture(Operation operation) {
const auto& meta = std::get<MetaTexture>(operation.GetMeta());
- UNIMPLEMENTED_IF(!meta.aoffi.empty());
const bool can_implicit = stage == ShaderType::Fragment;
const Id sampler = GetTextureSampler(operation);
const Id coords = GetCoordinates(operation, Type::Float);
+ std::vector<Id> operands;
+ spv::ImageOperandsMask mask{};
+ if (meta.bias) {
+ mask = mask | spv::ImageOperandsMask::Bias;
+ operands.push_back(AsFloat(Visit(meta.bias)));
+ }
+
+ if (!can_implicit) {
+ mask = mask | spv::ImageOperandsMask::Lod;
+ operands.push_back(v_float_zero);
+ }
+
+ if (!meta.aoffi.empty()) {
+ mask = mask | spv::ImageOperandsMask::Offset;
+ operands.push_back(GetOffsetCoordinates(operation));
+ }
+
if (meta.depth_compare) {
// Depth sampling
UNIMPLEMENTED_IF(meta.bias);
const Id dref = AsFloat(Visit(meta.depth_compare));
if (can_implicit) {
- return {OpImageSampleDrefImplicitLod(t_float, sampler, coords, dref, {}),
- Type::Float};
+ return {
+ OpImageSampleDrefImplicitLod(t_float, sampler, coords, dref, mask, operands),
+ Type::Float};
} else {
- return {OpImageSampleDrefExplicitLod(t_float, sampler, coords, dref,
- spv::ImageOperandsMask::Lod, v_float_zero),
- Type::Float};
+ return {
+ OpImageSampleDrefExplicitLod(t_float, sampler, coords, dref, mask, operands),
+ Type::Float};
}
}
- std::vector<Id> operands;
- spv::ImageOperandsMask mask{};
- if (meta.bias) {
- mask = mask | spv::ImageOperandsMask::Bias;
- operands.push_back(AsFloat(Visit(meta.bias)));
- }
-
Id texture;
if (can_implicit) {
texture = OpImageSampleImplicitLod(t_float4, sampler, coords, mask, operands);
} else {
- texture = OpImageSampleExplicitLod(t_float4, sampler, coords,
- mask | spv::ImageOperandsMask::Lod, v_float_zero,
- operands);
+ texture = OpImageSampleExplicitLod(t_float4, sampler, coords, mask, operands);
}
return GetTextureElement(operation, texture, Type::Float);
}
@@ -1601,7 +1609,8 @@ private:
const Id lod = AsFloat(Visit(meta.lod));
spv::ImageOperandsMask mask = spv::ImageOperandsMask::Lod;
- std::vector<Id> operands;
+ std::vector<Id> operands{lod};
+
if (!meta.aoffi.empty()) {
mask = mask | spv::ImageOperandsMask::Offset;
operands.push_back(GetOffsetCoordinates(operation));
@@ -1609,11 +1618,10 @@ private:
if (meta.sampler.IsShadow()) {
const Id dref = AsFloat(Visit(meta.depth_compare));
- return {
- OpImageSampleDrefExplicitLod(t_float, sampler, coords, dref, mask, lod, operands),
- Type::Float};
+ return {OpImageSampleDrefExplicitLod(t_float, sampler, coords, dref, mask, operands),
+ Type::Float};
}
- const Id texture = OpImageSampleExplicitLod(t_float4, sampler, coords, mask, lod, operands);
+ const Id texture = OpImageSampleExplicitLod(t_float4, sampler, coords, mask, operands);
return GetTextureElement(operation, texture, Type::Float);
}
@@ -1722,7 +1730,7 @@ private:
const std::vector grad = {dx, dy};
static constexpr auto mask = spv::ImageOperandsMask::Grad;
- const Id texture = OpImageSampleImplicitLod(t_float4, sampler, coords, mask, grad);
+ const Id texture = OpImageSampleExplicitLod(t_float4, sampler, coords, mask, grad);
return GetTextureElement(operation, texture, Type::Float);
}
@@ -1833,7 +1841,7 @@ private:
}
void PreExit() {
- if (stage == ShaderType::Vertex) {
+ if (stage == ShaderType::Vertex && specialization.ndc_minus_one_to_one) {
const u32 position_index = out_indices.position.value();
const Id z_pointer = AccessElement(t_out_float, out_vertex, position_index, 2U);
const Id w_pointer = AccessElement(t_out_float, out_vertex, position_index, 3U);
@@ -1860,12 +1868,18 @@ private:
// rendertargets/components are skipped in the register assignment.
u32 current_reg = 0;
for (u32 rt = 0; rt < Maxwell::NumRenderTargets; ++rt) {
+ if (!specialization.enabled_rendertargets[rt]) {
+ // Skip rendertargets that are not enabled
+ continue;
+ }
// TODO(Subv): Figure out how dual-source blending is configured in the Switch.
for (u32 component = 0; component < 4; ++component) {
+ const Id pointer = AccessElement(t_out_float, frag_colors.at(rt), component);
if (header.ps.IsColorComponentOutputEnabled(rt, component)) {
- OpStore(AccessElement(t_out_float, frag_colors.at(rt), component),
- SafeGetRegister(current_reg));
+ OpStore(pointer, SafeGetRegister(current_reg));
++current_reg;
+ } else {
+ OpStore(pointer, component == 3 ? v_float_one : v_float_zero);
}
}
}
@@ -1995,15 +2009,6 @@ private:
return DeclareBuiltIn(builtin, spv::StorageClass::Input, type, std::move(name));
}
- bool IsRenderTargetUsed(u32 rt) const {
- for (u32 component = 0; component < 4; ++component) {
- if (header.ps.IsColorComponentOutputEnabled(rt, component)) {
- return true;
- }
- }
- return false;
- }
-
template <typename... Args>
Id AccessElement(Id pointer_type, Id composite, Args... elements_) {
std::vector<Id> members;
@@ -2552,29 +2557,7 @@ public:
}
Id operator()(const ExprCondCode& expr) {
- const Node cc = decomp.ir.GetConditionCode(expr.cc);
- Id target;
-
- if (const auto pred = std::get_if<PredicateNode>(&*cc)) {
- const auto index = pred->GetIndex();
- switch (index) {
- case Tegra::Shader::Pred::NeverExecute:
- target = decomp.v_false;
- break;
- case Tegra::Shader::Pred::UnusedIndex:
- target = decomp.v_true;
- break;
- default:
- target = decomp.predicates.at(index);
- break;
- }
- } else if (const auto flag = std::get_if<InternalFlagNode>(&*cc)) {
- target = decomp.internal_flags.at(static_cast<u32>(flag->GetFlag()));
- } else {
- UNREACHABLE();
- }
-
- return decomp.OpLoad(decomp.t_bool, target);
+ return decomp.AsBool(decomp.Visit(decomp.ir.GetConditionCode(expr.cc)));
}
Id operator()(const ExprVar& expr) {
@@ -2589,7 +2572,7 @@ public:
const Id target = decomp.Constant(decomp.t_uint, expr.value);
Id gpr = decomp.OpLoad(decomp.t_float, decomp.registers.at(expr.gpr));
gpr = decomp.OpBitcast(decomp.t_uint, gpr);
- return decomp.OpLogicalEqual(decomp.t_uint, gpr, target);
+ return decomp.OpIEqual(decomp.t_bool, gpr, target);
}
Id Visit(const Expr& node) {
@@ -2659,11 +2642,11 @@ public:
const Id loop_label = decomp.OpLabel();
const Id endloop_label = decomp.OpLabel();
const Id loop_start_block = decomp.OpLabel();
- const Id loop_end_block = decomp.OpLabel();
+ const Id loop_continue_block = decomp.OpLabel();
current_loop_exit = endloop_label;
decomp.OpBranch(loop_label);
decomp.AddLabel(loop_label);
- decomp.OpLoopMerge(endloop_label, loop_end_block, spv::LoopControlMask::MaskNone);
+ decomp.OpLoopMerge(endloop_label, loop_continue_block, spv::LoopControlMask::MaskNone);
decomp.OpBranch(loop_start_block);
decomp.AddLabel(loop_start_block);
ASTNode current = ast.nodes.GetFirst();
@@ -2671,6 +2654,8 @@ public:
Visit(current);
current = current->GetNext();
}
+ decomp.OpBranch(loop_continue_block);
+ decomp.AddLabel(loop_continue_block);
ExprDecompiler expr_parser{decomp};
const Id condition = expr_parser.Visit(ast.condition);
decomp.OpBranchConditional(condition, loop_label, endloop_label);
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.h b/src/video_core/renderer_vulkan/vk_shader_decompiler.h
index 2b01321b6..10794be1c 100644
--- a/src/video_core/renderer_vulkan/vk_shader_decompiler.h
+++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.h
@@ -94,6 +94,7 @@ struct Specialization final {
Maxwell::PrimitiveTopology primitive_topology{};
std::optional<float> point_size{};
std::array<Maxwell::VertexAttribute::Type, Maxwell::NumVertexAttributes> attribute_types{};
+ bool ndc_minus_one_to_one{};
// Tessellation specific
struct {
@@ -101,6 +102,9 @@ struct Specialization final {
Maxwell::TessellationSpacing spacing{};
bool clockwise{};
} tessellation;
+
+ // Fragment specific
+ std::bitset<8> enabled_rendertargets;
};
// Old gcc versions don't consider this trivially copyable.
// static_assert(std::is_trivially_copyable_v<Specialization>);
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
new file mode 100644
index 000000000..171d78afc
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
@@ -0,0 +1,127 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <algorithm>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+#include "common/bit_util.h"
+#include "common/common_types.h"
+#include "video_core/renderer_vulkan/vk_device.h"
+#include "video_core/renderer_vulkan/vk_resource_manager.h"
+#include "video_core/renderer_vulkan/vk_scheduler.h"
+#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
+
+namespace Vulkan {
+
+VKStagingBufferPool::StagingBuffer::StagingBuffer(std::unique_ptr<VKBuffer> buffer, VKFence& fence,
+ u64 last_epoch)
+ : buffer{std::move(buffer)}, watch{fence}, last_epoch{last_epoch} {}
+
+VKStagingBufferPool::StagingBuffer::StagingBuffer(StagingBuffer&& rhs) noexcept {
+ buffer = std::move(rhs.buffer);
+ watch = std::move(rhs.watch);
+ last_epoch = rhs.last_epoch;
+}
+
+VKStagingBufferPool::StagingBuffer::~StagingBuffer() = default;
+
+VKStagingBufferPool::StagingBuffer& VKStagingBufferPool::StagingBuffer::operator=(
+ StagingBuffer&& rhs) noexcept {
+ buffer = std::move(rhs.buffer);
+ watch = std::move(rhs.watch);
+ last_epoch = rhs.last_epoch;
+ return *this;
+}
+
+VKStagingBufferPool::VKStagingBufferPool(const VKDevice& device, VKMemoryManager& memory_manager,
+ VKScheduler& scheduler)
+ : device{device}, memory_manager{memory_manager}, scheduler{scheduler},
+ is_device_integrated{device.IsIntegrated()} {}
+
+VKStagingBufferPool::~VKStagingBufferPool() = default;
+
+VKBuffer& VKStagingBufferPool::GetUnusedBuffer(std::size_t size, bool host_visible) {
+ if (const auto buffer = TryGetReservedBuffer(size, host_visible)) {
+ return *buffer;
+ }
+ return CreateStagingBuffer(size, host_visible);
+}
+
+void VKStagingBufferPool::TickFrame() {
+ ++epoch;
+ current_delete_level = (current_delete_level + 1) % NumLevels;
+
+ ReleaseCache(true);
+ if (!is_device_integrated) {
+ ReleaseCache(false);
+ }
+}
+
+VKBuffer* VKStagingBufferPool::TryGetReservedBuffer(std::size_t size, bool host_visible) {
+ for (auto& entry : GetCache(host_visible)[Common::Log2Ceil64(size)].entries) {
+ if (entry.watch.TryWatch(scheduler.GetFence())) {
+ entry.last_epoch = epoch;
+ return &*entry.buffer;
+ }
+ }
+ return nullptr;
+}
+
+VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_visible) {
+ const auto usage =
+ vk::BufferUsageFlagBits::eTransferSrc | vk::BufferUsageFlagBits::eTransferDst |
+ vk::BufferUsageFlagBits::eStorageBuffer | vk::BufferUsageFlagBits::eIndexBuffer;
+ const u32 log2 = Common::Log2Ceil64(size);
+ const vk::BufferCreateInfo buffer_ci({}, 1ULL << log2, usage, vk::SharingMode::eExclusive, 0,
+ nullptr);
+ const auto dev = device.GetLogical();
+ auto buffer = std::make_unique<VKBuffer>();
+ buffer->handle = dev.createBufferUnique(buffer_ci, nullptr, device.GetDispatchLoader());
+ buffer->commit = memory_manager.Commit(*buffer->handle, host_visible);
+
+ auto& entries = GetCache(host_visible)[log2].entries;
+ return *entries.emplace_back(std::move(buffer), scheduler.GetFence(), epoch).buffer;
+}
+
+VKStagingBufferPool::StagingBuffersCache& VKStagingBufferPool::GetCache(bool host_visible) {
+ return is_device_integrated || host_visible ? host_staging_buffers : device_staging_buffers;
+}
+
+void VKStagingBufferPool::ReleaseCache(bool host_visible) {
+ auto& cache = GetCache(host_visible);
+ const u64 size = ReleaseLevel(cache, current_delete_level);
+ if (size == 0) {
+ return;
+ }
+}
+
+u64 VKStagingBufferPool::ReleaseLevel(StagingBuffersCache& cache, std::size_t log2) {
+ static constexpr u64 epochs_to_destroy = 180;
+ static constexpr std::size_t deletions_per_tick = 16;
+
+ auto& staging = cache[log2];
+ auto& entries = staging.entries;
+ const std::size_t old_size = entries.size();
+
+ const auto is_deleteable = [this](const auto& entry) {
+ return entry.last_epoch + epochs_to_destroy < epoch && !entry.watch.IsUsed();
+ };
+ const std::size_t begin_offset = staging.delete_index;
+ const std::size_t end_offset = std::min(begin_offset + deletions_per_tick, old_size);
+ const auto begin = std::begin(entries) + begin_offset;
+ const auto end = std::begin(entries) + end_offset;
+ entries.erase(std::remove_if(begin, end, is_deleteable), end);
+
+ const std::size_t new_size = entries.size();
+ staging.delete_index += deletions_per_tick;
+ if (staging.delete_index >= new_size) {
+ staging.delete_index = 0;
+ }
+
+ return (1ULL << log2) * (old_size - new_size);
+}
+
+} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
new file mode 100644
index 000000000..02310375f
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
@@ -0,0 +1,83 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <climits>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+#include "common/common_types.h"
+
+#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/vk_memory_manager.h"
+
+namespace Vulkan {
+
+class VKDevice;
+class VKFenceWatch;
+class VKScheduler;
+
+struct VKBuffer final {
+ UniqueBuffer handle;
+ VKMemoryCommit commit;
+};
+
+class VKStagingBufferPool final {
+public:
+ explicit VKStagingBufferPool(const VKDevice& device, VKMemoryManager& memory_manager,
+ VKScheduler& scheduler);
+ ~VKStagingBufferPool();
+
+ VKBuffer& GetUnusedBuffer(std::size_t size, bool host_visible);
+
+ void TickFrame();
+
+private:
+ struct StagingBuffer final {
+ explicit StagingBuffer(std::unique_ptr<VKBuffer> buffer, VKFence& fence, u64 last_epoch);
+ StagingBuffer(StagingBuffer&& rhs) noexcept;
+ StagingBuffer(const StagingBuffer&) = delete;
+ ~StagingBuffer();
+
+ StagingBuffer& operator=(StagingBuffer&& rhs) noexcept;
+
+ std::unique_ptr<VKBuffer> buffer;
+ VKFenceWatch watch;
+ u64 last_epoch = 0;
+ };
+
+ struct StagingBuffers final {
+ std::vector<StagingBuffer> entries;
+ std::size_t delete_index = 0;
+ };
+
+ static constexpr std::size_t NumLevels = sizeof(std::size_t) * CHAR_BIT;
+ using StagingBuffersCache = std::array<StagingBuffers, NumLevels>;
+
+ VKBuffer* TryGetReservedBuffer(std::size_t size, bool host_visible);
+
+ VKBuffer& CreateStagingBuffer(std::size_t size, bool host_visible);
+
+ StagingBuffersCache& GetCache(bool host_visible);
+
+ void ReleaseCache(bool host_visible);
+
+ u64 ReleaseLevel(StagingBuffersCache& cache, std::size_t log2);
+
+ const VKDevice& device;
+ VKMemoryManager& memory_manager;
+ VKScheduler& scheduler;
+ const bool is_device_integrated;
+
+ StagingBuffersCache host_staging_buffers;
+ StagingBuffersCache device_staging_buffers;
+
+ u64 epoch = 0;
+
+ std::size_t current_delete_level = 0;
+};
+
+} // namespace Vulkan