Diffstat (limited to 'src/video_core')
34 files changed, 2831 insertions, 1132 deletions
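A note on the tessellation state introduced below: maxwell_3d.h gains a DepthMode register, the tess_mode/tess_level_outer/tess_level_inner registers, and a patch_vertices count. As a minimal sketch of how those fields decode — assuming yuzu's BitField::Value() accessors; the LogTessState helper is hypothetical and not part of this diff:

    using Regs = Tegra::Engines::Maxwell3D::Regs;

    // Hypothetical helper: dump the tessellation state wired up in maxwell_3d.h.
    void LogTessState(const Regs& regs) {
        // tess_mode packs primitive (bits 0-1), spacing (bits 4-5), winding (bit 8).
        const auto prim = regs.tess_mode.prim.Value();       // Isolines/Triangles/Quads
        const auto spacing = regs.tess_mode.spacing.Value(); // Equal/FractionalOdd/FractionalEven
        const bool clockwise = regs.tess_mode.cw != 0;
        LOG_DEBUG(HW_GPU, "prim={} spacing={} cw={} patch_vertices={}",
                  static_cast<u32>(prim), static_cast<u32>(spacing), clockwise,
                  regs.patch_vertices);
    }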
diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h index 4cb7339b5..dbb4e597f 100644 --- a/src/video_core/engines/maxwell_3d.h +++ b/src/video_core/engines/maxwell_3d.h @@ -310,6 +310,11 @@ public: } }; + enum class DepthMode : u32 { + MinusOneToOne = 0, + ZeroToOne = 1, + }; + enum class PrimitiveTopology : u32 { Points = 0x0, Lines = 0x1, @@ -491,6 +496,18 @@ public: INSERT_UNION_PADDING_WORDS(1); }; + enum class TessellationPrimitive : u32 { + Isolines = 0, + Triangles = 1, + Quads = 2, + }; + + enum class TessellationSpacing : u32 { + Equal = 0, + FractionalOdd = 1, + FractionalEven = 2, + }; + struct RenderTargetConfig { u32 address_high; u32 address_low; @@ -628,7 +645,19 @@ public: }; } sync_info; - INSERT_UNION_PADDING_WORDS(0x11E); + INSERT_UNION_PADDING_WORDS(0x15); + + union { + BitField<0, 2, TessellationPrimitive> prim; + BitField<4, 2, TessellationSpacing> spacing; + BitField<8, 1, u32> cw; + BitField<9, 1, u32> connected; + } tess_mode; + + std::array<f32, 4> tess_level_outer; + std::array<f32, 2> tess_level_inner; + + INSERT_UNION_PADDING_WORDS(0x102); u32 tfb_enabled; @@ -647,7 +676,7 @@ public: u32 count; } vertex_buffer; - INSERT_UNION_PADDING_WORDS(1); + DepthMode depth_mode; float clear_color[4]; float clear_depth; @@ -662,7 +691,9 @@ public: u32 polygon_offset_line_enable; u32 polygon_offset_fill_enable; - INSERT_UNION_PADDING_WORDS(0xD); + u32 patch_vertices; + + INSERT_UNION_PADDING_WORDS(0xC); std::array<ScissorTest, NumViewports> scissor_test; @@ -1386,17 +1417,22 @@ ASSERT_REG_POSITION(upload, 0x60); ASSERT_REG_POSITION(exec_upload, 0x6C); ASSERT_REG_POSITION(data_upload, 0x6D); ASSERT_REG_POSITION(sync_info, 0xB2); +ASSERT_REG_POSITION(tess_mode, 0xC8); +ASSERT_REG_POSITION(tess_level_outer, 0xC9); +ASSERT_REG_POSITION(tess_level_inner, 0xCD); ASSERT_REG_POSITION(tfb_enabled, 0x1D1); ASSERT_REG_POSITION(rt, 0x200); ASSERT_REG_POSITION(viewport_transform, 0x280); ASSERT_REG_POSITION(viewports, 0x300); ASSERT_REG_POSITION(vertex_buffer, 0x35D); +ASSERT_REG_POSITION(depth_mode, 0x35F); ASSERT_REG_POSITION(clear_color[0], 0x360); ASSERT_REG_POSITION(clear_depth, 0x364); ASSERT_REG_POSITION(clear_stencil, 0x368); ASSERT_REG_POSITION(polygon_offset_point_enable, 0x370); ASSERT_REG_POSITION(polygon_offset_line_enable, 0x371); ASSERT_REG_POSITION(polygon_offset_fill_enable, 0x372); +ASSERT_REG_POSITION(patch_vertices, 0x373); ASSERT_REG_POSITION(scissor_test, 0x380); ASSERT_REG_POSITION(stencil_back_func_ref, 0x3D5); ASSERT_REG_POSITION(stencil_back_func_mask, 0x3D6); diff --git a/src/video_core/engines/shader_bytecode.h b/src/video_core/engines/shader_bytecode.h index 9fafed4a2..d6a2cc8b8 100644 --- a/src/video_core/engines/shader_bytecode.h +++ b/src/video_core/engines/shader_bytecode.h @@ -98,10 +98,11 @@ union Attribute { BitField<20, 10, u64> immediate; BitField<22, 2, u64> element; BitField<24, 6, Index> index; + BitField<31, 1, u64> patch; BitField<47, 3, AttributeSize> size; bool IsPhysical() const { - return element == 0 && static_cast<u64>(index.Value()) == 0; + return patch == 0 && element == 0 && static_cast<u64>(index.Value()) == 0; } } fmt20; @@ -383,6 +384,15 @@ enum class IsberdMode : u64 { enum class IsberdShift : u64 { None = 0, U16 = 1, B32 = 2 }; +enum class MembarType : u64 { + CTA = 0, + GL = 1, + SYS = 2, + VC = 3, +}; + +enum class MembarUnknown : u64 { Default = 0, IVALLD = 1, IVALLT = 2, IVALLTD = 3 }; + enum class HalfType : u64 { H0_H1 = 0, F32 = 1, @@ -800,6 +810,12 @@ union Instruction { } popc; union { + 
BitField<41, 1, u64> sh; + BitField<40, 1, u64> invert; + BitField<48, 1, u64> is_signed; + } flo; + + union { BitField<39, 3, u64> pred; BitField<42, 1, u64> neg_pred; } sel; @@ -1276,6 +1292,7 @@ union Instruction { BitField<50, 1, u64> dc_flag; BitField<51, 1, u64> aoffi_flag; BitField<52, 2, u64> component; + BitField<55, 1, u64> fp16_flag; bool UsesMiscMode(TextureMiscMode mode) const { switch (mode) { @@ -1440,6 +1457,26 @@ union Instruction { } tlds; union { + BitField<28, 1, u64> is_array; + BitField<29, 2, TextureType> texture_type; + BitField<35, 1, u64> aoffi_flag; + BitField<49, 1, u64> nodep_flag; + + bool UsesMiscMode(TextureMiscMode mode) const { + switch (mode) { + case TextureMiscMode::AOFFI: + return aoffi_flag != 0; + case TextureMiscMode::NODEP: + return nodep_flag != 0; + default: + break; + } + return false; + } + + } txd; + + union { BitField<24, 2, StoreCacheManagement> cache_management; BitField<33, 3, ImageType> image_type; BitField<49, 2, OutOfBoundsStore> out_of_bounds_store; @@ -1519,6 +1556,11 @@ union Instruction { } isberd; union { + BitField<8, 2, MembarType> type; + BitField<0, 2, MembarUnknown> unknown; + } membar; + + union { BitField<48, 1, u64> signed_a; BitField<38, 1, u64> is_byte_chunk_a; BitField<36, 2, VideoType> type_a; @@ -1632,6 +1674,8 @@ public: TLD4S, // Texture Load 4 with scalar / non-vec4 source / destinations TMML_B, // Texture Mip Map Level TMML, // Texture Mip Map Level + TXD, // Texture Gradient/Load with Derivatives + TXD_B, // Texture Gradient/Load with Derivatives Bindless SUST, // Surface Store SULD, // Surface Load SUATOM, // Surface Atomic Operation @@ -1640,6 +1684,7 @@ public: IPA, OUT_R, // Emit vertex/primitive ISBERD, + MEMBAR, VMAD, VSETP, FFMA_IMM, // Fused Multiply and Add @@ -1664,6 +1709,9 @@ public: ISCADD_C, // Scale and Add ISCADD_R, ISCADD_IMM, + FLO_R, + FLO_C, + FLO_IMM, LEA_R1, LEA_R2, LEA_RZ, @@ -1727,6 +1775,10 @@ public: SHR_C, SHR_R, SHR_IMM, + SHF_RIGHT_R, + SHF_RIGHT_IMM, + SHF_LEFT_R, + SHF_LEFT_IMM, FMNMX_C, FMNMX_R, FMNMX_IMM, @@ -1894,7 +1946,7 @@ private: INST("111000100100----", Id::BRA, Type::Flow, "BRA"), INST("111000100101----", Id::BRX, Type::Flow, "BRX"), INST("1111000011111---", Id::SYNC, Type::Flow, "SYNC"), - INST("111000110100---", Id::BRK, Type::Flow, "BRK"), + INST("111000110100----", Id::BRK, Type::Flow, "BRK"), INST("111000110000----", Id::EXIT, Type::Flow, "EXIT"), INST("1111000011110---", Id::DEPBAR, Type::Synch, "DEPBAR"), INST("0101000011011---", Id::VOTE, Type::Warp, "VOTE"), @@ -1921,9 +1973,11 @@ private: INST("1101-01---------", Id::TLDS, Type::Texture, "TLDS"), INST("110010----111---", Id::TLD4, Type::Texture, "TLD4"), INST("1101111011111---", Id::TLD4_B, Type::Texture, "TLD4_B"), - INST("1101111100------", Id::TLD4S, Type::Texture, "TLD4S"), + INST("11011111--00----", Id::TLD4S, Type::Texture, "TLD4S"), INST("110111110110----", Id::TMML_B, Type::Texture, "TMML_B"), INST("1101111101011---", Id::TMML, Type::Texture, "TMML"), + INST("11011110011110--", Id::TXD_B, Type::Texture, "TXD_B"), + INST("11011110001110--", Id::TXD, Type::Texture, "TXD"), INST("11101011001-----", Id::SUST, Type::Image, "SUST"), INST("11101011000-----", Id::SULD, Type::Image, "SULD"), INST("1110101000------", Id::SUATOM, Type::Image, "SUATOM_D"), @@ -1931,6 +1985,7 @@ private: INST("11100000--------", Id::IPA, Type::Trivial, "IPA"), INST("1111101111100---", Id::OUT_R, Type::Trivial, "OUT_R"), INST("1110111111010---", Id::ISBERD, Type::Trivial, "ISBERD"), + INST("1110111110011---", Id::MEMBAR, Type::Trivial,
"MEMBAR"), INST("01011111--------", Id::VMAD, Type::Video, "VMAD"), INST("0101000011110---", Id::VSETP, Type::Video, "VSETP"), INST("0011001-1-------", Id::FFMA_IMM, Type::Ffma, "FFMA_IMM"), @@ -1965,6 +2020,9 @@ private: INST("010110110100----", Id::ICMP_R, Type::ArithmeticInteger, "ICMP_R"), INST("010010110100----", Id::ICMP_CR, Type::ArithmeticInteger, "ICMP_CR"), INST("0011011-0100----", Id::ICMP_IMM, Type::ArithmeticInteger, "ICMP_IMM"), + INST("0101110000110---", Id::FLO_R, Type::ArithmeticInteger, "FLO_R"), + INST("0100110000110---", Id::FLO_C, Type::ArithmeticInteger, "FLO_C"), + INST("0011100-00110---", Id::FLO_IMM, Type::ArithmeticInteger, "FLO_IMM"), INST("0101101111011---", Id::LEA_R2, Type::ArithmeticInteger, "LEA_R2"), INST("0101101111010---", Id::LEA_R1, Type::ArithmeticInteger, "LEA_R1"), INST("001101101101----", Id::LEA_IMM, Type::ArithmeticInteger, "LEA_IMM"), @@ -2022,6 +2080,10 @@ private: INST("0100110000101---", Id::SHR_C, Type::Shift, "SHR_C"), INST("0101110000101---", Id::SHR_R, Type::Shift, "SHR_R"), INST("0011100-00101---", Id::SHR_IMM, Type::Shift, "SHR_IMM"), + INST("0101110011111---", Id::SHF_RIGHT_R, Type::Shift, "SHF_RIGHT_R"), + INST("0011100-11111---", Id::SHF_RIGHT_IMM, Type::Shift, "SHF_RIGHT_IMM"), + INST("0101101111111---", Id::SHF_LEFT_R, Type::Shift, "SHF_LEFT_R"), + INST("0011011-11111---", Id::SHF_LEFT_IMM, Type::Shift, "SHF_LEFT_IMM"), INST("0100110011100---", Id::I2I_C, Type::Conversion, "I2I_C"), INST("0101110011100---", Id::I2I_R, Type::Conversion, "I2I_R"), INST("0011101-11100---", Id::I2I_IMM, Type::Conversion, "I2I_IMM"), diff --git a/src/video_core/rasterizer_accelerated.cpp b/src/video_core/rasterizer_accelerated.cpp index fc6ecb899..d01db97da 100644 --- a/src/video_core/rasterizer_accelerated.cpp +++ b/src/video_core/rasterizer_accelerated.cpp @@ -5,6 +5,7 @@ #include <mutex> #include <boost/icl/interval_map.hpp> +#include <boost/range/iterator_range.hpp> #include "common/assert.h" #include "common/common_types.h" diff --git a/src/video_core/renderer_opengl/gl_device.cpp b/src/video_core/renderer_opengl/gl_device.cpp index 413d8546b..1a2e2a9f7 100644 --- a/src/video_core/renderer_opengl/gl_device.cpp +++ b/src/video_core/renderer_opengl/gl_device.cpp @@ -5,6 +5,7 @@ #include <algorithm> #include <array> #include <cstddef> +#include <cstring> #include <optional> #include <vector> @@ -134,11 +135,13 @@ std::array<Device::BaseBindings, Tegra::Engines::MaxShaderTypes> BuildBaseBindin Device::Device() : base_bindings{BuildBaseBindings()} { const std::string_view vendor = reinterpret_cast<const char*>(glGetString(GL_VENDOR)); + const auto renderer = reinterpret_cast<const char*>(glGetString(GL_RENDERER)); const std::vector extensions = GetExtensions(); const bool is_nvidia = vendor == "NVIDIA Corporation"; const bool is_amd = vendor == "ATI Technologies Inc."; const bool is_intel = vendor == "Intel"; + const bool is_intel_proprietary = is_intel && std::strstr(renderer, "Mesa") == nullptr; uniform_buffer_alignment = GetInteger<std::size_t>(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT); shader_storage_alignment = GetInteger<std::size_t>(GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT); @@ -152,7 +155,7 @@ Device::Device() : base_bindings{BuildBaseBindings()} { has_variable_aoffi = TestVariableAoffi(); has_component_indexing_bug = is_amd; has_precise_bug = TestPreciseBug(); - has_broken_compute = is_intel; + has_broken_compute = is_intel_proprietary; has_fast_buffer_sub_data = is_nvidia; LOG_INFO(Render_OpenGL, "Renderer_VariableAOFFI: {}", has_variable_aoffi); 
diff --git a/src/video_core/renderer_opengl/gl_framebuffer_cache.cpp b/src/video_core/renderer_opengl/gl_framebuffer_cache.cpp index a5d69d78d..874ed3c6e 100644 --- a/src/video_core/renderer_opengl/gl_framebuffer_cache.cpp +++ b/src/video_core/renderer_opengl/gl_framebuffer_cache.cpp @@ -3,9 +3,12 @@ // Refer to the license.txt file included. #include <tuple> +#include <unordered_map> +#include <utility> -#include "common/cityhash.h" -#include "common/scope_exit.h" +#include <glad/glad.h> + +#include "common/common_types.h" #include "video_core/engines/maxwell_3d.h" #include "video_core/renderer_opengl/gl_framebuffer_cache.h" #include "video_core/renderer_opengl/gl_state.h" @@ -13,6 +16,7 @@ namespace OpenGL { using Maxwell = Tegra::Engines::Maxwell3D::Regs; +using VideoCore::Surface::SurfaceType; FramebufferCacheOpenGL::FramebufferCacheOpenGL() = default; @@ -35,36 +39,49 @@ OGLFramebuffer FramebufferCacheOpenGL::CreateFramebuffer(const FramebufferCacheK local_state.draw.draw_framebuffer = framebuffer.handle; local_state.ApplyFramebufferState(); + if (key.zeta) { + const bool stencil = key.zeta->GetSurfaceParams().type == SurfaceType::DepthStencil; + const GLenum attach_target = stencil ? GL_DEPTH_STENCIL_ATTACHMENT : GL_DEPTH_ATTACHMENT; + key.zeta->Attach(attach_target, GL_DRAW_FRAMEBUFFER); + } + + std::size_t num_buffers = 0; + std::array<GLenum, Maxwell::NumRenderTargets> targets; + for (std::size_t index = 0; index < Maxwell::NumRenderTargets; ++index) { - if (key.colors[index]) { - key.colors[index]->Attach(GL_COLOR_ATTACHMENT0 + static_cast<GLenum>(index), - GL_DRAW_FRAMEBUFFER); + if (!key.colors[index]) { + targets[index] = GL_NONE; + continue; } + const GLenum attach_target = GL_COLOR_ATTACHMENT0 + static_cast<GLenum>(index); + key.colors[index]->Attach(attach_target, GL_DRAW_FRAMEBUFFER); + + const u32 attachment = (key.color_attachments >> (BitsPerAttachment * index)) & 0b1111; + targets[index] = GL_COLOR_ATTACHMENT0 + attachment; + num_buffers = index + 1; } - if (key.colors_count) { - glDrawBuffers(key.colors_count, key.color_attachments.data()); + + if (num_buffers > 0) { + glDrawBuffers(static_cast<GLsizei>(num_buffers), std::data(targets)); } else { glDrawBuffer(GL_NONE); } - if (key.zeta) { - key.zeta->Attach(key.stencil_enable ? 
GL_DEPTH_STENCIL_ATTACHMENT : GL_DEPTH_ATTACHMENT, - GL_DRAW_FRAMEBUFFER); - } - return framebuffer; } -std::size_t FramebufferCacheKey::Hash() const { - static_assert(sizeof(*this) % sizeof(u64) == 0, "Unaligned struct"); - return static_cast<std::size_t>( - Common::CityHash64(reinterpret_cast<const char*>(this), sizeof(*this))); +std::size_t FramebufferCacheKey::Hash() const noexcept { + std::size_t hash = std::hash<View>{}(zeta); + for (const auto& color : colors) { + hash ^= std::hash<View>{}(color); + } + hash ^= static_cast<std::size_t>(color_attachments) << 16; + return hash; } -bool FramebufferCacheKey::operator==(const FramebufferCacheKey& rhs) const { - return std::tie(stencil_enable, colors_count, color_attachments, colors, zeta) == - std::tie(rhs.stencil_enable, rhs.colors_count, rhs.color_attachments, rhs.colors, - rhs.zeta); +bool FramebufferCacheKey::operator==(const FramebufferCacheKey& rhs) const noexcept { + return std::tie(colors, zeta, color_attachments) == + std::tie(rhs.colors, rhs.zeta, rhs.color_attachments); } } // namespace OpenGL diff --git a/src/video_core/renderer_opengl/gl_framebuffer_cache.h b/src/video_core/renderer_opengl/gl_framebuffer_cache.h index 424344c48..02ec80ae9 100644 --- a/src/video_core/renderer_opengl/gl_framebuffer_cache.h +++ b/src/video_core/renderer_opengl/gl_framebuffer_cache.h @@ -18,21 +18,24 @@ namespace OpenGL { -struct alignas(sizeof(u64)) FramebufferCacheKey { - bool stencil_enable = false; - u16 colors_count = 0; +constexpr std::size_t BitsPerAttachment = 4; - std::array<GLenum, Tegra::Engines::Maxwell3D::Regs::NumRenderTargets> color_attachments{}; - std::array<View, Tegra::Engines::Maxwell3D::Regs::NumRenderTargets> colors; +struct FramebufferCacheKey { View zeta; + std::array<View, Tegra::Engines::Maxwell3D::Regs::NumRenderTargets> colors; + u32 color_attachments = 0; - std::size_t Hash() const; + std::size_t Hash() const noexcept; - bool operator==(const FramebufferCacheKey& rhs) const; + bool operator==(const FramebufferCacheKey& rhs) const noexcept; - bool operator!=(const FramebufferCacheKey& rhs) const { + bool operator!=(const FramebufferCacheKey& rhs) const noexcept { return !operator==(rhs); } + + void SetAttachment(std::size_t index, u32 attachment) { + color_attachments |= attachment << (BitsPerAttachment * index); + } }; } // namespace OpenGL diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index a568a4343..f20967d85 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp @@ -93,7 +93,6 @@ RasterizerOpenGL::RasterizerOpenGL(Core::System& system, Core::Frontend::EmuWind shader_program_manager = std::make_unique<GLShader::ProgramManager>(); state.draw.shader_program = 0; state.Apply(); - clear_framebuffer.Create(); LOG_DEBUG(Render_OpenGL, "Sync fixed function OpenGL state here"); CheckExtensions(); @@ -278,6 +277,14 @@ void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) { continue; } + // Currently these stages are not supported in the OpenGL backend.
+ // TODO(Blinkhawk): Port tessellation shaders from Vulkan to OpenGL + if (program == Maxwell::ShaderProgram::TesselationControl) { + continue; + } else if (program == Maxwell::ShaderProgram::TesselationEval) { + continue; + } + Shader shader{shader_cache.GetStageProgram(program)}; // Stage indices are 0 - 5 @@ -373,78 +380,58 @@ void RasterizerOpenGL::ConfigureFramebuffers() { UNIMPLEMENTED_IF(regs.rt_separate_frag_data == 0); // Bind the framebuffer surfaces - FramebufferCacheKey fbkey; - for (std::size_t index = 0; index < Maxwell::NumRenderTargets; ++index) { + FramebufferCacheKey key; + const auto colors_count = static_cast<std::size_t>(regs.rt_control.count); + for (std::size_t index = 0; index < colors_count; ++index) { View color_surface{texture_cache.GetColorBufferSurface(index, true)}; - - if (color_surface) { - // Assume that a surface will be written to if it is used as a framebuffer, even - // if the shader doesn't actually write to it. - texture_cache.MarkColorBufferInUse(index); + if (!color_surface) { + continue; } + // Assume that a surface will be written to if it is used as a framebuffer, even + // if the shader doesn't actually write to it. + texture_cache.MarkColorBufferInUse(index); - fbkey.color_attachments[index] = GL_COLOR_ATTACHMENT0 + regs.rt_control.GetMap(index); - fbkey.colors[index] = std::move(color_surface); + key.SetAttachment(index, regs.rt_control.GetMap(index)); + key.colors[index] = std::move(color_surface); } - fbkey.colors_count = static_cast<u16>(regs.rt_control.count); if (depth_surface) { // Assume that a surface will be written to if it is used as a framebuffer, even if // the shader doesn't actually write to it. texture_cache.MarkDepthBufferInUse(); - - fbkey.stencil_enable = depth_surface->GetSurfaceParams().type == SurfaceType::DepthStencil; - fbkey.zeta = std::move(depth_surface); + key.zeta = std::move(depth_surface); } texture_cache.GuardRenderTargets(false); - state.draw.draw_framebuffer = framebuffer_cache.GetFramebuffer(fbkey); + state.draw.draw_framebuffer = framebuffer_cache.GetFramebuffer(key); SyncViewport(state); } void RasterizerOpenGL::ConfigureClearFramebuffer(OpenGLState& current_state, bool using_color_fb, bool using_depth_fb, bool using_stencil_fb) { + using VideoCore::Surface::SurfaceType; + auto& gpu = system.GPU().Maxwell3D(); const auto& regs = gpu.regs; texture_cache.GuardRenderTargets(true); - View color_surface{}; + View color_surface; if (using_color_fb) { color_surface = texture_cache.GetColorBufferSurface(regs.clear_buffers.RT, false); } - View depth_surface{}; + View depth_surface; if (using_depth_fb || using_stencil_fb) { depth_surface = texture_cache.GetDepthBufferSurface(false); } texture_cache.GuardRenderTargets(false); - current_state.draw.draw_framebuffer = clear_framebuffer.handle; - current_state.ApplyFramebufferState(); - - if (color_surface) { - color_surface->Attach(GL_COLOR_ATTACHMENT0, GL_DRAW_FRAMEBUFFER); - } else { - glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0); - } + FramebufferCacheKey key; + key.colors[0] = color_surface; + key.zeta = depth_surface; - if (depth_surface) { - const auto& params = depth_surface->GetSurfaceParams(); - switch (params.type) { - case VideoCore::Surface::SurfaceType::Depth: - depth_surface->Attach(GL_DEPTH_ATTACHMENT, GL_DRAW_FRAMEBUFFER); - glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0); - break; - case VideoCore::Surface::SurfaceType::DepthStencil: -
depth_surface->Attach(GL_DEPTH_STENCIL_ATTACHMENT, GL_DRAW_FRAMEBUFFER); - break; - default: - UNIMPLEMENTED(); - } - } else { - glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, - 0); - } + current_state.draw.draw_framebuffer = framebuffer_cache.GetFramebuffer(key); + current_state.ApplyFramebufferState(); } void RasterizerOpenGL::Clear() { @@ -1049,6 +1036,10 @@ void RasterizerOpenGL::SyncViewport(OpenGLState& current_state) { flip_y = !flip_y; } state.clip_control.origin = flip_y ? GL_UPPER_LEFT : GL_LOWER_LEFT; + state.clip_control.depth_mode = + regs.depth_mode == Tegra::Engines::Maxwell3D::Regs::DepthMode::ZeroToOne + ? GL_ZERO_TO_ONE + : GL_NEGATIVE_ONE_TO_ONE; } void RasterizerOpenGL::SyncClipEnabled( diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h index 0e47d71df..04c1ca551 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.h +++ b/src/video_core/renderer_opengl/gl_rasterizer.h @@ -223,8 +223,6 @@ private: enum class AccelDraw { Disabled, Arrays, Indexed }; AccelDraw accelerate_draw = AccelDraw::Disabled; - - OGLFramebuffer clear_framebuffer; }; } // namespace OpenGL diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp index 370bdf052..270a9dc2b 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.cpp +++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp @@ -281,11 +281,11 @@ CachedProgram BuildShader(const Device& device, u64 unique_identifier, ShaderTyp if (variant.shared_memory_size > 0) { // TODO(Rodrigo): We should divide by four here, but having a larger shared memory pool // avoids out of bound stores. Find out why shared memory size is being invalid. - source += fmt::format("shared uint smem[{}];", variant.shared_memory_size); + source += fmt::format("shared uint smem[{}];\n", variant.shared_memory_size); } if (variant.local_memory_size > 0) { - source += fmt::format("#define LOCAL_MEMORY_SIZE {}", + source += fmt::format("#define LOCAL_MEMORY_SIZE {}\n", Common::AlignUp(variant.local_memory_size, 4) / 4); } } diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp index 0e644564a..d1ae4be6d 100644 --- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp +++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp @@ -49,8 +49,9 @@ class ExprDecompiler; enum class Type { Void, Bool, Bool2, Float, Int, Uint, HalfFloat }; struct TextureAoffi {}; +struct TextureDerivates {}; using TextureArgument = std::pair<Type, Node>; -using TextureIR = std::variant<TextureAoffi, TextureArgument>; +using TextureIR = std::variant<TextureAoffi, TextureDerivates, TextureArgument>; constexpr u32 MAX_CONSTBUFFER_ELEMENTS = static_cast<u32>(Maxwell::MaxConstBufferSize) / (4 * sizeof(float)); @@ -1075,7 +1076,7 @@ private: } std::string GenerateTexture(Operation operation, const std::string& function_suffix, - const std::vector<TextureIR>& extras) { + const std::vector<TextureIR>& extras, bool sepparate_dc = false) { constexpr std::array coord_constructors = {"float", "vec2", "vec3", "vec4"}; const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); @@ -1090,7 +1091,8 @@ private: expr += "Offset"; } expr += '(' + GetSampler(meta->sampler) + ", "; - expr += coord_constructors.at(count + (has_array ? 1 : 0) + (has_shadow ? 1 : 0) - 1); + expr += coord_constructors.at(count + (has_array ? 1 : 0) + + (has_shadow && !sepparate_dc ? 
1 : 0) - 1); expr += '('; for (std::size_t i = 0; i < count; ++i) { expr += Visit(operation[i]).AsFloat(); @@ -1103,15 +1105,22 @@ private: expr += ", float(" + Visit(meta->array).AsInt() + ')'; } if (has_shadow) { - expr += ", " + Visit(meta->depth_compare).AsFloat(); + if (sepparate_dc) { + expr += "), " + Visit(meta->depth_compare).AsFloat(); + } else { + expr += ", " + Visit(meta->depth_compare).AsFloat() + ')'; + } + } else { + expr += ')'; } - expr += ')'; for (const auto& variant : extras) { if (const auto argument = std::get_if<TextureArgument>(&variant)) { expr += GenerateTextureArgument(*argument); } else if (std::holds_alternative<TextureAoffi>(variant)) { expr += GenerateTextureAoffi(meta->aoffi); + } else if (std::holds_alternative<TextureDerivates>(variant)) { + expr += GenerateTextureDerivates(meta->derivates); } else { UNREACHABLE(); } @@ -1181,6 +1190,36 @@ private: return expr; } + std::string GenerateTextureDerivates(const std::vector<Node>& derivates) { + if (derivates.empty()) { + return {}; + } + constexpr std::array coord_constructors = {"float", "vec2", "vec3"}; + std::string expr = ", "; + const std::size_t components = derivates.size() / 2; + std::string dx = coord_constructors.at(components - 1); + std::string dy = coord_constructors.at(components - 1); + dx += '('; + dy += '('; + + for (std::size_t index = 0; index < components; ++index) { + const auto operand_x{derivates.at(index * 2)}; + const auto operand_y{derivates.at(index * 2 + 1)}; + dx += Visit(operand_x).AsFloat(); + dy += Visit(operand_y).AsFloat(); + + if (index + 1 < components) { + dx += ", "; + dy += ", "; + } + } + dx += ')'; + dy += ')'; + expr += dx + ", " + dy; + + return expr; + } + std::string BuildIntegerCoordinates(Operation operation) { constexpr std::array constructors{"int(", "ivec2(", "ivec3(", "ivec4("}; const std::size_t coords_count{operation.GetOperandsCount()}; @@ -1450,6 +1489,11 @@ private: return GenerateUnary(operation, "bitCount", type, type); } + template <Type type> + Expression BitMSB(Operation operation) { + return GenerateUnary(operation, "findMSB", type, type); + } + Expression HNegate(Operation operation) { const auto GetNegate = [&](std::size_t index) { return VisitOperand(operation, index).AsBool() + " ? -1 : 1"; @@ -1668,10 +1712,17 @@ private: ASSERT(meta); const auto type = meta->sampler.IsShadow() ? 
Type::Float : Type::Int; - return {GenerateTexture(operation, "Gather", - {TextureAoffi{}, TextureArgument{type, meta->component}}) + - GetSwizzle(meta->element), - Type::Float}; + if (meta->sampler.IsShadow()) { + return {GenerateTexture(operation, "Gather", {TextureAoffi{}}, true) + + GetSwizzle(meta->element), + Type::Float}; + } else { + return {GenerateTexture(operation, "Gather", + {TextureAoffi{}, TextureArgument{type, meta->component}}, + false) + + GetSwizzle(meta->element), + Type::Float}; + } } Expression TextureQueryDimensions(Operation operation) { @@ -1738,6 +1789,14 @@ private: return {std::move(expr), Type::Float}; } + Expression TextureGradient(Operation operation) { + const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); + ASSERT(meta); + + std::string expr = GenerateTexture(operation, "Grad", {TextureDerivates{}, TextureAoffi{}}); + return {std::move(expr) + GetSwizzle(meta->element), Type::Float}; + } + Expression ImageLoad(Operation operation) { if (!device.HasImageLoadFormatted()) { LOG_ERROR(Render_OpenGL, @@ -1869,6 +1928,10 @@ private: return {}; } + Expression InvocationId(Operation operation) { + return {"gl_InvocationID", Type::Int}; + } + Expression YNegate(Operation operation) { return {"y_direction", Type::Float}; } @@ -1942,6 +2005,11 @@ private: return {fmt::format("readInvocationARB({}, {})", value, index), Type::Float}; } + Expression MemoryBarrierGL(Operation) { + code.AddLine("memoryBarrier();"); + return {}; + } + struct Func final { Func() = delete; ~Func() = delete; @@ -2003,6 +2071,7 @@ private: &GLSLDecompiler::BitfieldInsert<Type::Int>, &GLSLDecompiler::BitfieldExtract<Type::Int>, &GLSLDecompiler::BitCount<Type::Int>, + &GLSLDecompiler::BitMSB<Type::Int>, &GLSLDecompiler::Add<Type::Uint>, &GLSLDecompiler::Mul<Type::Uint>, @@ -2021,6 +2090,7 @@ private: &GLSLDecompiler::BitfieldInsert<Type::Uint>, &GLSLDecompiler::BitfieldExtract<Type::Uint>, &GLSLDecompiler::BitCount<Type::Uint>, + &GLSLDecompiler::BitMSB<Type::Uint>, &GLSLDecompiler::Add<Type::HalfFloat>, &GLSLDecompiler::Mul<Type::HalfFloat>, @@ -2084,6 +2154,7 @@ private: &GLSLDecompiler::TextureQueryDimensions, &GLSLDecompiler::TextureQueryLod, &GLSLDecompiler::TexelFetch, + &GLSLDecompiler::TextureGradient, &GLSLDecompiler::ImageLoad, &GLSLDecompiler::ImageStore, @@ -2104,6 +2175,7 @@ private: &GLSLDecompiler::EmitVertex, &GLSLDecompiler::EndPrimitive, + &GLSLDecompiler::InvocationId, &GLSLDecompiler::YNegate, &GLSLDecompiler::LocalInvocationId<0>, &GLSLDecompiler::LocalInvocationId<1>, @@ -2119,6 +2191,8 @@ private: &GLSLDecompiler::ThreadId, &GLSLDecompiler::ShuffleIndexed, + + &GLSLDecompiler::MemoryBarrierGL, }; static_assert(operation_decompilers.size() == static_cast<std::size_t>(OperationCode::Amount)); diff --git a/src/video_core/renderer_opengl/gl_state.cpp b/src/video_core/renderer_opengl/gl_state.cpp index 39b3986d3..ccc1e050a 100644 --- a/src/video_core/renderer_opengl/gl_state.cpp +++ b/src/video_core/renderer_opengl/gl_state.cpp @@ -411,8 +411,9 @@ void OpenGLState::ApplyAlphaTest() { } void OpenGLState::ApplyClipControl() { - if (UpdateValue(cur_state.clip_control.origin, clip_control.origin)) { - glClipControl(clip_control.origin, GL_NEGATIVE_ONE_TO_ONE); + if (UpdateTie(std::tie(cur_state.clip_control.origin, cur_state.clip_control.depth_mode), + std::tie(clip_control.origin, clip_control.depth_mode))) { + glClipControl(clip_control.origin, clip_control.depth_mode); } } diff --git a/src/video_core/renderer_opengl/gl_state.h 
b/src/video_core/renderer_opengl/gl_state.h index e53c2c5f2..0b5895084 100644 --- a/src/video_core/renderer_opengl/gl_state.h +++ b/src/video_core/renderer_opengl/gl_state.h @@ -150,6 +150,7 @@ public: struct { GLenum origin = GL_LOWER_LEFT; + GLenum depth_mode = GL_NEGATIVE_ONE_TO_ONE; } clip_control; OpenGLState(); diff --git a/src/video_core/renderer_vulkan/declarations.h b/src/video_core/renderer_vulkan/declarations.h index ba25b5bc7..323bf6b39 100644 --- a/src/video_core/renderer_vulkan/declarations.h +++ b/src/video_core/renderer_vulkan/declarations.h @@ -4,6 +4,17 @@ #pragma once +namespace vk { +class DispatchLoaderDynamic; +} + +namespace Vulkan { +constexpr vk::DispatchLoaderDynamic* dont_use_me_dld = nullptr; +} + +#define VULKAN_HPP_DEFAULT_DISPATCHER (*::Vulkan::dont_use_me_dld) +#define VULKAN_HPP_ENABLE_DYNAMIC_LOADER_TOOL 0 +#define VULKAN_HPP_DISPATCH_LOADER_DYNAMIC 1 #include <vulkan/vulkan.hpp> namespace Vulkan { @@ -41,5 +52,7 @@ using UniqueSemaphore = UniqueHandle<vk::Semaphore>; using UniqueShaderModule = UniqueHandle<vk::ShaderModule>; using UniqueSwapchainKHR = UniqueHandle<vk::SwapchainKHR>; using UniqueValidationCacheEXT = UniqueHandle<vk::ValidationCacheEXT>; +using UniqueDebugReportCallbackEXT = UniqueHandle<vk::DebugReportCallbackEXT>; +using UniqueDebugUtilsMessengerEXT = UniqueHandle<vk::DebugUtilsMessengerEXT>; } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp index 7f0eb6b74..000e3616d 100644 --- a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp +++ b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp @@ -44,7 +44,8 @@ vk::SamplerMipmapMode MipmapMode(Tegra::Texture::TextureMipmapFilter mipmap_filt return {}; } -vk::SamplerAddressMode WrapMode(Tegra::Texture::WrapMode wrap_mode) { +vk::SamplerAddressMode WrapMode(Tegra::Texture::WrapMode wrap_mode, + Tegra::Texture::TextureFilter filter) { switch (wrap_mode) { case Tegra::Texture::WrapMode::Wrap: return vk::SamplerAddressMode::eRepeat; @@ -55,10 +56,15 @@ vk::SamplerAddressMode WrapMode(Tegra::Texture::WrapMode wrap_mode) { case Tegra::Texture::WrapMode::Border: return vk::SamplerAddressMode::eClampToBorder; case Tegra::Texture::WrapMode::Clamp: - // TODO(Rodrigo): GL_CLAMP was removed as of OpenGL 3.1, to implement GL_CLAMP, we can use - // eClampToBorder to get the border color of the texture, and then sample the edge to - // manually mix them. However the shader part of this is not yet implemented. 
- return vk::SamplerAddressMode::eClampToBorder; + // TODO(Rodrigo): Emulate GL_CLAMP properly + switch (filter) { + case Tegra::Texture::TextureFilter::Nearest: + return vk::SamplerAddressMode::eClampToEdge; + case Tegra::Texture::TextureFilter::Linear: + return vk::SamplerAddressMode::eClampToBorder; + } + UNREACHABLE(); + return vk::SamplerAddressMode::eClampToEdge; case Tegra::Texture::WrapMode::MirrorOnceClampToEdge: return vk::SamplerAddressMode::eMirrorClampToEdge; case Tegra::Texture::WrapMode::MirrorOnceBorder: @@ -96,106 +102,140 @@ vk::CompareOp DepthCompareFunction(Tegra::Texture::DepthCompareFunc depth_compar } // namespace Sampler +namespace { + +enum : u32 { Attachable = 1, Storage = 2 }; + struct FormatTuple { vk::Format format; ///< Vulkan format - bool attachable; ///< True when this format can be used as an attachment -}; - -static constexpr std::array<FormatTuple, VideoCore::Surface::MaxPixelFormat> tex_format_tuples = {{ - {vk::Format::eA8B8G8R8UnormPack32, true}, // ABGR8U - {vk::Format::eUndefined, false}, // ABGR8S - {vk::Format::eUndefined, false}, // ABGR8UI - {vk::Format::eB5G6R5UnormPack16, false}, // B5G6R5U - {vk::Format::eA2B10G10R10UnormPack32, true}, // A2B10G10R10U - {vk::Format::eUndefined, false}, // A1B5G5R5U - {vk::Format::eR8Unorm, true}, // R8U - {vk::Format::eUndefined, false}, // R8UI - {vk::Format::eUndefined, false}, // RGBA16F - {vk::Format::eUndefined, false}, // RGBA16U - {vk::Format::eUndefined, false}, // RGBA16UI - {vk::Format::eUndefined, false}, // R11FG11FB10F - {vk::Format::eUndefined, false}, // RGBA32UI - {vk::Format::eBc1RgbaUnormBlock, false}, // DXT1 - {vk::Format::eBc2UnormBlock, false}, // DXT23 - {vk::Format::eBc3UnormBlock, false}, // DXT45 - {vk::Format::eBc4UnormBlock, false}, // DXN1 - {vk::Format::eUndefined, false}, // DXN2UNORM - {vk::Format::eUndefined, false}, // DXN2SNORM - {vk::Format::eUndefined, false}, // BC7U - {vk::Format::eUndefined, false}, // BC6H_UF16 - {vk::Format::eUndefined, false}, // BC6H_SF16 - {vk::Format::eUndefined, false}, // ASTC_2D_4X4 - {vk::Format::eUndefined, false}, // BGRA8 - {vk::Format::eUndefined, false}, // RGBA32F - {vk::Format::eUndefined, false}, // RG32F - {vk::Format::eUndefined, false}, // R32F - {vk::Format::eUndefined, false}, // R16F - {vk::Format::eUndefined, false}, // R16U - {vk::Format::eUndefined, false}, // R16S - {vk::Format::eUndefined, false}, // R16UI - {vk::Format::eUndefined, false}, // R16I - {vk::Format::eUndefined, false}, // RG16 - {vk::Format::eUndefined, false}, // RG16F - {vk::Format::eUndefined, false}, // RG16UI - {vk::Format::eUndefined, false}, // RG16I - {vk::Format::eUndefined, false}, // RG16S - {vk::Format::eUndefined, false}, // RGB32F - {vk::Format::eA8B8G8R8SrgbPack32, true}, // RGBA8_SRGB - {vk::Format::eUndefined, false}, // RG8U - {vk::Format::eUndefined, false}, // RG8S - {vk::Format::eUndefined, false}, // RG32UI - {vk::Format::eUndefined, false}, // RGBX16F - {vk::Format::eUndefined, false}, // R32UI - {vk::Format::eUndefined, false}, // ASTC_2D_8X8 - {vk::Format::eUndefined, false}, // ASTC_2D_8X5 - {vk::Format::eUndefined, false}, // ASTC_2D_5X4 - - // Compressed sRGB formats - {vk::Format::eUndefined, false}, // BGRA8_SRGB - {vk::Format::eUndefined, false}, // DXT1_SRGB - {vk::Format::eUndefined, false}, // DXT23_SRGB - {vk::Format::eUndefined, false}, // DXT45_SRGB - {vk::Format::eUndefined, false}, // BC7U_SRGB - {vk::Format::eUndefined, false}, // ASTC_2D_4X4_SRGB - {vk::Format::eUndefined, false}, // ASTC_2D_8X8_SRGB - 
{vk::Format::eUndefined, false}, // ASTC_2D_8X5_SRGB - {vk::Format::eUndefined, false}, // ASTC_2D_5X4_SRGB - {vk::Format::eUndefined, false}, // ASTC_2D_5X5 - {vk::Format::eUndefined, false}, // ASTC_2D_5X5_SRGB - {vk::Format::eUndefined, false}, // ASTC_2D_10X8 - {vk::Format::eUndefined, false}, // ASTC_2D_10X8_SRGB + int usage; ///< Describes image format usage +} constexpr tex_format_tuples[] = { + {vk::Format::eA8B8G8R8UnormPack32, Attachable | Storage}, // ABGR8U + {vk::Format::eA8B8G8R8SnormPack32, Attachable | Storage}, // ABGR8S + {vk::Format::eA8B8G8R8UintPack32, Attachable | Storage}, // ABGR8UI + {vk::Format::eB5G6R5UnormPack16, {}}, // B5G6R5U + {vk::Format::eA2B10G10R10UnormPack32, Attachable | Storage}, // A2B10G10R10U + {vk::Format::eA1R5G5B5UnormPack16, Attachable | Storage}, // A1B5G5R5U (flipped with swizzle) + {vk::Format::eR8Unorm, Attachable | Storage}, // R8U + {vk::Format::eR8Uint, Attachable | Storage}, // R8UI + {vk::Format::eR16G16B16A16Sfloat, Attachable | Storage}, // RGBA16F + {vk::Format::eR16G16B16A16Unorm, Attachable | Storage}, // RGBA16U + {vk::Format::eR16G16B16A16Uint, Attachable | Storage}, // RGBA16UI + {vk::Format::eB10G11R11UfloatPack32, Attachable | Storage}, // R11FG11FB10F + {vk::Format::eR32G32B32A32Uint, Attachable | Storage}, // RGBA32UI + {vk::Format::eBc1RgbaUnormBlock, {}}, // DXT1 + {vk::Format::eBc2UnormBlock, {}}, // DXT23 + {vk::Format::eBc3UnormBlock, {}}, // DXT45 + {vk::Format::eBc4UnormBlock, {}}, // DXN1 + {vk::Format::eBc5UnormBlock, {}}, // DXN2UNORM + {vk::Format::eBc5SnormBlock, {}}, // DXN2SNORM + {vk::Format::eBc7UnormBlock, {}}, // BC7U + {vk::Format::eBc6HUfloatBlock, {}}, // BC6H_UF16 + {vk::Format::eBc6HSfloatBlock, {}}, // BC6H_SF16 + {vk::Format::eAstc4x4UnormBlock, {}}, // ASTC_2D_4X4 + {vk::Format::eB8G8R8A8Unorm, {}}, // BGRA8 + {vk::Format::eR32G32B32A32Sfloat, Attachable | Storage}, // RGBA32F + {vk::Format::eR32G32Sfloat, Attachable | Storage}, // RG32F + {vk::Format::eR32Sfloat, Attachable | Storage}, // R32F + {vk::Format::eR16Sfloat, Attachable | Storage}, // R16F + {vk::Format::eR16Unorm, Attachable | Storage}, // R16U + {vk::Format::eUndefined, {}}, // R16S + {vk::Format::eUndefined, {}}, // R16UI + {vk::Format::eUndefined, {}}, // R16I + {vk::Format::eR16G16Unorm, Attachable | Storage}, // RG16 + {vk::Format::eR16G16Sfloat, Attachable | Storage}, // RG16F + {vk::Format::eUndefined, {}}, // RG16UI + {vk::Format::eUndefined, {}}, // RG16I + {vk::Format::eR16G16Snorm, Attachable | Storage}, // RG16S + {vk::Format::eUndefined, {}}, // RGB32F + {vk::Format::eR8G8B8A8Srgb, Attachable}, // RGBA8_SRGB + {vk::Format::eR8G8Unorm, Attachable | Storage}, // RG8U + {vk::Format::eR8G8Snorm, Attachable | Storage}, // RG8S + {vk::Format::eR32G32Uint, Attachable | Storage}, // RG32UI + {vk::Format::eUndefined, {}}, // RGBX16F + {vk::Format::eR32Uint, Attachable | Storage}, // R32UI + {vk::Format::eAstc8x8UnormBlock, {}}, // ASTC_2D_8X8 + {vk::Format::eUndefined, {}}, // ASTC_2D_8X5 + {vk::Format::eUndefined, {}}, // ASTC_2D_5X4 + {vk::Format::eUndefined, {}}, // BGRA8_SRGB + {vk::Format::eBc1RgbaSrgbBlock, {}}, // DXT1_SRGB + {vk::Format::eUndefined, {}}, // DXT23_SRGB + {vk::Format::eBc3SrgbBlock, {}}, // DXT45_SRGB + {vk::Format::eBc7SrgbBlock, {}}, // BC7U_SRGB + {vk::Format::eR4G4B4A4UnormPack16, Attachable}, // R4G4B4A4U + {vk::Format::eAstc4x4SrgbBlock, {}}, // ASTC_2D_4X4_SRGB + {vk::Format::eAstc8x8SrgbBlock, {}}, // ASTC_2D_8X8_SRGB + {vk::Format::eAstc8x5SrgbBlock, {}}, // ASTC_2D_8X5_SRGB + 
{vk::Format::eAstc5x4SrgbBlock, {}}, // ASTC_2D_5X4_SRGB + {vk::Format::eAstc5x5UnormBlock, {}}, // ASTC_2D_5X5 + {vk::Format::eAstc5x5SrgbBlock, {}}, // ASTC_2D_5X5_SRGB + {vk::Format::eAstc10x8UnormBlock, {}}, // ASTC_2D_10X8 + {vk::Format::eAstc10x8SrgbBlock, {}}, // ASTC_2D_10X8_SRGB + {vk::Format::eAstc6x6UnormBlock, {}}, // ASTC_2D_6X6 + {vk::Format::eAstc6x6SrgbBlock, {}}, // ASTC_2D_6X6_SRGB + {vk::Format::eAstc10x10UnormBlock, {}}, // ASTC_2D_10X10 + {vk::Format::eAstc10x10SrgbBlock, {}}, // ASTC_2D_10X10_SRGB + {vk::Format::eAstc12x12UnormBlock, {}}, // ASTC_2D_12X12 + {vk::Format::eAstc12x12SrgbBlock, {}}, // ASTC_2D_12X12_SRGB + {vk::Format::eAstc8x6UnormBlock, {}}, // ASTC_2D_8X6 + {vk::Format::eAstc8x6SrgbBlock, {}}, // ASTC_2D_8X6_SRGB + {vk::Format::eAstc6x5UnormBlock, {}}, // ASTC_2D_6X5 + {vk::Format::eAstc6x5SrgbBlock, {}}, // ASTC_2D_6X5_SRGB + {vk::Format::eE5B9G9R9UfloatPack32, {}}, // E5B9G9R9F // Depth formats - {vk::Format::eD32Sfloat, true}, // Z32F - {vk::Format::eD16Unorm, true}, // Z16 + {vk::Format::eD32Sfloat, Attachable}, // Z32F + {vk::Format::eD16Unorm, Attachable}, // Z16 // DepthStencil formats - {vk::Format::eD24UnormS8Uint, true}, // Z24S8 - {vk::Format::eD24UnormS8Uint, true}, // S8Z24 (emulated) - {vk::Format::eUndefined, false}, // Z32FS8 -}}; + {vk::Format::eD24UnormS8Uint, Attachable}, // Z24S8 + {vk::Format::eD24UnormS8Uint, Attachable}, // S8Z24 (emulated) + {vk::Format::eD32SfloatS8Uint, Attachable}, // Z32FS8 +}; +static_assert(std::size(tex_format_tuples) == VideoCore::Surface::MaxPixelFormat); -static constexpr bool IsZetaFormat(PixelFormat pixel_format) { +constexpr bool IsZetaFormat(PixelFormat pixel_format) { return pixel_format >= PixelFormat::MaxColorFormat && pixel_format < PixelFormat::MaxDepthStencilFormat; } -std::pair<vk::Format, bool> SurfaceFormat(const VKDevice& device, FormatType format_type, - PixelFormat pixel_format) { - ASSERT(static_cast<std::size_t>(pixel_format) < tex_format_tuples.size()); +} // Anonymous namespace + +FormatInfo SurfaceFormat(const VKDevice& device, FormatType format_type, PixelFormat pixel_format) { + ASSERT(static_cast<std::size_t>(pixel_format) < std::size(tex_format_tuples)); - const auto tuple = tex_format_tuples[static_cast<u32>(pixel_format)]; - UNIMPLEMENTED_IF_MSG(tuple.format == vk::Format::eUndefined, - "Unimplemented texture format with pixel format={}", - static_cast<u32>(pixel_format)); + auto tuple = tex_format_tuples[static_cast<std::size_t>(pixel_format)]; + if (tuple.format == vk::Format::eUndefined) { + UNIMPLEMENTED_MSG("Unimplemented texture format with pixel format={}", + static_cast<u32>(pixel_format)); + return {vk::Format::eA8B8G8R8UnormPack32, true, true}; + } + + // Use ABGR8 on hardware that doesn't support ASTC natively + if (!device.IsOptimalAstcSupported() && VideoCore::Surface::IsPixelFormatASTC(pixel_format)) { + tuple.format = VideoCore::Surface::IsPixelFormatSRGB(pixel_format) + ? vk::Format::eA8B8G8R8SrgbPack32 + : vk::Format::eA8B8G8R8UnormPack32; + } + const bool attachable = tuple.usage & Attachable; + const bool storage = tuple.usage & Storage; - auto usage = vk::FormatFeatureFlagBits::eSampledImage | - vk::FormatFeatureFlagBits::eTransferDst | vk::FormatFeatureFlagBits::eTransferSrc; - if (tuple.attachable) { - usage |= IsZetaFormat(pixel_format) ? 
vk::FormatFeatureFlagBits::eDepthStencilAttachment - : vk::FormatFeatureFlagBits::eColorAttachment; + vk::FormatFeatureFlags usage; + if (format_type == FormatType::Buffer) { + usage = vk::FormatFeatureFlagBits::eStorageTexelBuffer | + vk::FormatFeatureFlagBits::eUniformTexelBuffer; + } else { + usage = vk::FormatFeatureFlagBits::eSampledImage | vk::FormatFeatureFlagBits::eTransferDst | + vk::FormatFeatureFlagBits::eTransferSrc; + if (attachable) { + usage |= IsZetaFormat(pixel_format) ? vk::FormatFeatureFlagBits::eDepthStencilAttachment + : vk::FormatFeatureFlagBits::eColorAttachment; + } + if (storage) { + usage |= vk::FormatFeatureFlagBits::eStorageImage; + } } - return {device.GetSupportedFormat(tuple.format, usage, format_type), tuple.attachable}; + return {device.GetSupportedFormat(tuple.format, usage, format_type), attachable, storage}; } vk::ShaderStageFlagBits ShaderStage(Tegra::Engines::ShaderType stage) { @@ -215,7 +255,8 @@ vk::ShaderStageFlagBits ShaderStage(Tegra::Engines::ShaderType stage) { return {}; } -vk::PrimitiveTopology PrimitiveTopology(Maxwell::PrimitiveTopology topology) { +vk::PrimitiveTopology PrimitiveTopology([[maybe_unused]] const VKDevice& device, + Maxwell::PrimitiveTopology topology) { switch (topology) { case Maxwell::PrimitiveTopology::Points: return vk::PrimitiveTopology::ePointList; @@ -227,6 +268,13 @@ vk::PrimitiveTopology PrimitiveTopology(Maxwell::PrimitiveTopology topology) { return vk::PrimitiveTopology::eTriangleList; case Maxwell::PrimitiveTopology::TriangleStrip: return vk::PrimitiveTopology::eTriangleStrip; + case Maxwell::PrimitiveTopology::TriangleFan: + return vk::PrimitiveTopology::eTriangleFan; + case Maxwell::PrimitiveTopology::Quads: + // TODO(Rodrigo): Use VK_PRIMITIVE_TOPOLOGY_QUAD_LIST_EXT whenever it releases + return vk::PrimitiveTopology::eTriangleList; + case Maxwell::PrimitiveTopology::Patches: + return vk::PrimitiveTopology::ePatchList; default: UNIMPLEMENTED_MSG("Unimplemented topology={}", static_cast<u32>(topology)); return {}; @@ -236,37 +284,111 @@ vk::PrimitiveTopology PrimitiveTopology(Maxwell::PrimitiveTopology topology) { vk::Format VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttribute::Size size) { switch (type) { case Maxwell::VertexAttribute::Type::SignedNorm: + switch (size) { + case Maxwell::VertexAttribute::Size::Size_8: + return vk::Format::eR8Snorm; + case Maxwell::VertexAttribute::Size::Size_8_8: + return vk::Format::eR8G8Snorm; + case Maxwell::VertexAttribute::Size::Size_8_8_8: + return vk::Format::eR8G8B8Snorm; + case Maxwell::VertexAttribute::Size::Size_8_8_8_8: + return vk::Format::eR8G8B8A8Snorm; + case Maxwell::VertexAttribute::Size::Size_16: + return vk::Format::eR16Snorm; + case Maxwell::VertexAttribute::Size::Size_16_16: + return vk::Format::eR16G16Snorm; + case Maxwell::VertexAttribute::Size::Size_16_16_16: + return vk::Format::eR16G16B16Snorm; + case Maxwell::VertexAttribute::Size::Size_16_16_16_16: + return vk::Format::eR16G16B16A16Snorm; + case Maxwell::VertexAttribute::Size::Size_10_10_10_2: + return vk::Format::eA2B10G10R10SnormPack32; + default: + break; + } break; case Maxwell::VertexAttribute::Type::UnsignedNorm: switch (size) { + case Maxwell::VertexAttribute::Size::Size_8: + return vk::Format::eR8Unorm; + case Maxwell::VertexAttribute::Size::Size_8_8: + return vk::Format::eR8G8Unorm; + case Maxwell::VertexAttribute::Size::Size_8_8_8: + return vk::Format::eR8G8B8Unorm; case Maxwell::VertexAttribute::Size::Size_8_8_8_8: return vk::Format::eR8G8B8A8Unorm; + case 
Maxwell::VertexAttribute::Size::Size_16: + return vk::Format::eR16Unorm; + case Maxwell::VertexAttribute::Size::Size_16_16: + return vk::Format::eR16G16Unorm; + case Maxwell::VertexAttribute::Size::Size_16_16_16: + return vk::Format::eR16G16B16Unorm; + case Maxwell::VertexAttribute::Size::Size_16_16_16_16: + return vk::Format::eR16G16B16A16Unorm; default: break; } break; case Maxwell::VertexAttribute::Type::SignedInt: - break; + switch (size) { + case Maxwell::VertexAttribute::Size::Size_16_16_16_16: + return vk::Format::eR16G16B16A16Sint; + case Maxwell::VertexAttribute::Size::Size_8: + return vk::Format::eR8Sint; + case Maxwell::VertexAttribute::Size::Size_8_8: + return vk::Format::eR8G8Sint; + case Maxwell::VertexAttribute::Size::Size_8_8_8: + return vk::Format::eR8G8B8Sint; + case Maxwell::VertexAttribute::Size::Size_8_8_8_8: + return vk::Format::eR8G8B8A8Sint; + case Maxwell::VertexAttribute::Size::Size_32: + return vk::Format::eR32Sint; + default: + break; + } case Maxwell::VertexAttribute::Type::UnsignedInt: switch (size) { + case Maxwell::VertexAttribute::Size::Size_8: + return vk::Format::eR8Uint; + case Maxwell::VertexAttribute::Size::Size_8_8: + return vk::Format::eR8G8Uint; + case Maxwell::VertexAttribute::Size::Size_8_8_8: + return vk::Format::eR8G8B8Uint; + case Maxwell::VertexAttribute::Size::Size_8_8_8_8: + return vk::Format::eR8G8B8A8Uint; case Maxwell::VertexAttribute::Size::Size_32: return vk::Format::eR32Uint; default: break; } case Maxwell::VertexAttribute::Type::UnsignedScaled: + switch (size) { + case Maxwell::VertexAttribute::Size::Size_8_8: + return vk::Format::eR8G8Uscaled; + default: + break; + } + break; case Maxwell::VertexAttribute::Type::SignedScaled: break; case Maxwell::VertexAttribute::Type::Float: switch (size) { - case Maxwell::VertexAttribute::Size::Size_32_32_32_32: - return vk::Format::eR32G32B32A32Sfloat; - case Maxwell::VertexAttribute::Size::Size_32_32_32: - return vk::Format::eR32G32B32Sfloat; - case Maxwell::VertexAttribute::Size::Size_32_32: - return vk::Format::eR32G32Sfloat; case Maxwell::VertexAttribute::Size::Size_32: return vk::Format::eR32Sfloat; + case Maxwell::VertexAttribute::Size::Size_32_32: + return vk::Format::eR32G32Sfloat; + case Maxwell::VertexAttribute::Size::Size_32_32_32: + return vk::Format::eR32G32B32Sfloat; + case Maxwell::VertexAttribute::Size::Size_32_32_32_32: + return vk::Format::eR32G32B32A32Sfloat; + case Maxwell::VertexAttribute::Size::Size_16: + return vk::Format::eR16Sfloat; + case Maxwell::VertexAttribute::Size::Size_16_16: + return vk::Format::eR16G16Sfloat; + case Maxwell::VertexAttribute::Size::Size_16_16_16: + return vk::Format::eR16G16B16Sfloat; + case Maxwell::VertexAttribute::Size::Size_16_16_16_16: + return vk::Format::eR16G16B16A16Sfloat; default: break; } @@ -308,11 +430,14 @@ vk::CompareOp ComparisonOp(Maxwell::ComparisonOp comparison) { return {}; } -vk::IndexType IndexFormat(Maxwell::IndexFormat index_format) { +vk::IndexType IndexFormat(const VKDevice& device, Maxwell::IndexFormat index_format) { switch (index_format) { case Maxwell::IndexFormat::UnsignedByte: - UNIMPLEMENTED_MSG("Vulkan does not support native u8 index format"); - return vk::IndexType::eUint16; + if (!device.IsExtIndexTypeUint8Supported()) { + UNIMPLEMENTED_MSG("Native uint8 indices are not supported on this device"); + return vk::IndexType::eUint16; + } + return vk::IndexType::eUint8EXT; case Maxwell::IndexFormat::UnsignedShort: return vk::IndexType::eUint16; case Maxwell::IndexFormat::UnsignedInt: diff --git 
a/src/video_core/renderer_vulkan/maxwell_to_vk.h b/src/video_core/renderer_vulkan/maxwell_to_vk.h index 904a32e01..1534b738b 100644 --- a/src/video_core/renderer_vulkan/maxwell_to_vk.h +++ b/src/video_core/renderer_vulkan/maxwell_to_vk.h @@ -4,7 +4,6 @@ #pragma once -#include <utility> #include "common/common_types.h" #include "video_core/engines/maxwell_3d.h" #include "video_core/renderer_vulkan/declarations.h" @@ -23,24 +22,31 @@ vk::Filter Filter(Tegra::Texture::TextureFilter filter); vk::SamplerMipmapMode MipmapMode(Tegra::Texture::TextureMipmapFilter mipmap_filter); -vk::SamplerAddressMode WrapMode(Tegra::Texture::WrapMode wrap_mode); +vk::SamplerAddressMode WrapMode(Tegra::Texture::WrapMode wrap_mode, + Tegra::Texture::TextureFilter filter); vk::CompareOp DepthCompareFunction(Tegra::Texture::DepthCompareFunc depth_compare_func); } // namespace Sampler -std::pair<vk::Format, bool> SurfaceFormat(const VKDevice& device, FormatType format_type, - PixelFormat pixel_format); +struct FormatInfo { + vk::Format format; + bool attachable; + bool storage; +}; + +FormatInfo SurfaceFormat(const VKDevice& device, FormatType format_type, PixelFormat pixel_format); vk::ShaderStageFlagBits ShaderStage(Tegra::Engines::ShaderType stage); -vk::PrimitiveTopology PrimitiveTopology(Maxwell::PrimitiveTopology topology); +vk::PrimitiveTopology PrimitiveTopology(const VKDevice& device, + Maxwell::PrimitiveTopology topology); vk::Format VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttribute::Size size); vk::CompareOp ComparisonOp(Maxwell::ComparisonOp comparison); -vk::IndexType IndexFormat(Maxwell::IndexFormat index_format); +vk::IndexType IndexFormat(const VKDevice& device, Maxwell::IndexFormat index_format); vk::StencilOp StencilOp(Maxwell::StencilOp stencil_op); diff --git a/src/video_core/renderer_vulkan/vk_device.cpp b/src/video_core/renderer_vulkan/vk_device.cpp index 897cbb4e8..92854a4b3 100644 --- a/src/video_core/renderer_vulkan/vk_device.cpp +++ b/src/video_core/renderer_vulkan/vk_device.cpp @@ -3,6 +3,7 @@ // Refer to the license.txt file included. 
#include <bitset> +#include <cstdlib> #include <optional> #include <set> #include <string_view> @@ -15,6 +16,15 @@ namespace Vulkan { namespace { +namespace Alternatives { + +constexpr std::array Depth24UnormS8Uint = {vk::Format::eD32SfloatS8Uint, + vk::Format::eD16UnormS8Uint, vk::Format{}}; +constexpr std::array Depth16UnormS8Uint = {vk::Format::eD24UnormS8Uint, + vk::Format::eD32SfloatS8Uint, vk::Format{}}; + +} // namespace Alternatives + template <typename T> void SetNext(void**& next, T& data) { *next = &data; @@ -22,7 +32,7 @@ void SetNext(void**& next, T& data) { } template <typename T> -T GetFeatures(vk::PhysicalDevice physical, vk::DispatchLoaderDynamic dldi) { +T GetFeatures(vk::PhysicalDevice physical, const vk::DispatchLoaderDynamic& dldi) { vk::PhysicalDeviceFeatures2 features; T extension_features; features.pNext = &extension_features; @@ -30,17 +40,14 @@ T GetFeatures(vk::PhysicalDevice physical, vk::DispatchLoaderDynamic dldi) { return extension_features; } -} // Anonymous namespace - -namespace Alternatives { - -constexpr std::array Depth24UnormS8Uint = {vk::Format::eD32SfloatS8Uint, - vk::Format::eD16UnormS8Uint, vk::Format{}}; -constexpr std::array Depth16UnormS8Uint = {vk::Format::eD24UnormS8Uint, - vk::Format::eD32SfloatS8Uint, vk::Format{}}; -constexpr std::array Astc = {vk::Format::eA8B8G8R8UnormPack32, vk::Format{}}; - -} // namespace Alternatives +template <typename T> +T GetProperties(vk::PhysicalDevice physical, const vk::DispatchLoaderDynamic& dldi) { + vk::PhysicalDeviceProperties2 properties; + T extension_properties; + properties.pNext = &extension_properties; + physical.getProperties2(&properties, dldi); + return extension_properties; +} constexpr const vk::Format* GetFormatAlternatives(vk::Format format) { switch (format) { @@ -53,8 +60,7 @@ constexpr const vk::Format* GetFormatAlternatives(vk::Format format) { } } -constexpr vk::FormatFeatureFlags GetFormatFeatures(vk::FormatProperties properties, - FormatType format_type) { +vk::FormatFeatureFlags GetFormatFeatures(vk::FormatProperties properties, FormatType format_type) { switch (format_type) { case FormatType::Linear: return properties.linearTilingFeatures; @@ -67,11 +73,13 @@ constexpr vk::FormatFeatureFlags GetFormatFeatures(vk::FormatProperties properti } } +} // Anonymous namespace + VKDevice::VKDevice(const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDevice physical, vk::SurfaceKHR surface) - : physical{physical}, format_properties{GetFormatProperties(dldi, physical)} { + : physical{physical}, properties{physical.getProperties(dldi)}, + format_properties{GetFormatProperties(dldi, physical)} { SetupFamilies(dldi, surface); - SetupProperties(dldi); SetupFeatures(dldi); } @@ -89,12 +97,22 @@ bool VKDevice::Create(const vk::DispatchLoaderDynamic& dldi, vk::Instance instan features.depthClamp = true; features.samplerAnisotropy = true; features.largePoints = true; + features.multiViewport = true; + features.depthBiasClamp = true; + features.geometryShader = true; + features.tessellationShader = true; + features.fragmentStoresAndAtomics = true; + features.shaderImageGatherExtended = true; + features.shaderStorageImageWriteWithoutFormat = true; features.textureCompressionASTC_LDR = is_optimal_astc_supported; - vk::PhysicalDeviceVertexAttributeDivisorFeaturesEXT vertex_divisor; - vertex_divisor.vertexAttributeInstanceRateDivisor = true; - vertex_divisor.vertexAttributeInstanceRateZeroDivisor = true; - SetNext(next, vertex_divisor); + vk::PhysicalDevice16BitStorageFeaturesKHR bit16_storage; + 
bit16_storage.uniformAndStorageBuffer16BitAccess = true; + SetNext(next, bit16_storage); + + vk::PhysicalDevice8BitStorageFeaturesKHR bit8_storage; + bit8_storage.uniformAndStorageBuffer8BitAccess = true; + SetNext(next, bit8_storage); vk::PhysicalDeviceFloat16Int8FeaturesKHR float16_int8; if (is_float16_supported) { @@ -120,6 +138,10 @@ bool VKDevice::Create(const vk::DispatchLoaderDynamic& dldi, vk::Instance instan LOG_INFO(Render_Vulkan, "Device doesn't support uint8 indexes"); } + if (!ext_depth_range_unrestricted) { + LOG_INFO(Render_Vulkan, "Device doesn't support depth range unrestricted"); + } + vk::DeviceCreateInfo device_ci({}, static_cast<u32>(queue_cis.size()), queue_cis.data(), 0, nullptr, static_cast<u32>(extensions.size()), extensions.data(), nullptr); @@ -135,16 +157,7 @@ bool VKDevice::Create(const vk::DispatchLoaderDynamic& dldi, vk::Instance instan logical = UniqueDevice( dummy_logical, vk::ObjectDestroy<vk::NoParent, vk::DispatchLoaderDynamic>(nullptr, dld)); - if (khr_driver_properties) { - vk::PhysicalDeviceDriverPropertiesKHR driver; - vk::PhysicalDeviceProperties2 properties; - properties.pNext = &driver; - physical.getProperties2(&properties, dld); - driver_id = driver.driverID; - LOG_INFO(Render_Vulkan, "Driver: {} {}", driver.driverName, driver.driverInfo); - } else { - LOG_INFO(Render_Vulkan, "Driver: Unknown"); - } + CollectTelemetryParameters(); graphics_queue = logical->getQueue(graphics_family, 0, dld); present_queue = logical->getQueue(present_family, 0, dld); @@ -190,6 +203,18 @@ vk::Format VKDevice::GetSupportedFormat(vk::Format wanted_format, bool VKDevice::IsOptimalAstcSupported(const vk::PhysicalDeviceFeatures& features, const vk::DispatchLoaderDynamic& dldi) const { + // Disable for now to avoid converting ASTC twice. 
+ return false; + static constexpr std::array astc_formats = { + vk::Format::eAstc4x4SrgbBlock, vk::Format::eAstc8x8SrgbBlock, + vk::Format::eAstc8x5SrgbBlock, vk::Format::eAstc5x4SrgbBlock, + vk::Format::eAstc5x5UnormBlock, vk::Format::eAstc5x5SrgbBlock, + vk::Format::eAstc10x8UnormBlock, vk::Format::eAstc10x8SrgbBlock, + vk::Format::eAstc6x6UnormBlock, vk::Format::eAstc6x6SrgbBlock, + vk::Format::eAstc10x10UnormBlock, vk::Format::eAstc10x10SrgbBlock, + vk::Format::eAstc12x12UnormBlock, vk::Format::eAstc12x12SrgbBlock, + vk::Format::eAstc8x6UnormBlock, vk::Format::eAstc8x6SrgbBlock, + vk::Format::eAstc6x5UnormBlock, vk::Format::eAstc6x5SrgbBlock}; if (!features.textureCompressionASTC_LDR) { return false; } @@ -197,12 +222,6 @@ bool VKDevice::IsOptimalAstcSupported(const vk::PhysicalDeviceFeatures& features vk::FormatFeatureFlagBits::eSampledImage | vk::FormatFeatureFlagBits::eBlitSrc | vk::FormatFeatureFlagBits::eBlitDst | vk::FormatFeatureFlagBits::eTransferSrc | vk::FormatFeatureFlagBits::eTransferDst}; - constexpr std::array astc_formats = { - vk::Format::eAstc4x4UnormBlock, vk::Format::eAstc4x4SrgbBlock, - vk::Format::eAstc8x8SrgbBlock, vk::Format::eAstc8x6SrgbBlock, - vk::Format::eAstc5x4SrgbBlock, vk::Format::eAstc5x5UnormBlock, - vk::Format::eAstc5x5SrgbBlock, vk::Format::eAstc10x8UnormBlock, - vk::Format::eAstc10x8SrgbBlock}; for (const auto format : astc_formats) { const auto format_properties{physical.getFormatProperties(format, dldi)}; if (!(format_properties.optimalTilingFeatures & format_feature_usage)) { @@ -225,11 +244,17 @@ bool VKDevice::IsFormatSupported(vk::Format wanted_format, vk::FormatFeatureFlag bool VKDevice::IsSuitable(const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDevice physical, vk::SurfaceKHR surface) { - LOG_INFO(Render_Vulkan, "{}", physical.getProperties(dldi).deviceName); bool is_suitable = true; - constexpr std::array required_extensions = {VK_KHR_SWAPCHAIN_EXTENSION_NAME, - VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME}; + constexpr std::array required_extensions = { + VK_KHR_SWAPCHAIN_EXTENSION_NAME, + VK_KHR_16BIT_STORAGE_EXTENSION_NAME, + VK_KHR_8BIT_STORAGE_EXTENSION_NAME, + VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME, + VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME, + VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME, + VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME, + }; std::bitset<required_extensions.size()> available_extensions{}; for (const auto& prop : physical.enumerateDeviceExtensionProperties(nullptr, dldi)) { @@ -246,7 +271,7 @@ bool VKDevice::IsSuitable(const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDev if (available_extensions[i]) { continue; } - LOG_INFO(Render_Vulkan, "Missing required extension: {}", required_extensions[i]); + LOG_ERROR(Render_Vulkan, "Missing required extension: {}", required_extensions[i]); is_suitable = false; } } @@ -263,7 +288,7 @@ bool VKDevice::IsSuitable(const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDev has_present |= physical.getSurfaceSupportKHR(i, surface, dldi) != 0; } if (!has_graphics || !has_present) { - LOG_INFO(Render_Vulkan, "Device lacks a graphics and present queue"); + LOG_ERROR(Render_Vulkan, "Device lacks a graphics and present queue"); is_suitable = false; } @@ -273,8 +298,15 @@ bool VKDevice::IsSuitable(const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDev constexpr u32 required_ubo_size = 65536; if (limits.maxUniformBufferRange < required_ubo_size) { - LOG_INFO(Render_Vulkan, "Device UBO size {} is too small, {} is required)", - limits.maxUniformBufferRange, required_ubo_size); + 
LOG_ERROR(Render_Vulkan, "Device UBO size {} is too small, {} is required", + limits.maxUniformBufferRange, required_ubo_size); + is_suitable = false; + } + + constexpr u32 required_num_viewports = 16; + if (limits.maxViewports < required_num_viewports) { + LOG_ERROR(Render_Vulkan, "Device number of viewports {} is too small, {} is required", + limits.maxViewports, required_num_viewports); + is_suitable = false; + } + @@ -285,24 +317,32 @@ bool VKDevice::IsSuitable(const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDev std::make_pair(features.depthClamp, "depthClamp"), std::make_pair(features.samplerAnisotropy, "samplerAnisotropy"), std::make_pair(features.largePoints, "largePoints"), + std::make_pair(features.multiViewport, "multiViewport"), + std::make_pair(features.depthBiasClamp, "depthBiasClamp"), + std::make_pair(features.geometryShader, "geometryShader"), + std::make_pair(features.tessellationShader, "tessellationShader"), + std::make_pair(features.fragmentStoresAndAtomics, "fragmentStoresAndAtomics"), + std::make_pair(features.shaderImageGatherExtended, "shaderImageGatherExtended"), + std::make_pair(features.shaderStorageImageWriteWithoutFormat, + "shaderStorageImageWriteWithoutFormat"), }; for (const auto& [supported, name] : feature_report) { if (supported) { continue; } - LOG_INFO(Render_Vulkan, "Missing required feature: {}", name); + LOG_ERROR(Render_Vulkan, "Missing required feature: {}", name); is_suitable = false; } + if (!is_suitable) { + LOG_ERROR(Render_Vulkan, "{} is not suitable", properties.deviceName); + } + return is_suitable; } std::vector<const char*> VKDevice::LoadExtensions(const vk::DispatchLoaderDynamic& dldi) { std::vector<const char*> extensions; - extensions.reserve(7); - extensions.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME); - extensions.push_back(VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME); - const auto Test = [&](const vk::ExtensionProperties& extension, std::optional<std::reference_wrapper<bool>> status, const char* name, bool push) { @@ -317,13 +357,30 @@ std::vector<const char*> VKDevice::LoadExtensions(const vk::DispatchLoaderDynami } }; + extensions.reserve(13); + extensions.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME); + extensions.push_back(VK_KHR_16BIT_STORAGE_EXTENSION_NAME); + extensions.push_back(VK_KHR_8BIT_STORAGE_EXTENSION_NAME); + extensions.push_back(VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME); + extensions.push_back(VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME); + extensions.push_back(VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME); + extensions.push_back(VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME); + + [[maybe_unused]] const bool nsight = + std::getenv("NVTX_INJECTION64_PATH") || std::getenv("NSIGHT_LAUNCHED"); bool khr_shader_float16_int8{}; + bool ext_subgroup_size_control{}; for (const auto& extension : physical.enumerateDeviceExtensionProperties(nullptr, dldi)) { Test(extension, khr_uniform_buffer_standard_layout, VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_EXTENSION_NAME, true); - Test(extension, ext_index_type_uint8, VK_EXT_INDEX_TYPE_UINT8_EXTENSION_NAME, true); - Test(extension, khr_driver_properties, VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME, true); Test(extension, khr_shader_float16_int8, VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME, false); + Test(extension, ext_depth_range_unrestricted, + VK_EXT_DEPTH_RANGE_UNRESTRICTED_EXTENSION_NAME, true); + Test(extension, ext_index_type_uint8, VK_EXT_INDEX_TYPE_UINT8_EXTENSION_NAME, true); + Test(extension, ext_shader_viewport_index_layer, + VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME, true); + 
Test(extension, ext_subgroup_size_control, VK_EXT_SUBGROUP_SIZE_CONTROL_EXTENSION_NAME, + false); } if (khr_shader_float16_int8) { @@ -332,6 +389,23 @@ std::vector<const char*> VKDevice::LoadExtensions(const vk::DispatchLoaderDynami extensions.push_back(VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME); } + if (ext_subgroup_size_control) { + const auto features = + GetFeatures<vk::PhysicalDeviceSubgroupSizeControlFeaturesEXT>(physical, dldi); + const auto properties = + GetProperties<vk::PhysicalDeviceSubgroupSizeControlPropertiesEXT>(physical, dldi); + + is_warp_potentially_bigger = properties.maxSubgroupSize > GuestWarpSize; + + if (features.subgroupSizeControl && properties.minSubgroupSize <= GuestWarpSize && + properties.maxSubgroupSize >= GuestWarpSize) { + extensions.push_back(VK_EXT_SUBGROUP_SIZE_CONTROL_EXTENSION_NAME); + guest_warp_stages = properties.requiredSubgroupSizeStages; + } + } else { + is_warp_potentially_bigger = true; + } + return extensions; } @@ -358,19 +432,23 @@ void VKDevice::SetupFamilies(const vk::DispatchLoaderDynamic& dldi, vk::SurfaceK present_family = *present_family_; } -void VKDevice::SetupProperties(const vk::DispatchLoaderDynamic& dldi) { - const auto props = physical.getProperties(dldi); - device_type = props.deviceType; - uniform_buffer_alignment = static_cast<u64>(props.limits.minUniformBufferOffsetAlignment); - storage_buffer_alignment = static_cast<u64>(props.limits.minStorageBufferOffsetAlignment); - max_storage_buffer_range = static_cast<u64>(props.limits.maxStorageBufferRange); -} - void VKDevice::SetupFeatures(const vk::DispatchLoaderDynamic& dldi) { const auto supported_features{physical.getFeatures(dldi)}; is_optimal_astc_supported = IsOptimalAstcSupported(supported_features, dldi); } +void VKDevice::CollectTelemetryParameters() { + const auto driver = GetProperties<vk::PhysicalDeviceDriverPropertiesKHR>(physical, dld); + driver_id = driver.driverID; + vendor_name = driver.driverName; + + const auto extensions = physical.enumerateDeviceExtensionProperties(nullptr, dld); + reported_extensions.reserve(std::size(extensions)); + for (const auto& extension : extensions) { + reported_extensions.push_back(extension.extensionName); + } +} + std::vector<vk::DeviceQueueCreateInfo> VKDevice::GetDeviceQueueCreateInfos() const { static const float QUEUE_PRIORITY = 1.0f; @@ -385,50 +463,70 @@ std::vector<vk::DeviceQueueCreateInfo> VKDevice::GetDeviceQueueCreateInfos() con std::unordered_map<vk::Format, vk::FormatProperties> VKDevice::GetFormatProperties( const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDevice physical) { - constexpr std::array formats{vk::Format::eA8B8G8R8UnormPack32, - vk::Format::eA8B8G8R8SnormPack32, - vk::Format::eA8B8G8R8SrgbPack32, - vk::Format::eB5G6R5UnormPack16, - vk::Format::eA2B10G10R10UnormPack32, - vk::Format::eR32G32B32A32Sfloat, - vk::Format::eR16G16B16A16Uint, - vk::Format::eR16G16Unorm, - vk::Format::eR16G16Snorm, - vk::Format::eR16G16Sfloat, - vk::Format::eR16Unorm, - vk::Format::eR8G8B8A8Srgb, - vk::Format::eR8G8Unorm, - vk::Format::eR8G8Snorm, - vk::Format::eR8Unorm, - vk::Format::eB10G11R11UfloatPack32, - vk::Format::eR32Sfloat, - vk::Format::eR16Sfloat, - vk::Format::eR16G16B16A16Sfloat, - vk::Format::eB8G8R8A8Unorm, - vk::Format::eD32Sfloat, - vk::Format::eD16Unorm, - vk::Format::eD16UnormS8Uint, - vk::Format::eD24UnormS8Uint, - vk::Format::eD32SfloatS8Uint, - vk::Format::eBc1RgbaUnormBlock, - vk::Format::eBc2UnormBlock, - vk::Format::eBc3UnormBlock, - vk::Format::eBc4UnormBlock, - vk::Format::eBc5UnormBlock, - 
vk::Format::eBc5SnormBlock, - vk::Format::eBc7UnormBlock, - vk::Format::eBc1RgbaSrgbBlock, - vk::Format::eBc3SrgbBlock, - vk::Format::eBc7SrgbBlock, - vk::Format::eAstc4x4UnormBlock, - vk::Format::eAstc4x4SrgbBlock, - vk::Format::eAstc8x8SrgbBlock, - vk::Format::eAstc8x6SrgbBlock, - vk::Format::eAstc5x4SrgbBlock, - vk::Format::eAstc5x5UnormBlock, - vk::Format::eAstc5x5SrgbBlock, - vk::Format::eAstc10x8UnormBlock, - vk::Format::eAstc10x8SrgbBlock}; + static constexpr std::array formats{vk::Format::eA8B8G8R8UnormPack32, + vk::Format::eA8B8G8R8SnormPack32, + vk::Format::eA8B8G8R8SrgbPack32, + vk::Format::eB5G6R5UnormPack16, + vk::Format::eA2B10G10R10UnormPack32, + vk::Format::eA1R5G5B5UnormPack16, + vk::Format::eR32G32B32A32Sfloat, + vk::Format::eR32G32B32A32Uint, + vk::Format::eR32G32Sfloat, + vk::Format::eR32G32Uint, + vk::Format::eR16G16B16A16Uint, + vk::Format::eR16G16B16A16Unorm, + vk::Format::eR16G16Unorm, + vk::Format::eR16G16Snorm, + vk::Format::eR16G16Sfloat, + vk::Format::eR16Unorm, + vk::Format::eR8G8B8A8Srgb, + vk::Format::eR8G8Unorm, + vk::Format::eR8G8Snorm, + vk::Format::eR8Unorm, + vk::Format::eR8Uint, + vk::Format::eB10G11R11UfloatPack32, + vk::Format::eR32Sfloat, + vk::Format::eR32Uint, + vk::Format::eR16Sfloat, + vk::Format::eR16G16B16A16Sfloat, + vk::Format::eB8G8R8A8Unorm, + vk::Format::eR4G4B4A4UnormPack16, + vk::Format::eD32Sfloat, + vk::Format::eD16Unorm, + vk::Format::eD16UnormS8Uint, + vk::Format::eD24UnormS8Uint, + vk::Format::eD32SfloatS8Uint, + vk::Format::eBc1RgbaUnormBlock, + vk::Format::eBc2UnormBlock, + vk::Format::eBc3UnormBlock, + vk::Format::eBc4UnormBlock, + vk::Format::eBc5UnormBlock, + vk::Format::eBc5SnormBlock, + vk::Format::eBc7UnormBlock, + vk::Format::eBc6HUfloatBlock, + vk::Format::eBc6HSfloatBlock, + vk::Format::eBc1RgbaSrgbBlock, + vk::Format::eBc3SrgbBlock, + vk::Format::eBc7SrgbBlock, + vk::Format::eAstc4x4SrgbBlock, + vk::Format::eAstc8x8SrgbBlock, + vk::Format::eAstc8x5SrgbBlock, + vk::Format::eAstc5x4SrgbBlock, + vk::Format::eAstc5x5UnormBlock, + vk::Format::eAstc5x5SrgbBlock, + vk::Format::eAstc10x8UnormBlock, + vk::Format::eAstc10x8SrgbBlock, + vk::Format::eAstc6x6UnormBlock, + vk::Format::eAstc6x6SrgbBlock, + vk::Format::eAstc10x10UnormBlock, + vk::Format::eAstc10x10SrgbBlock, + vk::Format::eAstc12x12UnormBlock, + vk::Format::eAstc12x12SrgbBlock, + vk::Format::eAstc8x6UnormBlock, + vk::Format::eAstc8x6SrgbBlock, + vk::Format::eAstc6x5UnormBlock, + vk::Format::eAstc6x5SrgbBlock, + vk::Format::eE5B9G9R9UfloatPack32}; std::unordered_map<vk::Format, vk::FormatProperties> format_properties; for (const auto format : formats) { format_properties.emplace(format, physical.getFormatProperties(format, dldi)); diff --git a/src/video_core/renderer_vulkan/vk_device.h b/src/video_core/renderer_vulkan/vk_device.h index 010d4c3d6..a844c52df 100644 --- a/src/video_core/renderer_vulkan/vk_device.h +++ b/src/video_core/renderer_vulkan/vk_device.h @@ -4,6 +4,8 @@ #pragma once +#include <string> +#include <string_view> #include <unordered_map> #include <vector> #include "common/common_types.h" @@ -14,6 +16,9 @@ namespace Vulkan { /// Format usage descriptor. enum class FormatType { Linear, Optimal, Buffer }; +/// Subgroup size of the guest emulated hardware (Nvidia has 32 threads per subgroup). +const u32 GuestWarpSize = 32; + /// Handles data specific to a physical device. class VKDevice final { public: @@ -71,7 +76,22 @@ public: /// Returns true if the device is integrated with the host CPU. 
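
The expanded format list above is queried once at device initialization and cached in an unordered_map keyed by vk::Format. A short sketch of how such a cache can answer later support checks, assuming an unqueried format counts as unsupported:

    #include <unordered_map>
    #include <vulkan/vulkan.hpp>

    bool SupportsOptimal(const std::unordered_map<vk::Format, vk::FormatProperties>& cache,
                         vk::Format format, vk::FormatFeatureFlags wanted) {
        const auto it = cache.find(format);
        if (it == cache.end()) {
            return false; // never queried; treated as unsupported in this sketch
        }
        return (it->second.optimalTilingFeatures & wanted) == wanted;
    }

This keeps repeated getFormatProperties calls off hot paths. The header continues below with IsIntegrated and the other property-backed getters.
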
bool IsIntegrated() const { - return device_type == vk::PhysicalDeviceType::eIntegratedGpu; + return properties.deviceType == vk::PhysicalDeviceType::eIntegratedGpu; + } + + /// Returns the current Vulkan API version provided in Vulkan-formatted version numbers. + u32 GetApiVersion() const { + return properties.apiVersion; + } + + /// Returns the current driver version provided in Vulkan-formatted version numbers. + u32 GetDriverVersion() const { + return properties.driverVersion; + } + + /// Returns the device name. + std::string_view GetModelName() const { + return properties.deviceName; } /// Returns the driver ID. @@ -80,18 +100,23 @@ public: } /// Returns uniform buffer alignment requeriment. - u64 GetUniformBufferAlignment() const { - return uniform_buffer_alignment; + vk::DeviceSize GetUniformBufferAlignment() const { + return properties.limits.minUniformBufferOffsetAlignment; } /// Returns storage alignment requeriment. - u64 GetStorageBufferAlignment() const { - return storage_buffer_alignment; + vk::DeviceSize GetStorageBufferAlignment() const { + return properties.limits.minStorageBufferOffsetAlignment; } /// Returns the maximum range for storage buffers. - u64 GetMaxStorageBufferRange() const { - return max_storage_buffer_range; + vk::DeviceSize GetMaxStorageBufferRange() const { + return properties.limits.maxStorageBufferRange; + } + + /// Returns the maximum size for push constants. + vk::DeviceSize GetMaxPushConstantsSize() const { + return properties.limits.maxPushConstantsSize; } /// Returns true if ASTC is natively supported. @@ -104,6 +129,16 @@ public: return is_float16_supported; } + /// Returns true if the device warp size can potentially be bigger than guest's warp size. + bool IsWarpSizePotentiallyBiggerThanGuest() const { + return is_warp_potentially_bigger; + } + + /// Returns true if the device can be forced to use the guest warp size. + bool IsGuestWarpSizeSupported(vk::ShaderStageFlagBits stage) const { + return (guest_warp_stages & stage) != vk::ShaderStageFlags{}; + } + /// Returns true if the device supports VK_EXT_scalar_block_layout. bool IsKhrUniformBufferStandardLayoutSupported() const { return khr_uniform_buffer_standard_layout; @@ -114,6 +149,26 @@ public: return ext_index_type_uint8; } + /// Returns true if the device supports VK_EXT_depth_range_unrestricted. + bool IsExtDepthRangeUnrestrictedSupported() const { + return ext_depth_range_unrestricted; + } + + /// Returns true if the device supports VK_EXT_shader_viewport_index_layer. + bool IsExtShaderViewportIndexLayerSupported() const { + return ext_shader_viewport_index_layer; + } + + /// Returns the vendor name reported from Vulkan. + std::string_view GetVendorName() const { + return vendor_name; + } + + /// Returns the list of available extensions. + const std::vector<std::string>& GetAvailableExtensions() const { + return reported_extensions; + } + /// Checks if the physical device is suitable. static bool IsSuitable(const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDevice physical, vk::SurfaceKHR surface); @@ -125,12 +180,12 @@ private: /// Sets up queue families. void SetupFamilies(const vk::DispatchLoaderDynamic& dldi, vk::SurfaceKHR surface); - /// Sets up device properties. - void SetupProperties(const vk::DispatchLoaderDynamic& dldi); - /// Sets up device features. void SetupFeatures(const vk::DispatchLoaderDynamic& dldi); + /// Collects telemetry information from the device. + void CollectTelemetryParameters(); + /// Returns a list of queue initialization descriptors. 
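
The getters above now read straight from the cached vk::PhysicalDeviceProperties instead of per-field copies, which is what makes the removed SetupProperties step redundant. A typical use of the alignment values when suballocating uniform data, relying on Vulkan's guarantee that these alignment limits are powers of two:

    // Round an offset up to the next aligned boundary, e.g. with
    // alignment = device.GetUniformBufferAlignment().
    vk::DeviceSize AlignOffset(vk::DeviceSize offset, vk::DeviceSize alignment) {
        return (offset + alignment - 1) & ~(alignment - 1);
    }

The declaration documented above follows.
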
std::vector<vk::DeviceQueueCreateInfo> GetDeviceQueueCreateInfos() const; @@ -148,23 +203,28 @@ private: const vk::PhysicalDevice physical; ///< Physical device. vk::DispatchLoaderDynamic dld; ///< Device function pointers. + vk::PhysicalDeviceProperties properties; ///< Device properties. UniqueDevice logical; ///< Logical device. vk::Queue graphics_queue; ///< Main graphics queue. vk::Queue present_queue; ///< Main present queue. u32 graphics_family{}; ///< Main graphics queue family index. u32 present_family{}; ///< Main present queue family index. - vk::PhysicalDeviceType device_type; ///< Physical device type. vk::DriverIdKHR driver_id{}; ///< Driver ID. - u64 uniform_buffer_alignment{}; ///< Uniform buffer alignment requeriment. - u64 storage_buffer_alignment{}; ///< Storage buffer alignment requeriment. - u64 max_storage_buffer_range{}; ///< Max storage buffer size. + vk::ShaderStageFlags guest_warp_stages{}; ///< Stages where the guest warp size can be forced. bool is_optimal_astc_supported{}; ///< Support for native ASTC. bool is_float16_supported{}; ///< Support for float16 arithmetics. + bool is_warp_potentially_bigger{}; ///< Host warp size can be bigger than guest. bool khr_uniform_buffer_standard_layout{}; ///< Support for std430 on UBOs. bool ext_index_type_uint8{}; ///< Support for VK_EXT_index_type_uint8. - bool khr_driver_properties{}; ///< Support for VK_KHR_driver_properties. - std::unordered_map<vk::Format, vk::FormatProperties> - format_properties; ///< Format properties dictionary. + bool ext_depth_range_unrestricted{}; ///< Support for VK_EXT_depth_range_unrestricted. + bool ext_shader_viewport_index_layer{}; ///< Support for VK_EXT_shader_viewport_index_layer. + + // Telemetry parameters + std::string vendor_name; ///< Device's driver name. + std::vector<std::string> reported_extensions; ///< Reported Vulkan extensions. + + /// Format properties dictionary. + std::unordered_map<vk::Format, vk::FormatProperties> format_properties; }; } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_sampler_cache.cpp b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp index 801826d3d..1ce583f75 100644 --- a/src/video_core/renderer_vulkan/vk_sampler_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp @@ -46,9 +46,10 @@ UniqueSampler VKSamplerCache::CreateSampler(const Tegra::Texture::TSCEntry& tsc) {}, MaxwellToVK::Sampler::Filter(tsc.mag_filter), MaxwellToVK::Sampler::Filter(tsc.min_filter), MaxwellToVK::Sampler::MipmapMode(tsc.mipmap_filter), - MaxwellToVK::Sampler::WrapMode(tsc.wrap_u), MaxwellToVK::Sampler::WrapMode(tsc.wrap_v), - MaxwellToVK::Sampler::WrapMode(tsc.wrap_p), tsc.GetLodBias(), has_anisotropy, - max_anisotropy, tsc.depth_compare_enabled, + MaxwellToVK::Sampler::WrapMode(tsc.wrap_u, tsc.mag_filter), + MaxwellToVK::Sampler::WrapMode(tsc.wrap_v, tsc.mag_filter), + MaxwellToVK::Sampler::WrapMode(tsc.wrap_p, tsc.mag_filter), tsc.GetLodBias(), + has_anisotropy, max_anisotropy, tsc.depth_compare_enabled, MaxwellToVK::Sampler::DepthCompareFunction(tsc.depth_compare_func), tsc.GetMinLod(), tsc.GetMaxLod(), vk_border_color.value_or(vk::BorderColor::eFloatTransparentBlack), unnormalized_coords); diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp index 80738d3d0..6227bc70b 100644 --- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp +++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp @@ -3,8 +3,10 @@ // Refer to the license.txt file included. 
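
The sampler-cache hunk above starts passing the magnification filter into the wrap-mode translation. The matching maxwell_to_vk change is outside this section; a plausible sketch of why the filter matters, assuming Maxwell's clamp wrap mode behaves like classic GL_CLAMP, which has no exact Vulkan equivalent:

    // Hypothetical helper: nearest filtering never blends past the edge texel,
    // so ClampToEdge matches; linear filtering blends toward the border color,
    // which ClampToBorder approximates.
    vk::SamplerAddressMode ClampWrapMode(Tegra::Texture::TextureFilter filter) {
        return filter == Tegra::Texture::TextureFilter::Nearest
                   ? vk::SamplerAddressMode::eClampToEdge
                   : vk::SamplerAddressMode::eClampToBorder;
    }

The shader decompiler changes begin below.
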
#include <functional> +#include <limits> #include <map> -#include <set> +#include <type_traits> +#include <utility> #include <fmt/format.h> @@ -23,7 +25,9 @@ #include "video_core/shader/node.h" #include "video_core/shader/shader_ir.h" -namespace Vulkan::VKShader { +namespace Vulkan { + +namespace { using Sirit::Id; using Tegra::Engines::ShaderType; @@ -35,22 +39,60 @@ using namespace VideoCommon::Shader; using Maxwell = Tegra::Engines::Maxwell3D::Regs; using Operation = const OperationNode&; +class ASTDecompiler; +class ExprDecompiler; + // TODO(Rodrigo): Use rasterizer's value -constexpr u32 MAX_CONSTBUFFER_FLOATS = 0x4000; -constexpr u32 MAX_CONSTBUFFER_ELEMENTS = MAX_CONSTBUFFER_FLOATS / 4; -constexpr u32 STAGE_BINDING_STRIDE = 0x100; +constexpr u32 MaxConstBufferFloats = 0x4000; +constexpr u32 MaxConstBufferElements = MaxConstBufferFloats / 4; + +constexpr u32 NumInputPatches = 32; // This value seems to be the standard + +enum class Type { Void, Bool, Bool2, Float, Int, Uint, HalfFloat }; + +class Expression final { +public: + Expression(Id id, Type type) : id{id}, type{type} { + ASSERT(type != Type::Void); + } + Expression() : type{Type::Void} {} -enum class Type { Bool, Bool2, Float, Int, Uint, HalfFloat }; + Id id{}; + Type type{}; +}; +static_assert(std::is_standard_layout_v<Expression>); -struct SamplerImage { - Id image_type; - Id sampled_image_type; - Id sampler; +struct TexelBuffer { + Id image_type{}; + Id image{}; }; -namespace { +struct SampledImage { + Id image_type{}; + Id sampled_image_type{}; + Id sampler{}; +}; + +struct StorageImage { + Id image_type{}; + Id image{}; +}; + +struct AttributeType { + Type type; + Id scalar; + Id vector; +}; + +struct VertexIndices { + std::optional<u32> position; + std::optional<u32> viewport; + std::optional<u32> point_size; + std::optional<u32> clip_distances; +}; spv::Dim GetSamplerDim(const Sampler& sampler) { + ASSERT(!sampler.IsBuffer()); switch (sampler.GetType()) { case Tegra::Shader::TextureType::Texture1D: return spv::Dim::Dim1D; @@ -66,6 +108,138 @@ spv::Dim GetSamplerDim(const Sampler& sampler) { } } +std::pair<spv::Dim, bool> GetImageDim(const Image& image) { + switch (image.GetType()) { + case Tegra::Shader::ImageType::Texture1D: + return {spv::Dim::Dim1D, false}; + case Tegra::Shader::ImageType::TextureBuffer: + return {spv::Dim::Buffer, false}; + case Tegra::Shader::ImageType::Texture1DArray: + return {spv::Dim::Dim1D, true}; + case Tegra::Shader::ImageType::Texture2D: + return {spv::Dim::Dim2D, false}; + case Tegra::Shader::ImageType::Texture2DArray: + return {spv::Dim::Dim2D, true}; + case Tegra::Shader::ImageType::Texture3D: + return {spv::Dim::Dim3D, false}; + default: + UNIMPLEMENTED_MSG("Unimplemented image type={}", static_cast<u32>(image.GetType())); + return {spv::Dim::Dim2D, false}; + } +} + +/// Returns the number of vertices present in a primitive topology. 
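
TexelBuffer, SampledImage and StorageImage above correspond to three different OpTypeImage declarations later in this file; the operand that separates them is "Sampled" (1 = read through a sampler, 2 = raw load/store). A minimal illustration with a hypothetical enum standing in for the three structs:

    enum class BindingKind { TexelBuffer, SampledImage, StorageImage };

    // SPIR-V OpTypeImage "Sampled" operand for each binding kind.
    int SpirvSampledOperand(BindingKind kind) {
        return kind == BindingKind::StorageImage ? 2 : 1;
    }

The vertex-count helper documented above continues below.
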
+u32 GetNumPrimitiveTopologyVertices(Maxwell::PrimitiveTopology primitive_topology) { + switch (primitive_topology) { + case Maxwell::PrimitiveTopology::Points: + return 1; + case Maxwell::PrimitiveTopology::Lines: + case Maxwell::PrimitiveTopology::LineLoop: + case Maxwell::PrimitiveTopology::LineStrip: + return 2; + case Maxwell::PrimitiveTopology::Triangles: + case Maxwell::PrimitiveTopology::TriangleStrip: + case Maxwell::PrimitiveTopology::TriangleFan: + return 3; + case Maxwell::PrimitiveTopology::LinesAdjacency: + case Maxwell::PrimitiveTopology::LineStripAdjacency: + return 4; + case Maxwell::PrimitiveTopology::TrianglesAdjacency: + case Maxwell::PrimitiveTopology::TriangleStripAdjacency: + return 6; + case Maxwell::PrimitiveTopology::Quads: + UNIMPLEMENTED_MSG("Quads"); + return 3; + case Maxwell::PrimitiveTopology::QuadStrip: + UNIMPLEMENTED_MSG("QuadStrip"); + return 3; + case Maxwell::PrimitiveTopology::Polygon: + UNIMPLEMENTED_MSG("Polygon"); + return 3; + case Maxwell::PrimitiveTopology::Patches: + UNIMPLEMENTED_MSG("Patches"); + return 3; + default: + UNREACHABLE(); + return 3; + } +} + +spv::ExecutionMode GetExecutionMode(Maxwell::TessellationPrimitive primitive) { + switch (primitive) { + case Maxwell::TessellationPrimitive::Isolines: + return spv::ExecutionMode::Isolines; + case Maxwell::TessellationPrimitive::Triangles: + return spv::ExecutionMode::Triangles; + case Maxwell::TessellationPrimitive::Quads: + return spv::ExecutionMode::Quads; + } + UNREACHABLE(); + return spv::ExecutionMode::Triangles; +} + +spv::ExecutionMode GetExecutionMode(Maxwell::TessellationSpacing spacing) { + switch (spacing) { + case Maxwell::TessellationSpacing::Equal: + return spv::ExecutionMode::SpacingEqual; + case Maxwell::TessellationSpacing::FractionalOdd: + return spv::ExecutionMode::SpacingFractionalOdd; + case Maxwell::TessellationSpacing::FractionalEven: + return spv::ExecutionMode::SpacingFractionalEven; + } + UNREACHABLE(); + return spv::ExecutionMode::SpacingEqual; +} + +spv::ExecutionMode GetExecutionMode(Maxwell::PrimitiveTopology input_topology) { + switch (input_topology) { + case Maxwell::PrimitiveTopology::Points: + return spv::ExecutionMode::InputPoints; + case Maxwell::PrimitiveTopology::Lines: + case Maxwell::PrimitiveTopology::LineLoop: + case Maxwell::PrimitiveTopology::LineStrip: + return spv::ExecutionMode::InputLines; + case Maxwell::PrimitiveTopology::Triangles: + case Maxwell::PrimitiveTopology::TriangleStrip: + case Maxwell::PrimitiveTopology::TriangleFan: + return spv::ExecutionMode::Triangles; + case Maxwell::PrimitiveTopology::LinesAdjacency: + case Maxwell::PrimitiveTopology::LineStripAdjacency: + return spv::ExecutionMode::InputLinesAdjacency; + case Maxwell::PrimitiveTopology::TrianglesAdjacency: + case Maxwell::PrimitiveTopology::TriangleStripAdjacency: + return spv::ExecutionMode::InputTrianglesAdjacency; + case Maxwell::PrimitiveTopology::Quads: + UNIMPLEMENTED_MSG("Quads"); + return spv::ExecutionMode::Triangles; + case Maxwell::PrimitiveTopology::QuadStrip: + UNIMPLEMENTED_MSG("QuadStrip"); + return spv::ExecutionMode::Triangles; + case Maxwell::PrimitiveTopology::Polygon: + UNIMPLEMENTED_MSG("Polygon"); + return spv::ExecutionMode::Triangles; + case Maxwell::PrimitiveTopology::Patches: + UNIMPLEMENTED_MSG("Patches"); + return spv::ExecutionMode::Triangles; + } + UNREACHABLE(); + return spv::ExecutionMode::Triangles; +} + +spv::ExecutionMode GetExecutionMode(Tegra::Shader::OutputTopology output_topology) { + switch (output_topology) { + case 
Tegra::Shader::OutputTopology::PointList: + return spv::ExecutionMode::OutputPoints; + case Tegra::Shader::OutputTopology::LineStrip: + return spv::ExecutionMode::OutputLineStrip; + case Tegra::Shader::OutputTopology::TriangleStrip: + return spv::ExecutionMode::OutputTriangleStrip; + default: + UNREACHABLE(); + return spv::ExecutionMode::OutputPoints; + } +} + /// Returns true if an attribute index is one of the 32 generic attributes constexpr bool IsGenericAttribute(Attribute::Index attribute) { return attribute >= Attribute::Index::Attribute_0 && @@ -73,7 +247,7 @@ constexpr bool IsGenericAttribute(Attribute::Index attribute) { } /// Returns the location of a generic attribute -constexpr u32 GetGenericAttributeLocation(Attribute::Index attribute) { +u32 GetGenericAttributeLocation(Attribute::Index attribute) { ASSERT(IsGenericAttribute(attribute)); return static_cast<u32>(attribute) - static_cast<u32>(Attribute::Index::Attribute_0); } @@ -87,20 +261,146 @@ bool IsPrecise(Operation operand) { return false; } -} // namespace - -class ASTDecompiler; -class ExprDecompiler; - -class SPIRVDecompiler : public Sirit::Module { +class SPIRVDecompiler final : public Sirit::Module { public: - explicit SPIRVDecompiler(const VKDevice& device, const ShaderIR& ir, ShaderType stage) - : Module(0x00010300), device{device}, ir{ir}, stage{stage}, header{ir.GetHeader()} { + explicit SPIRVDecompiler(const VKDevice& device, const ShaderIR& ir, ShaderType stage, + const Specialization& specialization) + : Module(0x00010300), device{device}, ir{ir}, stage{stage}, header{ir.GetHeader()}, + specialization{specialization} { AddCapability(spv::Capability::Shader); + AddCapability(spv::Capability::UniformAndStorageBuffer16BitAccess); + AddCapability(spv::Capability::ImageQuery); + AddCapability(spv::Capability::Image1D); + AddCapability(spv::Capability::ImageBuffer); + AddCapability(spv::Capability::ImageGatherExtended); + AddCapability(spv::Capability::SampledBuffer); + AddCapability(spv::Capability::StorageImageWriteWithoutFormat); + AddCapability(spv::Capability::SubgroupBallotKHR); + AddCapability(spv::Capability::SubgroupVoteKHR); + AddExtension("SPV_KHR_shader_ballot"); + AddExtension("SPV_KHR_subgroup_vote"); AddExtension("SPV_KHR_storage_buffer_storage_class"); AddExtension("SPV_KHR_variable_pointers"); + + if (ir.UsesViewportIndex()) { + AddCapability(spv::Capability::MultiViewport); + if (device.IsExtShaderViewportIndexLayerSupported()) { + AddExtension("SPV_EXT_shader_viewport_index_layer"); + AddCapability(spv::Capability::ShaderViewportIndexLayerEXT); + } + } + + if (device.IsFloat16Supported()) { + AddCapability(spv::Capability::Float16); + } + t_scalar_half = Name(TypeFloat(device.IsFloat16Supported() ? 
16 : 32), "scalar_half"); + t_half = Name(TypeVector(t_scalar_half, 2), "half"); + + const Id main = Decompile(); + + switch (stage) { + case ShaderType::Vertex: + AddEntryPoint(spv::ExecutionModel::Vertex, main, "main", interfaces); + break; + case ShaderType::TesselationControl: + AddCapability(spv::Capability::Tessellation); + AddEntryPoint(spv::ExecutionModel::TessellationControl, main, "main", interfaces); + AddExecutionMode(main, spv::ExecutionMode::OutputVertices, + header.common2.threads_per_input_primitive); + break; + case ShaderType::TesselationEval: + AddCapability(spv::Capability::Tessellation); + AddEntryPoint(spv::ExecutionModel::TessellationEvaluation, main, "main", interfaces); + AddExecutionMode(main, GetExecutionMode(specialization.tessellation.primitive)); + AddExecutionMode(main, GetExecutionMode(specialization.tessellation.spacing)); + AddExecutionMode(main, specialization.tessellation.clockwise + ? spv::ExecutionMode::VertexOrderCw + : spv::ExecutionMode::VertexOrderCcw); + break; + case ShaderType::Geometry: + AddCapability(spv::Capability::Geometry); + AddEntryPoint(spv::ExecutionModel::Geometry, main, "main", interfaces); + AddExecutionMode(main, GetExecutionMode(specialization.primitive_topology)); + AddExecutionMode(main, GetExecutionMode(header.common3.output_topology)); + AddExecutionMode(main, spv::ExecutionMode::OutputVertices, + header.common4.max_output_vertices); + // TODO(Rodrigo): Where can we get this info from? + AddExecutionMode(main, spv::ExecutionMode::Invocations, 1U); + break; + case ShaderType::Fragment: + AddEntryPoint(spv::ExecutionModel::Fragment, main, "main", interfaces); + AddExecutionMode(main, spv::ExecutionMode::OriginUpperLeft); + if (header.ps.omap.depth) { + AddExecutionMode(main, spv::ExecutionMode::DepthReplacing); + } + break; + case ShaderType::Compute: + const auto workgroup_size = specialization.workgroup_size; + AddExecutionMode(main, spv::ExecutionMode::LocalSize, workgroup_size[0], + workgroup_size[1], workgroup_size[2]); + AddEntryPoint(spv::ExecutionModel::GLCompute, main, "main", interfaces); + break; + } } +private: + Id Decompile() { + DeclareCommon(); + DeclareVertex(); + DeclareTessControl(); + DeclareTessEval(); + DeclareGeometry(); + DeclareFragment(); + DeclareCompute(); + DeclareRegisters(); + DeclarePredicates(); + DeclareLocalMemory(); + DeclareSharedMemory(); + DeclareInternalFlags(); + DeclareInputAttributes(); + DeclareOutputAttributes(); + + u32 binding = specialization.base_binding; + binding = DeclareConstantBuffers(binding); + binding = DeclareGlobalBuffers(binding); + binding = DeclareTexelBuffers(binding); + binding = DeclareSamplers(binding); + binding = DeclareImages(binding); + + const Id main = OpFunction(t_void, {}, TypeFunction(t_void)); + AddLabel(); + + if (ir.IsDecompiled()) { + DeclareFlowVariables(); + DecompileAST(); + } else { + AllocateLabels(); + DecompileBranchMode(); + } + + OpReturn(); + OpFunctionEnd(); + + return main; + } + + void DefinePrologue() { + if (stage == ShaderType::Vertex) { + // Clear Position to avoid reading trash on the Z conversion. 
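
DefinePrologue below acts on that comment: it stores a defined default into the position output, since SPIR-V output variables start with undefined contents, and, when the host pins a fixed point size, bakes it in as a constant. A sketch of the host side filling that field, assuming Specialization::point_size is a std::optional<float> as its uses in this hunk suggest:

    #include <optional>

    struct SpecializationSketch {
        std::optional<float> point_size; // mirrors Specialization::point_size
    };

    // Hypothetical helper: a baked-in point size becomes part of the SPIR-V
    // module, so it must also be part of the shader cache key.
    SpecializationSketch MakeSpec(bool fixed_point_size, float host_point_size) {
        SpecializationSketch spec;
        if (fixed_point_size) {
            spec.point_size = host_point_size;
        }
        return spec;
    }
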
+ const auto position_index = out_indices.position.value(); + const Id position = AccessElement(t_out_float4, out_vertex, position_index); + OpStore(position, v_varying_default); + + if (specialization.point_size) { + const u32 point_size_index = out_indices.point_size.value(); + const Id out_point_size = AccessElement(t_out_float, out_vertex, point_size_index); + OpStore(out_point_size, Constant(t_float, *specialization.point_size)); + } + } + } + + void DecompileAST(); + void DecompileBranchMode() { const u32 first_address = ir.GetBasicBlocks().begin()->first; const Id loop_label = OpLabel("loop"); @@ -111,14 +411,15 @@ public: std::vector<Sirit::Literal> literals; std::vector<Id> branch_labels; - for (const auto& pair : labels) { - const auto [literal, label] = pair; + for (const auto& [literal, label] : labels) { literals.push_back(literal); branch_labels.push_back(label); } - jmp_to = Emit(OpVariable(TypePointer(spv::StorageClass::Function, t_uint), - spv::StorageClass::Function, Constant(t_uint, first_address))); + jmp_to = OpVariable(TypePointer(spv::StorageClass::Function, t_uint), + spv::StorageClass::Function, Constant(t_uint, first_address)); + AddLocalVariable(jmp_to); + std::tie(ssy_flow_stack, ssy_flow_stack_top) = CreateFlowStack(); std::tie(pbk_flow_stack, pbk_flow_stack_top) = CreateFlowStack(); @@ -128,151 +429,118 @@ public: Name(pbk_flow_stack, "pbk_flow_stack"); Name(pbk_flow_stack_top, "pbk_flow_stack_top"); - Emit(OpBranch(loop_label)); - Emit(loop_label); - Emit(OpLoopMerge(merge_label, continue_label, spv::LoopControlMask::Unroll)); - Emit(OpBranch(dummy_label)); + DefinePrologue(); + + OpBranch(loop_label); + AddLabel(loop_label); + OpLoopMerge(merge_label, continue_label, spv::LoopControlMask::MaskNone); + OpBranch(dummy_label); - Emit(dummy_label); + AddLabel(dummy_label); const Id default_branch = OpLabel(); - const Id jmp_to_load = Emit(OpLoad(t_uint, jmp_to)); - Emit(OpSelectionMerge(jump_label, spv::SelectionControlMask::MaskNone)); - Emit(OpSwitch(jmp_to_load, default_branch, literals, branch_labels)); + const Id jmp_to_load = OpLoad(t_uint, jmp_to); + OpSelectionMerge(jump_label, spv::SelectionControlMask::MaskNone); + OpSwitch(jmp_to_load, default_branch, literals, branch_labels); - Emit(default_branch); - Emit(OpReturn()); + AddLabel(default_branch); + OpReturn(); - for (const auto& pair : ir.GetBasicBlocks()) { - const auto& [address, bb] = pair; - Emit(labels.at(address)); + for (const auto& [address, bb] : ir.GetBasicBlocks()) { + AddLabel(labels.at(address)); VisitBasicBlock(bb); const auto next_it = labels.lower_bound(address + 1); const Id next_label = next_it != labels.end() ? 
next_it->second : default_branch; - Emit(OpBranch(next_label)); + OpBranch(next_label); } - Emit(jump_label); - Emit(OpBranch(continue_label)); - Emit(continue_label); - Emit(OpBranch(loop_label)); - Emit(merge_label); + AddLabel(jump_label); + OpBranch(continue_label); + AddLabel(continue_label); + OpBranch(loop_label); + AddLabel(merge_label); } - void DecompileAST(); +private: + friend class ASTDecompiler; + friend class ExprDecompiler; - void Decompile() { - const bool is_fully_decompiled = ir.IsDecompiled(); - AllocateBindings(); - if (!is_fully_decompiled) { - AllocateLabels(); - } + static constexpr auto INTERNAL_FLAGS_COUNT = static_cast<std::size_t>(InternalFlag::Amount); - DeclareVertex(); - DeclareGeometry(); - DeclareFragment(); - DeclareRegisters(); - DeclarePredicates(); - if (is_fully_decompiled) { - DeclareFlowVariables(); + void AllocateLabels() { + for (const auto& pair : ir.GetBasicBlocks()) { + const u32 address = pair.first; + labels.emplace(address, OpLabel(fmt::format("label_0x{:x}", address))); } - DeclareLocalMemory(); - DeclareInternalFlags(); - DeclareInputAttributes(); - DeclareOutputAttributes(); - DeclareConstantBuffers(); - DeclareGlobalBuffers(); - DeclareSamplers(); + } - execute_function = - Emit(OpFunction(t_void, spv::FunctionControlMask::Inline, TypeFunction(t_void))); - Emit(OpLabel()); + void DeclareCommon() { + thread_id = + DeclareInputBuiltIn(spv::BuiltIn::SubgroupLocalInvocationId, t_in_uint, "thread_id"); + } - if (is_fully_decompiled) { - DecompileAST(); - } else { - DecompileBranchMode(); + void DeclareVertex() { + if (stage != ShaderType::Vertex) { + return; } + Id out_vertex_struct; + std::tie(out_vertex_struct, out_indices) = DeclareVertexStruct(); + const Id vertex_ptr = TypePointer(spv::StorageClass::Output, out_vertex_struct); + out_vertex = OpVariable(vertex_ptr, spv::StorageClass::Output); + interfaces.push_back(AddGlobalVariable(Name(out_vertex, "out_vertex"))); - Emit(OpReturn()); - Emit(OpFunctionEnd()); + // Declare input attributes + vertex_index = DeclareInputBuiltIn(spv::BuiltIn::VertexIndex, t_in_uint, "vertex_index"); + instance_index = + DeclareInputBuiltIn(spv::BuiltIn::InstanceIndex, t_in_uint, "instance_index"); } - ShaderEntries GetShaderEntries() const { - ShaderEntries entries; - entries.const_buffers_base_binding = const_buffers_base_binding; - entries.global_buffers_base_binding = global_buffers_base_binding; - entries.samplers_base_binding = samplers_base_binding; - for (const auto& cbuf : ir.GetConstantBuffers()) { - entries.const_buffers.emplace_back(cbuf.second, cbuf.first); - } - for (const auto& gmem_pair : ir.GetGlobalMemory()) { - const auto& [base, usage] = gmem_pair; - entries.global_buffers.emplace_back(base.cbuf_index, base.cbuf_offset); - } - for (const auto& sampler : ir.GetSamplers()) { - entries.samplers.emplace_back(sampler); - } - for (const auto& attribute : ir.GetInputAttributes()) { - if (IsGenericAttribute(attribute)) { - entries.attributes.insert(GetGenericAttributeLocation(attribute)); - } + void DeclareTessControl() { + if (stage != ShaderType::TesselationControl) { + return; } - entries.clip_distances = ir.GetClipDistances(); - entries.shader_length = ir.GetLength(); - entries.entry_function = execute_function; - entries.interfaces = interfaces; - return entries; - } - -private: - friend class ASTDecompiler; - friend class ExprDecompiler; - - static constexpr auto INTERNAL_FLAGS_COUNT = static_cast<std::size_t>(InternalFlag::Amount); - - void AllocateBindings() { - const u32 binding_base = 
static_cast<u32>(stage) * STAGE_BINDING_STRIDE; - u32 binding_iterator = binding_base; + DeclareInputVertexArray(NumInputPatches); + DeclareOutputVertexArray(header.common2.threads_per_input_primitive); - const auto Allocate = [&binding_iterator](std::size_t count) { - const u32 current_binding = binding_iterator; - binding_iterator += static_cast<u32>(count); - return current_binding; - }; - const_buffers_base_binding = Allocate(ir.GetConstantBuffers().size()); - global_buffers_base_binding = Allocate(ir.GetGlobalMemory().size()); - samplers_base_binding = Allocate(ir.GetSamplers().size()); + tess_level_outer = DeclareBuiltIn( + spv::BuiltIn::TessLevelOuter, spv::StorageClass::Output, + TypePointer(spv::StorageClass::Output, TypeArray(t_float, Constant(t_uint, 4U))), + "tess_level_outer"); + Decorate(tess_level_outer, spv::Decoration::Patch); - ASSERT_MSG(binding_iterator - binding_base < STAGE_BINDING_STRIDE, - "Stage binding stride is too small"); - } + tess_level_inner = DeclareBuiltIn( + spv::BuiltIn::TessLevelInner, spv::StorageClass::Output, + TypePointer(spv::StorageClass::Output, TypeArray(t_float, Constant(t_uint, 2U))), + "tess_level_inner"); + Decorate(tess_level_inner, spv::Decoration::Patch); - void AllocateLabels() { - for (const auto& pair : ir.GetBasicBlocks()) { - const u32 address = pair.first; - labels.emplace(address, OpLabel(fmt::format("label_0x{:x}", address))); - } + invocation_id = DeclareInputBuiltIn(spv::BuiltIn::InvocationId, t_in_int, "invocation_id"); } - void DeclareVertex() { - if (stage != ShaderType::Vertex) + void DeclareTessEval() { + if (stage != ShaderType::TesselationEval) { return; + } + DeclareInputVertexArray(NumInputPatches); + DeclareOutputVertex(); - DeclareVertexRedeclarations(); + tess_coord = DeclareInputBuiltIn(spv::BuiltIn::TessCoord, t_in_float3, "tess_coord"); } void DeclareGeometry() { - if (stage != ShaderType::Geometry) + if (stage != ShaderType::Geometry) { return; - - UNIMPLEMENTED(); + } + const u32 num_input = GetNumPrimitiveTopologyVertices(specialization.primitive_topology); + DeclareInputVertexArray(num_input); + DeclareOutputVertex(); } void DeclareFragment() { - if (stage != ShaderType::Fragment) + if (stage != ShaderType::Fragment) { return; + } for (u32 rt = 0; rt < static_cast<u32>(frag_colors.size()); ++rt) { if (!IsRenderTargetUsed(rt)) { @@ -296,10 +564,19 @@ private: interfaces.push_back(frag_depth); } - frag_coord = DeclareBuiltIn(spv::BuiltIn::FragCoord, spv::StorageClass::Input, t_in_float4, - "frag_coord"); - front_facing = DeclareBuiltIn(spv::BuiltIn::FrontFacing, spv::StorageClass::Input, - t_in_bool, "front_facing"); + frag_coord = DeclareInputBuiltIn(spv::BuiltIn::FragCoord, t_in_float4, "frag_coord"); + front_facing = DeclareInputBuiltIn(spv::BuiltIn::FrontFacing, t_in_bool, "front_facing"); + point_coord = DeclareInputBuiltIn(spv::BuiltIn::PointCoord, t_in_float2, "point_coord"); + } + + void DeclareCompute() { + if (stage != ShaderType::Compute) { + return; + } + + workgroup_id = DeclareInputBuiltIn(spv::BuiltIn::WorkgroupId, t_in_uint3, "workgroup_id"); + local_invocation_id = + DeclareInputBuiltIn(spv::BuiltIn::LocalInvocationId, t_in_uint3, "local_invocation_id"); } void DeclareRegisters() { @@ -327,21 +604,44 @@ private: } void DeclareLocalMemory() { - if (const u64 local_memory_size = header.GetLocalMemorySize(); local_memory_size > 0) { - const auto element_count = static_cast<u32>(Common::AlignUp(local_memory_size, 4) / 4); - const Id type_array = TypeArray(t_float, Constant(t_uint, element_count)); - 
const Id type_pointer = TypePointer(spv::StorageClass::Private, type_array); - Name(type_pointer, "LocalMemory"); + // TODO(Rodrigo): Unstub kernel local memory size and pass it from a register at + // specialization time. + const u64 lmem_size = stage == ShaderType::Compute ? 0x400 : header.GetLocalMemorySize(); + if (lmem_size == 0) { + return; + } + const auto element_count = static_cast<u32>(Common::AlignUp(lmem_size, 4) / 4); + const Id type_array = TypeArray(t_float, Constant(t_uint, element_count)); + const Id type_pointer = TypePointer(spv::StorageClass::Private, type_array); + Name(type_pointer, "LocalMemory"); - local_memory = - OpVariable(type_pointer, spv::StorageClass::Private, ConstantNull(type_array)); - AddGlobalVariable(Name(local_memory, "local_memory")); + local_memory = + OpVariable(type_pointer, spv::StorageClass::Private, ConstantNull(type_array)); + AddGlobalVariable(Name(local_memory, "local_memory")); + } + + void DeclareSharedMemory() { + if (stage != ShaderType::Compute) { + return; + } + t_smem_uint = TypePointer(spv::StorageClass::Workgroup, t_uint); + + const u32 smem_size = specialization.shared_memory_size; + if (smem_size == 0) { + // Avoid declaring an empty array. + return; } + const auto element_count = static_cast<u32>(Common::AlignUp(smem_size, 4) / 4); + const Id type_array = TypeArray(t_uint, Constant(t_uint, element_count)); + const Id type_pointer = TypePointer(spv::StorageClass::Workgroup, type_array); + Name(type_pointer, "SharedMemory"); + + shared_memory = OpVariable(type_pointer, spv::StorageClass::Workgroup); + AddGlobalVariable(Name(shared_memory, "shared_memory")); } void DeclareInternalFlags() { - constexpr std::array<const char*, INTERNAL_FLAGS_COUNT> names = {"zero", "sign", "carry", - "overflow"}; + constexpr std::array names = {"zero", "sign", "carry", "overflow"}; for (std::size_t flag = 0; flag < INTERNAL_FLAGS_COUNT; ++flag) { const auto flag_code = static_cast<InternalFlag>(flag); const Id id = OpVariable(t_prv_bool, spv::StorageClass::Private, v_false); @@ -349,17 +649,53 @@ private: } } + void DeclareInputVertexArray(u32 length) { + constexpr auto storage = spv::StorageClass::Input; + std::tie(in_indices, in_vertex) = DeclareVertexArray(storage, "in_indices", length); + } + + void DeclareOutputVertexArray(u32 length) { + constexpr auto storage = spv::StorageClass::Output; + std::tie(out_indices, out_vertex) = DeclareVertexArray(storage, "out_indices", length); + } + + std::tuple<VertexIndices, Id> DeclareVertexArray(spv::StorageClass storage_class, + std::string name, u32 length) { + const auto [struct_id, indices] = DeclareVertexStruct(); + const Id vertex_array = TypeArray(struct_id, Constant(t_uint, length)); + const Id vertex_ptr = TypePointer(storage_class, vertex_array); + const Id vertex = OpVariable(vertex_ptr, storage_class); + AddGlobalVariable(Name(vertex, std::move(name))); + interfaces.push_back(vertex); + return {indices, vertex}; + } + + void DeclareOutputVertex() { + Id out_vertex_struct; + std::tie(out_vertex_struct, out_indices) = DeclareVertexStruct(); + const Id out_vertex_ptr = TypePointer(spv::StorageClass::Output, out_vertex_struct); + out_vertex = OpVariable(out_vertex_ptr, spv::StorageClass::Output); + interfaces.push_back(AddGlobalVariable(Name(out_vertex, "out_vertex"))); + } + void DeclareInputAttributes() { for (const auto index : ir.GetInputAttributes()) { if (!IsGenericAttribute(index)) { continue; } - UNIMPLEMENTED_IF(stage == ShaderType::Geometry); - const u32 location = 
GetGenericAttributeLocation(index); - const Id id = OpVariable(t_in_float4, spv::StorageClass::Input); - Name(AddGlobalVariable(id), fmt::format("in_attr{}", location)); + const auto type_descriptor = GetAttributeType(location); + Id type; + if (IsInputAttributeArray()) { + type = GetTypeVectorDefinitionLut(type_descriptor.type).at(3); + type = TypeArray(type, Constant(t_uint, GetNumInputVertices())); + type = TypePointer(spv::StorageClass::Input, type); + } else { + type = type_descriptor.vector; + } + const Id id = OpVariable(type, spv::StorageClass::Input); + AddGlobalVariable(Name(id, fmt::format("in_attr{}", location))); input_attributes.emplace(index, id); interfaces.push_back(id); @@ -389,8 +725,21 @@ private: if (!IsGenericAttribute(index)) { continue; } - const auto location = GetGenericAttributeLocation(index); - const Id id = OpVariable(t_out_float4, spv::StorageClass::Output); + const u32 location = GetGenericAttributeLocation(index); + Id type = t_float4; + Id varying_default = v_varying_default; + if (IsOutputAttributeArray()) { + const u32 num = GetNumOutputVertices(); + type = TypeArray(type, Constant(t_uint, num)); + if (device.GetDriverID() != vk::DriverIdKHR::eIntelProprietaryWindows) { + // Intel's proprietary driver fails to setup defaults for arrayed output + // attributes. + varying_default = ConstantComposite(type, std::vector(num, varying_default)); + } + } + type = TypePointer(spv::StorageClass::Output, type); + + const Id id = OpVariable(type, spv::StorageClass::Output, varying_default); Name(AddGlobalVariable(id), fmt::format("out_attr{}", location)); output_attributes.emplace(index, id); interfaces.push_back(id); @@ -399,10 +748,8 @@ private: } } - void DeclareConstantBuffers() { - u32 binding = const_buffers_base_binding; - for (const auto& entry : ir.GetConstantBuffers()) { - const auto [index, size] = entry; + u32 DeclareConstantBuffers(u32 binding) { + for (const auto& [index, size] : ir.GetConstantBuffers()) { const Id type = device.IsKhrUniformBufferStandardLayoutSupported() ? 
t_cbuf_scalar_ubo : t_cbuf_std140_ubo; const Id id = OpVariable(type, spv::StorageClass::Uniform); @@ -412,12 +759,11 @@ private: Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET); constant_buffers.emplace(index, id); } + return binding; } - void DeclareGlobalBuffers() { - u32 binding = global_buffers_base_binding; - for (const auto& entry : ir.GetGlobalMemory()) { - const auto [base, usage] = entry; + u32 DeclareGlobalBuffers(u32 binding) { + for (const auto& [base, usage] : ir.GetGlobalMemory()) { const Id id = OpVariable(t_gmem_ssbo, spv::StorageClass::StorageBuffer); AddGlobalVariable( Name(id, fmt::format("gmem_{}_{}", base.cbuf_index, base.cbuf_offset))); @@ -426,89 +772,187 @@ private: Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET); global_buffers.emplace(base, id); } + return binding; } - void DeclareSamplers() { - u32 binding = samplers_base_binding; + u32 DeclareTexelBuffers(u32 binding) { for (const auto& sampler : ir.GetSamplers()) { + if (!sampler.IsBuffer()) { + continue; + } + ASSERT(!sampler.IsArray()); + ASSERT(!sampler.IsShadow()); + + constexpr auto dim = spv::Dim::Buffer; + constexpr int depth = 0; + constexpr int arrayed = 0; + constexpr bool ms = false; + constexpr int sampled = 1; + constexpr auto format = spv::ImageFormat::Unknown; + const Id image_type = TypeImage(t_float, dim, depth, arrayed, ms, sampled, format); + const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, image_type); + const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant); + AddGlobalVariable(Name(id, fmt::format("sampler_{}", sampler.GetIndex()))); + Decorate(id, spv::Decoration::Binding, binding++); + Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET); + + texel_buffers.emplace(sampler.GetIndex(), TexelBuffer{image_type, id}); + } + return binding; + } + + u32 DeclareSamplers(u32 binding) { + for (const auto& sampler : ir.GetSamplers()) { + if (sampler.IsBuffer()) { + continue; + } const auto dim = GetSamplerDim(sampler); const int depth = sampler.IsShadow() ? 1 : 0; const int arrayed = sampler.IsArray() ? 1 : 0; - // TODO(Rodrigo): Sampled 1 indicates that the image will be used with a sampler. When - // SULD and SUST instructions are implemented, replace this value. 
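
DeclareConstantBuffers, DeclareGlobalBuffers, DeclareTexelBuffers, DeclareSamplers and DeclareImages now thread one running descriptor-binding counter and return the next free slot, replacing the removed per-stage base-binding arithmetic. The pattern reduced to its core, with illustrative names:

    // Each Declare* helper consumes `count` consecutive bindings
    // (Decorate(id, spv::Decoration::Binding, binding++) per resource).
    u32 DeclareRange(u32 binding, std::size_t count) {
        return binding + static_cast<u32>(count);
    }

    u32 AllocateAll(u32 base, std::size_t cbufs, std::size_t gmems, std::size_t samplers) {
        u32 binding = base;
        binding = DeclareRange(binding, cbufs);
        binding = DeclareRange(binding, gmems);
        binding = DeclareRange(binding, samplers);
        return binding; // first binding free for the next resource class
    }

The hunk continues below, replacing the old TODO around the OpTypeImage Sampled operand with explicit constants.
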
- const int sampled = 1; - const Id image_type = - TypeImage(t_float, dim, depth, arrayed, false, sampled, spv::ImageFormat::Unknown); + constexpr bool ms = false; + constexpr int sampled = 1; + constexpr auto format = spv::ImageFormat::Unknown; + const Id image_type = TypeImage(t_float, dim, depth, arrayed, ms, sampled, format); const Id sampled_image_type = TypeSampledImage(image_type); const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, sampled_image_type); const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant); AddGlobalVariable(Name(id, fmt::format("sampler_{}", sampler.GetIndex()))); + Decorate(id, spv::Decoration::Binding, binding++); + Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET); - sampler_images.insert( - {static_cast<u32>(sampler.GetIndex()), {image_type, sampled_image_type, id}}); + sampled_images.emplace(sampler.GetIndex(), + SampledImage{image_type, sampled_image_type, id}); + } + return binding; + } + + u32 DeclareImages(u32 binding) { + for (const auto& image : ir.GetImages()) { + const auto [dim, arrayed] = GetImageDim(image); + constexpr int depth = 0; + constexpr bool ms = false; + constexpr int sampled = 2; // This won't be accessed with a sampler + constexpr auto format = spv::ImageFormat::Unknown; + const Id image_type = TypeImage(t_uint, dim, depth, arrayed, ms, sampled, format, {}); + const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, image_type); + const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant); + AddGlobalVariable(Name(id, fmt::format("image_{}", image.GetIndex()))); Decorate(id, spv::Decoration::Binding, binding++); Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET); + if (image.IsRead() && !image.IsWritten()) { + Decorate(id, spv::Decoration::NonWritable); + } else if (image.IsWritten() && !image.IsRead()) { + Decorate(id, spv::Decoration::NonReadable); + } + + images.emplace(static_cast<u32>(image.GetIndex()), StorageImage{image_type, id}); } + return binding; } - void DeclareVertexRedeclarations() { - vertex_index = DeclareBuiltIn(spv::BuiltIn::VertexIndex, spv::StorageClass::Input, - t_in_uint, "vertex_index"); - instance_index = DeclareBuiltIn(spv::BuiltIn::InstanceIndex, spv::StorageClass::Input, - t_in_uint, "instance_index"); + bool IsInputAttributeArray() const { + return stage == ShaderType::TesselationControl || stage == ShaderType::TesselationEval || + stage == ShaderType::Geometry; + } - bool is_clip_distances_declared = false; - for (const auto index : ir.GetOutputAttributes()) { - if (index == Attribute::Index::ClipDistances0123 || - index == Attribute::Index::ClipDistances4567) { - is_clip_distances_declared = true; - } + bool IsOutputAttributeArray() const { + return stage == ShaderType::TesselationControl; + } + + u32 GetNumInputVertices() const { + switch (stage) { + case ShaderType::Geometry: + return GetNumPrimitiveTopologyVertices(specialization.primitive_topology); + case ShaderType::TesselationControl: + case ShaderType::TesselationEval: + return NumInputPatches; + default: + UNREACHABLE(); + return 1; } + } - std::vector<Id> members; - members.push_back(t_float4); - if (ir.UsesPointSize()) { - members.push_back(t_float); - } - if (is_clip_distances_declared) { - members.push_back(TypeArray(t_float, Constant(t_uint, 8))); - } - - const Id gl_per_vertex_struct = Name(TypeStruct(members), "PerVertex"); - Decorate(gl_per_vertex_struct, spv::Decoration::Block); - - u32 declaration_index = 0; - const auto MemberDecorateBuiltIn = 
[&](spv::BuiltIn builtin, std::string name, - bool condition) { - if (!condition) - return u32{}; - MemberName(gl_per_vertex_struct, declaration_index, name); - MemberDecorate(gl_per_vertex_struct, declaration_index, spv::Decoration::BuiltIn, - static_cast<u32>(builtin)); - return declaration_index++; + u32 GetNumOutputVertices() const { + switch (stage) { + case ShaderType::TesselationControl: + return header.common2.threads_per_input_primitive; + default: + UNREACHABLE(); + return 1; + } + } + + std::tuple<Id, VertexIndices> DeclareVertexStruct() { + struct BuiltIn { + Id type; + spv::BuiltIn builtin; + const char* name; }; + std::vector<BuiltIn> members; + members.reserve(4); + + const auto AddBuiltIn = [&](Id type, spv::BuiltIn builtin, const char* name) { + const auto index = static_cast<u32>(members.size()); + members.push_back(BuiltIn{type, builtin, name}); + return index; + }; + + VertexIndices indices; + indices.position = AddBuiltIn(t_float4, spv::BuiltIn::Position, "position"); + + if (ir.UsesViewportIndex()) { + if (stage != ShaderType::Vertex || device.IsExtShaderViewportIndexLayerSupported()) { + indices.viewport = AddBuiltIn(t_int, spv::BuiltIn::ViewportIndex, "viewport_index"); + } else { + LOG_ERROR(Render_Vulkan, + "Shader requires ViewportIndex but it's not supported on this " + "stage with this device."); + } + } - position_index = MemberDecorateBuiltIn(spv::BuiltIn::Position, "position", true); - point_size_index = - MemberDecorateBuiltIn(spv::BuiltIn::PointSize, "point_size", ir.UsesPointSize()); - clip_distances_index = MemberDecorateBuiltIn(spv::BuiltIn::ClipDistance, "clip_distances", - is_clip_distances_declared); + if (ir.UsesPointSize() || specialization.point_size) { + indices.point_size = AddBuiltIn(t_float, spv::BuiltIn::PointSize, "point_size"); + } + + const auto& output_attributes = ir.GetOutputAttributes(); + const bool declare_clip_distances = + std::any_of(output_attributes.begin(), output_attributes.end(), [](const auto& index) { + return index == Attribute::Index::ClipDistances0123 || + index == Attribute::Index::ClipDistances4567; + }); + if (declare_clip_distances) { + indices.clip_distances = AddBuiltIn(TypeArray(t_float, Constant(t_uint, 8)), + spv::BuiltIn::ClipDistance, "clip_distances"); + } + + std::vector<Id> member_types; + member_types.reserve(members.size()); + for (std::size_t i = 0; i < members.size(); ++i) { + member_types.push_back(members[i].type); + } + const Id per_vertex_struct = Name(TypeStruct(member_types), "PerVertex"); + Decorate(per_vertex_struct, spv::Decoration::Block); + + for (std::size_t index = 0; index < members.size(); ++index) { + const auto& member = members[index]; + MemberName(per_vertex_struct, static_cast<u32>(index), member.name); + MemberDecorate(per_vertex_struct, static_cast<u32>(index), spv::Decoration::BuiltIn, + static_cast<u32>(member.builtin)); + } - const Id type_pointer = TypePointer(spv::StorageClass::Output, gl_per_vertex_struct); - per_vertex = OpVariable(type_pointer, spv::StorageClass::Output); - AddGlobalVariable(Name(per_vertex, "per_vertex")); - interfaces.push_back(per_vertex); + return {per_vertex_struct, indices}; } void VisitBasicBlock(const NodeBlock& bb) { for (const auto& node : bb) { - static_cast<void>(Visit(node)); + [[maybe_unused]] const Type type = Visit(node).type; + ASSERT(type == Type::Void); } } - Id Visit(const Node& node) { + Expression Visit(const Node& node) { if (const auto operation = std::get_if<OperationNode>(&*node)) { const auto operation_index = 
static_cast<std::size_t>(operation->GetCode()); const auto decompiler = operation_decompilers[operation_index]; @@ -516,18 +960,21 @@ private: UNREACHABLE_MSG("Operation decompiler {} not defined", operation_index); } return (this->*decompiler)(*operation); + } - } else if (const auto gpr = std::get_if<GprNode>(&*node)) { + if (const auto gpr = std::get_if<GprNode>(&*node)) { const u32 index = gpr->GetIndex(); if (index == Register::ZeroIndex) { - return Constant(t_float, 0.0f); + return {v_float_zero, Type::Float}; } - return Emit(OpLoad(t_float, registers.at(index))); + return {OpLoad(t_float, registers.at(index)), Type::Float}; + } - } else if (const auto immediate = std::get_if<ImmediateNode>(&*node)) { - return BitcastTo<Type::Float>(Constant(t_uint, immediate->GetValue())); + if (const auto immediate = std::get_if<ImmediateNode>(&*node)) { + return {Constant(t_uint, immediate->GetValue()), Type::Uint}; + } - } else if (const auto predicate = std::get_if<PredicateNode>(&*node)) { + if (const auto predicate = std::get_if<PredicateNode>(&*node)) { const auto value = [&]() -> Id { switch (const auto index = predicate->GetIndex(); index) { case Tegra::Shader::Pred::UnusedIndex: @@ -535,74 +982,108 @@ private: case Tegra::Shader::Pred::NeverExecute: return v_false; default: - return Emit(OpLoad(t_bool, predicates.at(index))); + return OpLoad(t_bool, predicates.at(index)); } }(); if (predicate->IsNegated()) { - return Emit(OpLogicalNot(t_bool, value)); + return {OpLogicalNot(t_bool, value), Type::Bool}; } - return value; + return {value, Type::Bool}; + } - } else if (const auto abuf = std::get_if<AbufNode>(&*node)) { + if (const auto abuf = std::get_if<AbufNode>(&*node)) { const auto attribute = abuf->GetIndex(); - const auto element = abuf->GetElement(); + const u32 element = abuf->GetElement(); + const auto& buffer = abuf->GetBuffer(); + + const auto ArrayPass = [&](Id pointer_type, Id composite, std::vector<u32> indices) { + std::vector<Id> members; + members.reserve(std::size(indices) + 1); + + if (buffer && IsInputAttributeArray()) { + members.push_back(AsUint(Visit(buffer))); + } + for (const u32 index : indices) { + members.push_back(Constant(t_uint, index)); + } + return OpAccessChain(pointer_type, composite, members); + }; switch (attribute) { - case Attribute::Index::Position: - if (stage != ShaderType::Fragment) { - UNIMPLEMENTED(); - break; - } else { + case Attribute::Index::Position: { + if (stage == ShaderType::Fragment) { if (element == 3) { - return Constant(t_float, 1.0f); + return {Constant(t_float, 1.0f), Type::Float}; } - return Emit(OpLoad(t_float, AccessElement(t_in_float, frag_coord, element))); + return {OpLoad(t_float, AccessElement(t_in_float, frag_coord, element)), + Type::Float}; } + const std::vector elements = {in_indices.position.value(), element}; + return {OpLoad(t_float, ArrayPass(t_in_float, in_vertex, elements)), Type::Float}; + } + case Attribute::Index::PointCoord: { + switch (element) { + case 0: + case 1: + return {OpCompositeExtract(t_float, OpLoad(t_float2, point_coord), element), + Type::Float}; + } + UNIMPLEMENTED_MSG("Unimplemented point coord element={}", element); + return {v_float_zero, Type::Float}; + } case Attribute::Index::TessCoordInstanceIDVertexID: // TODO(Subv): Find out what the values are for the first two elements when inside a // vertex shader, and what's the value of the fourth element when inside a Tess Eval // shader. 
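
Visit now returns a typed Expression instead of a bare Id, and call sites in the hunks below convert through helpers such as AsUint and AsBool, which bitcast only when the tracked type differs. A hypothetical shape for one of them, assuming Sirit's OpBitcast and the t_uint type id declared in this class:

    // Not the patch's verbatim helper; a sketch of the call sites' contract.
    Id AsUint(Expression expr) {
        switch (expr.type) {
        case Type::Uint:
            return expr.id;                    // already a uint, no-op
        case Type::Float:
        case Type::Int:
            return OpBitcast(t_uint, expr.id); // reinterpret bits, keep width
        default:
            UNREACHABLE_MSG("Cannot convert to uint");
            return expr.id;
        }
    }

Below, the TessCoordInstanceIDVertexID handling drops its vertex-only assert now that elements 0 and 1 read the tessellation coordinate.
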
- ASSERT(stage == ShaderType::Vertex); switch (element) { + case 0: + case 1: + return {OpLoad(t_float, AccessElement(t_in_float, tess_coord, element)), + Type::Float}; case 2: - return BitcastFrom<Type::Uint>(Emit(OpLoad(t_uint, instance_index))); + return {OpLoad(t_uint, instance_index), Type::Uint}; case 3: - return BitcastFrom<Type::Uint>(Emit(OpLoad(t_uint, vertex_index))); + return {OpLoad(t_uint, vertex_index), Type::Uint}; } UNIMPLEMENTED_MSG("Unmanaged TessCoordInstanceIDVertexID element={}", element); - return Constant(t_float, 0); + return {Constant(t_uint, 0U), Type::Uint}; case Attribute::Index::FrontFacing: // TODO(Subv): Find out what the values are for the other elements. ASSERT(stage == ShaderType::Fragment); if (element == 3) { - const Id is_front_facing = Emit(OpLoad(t_bool, front_facing)); - const Id true_value = - BitcastTo<Type::Float>(Constant(t_int, static_cast<s32>(-1))); - const Id false_value = BitcastTo<Type::Float>(Constant(t_int, 0)); - return Emit(OpSelect(t_float, is_front_facing, true_value, false_value)); + const Id is_front_facing = OpLoad(t_bool, front_facing); + const Id true_value = Constant(t_int, static_cast<s32>(-1)); + const Id false_value = Constant(t_int, 0); + return {OpSelect(t_int, is_front_facing, true_value, false_value), Type::Int}; } UNIMPLEMENTED_MSG("Unmanaged FrontFacing element={}", element); - return Constant(t_float, 0.0f); + return {v_float_zero, Type::Float}; default: if (IsGenericAttribute(attribute)) { - const Id pointer = - AccessElement(t_in_float, input_attributes.at(attribute), element); - return Emit(OpLoad(t_float, pointer)); + const u32 location = GetGenericAttributeLocation(attribute); + const auto type_descriptor = GetAttributeType(location); + const Type type = type_descriptor.type; + const Id attribute_id = input_attributes.at(attribute); + const std::vector elements = {element}; + const Id pointer = ArrayPass(type_descriptor.scalar, attribute_id, elements); + return {OpLoad(GetTypeDefinition(type), pointer), type}; } break; } UNIMPLEMENTED_MSG("Unhandled input attribute: {}", static_cast<u32>(attribute)); + return {v_float_zero, Type::Float}; + } - } else if (const auto cbuf = std::get_if<CbufNode>(&*node)) { + if (const auto cbuf = std::get_if<CbufNode>(&*node)) { const Node& offset = cbuf->GetOffset(); const Id buffer_id = constant_buffers.at(cbuf->GetIndex()); Id pointer{}; if (device.IsKhrUniformBufferStandardLayoutSupported()) { - const Id buffer_offset = Emit(OpShiftRightLogical( - t_uint, BitcastTo<Type::Uint>(Visit(offset)), Constant(t_uint, 2u))); - pointer = Emit( - OpAccessChain(t_cbuf_float, buffer_id, Constant(t_uint, 0u), buffer_offset)); + const Id buffer_offset = + OpShiftRightLogical(t_uint, AsUint(Visit(offset)), Constant(t_uint, 2U)); + pointer = + OpAccessChain(t_cbuf_float, buffer_id, Constant(t_uint, 0U), buffer_offset); } else { Id buffer_index{}; Id buffer_element{}; @@ -614,53 +1095,76 @@ private: buffer_element = Constant(t_uint, (offset_imm / 4) % 4); } else if (std::holds_alternative<OperationNode>(*offset)) { // Indirect access - const Id offset_id = BitcastTo<Type::Uint>(Visit(offset)); - const Id unsafe_offset = Emit(OpUDiv(t_uint, offset_id, Constant(t_uint, 4))); - const Id final_offset = Emit(OpUMod( - t_uint, unsafe_offset, Constant(t_uint, MAX_CONSTBUFFER_ELEMENTS - 1))); - buffer_index = Emit(OpUDiv(t_uint, final_offset, Constant(t_uint, 4))); - buffer_element = Emit(OpUMod(t_uint, final_offset, Constant(t_uint, 4))); + const Id offset_id = AsUint(Visit(offset)); + const Id 
unsafe_offset = OpUDiv(t_uint, offset_id, Constant(t_uint, 4)); + const Id final_offset = + OpUMod(t_uint, unsafe_offset, Constant(t_uint, MaxConstBufferElements - 1)); + buffer_index = OpUDiv(t_uint, final_offset, Constant(t_uint, 4)); + buffer_element = OpUMod(t_uint, final_offset, Constant(t_uint, 4)); } else { UNREACHABLE_MSG("Unmanaged offset node type"); } - pointer = Emit(OpAccessChain(t_cbuf_float, buffer_id, Constant(t_uint, 0), - buffer_index, buffer_element)); + pointer = OpAccessChain(t_cbuf_float, buffer_id, Constant(t_uint, 0), buffer_index, + buffer_element); } - return Emit(OpLoad(t_float, pointer)); + return {OpLoad(t_float, pointer), Type::Float}; + } - } else if (const auto gmem = std::get_if<GmemNode>(&*node)) { + if (const auto gmem = std::get_if<GmemNode>(&*node)) { const Id gmem_buffer = global_buffers.at(gmem->GetDescriptor()); - const Id real = BitcastTo<Type::Uint>(Visit(gmem->GetRealAddress())); - const Id base = BitcastTo<Type::Uint>(Visit(gmem->GetBaseAddress())); + const Id real = AsUint(Visit(gmem->GetRealAddress())); + const Id base = AsUint(Visit(gmem->GetBaseAddress())); + + Id offset = OpISub(t_uint, real, base); + offset = OpUDiv(t_uint, offset, Constant(t_uint, 4U)); + return {OpLoad(t_float, + OpAccessChain(t_gmem_float, gmem_buffer, Constant(t_uint, 0U), offset)), + Type::Float}; + } - Id offset = Emit(OpISub(t_uint, real, base)); - offset = Emit(OpUDiv(t_uint, offset, Constant(t_uint, 4u))); - return Emit(OpLoad(t_float, Emit(OpAccessChain(t_gmem_float, gmem_buffer, - Constant(t_uint, 0u), offset)))); + if (const auto lmem = std::get_if<LmemNode>(&*node)) { + Id address = AsUint(Visit(lmem->GetAddress())); + address = OpShiftRightLogical(t_uint, address, Constant(t_uint, 2U)); + const Id pointer = OpAccessChain(t_prv_float, local_memory, address); + return {OpLoad(t_float, pointer), Type::Float}; + } - } else if (const auto conditional = std::get_if<ConditionalNode>(&*node)) { + if (const auto smem = std::get_if<SmemNode>(&*node)) { + Id address = AsUint(Visit(smem->GetAddress())); + address = OpShiftRightLogical(t_uint, address, Constant(t_uint, 2U)); + const Id pointer = OpAccessChain(t_smem_uint, shared_memory, address); + return {OpLoad(t_uint, pointer), Type::Uint}; + } + + if (const auto internal_flag = std::get_if<InternalFlagNode>(&*node)) { + const Id flag = internal_flags.at(static_cast<std::size_t>(internal_flag->GetFlag())); + return {OpLoad(t_bool, flag), Type::Bool}; + } + + if (const auto conditional = std::get_if<ConditionalNode>(&*node)) { // It's invalid to call conditional on nested nodes, use an operation instead const Id true_label = OpLabel(); const Id skip_label = OpLabel(); - const Id condition = Visit(conditional->GetCondition()); - Emit(OpSelectionMerge(skip_label, spv::SelectionControlMask::MaskNone)); - Emit(OpBranchConditional(condition, true_label, skip_label)); - Emit(true_label); + const Id condition = AsBool(Visit(conditional->GetCondition())); + OpSelectionMerge(skip_label, spv::SelectionControlMask::MaskNone); + OpBranchConditional(condition, true_label, skip_label); + AddLabel(true_label); - ++conditional_nest_count; + conditional_branch_set = true; + inside_branch = false; VisitBasicBlock(conditional->GetCode()); - --conditional_nest_count; - - if (inside_branch == 0) { - Emit(OpBranch(skip_label)); + conditional_branch_set = false; + if (!inside_branch) { + OpBranch(skip_label); } else { - inside_branch--; + inside_branch = false; } - Emit(skip_label); + AddLabel(skip_label); return {}; + } - } else if (const 
auto comment = std::get_if<CommentNode>(&*node)) { - Name(Emit(OpUndef(t_void)), comment->GetText()); + if (const auto comment = std::get_if<CommentNode>(&*node)) { + Name(OpUndef(t_void), comment->GetText()); return {}; } @@ -669,94 +1173,126 @@ private: } template <Id (Module::*func)(Id, Id), Type result_type, Type type_a = result_type> - Id Unary(Operation operation) { + Expression Unary(Operation operation) { const Id type_def = GetTypeDefinition(result_type); - const Id op_a = VisitOperand<type_a>(operation, 0); + const Id op_a = As(Visit(operation[0]), type_a); - const Id value = BitcastFrom<result_type>(Emit((this->*func)(type_def, op_a))); + const Id value = (this->*func)(type_def, op_a); if (IsPrecise(operation)) { Decorate(value, spv::Decoration::NoContraction); } - return value; + return {value, result_type}; } template <Id (Module::*func)(Id, Id, Id), Type result_type, Type type_a = result_type, Type type_b = type_a> - Id Binary(Operation operation) { + Expression Binary(Operation operation) { const Id type_def = GetTypeDefinition(result_type); - const Id op_a = VisitOperand<type_a>(operation, 0); - const Id op_b = VisitOperand<type_b>(operation, 1); + const Id op_a = As(Visit(operation[0]), type_a); + const Id op_b = As(Visit(operation[1]), type_b); - const Id value = BitcastFrom<result_type>(Emit((this->*func)(type_def, op_a, op_b))); + const Id value = (this->*func)(type_def, op_a, op_b); if (IsPrecise(operation)) { Decorate(value, spv::Decoration::NoContraction); } - return value; + return {value, result_type}; } template <Id (Module::*func)(Id, Id, Id, Id), Type result_type, Type type_a = result_type, Type type_b = type_a, Type type_c = type_b> - Id Ternary(Operation operation) { + Expression Ternary(Operation operation) { const Id type_def = GetTypeDefinition(result_type); - const Id op_a = VisitOperand<type_a>(operation, 0); - const Id op_b = VisitOperand<type_b>(operation, 1); - const Id op_c = VisitOperand<type_c>(operation, 2); + const Id op_a = As(Visit(operation[0]), type_a); + const Id op_b = As(Visit(operation[1]), type_b); + const Id op_c = As(Visit(operation[2]), type_c); - const Id value = BitcastFrom<result_type>(Emit((this->*func)(type_def, op_a, op_b, op_c))); + const Id value = (this->*func)(type_def, op_a, op_b, op_c); if (IsPrecise(operation)) { Decorate(value, spv::Decoration::NoContraction); } - return value; + return {value, result_type}; } template <Id (Module::*func)(Id, Id, Id, Id, Id), Type result_type, Type type_a = result_type, Type type_b = type_a, Type type_c = type_b, Type type_d = type_c> - Id Quaternary(Operation operation) { + Expression Quaternary(Operation operation) { const Id type_def = GetTypeDefinition(result_type); - const Id op_a = VisitOperand<type_a>(operation, 0); - const Id op_b = VisitOperand<type_b>(operation, 1); - const Id op_c = VisitOperand<type_c>(operation, 2); - const Id op_d = VisitOperand<type_d>(operation, 3); + const Id op_a = As(Visit(operation[0]), type_a); + const Id op_b = As(Visit(operation[1]), type_b); + const Id op_c = As(Visit(operation[2]), type_c); + const Id op_d = As(Visit(operation[3]), type_d); - const Id value = - BitcastFrom<result_type>(Emit((this->*func)(type_def, op_a, op_b, op_c, op_d))); + const Id value = (this->*func)(type_def, op_a, op_b, op_c, op_d); if (IsPrecise(operation)) { Decorate(value, spv::Decoration::NoContraction); } - return value; + return {value, result_type}; } - Id Assign(Operation operation) { + Expression Assign(Operation operation) { const Node& dest = operation[0]; const 
Node& src = operation[1]; - Id target{}; + Expression target{}; if (const auto gpr = std::get_if<GprNode>(&*dest)) { if (gpr->GetIndex() == Register::ZeroIndex) { // Writing to Register::ZeroIndex is a no op return {}; } - target = registers.at(gpr->GetIndex()); + target = {registers.at(gpr->GetIndex()), Type::Float}; } else if (const auto abuf = std::get_if<AbufNode>(&*dest)) { - target = [&]() -> Id { + const auto& buffer = abuf->GetBuffer(); + const auto ArrayPass = [&](Id pointer_type, Id composite, std::vector<u32> indices) { + std::vector<Id> members; + members.reserve(std::size(indices) + 1); + + if (buffer && IsOutputAttributeArray()) { + members.push_back(AsUint(Visit(buffer))); + } + for (const u32 index : indices) { + members.push_back(Constant(t_uint, index)); + } + return OpAccessChain(pointer_type, composite, members); + }; + + target = [&]() -> Expression { + const u32 element = abuf->GetElement(); switch (const auto attribute = abuf->GetIndex(); attribute) { - case Attribute::Index::Position: - return AccessElement(t_out_float, per_vertex, position_index, - abuf->GetElement()); + case Attribute::Index::Position: { + const u32 index = out_indices.position.value(); + return {ArrayPass(t_out_float, out_vertex, {index, element}), Type::Float}; + } case Attribute::Index::LayerViewportPointSize: - UNIMPLEMENTED_IF(abuf->GetElement() != 3); - return AccessElement(t_out_float, per_vertex, point_size_index); - case Attribute::Index::ClipDistances0123: - return AccessElement(t_out_float, per_vertex, clip_distances_index, - abuf->GetElement()); - case Attribute::Index::ClipDistances4567: - return AccessElement(t_out_float, per_vertex, clip_distances_index, - abuf->GetElement() + 4); + switch (element) { + case 2: { + if (!out_indices.viewport) { + return {}; + } + const u32 index = out_indices.viewport.value(); + return {AccessElement(t_out_int, out_vertex, index), Type::Int}; + } + case 3: { + const auto index = out_indices.point_size.value(); + return {AccessElement(t_out_float, out_vertex, index), Type::Float}; + } + default: + UNIMPLEMENTED_MSG("LayerViewportPoint element={}", abuf->GetElement()); + return {}; + } + case Attribute::Index::ClipDistances0123: { + const u32 index = out_indices.clip_distances.value(); + return {AccessElement(t_out_float, out_vertex, index, element), Type::Float}; + } + case Attribute::Index::ClipDistances4567: { + const u32 index = out_indices.clip_distances.value(); + return {AccessElement(t_out_float, out_vertex, index, element + 4), + Type::Float}; + } default: if (IsGenericAttribute(attribute)) { - return AccessElement(t_out_float, output_attributes.at(attribute), - abuf->GetElement()); + const Id composite = output_attributes.at(attribute); + return {ArrayPass(t_out_float, composite, {element}), Type::Float}; } UNIMPLEMENTED_MSG("Unhandled output attribute: {}", static_cast<u32>(attribute)); @@ -764,72 +1300,154 @@ private: } }(); + } else if (const auto patch = std::get_if<PatchNode>(&*dest)) { + target = [&]() -> Expression { + const u32 offset = patch->GetOffset(); + switch (offset) { + case 0: + case 1: + case 2: + case 3: + return {AccessElement(t_out_float, tess_level_outer, offset % 4), Type::Float}; + case 4: + case 5: + return {AccessElement(t_out_float, tess_level_inner, offset % 4), Type::Float}; + } + UNIMPLEMENTED_MSG("Unhandled patch output offset: {}", offset); + return {}; + }(); + } else if (const auto lmem = std::get_if<LmemNode>(&*dest)) { - Id address = BitcastTo<Type::Uint>(Visit(lmem->GetAddress())); - address = 
Emit(OpUDiv(t_uint, address, Constant(t_uint, 4))); - target = Emit(OpAccessChain(t_prv_float, local_memory, {address})); + Id address = AsUint(Visit(lmem->GetAddress())); + address = OpUDiv(t_uint, address, Constant(t_uint, 4)); + target = {OpAccessChain(t_prv_float, local_memory, address), Type::Float}; + + } else if (const auto smem = std::get_if<SmemNode>(&*dest)) { + ASSERT(stage == ShaderType::Compute); + Id address = AsUint(Visit(smem->GetAddress())); + address = OpShiftRightLogical(t_uint, address, Constant(t_uint, 2U)); + target = {OpAccessChain(t_smem_uint, shared_memory, address), Type::Uint}; + + } else if (const auto gmem = std::get_if<GmemNode>(&*dest)) { + const Id real = AsUint(Visit(gmem->GetRealAddress())); + const Id base = AsUint(Visit(gmem->GetBaseAddress())); + const Id diff = OpISub(t_uint, real, base); + const Id offset = OpShiftRightLogical(t_uint, diff, Constant(t_uint, 2)); + + const Id gmem_buffer = global_buffers.at(gmem->GetDescriptor()); + target = {OpAccessChain(t_gmem_float, gmem_buffer, Constant(t_uint, 0), offset), + Type::Float}; + + } else { + UNIMPLEMENTED(); } - Emit(OpStore(target, Visit(src))); + OpStore(target.id, As(Visit(src), target.type)); return {}; } - Id FCastHalf0(Operation operation) { - UNIMPLEMENTED(); - return {}; + template <u32 offset> + Expression FCastHalf(Operation operation) { + const Id value = AsHalfFloat(Visit(operation[0])); + return {GetFloatFromHalfScalar(OpCompositeExtract(t_scalar_half, value, offset)), + Type::Float}; } - Id FCastHalf1(Operation operation) { - UNIMPLEMENTED(); - return {}; - } + Expression FSwizzleAdd(Operation operation) { + const Id minus = Constant(t_float, -1.0f); + const Id plus = v_float_one; + const Id zero = v_float_zero; + const Id lut_a = ConstantComposite(t_float4, minus, plus, minus, zero); + const Id lut_b = ConstantComposite(t_float4, minus, minus, plus, minus); - Id FSwizzleAdd(Operation operation) { - UNIMPLEMENTED(); - return {}; - } + Id mask = OpLoad(t_uint, thread_id); + mask = OpBitwiseAnd(t_uint, mask, Constant(t_uint, 3)); + mask = OpShiftLeftLogical(t_uint, mask, Constant(t_uint, 1)); + mask = OpShiftRightLogical(t_uint, AsUint(Visit(operation[2])), mask); + mask = OpBitwiseAnd(t_uint, mask, Constant(t_uint, 3)); - Id HNegate(Operation operation) { - UNIMPLEMENTED(); - return {}; + const Id modifier_a = OpVectorExtractDynamic(t_float, lut_a, mask); + const Id modifier_b = OpVectorExtractDynamic(t_float, lut_b, mask); + + const Id op_a = OpFMul(t_float, AsFloat(Visit(operation[0])), modifier_a); + const Id op_b = OpFMul(t_float, AsFloat(Visit(operation[1])), modifier_b); + return {OpFAdd(t_float, op_a, op_b), Type::Float}; } - Id HClamp(Operation operation) { - UNIMPLEMENTED(); - return {}; + Expression HNegate(Operation operation) { + const bool is_f16 = device.IsFloat16Supported(); + const Id minus_one = Constant(t_scalar_half, is_f16 ? 0xbc00 : 0xbf800000); + const Id one = Constant(t_scalar_half, is_f16 ? 
0x3c00 : 0x3f800000); + const auto GetNegate = [&](std::size_t index) { + return OpSelect(t_scalar_half, AsBool(Visit(operation[index])), minus_one, one); + }; + const Id negation = OpCompositeConstruct(t_half, GetNegate(1), GetNegate(2)); + return {OpFMul(t_half, AsHalfFloat(Visit(operation[0])), negation), Type::HalfFloat}; } - Id HCastFloat(Operation operation) { - UNIMPLEMENTED(); - return {}; + Expression HClamp(Operation operation) { + const auto Pack = [&](std::size_t index) { + const Id scalar = GetHalfScalarFromFloat(AsFloat(Visit(operation[index]))); + return OpCompositeConstruct(t_half, scalar, scalar); + }; + const Id value = AsHalfFloat(Visit(operation[0])); + const Id min = Pack(1); + const Id max = Pack(2); + + const Id clamped = OpFClamp(t_half, value, min, max); + if (IsPrecise(operation)) { + Decorate(clamped, spv::Decoration::NoContraction); + } + return {clamped, Type::HalfFloat}; } - Id HUnpack(Operation operation) { - UNIMPLEMENTED(); - return {}; + Expression HCastFloat(Operation operation) { + const Id value = GetHalfScalarFromFloat(AsFloat(Visit(operation[0]))); + return {OpCompositeConstruct(t_half, value, Constant(t_scalar_half, 0)), Type::HalfFloat}; } - Id HMergeF32(Operation operation) { - UNIMPLEMENTED(); - return {}; + Expression HUnpack(Operation operation) { + Expression operand = Visit(operation[0]); + const auto type = std::get<Tegra::Shader::HalfType>(operation.GetMeta()); + if (type == Tegra::Shader::HalfType::H0_H1) { + return operand; + } + const auto value = [&] { + switch (std::get<Tegra::Shader::HalfType>(operation.GetMeta())) { + case Tegra::Shader::HalfType::F32: + return GetHalfScalarFromFloat(AsFloat(operand)); + case Tegra::Shader::HalfType::H0_H0: + return OpCompositeExtract(t_scalar_half, AsHalfFloat(operand), 0); + case Tegra::Shader::HalfType::H1_H1: + return OpCompositeExtract(t_scalar_half, AsHalfFloat(operand), 1); + default: + UNREACHABLE(); + return ConstantNull(t_half); + } + }(); + return {OpCompositeConstruct(t_half, value, value), Type::HalfFloat}; } - Id HMergeH0(Operation operation) { - UNIMPLEMENTED(); - return {}; + Expression HMergeF32(Operation operation) { + const Id value = AsHalfFloat(Visit(operation[0])); + return {GetFloatFromHalfScalar(OpCompositeExtract(t_scalar_half, value, 0)), Type::Float}; } - Id HMergeH1(Operation operation) { - UNIMPLEMENTED(); - return {}; + template <u32 offset> + Expression HMergeHN(Operation operation) { + const Id target = AsHalfFloat(Visit(operation[0])); + const Id source = AsHalfFloat(Visit(operation[1])); + const Id object = OpCompositeExtract(t_scalar_half, source, offset); + return {OpCompositeInsert(t_half, object, target, offset), Type::HalfFloat}; } - Id HPack2(Operation operation) { - UNIMPLEMENTED(); - return {}; + Expression HPack2(Operation operation) { + const Id low = GetHalfScalarFromFloat(AsFloat(Visit(operation[0]))); + const Id high = GetHalfScalarFromFloat(AsFloat(Visit(operation[1]))); + return {OpCompositeConstruct(t_half, low, high), Type::HalfFloat}; } - Id LogicalAssign(Operation operation) { + Expression LogicalAssign(Operation operation) { const Node& dest = operation[0]; const Node& src = operation[1]; @@ -850,106 +1468,190 @@ private: target = internal_flags.at(static_cast<u32>(flag->GetFlag())); } - Emit(OpStore(target, Visit(src))); + OpStore(target, AsBool(Visit(src))); return {}; } - Id LogicalPick2(Operation operation) { - UNIMPLEMENTED(); - return {}; + Id GetTextureSampler(Operation operation) { + const auto& meta = 
std::get<MetaTexture>(operation.GetMeta()); + ASSERT(!meta.sampler.IsBuffer()); + + const auto& entry = sampled_images.at(meta.sampler.GetIndex()); + return OpLoad(entry.sampled_image_type, entry.sampler); } - Id LogicalAnd2(Operation operation) { - UNIMPLEMENTED(); - return {}; + Id GetTextureImage(Operation operation) { + const auto& meta = std::get<MetaTexture>(operation.GetMeta()); + const u32 index = meta.sampler.GetIndex(); + if (meta.sampler.IsBuffer()) { + const auto& entry = texel_buffers.at(index); + return OpLoad(entry.image_type, entry.image); + } else { + const auto& entry = sampled_images.at(index); + return OpImage(entry.image_type, GetTextureSampler(operation)); + } } - Id GetTextureSampler(Operation operation) { - const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); - const auto entry = sampler_images.at(static_cast<u32>(meta->sampler.GetIndex())); - return Emit(OpLoad(entry.sampled_image_type, entry.sampler)); + Id GetImage(Operation operation) { + const auto& meta = std::get<MetaImage>(operation.GetMeta()); + const auto entry = images.at(meta.image.GetIndex()); + return OpLoad(entry.image_type, entry.image); } - Id GetTextureImage(Operation operation) { - const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); - const auto entry = sampler_images.at(static_cast<u32>(meta->sampler.GetIndex())); - return Emit(OpImage(entry.image_type, GetTextureSampler(operation))); + Id AssembleVector(const std::vector<Id>& coords, Type type) { + const Id coords_type = GetTypeVectorDefinitionLut(type).at(coords.size() - 1); + return coords.size() == 1 ? coords[0] : OpCompositeConstruct(coords_type, coords); } - Id GetTextureCoordinates(Operation operation) { - const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); + Id GetCoordinates(Operation operation, Type type) { std::vector<Id> coords; for (std::size_t i = 0; i < operation.GetOperandsCount(); ++i) { - coords.push_back(Visit(operation[i])); + coords.push_back(As(Visit(operation[i]), type)); } - if (meta->sampler.IsArray()) { - const Id array_integer = BitcastTo<Type::Int>(Visit(meta->array)); - coords.push_back(Emit(OpConvertSToF(t_float, array_integer))); + if (const auto meta = std::get_if<MetaTexture>(&operation.GetMeta())) { + // Add array coordinate for textures + if (meta->sampler.IsArray()) { + Id array = AsInt(Visit(meta->array)); + if (type == Type::Float) { + array = OpConvertSToF(t_float, array); + } + coords.push_back(array); + } } - if (meta->sampler.IsShadow()) { - coords.push_back(Visit(meta->depth_compare)); + return AssembleVector(coords, type); + } + + Id GetOffsetCoordinates(Operation operation) { + const auto& meta = std::get<MetaTexture>(operation.GetMeta()); + std::vector<Id> coords; + coords.reserve(meta.aoffi.size()); + for (const auto& coord : meta.aoffi) { + coords.push_back(AsInt(Visit(coord))); } + return AssembleVector(coords, Type::Int); + } - const std::array<Id, 4> t_float_lut = {nullptr, t_float2, t_float3, t_float4}; - return coords.size() == 1 - ? 
coords[0] - : Emit(OpCompositeConstruct(t_float_lut.at(coords.size() - 1), coords)); + std::pair<Id, Id> GetDerivatives(Operation operation) { + const auto& meta = std::get<MetaTexture>(operation.GetMeta()); + const auto& derivatives = meta.derivates; + ASSERT(derivatives.size() % 2 == 0); + + const std::size_t components = derivatives.size() / 2; + std::vector<Id> dx, dy; + dx.reserve(components); + dy.reserve(components); + for (std::size_t index = 0; index < components; ++index) { + dx.push_back(AsFloat(Visit(derivatives.at(index * 2 + 0)))); + dy.push_back(AsFloat(Visit(derivatives.at(index * 2 + 1)))); + } + return {AssembleVector(dx, Type::Float), AssembleVector(dy, Type::Float)}; } - Id GetTextureElement(Operation operation, Id sample_value) { - const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); - ASSERT(meta); - return Emit(OpCompositeExtract(t_float, sample_value, meta->element)); + Expression GetTextureElement(Operation operation, Id sample_value, Type type) { + const auto& meta = std::get<MetaTexture>(operation.GetMeta()); + const auto type_def = GetTypeDefinition(type); + return {OpCompositeExtract(type_def, sample_value, meta.element), type}; } - Id Texture(Operation operation) { - const Id texture = Emit(OpImageSampleImplicitLod(t_float4, GetTextureSampler(operation), - GetTextureCoordinates(operation))); - return GetTextureElement(operation, texture); + Expression Texture(Operation operation) { + const auto& meta = std::get<MetaTexture>(operation.GetMeta()); + UNIMPLEMENTED_IF(!meta.aoffi.empty()); + + const bool can_implicit = stage == ShaderType::Fragment; + const Id sampler = GetTextureSampler(operation); + const Id coords = GetCoordinates(operation, Type::Float); + + if (meta.depth_compare) { + // Depth sampling + UNIMPLEMENTED_IF(meta.bias); + const Id dref = AsFloat(Visit(meta.depth_compare)); + if (can_implicit) { + return {OpImageSampleDrefImplicitLod(t_float, sampler, coords, dref, {}), + Type::Float}; + } else { + return {OpImageSampleDrefExplicitLod(t_float, sampler, coords, dref, + spv::ImageOperandsMask::Lod, v_float_zero), + Type::Float}; + } + } + + std::vector<Id> operands; + spv::ImageOperandsMask mask{}; + if (meta.bias) { + mask = mask | spv::ImageOperandsMask::Bias; + operands.push_back(AsFloat(Visit(meta.bias))); + } + + Id texture; + if (can_implicit) { + texture = OpImageSampleImplicitLod(t_float4, sampler, coords, mask, operands); + } else { + texture = OpImageSampleExplicitLod(t_float4, sampler, coords, + mask | spv::ImageOperandsMask::Lod, v_float_zero, + operands); + } + return GetTextureElement(operation, texture, Type::Float); } - Id TextureLod(Operation operation) { - const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); - const Id texture = Emit(OpImageSampleExplicitLod( - t_float4, GetTextureSampler(operation), GetTextureCoordinates(operation), - spv::ImageOperandsMask::Lod, Visit(meta->lod))); - return GetTextureElement(operation, texture); + Expression TextureLod(Operation operation) { + const auto& meta = std::get<MetaTexture>(operation.GetMeta()); + + const Id sampler = GetTextureSampler(operation); + const Id coords = GetCoordinates(operation, Type::Float); + const Id lod = AsFloat(Visit(meta.lod)); + + spv::ImageOperandsMask mask = spv::ImageOperandsMask::Lod; + std::vector<Id> operands; + if (!meta.aoffi.empty()) { + mask = mask | spv::ImageOperandsMask::Offset; + operands.push_back(GetOffsetCoordinates(operation)); + } + + if (meta.sampler.IsShadow()) { + const Id dref = AsFloat(Visit(meta.depth_compare)); + 
return { + OpImageSampleDrefExplicitLod(t_float, sampler, coords, dref, mask, lod, operands), + Type::Float}; + } + const Id texture = OpImageSampleExplicitLod(t_float4, sampler, coords, mask, lod, operands); + return GetTextureElement(operation, texture, Type::Float); } - Id TextureGather(Operation operation) { - const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); - const auto coords = GetTextureCoordinates(operation); + Expression TextureGather(Operation operation) { + const auto& meta = std::get<MetaTexture>(operation.GetMeta()); + UNIMPLEMENTED_IF(!meta.aoffi.empty()); - Id texture; - if (meta->sampler.IsShadow()) { - texture = Emit(OpImageDrefGather(t_float4, GetTextureSampler(operation), coords, - Visit(meta->component))); + const Id coords = GetCoordinates(operation, Type::Float); + Id texture{}; + if (meta.sampler.IsShadow()) { + texture = OpImageDrefGather(t_float4, GetTextureSampler(operation), coords, + AsFloat(Visit(meta.depth_compare))); } else { u32 component_value = 0; - if (meta->component) { - const auto component = std::get_if<ImmediateNode>(&*meta->component); + if (meta.component) { + const auto component = std::get_if<ImmediateNode>(&*meta.component); ASSERT_MSG(component, "Component is not an immediate value"); component_value = component->GetValue(); } - texture = Emit(OpImageGather(t_float4, GetTextureSampler(operation), coords, - Constant(t_uint, component_value))); + texture = OpImageGather(t_float4, GetTextureSampler(operation), coords, + Constant(t_uint, component_value)); } - - return GetTextureElement(operation, texture); + return GetTextureElement(operation, texture, Type::Float); } - Id TextureQueryDimensions(Operation operation) { - const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); - const auto image_id = GetTextureImage(operation); - AddCapability(spv::Capability::ImageQuery); + Expression TextureQueryDimensions(Operation operation) { + const auto& meta = std::get<MetaTexture>(operation.GetMeta()); + UNIMPLEMENTED_IF(!meta.aoffi.empty()); + UNIMPLEMENTED_IF(meta.depth_compare); - if (meta->element == 3) { - return BitcastTo<Type::Float>(Emit(OpImageQueryLevels(t_int, image_id))); + const auto image_id = GetTextureImage(operation); + if (meta.element == 3) { + return {OpImageQueryLevels(t_int, image_id), Type::Int}; } - const Id lod = VisitOperand<Type::Uint>(operation, 0); + const Id lod = AsUint(Visit(operation[0])); const std::size_t coords_count = [&]() { - switch (const auto type = meta->sampler.GetType(); type) { + switch (const auto type = meta.sampler.GetType(); type) { case Tegra::Shader::TextureType::Texture1D: return 1; case Tegra::Shader::TextureType::Texture2D: @@ -963,136 +1665,190 @@ private: } }(); - if (meta->element >= coords_count) { - return Constant(t_float, 0.0f); + if (meta.element >= coords_count) { + return {v_float_zero, Type::Float}; } const std::array<Id, 3> types = {t_int, t_int2, t_int3}; - const Id sizes = Emit(OpImageQuerySizeLod(types.at(coords_count - 1), image_id, lod)); - const Id size = Emit(OpCompositeExtract(t_int, sizes, meta->element)); - return BitcastTo<Type::Float>(size); + const Id sizes = OpImageQuerySizeLod(types.at(coords_count - 1), image_id, lod); + const Id size = OpCompositeExtract(t_int, sizes, meta.element); + return {size, Type::Int}; + } + + Expression TextureQueryLod(Operation operation) { + const auto& meta = std::get<MetaTexture>(operation.GetMeta()); + UNIMPLEMENTED_IF(!meta.aoffi.empty()); + UNIMPLEMENTED_IF(meta.depth_compare); + + if (meta.element >= 2) { + 
UNREACHABLE_MSG("Invalid element"); + return {v_float_zero, Type::Float}; + } + const auto sampler_id = GetTextureSampler(operation); + + const Id multiplier = Constant(t_float, 256.0f); + const Id multipliers = ConstantComposite(t_float2, multiplier, multiplier); + + const Id coords = GetCoordinates(operation, Type::Float); + Id size = OpImageQueryLod(t_float2, sampler_id, coords); + size = OpFMul(t_float2, size, multipliers); + size = OpConvertFToS(t_int2, size); + return GetTextureElement(operation, size, Type::Int); } - Id TextureQueryLod(Operation operation) { + Expression TexelFetch(Operation operation) { + const auto& meta = std::get<MetaTexture>(operation.GetMeta()); + UNIMPLEMENTED_IF(meta.depth_compare); + + const Id image = GetTextureImage(operation); + const Id coords = GetCoordinates(operation, Type::Int); + Id fetch; + if (meta.lod && !meta.sampler.IsBuffer()) { + fetch = OpImageFetch(t_float4, image, coords, spv::ImageOperandsMask::Lod, + AsInt(Visit(meta.lod))); + } else { + fetch = OpImageFetch(t_float4, image, coords); + } + return GetTextureElement(operation, fetch, Type::Float); + } + + Expression TextureGradient(Operation operation) { + const auto& meta = std::get<MetaTexture>(operation.GetMeta()); + UNIMPLEMENTED_IF(!meta.aoffi.empty()); + + const Id sampler = GetTextureSampler(operation); + const Id coords = GetCoordinates(operation, Type::Float); + const auto [dx, dy] = GetDerivatives(operation); + const std::vector grad = {dx, dy}; + + static constexpr auto mask = spv::ImageOperandsMask::Grad; + const Id texture = OpImageSampleImplicitLod(t_float4, sampler, coords, mask, grad); + return GetTextureElement(operation, texture, Type::Float); + } + + Expression ImageLoad(Operation operation) { UNIMPLEMENTED(); return {}; } - Id TexelFetch(Operation operation) { - UNIMPLEMENTED(); + Expression ImageStore(Operation operation) { + const auto meta{std::get<MetaImage>(operation.GetMeta())}; + std::vector<Id> colors; + for (const auto& value : meta.values) { + colors.push_back(AsUint(Visit(value))); + } + + const Id coords = GetCoordinates(operation, Type::Int); + const Id texel = OpCompositeConstruct(t_uint4, colors); + + OpImageWrite(GetImage(operation), coords, texel, {}); return {}; } - Id ImageLoad(Operation operation) { + Expression AtomicImageAdd(Operation operation) { UNIMPLEMENTED(); return {}; } - Id ImageStore(Operation operation) { + Expression AtomicImageMin(Operation operation) { UNIMPLEMENTED(); return {}; } - Id AtomicImageAdd(Operation operation) { + Expression AtomicImageMax(Operation operation) { UNIMPLEMENTED(); return {}; } - Id AtomicImageAnd(Operation operation) { + Expression AtomicImageAnd(Operation operation) { UNIMPLEMENTED(); return {}; } - Id AtomicImageOr(Operation operation) { + Expression AtomicImageOr(Operation operation) { UNIMPLEMENTED(); return {}; } - Id AtomicImageXor(Operation operation) { + Expression AtomicImageXor(Operation operation) { UNIMPLEMENTED(); return {}; } - Id AtomicImageExchange(Operation operation) { + Expression AtomicImageExchange(Operation operation) { UNIMPLEMENTED(); return {}; } - Id Branch(Operation operation) { - const auto target = std::get_if<ImmediateNode>(&*operation[0]); - UNIMPLEMENTED_IF(!target); - - Emit(OpStore(jmp_to, Constant(t_uint, target->GetValue()))); - Emit(OpBranch(continue_label)); - inside_branch = conditional_nest_count; - if (conditional_nest_count == 0) { - Emit(OpLabel()); + Expression Branch(Operation operation) { + const auto& target = std::get<ImmediateNode>(*operation[0]); + 
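Illustrative aside, not part of the patch: as I read the surrounding code, Branch does not jump to its destination directly; it records the target label in jmp_to and branches back to the dispatcher at continue_label, which re-enters the right block. A plain C++ analogue of that trampoline, with invented block labels:

    #include <cstdint>

    // Invented labels; the real code dispatches on SPIR-V label ids.
    void RunTrampoline() {
        std::uint32_t jmp_to = 0;
        for (;;) {            // continue_label: top of the dispatch loop
            switch (jmp_to) {
            case 0:
                jmp_to = 4;   // Branch: record the target...
                continue;     // ...and fall back to the dispatcher
            case 4:
                return;       // Exit
            }
        }
    }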
OpStore(jmp_to, Constant(t_uint, target.GetValue())); + OpBranch(continue_label); + inside_branch = true; + if (!conditional_branch_set) { + AddLabel(); } return {}; } - Id BranchIndirect(Operation operation) { - const Id op_a = VisitOperand<Type::Uint>(operation, 0); + Expression BranchIndirect(Operation operation) { + const Id op_a = AsUint(Visit(operation[0])); - Emit(OpStore(jmp_to, op_a)); - Emit(OpBranch(continue_label)); - inside_branch = conditional_nest_count; - if (conditional_nest_count == 0) { - Emit(OpLabel()); + OpStore(jmp_to, op_a); + OpBranch(continue_label); + inside_branch = true; + if (!conditional_branch_set) { + AddLabel(); } return {}; } - Id PushFlowStack(Operation operation) { - const auto target = std::get_if<ImmediateNode>(&*operation[0]); - ASSERT(target); - + Expression PushFlowStack(Operation operation) { + const auto& target = std::get<ImmediateNode>(*operation[0]); const auto [flow_stack, flow_stack_top] = GetFlowStack(operation); - const Id current = Emit(OpLoad(t_uint, flow_stack_top)); - const Id next = Emit(OpIAdd(t_uint, current, Constant(t_uint, 1))); - const Id access = Emit(OpAccessChain(t_func_uint, flow_stack, current)); + const Id current = OpLoad(t_uint, flow_stack_top); + const Id next = OpIAdd(t_uint, current, Constant(t_uint, 1)); + const Id access = OpAccessChain(t_func_uint, flow_stack, current); - Emit(OpStore(access, Constant(t_uint, target->GetValue()))); - Emit(OpStore(flow_stack_top, next)); + OpStore(access, Constant(t_uint, target.GetValue())); + OpStore(flow_stack_top, next); return {}; } - Id PopFlowStack(Operation operation) { + Expression PopFlowStack(Operation operation) { const auto [flow_stack, flow_stack_top] = GetFlowStack(operation); - const Id current = Emit(OpLoad(t_uint, flow_stack_top)); - const Id previous = Emit(OpISub(t_uint, current, Constant(t_uint, 1))); - const Id access = Emit(OpAccessChain(t_func_uint, flow_stack, previous)); - const Id target = Emit(OpLoad(t_uint, access)); - - Emit(OpStore(flow_stack_top, previous)); - Emit(OpStore(jmp_to, target)); - Emit(OpBranch(continue_label)); - inside_branch = conditional_nest_count; - if (conditional_nest_count == 0) { - Emit(OpLabel()); + const Id current = OpLoad(t_uint, flow_stack_top); + const Id previous = OpISub(t_uint, current, Constant(t_uint, 1)); + const Id access = OpAccessChain(t_func_uint, flow_stack, previous); + const Id target = OpLoad(t_uint, access); + + OpStore(flow_stack_top, previous); + OpStore(jmp_to, target); + OpBranch(continue_label); + inside_branch = true; + if (!conditional_branch_set) { + AddLabel(); } return {}; } - Id PreExit() { - switch (stage) { - case ShaderType::Vertex: { - // TODO(Rodrigo): We should use VK_EXT_depth_range_unrestricted instead, but it doesn't - // seem to be working on Nvidia's drivers and Intel (mesa and blob) doesn't support it. 
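Illustrative aside, not part of the patch: the rewritten PreExit below moves the vertex-stage depth fix-up from the NDC form (z + 1) / 2 to the clip-space form (z + w) / 2, which still lands in [0, 1] after the perspective divide. A standalone check of that arithmetic (RemapDepth is a hypothetical name):

    #include <cassert>

    float RemapDepth(float z, float w) {
        return (z + w) * 0.5f; // the OpFAdd/OpFMul pair in the new PreExit
    }

    int main() {
        assert(RemapDepth(-1.0f, 1.0f) == 0.0f); // GL-style near plane maps to 0
        assert(RemapDepth(1.0f, 1.0f) == 1.0f);  // far plane stays at 1
    }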
- const Id z_pointer = AccessElement(t_out_float, per_vertex, position_index, 2u); - Id depth = Emit(OpLoad(t_float, z_pointer)); - depth = Emit(OpFAdd(t_float, depth, Constant(t_float, 1.0f))); - depth = Emit(OpFMul(t_float, depth, Constant(t_float, 0.5f))); - Emit(OpStore(z_pointer, depth)); - break; + void PreExit() { + if (stage == ShaderType::Vertex) { + const u32 position_index = out_indices.position.value(); + const Id z_pointer = AccessElement(t_out_float, out_vertex, position_index, 2U); + const Id w_pointer = AccessElement(t_out_float, out_vertex, position_index, 3U); + Id depth = OpLoad(t_float, z_pointer); + depth = OpFAdd(t_float, depth, OpLoad(t_float, w_pointer)); + depth = OpFMul(t_float, depth, Constant(t_float, 0.5f)); + OpStore(z_pointer, depth); } - case ShaderType::Fragment: { + if (stage == ShaderType::Fragment) { const auto SafeGetRegister = [&](u32 reg) { // TODO(Rodrigo): Replace with contains once C++20 releases if (const auto it = registers.find(reg); it != registers.end()) { - return Emit(OpLoad(t_float, it->second)); + return OpLoad(t_float, it->second); } - return Constant(t_float, 0.0f); + return v_float_zero; }; UNIMPLEMENTED_IF_MSG(header.ps.omap.sample_mask != 0, @@ -1107,8 +1863,8 @@ private: // TODO(Subv): Figure out how dual-source blending is configured in the Switch. for (u32 component = 0; component < 4; ++component) { if (header.ps.IsColorComponentOutputEnabled(rt, component)) { - Emit(OpStore(AccessElement(t_out_float, frag_colors.at(rt), component), - SafeGetRegister(current_reg))); + OpStore(AccessElement(t_out_float, frag_colors.at(rt), component), + SafeGetRegister(current_reg)); ++current_reg; } } @@ -1116,110 +1872,129 @@ private: if (header.ps.omap.depth) { // The depth output is always 2 registers after the last color output, and // current_reg already contains one past the last color register. 
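Illustrative aside, not part of the patch: the loop above hands out one register per enabled color component, in render-target order, so current_reg ends one past the last color register and the depth source is current_reg + 1. A standalone sketch of that walk, where enabled[rt][component] stands in for header.ps.IsColorComponentOutputEnabled:

    #include <array>
    #include <cstdint>

    std::uint32_t DepthSourceRegister(const std::array<std::array<bool, 4>, 8>& enabled) {
        std::uint32_t current_reg = 0;
        for (std::uint32_t rt = 0; rt < 8; ++rt) {
            for (std::uint32_t component = 0; component < 4; ++component) {
                if (enabled[rt][component]) {
                    ++current_reg; // one register per enabled color component
                }
            }
        }
        // current_reg is one past the last color register; depth sits one further.
        return current_reg + 1;
    }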
- Emit(OpStore(frag_depth, SafeGetRegister(current_reg + 1))); + OpStore(frag_depth, SafeGetRegister(current_reg + 1)); } - break; - } } - - return {}; } - Id Exit(Operation operation) { + Expression Exit(Operation operation) { PreExit(); - inside_branch = conditional_nest_count; - if (conditional_nest_count > 0) { - Emit(OpReturn()); + inside_branch = true; + if (conditional_branch_set) { + OpReturn(); } else { const Id dummy = OpLabel(); - Emit(OpBranch(dummy)); - Emit(dummy); - Emit(OpReturn()); - Emit(OpLabel()); + OpBranch(dummy); + AddLabel(dummy); + OpReturn(); + AddLabel(); } return {}; } - Id Discard(Operation operation) { - inside_branch = conditional_nest_count; - if (conditional_nest_count > 0) { - Emit(OpKill()); + Expression Discard(Operation operation) { + inside_branch = true; + if (conditional_branch_set) { + OpKill(); } else { const Id dummy = OpLabel(); - Emit(OpBranch(dummy)); - Emit(dummy); - Emit(OpKill()); - Emit(OpLabel()); + OpBranch(dummy); + AddLabel(dummy); + OpKill(); + AddLabel(); } return {}; } - Id EmitVertex(Operation operation) { - UNIMPLEMENTED(); + Expression EmitVertex(Operation) { + OpEmitVertex(); return {}; } - Id EndPrimitive(Operation operation) { - UNIMPLEMENTED(); + Expression EndPrimitive(Operation operation) { + OpEndPrimitive(); return {}; } - Id YNegate(Operation operation) { - UNIMPLEMENTED(); - return {}; + Expression InvocationId(Operation) { + return {OpLoad(t_int, invocation_id), Type::Int}; } - template <u32 element> - Id LocalInvocationId(Operation) { - UNIMPLEMENTED(); - return {}; + Expression YNegate(Operation) { + LOG_WARNING(Render_Vulkan, "(STUBBED)"); + return {Constant(t_float, 1.0f), Type::Float}; } template <u32 element> - Id WorkGroupId(Operation) { - UNIMPLEMENTED(); - return {}; + Expression LocalInvocationId(Operation) { + const Id id = OpLoad(t_uint3, local_invocation_id); + return {OpCompositeExtract(t_uint, id, element), Type::Uint}; } - Id BallotThread(Operation) { - UNIMPLEMENTED(); - return {}; + template <u32 element> + Expression WorkGroupId(Operation operation) { + const Id id = OpLoad(t_uint3, workgroup_id); + return {OpCompositeExtract(t_uint, id, element), Type::Uint}; } - Id VoteAll(Operation) { - UNIMPLEMENTED(); - return {}; + Expression BallotThread(Operation operation) { + const Id predicate = AsBool(Visit(operation[0])); + const Id ballot = OpSubgroupBallotKHR(t_uint4, predicate); + + if (!device.IsWarpSizePotentiallyBiggerThanGuest()) { + // Guest-like devices can just return the first index. + return {OpCompositeExtract(t_uint, ballot, 0U), Type::Uint}; + } + + // The others will have to return what is local to the current thread. + // For instance a device with a warp size of 64 will return the upper uint when the current + // thread is 38. 
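Illustrative aside, not part of the patch: with 32-bit guest ballots on a wider host warp, each thread must read the 32-bit word of the ballot that its lane falls into, which is what the shift by 5 below computes (lane 38 selects word 38 >> 5 = 1). A standalone sketch of the same selection (SelectBallotWord is a hypothetical name):

    #include <array>
    #include <cstdint>

    std::uint32_t SelectBallotWord(const std::array<std::uint32_t, 4>& ballot,
                                   std::uint32_t thread_id) {
        const std::uint32_t word = thread_id >> 5; // lane / 32, as in the hunk
        return ballot.at(word);                    // OpVectorExtractDynamic analogue
    }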
+ const Id tid = OpLoad(t_uint, thread_id); + const Id thread_index = OpShiftRightLogical(t_uint, tid, Constant(t_uint, 5)); + return {OpVectorExtractDynamic(t_uint, ballot, thread_index), Type::Uint}; } - Id VoteAny(Operation) { - UNIMPLEMENTED(); - return {}; + template <Id (Module::*func)(Id, Id)> + Expression Vote(Operation operation) { + // TODO(Rodrigo): Handle devices with different warp sizes + const Id predicate = AsBool(Visit(operation[0])); + return {(this->*func)(t_bool, predicate), Type::Bool}; } - Id VoteEqual(Operation) { - UNIMPLEMENTED(); - return {}; + Expression ThreadId(Operation) { + return {OpLoad(t_uint, thread_id), Type::Uint}; } - Id ThreadId(Operation) { - UNIMPLEMENTED(); - return {}; + Expression ShuffleIndexed(Operation operation) { + const Id value = AsFloat(Visit(operation[0])); + const Id index = AsUint(Visit(operation[1])); + return {OpSubgroupReadInvocationKHR(t_float, value, index), Type::Float}; } - Id ShuffleIndexed(Operation) { - UNIMPLEMENTED(); + Expression MemoryBarrierGL(Operation) { + const auto scope = spv::Scope::Device; + const auto semantics = + spv::MemorySemanticsMask::AcquireRelease | spv::MemorySemanticsMask::UniformMemory | + spv::MemorySemanticsMask::WorkgroupMemory | + spv::MemorySemanticsMask::AtomicCounterMemory | spv::MemorySemanticsMask::ImageMemory; + + OpMemoryBarrier(Constant(t_uint, static_cast<u32>(scope)), + Constant(t_uint, static_cast<u32>(semantics))); return {}; } - Id DeclareBuiltIn(spv::BuiltIn builtin, spv::StorageClass storage, Id type, - const std::string& name) { + Id DeclareBuiltIn(spv::BuiltIn builtin, spv::StorageClass storage, Id type, std::string name) { const Id id = OpVariable(type, storage); Decorate(id, spv::Decoration::BuiltIn, static_cast<u32>(builtin)); - AddGlobalVariable(Name(id, name)); + AddGlobalVariable(Name(id, std::move(name))); interfaces.push_back(id); return id; } + Id DeclareInputBuiltIn(spv::BuiltIn builtin, Id type, std::string name) { + return DeclareBuiltIn(builtin, spv::StorageClass::Input, type, std::move(name)); + } + bool IsRenderTargetUsed(u32 rt) const { for (u32 component = 0; component < 4; ++component) { if (header.ps.IsColorComponentOutputEnabled(rt, component)) { @@ -1237,66 +2012,148 @@ private: members.push_back(Constant(t_uint, element)); } - return Emit(OpAccessChain(pointer_type, composite, members)); + return OpAccessChain(pointer_type, composite, members); } - template <Type type> - Id VisitOperand(Operation operation, std::size_t operand_index) { - const Id value = Visit(operation[operand_index]); - - switch (type) { + Id As(Expression expr, Type wanted_type) { + switch (wanted_type) { case Type::Bool: + return AsBool(expr); case Type::Bool2: + return AsBool2(expr); case Type::Float: - return value; + return AsFloat(expr); case Type::Int: - return Emit(OpBitcast(t_int, value)); + return AsInt(expr); case Type::Uint: - return Emit(OpBitcast(t_uint, value)); + return AsUint(expr); case Type::HalfFloat: - UNIMPLEMENTED(); + return AsHalfFloat(expr); + default: + UNREACHABLE(); + return expr.id; } - UNREACHABLE(); - return value; } - template <Type type> - Id BitcastFrom(Id value) { - switch (type) { - case Type::Bool: - case Type::Bool2: + Id AsBool(Expression expr) { + ASSERT(expr.type == Type::Bool); + return expr.id; + } + + Id AsBool2(Expression expr) { + ASSERT(expr.type == Type::Bool2); + return expr.id; + } + + Id AsFloat(Expression expr) { + switch (expr.type) { case Type::Float: - return value; + return expr.id; case Type::Int: case Type::Uint: - return 
Emit(OpBitcast(t_float, value)); + return OpBitcast(t_float, expr.id); case Type::HalfFloat: - UNIMPLEMENTED(); + if (device.IsFloat16Supported()) { + return OpBitcast(t_float, expr.id); + } + return OpBitcast(t_float, OpPackHalf2x16(t_uint, expr.id)); + default: + UNREACHABLE(); + return expr.id; } - UNREACHABLE(); - return value; } - template <Type type> - Id BitcastTo(Id value) { - switch (type) { - case Type::Bool: - case Type::Bool2: + Id AsInt(Expression expr) { + switch (expr.type) { + case Type::Int: + return expr.id; + case Type::Float: + case Type::Uint: + return OpBitcast(t_int, expr.id); + case Type::HalfFloat: + if (device.IsFloat16Supported()) { + return OpBitcast(t_int, expr.id); + } + return OpPackHalf2x16(t_int, expr.id); + default: UNREACHABLE(); + return expr.id; + } + } + + Id AsUint(Expression expr) { + switch (expr.type) { + case Type::Uint: + return expr.id; case Type::Float: - return Emit(OpBitcast(t_float, value)); case Type::Int: - return Emit(OpBitcast(t_int, value)); - case Type::Uint: - return Emit(OpBitcast(t_uint, value)); + return OpBitcast(t_uint, expr.id); case Type::HalfFloat: - UNIMPLEMENTED(); + if (device.IsFloat16Supported()) { + return OpBitcast(t_uint, expr.id); + } + return OpPackHalf2x16(t_uint, expr.id); + default: + UNREACHABLE(); + return expr.id; + } + } + + Id AsHalfFloat(Expression expr) { + switch (expr.type) { + case Type::HalfFloat: + return expr.id; + case Type::Float: + case Type::Int: + case Type::Uint: + if (device.IsFloat16Supported()) { + return OpBitcast(t_half, expr.id); + } + return OpUnpackHalf2x16(t_half, AsUint(expr)); + default: + UNREACHABLE(); + return expr.id; + } + } + + Id GetHalfScalarFromFloat(Id value) { + if (device.IsFloat16Supported()) { + return OpFConvert(t_scalar_half, value); } - UNREACHABLE(); return value; } - Id GetTypeDefinition(Type type) { + Id GetFloatFromHalfScalar(Id value) { + if (device.IsFloat16Supported()) { + return OpFConvert(t_float, value); + } + return value; + } + + AttributeType GetAttributeType(u32 location) const { + if (stage != ShaderType::Vertex) { + return {Type::Float, t_in_float, t_in_float4}; + } + switch (specialization.attribute_types.at(location)) { + case Maxwell::VertexAttribute::Type::SignedNorm: + case Maxwell::VertexAttribute::Type::UnsignedNorm: + case Maxwell::VertexAttribute::Type::Float: + return {Type::Float, t_in_float, t_in_float4}; + case Maxwell::VertexAttribute::Type::SignedInt: + return {Type::Int, t_in_int, t_in_int4}; + case Maxwell::VertexAttribute::Type::UnsignedInt: + return {Type::Uint, t_in_uint, t_in_uint4}; + case Maxwell::VertexAttribute::Type::UnsignedScaled: + case Maxwell::VertexAttribute::Type::SignedScaled: + UNIMPLEMENTED(); + return {Type::Float, t_in_float, t_in_float4}; + default: + UNREACHABLE(); + return {Type::Float, t_in_float, t_in_float4}; + } + } + + Id GetTypeDefinition(Type type) const { switch (type) { case Type::Bool: return t_bool; @@ -1309,10 +2166,25 @@ private: case Type::Uint: return t_uint; case Type::HalfFloat: + return t_half; + default: + UNREACHABLE(); + return {}; + } + } + + std::array<Id, 4> GetTypeVectorDefinitionLut(Type type) const { + switch (type) { + case Type::Float: + return {nullptr, t_float2, t_float3, t_float4}; + case Type::Int: + return {nullptr, t_int2, t_int3, t_int4}; + case Type::Uint: + return {nullptr, t_uint2, t_uint3, t_uint4}; + default: UNIMPLEMENTED(); + return {}; } - UNREACHABLE(); - return {}; } std::tuple<Id, Id> CreateFlowStack() { @@ -1322,9 +2194,11 @@ private: constexpr auto storage_class = 
spv::StorageClass::Function; const Id flow_stack_type = TypeArray(t_uint, Constant(t_uint, FLOW_STACK_SIZE)); - const Id stack = Emit(OpVariable(TypePointer(storage_class, flow_stack_type), storage_class, - ConstantNull(flow_stack_type))); - const Id top = Emit(OpVariable(t_func_uint, storage_class, Constant(t_uint, 0))); + const Id stack = OpVariable(TypePointer(storage_class, flow_stack_type), storage_class, + ConstantNull(flow_stack_type)); + const Id top = OpVariable(t_func_uint, storage_class, Constant(t_uint, 0)); + AddLocalVariable(stack); + AddLocalVariable(top); return std::tie(stack, top); } @@ -1353,8 +2227,8 @@ private: &SPIRVDecompiler::Unary<&Module::OpFNegate, Type::Float>, &SPIRVDecompiler::Unary<&Module::OpFAbs, Type::Float>, &SPIRVDecompiler::Ternary<&Module::OpFClamp, Type::Float>, - &SPIRVDecompiler::FCastHalf0, - &SPIRVDecompiler::FCastHalf1, + &SPIRVDecompiler::FCastHalf<0>, + &SPIRVDecompiler::FCastHalf<1>, &SPIRVDecompiler::Binary<&Module::OpFMin, Type::Float>, &SPIRVDecompiler::Binary<&Module::OpFMax, Type::Float>, &SPIRVDecompiler::Unary<&Module::OpCos, Type::Float>, @@ -1391,6 +2265,7 @@ private: &SPIRVDecompiler::Quaternary<&Module::OpBitFieldInsert, Type::Int>, &SPIRVDecompiler::Ternary<&Module::OpBitFieldSExtract, Type::Int>, &SPIRVDecompiler::Unary<&Module::OpBitCount, Type::Int>, + &SPIRVDecompiler::Unary<&Module::OpFindSMsb, Type::Int>, &SPIRVDecompiler::Binary<&Module::OpIAdd, Type::Uint>, &SPIRVDecompiler::Binary<&Module::OpIMul, Type::Uint>, @@ -1401,7 +2276,7 @@ private: &SPIRVDecompiler::Unary<&Module::OpBitcast, Type::Uint, Type::Int>, &SPIRVDecompiler::Binary<&Module::OpShiftLeftLogical, Type::Uint>, &SPIRVDecompiler::Binary<&Module::OpShiftRightLogical, Type::Uint>, - &SPIRVDecompiler::Binary<&Module::OpShiftRightArithmetic, Type::Uint>, + &SPIRVDecompiler::Binary<&Module::OpShiftRightLogical, Type::Uint>, &SPIRVDecompiler::Binary<&Module::OpBitwiseAnd, Type::Uint>, &SPIRVDecompiler::Binary<&Module::OpBitwiseOr, Type::Uint>, &SPIRVDecompiler::Binary<&Module::OpBitwiseXor, Type::Uint>, @@ -1409,6 +2284,7 @@ private: &SPIRVDecompiler::Quaternary<&Module::OpBitFieldInsert, Type::Uint>, &SPIRVDecompiler::Ternary<&Module::OpBitFieldUExtract, Type::Uint>, &SPIRVDecompiler::Unary<&Module::OpBitCount, Type::Uint>, + &SPIRVDecompiler::Unary<&Module::OpFindUMsb, Type::Uint>, &SPIRVDecompiler::Binary<&Module::OpFAdd, Type::HalfFloat>, &SPIRVDecompiler::Binary<&Module::OpFMul, Type::HalfFloat>, @@ -1419,8 +2295,8 @@ private: &SPIRVDecompiler::HCastFloat, &SPIRVDecompiler::HUnpack, &SPIRVDecompiler::HMergeF32, - &SPIRVDecompiler::HMergeH0, - &SPIRVDecompiler::HMergeH1, + &SPIRVDecompiler::HMergeHN<0>, + &SPIRVDecompiler::HMergeHN<1>, &SPIRVDecompiler::HPack2, &SPIRVDecompiler::LogicalAssign, @@ -1428,8 +2304,9 @@ private: &SPIRVDecompiler::Binary<&Module::OpLogicalOr, Type::Bool>, &SPIRVDecompiler::Binary<&Module::OpLogicalNotEqual, Type::Bool>, &SPIRVDecompiler::Unary<&Module::OpLogicalNot, Type::Bool>, - &SPIRVDecompiler::LogicalPick2, - &SPIRVDecompiler::LogicalAnd2, + &SPIRVDecompiler::Binary<&Module::OpVectorExtractDynamic, Type::Bool, Type::Bool2, + Type::Uint>, + &SPIRVDecompiler::Unary<&Module::OpAll, Type::Bool, Type::Bool2>, &SPIRVDecompiler::Binary<&Module::OpFOrdLessThan, Type::Bool, Type::Float>, &SPIRVDecompiler::Binary<&Module::OpFOrdEqual, Type::Bool, Type::Float>, @@ -1437,7 +2314,7 @@ private: &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThan, Type::Bool, Type::Float>, &SPIRVDecompiler::Binary<&Module::OpFOrdNotEqual, Type::Bool, 
Type::Float>, &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThanEqual, Type::Bool, Type::Float>, - &SPIRVDecompiler::Unary<&Module::OpIsNan, Type::Bool>, + &SPIRVDecompiler::Unary<&Module::OpIsNan, Type::Bool, Type::Float>, &SPIRVDecompiler::Binary<&Module::OpSLessThan, Type::Bool, Type::Int>, &SPIRVDecompiler::Binary<&Module::OpIEqual, Type::Bool, Type::Int>, @@ -1453,19 +2330,19 @@ private: &SPIRVDecompiler::Binary<&Module::OpINotEqual, Type::Bool, Type::Uint>, &SPIRVDecompiler::Binary<&Module::OpUGreaterThanEqual, Type::Bool, Type::Uint>, - &SPIRVDecompiler::Binary<&Module::OpFOrdLessThan, Type::Bool, Type::HalfFloat>, - &SPIRVDecompiler::Binary<&Module::OpFOrdEqual, Type::Bool, Type::HalfFloat>, - &SPIRVDecompiler::Binary<&Module::OpFOrdLessThanEqual, Type::Bool, Type::HalfFloat>, - &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThan, Type::Bool, Type::HalfFloat>, - &SPIRVDecompiler::Binary<&Module::OpFOrdNotEqual, Type::Bool, Type::HalfFloat>, - &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThanEqual, Type::Bool, Type::HalfFloat>, + &SPIRVDecompiler::Binary<&Module::OpFOrdLessThan, Type::Bool2, Type::HalfFloat>, + &SPIRVDecompiler::Binary<&Module::OpFOrdEqual, Type::Bool2, Type::HalfFloat>, + &SPIRVDecompiler::Binary<&Module::OpFOrdLessThanEqual, Type::Bool2, Type::HalfFloat>, + &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThan, Type::Bool2, Type::HalfFloat>, + &SPIRVDecompiler::Binary<&Module::OpFOrdNotEqual, Type::Bool2, Type::HalfFloat>, + &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThanEqual, Type::Bool2, Type::HalfFloat>, // TODO(Rodrigo): Should these use the OpFUnord* variants? - &SPIRVDecompiler::Binary<&Module::OpFOrdLessThan, Type::Bool, Type::HalfFloat>, - &SPIRVDecompiler::Binary<&Module::OpFOrdEqual, Type::Bool, Type::HalfFloat>, - &SPIRVDecompiler::Binary<&Module::OpFOrdLessThanEqual, Type::Bool, Type::HalfFloat>, - &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThan, Type::Bool, Type::HalfFloat>, - &SPIRVDecompiler::Binary<&Module::OpFOrdNotEqual, Type::Bool, Type::HalfFloat>, - &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThanEqual, Type::Bool, Type::HalfFloat>, + &SPIRVDecompiler::Binary<&Module::OpFOrdLessThan, Type::Bool2, Type::HalfFloat>, + &SPIRVDecompiler::Binary<&Module::OpFOrdEqual, Type::Bool2, Type::HalfFloat>, + &SPIRVDecompiler::Binary<&Module::OpFOrdLessThanEqual, Type::Bool2, Type::HalfFloat>, + &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThan, Type::Bool2, Type::HalfFloat>, + &SPIRVDecompiler::Binary<&Module::OpFOrdNotEqual, Type::Bool2, Type::HalfFloat>, + &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThanEqual, Type::Bool2, Type::HalfFloat>, &SPIRVDecompiler::Texture, &SPIRVDecompiler::TextureLod, @@ -1473,6 +2350,7 @@ private: &SPIRVDecompiler::TextureQueryDimensions, &SPIRVDecompiler::TextureQueryLod, &SPIRVDecompiler::TexelFetch, + &SPIRVDecompiler::TextureGradient, &SPIRVDecompiler::ImageLoad, &SPIRVDecompiler::ImageStore, @@ -1492,6 +2370,7 @@ private: &SPIRVDecompiler::EmitVertex, &SPIRVDecompiler::EndPrimitive, + &SPIRVDecompiler::InvocationId, &SPIRVDecompiler::YNegate, &SPIRVDecompiler::LocalInvocationId<0>, &SPIRVDecompiler::LocalInvocationId<1>, @@ -1501,12 +2380,14 @@ private: &SPIRVDecompiler::WorkGroupId<2>, &SPIRVDecompiler::BallotThread, - &SPIRVDecompiler::VoteAll, - &SPIRVDecompiler::VoteAny, - &SPIRVDecompiler::VoteEqual, + &SPIRVDecompiler::Vote<&Module::OpSubgroupAllKHR>, + &SPIRVDecompiler::Vote<&Module::OpSubgroupAnyKHR>, + &SPIRVDecompiler::Vote<&Module::OpSubgroupAllEqualKHR>, &SPIRVDecompiler::ThreadId, 
&SPIRVDecompiler::ShuffleIndexed, + + &SPIRVDecompiler::MemoryBarrierGL, }; static_assert(operation_decompilers.size() == static_cast<std::size_t>(OperationCode::Amount)); @@ -1514,8 +2395,7 @@ private: const ShaderIR& ir; const ShaderType stage; const Tegra::Shader::Header header; - u64 conditional_nest_count{}; - u64 inside_branch{}; + const Specialization& specialization; const Id t_void = Name(TypeVoid(), "void"); @@ -1543,20 +2423,28 @@ private: const Id t_func_uint = Name(TypePointer(spv::StorageClass::Function, t_uint), "func_uint"); const Id t_in_bool = Name(TypePointer(spv::StorageClass::Input, t_bool), "in_bool"); + const Id t_in_int = Name(TypePointer(spv::StorageClass::Input, t_int), "in_int"); + const Id t_in_int4 = Name(TypePointer(spv::StorageClass::Input, t_int4), "in_int4"); const Id t_in_uint = Name(TypePointer(spv::StorageClass::Input, t_uint), "in_uint"); + const Id t_in_uint3 = Name(TypePointer(spv::StorageClass::Input, t_uint3), "in_uint3"); + const Id t_in_uint4 = Name(TypePointer(spv::StorageClass::Input, t_uint4), "in_uint4"); const Id t_in_float = Name(TypePointer(spv::StorageClass::Input, t_float), "in_float"); + const Id t_in_float2 = Name(TypePointer(spv::StorageClass::Input, t_float2), "in_float2"); + const Id t_in_float3 = Name(TypePointer(spv::StorageClass::Input, t_float3), "in_float3"); const Id t_in_float4 = Name(TypePointer(spv::StorageClass::Input, t_float4), "in_float4"); + const Id t_out_int = Name(TypePointer(spv::StorageClass::Output, t_int), "out_int"); + const Id t_out_float = Name(TypePointer(spv::StorageClass::Output, t_float), "out_float"); const Id t_out_float4 = Name(TypePointer(spv::StorageClass::Output, t_float4), "out_float4"); const Id t_cbuf_float = TypePointer(spv::StorageClass::Uniform, t_float); const Id t_cbuf_std140 = Decorate( - Name(TypeArray(t_float4, Constant(t_uint, MAX_CONSTBUFFER_ELEMENTS)), "CbufStd140Array"), - spv::Decoration::ArrayStride, 16u); + Name(TypeArray(t_float4, Constant(t_uint, MaxConstBufferElements)), "CbufStd140Array"), + spv::Decoration::ArrayStride, 16U); const Id t_cbuf_scalar = Decorate( - Name(TypeArray(t_float, Constant(t_uint, MAX_CONSTBUFFER_FLOATS)), "CbufScalarArray"), - spv::Decoration::ArrayStride, 4u); + Name(TypeArray(t_float, Constant(t_uint, MaxConstBufferFloats)), "CbufScalarArray"), + spv::Decoration::ArrayStride, 4U); const Id t_cbuf_std140_struct = MemberDecorate( Decorate(TypeStruct(t_cbuf_std140), spv::Decoration::Block), 0, spv::Decoration::Offset, 0); const Id t_cbuf_scalar_struct = MemberDecorate( @@ -1564,28 +2452,43 @@ private: const Id t_cbuf_std140_ubo = TypePointer(spv::StorageClass::Uniform, t_cbuf_std140_struct); const Id t_cbuf_scalar_ubo = TypePointer(spv::StorageClass::Uniform, t_cbuf_scalar_struct); + Id t_smem_uint{}; + const Id t_gmem_float = TypePointer(spv::StorageClass::StorageBuffer, t_float); const Id t_gmem_array = - Name(Decorate(TypeRuntimeArray(t_float), spv::Decoration::ArrayStride, 4u), "GmemArray"); + Name(Decorate(TypeRuntimeArray(t_float), spv::Decoration::ArrayStride, 4U), "GmemArray"); const Id t_gmem_struct = MemberDecorate( Decorate(TypeStruct(t_gmem_array), spv::Decoration::Block), 0, spv::Decoration::Offset, 0); const Id t_gmem_ssbo = TypePointer(spv::StorageClass::StorageBuffer, t_gmem_struct); const Id v_float_zero = Constant(t_float, 0.0f); + const Id v_float_one = Constant(t_float, 1.0f); + + // Nvidia uses these defaults for varyings (e.g. 
position and generic attributes) + const Id v_varying_default = + ConstantComposite(t_float4, v_float_zero, v_float_zero, v_float_zero, v_float_one); + const Id v_true = ConstantTrue(t_bool); const Id v_false = ConstantFalse(t_bool); - Id per_vertex{}; + Id t_scalar_half{}; + Id t_half{}; + + Id out_vertex{}; + Id in_vertex{}; std::map<u32, Id> registers; std::map<Tegra::Shader::Pred, Id> predicates; std::map<u32, Id> flow_variables; Id local_memory{}; + Id shared_memory{}; std::array<Id, INTERNAL_FLAGS_COUNT> internal_flags{}; std::map<Attribute::Index, Id> input_attributes; std::map<Attribute::Index, Id> output_attributes; std::map<u32, Id> constant_buffers; std::map<GlobalMemoryBase, Id> global_buffers; - std::map<u32, SamplerImage> sampler_images; + std::map<u32, TexelBuffer> texel_buffers; + std::map<u32, SampledImage> sampled_images; + std::map<u32, StorageImage> images; Id instance_index{}; Id vertex_index{}; @@ -1593,18 +2496,20 @@ private: Id frag_depth{}; Id frag_coord{}; Id front_facing{}; - - u32 position_index{}; - u32 point_size_index{}; - u32 clip_distances_index{}; + Id point_coord{}; + Id tess_level_outer{}; + Id tess_level_inner{}; + Id tess_coord{}; + Id invocation_id{}; + Id workgroup_id{}; + Id local_invocation_id{}; + Id thread_id{}; + + VertexIndices in_indices; + VertexIndices out_indices; std::vector<Id> interfaces; - u32 const_buffers_base_binding{}; - u32 global_buffers_base_binding{}; - u32 samplers_base_binding{}; - - Id execute_function{}; Id jmp_to{}; Id ssy_flow_stack_top{}; Id pbk_flow_stack_top{}; @@ -1612,6 +2517,9 @@ private: Id pbk_flow_stack{}; Id continue_label{}; std::map<u32, Id> labels; + + bool conditional_branch_set{}; + bool inside_branch{}; }; class ExprDecompiler { @@ -1622,25 +2530,25 @@ public: const Id type_def = decomp.GetTypeDefinition(Type::Bool); const Id op1 = Visit(expr.operand1); const Id op2 = Visit(expr.operand2); - return decomp.Emit(decomp.OpLogicalAnd(type_def, op1, op2)); + return decomp.OpLogicalAnd(type_def, op1, op2); } Id operator()(const ExprOr& expr) { const Id type_def = decomp.GetTypeDefinition(Type::Bool); const Id op1 = Visit(expr.operand1); const Id op2 = Visit(expr.operand2); - return decomp.Emit(decomp.OpLogicalOr(type_def, op1, op2)); + return decomp.OpLogicalOr(type_def, op1, op2); } Id operator()(const ExprNot& expr) { const Id type_def = decomp.GetTypeDefinition(Type::Bool); const Id op1 = Visit(expr.operand1); - return decomp.Emit(decomp.OpLogicalNot(type_def, op1)); + return decomp.OpLogicalNot(type_def, op1); } Id operator()(const ExprPredicate& expr) { const auto pred = static_cast<Tegra::Shader::Pred>(expr.predicate); - return decomp.Emit(decomp.OpLoad(decomp.t_bool, decomp.predicates.at(pred))); + return decomp.OpLoad(decomp.t_bool, decomp.predicates.at(pred)); } Id operator()(const ExprCondCode& expr) { @@ -1662,12 +2570,15 @@ public: } } else if (const auto flag = std::get_if<InternalFlagNode>(&*cc)) { target = decomp.internal_flags.at(static_cast<u32>(flag->GetFlag())); + } else { + UNREACHABLE(); } - return decomp.Emit(decomp.OpLoad(decomp.t_bool, target)); + + return decomp.OpLoad(decomp.t_bool, target); } Id operator()(const ExprVar& expr) { - return decomp.Emit(decomp.OpLoad(decomp.t_bool, decomp.flow_variables.at(expr.var_index))); + return decomp.OpLoad(decomp.t_bool, decomp.flow_variables.at(expr.var_index)); } Id operator()(const ExprBoolean& expr) { @@ -1676,9 +2587,9 @@ public: Id operator()(const ExprGprEqual& expr) { const Id target = decomp.Constant(decomp.t_uint, expr.value); - const Id 
gpr = decomp.BitcastTo<Type::Uint>( - decomp.Emit(decomp.OpLoad(decomp.t_float, decomp.registers.at(expr.gpr)))); - return decomp.Emit(decomp.OpLogicalEqual(decomp.t_uint, gpr, target)); + Id gpr = decomp.OpLoad(decomp.t_float, decomp.registers.at(expr.gpr)); + gpr = decomp.OpBitcast(decomp.t_uint, gpr); + return decomp.OpLogicalEqual(decomp.t_uint, gpr, target); } Id Visit(const Expr& node) { @@ -1706,16 +2617,16 @@ public: const Id condition = expr_parser.Visit(ast.condition); const Id then_label = decomp.OpLabel(); const Id endif_label = decomp.OpLabel(); - decomp.Emit(decomp.OpSelectionMerge(endif_label, spv::SelectionControlMask::MaskNone)); - decomp.Emit(decomp.OpBranchConditional(condition, then_label, endif_label)); - decomp.Emit(then_label); + decomp.OpSelectionMerge(endif_label, spv::SelectionControlMask::MaskNone); + decomp.OpBranchConditional(condition, then_label, endif_label); + decomp.AddLabel(then_label); ASTNode current = ast.nodes.GetFirst(); while (current) { Visit(current); current = current->GetNext(); } - decomp.Emit(decomp.OpBranch(endif_label)); - decomp.Emit(endif_label); + decomp.OpBranch(endif_label); + decomp.AddLabel(endif_label); } void operator()([[maybe_unused]] const ASTIfElse& ast) { @@ -1733,7 +2644,7 @@ public: void operator()(const ASTVarSet& ast) { ExprDecompiler expr_parser{decomp}; const Id condition = expr_parser.Visit(ast.condition); - decomp.Emit(decomp.OpStore(decomp.flow_variables.at(ast.index), condition)); + decomp.OpStore(decomp.flow_variables.at(ast.index), condition); } void operator()([[maybe_unused]] const ASTLabel& ast) { @@ -1750,12 +2661,11 @@ public: const Id loop_start_block = decomp.OpLabel(); const Id loop_end_block = decomp.OpLabel(); current_loop_exit = endloop_label; - decomp.Emit(decomp.OpBranch(loop_label)); - decomp.Emit(loop_label); - decomp.Emit( - decomp.OpLoopMerge(endloop_label, loop_end_block, spv::LoopControlMask::MaskNone)); - decomp.Emit(decomp.OpBranch(loop_start_block)); - decomp.Emit(loop_start_block); + decomp.OpBranch(loop_label); + decomp.AddLabel(loop_label); + decomp.OpLoopMerge(endloop_label, loop_end_block, spv::LoopControlMask::MaskNone); + decomp.OpBranch(loop_start_block); + decomp.AddLabel(loop_start_block); ASTNode current = ast.nodes.GetFirst(); while (current) { Visit(current); @@ -1763,8 +2673,8 @@ public: } ExprDecompiler expr_parser{decomp}; const Id condition = expr_parser.Visit(ast.condition); - decomp.Emit(decomp.OpBranchConditional(condition, loop_label, endloop_label)); - decomp.Emit(endloop_label); + decomp.OpBranchConditional(condition, loop_label, endloop_label); + decomp.AddLabel(endloop_label); } void operator()(const ASTReturn& ast) { @@ -1773,27 +2683,27 @@ public: const Id condition = expr_parser.Visit(ast.condition); const Id then_label = decomp.OpLabel(); const Id endif_label = decomp.OpLabel(); - decomp.Emit(decomp.OpSelectionMerge(endif_label, spv::SelectionControlMask::MaskNone)); - decomp.Emit(decomp.OpBranchConditional(condition, then_label, endif_label)); - decomp.Emit(then_label); + decomp.OpSelectionMerge(endif_label, spv::SelectionControlMask::MaskNone); + decomp.OpBranchConditional(condition, then_label, endif_label); + decomp.AddLabel(then_label); if (ast.kills) { - decomp.Emit(decomp.OpKill()); + decomp.OpKill(); } else { decomp.PreExit(); - decomp.Emit(decomp.OpReturn()); + decomp.OpReturn(); } - decomp.Emit(endif_label); + decomp.AddLabel(endif_label); } else { const Id next_block = decomp.OpLabel(); - decomp.Emit(decomp.OpBranch(next_block)); - 
decomp.Emit(next_block); + decomp.OpBranch(next_block); + decomp.AddLabel(next_block); if (ast.kills) { - decomp.Emit(decomp.OpKill()); + decomp.OpKill(); } else { decomp.PreExit(); - decomp.Emit(decomp.OpReturn()); + decomp.OpReturn(); } - decomp.Emit(decomp.OpLabel()); + decomp.AddLabel(decomp.OpLabel()); } } @@ -1803,17 +2713,17 @@ public: const Id condition = expr_parser.Visit(ast.condition); const Id then_label = decomp.OpLabel(); const Id endif_label = decomp.OpLabel(); - decomp.Emit(decomp.OpSelectionMerge(endif_label, spv::SelectionControlMask::MaskNone)); - decomp.Emit(decomp.OpBranchConditional(condition, then_label, endif_label)); - decomp.Emit(then_label); - decomp.Emit(decomp.OpBranch(current_loop_exit)); - decomp.Emit(endif_label); + decomp.OpSelectionMerge(endif_label, spv::SelectionControlMask::MaskNone); + decomp.OpBranchConditional(condition, then_label, endif_label); + decomp.AddLabel(then_label); + decomp.OpBranch(current_loop_exit); + decomp.AddLabel(endif_label); } else { const Id next_block = decomp.OpLabel(); - decomp.Emit(decomp.OpBranch(next_block)); - decomp.Emit(next_block); - decomp.Emit(decomp.OpBranch(current_loop_exit)); - decomp.Emit(decomp.OpLabel()); + decomp.OpBranch(next_block); + decomp.AddLabel(next_block); + decomp.OpBranch(current_loop_exit); + decomp.AddLabel(decomp.OpLabel()); } } @@ -1834,20 +2744,51 @@ void SPIRVDecompiler::DecompileAST() { flow_variables.emplace(i, AddGlobalVariable(id)); } + DefinePrologue(); + const ASTNode program = ir.GetASTProgram(); ASTDecompiler decompiler{*this}; decompiler.Visit(program); const Id next_block = OpLabel(); - Emit(OpBranch(next_block)); - Emit(next_block); + OpBranch(next_block); + AddLabel(next_block); +} + +} // Anonymous namespace + +ShaderEntries GenerateShaderEntries(const VideoCommon::Shader::ShaderIR& ir) { + ShaderEntries entries; + for (const auto& cbuf : ir.GetConstantBuffers()) { + entries.const_buffers.emplace_back(cbuf.second, cbuf.first); + } + for (const auto& [base, usage] : ir.GetGlobalMemory()) { + entries.global_buffers.emplace_back(base.cbuf_index, base.cbuf_offset, usage.is_written); + } + for (const auto& sampler : ir.GetSamplers()) { + if (sampler.IsBuffer()) { + entries.texel_buffers.emplace_back(sampler); + } else { + entries.samplers.emplace_back(sampler); + } + } + for (const auto& image : ir.GetImages()) { + entries.images.emplace_back(image); + } + for (const auto& attribute : ir.GetInputAttributes()) { + if (IsGenericAttribute(attribute)) { + entries.attributes.insert(GetGenericAttributeLocation(attribute)); + } + } + entries.clip_distances = ir.GetClipDistances(); + entries.shader_length = ir.GetLength(); + entries.uses_warps = ir.UsesWarps(); + return entries; } -DecompilerResult Decompile(const VKDevice& device, const VideoCommon::Shader::ShaderIR& ir, - ShaderType stage) { - auto decompiler = std::make_unique<SPIRVDecompiler>(device, ir, stage); - decompiler->Decompile(); - return {std::move(decompiler), decompiler->GetShaderEntries()}; +std::vector<u32> Decompile(const VKDevice& device, const VideoCommon::Shader::ShaderIR& ir, + ShaderType stage, const Specialization& specialization) { + return SPIRVDecompiler(device, ir, stage, specialization).Assemble(); } -} // namespace Vulkan::VKShader +} // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.h b/src/video_core/renderer_vulkan/vk_shader_decompiler.h index 203fc00d0..2b01321b6 100644 --- a/src/video_core/renderer_vulkan/vk_shader_decompiler.h +++ 
b/src/video_core/renderer_vulkan/vk_shader_decompiler.h @@ -5,29 +5,28 @@ #pragma once #include <array> +#include <bitset> #include <memory> #include <set> +#include <type_traits> #include <utility> #include <vector> -#include <sirit/sirit.h> - #include "common/common_types.h" #include "video_core/engines/maxwell_3d.h" +#include "video_core/engines/shader_type.h" #include "video_core/shader/shader_ir.h" -namespace VideoCommon::Shader { -class ShaderIR; -} - namespace Vulkan { class VKDevice; } -namespace Vulkan::VKShader { +namespace Vulkan { using Maxwell = Tegra::Engines::Maxwell3D::Regs; +using TexelBufferEntry = VideoCommon::Shader::Sampler; using SamplerEntry = VideoCommon::Shader::Sampler; +using ImageEntry = VideoCommon::Shader::Image; constexpr u32 DESCRIPTOR_SET = 0; @@ -46,39 +45,74 @@ private: class GlobalBufferEntry { public: - explicit GlobalBufferEntry(u32 cbuf_index, u32 cbuf_offset) - : cbuf_index{cbuf_index}, cbuf_offset{cbuf_offset} {} + constexpr explicit GlobalBufferEntry(u32 cbuf_index, u32 cbuf_offset, bool is_written) + : cbuf_index{cbuf_index}, cbuf_offset{cbuf_offset}, is_written{is_written} {} - u32 GetCbufIndex() const { + constexpr u32 GetCbufIndex() const { return cbuf_index; } - u32 GetCbufOffset() const { + constexpr u32 GetCbufOffset() const { return cbuf_offset; } + constexpr bool IsWritten() const { + return is_written; + } + private: u32 cbuf_index{}; u32 cbuf_offset{}; + bool is_written{}; }; struct ShaderEntries { - u32 const_buffers_base_binding{}; - u32 global_buffers_base_binding{}; - u32 samplers_base_binding{}; + u32 NumBindings() const { + return static_cast<u32>(const_buffers.size() + global_buffers.size() + + texel_buffers.size() + samplers.size() + images.size()); + } + std::vector<ConstBufferEntry> const_buffers; std::vector<GlobalBufferEntry> global_buffers; + std::vector<TexelBufferEntry> texel_buffers; std::vector<SamplerEntry> samplers; + std::vector<ImageEntry> images; std::set<u32> attributes; std::array<bool, Maxwell::NumClipDistances> clip_distances{}; std::size_t shader_length{}; - Sirit::Id entry_function{}; - std::vector<Sirit::Id> interfaces; + bool uses_warps{}; +}; + +struct Specialization final { + u32 base_binding{}; + + // Compute specific + std::array<u32, 3> workgroup_size{}; + u32 shared_memory_size{}; + + // Graphics specific + Maxwell::PrimitiveTopology primitive_topology{}; + std::optional<float> point_size{}; + std::array<Maxwell::VertexAttribute::Type, Maxwell::NumVertexAttributes> attribute_types{}; + + // Tessellation specific + struct { + Maxwell::TessellationPrimitive primitive{}; + Maxwell::TessellationSpacing spacing{}; + bool clockwise{}; + } tessellation; +}; +// Old gcc versions don't consider this trivially copyable. 
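
Aside: the Specialization struct above is filled from Maxwell register state and baked into the generated SPIR-V, so each distinct combination implies its own shader variant; the comment above and the commented-out static_assert just below document that the struct is meant to stay trivially copyable. A minimal sketch of why that property is convenient, assuming a hypothetical VariantKey/variant_cache (these names are illustrative, not part of this patch) that hashes and compares the key byte-wise:

    #include <array>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <unordered_map>
    #include <vector>

    struct VariantKey { // hypothetical stand-in for Specialization
        std::uint32_t base_binding = 0;
        std::array<std::uint32_t, 3> workgroup_size{};
        std::uint32_t shared_memory_size = 0;

        bool operator==(const VariantKey& rhs) const {
            // Byte-wise compare is only sound for trivially copyable,
            // padding-free types.
            return std::memcmp(this, &rhs, sizeof *this) == 0;
        }
    };

    struct VariantKeyHash {
        std::size_t operator()(const VariantKey& key) const {
            // FNV-1a over the raw bytes; the same trivial-copyability
            // requirement applies here.
            std::size_t hash = 0xcbf29ce484222325ULL;
            const auto* bytes = reinterpret_cast<const unsigned char*>(&key);
            for (std::size_t i = 0; i < sizeof key; ++i) {
                hash = (hash ^ bytes[i]) * 0x100000001b3ULL;
            }
            return hash;
        }
    };

    // One compiled SPIR-V module per distinct specialization.
    std::unordered_map<VariantKey, std::vector<std::uint32_t>, VariantKeyHash> variant_cache;
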
+// static_assert(std::is_trivially_copyable_v<Specialization>); + +struct SPIRVShader { + std::vector<u32> code; + ShaderEntries entries; }; -using DecompilerResult = std::pair<std::unique_ptr<Sirit::Module>, ShaderEntries>; +ShaderEntries GenerateShaderEntries(const VideoCommon::Shader::ShaderIR& ir); -DecompilerResult Decompile(const VKDevice& device, const VideoCommon::Shader::ShaderIR& ir, - Tegra::Engines::ShaderType stage); +std::vector<u32> Decompile(const VKDevice& device, const VideoCommon::Shader::ShaderIR& ir, + Tegra::Engines::ShaderType stage, const Specialization& specialization); -} // namespace Vulkan::VKShader +} // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_swapchain.cpp b/src/video_core/renderer_vulkan/vk_swapchain.cpp index 08279e562..ebc68f030 100644 --- a/src/video_core/renderer_vulkan/vk_swapchain.cpp +++ b/src/video_core/renderer_vulkan/vk_swapchain.cpp @@ -19,12 +19,18 @@ namespace Vulkan { namespace { -vk::SurfaceFormatKHR ChooseSwapSurfaceFormat(const std::vector<vk::SurfaceFormatKHR>& formats) { + +vk::SurfaceFormatKHR ChooseSwapSurfaceFormat(const std::vector<vk::SurfaceFormatKHR>& formats, + bool srgb) { if (formats.size() == 1 && formats[0].format == vk::Format::eUndefined) { - return {vk::Format::eB8G8R8A8Unorm, vk::ColorSpaceKHR::eSrgbNonlinear}; + vk::SurfaceFormatKHR format; + format.format = vk::Format::eB8G8R8A8Unorm; + format.colorSpace = vk::ColorSpaceKHR::eSrgbNonlinear; + return format; } - const auto& found = std::find_if(formats.begin(), formats.end(), [](const auto& format) { - return format.format == vk::Format::eB8G8R8A8Unorm && + const auto& found = std::find_if(formats.begin(), formats.end(), [srgb](const auto& format) { + const auto request_format = srgb ? vk::Format::eB8G8R8A8Srgb : vk::Format::eB8G8R8A8Unorm; + return format.format == request_format && format.colorSpace == vk::ColorSpaceKHR::eSrgbNonlinear; }); return found != formats.end() ? 
*found : formats[0]; @@ -51,28 +57,26 @@ vk::Extent2D ChooseSwapExtent(const vk::SurfaceCapabilitiesKHR& capabilities, u3 std::min(capabilities.maxImageExtent.height, extent.height)); return extent; } -} // namespace + +} // Anonymous namespace VKSwapchain::VKSwapchain(vk::SurfaceKHR surface, const VKDevice& device) : surface{surface}, device{device} {} VKSwapchain::~VKSwapchain() = default; -void VKSwapchain::Create(u32 width, u32 height) { - const auto dev = device.GetLogical(); +void VKSwapchain::Create(u32 width, u32 height, bool srgb) { const auto& dld = device.GetDispatchLoader(); const auto physical_device = device.GetPhysical(); - - const vk::SurfaceCapabilitiesKHR capabilities{ - physical_device.getSurfaceCapabilitiesKHR(surface, dld)}; + const auto capabilities{physical_device.getSurfaceCapabilitiesKHR(surface, dld)}; if (capabilities.maxImageExtent.width == 0 || capabilities.maxImageExtent.height == 0) { return; } - dev.waitIdle(dld); + device.GetLogical().waitIdle(dld); Destroy(); - CreateSwapchain(capabilities, width, height); + CreateSwapchain(capabilities, width, height, srgb); CreateSemaphores(); CreateImageViews(); @@ -107,7 +111,7 @@ bool VKSwapchain::Present(vk::Semaphore render_semaphore, VKFence& fence) { break; case vk::Result::eErrorOutOfDateKHR: if (current_width > 0 && current_height > 0) { - Create(current_width, current_height); + Create(current_width, current_height, current_srgb); recreated = true; } break; @@ -129,23 +133,19 @@ bool VKSwapchain::HasFramebufferChanged(const Layout::FramebufferLayout& framebu } void VKSwapchain::CreateSwapchain(const vk::SurfaceCapabilitiesKHR& capabilities, u32 width, - u32 height) { - const auto dev{device.GetLogical()}; + u32 height, bool srgb) { const auto& dld{device.GetDispatchLoader()}; const auto physical_device{device.GetPhysical()}; + const auto formats{physical_device.getSurfaceFormatsKHR(surface, dld)}; + const auto present_modes{physical_device.getSurfacePresentModesKHR(surface, dld)}; - const std::vector<vk::SurfaceFormatKHR> formats{ - physical_device.getSurfaceFormatsKHR(surface, dld)}; - - const std::vector<vk::PresentModeKHR> present_modes{ - physical_device.getSurfacePresentModesKHR(surface, dld)}; - - const vk::SurfaceFormatKHR surface_format{ChooseSwapSurfaceFormat(formats)}; + const vk::SurfaceFormatKHR surface_format{ChooseSwapSurfaceFormat(formats, srgb)}; const vk::PresentModeKHR present_mode{ChooseSwapPresentMode(present_modes)}; extent = ChooseSwapExtent(capabilities, width, height); current_width = extent.width; current_height = extent.height; + current_srgb = srgb; u32 requested_image_count{capabilities.minImageCount + 1}; if (capabilities.maxImageCount > 0 && requested_image_count > capabilities.maxImageCount) { @@ -169,6 +169,7 @@ void VKSwapchain::CreateSwapchain(const vk::SurfaceCapabilitiesKHR& capabilities swapchain_ci.imageSharingMode = vk::SharingMode::eExclusive; } + const auto dev{device.GetLogical()}; swapchain = dev.createSwapchainKHRUnique(swapchain_ci, nullptr, dld); images = dev.getSwapchainImagesKHR(*swapchain, dld); diff --git a/src/video_core/renderer_vulkan/vk_swapchain.h b/src/video_core/renderer_vulkan/vk_swapchain.h index 2ad84f185..a1e7938d2 100644 --- a/src/video_core/renderer_vulkan/vk_swapchain.h +++ b/src/video_core/renderer_vulkan/vk_swapchain.h @@ -24,7 +24,7 @@ public: ~VKSwapchain(); /// Creates (or recreates) the swapchain with a given size. 
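
Aside: the signature change below threads an sRGB flag through swapchain creation; ChooseSwapSurfaceFormat in the vk_swapchain.cpp hunk above then prefers eB8G8R8A8Srgb over eB8G8R8A8Unorm. A standalone restatement of that selection logic, using stand-in enums so it compiles without vulkan.hpp:

    #include <algorithm>
    #include <vector>

    enum class Format { Undefined, B8G8R8A8Unorm, B8G8R8A8Srgb };
    enum class ColorSpace { SrgbNonlinear };

    struct SurfaceFormat {
        Format format;
        ColorSpace color_space;
    };

    // Prefer the requested transfer function; fall back to the first advertised
    // format. Vulkan guarantees at least one entry for a supported surface.
    SurfaceFormat ChooseFormat(const std::vector<SurfaceFormat>& formats, bool srgb) {
        if (formats.size() == 1 && formats[0].format == Format::Undefined) {
            return {Format::B8G8R8A8Unorm, ColorSpace::SrgbNonlinear};
        }
        const Format wanted = srgb ? Format::B8G8R8A8Srgb : Format::B8G8R8A8Unorm;
        const auto it = std::find_if(formats.begin(), formats.end(), [wanted](const SurfaceFormat& f) {
            return f.format == wanted && f.color_space == ColorSpace::SrgbNonlinear;
        });
        return it != formats.end() ? *it : formats[0];
    }
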
- void Create(u32 width, u32 height); + void Create(u32 width, u32 height, bool srgb); /// Acquires the next image in the swapchain, waits as needed. void AcquireNextImage(); @@ -60,8 +60,13 @@ public: return image_format; } + bool GetSrgbState() const { + return current_srgb; + } + private: - void CreateSwapchain(const vk::SurfaceCapabilitiesKHR& capabilities, u32 width, u32 height); + void CreateSwapchain(const vk::SurfaceCapabilitiesKHR& capabilities, u32 width, u32 height, + bool srgb); void CreateSemaphores(); void CreateImageViews(); @@ -87,6 +92,7 @@ private: u32 current_width{}; u32 current_height{}; + bool current_srgb{}; }; } // namespace Vulkan diff --git a/src/video_core/shader/decode/arithmetic_integer.cpp b/src/video_core/shader/decode/arithmetic_integer.cpp index a33d242e9..371fae127 100644 --- a/src/video_core/shader/decode/arithmetic_integer.cpp +++ b/src/video_core/shader/decode/arithmetic_integer.cpp @@ -130,6 +130,25 @@ u32 ShaderIR::DecodeArithmeticInteger(NodeBlock& bb, u32 pc) { SetRegister(bb, instr.gpr0, value); break; } + case OpCode::Id::FLO_R: + case OpCode::Id::FLO_C: + case OpCode::Id::FLO_IMM: { + Node value; + if (instr.flo.invert) { + op_b = Operation(OperationCode::IBitwiseNot, NO_PRECISE, std::move(op_b)); + } + if (instr.flo.is_signed) { + value = Operation(OperationCode::IBitMSB, NO_PRECISE, std::move(op_b)); + } else { + value = Operation(OperationCode::UBitMSB, NO_PRECISE, std::move(op_b)); + } + if (instr.flo.sh) { + value = + Operation(OperationCode::UBitwiseXor, NO_PRECISE, std::move(value), Immediate(31)); + } + SetRegister(bb, instr.gpr0, std::move(value)); + break; + } case OpCode::Id::SEL_C: case OpCode::Id::SEL_R: case OpCode::Id::SEL_IMM: { diff --git a/src/video_core/shader/decode/memory.cpp b/src/video_core/shader/decode/memory.cpp index 335d78146..78e92f52e 100644 --- a/src/video_core/shader/decode/memory.cpp +++ b/src/video_core/shader/decode/memory.cpp @@ -21,6 +21,7 @@ using Tegra::Shader::OpCode; using Tegra::Shader::Register; namespace { + u32 GetUniformTypeElementsCount(Tegra::Shader::UniformType uniform_type) { switch (uniform_type) { case Tegra::Shader::UniformType::Single: @@ -35,6 +36,7 @@ u32 GetUniformTypeElementsCount(Tegra::Shader::UniformType uniform_type) { return 1; } } + } // Anonymous namespace u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) { @@ -196,28 +198,28 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) { UNIMPLEMENTED_IF_MSG((instr.attribute.fmt20.immediate.Value() % sizeof(u32)) != 0, "Unaligned attribute loads are not supported"); - u64 next_element = instr.attribute.fmt20.element; - auto next_index = static_cast<u64>(instr.attribute.fmt20.index.Value()); + u64 element = instr.attribute.fmt20.element; + auto index = static_cast<u64>(instr.attribute.fmt20.index.Value()); - const auto StoreNextElement = [&](u32 reg_offset) { - const auto dest = GetOutputAttribute(static_cast<Attribute::Index>(next_index), - next_element, GetRegister(instr.gpr39)); + const u32 num_words = static_cast<u32>(instr.attribute.fmt20.size.Value()) + 1; + for (u32 reg_offset = 0; reg_offset < num_words; ++reg_offset) { + Node dest; + if (instr.attribute.fmt20.patch) { + const u32 offset = static_cast<u32>(index) * 4 + static_cast<u32>(element); + dest = MakeNode<PatchNode>(offset); + } else { + dest = GetOutputAttribute(static_cast<Attribute::Index>(index), element, + GetRegister(instr.gpr39)); + } const auto src = GetRegister(instr.gpr0.Value() + reg_offset); bb.push_back(Operation(OperationCode::Assign, dest, src)); - // Load the 
next attribute element into the following register. If the element - // to load goes beyond the vec4 size, load the first element of the next - // attribute. - next_element = (next_element + 1) % 4; - next_index = next_index + (next_element == 0 ? 1 : 0); - }; - - const u32 num_words = static_cast<u32>(instr.attribute.fmt20.size.Value()) + 1; - for (u32 reg_offset = 0; reg_offset < num_words; ++reg_offset) { - StoreNextElement(reg_offset); + // Load the next attribute element into the following register. If the element to load + // goes beyond the vec4 size, load the first element of the next attribute. + element = (element + 1) % 4; + index = index + (element == 0 ? 1 : 0); } - break; } case OpCode::Id::ST_L: diff --git a/src/video_core/shader/decode/other.cpp b/src/video_core/shader/decode/other.cpp index 17cd45d3c..7321698b2 100644 --- a/src/video_core/shader/decode/other.cpp +++ b/src/video_core/shader/decode/other.cpp @@ -69,6 +69,8 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) { case OpCode::Id::MOV_SYS: { const Node value = [this, instr] { switch (instr.sys20) { + case SystemVariable::InvocationId: + return Operation(OperationCode::InvocationId); case SystemVariable::Ydirection: return Operation(OperationCode::YNegate); case SystemVariable::InvocationInfo: @@ -255,6 +257,12 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) { SetRegister(bb, instr.gpr0, GetRegister(instr.gpr8)); break; } + case OpCode::Id::MEMBAR: { + UNIMPLEMENTED_IF(instr.membar.type != Tegra::Shader::MembarType::GL); + UNIMPLEMENTED_IF(instr.membar.unknown != Tegra::Shader::MembarUnknown::Default); + bb.push_back(Operation(OperationCode::MemoryBarrierGL)); + break; + } case OpCode::Id::DEPBAR: { LOG_DEBUG(HW_GPU, "DEPBAR instruction is stubbed"); break; diff --git a/src/video_core/shader/decode/texture.cpp b/src/video_core/shader/decode/texture.cpp index b094e5a06..994c05611 100644 --- a/src/video_core/shader/decode/texture.cpp +++ b/src/video_core/shader/decode/texture.cpp @@ -107,8 +107,8 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) { break; } case OpCode::Id::TLD4S: { - UNIMPLEMENTED_IF_MSG(instr.tld4s.UsesMiscMode(TextureMiscMode::AOFFI), - "AOFFI is not implemented"); + const bool uses_aoffi = instr.tld4s.UsesMiscMode(TextureMiscMode::AOFFI); + UNIMPLEMENTED_IF_MSG(uses_aoffi, "AOFFI is not implemented"); const bool depth_compare = instr.tld4s.UsesMiscMode(TextureMiscMode::DC); const Node op_a = GetRegister(instr.gpr8); @@ -116,29 +116,86 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) { // TODO(Subv): Figure out how the sampler type is encoded in the TLD4S instruction. std::vector<Node> coords; + Node dc_reg; if (depth_compare) { // Note: TLD4S coordinate encoding works just like TEXS's const Node op_y = GetRegister(instr.gpr8.Value() + 1); coords.push_back(op_a); coords.push_back(op_y); - coords.push_back(op_b); + dc_reg = uses_aoffi ? 
GetRegister(instr.gpr20.Value() + 1) : op_b; } else { coords.push_back(op_a); - coords.push_back(op_b); + if (uses_aoffi) { + const Node op_y = GetRegister(instr.gpr8.Value() + 1); + coords.push_back(op_y); + } else { + coords.push_back(op_b); + } + dc_reg = {}; } const Node component = Immediate(static_cast<u32>(instr.tld4s.component)); const SamplerInfo info{TextureType::Texture2D, false, depth_compare}; - const auto& sampler = GetSampler(instr.sampler, info); + const Sampler& sampler = *GetSampler(instr.sampler, info); Node4 values; for (u32 element = 0; element < values.size(); ++element) { auto coords_copy = coords; - MetaTexture meta{sampler, {}, {}, {}, {}, {}, component, element}; + MetaTexture meta{sampler, {}, dc_reg, {}, {}, {}, {}, component, element}; values[element] = Operation(OperationCode::TextureGather, meta, std::move(coords_copy)); } - WriteTexsInstructionFloat(bb, instr, values, true); + if (instr.tld4s.fp16_flag) { + WriteTexsInstructionHalfFloat(bb, instr, values, true); + } else { + WriteTexsInstructionFloat(bb, instr, values, true); + } + break; + } + case OpCode::Id::TXD_B: + is_bindless = true; + [[fallthrough]]; + case OpCode::Id::TXD: { + UNIMPLEMENTED_IF_MSG(instr.txd.UsesMiscMode(TextureMiscMode::AOFFI), + "AOFFI is not implemented"); + UNIMPLEMENTED_IF_MSG(instr.txd.is_array != 0, "TXD Array is not implemented"); + + u64 base_reg = instr.gpr8.Value(); + const auto derivate_reg = instr.gpr20.Value(); + const auto texture_type = instr.txd.texture_type.Value(); + const auto coord_count = GetCoordCount(texture_type); + + const Sampler* sampler = is_bindless + ? GetBindlessSampler(base_reg, {{texture_type, false, false}}) + : GetSampler(instr.sampler, {{texture_type, false, false}}); + Node4 values; + if (sampler == nullptr) { + for (u32 element = 0; element < values.size(); ++element) { + values[element] = Immediate(0); + } + WriteTexInstructionFloat(bb, instr, values); + break; + } + if (is_bindless) { + base_reg++; + } + + std::vector<Node> coords; + std::vector<Node> derivates; + for (std::size_t i = 0; i < coord_count; ++i) { + coords.push_back(GetRegister(base_reg + i)); + const std::size_t derivate = i * 2; + derivates.push_back(GetRegister(derivate_reg + derivate)); + derivates.push_back(GetRegister(derivate_reg + derivate + 1)); + } + + for (u32 element = 0; element < values.size(); ++element) { + MetaTexture meta{*sampler, {}, {}, {}, derivates, {}, {}, {}, element}; + values[element] = Operation(OperationCode::TextureGradient, std::move(meta), coords); + } + + WriteTexInstructionFloat(bb, instr, values); + break; } case OpCode::Id::TXQ_B: @@ -148,9 +205,24 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) { // TODO: The new commits on the texture refactor, change the way samplers work. // Sadly, not all texture instructions specify the type of texture their sampler // uses. This must be fixed at a later instance. - const auto& sampler = + const Sampler* sampler = is_bindless ? 
GetBindlessSampler(instr.gpr8) : GetSampler(instr.sampler); + if (sampler == nullptr) { + u32 indexer = 0; + for (u32 element = 0; element < 4; ++element) { + if (!instr.txq.IsComponentEnabled(element)) { + continue; + } + const Node value = Immediate(0); + SetTemporary(bb, indexer++, value); + } + for (u32 i = 0; i < indexer; ++i) { + SetRegister(bb, instr.gpr0.Value() + i, GetTemporary(i)); + } + break; + } + u32 indexer = 0; switch (instr.txq.query_type) { case Tegra::Shader::TextureQueryType::Dimension: { @@ -158,7 +230,7 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) { if (!instr.txq.IsComponentEnabled(element)) { continue; } - MetaTexture meta{sampler, {}, {}, {}, {}, {}, {}, element}; + MetaTexture meta{*sampler, {}, {}, {}, {}, {}, {}, {}, element}; const Node value = Operation(OperationCode::TextureQueryDimensions, meta, GetRegister(instr.gpr8.Value() + (is_bindless ? 1 : 0))); @@ -184,9 +256,24 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) { auto texture_type = instr.tmml.texture_type.Value(); const bool is_array = instr.tmml.array != 0; - const auto& sampler = + const Sampler* sampler = is_bindless ? GetBindlessSampler(instr.gpr20) : GetSampler(instr.sampler); + if (sampler == nullptr) { + u32 indexer = 0; + for (u32 element = 0; element < 2; ++element) { + if (!instr.tmml.IsComponentEnabled(element)) { + continue; + } + const Node value = Immediate(0); + SetTemporary(bb, indexer++, value); + } + for (u32 i = 0; i < indexer; ++i) { + SetRegister(bb, instr.gpr0.Value() + i, GetTemporary(i)); + } + break; + } + std::vector<Node> coords; // TODO: Add coordinates for different samplers once other texture types are implemented. @@ -212,7 +299,7 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) { continue; } auto params = coords; - MetaTexture meta{sampler, {}, {}, {}, {}, {}, {}, element}; + MetaTexture meta{*sampler, {}, {}, {}, {}, {}, {}, {}, element}; const Node value = Operation(OperationCode::TextureQueryLod, meta, std::move(params)); SetTemporary(bb, indexer++, value); } @@ -268,7 +355,7 @@ ShaderIR::SamplerInfo ShaderIR::GetSamplerInfo(std::optional<SamplerInfo> sample sampler->is_buffer != 0}; } -const Sampler& ShaderIR::GetSampler(const Tegra::Shader::Sampler& sampler, +const Sampler* ShaderIR::GetSampler(const Tegra::Shader::Sampler& sampler, std::optional<SamplerInfo> sampler_info) { const auto offset = static_cast<u32>(sampler.index.Value()); const auto info = GetSamplerInfo(sampler_info, offset); @@ -280,21 +367,24 @@ const Sampler& ShaderIR::GetSampler(const Tegra::Shader::Sampler& sampler, if (it != used_samplers.end()) { ASSERT(!it->IsBindless() && it->GetType() == info.type && it->IsArray() == info.is_array && it->IsShadow() == info.is_shadow && it->IsBuffer() == info.is_buffer); - return *it; + return &(*it); } // Otherwise create a new mapping for this sampler const auto next_index = static_cast<u32>(used_samplers.size()); - return used_samplers.emplace_back(next_index, offset, info.type, info.is_array, info.is_shadow, - info.is_buffer); + return &used_samplers.emplace_back(next_index, offset, info.type, info.is_array, info.is_shadow, + info.is_buffer); } -const Sampler& ShaderIR::GetBindlessSampler(Tegra::Shader::Register reg, +const Sampler* ShaderIR::GetBindlessSampler(Tegra::Shader::Register reg, std::optional<SamplerInfo> sampler_info) { const Node sampler_register = GetRegister(reg); const auto [base_sampler, buffer, offset] = TrackCbuf(sampler_register, global_code, static_cast<s64>(global_code.size())); ASSERT(base_sampler != nullptr); + 
if (base_sampler == nullptr) { + return nullptr; + } const auto info = GetSamplerInfo(sampler_info, offset, buffer); @@ -307,13 +397,13 @@ const Sampler& ShaderIR::GetBindlessSampler(Tegra::Shader::Register reg, if (it != used_samplers.end()) { ASSERT(it->IsBindless() && it->GetType() == info.type && it->IsArray() == info.is_array && it->IsShadow() == info.is_shadow); - return *it; + return &(*it); } // Otherwise create a new mapping for this sampler const auto next_index = static_cast<u32>(used_samplers.size()); - return used_samplers.emplace_back(next_index, offset, buffer, info.type, info.is_array, - info.is_shadow, info.is_buffer); + return &used_samplers.emplace_back(next_index, offset, buffer, info.type, info.is_array, + info.is_shadow, info.is_buffer); } void ShaderIR::WriteTexInstructionFloat(NodeBlock& bb, Instruction instr, const Node4& components) { @@ -356,14 +446,14 @@ void ShaderIR::WriteTexsInstructionFloat(NodeBlock& bb, Instruction instr, const } void ShaderIR::WriteTexsInstructionHalfFloat(NodeBlock& bb, Instruction instr, - const Node4& components) { + const Node4& components, bool ignore_mask) { // TEXS.F16 destination registers are packed in two registers in pairs (just like any half // float instruction). Node4 values; u32 dest_elem = 0; for (u32 component = 0; component < 4; ++component) { - if (!instr.texs.IsComponentEnabled(component)) + if (!instr.texs.IsComponentEnabled(component) && !ignore_mask) continue; values[dest_elem++] = components[component]; } @@ -399,8 +489,15 @@ Node4 ShaderIR::GetTextureCode(Instruction instr, TextureType texture_type, "This method is not supported."); const SamplerInfo info{texture_type, is_array, is_shadow, false}; - const auto& sampler = + const Sampler* sampler = is_bindless ? GetBindlessSampler(*bindless_reg, info) : GetSampler(instr.sampler, info); + Node4 values; + if (sampler == nullptr) { + for (u32 element = 0; element < values.size(); ++element) { + values[element] = Immediate(0); + } + return values; + } const bool lod_needed = process_mode == TextureProcessMode::LZ || process_mode == TextureProcessMode::LL || @@ -439,10 +536,9 @@ Node4 ShaderIR::GetTextureCode(Instruction instr, TextureType texture_type, } } - Node4 values; for (u32 element = 0; element < values.size(); ++element) { auto copy_coords = coords; - MetaTexture meta{sampler, array, depth_compare, aoffi, bias, lod, {}, element}; + MetaTexture meta{*sampler, array, depth_compare, aoffi, {}, bias, lod, {}, element}; values[element] = Operation(read_method, meta, std::move(copy_coords)); } @@ -555,8 +651,15 @@ Node4 ShaderIR::GetTld4Code(Instruction instr, TextureType texture_type, bool de u64 parameter_register = instr.gpr20.Value(); const SamplerInfo info{texture_type, is_array, depth_compare, false}; - const auto& sampler = is_bindless ? GetBindlessSampler(parameter_register++, info) - : GetSampler(instr.sampler, info); + const Sampler* sampler = is_bindless ? GetBindlessSampler(parameter_register++, info) - : GetSampler(instr.sampler, info); + Node4 values; + if (sampler == nullptr) { + for (u32 element = 0; element < values.size(); ++element) { + values[element] = Immediate(0); + } + return values; + } std::vector<Node> aoffi; if (is_aoffi) { @@ -571,10 +674,9 @@ Node4 ShaderIR::GetTld4Code(Instruction instr, TextureType texture_type, bool de const Node component = is_bindless ?
Immediate(static_cast<u32>(instr.tld4_b.component)) : Immediate(static_cast<u32>(instr.tld4.component)); - Node4 values; for (u32 element = 0; element < values.size(); ++element) { auto coords_copy = coords; - MetaTexture meta{sampler, GetRegister(array_register), dc, aoffi, {}, {}, component, + MetaTexture meta{*sampler, GetRegister(array_register), dc, aoffi, {}, {}, {}, component, element}; values[element] = Operation(OperationCode::TextureGather, meta, std::move(coords_copy)); } @@ -603,12 +705,12 @@ Node4 ShaderIR::GetTldCode(Tegra::Shader::Instruction instr) { // const Node aoffi_register{is_aoffi ? GetRegister(gpr20_cursor++) : nullptr}; // const Node multisample{is_multisample ? GetRegister(gpr20_cursor++) : nullptr}; - const auto& sampler = GetSampler(instr.sampler); + const auto& sampler = *GetSampler(instr.sampler); Node4 values; for (u32 element = 0; element < values.size(); ++element) { auto coords_copy = coords; - MetaTexture meta{sampler, array_register, {}, {}, {}, lod, {}, element}; + MetaTexture meta{sampler, array_register, {}, {}, {}, {}, lod, {}, element}; values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy)); } @@ -616,7 +718,7 @@ Node4 ShaderIR::GetTldCode(Tegra::Shader::Instruction instr) { } Node4 ShaderIR::GetTldsCode(Instruction instr, TextureType texture_type, bool is_array) { - const auto& sampler = GetSampler(instr.sampler); + const Sampler& sampler = *GetSampler(instr.sampler); const std::size_t type_coord_count = GetCoordCount(texture_type); const bool lod_enabled = instr.tlds.GetTextureProcessMode() == TextureProcessMode::LL; @@ -653,7 +755,7 @@ Node4 ShaderIR::GetTldsCode(Instruction instr, TextureType texture_type, bool is Node4 values; for (u32 element = 0; element < values.size(); ++element) { auto coords_copy = coords; - MetaTexture meta{sampler, array, {}, {}, {}, lod, {}, element}; + MetaTexture meta{sampler, array, {}, {}, {}, {}, lod, {}, element}; values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy)); } return values; diff --git a/src/video_core/shader/decode/warp.cpp b/src/video_core/shader/decode/warp.cpp index d98d0e1dd..11b77f795 100644 --- a/src/video_core/shader/decode/warp.cpp +++ b/src/video_core/shader/decode/warp.cpp @@ -38,6 +38,9 @@ u32 ShaderIR::DecodeWarp(NodeBlock& bb, u32 pc) { const Instruction instr = {program_code[pc]}; const auto opcode = OpCode::Decode(instr); + // Signal the backend that this shader uses warp instructions. 
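
Aside: the flag set on the next line is consumed earlier in this diff by GenerateShaderEntries (entries.uses_warps) and exposed as ShaderIR::UsesWarps() in the shader_ir.h hunk below, so the Vulkan backend knows the shader needs the OpSubgroupAllKHR/OpSubgroupAnyKHR/OpSubgroupAllEqualKHR operations wired into the decompiler's operation table above. For reference, the per-warp semantics those vote modes compute, sketched over a 32-thread ballot mask (illustration only, not the emitted SPIR-V):

    #include <array>
    #include <cstddef>
    #include <cstdint>

    // Pack one predicate per warp lane into a 32-bit ballot mask.
    constexpr std::uint32_t Ballot(const std::array<bool, 32>& lanes) {
        std::uint32_t mask = 0;
        for (std::size_t i = 0; i < lanes.size(); ++i) {
            mask |= static_cast<std::uint32_t>(lanes[i]) << i;
        }
        return mask;
    }

    constexpr bool VoteAll(const std::array<bool, 32>& lanes) {
        return Ballot(lanes) == 0xFFFFFFFFu; // every lane passed the predicate
    }
    constexpr bool VoteAny(const std::array<bool, 32>& lanes) {
        return Ballot(lanes) != 0; // at least one lane passed
    }
    constexpr bool VoteEqual(const std::array<bool, 32>& lanes) {
        const std::uint32_t mask = Ballot(lanes);
        return mask == 0 || mask == 0xFFFFFFFFu; // all lanes agree either way
    }
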
+ uses_warps = true; + switch (opcode->get().GetId()) { case OpCode::Id::VOTE: { const Node value = GetPredicate(instr.vote.value, instr.vote.negate_value != 0); diff --git a/src/video_core/shader/node.h b/src/video_core/shader/node.h index 44d85d434..abd40f582 100644 --- a/src/video_core/shader/node.h +++ b/src/video_core/shader/node.h @@ -68,6 +68,7 @@ enum class OperationCode { IBitfieldInsert, /// (MetaArithmetic, int base, int insert, int offset, int bits) -> int IBitfieldExtract, /// (MetaArithmetic, int value, int offset, int offset) -> int IBitCount, /// (MetaArithmetic, int) -> int + IBitMSB, /// (MetaArithmetic, int) -> int UAdd, /// (MetaArithmetic, uint a, uint b) -> uint UMul, /// (MetaArithmetic, uint a, uint b) -> uint @@ -86,6 +87,7 @@ enum class OperationCode { UBitfieldInsert, /// (MetaArithmetic, uint base, uint insert, int offset, int bits) -> uint UBitfieldExtract, /// (MetaArithmetic, uint value, int offset, int offset) -> uint UBitCount, /// (MetaArithmetic, uint) -> uint + UBitMSB, /// (MetaArithmetic, uint) -> uint HAdd, /// (MetaArithmetic, f16vec2 a, f16vec2 b) -> f16vec2 HMul, /// (MetaArithmetic, f16vec2 a, f16vec2 b) -> f16vec2 @@ -149,6 +151,7 @@ enum class OperationCode { TextureQueryDimensions, /// (MetaTexture, float a) -> float4 TextureQueryLod, /// (MetaTexture, float[N] coords) -> float4 TexelFetch, /// (MetaTexture, int[N], int) -> float4 + TextureGradient, /// (MetaTexture, float[N] coords, float[N*2] derivates) -> float4 ImageLoad, /// (MetaImage, int[N] coords) -> void ImageStore, /// (MetaImage, int[N] coords) -> void @@ -169,6 +172,7 @@ enum class OperationCode { EmitVertex, /// () -> void EndPrimitive, /// () -> void + InvocationId, /// () -> int YNegate, /// () -> float LocalInvocationIdX, /// () -> uint LocalInvocationIdY, /// () -> uint @@ -185,6 +189,8 @@ enum class OperationCode { ThreadId, /// () -> uint ShuffleIndexed, /// (uint value, uint index) -> uint + MemoryBarrierGL, /// () -> void + Amount, }; @@ -210,13 +216,14 @@ class PredicateNode; class AbufNode; class CbufNode; class LmemNode; +class PatchNode; class SmemNode; class GmemNode; class CommentNode; -using NodeData = - std::variant<OperationNode, ConditionalNode, GprNode, ImmediateNode, InternalFlagNode, - PredicateNode, AbufNode, CbufNode, LmemNode, SmemNode, GmemNode, CommentNode>; +using NodeData = std::variant<OperationNode, ConditionalNode, GprNode, ImmediateNode, + InternalFlagNode, PredicateNode, AbufNode, PatchNode, CbufNode, + LmemNode, SmemNode, GmemNode, CommentNode>; using Node = std::shared_ptr<NodeData>; using Node4 = std::array<Node, 4>; using NodeBlock = std::vector<Node>; @@ -367,6 +374,7 @@ struct MetaTexture { Node array; Node depth_compare; std::vector<Node> aoffi; + std::vector<Node> derivates; Node bias; Node lod; Node component{}; @@ -538,6 +546,19 @@ private: u32 element{}; }; +/// Patch memory (used to communicate tessellation stages). 
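
Aside: the PatchNode class that follows stores the flat word offset computed in the memory.cpp hunk above. Each patch attribute occupies a vec4, so attribute `index`, component `element` maps to one word:

    #include <cstdint>

    // Matches `index * 4 + element` in DecodeMemory above.
    constexpr std::uint32_t PatchWordOffset(std::uint32_t index, std::uint32_t element) {
        return index * 4 + element;
    }
    static_assert(PatchWordOffset(1, 2) == 6, "attribute 1, component z");
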
+class PatchNode final { +public: + explicit PatchNode(u32 offset) : offset{offset} {} + + u32 GetOffset() const { + return offset; + } + +private: + u32 offset{}; +}; + /// Constant buffer node, usually mapped to uniform buffers in GLSL class CbufNode final { public: diff --git a/src/video_core/shader/shader_ir.h b/src/video_core/shader/shader_ir.h index 2f71a50d2..04ae5f822 100644 --- a/src/video_core/shader/shader_ir.h +++ b/src/video_core/shader/shader_ir.h @@ -137,6 +137,10 @@ public: return uses_vertex_id; } + bool UsesWarps() const { + return uses_warps; + } + bool HasPhysicalAttributes() const { return uses_physical_attributes; } @@ -309,11 +313,11 @@ private: std::optional<u32> buffer = std::nullopt); /// Accesses a texture sampler - const Sampler& GetSampler(const Tegra::Shader::Sampler& sampler, + const Sampler* GetSampler(const Tegra::Shader::Sampler& sampler, std::optional<SamplerInfo> sampler_info = std::nullopt); /// Accesses a texture sampler for a bindless texture. - const Sampler& GetBindlessSampler(Tegra::Shader::Register reg, + const Sampler* GetBindlessSampler(Tegra::Shader::Register reg, std::optional<SamplerInfo> sampler_info = std::nullopt); /// Accesses an image. @@ -334,7 +338,7 @@ private: void WriteTexsInstructionFloat(NodeBlock& bb, Tegra::Shader::Instruction instr, const Node4& components, bool ignore_mask = false); void WriteTexsInstructionHalfFloat(NodeBlock& bb, Tegra::Shader::Instruction instr, - const Node4& components); + const Node4& components, bool ignore_mask = false); Node4 GetTexCode(Tegra::Shader::Instruction instr, Tegra::Shader::TextureType texture_type, Tegra::Shader::TextureProcessMode process_mode, bool depth_compare, @@ -415,6 +419,7 @@ private: bool uses_physical_attributes{}; // Shader uses AL2P or physical attribute read/writes bool uses_instance_id{}; bool uses_vertex_id{}; + bool uses_warps{}; Tegra::Shader::Header header; }; diff --git a/src/video_core/shader/track.cpp b/src/video_core/shader/track.cpp index 55f5949e4..165c79330 100644 --- a/src/video_core/shader/track.cpp +++ b/src/video_core/shader/track.cpp @@ -7,6 +7,7 @@ #include <variant> #include "common/common_types.h" +#include "video_core/shader/node.h" #include "video_core/shader/shader_ir.h" namespace VideoCommon::Shader { diff --git a/src/video_core/texture_cache/surface_base.h b/src/video_core/texture_cache/surface_base.h index 1bed82898..5f79bb0aa 100644 --- a/src/video_core/texture_cache/surface_base.h +++ b/src/video_core/texture_cache/surface_base.h @@ -254,16 +254,14 @@ public: if (!layer_mipmap) { return {}; } - const u32 end_layer{layer_mipmap->first}; - const u32 end_mipmap{layer_mipmap->second}; + const auto [end_layer, end_mipmap] = *layer_mipmap; if (layer != end_layer) { if (mipmap == 0 && end_mipmap == 0) { - return GetView(ViewParams(view_params.target, layer, end_layer - layer + 1, 0, 1)); + return GetView(ViewParams(view_params.target, layer, end_layer - layer, 0, 1)); } return {}; } else { - return GetView( - ViewParams(view_params.target, layer, 1, mipmap, end_mipmap - mipmap + 1)); + return GetView(ViewParams(view_params.target, layer, 1, mipmap, end_mipmap - mipmap)); } } @@ -278,8 +276,7 @@ public: if (!layer_mipmap) { return {}; } - const u32 layer{layer_mipmap->first}; - const u32 mipmap{layer_mipmap->second}; + const auto [layer, mipmap] = *layer_mipmap; if (GetMipmapSize(mipmap) != candidate_size) { return EmplaceIrregularView(view_params, view_addr, candidate_size, mipmap, layer); } diff --git a/src/video_core/texture_cache/surface_params.cpp 
b/src/video_core/texture_cache/surface_params.cpp index 858e17e08..a4f1edd9a 100644 --- a/src/video_core/texture_cache/surface_params.cpp +++ b/src/video_core/texture_cache/surface_params.cpp @@ -246,6 +246,16 @@ SurfaceParams SurfaceParams::CreateForFermiCopySurface( return params; } +VideoCore::Surface::SurfaceTarget SurfaceParams::ExpectedTarget( + const VideoCommon::Shader::Sampler& entry) { + return TextureTypeToSurfaceTarget(entry.GetType(), entry.IsArray()); +} + +VideoCore::Surface::SurfaceTarget SurfaceParams::ExpectedTarget( + const VideoCommon::Shader::Image& entry) { + return ImageTypeToSurfaceTarget(entry.GetType()); +} + bool SurfaceParams::IsLayered() const { switch (target) { case SurfaceTarget::Texture1DArray: diff --git a/src/video_core/texture_cache/surface_params.h b/src/video_core/texture_cache/surface_params.h index 709aa0dc2..129817ad3 100644 --- a/src/video_core/texture_cache/surface_params.h +++ b/src/video_core/texture_cache/surface_params.h @@ -45,6 +45,14 @@ public: static SurfaceParams CreateForFermiCopySurface( const Tegra::Engines::Fermi2D::Regs::Surface& config); + /// Obtains the texture target from a shader's sampler entry. + static VideoCore::Surface::SurfaceTarget ExpectedTarget( + const VideoCommon::Shader::Sampler& entry); + + /// Obtains the texture target from a shader's image entry. + static VideoCore::Surface::SurfaceTarget ExpectedTarget( + const VideoCommon::Shader::Image& entry); + std::size_t Hash() const { return static_cast<std::size_t>( Common::CityHash64(reinterpret_cast<const char*>(this), sizeof(*this))); diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h index 41309ebea..02d2e9136 100644 --- a/src/video_core/texture_cache/texture_cache.h +++ b/src/video_core/texture_cache/texture_cache.h @@ -95,10 +95,16 @@ public: std::lock_guard lock{mutex}; const auto gpu_addr{tic.Address()}; if (!gpu_addr) { - return {}; + return GetNullSurface(SurfaceParams::ExpectedTarget(entry)); + } + + const auto host_ptr{system.GPU().MemoryManager().GetPointer(gpu_addr)}; + const auto cache_addr{ToCacheAddr(host_ptr)}; + if (!cache_addr) { + return GetNullSurface(SurfaceParams::ExpectedTarget(entry)); } const auto params{SurfaceParams::CreateForTexture(format_lookup_table, tic, entry)}; - const auto [surface, view] = GetSurface(gpu_addr, params, true, false); + const auto [surface, view] = GetSurface(gpu_addr, cache_addr, params, true, false); if (guard_samplers) { sampled_textures.push_back(surface); } @@ -110,10 +116,15 @@ public: std::lock_guard lock{mutex}; const auto gpu_addr{tic.Address()}; if (!gpu_addr) { - return {}; + return GetNullSurface(SurfaceParams::ExpectedTarget(entry)); + } + const auto host_ptr{system.GPU().MemoryManager().GetPointer(gpu_addr)}; + const auto cache_addr{ToCacheAddr(host_ptr)}; + if (!cache_addr) { + return GetNullSurface(SurfaceParams::ExpectedTarget(entry)); } const auto params{SurfaceParams::CreateForImage(format_lookup_table, tic, entry)}; - const auto [surface, view] = GetSurface(gpu_addr, params, true, false); + const auto [surface, view] = GetSurface(gpu_addr, cache_addr, params, true, false); if (guard_samplers) { sampled_textures.push_back(surface); } @@ -143,11 +154,17 @@ public: SetEmptyDepthBuffer(); return {}; } + const auto host_ptr{system.GPU().MemoryManager().GetPointer(gpu_addr)}; + const auto cache_addr{ToCacheAddr(host_ptr)}; + if (!cache_addr) { + SetEmptyDepthBuffer(); + return {}; + } const auto depth_params{SurfaceParams::CreateForDepthBuffer(
system, regs.zeta_width, regs.zeta_height, regs.zeta.format, regs.zeta.memory_layout.block_width, regs.zeta.memory_layout.block_height, regs.zeta.memory_layout.block_depth, regs.zeta.memory_layout.type)}; - auto surface_view = GetSurface(gpu_addr, depth_params, preserve_contents, true); + auto surface_view = GetSurface(gpu_addr, cache_addr, depth_params, preserve_contents, true); if (depth_buffer.target) depth_buffer.target->MarkAsRenderTarget(false, NO_RT); depth_buffer.target = surface_view.first; @@ -180,8 +197,16 @@ public: return {}; } - auto surface_view = GetSurface(gpu_addr, SurfaceParams::CreateForFramebuffer(system, index), - preserve_contents, true); + const auto host_ptr{system.GPU().MemoryManager().GetPointer(gpu_addr)}; + const auto cache_addr{ToCacheAddr(host_ptr)}; + if (!cache_addr) { + SetEmptyColorBuffer(index); + return {}; + } + + auto surface_view = + GetSurface(gpu_addr, cache_addr, SurfaceParams::CreateForFramebuffer(system, index), + preserve_contents, true); if (render_targets[index].target) render_targets[index].target->MarkAsRenderTarget(false, NO_RT); render_targets[index].target = surface_view.first; @@ -230,8 +255,14 @@ public: const GPUVAddr src_gpu_addr = src_config.Address(); const GPUVAddr dst_gpu_addr = dst_config.Address(); DeduceBestBlit(src_params, dst_params, src_gpu_addr, dst_gpu_addr); - std::pair<TSurface, TView> dst_surface = GetSurface(dst_gpu_addr, dst_params, true, false); - std::pair<TSurface, TView> src_surface = GetSurface(src_gpu_addr, src_params, true, false); + const auto dst_host_ptr{system.GPU().MemoryManager().GetPointer(dst_gpu_addr)}; + const auto dst_cache_addr{ToCacheAddr(dst_host_ptr)}; + const auto src_host_ptr{system.GPU().MemoryManager().GetPointer(src_gpu_addr)}; + const auto src_cache_addr{ToCacheAddr(src_host_ptr)}; + std::pair<TSurface, TView> dst_surface = + GetSurface(dst_gpu_addr, dst_cache_addr, dst_params, true, false); + std::pair<TSurface, TView> src_surface = + GetSurface(src_gpu_addr, src_cache_addr, src_params, true, false); ImageBlit(src_surface.second, dst_surface.second, copy_config); dst_surface.first->MarkAsModified(true, Tick()); } @@ -347,13 +378,6 @@ protected: return new_surface; } - std::pair<TSurface, TView> GetFermiSurface( - const Tegra::Engines::Fermi2D::Regs::Surface& config) { - SurfaceParams params = SurfaceParams::CreateForFermiCopySurface(config); - const GPUVAddr gpu_addr = config.Address(); - return GetSurface(gpu_addr, params, true, false); - } - Core::System& system; private: @@ -614,22 +638,9 @@ private: * left blank. * @param is_render Whether or not the surface is a render target. **/ - std::pair<TSurface, TView> GetSurface(const GPUVAddr gpu_addr, const SurfaceParams& params, - bool preserve_contents, bool is_render) { - const auto host_ptr{system.GPU().MemoryManager().GetPointer(gpu_addr)}; - const auto cache_addr{ToCacheAddr(host_ptr)}; - - // Step 0: guarantee a valid surface - if (!cache_addr) { - // Return a null surface if it's invalid - SurfaceParams new_params = params; - new_params.width = 1; - new_params.height = 1; - new_params.depth = 1; - new_params.block_height = 0; - new_params.block_depth = 0; - return InitializeSurface(gpu_addr, new_params, false); - } + std::pair<TSurface, TView> GetSurface(const GPUVAddr gpu_addr, const CacheAddr cache_addr, + const SurfaceParams& params, bool preserve_contents, + bool is_render) { // Step 1 // Check Level 1 Cache for a fast structural match. 
If candidate surface @@ -794,6 +805,41 @@ private: } /** + * Gets a null surface based on a texture target. + * @param target The target of the null surface. + */ + TView GetNullSurface(SurfaceTarget target) { + const u32 i_target = static_cast<u32>(target); + if (const auto it = invalid_cache.find(i_target); it != invalid_cache.end()) { + return it->second->GetMainView(); + } + SurfaceParams params{}; + params.target = target; + params.is_tiled = false; + params.srgb_conversion = false; + params.is_layered = false; + params.block_width = 0; + params.block_height = 0; + params.block_depth = 0; + params.tile_width_spacing = 1; + params.width = 1; + params.height = 1; + params.depth = 1; + params.pitch = 4; + params.num_levels = 1; + params.emulated_levels = 1; + params.pixel_format = VideoCore::Surface::PixelFormat::RGBA16F; + params.type = VideoCore::Surface::SurfaceType::ColorTexture; + auto surface = CreateSurface(0ULL, params); + invalid_memory.clear(); + invalid_memory.resize(surface->GetHostSizeInBytes(), 0U); + surface->UploadTexture(invalid_memory); + surface->MarkAsModified(false, Tick()); + invalid_cache.emplace(i_target, surface); + return surface->GetMainView(); + } + + /** * Gets a source and destination starting address and parameters, * and tries to deduce whether they are supposed to be depth textures. If so, their * parameters are adjusted accordingly. @@ -991,6 +1037,11 @@ private: std::vector<TSurface> sampled_textures; + /// This cache stores null surfaces used as placeholders + /// for invalid texture calls. + std::unordered_map<u32, TSurface> invalid_cache; + std::vector<u8> invalid_memory; + StagingCache staging_cache; std::recursive_mutex mutex; };
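
Aside: taken together, the texture_cache.h changes replace the old degenerate-surface fallback with a per-target cache of 1x1 RGBA16F placeholder surfaces, so samples from invalid TIC addresses return defined (zero-filled) data instead of touching an uninitialized view. The lazy cache-or-create pattern behind GetNullSurface, reduced to a self-contained sketch (Surface here is a stand-in type, not the cache's real surface class):

    #include <cstdint>
    #include <memory>
    #include <unordered_map>

    struct Surface {
        std::uint32_t target; // stand-in for SurfaceTarget
    };

    class NullSurfaceCache {
    public:
        // Returns the cached placeholder for `target`, creating it on first use.
        std::shared_ptr<Surface> Get(std::uint32_t target) {
            if (const auto it = cache.find(target); it != cache.end()) {
                return it->second;
            }
            // The real GetNullSurface also uploads zero-filled memory so
            // sampling the placeholder yields a defined value.
            auto surface = std::make_shared<Surface>(Surface{target});
            cache.emplace(target, surface);
            return surface;
        }

    private:
        std::unordered_map<std::uint32_t, std::shared_ptr<Surface>> cache;
    };
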