Diffstat (limited to 'src/shader_recompiler/backend/spirv')
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_context.cpp  1368
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_context.h  307
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv.cpp  541
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv.h  27
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp  448
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_barriers.cpp  38
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_bitwise_conversion.cpp  66
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_composite.cpp  155
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp  505
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_control_flow.cpp  28
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_convert.cpp  269
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_floating_point.cpp  396
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_image.cpp  462
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_image_atomic.cpp  183
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_instructions.h  579
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_integer.cpp  270
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_logical.cpp  26
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_memory.cpp  275
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_select.cpp  42
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_shared_memory.cpp  174
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_special.cpp  150
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_undefined.cpp  30
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_warp.cpp  203
23 files changed, 6542 insertions, 0 deletions
diff --git a/src/shader_recompiler/backend/spirv/emit_context.cpp b/src/shader_recompiler/backend/spirv/emit_context.cpp
new file mode 100644
index 000000000..2d29d8c14
--- /dev/null
+++ b/src/shader_recompiler/backend/spirv/emit_context.cpp
@@ -0,0 +1,1368 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <algorithm>
+#include <array>
+#include <bit>
+#include <climits>
+#include <string_view>
+
+#include <fmt/format.h>
+
+#include "common/common_types.h"
+#include "common/div_ceil.h"
+#include "shader_recompiler/backend/spirv/emit_context.h"
+
+namespace Shader::Backend::SPIRV {
+namespace {
+enum class Operation {
+ Increment,
+ Decrement,
+ FPAdd,
+ FPMin,
+ FPMax,
+};
+
+struct AttrInfo {
+ Id pointer;
+ Id id;
+ bool needs_cast;
+};
+
+Id ImageType(EmitContext& ctx, const TextureDescriptor& desc) {
+ const spv::ImageFormat format{spv::ImageFormat::Unknown};
+ const Id type{ctx.F32[1]};
+ const bool depth{desc.is_depth};
+ switch (desc.type) {
+ case TextureType::Color1D:
+ return ctx.TypeImage(type, spv::Dim::Dim1D, depth, false, false, 1, format);
+ case TextureType::ColorArray1D:
+ return ctx.TypeImage(type, spv::Dim::Dim1D, depth, true, false, 1, format);
+ case TextureType::Color2D:
+ return ctx.TypeImage(type, spv::Dim::Dim2D, depth, false, false, 1, format);
+ case TextureType::ColorArray2D:
+ return ctx.TypeImage(type, spv::Dim::Dim2D, depth, true, false, 1, format);
+ case TextureType::Color3D:
+ return ctx.TypeImage(type, spv::Dim::Dim3D, depth, false, false, 1, format);
+ case TextureType::ColorCube:
+ return ctx.TypeImage(type, spv::Dim::Cube, depth, false, false, 1, format);
+ case TextureType::ColorArrayCube:
+ return ctx.TypeImage(type, spv::Dim::Cube, depth, true, false, 1, format);
+ case TextureType::Buffer:
+ break;
+ }
+ throw InvalidArgument("Invalid texture type {}", desc.type);
+}
+
+spv::ImageFormat GetImageFormat(ImageFormat format) {
+ switch (format) {
+ case ImageFormat::Typeless:
+ return spv::ImageFormat::Unknown;
+ case ImageFormat::R8_UINT:
+ return spv::ImageFormat::R8ui;
+ case ImageFormat::R8_SINT:
+ return spv::ImageFormat::R8i;
+ case ImageFormat::R16_UINT:
+ return spv::ImageFormat::R16ui;
+ case ImageFormat::R16_SINT:
+ return spv::ImageFormat::R16i;
+ case ImageFormat::R32_UINT:
+ return spv::ImageFormat::R32ui;
+ case ImageFormat::R32G32_UINT:
+ return spv::ImageFormat::Rg32ui;
+ case ImageFormat::R32G32B32A32_UINT:
+ return spv::ImageFormat::Rgba32ui;
+ }
+ throw InvalidArgument("Invalid image format {}", format);
+}
+
+Id ImageType(EmitContext& ctx, const ImageDescriptor& desc) {
+ const spv::ImageFormat format{GetImageFormat(desc.format)};
+ const Id type{ctx.U32[1]};
+ switch (desc.type) {
+ case TextureType::Color1D:
+ return ctx.TypeImage(type, spv::Dim::Dim1D, false, false, false, 2, format);
+ case TextureType::ColorArray1D:
+ return ctx.TypeImage(type, spv::Dim::Dim1D, false, true, false, 2, format);
+ case TextureType::Color2D:
+ return ctx.TypeImage(type, spv::Dim::Dim2D, false, false, false, 2, format);
+ case TextureType::ColorArray2D:
+ return ctx.TypeImage(type, spv::Dim::Dim2D, false, true, false, 2, format);
+ case TextureType::Color3D:
+ return ctx.TypeImage(type, spv::Dim::Dim3D, false, false, false, 2, format);
+ case TextureType::Buffer:
+ throw NotImplementedException("Image buffer");
+ default:
+ break;
+ }
+ throw InvalidArgument("Invalid texture type {}", desc.type);
+}
+
+Id DefineVariable(EmitContext& ctx, Id type, std::optional<spv::BuiltIn> builtin,
+ spv::StorageClass storage_class) {
+ const Id pointer_type{ctx.TypePointer(storage_class, type)};
+ const Id id{ctx.AddGlobalVariable(pointer_type, storage_class)};
+ if (builtin) {
+ ctx.Decorate(id, spv::Decoration::BuiltIn, *builtin);
+ }
+ ctx.interfaces.push_back(id);
+ return id;
+}
+
+u32 NumVertices(InputTopology input_topology) {
+ switch (input_topology) {
+ case InputTopology::Points:
+ return 1;
+ case InputTopology::Lines:
+ return 2;
+ case InputTopology::LinesAdjacency:
+ return 4;
+ case InputTopology::Triangles:
+ return 3;
+ case InputTopology::TrianglesAdjacency:
+ return 6;
+ }
+ throw InvalidArgument("Invalid input topology {}", input_topology);
+}
+
+Id DefineInput(EmitContext& ctx, Id type, bool per_invocation,
+ std::optional<spv::BuiltIn> builtin = std::nullopt) {
+ switch (ctx.stage) {
+ case Stage::TessellationControl:
+ case Stage::TessellationEval:
+ if (per_invocation) {
+ type = ctx.TypeArray(type, ctx.Const(32u));
+ }
+ break;
+ case Stage::Geometry:
+ if (per_invocation) {
+ const u32 num_vertices{NumVertices(ctx.runtime_info.input_topology)};
+ type = ctx.TypeArray(type, ctx.Const(num_vertices));
+ }
+ break;
+ default:
+ break;
+ }
+ return DefineVariable(ctx, type, builtin, spv::StorageClass::Input);
+}
+
+Id DefineOutput(EmitContext& ctx, Id type, std::optional<u32> invocations,
+ std::optional<spv::BuiltIn> builtin = std::nullopt) {
+ if (invocations && ctx.stage == Stage::TessellationControl) {
+ type = ctx.TypeArray(type, ctx.Const(*invocations));
+ }
+ return DefineVariable(ctx, type, builtin, spv::StorageClass::Output);
+}
+
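+// Defines the output variables for one generic attribute. When transform feedback is active,
+// the attribute may be split into several variables so each XFB varying covers its own
+// component range; the resulting ids are recorded in ctx.output_generics for later stores.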
+void DefineGenericOutput(EmitContext& ctx, size_t index, std::optional<u32> invocations) {
+ static constexpr std::string_view swizzle{"xyzw"};
+ const size_t base_attr_index{static_cast<size_t>(IR::Attribute::Generic0X) + index * 4};
+ u32 element{0};
+ while (element < 4) {
+ const u32 remainder{4 - element};
+ const TransformFeedbackVarying* xfb_varying{};
+ if (!ctx.runtime_info.xfb_varyings.empty()) {
+ xfb_varying = &ctx.runtime_info.xfb_varyings[base_attr_index + element];
+            xfb_varying = xfb_varying->components > 0 ? xfb_varying : nullptr;
+ }
+ const u32 num_components{xfb_varying ? xfb_varying->components : remainder};
+
+ const Id id{DefineOutput(ctx, ctx.F32[num_components], invocations)};
+ ctx.Decorate(id, spv::Decoration::Location, static_cast<u32>(index));
+ if (element > 0) {
+ ctx.Decorate(id, spv::Decoration::Component, element);
+ }
+ if (xfb_varying) {
+ ctx.Decorate(id, spv::Decoration::XfbBuffer, xfb_varying->buffer);
+ ctx.Decorate(id, spv::Decoration::XfbStride, xfb_varying->stride);
+ ctx.Decorate(id, spv::Decoration::Offset, xfb_varying->offset);
+ }
+ if (num_components < 4 || element > 0) {
+ const std::string_view subswizzle{swizzle.substr(element, num_components)};
+ ctx.Name(id, fmt::format("out_attr{}_{}", index, subswizzle));
+ } else {
+ ctx.Name(id, fmt::format("out_attr{}", index));
+ }
+ const GenericElementInfo info{
+ .id = id,
+ .first_element = element,
+ .num_components = num_components,
+ };
+ std::fill_n(ctx.output_generics[index].begin() + element, num_components, info);
+ element += num_components;
+ }
+}
+
+Id GetAttributeType(EmitContext& ctx, AttributeType type) {
+ switch (type) {
+ case AttributeType::Float:
+ return ctx.F32[4];
+ case AttributeType::SignedInt:
+ return ctx.TypeVector(ctx.TypeInt(32, true), 4);
+ case AttributeType::UnsignedInt:
+ return ctx.U32[4];
+ case AttributeType::Disabled:
+ break;
+ }
+ throw InvalidArgument("Invalid attribute type {}", type);
+}
+
+std::optional<AttrInfo> AttrTypes(EmitContext& ctx, u32 index) {
+ const AttributeType type{ctx.runtime_info.generic_input_types.at(index)};
+ switch (type) {
+ case AttributeType::Float:
+ return AttrInfo{ctx.input_f32, ctx.F32[1], false};
+ case AttributeType::UnsignedInt:
+ return AttrInfo{ctx.input_u32, ctx.U32[1], true};
+ case AttributeType::SignedInt:
+ return AttrInfo{ctx.input_s32, ctx.TypeInt(32, true), true};
+ case AttributeType::Disabled:
+ return std::nullopt;
+ }
+ throw InvalidArgument("Invalid attribute type {}", type);
+}
+
+std::string_view StageName(Stage stage) {
+ switch (stage) {
+ case Stage::VertexA:
+ return "vs_a";
+ case Stage::VertexB:
+ return "vs";
+ case Stage::TessellationControl:
+ return "tcs";
+ case Stage::TessellationEval:
+ return "tes";
+ case Stage::Geometry:
+ return "gs";
+ case Stage::Fragment:
+ return "fs";
+ case Stage::Compute:
+ return "cs";
+ }
+ throw InvalidArgument("Invalid stage {}", stage);
+}
+
+template <typename... Args>
+void Name(EmitContext& ctx, Id object, std::string_view format_str, Args&&... args) {
+ ctx.Name(object, fmt::format(fmt::runtime(format_str), StageName(ctx.stage),
+ std::forward<Args>(args)...)
+ .c_str());
+}
+
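+// Declares a uniform block for one element type: a 64 KiB array of 'type' wrapped in a Block
+// struct, plus the pointer types used to index it. Each constant buffer descriptor then gets
+// its own global variable and binding.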
+void DefineConstBuffers(EmitContext& ctx, const Info& info, Id UniformDefinitions::*member_type,
+ u32 binding, Id type, char type_char, u32 element_size) {
+ const Id array_type{ctx.TypeArray(type, ctx.Const(65536U / element_size))};
+ ctx.Decorate(array_type, spv::Decoration::ArrayStride, element_size);
+
+ const Id struct_type{ctx.TypeStruct(array_type)};
+    Name(ctx, struct_type, "{}_cbuf_block_{}{}", type_char, element_size * CHAR_BIT);
+ ctx.Decorate(struct_type, spv::Decoration::Block);
+ ctx.MemberName(struct_type, 0, "data");
+ ctx.MemberDecorate(struct_type, 0, spv::Decoration::Offset, 0U);
+
+ const Id struct_pointer_type{ctx.TypePointer(spv::StorageClass::Uniform, struct_type)};
+ const Id uniform_type{ctx.TypePointer(spv::StorageClass::Uniform, type)};
+ ctx.uniform_types.*member_type = uniform_type;
+
+ for (const ConstantBufferDescriptor& desc : info.constant_buffer_descriptors) {
+ const Id id{ctx.AddGlobalVariable(struct_pointer_type, spv::StorageClass::Uniform)};
+ ctx.Decorate(id, spv::Decoration::Binding, binding);
+ ctx.Decorate(id, spv::Decoration::DescriptorSet, 0U);
+ ctx.Name(id, fmt::format("c{}", desc.index));
+ for (size_t i = 0; i < desc.count; ++i) {
+ ctx.cbufs[desc.index + i].*member_type = id;
+ }
+ if (ctx.profile.supported_spirv >= 0x00010400) {
+ ctx.interfaces.push_back(id);
+ }
+ binding += desc.count;
+ }
+}
+
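+// Declares storage buffer types for one element type: a runtime array inside a Block struct
+// in the StorageBuffer storage class, plus the element pointer type, binding one variable
+// per storage buffer descriptor.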
+void DefineSsbos(EmitContext& ctx, StorageTypeDefinition& type_def,
+ Id StorageDefinitions::*member_type, const Info& info, u32 binding, Id type,
+ u32 stride) {
+ const Id array_type{ctx.TypeRuntimeArray(type)};
+ ctx.Decorate(array_type, spv::Decoration::ArrayStride, stride);
+
+ const Id struct_type{ctx.TypeStruct(array_type)};
+ ctx.Decorate(struct_type, spv::Decoration::Block);
+ ctx.MemberDecorate(struct_type, 0, spv::Decoration::Offset, 0U);
+
+ const Id struct_pointer{ctx.TypePointer(spv::StorageClass::StorageBuffer, struct_type)};
+ type_def.array = struct_pointer;
+ type_def.element = ctx.TypePointer(spv::StorageClass::StorageBuffer, type);
+
+ u32 index{};
+ for (const StorageBufferDescriptor& desc : info.storage_buffers_descriptors) {
+ const Id id{ctx.AddGlobalVariable(struct_pointer, spv::StorageClass::StorageBuffer)};
+ ctx.Decorate(id, spv::Decoration::Binding, binding);
+ ctx.Decorate(id, spv::Decoration::DescriptorSet, 0U);
+ ctx.Name(id, fmt::format("ssbo{}", index));
+ if (ctx.profile.supported_spirv >= 0x00010400) {
+ ctx.interfaces.push_back(id);
+ }
+ for (size_t i = 0; i < desc.count; ++i) {
+ ctx.ssbos[index + i].*member_type = id;
+ }
+ index += desc.count;
+ binding += desc.count;
+ }
+}
+
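+// Emits a helper function computing the new value for a single compare-and-swap iteration:
+// wrapping increment/decrement (atomicInc/atomicDec semantics) or floating-point add/min/max.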
+Id CasFunction(EmitContext& ctx, Operation operation, Id value_type) {
+ const Id func_type{ctx.TypeFunction(value_type, value_type, value_type)};
+ const Id func{ctx.OpFunction(value_type, spv::FunctionControlMask::MaskNone, func_type)};
+ const Id op_a{ctx.OpFunctionParameter(value_type)};
+ const Id op_b{ctx.OpFunctionParameter(value_type)};
+ ctx.AddLabel();
+ Id result{};
+ switch (operation) {
+ case Operation::Increment: {
+ const Id pred{ctx.OpUGreaterThanEqual(ctx.U1, op_a, op_b)};
+ const Id incr{ctx.OpIAdd(value_type, op_a, ctx.Constant(value_type, 1))};
+ result = ctx.OpSelect(value_type, pred, ctx.u32_zero_value, incr);
+ break;
+ }
+ case Operation::Decrement: {
+ const Id lhs{ctx.OpIEqual(ctx.U1, op_a, ctx.Constant(value_type, 0u))};
+ const Id rhs{ctx.OpUGreaterThan(ctx.U1, op_a, op_b)};
+ const Id pred{ctx.OpLogicalOr(ctx.U1, lhs, rhs)};
+ const Id decr{ctx.OpISub(value_type, op_a, ctx.Constant(value_type, 1))};
+ result = ctx.OpSelect(value_type, pred, op_b, decr);
+ break;
+ }
+ case Operation::FPAdd:
+ result = ctx.OpFAdd(value_type, op_a, op_b);
+ break;
+ case Operation::FPMin:
+ result = ctx.OpFMin(value_type, op_a, op_b);
+ break;
+ case Operation::FPMax:
+ result = ctx.OpFMax(value_type, op_a, op_b);
+ break;
+ default:
+ break;
+ }
+ ctx.OpReturnValue(result);
+ ctx.OpFunctionEnd();
+ return func;
+}
+
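+// Emits a function that emulates an unsupported atomic with an OpAtomicCompareExchange retry
+// loop over shared or storage memory. Values whose type differs from the backing 32-bit word
+// are bitcast (or packed/unpacked as half pairs) around the exchange.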
+Id CasLoop(EmitContext& ctx, Operation operation, Id array_pointer, Id element_pointer,
+ Id value_type, Id memory_type, spv::Scope scope) {
+ const bool is_shared{scope == spv::Scope::Workgroup};
+ const bool is_struct{!is_shared || ctx.profile.support_explicit_workgroup_layout};
+ const Id cas_func{CasFunction(ctx, operation, value_type)};
+ const Id zero{ctx.u32_zero_value};
+ const Id scope_id{ctx.Const(static_cast<u32>(scope))};
+
+ const Id loop_header{ctx.OpLabel()};
+ const Id continue_block{ctx.OpLabel()};
+ const Id merge_block{ctx.OpLabel()};
+ const Id func_type{is_shared
+ ? ctx.TypeFunction(value_type, ctx.U32[1], value_type)
+ : ctx.TypeFunction(value_type, ctx.U32[1], value_type, array_pointer)};
+
+ const Id func{ctx.OpFunction(value_type, spv::FunctionControlMask::MaskNone, func_type)};
+ const Id index{ctx.OpFunctionParameter(ctx.U32[1])};
+ const Id op_b{ctx.OpFunctionParameter(value_type)};
+ const Id base{is_shared ? ctx.shared_memory_u32 : ctx.OpFunctionParameter(array_pointer)};
+ ctx.AddLabel();
+ ctx.OpBranch(loop_header);
+ ctx.AddLabel(loop_header);
+
+ ctx.OpLoopMerge(merge_block, continue_block, spv::LoopControlMask::MaskNone);
+ ctx.OpBranch(continue_block);
+
+ ctx.AddLabel(continue_block);
+ const Id word_pointer{is_struct ? ctx.OpAccessChain(element_pointer, base, zero, index)
+ : ctx.OpAccessChain(element_pointer, base, index)};
+ if (value_type.value == ctx.F32[2].value) {
+ const Id u32_value{ctx.OpLoad(ctx.U32[1], word_pointer)};
+ const Id value{ctx.OpUnpackHalf2x16(ctx.F32[2], u32_value)};
+ const Id new_value{ctx.OpFunctionCall(value_type, cas_func, value, op_b)};
+ const Id u32_new_value{ctx.OpPackHalf2x16(ctx.U32[1], new_value)};
+ const Id atomic_res{ctx.OpAtomicCompareExchange(ctx.U32[1], word_pointer, scope_id, zero,
+ zero, u32_new_value, u32_value)};
+ const Id success{ctx.OpIEqual(ctx.U1, atomic_res, u32_value)};
+ ctx.OpBranchConditional(success, merge_block, loop_header);
+
+ ctx.AddLabel(merge_block);
+ ctx.OpReturnValue(ctx.OpUnpackHalf2x16(ctx.F32[2], atomic_res));
+ } else {
+ const Id value{ctx.OpLoad(memory_type, word_pointer)};
+ const bool matching_type{value_type.value == memory_type.value};
+ const Id bitcast_value{matching_type ? value : ctx.OpBitcast(value_type, value)};
+ const Id cal_res{ctx.OpFunctionCall(value_type, cas_func, bitcast_value, op_b)};
+ const Id new_value{matching_type ? cal_res : ctx.OpBitcast(memory_type, cal_res)};
+ const Id atomic_res{ctx.OpAtomicCompareExchange(ctx.U32[1], word_pointer, scope_id, zero,
+ zero, new_value, value)};
+ const Id success{ctx.OpIEqual(ctx.U1, atomic_res, value)};
+ ctx.OpBranchConditional(success, merge_block, loop_header);
+
+ ctx.AddLabel(merge_block);
+ ctx.OpReturnValue(ctx.OpBitcast(value_type, atomic_res));
+ }
+ ctx.OpFunctionEnd();
+ return func;
+}
+
+template <typename Desc>
+std::string NameOf(Stage stage, const Desc& desc, std::string_view prefix) {
+ if (desc.count > 1) {
+ return fmt::format("{}_{}{}_{:02x}x{}", StageName(stage), prefix, desc.cbuf_index,
+ desc.cbuf_offset, desc.count);
+ } else {
+ return fmt::format("{}_{}{}_{:02x}", StageName(stage), prefix, desc.cbuf_index,
+ desc.cbuf_offset);
+ }
+}
+
+Id DescType(EmitContext& ctx, Id sampled_type, Id pointer_type, u32 count) {
+ if (count > 1) {
+ const Id array_type{ctx.TypeArray(sampled_type, ctx.Const(count))};
+ return ctx.TypePointer(spv::StorageClass::UniformConstant, array_type);
+ } else {
+ return pointer_type;
+ }
+}
+} // Anonymous namespace
+
+void VectorTypes::Define(Sirit::Module& sirit_ctx, Id base_type, std::string_view name) {
+ defs[0] = sirit_ctx.Name(base_type, name);
+
+ std::array<char, 6> def_name;
+ for (int i = 1; i < 4; ++i) {
+ const std::string_view def_name_view(
+ def_name.data(),
+ fmt::format_to_n(def_name.data(), def_name.size(), "{}x{}", name, i + 1).size);
+ defs[static_cast<size_t>(i)] =
+ sirit_ctx.Name(sirit_ctx.TypeVector(base_type, i + 1), def_name_view);
+ }
+}
+
+EmitContext::EmitContext(const Profile& profile_, const RuntimeInfo& runtime_info_,
+ IR::Program& program, Bindings& bindings)
+ : Sirit::Module(profile_.supported_spirv), profile{profile_},
+ runtime_info{runtime_info_}, stage{program.stage} {
+ const bool is_unified{profile.unified_descriptor_binding};
+ u32& uniform_binding{is_unified ? bindings.unified : bindings.uniform_buffer};
+ u32& storage_binding{is_unified ? bindings.unified : bindings.storage_buffer};
+ u32& texture_binding{is_unified ? bindings.unified : bindings.texture};
+ u32& image_binding{is_unified ? bindings.unified : bindings.image};
+ AddCapability(spv::Capability::Shader);
+ DefineCommonTypes(program.info);
+ DefineCommonConstants();
+ DefineInterfaces(program);
+ DefineLocalMemory(program);
+ DefineSharedMemory(program);
+ DefineSharedMemoryFunctions(program);
+ DefineConstantBuffers(program.info, uniform_binding);
+ DefineStorageBuffers(program.info, storage_binding);
+ DefineTextureBuffers(program.info, texture_binding);
+ DefineImageBuffers(program.info, image_binding);
+ DefineTextures(program.info, texture_binding);
+ DefineImages(program.info, image_binding);
+ DefineAttributeMemAccess(program.info);
+ DefineGlobalMemoryFunctions(program.info);
+}
+
+EmitContext::~EmitContext() = default;
+
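+// Resolves an IR value to a SPIR-V id: forwards the definition of non-immediate values and
+// materializes constants for immediates.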
+Id EmitContext::Def(const IR::Value& value) {
+ if (!value.IsImmediate()) {
+ return value.InstRecursive()->Definition<Id>();
+ }
+ switch (value.Type()) {
+ case IR::Type::Void:
+ // Void instructions are used for optional arguments (e.g. texture offsets)
+ // They are not meant to be used in the SPIR-V module
+ return Id{};
+ case IR::Type::U1:
+ return value.U1() ? true_value : false_value;
+ case IR::Type::U32:
+ return Const(value.U32());
+ case IR::Type::U64:
+ return Constant(U64, value.U64());
+ case IR::Type::F32:
+ return Const(value.F32());
+ case IR::Type::F64:
+ return Constant(F64[1], value.F64());
+ default:
+ throw NotImplementedException("Immediate type {}", value.Type());
+ }
+}
+
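+// Returns the bit offset of a byte within its containing 32-bit word: (offset % 4) * 8.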
+Id EmitContext::BitOffset8(const IR::Value& offset) {
+ if (offset.IsImmediate()) {
+ return Const((offset.U32() % 4) * 8);
+ }
+ return OpBitwiseAnd(U32[1], OpShiftLeftLogical(U32[1], Def(offset), Const(3u)), Const(24u));
+}
+
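+// Returns the bit offset of a 16-bit half within its containing 32-bit word: either 0 or 16.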
+Id EmitContext::BitOffset16(const IR::Value& offset) {
+ if (offset.IsImmediate()) {
+ return Const(((offset.U32() / 2) % 2) * 16);
+ }
+ return OpBitwiseAnd(U32[1], OpShiftLeftLogical(U32[1], Def(offset), Const(3u)), Const(16u));
+}
+
+void EmitContext::DefineCommonTypes(const Info& info) {
+ void_id = TypeVoid();
+
+ U1 = Name(TypeBool(), "u1");
+
+ F32.Define(*this, TypeFloat(32), "f32");
+ U32.Define(*this, TypeInt(32, false), "u32");
+ S32.Define(*this, TypeInt(32, true), "s32");
+
+ private_u32 = Name(TypePointer(spv::StorageClass::Private, U32[1]), "private_u32");
+
+ input_f32 = Name(TypePointer(spv::StorageClass::Input, F32[1]), "input_f32");
+ input_u32 = Name(TypePointer(spv::StorageClass::Input, U32[1]), "input_u32");
+ input_s32 = Name(TypePointer(spv::StorageClass::Input, TypeInt(32, true)), "input_s32");
+
+ output_f32 = Name(TypePointer(spv::StorageClass::Output, F32[1]), "output_f32");
+ output_u32 = Name(TypePointer(spv::StorageClass::Output, U32[1]), "output_u32");
+
+ if (info.uses_int8 && profile.support_int8) {
+ AddCapability(spv::Capability::Int8);
+ U8 = Name(TypeInt(8, false), "u8");
+ S8 = Name(TypeInt(8, true), "s8");
+ }
+ if (info.uses_int16 && profile.support_int16) {
+ AddCapability(spv::Capability::Int16);
+ U16 = Name(TypeInt(16, false), "u16");
+ S16 = Name(TypeInt(16, true), "s16");
+ }
+ if (info.uses_int64) {
+ AddCapability(spv::Capability::Int64);
+ U64 = Name(TypeInt(64, false), "u64");
+ }
+ if (info.uses_fp16) {
+ AddCapability(spv::Capability::Float16);
+ F16.Define(*this, TypeFloat(16), "f16");
+ }
+ if (info.uses_fp64) {
+ AddCapability(spv::Capability::Float64);
+ F64.Define(*this, TypeFloat(64), "f64");
+ }
+}
+
+void EmitContext::DefineCommonConstants() {
+ true_value = ConstantTrue(U1);
+ false_value = ConstantFalse(U1);
+ u32_zero_value = Const(0U);
+ f32_zero_value = Const(0.0f);
+}
+
+void EmitContext::DefineInterfaces(const IR::Program& program) {
+ DefineInputs(program);
+ DefineOutputs(program);
+}
+
+void EmitContext::DefineLocalMemory(const IR::Program& program) {
+ if (program.local_memory_size == 0) {
+ return;
+ }
+ const u32 num_elements{Common::DivCeil(program.local_memory_size, 4U)};
+ const Id type{TypeArray(U32[1], Const(num_elements))};
+ const Id pointer{TypePointer(spv::StorageClass::Private, type)};
+ local_memory = AddGlobalVariable(pointer, spv::StorageClass::Private);
+ if (profile.supported_spirv >= 0x00010400) {
+ interfaces.push_back(local_memory);
+ }
+}
+
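+// Declares workgroup shared memory. With SPV_KHR_workgroup_memory_explicit_layout, aliased
+// Block variables are created per element width; otherwise a plain u32 array is used and
+// 8/16-bit stores are emulated through compare-exchange based bitfield-insert functions.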
+void EmitContext::DefineSharedMemory(const IR::Program& program) {
+ if (program.shared_memory_size == 0) {
+ return;
+ }
+ const auto make{[&](Id element_type, u32 element_size) {
+ const u32 num_elements{Common::DivCeil(program.shared_memory_size, element_size)};
+ const Id array_type{TypeArray(element_type, Const(num_elements))};
+ Decorate(array_type, spv::Decoration::ArrayStride, element_size);
+
+ const Id struct_type{TypeStruct(array_type)};
+ MemberDecorate(struct_type, 0U, spv::Decoration::Offset, 0U);
+ Decorate(struct_type, spv::Decoration::Block);
+
+ const Id pointer{TypePointer(spv::StorageClass::Workgroup, struct_type)};
+ const Id element_pointer{TypePointer(spv::StorageClass::Workgroup, element_type)};
+ const Id variable{AddGlobalVariable(pointer, spv::StorageClass::Workgroup)};
+ Decorate(variable, spv::Decoration::Aliased);
+ interfaces.push_back(variable);
+
+ return std::make_tuple(variable, element_pointer, pointer);
+ }};
+ if (profile.support_explicit_workgroup_layout) {
+ AddExtension("SPV_KHR_workgroup_memory_explicit_layout");
+ AddCapability(spv::Capability::WorkgroupMemoryExplicitLayoutKHR);
+ if (program.info.uses_int8) {
+ AddCapability(spv::Capability::WorkgroupMemoryExplicitLayout8BitAccessKHR);
+ std::tie(shared_memory_u8, shared_u8, std::ignore) = make(U8, 1);
+ }
+ if (program.info.uses_int16) {
+ AddCapability(spv::Capability::WorkgroupMemoryExplicitLayout16BitAccessKHR);
+ std::tie(shared_memory_u16, shared_u16, std::ignore) = make(U16, 2);
+ }
+ if (program.info.uses_int64) {
+ std::tie(shared_memory_u64, shared_u64, std::ignore) = make(U64, 8);
+ }
+ std::tie(shared_memory_u32, shared_u32, shared_memory_u32_type) = make(U32[1], 4);
+ std::tie(shared_memory_u32x2, shared_u32x2, std::ignore) = make(U32[2], 8);
+ std::tie(shared_memory_u32x4, shared_u32x4, std::ignore) = make(U32[4], 16);
+ return;
+ }
+ const u32 num_elements{Common::DivCeil(program.shared_memory_size, 4U)};
+ const Id type{TypeArray(U32[1], Const(num_elements))};
+ shared_memory_u32_type = TypePointer(spv::StorageClass::Workgroup, type);
+
+ shared_u32 = TypePointer(spv::StorageClass::Workgroup, U32[1]);
+ shared_memory_u32 = AddGlobalVariable(shared_memory_u32_type, spv::StorageClass::Workgroup);
+ interfaces.push_back(shared_memory_u32);
+
+ const Id func_type{TypeFunction(void_id, U32[1], U32[1])};
+ const auto make_function{[&](u32 mask, u32 size) {
+ const Id loop_header{OpLabel()};
+ const Id continue_block{OpLabel()};
+ const Id merge_block{OpLabel()};
+
+ const Id func{OpFunction(void_id, spv::FunctionControlMask::MaskNone, func_type)};
+ const Id offset{OpFunctionParameter(U32[1])};
+ const Id insert_value{OpFunctionParameter(U32[1])};
+ AddLabel();
+ OpBranch(loop_header);
+
+ AddLabel(loop_header);
+ const Id word_offset{OpShiftRightArithmetic(U32[1], offset, Const(2U))};
+ const Id shift_offset{OpShiftLeftLogical(U32[1], offset, Const(3U))};
+ const Id bit_offset{OpBitwiseAnd(U32[1], shift_offset, Const(mask))};
+ const Id count{Const(size)};
+ OpLoopMerge(merge_block, continue_block, spv::LoopControlMask::MaskNone);
+ OpBranch(continue_block);
+
+ AddLabel(continue_block);
+ const Id word_pointer{OpAccessChain(shared_u32, shared_memory_u32, word_offset)};
+ const Id old_value{OpLoad(U32[1], word_pointer)};
+ const Id new_value{OpBitFieldInsert(U32[1], old_value, insert_value, bit_offset, count)};
+ const Id atomic_res{OpAtomicCompareExchange(U32[1], word_pointer, Const(1U), u32_zero_value,
+ u32_zero_value, new_value, old_value)};
+ const Id success{OpIEqual(U1, atomic_res, old_value)};
+ OpBranchConditional(success, merge_block, loop_header);
+
+ AddLabel(merge_block);
+ OpReturn();
+ OpFunctionEnd();
+ return func;
+ }};
+ if (program.info.uses_int8) {
+ shared_store_u8_func = make_function(24, 8);
+ }
+ if (program.info.uses_int16) {
+ shared_store_u16_func = make_function(16, 16);
+ }
+}
+
+void EmitContext::DefineSharedMemoryFunctions(const IR::Program& program) {
+ if (program.info.uses_shared_increment) {
+ increment_cas_shared = CasLoop(*this, Operation::Increment, shared_memory_u32_type,
+ shared_u32, U32[1], U32[1], spv::Scope::Workgroup);
+ }
+ if (program.info.uses_shared_decrement) {
+ decrement_cas_shared = CasLoop(*this, Operation::Decrement, shared_memory_u32_type,
+ shared_u32, U32[1], U32[1], spv::Scope::Workgroup);
+ }
+}
+
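+// Builds helper functions for dynamically indexed attribute access: a switch over the
+// attribute word selects the matching input or output variable for the load or store.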
+void EmitContext::DefineAttributeMemAccess(const Info& info) {
+ const auto make_load{[&] {
+ const bool is_array{stage == Stage::Geometry};
+ const Id end_block{OpLabel()};
+ const Id default_label{OpLabel()};
+
+ const Id func_type_load{is_array ? TypeFunction(F32[1], U32[1], U32[1])
+ : TypeFunction(F32[1], U32[1])};
+ const Id func{OpFunction(F32[1], spv::FunctionControlMask::MaskNone, func_type_load)};
+ const Id offset{OpFunctionParameter(U32[1])};
+ const Id vertex{is_array ? OpFunctionParameter(U32[1]) : Id{}};
+
+ AddLabel();
+ const Id base_index{OpShiftRightArithmetic(U32[1], offset, Const(2U))};
+ const Id masked_index{OpBitwiseAnd(U32[1], base_index, Const(3U))};
+ const Id compare_index{OpShiftRightArithmetic(U32[1], base_index, Const(2U))};
+ std::vector<Sirit::Literal> literals;
+ std::vector<Id> labels;
+ if (info.loads.AnyComponent(IR::Attribute::PositionX)) {
+ literals.push_back(static_cast<u32>(IR::Attribute::PositionX) >> 2);
+ labels.push_back(OpLabel());
+ }
+ const u32 base_attribute_value = static_cast<u32>(IR::Attribute::Generic0X) >> 2;
+ for (u32 index = 0; index < static_cast<u32>(IR::NUM_GENERICS); ++index) {
+ if (!info.loads.Generic(index)) {
+ continue;
+ }
+ literals.push_back(base_attribute_value + index);
+ labels.push_back(OpLabel());
+ }
+ OpSelectionMerge(end_block, spv::SelectionControlMask::MaskNone);
+ OpSwitch(compare_index, default_label, literals, labels);
+ AddLabel(default_label);
+ OpReturnValue(Const(0.0f));
+ size_t label_index{0};
+ if (info.loads.AnyComponent(IR::Attribute::PositionX)) {
+ AddLabel(labels[label_index]);
+ const Id pointer{is_array
+ ? OpAccessChain(input_f32, input_position, vertex, masked_index)
+ : OpAccessChain(input_f32, input_position, masked_index)};
+ const Id result{OpLoad(F32[1], pointer)};
+ OpReturnValue(result);
+ ++label_index;
+ }
+ for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
+ if (!info.loads.Generic(index)) {
+ continue;
+ }
+ AddLabel(labels[label_index]);
+ const auto type{AttrTypes(*this, static_cast<u32>(index))};
+ if (!type) {
+ OpReturnValue(Const(0.0f));
+ ++label_index;
+ continue;
+ }
+ const Id generic_id{input_generics.at(index)};
+ const Id pointer{is_array
+ ? OpAccessChain(type->pointer, generic_id, vertex, masked_index)
+ : OpAccessChain(type->pointer, generic_id, masked_index)};
+ const Id value{OpLoad(type->id, pointer)};
+ const Id result{type->needs_cast ? OpBitcast(F32[1], value) : value};
+ OpReturnValue(result);
+ ++label_index;
+ }
+ AddLabel(end_block);
+ OpUnreachable();
+ OpFunctionEnd();
+ return func;
+ }};
+ const auto make_store{[&] {
+ const Id end_block{OpLabel()};
+ const Id default_label{OpLabel()};
+
+ const Id func_type_store{TypeFunction(void_id, U32[1], F32[1])};
+ const Id func{OpFunction(void_id, spv::FunctionControlMask::MaskNone, func_type_store)};
+ const Id offset{OpFunctionParameter(U32[1])};
+ const Id store_value{OpFunctionParameter(F32[1])};
+ AddLabel();
+ const Id base_index{OpShiftRightArithmetic(U32[1], offset, Const(2U))};
+ const Id masked_index{OpBitwiseAnd(U32[1], base_index, Const(3U))};
+ const Id compare_index{OpShiftRightArithmetic(U32[1], base_index, Const(2U))};
+ std::vector<Sirit::Literal> literals;
+ std::vector<Id> labels;
+ if (info.stores.AnyComponent(IR::Attribute::PositionX)) {
+ literals.push_back(static_cast<u32>(IR::Attribute::PositionX) >> 2);
+ labels.push_back(OpLabel());
+ }
+ const u32 base_attribute_value = static_cast<u32>(IR::Attribute::Generic0X) >> 2;
+ for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
+ if (!info.stores.Generic(index)) {
+ continue;
+ }
+ literals.push_back(base_attribute_value + static_cast<u32>(index));
+ labels.push_back(OpLabel());
+ }
+ if (info.stores.ClipDistances()) {
+ literals.push_back(static_cast<u32>(IR::Attribute::ClipDistance0) >> 2);
+ labels.push_back(OpLabel());
+ literals.push_back(static_cast<u32>(IR::Attribute::ClipDistance4) >> 2);
+ labels.push_back(OpLabel());
+ }
+ OpSelectionMerge(end_block, spv::SelectionControlMask::MaskNone);
+ OpSwitch(compare_index, default_label, literals, labels);
+ AddLabel(default_label);
+ OpReturn();
+ size_t label_index{0};
+ if (info.stores.AnyComponent(IR::Attribute::PositionX)) {
+ AddLabel(labels[label_index]);
+ const Id pointer{OpAccessChain(output_f32, output_position, masked_index)};
+ OpStore(pointer, store_value);
+ OpReturn();
+ ++label_index;
+ }
+ for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
+ if (!info.stores.Generic(index)) {
+ continue;
+ }
+ if (output_generics[index][0].num_components != 4) {
+ throw NotImplementedException("Physical stores and transform feedbacks");
+ }
+ AddLabel(labels[label_index]);
+ const Id generic_id{output_generics[index][0].id};
+ const Id pointer{OpAccessChain(output_f32, generic_id, masked_index)};
+ OpStore(pointer, store_value);
+ OpReturn();
+ ++label_index;
+ }
+ if (info.stores.ClipDistances()) {
+ AddLabel(labels[label_index]);
+ const Id pointer{OpAccessChain(output_f32, clip_distances, masked_index)};
+ OpStore(pointer, store_value);
+ OpReturn();
+ ++label_index;
+ AddLabel(labels[label_index]);
+ const Id fixed_index{OpIAdd(U32[1], masked_index, Const(4U))};
+ const Id pointer2{OpAccessChain(output_f32, clip_distances, fixed_index)};
+ OpStore(pointer2, store_value);
+ OpReturn();
+ ++label_index;
+ }
+ AddLabel(end_block);
+ OpUnreachable();
+ OpFunctionEnd();
+ return func;
+ }};
+ if (info.loads_indexed_attributes) {
+ indexed_load_func = make_load();
+ }
+ if (info.stores_indexed_attributes) {
+ indexed_store_func = make_store();
+ }
+}
+
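+// Builds load/store helpers for global memory accesses. Each 64-bit address is range-checked
+// against the SSBO base address and size read from the constant buffer and, on a hit,
+// redirected to the corresponding storage buffer.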
+void EmitContext::DefineGlobalMemoryFunctions(const Info& info) {
+ if (!info.uses_global_memory || !profile.support_int64) {
+ return;
+ }
+ using DefPtr = Id StorageDefinitions::*;
+ const Id zero{u32_zero_value};
+ const auto define_body{[&](DefPtr ssbo_member, Id addr, Id element_pointer, u32 shift,
+ auto&& callback) {
+ AddLabel();
+ const size_t num_buffers{info.storage_buffers_descriptors.size()};
+ for (size_t index = 0; index < num_buffers; ++index) {
+ if (!info.nvn_buffer_used[index]) {
+ continue;
+ }
+ const auto& ssbo{info.storage_buffers_descriptors[index]};
+ const Id ssbo_addr_cbuf_offset{Const(ssbo.cbuf_offset / 8)};
+ const Id ssbo_size_cbuf_offset{Const(ssbo.cbuf_offset / 4 + 2)};
+ const Id ssbo_addr_pointer{OpAccessChain(
+ uniform_types.U32x2, cbufs[ssbo.cbuf_index].U32x2, zero, ssbo_addr_cbuf_offset)};
+ const Id ssbo_size_pointer{OpAccessChain(uniform_types.U32, cbufs[ssbo.cbuf_index].U32,
+ zero, ssbo_size_cbuf_offset)};
+
+ const Id ssbo_addr{OpBitcast(U64, OpLoad(U32[2], ssbo_addr_pointer))};
+ const Id ssbo_size{OpUConvert(U64, OpLoad(U32[1], ssbo_size_pointer))};
+ const Id ssbo_end{OpIAdd(U64, ssbo_addr, ssbo_size)};
+ const Id cond{OpLogicalAnd(U1, OpUGreaterThanEqual(U1, addr, ssbo_addr),
+ OpULessThan(U1, addr, ssbo_end))};
+ const Id then_label{OpLabel()};
+ const Id else_label{OpLabel()};
+ OpSelectionMerge(else_label, spv::SelectionControlMask::MaskNone);
+ OpBranchConditional(cond, then_label, else_label);
+ AddLabel(then_label);
+ const Id ssbo_id{ssbos[index].*ssbo_member};
+ const Id ssbo_offset{OpUConvert(U32[1], OpISub(U64, addr, ssbo_addr))};
+ const Id ssbo_index{OpShiftRightLogical(U32[1], ssbo_offset, Const(shift))};
+ const Id ssbo_pointer{OpAccessChain(element_pointer, ssbo_id, zero, ssbo_index)};
+ callback(ssbo_pointer);
+ AddLabel(else_label);
+ }
+ }};
+ const auto define_load{[&](DefPtr ssbo_member, Id element_pointer, Id type, u32 shift) {
+ const Id function_type{TypeFunction(type, U64)};
+ const Id func_id{OpFunction(type, spv::FunctionControlMask::MaskNone, function_type)};
+ const Id addr{OpFunctionParameter(U64)};
+ define_body(ssbo_member, addr, element_pointer, shift,
+ [&](Id ssbo_pointer) { OpReturnValue(OpLoad(type, ssbo_pointer)); });
+ OpReturnValue(ConstantNull(type));
+ OpFunctionEnd();
+ return func_id;
+ }};
+ const auto define_write{[&](DefPtr ssbo_member, Id element_pointer, Id type, u32 shift) {
+ const Id function_type{TypeFunction(void_id, U64, type)};
+ const Id func_id{OpFunction(void_id, spv::FunctionControlMask::MaskNone, function_type)};
+ const Id addr{OpFunctionParameter(U64)};
+ const Id data{OpFunctionParameter(type)};
+ define_body(ssbo_member, addr, element_pointer, shift, [&](Id ssbo_pointer) {
+ OpStore(ssbo_pointer, data);
+ OpReturn();
+ });
+ OpReturn();
+ OpFunctionEnd();
+ return func_id;
+ }};
+ const auto define{
+ [&](DefPtr ssbo_member, const StorageTypeDefinition& type_def, Id type, size_t size) {
+ const Id element_type{type_def.element};
+ const u32 shift{static_cast<u32>(std::countr_zero(size))};
+ const Id load_func{define_load(ssbo_member, element_type, type, shift)};
+ const Id write_func{define_write(ssbo_member, element_type, type, shift)};
+ return std::make_pair(load_func, write_func);
+ }};
+ std::tie(load_global_func_u32, write_global_func_u32) =
+ define(&StorageDefinitions::U32, storage_types.U32, U32[1], sizeof(u32));
+ std::tie(load_global_func_u32x2, write_global_func_u32x2) =
+ define(&StorageDefinitions::U32x2, storage_types.U32x2, U32[2], sizeof(u32[2]));
+ std::tie(load_global_func_u32x4, write_global_func_u32x4) =
+ define(&StorageDefinitions::U32x4, storage_types.U32x4, U32[4], sizeof(u32[4]));
+}
+
+void EmitContext::DefineConstantBuffers(const Info& info, u32& binding) {
+ if (info.constant_buffer_descriptors.empty()) {
+ return;
+ }
+ if (!profile.support_descriptor_aliasing) {
+ DefineConstBuffers(*this, info, &UniformDefinitions::U32x4, binding, U32[4], 'u',
+ sizeof(u32[4]));
+ for (const ConstantBufferDescriptor& desc : info.constant_buffer_descriptors) {
+ binding += desc.count;
+ }
+ return;
+ }
+ IR::Type types{info.used_constant_buffer_types};
+ if (True(types & IR::Type::U8)) {
+ if (profile.support_int8) {
+ DefineConstBuffers(*this, info, &UniformDefinitions::U8, binding, U8, 'u', sizeof(u8));
+ DefineConstBuffers(*this, info, &UniformDefinitions::S8, binding, S8, 's', sizeof(s8));
+ } else {
+ types |= IR::Type::U32;
+ }
+ }
+ if (True(types & IR::Type::U16)) {
+ if (profile.support_int16) {
+ DefineConstBuffers(*this, info, &UniformDefinitions::U16, binding, U16, 'u',
+ sizeof(u16));
+ DefineConstBuffers(*this, info, &UniformDefinitions::S16, binding, S16, 's',
+ sizeof(s16));
+ } else {
+ types |= IR::Type::U32;
+ }
+ }
+ if (True(types & IR::Type::U32)) {
+ DefineConstBuffers(*this, info, &UniformDefinitions::U32, binding, U32[1], 'u',
+ sizeof(u32));
+ }
+ if (True(types & IR::Type::F32)) {
+ DefineConstBuffers(*this, info, &UniformDefinitions::F32, binding, F32[1], 'f',
+ sizeof(f32));
+ }
+ if (True(types & IR::Type::U32x2)) {
+ DefineConstBuffers(*this, info, &UniformDefinitions::U32x2, binding, U32[2], 'u',
+ sizeof(u32[2]));
+ }
+ binding += static_cast<u32>(info.constant_buffer_descriptors.size());
+}
+
+void EmitContext::DefineStorageBuffers(const Info& info, u32& binding) {
+ if (info.storage_buffers_descriptors.empty()) {
+ return;
+ }
+ AddExtension("SPV_KHR_storage_buffer_storage_class");
+
+ const IR::Type used_types{profile.support_descriptor_aliasing ? info.used_storage_buffer_types
+ : IR::Type::U32};
+ if (profile.support_int8 && True(used_types & IR::Type::U8)) {
+ DefineSsbos(*this, storage_types.U8, &StorageDefinitions::U8, info, binding, U8,
+ sizeof(u8));
+ DefineSsbos(*this, storage_types.S8, &StorageDefinitions::S8, info, binding, S8,
+ sizeof(u8));
+ }
+ if (profile.support_int16 && True(used_types & IR::Type::U16)) {
+ DefineSsbos(*this, storage_types.U16, &StorageDefinitions::U16, info, binding, U16,
+ sizeof(u16));
+ DefineSsbos(*this, storage_types.S16, &StorageDefinitions::S16, info, binding, S16,
+ sizeof(u16));
+ }
+ if (True(used_types & IR::Type::U32)) {
+ DefineSsbos(*this, storage_types.U32, &StorageDefinitions::U32, info, binding, U32[1],
+ sizeof(u32));
+ }
+ if (True(used_types & IR::Type::F32)) {
+ DefineSsbos(*this, storage_types.F32, &StorageDefinitions::F32, info, binding, F32[1],
+ sizeof(f32));
+ }
+ if (True(used_types & IR::Type::U64)) {
+ DefineSsbos(*this, storage_types.U64, &StorageDefinitions::U64, info, binding, U64,
+ sizeof(u64));
+ }
+ if (True(used_types & IR::Type::U32x2)) {
+ DefineSsbos(*this, storage_types.U32x2, &StorageDefinitions::U32x2, info, binding, U32[2],
+ sizeof(u32[2]));
+ }
+ if (True(used_types & IR::Type::U32x4)) {
+ DefineSsbos(*this, storage_types.U32x4, &StorageDefinitions::U32x4, info, binding, U32[4],
+ sizeof(u32[4]));
+ }
+ for (const StorageBufferDescriptor& desc : info.storage_buffers_descriptors) {
+ binding += desc.count;
+ }
+ const bool needs_function{
+ info.uses_global_increment || info.uses_global_decrement || info.uses_atomic_f32_add ||
+ info.uses_atomic_f16x2_add || info.uses_atomic_f16x2_min || info.uses_atomic_f16x2_max ||
+ info.uses_atomic_f32x2_add || info.uses_atomic_f32x2_min || info.uses_atomic_f32x2_max};
+ if (needs_function) {
+ AddCapability(spv::Capability::VariablePointersStorageBuffer);
+ }
+ if (info.uses_global_increment) {
+ increment_cas_ssbo = CasLoop(*this, Operation::Increment, storage_types.U32.array,
+ storage_types.U32.element, U32[1], U32[1], spv::Scope::Device);
+ }
+ if (info.uses_global_decrement) {
+ decrement_cas_ssbo = CasLoop(*this, Operation::Decrement, storage_types.U32.array,
+ storage_types.U32.element, U32[1], U32[1], spv::Scope::Device);
+ }
+ if (info.uses_atomic_f32_add) {
+ f32_add_cas = CasLoop(*this, Operation::FPAdd, storage_types.U32.array,
+ storage_types.U32.element, F32[1], U32[1], spv::Scope::Device);
+ }
+ if (info.uses_atomic_f16x2_add) {
+ f16x2_add_cas = CasLoop(*this, Operation::FPAdd, storage_types.U32.array,
+ storage_types.U32.element, F16[2], F16[2], spv::Scope::Device);
+ }
+ if (info.uses_atomic_f16x2_min) {
+ f16x2_min_cas = CasLoop(*this, Operation::FPMin, storage_types.U32.array,
+ storage_types.U32.element, F16[2], F16[2], spv::Scope::Device);
+ }
+ if (info.uses_atomic_f16x2_max) {
+ f16x2_max_cas = CasLoop(*this, Operation::FPMax, storage_types.U32.array,
+ storage_types.U32.element, F16[2], F16[2], spv::Scope::Device);
+ }
+ if (info.uses_atomic_f32x2_add) {
+ f32x2_add_cas = CasLoop(*this, Operation::FPAdd, storage_types.U32.array,
+ storage_types.U32.element, F32[2], F32[2], spv::Scope::Device);
+ }
+ if (info.uses_atomic_f32x2_min) {
+ f32x2_min_cas = CasLoop(*this, Operation::FPMin, storage_types.U32.array,
+ storage_types.U32.element, F32[2], F32[2], spv::Scope::Device);
+ }
+ if (info.uses_atomic_f32x2_max) {
+ f32x2_max_cas = CasLoop(*this, Operation::FPMax, storage_types.U32.array,
+ storage_types.U32.element, F32[2], F32[2], spv::Scope::Device);
+ }
+}
+
+void EmitContext::DefineTextureBuffers(const Info& info, u32& binding) {
+ if (info.texture_buffer_descriptors.empty()) {
+ return;
+ }
+ const spv::ImageFormat format{spv::ImageFormat::Unknown};
+ image_buffer_type = TypeImage(F32[1], spv::Dim::Buffer, 0U, false, false, 1, format);
+ sampled_texture_buffer_type = TypeSampledImage(image_buffer_type);
+
+ const Id type{TypePointer(spv::StorageClass::UniformConstant, sampled_texture_buffer_type)};
+ texture_buffers.reserve(info.texture_buffer_descriptors.size());
+ for (const TextureBufferDescriptor& desc : info.texture_buffer_descriptors) {
+ if (desc.count != 1) {
+ throw NotImplementedException("Array of texture buffers");
+ }
+ const Id id{AddGlobalVariable(type, spv::StorageClass::UniformConstant)};
+ Decorate(id, spv::Decoration::Binding, binding);
+ Decorate(id, spv::Decoration::DescriptorSet, 0U);
+ Name(id, NameOf(stage, desc, "texbuf"));
+ texture_buffers.push_back({
+ .id = id,
+ .count = desc.count,
+ });
+ if (profile.supported_spirv >= 0x00010400) {
+ interfaces.push_back(id);
+ }
+ ++binding;
+ }
+}
+
+void EmitContext::DefineImageBuffers(const Info& info, u32& binding) {
+ image_buffers.reserve(info.image_buffer_descriptors.size());
+ for (const ImageBufferDescriptor& desc : info.image_buffer_descriptors) {
+ if (desc.count != 1) {
+ throw NotImplementedException("Array of image buffers");
+ }
+ const spv::ImageFormat format{GetImageFormat(desc.format)};
+ const Id image_type{TypeImage(U32[1], spv::Dim::Buffer, false, false, false, 2, format)};
+ const Id pointer_type{TypePointer(spv::StorageClass::UniformConstant, image_type)};
+ const Id id{AddGlobalVariable(pointer_type, spv::StorageClass::UniformConstant)};
+ Decorate(id, spv::Decoration::Binding, binding);
+ Decorate(id, spv::Decoration::DescriptorSet, 0U);
+ Name(id, NameOf(stage, desc, "imgbuf"));
+ image_buffers.push_back({
+ .id = id,
+ .image_type = image_type,
+ .count = desc.count,
+ });
+ if (profile.supported_spirv >= 0x00010400) {
+ interfaces.push_back(id);
+ }
+ ++binding;
+ }
+}
+
+void EmitContext::DefineTextures(const Info& info, u32& binding) {
+ textures.reserve(info.texture_descriptors.size());
+ for (const TextureDescriptor& desc : info.texture_descriptors) {
+ const Id image_type{ImageType(*this, desc)};
+ const Id sampled_type{TypeSampledImage(image_type)};
+ const Id pointer_type{TypePointer(spv::StorageClass::UniformConstant, sampled_type)};
+ const Id desc_type{DescType(*this, sampled_type, pointer_type, desc.count)};
+ const Id id{AddGlobalVariable(desc_type, spv::StorageClass::UniformConstant)};
+ Decorate(id, spv::Decoration::Binding, binding);
+ Decorate(id, spv::Decoration::DescriptorSet, 0U);
+ Name(id, NameOf(stage, desc, "tex"));
+ textures.push_back({
+ .id = id,
+ .sampled_type = sampled_type,
+ .pointer_type = pointer_type,
+ .image_type = image_type,
+ .count = desc.count,
+ });
+ if (profile.supported_spirv >= 0x00010400) {
+ interfaces.push_back(id);
+ }
+ ++binding;
+ }
+ if (info.uses_atomic_image_u32) {
+ image_u32 = TypePointer(spv::StorageClass::Image, U32[1]);
+ }
+}
+
+void EmitContext::DefineImages(const Info& info, u32& binding) {
+ images.reserve(info.image_descriptors.size());
+ for (const ImageDescriptor& desc : info.image_descriptors) {
+ if (desc.count != 1) {
+ throw NotImplementedException("Array of images");
+ }
+ const Id image_type{ImageType(*this, desc)};
+ const Id pointer_type{TypePointer(spv::StorageClass::UniformConstant, image_type)};
+ const Id id{AddGlobalVariable(pointer_type, spv::StorageClass::UniformConstant)};
+ Decorate(id, spv::Decoration::Binding, binding);
+ Decorate(id, spv::Decoration::DescriptorSet, 0U);
+ Name(id, NameOf(stage, desc, "img"));
+ images.push_back({
+ .id = id,
+ .image_type = image_type,
+ .count = desc.count,
+ });
+ if (profile.supported_spirv >= 0x00010400) {
+ interfaces.push_back(id);
+ }
+ ++binding;
+ }
+}
+
+void EmitContext::DefineInputs(const IR::Program& program) {
+ const Info& info{program.info};
+ const VaryingState loads{info.loads.mask | info.passthrough.mask};
+
+ if (info.uses_workgroup_id) {
+ workgroup_id = DefineInput(*this, U32[3], false, spv::BuiltIn::WorkgroupId);
+ }
+ if (info.uses_local_invocation_id) {
+ local_invocation_id = DefineInput(*this, U32[3], false, spv::BuiltIn::LocalInvocationId);
+ }
+ if (info.uses_invocation_id) {
+ invocation_id = DefineInput(*this, U32[1], false, spv::BuiltIn::InvocationId);
+ }
+ if (info.uses_sample_id) {
+ sample_id = DefineInput(*this, U32[1], false, spv::BuiltIn::SampleId);
+ }
+ if (info.uses_is_helper_invocation) {
+ is_helper_invocation = DefineInput(*this, U1, false, spv::BuiltIn::HelperInvocation);
+ }
+ if (info.uses_subgroup_mask) {
+ subgroup_mask_eq = DefineInput(*this, U32[4], false, spv::BuiltIn::SubgroupEqMaskKHR);
+ subgroup_mask_lt = DefineInput(*this, U32[4], false, spv::BuiltIn::SubgroupLtMaskKHR);
+ subgroup_mask_le = DefineInput(*this, U32[4], false, spv::BuiltIn::SubgroupLeMaskKHR);
+ subgroup_mask_gt = DefineInput(*this, U32[4], false, spv::BuiltIn::SubgroupGtMaskKHR);
+ subgroup_mask_ge = DefineInput(*this, U32[4], false, spv::BuiltIn::SubgroupGeMaskKHR);
+ }
+ if (info.uses_subgroup_invocation_id || info.uses_subgroup_shuffles ||
+ (profile.warp_size_potentially_larger_than_guest &&
+ (info.uses_subgroup_vote || info.uses_subgroup_mask))) {
+ subgroup_local_invocation_id =
+ DefineInput(*this, U32[1], false, spv::BuiltIn::SubgroupLocalInvocationId);
+ }
+ if (info.uses_fswzadd) {
+ const Id f32_one{Const(1.0f)};
+ const Id f32_minus_one{Const(-1.0f)};
+ const Id f32_zero{Const(0.0f)};
+ fswzadd_lut_a = ConstantComposite(F32[4], f32_minus_one, f32_one, f32_minus_one, f32_zero);
+ fswzadd_lut_b =
+ ConstantComposite(F32[4], f32_minus_one, f32_minus_one, f32_one, f32_minus_one);
+ }
+ if (loads[IR::Attribute::PrimitiveId]) {
+ primitive_id = DefineInput(*this, U32[1], false, spv::BuiltIn::PrimitiveId);
+ }
+ if (loads.AnyComponent(IR::Attribute::PositionX)) {
+        const bool is_fragment{stage == Stage::Fragment};
+        const spv::BuiltIn built_in{is_fragment ? spv::BuiltIn::FragCoord : spv::BuiltIn::Position};
+ input_position = DefineInput(*this, F32[4], true, built_in);
+ if (profile.support_geometry_shader_passthrough) {
+ if (info.passthrough.AnyComponent(IR::Attribute::PositionX)) {
+ Decorate(input_position, spv::Decoration::PassthroughNV);
+ }
+ }
+ }
+ if (loads[IR::Attribute::InstanceId]) {
+ if (profile.support_vertex_instance_id) {
+ instance_id = DefineInput(*this, U32[1], true, spv::BuiltIn::InstanceId);
+ } else {
+ instance_index = DefineInput(*this, U32[1], true, spv::BuiltIn::InstanceIndex);
+ base_instance = DefineInput(*this, U32[1], true, spv::BuiltIn::BaseInstance);
+ }
+ }
+ if (loads[IR::Attribute::VertexId]) {
+ if (profile.support_vertex_instance_id) {
+ vertex_id = DefineInput(*this, U32[1], true, spv::BuiltIn::VertexId);
+ } else {
+ vertex_index = DefineInput(*this, U32[1], true, spv::BuiltIn::VertexIndex);
+ base_vertex = DefineInput(*this, U32[1], true, spv::BuiltIn::BaseVertex);
+ }
+ }
+ if (loads[IR::Attribute::FrontFace]) {
+ front_face = DefineInput(*this, U1, true, spv::BuiltIn::FrontFacing);
+ }
+ if (loads[IR::Attribute::PointSpriteS] || loads[IR::Attribute::PointSpriteT]) {
+ point_coord = DefineInput(*this, F32[2], true, spv::BuiltIn::PointCoord);
+ }
+ if (loads[IR::Attribute::TessellationEvaluationPointU] ||
+ loads[IR::Attribute::TessellationEvaluationPointV]) {
+ tess_coord = DefineInput(*this, F32[3], false, spv::BuiltIn::TessCoord);
+ }
+ for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
+ const AttributeType input_type{runtime_info.generic_input_types[index]};
+ if (!runtime_info.previous_stage_stores.Generic(index)) {
+ continue;
+ }
+ if (!loads.Generic(index)) {
+ continue;
+ }
+ if (input_type == AttributeType::Disabled) {
+ continue;
+ }
+ const Id type{GetAttributeType(*this, input_type)};
+ const Id id{DefineInput(*this, type, true)};
+ Decorate(id, spv::Decoration::Location, static_cast<u32>(index));
+ Name(id, fmt::format("in_attr{}", index));
+ input_generics[index] = id;
+
+ if (info.passthrough.Generic(index) && profile.support_geometry_shader_passthrough) {
+ Decorate(id, spv::Decoration::PassthroughNV);
+ }
+ if (stage != Stage::Fragment) {
+ continue;
+ }
+ switch (info.interpolation[index]) {
+ case Interpolation::Smooth:
+ // Default
+ // Decorate(id, spv::Decoration::Smooth);
+ break;
+ case Interpolation::NoPerspective:
+ Decorate(id, spv::Decoration::NoPerspective);
+ break;
+ case Interpolation::Flat:
+ Decorate(id, spv::Decoration::Flat);
+ break;
+ }
+ }
+ if (stage == Stage::TessellationEval) {
+ for (size_t index = 0; index < info.uses_patches.size(); ++index) {
+ if (!info.uses_patches[index]) {
+ continue;
+ }
+ const Id id{DefineInput(*this, F32[4], false)};
+ Decorate(id, spv::Decoration::Patch);
+ Decorate(id, spv::Decoration::Location, static_cast<u32>(index));
+ patches[index] = id;
+ }
+ }
+}
+
+void EmitContext::DefineOutputs(const IR::Program& program) {
+ const Info& info{program.info};
+ const std::optional<u32> invocations{program.invocations};
+ if (info.stores.AnyComponent(IR::Attribute::PositionX) || stage == Stage::VertexB) {
+ output_position = DefineOutput(*this, F32[4], invocations, spv::BuiltIn::Position);
+ }
+ if (info.stores[IR::Attribute::PointSize] || runtime_info.fixed_state_point_size) {
+ if (stage == Stage::Fragment) {
+ throw NotImplementedException("Storing PointSize in fragment stage");
+ }
+ output_point_size = DefineOutput(*this, F32[1], invocations, spv::BuiltIn::PointSize);
+ }
+ if (info.stores.ClipDistances()) {
+ if (stage == Stage::Fragment) {
+ throw NotImplementedException("Storing ClipDistance in fragment stage");
+ }
+ const Id type{TypeArray(F32[1], Const(8U))};
+ clip_distances = DefineOutput(*this, type, invocations, spv::BuiltIn::ClipDistance);
+ }
+ if (info.stores[IR::Attribute::Layer] &&
+ (profile.support_viewport_index_layer_non_geometry || stage == Stage::Geometry)) {
+ if (stage == Stage::Fragment) {
+ throw NotImplementedException("Storing Layer in fragment stage");
+ }
+ layer = DefineOutput(*this, U32[1], invocations, spv::BuiltIn::Layer);
+ }
+ if (info.stores[IR::Attribute::ViewportIndex] &&
+ (profile.support_viewport_index_layer_non_geometry || stage == Stage::Geometry)) {
+ if (stage == Stage::Fragment) {
+ throw NotImplementedException("Storing ViewportIndex in fragment stage");
+ }
+ viewport_index = DefineOutput(*this, U32[1], invocations, spv::BuiltIn::ViewportIndex);
+ }
+ if (info.stores[IR::Attribute::ViewportMask] && profile.support_viewport_mask) {
+ viewport_mask = DefineOutput(*this, TypeArray(U32[1], Const(1u)), std::nullopt,
+ spv::BuiltIn::ViewportMaskNV);
+ }
+ for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
+ if (info.stores.Generic(index)) {
+ DefineGenericOutput(*this, index, invocations);
+ }
+ }
+ switch (stage) {
+ case Stage::TessellationControl:
+ if (info.stores_tess_level_outer) {
+ const Id type{TypeArray(F32[1], Const(4U))};
+ output_tess_level_outer =
+ DefineOutput(*this, type, std::nullopt, spv::BuiltIn::TessLevelOuter);
+ Decorate(output_tess_level_outer, spv::Decoration::Patch);
+ }
+ if (info.stores_tess_level_inner) {
+ const Id type{TypeArray(F32[1], Const(2U))};
+ output_tess_level_inner =
+ DefineOutput(*this, type, std::nullopt, spv::BuiltIn::TessLevelInner);
+ Decorate(output_tess_level_inner, spv::Decoration::Patch);
+ }
+ for (size_t index = 0; index < info.uses_patches.size(); ++index) {
+ if (!info.uses_patches[index]) {
+ continue;
+ }
+ const Id id{DefineOutput(*this, F32[4], std::nullopt)};
+ Decorate(id, spv::Decoration::Patch);
+ Decorate(id, spv::Decoration::Location, static_cast<u32>(index));
+ patches[index] = id;
+ }
+ break;
+ case Stage::Fragment:
+ for (u32 index = 0; index < 8; ++index) {
+ if (!info.stores_frag_color[index] && !profile.need_declared_frag_colors) {
+ continue;
+ }
+ frag_color[index] = DefineOutput(*this, F32[4], std::nullopt);
+ Decorate(frag_color[index], spv::Decoration::Location, index);
+ Name(frag_color[index], fmt::format("frag_color{}", index));
+ }
+ if (info.stores_frag_depth) {
+ frag_depth = DefineOutput(*this, F32[1], std::nullopt);
+ Decorate(frag_depth, spv::Decoration::BuiltIn, spv::BuiltIn::FragDepth);
+ }
+ if (info.stores_sample_mask) {
+ sample_mask = DefineOutput(*this, U32[1], std::nullopt);
+ Decorate(sample_mask, spv::Decoration::BuiltIn, spv::BuiltIn::SampleMask);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+} // namespace Shader::Backend::SPIRV
diff --git a/src/shader_recompiler/backend/spirv/emit_context.h b/src/shader_recompiler/backend/spirv/emit_context.h
new file mode 100644
index 000000000..e277bc358
--- /dev/null
+++ b/src/shader_recompiler/backend/spirv/emit_context.h
@@ -0,0 +1,307 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <array>
+#include <string_view>
+
+#include <sirit/sirit.h>
+
+#include "shader_recompiler/backend/bindings.h"
+#include "shader_recompiler/frontend/ir/program.h"
+#include "shader_recompiler/profile.h"
+#include "shader_recompiler/runtime_info.h"
+#include "shader_recompiler/shader_info.h"
+
+namespace Shader::Backend::SPIRV {
+
+using Sirit::Id;
+
+class VectorTypes {
+public:
+ void Define(Sirit::Module& sirit_ctx, Id base_type, std::string_view name);
+
+ [[nodiscard]] Id operator[](size_t size) const noexcept {
+ return defs[size - 1];
+ }
+
+private:
+ std::array<Id, 4> defs{};
+};
+
+struct TextureDefinition {
+ Id id;
+ Id sampled_type;
+ Id pointer_type;
+ Id image_type;
+ u32 count;
+};
+
+struct TextureBufferDefinition {
+ Id id;
+ u32 count;
+};
+
+struct ImageBufferDefinition {
+ Id id;
+ Id image_type;
+ u32 count;
+};
+
+struct ImageDefinition {
+ Id id;
+ Id image_type;
+ u32 count;
+};
+
+struct UniformDefinitions {
+ Id U8{};
+ Id S8{};
+ Id U16{};
+ Id S16{};
+ Id U32{};
+ Id F32{};
+ Id U32x2{};
+ Id U32x4{};
+};
+
+struct StorageTypeDefinition {
+ Id array{};
+ Id element{};
+};
+
+struct StorageTypeDefinitions {
+ StorageTypeDefinition U8{};
+ StorageTypeDefinition S8{};
+ StorageTypeDefinition U16{};
+ StorageTypeDefinition S16{};
+ StorageTypeDefinition U32{};
+ StorageTypeDefinition U64{};
+ StorageTypeDefinition F32{};
+ StorageTypeDefinition U32x2{};
+ StorageTypeDefinition U32x4{};
+};
+
+struct StorageDefinitions {
+ Id U8{};
+ Id S8{};
+ Id U16{};
+ Id S16{};
+ Id U32{};
+ Id F32{};
+ Id U64{};
+ Id U32x2{};
+ Id U32x4{};
+};
+
+struct GenericElementInfo {
+ Id id{};
+ u32 first_element{};
+ u32 num_components{};
+};
+
+class EmitContext final : public Sirit::Module {
+public:
+ explicit EmitContext(const Profile& profile, const RuntimeInfo& runtime_info,
+ IR::Program& program, Bindings& binding);
+ ~EmitContext();
+
+ [[nodiscard]] Id Def(const IR::Value& value);
+
+ [[nodiscard]] Id BitOffset8(const IR::Value& offset);
+ [[nodiscard]] Id BitOffset16(const IR::Value& offset);
+
+ Id Const(u32 value) {
+ return Constant(U32[1], value);
+ }
+
+ Id Const(u32 element_1, u32 element_2) {
+ return ConstantComposite(U32[2], Const(element_1), Const(element_2));
+ }
+
+ Id Const(u32 element_1, u32 element_2, u32 element_3) {
+ return ConstantComposite(U32[3], Const(element_1), Const(element_2), Const(element_3));
+ }
+
+ Id Const(u32 element_1, u32 element_2, u32 element_3, u32 element_4) {
+ return ConstantComposite(U32[4], Const(element_1), Const(element_2), Const(element_3),
+ Const(element_4));
+ }
+
+ Id SConst(s32 value) {
+ return Constant(S32[1], value);
+ }
+
+ Id SConst(s32 element_1, s32 element_2) {
+ return ConstantComposite(S32[2], SConst(element_1), SConst(element_2));
+ }
+
+ Id SConst(s32 element_1, s32 element_2, s32 element_3) {
+ return ConstantComposite(S32[3], SConst(element_1), SConst(element_2), SConst(element_3));
+ }
+
+ Id SConst(s32 element_1, s32 element_2, s32 element_3, s32 element_4) {
+ return ConstantComposite(S32[4], SConst(element_1), SConst(element_2), SConst(element_3),
+ SConst(element_4));
+ }
+
+ Id Const(f32 value) {
+ return Constant(F32[1], value);
+ }
+
+ const Profile& profile;
+ const RuntimeInfo& runtime_info;
+ Stage stage{};
+
+ Id void_id{};
+ Id U1{};
+ Id U8{};
+ Id S8{};
+ Id U16{};
+ Id S16{};
+ Id U64{};
+ VectorTypes F32;
+ VectorTypes U32;
+ VectorTypes S32;
+ VectorTypes F16;
+ VectorTypes F64;
+
+ Id true_value{};
+ Id false_value{};
+ Id u32_zero_value{};
+ Id f32_zero_value{};
+
+ UniformDefinitions uniform_types;
+ StorageTypeDefinitions storage_types;
+
+ Id private_u32{};
+
+ Id shared_u8{};
+ Id shared_u16{};
+ Id shared_u32{};
+ Id shared_u64{};
+ Id shared_u32x2{};
+ Id shared_u32x4{};
+
+ Id input_f32{};
+ Id input_u32{};
+ Id input_s32{};
+
+ Id output_f32{};
+ Id output_u32{};
+
+ Id image_buffer_type{};
+ Id sampled_texture_buffer_type{};
+ Id image_u32{};
+
+ std::array<UniformDefinitions, Info::MAX_CBUFS> cbufs{};
+ std::array<StorageDefinitions, Info::MAX_SSBOS> ssbos{};
+ std::vector<TextureBufferDefinition> texture_buffers;
+ std::vector<ImageBufferDefinition> image_buffers;
+ std::vector<TextureDefinition> textures;
+ std::vector<ImageDefinition> images;
+
+ Id workgroup_id{};
+ Id local_invocation_id{};
+ Id invocation_id{};
+ Id sample_id{};
+ Id is_helper_invocation{};
+ Id subgroup_local_invocation_id{};
+ Id subgroup_mask_eq{};
+ Id subgroup_mask_lt{};
+ Id subgroup_mask_le{};
+ Id subgroup_mask_gt{};
+ Id subgroup_mask_ge{};
+ Id instance_id{};
+ Id instance_index{};
+ Id base_instance{};
+ Id vertex_id{};
+ Id vertex_index{};
+ Id base_vertex{};
+ Id front_face{};
+ Id point_coord{};
+ Id tess_coord{};
+ Id clip_distances{};
+ Id layer{};
+ Id viewport_index{};
+ Id viewport_mask{};
+ Id primitive_id{};
+
+ Id fswzadd_lut_a{};
+ Id fswzadd_lut_b{};
+
+ Id indexed_load_func{};
+ Id indexed_store_func{};
+
+ Id local_memory{};
+
+ Id shared_memory_u8{};
+ Id shared_memory_u16{};
+ Id shared_memory_u32{};
+ Id shared_memory_u64{};
+ Id shared_memory_u32x2{};
+ Id shared_memory_u32x4{};
+
+ Id shared_memory_u32_type{};
+
+ Id shared_store_u8_func{};
+ Id shared_store_u16_func{};
+ Id increment_cas_shared{};
+ Id increment_cas_ssbo{};
+ Id decrement_cas_shared{};
+ Id decrement_cas_ssbo{};
+ Id f32_add_cas{};
+ Id f16x2_add_cas{};
+ Id f16x2_min_cas{};
+ Id f16x2_max_cas{};
+ Id f32x2_add_cas{};
+ Id f32x2_min_cas{};
+ Id f32x2_max_cas{};
+
+ Id load_global_func_u32{};
+ Id load_global_func_u32x2{};
+ Id load_global_func_u32x4{};
+ Id write_global_func_u32{};
+ Id write_global_func_u32x2{};
+ Id write_global_func_u32x4{};
+
+ Id input_position{};
+ std::array<Id, 32> input_generics{};
+
+ Id output_point_size{};
+ Id output_position{};
+ std::array<std::array<GenericElementInfo, 4>, 32> output_generics{};
+
+ Id output_tess_level_outer{};
+ Id output_tess_level_inner{};
+ std::array<Id, 30> patches{};
+
+ std::array<Id, 8> frag_color{};
+ Id sample_mask{};
+ Id frag_depth{};
+
+ std::vector<Id> interfaces;
+
+private:
+ void DefineCommonTypes(const Info& info);
+ void DefineCommonConstants();
+ void DefineInterfaces(const IR::Program& program);
+ void DefineLocalMemory(const IR::Program& program);
+ void DefineSharedMemory(const IR::Program& program);
+ void DefineSharedMemoryFunctions(const IR::Program& program);
+ void DefineConstantBuffers(const Info& info, u32& binding);
+ void DefineStorageBuffers(const Info& info, u32& binding);
+ void DefineTextureBuffers(const Info& info, u32& binding);
+ void DefineImageBuffers(const Info& info, u32& binding);
+ void DefineTextures(const Info& info, u32& binding);
+ void DefineImages(const Info& info, u32& binding);
+ void DefineAttributeMemAccess(const Info& info);
+ void DefineGlobalMemoryFunctions(const Info& info);
+
+ void DefineInputs(const IR::Program& program);
+ void DefineOutputs(const IR::Program& program);
+};
+
+} // namespace Shader::Backend::SPIRV
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv.cpp b/src/shader_recompiler/backend/spirv/emit_spirv.cpp
new file mode 100644
index 000000000..d7a86e270
--- /dev/null
+++ b/src/shader_recompiler/backend/spirv/emit_spirv.cpp
@@ -0,0 +1,541 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <span>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include <boost/container/small_vector.hpp>
+
+#include "common/settings.h"
+#include "shader_recompiler/backend/spirv/emit_spirv.h"
+#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
+#include "shader_recompiler/frontend/ir/basic_block.h"
+#include "shader_recompiler/frontend/ir/program.h"
+
+namespace Shader::Backend::SPIRV {
+namespace {
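+// FuncTraits extracts the return and parameter types from an Emit* function pointer so Invoke
+// can marshal each IR argument into the exact parameter type the emitter expects.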
+template <class Func>
+struct FuncTraits {};
+
+template <class ReturnType_, class... Args>
+struct FuncTraits<ReturnType_ (*)(Args...)> {
+ using ReturnType = ReturnType_;
+
+ static constexpr size_t NUM_ARGS = sizeof...(Args);
+
+ template <size_t I>
+ using ArgType = std::tuple_element_t<I, std::tuple<Args...>>;
+};
+
+template <auto func, typename... Args>
+void SetDefinition(EmitContext& ctx, IR::Inst* inst, Args... args) {
+ inst->SetDefinition<Id>(func(ctx, std::forward<Args>(args)...));
+}
+
+template <typename ArgType>
+ArgType Arg(EmitContext& ctx, const IR::Value& arg) {
+ if constexpr (std::is_same_v<ArgType, Id>) {
+ return ctx.Def(arg);
+ } else if constexpr (std::is_same_v<ArgType, const IR::Value&>) {
+ return arg;
+ } else if constexpr (std::is_same_v<ArgType, u32>) {
+ return arg.U32();
+ } else if constexpr (std::is_same_v<ArgType, IR::Attribute>) {
+ return arg.Attribute();
+ } else if constexpr (std::is_same_v<ArgType, IR::Patch>) {
+ return arg.Patch();
+ } else if constexpr (std::is_same_v<ArgType, IR::Reg>) {
+ return arg.Reg();
+ }
+}
+
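+// Every emitter takes EmitContext& first and optionally IR::Inst* second; the index offset below
+// skips those leading parameters when mapping IR arguments onto the remaining ones.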
+template <auto func, bool is_first_arg_inst, size_t... I>
+void Invoke(EmitContext& ctx, IR::Inst* inst, std::index_sequence<I...>) {
+ using Traits = FuncTraits<decltype(func)>;
+ if constexpr (std::is_same_v<typename Traits::ReturnType, Id>) {
+ if constexpr (is_first_arg_inst) {
+ SetDefinition<func>(
+ ctx, inst, inst,
+ Arg<typename Traits::template ArgType<I + 2>>(ctx, inst->Arg(I))...);
+ } else {
+ SetDefinition<func>(
+ ctx, inst, Arg<typename Traits::template ArgType<I + 1>>(ctx, inst->Arg(I))...);
+ }
+ } else {
+ if constexpr (is_first_arg_inst) {
+ func(ctx, inst, Arg<typename Traits::template ArgType<I + 2>>(ctx, inst->Arg(I))...);
+ } else {
+ func(ctx, Arg<typename Traits::template ArgType<I + 1>>(ctx, inst->Arg(I))...);
+ }
+ }
+}
+
+template <auto func>
+void Invoke(EmitContext& ctx, IR::Inst* inst) {
+ using Traits = FuncTraits<decltype(func)>;
+ static_assert(Traits::NUM_ARGS >= 1, "Insufficient arguments");
+ if constexpr (Traits::NUM_ARGS == 1) {
+ Invoke<func, false>(ctx, inst, std::make_index_sequence<0>{});
+ } else {
+ using FirstArgType = typename Traits::template ArgType<1>;
+ static constexpr bool is_first_arg_inst = std::is_same_v<FirstArgType, IR::Inst*>;
+ using Indices = std::make_index_sequence<Traits::NUM_ARGS - (is_first_arg_inst ? 2 : 1)>;
+ Invoke<func, is_first_arg_inst>(ctx, inst, Indices{});
+ }
+}
+
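+// Dispatches an IR instruction to its emitter; one case per opcode is generated from opcodes.inc.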
+void EmitInst(EmitContext& ctx, IR::Inst* inst) {
+ switch (inst->GetOpcode()) {
+#define OPCODE(name, result_type, ...) \
+ case IR::Opcode::name: \
+ return Invoke<&Emit##name>(ctx, inst);
+#include "shader_recompiler/frontend/ir/opcodes.inc"
+#undef OPCODE
+ }
+ throw LogicError("Invalid opcode {}", inst->GetOpcode());
+}
+
+Id TypeId(const EmitContext& ctx, IR::Type type) {
+ switch (type) {
+ case IR::Type::U1:
+ return ctx.U1;
+ case IR::Type::U32:
+ return ctx.U32[1];
+ default:
+ throw NotImplementedException("Phi node type {}", type);
+ }
+}
+
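+// Walks the structured syntax list in order, emitting each block's instructions together with
+// the selection/loop merges and branches that structure control flow.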
+void Traverse(EmitContext& ctx, IR::Program& program) {
+ IR::Block* current_block{};
+ for (const IR::AbstractSyntaxNode& node : program.syntax_list) {
+ switch (node.type) {
+ case IR::AbstractSyntaxNode::Type::Block: {
+ const Id label{node.data.block->Definition<Id>()};
+ if (current_block) {
+ ctx.OpBranch(label);
+ }
+ current_block = node.data.block;
+ ctx.AddLabel(label);
+ for (IR::Inst& inst : node.data.block->Instructions()) {
+ EmitInst(ctx, &inst);
+ }
+ break;
+ }
+ case IR::AbstractSyntaxNode::Type::If: {
+ const Id if_label{node.data.if_node.body->Definition<Id>()};
+ const Id endif_label{node.data.if_node.merge->Definition<Id>()};
+ ctx.OpSelectionMerge(endif_label, spv::SelectionControlMask::MaskNone);
+ ctx.OpBranchConditional(ctx.Def(node.data.if_node.cond), if_label, endif_label);
+ break;
+ }
+ case IR::AbstractSyntaxNode::Type::Loop: {
+ const Id body_label{node.data.loop.body->Definition<Id>()};
+ const Id continue_label{node.data.loop.continue_block->Definition<Id>()};
+ const Id endloop_label{node.data.loop.merge->Definition<Id>()};
+
+ ctx.OpLoopMerge(endloop_label, continue_label, spv::LoopControlMask::MaskNone);
+ ctx.OpBranch(body_label);
+ break;
+ }
+ case IR::AbstractSyntaxNode::Type::Break: {
+ const Id break_label{node.data.break_node.merge->Definition<Id>()};
+ const Id skip_label{node.data.break_node.skip->Definition<Id>()};
+ ctx.OpBranchConditional(ctx.Def(node.data.break_node.cond), break_label, skip_label);
+ break;
+ }
+ case IR::AbstractSyntaxNode::Type::EndIf:
+ if (current_block) {
+ ctx.OpBranch(node.data.end_if.merge->Definition<Id>());
+ }
+ break;
+ case IR::AbstractSyntaxNode::Type::Repeat: {
+ Id cond{ctx.Def(node.data.repeat.cond)};
+ if (!Settings::values.disable_shader_loop_safety_checks) {
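+ // Decrement a per-loop private counter on every iteration and force the loop to
+ // exit once it runs out, so malformed shaders cannot spin forever.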
+ const Id pointer_type{ctx.TypePointer(spv::StorageClass::Private, ctx.U32[1])};
+ const Id safety_counter{ctx.AddGlobalVariable(
+ pointer_type, spv::StorageClass::Private, ctx.Const(0x2000u))};
+ if (ctx.profile.supported_spirv >= 0x00010400) {
+ ctx.interfaces.push_back(safety_counter);
+ }
+ const Id old_counter{ctx.OpLoad(ctx.U32[1], safety_counter)};
+ const Id new_counter{ctx.OpISub(ctx.U32[1], old_counter, ctx.Const(1u))};
+ ctx.OpStore(safety_counter, new_counter);
+
+ const Id safety_cond{
+ ctx.OpSGreaterThanEqual(ctx.U1, new_counter, ctx.u32_zero_value)};
+ cond = ctx.OpLogicalAnd(ctx.U1, cond, safety_cond);
+ }
+ const Id loop_header_label{node.data.repeat.loop_header->Definition<Id>()};
+ const Id merge_label{node.data.repeat.merge->Definition<Id>()};
+ ctx.OpBranchConditional(cond, loop_header_label, merge_label);
+ break;
+ }
+ case IR::AbstractSyntaxNode::Type::Return:
+ ctx.OpReturn();
+ break;
+ case IR::AbstractSyntaxNode::Type::Unreachable:
+ ctx.OpUnreachable();
+ break;
+ }
+ if (node.type != IR::AbstractSyntaxNode::Type::Block) {
+ current_block = nullptr;
+ }
+ }
+}
+
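+// Labels for every block are created up front so branches can target blocks that have not been
+// emitted yet.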
+Id DefineMain(EmitContext& ctx, IR::Program& program) {
+ const Id void_function{ctx.TypeFunction(ctx.void_id)};
+ const Id main{ctx.OpFunction(ctx.void_id, spv::FunctionControlMask::MaskNone, void_function)};
+ for (IR::Block* const block : program.blocks) {
+ block->SetDefinition(ctx.OpLabel());
+ }
+ Traverse(ctx, program);
+ ctx.OpFunctionEnd();
+ return main;
+}
+
+spv::ExecutionMode ExecutionMode(TessPrimitive primitive) {
+ switch (primitive) {
+ case TessPrimitive::Isolines:
+ return spv::ExecutionMode::Isolines;
+ case TessPrimitive::Triangles:
+ return spv::ExecutionMode::Triangles;
+ case TessPrimitive::Quads:
+ return spv::ExecutionMode::Quads;
+ }
+ throw InvalidArgument("Tessellation primitive {}", primitive);
+}
+
+spv::ExecutionMode ExecutionMode(TessSpacing spacing) {
+ switch (spacing) {
+ case TessSpacing::Equal:
+ return spv::ExecutionMode::SpacingEqual;
+ case TessSpacing::FractionalOdd:
+ return spv::ExecutionMode::SpacingFractionalOdd;
+ case TessSpacing::FractionalEven:
+ return spv::ExecutionMode::SpacingFractionalEven;
+ }
+ throw InvalidArgument("Tessellation spacing {}", spacing);
+}
+
+void DefineEntryPoint(const IR::Program& program, EmitContext& ctx, Id main) {
+ const std::span interfaces(ctx.interfaces.data(), ctx.interfaces.size());
+ spv::ExecutionModel execution_model{};
+ switch (program.stage) {
+ case Stage::Compute: {
+ const std::array<u32, 3> workgroup_size{program.workgroup_size};
+ execution_model = spv::ExecutionModel::GLCompute;
+ ctx.AddExecutionMode(main, spv::ExecutionMode::LocalSize, workgroup_size[0],
+ workgroup_size[1], workgroup_size[2]);
+ break;
+ }
+ case Stage::VertexB:
+ execution_model = spv::ExecutionModel::Vertex;
+ break;
+ case Stage::TessellationControl:
+ execution_model = spv::ExecutionModel::TessellationControl;
+ ctx.AddCapability(spv::Capability::Tessellation);
+ ctx.AddExecutionMode(main, spv::ExecutionMode::OutputVertices, program.invocations);
+ break;
+ case Stage::TessellationEval:
+ execution_model = spv::ExecutionModel::TessellationEvaluation;
+ ctx.AddCapability(spv::Capability::Tessellation);
+ ctx.AddExecutionMode(main, ExecutionMode(ctx.runtime_info.tess_primitive));
+ ctx.AddExecutionMode(main, ExecutionMode(ctx.runtime_info.tess_spacing));
+ ctx.AddExecutionMode(main, ctx.runtime_info.tess_clockwise
+ ? spv::ExecutionMode::VertexOrderCw
+ : spv::ExecutionMode::VertexOrderCcw);
+ break;
+ case Stage::Geometry:
+ execution_model = spv::ExecutionModel::Geometry;
+ ctx.AddCapability(spv::Capability::Geometry);
+ ctx.AddCapability(spv::Capability::GeometryStreams);
+ switch (ctx.runtime_info.input_topology) {
+ case InputTopology::Points:
+ ctx.AddExecutionMode(main, spv::ExecutionMode::InputPoints);
+ break;
+ case InputTopology::Lines:
+ ctx.AddExecutionMode(main, spv::ExecutionMode::InputLines);
+ break;
+ case InputTopology::LinesAdjacency:
+ ctx.AddExecutionMode(main, spv::ExecutionMode::InputLinesAdjacency);
+ break;
+ case InputTopology::Triangles:
+ ctx.AddExecutionMode(main, spv::ExecutionMode::Triangles);
+ break;
+ case InputTopology::TrianglesAdjacency:
+ ctx.AddExecutionMode(main, spv::ExecutionMode::InputTrianglesAdjacency);
+ break;
+ }
+ switch (program.output_topology) {
+ case OutputTopology::PointList:
+ ctx.AddExecutionMode(main, spv::ExecutionMode::OutputPoints);
+ break;
+ case OutputTopology::LineStrip:
+ ctx.AddExecutionMode(main, spv::ExecutionMode::OutputLineStrip);
+ break;
+ case OutputTopology::TriangleStrip:
+ ctx.AddExecutionMode(main, spv::ExecutionMode::OutputTriangleStrip);
+ break;
+ }
+ if (program.info.stores[IR::Attribute::PointSize]) {
+ ctx.AddCapability(spv::Capability::GeometryPointSize);
+ }
+ ctx.AddExecutionMode(main, spv::ExecutionMode::OutputVertices, program.output_vertices);
+ ctx.AddExecutionMode(main, spv::ExecutionMode::Invocations, program.invocations);
+ if (program.is_geometry_passthrough) {
+ if (ctx.profile.support_geometry_shader_passthrough) {
+ ctx.AddExtension("SPV_NV_geometry_shader_passthrough");
+ ctx.AddCapability(spv::Capability::GeometryShaderPassthroughNV);
+ } else {
+ LOG_WARNING(Shader_SPIRV, "Geometry shader passthrough used without host support");
+ }
+ }
+ break;
+ case Stage::Fragment:
+ execution_model = spv::ExecutionModel::Fragment;
+ if (ctx.profile.lower_left_origin_mode) {
+ ctx.AddExecutionMode(main, spv::ExecutionMode::OriginLowerLeft);
+ } else {
+ ctx.AddExecutionMode(main, spv::ExecutionMode::OriginUpperLeft);
+ }
+ if (program.info.stores_frag_depth) {
+ ctx.AddExecutionMode(main, spv::ExecutionMode::DepthReplacing);
+ }
+ if (ctx.runtime_info.force_early_z) {
+ ctx.AddExecutionMode(main, spv::ExecutionMode::EarlyFragmentTests);
+ }
+ break;
+ default:
+ throw NotImplementedException("Stage {}", program.stage);
+ }
+ ctx.AddEntryPoint(execution_model, main, "main", interfaces);
+}
+
+void SetupDenormControl(const Profile& profile, const IR::Program& program, EmitContext& ctx,
+ Id main_func) {
+ const Info& info{program.info};
+ if (info.uses_fp32_denorms_flush && info.uses_fp32_denorms_preserve) {
+ LOG_DEBUG(Shader_SPIRV, "Fp32 denorm flush and preserve on the same shader");
+ } else if (info.uses_fp32_denorms_flush) {
+ if (profile.support_fp32_denorm_flush) {
+ ctx.AddCapability(spv::Capability::DenormFlushToZero);
+ ctx.AddExecutionMode(main_func, spv::ExecutionMode::DenormFlushToZero, 32U);
+ } else {
+ // Drivers will most likely flush denorms by default, no need to warn
+ }
+ } else if (info.uses_fp32_denorms_preserve) {
+ if (profile.support_fp32_denorm_preserve) {
+ ctx.AddCapability(spv::Capability::DenormPreserve);
+ ctx.AddExecutionMode(main_func, spv::ExecutionMode::DenormPreserve, 32U);
+ } else {
+ LOG_DEBUG(Shader_SPIRV, "Fp32 denorm preserve used in shader without host support");
+ }
+ }
+ if (!profile.support_separate_denorm_behavior || profile.has_broken_fp16_float_controls) {
+ // No separate denorm behavior
+ return;
+ }
+ if (info.uses_fp16_denorms_flush && info.uses_fp16_denorms_preserve) {
+ LOG_DEBUG(Shader_SPIRV, "Fp16 denorm flush and preserve on the same shader");
+ } else if (info.uses_fp16_denorms_flush) {
+ if (profile.support_fp16_denorm_flush) {
+ ctx.AddCapability(spv::Capability::DenormFlushToZero);
+ ctx.AddExecutionMode(main_func, spv::ExecutionMode::DenormFlushToZero, 16U);
+ } else {
+ // Same as fp32, no need to warn as most drivers will flush by default
+ }
+ } else if (info.uses_fp16_denorms_preserve) {
+ if (profile.support_fp16_denorm_preserve) {
+ ctx.AddCapability(spv::Capability::DenormPreserve);
+ ctx.AddExecutionMode(main_func, spv::ExecutionMode::DenormPreserve, 16U);
+ } else {
+ LOG_DEBUG(Shader_SPIRV, "Fp16 denorm preserve used in shader without host support");
+ }
+ }
+}
+
+void SetupSignedNanCapabilities(const Profile& profile, const IR::Program& program,
+ EmitContext& ctx, Id main_func) {
+ if (profile.has_broken_fp16_float_controls && program.info.uses_fp16) {
+ return;
+ }
+ if (program.info.uses_fp16 && profile.support_fp16_signed_zero_nan_preserve) {
+ ctx.AddCapability(spv::Capability::SignedZeroInfNanPreserve);
+ ctx.AddExecutionMode(main_func, spv::ExecutionMode::SignedZeroInfNanPreserve, 16U);
+ }
+ if (profile.support_fp32_signed_zero_nan_preserve) {
+ ctx.AddCapability(spv::Capability::SignedZeroInfNanPreserve);
+ ctx.AddExecutionMode(main_func, spv::ExecutionMode::SignedZeroInfNanPreserve, 32U);
+ }
+ if (program.info.uses_fp64 && profile.support_fp64_signed_zero_nan_preserve) {
+ ctx.AddCapability(spv::Capability::SignedZeroInfNanPreserve);
+ ctx.AddExecutionMode(main_func, spv::ExecutionMode::SignedZeroInfNanPreserve, 64U);
+ }
+}
+
+void SetupCapabilities(const Profile& profile, const Info& info, EmitContext& ctx) {
+ if (info.uses_sampled_1d) {
+ ctx.AddCapability(spv::Capability::Sampled1D);
+ }
+ if (info.uses_sparse_residency) {
+ ctx.AddCapability(spv::Capability::SparseResidency);
+ }
+ if (info.uses_demote_to_helper_invocation && profile.support_demote_to_helper_invocation) {
+ ctx.AddExtension("SPV_EXT_demote_to_helper_invocation");
+ ctx.AddCapability(spv::Capability::DemoteToHelperInvocationEXT);
+ }
+ if (info.stores[IR::Attribute::ViewportIndex]) {
+ ctx.AddCapability(spv::Capability::MultiViewport);
+ }
+ if (info.stores[IR::Attribute::ViewportMask] && profile.support_viewport_mask) {
+ ctx.AddExtension("SPV_NV_viewport_array2");
+ ctx.AddCapability(spv::Capability::ShaderViewportMaskNV);
+ }
+ if (info.stores[IR::Attribute::Layer] || info.stores[IR::Attribute::ViewportIndex]) {
+ if (profile.support_viewport_index_layer_non_geometry && ctx.stage != Stage::Geometry) {
+ ctx.AddExtension("SPV_EXT_shader_viewport_index_layer");
+ ctx.AddCapability(spv::Capability::ShaderViewportIndexLayerEXT);
+ }
+ }
+ if (!profile.support_vertex_instance_id &&
+ (info.loads[IR::Attribute::InstanceId] || info.loads[IR::Attribute::VertexId])) {
+ ctx.AddExtension("SPV_KHR_shader_draw_parameters");
+ ctx.AddCapability(spv::Capability::DrawParameters);
+ }
+ if ((info.uses_subgroup_vote || info.uses_subgroup_invocation_id ||
+ info.uses_subgroup_shuffles) &&
+ profile.support_vote) {
+ ctx.AddExtension("SPV_KHR_shader_ballot");
+ ctx.AddCapability(spv::Capability::SubgroupBallotKHR);
+ if (!profile.warp_size_potentially_larger_than_guest) {
+ // Vote ops are only used when not taking the long path
+ ctx.AddExtension("SPV_KHR_subgroup_vote");
+ ctx.AddCapability(spv::Capability::SubgroupVoteKHR);
+ }
+ }
+ if (info.uses_int64_bit_atomics && profile.support_int64_atomics) {
+ ctx.AddCapability(spv::Capability::Int64Atomics);
+ }
+ if (info.uses_typeless_image_reads && profile.support_typeless_image_loads) {
+ ctx.AddCapability(spv::Capability::StorageImageReadWithoutFormat);
+ }
+ if (info.uses_typeless_image_writes) {
+ ctx.AddCapability(spv::Capability::StorageImageWriteWithoutFormat);
+ }
+ if (info.uses_image_buffers) {
+ ctx.AddCapability(spv::Capability::ImageBuffer);
+ }
+ if (info.uses_sample_id) {
+ ctx.AddCapability(spv::Capability::SampleRateShading);
+ }
+ if (!ctx.runtime_info.xfb_varyings.empty()) {
+ ctx.AddCapability(spv::Capability::TransformFeedback);
+ }
+ if (info.uses_derivatives) {
+ ctx.AddCapability(spv::Capability::DerivativeControl);
+ }
+ // TODO: Track this usage
+ ctx.AddCapability(spv::Capability::ImageGatherExtended);
+ ctx.AddCapability(spv::Capability::ImageQuery);
+ ctx.AddCapability(spv::Capability::SampledBuffer);
+}
+
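+// Phi operands may reference values defined after the phi itself, so OpPhi operands are deferred
+// and patched here once every block has been emitted.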
+void PatchPhiNodes(IR::Program& program, EmitContext& ctx) {
+ auto inst{program.blocks.front()->begin()};
+ size_t block_index{0};
+ ctx.PatchDeferredPhi([&](size_t phi_arg) {
+ if (phi_arg == 0) {
+ ++inst;
+ if (inst == program.blocks[block_index]->end() ||
+ inst->GetOpcode() != IR::Opcode::Phi) {
+ do {
+ ++block_index;
+ inst = program.blocks[block_index]->begin();
+ } while (inst->GetOpcode() != IR::Opcode::Phi);
+ }
+ }
+ return ctx.Def(inst->Arg(phi_arg));
+ });
+}
+} // Anonymous namespace
+
+std::vector<u32> EmitSPIRV(const Profile& profile, const RuntimeInfo& runtime_info,
+ IR::Program& program, Bindings& bindings) {
+ EmitContext ctx{profile, runtime_info, program, bindings};
+ const Id main{DefineMain(ctx, program)};
+ DefineEntryPoint(program, ctx, main);
+ if (profile.support_float_controls) {
+ ctx.AddExtension("SPV_KHR_float_controls");
+ SetupDenormControl(profile, program, ctx, main);
+ SetupSignedNanCapabilities(profile, program, ctx, main);
+ }
+ SetupCapabilities(profile, program.info, ctx);
+ PatchPhiNodes(program, ctx);
+ return ctx.Assemble();
+}
+
+Id EmitPhi(EmitContext& ctx, IR::Inst* inst) {
+ const size_t num_args{inst->NumArgs()};
+ boost::container::small_vector<Id, 32> blocks;
+ blocks.reserve(num_args);
+ for (size_t index = 0; index < num_args; ++index) {
+ blocks.push_back(inst->PhiBlock(index)->Definition<Id>());
+ }
+ // The type of a phi instruction is stored in its flags
+ const Id result_type{TypeId(ctx, inst->Flags<IR::Type>())};
+ return ctx.DeferredOpPhi(result_type, std::span(blocks.data(), blocks.size()));
+}
+
+void EmitVoid(EmitContext&) {}
+
+Id EmitIdentity(EmitContext& ctx, const IR::Value& value) {
+ const Id id{ctx.Def(value)};
+ if (!Sirit::ValidId(id)) {
+ throw NotImplementedException("Forward identity declaration");
+ }
+ return id;
+}
+
+Id EmitConditionRef(EmitContext& ctx, const IR::Value& value) {
+ const Id id{ctx.Def(value)};
+ if (!Sirit::ValidId(id)) {
+ throw NotImplementedException("Forward identity declaration");
+ }
+ return id;
+}
+
+void EmitReference(EmitContext&) {}
+
+void EmitPhiMove(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+void EmitGetZeroFromOp(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+void EmitGetSignFromOp(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+void EmitGetCarryFromOp(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+void EmitGetOverflowFromOp(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+void EmitGetSparseFromOp(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+void EmitGetInBoundsFromOp(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+} // namespace Shader::Backend::SPIRV
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv.h b/src/shader_recompiler/backend/spirv/emit_spirv.h
new file mode 100644
index 000000000..db0c935fe
--- /dev/null
+++ b/src/shader_recompiler/backend/spirv/emit_spirv.h
@@ -0,0 +1,27 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <vector>
+
+#include <sirit/sirit.h>
+
+#include "common/common_types.h"
+#include "shader_recompiler/backend/bindings.h"
+#include "shader_recompiler/backend/spirv/emit_context.h"
+#include "shader_recompiler/frontend/ir/program.h"
+#include "shader_recompiler/profile.h"
+
+namespace Shader::Backend::SPIRV {
+
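+// Translates an IR program into a SPIR-V module, assigning resource bindings through bindings.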
+[[nodiscard]] std::vector<u32> EmitSPIRV(const Profile& profile, const RuntimeInfo& runtime_info,
+ IR::Program& program, Bindings& bindings);
+
+[[nodiscard]] inline std::vector<u32> EmitSPIRV(const Profile& profile, IR::Program& program) {
+ Bindings binding;
+ return EmitSPIRV(profile, {}, program, binding);
+}
+
+} // namespace Shader::Backend::SPIRV
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp
new file mode 100644
index 000000000..9af8bb9e1
--- /dev/null
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp
@@ -0,0 +1,448 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "shader_recompiler/backend/spirv/emit_spirv.h"
+#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
+
+namespace Shader::Backend::SPIRV {
+namespace {
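+// Converts a byte offset into a 32-bit word index and returns a pointer into shared memory; the
+// explicit workgroup layout declaration requires an extra leading index.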
+Id SharedPointer(EmitContext& ctx, Id offset, u32 index_offset = 0) {
+ const Id shift_id{ctx.Const(2U)};
+ Id index{ctx.OpShiftRightArithmetic(ctx.U32[1], offset, shift_id)};
+ if (index_offset > 0) {
+ index = ctx.OpIAdd(ctx.U32[1], index, ctx.Const(index_offset));
+ }
+ return ctx.profile.support_explicit_workgroup_layout
+ ? ctx.OpAccessChain(ctx.shared_u32, ctx.shared_memory_u32, ctx.u32_zero_value, index)
+ : ctx.OpAccessChain(ctx.shared_u32, ctx.shared_memory_u32, index);
+}
+
+Id StorageIndex(EmitContext& ctx, const IR::Value& offset, size_t element_size) {
+ if (offset.IsImmediate()) {
+ const u32 imm_offset{static_cast<u32>(offset.U32() / element_size)};
+ return ctx.Const(imm_offset);
+ }
+ const u32 shift{static_cast<u32>(std::countr_zero(element_size))};
+ const Id index{ctx.Def(offset)};
+ if (shift == 0) {
+ return index;
+ }
+ const Id shift_id{ctx.Const(shift)};
+ return ctx.OpShiftRightLogical(ctx.U32[1], index, shift_id);
+}
+
+Id StoragePointer(EmitContext& ctx, const StorageTypeDefinition& type_def,
+ Id StorageDefinitions::*member_ptr, const IR::Value& binding,
+ const IR::Value& offset, size_t element_size) {
+ if (!binding.IsImmediate()) {
+ throw NotImplementedException("Dynamic storage buffer indexing");
+ }
+ const Id ssbo{ctx.ssbos[binding.U32()].*member_ptr};
+ const Id index{StorageIndex(ctx, offset, element_size)};
+ return ctx.OpAccessChain(type_def.element, ssbo, ctx.u32_zero_value, index);
+}
+
+std::pair<Id, Id> AtomicArgs(EmitContext& ctx) {
+ const Id scope{ctx.Const(static_cast<u32>(spv::Scope::Device))};
+ const Id semantics{ctx.u32_zero_value};
+ return {scope, semantics};
+}
+
+Id SharedAtomicU32(EmitContext& ctx, Id offset, Id value,
+ Id (Sirit::Module::*atomic_func)(Id, Id, Id, Id, Id)) {
+ const Id pointer{SharedPointer(ctx, offset)};
+ const auto [scope, semantics]{AtomicArgs(ctx)};
+ return (ctx.*atomic_func)(ctx.U32[1], pointer, scope, semantics, value);
+}
+
+Id StorageAtomicU32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, Id value,
+ Id (Sirit::Module::*atomic_func)(Id, Id, Id, Id, Id)) {
+ const Id pointer{StoragePointer(ctx, ctx.storage_types.U32, &StorageDefinitions::U32, binding,
+ offset, sizeof(u32))};
+ const auto [scope, semantics]{AtomicArgs(ctx)};
+ return (ctx.*atomic_func)(ctx.U32[1], pointer, scope, semantics, value);
+}
+
+Id StorageAtomicU64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, Id value,
+ Id (Sirit::Module::*atomic_func)(Id, Id, Id, Id, Id),
+ Id (Sirit::Module::*non_atomic_func)(Id, Id, Id)) {
+ if (ctx.profile.support_int64_atomics) {
+ const Id pointer{StoragePointer(ctx, ctx.storage_types.U64, &StorageDefinitions::U64,
+ binding, offset, sizeof(u64))};
+ const auto [scope, semantics]{AtomicArgs(ctx)};
+ return (ctx.*atomic_func)(ctx.U64, pointer, scope, semantics, value);
+ }
+ LOG_ERROR(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
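+ // Fall back to a non-atomic load/modify/store on the two 32-bit halves and return the
+ // previous value.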
+ const Id pointer{StoragePointer(ctx, ctx.storage_types.U32x2, &StorageDefinitions::U32x2,
+ binding, offset, sizeof(u32[2]))};
+ const Id original_value{ctx.OpBitcast(ctx.U64, ctx.OpLoad(ctx.U32[2], pointer))};
+ const Id result{(ctx.*non_atomic_func)(ctx.U64, value, original_value)};
+ ctx.OpStore(pointer, ctx.OpBitcast(ctx.U32[2], result));
+ return original_value;
+}
+} // Anonymous namespace
+
+Id EmitSharedAtomicIAdd32(EmitContext& ctx, Id offset, Id value) {
+ return SharedAtomicU32(ctx, offset, value, &Sirit::Module::OpAtomicIAdd);
+}
+
+Id EmitSharedAtomicSMin32(EmitContext& ctx, Id offset, Id value) {
+ return SharedAtomicU32(ctx, offset, value, &Sirit::Module::OpAtomicSMin);
+}
+
+Id EmitSharedAtomicUMin32(EmitContext& ctx, Id offset, Id value) {
+ return SharedAtomicU32(ctx, offset, value, &Sirit::Module::OpAtomicUMin);
+}
+
+Id EmitSharedAtomicSMax32(EmitContext& ctx, Id offset, Id value) {
+ return SharedAtomicU32(ctx, offset, value, &Sirit::Module::OpAtomicSMax);
+}
+
+Id EmitSharedAtomicUMax32(EmitContext& ctx, Id offset, Id value) {
+ return SharedAtomicU32(ctx, offset, value, &Sirit::Module::OpAtomicUMax);
+}
+
+Id EmitSharedAtomicInc32(EmitContext& ctx, Id offset, Id value) {
+ const Id shift_id{ctx.Const(2U)};
+ const Id index{ctx.OpShiftRightArithmetic(ctx.U32[1], offset, shift_id)};
+ return ctx.OpFunctionCall(ctx.U32[1], ctx.increment_cas_shared, index, value);
+}
+
+Id EmitSharedAtomicDec32(EmitContext& ctx, Id offset, Id value) {
+ const Id shift_id{ctx.Const(2U)};
+ const Id index{ctx.OpShiftRightArithmetic(ctx.U32[1], offset, shift_id)};
+ return ctx.OpFunctionCall(ctx.U32[1], ctx.decrement_cas_shared, index, value);
+}
+
+Id EmitSharedAtomicAnd32(EmitContext& ctx, Id offset, Id value) {
+ return SharedAtomicU32(ctx, offset, value, &Sirit::Module::OpAtomicAnd);
+}
+
+Id EmitSharedAtomicOr32(EmitContext& ctx, Id offset, Id value) {
+ return SharedAtomicU32(ctx, offset, value, &Sirit::Module::OpAtomicOr);
+}
+
+Id EmitSharedAtomicXor32(EmitContext& ctx, Id offset, Id value) {
+ return SharedAtomicU32(ctx, offset, value, &Sirit::Module::OpAtomicXor);
+}
+
+Id EmitSharedAtomicExchange32(EmitContext& ctx, Id offset, Id value) {
+ return SharedAtomicU32(ctx, offset, value, &Sirit::Module::OpAtomicExchange);
+}
+
+Id EmitSharedAtomicExchange64(EmitContext& ctx, Id offset, Id value) {
+ if (ctx.profile.support_int64_atomics && ctx.profile.support_explicit_workgroup_layout) {
+ const Id shift_id{ctx.Const(3U)};
+ const Id index{ctx.OpShiftRightArithmetic(ctx.U32[1], offset, shift_id)};
+ const Id pointer{
+ ctx.OpAccessChain(ctx.shared_u64, ctx.shared_memory_u64, ctx.u32_zero_value, index)};
+ const auto [scope, semantics]{AtomicArgs(ctx)};
+ return ctx.OpAtomicExchange(ctx.U64, pointer, scope, semantics, value);
+ }
+ LOG_ERROR(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
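+ // Emulate the exchange without atomics: read both 32-bit halves of the old value, then store
+ // the new value split across the same two words.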
+ const Id pointer_1{SharedPointer(ctx, offset, 0)};
+ const Id pointer_2{SharedPointer(ctx, offset, 1)};
+ const Id value_1{ctx.OpLoad(ctx.U32[1], pointer_1)};
+ const Id value_2{ctx.OpLoad(ctx.U32[1], pointer_2)};
+ const Id new_vector{ctx.OpBitcast(ctx.U32[2], value)};
+ ctx.OpStore(pointer_1, ctx.OpCompositeExtract(ctx.U32[1], new_vector, 0U));
+ ctx.OpStore(pointer_2, ctx.OpCompositeExtract(ctx.U32[1], new_vector, 1U));
+ return ctx.OpBitcast(ctx.U64, ctx.OpCompositeConstruct(ctx.U32[2], value_1, value_2));
+}
+
+Id EmitStorageAtomicIAdd32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ return StorageAtomicU32(ctx, binding, offset, value, &Sirit::Module::OpAtomicIAdd);
+}
+
+Id EmitStorageAtomicSMin32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ return StorageAtomicU32(ctx, binding, offset, value, &Sirit::Module::OpAtomicSMin);
+}
+
+Id EmitStorageAtomicUMin32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ return StorageAtomicU32(ctx, binding, offset, value, &Sirit::Module::OpAtomicUMin);
+}
+
+Id EmitStorageAtomicSMax32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ return StorageAtomicU32(ctx, binding, offset, value, &Sirit::Module::OpAtomicSMax);
+}
+
+Id EmitStorageAtomicUMax32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ return StorageAtomicU32(ctx, binding, offset, value, &Sirit::Module::OpAtomicUMax);
+}
+
+Id EmitStorageAtomicInc32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ const Id ssbo{ctx.ssbos[binding.U32()].U32};
+ const Id base_index{StorageIndex(ctx, offset, sizeof(u32))};
+ return ctx.OpFunctionCall(ctx.U32[1], ctx.increment_cas_ssbo, base_index, value, ssbo);
+}
+
+Id EmitStorageAtomicDec32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ const Id ssbo{ctx.ssbos[binding.U32()].U32};
+ const Id base_index{StorageIndex(ctx, offset, sizeof(u32))};
+ return ctx.OpFunctionCall(ctx.U32[1], ctx.decrement_cas_ssbo, base_index, value, ssbo);
+}
+
+Id EmitStorageAtomicAnd32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ return StorageAtomicU32(ctx, binding, offset, value, &Sirit::Module::OpAtomicAnd);
+}
+
+Id EmitStorageAtomicOr32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ return StorageAtomicU32(ctx, binding, offset, value, &Sirit::Module::OpAtomicOr);
+}
+
+Id EmitStorageAtomicXor32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ return StorageAtomicU32(ctx, binding, offset, value, &Sirit::Module::OpAtomicXor);
+}
+
+Id EmitStorageAtomicExchange32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ return StorageAtomicU32(ctx, binding, offset, value, &Sirit::Module::OpAtomicExchange);
+}
+
+Id EmitStorageAtomicIAdd64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ return StorageAtomicU64(ctx, binding, offset, value, &Sirit::Module::OpAtomicIAdd,
+ &Sirit::Module::OpIAdd);
+}
+
+Id EmitStorageAtomicSMin64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ return StorageAtomicU64(ctx, binding, offset, value, &Sirit::Module::OpAtomicSMin,
+ &Sirit::Module::OpSMin);
+}
+
+Id EmitStorageAtomicUMin64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ return StorageAtomicU64(ctx, binding, offset, value, &Sirit::Module::OpAtomicUMin,
+ &Sirit::Module::OpUMin);
+}
+
+Id EmitStorageAtomicSMax64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ return StorageAtomicU64(ctx, binding, offset, value, &Sirit::Module::OpAtomicSMax,
+ &Sirit::Module::OpSMax);
+}
+
+Id EmitStorageAtomicUMax64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ return StorageAtomicU64(ctx, binding, offset, value, &Sirit::Module::OpAtomicUMax,
+ &Sirit::Module::OpUMax);
+}
+
+Id EmitStorageAtomicAnd64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ return StorageAtomicU64(ctx, binding, offset, value, &Sirit::Module::OpAtomicAnd,
+ &Sirit::Module::OpBitwiseAnd);
+}
+
+Id EmitStorageAtomicOr64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ return StorageAtomicU64(ctx, binding, offset, value, &Sirit::Module::OpAtomicOr,
+ &Sirit::Module::OpBitwiseOr);
+}
+
+Id EmitStorageAtomicXor64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ return StorageAtomicU64(ctx, binding, offset, value, &Sirit::Module::OpAtomicXor,
+ &Sirit::Module::OpBitwiseXor);
+}
+
+Id EmitStorageAtomicExchange64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ if (ctx.profile.support_int64_atomics) {
+ const Id pointer{StoragePointer(ctx, ctx.storage_types.U64, &StorageDefinitions::U64,
+ binding, offset, sizeof(u64))};
+ const auto [scope, semantics]{AtomicArgs(ctx)};
+ return ctx.OpAtomicExchange(ctx.U64, pointer, scope, semantics, value);
+ }
+ LOG_ERROR(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
+ const Id pointer{StoragePointer(ctx, ctx.storage_types.U32x2, &StorageDefinitions::U32x2,
+ binding, offset, sizeof(u32[2]))};
+ const Id original{ctx.OpBitcast(ctx.U64, ctx.OpLoad(ctx.U32[2], pointer))};
+ ctx.OpStore(pointer, value);
+ return original;
+}
+
+Id EmitStorageAtomicAddF32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ const Id ssbo{ctx.ssbos[binding.U32()].U32};
+ const Id base_index{StorageIndex(ctx, offset, sizeof(u32))};
+ return ctx.OpFunctionCall(ctx.F32[1], ctx.f32_add_cas, base_index, value, ssbo);
+}
+
+Id EmitStorageAtomicAddF16x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ const Id ssbo{ctx.ssbos[binding.U32()].U32};
+ const Id base_index{StorageIndex(ctx, offset, sizeof(u32))};
+ const Id result{ctx.OpFunctionCall(ctx.F16[2], ctx.f16x2_add_cas, base_index, value, ssbo)};
+ return ctx.OpBitcast(ctx.U32[1], result);
+}
+
+Id EmitStorageAtomicAddF32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ const Id ssbo{ctx.ssbos[binding.U32()].U32};
+ const Id base_index{StorageIndex(ctx, offset, sizeof(u32))};
+ const Id result{ctx.OpFunctionCall(ctx.F32[2], ctx.f32x2_add_cas, base_index, value, ssbo)};
+ return ctx.OpPackHalf2x16(ctx.U32[1], result);
+}
+
+Id EmitStorageAtomicMinF16x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ const Id ssbo{ctx.ssbos[binding.U32()].U32};
+ const Id base_index{StorageIndex(ctx, offset, sizeof(u32))};
+ const Id result{ctx.OpFunctionCall(ctx.F16[2], ctx.f16x2_min_cas, base_index, value, ssbo)};
+ return ctx.OpBitcast(ctx.U32[1], result);
+}
+
+Id EmitStorageAtomicMinF32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ const Id ssbo{ctx.ssbos[binding.U32()].U32};
+ const Id base_index{StorageIndex(ctx, offset, sizeof(u32))};
+ const Id result{ctx.OpFunctionCall(ctx.F32[2], ctx.f32x2_min_cas, base_index, value, ssbo)};
+ return ctx.OpPackHalf2x16(ctx.U32[1], result);
+}
+
+Id EmitStorageAtomicMaxF16x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ const Id ssbo{ctx.ssbos[binding.U32()].U32};
+ const Id base_index{StorageIndex(ctx, offset, sizeof(u32))};
+ const Id result{ctx.OpFunctionCall(ctx.F16[2], ctx.f16x2_max_cas, base_index, value, ssbo)};
+ return ctx.OpBitcast(ctx.U32[1], result);
+}
+
+Id EmitStorageAtomicMaxF32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ const Id ssbo{ctx.ssbos[binding.U32()].U32};
+ const Id base_index{StorageIndex(ctx, offset, sizeof(u32))};
+ const Id result{ctx.OpFunctionCall(ctx.F32[2], ctx.f32x2_max_cas, base_index, value, ssbo)};
+ return ctx.OpPackHalf2x16(ctx.U32[1], result);
+}
+
+Id EmitGlobalAtomicIAdd32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicSMin32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicUMin32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicSMax32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicUMax32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicInc32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicDec32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicAnd32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicOr32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicXor32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicExchange32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicIAdd64(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicSMin64(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicUMin64(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicSMax64(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicUMax64(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicInc64(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicDec64(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicAnd64(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicOr64(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicXor64(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicExchange64(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicAddF32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicAddF16x2(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicAddF32x2(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicMinF16x2(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicMinF32x2(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicMaxF16x2(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicMaxF32x2(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+} // namespace Shader::Backend::SPIRV
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_barriers.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_barriers.cpp
new file mode 100644
index 000000000..e0b52a001
--- /dev/null
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_barriers.cpp
@@ -0,0 +1,38 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "shader_recompiler/backend/spirv/emit_spirv.h"
+#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
+#include "shader_recompiler/frontend/ir/modifiers.h"
+
+namespace Shader::Backend::SPIRV {
+namespace {
+void MemoryBarrier(EmitContext& ctx, spv::Scope scope) {
+ const auto semantics{
+ spv::MemorySemanticsMask::AcquireRelease | spv::MemorySemanticsMask::UniformMemory |
+ spv::MemorySemanticsMask::WorkgroupMemory | spv::MemorySemanticsMask::AtomicCounterMemory |
+ spv::MemorySemanticsMask::ImageMemory};
+ ctx.OpMemoryBarrier(ctx.Const(static_cast<u32>(scope)), ctx.Const(static_cast<u32>(semantics)));
+}
+} // Anonymous namespace
+
+void EmitBarrier(EmitContext& ctx) {
+ const auto execution{spv::Scope::Workgroup};
+ const auto memory{spv::Scope::Workgroup};
+ const auto memory_semantics{spv::MemorySemanticsMask::AcquireRelease |
+ spv::MemorySemanticsMask::WorkgroupMemory};
+ ctx.OpControlBarrier(ctx.Const(static_cast<u32>(execution)),
+ ctx.Const(static_cast<u32>(memory)),
+ ctx.Const(static_cast<u32>(memory_semantics)));
+}
+
+void EmitWorkgroupMemoryBarrier(EmitContext& ctx) {
+ MemoryBarrier(ctx, spv::Scope::Workgroup);
+}
+
+void EmitDeviceMemoryBarrier(EmitContext& ctx) {
+ MemoryBarrier(ctx, spv::Scope::Device);
+}
+
+} // namespace Shader::Backend::SPIRV
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_bitwise_conversion.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_bitwise_conversion.cpp
new file mode 100644
index 000000000..bb11f4f4e
--- /dev/null
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_bitwise_conversion.cpp
@@ -0,0 +1,66 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "shader_recompiler/backend/spirv/emit_spirv.h"
+#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
+
+namespace Shader::Backend::SPIRV {
+
+void EmitBitCastU16F16(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitBitCastU32F32(EmitContext& ctx, Id value) {
+ return ctx.OpBitcast(ctx.U32[1], value);
+}
+
+void EmitBitCastU64F64(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+void EmitBitCastF16U16(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitBitCastF32U32(EmitContext& ctx, Id value) {
+ return ctx.OpBitcast(ctx.F32[1], value);
+}
+
+void EmitBitCastF64U64(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitPackUint2x32(EmitContext& ctx, Id value) {
+ return ctx.OpBitcast(ctx.U64, value);
+}
+
+Id EmitUnpackUint2x32(EmitContext& ctx, Id value) {
+ return ctx.OpBitcast(ctx.U32[2], value);
+}
+
+Id EmitPackFloat2x16(EmitContext& ctx, Id value) {
+ return ctx.OpBitcast(ctx.U32[1], value);
+}
+
+Id EmitUnpackFloat2x16(EmitContext& ctx, Id value) {
+ return ctx.OpBitcast(ctx.F16[2], value);
+}
+
+Id EmitPackHalf2x16(EmitContext& ctx, Id value) {
+ return ctx.OpPackHalf2x16(ctx.U32[1], value);
+}
+
+Id EmitUnpackHalf2x16(EmitContext& ctx, Id value) {
+ return ctx.OpUnpackHalf2x16(ctx.F32[2], value);
+}
+
+Id EmitPackDouble2x32(EmitContext& ctx, Id value) {
+ return ctx.OpBitcast(ctx.F64[1], value);
+}
+
+Id EmitUnpackDouble2x32(EmitContext& ctx, Id value) {
+ return ctx.OpBitcast(ctx.U32[2], value);
+}
+
+} // namespace Shader::Backend::SPIRV
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_composite.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_composite.cpp
new file mode 100644
index 000000000..10ff4ecab
--- /dev/null
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_composite.cpp
@@ -0,0 +1,155 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "shader_recompiler/backend/spirv/emit_spirv.h"
+#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
+#include "shader_recompiler/frontend/ir/modifiers.h"
+
+namespace Shader::Backend::SPIRV {
+
+Id EmitCompositeConstructU32x2(EmitContext& ctx, Id e1, Id e2) {
+ return ctx.OpCompositeConstruct(ctx.U32[2], e1, e2);
+}
+
+Id EmitCompositeConstructU32x3(EmitContext& ctx, Id e1, Id e2, Id e3) {
+ return ctx.OpCompositeConstruct(ctx.U32[3], e1, e2, e3);
+}
+
+Id EmitCompositeConstructU32x4(EmitContext& ctx, Id e1, Id e2, Id e3, Id e4) {
+ return ctx.OpCompositeConstruct(ctx.U32[4], e1, e2, e3, e4);
+}
+
+Id EmitCompositeExtractU32x2(EmitContext& ctx, Id composite, u32 index) {
+ return ctx.OpCompositeExtract(ctx.U32[1], composite, index);
+}
+
+Id EmitCompositeExtractU32x3(EmitContext& ctx, Id composite, u32 index) {
+ return ctx.OpCompositeExtract(ctx.U32[1], composite, index);
+}
+
+Id EmitCompositeExtractU32x4(EmitContext& ctx, Id composite, u32 index) {
+ return ctx.OpCompositeExtract(ctx.U32[1], composite, index);
+}
+
+Id EmitCompositeInsertU32x2(EmitContext& ctx, Id composite, Id object, u32 index) {
+ return ctx.OpCompositeInsert(ctx.U32[2], object, composite, index);
+}
+
+Id EmitCompositeInsertU32x3(EmitContext& ctx, Id composite, Id object, u32 index) {
+ return ctx.OpCompositeInsert(ctx.U32[3], object, composite, index);
+}
+
+Id EmitCompositeInsertU32x4(EmitContext& ctx, Id composite, Id object, u32 index) {
+ return ctx.OpCompositeInsert(ctx.U32[4], object, composite, index);
+}
+
+Id EmitCompositeConstructF16x2(EmitContext& ctx, Id e1, Id e2) {
+ return ctx.OpCompositeConstruct(ctx.F16[2], e1, e2);
+}
+
+Id EmitCompositeConstructF16x3(EmitContext& ctx, Id e1, Id e2, Id e3) {
+ return ctx.OpCompositeConstruct(ctx.F16[3], e1, e2, e3);
+}
+
+Id EmitCompositeConstructF16x4(EmitContext& ctx, Id e1, Id e2, Id e3, Id e4) {
+ return ctx.OpCompositeConstruct(ctx.F16[4], e1, e2, e3, e4);
+}
+
+Id EmitCompositeExtractF16x2(EmitContext& ctx, Id composite, u32 index) {
+ return ctx.OpCompositeExtract(ctx.F16[1], composite, index);
+}
+
+Id EmitCompositeExtractF16x3(EmitContext& ctx, Id composite, u32 index) {
+ return ctx.OpCompositeExtract(ctx.F16[1], composite, index);
+}
+
+Id EmitCompositeExtractF16x4(EmitContext& ctx, Id composite, u32 index) {
+ return ctx.OpCompositeExtract(ctx.F16[1], composite, index);
+}
+
+Id EmitCompositeInsertF16x2(EmitContext& ctx, Id composite, Id object, u32 index) {
+ return ctx.OpCompositeInsert(ctx.F16[2], object, composite, index);
+}
+
+Id EmitCompositeInsertF16x3(EmitContext& ctx, Id composite, Id object, u32 index) {
+ return ctx.OpCompositeInsert(ctx.F16[3], object, composite, index);
+}
+
+Id EmitCompositeInsertF16x4(EmitContext& ctx, Id composite, Id object, u32 index) {
+ return ctx.OpCompositeInsert(ctx.F16[4], object, composite, index);
+}
+
+Id EmitCompositeConstructF32x2(EmitContext& ctx, Id e1, Id e2) {
+ return ctx.OpCompositeConstruct(ctx.F32[2], e1, e2);
+}
+
+Id EmitCompositeConstructF32x3(EmitContext& ctx, Id e1, Id e2, Id e3) {
+ return ctx.OpCompositeConstruct(ctx.F32[3], e1, e2, e3);
+}
+
+Id EmitCompositeConstructF32x4(EmitContext& ctx, Id e1, Id e2, Id e3, Id e4) {
+ return ctx.OpCompositeConstruct(ctx.F32[4], e1, e2, e3, e4);
+}
+
+Id EmitCompositeExtractF32x2(EmitContext& ctx, Id composite, u32 index) {
+ return ctx.OpCompositeExtract(ctx.F32[1], composite, index);
+}
+
+Id EmitCompositeExtractF32x3(EmitContext& ctx, Id composite, u32 index) {
+ return ctx.OpCompositeExtract(ctx.F32[1], composite, index);
+}
+
+Id EmitCompositeExtractF32x4(EmitContext& ctx, Id composite, u32 index) {
+ return ctx.OpCompositeExtract(ctx.F32[1], composite, index);
+}
+
+Id EmitCompositeInsertF32x2(EmitContext& ctx, Id composite, Id object, u32 index) {
+ return ctx.OpCompositeInsert(ctx.F32[2], object, composite, index);
+}
+
+Id EmitCompositeInsertF32x3(EmitContext& ctx, Id composite, Id object, u32 index) {
+ return ctx.OpCompositeInsert(ctx.F32[3], object, composite, index);
+}
+
+Id EmitCompositeInsertF32x4(EmitContext& ctx, Id composite, Id object, u32 index) {
+ return ctx.OpCompositeInsert(ctx.F32[4], object, composite, index);
+}
+
+void EmitCompositeConstructF64x2(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+void EmitCompositeConstructF64x3(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+void EmitCompositeConstructF64x4(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+void EmitCompositeExtractF64x2(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+void EmitCompositeExtractF64x3(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+void EmitCompositeExtractF64x4(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitCompositeInsertF64x2(EmitContext& ctx, Id composite, Id object, u32 index) {
+ return ctx.OpCompositeInsert(ctx.F64[2], object, composite, index);
+}
+
+Id EmitCompositeInsertF64x3(EmitContext& ctx, Id composite, Id object, u32 index) {
+ return ctx.OpCompositeInsert(ctx.F64[3], object, composite, index);
+}
+
+Id EmitCompositeInsertF64x4(EmitContext& ctx, Id composite, Id object, u32 index) {
+ return ctx.OpCompositeInsert(ctx.F64[4], object, composite, index);
+}
+
+} // namespace Shader::Backend::SPIRV
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
new file mode 100644
index 000000000..fb8c02a77
--- /dev/null
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
@@ -0,0 +1,505 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <tuple>
+#include <utility>
+
+#include "shader_recompiler/backend/spirv/emit_spirv.h"
+#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
+
+namespace Shader::Backend::SPIRV {
+namespace {
+struct AttrInfo {
+ Id pointer;
+ Id id;
+ bool needs_cast;
+};
+
+std::optional<AttrInfo> AttrTypes(EmitContext& ctx, u32 index) {
+ const AttributeType type{ctx.runtime_info.generic_input_types.at(index)};
+ switch (type) {
+ case AttributeType::Float:
+ return AttrInfo{ctx.input_f32, ctx.F32[1], false};
+ case AttributeType::UnsignedInt:
+ return AttrInfo{ctx.input_u32, ctx.U32[1], true};
+ case AttributeType::SignedInt:
+ return AttrInfo{ctx.input_s32, ctx.TypeInt(32, true), true};
+ case AttributeType::Disabled:
+ return std::nullopt;
+ }
+ throw InvalidArgument("Invalid attribute type {}", type);
+}
+
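+// Input attributes are arrayed per vertex in tessellation and geometry stages, so those stages
+// index with the vertex first.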
+template <typename... Args>
+Id AttrPointer(EmitContext& ctx, Id pointer_type, Id vertex, Id base, Args&&... args) {
+ switch (ctx.stage) {
+ case Stage::TessellationControl:
+ case Stage::TessellationEval:
+ case Stage::Geometry:
+ return ctx.OpAccessChain(pointer_type, base, vertex, std::forward<Args>(args)...);
+ default:
+ return ctx.OpAccessChain(pointer_type, base, std::forward<Args>(args)...);
+ }
+}
+
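+// Tessellation control outputs are arrayed per invocation and are indexed with InvocationId;
+// other stages access the output variable directly.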
+template <typename... Args>
+Id OutputAccessChain(EmitContext& ctx, Id result_type, Id base, Args&&... args) {
+ if (ctx.stage == Stage::TessellationControl) {
+ const Id invocation_id{ctx.OpLoad(ctx.U32[1], ctx.invocation_id)};
+ return ctx.OpAccessChain(result_type, base, invocation_id, std::forward<Args>(args)...);
+ } else {
+ return ctx.OpAccessChain(result_type, base, std::forward<Args>(args)...);
+ }
+}
+
+struct OutAttr {
+ OutAttr(Id pointer_) : pointer{pointer_} {}
+ OutAttr(Id pointer_, Id type_) : pointer{pointer_}, type{type_} {}
+
+ Id pointer{};
+ Id type{};
+};
+
+std::optional<OutAttr> OutputAttrPointer(EmitContext& ctx, IR::Attribute attr) {
+ if (IR::IsGeneric(attr)) {
+ const u32 index{IR::GenericAttributeIndex(attr)};
+ const u32 element{IR::GenericAttributeElement(attr)};
+ const GenericElementInfo& info{ctx.output_generics.at(index).at(element)};
+ if (info.num_components == 1) {
+ return info.id;
+ } else {
+ const u32 index_element{element - info.first_element};
+ const Id index_id{ctx.Const(index_element)};
+ return OutputAccessChain(ctx, ctx.output_f32, info.id, index_id);
+ }
+ }
+ switch (attr) {
+ case IR::Attribute::PointSize:
+ return ctx.output_point_size;
+ case IR::Attribute::PositionX:
+ case IR::Attribute::PositionY:
+ case IR::Attribute::PositionZ:
+ case IR::Attribute::PositionW: {
+ const u32 element{static_cast<u32>(attr) % 4};
+ const Id element_id{ctx.Const(element)};
+ return OutputAccessChain(ctx, ctx.output_f32, ctx.output_position, element_id);
+ }
+ case IR::Attribute::ClipDistance0:
+ case IR::Attribute::ClipDistance1:
+ case IR::Attribute::ClipDistance2:
+ case IR::Attribute::ClipDistance3:
+ case IR::Attribute::ClipDistance4:
+ case IR::Attribute::ClipDistance5:
+ case IR::Attribute::ClipDistance6:
+ case IR::Attribute::ClipDistance7: {
+ const u32 base{static_cast<u32>(IR::Attribute::ClipDistance0)};
+ const u32 index{static_cast<u32>(attr) - base};
+ const Id clip_num{ctx.Const(index)};
+ return OutputAccessChain(ctx, ctx.output_f32, ctx.clip_distances, clip_num);
+ }
+ case IR::Attribute::Layer:
+ if (ctx.profile.support_viewport_index_layer_non_geometry ||
+ ctx.stage == Shader::Stage::Geometry) {
+ return OutAttr{ctx.layer, ctx.U32[1]};
+ }
+ return std::nullopt;
+ case IR::Attribute::ViewportIndex:
+ if (ctx.profile.support_viewport_index_layer_non_geometry ||
+ ctx.stage == Shader::Stage::Geometry) {
+ return OutAttr{ctx.viewport_index, ctx.U32[1]};
+ }
+ return std::nullopt;
+ case IR::Attribute::ViewportMask:
+ if (!ctx.profile.support_viewport_mask) {
+ return std::nullopt;
+ }
+ return OutAttr{ctx.OpAccessChain(ctx.output_u32, ctx.viewport_mask, ctx.u32_zero_value),
+ ctx.U32[1]};
+ default:
+        throw NotImplementedException("Write attribute {}", attr);
+ }
+}
+
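+// Loads a constant buffer element through an access chain into the uniform buffer struct,
+// converting the byte offset into an index of element_size-sized elements.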
+Id GetCbuf(EmitContext& ctx, Id result_type, Id UniformDefinitions::*member_ptr, u32 element_size,
+ const IR::Value& binding, const IR::Value& offset) {
+ if (!binding.IsImmediate()) {
+ throw NotImplementedException("Constant buffer indexing");
+ }
+ const Id cbuf{ctx.cbufs[binding.U32()].*member_ptr};
+ const Id uniform_type{ctx.uniform_types.*member_ptr};
+ if (!offset.IsImmediate()) {
+ Id index{ctx.Def(offset)};
+ if (element_size > 1) {
+ const u32 log2_element_size{static_cast<u32>(std::countr_zero(element_size))};
+ const Id shift{ctx.Const(log2_element_size)};
+ index = ctx.OpShiftRightArithmetic(ctx.U32[1], ctx.Def(offset), shift);
+ }
+ const Id access_chain{ctx.OpAccessChain(uniform_type, cbuf, ctx.u32_zero_value, index)};
+ return ctx.OpLoad(result_type, access_chain);
+ }
+    // Hardware has been proven to read the aligned offset (e.g. LDC.U32 at offset 6 reads offset 4)
+ const Id imm_offset{ctx.Const(offset.U32() / element_size)};
+ const Id access_chain{ctx.OpAccessChain(uniform_type, cbuf, ctx.u32_zero_value, imm_offset)};
+ return ctx.OpLoad(result_type, access_chain);
+}
+
+Id GetCbufU32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
+ return GetCbuf(ctx, ctx.U32[1], &UniformDefinitions::U32, sizeof(u32), binding, offset);
+}
+
+Id GetCbufU32x4(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
+ return GetCbuf(ctx, ctx.U32[4], &UniformDefinitions::U32x4, sizeof(u32[4]), binding, offset);
+}
+
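+// When descriptor aliasing is unavailable, constant buffers are read as uvec4; this selects the
+// requested 32-bit element within the loaded vector.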
+Id GetCbufElement(EmitContext& ctx, Id vector, const IR::Value& offset, u32 index_offset) {
+ if (offset.IsImmediate()) {
+ const u32 element{(offset.U32() / 4) % 4 + index_offset};
+ return ctx.OpCompositeExtract(ctx.U32[1], vector, element);
+ }
+ const Id shift{ctx.OpShiftRightArithmetic(ctx.U32[1], ctx.Def(offset), ctx.Const(2u))};
+ Id element{ctx.OpBitwiseAnd(ctx.U32[1], shift, ctx.Const(3u))};
+ if (index_offset > 0) {
+ element = ctx.OpIAdd(ctx.U32[1], element, ctx.Const(index_offset));
+ }
+ return ctx.OpVectorExtractDynamic(ctx.U32[1], vector, element);
+}
+} // Anonymous namespace
+
+void EmitGetRegister(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+void EmitSetRegister(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+void EmitGetPred(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+void EmitSetPred(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+void EmitSetGotoVariable(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+void EmitGetGotoVariable(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+void EmitSetIndirectBranchVariable(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+void EmitGetIndirectBranchVariable(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+Id EmitGetCbufU8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
+ if (ctx.profile.support_descriptor_aliasing && ctx.profile.support_int8) {
+ const Id load{GetCbuf(ctx, ctx.U8, &UniformDefinitions::U8, sizeof(u8), binding, offset)};
+ return ctx.OpUConvert(ctx.U32[1], load);
+ }
+ Id element{};
+ if (ctx.profile.support_descriptor_aliasing) {
+ element = GetCbufU32(ctx, binding, offset);
+ } else {
+ const Id vector{GetCbufU32x4(ctx, binding, offset)};
+ element = GetCbufElement(ctx, vector, offset, 0u);
+ }
+ const Id bit_offset{ctx.BitOffset8(offset)};
+ return ctx.OpBitFieldUExtract(ctx.U32[1], element, bit_offset, ctx.Const(8u));
+}
+
+Id EmitGetCbufS8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
+ if (ctx.profile.support_descriptor_aliasing && ctx.profile.support_int8) {
+ const Id load{GetCbuf(ctx, ctx.S8, &UniformDefinitions::S8, sizeof(s8), binding, offset)};
+ return ctx.OpSConvert(ctx.U32[1], load);
+ }
+ Id element{};
+ if (ctx.profile.support_descriptor_aliasing) {
+ element = GetCbufU32(ctx, binding, offset);
+ } else {
+ const Id vector{GetCbufU32x4(ctx, binding, offset)};
+ element = GetCbufElement(ctx, vector, offset, 0u);
+ }
+ const Id bit_offset{ctx.BitOffset8(offset)};
+ return ctx.OpBitFieldSExtract(ctx.U32[1], element, bit_offset, ctx.Const(8u));
+}
+
+Id EmitGetCbufU16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
+ if (ctx.profile.support_descriptor_aliasing && ctx.profile.support_int16) {
+ const Id load{
+ GetCbuf(ctx, ctx.U16, &UniformDefinitions::U16, sizeof(u16), binding, offset)};
+ return ctx.OpUConvert(ctx.U32[1], load);
+ }
+ Id element{};
+ if (ctx.profile.support_descriptor_aliasing) {
+ element = GetCbufU32(ctx, binding, offset);
+ } else {
+ const Id vector{GetCbufU32x4(ctx, binding, offset)};
+ element = GetCbufElement(ctx, vector, offset, 0u);
+ }
+ const Id bit_offset{ctx.BitOffset16(offset)};
+ return ctx.OpBitFieldUExtract(ctx.U32[1], element, bit_offset, ctx.Const(16u));
+}
+
+Id EmitGetCbufS16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
+ if (ctx.profile.support_descriptor_aliasing && ctx.profile.support_int16) {
+ const Id load{
+ GetCbuf(ctx, ctx.S16, &UniformDefinitions::S16, sizeof(s16), binding, offset)};
+ return ctx.OpSConvert(ctx.U32[1], load);
+ }
+ Id element{};
+ if (ctx.profile.support_descriptor_aliasing) {
+ element = GetCbufU32(ctx, binding, offset);
+ } else {
+ const Id vector{GetCbufU32x4(ctx, binding, offset)};
+ element = GetCbufElement(ctx, vector, offset, 0u);
+ }
+ const Id bit_offset{ctx.BitOffset16(offset)};
+ return ctx.OpBitFieldSExtract(ctx.U32[1], element, bit_offset, ctx.Const(16u));
+}
+
+Id EmitGetCbufU32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
+ if (ctx.profile.support_descriptor_aliasing) {
+ return GetCbufU32(ctx, binding, offset);
+ } else {
+ const Id vector{GetCbufU32x4(ctx, binding, offset)};
+ return GetCbufElement(ctx, vector, offset, 0u);
+ }
+}
+
+Id EmitGetCbufF32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
+ if (ctx.profile.support_descriptor_aliasing) {
+ return GetCbuf(ctx, ctx.F32[1], &UniformDefinitions::F32, sizeof(f32), binding, offset);
+ } else {
+ const Id vector{GetCbufU32x4(ctx, binding, offset)};
+ return ctx.OpBitcast(ctx.F32[1], GetCbufElement(ctx, vector, offset, 0u));
+ }
+}
+
+Id EmitGetCbufU32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
+ if (ctx.profile.support_descriptor_aliasing) {
+ return GetCbuf(ctx, ctx.U32[2], &UniformDefinitions::U32x2, sizeof(u32[2]), binding,
+ offset);
+ } else {
+ const Id vector{GetCbufU32x4(ctx, binding, offset)};
+ return ctx.OpCompositeConstruct(ctx.U32[2], GetCbufElement(ctx, vector, offset, 0u),
+ GetCbufElement(ctx, vector, offset, 1u));
+ }
+}
+
+Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, Id vertex) {
+ const u32 element{static_cast<u32>(attr) % 4};
+ if (IR::IsGeneric(attr)) {
+ const u32 index{IR::GenericAttributeIndex(attr)};
+ const std::optional<AttrInfo> type{AttrTypes(ctx, index)};
+ if (!type) {
+ // Attribute is disabled
+ return ctx.Const(element == 3 ? 1.0f : 0.0f);
+ }
+ if (!ctx.runtime_info.previous_stage_stores.Generic(index, element)) {
+ // Varying component is not written
+ return ctx.Const(type && element == 3 ? 1.0f : 0.0f);
+ }
+ const Id generic_id{ctx.input_generics.at(index)};
+ const Id pointer{AttrPointer(ctx, type->pointer, vertex, generic_id, ctx.Const(element))};
+ const Id value{ctx.OpLoad(type->id, pointer)};
+ return type->needs_cast ? ctx.OpBitcast(ctx.F32[1], value) : value;
+ }
+ switch (attr) {
+ case IR::Attribute::PrimitiveId:
+ return ctx.OpBitcast(ctx.F32[1], ctx.OpLoad(ctx.U32[1], ctx.primitive_id));
+ case IR::Attribute::PositionX:
+ case IR::Attribute::PositionY:
+ case IR::Attribute::PositionZ:
+ case IR::Attribute::PositionW:
+ return ctx.OpLoad(ctx.F32[1], AttrPointer(ctx, ctx.input_f32, vertex, ctx.input_position,
+ ctx.Const(element)));
+ case IR::Attribute::InstanceId:
+ if (ctx.profile.support_vertex_instance_id) {
+ return ctx.OpBitcast(ctx.F32[1], ctx.OpLoad(ctx.U32[1], ctx.instance_id));
+ } else {
+ const Id index{ctx.OpLoad(ctx.U32[1], ctx.instance_index)};
+ const Id base{ctx.OpLoad(ctx.U32[1], ctx.base_instance)};
+ return ctx.OpBitcast(ctx.F32[1], ctx.OpISub(ctx.U32[1], index, base));
+ }
+ case IR::Attribute::VertexId:
+ if (ctx.profile.support_vertex_instance_id) {
+ return ctx.OpBitcast(ctx.F32[1], ctx.OpLoad(ctx.U32[1], ctx.vertex_id));
+ } else {
+ const Id index{ctx.OpLoad(ctx.U32[1], ctx.vertex_index)};
+ const Id base{ctx.OpLoad(ctx.U32[1], ctx.base_vertex)};
+ return ctx.OpBitcast(ctx.F32[1], ctx.OpISub(ctx.U32[1], index, base));
+ }
+ case IR::Attribute::FrontFace:
+ return ctx.OpSelect(ctx.U32[1], ctx.OpLoad(ctx.U1, ctx.front_face),
+ ctx.Const(std::numeric_limits<u32>::max()), ctx.u32_zero_value);
+ case IR::Attribute::PointSpriteS:
+ return ctx.OpLoad(ctx.F32[1],
+ ctx.OpAccessChain(ctx.input_f32, ctx.point_coord, ctx.u32_zero_value));
+ case IR::Attribute::PointSpriteT:
+ return ctx.OpLoad(ctx.F32[1],
+ ctx.OpAccessChain(ctx.input_f32, ctx.point_coord, ctx.Const(1U)));
+ case IR::Attribute::TessellationEvaluationPointU:
+ return ctx.OpLoad(ctx.F32[1],
+ ctx.OpAccessChain(ctx.input_f32, ctx.tess_coord, ctx.u32_zero_value));
+ case IR::Attribute::TessellationEvaluationPointV:
+ return ctx.OpLoad(ctx.F32[1],
+ ctx.OpAccessChain(ctx.input_f32, ctx.tess_coord, ctx.Const(1U)));
+ default:
+ throw NotImplementedException("Read attribute {}", attr);
+ }
+}
+
+void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, Id value, [[maybe_unused]] Id vertex) {
+ const std::optional<OutAttr> output{OutputAttrPointer(ctx, attr)};
+ if (!output) {
+ return;
+ }
+ if (Sirit::ValidId(output->type)) {
+ value = ctx.OpBitcast(output->type, value);
+ }
+ ctx.OpStore(output->pointer, value);
+}
+
+Id EmitGetAttributeIndexed(EmitContext& ctx, Id offset, Id vertex) {
+ switch (ctx.stage) {
+ case Stage::TessellationControl:
+ case Stage::TessellationEval:
+ case Stage::Geometry:
+ return ctx.OpFunctionCall(ctx.F32[1], ctx.indexed_load_func, offset, vertex);
+ default:
+ return ctx.OpFunctionCall(ctx.F32[1], ctx.indexed_load_func, offset);
+ }
+}
+
+void EmitSetAttributeIndexed(EmitContext& ctx, Id offset, Id value, [[maybe_unused]] Id vertex) {
+ ctx.OpFunctionCall(ctx.void_id, ctx.indexed_store_func, offset, value);
+}
+
+Id EmitGetPatch(EmitContext& ctx, IR::Patch patch) {
+ if (!IR::IsGeneric(patch)) {
+ throw NotImplementedException("Non-generic patch load");
+ }
+ const u32 index{IR::GenericPatchIndex(patch)};
+ const Id element{ctx.Const(IR::GenericPatchElement(patch))};
+ const Id type{ctx.stage == Stage::TessellationControl ? ctx.output_f32 : ctx.input_f32};
+ const Id pointer{ctx.OpAccessChain(type, ctx.patches.at(index), element)};
+ return ctx.OpLoad(ctx.F32[1], pointer);
+}
+
+void EmitSetPatch(EmitContext& ctx, IR::Patch patch, Id value) {
+ const Id pointer{[&] {
+ if (IR::IsGeneric(patch)) {
+ const u32 index{IR::GenericPatchIndex(patch)};
+ const Id element{ctx.Const(IR::GenericPatchElement(patch))};
+ return ctx.OpAccessChain(ctx.output_f32, ctx.patches.at(index), element);
+ }
+ switch (patch) {
+ case IR::Patch::TessellationLodLeft:
+ case IR::Patch::TessellationLodRight:
+ case IR::Patch::TessellationLodTop:
+ case IR::Patch::TessellationLodBottom: {
+ const u32 index{static_cast<u32>(patch) - u32(IR::Patch::TessellationLodLeft)};
+ const Id index_id{ctx.Const(index)};
+ return ctx.OpAccessChain(ctx.output_f32, ctx.output_tess_level_outer, index_id);
+ }
+ case IR::Patch::TessellationLodInteriorU:
+ return ctx.OpAccessChain(ctx.output_f32, ctx.output_tess_level_inner,
+ ctx.u32_zero_value);
+ case IR::Patch::TessellationLodInteriorV:
+ return ctx.OpAccessChain(ctx.output_f32, ctx.output_tess_level_inner, ctx.Const(1u));
+ default:
+ throw NotImplementedException("Patch {}", patch);
+ }
+ }()};
+ ctx.OpStore(pointer, value);
+}
+
+void EmitSetFragColor(EmitContext& ctx, u32 index, u32 component, Id value) {
+ const Id component_id{ctx.Const(component)};
+ const Id pointer{ctx.OpAccessChain(ctx.output_f32, ctx.frag_color.at(index), component_id)};
+ ctx.OpStore(pointer, value);
+}
+
+void EmitSetSampleMask(EmitContext& ctx, Id value) {
+ ctx.OpStore(ctx.sample_mask, value);
+}
+
+void EmitSetFragDepth(EmitContext& ctx, Id value) {
+ ctx.OpStore(ctx.frag_depth, value);
+}
+
+void EmitGetZFlag(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+void EmitGetSFlag(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+void EmitGetCFlag(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+void EmitGetOFlag(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+void EmitSetZFlag(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+void EmitSetSFlag(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+void EmitSetCFlag(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+void EmitSetOFlag(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitWorkgroupId(EmitContext& ctx) {
+ return ctx.OpLoad(ctx.U32[3], ctx.workgroup_id);
+}
+
+Id EmitLocalInvocationId(EmitContext& ctx) {
+ return ctx.OpLoad(ctx.U32[3], ctx.local_invocation_id);
+}
+
+Id EmitInvocationId(EmitContext& ctx) {
+ return ctx.OpLoad(ctx.U32[1], ctx.invocation_id);
+}
+
+Id EmitSampleId(EmitContext& ctx) {
+ return ctx.OpLoad(ctx.U32[1], ctx.sample_id);
+}
+
+Id EmitIsHelperInvocation(EmitContext& ctx) {
+ return ctx.OpLoad(ctx.U1, ctx.is_helper_invocation);
+}
+
+Id EmitYDirection(EmitContext& ctx) {
+ return ctx.Const(ctx.runtime_info.y_negate ? -1.0f : 1.0f);
+}
+
+Id EmitLoadLocal(EmitContext& ctx, Id word_offset) {
+ const Id pointer{ctx.OpAccessChain(ctx.private_u32, ctx.local_memory, word_offset)};
+ return ctx.OpLoad(ctx.U32[1], pointer);
+}
+
+void EmitWriteLocal(EmitContext& ctx, Id word_offset, Id value) {
+ const Id pointer{ctx.OpAccessChain(ctx.private_u32, ctx.local_memory, word_offset)};
+ ctx.OpStore(pointer, value);
+}
+
+} // namespace Shader::Backend::SPIRV
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_control_flow.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_control_flow.cpp
new file mode 100644
index 000000000..d33486f28
--- /dev/null
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_control_flow.cpp
@@ -0,0 +1,28 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "shader_recompiler/backend/spirv/emit_spirv.h"
+#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
+
+namespace Shader::Backend::SPIRV {
+
+void EmitJoin(EmitContext&) {
+ throw NotImplementedException("Join shouldn't be emitted");
+}
+
+void EmitDemoteToHelperInvocation(EmitContext& ctx) {
+ if (ctx.profile.support_demote_to_helper_invocation) {
+ ctx.OpDemoteToHelperInvocationEXT();
+ } else {
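+        // Fallback: emulate the demote with OpKill behind a branch that is always taken, keeping
+        // a merge block so the control flow graph stays structured.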
+ const Id kill_label{ctx.OpLabel()};
+ const Id impossible_label{ctx.OpLabel()};
+ ctx.OpSelectionMerge(impossible_label, spv::SelectionControlMask::MaskNone);
+ ctx.OpBranchConditional(ctx.true_value, kill_label, impossible_label);
+ ctx.AddLabel(kill_label);
+ ctx.OpKill();
+ ctx.AddLabel(impossible_label);
+ }
+}
+
+} // namespace Shader::Backend::SPIRV
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_convert.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_convert.cpp
new file mode 100644
index 000000000..fd42b7a16
--- /dev/null
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_convert.cpp
@@ -0,0 +1,269 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "shader_recompiler/backend/spirv/emit_spirv.h"
+#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
+
+namespace Shader::Backend::SPIRV {
+namespace {
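+// When the host lacks native 8/16-bit integers, narrow values are emulated with bit-field
+// extracts from a 32-bit integer.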
+Id ExtractU16(EmitContext& ctx, Id value) {
+ if (ctx.profile.support_int16) {
+ return ctx.OpUConvert(ctx.U16, value);
+ } else {
+ return ctx.OpBitFieldUExtract(ctx.U32[1], value, ctx.u32_zero_value, ctx.Const(16u));
+ }
+}
+
+Id ExtractS16(EmitContext& ctx, Id value) {
+ if (ctx.profile.support_int16) {
+ return ctx.OpSConvert(ctx.S16, value);
+ } else {
+ return ctx.OpBitFieldSExtract(ctx.U32[1], value, ctx.u32_zero_value, ctx.Const(16u));
+ }
+}
+
+Id ExtractU8(EmitContext& ctx, Id value) {
+ if (ctx.profile.support_int8) {
+ return ctx.OpUConvert(ctx.U8, value);
+ } else {
+ return ctx.OpBitFieldUExtract(ctx.U32[1], value, ctx.u32_zero_value, ctx.Const(8u));
+ }
+}
+
+Id ExtractS8(EmitContext& ctx, Id value) {
+ if (ctx.profile.support_int8) {
+ return ctx.OpSConvert(ctx.S8, value);
+ } else {
+ return ctx.OpBitFieldSExtract(ctx.U32[1], value, ctx.u32_zero_value, ctx.Const(8u));
+ }
+}
+} // Anonymous namespace
+
+Id EmitConvertS16F16(EmitContext& ctx, Id value) {
+ if (ctx.profile.support_int16) {
+ return ctx.OpSConvert(ctx.U32[1], ctx.OpConvertFToS(ctx.U16, value));
+ } else {
+ return ExtractS16(ctx, ctx.OpConvertFToS(ctx.U32[1], value));
+ }
+}
+
+Id EmitConvertS16F32(EmitContext& ctx, Id value) {
+ if (ctx.profile.support_int16) {
+ return ctx.OpSConvert(ctx.U32[1], ctx.OpConvertFToS(ctx.U16, value));
+ } else {
+ return ExtractS16(ctx, ctx.OpConvertFToS(ctx.U32[1], value));
+ }
+}
+
+Id EmitConvertS16F64(EmitContext& ctx, Id value) {
+ if (ctx.profile.support_int16) {
+ return ctx.OpSConvert(ctx.U32[1], ctx.OpConvertFToS(ctx.U16, value));
+ } else {
+ return ExtractS16(ctx, ctx.OpConvertFToS(ctx.U32[1], value));
+ }
+}
+
+Id EmitConvertS32F16(EmitContext& ctx, Id value) {
+ return ctx.OpConvertFToS(ctx.U32[1], value);
+}
+
+Id EmitConvertS32F32(EmitContext& ctx, Id value) {
+ if (ctx.profile.has_broken_signed_operations) {
+ return ctx.OpBitcast(ctx.U32[1], ctx.OpConvertFToS(ctx.S32[1], value));
+ } else {
+ return ctx.OpConvertFToS(ctx.U32[1], value);
+ }
+}
+
+Id EmitConvertS32F64(EmitContext& ctx, Id value) {
+ return ctx.OpConvertFToS(ctx.U32[1], value);
+}
+
+Id EmitConvertS64F16(EmitContext& ctx, Id value) {
+ return ctx.OpConvertFToS(ctx.U64, value);
+}
+
+Id EmitConvertS64F32(EmitContext& ctx, Id value) {
+ return ctx.OpConvertFToS(ctx.U64, value);
+}
+
+Id EmitConvertS64F64(EmitContext& ctx, Id value) {
+ return ctx.OpConvertFToS(ctx.U64, value);
+}
+
+Id EmitConvertU16F16(EmitContext& ctx, Id value) {
+ if (ctx.profile.support_int16) {
+ return ctx.OpUConvert(ctx.U32[1], ctx.OpConvertFToU(ctx.U16, value));
+ } else {
+ return ExtractU16(ctx, ctx.OpConvertFToU(ctx.U32[1], value));
+ }
+}
+
+Id EmitConvertU16F32(EmitContext& ctx, Id value) {
+ if (ctx.profile.support_int16) {
+ return ctx.OpUConvert(ctx.U32[1], ctx.OpConvertFToU(ctx.U16, value));
+ } else {
+ return ExtractU16(ctx, ctx.OpConvertFToU(ctx.U32[1], value));
+ }
+}
+
+Id EmitConvertU16F64(EmitContext& ctx, Id value) {
+ if (ctx.profile.support_int16) {
+ return ctx.OpUConvert(ctx.U32[1], ctx.OpConvertFToU(ctx.U16, value));
+ } else {
+ return ExtractU16(ctx, ctx.OpConvertFToU(ctx.U32[1], value));
+ }
+}
+
+Id EmitConvertU32F16(EmitContext& ctx, Id value) {
+ return ctx.OpConvertFToU(ctx.U32[1], value);
+}
+
+Id EmitConvertU32F32(EmitContext& ctx, Id value) {
+ return ctx.OpConvertFToU(ctx.U32[1], value);
+}
+
+Id EmitConvertU32F64(EmitContext& ctx, Id value) {
+ return ctx.OpConvertFToU(ctx.U32[1], value);
+}
+
+Id EmitConvertU64F16(EmitContext& ctx, Id value) {
+ return ctx.OpConvertFToU(ctx.U64, value);
+}
+
+Id EmitConvertU64F32(EmitContext& ctx, Id value) {
+ return ctx.OpConvertFToU(ctx.U64, value);
+}
+
+Id EmitConvertU64F64(EmitContext& ctx, Id value) {
+ return ctx.OpConvertFToU(ctx.U64, value);
+}
+
+Id EmitConvertU64U32(EmitContext& ctx, Id value) {
+ return ctx.OpUConvert(ctx.U64, value);
+}
+
+Id EmitConvertU32U64(EmitContext& ctx, Id value) {
+ return ctx.OpUConvert(ctx.U32[1], value);
+}
+
+Id EmitConvertF16F32(EmitContext& ctx, Id value) {
+ return ctx.OpFConvert(ctx.F16[1], value);
+}
+
+Id EmitConvertF32F16(EmitContext& ctx, Id value) {
+ return ctx.OpFConvert(ctx.F32[1], value);
+}
+
+Id EmitConvertF32F64(EmitContext& ctx, Id value) {
+ return ctx.OpFConvert(ctx.F32[1], value);
+}
+
+Id EmitConvertF64F32(EmitContext& ctx, Id value) {
+ return ctx.OpFConvert(ctx.F64[1], value);
+}
+
+Id EmitConvertF16S8(EmitContext& ctx, Id value) {
+ return ctx.OpConvertSToF(ctx.F16[1], ExtractS8(ctx, value));
+}
+
+Id EmitConvertF16S16(EmitContext& ctx, Id value) {
+ return ctx.OpConvertSToF(ctx.F16[1], ExtractS16(ctx, value));
+}
+
+Id EmitConvertF16S32(EmitContext& ctx, Id value) {
+ return ctx.OpConvertSToF(ctx.F16[1], value);
+}
+
+Id EmitConvertF16S64(EmitContext& ctx, Id value) {
+ return ctx.OpConvertSToF(ctx.F16[1], value);
+}
+
+Id EmitConvertF16U8(EmitContext& ctx, Id value) {
+ return ctx.OpConvertUToF(ctx.F16[1], ExtractU8(ctx, value));
+}
+
+Id EmitConvertF16U16(EmitContext& ctx, Id value) {
+ return ctx.OpConvertUToF(ctx.F16[1], ExtractU16(ctx, value));
+}
+
+Id EmitConvertF16U32(EmitContext& ctx, Id value) {
+ return ctx.OpConvertUToF(ctx.F16[1], value);
+}
+
+Id EmitConvertF16U64(EmitContext& ctx, Id value) {
+ return ctx.OpConvertUToF(ctx.F16[1], value);
+}
+
+Id EmitConvertF32S8(EmitContext& ctx, Id value) {
+ return ctx.OpConvertSToF(ctx.F32[1], ExtractS8(ctx, value));
+}
+
+Id EmitConvertF32S16(EmitContext& ctx, Id value) {
+ return ctx.OpConvertSToF(ctx.F32[1], ExtractS16(ctx, value));
+}
+
+Id EmitConvertF32S32(EmitContext& ctx, Id value) {
+ if (ctx.profile.has_broken_signed_operations) {
+ value = ctx.OpBitcast(ctx.S32[1], value);
+ }
+ return ctx.OpConvertSToF(ctx.F32[1], value);
+}
+
+Id EmitConvertF32S64(EmitContext& ctx, Id value) {
+ return ctx.OpConvertSToF(ctx.F32[1], value);
+}
+
+Id EmitConvertF32U8(EmitContext& ctx, Id value) {
+ return ctx.OpConvertUToF(ctx.F32[1], ExtractU8(ctx, value));
+}
+
+Id EmitConvertF32U16(EmitContext& ctx, Id value) {
+ return ctx.OpConvertUToF(ctx.F32[1], ExtractU16(ctx, value));
+}
+
+Id EmitConvertF32U32(EmitContext& ctx, Id value) {
+ return ctx.OpConvertUToF(ctx.F32[1], value);
+}
+
+Id EmitConvertF32U64(EmitContext& ctx, Id value) {
+ return ctx.OpConvertUToF(ctx.F32[1], value);
+}
+
+Id EmitConvertF64S8(EmitContext& ctx, Id value) {
+ return ctx.OpConvertSToF(ctx.F64[1], ExtractS8(ctx, value));
+}
+
+Id EmitConvertF64S16(EmitContext& ctx, Id value) {
+ return ctx.OpConvertSToF(ctx.F64[1], ExtractS16(ctx, value));
+}
+
+Id EmitConvertF64S32(EmitContext& ctx, Id value) {
+ if (ctx.profile.has_broken_signed_operations) {
+ value = ctx.OpBitcast(ctx.S32[1], value);
+ }
+ return ctx.OpConvertSToF(ctx.F64[1], value);
+}
+
+Id EmitConvertF64S64(EmitContext& ctx, Id value) {
+ return ctx.OpConvertSToF(ctx.F64[1], value);
+}
+
+Id EmitConvertF64U8(EmitContext& ctx, Id value) {
+ return ctx.OpConvertUToF(ctx.F64[1], ExtractU8(ctx, value));
+}
+
+Id EmitConvertF64U16(EmitContext& ctx, Id value) {
+ return ctx.OpConvertUToF(ctx.F64[1], ExtractU16(ctx, value));
+}
+
+Id EmitConvertF64U32(EmitContext& ctx, Id value) {
+ return ctx.OpConvertUToF(ctx.F64[1], value);
+}
+
+Id EmitConvertF64U64(EmitContext& ctx, Id value) {
+ return ctx.OpConvertUToF(ctx.F64[1], value);
+}
+
+} // namespace Shader::Backend::SPIRV
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_floating_point.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_floating_point.cpp
new file mode 100644
index 000000000..61cf25f9c
--- /dev/null
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_floating_point.cpp
@@ -0,0 +1,396 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "shader_recompiler/backend/spirv/emit_spirv.h"
+#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
+#include "shader_recompiler/frontend/ir/modifiers.h"
+
+namespace Shader::Backend::SPIRV {
+namespace {
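+// Applies the NoContraction decoration when the instruction's FP control flags disallow
+// contraction (e.g. fusing a multiply-add).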
+Id Decorate(EmitContext& ctx, IR::Inst* inst, Id op) {
+ const auto flags{inst->Flags<IR::FpControl>()};
+ if (flags.no_contraction) {
+ ctx.Decorate(op, spv::Decoration::NoContraction);
+ }
+ return op;
+}
+
+Id Clamp(EmitContext& ctx, Id type, Id value, Id zero, Id one) {
+ if (ctx.profile.has_broken_spirv_clamp) {
+ return ctx.OpFMin(type, ctx.OpFMax(type, value, zero), one);
+ } else {
+ return ctx.OpFClamp(type, value, zero, one);
+ }
+}
+
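+// Builds an ordered not-equal explicitly for drivers that mishandle NaNs in comparisons:
+// unordered not-equal ANDed with both operands not being NaN.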
+Id FPOrdNotEqual(EmitContext& ctx, Id lhs, Id rhs) {
+ if (ctx.profile.ignore_nan_fp_comparisons) {
+        const Id comp{ctx.OpFUnordNotEqual(ctx.U1, lhs, rhs)};
+ const Id lhs_not_nan{ctx.OpLogicalNot(ctx.U1, ctx.OpIsNan(ctx.U1, lhs))};
+ const Id rhs_not_nan{ctx.OpLogicalNot(ctx.U1, ctx.OpIsNan(ctx.U1, rhs))};
+ return ctx.OpLogicalAnd(ctx.U1, ctx.OpLogicalAnd(ctx.U1, comp, lhs_not_nan), rhs_not_nan);
+ } else {
+ return ctx.OpFOrdNotEqual(ctx.U1, lhs, rhs);
+ }
+}
+
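+// Rebuilds unordered comparison semantics for drivers that ignore NaNs by ORing the comparison
+// result with IsNan checks on both operands.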
+Id FPUnordCompare(Id (EmitContext::*comp_func)(Id, Id, Id), EmitContext& ctx, Id lhs, Id rhs) {
+ if (ctx.profile.ignore_nan_fp_comparisons) {
+ const Id lhs_nan{ctx.OpIsNan(ctx.U1, lhs)};
+ const Id rhs_nan{ctx.OpIsNan(ctx.U1, rhs)};
+ const Id comp{(ctx.*comp_func)(ctx.U1, lhs, rhs)};
+ return ctx.OpLogicalOr(ctx.U1, ctx.OpLogicalOr(ctx.U1, comp, lhs_nan), rhs_nan);
+ } else {
+ return (ctx.*comp_func)(ctx.U1, lhs, rhs);
+ }
+}
+} // Anonymous namespace
+
+Id EmitFPAbs16(EmitContext& ctx, Id value) {
+ return ctx.OpFAbs(ctx.F16[1], value);
+}
+
+Id EmitFPAbs32(EmitContext& ctx, Id value) {
+ return ctx.OpFAbs(ctx.F32[1], value);
+}
+
+Id EmitFPAbs64(EmitContext& ctx, Id value) {
+ return ctx.OpFAbs(ctx.F64[1], value);
+}
+
+Id EmitFPAdd16(EmitContext& ctx, IR::Inst* inst, Id a, Id b) {
+ return Decorate(ctx, inst, ctx.OpFAdd(ctx.F16[1], a, b));
+}
+
+Id EmitFPAdd32(EmitContext& ctx, IR::Inst* inst, Id a, Id b) {
+ return Decorate(ctx, inst, ctx.OpFAdd(ctx.F32[1], a, b));
+}
+
+Id EmitFPAdd64(EmitContext& ctx, IR::Inst* inst, Id a, Id b) {
+ return Decorate(ctx, inst, ctx.OpFAdd(ctx.F64[1], a, b));
+}
+
+Id EmitFPFma16(EmitContext& ctx, IR::Inst* inst, Id a, Id b, Id c) {
+ return Decorate(ctx, inst, ctx.OpFma(ctx.F16[1], a, b, c));
+}
+
+Id EmitFPFma32(EmitContext& ctx, IR::Inst* inst, Id a, Id b, Id c) {
+ return Decorate(ctx, inst, ctx.OpFma(ctx.F32[1], a, b, c));
+}
+
+Id EmitFPFma64(EmitContext& ctx, IR::Inst* inst, Id a, Id b, Id c) {
+ return Decorate(ctx, inst, ctx.OpFma(ctx.F64[1], a, b, c));
+}
+
+Id EmitFPMax32(EmitContext& ctx, Id a, Id b) {
+ return ctx.OpFMax(ctx.F32[1], a, b);
+}
+
+Id EmitFPMax64(EmitContext& ctx, Id a, Id b) {
+ return ctx.OpFMax(ctx.F64[1], a, b);
+}
+
+Id EmitFPMin32(EmitContext& ctx, Id a, Id b) {
+ return ctx.OpFMin(ctx.F32[1], a, b);
+}
+
+Id EmitFPMin64(EmitContext& ctx, Id a, Id b) {
+ return ctx.OpFMin(ctx.F64[1], a, b);
+}
+
+Id EmitFPMul16(EmitContext& ctx, IR::Inst* inst, Id a, Id b) {
+ return Decorate(ctx, inst, ctx.OpFMul(ctx.F16[1], a, b));
+}
+
+Id EmitFPMul32(EmitContext& ctx, IR::Inst* inst, Id a, Id b) {
+ return Decorate(ctx, inst, ctx.OpFMul(ctx.F32[1], a, b));
+}
+
+Id EmitFPMul64(EmitContext& ctx, IR::Inst* inst, Id a, Id b) {
+ return Decorate(ctx, inst, ctx.OpFMul(ctx.F64[1], a, b));
+}
+
+Id EmitFPNeg16(EmitContext& ctx, Id value) {
+ return ctx.OpFNegate(ctx.F16[1], value);
+}
+
+Id EmitFPNeg32(EmitContext& ctx, Id value) {
+ return ctx.OpFNegate(ctx.F32[1], value);
+}
+
+Id EmitFPNeg64(EmitContext& ctx, Id value) {
+ return ctx.OpFNegate(ctx.F64[1], value);
+}
+
+Id EmitFPSin(EmitContext& ctx, Id value) {
+ return ctx.OpSin(ctx.F32[1], value);
+}
+
+Id EmitFPCos(EmitContext& ctx, Id value) {
+ return ctx.OpCos(ctx.F32[1], value);
+}
+
+Id EmitFPExp2(EmitContext& ctx, Id value) {
+ return ctx.OpExp2(ctx.F32[1], value);
+}
+
+Id EmitFPLog2(EmitContext& ctx, Id value) {
+ return ctx.OpLog2(ctx.F32[1], value);
+}
+
+Id EmitFPRecip32(EmitContext& ctx, Id value) {
+ return ctx.OpFDiv(ctx.F32[1], ctx.Const(1.0f), value);
+}
+
+Id EmitFPRecip64(EmitContext& ctx, Id value) {
+ return ctx.OpFDiv(ctx.F64[1], ctx.Constant(ctx.F64[1], 1.0f), value);
+}
+
+Id EmitFPRecipSqrt32(EmitContext& ctx, Id value) {
+ return ctx.OpInverseSqrt(ctx.F32[1], value);
+}
+
+Id EmitFPRecipSqrt64(EmitContext& ctx, Id value) {
+ return ctx.OpInverseSqrt(ctx.F64[1], value);
+}
+
+Id EmitFPSqrt(EmitContext& ctx, Id value) {
+ return ctx.OpSqrt(ctx.F32[1], value);
+}
+
+Id EmitFPSaturate16(EmitContext& ctx, Id value) {
+ const Id zero{ctx.Constant(ctx.F16[1], u16{0})};
+ const Id one{ctx.Constant(ctx.F16[1], u16{0x3c00})};
+ return Clamp(ctx, ctx.F16[1], value, zero, one);
+}
+
+Id EmitFPSaturate32(EmitContext& ctx, Id value) {
+ const Id zero{ctx.Const(f32{0.0})};
+ const Id one{ctx.Const(f32{1.0})};
+ return Clamp(ctx, ctx.F32[1], value, zero, one);
+}
+
+Id EmitFPSaturate64(EmitContext& ctx, Id value) {
+ const Id zero{ctx.Constant(ctx.F64[1], f64{0.0})};
+ const Id one{ctx.Constant(ctx.F64[1], f64{1.0})};
+ return Clamp(ctx, ctx.F64[1], value, zero, one);
+}
+
+Id EmitFPClamp16(EmitContext& ctx, Id value, Id min_value, Id max_value) {
+ return Clamp(ctx, ctx.F16[1], value, min_value, max_value);
+}
+
+Id EmitFPClamp32(EmitContext& ctx, Id value, Id min_value, Id max_value) {
+ return Clamp(ctx, ctx.F32[1], value, min_value, max_value);
+}
+
+Id EmitFPClamp64(EmitContext& ctx, Id value, Id min_value, Id max_value) {
+ return Clamp(ctx, ctx.F64[1], value, min_value, max_value);
+}
+
+Id EmitFPRoundEven16(EmitContext& ctx, Id value) {
+ return ctx.OpRoundEven(ctx.F16[1], value);
+}
+
+Id EmitFPRoundEven32(EmitContext& ctx, Id value) {
+ return ctx.OpRoundEven(ctx.F32[1], value);
+}
+
+Id EmitFPRoundEven64(EmitContext& ctx, Id value) {
+ return ctx.OpRoundEven(ctx.F64[1], value);
+}
+
+Id EmitFPFloor16(EmitContext& ctx, Id value) {
+ return ctx.OpFloor(ctx.F16[1], value);
+}
+
+Id EmitFPFloor32(EmitContext& ctx, Id value) {
+ return ctx.OpFloor(ctx.F32[1], value);
+}
+
+Id EmitFPFloor64(EmitContext& ctx, Id value) {
+ return ctx.OpFloor(ctx.F64[1], value);
+}
+
+Id EmitFPCeil16(EmitContext& ctx, Id value) {
+ return ctx.OpCeil(ctx.F16[1], value);
+}
+
+Id EmitFPCeil32(EmitContext& ctx, Id value) {
+ return ctx.OpCeil(ctx.F32[1], value);
+}
+
+Id EmitFPCeil64(EmitContext& ctx, Id value) {
+ return ctx.OpCeil(ctx.F64[1], value);
+}
+
+Id EmitFPTrunc16(EmitContext& ctx, Id value) {
+ return ctx.OpTrunc(ctx.F16[1], value);
+}
+
+Id EmitFPTrunc32(EmitContext& ctx, Id value) {
+ return ctx.OpTrunc(ctx.F32[1], value);
+}
+
+Id EmitFPTrunc64(EmitContext& ctx, Id value) {
+ return ctx.OpTrunc(ctx.F64[1], value);
+}
+
+Id EmitFPOrdEqual16(EmitContext& ctx, Id lhs, Id rhs) {
+ return ctx.OpFOrdEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPOrdEqual32(EmitContext& ctx, Id lhs, Id rhs) {
+ return ctx.OpFOrdEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPOrdEqual64(EmitContext& ctx, Id lhs, Id rhs) {
+ return ctx.OpFOrdEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPUnordEqual16(EmitContext& ctx, Id lhs, Id rhs) {
+ return FPUnordCompare(&EmitContext::OpFUnordEqual, ctx, lhs, rhs);
+}
+
+Id EmitFPUnordEqual32(EmitContext& ctx, Id lhs, Id rhs) {
+ return FPUnordCompare(&EmitContext::OpFUnordEqual, ctx, lhs, rhs);
+}
+
+Id EmitFPUnordEqual64(EmitContext& ctx, Id lhs, Id rhs) {
+ return FPUnordCompare(&EmitContext::OpFUnordEqual, ctx, lhs, rhs);
+}
+
+Id EmitFPOrdNotEqual16(EmitContext& ctx, Id lhs, Id rhs) {
+ return FPOrdNotEqual(ctx, lhs, rhs);
+}
+
+Id EmitFPOrdNotEqual32(EmitContext& ctx, Id lhs, Id rhs) {
+ return FPOrdNotEqual(ctx, lhs, rhs);
+}
+
+Id EmitFPOrdNotEqual64(EmitContext& ctx, Id lhs, Id rhs) {
+ return FPOrdNotEqual(ctx, lhs, rhs);
+}
+
+Id EmitFPUnordNotEqual16(EmitContext& ctx, Id lhs, Id rhs) {
+ return ctx.OpFUnordNotEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPUnordNotEqual32(EmitContext& ctx, Id lhs, Id rhs) {
+ return ctx.OpFUnordNotEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPUnordNotEqual64(EmitContext& ctx, Id lhs, Id rhs) {
+ return ctx.OpFUnordNotEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPOrdLessThan16(EmitContext& ctx, Id lhs, Id rhs) {
+ return ctx.OpFOrdLessThan(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPOrdLessThan32(EmitContext& ctx, Id lhs, Id rhs) {
+ return ctx.OpFOrdLessThan(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPOrdLessThan64(EmitContext& ctx, Id lhs, Id rhs) {
+ return ctx.OpFOrdLessThan(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPUnordLessThan16(EmitContext& ctx, Id lhs, Id rhs) {
+ return FPUnordCompare(&EmitContext::OpFUnordLessThan, ctx, lhs, rhs);
+}
+
+Id EmitFPUnordLessThan32(EmitContext& ctx, Id lhs, Id rhs) {
+ return FPUnordCompare(&EmitContext::OpFUnordLessThan, ctx, lhs, rhs);
+}
+
+Id EmitFPUnordLessThan64(EmitContext& ctx, Id lhs, Id rhs) {
+ return FPUnordCompare(&EmitContext::OpFUnordLessThan, ctx, lhs, rhs);
+}
+
+Id EmitFPOrdGreaterThan16(EmitContext& ctx, Id lhs, Id rhs) {
+ return ctx.OpFOrdGreaterThan(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPOrdGreaterThan32(EmitContext& ctx, Id lhs, Id rhs) {
+ return ctx.OpFOrdGreaterThan(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPOrdGreaterThan64(EmitContext& ctx, Id lhs, Id rhs) {
+ return ctx.OpFOrdGreaterThan(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPUnordGreaterThan16(EmitContext& ctx, Id lhs, Id rhs) {
+ return FPUnordCompare(&EmitContext::OpFUnordGreaterThan, ctx, lhs, rhs);
+}
+
+Id EmitFPUnordGreaterThan32(EmitContext& ctx, Id lhs, Id rhs) {
+ return FPUnordCompare(&EmitContext::OpFUnordGreaterThan, ctx, lhs, rhs);
+}
+
+Id EmitFPUnordGreaterThan64(EmitContext& ctx, Id lhs, Id rhs) {
+ return FPUnordCompare(&EmitContext::OpFUnordGreaterThan, ctx, lhs, rhs);
+}
+
+Id EmitFPOrdLessThanEqual16(EmitContext& ctx, Id lhs, Id rhs) {
+ return ctx.OpFOrdLessThanEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPOrdLessThanEqual32(EmitContext& ctx, Id lhs, Id rhs) {
+ return ctx.OpFOrdLessThanEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPOrdLessThanEqual64(EmitContext& ctx, Id lhs, Id rhs) {
+ return ctx.OpFOrdLessThanEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPUnordLessThanEqual16(EmitContext& ctx, Id lhs, Id rhs) {
+ return FPUnordCompare(&EmitContext::OpFUnordLessThanEqual, ctx, lhs, rhs);
+}
+
+Id EmitFPUnordLessThanEqual32(EmitContext& ctx, Id lhs, Id rhs) {
+ return FPUnordCompare(&EmitContext::OpFUnordLessThanEqual, ctx, lhs, rhs);
+}
+
+Id EmitFPUnordLessThanEqual64(EmitContext& ctx, Id lhs, Id rhs) {
+ return FPUnordCompare(&EmitContext::OpFUnordLessThanEqual, ctx, lhs, rhs);
+}
+
+Id EmitFPOrdGreaterThanEqual16(EmitContext& ctx, Id lhs, Id rhs) {
+ return ctx.OpFOrdGreaterThanEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPOrdGreaterThanEqual32(EmitContext& ctx, Id lhs, Id rhs) {
+ return ctx.OpFOrdGreaterThanEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPOrdGreaterThanEqual64(EmitContext& ctx, Id lhs, Id rhs) {
+ return ctx.OpFOrdGreaterThanEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPUnordGreaterThanEqual16(EmitContext& ctx, Id lhs, Id rhs) {
+ return FPUnordCompare(&EmitContext::OpFUnordGreaterThanEqual, ctx, lhs, rhs);
+}
+
+Id EmitFPUnordGreaterThanEqual32(EmitContext& ctx, Id lhs, Id rhs) {
+ return FPUnordCompare(&EmitContext::OpFUnordGreaterThanEqual, ctx, lhs, rhs);
+}
+
+Id EmitFPUnordGreaterThanEqual64(EmitContext& ctx, Id lhs, Id rhs) {
+ return FPUnordCompare(&EmitContext::OpFUnordGreaterThanEqual, ctx, lhs, rhs);
+}
+
+Id EmitFPIsNan16(EmitContext& ctx, Id value) {
+ return ctx.OpIsNan(ctx.U1, value);
+}
+
+Id EmitFPIsNan32(EmitContext& ctx, Id value) {
+ return ctx.OpIsNan(ctx.U1, value);
+}
+
+Id EmitFPIsNan64(EmitContext& ctx, Id value) {
+ return ctx.OpIsNan(ctx.U1, value);
+}
+
+} // namespace Shader::Backend::SPIRV
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
new file mode 100644
index 000000000..3588f052b
--- /dev/null
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
@@ -0,0 +1,462 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <boost/container/static_vector.hpp>
+
+#include "shader_recompiler/backend/spirv/emit_spirv.h"
+#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
+#include "shader_recompiler/frontend/ir/modifiers.h"
+
+namespace Shader::Backend::SPIRV {
+namespace {
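+// Collects the optional image operands (bias, LOD, offsets, gradients, min LOD, sample index) and
+// the matching ImageOperands mask for an image instruction.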
+class ImageOperands {
+public:
+ explicit ImageOperands(EmitContext& ctx, bool has_bias, bool has_lod, bool has_lod_clamp,
+ Id lod, const IR::Value& offset) {
+ if (has_bias) {
+ const Id bias{has_lod_clamp ? ctx.OpCompositeExtract(ctx.F32[1], lod, 0) : lod};
+ Add(spv::ImageOperandsMask::Bias, bias);
+ }
+ if (has_lod) {
+ const Id lod_value{has_lod_clamp ? ctx.OpCompositeExtract(ctx.F32[1], lod, 0) : lod};
+ Add(spv::ImageOperandsMask::Lod, lod_value);
+ }
+ AddOffset(ctx, offset);
+ if (has_lod_clamp) {
+ const Id lod_clamp{has_bias ? ctx.OpCompositeExtract(ctx.F32[1], lod, 1) : lod};
+ Add(spv::ImageOperandsMask::MinLod, lod_clamp);
+ }
+ }
+
+ explicit ImageOperands(EmitContext& ctx, const IR::Value& offset, const IR::Value& offset2) {
+ if (offset2.IsEmpty()) {
+ if (offset.IsEmpty()) {
+ return;
+ }
+ Add(spv::ImageOperandsMask::Offset, ctx.Def(offset));
+ return;
+ }
+ const std::array values{offset.InstRecursive(), offset2.InstRecursive()};
+ if (!values[0]->AreAllArgsImmediates() || !values[1]->AreAllArgsImmediates()) {
+ LOG_WARNING(Shader_SPIRV, "Not all arguments in PTP are immediate, ignoring");
+ return;
+ }
+ const IR::Opcode opcode{values[0]->GetOpcode()};
+ if (opcode != values[1]->GetOpcode() || opcode != IR::Opcode::CompositeConstructU32x4) {
+ throw LogicError("Invalid PTP arguments");
+ }
+ auto read{[&](unsigned int a, unsigned int b) { return values[a]->Arg(b).U32(); }};
+
+ const Id offsets{ctx.ConstantComposite(
+ ctx.TypeArray(ctx.U32[2], ctx.Const(4U)), ctx.Const(read(0, 0), read(0, 1)),
+ ctx.Const(read(0, 2), read(0, 3)), ctx.Const(read(1, 0), read(1, 1)),
+ ctx.Const(read(1, 2), read(1, 3)))};
+ Add(spv::ImageOperandsMask::ConstOffsets, offsets);
+ }
+
+ explicit ImageOperands(Id offset, Id lod, Id ms) {
+ if (Sirit::ValidId(lod)) {
+ Add(spv::ImageOperandsMask::Lod, lod);
+ }
+ if (Sirit::ValidId(offset)) {
+ Add(spv::ImageOperandsMask::Offset, offset);
+ }
+ if (Sirit::ValidId(ms)) {
+ Add(spv::ImageOperandsMask::Sample, ms);
+ }
+ }
+
+ explicit ImageOperands(EmitContext& ctx, bool has_lod_clamp, Id derivates, u32 num_derivates,
+ Id offset, Id lod_clamp) {
+ if (!Sirit::ValidId(derivates)) {
+ throw LogicError("Derivates must be present");
+ }
+ boost::container::static_vector<Id, 3> deriv_x_accum;
+ boost::container::static_vector<Id, 3> deriv_y_accum;
+ for (u32 i = 0; i < num_derivates; ++i) {
+ deriv_x_accum.push_back(ctx.OpCompositeExtract(ctx.F32[1], derivates, i * 2));
+ deriv_y_accum.push_back(ctx.OpCompositeExtract(ctx.F32[1], derivates, i * 2 + 1));
+ }
+ const Id derivates_X{ctx.OpCompositeConstruct(
+ ctx.F32[num_derivates], std::span{deriv_x_accum.data(), deriv_x_accum.size()})};
+ const Id derivates_Y{ctx.OpCompositeConstruct(
+ ctx.F32[num_derivates], std::span{deriv_y_accum.data(), deriv_y_accum.size()})};
+ Add(spv::ImageOperandsMask::Grad, derivates_X, derivates_Y);
+ if (Sirit::ValidId(offset)) {
+ Add(spv::ImageOperandsMask::Offset, offset);
+ }
+ if (has_lod_clamp) {
+ Add(spv::ImageOperandsMask::MinLod, lod_clamp);
+ }
+ }
+
+ std::span<const Id> Span() const noexcept {
+ return std::span{operands.data(), operands.size()};
+ }
+
+ std::optional<spv::ImageOperandsMask> MaskOptional() const noexcept {
+ return mask != spv::ImageOperandsMask{} ? std::make_optional(mask) : std::nullopt;
+ }
+
+ spv::ImageOperandsMask Mask() const noexcept {
+ return mask;
+ }
+
+private:
+ void AddOffset(EmitContext& ctx, const IR::Value& offset) {
+ if (offset.IsEmpty()) {
+ return;
+ }
+ if (offset.IsImmediate()) {
+ Add(spv::ImageOperandsMask::ConstOffset, ctx.SConst(static_cast<s32>(offset.U32())));
+ return;
+ }
+ IR::Inst* const inst{offset.InstRecursive()};
+ if (inst->AreAllArgsImmediates()) {
+ switch (inst->GetOpcode()) {
+ case IR::Opcode::CompositeConstructU32x2:
+ Add(spv::ImageOperandsMask::ConstOffset,
+ ctx.SConst(static_cast<s32>(inst->Arg(0).U32()),
+ static_cast<s32>(inst->Arg(1).U32())));
+ return;
+ case IR::Opcode::CompositeConstructU32x3:
+ Add(spv::ImageOperandsMask::ConstOffset,
+ ctx.SConst(static_cast<s32>(inst->Arg(0).U32()),
+ static_cast<s32>(inst->Arg(1).U32()),
+ static_cast<s32>(inst->Arg(2).U32())));
+ return;
+ case IR::Opcode::CompositeConstructU32x4:
+ Add(spv::ImageOperandsMask::ConstOffset,
+ ctx.SConst(static_cast<s32>(inst->Arg(0).U32()),
+ static_cast<s32>(inst->Arg(1).U32()),
+ static_cast<s32>(inst->Arg(2).U32()),
+ static_cast<s32>(inst->Arg(3).U32())));
+ return;
+ default:
+ break;
+ }
+ }
+ Add(spv::ImageOperandsMask::Offset, ctx.Def(offset));
+ }
+
+ void Add(spv::ImageOperandsMask new_mask, Id value) {
+ mask = static_cast<spv::ImageOperandsMask>(static_cast<unsigned>(mask) |
+ static_cast<unsigned>(new_mask));
+ operands.push_back(value);
+ }
+
+ void Add(spv::ImageOperandsMask new_mask, Id value_1, Id value_2) {
+ mask = static_cast<spv::ImageOperandsMask>(static_cast<unsigned>(mask) |
+ static_cast<unsigned>(new_mask));
+ operands.push_back(value_1);
+ operands.push_back(value_2);
+ }
+
+ boost::container::static_vector<Id, 4> operands;
+ spv::ImageOperandsMask mask{};
+};
+
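+// Loads the combined image-sampler for a texture instruction, going through an access chain when
+// the descriptor is an array.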
+Id Texture(EmitContext& ctx, IR::TextureInstInfo info, [[maybe_unused]] const IR::Value& index) {
+ const TextureDefinition& def{ctx.textures.at(info.descriptor_index)};
+ if (def.count > 1) {
+ const Id pointer{ctx.OpAccessChain(def.pointer_type, def.id, ctx.Def(index))};
+ return ctx.OpLoad(def.sampled_type, pointer);
+ } else {
+ return ctx.OpLoad(def.sampled_type, def.id);
+ }
+}
+
+Id TextureImage(EmitContext& ctx, IR::TextureInstInfo info, const IR::Value& index) {
+ if (!index.IsImmediate() || index.U32() != 0) {
+ throw NotImplementedException("Indirect image indexing");
+ }
+ if (info.type == TextureType::Buffer) {
+ const TextureBufferDefinition& def{ctx.texture_buffers.at(info.descriptor_index)};
+ if (def.count > 1) {
+ throw NotImplementedException("Indirect texture sample");
+ }
+ const Id sampler_id{def.id};
+ const Id id{ctx.OpLoad(ctx.sampled_texture_buffer_type, sampler_id)};
+ return ctx.OpImage(ctx.image_buffer_type, id);
+ } else {
+ const TextureDefinition& def{ctx.textures.at(info.descriptor_index)};
+ if (def.count > 1) {
+ throw NotImplementedException("Indirect texture sample");
+ }
+ return ctx.OpImage(def.image_type, ctx.OpLoad(def.sampled_type, def.id));
+ }
+}
+
+Id Image(EmitContext& ctx, const IR::Value& index, IR::TextureInstInfo info) {
+ if (!index.IsImmediate() || index.U32() != 0) {
+ throw NotImplementedException("Indirect image indexing");
+ }
+ if (info.type == TextureType::Buffer) {
+ const ImageBufferDefinition def{ctx.image_buffers.at(info.descriptor_index)};
+ return ctx.OpLoad(def.image_type, def.id);
+ } else {
+ const ImageDefinition def{ctx.images.at(info.descriptor_index)};
+ return ctx.OpLoad(def.image_type, def.id);
+ }
+}
+
+Id Decorate(EmitContext& ctx, IR::Inst* inst, Id sample) {
+ const auto info{inst->Flags<IR::TextureInstInfo>()};
+ if (info.relaxed_precision != 0) {
+ ctx.Decorate(sample, spv::Decoration::RelaxedPrecision);
+ }
+ return sample;
+}
+
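+// Emits the sparse variant of an image instruction when a GetSparseFromOp pseudo-instruction
+// consumes the residency code; otherwise emits the regular opcode. The sparse result struct is
+// unpacked into the residency flag and the texel value.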
+template <typename MethodPtrType, typename... Args>
+Id Emit(MethodPtrType sparse_ptr, MethodPtrType non_sparse_ptr, EmitContext& ctx, IR::Inst* inst,
+ Id result_type, Args&&... args) {
+ IR::Inst* const sparse{inst->GetAssociatedPseudoOperation(IR::Opcode::GetSparseFromOp)};
+ if (!sparse) {
+ return Decorate(ctx, inst, (ctx.*non_sparse_ptr)(result_type, std::forward<Args>(args)...));
+ }
+ const Id struct_type{ctx.TypeStruct(ctx.U32[1], result_type)};
+ const Id sample{(ctx.*sparse_ptr)(struct_type, std::forward<Args>(args)...)};
+ const Id resident_code{ctx.OpCompositeExtract(ctx.U32[1], sample, 0U)};
+ sparse->SetDefinition(ctx.OpImageSparseTexelsResident(ctx.U1, resident_code));
+ sparse->Invalidate();
+ Decorate(ctx, inst, sample);
+ return ctx.OpCompositeExtract(result_type, sample, 1U);
+}
+} // Anonymous namespace
+
+Id EmitBindlessImageSampleImplicitLod(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+Id EmitBindlessImageSampleExplicitLod(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+Id EmitBindlessImageSampleDrefImplicitLod(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+Id EmitBindlessImageSampleDrefExplicitLod(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+Id EmitBindlessImageGather(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+Id EmitBindlessImageGatherDref(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+Id EmitBindlessImageFetch(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+Id EmitBindlessImageQueryDimensions(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+Id EmitBindlessImageQueryLod(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+Id EmitBindlessImageGradient(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+Id EmitBindlessImageRead(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+Id EmitBindlessImageWrite(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+Id EmitBoundImageSampleImplicitLod(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+Id EmitBoundImageSampleExplicitLod(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+Id EmitBoundImageSampleDrefImplicitLod(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+Id EmitBoundImageSampleDrefExplicitLod(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+Id EmitBoundImageGather(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+Id EmitBoundImageGatherDref(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+Id EmitBoundImageFetch(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+Id EmitBoundImageQueryDimensions(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+Id EmitBoundImageQueryLod(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+Id EmitBoundImageGradient(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+Id EmitBoundImageRead(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+Id EmitBoundImageWrite(EmitContext&) {
+ throw LogicError("Unreachable instruction");
+}
+
+Id EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+ Id bias_lc, const IR::Value& offset) {
+ const auto info{inst->Flags<IR::TextureInstInfo>()};
+ if (ctx.stage == Stage::Fragment) {
+ const ImageOperands operands(ctx, info.has_bias != 0, false, info.has_lod_clamp != 0,
+ bias_lc, offset);
+ return Emit(&EmitContext::OpImageSparseSampleImplicitLod,
+ &EmitContext::OpImageSampleImplicitLod, ctx, inst, ctx.F32[4],
+ Texture(ctx, info, index), coords, operands.MaskOptional(), operands.Span());
+ } else {
+        // We can't use implicit LODs on non-fragment stages in SPIR-V. Maxwell hardware behaves
+        // as if the LOD was explicitly zero. This may change on Turing with implicit compute
+        // derivatives.
+ const Id lod{ctx.Const(0.0f)};
+ const ImageOperands operands(ctx, false, true, info.has_lod_clamp != 0, lod, offset);
+ return Emit(&EmitContext::OpImageSparseSampleExplicitLod,
+ &EmitContext::OpImageSampleExplicitLod, ctx, inst, ctx.F32[4],
+ Texture(ctx, info, index), coords, operands.Mask(), operands.Span());
+ }
+}
+
+Id EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+ Id lod, const IR::Value& offset) {
+ const auto info{inst->Flags<IR::TextureInstInfo>()};
+ const ImageOperands operands(ctx, false, true, false, lod, offset);
+ return Emit(&EmitContext::OpImageSparseSampleExplicitLod,
+ &EmitContext::OpImageSampleExplicitLod, ctx, inst, ctx.F32[4],
+ Texture(ctx, info, index), coords, operands.Mask(), operands.Span());
+}
+
+Id EmitImageSampleDrefImplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index,
+ Id coords, Id dref, Id bias_lc, const IR::Value& offset) {
+ const auto info{inst->Flags<IR::TextureInstInfo>()};
+ const ImageOperands operands(ctx, info.has_bias != 0, false, info.has_lod_clamp != 0, bias_lc,
+ offset);
+ return Emit(&EmitContext::OpImageSparseSampleDrefImplicitLod,
+ &EmitContext::OpImageSampleDrefImplicitLod, ctx, inst, ctx.F32[1],
+ Texture(ctx, info, index), coords, dref, operands.MaskOptional(), operands.Span());
+}
+
+Id EmitImageSampleDrefExplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index,
+ Id coords, Id dref, Id lod, const IR::Value& offset) {
+ const auto info{inst->Flags<IR::TextureInstInfo>()};
+ const ImageOperands operands(ctx, false, true, false, lod, offset);
+ return Emit(&EmitContext::OpImageSparseSampleDrefExplicitLod,
+ &EmitContext::OpImageSampleDrefExplicitLod, ctx, inst, ctx.F32[1],
+ Texture(ctx, info, index), coords, dref, operands.Mask(), operands.Span());
+}
+
+Id EmitImageGather(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+ const IR::Value& offset, const IR::Value& offset2) {
+ const auto info{inst->Flags<IR::TextureInstInfo>()};
+ const ImageOperands operands(ctx, offset, offset2);
+ return Emit(&EmitContext::OpImageSparseGather, &EmitContext::OpImageGather, ctx, inst,
+ ctx.F32[4], Texture(ctx, info, index), coords, ctx.Const(info.gather_component),
+ operands.MaskOptional(), operands.Span());
+}
+
+Id EmitImageGatherDref(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+ const IR::Value& offset, const IR::Value& offset2, Id dref) {
+ const auto info{inst->Flags<IR::TextureInstInfo>()};
+ const ImageOperands operands(ctx, offset, offset2);
+ return Emit(&EmitContext::OpImageSparseDrefGather, &EmitContext::OpImageDrefGather, ctx, inst,
+ ctx.F32[4], Texture(ctx, info, index), coords, dref, operands.MaskOptional(),
+ operands.Span());
+}
+
+Id EmitImageFetch(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords, Id offset,
+ Id lod, Id ms) {
+ const auto info{inst->Flags<IR::TextureInstInfo>()};
+ if (info.type == TextureType::Buffer) {
+ lod = Id{};
+ }
+ const ImageOperands operands(offset, lod, ms);
+ return Emit(&EmitContext::OpImageSparseFetch, &EmitContext::OpImageFetch, ctx, inst, ctx.F32[4],
+ TextureImage(ctx, info, index), coords, operands.MaskOptional(), operands.Span());
+}
+
+Id EmitImageQueryDimensions(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id lod) {
+ const auto info{inst->Flags<IR::TextureInstInfo>()};
+ const Id image{TextureImage(ctx, info, index)};
+ const Id zero{ctx.u32_zero_value};
+ const auto mips{[&] { return ctx.OpImageQueryLevels(ctx.U32[1], image); }};
+ switch (info.type) {
+ case TextureType::Color1D:
+ return ctx.OpCompositeConstruct(ctx.U32[4], ctx.OpImageQuerySizeLod(ctx.U32[1], image, lod),
+ zero, zero, mips());
+ case TextureType::ColorArray1D:
+ case TextureType::Color2D:
+ case TextureType::ColorCube:
+ return ctx.OpCompositeConstruct(ctx.U32[4], ctx.OpImageQuerySizeLod(ctx.U32[2], image, lod),
+ zero, mips());
+ case TextureType::ColorArray2D:
+ case TextureType::Color3D:
+ case TextureType::ColorArrayCube:
+ return ctx.OpCompositeConstruct(ctx.U32[4], ctx.OpImageQuerySizeLod(ctx.U32[3], image, lod),
+ mips());
+ case TextureType::Buffer:
+ return ctx.OpCompositeConstruct(ctx.U32[4], ctx.OpImageQuerySize(ctx.U32[1], image), zero,
+ zero, mips());
+ }
+ throw LogicError("Unspecified image type {}", info.type.Value());
+}
+
+Id EmitImageQueryLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords) {
+ const auto info{inst->Flags<IR::TextureInstInfo>()};
+ const Id zero{ctx.f32_zero_value};
+ const Id sampler{Texture(ctx, info, index)};
+ return ctx.OpCompositeConstruct(ctx.F32[4], ctx.OpImageQueryLod(ctx.F32[2], sampler, coords),
+ zero, zero);
+}
+
+Id EmitImageGradient(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+ Id derivates, Id offset, Id lod_clamp) {
+ const auto info{inst->Flags<IR::TextureInstInfo>()};
+ const ImageOperands operands(ctx, info.has_lod_clamp != 0, derivates, info.num_derivates,
+ offset, lod_clamp);
+ return Emit(&EmitContext::OpImageSparseSampleExplicitLod,
+ &EmitContext::OpImageSampleExplicitLod, ctx, inst, ctx.F32[4],
+ Texture(ctx, info, index), coords, operands.Mask(), operands.Span());
+}
+
+Id EmitImageRead(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords) {
+ const auto info{inst->Flags<IR::TextureInstInfo>()};
+ if (info.image_format == ImageFormat::Typeless && !ctx.profile.support_typeless_image_loads) {
+ LOG_WARNING(Shader_SPIRV, "Typeless image read not supported by host");
+ return ctx.ConstantNull(ctx.U32[4]);
+ }
+ return Emit(&EmitContext::OpImageSparseRead, &EmitContext::OpImageRead, ctx, inst, ctx.U32[4],
+ Image(ctx, index, info), coords, std::nullopt, std::span<const Id>{});
+}
+
+void EmitImageWrite(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords, Id color) {
+ const auto info{inst->Flags<IR::TextureInstInfo>()};
+ ctx.OpImageWrite(Image(ctx, index, info), coords, color);
+}
+
+} // namespace Shader::Backend::SPIRV
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_image_atomic.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_image_atomic.cpp
new file mode 100644
index 000000000..d7f1a365a
--- /dev/null
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_image_atomic.cpp
@@ -0,0 +1,183 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "shader_recompiler/backend/spirv/emit_spirv.h"
+#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
+#include "shader_recompiler/frontend/ir/modifiers.h"
+
+namespace Shader::Backend::SPIRV {
+namespace {
+Id Image(EmitContext& ctx, const IR::Value& index, IR::TextureInstInfo info) {
+ if (!index.IsImmediate()) {
+ throw NotImplementedException("Indirect image indexing");
+ }
+ if (info.type == TextureType::Buffer) {
+ const ImageBufferDefinition def{ctx.image_buffers.at(index.U32())};
+ return def.id;
+ } else {
+ const ImageDefinition def{ctx.images.at(index.U32())};
+ return def.id;
+ }
+}
+
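+// Image atomics use Device scope with relaxed (None) memory semantics.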
+std::pair<Id, Id> AtomicArgs(EmitContext& ctx) {
+ const Id scope{ctx.Const(static_cast<u32>(spv::Scope::Device))};
+ const Id semantics{ctx.u32_zero_value};
+ return {scope, semantics};
+}
+
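+// Builds a texel pointer into the image and applies the given Sirit atomic opcode on it.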
+Id ImageAtomicU32(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords, Id value,
+ Id (Sirit::Module::*atomic_func)(Id, Id, Id, Id, Id)) {
+ const auto info{inst->Flags<IR::TextureInstInfo>()};
+ const Id image{Image(ctx, index, info)};
+ const Id pointer{ctx.OpImageTexelPointer(ctx.image_u32, image, coords, ctx.Const(0U))};
+ const auto [scope, semantics]{AtomicArgs(ctx)};
+ return (ctx.*atomic_func)(ctx.U32[1], pointer, scope, semantics, value);
+}
+} // Anonymous namespace
+
+Id EmitImageAtomicIAdd32(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+ Id value) {
+ return ImageAtomicU32(ctx, inst, index, coords, value, &Sirit::Module::OpAtomicIAdd);
+}
+
+Id EmitImageAtomicSMin32(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+ Id value) {
+ return ImageAtomicU32(ctx, inst, index, coords, value, &Sirit::Module::OpAtomicSMin);
+}
+
+Id EmitImageAtomicUMin32(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+ Id value) {
+ return ImageAtomicU32(ctx, inst, index, coords, value, &Sirit::Module::OpAtomicUMin);
+}
+
+Id EmitImageAtomicSMax32(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+ Id value) {
+ return ImageAtomicU32(ctx, inst, index, coords, value, &Sirit::Module::OpAtomicSMax);
+}
+
+Id EmitImageAtomicUMax32(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+ Id value) {
+ return ImageAtomicU32(ctx, inst, index, coords, value, &Sirit::Module::OpAtomicUMax);
+}
+
+Id EmitImageAtomicInc32(EmitContext&, IR::Inst*, const IR::Value&, Id, Id) {
+ // TODO: This is not yet implemented
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitImageAtomicDec32(EmitContext&, IR::Inst*, const IR::Value&, Id, Id) {
+ // TODO: This is not yet implemented
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitImageAtomicAnd32(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+ Id value) {
+ return ImageAtomicU32(ctx, inst, index, coords, value, &Sirit::Module::OpAtomicAnd);
+}
+
+Id EmitImageAtomicOr32(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+ Id value) {
+ return ImageAtomicU32(ctx, inst, index, coords, value, &Sirit::Module::OpAtomicOr);
+}
+
+Id EmitImageAtomicXor32(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+ Id value) {
+ return ImageAtomicU32(ctx, inst, index, coords, value, &Sirit::Module::OpAtomicXor);
+}
+
+Id EmitImageAtomicExchange32(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+ Id value) {
+ return ImageAtomicU32(ctx, inst, index, coords, value, &Sirit::Module::OpAtomicExchange);
+}
+
+Id EmitBindlessImageAtomicIAdd32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitBindlessImageAtomicSMin32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitBindlessImageAtomicUMin32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitBindlessImageAtomicSMax32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitBindlessImageAtomicUMax32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitBindlessImageAtomicInc32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitBindlessImageAtomicDec32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitBindlessImageAtomicAnd32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitBindlessImageAtomicOr32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitBindlessImageAtomicXor32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitBindlessImageAtomicExchange32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitBoundImageAtomicIAdd32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitBoundImageAtomicSMin32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitBoundImageAtomicUMin32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitBoundImageAtomicSMax32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitBoundImageAtomicUMax32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitBoundImageAtomicInc32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitBoundImageAtomicDec32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitBoundImageAtomicAnd32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitBoundImageAtomicOr32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitBoundImageAtomicXor32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitBoundImageAtomicExchange32(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+} // namespace Shader::Backend::SPIRV
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h b/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h
new file mode 100644
index 000000000..f99c02848
--- /dev/null
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h
@@ -0,0 +1,579 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <sirit/sirit.h>
+
+#include "common/common_types.h"
+
+namespace Shader::IR {
+enum class Attribute : u64;
+enum class Patch : u64;
+class Inst;
+class Value;
+} // namespace Shader::IR
+
+namespace Shader::Backend::SPIRV {
+
+using Sirit::Id;
+
+class EmitContext;
+
+// Microinstruction emitters
+Id EmitPhi(EmitContext& ctx, IR::Inst* inst);
+void EmitVoid(EmitContext& ctx);
+Id EmitIdentity(EmitContext& ctx, const IR::Value& value);
+Id EmitConditionRef(EmitContext& ctx, const IR::Value& value);
+void EmitReference(EmitContext&);
+void EmitPhiMove(EmitContext&);
+void EmitJoin(EmitContext& ctx);
+void EmitDemoteToHelperInvocation(EmitContext& ctx);
+void EmitBarrier(EmitContext& ctx);
+void EmitWorkgroupMemoryBarrier(EmitContext& ctx);
+void EmitDeviceMemoryBarrier(EmitContext& ctx);
+void EmitPrologue(EmitContext& ctx);
+void EmitEpilogue(EmitContext& ctx);
+void EmitEmitVertex(EmitContext& ctx, const IR::Value& stream);
+void EmitEndPrimitive(EmitContext& ctx, const IR::Value& stream);
+void EmitGetRegister(EmitContext& ctx);
+void EmitSetRegister(EmitContext& ctx);
+void EmitGetPred(EmitContext& ctx);
+void EmitSetPred(EmitContext& ctx);
+void EmitSetGotoVariable(EmitContext& ctx);
+void EmitGetGotoVariable(EmitContext& ctx);
+void EmitSetIndirectBranchVariable(EmitContext& ctx);
+void EmitGetIndirectBranchVariable(EmitContext& ctx);
+Id EmitGetCbufU8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset);
+Id EmitGetCbufS8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset);
+Id EmitGetCbufU16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset);
+Id EmitGetCbufS16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset);
+Id EmitGetCbufU32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset);
+Id EmitGetCbufF32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset);
+Id EmitGetCbufU32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset);
+Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, Id vertex);
+void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, Id value, Id vertex);
+Id EmitGetAttributeIndexed(EmitContext& ctx, Id offset, Id vertex);
+void EmitSetAttributeIndexed(EmitContext& ctx, Id offset, Id value, Id vertex);
+Id EmitGetPatch(EmitContext& ctx, IR::Patch patch);
+void EmitSetPatch(EmitContext& ctx, IR::Patch patch, Id value);
+void EmitSetFragColor(EmitContext& ctx, u32 index, u32 component, Id value);
+void EmitSetSampleMask(EmitContext& ctx, Id value);
+void EmitSetFragDepth(EmitContext& ctx, Id value);
+void EmitGetZFlag(EmitContext& ctx);
+void EmitGetSFlag(EmitContext& ctx);
+void EmitGetCFlag(EmitContext& ctx);
+void EmitGetOFlag(EmitContext& ctx);
+void EmitSetZFlag(EmitContext& ctx);
+void EmitSetSFlag(EmitContext& ctx);
+void EmitSetCFlag(EmitContext& ctx);
+void EmitSetOFlag(EmitContext& ctx);
+Id EmitWorkgroupId(EmitContext& ctx);
+Id EmitLocalInvocationId(EmitContext& ctx);
+Id EmitInvocationId(EmitContext& ctx);
+Id EmitSampleId(EmitContext& ctx);
+Id EmitIsHelperInvocation(EmitContext& ctx);
+Id EmitYDirection(EmitContext& ctx);
+Id EmitLoadLocal(EmitContext& ctx, Id word_offset);
+void EmitWriteLocal(EmitContext& ctx, Id word_offset, Id value);
+Id EmitUndefU1(EmitContext& ctx);
+Id EmitUndefU8(EmitContext& ctx);
+Id EmitUndefU16(EmitContext& ctx);
+Id EmitUndefU32(EmitContext& ctx);
+Id EmitUndefU64(EmitContext& ctx);
+void EmitLoadGlobalU8(EmitContext& ctx);
+void EmitLoadGlobalS8(EmitContext& ctx);
+void EmitLoadGlobalU16(EmitContext& ctx);
+void EmitLoadGlobalS16(EmitContext& ctx);
+Id EmitLoadGlobal32(EmitContext& ctx, Id address);
+Id EmitLoadGlobal64(EmitContext& ctx, Id address);
+Id EmitLoadGlobal128(EmitContext& ctx, Id address);
+void EmitWriteGlobalU8(EmitContext& ctx);
+void EmitWriteGlobalS8(EmitContext& ctx);
+void EmitWriteGlobalU16(EmitContext& ctx);
+void EmitWriteGlobalS16(EmitContext& ctx);
+void EmitWriteGlobal32(EmitContext& ctx, Id address, Id value);
+void EmitWriteGlobal64(EmitContext& ctx, Id address, Id value);
+void EmitWriteGlobal128(EmitContext& ctx, Id address, Id value);
+Id EmitLoadStorageU8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset);
+Id EmitLoadStorageS8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset);
+Id EmitLoadStorageU16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset);
+Id EmitLoadStorageS16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset);
+Id EmitLoadStorage32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset);
+Id EmitLoadStorage64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset);
+Id EmitLoadStorage128(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset);
+void EmitWriteStorageU8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+void EmitWriteStorageS8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+void EmitWriteStorageU16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+void EmitWriteStorageS16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+void EmitWriteStorage32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+void EmitWriteStorage64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+void EmitWriteStorage128(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitLoadSharedU8(EmitContext& ctx, Id offset);
+Id EmitLoadSharedS8(EmitContext& ctx, Id offset);
+Id EmitLoadSharedU16(EmitContext& ctx, Id offset);
+Id EmitLoadSharedS16(EmitContext& ctx, Id offset);
+Id EmitLoadSharedU32(EmitContext& ctx, Id offset);
+Id EmitLoadSharedU64(EmitContext& ctx, Id offset);
+Id EmitLoadSharedU128(EmitContext& ctx, Id offset);
+void EmitWriteSharedU8(EmitContext& ctx, Id offset, Id value);
+void EmitWriteSharedU16(EmitContext& ctx, Id offset, Id value);
+void EmitWriteSharedU32(EmitContext& ctx, Id offset, Id value);
+void EmitWriteSharedU64(EmitContext& ctx, Id offset, Id value);
+void EmitWriteSharedU128(EmitContext& ctx, Id offset, Id value);
+Id EmitCompositeConstructU32x2(EmitContext& ctx, Id e1, Id e2);
+Id EmitCompositeConstructU32x3(EmitContext& ctx, Id e1, Id e2, Id e3);
+Id EmitCompositeConstructU32x4(EmitContext& ctx, Id e1, Id e2, Id e3, Id e4);
+Id EmitCompositeExtractU32x2(EmitContext& ctx, Id composite, u32 index);
+Id EmitCompositeExtractU32x3(EmitContext& ctx, Id composite, u32 index);
+Id EmitCompositeExtractU32x4(EmitContext& ctx, Id composite, u32 index);
+Id EmitCompositeInsertU32x2(EmitContext& ctx, Id composite, Id object, u32 index);
+Id EmitCompositeInsertU32x3(EmitContext& ctx, Id composite, Id object, u32 index);
+Id EmitCompositeInsertU32x4(EmitContext& ctx, Id composite, Id object, u32 index);
+Id EmitCompositeConstructF16x2(EmitContext& ctx, Id e1, Id e2);
+Id EmitCompositeConstructF16x3(EmitContext& ctx, Id e1, Id e2, Id e3);
+Id EmitCompositeConstructF16x4(EmitContext& ctx, Id e1, Id e2, Id e3, Id e4);
+Id EmitCompositeExtractF16x2(EmitContext& ctx, Id composite, u32 index);
+Id EmitCompositeExtractF16x3(EmitContext& ctx, Id composite, u32 index);
+Id EmitCompositeExtractF16x4(EmitContext& ctx, Id composite, u32 index);
+Id EmitCompositeInsertF16x2(EmitContext& ctx, Id composite, Id object, u32 index);
+Id EmitCompositeInsertF16x3(EmitContext& ctx, Id composite, Id object, u32 index);
+Id EmitCompositeInsertF16x4(EmitContext& ctx, Id composite, Id object, u32 index);
+Id EmitCompositeConstructF32x2(EmitContext& ctx, Id e1, Id e2);
+Id EmitCompositeConstructF32x3(EmitContext& ctx, Id e1, Id e2, Id e3);
+Id EmitCompositeConstructF32x4(EmitContext& ctx, Id e1, Id e2, Id e3, Id e4);
+Id EmitCompositeExtractF32x2(EmitContext& ctx, Id composite, u32 index);
+Id EmitCompositeExtractF32x3(EmitContext& ctx, Id composite, u32 index);
+Id EmitCompositeExtractF32x4(EmitContext& ctx, Id composite, u32 index);
+Id EmitCompositeInsertF32x2(EmitContext& ctx, Id composite, Id object, u32 index);
+Id EmitCompositeInsertF32x3(EmitContext& ctx, Id composite, Id object, u32 index);
+Id EmitCompositeInsertF32x4(EmitContext& ctx, Id composite, Id object, u32 index);
+void EmitCompositeConstructF64x2(EmitContext& ctx);
+void EmitCompositeConstructF64x3(EmitContext& ctx);
+void EmitCompositeConstructF64x4(EmitContext& ctx);
+void EmitCompositeExtractF64x2(EmitContext& ctx);
+void EmitCompositeExtractF64x3(EmitContext& ctx);
+void EmitCompositeExtractF64x4(EmitContext& ctx);
+Id EmitCompositeInsertF64x2(EmitContext& ctx, Id composite, Id object, u32 index);
+Id EmitCompositeInsertF64x3(EmitContext& ctx, Id composite, Id object, u32 index);
+Id EmitCompositeInsertF64x4(EmitContext& ctx, Id composite, Id object, u32 index);
+Id EmitSelectU1(EmitContext& ctx, Id cond, Id true_value, Id false_value);
+Id EmitSelectU8(EmitContext& ctx, Id cond, Id true_value, Id false_value);
+Id EmitSelectU16(EmitContext& ctx, Id cond, Id true_value, Id false_value);
+Id EmitSelectU32(EmitContext& ctx, Id cond, Id true_value, Id false_value);
+Id EmitSelectU64(EmitContext& ctx, Id cond, Id true_value, Id false_value);
+Id EmitSelectF16(EmitContext& ctx, Id cond, Id true_value, Id false_value);
+Id EmitSelectF32(EmitContext& ctx, Id cond, Id true_value, Id false_value);
+Id EmitSelectF64(EmitContext& ctx, Id cond, Id true_value, Id false_value);
+void EmitBitCastU16F16(EmitContext& ctx);
+Id EmitBitCastU32F32(EmitContext& ctx, Id value);
+void EmitBitCastU64F64(EmitContext& ctx);
+void EmitBitCastF16U16(EmitContext& ctx);
+Id EmitBitCastF32U32(EmitContext& ctx, Id value);
+void EmitBitCastF64U64(EmitContext& ctx);
+Id EmitPackUint2x32(EmitContext& ctx, Id value);
+Id EmitUnpackUint2x32(EmitContext& ctx, Id value);
+Id EmitPackFloat2x16(EmitContext& ctx, Id value);
+Id EmitUnpackFloat2x16(EmitContext& ctx, Id value);
+Id EmitPackHalf2x16(EmitContext& ctx, Id value);
+Id EmitUnpackHalf2x16(EmitContext& ctx, Id value);
+Id EmitPackDouble2x32(EmitContext& ctx, Id value);
+Id EmitUnpackDouble2x32(EmitContext& ctx, Id value);
+void EmitGetZeroFromOp(EmitContext& ctx);
+void EmitGetSignFromOp(EmitContext& ctx);
+void EmitGetCarryFromOp(EmitContext& ctx);
+void EmitGetOverflowFromOp(EmitContext& ctx);
+void EmitGetSparseFromOp(EmitContext& ctx);
+void EmitGetInBoundsFromOp(EmitContext& ctx);
+Id EmitFPAbs16(EmitContext& ctx, Id value);
+Id EmitFPAbs32(EmitContext& ctx, Id value);
+Id EmitFPAbs64(EmitContext& ctx, Id value);
+Id EmitFPAdd16(EmitContext& ctx, IR::Inst* inst, Id a, Id b);
+Id EmitFPAdd32(EmitContext& ctx, IR::Inst* inst, Id a, Id b);
+Id EmitFPAdd64(EmitContext& ctx, IR::Inst* inst, Id a, Id b);
+Id EmitFPFma16(EmitContext& ctx, IR::Inst* inst, Id a, Id b, Id c);
+Id EmitFPFma32(EmitContext& ctx, IR::Inst* inst, Id a, Id b, Id c);
+Id EmitFPFma64(EmitContext& ctx, IR::Inst* inst, Id a, Id b, Id c);
+Id EmitFPMax32(EmitContext& ctx, Id a, Id b);
+Id EmitFPMax64(EmitContext& ctx, Id a, Id b);
+Id EmitFPMin32(EmitContext& ctx, Id a, Id b);
+Id EmitFPMin64(EmitContext& ctx, Id a, Id b);
+Id EmitFPMul16(EmitContext& ctx, IR::Inst* inst, Id a, Id b);
+Id EmitFPMul32(EmitContext& ctx, IR::Inst* inst, Id a, Id b);
+Id EmitFPMul64(EmitContext& ctx, IR::Inst* inst, Id a, Id b);
+Id EmitFPNeg16(EmitContext& ctx, Id value);
+Id EmitFPNeg32(EmitContext& ctx, Id value);
+Id EmitFPNeg64(EmitContext& ctx, Id value);
+Id EmitFPSin(EmitContext& ctx, Id value);
+Id EmitFPCos(EmitContext& ctx, Id value);
+Id EmitFPExp2(EmitContext& ctx, Id value);
+Id EmitFPLog2(EmitContext& ctx, Id value);
+Id EmitFPRecip32(EmitContext& ctx, Id value);
+Id EmitFPRecip64(EmitContext& ctx, Id value);
+Id EmitFPRecipSqrt32(EmitContext& ctx, Id value);
+Id EmitFPRecipSqrt64(EmitContext& ctx, Id value);
+Id EmitFPSqrt(EmitContext& ctx, Id value);
+Id EmitFPSaturate16(EmitContext& ctx, Id value);
+Id EmitFPSaturate32(EmitContext& ctx, Id value);
+Id EmitFPSaturate64(EmitContext& ctx, Id value);
+Id EmitFPClamp16(EmitContext& ctx, Id value, Id min_value, Id max_value);
+Id EmitFPClamp32(EmitContext& ctx, Id value, Id min_value, Id max_value);
+Id EmitFPClamp64(EmitContext& ctx, Id value, Id min_value, Id max_value);
+Id EmitFPRoundEven16(EmitContext& ctx, Id value);
+Id EmitFPRoundEven32(EmitContext& ctx, Id value);
+Id EmitFPRoundEven64(EmitContext& ctx, Id value);
+Id EmitFPFloor16(EmitContext& ctx, Id value);
+Id EmitFPFloor32(EmitContext& ctx, Id value);
+Id EmitFPFloor64(EmitContext& ctx, Id value);
+Id EmitFPCeil16(EmitContext& ctx, Id value);
+Id EmitFPCeil32(EmitContext& ctx, Id value);
+Id EmitFPCeil64(EmitContext& ctx, Id value);
+Id EmitFPTrunc16(EmitContext& ctx, Id value);
+Id EmitFPTrunc32(EmitContext& ctx, Id value);
+Id EmitFPTrunc64(EmitContext& ctx, Id value);
+Id EmitFPOrdEqual16(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdEqual32(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdEqual64(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordEqual16(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordEqual32(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordEqual64(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdNotEqual16(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdNotEqual32(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdNotEqual64(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordNotEqual16(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordNotEqual32(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordNotEqual64(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdLessThan16(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdLessThan32(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdLessThan64(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordLessThan16(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordLessThan32(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordLessThan64(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdGreaterThan16(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdGreaterThan32(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdGreaterThan64(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordGreaterThan16(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordGreaterThan32(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordGreaterThan64(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdLessThanEqual16(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdLessThanEqual32(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdLessThanEqual64(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordLessThanEqual16(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordLessThanEqual32(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordLessThanEqual64(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdGreaterThanEqual16(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdGreaterThanEqual32(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdGreaterThanEqual64(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordGreaterThanEqual16(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordGreaterThanEqual32(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordGreaterThanEqual64(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPIsNan16(EmitContext& ctx, Id value);
+Id EmitFPIsNan32(EmitContext& ctx, Id value);
+Id EmitFPIsNan64(EmitContext& ctx, Id value);
+Id EmitIAdd32(EmitContext& ctx, IR::Inst* inst, Id a, Id b);
+Id EmitIAdd64(EmitContext& ctx, Id a, Id b);
+Id EmitISub32(EmitContext& ctx, Id a, Id b);
+Id EmitISub64(EmitContext& ctx, Id a, Id b);
+Id EmitIMul32(EmitContext& ctx, Id a, Id b);
+Id EmitINeg32(EmitContext& ctx, Id value);
+Id EmitINeg64(EmitContext& ctx, Id value);
+Id EmitIAbs32(EmitContext& ctx, Id value);
+Id EmitShiftLeftLogical32(EmitContext& ctx, Id base, Id shift);
+Id EmitShiftLeftLogical64(EmitContext& ctx, Id base, Id shift);
+Id EmitShiftRightLogical32(EmitContext& ctx, Id base, Id shift);
+Id EmitShiftRightLogical64(EmitContext& ctx, Id base, Id shift);
+Id EmitShiftRightArithmetic32(EmitContext& ctx, Id base, Id shift);
+Id EmitShiftRightArithmetic64(EmitContext& ctx, Id base, Id shift);
+Id EmitBitwiseAnd32(EmitContext& ctx, IR::Inst* inst, Id a, Id b);
+Id EmitBitwiseOr32(EmitContext& ctx, IR::Inst* inst, Id a, Id b);
+Id EmitBitwiseXor32(EmitContext& ctx, IR::Inst* inst, Id a, Id b);
+Id EmitBitFieldInsert(EmitContext& ctx, Id base, Id insert, Id offset, Id count);
+Id EmitBitFieldSExtract(EmitContext& ctx, IR::Inst* inst, Id base, Id offset, Id count);
+Id EmitBitFieldUExtract(EmitContext& ctx, IR::Inst* inst, Id base, Id offset, Id count);
+Id EmitBitReverse32(EmitContext& ctx, Id value);
+Id EmitBitCount32(EmitContext& ctx, Id value);
+Id EmitBitwiseNot32(EmitContext& ctx, Id value);
+Id EmitFindSMsb32(EmitContext& ctx, Id value);
+Id EmitFindUMsb32(EmitContext& ctx, Id value);
+Id EmitSMin32(EmitContext& ctx, Id a, Id b);
+Id EmitUMin32(EmitContext& ctx, Id a, Id b);
+Id EmitSMax32(EmitContext& ctx, Id a, Id b);
+Id EmitUMax32(EmitContext& ctx, Id a, Id b);
+Id EmitSClamp32(EmitContext& ctx, IR::Inst* inst, Id value, Id min, Id max);
+Id EmitUClamp32(EmitContext& ctx, IR::Inst* inst, Id value, Id min, Id max);
+Id EmitSLessThan(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitULessThan(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitIEqual(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitSLessThanEqual(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitULessThanEqual(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitSGreaterThan(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitUGreaterThan(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitINotEqual(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitSGreaterThanEqual(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitUGreaterThanEqual(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitSharedAtomicIAdd32(EmitContext& ctx, Id pointer_offset, Id value);
+Id EmitSharedAtomicSMin32(EmitContext& ctx, Id pointer_offset, Id value);
+Id EmitSharedAtomicUMin32(EmitContext& ctx, Id pointer_offset, Id value);
+Id EmitSharedAtomicSMax32(EmitContext& ctx, Id pointer_offset, Id value);
+Id EmitSharedAtomicUMax32(EmitContext& ctx, Id pointer_offset, Id value);
+Id EmitSharedAtomicInc32(EmitContext& ctx, Id pointer_offset, Id value);
+Id EmitSharedAtomicDec32(EmitContext& ctx, Id pointer_offset, Id value);
+Id EmitSharedAtomicAnd32(EmitContext& ctx, Id pointer_offset, Id value);
+Id EmitSharedAtomicOr32(EmitContext& ctx, Id pointer_offset, Id value);
+Id EmitSharedAtomicXor32(EmitContext& ctx, Id pointer_offset, Id value);
+Id EmitSharedAtomicExchange32(EmitContext& ctx, Id pointer_offset, Id value);
+Id EmitSharedAtomicExchange64(EmitContext& ctx, Id pointer_offset, Id value);
+Id EmitStorageAtomicIAdd32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicSMin32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicUMin32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicSMax32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicUMax32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicInc32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicDec32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicAnd32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicOr32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicXor32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicExchange32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicIAdd64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicSMin64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicUMin64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicSMax64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicUMax64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicAnd64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicOr64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicXor64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicExchange64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicAddF32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicAddF16x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicAddF32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicMinF16x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicMinF32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicMaxF16x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicMaxF32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitGlobalAtomicIAdd32(EmitContext& ctx);
+Id EmitGlobalAtomicSMin32(EmitContext& ctx);
+Id EmitGlobalAtomicUMin32(EmitContext& ctx);
+Id EmitGlobalAtomicSMax32(EmitContext& ctx);
+Id EmitGlobalAtomicUMax32(EmitContext& ctx);
+Id EmitGlobalAtomicInc32(EmitContext& ctx);
+Id EmitGlobalAtomicDec32(EmitContext& ctx);
+Id EmitGlobalAtomicAnd32(EmitContext& ctx);
+Id EmitGlobalAtomicOr32(EmitContext& ctx);
+Id EmitGlobalAtomicXor32(EmitContext& ctx);
+Id EmitGlobalAtomicExchange32(EmitContext& ctx);
+Id EmitGlobalAtomicIAdd64(EmitContext& ctx);
+Id EmitGlobalAtomicSMin64(EmitContext& ctx);
+Id EmitGlobalAtomicUMin64(EmitContext& ctx);
+Id EmitGlobalAtomicSMax64(EmitContext& ctx);
+Id EmitGlobalAtomicUMax64(EmitContext& ctx);
+Id EmitGlobalAtomicInc64(EmitContext& ctx);
+Id EmitGlobalAtomicDec64(EmitContext& ctx);
+Id EmitGlobalAtomicAnd64(EmitContext& ctx);
+Id EmitGlobalAtomicOr64(EmitContext& ctx);
+Id EmitGlobalAtomicXor64(EmitContext& ctx);
+Id EmitGlobalAtomicExchange64(EmitContext& ctx);
+Id EmitGlobalAtomicAddF32(EmitContext& ctx);
+Id EmitGlobalAtomicAddF16x2(EmitContext& ctx);
+Id EmitGlobalAtomicAddF32x2(EmitContext& ctx);
+Id EmitGlobalAtomicMinF16x2(EmitContext& ctx);
+Id EmitGlobalAtomicMinF32x2(EmitContext& ctx);
+Id EmitGlobalAtomicMaxF16x2(EmitContext& ctx);
+Id EmitGlobalAtomicMaxF32x2(EmitContext& ctx);
+Id EmitLogicalOr(EmitContext& ctx, Id a, Id b);
+Id EmitLogicalAnd(EmitContext& ctx, Id a, Id b);
+Id EmitLogicalXor(EmitContext& ctx, Id a, Id b);
+Id EmitLogicalNot(EmitContext& ctx, Id value);
+Id EmitConvertS16F16(EmitContext& ctx, Id value);
+Id EmitConvertS16F32(EmitContext& ctx, Id value);
+Id EmitConvertS16F64(EmitContext& ctx, Id value);
+Id EmitConvertS32F16(EmitContext& ctx, Id value);
+Id EmitConvertS32F32(EmitContext& ctx, Id value);
+Id EmitConvertS32F64(EmitContext& ctx, Id value);
+Id EmitConvertS64F16(EmitContext& ctx, Id value);
+Id EmitConvertS64F32(EmitContext& ctx, Id value);
+Id EmitConvertS64F64(EmitContext& ctx, Id value);
+Id EmitConvertU16F16(EmitContext& ctx, Id value);
+Id EmitConvertU16F32(EmitContext& ctx, Id value);
+Id EmitConvertU16F64(EmitContext& ctx, Id value);
+Id EmitConvertU32F16(EmitContext& ctx, Id value);
+Id EmitConvertU32F32(EmitContext& ctx, Id value);
+Id EmitConvertU32F64(EmitContext& ctx, Id value);
+Id EmitConvertU64F16(EmitContext& ctx, Id value);
+Id EmitConvertU64F32(EmitContext& ctx, Id value);
+Id EmitConvertU64F64(EmitContext& ctx, Id value);
+Id EmitConvertU64U32(EmitContext& ctx, Id value);
+Id EmitConvertU32U64(EmitContext& ctx, Id value);
+Id EmitConvertF16F32(EmitContext& ctx, Id value);
+Id EmitConvertF32F16(EmitContext& ctx, Id value);
+Id EmitConvertF32F64(EmitContext& ctx, Id value);
+Id EmitConvertF64F32(EmitContext& ctx, Id value);
+Id EmitConvertF16S8(EmitContext& ctx, Id value);
+Id EmitConvertF16S16(EmitContext& ctx, Id value);
+Id EmitConvertF16S32(EmitContext& ctx, Id value);
+Id EmitConvertF16S64(EmitContext& ctx, Id value);
+Id EmitConvertF16U8(EmitContext& ctx, Id value);
+Id EmitConvertF16U16(EmitContext& ctx, Id value);
+Id EmitConvertF16U32(EmitContext& ctx, Id value);
+Id EmitConvertF16U64(EmitContext& ctx, Id value);
+Id EmitConvertF32S8(EmitContext& ctx, Id value);
+Id EmitConvertF32S16(EmitContext& ctx, Id value);
+Id EmitConvertF32S32(EmitContext& ctx, Id value);
+Id EmitConvertF32S64(EmitContext& ctx, Id value);
+Id EmitConvertF32U8(EmitContext& ctx, Id value);
+Id EmitConvertF32U16(EmitContext& ctx, Id value);
+Id EmitConvertF32U32(EmitContext& ctx, Id value);
+Id EmitConvertF32U64(EmitContext& ctx, Id value);
+Id EmitConvertF64S8(EmitContext& ctx, Id value);
+Id EmitConvertF64S16(EmitContext& ctx, Id value);
+Id EmitConvertF64S32(EmitContext& ctx, Id value);
+Id EmitConvertF64S64(EmitContext& ctx, Id value);
+Id EmitConvertF64U8(EmitContext& ctx, Id value);
+Id EmitConvertF64U16(EmitContext& ctx, Id value);
+Id EmitConvertF64U32(EmitContext& ctx, Id value);
+Id EmitConvertF64U64(EmitContext& ctx, Id value);
+Id EmitBindlessImageSampleImplicitLod(EmitContext&);
+Id EmitBindlessImageSampleExplicitLod(EmitContext&);
+Id EmitBindlessImageSampleDrefImplicitLod(EmitContext&);
+Id EmitBindlessImageSampleDrefExplicitLod(EmitContext&);
+Id EmitBindlessImageGather(EmitContext&);
+Id EmitBindlessImageGatherDref(EmitContext&);
+Id EmitBindlessImageFetch(EmitContext&);
+Id EmitBindlessImageQueryDimensions(EmitContext&);
+Id EmitBindlessImageQueryLod(EmitContext&);
+Id EmitBindlessImageGradient(EmitContext&);
+Id EmitBindlessImageRead(EmitContext&);
+Id EmitBindlessImageWrite(EmitContext&);
+Id EmitBoundImageSampleImplicitLod(EmitContext&);
+Id EmitBoundImageSampleExplicitLod(EmitContext&);
+Id EmitBoundImageSampleDrefImplicitLod(EmitContext&);
+Id EmitBoundImageSampleDrefExplicitLod(EmitContext&);
+Id EmitBoundImageGather(EmitContext&);
+Id EmitBoundImageGatherDref(EmitContext&);
+Id EmitBoundImageFetch(EmitContext&);
+Id EmitBoundImageQueryDimensions(EmitContext&);
+Id EmitBoundImageQueryLod(EmitContext&);
+Id EmitBoundImageGradient(EmitContext&);
+Id EmitBoundImageRead(EmitContext&);
+Id EmitBoundImageWrite(EmitContext&);
+Id EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+ Id bias_lc, const IR::Value& offset);
+Id EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+ Id lod, const IR::Value& offset);
+Id EmitImageSampleDrefImplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index,
+ Id coords, Id dref, Id bias_lc, const IR::Value& offset);
+Id EmitImageSampleDrefExplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index,
+ Id coords, Id dref, Id lod, const IR::Value& offset);
+Id EmitImageGather(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+ const IR::Value& offset, const IR::Value& offset2);
+Id EmitImageGatherDref(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+ const IR::Value& offset, const IR::Value& offset2, Id dref);
+Id EmitImageFetch(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords, Id offset,
+ Id lod, Id ms);
+Id EmitImageQueryDimensions(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id lod);
+Id EmitImageQueryLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords);
+Id EmitImageGradient(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+ Id derivates, Id offset, Id lod_clamp);
+Id EmitImageRead(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords);
+void EmitImageWrite(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords, Id color);
+Id EmitBindlessImageAtomicIAdd32(EmitContext&);
+Id EmitBindlessImageAtomicSMin32(EmitContext&);
+Id EmitBindlessImageAtomicUMin32(EmitContext&);
+Id EmitBindlessImageAtomicSMax32(EmitContext&);
+Id EmitBindlessImageAtomicUMax32(EmitContext&);
+Id EmitBindlessImageAtomicInc32(EmitContext&);
+Id EmitBindlessImageAtomicDec32(EmitContext&);
+Id EmitBindlessImageAtomicAnd32(EmitContext&);
+Id EmitBindlessImageAtomicOr32(EmitContext&);
+Id EmitBindlessImageAtomicXor32(EmitContext&);
+Id EmitBindlessImageAtomicExchange32(EmitContext&);
+Id EmitBoundImageAtomicIAdd32(EmitContext&);
+Id EmitBoundImageAtomicSMin32(EmitContext&);
+Id EmitBoundImageAtomicUMin32(EmitContext&);
+Id EmitBoundImageAtomicSMax32(EmitContext&);
+Id EmitBoundImageAtomicUMax32(EmitContext&);
+Id EmitBoundImageAtomicInc32(EmitContext&);
+Id EmitBoundImageAtomicDec32(EmitContext&);
+Id EmitBoundImageAtomicAnd32(EmitContext&);
+Id EmitBoundImageAtomicOr32(EmitContext&);
+Id EmitBoundImageAtomicXor32(EmitContext&);
+Id EmitBoundImageAtomicExchange32(EmitContext&);
+Id EmitImageAtomicIAdd32(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+ Id value);
+Id EmitImageAtomicSMin32(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+ Id value);
+Id EmitImageAtomicUMin32(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+ Id value);
+Id EmitImageAtomicSMax32(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+ Id value);
+Id EmitImageAtomicUMax32(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+ Id value);
+Id EmitImageAtomicInc32(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+ Id value);
+Id EmitImageAtomicDec32(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+ Id value);
+Id EmitImageAtomicAnd32(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+ Id value);
+Id EmitImageAtomicOr32(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+ Id value);
+Id EmitImageAtomicXor32(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+ Id value);
+Id EmitImageAtomicExchange32(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+ Id value);
+Id EmitLaneId(EmitContext& ctx);
+Id EmitVoteAll(EmitContext& ctx, Id pred);
+Id EmitVoteAny(EmitContext& ctx, Id pred);
+Id EmitVoteEqual(EmitContext& ctx, Id pred);
+Id EmitSubgroupBallot(EmitContext& ctx, Id pred);
+Id EmitSubgroupEqMask(EmitContext& ctx);
+Id EmitSubgroupLtMask(EmitContext& ctx);
+Id EmitSubgroupLeMask(EmitContext& ctx);
+Id EmitSubgroupGtMask(EmitContext& ctx);
+Id EmitSubgroupGeMask(EmitContext& ctx);
+Id EmitShuffleIndex(EmitContext& ctx, IR::Inst* inst, Id value, Id index, Id clamp,
+ Id segmentation_mask);
+Id EmitShuffleUp(EmitContext& ctx, IR::Inst* inst, Id value, Id index, Id clamp,
+ Id segmentation_mask);
+Id EmitShuffleDown(EmitContext& ctx, IR::Inst* inst, Id value, Id index, Id clamp,
+ Id segmentation_mask);
+Id EmitShuffleButterfly(EmitContext& ctx, IR::Inst* inst, Id value, Id index, Id clamp,
+ Id segmentation_mask);
+Id EmitFSwizzleAdd(EmitContext& ctx, Id op_a, Id op_b, Id swizzle);
+Id EmitDPdxFine(EmitContext& ctx, Id op_a);
+Id EmitDPdyFine(EmitContext& ctx, Id op_a);
+Id EmitDPdxCoarse(EmitContext& ctx, Id op_a);
+Id EmitDPdyCoarse(EmitContext& ctx, Id op_a);
+
+} // namespace Shader::Backend::SPIRV
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_integer.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_integer.cpp
new file mode 100644
index 000000000..3501d7495
--- /dev/null
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_integer.cpp
@@ -0,0 +1,270 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "shader_recompiler/backend/spirv/emit_spirv.h"
+#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
+
+namespace Shader::Backend::SPIRV {
+namespace {
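+// If a GetZeroFromOp pseudo-instruction is associated with 'inst', define it as
+// 'result == 0' and invalidate it. SetSignFlag does the same for the sign flag.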
+void SetZeroFlag(EmitContext& ctx, IR::Inst* inst, Id result) {
+ IR::Inst* const zero{inst->GetAssociatedPseudoOperation(IR::Opcode::GetZeroFromOp)};
+ if (!zero) {
+ return;
+ }
+ zero->SetDefinition(ctx.OpIEqual(ctx.U1, result, ctx.u32_zero_value));
+ zero->Invalidate();
+}
+
+void SetSignFlag(EmitContext& ctx, IR::Inst* inst, Id result) {
+ IR::Inst* const sign{inst->GetAssociatedPseudoOperation(IR::Opcode::GetSignFromOp)};
+ if (!sign) {
+ return;
+ }
+ sign->SetDefinition(ctx.OpSLessThan(ctx.U1, result, ctx.u32_zero_value));
+ sign->Invalidate();
+}
+} // Anonymous namespace
+
+Id EmitIAdd32(EmitContext& ctx, IR::Inst* inst, Id a, Id b) {
+ Id result{};
+ if (IR::Inst* const carry{inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp)}) {
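+ // OpIAddCarry returns a struct of {result, carry}; the carry member is 0 or 1.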
+ const Id carry_type{ctx.TypeStruct(ctx.U32[1], ctx.U32[1])};
+ const Id carry_result{ctx.OpIAddCarry(carry_type, a, b)};
+ result = ctx.OpCompositeExtract(ctx.U32[1], carry_result, 0U);
+
+ const Id carry_value{ctx.OpCompositeExtract(ctx.U32[1], carry_result, 1U)};
+ carry->SetDefinition(ctx.OpINotEqual(ctx.U1, carry_value, ctx.u32_zero_value));
+ carry->Invalidate();
+ } else {
+ result = ctx.OpIAdd(ctx.U32[1], a, b);
+ }
+ SetZeroFlag(ctx, inst, result);
+ SetSignFlag(ctx, inst, result);
+ if (IR::Inst* const overflow{inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp)}) {
+ // https://stackoverflow.com/questions/55468823/how-to-detect-integer-overflow-in-c
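+ // Signed overflow test: compare b against (INT_MAX - a), flipping the comparison
+ // direction depending on the sign of a.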
+ constexpr u32 s32_max{static_cast<u32>(std::numeric_limits<s32>::max())};
+ const Id is_positive{ctx.OpSGreaterThanEqual(ctx.U1, a, ctx.u32_zero_value)};
+ const Id sub_a{ctx.OpISub(ctx.U32[1], ctx.Const(s32_max), a)};
+
+ const Id positive_test{ctx.OpSGreaterThan(ctx.U1, b, sub_a)};
+ const Id negative_test{ctx.OpSLessThan(ctx.U1, b, sub_a)};
+ const Id overflow_flag{ctx.OpSelect(ctx.U1, is_positive, positive_test, negative_test)};
+ overflow->SetDefinition(overflow_flag);
+ overflow->Invalidate();
+ }
+ return result;
+}
+
+Id EmitIAdd64(EmitContext& ctx, Id a, Id b) {
+ return ctx.OpIAdd(ctx.U64, a, b);
+}
+
+Id EmitISub32(EmitContext& ctx, Id a, Id b) {
+ return ctx.OpISub(ctx.U32[1], a, b);
+}
+
+Id EmitISub64(EmitContext& ctx, Id a, Id b) {
+ return ctx.OpISub(ctx.U64, a, b);
+}
+
+Id EmitIMul32(EmitContext& ctx, Id a, Id b) {
+ return ctx.OpIMul(ctx.U32[1], a, b);
+}
+
+Id EmitINeg32(EmitContext& ctx, Id value) {
+ return ctx.OpSNegate(ctx.U32[1], value);
+}
+
+Id EmitINeg64(EmitContext& ctx, Id value) {
+ return ctx.OpSNegate(ctx.U64, value);
+}
+
+Id EmitIAbs32(EmitContext& ctx, Id value) {
+ return ctx.OpSAbs(ctx.U32[1], value);
+}
+
+Id EmitShiftLeftLogical32(EmitContext& ctx, Id base, Id shift) {
+ return ctx.OpShiftLeftLogical(ctx.U32[1], base, shift);
+}
+
+Id EmitShiftLeftLogical64(EmitContext& ctx, Id base, Id shift) {
+ return ctx.OpShiftLeftLogical(ctx.U64, base, shift);
+}
+
+Id EmitShiftRightLogical32(EmitContext& ctx, Id base, Id shift) {
+ return ctx.OpShiftRightLogical(ctx.U32[1], base, shift);
+}
+
+Id EmitShiftRightLogical64(EmitContext& ctx, Id base, Id shift) {
+ return ctx.OpShiftRightLogical(ctx.U64, base, shift);
+}
+
+Id EmitShiftRightArithmetic32(EmitContext& ctx, Id base, Id shift) {
+ return ctx.OpShiftRightArithmetic(ctx.U32[1], base, shift);
+}
+
+Id EmitShiftRightArithmetic64(EmitContext& ctx, Id base, Id shift) {
+ return ctx.OpShiftRightArithmetic(ctx.U64, base, shift);
+}
+
+Id EmitBitwiseAnd32(EmitContext& ctx, IR::Inst* inst, Id a, Id b) {
+ const Id result{ctx.OpBitwiseAnd(ctx.U32[1], a, b)};
+ SetZeroFlag(ctx, inst, result);
+ SetSignFlag(ctx, inst, result);
+ return result;
+}
+
+Id EmitBitwiseOr32(EmitContext& ctx, IR::Inst* inst, Id a, Id b) {
+ const Id result{ctx.OpBitwiseOr(ctx.U32[1], a, b)};
+ SetZeroFlag(ctx, inst, result);
+ SetSignFlag(ctx, inst, result);
+ return result;
+}
+
+Id EmitBitwiseXor32(EmitContext& ctx, IR::Inst* inst, Id a, Id b) {
+ const Id result{ctx.OpBitwiseXor(ctx.U32[1], a, b)};
+ SetZeroFlag(ctx, inst, result);
+ SetSignFlag(ctx, inst, result);
+ return result;
+}
+
+Id EmitBitFieldInsert(EmitContext& ctx, Id base, Id insert, Id offset, Id count) {
+ return ctx.OpBitFieldInsert(ctx.U32[1], base, insert, offset, count);
+}
+
+Id EmitBitFieldSExtract(EmitContext& ctx, IR::Inst* inst, Id base, Id offset, Id count) {
+ const Id result{ctx.OpBitFieldSExtract(ctx.U32[1], base, offset, count)};
+ SetZeroFlag(ctx, inst, result);
+ SetSignFlag(ctx, inst, result);
+ return result;
+}
+
+Id EmitBitFieldUExtract(EmitContext& ctx, IR::Inst* inst, Id base, Id offset, Id count) {
+ const Id result{ctx.OpBitFieldUExtract(ctx.U32[1], base, offset, count)};
+ SetZeroFlag(ctx, inst, result);
+ SetSignFlag(ctx, inst, result);
+ return result;
+}
+
+Id EmitBitReverse32(EmitContext& ctx, Id value) {
+ return ctx.OpBitReverse(ctx.U32[1], value);
+}
+
+Id EmitBitCount32(EmitContext& ctx, Id value) {
+ return ctx.OpBitCount(ctx.U32[1], value);
+}
+
+Id EmitBitwiseNot32(EmitContext& ctx, Id value) {
+ return ctx.OpNot(ctx.U32[1], value);
+}
+
+Id EmitFindSMsb32(EmitContext& ctx, Id value) {
+ return ctx.OpFindSMsb(ctx.U32[1], value);
+}
+
+Id EmitFindUMsb32(EmitContext& ctx, Id value) {
+ return ctx.OpFindUMsb(ctx.U32[1], value);
+}
+
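+// When the profile reports broken signed operations, bitcast the operands to a signed
+// type before emitting signed min/max and bitcast the result back.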
+Id EmitSMin32(EmitContext& ctx, Id a, Id b) {
+ const bool is_broken{ctx.profile.has_broken_signed_operations};
+ if (is_broken) {
+ a = ctx.OpBitcast(ctx.S32[1], a);
+ b = ctx.OpBitcast(ctx.S32[1], b);
+ }
+ const Id result{ctx.OpSMin(ctx.U32[1], a, b)};
+ return is_broken ? ctx.OpBitcast(ctx.U32[1], result) : result;
+}
+
+Id EmitUMin32(EmitContext& ctx, Id a, Id b) {
+ return ctx.OpUMin(ctx.U32[1], a, b);
+}
+
+Id EmitSMax32(EmitContext& ctx, Id a, Id b) {
+ const bool is_broken{ctx.profile.has_broken_signed_operations};
+ if (is_broken) {
+ a = ctx.OpBitcast(ctx.S32[1], a);
+ b = ctx.OpBitcast(ctx.S32[1], b);
+ }
+ const Id result{ctx.OpSMax(ctx.U32[1], a, b)};
+ return is_broken ? ctx.OpBitcast(ctx.U32[1], result) : result;
+}
+
+Id EmitUMax32(EmitContext& ctx, Id a, Id b) {
+ return ctx.OpUMax(ctx.U32[1], a, b);
+}
+
+Id EmitSClamp32(EmitContext& ctx, IR::Inst* inst, Id value, Id min, Id max) {
+ Id result{};
+ if (ctx.profile.has_broken_signed_operations || ctx.profile.has_broken_spirv_clamp) {
+ value = ctx.OpBitcast(ctx.S32[1], value);
+ min = ctx.OpBitcast(ctx.S32[1], min);
+ max = ctx.OpBitcast(ctx.S32[1], max);
+ if (ctx.profile.has_broken_spirv_clamp) {
+ result = ctx.OpSMax(ctx.S32[1], ctx.OpSMin(ctx.S32[1], value, max), min);
+ } else {
+ result = ctx.OpSClamp(ctx.S32[1], value, min, max);
+ }
+ result = ctx.OpBitcast(ctx.U32[1], result);
+ } else {
+ result = ctx.OpSClamp(ctx.U32[1], value, min, max);
+ }
+ SetZeroFlag(ctx, inst, result);
+ SetSignFlag(ctx, inst, result);
+ return result;
+}
+
+Id EmitUClamp32(EmitContext& ctx, IR::Inst* inst, Id value, Id min, Id max) {
+ Id result{};
+ if (ctx.profile.has_broken_spirv_clamp) {
+ result = ctx.OpUMax(ctx.U32[1], ctx.OpUMin(ctx.U32[1], value, max), min);
+ } else {
+ result = ctx.OpUClamp(ctx.U32[1], value, min, max);
+ }
+ SetZeroFlag(ctx, inst, result);
+ SetSignFlag(ctx, inst, result);
+ return result;
+}
+
+Id EmitSLessThan(EmitContext& ctx, Id lhs, Id rhs) {
+ return ctx.OpSLessThan(ctx.U1, lhs, rhs);
+}
+
+Id EmitULessThan(EmitContext& ctx, Id lhs, Id rhs) {
+ return ctx.OpULessThan(ctx.U1, lhs, rhs);
+}
+
+Id EmitIEqual(EmitContext& ctx, Id lhs, Id rhs) {
+ return ctx.OpIEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitSLessThanEqual(EmitContext& ctx, Id lhs, Id rhs) {
+ return ctx.OpSLessThanEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitULessThanEqual(EmitContext& ctx, Id lhs, Id rhs) {
+ return ctx.OpULessThanEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitSGreaterThan(EmitContext& ctx, Id lhs, Id rhs) {
+ return ctx.OpSGreaterThan(ctx.U1, lhs, rhs);
+}
+
+Id EmitUGreaterThan(EmitContext& ctx, Id lhs, Id rhs) {
+ return ctx.OpUGreaterThan(ctx.U1, lhs, rhs);
+}
+
+Id EmitINotEqual(EmitContext& ctx, Id lhs, Id rhs) {
+ return ctx.OpINotEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitSGreaterThanEqual(EmitContext& ctx, Id lhs, Id rhs) {
+ return ctx.OpSGreaterThanEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitUGreaterThanEqual(EmitContext& ctx, Id lhs, Id rhs) {
+ return ctx.OpUGreaterThanEqual(ctx.U1, lhs, rhs);
+}
+
+} // namespace Shader::Backend::SPIRV
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_logical.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_logical.cpp
new file mode 100644
index 000000000..b9a9500fc
--- /dev/null
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_logical.cpp
@@ -0,0 +1,26 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "shader_recompiler/backend/spirv/emit_spirv.h"
+#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
+
+namespace Shader::Backend::SPIRV {
+
+Id EmitLogicalOr(EmitContext& ctx, Id a, Id b) {
+ return ctx.OpLogicalOr(ctx.U1, a, b);
+}
+
+Id EmitLogicalAnd(EmitContext& ctx, Id a, Id b) {
+ return ctx.OpLogicalAnd(ctx.U1, a, b);
+}
+
+Id EmitLogicalXor(EmitContext& ctx, Id a, Id b) {
+ return ctx.OpLogicalNotEqual(ctx.U1, a, b);
+}
+
+Id EmitLogicalNot(EmitContext& ctx, Id value) {
+ return ctx.OpLogicalNot(ctx.U1, value);
+}
+
+} // namespace Shader::Backend::SPIRV
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_memory.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_memory.cpp
new file mode 100644
index 000000000..679ee2684
--- /dev/null
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_memory.cpp
@@ -0,0 +1,275 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <bit>
+
+#include "common/logging/log.h"
+#include "shader_recompiler/backend/spirv/emit_spirv.h"
+#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
+
+namespace Shader::Backend::SPIRV {
+namespace {
+Id StorageIndex(EmitContext& ctx, const IR::Value& offset, size_t element_size,
+ u32 index_offset = 0) {
+ if (offset.IsImmediate()) {
+ const u32 imm_offset{static_cast<u32>(offset.U32() / element_size) + index_offset};
+ return ctx.Const(imm_offset);
+ }
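+ // Dynamic offsets are divided by the element size with a right shift of log2(element_size).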
+ const u32 shift{static_cast<u32>(std::countr_zero(element_size))};
+ Id index{ctx.Def(offset)};
+ if (shift != 0) {
+ const Id shift_id{ctx.Const(shift)};
+ index = ctx.OpShiftRightLogical(ctx.U32[1], index, shift_id);
+ }
+ if (index_offset != 0) {
+ index = ctx.OpIAdd(ctx.U32[1], index, ctx.Const(index_offset));
+ }
+ return index;
+}
+
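+// Builds an access chain into the selected SSBO member for the computed element index.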
+Id StoragePointer(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ const StorageTypeDefinition& type_def, size_t element_size,
+ Id StorageDefinitions::*member_ptr, u32 index_offset = 0) {
+ if (!binding.IsImmediate()) {
+ throw NotImplementedException("Dynamic storage buffer indexing");
+ }
+ const Id ssbo{ctx.ssbos[binding.U32()].*member_ptr};
+ const Id index{StorageIndex(ctx, offset, element_size, index_offset)};
+ return ctx.OpAccessChain(type_def.element, ssbo, ctx.u32_zero_value, index);
+}
+
+Id LoadStorage(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, Id result_type,
+ const StorageTypeDefinition& type_def, size_t element_size,
+ Id StorageDefinitions::*member_ptr, u32 index_offset = 0) {
+ const Id pointer{
+ StoragePointer(ctx, binding, offset, type_def, element_size, member_ptr, index_offset)};
+ return ctx.OpLoad(result_type, pointer);
+}
+
+Id LoadStorage32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ u32 index_offset = 0) {
+ return LoadStorage(ctx, binding, offset, ctx.U32[1], ctx.storage_types.U32, sizeof(u32),
+ &StorageDefinitions::U32, index_offset);
+}
+
+void WriteStorage(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, Id value,
+ const StorageTypeDefinition& type_def, size_t element_size,
+ Id StorageDefinitions::*member_ptr, u32 index_offset = 0) {
+ const Id pointer{
+ StoragePointer(ctx, binding, offset, type_def, element_size, member_ptr, index_offset)};
+ ctx.OpStore(pointer, value);
+}
+
+void WriteStorage32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, Id value,
+ u32 index_offset = 0) {
+ WriteStorage(ctx, binding, offset, value, ctx.storage_types.U32, sizeof(u32),
+ &StorageDefinitions::U32, index_offset);
+}
+} // Anonymous namespace
+
+void EmitLoadGlobalU8(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+void EmitLoadGlobalS8(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+void EmitLoadGlobalU16(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+void EmitLoadGlobalS16(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
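+// Global memory helpers take 64-bit addresses; without int64 support the access is skipped
+// and loads return zero.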
+Id EmitLoadGlobal32(EmitContext& ctx, Id address) {
+ if (ctx.profile.support_int64) {
+ return ctx.OpFunctionCall(ctx.U32[1], ctx.load_global_func_u32, address);
+ }
+ LOG_WARNING(Shader_SPIRV, "Int64 not supported, ignoring memory operation");
+ return ctx.Const(0u);
+}
+
+Id EmitLoadGlobal64(EmitContext& ctx, Id address) {
+ if (ctx.profile.support_int64) {
+ return ctx.OpFunctionCall(ctx.U32[2], ctx.load_global_func_u32x2, address);
+ }
+ LOG_WARNING(Shader_SPIRV, "Int64 not supported, ignoring memory operation");
+ return ctx.Const(0u, 0u);
+}
+
+Id EmitLoadGlobal128(EmitContext& ctx, Id address) {
+ if (ctx.profile.support_int64) {
+ return ctx.OpFunctionCall(ctx.U32[4], ctx.load_global_func_u32x4, address);
+ }
+ LOG_WARNING(Shader_SPIRV, "Int64 not supported, ignoring memory operation");
+ return ctx.Const(0u, 0u, 0u, 0u);
+}
+
+void EmitWriteGlobalU8(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+void EmitWriteGlobalS8(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+void EmitWriteGlobalU16(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+void EmitWriteGlobalS16(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+void EmitWriteGlobal32(EmitContext& ctx, Id address, Id value) {
+ if (ctx.profile.support_int64) {
+ ctx.OpFunctionCall(ctx.void_id, ctx.write_global_func_u32, address, value);
+ return;
+ }
+ LOG_WARNING(Shader_SPIRV, "Int64 not supported, ignoring memory operation");
+}
+
+void EmitWriteGlobal64(EmitContext& ctx, Id address, Id value) {
+ if (ctx.profile.support_int64) {
+ ctx.OpFunctionCall(ctx.void_id, ctx.write_global_func_u32x2, address, value);
+ return;
+ }
+ LOG_WARNING(Shader_SPIRV, "Int64 not supported, ignoring memory operation");
+}
+
+void EmitWriteGlobal128(EmitContext& ctx, Id address, Id value) {
+ if (ctx.profile.support_int64) {
+ ctx.OpFunctionCall(ctx.void_id, ctx.write_global_func_u32x4, address, value);
+ return;
+ }
+ LOG_WARNING(Shader_SPIRV, "Int64 not supported, ignoring memory operation");
+}
+
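+// Small (8-bit and 16-bit) storage accesses fall back to loading the containing 32-bit
+// word and extracting the field when small integer types or descriptor aliasing are
+// unsupported.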
+Id EmitLoadStorageU8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
+ if (ctx.profile.support_int8 && ctx.profile.support_descriptor_aliasing) {
+ return ctx.OpUConvert(ctx.U32[1],
+ LoadStorage(ctx, binding, offset, ctx.U8, ctx.storage_types.U8,
+ sizeof(u8), &StorageDefinitions::U8));
+ } else {
+ return ctx.OpBitFieldUExtract(ctx.U32[1], LoadStorage32(ctx, binding, offset),
+ ctx.BitOffset8(offset), ctx.Const(8u));
+ }
+}
+
+Id EmitLoadStorageS8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
+ if (ctx.profile.support_int8 && ctx.profile.support_descriptor_aliasing) {
+ return ctx.OpSConvert(ctx.U32[1],
+ LoadStorage(ctx, binding, offset, ctx.S8, ctx.storage_types.S8,
+ sizeof(s8), &StorageDefinitions::S8));
+ } else {
+ return ctx.OpBitFieldSExtract(ctx.U32[1], LoadStorage32(ctx, binding, offset),
+ ctx.BitOffset8(offset), ctx.Const(8u));
+ }
+}
+
+Id EmitLoadStorageU16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
+ if (ctx.profile.support_int16 && ctx.profile.support_descriptor_aliasing) {
+ return ctx.OpUConvert(ctx.U32[1],
+ LoadStorage(ctx, binding, offset, ctx.U16, ctx.storage_types.U16,
+ sizeof(u16), &StorageDefinitions::U16));
+ } else {
+ return ctx.OpBitFieldUExtract(ctx.U32[1], LoadStorage32(ctx, binding, offset),
+ ctx.BitOffset16(offset), ctx.Const(16u));
+ }
+}
+
+Id EmitLoadStorageS16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
+ if (ctx.profile.support_int16 && ctx.profile.support_descriptor_aliasing) {
+ return ctx.OpSConvert(ctx.U32[1],
+ LoadStorage(ctx, binding, offset, ctx.S16, ctx.storage_types.S16,
+ sizeof(s16), &StorageDefinitions::S16));
+ } else {
+ return ctx.OpBitFieldSExtract(ctx.U32[1], LoadStorage32(ctx, binding, offset),
+ ctx.BitOffset16(offset), ctx.Const(16u));
+ }
+}
+
+Id EmitLoadStorage32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
+ return LoadStorage32(ctx, binding, offset);
+}
+
+Id EmitLoadStorage64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
+ if (ctx.profile.support_descriptor_aliasing) {
+ return LoadStorage(ctx, binding, offset, ctx.U32[2], ctx.storage_types.U32x2,
+ sizeof(u32[2]), &StorageDefinitions::U32x2);
+ } else {
+ return ctx.OpCompositeConstruct(ctx.U32[2], LoadStorage32(ctx, binding, offset, 0),
+ LoadStorage32(ctx, binding, offset, 1));
+ }
+}
+
+Id EmitLoadStorage128(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
+ if (ctx.profile.support_descriptor_aliasing) {
+ return LoadStorage(ctx, binding, offset, ctx.U32[4], ctx.storage_types.U32x4,
+ sizeof(u32[4]), &StorageDefinitions::U32x4);
+ } else {
+ return ctx.OpCompositeConstruct(ctx.U32[4], LoadStorage32(ctx, binding, offset, 0),
+ LoadStorage32(ctx, binding, offset, 1),
+ LoadStorage32(ctx, binding, offset, 2),
+ LoadStorage32(ctx, binding, offset, 3));
+ }
+}
+
+void EmitWriteStorageU8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ WriteStorage(ctx, binding, offset, ctx.OpSConvert(ctx.U8, value), ctx.storage_types.U8,
+ sizeof(u8), &StorageDefinitions::U8);
+}
+
+void EmitWriteStorageS8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ WriteStorage(ctx, binding, offset, ctx.OpSConvert(ctx.S8, value), ctx.storage_types.S8,
+ sizeof(s8), &StorageDefinitions::S8);
+}
+
+void EmitWriteStorageU16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ WriteStorage(ctx, binding, offset, ctx.OpSConvert(ctx.U16, value), ctx.storage_types.U16,
+ sizeof(u16), &StorageDefinitions::U16);
+}
+
+void EmitWriteStorageS16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ WriteStorage(ctx, binding, offset, ctx.OpSConvert(ctx.S16, value), ctx.storage_types.S16,
+ sizeof(s16), &StorageDefinitions::S16);
+}
+
+void EmitWriteStorage32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ WriteStorage32(ctx, binding, offset, value);
+}
+
+void EmitWriteStorage64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ if (ctx.profile.support_descriptor_aliasing) {
+ WriteStorage(ctx, binding, offset, value, ctx.storage_types.U32x2, sizeof(u32[2]),
+ &StorageDefinitions::U32x2);
+ } else {
+ for (u32 index = 0; index < 2; ++index) {
+ const Id element{ctx.OpCompositeExtract(ctx.U32[1], value, index)};
+ WriteStorage32(ctx, binding, offset, element, index);
+ }
+ }
+}
+
+void EmitWriteStorage128(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ if (ctx.profile.support_descriptor_aliasing) {
+ WriteStorage(ctx, binding, offset, value, ctx.storage_types.U32x4, sizeof(u32[4]),
+ &StorageDefinitions::U32x4);
+ } else {
+ for (u32 index = 0; index < 4; ++index) {
+ const Id element{ctx.OpCompositeExtract(ctx.U32[1], value, index)};
+ WriteStorage32(ctx, binding, offset, element, index);
+ }
+ }
+}
+
+} // namespace Shader::Backend::SPIRV
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_select.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_select.cpp
new file mode 100644
index 000000000..c5b4f4720
--- /dev/null
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_select.cpp
@@ -0,0 +1,42 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "shader_recompiler/backend/spirv/emit_spirv.h"
+#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
+
+namespace Shader::Backend::SPIRV {
+
+Id EmitSelectU1(EmitContext& ctx, Id cond, Id true_value, Id false_value) {
+ return ctx.OpSelect(ctx.U1, cond, true_value, false_value);
+}
+
+Id EmitSelectU8(EmitContext&, Id, Id, Id) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitSelectU16(EmitContext& ctx, Id cond, Id true_value, Id false_value) {
+ return ctx.OpSelect(ctx.U16, cond, true_value, false_value);
+}
+
+Id EmitSelectU32(EmitContext& ctx, Id cond, Id true_value, Id false_value) {
+ return ctx.OpSelect(ctx.U32[1], cond, true_value, false_value);
+}
+
+Id EmitSelectU64(EmitContext& ctx, Id cond, Id true_value, Id false_value) {
+ return ctx.OpSelect(ctx.U64, cond, true_value, false_value);
+}
+
+Id EmitSelectF16(EmitContext& ctx, Id cond, Id true_value, Id false_value) {
+ return ctx.OpSelect(ctx.F16[1], cond, true_value, false_value);
+}
+
+Id EmitSelectF32(EmitContext& ctx, Id cond, Id true_value, Id false_value) {
+ return ctx.OpSelect(ctx.F32[1], cond, true_value, false_value);
+}
+
+Id EmitSelectF64(EmitContext& ctx, Id cond, Id true_value, Id false_value) {
+ return ctx.OpSelect(ctx.F64[1], cond, true_value, false_value);
+}
+
+} // namespace Shader::Backend::SPIRV
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_shared_memory.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_shared_memory.cpp
new file mode 100644
index 000000000..9a79fc7a2
--- /dev/null
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_shared_memory.cpp
@@ -0,0 +1,174 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "shader_recompiler/backend/spirv/emit_spirv.h"
+#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
+
+namespace Shader::Backend::SPIRV {
+namespace {
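+// When the profile reports support_explicit_workgroup_layout, typed views of shared memory are
+// accessed directly; otherwise shared memory is addressed as an array of 32-bit words and
+// narrow accesses are emulated with the helpers below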
+Id Pointer(EmitContext& ctx, Id pointer_type, Id array, Id offset, u32 shift) {
+ const Id shift_id{ctx.Const(shift)};
+ const Id index{ctx.OpShiftRightArithmetic(ctx.U32[1], offset, shift_id)};
+ return ctx.OpAccessChain(pointer_type, array, ctx.u32_zero_value, index);
+}
+
+Id Word(EmitContext& ctx, Id offset) {
+ const Id shift_id{ctx.Const(2U)};
+ const Id index{ctx.OpShiftRightArithmetic(ctx.U32[1], offset, shift_id)};
+ const Id pointer{ctx.OpAccessChain(ctx.shared_u32, ctx.shared_memory_u32, index)};
+ return ctx.OpLoad(ctx.U32[1], pointer);
+}
+
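+// Computes the bit offset of the value inside its containing 32-bit word ((offset * 8) & mask)
+// and the field width passed to the BitFieldExtract fallbacks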
+std::pair<Id, Id> ExtractArgs(EmitContext& ctx, Id offset, u32 mask, u32 count) {
+ const Id shift{ctx.OpShiftLeftLogical(ctx.U32[1], offset, ctx.Const(3U))};
+ const Id bit{ctx.OpBitwiseAnd(ctx.U32[1], shift, ctx.Const(mask))};
+ const Id count_id{ctx.Const(count)};
+ return {bit, count_id};
+}
+} // Anonymous namespace
+
+Id EmitLoadSharedU8(EmitContext& ctx, Id offset) {
+ if (ctx.profile.support_explicit_workgroup_layout) {
+ const Id pointer{
+ ctx.OpAccessChain(ctx.shared_u8, ctx.shared_memory_u8, ctx.u32_zero_value, offset)};
+ return ctx.OpUConvert(ctx.U32[1], ctx.OpLoad(ctx.U8, pointer));
+ } else {
+ const auto [bit, count]{ExtractArgs(ctx, offset, 24, 8)};
+ return ctx.OpBitFieldUExtract(ctx.U32[1], Word(ctx, offset), bit, count);
+ }
+}
+
+Id EmitLoadSharedS8(EmitContext& ctx, Id offset) {
+ if (ctx.profile.support_explicit_workgroup_layout) {
+ const Id pointer{
+ ctx.OpAccessChain(ctx.shared_u8, ctx.shared_memory_u8, ctx.u32_zero_value, offset)};
+ return ctx.OpSConvert(ctx.U32[1], ctx.OpLoad(ctx.U8, pointer));
+ } else {
+ const auto [bit, count]{ExtractArgs(ctx, offset, 24, 8)};
+ return ctx.OpBitFieldSExtract(ctx.U32[1], Word(ctx, offset), bit, count);
+ }
+}
+
+Id EmitLoadSharedU16(EmitContext& ctx, Id offset) {
+ if (ctx.profile.support_explicit_workgroup_layout) {
+ const Id pointer{Pointer(ctx, ctx.shared_u16, ctx.shared_memory_u16, offset, 1)};
+ return ctx.OpUConvert(ctx.U32[1], ctx.OpLoad(ctx.U16, pointer));
+ } else {
+ const auto [bit, count]{ExtractArgs(ctx, offset, 16, 16)};
+ return ctx.OpBitFieldUExtract(ctx.U32[1], Word(ctx, offset), bit, count);
+ }
+}
+
+Id EmitLoadSharedS16(EmitContext& ctx, Id offset) {
+ if (ctx.profile.support_explicit_workgroup_layout) {
+ const Id pointer{Pointer(ctx, ctx.shared_u16, ctx.shared_memory_u16, offset, 1)};
+ return ctx.OpSConvert(ctx.U32[1], ctx.OpLoad(ctx.U16, pointer));
+ } else {
+ const auto [bit, count]{ExtractArgs(ctx, offset, 16, 16)};
+ return ctx.OpBitFieldSExtract(ctx.U32[1], Word(ctx, offset), bit, count);
+ }
+}
+
+Id EmitLoadSharedU32(EmitContext& ctx, Id offset) {
+ if (ctx.profile.support_explicit_workgroup_layout) {
+ const Id pointer{Pointer(ctx, ctx.shared_u32, ctx.shared_memory_u32, offset, 2)};
+ return ctx.OpLoad(ctx.U32[1], pointer);
+ } else {
+ return Word(ctx, offset);
+ }
+}
+
+Id EmitLoadSharedU64(EmitContext& ctx, Id offset) {
+ if (ctx.profile.support_explicit_workgroup_layout) {
+ const Id pointer{Pointer(ctx, ctx.shared_u32x2, ctx.shared_memory_u32x2, offset, 3)};
+ return ctx.OpLoad(ctx.U32[2], pointer);
+ } else {
+ const Id shift_id{ctx.Const(2U)};
+ const Id base_index{ctx.OpShiftRightArithmetic(ctx.U32[1], offset, shift_id)};
+ const Id next_index{ctx.OpIAdd(ctx.U32[1], base_index, ctx.Const(1U))};
+ const Id lhs_pointer{ctx.OpAccessChain(ctx.shared_u32, ctx.shared_memory_u32, base_index)};
+ const Id rhs_pointer{ctx.OpAccessChain(ctx.shared_u32, ctx.shared_memory_u32, next_index)};
+ return ctx.OpCompositeConstruct(ctx.U32[2], ctx.OpLoad(ctx.U32[1], lhs_pointer),
+ ctx.OpLoad(ctx.U32[1], rhs_pointer));
+ }
+}
+
+Id EmitLoadSharedU128(EmitContext& ctx, Id offset) {
+ if (ctx.profile.support_explicit_workgroup_layout) {
+ const Id pointer{Pointer(ctx, ctx.shared_u32x4, ctx.shared_memory_u32x4, offset, 4)};
+ return ctx.OpLoad(ctx.U32[4], pointer);
+ }
+ const Id shift_id{ctx.Const(2U)};
+ const Id base_index{ctx.OpShiftRightArithmetic(ctx.U32[1], offset, shift_id)};
+ std::array<Id, 4> values{};
+ for (u32 i = 0; i < 4; ++i) {
+ const Id index{i == 0 ? base_index : ctx.OpIAdd(ctx.U32[1], base_index, ctx.Const(i))};
+ const Id pointer{ctx.OpAccessChain(ctx.shared_u32, ctx.shared_memory_u32, index)};
+ values[i] = ctx.OpLoad(ctx.U32[1], pointer);
+ }
+ return ctx.OpCompositeConstruct(ctx.U32[4], values);
+}
+
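+// In the fallback path, sub-word stores call helper functions emitted by the context
+// (shared_store_u8_func / shared_store_u16_func), which presumably merge the value into the
+// containing 32-bit word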
+void EmitWriteSharedU8(EmitContext& ctx, Id offset, Id value) {
+ if (ctx.profile.support_explicit_workgroup_layout) {
+ const Id pointer{
+ ctx.OpAccessChain(ctx.shared_u8, ctx.shared_memory_u8, ctx.u32_zero_value, offset)};
+ ctx.OpStore(pointer, ctx.OpUConvert(ctx.U8, value));
+ } else {
+ ctx.OpFunctionCall(ctx.void_id, ctx.shared_store_u8_func, offset, value);
+ }
+}
+
+void EmitWriteSharedU16(EmitContext& ctx, Id offset, Id value) {
+ if (ctx.profile.support_explicit_workgroup_layout) {
+ const Id pointer{Pointer(ctx, ctx.shared_u16, ctx.shared_memory_u16, offset, 1)};
+ ctx.OpStore(pointer, ctx.OpUConvert(ctx.U16, value));
+ } else {
+ ctx.OpFunctionCall(ctx.void_id, ctx.shared_store_u16_func, offset, value);
+ }
+}
+
+void EmitWriteSharedU32(EmitContext& ctx, Id offset, Id value) {
+ Id pointer{};
+ if (ctx.profile.support_explicit_workgroup_layout) {
+ pointer = Pointer(ctx, ctx.shared_u32, ctx.shared_memory_u32, offset, 2);
+ } else {
+ const Id shift{ctx.Const(2U)};
+ const Id word_offset{ctx.OpShiftRightArithmetic(ctx.U32[1], offset, shift)};
+ pointer = ctx.OpAccessChain(ctx.shared_u32, ctx.shared_memory_u32, word_offset);
+ }
+ ctx.OpStore(pointer, value);
+}
+
+void EmitWriteSharedU64(EmitContext& ctx, Id offset, Id value) {
+ if (ctx.profile.support_explicit_workgroup_layout) {
+ const Id pointer{Pointer(ctx, ctx.shared_u32x2, ctx.shared_memory_u32x2, offset, 3)};
+ ctx.OpStore(pointer, value);
+ return;
+ }
+ const Id shift{ctx.Const(2U)};
+ const Id word_offset{ctx.OpShiftRightArithmetic(ctx.U32[1], offset, shift)};
+ const Id next_offset{ctx.OpIAdd(ctx.U32[1], word_offset, ctx.Const(1U))};
+ const Id lhs_pointer{ctx.OpAccessChain(ctx.shared_u32, ctx.shared_memory_u32, word_offset)};
+ const Id rhs_pointer{ctx.OpAccessChain(ctx.shared_u32, ctx.shared_memory_u32, next_offset)};
+ ctx.OpStore(lhs_pointer, ctx.OpCompositeExtract(ctx.U32[1], value, 0U));
+ ctx.OpStore(rhs_pointer, ctx.OpCompositeExtract(ctx.U32[1], value, 1U));
+}
+
+void EmitWriteSharedU128(EmitContext& ctx, Id offset, Id value) {
+ if (ctx.profile.support_explicit_workgroup_layout) {
+ const Id pointer{Pointer(ctx, ctx.shared_u32x4, ctx.shared_memory_u32x4, offset, 4)};
+ ctx.OpStore(pointer, value);
+ return;
+ }
+ const Id shift{ctx.Const(2U)};
+ const Id base_index{ctx.OpShiftRightArithmetic(ctx.U32[1], offset, shift)};
+ for (u32 i = 0; i < 4; ++i) {
+ const Id index{i == 0 ? base_index : ctx.OpIAdd(ctx.U32[1], base_index, ctx.Const(i))};
+ const Id pointer{ctx.OpAccessChain(ctx.shared_u32, ctx.shared_memory_u32, index)};
+ ctx.OpStore(pointer, ctx.OpCompositeExtract(ctx.U32[1], value, i));
+ }
+}
+
+} // namespace Shader::Backend::SPIRV
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_special.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_special.cpp
new file mode 100644
index 000000000..9e7eb3cb1
--- /dev/null
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_special.cpp
@@ -0,0 +1,150 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "shader_recompiler/backend/spirv/emit_spirv.h"
+#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
+
+namespace Shader::Backend::SPIRV {
+namespace {
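+// Converts the written clip-space depth from the [-w, w] convention to [0, w] by computing
+// z' = (z + w) * 0.5 before fixed function consumes the position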
+void ConvertDepthMode(EmitContext& ctx) {
+ const Id type{ctx.F32[1]};
+ const Id position{ctx.OpLoad(ctx.F32[4], ctx.output_position)};
+ const Id z{ctx.OpCompositeExtract(type, position, 2u)};
+ const Id w{ctx.OpCompositeExtract(type, position, 3u)};
+ const Id screen_depth{ctx.OpFMul(type, ctx.OpFAdd(type, z, w), ctx.Constant(type, 0.5f))};
+ const Id vector{ctx.OpCompositeInsert(ctx.F32[4], screen_depth, position, 2u)};
+ ctx.OpStore(ctx.output_position, vector);
+}
+
+void SetFixedPipelinePointSize(EmitContext& ctx) {
+ if (ctx.runtime_info.fixed_state_point_size) {
+ const float point_size{*ctx.runtime_info.fixed_state_point_size};
+ ctx.OpStore(ctx.output_point_size, ctx.Const(point_size));
+ }
+}
+
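+// Returns the default value stored to a generic output starting at the given element: zeros,
+// with 1.0 when the covered range includes the w component, matching the (0, 0, 0, 1) default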
+Id DefaultVarying(EmitContext& ctx, u32 num_components, u32 element, Id zero, Id one,
+ Id default_vector) {
+ switch (num_components) {
+ case 1:
+ return element == 3 ? one : zero;
+ case 2:
+ return ctx.ConstantComposite(ctx.F32[2], zero, element + 1 == 3 ? one : zero);
+ case 3:
+ return ctx.ConstantComposite(ctx.F32[3], zero, zero, element + 2 == 3 ? one : zero);
+ case 4:
+ return default_vector;
+ }
+ throw InvalidArgument("Bad element");
+}
+
+Id ComparisonFunction(EmitContext& ctx, CompareFunction comparison, Id operand_1, Id operand_2) {
+ switch (comparison) {
+ case CompareFunction::Never:
+ return ctx.false_value;
+ case CompareFunction::Less:
+ return ctx.OpFOrdLessThan(ctx.U1, operand_1, operand_2);
+ case CompareFunction::Equal:
+ return ctx.OpFOrdEqual(ctx.U1, operand_1, operand_2);
+ case CompareFunction::LessThanEqual:
+ return ctx.OpFOrdLessThanEqual(ctx.U1, operand_1, operand_2);
+ case CompareFunction::Greater:
+ return ctx.OpFOrdGreaterThan(ctx.U1, operand_1, operand_2);
+ case CompareFunction::NotEqual:
+ return ctx.OpFOrdNotEqual(ctx.U1, operand_1, operand_2);
+ case CompareFunction::GreaterThanEqual:
+ return ctx.OpFOrdGreaterThanEqual(ctx.U1, operand_1, operand_2);
+ case CompareFunction::Always:
+ return ctx.true_value;
+ }
+ throw InvalidArgument("Comparison function {}", comparison);
+}
+
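+// Emits the fixed-function alpha test: compares the alpha of render target 0 against the
+// reference value and discards the fragment when the comparison fails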
+void AlphaTest(EmitContext& ctx) {
+ if (!ctx.runtime_info.alpha_test_func) {
+ return;
+ }
+ const auto comparison{*ctx.runtime_info.alpha_test_func};
+ if (comparison == CompareFunction::Always) {
+ return;
+ }
+ if (!Sirit::ValidId(ctx.frag_color[0])) {
+ return;
+ }
+
+ const Id type{ctx.F32[1]};
+ const Id rt0_color{ctx.OpLoad(ctx.F32[4], ctx.frag_color[0])};
+ const Id alpha{ctx.OpCompositeExtract(type, rt0_color, 3u)};
+
+ const Id true_label{ctx.OpLabel()};
+ const Id discard_label{ctx.OpLabel()};
+ const Id alpha_reference{ctx.Const(ctx.runtime_info.alpha_test_reference)};
+ const Id condition{ComparisonFunction(ctx, comparison, alpha, alpha_reference)};
+
+ ctx.OpSelectionMerge(true_label, spv::SelectionControlMask::MaskNone);
+ ctx.OpBranchConditional(condition, true_label, discard_label);
+ ctx.AddLabel(discard_label);
+ ctx.OpKill();
+ ctx.AddLabel(true_label);
+}
+} // Anonymous namespace
+
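+// The vertex prologue initializes the position and every used generic output to the
+// (0, 0, 0, 1) default so that outputs the shader never writes still hold defined values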
+void EmitPrologue(EmitContext& ctx) {
+ if (ctx.stage == Stage::VertexB) {
+ const Id zero{ctx.Const(0.0f)};
+ const Id one{ctx.Const(1.0f)};
+ const Id default_vector{ctx.ConstantComposite(ctx.F32[4], zero, zero, zero, one)};
+ ctx.OpStore(ctx.output_position, default_vector);
+ for (const auto& info : ctx.output_generics) {
+ if (info[0].num_components == 0) {
+ continue;
+ }
+ u32 element{0};
+ while (element < 4) {
+ const auto& element_info{info[element]};
+ const u32 num{element_info.num_components};
+ const Id value{DefaultVarying(ctx, num, element, zero, one, default_vector)};
+ ctx.OpStore(element_info.id, value);
+ element += num;
+ }
+ }
+ }
+ if (ctx.stage == Stage::VertexB || ctx.stage == Stage::Geometry) {
+ SetFixedPipelinePointSize(ctx);
+ }
+}
+
+void EmitEpilogue(EmitContext& ctx) {
+ if (ctx.stage == Stage::VertexB && ctx.runtime_info.convert_depth_mode) {
+ ConvertDepthMode(ctx);
+ }
+ if (ctx.stage == Stage::Fragment) {
+ AlphaTest(ctx);
+ }
+}
+
+void EmitEmitVertex(EmitContext& ctx, const IR::Value& stream) {
+ if (ctx.runtime_info.convert_depth_mode) {
+ ConvertDepthMode(ctx);
+ }
+ if (stream.IsImmediate()) {
+ ctx.OpEmitStreamVertex(ctx.Def(stream));
+ } else {
+ LOG_WARNING(Shader_SPIRV, "Stream is not immediate");
+ ctx.OpEmitStreamVertex(ctx.u32_zero_value);
+ }
+ // Restore fixed pipeline point size after emitting the vertex
+ SetFixedPipelinePointSize(ctx);
+}
+
+void EmitEndPrimitive(EmitContext& ctx, const IR::Value& stream) {
+ if (stream.IsImmediate()) {
+ ctx.OpEndStreamPrimitive(ctx.Def(stream));
+ } else {
+ LOG_WARNING(Shader_SPIRV, "Stream is not immediate");
+ ctx.OpEndStreamPrimitive(ctx.u32_zero_value);
+ }
+}
+
+} // namespace Shader::Backend::SPIRV
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_undefined.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_undefined.cpp
new file mode 100644
index 000000000..c9f469e90
--- /dev/null
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_undefined.cpp
@@ -0,0 +1,30 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "shader_recompiler/backend/spirv/emit_spirv.h"
+#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
+
+namespace Shader::Backend::SPIRV {
+
+Id EmitUndefU1(EmitContext& ctx) {
+ return ctx.OpUndef(ctx.U1);
+}
+
+Id EmitUndefU8(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitUndefU16(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitUndefU32(EmitContext& ctx) {
+ return ctx.OpUndef(ctx.U32[1]);
+}
+
+Id EmitUndefU64(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+} // namespace Shader::Backend::SPIRV
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_warp.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_warp.cpp
new file mode 100644
index 000000000..78b1e1ba7
--- /dev/null
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_warp.cpp
@@ -0,0 +1,203 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "shader_recompiler/backend/spirv/emit_spirv.h"
+#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
+
+namespace Shader::Backend::SPIRV {
+namespace {
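+// When the host subgroup may be wider than the guest's 32-thread warp, ballots and thread masks
+// are uvec4 values; WarpExtract selects one 32-bit word from them using the subgroup-local
+// invocation id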
+Id WarpExtract(EmitContext& ctx, Id value) {
+ const Id local_index{ctx.OpLoad(ctx.U32[1], ctx.subgroup_local_invocation_id)};
+ return ctx.OpVectorExtractDynamic(ctx.U32[1], value, local_index);
+}
+
+Id LoadMask(EmitContext& ctx, Id mask) {
+ const Id value{ctx.OpLoad(ctx.U32[4], mask)};
+ if (!ctx.profile.warp_size_potentially_larger_than_guest) {
+ return ctx.OpCompositeExtract(ctx.U32[1], value, 0U);
+ }
+ return WarpExtract(ctx, value);
+}
+
+void SetInBoundsFlag(IR::Inst* inst, Id result) {
+ IR::Inst* const in_bounds{inst->GetAssociatedPseudoOperation(IR::Opcode::GetInBoundsFromOp)};
+ if (!in_bounds) {
+ return;
+ }
+ in_bounds->SetDefinition(result);
+ in_bounds->Invalidate();
+}
+
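+// Shuffle helpers mirroring the guest shuffle semantics: the segmentation mask splits the warp
+// into segments, min_thread_id = thread_id & seg_mask is the segment base, and
+// max_thread_id = min_thread_id | (clamp & ~seg_mask) bounds the source lane; out-of-range
+// sources keep the caller's own value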
+Id ComputeMinThreadId(EmitContext& ctx, Id thread_id, Id segmentation_mask) {
+ return ctx.OpBitwiseAnd(ctx.U32[1], thread_id, segmentation_mask);
+}
+
+Id ComputeMaxThreadId(EmitContext& ctx, Id min_thread_id, Id clamp, Id not_seg_mask) {
+ return ctx.OpBitwiseOr(ctx.U32[1], min_thread_id,
+ ctx.OpBitwiseAnd(ctx.U32[1], clamp, not_seg_mask));
+}
+
+Id GetMaxThreadId(EmitContext& ctx, Id thread_id, Id clamp, Id segmentation_mask) {
+ const Id not_seg_mask{ctx.OpNot(ctx.U32[1], segmentation_mask)};
+ const Id min_thread_id{ComputeMinThreadId(ctx, thread_id, segmentation_mask)};
+ return ComputeMaxThreadId(ctx, min_thread_id, clamp, not_seg_mask);
+}
+
+Id SelectValue(EmitContext& ctx, Id in_range, Id value, Id src_thread_id) {
+ return ctx.OpSelect(ctx.U32[1], in_range,
+ ctx.OpSubgroupReadInvocationKHR(ctx.U32[1], value, src_thread_id), value);
+}
+} // Anonymous namespace
+
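+// Masks the lane index to the low five bits so it stays within the guest's 32-wide warp when
+// the host subgroup is larger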
+Id EmitLaneId(EmitContext& ctx) {
+ const Id id{ctx.OpLoad(ctx.U32[1], ctx.subgroup_local_invocation_id)};
+ if (!ctx.profile.warp_size_potentially_larger_than_guest) {
+ return id;
+ }
+ return ctx.OpBitwiseAnd(ctx.U32[1], id, ctx.Const(31U));
+}
+
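+// On hosts whose subgroups may be wider than 32 threads, votes are emulated by ballot: the
+// predicate ballot is reduced to a 32-bit word and compared against the active-thread mask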
+Id EmitVoteAll(EmitContext& ctx, Id pred) {
+ if (!ctx.profile.warp_size_potentially_larger_than_guest) {
+ return ctx.OpSubgroupAllKHR(ctx.U1, pred);
+ }
+ const Id mask_ballot{ctx.OpSubgroupBallotKHR(ctx.U32[4], ctx.true_value)};
+ const Id active_mask{WarpExtract(ctx, mask_ballot)};
+ const Id ballot{WarpExtract(ctx, ctx.OpSubgroupBallotKHR(ctx.U32[4], pred))};
+ const Id lhs{ctx.OpBitwiseAnd(ctx.U32[1], ballot, active_mask)};
+ return ctx.OpIEqual(ctx.U1, lhs, active_mask);
+}
+
+Id EmitVoteAny(EmitContext& ctx, Id pred) {
+ if (!ctx.profile.warp_size_potentially_larger_than_guest) {
+ return ctx.OpSubgroupAnyKHR(ctx.U1, pred);
+ }
+ const Id mask_ballot{ctx.OpSubgroupBallotKHR(ctx.U32[4], ctx.true_value)};
+ const Id active_mask{WarpExtract(ctx, mask_ballot)};
+ const Id ballot{WarpExtract(ctx, ctx.OpSubgroupBallotKHR(ctx.U32[4], pred))};
+ const Id lhs{ctx.OpBitwiseAnd(ctx.U32[1], ballot, active_mask)};
+ return ctx.OpINotEqual(ctx.U1, lhs, ctx.u32_zero_value);
+}
+
+Id EmitVoteEqual(EmitContext& ctx, Id pred) {
+ if (!ctx.profile.warp_size_potentially_larger_than_guest) {
+ return ctx.OpSubgroupAllEqualKHR(ctx.U1, pred);
+ }
+ const Id mask_ballot{ctx.OpSubgroupBallotKHR(ctx.U32[4], ctx.true_value)};
+ const Id active_mask{WarpExtract(ctx, mask_ballot)};
+ const Id ballot{WarpExtract(ctx, ctx.OpSubgroupBallotKHR(ctx.U32[4], pred))};
+ const Id lhs{ctx.OpBitwiseXor(ctx.U32[1], ballot, active_mask)};
+ return ctx.OpLogicalOr(ctx.U1, ctx.OpIEqual(ctx.U1, lhs, ctx.u32_zero_value),
+ ctx.OpIEqual(ctx.U1, lhs, active_mask));
+}
+
+Id EmitSubgroupBallot(EmitContext& ctx, Id pred) {
+ const Id ballot{ctx.OpSubgroupBallotKHR(ctx.U32[4], pred)};
+ if (!ctx.profile.warp_size_potentially_larger_than_guest) {
+ return ctx.OpCompositeExtract(ctx.U32[1], ballot, 0U);
+ }
+ return WarpExtract(ctx, ballot);
+}
+
+Id EmitSubgroupEqMask(EmitContext& ctx) {
+ return LoadMask(ctx, ctx.subgroup_mask_eq);
+}
+
+Id EmitSubgroupLtMask(EmitContext& ctx) {
+ return LoadMask(ctx, ctx.subgroup_mask_lt);
+}
+
+Id EmitSubgroupLeMask(EmitContext& ctx) {
+ return LoadMask(ctx, ctx.subgroup_mask_le);
+}
+
+Id EmitSubgroupGtMask(EmitContext& ctx) {
+ return LoadMask(ctx, ctx.subgroup_mask_gt);
+}
+
+Id EmitSubgroupGeMask(EmitContext& ctx) {
+ return LoadMask(ctx, ctx.subgroup_mask_ge);
+}
+
+Id EmitShuffleIndex(EmitContext& ctx, IR::Inst* inst, Id value, Id index, Id clamp,
+ Id segmentation_mask) {
+ const Id not_seg_mask{ctx.OpNot(ctx.U32[1], segmentation_mask)};
+ const Id thread_id{ctx.OpLoad(ctx.U32[1], ctx.subgroup_local_invocation_id)};
+ const Id min_thread_id{ComputeMinThreadId(ctx, thread_id, segmentation_mask)};
+ const Id max_thread_id{ComputeMaxThreadId(ctx, min_thread_id, clamp, not_seg_mask)};
+
+ const Id lhs{ctx.OpBitwiseAnd(ctx.U32[1], index, not_seg_mask)};
+ const Id src_thread_id{ctx.OpBitwiseOr(ctx.U32[1], lhs, min_thread_id)};
+ const Id in_range{ctx.OpSLessThanEqual(ctx.U1, src_thread_id, max_thread_id)};
+
+ SetInBoundsFlag(inst, in_range);
+ return SelectValue(ctx, in_range, value, src_thread_id);
+}
+
+Id EmitShuffleUp(EmitContext& ctx, IR::Inst* inst, Id value, Id index, Id clamp,
+ Id segmentation_mask) {
+ const Id thread_id{ctx.OpLoad(ctx.U32[1], ctx.subgroup_local_invocation_id)};
+ const Id max_thread_id{GetMaxThreadId(ctx, thread_id, clamp, segmentation_mask)};
+ const Id src_thread_id{ctx.OpISub(ctx.U32[1], thread_id, index)};
+ const Id in_range{ctx.OpSGreaterThanEqual(ctx.U1, src_thread_id, max_thread_id)};
+
+ SetInBoundsFlag(inst, in_range);
+ return SelectValue(ctx, in_range, value, src_thread_id);
+}
+
+Id EmitShuffleDown(EmitContext& ctx, IR::Inst* inst, Id value, Id index, Id clamp,
+ Id segmentation_mask) {
+ const Id thread_id{ctx.OpLoad(ctx.U32[1], ctx.subgroup_local_invocation_id)};
+ const Id max_thread_id{GetMaxThreadId(ctx, thread_id, clamp, segmentation_mask)};
+ const Id src_thread_id{ctx.OpIAdd(ctx.U32[1], thread_id, index)};
+ const Id in_range{ctx.OpSLessThanEqual(ctx.U1, src_thread_id, max_thread_id)};
+
+ SetInBoundsFlag(inst, in_range);
+ return SelectValue(ctx, in_range, value, src_thread_id);
+}
+
+Id EmitShuffleButterfly(EmitContext& ctx, IR::Inst* inst, Id value, Id index, Id clamp,
+ Id segmentation_mask) {
+ const Id thread_id{ctx.OpLoad(ctx.U32[1], ctx.subgroup_local_invocation_id)};
+ const Id max_thread_id{GetMaxThreadId(ctx, thread_id, clamp, segmentation_mask)};
+ const Id src_thread_id{ctx.OpBitwiseXor(ctx.U32[1], thread_id, index)};
+ const Id in_range{ctx.OpSLessThanEqual(ctx.U1, src_thread_id, max_thread_id)};
+
+ SetInBoundsFlag(inst, in_range);
+ return SelectValue(ctx, in_range, value, src_thread_id);
+}
+
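+// Derives a 2-bit selector for this lane from the swizzle word (two bits per lane, indexed by
+// lane id & 3) and uses it to fetch per-operand multipliers from the fswzadd lookup tables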
+Id EmitFSwizzleAdd(EmitContext& ctx, Id op_a, Id op_b, Id swizzle) {
+ const Id three{ctx.Const(3U)};
+ Id mask{ctx.OpLoad(ctx.U32[1], ctx.subgroup_local_invocation_id)};
+ mask = ctx.OpBitwiseAnd(ctx.U32[1], mask, three);
+ mask = ctx.OpShiftLeftLogical(ctx.U32[1], mask, ctx.Const(1U));
+ mask = ctx.OpShiftRightLogical(ctx.U32[1], swizzle, mask);
+ mask = ctx.OpBitwiseAnd(ctx.U32[1], mask, three);
+
+ const Id modifier_a{ctx.OpVectorExtractDynamic(ctx.F32[1], ctx.fswzadd_lut_a, mask)};
+ const Id modifier_b{ctx.OpVectorExtractDynamic(ctx.F32[1], ctx.fswzadd_lut_b, mask)};
+
+ const Id result_a{ctx.OpFMul(ctx.F32[1], op_a, modifier_a)};
+ const Id result_b{ctx.OpFMul(ctx.F32[1], op_b, modifier_b)};
+ return ctx.OpFAdd(ctx.F32[1], result_a, result_b);
+}
+
+Id EmitDPdxFine(EmitContext& ctx, Id op_a) {
+ return ctx.OpDPdxFine(ctx.F32[1], op_a);
+}
+
+Id EmitDPdyFine(EmitContext& ctx, Id op_a) {
+ return ctx.OpDPdyFine(ctx.F32[1], op_a);
+}
+
+Id EmitDPdxCoarse(EmitContext& ctx, Id op_a) {
+ return ctx.OpDPdxCoarse(ctx.F32[1], op_a);
+}
+
+Id EmitDPdyCoarse(EmitContext& ctx, Id op_a) {
+ return ctx.OpDPdyCoarse(ctx.F32[1], op_a);
+}
+
+} // namespace Shader::Backend::SPIRV