Diffstat (limited to 'src/shader_recompiler/backend/spirv')
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv.h                      2
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp           119
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp   59
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_instructions.h        30
-rw-r--r--  src/shader_recompiler/backend/spirv/spirv_emit_context.cpp           59
-rw-r--r--  src/shader_recompiler/backend/spirv/spirv_emit_context.h              8
6 files changed, 247 insertions(+), 30 deletions(-)
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv.h b/src/shader_recompiler/backend/spirv/emit_spirv.h
index b412957c7..2b360e073 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv.h
+++ b/src/shader_recompiler/backend/spirv/emit_spirv.h
@@ -22,7 +22,7 @@ constexpr u32 NUM_TEXTURE_AND_IMAGE_SCALING_WORDS =
struct RescalingLayout {
alignas(16) std::array<u32, NUM_TEXTURE_SCALING_WORDS> rescaling_textures;
alignas(16) std::array<u32, NUM_IMAGE_SCALING_WORDS> rescaling_images;
- alignas(16) u32 down_factor;
+ u32 down_factor;
};
constexpr u32 RESCALING_LAYOUT_WORDS_OFFSET = offsetof(RescalingLayout, rescaling_textures);
constexpr u32 RESCALING_LAYOUT_DOWN_FACTOR_OFFSET = offsetof(RescalingLayout, down_factor);
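Note: the alignas change above only affects offset packing. RESCALING_LAYOUT_DOWN_FACTOR_OFFSET is computed with offsetof, and alignas(16) on down_factor pads it out to the next 16-byte boundary whenever the preceding arrays do not already end on one. A minimal standalone sketch, using hypothetical word counts rather than the real NUM_*_SCALING_WORDS values:

    #include <array>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Five 32-bit words = 20 bytes, deliberately not a multiple of 16.
    struct Padded {
        alignas(16) std::array<std::uint32_t, 5> words;
        alignas(16) std::uint32_t down_factor; // pushed out to offset 32
    };

    struct Packed {
        alignas(16) std::array<std::uint32_t, 5> words;
        std::uint32_t down_factor; // packs tightly at offset 20
    };

    int main() {
        std::printf("padded: %zu packed: %zu\n", offsetof(Padded, down_factor),
                    offsetof(Packed, down_factor));
    }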
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp
index 46ba52a25..d3cbb14a9 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp
@@ -82,6 +82,17 @@ Id StorageAtomicU64(EmitContext& ctx, const IR::Value& binding, const IR::Value&
ctx.OpStore(pointer, ctx.OpBitcast(ctx.U32[2], result));
return original_value;
}
+
+Id StorageAtomicU32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, Id value,
+ Id (Sirit::Module::*non_atomic_func)(Id, Id, Id)) {
+ LOG_WARNING(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
+ const Id pointer{StoragePointer(ctx, ctx.storage_types.U32x2, &StorageDefinitions::U32x2,
+ binding, offset, sizeof(u32[2]))};
+ const Id original_value{ctx.OpLoad(ctx.U32[2], pointer)};
+ const Id result{(ctx.*non_atomic_func)(ctx.U32[2], value, original_value)};
+ ctx.OpStore(pointer, result);
+ return original_value;
+}
} // Anonymous namespace
Id EmitSharedAtomicIAdd32(EmitContext& ctx, Id offset, Id value) {
@@ -141,7 +152,7 @@ Id EmitSharedAtomicExchange64(EmitContext& ctx, Id offset, Id value) {
const auto [scope, semantics]{AtomicArgs(ctx)};
return ctx.OpAtomicExchange(ctx.U64, pointer, scope, semantics, value);
}
- LOG_ERROR(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
+ LOG_WARNING(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
const Id pointer_1{SharedPointer(ctx, offset, 0)};
const Id pointer_2{SharedPointer(ctx, offset, 1)};
const Id value_1{ctx.OpLoad(ctx.U32[1], pointer_1)};
@@ -152,6 +163,18 @@ Id EmitSharedAtomicExchange64(EmitContext& ctx, Id offset, Id value) {
return ctx.OpBitcast(ctx.U64, ctx.OpCompositeConstruct(ctx.U32[2], value_1, value_2));
}
+Id EmitSharedAtomicExchange32x2(EmitContext& ctx, Id offset, Id value) {
+ LOG_WARNING(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
+ const Id pointer_1{SharedPointer(ctx, offset, 0)};
+ const Id pointer_2{SharedPointer(ctx, offset, 1)};
+ const Id value_1{ctx.OpLoad(ctx.U32[1], pointer_1)};
+ const Id value_2{ctx.OpLoad(ctx.U32[1], pointer_2)};
+ const Id new_vector{ctx.OpBitcast(ctx.U32[2], value)};
+ ctx.OpStore(pointer_1, ctx.OpCompositeExtract(ctx.U32[1], new_vector, 0U));
+ ctx.OpStore(pointer_2, ctx.OpCompositeExtract(ctx.U32[1], new_vector, 1U));
+ return ctx.OpCompositeConstruct(ctx.U32[2], value_1, value_2);
+}
+
Id EmitStorageAtomicIAdd32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
Id value) {
return StorageAtomicU32(ctx, binding, offset, value, &Sirit::Module::OpAtomicIAdd);
@@ -275,6 +298,56 @@ Id EmitStorageAtomicExchange64(EmitContext& ctx, const IR::Value& binding, const
return original;
}
+Id EmitStorageAtomicIAdd32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ return StorageAtomicU32x2(ctx, binding, offset, value, &Sirit::Module::OpIAdd);
+}
+
+Id EmitStorageAtomicSMin32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ return StorageAtomicU32x2(ctx, binding, offset, value, &Sirit::Module::OpSMin);
+}
+
+Id EmitStorageAtomicUMin32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ return StorageAtomicU32x2(ctx, binding, offset, value, &Sirit::Module::OpUMin);
+}
+
+Id EmitStorageAtomicSMax32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ return StorageAtomicU32x2(ctx, binding, offset, value, &Sirit::Module::OpSMax);
+}
+
+Id EmitStorageAtomicUMax32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ return StorageAtomicU32x2(ctx, binding, offset, value, &Sirit::Module::OpUMax);
+}
+
+Id EmitStorageAtomicAnd32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ return StorageAtomicU32x2(ctx, binding, offset, value, &Sirit::Module::OpBitwiseAnd);
+}
+
+Id EmitStorageAtomicOr32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ return StorageAtomicU32x2(ctx, binding, offset, value, &Sirit::Module::OpBitwiseOr);
+}
+
+Id EmitStorageAtomicXor32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value) {
+ return StorageAtomicU32x2(ctx, binding, offset, value, &Sirit::Module::OpBitwiseXor);
+}
+
+Id EmitStorageAtomicExchange32x2(EmitContext& ctx, const IR::Value& binding,
+ const IR::Value& offset, Id value) {
+ LOG_WARNING(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
+ const Id pointer{StoragePointer(ctx, ctx.storage_types.U32x2, &StorageDefinitions::U32x2,
+ binding, offset, sizeof(u32[2]))};
+ const Id original{ctx.OpLoad(ctx.U32[2], pointer)};
+ ctx.OpStore(pointer, value);
+ return original;
+}
+
Id EmitStorageAtomicAddF32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
Id value) {
const Id ssbo{ctx.ssbos[binding.U32()].U32};
@@ -418,6 +491,50 @@ Id EmitGlobalAtomicExchange64(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
+Id EmitGlobalAtomicIAdd32x2(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicSMin32x2(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicUMin32x2(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicSMax32x2(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicUMax32x2(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicInc32x2(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicDec32x2(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicAnd32x2(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicOr32x2(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicXor32x2(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicExchange32x2(EmitContext&) {
+ throw NotImplementedException("SPIR-V Instruction");
+}
+
Id EmitGlobalAtomicAddF32(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
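The StorageAtomicU32x2 helper added above routes every 32x2 fallback through one load/modify/store body by taking a pointer-to-member-function into Sirit::Module. A standalone sketch of that dispatch shape (Module, ReadModifyWrite, and main are illustrative names, not part of the codebase); note the fallback is genuinely non-atomic, which is exactly what the LOG_WARNING flags:

    #include <cstdio>

    struct Module {
        int Add(int a, int b) { return a + b; }
        int Min(int a, int b) { return a < b ? a : b; }
    };

    // Same shape as StorageAtomicU32x2's last parameter: the caller picks the
    // operation, the helper performs the unguarded load/modify/store and, like
    // a real atomic, returns the prior value.
    int ReadModifyWrite(Module& m, int& storage, int value,
                        int (Module::*op)(int, int)) {
        const int original = storage;
        storage = (m.*op)(value, original);
        return original;
    }

    int main() {
        Module m;
        int word = 10;
        std::printf("%d -> %d\n", ReadModifyWrite(m, word, 5, &Module::Add), word);
    }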
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
index 8ea730c80..80b4bbd27 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
@@ -123,34 +123,36 @@ std::optional<OutAttr> OutputAttrPointer(EmitContext& ctx, IR::Attribute attr) {
}
Id GetCbuf(EmitContext& ctx, Id result_type, Id UniformDefinitions::*member_ptr, u32 element_size,
- const IR::Value& binding, const IR::Value& offset) {
+ const IR::Value& binding, const IR::Value& offset, const Id indirect_func) {
+ Id buffer_offset;
+ const Id uniform_type{ctx.uniform_types.*member_ptr};
+ if (offset.IsImmediate()) {
+ // Hardware has been proven to read the aligned offset (e.g. LDC.U32 at 6 will read offset 4)
+ const Id imm_offset{ctx.Const(offset.U32() / element_size)};
+ buffer_offset = imm_offset;
+ } else if (element_size > 1) {
+ const u32 log2_element_size{static_cast<u32>(std::countr_zero(element_size))};
+ const Id shift{ctx.Const(log2_element_size)};
+ buffer_offset = ctx.OpShiftRightArithmetic(ctx.U32[1], ctx.Def(offset), shift);
+ } else {
+ buffer_offset = ctx.Def(offset);
+ }
if (!binding.IsImmediate()) {
- throw NotImplementedException("Constant buffer indexing");
+ return ctx.OpFunctionCall(result_type, indirect_func, ctx.Def(binding), buffer_offset);
}
const Id cbuf{ctx.cbufs[binding.U32()].*member_ptr};
- const Id uniform_type{ctx.uniform_types.*member_ptr};
- if (!offset.IsImmediate()) {
- Id index{ctx.Def(offset)};
- if (element_size > 1) {
- const u32 log2_element_size{static_cast<u32>(std::countr_zero(element_size))};
- const Id shift{ctx.Const(log2_element_size)};
- index = ctx.OpShiftRightArithmetic(ctx.U32[1], ctx.Def(offset), shift);
- }
- const Id access_chain{ctx.OpAccessChain(uniform_type, cbuf, ctx.u32_zero_value, index)};
- return ctx.OpLoad(result_type, access_chain);
- }
- // Hardware been proved to read the aligned offset (e.g. LDC.U32 at 6 will read offset 4)
- const Id imm_offset{ctx.Const(offset.U32() / element_size)};
- const Id access_chain{ctx.OpAccessChain(uniform_type, cbuf, ctx.u32_zero_value, imm_offset)};
+ const Id access_chain{ctx.OpAccessChain(uniform_type, cbuf, ctx.u32_zero_value, buffer_offset)};
return ctx.OpLoad(result_type, access_chain);
}
Id GetCbufU32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
- return GetCbuf(ctx, ctx.U32[1], &UniformDefinitions::U32, sizeof(u32), binding, offset);
+ return GetCbuf(ctx, ctx.U32[1], &UniformDefinitions::U32, sizeof(u32), binding, offset,
+ ctx.load_const_func_u32);
}
Id GetCbufU32x4(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
- return GetCbuf(ctx, ctx.U32[4], &UniformDefinitions::U32x4, sizeof(u32[4]), binding, offset);
+ return GetCbuf(ctx, ctx.U32[4], &UniformDefinitions::U32x4, sizeof(u32[4]), binding, offset,
+ ctx.load_const_func_u32x4);
}
Id GetCbufElement(EmitContext& ctx, Id vector, const IR::Value& offset, u32 index_offset) {
@@ -201,7 +203,8 @@ void EmitGetIndirectBranchVariable(EmitContext&) {
Id EmitGetCbufU8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
if (ctx.profile.support_descriptor_aliasing && ctx.profile.support_int8) {
- const Id load{GetCbuf(ctx, ctx.U8, &UniformDefinitions::U8, sizeof(u8), binding, offset)};
+ const Id load{GetCbuf(ctx, ctx.U8, &UniformDefinitions::U8, sizeof(u8), binding, offset,
+ ctx.load_const_func_u8)};
return ctx.OpUConvert(ctx.U32[1], load);
}
Id element{};
@@ -217,7 +220,8 @@ Id EmitGetCbufU8(EmitContext& ctx, const IR::Value& binding, const IR::Value& of
Id EmitGetCbufS8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
if (ctx.profile.support_descriptor_aliasing && ctx.profile.support_int8) {
- const Id load{GetCbuf(ctx, ctx.S8, &UniformDefinitions::S8, sizeof(s8), binding, offset)};
+ const Id load{GetCbuf(ctx, ctx.S8, &UniformDefinitions::S8, sizeof(s8), binding, offset,
+ ctx.load_const_func_u8)};
return ctx.OpSConvert(ctx.U32[1], load);
}
Id element{};
@@ -233,8 +237,8 @@ Id EmitGetCbufS8(EmitContext& ctx, const IR::Value& binding, const IR::Value& of
Id EmitGetCbufU16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
if (ctx.profile.support_descriptor_aliasing && ctx.profile.support_int16) {
- const Id load{
- GetCbuf(ctx, ctx.U16, &UniformDefinitions::U16, sizeof(u16), binding, offset)};
+ const Id load{GetCbuf(ctx, ctx.U16, &UniformDefinitions::U16, sizeof(u16), binding, offset,
+ ctx.load_const_func_u16)};
return ctx.OpUConvert(ctx.U32[1], load);
}
Id element{};
@@ -250,8 +254,8 @@ Id EmitGetCbufU16(EmitContext& ctx, const IR::Value& binding, const IR::Value& o
Id EmitGetCbufS16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
if (ctx.profile.support_descriptor_aliasing && ctx.profile.support_int16) {
- const Id load{
- GetCbuf(ctx, ctx.S16, &UniformDefinitions::S16, sizeof(s16), binding, offset)};
+ const Id load{GetCbuf(ctx, ctx.S16, &UniformDefinitions::S16, sizeof(s16), binding, offset,
+ ctx.load_const_func_u16)};
return ctx.OpSConvert(ctx.U32[1], load);
}
Id element{};
@@ -276,7 +280,8 @@ Id EmitGetCbufU32(EmitContext& ctx, const IR::Value& binding, const IR::Value& o
Id EmitGetCbufF32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
if (ctx.profile.support_descriptor_aliasing) {
- return GetCbuf(ctx, ctx.F32[1], &UniformDefinitions::F32, sizeof(f32), binding, offset);
+ return GetCbuf(ctx, ctx.F32[1], &UniformDefinitions::F32, sizeof(f32), binding, offset,
+ ctx.load_const_func_f32);
} else {
const Id vector{GetCbufU32x4(ctx, binding, offset)};
return ctx.OpBitcast(ctx.F32[1], GetCbufElement(ctx, vector, offset, 0u));
@@ -285,8 +290,8 @@ Id EmitGetCbufF32(EmitContext& ctx, const IR::Value& binding, const IR::Value& o
Id EmitGetCbufU32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
if (ctx.profile.support_descriptor_aliasing) {
- return GetCbuf(ctx, ctx.U32[2], &UniformDefinitions::U32x2, sizeof(u32[2]), binding,
- offset);
+ return GetCbuf(ctx, ctx.U32[2], &UniformDefinitions::U32x2, sizeof(u32[2]), binding, offset,
+ ctx.load_const_func_u32x2);
} else {
const Id vector{GetCbufU32x4(ctx, binding, offset)};
return ctx.OpCompositeConstruct(ctx.U32[2], GetCbufElement(ctx, vector, offset, 0u),
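GetCbuf now hoists the element-index computation ahead of the binding check so the direct access chain and the new OpFunctionCall path share it: immediate offsets are divided down at compile time (the hardware reads the aligned element), dynamic ones are shifted right by log2(element_size). The same logic modeled in plain C++ (BufferElementIndex is an illustrative stand-in, not a function in the codebase):

    #include <bit>
    #include <cstdint>

    std::uint32_t BufferElementIndex(bool is_immediate, std::uint32_t offset,
                                     std::uint32_t element_size) {
        if (is_immediate) {
            return offset / element_size; // e.g. LDC.U32 at byte 6 reads element 1
        }
        if (element_size > 1) {
            const auto shift = static_cast<std::uint32_t>(std::countr_zero(element_size));
            return offset >> shift; // emitted as OpShiftRightArithmetic in SPIR-V
        }
        return offset;
    }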
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h b/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h
index 887112deb..f263b41b0 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h
@@ -335,6 +335,7 @@ Id EmitSharedAtomicOr32(EmitContext& ctx, Id pointer_offset, Id value);
Id EmitSharedAtomicXor32(EmitContext& ctx, Id pointer_offset, Id value);
Id EmitSharedAtomicExchange32(EmitContext& ctx, Id pointer_offset, Id value);
Id EmitSharedAtomicExchange64(EmitContext& ctx, Id pointer_offset, Id value);
+Id EmitSharedAtomicExchange32x2(EmitContext& ctx, Id pointer_offset, Id value);
Id EmitStorageAtomicIAdd32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
Id value);
Id EmitStorageAtomicSMin32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
@@ -375,6 +376,24 @@ Id EmitStorageAtomicXor64(EmitContext& ctx, const IR::Value& binding, const IR::
Id value);
Id EmitStorageAtomicExchange64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
Id value);
+Id EmitStorageAtomicIAdd32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicSMin32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicUMin32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicSMax32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicUMax32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicAnd32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicOr32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicXor32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+ Id value);
+Id EmitStorageAtomicExchange32x2(EmitContext& ctx, const IR::Value& binding,
+ const IR::Value& offset, Id value);
Id EmitStorageAtomicAddF32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
Id value);
Id EmitStorageAtomicAddF16x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
@@ -411,6 +430,17 @@ Id EmitGlobalAtomicAnd64(EmitContext& ctx);
Id EmitGlobalAtomicOr64(EmitContext& ctx);
Id EmitGlobalAtomicXor64(EmitContext& ctx);
Id EmitGlobalAtomicExchange64(EmitContext& ctx);
+Id EmitGlobalAtomicIAdd32x2(EmitContext& ctx);
+Id EmitGlobalAtomicSMin32x2(EmitContext& ctx);
+Id EmitGlobalAtomicUMin32x2(EmitContext& ctx);
+Id EmitGlobalAtomicSMax32x2(EmitContext& ctx);
+Id EmitGlobalAtomicUMax32x2(EmitContext& ctx);
+Id EmitGlobalAtomicInc32x2(EmitContext& ctx);
+Id EmitGlobalAtomicDec32x2(EmitContext& ctx);
+Id EmitGlobalAtomicAnd32x2(EmitContext& ctx);
+Id EmitGlobalAtomicOr32x2(EmitContext& ctx);
+Id EmitGlobalAtomicXor32x2(EmitContext& ctx);
+Id EmitGlobalAtomicExchange32x2(EmitContext& ctx);
Id EmitGlobalAtomicAddF32(EmitContext& ctx);
Id EmitGlobalAtomicAddF16x2(EmitContext& ctx);
Id EmitGlobalAtomicAddF32x2(EmitContext& ctx);
diff --git a/src/shader_recompiler/backend/spirv/spirv_emit_context.cpp b/src/shader_recompiler/backend/spirv/spirv_emit_context.cpp
index cd90c084a..aa5b6c9b7 100644
--- a/src/shader_recompiler/backend/spirv/spirv_emit_context.cpp
+++ b/src/shader_recompiler/backend/spirv/spirv_emit_context.cpp
@@ -464,6 +464,7 @@ EmitContext::EmitContext(const Profile& profile_, const RuntimeInfo& runtime_inf
DefineSharedMemory(program);
DefineSharedMemoryFunctions(program);
DefineConstantBuffers(program.info, uniform_binding);
+ DefineConstantBufferIndirectFunctions(program.info);
DefineStorageBuffers(program.info, storage_binding);
DefineTextureBuffers(program.info, texture_binding);
DefineImageBuffers(program.info, image_binding);
@@ -993,7 +994,7 @@ void EmitContext::DefineConstantBuffers(const Info& info, u32& binding) {
}
return;
}
- IR::Type types{info.used_constant_buffer_types};
+ IR::Type types{info.used_constant_buffer_types | info.used_indirect_cbuf_types};
if (True(types & IR::Type::U8)) {
if (profile.support_int8) {
DefineConstBuffers(*this, info, &UniformDefinitions::U8, binding, U8, 'u', sizeof(u8));
@@ -1027,6 +1028,62 @@ void EmitContext::DefineConstantBuffers(const Info& info, u32& binding) {
binding += static_cast<u32>(info.constant_buffer_descriptors.size());
}
+void EmitContext::DefineConstantBufferIndirectFunctions(const Info& info) {
+ if (!info.uses_cbuf_indirect) {
+ return;
+ }
+ const auto make_accessor{[&](Id buffer_type, Id UniformDefinitions::*member_ptr) {
+ const Id func_type{TypeFunction(buffer_type, U32[1], U32[1])};
+ const Id func{OpFunction(buffer_type, spv::FunctionControlMask::MaskNone, func_type)};
+ const Id binding{OpFunctionParameter(U32[1])};
+ const Id offset{OpFunctionParameter(U32[1])};
+
+ AddLabel();
+
+ const Id merge_label{OpLabel()};
+ const Id uniform_type{uniform_types.*member_ptr};
+
+ std::array<Id, Info::MAX_CBUFS> buf_labels;
+ std::array<Sirit::Literal, Info::MAX_CBUFS> buf_literals;
+ for (u32 i = 0; i < Info::MAX_CBUFS; i++) {
+ buf_labels[i] = OpLabel();
+ buf_literals[i] = Sirit::Literal{i};
+ }
+ OpSelectionMerge(merge_label, spv::SelectionControlMask::MaskNone);
+ OpSwitch(binding, buf_labels[0], buf_literals, buf_labels);
+ for (u32 i = 0; i < Info::MAX_CBUFS; i++) {
+ AddLabel(buf_labels[i]);
+ const Id cbuf{cbufs[i].*member_ptr};
+ const Id access_chain{OpAccessChain(uniform_type, cbuf, u32_zero_value, offset)};
+ const Id result{OpLoad(buffer_type, access_chain)};
+ OpReturnValue(result);
+ }
+ AddLabel(merge_label);
+ OpUnreachable();
+ OpFunctionEnd();
+ return func;
+ }};
+ IR::Type types{info.used_indirect_cbuf_types};
+ if (True(types & IR::Type::U8)) {
+ load_const_func_u8 = make_accessor(U8, &UniformDefinitions::U8);
+ }
+ if (True(types & IR::Type::U16)) {
+ load_const_func_u16 = make_accessor(U16, &UniformDefinitions::U16);
+ }
+ if (True(types & IR::Type::F32)) {
+ load_const_func_f32 = make_accessor(F32[1], &UniformDefinitions::F32);
+ }
+ if (True(types & IR::Type::U32)) {
+ load_const_func_u32 = make_accessor(U32[1], &UniformDefinitions::U32);
+ }
+ if (True(types & IR::Type::U32x2)) {
+ load_const_func_u32x2 = make_accessor(U32[2], &UniformDefinitions::U32x2);
+ }
+ if (True(types & IR::Type::U32x4)) {
+ load_const_func_u32x4 = make_accessor(U32[4], &UniformDefinitions::U32x4);
+ }
+}
+
void EmitContext::DefineStorageBuffers(const Info& info, u32& binding) {
if (info.storage_buffers_descriptors.empty()) {
return;
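Each accessor emitted by DefineConstantBufferIndirectFunctions is a structured switch over the binding: OpSelectionMerge declares the merge block, every case loads from its own cbuf and returns, and the merge block holds only OpUnreachable since no arm falls through. The equivalent control flow in plain C++, shrunk to four buffers (the real bound is Info::MAX_CBUFS; LoadConstU32 and NUM_BUFS are illustrative names):

    #include <array>
    #include <cstdint>

    constexpr std::uint32_t NUM_BUFS = 4; // stand-in for Info::MAX_CBUFS
    using Cbuf = std::array<std::uint32_t, 4096>;

    // Mirrors the emitted function: OpSwitch on the runtime binding, one arm
    // per buffer, each returning its element (OpReturnValue). Binding 0 also
    // serves as the default arm, matching OpSwitch(binding, buf_labels[0], ...).
    std::uint32_t LoadConstU32(const std::array<Cbuf, NUM_BUFS>& cbufs,
                               std::uint32_t binding, std::uint32_t offset) {
        switch (binding) {
        case 1: return cbufs[1][offset];
        case 2: return cbufs[2][offset];
        case 3: return cbufs[3][offset];
        default: return cbufs[0][offset]; // case 0 and out-of-range bindings
        }
    }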
diff --git a/src/shader_recompiler/backend/spirv/spirv_emit_context.h b/src/shader_recompiler/backend/spirv/spirv_emit_context.h
index f87138f7e..906a1dc2c 100644
--- a/src/shader_recompiler/backend/spirv/spirv_emit_context.h
+++ b/src/shader_recompiler/backend/spirv/spirv_emit_context.h
@@ -294,6 +294,13 @@ public:
std::vector<Id> interfaces;
+ Id load_const_func_u8{};
+ Id load_const_func_u16{};
+ Id load_const_func_u32{};
+ Id load_const_func_f32{};
+ Id load_const_func_u32x2{};
+ Id load_const_func_u32x4{};
+
private:
void DefineCommonTypes(const Info& info);
void DefineCommonConstants();
@@ -302,6 +309,7 @@ private:
void DefineSharedMemory(const IR::Program& program);
void DefineSharedMemoryFunctions(const IR::Program& program);
void DefineConstantBuffers(const Info& info, u32& binding);
+ void DefineConstantBufferIndirectFunctions(const Info& info);
void DefineStorageBuffers(const Info& info, u32& binding);
void DefineTextureBuffers(const Info& info, u32& binding);
void DefineImageBuffers(const Info& info, u32& binding);