path: root/src/shader_recompiler
author      bunnei <bunneidev@gmail.com>    2023-04-15 01:56:34 +0200
committer   GitHub <noreply@github.com>     2023-04-15 01:56:34 +0200
commit      e0895a85810d76d810b40ade50dc514a459b685e (patch)
tree        cf3d44618ee0757f4994917066ba48c3b8151ac6 /src/shader_recompiler
parent      Merge pull request #10055 from v1993/patch-1 (diff)
parent      video_core: Enable ImageGather rounding fix on AMD open source drivers (diff)
Diffstat (limited to '')
-rw-r--r--  src/shader_recompiler/backend/glsl/emit_glsl_image.cpp   | 29
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_image.cpp | 30
-rw-r--r--  src/shader_recompiler/profile.h                          |  4
3 files changed, 63 insertions(+), 0 deletions(-)
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
index f335c8af0..418505475 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
@@ -143,6 +143,21 @@ IR::Inst* PrepareSparse(IR::Inst& inst) {
}
return sparse_inst;
}
+
+std::string ImageGatherSubpixelOffset(const IR::TextureInstInfo& info, std::string_view texture,
+ std::string_view coords) {
+ switch (info.type) {
+ case TextureType::Color2D:
+ case TextureType::Color2DRect:
+ return fmt::format("{}+vec2(0.001953125)/vec2(textureSize({}, 0))", coords, texture);
+ case TextureType::ColorArray2D:
+ case TextureType::ColorCube:
+ return fmt::format("vec3({0}.xy+vec2(0.001953125)/vec2(textureSize({1}, 0)),{0}.z)", coords,
+ texture);
+ default:
+ return std::string{coords};
+ }
+}
} // Anonymous namespace
void EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
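For reference, here is a minimal standalone sketch of the string the new ImageGatherSubpixelOffset helper builds for the Color2D/Color2DRect case. The sampler and coordinate names below are hypothetical; only the format string is taken from the hunk above.

    // Illustrative only: replays the Color2D/Color2DRect branch of ImageGatherSubpixelOffset.
    #include <fmt/format.h>
    #include <iostream>
    #include <string>

    int main() {
        const std::string coords{"coords"};  // hypothetical coordinate expression
        const std::string texture{"tex0"};   // hypothetical sampler name
        // Dividing 0.001953125 (= 1/512) by the texture size yields a nudge of 1/512 of a texel
        // in normalized coordinates.
        const std::string nudged{
            fmt::format("{}+vec2(0.001953125)/vec2(textureSize({}, 0))", coords, texture)};
        std::cout << nudged << '\n';
        // Prints: coords+vec2(0.001953125)/vec2(textureSize(tex0, 0))
    }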
@@ -340,6 +355,13 @@ void EmitImageGather(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING");
ctx.AddU1("{}=true;", *sparse_inst);
}
+ std::string coords_with_subpixel_offset;
+ if (ctx.profile.need_gather_subpixel_offset) {
+ // Apply a subpixel offset of 1/512 of the texel size to ensure the same rounding on
+ // AMD hardware as on Maxwell and other Nvidia architectures.
+ coords_with_subpixel_offset = ImageGatherSubpixelOffset(info, texture, coords);
+ coords = coords_with_subpixel_offset;
+ }
if (!sparse_inst || !supports_sparse) {
if (offset.IsEmpty()) {
ctx.Add("{}=textureGather({},{},int({}));", texel, texture, coords,
@@ -387,6 +409,13 @@ void EmitImageGatherDref(EmitContext& ctx, IR::Inst& inst, const IR::Value& inde
LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING");
ctx.AddU1("{}=true;", *sparse_inst);
}
+ std::string coords_with_subpixel_offset;
+ if (ctx.profile.need_gather_subpixel_offset) {
+ // Apply a subpixel offset of 1/512 of the texel size to ensure the same rounding on
+ // AMD hardware as on Maxwell and other Nvidia architectures.
+ coords_with_subpixel_offset = ImageGatherSubpixelOffset(info, texture, coords);
+ coords = coords_with_subpixel_offset;
+ }
if (!sparse_inst || !supports_sparse) {
if (offset.IsEmpty()) {
ctx.Add("{}=textureGather({},{},{});", texel, texture, coords, dref);
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
index 02073c420..7d901c04b 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
@@ -261,6 +261,30 @@ Id BitTest(EmitContext& ctx, Id mask, Id bit) {
const Id bit_value{ctx.OpBitwiseAnd(ctx.U32[1], shifted, ctx.Const(1u))};
return ctx.OpINotEqual(ctx.U1, bit_value, ctx.u32_zero_value);
}
+
+Id ImageGatherSubpixelOffset(EmitContext& ctx, const IR::TextureInstInfo& info, Id texture,
+ Id coords) {
+ // Apply a subpixel offset of 1/512 of the texel size to ensure the same rounding on
+ // AMD hardware as on Maxwell and other Nvidia architectures.
+ const auto calculate_coords{[&](size_t dim) {
+ const Id nudge{ctx.Const(0x1p-9f)};
+ const Id image_size{ctx.OpImageQuerySizeLod(ctx.U32[dim], texture, ctx.u32_zero_value)};
+ Id offset{dim == 2 ? ctx.ConstantComposite(ctx.F32[dim], nudge, nudge)
+ : ctx.ConstantComposite(ctx.F32[dim], nudge, nudge, ctx.f32_zero_value)};
+ offset = ctx.OpFDiv(ctx.F32[dim], offset, ctx.OpConvertUToF(ctx.F32[dim], image_size));
+ return ctx.OpFAdd(ctx.F32[dim], coords, offset);
+ }};
+ switch (info.type) {
+ case TextureType::Color2D:
+ case TextureType::Color2DRect:
+ return calculate_coords(2);
+ case TextureType::ColorArray2D:
+ case TextureType::ColorCube:
+ return calculate_coords(3);
+ default:
+ return coords;
+ }
+}
} // Anonymous namespace
Id EmitBindlessImageSampleImplicitLod(EmitContext&) {
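A plain C++ sketch of what calculate_coords in the new SPIR-V helper computes numerically; the coordinate and size values are made up, and nothing below emits real SPIR-V. Note that the third component of 3D coordinates (array layer or cube direction) receives a zero nudge, mirroring the f32_zero_value component above.

    // Numeric model of calculate_coords(3): only the 2D footprint is nudged.
    #include <array>
    #include <cstdio>

    int main() {
        constexpr float nudge = 0x1p-9f;                        // 1/512, same constant as the helper
        const std::array<float, 3> coords{0.25f, 0.75f, 2.0f};  // hypothetical u, v, layer
        const std::array<float, 3> size{128.0f, 64.0f, 6.0f};   // hypothetical ImageQuerySizeLod result
        const std::array<float, 3> offset{nudge, nudge, 0.0f};  // mirrors ConstantComposite(nudge, nudge, 0)
        for (std::size_t i = 0; i < coords.size(); ++i) {
            std::printf("%.8f\n", coords[i] + offset[i] / size[i]);
        }
    }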
@@ -423,6 +447,9 @@ Id EmitImageGather(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id
const IR::Value& offset, const IR::Value& offset2) {
const auto info{inst->Flags<IR::TextureInstInfo>()};
const ImageOperands operands(ctx, offset, offset2);
+ if (ctx.profile.need_gather_subpixel_offset) {
+ coords = ImageGatherSubpixelOffset(ctx, info, TextureImage(ctx, info, index), coords);
+ }
return Emit(&EmitContext::OpImageSparseGather, &EmitContext::OpImageGather, ctx, inst,
ctx.F32[4], Texture(ctx, info, index), coords, ctx.Const(info.gather_component),
operands.MaskOptional(), operands.Span());
@@ -432,6 +459,9 @@ Id EmitImageGatherDref(EmitContext& ctx, IR::Inst* inst, const IR::Value& index,
const IR::Value& offset, const IR::Value& offset2, Id dref) {
const auto info{inst->Flags<IR::TextureInstInfo>()};
const ImageOperands operands(ctx, offset, offset2);
+ if (ctx.profile.need_gather_subpixel_offset) {
+ coords = ImageGatherSubpixelOffset(ctx, info, TextureImage(ctx, info, index), coords);
+ }
return Emit(&EmitContext::OpImageSparseDrefGather, &EmitContext::OpImageDrefGather, ctx, inst,
ctx.F32[4], Texture(ctx, info, index), coords, dref, operands.MaskOptional(),
operands.Span());
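The diff only adds the flag and its consumers; which devices actually set it is decided by the host backends (per the merged commit title, AMD open source drivers). A hypothetical sketch of that wiring, in which everything except the field name need_gather_subpixel_offset is made up:

    // Hypothetical sketch of how a backend might populate the new Profile flag.
    #include <string_view>

    struct Profile {
        bool need_gather_subpixel_offset{};
    };

    Profile MakeProfile(std::string_view driver_name) {
        Profile profile{};
        // The merged commit targets AMD open source drivers (e.g. RADV); the check is illustrative.
        profile.need_gather_subpixel_offset = driver_name == "RADV" || driver_name == "AMDVLK";
        return profile;
    }

    int main() {
        return MakeProfile("RADV").need_gather_subpixel_offset ? 0 : 1;
    }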
diff --git a/src/shader_recompiler/profile.h b/src/shader_recompiler/profile.h
index 253e0d0bd..9f88fb440 100644
--- a/src/shader_recompiler/profile.h
+++ b/src/shader_recompiler/profile.h
@@ -52,6 +52,10 @@ struct Profile {
bool need_declared_frag_colors{};
/// Prevents fast math optimizations that may cause inaccuracies
bool need_fastmath_off{};
+ /// Some GPU vendors use a different rounding precision than the Maxwell architecture when
+ /// calculating texture pixel coordinates in the 16.8 format for the ImageGather instruction.
+ /// Applying a small offset fixes this mismatched rounding behaviour.
+ bool need_gather_subpixel_offset{};
/// OpFClamp is broken and OpFMax + OpFMin should be used instead
bool has_broken_spirv_clamp{};
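One plausible reading of the 1/512 constant, assuming the 16.8 fixed-point subpixel format mentioned in the comment above: a texel spans 2^8 = 256 subpixel steps, so half of one step is 1/512 of a texel, enough to push a coordinate off a rounding boundary without reaching the next subpixel. A quick arithmetic check follows; the half-step interpretation is an inference, not something stated in the diff.

    // Back-of-the-envelope check for the nudge value.
    #include <cstdio>

    int main() {
        constexpr int subpixel_bits = 8;                     // the ".8" in the 16.8 format
        constexpr double step = 1.0 / (1 << subpixel_bits);  // one subpixel step: 1/256 of a texel
        constexpr double nudge = step / 2.0;                 // half a step: 0.001953125 of a texel
        std::printf("%.9f\n", nudge);                        // matches 0x1p-9f / vec2(0.001953125) above
    }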