Diffstat (limited to 'src/shader_recompiler/backend')
-rw-r--r--  src/shader_recompiler/backend/glasm/emit_glasm.cpp                  2
-rw-r--r--  src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp  6
-rw-r--r--  src/shader_recompiler/backend/glasm/emit_glasm_image.cpp            4
-rw-r--r--  src/shader_recompiler/backend/glasm/emit_glasm_not_implemented.cpp  2
-rw-r--r--  src/shader_recompiler/backend/glasm/emit_glasm_warp.cpp             8
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv.cpp                  8
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp           6
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_image.cpp            4
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_special.cpp          4
9 files changed, 23 insertions, 21 deletions
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm.cpp b/src/shader_recompiler/backend/glasm/emit_glasm.cpp
index fc01797b6..832b4fd40 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm.cpp
+++ b/src/shader_recompiler/backend/glasm/emit_glasm.cpp
@@ -253,7 +253,7 @@ void EmitCode(EmitContext& ctx, const IR::Program& program) {
}
}
if (!ctx.reg_alloc.IsEmpty()) {
- // LOG_WARNING ...;
+ LOG_WARNING(Shader_GLASM, "Register leak after generating code");
}
}
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp b/src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp
index c1df7a342..20b925877 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp
+++ b/src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp
@@ -145,14 +145,16 @@ void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, ScalarF32 value,
if (ctx.stage == Stage::Geometry || ctx.profile.support_viewport_index_layer_non_geometry) {
ctx.Add("MOV.F result.layer.x,{};", value);
} else {
- // LOG_WARNING
+ LOG_WARNING(Shader_GLASM,
+ "Layer stored outside of geometry shader not supported by device");
}
break;
case IR::Attribute::ViewportIndex:
if (ctx.stage == Stage::Geometry || ctx.profile.support_viewport_index_layer_non_geometry) {
ctx.Add("MOV.F result.viewport.x,{};", value);
} else {
- // LOG_WARNING
+ LOG_WARNING(Shader_GLASM,
+ "Viewport stored outside of geometry shader not supported by device");
}
break;
case IR::Attribute::PointSize:
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_image.cpp b/src/shader_recompiler/backend/glasm/emit_glasm_image.cpp
index 81d5fe72c..09e3a9b82 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm_image.cpp
+++ b/src/shader_recompiler/backend/glasm/emit_glasm_image.cpp
@@ -139,12 +139,12 @@ void SwizzleOffsets(EmitContext& ctx, Register off_x, Register off_y, const IR::
std::string GradOffset(const IR::Value& offset) {
if (offset.IsImmediate()) {
- // LOG_WARNING immediate
+ LOG_WARNING(Shader_GLASM, "Gradient offset is a scalar immediate");
return "";
}
IR::Inst* const vector{offset.InstRecursive()};
if (!vector->AreAllArgsImmediates()) {
- // LOG_WARNING elements not immediate
+ LOG_WARNING(Shader_GLASM, "Gradient offset vector is not immediate");
return "";
}
switch (vector->NumArgs()) {
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_not_implemented.cpp b/src/shader_recompiler/backend/glasm/emit_glasm_not_implemented.cpp
index 60735fe31..a487a0744 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm_not_implemented.cpp
+++ b/src/shader_recompiler/backend/glasm/emit_glasm_not_implemented.cpp
@@ -115,7 +115,7 @@ void EmitEmitVertex(EmitContext& ctx, ScalarS32 stream) {
void EmitEndPrimitive(EmitContext& ctx, const IR::Value& stream) {
if (!stream.IsImmediate()) {
- // LOG_WARNING not immediate
+ LOG_WARNING(Shader_GLASM, "Stream is not immediate");
}
ctx.reg_alloc.Consume(stream);
ctx.Add("ENDPRIM;");
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_warp.cpp b/src/shader_recompiler/backend/glasm/emit_glasm_warp.cpp
index 8cec5ee7e..544d475b4 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm_warp.cpp
+++ b/src/shader_recompiler/backend/glasm/emit_glasm_warp.cpp
@@ -115,7 +115,7 @@ void EmitDPdxFine(EmitContext& ctx, IR::Inst& inst, ScalarF32 p) {
if (ctx.profile.support_derivative_control) {
ctx.Add("DDX.FINE {}.x,{};", inst, p);
} else {
- // LOG_WARNING
+ LOG_WARNING(Shader_GLASM, "Fine derivatives not supported by device");
ctx.Add("DDX {}.x,{};", inst, p);
}
}
@@ -124,7 +124,7 @@ void EmitDPdyFine(EmitContext& ctx, IR::Inst& inst, ScalarF32 p) {
if (ctx.profile.support_derivative_control) {
ctx.Add("DDY.FINE {}.x,{};", inst, p);
} else {
- // LOG_WARNING
+ LOG_WARNING(Shader_GLASM, "Fine derivatives not supported by device");
ctx.Add("DDY {}.x,{};", inst, p);
}
}
@@ -133,7 +133,7 @@ void EmitDPdxCoarse(EmitContext& ctx, IR::Inst& inst, ScalarF32 p) {
if (ctx.profile.support_derivative_control) {
ctx.Add("DDX.COARSE {}.x,{};", inst, p);
} else {
- // LOG_WARNING
+ LOG_WARNING(Shader_GLASM, "Coarse derivatives not supported by device");
ctx.Add("DDX {}.x,{};", inst, p);
}
}
@@ -142,7 +142,7 @@ void EmitDPdyCoarse(EmitContext& ctx, IR::Inst& inst, ScalarF32 p) {
if (ctx.profile.support_derivative_control) {
ctx.Add("DDY.COARSE {}.x,{};", inst, p);
} else {
- // LOG_WARNING
+ LOG_WARNING(Shader_GLASM, "Coarse derivatives not supported by device");
ctx.Add("DDY {}.x,{};", inst, p);
}
}
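
A minimal, self-contained sketch of the fallback pattern the four hunks above share. Nothing here is yuzu code: Profile, the EmitDPdxFine signature, and the LOG_WARNING stand-in are simplified placeholders for the real definitions (yuzu's actual fmt-based macro lives in common/logging/log.h).

#include <cstdio>

// Stand-in for yuzu's LOG_WARNING(log_class, ...) macro.
#define LOG_WARNING(log_class, ...) \
    (std::fprintf(stderr, "[" #log_class "] "), \
     std::fprintf(stderr, __VA_ARGS__), std::fprintf(stderr, "\n"))

struct Profile {
    bool support_derivative_control{};
};

// Same shape as the hunks above: use the precise instruction when the host
// exposes it, otherwise warn and emit the plain (coarser) variant.
void EmitDPdxFine(const Profile& profile, const char* inst, const char* p) {
    if (profile.support_derivative_control) {
        std::printf("DDX.FINE %s.x,%s;\n", inst, p);
    } else {
        LOG_WARNING(Shader_GLASM, "Fine derivatives not supported by device");
        std::printf("DDX %s.x,%s;\n", inst, p);
    }
}

int main() {
    EmitDPdxFine(Profile{}, "R0", "R1"); // prints the warning, then "DDX R0.x,R1;"
}
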
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv.cpp b/src/shader_recompiler/backend/spirv/emit_spirv.cpp
index cba420cda..14a99750d 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv.cpp
@@ -294,7 +294,7 @@ void SetupDenormControl(const Profile& profile, const IR::Program& program, Emit
Id main_func) {
const Info& info{program.info};
if (info.uses_fp32_denorms_flush && info.uses_fp32_denorms_preserve) {
- // LOG_ERROR(HW_GPU, "Fp32 denorm flush and preserve on the same shader");
+ LOG_ERROR(Shader_SPIRV, "Fp32 denorm flush and preserve on the same shader");
} else if (info.uses_fp32_denorms_flush) {
if (profile.support_fp32_denorm_flush) {
ctx.AddCapability(spv::Capability::DenormFlushToZero);
@@ -307,7 +307,7 @@ void SetupDenormControl(const Profile& profile, const IR::Program& program, Emit
ctx.AddCapability(spv::Capability::DenormPreserve);
ctx.AddExecutionMode(main_func, spv::ExecutionMode::DenormPreserve, 32U);
} else {
- // LOG_WARNING(HW_GPU, "Fp32 denorm preserve used in shader without host support");
+ LOG_WARNING(Shader_SPIRV, "Fp32 denorm preserve used in shader without host support");
}
}
if (!profile.support_separate_denorm_behavior) {
@@ -315,7 +315,7 @@ void SetupDenormControl(const Profile& profile, const IR::Program& program, Emit
return;
}
if (info.uses_fp16_denorms_flush && info.uses_fp16_denorms_preserve) {
- // LOG_ERROR(HW_GPU, "Fp16 denorm flush and preserve on the same shader");
+ LOG_ERROR(Shader_SPIRV, "Fp16 denorm flush and preserve on the same shader");
} else if (info.uses_fp16_denorms_flush) {
if (profile.support_fp16_denorm_flush) {
ctx.AddCapability(spv::Capability::DenormFlushToZero);
@@ -328,7 +328,7 @@ void SetupDenormControl(const Profile& profile, const IR::Program& program, Emit
ctx.AddCapability(spv::Capability::DenormPreserve);
ctx.AddExecutionMode(main_func, spv::ExecutionMode::DenormPreserve, 16U);
} else {
- // LOG_WARNING(HW_GPU, "Fp16 denorm preserve used in shader without host support");
+ LOG_WARNING(Shader_SPIRV, "Fp16 denorm preserve used in shader without host support");
}
}
}
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp
index 053800eb7..9af8bb9e1 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp
@@ -73,7 +73,7 @@ Id StorageAtomicU64(EmitContext& ctx, const IR::Value& binding, const IR::Value&
const auto [scope, semantics]{AtomicArgs(ctx)};
return (ctx.*atomic_func)(ctx.U64, pointer, scope, semantics, value);
}
- // LOG_WARNING(..., "Int64 Atomics not supported, fallback to non-atomic");
+ LOG_ERROR(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
const Id pointer{StoragePointer(ctx, ctx.storage_types.U32x2, &StorageDefinitions::U32x2,
binding, offset, sizeof(u32[2]))};
const Id original_value{ctx.OpBitcast(ctx.U64, ctx.OpLoad(ctx.U32[2], pointer))};
@@ -140,7 +140,7 @@ Id EmitSharedAtomicExchange64(EmitContext& ctx, Id offset, Id value) {
const auto [scope, semantics]{AtomicArgs(ctx)};
return ctx.OpAtomicExchange(ctx.U64, pointer, scope, semantics, value);
}
- // LOG_WARNING("Int64 Atomics not supported, fallback to non-atomic");
+ LOG_ERROR(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
const Id pointer_1{SharedPointer(ctx, offset, 0)};
const Id pointer_2{SharedPointer(ctx, offset, 1)};
const Id value_1{ctx.OpLoad(ctx.U32[1], pointer_1)};
@@ -266,7 +266,7 @@ Id EmitStorageAtomicExchange64(EmitContext& ctx, const IR::Value& binding, const
const auto [scope, semantics]{AtomicArgs(ctx)};
return ctx.OpAtomicExchange(ctx.U64, pointer, scope, semantics, value);
}
- // LOG_WARNING(..., "Int64 Atomics not supported, fallback to non-atomic");
+ LOG_ERROR(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
const Id pointer{StoragePointer(ctx, ctx.storage_types.U32x2, &StorageDefinitions::U32x2,
binding, offset, sizeof(u32[2]))};
const Id original{ctx.OpBitcast(ctx.U64, ctx.OpLoad(ctx.U32[2], pointer))};
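
A standalone sketch of what "fallback to non-atomic" means in the three hunks above. U32x2 and ExchangeNonAtomic are hypothetical stand-ins for the generated SPIR-V, not yuzu code: without Int64Atomics, the value moves as two 32-bit halves (an OpLoad of a U32x2 followed by an OpBitcast to U64), so another invocation can observe a torn pair, which is presumably why these sites log at error rather than warning severity.

#include <cstdint>
#include <cstring>

struct U32x2 {
    uint32_t x;
    uint32_t y;
};
static_assert(sizeof(U32x2) == sizeof(uint64_t), "pair must alias a u64");

// Mirrors the generated fallback: load the old pair and bitcast it to a u64
// (OpLoad + OpBitcast), then store the new bits back (OpBitcast + OpStore).
// Neither half is synchronized with the other, so the exchange is not atomic.
uint64_t ExchangeNonAtomic(U32x2& storage, uint64_t desired) {
    uint64_t original;
    std::memcpy(&original, &storage, sizeof(original));
    std::memcpy(&storage, &desired, sizeof(storage));
    return original;
}
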
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
index cf842e1e0..647804814 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
@@ -39,7 +39,7 @@ public:
}
const std::array values{offset.InstRecursive(), offset2.InstRecursive()};
if (!values[0]->AreAllArgsImmediates() || !values[1]->AreAllArgsImmediates()) {
- // LOG_WARNING("Not all arguments in PTP are immediate, STUBBING");
+ LOG_WARNING(Shader_SPIRV, "Not all arguments in PTP are immediate, ignoring");
return;
}
const IR::Opcode opcode{values[0]->GetOpcode()};
@@ -442,7 +442,7 @@ Id EmitImageGradient(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, I
Id EmitImageRead(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords) {
const auto info{inst->Flags<IR::TextureInstInfo>()};
if (info.image_format == ImageFormat::Typeless && !ctx.profile.support_typeless_image_loads) {
- // LOG_WARNING(..., "Typeless image read not supported by host");
+ LOG_WARNING(Shader_SPIRV, "Typeless image read not supported by host");
return ctx.ConstantNull(ctx.U32[4]);
}
return Emit(&EmitContext::OpImageSparseRead, &EmitContext::OpImageRead, ctx, inst, ctx.U32[4],
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_special.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_special.cpp
index 072a3b1bd..9e7eb3cb1 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_special.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_special.cpp
@@ -131,7 +131,7 @@ void EmitEmitVertex(EmitContext& ctx, const IR::Value& stream) {
if (stream.IsImmediate()) {
ctx.OpEmitStreamVertex(ctx.Def(stream));
} else {
- // LOG_WARNING(..., "EmitVertex's stream is not constant");
+ LOG_WARNING(Shader_SPIRV, "Stream is not immediate");
ctx.OpEmitStreamVertex(ctx.u32_zero_value);
}
// Restore fixed pipeline point size after emitting the vertex
@@ -142,7 +142,7 @@ void EmitEndPrimitive(EmitContext& ctx, const IR::Value& stream) {
if (stream.IsImmediate()) {
ctx.OpEndStreamPrimitive(ctx.Def(stream));
} else {
- // LOG_WARNING(..., "EndPrimitive's stream is not constant");
+ LOG_WARNING(Shader_SPIRV, "Stream is not immediate");
ctx.OpEndStreamPrimitive(ctx.u32_zero_value);
}
}