shader: Add logging
@@ -144,6 +144,10 @@ bool ParseFilterRule(Filter& instance, Iterator begin, Iterator end) {
     SUB(Render, Software)                                                                          \
     SUB(Render, OpenGL)                                                                            \
     SUB(Render, Vulkan)                                                                            \
+    CLS(Shader)                                                                                    \
+    SUB(Shader, SPIRV)                                                                             \
+    SUB(Shader, GLASM)                                                                             \
+    SUB(Shader, GLSL)                                                                              \
     CLS(Audio)                                                                                     \
     SUB(Audio, DSP)                                                                                \
     SUB(Audio, Sink)                                                                               \

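The CLS/SUB rows above are X-macro entries that ParseFilterRule walks when parsing a log filter string, so the new shader subclasses become reachable from the user-facing filter syntax. A hedged usage sketch, following the class.subclass naming already present for Render.OpenGL (the exact filter grammar is defined by common/logging/filter.cpp, not by this commit):

    // Keep general output at Info, but surface everything the SPIR-V backend logs.
    Common::Log::Filter filter{Common::Log::Level::Info};
    filter.ParseFilterString("*:Info Shader.SPIRV:Debug");
    Common::Log::SetGlobalFilter(filter);
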
@@ -114,6 +114,10 @@ enum class Class : u8 {
     Render_Software,   ///< Software renderer backend
     Render_OpenGL,     ///< OpenGL backend
     Render_Vulkan,     ///< Vulkan backend
+    Shader,            ///< Shader recompiler
+    Shader_SPIRV,      ///< Shader SPIR-V code generation
+    Shader_GLASM,      ///< Shader GLASM code generation
+    Shader_GLSL,       ///< Shader GLSL code generation
     Audio,             ///< Audio emulation
     Audio_DSP,         ///< The HLE implementation of the DSP
     Audio_Sink,        ///< Emulator audio output backend

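These enum entries mirror the filter table one-to-one: a CLS(x) row corresponds to a bare class x, and a SUB(x, y) row to x_y. A minimal sketch of that X-macro correspondence (illustrative only; in this commit the enum is written out by hand rather than generated):

    // Hypothetical generator: CLS(x) yields a top-level class, SUB(x, y) pastes
    // x##_##y, producing entries like Shader and Shader_SPIRV in the enum above.
    #define CLS(x) x,
    #define SUB(x, y) x##_##y,
    enum class Class : u8 {
        CLS(Shader)
        SUB(Shader, SPIRV)
        SUB(Shader, GLASM)
        SUB(Shader, GLSL)
        Count, ///< Total number of logging classes
    };
    #undef CLS
    #undef SUB
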
@@ -253,7 +253,7 @@ void EmitCode(EmitContext& ctx, const IR::Program& program) {
         }
     }
     if (!ctx.reg_alloc.IsEmpty()) {
-        // LOG_WARNING ...;
+        LOG_WARNING(Shader_GLASM, "Register leak after generating code");
     }
 }

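Call sites like the one above pass the log class as a bare identifier; the logging macros qualify it against the Class enum internally, which is why adding the enum entries is all that is required for these calls to compile. A rough sketch of that macro shape, assuming names along the lines of common/logging/log.h rather than quoting it verbatim:

    // The bare identifier is spliced into Class::log_class inside the macro.
    #define LOG_WARNING(log_class, ...)                                             \
        Common::Log::FmtLogMessage(Common::Log::Class::log_class,                   \
                                   Common::Log::Level::Warning, __FILE__, __LINE__, \
                                   __func__, __VA_ARGS__)
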
@@ -145,14 +145,16 @@ void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, ScalarF32 value,
         if (ctx.stage == Stage::Geometry || ctx.profile.support_viewport_index_layer_non_geometry) {
             ctx.Add("MOV.F result.layer.x,{};", value);
         } else {
-            // LOG_WARNING
+            LOG_WARNING(Shader_GLASM,
+                        "Layer stored outside of geometry shader not supported by device");
         }
         break;
     case IR::Attribute::ViewportIndex:
         if (ctx.stage == Stage::Geometry || ctx.profile.support_viewport_index_layer_non_geometry) {
             ctx.Add("MOV.F result.viewport.x,{};", value);
         } else {
-            // LOG_WARNING
+            LOG_WARNING(Shader_GLASM,
+                        "Viewport stored outside of geometry shader not supported by device");
         }
         break;
     case IR::Attribute::PointSize:

@@ -139,12 +139,12 @@ void SwizzleOffsets(EmitContext& ctx, Register off_x, Register off_y, const IR::
 
 std::string GradOffset(const IR::Value& offset) {
     if (offset.IsImmediate()) {
-        // LOG_WARNING immediate
+        LOG_WARNING(Shader_GLASM, "Gradient offset is a scalar immediate");
         return "";
     }
     IR::Inst* const vector{offset.InstRecursive()};
     if (!vector->AreAllArgsImmediates()) {
-        // LOG_WARNING elements not immediate
+        LOG_WARNING(Shader_GLASM, "Gradient offset vector is not immediate");
         return "";
     }
     switch (vector->NumArgs()) {

@@ -115,7 +115,7 @@ void EmitEmitVertex(EmitContext& ctx, ScalarS32 stream) {
 
 void EmitEndPrimitive(EmitContext& ctx, const IR::Value& stream) {
     if (!stream.IsImmediate()) {
-        // LOG_WARNING not immediate
+        LOG_WARNING(Shader_GLASM, "Stream is not immediate");
     }
     ctx.reg_alloc.Consume(stream);
     ctx.Add("ENDPRIM;");

@@ -115,7 +115,7 @@ void EmitDPdxFine(EmitContext& ctx, IR::Inst& inst, ScalarF32 p) {
     if (ctx.profile.support_derivative_control) {
         ctx.Add("DDX.FINE {}.x,{};", inst, p);
     } else {
-        // LOG_WARNING
+        LOG_WARNING(Shader_GLASM, "Fine derivatives not supported by device");
         ctx.Add("DDX {}.x,{};", inst, p);
     }
 }
@@ -124,7 +124,7 @@ void EmitDPdyFine(EmitContext& ctx, IR::Inst& inst, ScalarF32 p) {
     if (ctx.profile.support_derivative_control) {
         ctx.Add("DDY.FINE {}.x,{};", inst, p);
     } else {
-        // LOG_WARNING
+        LOG_WARNING(Shader_GLASM, "Fine derivatives not supported by device");
         ctx.Add("DDY {}.x,{};", inst, p);
     }
 }
@@ -133,7 +133,7 @@ void EmitDPdxCoarse(EmitContext& ctx, IR::Inst& inst, ScalarF32 p) {
     if (ctx.profile.support_derivative_control) {
         ctx.Add("DDX.COARSE {}.x,{};", inst, p);
     } else {
-        // LOG_WARNING
+        LOG_WARNING(Shader_GLASM, "Coarse derivatives not supported by device");
         ctx.Add("DDX {}.x,{};", inst, p);
     }
 }
@@ -142,7 +142,7 @@ void EmitDPdyCoarse(EmitContext& ctx, IR::Inst& inst, ScalarF32 p) {
     if (ctx.profile.support_derivative_control) {
         ctx.Add("DDY.COARSE {}.x,{};", inst, p);
     } else {
-        // LOG_WARNING
+        LOG_WARNING(Shader_GLASM, "Coarse derivatives not supported by device");
         ctx.Add("DDY {}.x,{};", inst, p);
     }
 }

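Note the fallback shape in the four hunks above: when the profile lacks derivative control, the backend still emits a plain DDX/DDY and only logs, so shaders keep running with coarser derivative granularity. How the flag is populated lies outside this diff; a hypothetical example of wiring it to a host capability (the extension query below is an assumption, not this commit's code):

    // Illustrative only: let the profile reflect whether the host GL exposes
    // explicit fine/coarse derivative selection.
    profile.support_derivative_control = GLAD_GL_ARB_derivative_control != 0;
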
@@ -294,7 +294,7 @@ void SetupDenormControl(const Profile& profile, const IR::Program& program, Emit
                         Id main_func) {
     const Info& info{program.info};
     if (info.uses_fp32_denorms_flush && info.uses_fp32_denorms_preserve) {
-        // LOG_ERROR(HW_GPU, "Fp32 denorm flush and preserve on the same shader");
+        LOG_ERROR(Shader_SPIRV, "Fp32 denorm flush and preserve on the same shader");
     } else if (info.uses_fp32_denorms_flush) {
         if (profile.support_fp32_denorm_flush) {
             ctx.AddCapability(spv::Capability::DenormFlushToZero);
@@ -307,7 +307,7 @@ void SetupDenormControl(const Profile& profile, const IR::Program& program, Emit
             ctx.AddCapability(spv::Capability::DenormPreserve);
             ctx.AddExecutionMode(main_func, spv::ExecutionMode::DenormPreserve, 32U);
         } else {
-            // LOG_WARNING(HW_GPU, "Fp32 denorm preserve used in shader without host support");
+            LOG_WARNING(Shader_SPIRV, "Fp32 denorm preserve used in shader without host support");
         }
     }
     if (!profile.support_separate_denorm_behavior) {
@@ -315,7 +315,7 @@ void SetupDenormControl(const Profile& profile, const IR::Program& program, Emit
         return;
     }
     if (info.uses_fp16_denorms_flush && info.uses_fp16_denorms_preserve) {
-        // LOG_ERROR(HW_GPU, "Fp16 denorm flush and preserve on the same shader");
+        LOG_ERROR(Shader_SPIRV, "Fp16 denorm flush and preserve on the same shader");
     } else if (info.uses_fp16_denorms_flush) {
         if (profile.support_fp16_denorm_flush) {
             ctx.AddCapability(spv::Capability::DenormFlushToZero);
@@ -328,7 +328,7 @@ void SetupDenormControl(const Profile& profile, const IR::Program& program, Emit
             ctx.AddCapability(spv::Capability::DenormPreserve);
             ctx.AddExecutionMode(main_func, spv::ExecutionMode::DenormPreserve, 16U);
         } else {
-            // LOG_WARNING(HW_GPU, "Fp16 denorm preserve used in shader without host support");
+            LOG_WARNING(Shader_SPIRV, "Fp16 denorm preserve used in shader without host support");
         }
     }
 }

@@ -73,7 +73,7 @@ Id StorageAtomicU64(EmitContext& ctx, const IR::Value& binding, const IR::Value&
         const auto [scope, semantics]{AtomicArgs(ctx)};
         return (ctx.*atomic_func)(ctx.U64, pointer, scope, semantics, value);
     }
-    // LOG_WARNING(..., "Int64 Atomics not supported, fallback to non-atomic");
+    LOG_ERROR(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
     const Id pointer{StoragePointer(ctx, ctx.storage_types.U32x2, &StorageDefinitions::U32x2,
                                     binding, offset, sizeof(u32[2]))};
     const Id original_value{ctx.OpBitcast(ctx.U64, ctx.OpLoad(ctx.U32[2], pointer))};
@@ -140,7 +140,7 @@ Id EmitSharedAtomicExchange64(EmitContext& ctx, Id offset, Id value) {
         const auto [scope, semantics]{AtomicArgs(ctx)};
         return ctx.OpAtomicExchange(ctx.U64, pointer, scope, semantics, value);
     }
-    // LOG_WARNING("Int64 Atomics not supported, fallback to non-atomic");
+    LOG_ERROR(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
     const Id pointer_1{SharedPointer(ctx, offset, 0)};
     const Id pointer_2{SharedPointer(ctx, offset, 1)};
     const Id value_1{ctx.OpLoad(ctx.U32[1], pointer_1)};
@@ -266,7 +266,7 @@ Id EmitStorageAtomicExchange64(EmitContext& ctx, const IR::Value& binding, const
         const auto [scope, semantics]{AtomicArgs(ctx)};
         return ctx.OpAtomicExchange(ctx.U64, pointer, scope, semantics, value);
     }
-    // LOG_WARNING(..., "Int64 Atomics not supported, fallback to non-atomic");
+    LOG_ERROR(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
     const Id pointer{StoragePointer(ctx, ctx.storage_types.U32x2, &StorageDefinitions::U32x2,
                                     binding, offset, sizeof(u32[2]))};
     const Id original{ctx.OpBitcast(ctx.U64, ctx.OpLoad(ctx.U32[2], pointer))};

@@ -39,7 +39,7 @@ public:
         }
         const std::array values{offset.InstRecursive(), offset2.InstRecursive()};
         if (!values[0]->AreAllArgsImmediates() || !values[1]->AreAllArgsImmediates()) {
-            // LOG_WARNING("Not all arguments in PTP are immediate, STUBBING");
+            LOG_WARNING(Shader_SPIRV, "Not all arguments in PTP are immediate, ignoring");
             return;
         }
         const IR::Opcode opcode{values[0]->GetOpcode()};
@@ -442,7 +442,7 @@ Id EmitImageGradient(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, I
 Id EmitImageRead(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords) {
     const auto info{inst->Flags<IR::TextureInstInfo>()};
     if (info.image_format == ImageFormat::Typeless && !ctx.profile.support_typeless_image_loads) {
-        // LOG_WARNING(..., "Typeless image read not supported by host");
+        LOG_WARNING(Shader_SPIRV, "Typeless image read not supported by host");
         return ctx.ConstantNull(ctx.U32[4]);
     }
     return Emit(&EmitContext::OpImageSparseRead, &EmitContext::OpImageRead, ctx, inst, ctx.U32[4],

@@ -131,7 +131,7 @@ void EmitEmitVertex(EmitContext& ctx, const IR::Value& stream) {
     if (stream.IsImmediate()) {
         ctx.OpEmitStreamVertex(ctx.Def(stream));
     } else {
-        // LOG_WARNING(..., "EmitVertex's stream is not constant");
+        LOG_WARNING(Shader_SPIRV, "Stream is not immediate");
         ctx.OpEmitStreamVertex(ctx.u32_zero_value);
     }
     // Restore fixed pipeline point size after emitting the vertex
@@ -142,7 +142,7 @@ void EmitEndPrimitive(EmitContext& ctx, const IR::Value& stream) {
     if (stream.IsImmediate()) {
         ctx.OpEndStreamPrimitive(ctx.Def(stream));
     } else {
-        // LOG_WARNING(..., "EndPrimitive's stream is not constant");
+        LOG_WARNING(Shader_SPIRV, "Stream is not immediate");
         ctx.OpEndStreamPrimitive(ctx.u32_zero_value);
     }
 }

@@ -270,7 +270,7 @@ static U1 GetFlowTest(IREmitter& ir, FlowTest flow_test) {
     case FlowTest::RGT:
         return ir.LogicalAnd(ir.LogicalNot(ir.GetSFlag()), ir.LogicalNot(ir.GetZFlag()));
     case FlowTest::FCSM_TR:
-        // LOG_WARNING(ShaderDecompiler, "FCSM_TR CC State (Stubbed)");
+        LOG_WARNING(Shader, "(STUBBED) FCSM_TR");
         return ir.Imm1(false);
     case FlowTest::CSM_TA:
     case FlowTest::CSM_TR:

@@ -46,7 +46,7 @@ void TranslatorVisitor::ISBERD(u64 insn) {
     if (isberd.shift != Shift::Default) {
         throw NotImplementedException("Shift {}", isberd.shift.Value());
     }
-    // LOG_WARNING(..., "ISBERD is stubbed");
+    LOG_WARNING(Shader, "(STUBBED) called");
     X(isberd.dest_reg, X(isberd.src_reg));
 }
 

@@ -118,7 +118,7 @@ enum class SpecialRegister : u64 {
     case SpecialRegister::SR_THREAD_KILL:
         return IR::U32{ir.Select(ir.IsHelperInvocation(), ir.Imm32(-1), ir.Imm32(0))};
     case SpecialRegister::SR_INVOCATION_INFO:
-        // LOG_WARNING(..., "SR_INVOCATION_INFO is stubbed");
+        LOG_WARNING(Shader, "(STUBBED) SR_INVOCATION_INFO");
         return ir.Imm32(0x00ff'0000);
     case SpecialRegister::SR_TID: {
         const IR::Value tid{ir.LocalInvocationId()};
@@ -140,10 +140,10 @@ enum class SpecialRegister : u64 {
     case SpecialRegister::SR_CTAID_Z:
         return ir.WorkgroupIdZ();
     case SpecialRegister::SR_WSCALEFACTOR_XY:
-        // LOG_WARNING(..., "SR_WSCALEFACTOR_XY is stubbed");
+        LOG_WARNING(Shader, "(STUBBED) SR_WSCALEFACTOR_XY");
         return ir.Imm32(Common::BitCast<u32>(1.0f));
     case SpecialRegister::SR_WSCALEFACTOR_Z:
-        // LOG_WARNING(..., "SR_WSCALEFACTOR_Z is stubbed");
+        LOG_WARNING(Shader, "(STUBBED) SR_WSCALEFACTOR_Z");
         return ir.Imm32(Common::BitCast<u32>(1.0f));
     case SpecialRegister::SR_LANEID:
         return ir.LaneId();
@@ -160,7 +160,7 @@ enum class SpecialRegister : u64 {
     case SpecialRegister::SR_Y_DIRECTION:
         return ir.BitCast<IR::U32>(ir.YDirection());
     case SpecialRegister::SR_AFFINITY:
-        // LOG_WARNING(..., "SR_AFFINITY is stubbed");
+        LOG_WARNING(Shader, "(STUBBED) SR_AFFINITY");
         return ir.Imm32(0); // This is the default value hardware returns.
     default:
         throw NotImplementedException("S2R special register {}", special_register);

@@ -48,7 +48,7 @@ void TranslatorVisitor::VOTE(u64 insn) {
 }
 
 void TranslatorVisitor::VOTE_vtg(u64) {
-    // LOG_WARNING(ShaderDecompiler, "VOTE.VTG: Stubbed!");
+    LOG_WARNING(Shader, "(STUBBED) called");
 }
 
 } // namespace Shader::Maxwell