glsl: Cleanup/Address feedback
@@ -156,8 +156,7 @@ void EmitCode(EmitContext& ctx, const IR::Program& program) {
             ctx.Add("for(;;){{");
             break;
         case IR::AbstractSyntaxNode::Type::Repeat:
-            ctx.Add("if({}){{continue;}}else{{break;}}}}",
-                    ctx.var_alloc.Consume(node.data.repeat.cond));
+            ctx.Add("if(!{}){{break;}}}}", ctx.var_alloc.Consume(node.data.repeat.cond));
             break;
         default:
            throw NotImplementedException("AbstractSyntaxNode Type {}", node.type);
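
Note: the two emissions are equivalent. The old form branched on the raw repeat condition and spelled out both continue and break; the new form tests only the negation and falls through to the next iteration otherwise. A rough sketch of the resulting GLSL, with a hypothetical condition variable cond standing in for whatever Consume() returns:

    for(;;){ /* loop body */ if(!cond){break;}}
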
@@ -166,7 +165,7 @@ void EmitCode(EmitContext& ctx, const IR::Program& program) {
 }

 std::string GlslVersionSpecifier(const EmitContext& ctx) {
-    if (ctx.uses_y_direction || ctx.info.stores_legacy_varyings) {
+    if (ctx.uses_y_direction || ctx.info.stores_legacy_varyings || ctx.info.loads_legacy_varyings) {
         return " compatibility";
     }
     return "";
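
Note: reading legacy varyings (gl_Color, gl_TexCoord[], and friends) needs the compatibility profile just as writing them does, so the check now covers both directions. Assuming the backend's usual #version 450 baseline, a shader that loads a legacy varying would now begin with:

    #version 450 compatibility
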
@@ -187,7 +186,8 @@ void DefineVariables(const EmitContext& ctx, std::string& header) {
         const auto type{static_cast<GlslVarType>(i)};
         const auto& tracker{ctx.var_alloc.GetUseTracker(type)};
         const auto type_name{ctx.var_alloc.GetGlslType(type)};
-        const auto precise{IsPreciseType(type) ? "precise " : ""};
+        const bool has_precise_bug{ctx.stage == Stage::Fragment && ctx.profile.has_gl_precise_bug};
+        const auto precise{!has_precise_bug && IsPreciseType(type) ? "precise " : ""};
         // Temps/return types that are never used are stored at index 0
         if (tracker.uses_temp) {
             header += fmt::format("{}{} t{}={}(0);", precise, type_name,
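
Note: on drivers whose precise qualifier miscompiles fragment shaders, the qualifier is now dropped for fragment-stage declarations only; every other stage keeps it. Substituting a hypothetical float temp at index 0 into the format string above, the emitted declaration changes roughly like this:

    precise float t0=float(0);   // qualifier kept on the normal path
    float t0=float(0);           // fragment stage with has_gl_precise_bug set
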
@@ -98,7 +98,7 @@ void EmitSharedAtomicExchange32(EmitContext& ctx, IR::Inst& inst, std::string_vi

 void EmitSharedAtomicExchange64(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
                                 std::string_view value) {
-    LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic");
+    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
     ctx.AddU64("{}=packUint2x32(uvec2(smem[{}>>2],smem[({}+4)>>2]));", inst, pointer_offset,
                pointer_offset);
     ctx.Add("smem[{}>>2]=unpackUint2x32({}).x;smem[({}+4)>>2]=unpackUint2x32({}).y;",
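
Note: message casing aside, this hunk shows the shape of the 64-bit fallback: the old value is assembled from two adjacent 32-bit shared-memory words, then the new value is unpacked and stored back, with no atomicity guarantee. Substituting a hypothetical result variable r, offset off, and value val into the format strings gives roughly:

    r=packUint2x32(uvec2(smem[off>>2],smem[(off+4)>>2]));
    smem[off>>2]=unpackUint2x32(val).x;smem[(off+4)>>2]=unpackUint2x32(val).y;

The storage-buffer atomics below follow the same read-then-write pattern against the _ssbo arrays; the remaining hunks in this file only apply the same "Atomics" to "atomics" log-message fix.
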
@@ -171,7 +171,7 @@ void EmitStorageAtomicExchange32(EmitContext& ctx, IR::Inst& inst, const IR::Val

 void EmitStorageAtomicIAdd64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              const IR::Value& offset, std::string_view value) {
-    LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic");
+    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
     ctx.AddU64("{}=packUint2x32(uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst,
                ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
                binding.U32(), ctx.var_alloc.Consume(offset));
@@ -182,7 +182,7 @@ void EmitStorageAtomicIAdd64(EmitContext& ctx, IR::Inst& inst, const IR::Value&

 void EmitStorageAtomicSMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              const IR::Value& offset, std::string_view value) {
-    LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic");
+    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
     ctx.AddU64("{}=packInt2x32(ivec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst,
                ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
                binding.U32(), ctx.var_alloc.Consume(offset));
@@ -195,7 +195,7 @@ void EmitStorageAtomicSMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value&

 void EmitStorageAtomicUMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              const IR::Value& offset, std::string_view value) {
-    LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic");
+    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
     ctx.AddU64("{}=packUint2x32(uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst,
                ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
                binding.U32(), ctx.var_alloc.Consume(offset));
@@ -207,7 +207,7 @@ void EmitStorageAtomicUMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value&

 void EmitStorageAtomicSMax64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              const IR::Value& offset, std::string_view value) {
-    LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic");
+    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
     ctx.AddU64("{}=packInt2x32(ivec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst,
                ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
                binding.U32(), ctx.var_alloc.Consume(offset));
@@ -220,7 +220,7 @@ void EmitStorageAtomicSMax64(EmitContext& ctx, IR::Inst& inst, const IR::Value&

 void EmitStorageAtomicUMax64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              const IR::Value& offset, std::string_view value) {
-    LOG_WARNING(Shader_GLSL, "Int64 Atomics not supported, fallback to non-atomic");
+    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
     ctx.AddU64("{}=packUint2x32(uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst,
                ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
                binding.U32(), ctx.var_alloc.Consume(offset));
@@ -17,8 +17,7 @@ void CompositeInsert(EmitContext& ctx, std::string_view result, std::string_view
         // The result is aliased with the composite
         ctx.Add("{}.{}={};", composite, SWIZZLE[index], object);
     } else {
-        ctx.Add("{}={};", result, composite);
-        ctx.Add("{}.{}={};", result, SWIZZLE[index], object);
+        ctx.Add("{}={};{}.{}={};", result, composite, result, SWIZZLE[index], object);
     }
 }
 } // Anonymous namespace
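
Note: folding the copy and the element write into a single Add call keeps them on one emitted line. With hypothetical names r, c, and o and a .y swizzle, the non-aliased path now emits:

    r=c;r.y=o;

The generated GLSL is unchanged in meaning; only the line layout of the output differs.
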
@@ -30,7 +30,7 @@ std::string InputVertexIndex(EmitContext& ctx, std::string_view vertex) {
     return IsInputArray(ctx.stage) ? fmt::format("[{}]", vertex) : "";
 }

-std::string OutputVertexIndex(EmitContext& ctx) {
+std::string_view OutputVertexIndex(EmitContext& ctx) {
     return ctx.stage == Stage::TessellationControl ? "[gl_InvocationID]" : "";
 }

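Note: both possible return values here are string literals, so returning std::string_view avoids constructing (and possibly heap-allocating) a std::string on every call. InputVertexIndex above must keep returning std::string because one of its branches is built with fmt::format.
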
@@ -40,7 +40,7 @@ void GetCbuf(EmitContext& ctx, std::string_view ret, const IR::Value& binding,
     const bool is_immediate{offset.IsImmediate()};
     if (is_immediate) {
         const s32 signed_offset{static_cast<s32>(offset.U32())};
-        static constexpr u32 cbuf_size{4096 * 16};
+        static constexpr u32 cbuf_size{0x10000};
         if (signed_offset < 0 || offset.U32() > cbuf_size) {
             LOG_WARNING(Shader_GLSL, "Immediate constant buffer offset is out of bounds");
             ctx.Add("{}=0u;", ret);
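
Note: the two spellings denote the same bound, since 4096 * 16 = 65536 = 0x10000 bytes (64 KiB); the hex literal just states the constant-buffer size limit directly. The same substitution appears in the U32x2 variant below.
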
@@ -140,7 +140,7 @@ void EmitGetCbufU32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding
                       const IR::Value& offset) {
     const auto cbuf{fmt::format("{}_cbuf{}", ctx.stage_name, binding.U32())};
     if (offset.IsImmediate()) {
-        static constexpr u32 cbuf_size{4096 * 16};
+        static constexpr u32 cbuf_size{0x10000};
         const u32 u32_offset{offset.U32()};
         const s32 signed_offset{static_cast<s32>(offset.U32())};
         if (signed_offset < 0 || u32_offset > cbuf_size) {
@@ -308,21 +308,13 @@ void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, std::string_view val
     case IR::Attribute::ColorFrontDiffuseG:
     case IR::Attribute::ColorFrontDiffuseB:
     case IR::Attribute::ColorFrontDiffuseA:
-        if (ctx.stage == Stage::Fragment) {
-            ctx.Add("gl_Color.{}={};", swizzle, value);
-        } else {
-            ctx.Add("gl_FrontColor.{}={};", swizzle, value);
-        }
+        ctx.Add("gl_FrontColor.{}={};", swizzle, value);
         break;
     case IR::Attribute::ColorFrontSpecularR:
     case IR::Attribute::ColorFrontSpecularG:
     case IR::Attribute::ColorFrontSpecularB:
     case IR::Attribute::ColorFrontSpecularA:
-        if (ctx.stage == Stage::Fragment) {
-            ctx.Add("gl_SecondaryColor.{}={};", swizzle, value);
-        } else {
-            ctx.Add("gl_FrontSecondaryColor.{}={};", swizzle, value);
-        }
+        ctx.Add("gl_FrontSecondaryColor.{}={};", swizzle, value);
         break;
     case IR::Attribute::ColorBackDiffuseR:
     case IR::Attribute::ColorBackDiffuseG:
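
Note: EmitSetAttribute emits stores to outputs, and in compatibility-profile GLSL gl_Color and gl_SecondaryColor are fragment-stage inputs rather than outputs, so the fragment branches appear to have been dead code; the gl_FrontColor/gl_FrontSecondaryColor stores can be emitted unconditionally.
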
@@ -28,6 +28,7 @@ void SetSignFlag(EmitContext& ctx, IR::Inst& inst, std::string_view result) {
     sign->Invalidate();
 }
 } // Anonymous namespace
+
 void EmitIAdd32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b) {
     const auto result{ctx.var_alloc.Define(inst, GlslVarType::U32)};
     if (IR::Inst* const carry{inst.GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp)}) {
@@ -20,6 +20,7 @@ void SharedWriteCas(EmitContext& ctx, std::string_view offset, std::string_view
     ctx.Add(cas_loop, smem, smem, smem, value, bit_offset, num_bits);
 }
 } // Anonymous namespace
+
 void EmitLoadSharedU8(EmitContext& ctx, IR::Inst& inst, std::string_view offset) {
     ctx.AddU32("{}=bitfieldExtract(smem[{}>>2],int({}%4)*8,8);", inst, offset, offset);
 }
@@ -43,7 +43,7 @@ void UseShuffleNv(EmitContext& ctx, IR::Inst& inst, std::string_view shfl_op,
     ctx.AddU32("{}={}({},{},{},shfl_in_bounds);", inst, shfl_op, value, index, width);
     SetInBoundsFlag(ctx, inst);
 }
-} // namespace
+} // Anonymous namespace

 void EmitLaneId(EmitContext& ctx, IR::Inst& inst) {
     ctx.AddU32("{}=gl_SubGroupInvocationARB&31u;", inst);
@@ -177,8 +177,7 @@ Id VarAlloc::Alloc(GlslVarType type) {

 void VarAlloc::Free(Id id) {
     if (id.is_valid == 0) {
-        // throw LogicError("Freeing invalid variable");
-        return;
+        throw LogicError("Freeing invalid variable");
     }
     auto& use_tracker{GetUseTracker(id.type)};
     use_tracker.var_use[id.index] = false;
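
Note: freeing a never-allocated (or already-freed) variable indicates a bug in the allocator's caller, so the previously commented-out throw is reinstated and the silent early return removed; such mistakes now fail loudly during shader emission instead of being ignored.
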
@@ -105,6 +105,8 @@ struct Profile {
     bool has_broken_signed_operations{};
     /// Dynamic vec4 indexing is broken on some OpenGL drivers
     bool has_gl_component_indexing_bug{};
+    /// The precise type qualifier is broken in the fragment stage of some drivers
+    bool has_gl_precise_bug{};
     /// Ignores SPIR-V ordered vs unordered using GLSL semantics
     bool ignore_nan_fp_comparisons{};
 };
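
Note: the new flag follows the existing has_gl_* naming convention and is consumed by DefineVariables above to strip the precise qualifier from fragment-stage temporaries.
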
@@ -196,6 +196,8 @@ ShaderCache::ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindo
           .has_broken_spirv_clamp = true,
           .has_broken_unsigned_image_offsets = true,
           .has_broken_signed_operations = true,
+          .has_gl_component_indexing_bug = device.HasComponentIndexingBug(),
+          .has_gl_precise_bug = device.HasPreciseBug(),
           .ignore_nan_fp_comparisons = true,
       } {
     if (use_asynchronous_shaders) {
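
Note: both workaround flags are populated from runtime device capability queries rather than hardcoded, so only drivers that actually exhibit the component-indexing and precise bugs pay for the workarounds.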