glsl: Reorganize backend code, remove unneeded [[maybe_unused]]
		| @@ -37,7 +37,6 @@ add_library(shader_recompiler STATIC | ||||
|     backend/glsl/emit_glsl_convert.cpp | ||||
|     backend/glsl/emit_glsl_floating_point.cpp | ||||
|     backend/glsl/emit_glsl_image.cpp | ||||
|     backend/glsl/emit_glsl_image_atomic.cpp | ||||
|     backend/glsl/emit_glsl_instructions.h | ||||
|     backend/glsl/emit_glsl_integer.cpp | ||||
|     backend/glsl/emit_glsl_logical.cpp | ||||
|   | ||||
| @@ -0,0 +1,22 @@ | ||||
| // Copyright 2021 yuzu Emulator Project | ||||
| // Licensed under GPLv2 or any later version | ||||
| // Refer to the license.txt file included. | ||||
|  | ||||
| #include <string_view> | ||||
|  | ||||
| #include "shader_recompiler/backend/glsl/emit_context.h" | ||||
| #include "shader_recompiler/backend/glsl/emit_glsl_instructions.h" | ||||
| #include "shader_recompiler/exception.h" | ||||
|  | ||||
| namespace Shader::Backend::GLSL { | ||||
|  | ||||
| void EmitJoin(EmitContext&) { | ||||
|     throw NotImplementedException("Join shouldn't be emitted"); | ||||
| } | ||||
|  | ||||
| void EmitDemoteToHelperInvocation(EmitContext& ctx, | ||||
|                                   [[maybe_unused]] std::string_view continue_label) { | ||||
|     ctx.Add("discard;"); | ||||
| } | ||||
|  | ||||
| } // namespace Shader::Backend::GLSL | ||||
|   | ||||
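The new file above only defines emitters whose GLSL output ignores part of their input, which is why EmitDemoteToHelperInvocation keeps [[maybe_unused]] on continue_label while the rest of the commit drops the attribute wherever a parameter is actually consumed. For background, these functions append formatted GLSL statements to the shader source under construction; a minimal, self-contained sketch of that pattern (MiniContext is an invented stand-in, not yuzu's EmitContext) is:

#include <cstdio>
#include <string>

// Invented stand-in for the real EmitContext: Add() appends one GLSL
// statement to the shader body being generated.
struct MiniContext {
    std::string code;
    void Add(const std::string& line) {
        code += line;
        code += '\n';
    }
};

int main() {
    MiniContext ctx;
    ctx.Add("discard;"); // what EmitDemoteToHelperInvocation contributes
    std::printf("%s", ctx.code.c_str());
}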
| @@ -14,8 +14,7 @@ void EmitConvertS16F16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::I | ||||
|     NotImplemented(); | ||||
| } | ||||
|  | ||||
| void EmitConvertS16F32([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                        [[maybe_unused]] std::string_view value) { | ||||
| void EmitConvertS16F32(EmitContext& ctx, IR::Inst& inst, std::string_view value) { | ||||
|     ctx.AddS32("{}=int(float({}))&0xffff;", inst, value); | ||||
| } | ||||
|  | ||||
| @@ -29,13 +28,11 @@ void EmitConvertS32F16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::I | ||||
|     NotImplemented(); | ||||
| } | ||||
|  | ||||
| void EmitConvertS32F32([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                        [[maybe_unused]] std::string_view value) { | ||||
| void EmitConvertS32F32(EmitContext& ctx, IR::Inst& inst, std::string_view value) { | ||||
|     ctx.AddS32("{}=int(float({}));", inst, value); | ||||
| } | ||||
|  | ||||
| void EmitConvertS32F64([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                        [[maybe_unused]] std::string_view value) { | ||||
| void EmitConvertS32F64(EmitContext& ctx, IR::Inst& inst, std::string_view value) { | ||||
|     ctx.AddS32("{}=int(double({}));", inst, value); | ||||
| } | ||||
|  | ||||
| @@ -44,13 +41,11 @@ void EmitConvertS64F16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::I | ||||
|     NotImplemented(); | ||||
| } | ||||
|  | ||||
| void EmitConvertS64F32([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                        [[maybe_unused]] std::string_view value) { | ||||
| void EmitConvertS64F32(EmitContext& ctx, IR::Inst& inst, std::string_view value) { | ||||
|     ctx.AddS64("{}=int64_t(double(float({})));", inst, value); | ||||
| } | ||||
|  | ||||
| void EmitConvertS64F64([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                        [[maybe_unused]] std::string_view value) { | ||||
| void EmitConvertS64F64(EmitContext& ctx, IR::Inst& inst, std::string_view value) { | ||||
|     ctx.AddS64("{}=int64_t(double({}));", inst, value); | ||||
| } | ||||
|  | ||||
| @@ -74,13 +69,11 @@ void EmitConvertU32F16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::I | ||||
|     NotImplemented(); | ||||
| } | ||||
|  | ||||
| void EmitConvertU32F32([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                        [[maybe_unused]] std::string_view value) { | ||||
| void EmitConvertU32F32(EmitContext& ctx, IR::Inst& inst, std::string_view value) { | ||||
|     ctx.AddU32("{}=uint(float({}));", inst, value); | ||||
| } | ||||
|  | ||||
| void EmitConvertU32F64([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                        [[maybe_unused]] std::string_view value) { | ||||
| void EmitConvertU32F64(EmitContext& ctx, IR::Inst& inst, std::string_view value) { | ||||
|     ctx.AddU32("{}=uint(double({}));", inst, value); | ||||
| } | ||||
|  | ||||
| @@ -89,23 +82,19 @@ void EmitConvertU64F16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::I | ||||
|     NotImplemented(); | ||||
| } | ||||
|  | ||||
| void EmitConvertU64F32([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                        [[maybe_unused]] std::string_view value) { | ||||
| void EmitConvertU64F32(EmitContext& ctx, IR::Inst& inst, std::string_view value) { | ||||
|     ctx.AddU64("{}=uint64_t(float({}));", inst, value); | ||||
| } | ||||
|  | ||||
| void EmitConvertU64F64([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                        [[maybe_unused]] std::string_view value) { | ||||
| void EmitConvertU64F64(EmitContext& ctx, IR::Inst& inst, std::string_view value) { | ||||
|     ctx.AddU64("{}=uint64_t(double({}));", inst, value); | ||||
| } | ||||
|  | ||||
| void EmitConvertU64U32([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                        [[maybe_unused]] std::string_view value) { | ||||
| void EmitConvertU64U32(EmitContext& ctx, IR::Inst& inst, std::string_view value) { | ||||
|     ctx.AddU64("{}=uint64_t(uint({}));", inst, value); | ||||
| } | ||||
|  | ||||
| void EmitConvertU32U64([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                        [[maybe_unused]] std::string_view value) { | ||||
| void EmitConvertU32U64(EmitContext& ctx, IR::Inst& inst, std::string_view value) { | ||||
|     ctx.AddU32("{}=uint(uint64_t({}));", inst, value); | ||||
| } | ||||
|  | ||||
| @@ -119,13 +108,11 @@ void EmitConvertF32F16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::I | ||||
|     NotImplemented(); | ||||
| } | ||||
|  | ||||
| void EmitConvertF32F64([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                        [[maybe_unused]] std::string_view value) { | ||||
| void EmitConvertF32F64(EmitContext& ctx, IR::Inst& inst, std::string_view value) { | ||||
|     ctx.AddF32("{}=float(double({}));", inst, value); | ||||
| } | ||||
|  | ||||
| void EmitConvertF64F32([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                        [[maybe_unused]] std::string_view value) { | ||||
| void EmitConvertF64F32(EmitContext& ctx, IR::Inst& inst, std::string_view value) { | ||||
|     ctx.AddF64("{}=double(float({}));", inst, value); | ||||
| } | ||||
|  | ||||
| @@ -179,13 +166,11 @@ void EmitConvertF32S16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::I | ||||
|     NotImplemented(); | ||||
| } | ||||
|  | ||||
| void EmitConvertF32S32([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                        [[maybe_unused]] std::string_view value) { | ||||
| void EmitConvertF32S32(EmitContext& ctx, IR::Inst& inst, std::string_view value) { | ||||
|     ctx.AddF32("{}=float(int({}));", inst, value); | ||||
| } | ||||
|  | ||||
| void EmitConvertF32S64([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                        [[maybe_unused]] std::string_view value) { | ||||
| void EmitConvertF32S64(EmitContext& ctx, IR::Inst& inst, std::string_view value) { | ||||
|     ctx.AddF32("{}=float(double(int64_t({})));", inst, value); | ||||
| } | ||||
|  | ||||
| @@ -194,18 +179,15 @@ void EmitConvertF32U8([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::In | ||||
|     NotImplemented(); | ||||
| } | ||||
|  | ||||
| void EmitConvertF32U16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                        [[maybe_unused]] std::string_view value) { | ||||
| void EmitConvertF32U16(EmitContext& ctx, IR::Inst& inst, std::string_view value) { | ||||
|     ctx.AddF32("{}=float(uint({}&0xffff));", inst, value); | ||||
| } | ||||
|  | ||||
| void EmitConvertF32U32([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                        [[maybe_unused]] std::string_view value) { | ||||
| void EmitConvertF32U32(EmitContext& ctx, IR::Inst& inst, std::string_view value) { | ||||
|     ctx.AddF32("{}=float(uint({}));", inst, value); | ||||
| } | ||||
|  | ||||
| void EmitConvertF32U64([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                        [[maybe_unused]] std::string_view value) { | ||||
| void EmitConvertF32U64(EmitContext& ctx, IR::Inst& inst, std::string_view value) { | ||||
|     ctx.AddF32("{}=float(double(uint64_t({})));", inst, value); | ||||
| } | ||||
|  | ||||
| @@ -219,13 +201,11 @@ void EmitConvertF64S16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::I | ||||
|     NotImplemented(); | ||||
| } | ||||
|  | ||||
| void EmitConvertF64S32([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                        [[maybe_unused]] std::string_view value) { | ||||
| void EmitConvertF64S32(EmitContext& ctx, IR::Inst& inst, std::string_view value) { | ||||
|     ctx.AddF64("{}=double(int({}));", inst, value); | ||||
| } | ||||
|  | ||||
| void EmitConvertF64S64([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                        [[maybe_unused]] std::string_view value) { | ||||
| void EmitConvertF64S64(EmitContext& ctx, IR::Inst& inst, std::string_view value) { | ||||
|     ctx.AddF64("{}=double(int64_t({}));", inst, value); | ||||
| } | ||||
|  | ||||
| @@ -239,13 +219,11 @@ void EmitConvertF64U16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::I | ||||
|     NotImplemented(); | ||||
| } | ||||
|  | ||||
| void EmitConvertF64U32([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                        [[maybe_unused]] std::string_view value) { | ||||
| void EmitConvertF64U32(EmitContext& ctx, IR::Inst& inst, std::string_view value) { | ||||
|     ctx.AddF64("{}=double(uint({}));", inst, value); | ||||
| } | ||||
|  | ||||
| void EmitConvertF64U64([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                        [[maybe_unused]] std::string_view value) { | ||||
| void EmitConvertF64U64(EmitContext& ctx, IR::Inst& inst, std::string_view value) { | ||||
|     ctx.AddF64("{}=double(uint64_t({}));", inst, value); | ||||
| } | ||||
|  | ||||
|   | ||||
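Every conversion emitter above now consumes all of its parameters in the format string, so the [[maybe_unused]] attributes can go. As a rough illustration of what one of them produces (assuming a C++20 standard library with <format>; s32_0 and f32_1 are invented names standing in for whatever the variable allocator picks), EmitConvertS16F32's line expands to:

#include <format>
#include <iostream>
#include <string>

int main() {
    // Same format string as EmitConvertS16F32, with placeholder operand names.
    const std::string line = std::format("{}=int(float({}))&0xffff;", "s32_0", "f32_1");
    std::cout << line << '\n'; // prints: s32_0=int(float(f32_1))&0xffff;
}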
| @@ -12,8 +12,7 @@ | ||||
|  | ||||
| namespace Shader::Backend::GLSL { | ||||
| namespace { | ||||
| std::string Texture(EmitContext& ctx, const IR::TextureInstInfo& info, | ||||
|                     [[maybe_unused]] const IR::Value& index) { | ||||
| std::string Texture(EmitContext& ctx, const IR::TextureInstInfo& info) { | ||||
|     if (info.type == TextureType::Buffer) { | ||||
|         return fmt::format("tex{}", ctx.texture_buffer_bindings.at(info.descriptor_index)); | ||||
|     } else { | ||||
| @@ -21,8 +20,7 @@ std::string Texture(EmitContext& ctx, const IR::TextureInstInfo& info, | ||||
|     } | ||||
| } | ||||
|  | ||||
| std::string Image(EmitContext& ctx, const IR::TextureInstInfo& info, | ||||
|                   [[maybe_unused]] const IR::Value& index) { | ||||
| std::string Image(EmitContext& ctx, const IR::TextureInstInfo& info) { | ||||
|     if (info.type == TextureType::Buffer) { | ||||
|         return fmt::format("img{}", ctx.image_buffer_bindings.at(info.descriptor_index)); | ||||
|     } else { | ||||
| @@ -139,16 +137,14 @@ IR::Inst* PrepareSparse(IR::Inst& inst) { | ||||
| } | ||||
| } // Anonymous namespace | ||||
|  | ||||
| void EmitImageSampleImplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                                 [[maybe_unused]] const IR::Value& index, | ||||
|                                 [[maybe_unused]] std::string_view coords, | ||||
|                                 [[maybe_unused]] std::string_view bias_lc, | ||||
|                                 [[maybe_unused]] const IR::Value& offset) { | ||||
| void EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst& inst, | ||||
|                                 [[maybe_unused]] const IR::Value& index, std::string_view coords, | ||||
|                                 std::string_view bias_lc, const IR::Value& offset) { | ||||
|     const auto info{inst.Flags<IR::TextureInstInfo>()}; | ||||
|     if (info.has_lod_clamp) { | ||||
|         throw NotImplementedException("EmitImageSampleImplicitLod Lod clamp samples"); | ||||
|     } | ||||
|     const auto texture{Texture(ctx, info, index)}; | ||||
|     const auto texture{Texture(ctx, info)}; | ||||
|     const auto bias{info.has_bias ? fmt::format(",{}", bias_lc) : ""}; | ||||
|     const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)}; | ||||
|     const auto sparse_inst{PrepareSparse(inst)}; | ||||
| @@ -179,11 +175,9 @@ void EmitImageSampleImplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unuse | ||||
|     } | ||||
| } | ||||
|  | ||||
| void EmitImageSampleExplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                                 [[maybe_unused]] const IR::Value& index, | ||||
|                                 [[maybe_unused]] std::string_view coords, | ||||
|                                 [[maybe_unused]] std::string_view lod_lc, | ||||
|                                 [[maybe_unused]] const IR::Value& offset) { | ||||
| void EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst& inst, | ||||
|                                 [[maybe_unused]] const IR::Value& index, std::string_view coords, | ||||
|                                 std::string_view lod_lc, const IR::Value& offset) { | ||||
|     const auto info{inst.Flags<IR::TextureInstInfo>()}; | ||||
|     if (info.has_bias) { | ||||
|         throw NotImplementedException("EmitImageSampleExplicitLod Bias texture samples"); | ||||
| @@ -191,7 +185,7 @@ void EmitImageSampleExplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unuse | ||||
|     if (info.has_lod_clamp) { | ||||
|         throw NotImplementedException("EmitImageSampleExplicitLod Lod clamp samples"); | ||||
|     } | ||||
|     const auto texture{Texture(ctx, info, index)}; | ||||
|     const auto texture{Texture(ctx, info)}; | ||||
|     const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)}; | ||||
|     const auto sparse_inst{PrepareSparse(inst)}; | ||||
|     if (!sparse_inst) { | ||||
| @@ -214,13 +208,10 @@ void EmitImageSampleExplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unuse | ||||
|     } | ||||
| } | ||||
|  | ||||
| void EmitImageSampleDrefImplicitLod([[maybe_unused]] EmitContext& ctx, | ||||
|                                     [[maybe_unused]] IR::Inst& inst, | ||||
| void EmitImageSampleDrefImplicitLod(EmitContext& ctx, IR::Inst& inst, | ||||
|                                     [[maybe_unused]] const IR::Value& index, | ||||
|                                     [[maybe_unused]] std::string_view coords, | ||||
|                                     [[maybe_unused]] std::string_view dref, | ||||
|                                     [[maybe_unused]] std::string_view bias_lc, | ||||
|                                     [[maybe_unused]] const IR::Value& offset) { | ||||
|                                     std::string_view coords, std::string_view dref, | ||||
|                                     std::string_view bias_lc, const IR::Value& offset) { | ||||
|     const auto info{inst.Flags<IR::TextureInstInfo>()}; | ||||
|     const auto sparse_inst{PrepareSparse(inst)}; | ||||
|     if (sparse_inst) { | ||||
| @@ -232,7 +223,7 @@ void EmitImageSampleDrefImplicitLod([[maybe_unused]] EmitContext& ctx, | ||||
|     if (info.has_lod_clamp) { | ||||
|         throw NotImplementedException("EmitImageSampleDrefImplicitLod Lod clamp samples"); | ||||
|     } | ||||
|     const auto texture{Texture(ctx, info, index)}; | ||||
|     const auto texture{Texture(ctx, info)}; | ||||
|     const auto bias{info.has_bias ? fmt::format(",{}", bias_lc) : ""}; | ||||
|     const bool needs_shadow_ext{NeedsShadowLodExt(info.type)}; | ||||
|     const auto cast{needs_shadow_ext ? "vec4" : "vec3"}; | ||||
| @@ -272,13 +263,10 @@ void EmitImageSampleDrefImplicitLod([[maybe_unused]] EmitContext& ctx, | ||||
|     } | ||||
| } | ||||
|  | ||||
| void EmitImageSampleDrefExplicitLod([[maybe_unused]] EmitContext& ctx, | ||||
|                                     [[maybe_unused]] IR::Inst& inst, | ||||
| void EmitImageSampleDrefExplicitLod(EmitContext& ctx, IR::Inst& inst, | ||||
|                                     [[maybe_unused]] const IR::Value& index, | ||||
|                                     [[maybe_unused]] std::string_view coords, | ||||
|                                     [[maybe_unused]] std::string_view dref, | ||||
|                                     [[maybe_unused]] std::string_view lod_lc, | ||||
|                                     [[maybe_unused]] const IR::Value& offset) { | ||||
|                                     std::string_view coords, std::string_view dref, | ||||
|                                     std::string_view lod_lc, const IR::Value& offset) { | ||||
|     const auto info{inst.Flags<IR::TextureInstInfo>()}; | ||||
|     const auto sparse_inst{PrepareSparse(inst)}; | ||||
|     if (sparse_inst) { | ||||
| @@ -290,7 +278,7 @@ void EmitImageSampleDrefExplicitLod([[maybe_unused]] EmitContext& ctx, | ||||
|     if (info.has_lod_clamp) { | ||||
|         throw NotImplementedException("EmitImageSampleDrefExplicitLod Lod clamp samples"); | ||||
|     } | ||||
|     const auto texture{Texture(ctx, info, index)}; | ||||
|     const auto texture{Texture(ctx, info)}; | ||||
|     const bool needs_shadow_ext{NeedsShadowLodExt(info.type)}; | ||||
|     const bool use_grad{!ctx.profile.support_gl_texture_shadow_lod && needs_shadow_ext}; | ||||
|     const auto cast{needs_shadow_ext ? "vec4" : "vec3"}; | ||||
| @@ -325,13 +313,10 @@ void EmitImageSampleDrefExplicitLod([[maybe_unused]] EmitContext& ctx, | ||||
|     } | ||||
| } | ||||
|  | ||||
| void EmitImageGather([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                      [[maybe_unused]] const IR::Value& index, | ||||
|                      [[maybe_unused]] std::string_view coords, | ||||
|                      [[maybe_unused]] const IR::Value& offset, | ||||
|                      [[maybe_unused]] const IR::Value& offset2) { | ||||
| void EmitImageGather(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index, | ||||
|                      std::string_view coords, const IR::Value& offset, const IR::Value& offset2) { | ||||
|     const auto info{inst.Flags<IR::TextureInstInfo>()}; | ||||
|     const auto texture{Texture(ctx, info, index)}; | ||||
|     const auto texture{Texture(ctx, info)}; | ||||
|     const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)}; | ||||
|     const auto sparse_inst{PrepareSparse(inst)}; | ||||
|     if (!sparse_inst) { | ||||
| @@ -370,14 +355,11 @@ void EmitImageGather([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Ins | ||||
|               info.gather_component); | ||||
| } | ||||
|  | ||||
| void EmitImageGatherDref([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                          [[maybe_unused]] const IR::Value& index, | ||||
|                          [[maybe_unused]] std::string_view coords, | ||||
|                          [[maybe_unused]] const IR::Value& offset, | ||||
|                          [[maybe_unused]] const IR::Value& offset2, | ||||
|                          [[maybe_unused]] std::string_view dref) { | ||||
| void EmitImageGatherDref(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index, | ||||
|                          std::string_view coords, const IR::Value& offset, const IR::Value& offset2, | ||||
|                          std::string_view dref) { | ||||
|     const auto info{inst.Flags<IR::TextureInstInfo>()}; | ||||
|     const auto texture{Texture(ctx, info, index)}; | ||||
|     const auto texture{Texture(ctx, info)}; | ||||
|     const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)}; | ||||
|     const auto sparse_inst{PrepareSparse(inst)}; | ||||
|     if (!sparse_inst) { | ||||
| @@ -413,10 +395,8 @@ void EmitImageGatherDref([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR: | ||||
|               *sparse_inst, texture, CastToIntVec(coords, info), dref, offsets, texel); | ||||
| } | ||||
|  | ||||
| void EmitImageFetch([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                     [[maybe_unused]] const IR::Value& index, | ||||
|                     [[maybe_unused]] std::string_view coords, | ||||
|                     [[maybe_unused]] std::string_view offset, [[maybe_unused]] std::string_view lod, | ||||
| void EmitImageFetch(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index, | ||||
|                     std::string_view coords, std::string_view offset, std::string_view lod, | ||||
|                     [[maybe_unused]] std::string_view ms) { | ||||
|     const auto info{inst.Flags<IR::TextureInstInfo>()}; | ||||
|     if (info.has_bias) { | ||||
| @@ -425,7 +405,7 @@ void EmitImageFetch([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst | ||||
|     if (info.has_lod_clamp) { | ||||
|         throw NotImplementedException("EmitImageFetch Lod clamp samples"); | ||||
|     } | ||||
|     const auto texture{Texture(ctx, info, index)}; | ||||
|     const auto texture{Texture(ctx, info)}; | ||||
|     const auto sparse_inst{PrepareSparse(inst)}; | ||||
|     const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)}; | ||||
|     if (!sparse_inst) { | ||||
| @@ -453,11 +433,10 @@ void EmitImageFetch([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst | ||||
|     } | ||||
| } | ||||
|  | ||||
| void EmitImageQueryDimensions([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                               [[maybe_unused]] const IR::Value& index, | ||||
|                               [[maybe_unused]] std::string_view lod) { | ||||
| void EmitImageQueryDimensions(EmitContext& ctx, IR::Inst& inst, | ||||
|                               [[maybe_unused]] const IR::Value& index, std::string_view lod) { | ||||
|     const auto info{inst.Flags<IR::TextureInstInfo>()}; | ||||
|     const auto texture{Texture(ctx, info, index)}; | ||||
|     const auto texture{Texture(ctx, info)}; | ||||
|     switch (info.type) { | ||||
|     case TextureType::Color1D: | ||||
|         return ctx.AddU32x4( | ||||
| @@ -481,20 +460,16 @@ void EmitImageQueryDimensions([[maybe_unused]] EmitContext& ctx, [[maybe_unused] | ||||
|     throw LogicError("Unspecified image type {}", info.type.Value()); | ||||
| } | ||||
|  | ||||
| void EmitImageQueryLod([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                        [[maybe_unused]] const IR::Value& index, | ||||
|                        [[maybe_unused]] std::string_view coords) { | ||||
| void EmitImageQueryLod(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index, | ||||
|                        std::string_view coords) { | ||||
|     const auto info{inst.Flags<IR::TextureInstInfo>()}; | ||||
|     const auto texture{Texture(ctx, info, index)}; | ||||
|     const auto texture{Texture(ctx, info)}; | ||||
|     return ctx.AddF32x4("{}=vec4(textureQueryLod({},{}),0.0,0.0);", inst, texture, coords); | ||||
| } | ||||
|  | ||||
| void EmitImageGradient([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                        [[maybe_unused]] const IR::Value& index, | ||||
|                        [[maybe_unused]] std::string_view coords, | ||||
|                        [[maybe_unused]] const IR::Value& derivatives, | ||||
|                        [[maybe_unused]] const IR::Value& offset, | ||||
|                        [[maybe_unused]] const IR::Value& lod_clamp) { | ||||
| void EmitImageGradient(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index, | ||||
|                        std::string_view coords, const IR::Value& derivatives, | ||||
|                        const IR::Value& offset, [[maybe_unused]] const IR::Value& lod_clamp) { | ||||
|     const auto info{inst.Flags<IR::TextureInstInfo>()}; | ||||
|     if (info.has_lod_clamp) { | ||||
|         throw NotImplementedException("EmitImageGradient Lod clamp samples"); | ||||
| @@ -506,7 +481,7 @@ void EmitImageGradient([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::I | ||||
|     if (!offset.IsEmpty()) { | ||||
|         throw NotImplementedException("EmitImageGradient offset"); | ||||
|     } | ||||
|     const auto texture{Texture(ctx, info, index)}; | ||||
|     const auto texture{Texture(ctx, info)}; | ||||
|     const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)}; | ||||
|     const bool multi_component{info.num_derivates > 1 || info.has_lod_clamp}; | ||||
|     const auto derivatives_vec{ctx.var_alloc.Consume(derivatives)}; | ||||
| @@ -519,63 +494,65 @@ void EmitImageGradient([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::I | ||||
|     } | ||||
| } | ||||
|  | ||||
| void EmitImageRead([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                    [[maybe_unused]] const IR::Value& index, | ||||
|                    [[maybe_unused]] std::string_view coords) { | ||||
| void EmitImageRead(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index, | ||||
|                    std::string_view coords) { | ||||
|     const auto info{inst.Flags<IR::TextureInstInfo>()}; | ||||
|     const auto sparse_inst{PrepareSparse(inst)}; | ||||
|     if (sparse_inst) { | ||||
|         throw NotImplementedException("EmitImageRead Sparse"); | ||||
|     } | ||||
|     const auto image{Image(ctx, info, index)}; | ||||
|     const auto image{Image(ctx, info)}; | ||||
|     ctx.AddU32x4("{}=uvec4(imageLoad({},{}));", inst, image, TexelFetchCastToInt(coords, info)); | ||||
| } | ||||
|  | ||||
| void EmitImageWrite([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                     [[maybe_unused]] const IR::Value& index, | ||||
|                     [[maybe_unused]] std::string_view coords, | ||||
|                     [[maybe_unused]] std::string_view color) { | ||||
| void EmitImageWrite(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index, | ||||
|                     std::string_view coords, std::string_view color) { | ||||
|     const auto info{inst.Flags<IR::TextureInstInfo>()}; | ||||
|     const auto image{Image(ctx, info, index)}; | ||||
|     const auto image{Image(ctx, info)}; | ||||
|     ctx.Add("imageStore({},{},{});", image, TexelFetchCastToInt(coords, info), color); | ||||
| } | ||||
|  | ||||
| void EmitImageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, | ||||
|                            std::string_view coords, std::string_view value) { | ||||
| void EmitImageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, | ||||
|                            [[maybe_unused]] const IR::Value& index, std::string_view coords, | ||||
|                            std::string_view value) { | ||||
|     const auto info{inst.Flags<IR::TextureInstInfo>()}; | ||||
|     const auto image{Image(ctx, info, index)}; | ||||
|     const auto image{Image(ctx, info)}; | ||||
|     ctx.AddU32("{}=imageAtomicAdd({},{},{});", inst, image, TexelFetchCastToInt(coords, info), | ||||
|                value); | ||||
| } | ||||
|  | ||||
| void EmitImageAtomicSMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, | ||||
|                            std::string_view coords, std::string_view value) { | ||||
| void EmitImageAtomicSMin32(EmitContext& ctx, IR::Inst& inst, | ||||
|                            [[maybe_unused]] const IR::Value& index, std::string_view coords, | ||||
|                            std::string_view value) { | ||||
|     const auto info{inst.Flags<IR::TextureInstInfo>()}; | ||||
|     const auto image{Image(ctx, info, index)}; | ||||
|     const auto image{Image(ctx, info)}; | ||||
|     ctx.AddU32("{}=imageAtomicMin({},{},int({}));", inst, image, TexelFetchCastToInt(coords, info), | ||||
|                value); | ||||
| } | ||||
|  | ||||
| void EmitImageAtomicUMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, | ||||
|                            std::string_view coords, std::string_view value) { | ||||
| void EmitImageAtomicUMin32(EmitContext& ctx, IR::Inst& inst, | ||||
|                            [[maybe_unused]] const IR::Value& index, std::string_view coords, | ||||
|                            std::string_view value) { | ||||
|     const auto info{inst.Flags<IR::TextureInstInfo>()}; | ||||
|     const auto image{Image(ctx, info, index)}; | ||||
|     const auto image{Image(ctx, info)}; | ||||
|     ctx.AddU32("{}=imageAtomicMin({},{},uint({}));", inst, image, TexelFetchCastToInt(coords, info), | ||||
|                value); | ||||
| } | ||||
|  | ||||
| void EmitImageAtomicSMax32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, | ||||
|                            std::string_view coords, std::string_view value) { | ||||
| void EmitImageAtomicSMax32(EmitContext& ctx, IR::Inst& inst, | ||||
|                            [[maybe_unused]] const IR::Value& index, std::string_view coords, | ||||
|                            std::string_view value) { | ||||
|     const auto info{inst.Flags<IR::TextureInstInfo>()}; | ||||
|     const auto image{Image(ctx, info, index)}; | ||||
|     const auto image{Image(ctx, info)}; | ||||
|     ctx.AddU32("{}=imageAtomicMax({},{},int({}));", inst, image, TexelFetchCastToInt(coords, info), | ||||
|                value); | ||||
| } | ||||
|  | ||||
| void EmitImageAtomicUMax32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, | ||||
|                            std::string_view coords, std::string_view value) { | ||||
| void EmitImageAtomicUMax32(EmitContext& ctx, IR::Inst& inst, | ||||
|                            [[maybe_unused]] const IR::Value& index, std::string_view coords, | ||||
|                            std::string_view value) { | ||||
|     const auto info{inst.Flags<IR::TextureInstInfo>()}; | ||||
|     const auto image{Image(ctx, info, index)}; | ||||
|     const auto image{Image(ctx, info)}; | ||||
|     ctx.AddU32("{}=imageAtomicMax({},{},uint({}));", inst, image, TexelFetchCastToInt(coords, info), | ||||
|                value); | ||||
| } | ||||
| @@ -590,34 +567,35 @@ void EmitImageAtomicDec32(EmitContext&, IR::Inst&, const IR::Value&, std::string | ||||
|     NotImplemented(); | ||||
| } | ||||
|  | ||||
| void EmitImageAtomicAnd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, | ||||
| void EmitImageAtomicAnd32(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index, | ||||
|                           std::string_view coords, std::string_view value) { | ||||
|     const auto info{inst.Flags<IR::TextureInstInfo>()}; | ||||
|     const auto image{Image(ctx, info, index)}; | ||||
|     const auto image{Image(ctx, info)}; | ||||
|     ctx.AddU32("{}=imageAtomicAnd({},{},{});", inst, image, TexelFetchCastToInt(coords, info), | ||||
|                value); | ||||
| } | ||||
|  | ||||
| void EmitImageAtomicOr32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, | ||||
| void EmitImageAtomicOr32(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index, | ||||
|                          std::string_view coords, std::string_view value) { | ||||
|     const auto info{inst.Flags<IR::TextureInstInfo>()}; | ||||
|     const auto image{Image(ctx, info, index)}; | ||||
|     const auto image{Image(ctx, info)}; | ||||
|     ctx.AddU32("{}=imageAtomicOr({},{},{});", inst, image, TexelFetchCastToInt(coords, info), | ||||
|                value); | ||||
| } | ||||
|  | ||||
| void EmitImageAtomicXor32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, | ||||
| void EmitImageAtomicXor32(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index, | ||||
|                           std::string_view coords, std::string_view value) { | ||||
|     const auto info{inst.Flags<IR::TextureInstInfo>()}; | ||||
|     const auto image{Image(ctx, info, index)}; | ||||
|     const auto image{Image(ctx, info)}; | ||||
|     ctx.AddU32("{}=imageAtomicXor({},{},{});", inst, image, TexelFetchCastToInt(coords, info), | ||||
|                value); | ||||
| } | ||||
|  | ||||
| void EmitImageAtomicExchange32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, | ||||
|                                std::string_view coords, std::string_view value) { | ||||
| void EmitImageAtomicExchange32(EmitContext& ctx, IR::Inst& inst, | ||||
|                                [[maybe_unused]] const IR::Value& index, std::string_view coords, | ||||
|                                std::string_view value) { | ||||
|     const auto info{inst.Flags<IR::TextureInstInfo>()}; | ||||
|     const auto image{Image(ctx, info, index)}; | ||||
|     const auto image{Image(ctx, info)}; | ||||
|     ctx.AddU32("{}=imageAtomicExchange({},{},{});", inst, image, TexelFetchCastToInt(coords, info), | ||||
|                value); | ||||
| } | ||||
|   | ||||
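The Texture() and Image() helpers lose the index parameter they never read, and the image atomics keep [[maybe_unused]] only on that same index argument. For illustration, the statement EmitImageAtomicIAdd32 appends looks like the following once formatted (u32_7, img0 and int_coords are invented placeholders for the allocator, binding and coordinate-cast results; assumes a C++20 <format> library):

#include <format>
#include <iostream>

int main() {
    std::cout << std::format("{}=imageAtomicAdd({},{},{});",
                             "u32_7", "img0", "int_coords", "value")
              << '\n'; // u32_7=imageAtomicAdd(img0,int_coords,value);
}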
| @@ -30,14 +30,7 @@ void EmitIdentity(EmitContext& ctx, IR::Inst& inst, const IR::Value& value); | ||||
| void EmitConditionRef(EmitContext& ctx, IR::Inst& inst, const IR::Value& value); | ||||
| void EmitReference(EmitContext& ctx, const IR::Value& value); | ||||
| void EmitPhiMove(EmitContext& ctx, const IR::Value& phi, const IR::Value& value); | ||||
| void EmitBranch(EmitContext& ctx, std::string_view label); | ||||
| void EmitBranchConditional(EmitContext& ctx, std::string_view condition, | ||||
|                            std::string_view true_label, std::string_view false_label); | ||||
| void EmitLoopMerge(EmitContext& ctx, std::string_view merge_label, std::string_view continue_label); | ||||
| void EmitSelectionMerge(EmitContext& ctx, std::string_view merge_label); | ||||
| void EmitReturn(EmitContext& ctx); | ||||
| void EmitJoin(EmitContext& ctx); | ||||
| void EmitUnreachable(EmitContext& ctx); | ||||
| void EmitDemoteToHelperInvocation(EmitContext& ctx, std::string_view continue_label); | ||||
| void EmitBarrier(EmitContext& ctx); | ||||
| void EmitWorkgroupMemoryBarrier(EmitContext& ctx); | ||||
|   | ||||
| @@ -156,13 +156,11 @@ void EmitBitwiseNot32(EmitContext& ctx, IR::Inst& inst, std::string_view value) | ||||
|     ctx.AddU32("{}=~{};", inst, value); | ||||
| } | ||||
|  | ||||
| void EmitFindSMsb32([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                     [[maybe_unused]] std::string_view value) { | ||||
| void EmitFindSMsb32(EmitContext& ctx, IR::Inst& inst, std::string_view value) { | ||||
|     ctx.AddU32("{}=findMSB(int({}));", inst, value); | ||||
| } | ||||
|  | ||||
| void EmitFindUMsb32([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                     [[maybe_unused]] std::string_view value) { | ||||
| void EmitFindUMsb32(EmitContext& ctx, IR::Inst& inst, std::string_view value) { | ||||
|     ctx.AddU32("{}=findMSB(uint({}));", inst, value); | ||||
| } | ||||
|  | ||||
|   | ||||
| @@ -21,19 +21,19 @@ void SsboWriteCas(EmitContext& ctx, const IR::Value& binding, std::string_view o | ||||
| } | ||||
| } // Anonymous namespace | ||||
|  | ||||
| void EmitLoadGlobalU8([[maybe_unused]] EmitContext& ctx) { | ||||
| void EmitLoadGlobalU8(EmitContext&) { | ||||
|     NotImplemented(); | ||||
| } | ||||
|  | ||||
| void EmitLoadGlobalS8([[maybe_unused]] EmitContext& ctx) { | ||||
| void EmitLoadGlobalS8(EmitContext&) { | ||||
|     NotImplemented(); | ||||
| } | ||||
|  | ||||
| void EmitLoadGlobalU16([[maybe_unused]] EmitContext& ctx) { | ||||
| void EmitLoadGlobalU16(EmitContext&) { | ||||
|     NotImplemented(); | ||||
| } | ||||
|  | ||||
| void EmitLoadGlobalS16([[maybe_unused]] EmitContext& ctx) { | ||||
| void EmitLoadGlobalS16(EmitContext&) { | ||||
|     NotImplemented(); | ||||
| } | ||||
|  | ||||
| @@ -49,19 +49,19 @@ void EmitLoadGlobal128(EmitContext& ctx, IR::Inst& inst, std::string_view addres | ||||
|     ctx.AddU32x4("{}=LoadGlobal128({});", inst, address); | ||||
| } | ||||
|  | ||||
| void EmitWriteGlobalU8([[maybe_unused]] EmitContext& ctx) { | ||||
| void EmitWriteGlobalU8(EmitContext&) { | ||||
|     NotImplemented(); | ||||
| } | ||||
|  | ||||
| void EmitWriteGlobalS8([[maybe_unused]] EmitContext& ctx) { | ||||
| void EmitWriteGlobalS8(EmitContext&) { | ||||
|     NotImplemented(); | ||||
| } | ||||
|  | ||||
| void EmitWriteGlobalU16([[maybe_unused]] EmitContext& ctx) { | ||||
| void EmitWriteGlobalU16(EmitContext&) { | ||||
|     NotImplemented(); | ||||
| } | ||||
|  | ||||
| void EmitWriteGlobalS16([[maybe_unused]] EmitContext& ctx) { | ||||
| void EmitWriteGlobalS16(EmitContext&) { | ||||
|     NotImplemented(); | ||||
| } | ||||
|  | ||||
| @@ -77,33 +77,29 @@ void EmitWriteGlobal128(EmitContext& ctx, std::string_view address, std::string_ | ||||
|     ctx.Add("WriteGlobal128({},{});", address, value); | ||||
| } | ||||
|  | ||||
| void EmitLoadStorageU8([[maybe_unused]] EmitContext& ctx, IR::Inst& inst, | ||||
|                        [[maybe_unused]] const IR::Value& binding, | ||||
|                        [[maybe_unused]] const IR::Value& offset) { | ||||
| void EmitLoadStorageU8(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, | ||||
|                        const IR::Value& offset) { | ||||
|     const auto offset_var{ctx.var_alloc.Consume(offset)}; | ||||
|     ctx.AddU32("{}=bitfieldExtract({}_ssbo{}[{}>>2],int({}%4)*8,8);", inst, ctx.stage_name, | ||||
|                binding.U32(), offset_var, offset_var); | ||||
| } | ||||
|  | ||||
| void EmitLoadStorageS8([[maybe_unused]] EmitContext& ctx, IR::Inst& inst, | ||||
|                        [[maybe_unused]] const IR::Value& binding, | ||||
|                        [[maybe_unused]] const IR::Value& offset) { | ||||
| void EmitLoadStorageS8(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, | ||||
|                        const IR::Value& offset) { | ||||
|     const auto offset_var{ctx.var_alloc.Consume(offset)}; | ||||
|     ctx.AddS32("{}=bitfieldExtract(int({}_ssbo{}[{}>>2]),int({}%4)*8,8);", inst, ctx.stage_name, | ||||
|                binding.U32(), offset_var, offset_var); | ||||
| } | ||||
|  | ||||
| void EmitLoadStorageU16([[maybe_unused]] EmitContext& ctx, IR::Inst& inst, | ||||
|                         [[maybe_unused]] const IR::Value& binding, | ||||
|                         [[maybe_unused]] const IR::Value& offset) { | ||||
| void EmitLoadStorageU16(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, | ||||
|                         const IR::Value& offset) { | ||||
|     const auto offset_var{ctx.var_alloc.Consume(offset)}; | ||||
|     ctx.AddU32("{}=bitfieldExtract({}_ssbo{}[{}>>2],int(({}>>1)%2)*16,16);", inst, ctx.stage_name, | ||||
|                binding.U32(), offset_var, offset_var); | ||||
| } | ||||
|  | ||||
| void EmitLoadStorageS16([[maybe_unused]] EmitContext& ctx, IR::Inst& inst, | ||||
|                         [[maybe_unused]] const IR::Value& binding, | ||||
|                         [[maybe_unused]] const IR::Value& offset) { | ||||
| void EmitLoadStorageS16(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, | ||||
|                         const IR::Value& offset) { | ||||
|     const auto offset_var{ctx.var_alloc.Consume(offset)}; | ||||
|     ctx.AddS32("{}=bitfieldExtract(int({}_ssbo{}[{}>>2]),int(({}>>1)%2)*16,16);", inst, | ||||
|                ctx.stage_name, binding.U32(), offset_var, offset_var); | ||||
| @@ -132,37 +128,29 @@ void EmitLoadStorage128(EmitContext& ctx, IR::Inst& inst, const IR::Value& bindi | ||||
|                  binding.U32(), offset_var); | ||||
| } | ||||
|  | ||||
| void EmitWriteStorageU8([[maybe_unused]] EmitContext& ctx, | ||||
|                         [[maybe_unused]] const IR::Value& binding, | ||||
|                         [[maybe_unused]] const IR::Value& offset, | ||||
|                         [[maybe_unused]] std::string_view value) { | ||||
| void EmitWriteStorageU8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, | ||||
|                         std::string_view value) { | ||||
|     const auto offset_var{ctx.var_alloc.Consume(offset)}; | ||||
|     const auto bit_offset{fmt::format("int({}%4)*8", offset_var)}; | ||||
|     SsboWriteCas(ctx, binding, offset_var, value, bit_offset, 8); | ||||
| } | ||||
|  | ||||
| void EmitWriteStorageS8([[maybe_unused]] EmitContext& ctx, | ||||
|                         [[maybe_unused]] const IR::Value& binding, | ||||
|                         [[maybe_unused]] const IR::Value& offset, | ||||
|                         [[maybe_unused]] std::string_view value) { | ||||
| void EmitWriteStorageS8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, | ||||
|                         std::string_view value) { | ||||
|     const auto offset_var{ctx.var_alloc.Consume(offset)}; | ||||
|     const auto bit_offset{fmt::format("int({}%4)*8", offset_var)}; | ||||
|     SsboWriteCas(ctx, binding, offset_var, value, bit_offset, 8); | ||||
| } | ||||
|  | ||||
| void EmitWriteStorageU16([[maybe_unused]] EmitContext& ctx, | ||||
|                          [[maybe_unused]] const IR::Value& binding, | ||||
|                          [[maybe_unused]] const IR::Value& offset, | ||||
|                          [[maybe_unused]] std::string_view value) { | ||||
| void EmitWriteStorageU16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, | ||||
|                          std::string_view value) { | ||||
|     const auto offset_var{ctx.var_alloc.Consume(offset)}; | ||||
|     const auto bit_offset{fmt::format("int(({}>>1)%2)*16", offset_var)}; | ||||
|     SsboWriteCas(ctx, binding, offset_var, value, bit_offset, 16); | ||||
| } | ||||
|  | ||||
| void EmitWriteStorageS16([[maybe_unused]] EmitContext& ctx, | ||||
|                          [[maybe_unused]] const IR::Value& binding, | ||||
|                          [[maybe_unused]] const IR::Value& offset, | ||||
|                          [[maybe_unused]] std::string_view value) { | ||||
| void EmitWriteStorageS16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, | ||||
|                          std::string_view value) { | ||||
|     const auto offset_var{ctx.var_alloc.Consume(offset)}; | ||||
|     const auto bit_offset{fmt::format("int(({}>>1)%2)*16", offset_var)}; | ||||
|     SsboWriteCas(ctx, binding, offset_var, value, bit_offset, 16); | ||||
| @@ -181,10 +169,8 @@ void EmitWriteStorage64(EmitContext& ctx, const IR::Value& binding, const IR::Va | ||||
|     ctx.Add("{}_ssbo{}[({}+4)>>2]={}.y;", ctx.stage_name, binding.U32(), offset_var, value); | ||||
| } | ||||
|  | ||||
| void EmitWriteStorage128([[maybe_unused]] EmitContext& ctx, | ||||
|                          [[maybe_unused]] const IR::Value& binding, | ||||
|                          [[maybe_unused]] const IR::Value& offset, | ||||
|                          [[maybe_unused]] std::string_view value) { | ||||
| void EmitWriteStorage128(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, | ||||
|                          std::string_view value) { | ||||
|     const auto offset_var{ctx.var_alloc.Consume(offset)}; | ||||
|     ctx.Add("{}_ssbo{}[{}>>2]={}.x;", ctx.stage_name, binding.U32(), offset_var, value); | ||||
|     ctx.Add("{}_ssbo{}[({}+4)>>2]={}.y;", ctx.stage_name, binding.U32(), offset_var, value); | ||||
|   | ||||
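The narrow storage loads and stores keep the same word-based addressing: a byte at offset lives in SSBO word offset>>2 at bit position int(offset%4)*8, and a halfword at int((offset>>1)%2)*16. A small host-side model of the U8 load (assuming little-endian 32-bit words, which is what the bitfieldExtract call encodes) shows which byte each offset selects:

#include <cstdint>
#include <cstdio>

// Host-side equivalent of EmitLoadStorageU8's bitfieldExtract addressing.
static std::uint32_t load_u8(const std::uint32_t* ssbo, std::uint32_t offset) {
    const std::uint32_t word = ssbo[offset >> 2];
    const std::uint32_t shift = (offset % 4) * 8;
    return (word >> shift) & 0xffu; // bitfieldExtract(word, shift, 8)
}

int main() {
    const std::uint32_t ssbo[1] = {0xDDCCBBAAu};
    for (std::uint32_t i = 0; i < 4; ++i) {
        std::printf("byte %u = 0x%02X\n", static_cast<unsigned>(i),
                    static_cast<unsigned>(load_u8(ssbo, i)));
    }
    // Prints AA, BB, CC, DD: offset 0 is the low byte of the word.
}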
| @@ -14,84 +14,6 @@ | ||||
|  | ||||
| namespace Shader::Backend::GLSL { | ||||
|  | ||||
| void EmitPhi(EmitContext& ctx, IR::Inst& phi) { | ||||
|     const size_t num_args{phi.NumArgs()}; | ||||
|     for (size_t i = 0; i < num_args; ++i) { | ||||
|         ctx.var_alloc.Consume(phi.Arg(i)); | ||||
|     } | ||||
|     if (!phi.Definition<Id>().is_valid) { | ||||
|         // The phi node wasn't forward defined | ||||
|         ctx.var_alloc.PhiDefine(phi, phi.Arg(0).Type()); | ||||
|     } | ||||
| } | ||||
|  | ||||
| void EmitVoid(EmitContext& ctx) {} | ||||
|  | ||||
| void EmitReference(EmitContext& ctx, const IR::Value& value) { | ||||
|     ctx.var_alloc.Consume(value); | ||||
| } | ||||
|  | ||||
| void EmitPhiMove(EmitContext& ctx, const IR::Value& phi_value, const IR::Value& value) { | ||||
|     IR::Inst& phi{*phi_value.InstRecursive()}; | ||||
|     const auto phi_type{phi.Arg(0).Type()}; | ||||
|     if (!phi.Definition<Id>().is_valid) { | ||||
|         // The phi node wasn't forward defined | ||||
|         ctx.var_alloc.PhiDefine(phi, phi_type); | ||||
|     } | ||||
|     const auto phi_reg{ctx.var_alloc.Consume(IR::Value{&phi})}; | ||||
|     const auto val_reg{ctx.var_alloc.Consume(value)}; | ||||
|     if (phi_reg == val_reg) { | ||||
|         return; | ||||
|     } | ||||
|     ctx.Add("{}={};", phi_reg, val_reg); | ||||
| } | ||||
|  | ||||
| void EmitBranch(EmitContext& ctx, std::string_view label) { | ||||
|     NotImplemented(); | ||||
| } | ||||
|  | ||||
| void EmitBranchConditional(EmitContext& ctx, std::string_view condition, | ||||
|                            std::string_view true_label, std::string_view false_label) { | ||||
|     NotImplemented(); | ||||
| } | ||||
|  | ||||
| void EmitLoopMerge(EmitContext& ctx, std::string_view merge_label, | ||||
|                    std::string_view continue_label) { | ||||
|     NotImplemented(); | ||||
| } | ||||
|  | ||||
| void EmitSelectionMerge(EmitContext& ctx, std::string_view merge_label) { | ||||
|     NotImplemented(); | ||||
| } | ||||
|  | ||||
| void EmitReturn(EmitContext& ctx) { | ||||
|     NotImplemented(); | ||||
| } | ||||
|  | ||||
| void EmitJoin(EmitContext& ctx) { | ||||
|     NotImplemented(); | ||||
| } | ||||
|  | ||||
| void EmitUnreachable(EmitContext& ctx) { | ||||
|     NotImplemented(); | ||||
| } | ||||
|  | ||||
| void EmitDemoteToHelperInvocation(EmitContext& ctx, std::string_view continue_label) { | ||||
|     ctx.Add("discard;"); | ||||
| } | ||||
|  | ||||
| void EmitPrologue(EmitContext& ctx) {} | ||||
|  | ||||
| void EmitEpilogue(EmitContext& ctx) {} | ||||
|  | ||||
| void EmitEmitVertex(EmitContext& ctx, const IR::Value& stream) { | ||||
|     ctx.Add("EmitStreamVertex(int({}));", ctx.var_alloc.Consume(stream)); | ||||
| } | ||||
|  | ||||
| void EmitEndPrimitive(EmitContext& ctx, const IR::Value& stream) { | ||||
|     ctx.Add("EndStreamPrimitive(int({}));", ctx.var_alloc.Consume(stream)); | ||||
| } | ||||
|  | ||||
| void EmitGetRegister(EmitContext& ctx) { | ||||
|     NotImplemented(); | ||||
| } | ||||
| @@ -156,26 +78,6 @@ void EmitSetOFlag(EmitContext& ctx) { | ||||
|     NotImplemented(); | ||||
| } | ||||
|  | ||||
| void EmitUndefU1(EmitContext& ctx, IR::Inst& inst) { | ||||
|     ctx.AddU1("{}=false;", inst); | ||||
| } | ||||
|  | ||||
| void EmitUndefU8(EmitContext& ctx, IR::Inst& inst) { | ||||
|     NotImplemented(); | ||||
| } | ||||
|  | ||||
| void EmitUndefU16(EmitContext& ctx, IR::Inst& inst) { | ||||
|     NotImplemented(); | ||||
| } | ||||
|  | ||||
| void EmitUndefU32(EmitContext& ctx, IR::Inst& inst) { | ||||
|     ctx.AddU32("{}=0u;", inst); | ||||
| } | ||||
|  | ||||
| void EmitUndefU64(EmitContext& ctx, IR::Inst& inst) { | ||||
|     NotImplemented(); | ||||
| } | ||||
|  | ||||
| void EmitGetZeroFromOp(EmitContext& ctx) { | ||||
|     NotImplemented(); | ||||
| } | ||||
|   | ||||
| @@ -9,68 +9,55 @@ | ||||
| #include "shader_recompiler/frontend/ir/value.h" | ||||
|  | ||||
| namespace Shader::Backend::GLSL { | ||||
| void EmitLoadSharedU8([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                       [[maybe_unused]] std::string_view offset) { | ||||
| void EmitLoadSharedU8(EmitContext& ctx, IR::Inst& inst, std::string_view offset) { | ||||
|     ctx.AddU32("{}=bitfieldExtract(smem[{}>>2],int({}%4)*8,8);", inst, offset, offset); | ||||
| } | ||||
|  | ||||
| void EmitLoadSharedS8([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                       [[maybe_unused]] std::string_view offset) { | ||||
| void EmitLoadSharedS8(EmitContext& ctx, IR::Inst& inst, std::string_view offset) { | ||||
|     ctx.AddS32("{}=bitfieldExtract(int(smem[{}>>2]),int({}%4)*8,8);", inst, offset, offset); | ||||
| } | ||||
|  | ||||
| void EmitLoadSharedU16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                        [[maybe_unused]] std::string_view offset) { | ||||
| void EmitLoadSharedU16(EmitContext& ctx, IR::Inst& inst, std::string_view offset) { | ||||
|     ctx.AddU32("{}=bitfieldExtract(smem[{}>>2],int(({}>>1)%2)*16,16);", inst, offset, offset); | ||||
| } | ||||
|  | ||||
| void EmitLoadSharedS16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                        [[maybe_unused]] std::string_view offset) { | ||||
| void EmitLoadSharedS16(EmitContext& ctx, IR::Inst& inst, std::string_view offset) { | ||||
|     ctx.AddS32("{}=bitfieldExtract(int(smem[{}>>2]),int(({}>>1)%2)*16,16);", inst, offset, offset); | ||||
| } | ||||
|  | ||||
| void EmitLoadSharedU32([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                        [[maybe_unused]] std::string_view offset) { | ||||
| void EmitLoadSharedU32(EmitContext& ctx, IR::Inst& inst, std::string_view offset) { | ||||
|     ctx.AddU32("{}=smem[{}>>2];", inst, offset); | ||||
| } | ||||
|  | ||||
| void EmitLoadSharedU64([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                        [[maybe_unused]] std::string_view offset) { | ||||
| void EmitLoadSharedU64(EmitContext& ctx, IR::Inst& inst, std::string_view offset) { | ||||
|     ctx.AddU32x2("{}=uvec2(smem[{}>>2],smem[({}+4)>>2]);", inst, offset, offset); | ||||
| } | ||||
|  | ||||
| void EmitLoadSharedU128([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, | ||||
|                         [[maybe_unused]] std::string_view offset) { | ||||
| void EmitLoadSharedU128(EmitContext& ctx, IR::Inst& inst, std::string_view offset) { | ||||
|     ctx.AddU32x4("{}=uvec4(smem[{}>>2],smem[({}+4)>>2],smem[({}+8)>>2],smem[({}+12)>>2]);", inst, | ||||
|                  offset, offset, offset, offset); | ||||
| } | ||||
|  | ||||
| void EmitWriteSharedU8([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] std::string_view offset, | ||||
|                        [[maybe_unused]] std::string_view value) { | ||||
| void EmitWriteSharedU8(EmitContext& ctx, std::string_view offset, std::string_view value) { | ||||
|     ctx.Add("smem[{}>>2]=bitfieldInsert(smem[{}>>2],{},int({}%4)*8,8);", offset, offset, value, | ||||
|             offset); | ||||
| } | ||||
|  | ||||
| void EmitWriteSharedU16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] std::string_view offset, | ||||
|                         [[maybe_unused]] std::string_view value) { | ||||
| void EmitWriteSharedU16(EmitContext& ctx, std::string_view offset, std::string_view value) { | ||||
|     ctx.Add("smem[{}>>2]=bitfieldInsert(smem[{}>>2],{},int(({}>>1)%2)*16,16);", offset, offset, | ||||
|             value, offset); | ||||
| } | ||||
|  | ||||
| void EmitWriteSharedU32([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] std::string_view offset, | ||||
|                         [[maybe_unused]] std::string_view value) { | ||||
| void EmitWriteSharedU32(EmitContext& ctx, std::string_view offset, std::string_view value) { | ||||
|     ctx.Add("smem[{}>>2]={};", offset, value); | ||||
| } | ||||
|  | ||||
| void EmitWriteSharedU64([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] std::string_view offset, | ||||
|                         [[maybe_unused]] std::string_view value) { | ||||
| void EmitWriteSharedU64(EmitContext& ctx, std::string_view offset, std::string_view value) { | ||||
|     ctx.Add("smem[{}>>2]={}.x;", offset, value); | ||||
|     ctx.Add("smem[({}+4)>>2]={}.y;", offset, value); | ||||
| } | ||||
|  | ||||
| void EmitWriteSharedU128([[maybe_unused]] EmitContext& ctx, | ||||
|                          [[maybe_unused]] std::string_view offset, | ||||
|                          [[maybe_unused]] std::string_view value) { | ||||
| void EmitWriteSharedU128(EmitContext& ctx, std::string_view offset, std::string_view value) { | ||||
|     ctx.Add("smem[{}>>2]={}.x;", offset, value); | ||||
|     ctx.Add("smem[({}+4)>>2]={}.y;", offset, value); | ||||
|     ctx.Add("smem[({}+8)>>2]={}.z;", offset, value); | ||||
|   | ||||
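The shared-memory writes use the mirror-image operation, bitfieldInsert, with the same bit offsets as the loads. A host-side model of EmitWriteSharedU16 (again assuming 32-bit little-endian words) makes the halfword selection explicit:

#include <cstdint>
#include <cstdio>

// Host-side equivalent of the bitfieldInsert in EmitWriteSharedU16:
// replace the 16-bit halfword selected by ((offset>>1)%2)*16 inside one word.
static std::uint32_t insert_u16(std::uint32_t word, std::uint32_t value, std::uint32_t offset) {
    const std::uint32_t shift = ((offset >> 1) % 2) * 16;
    const std::uint32_t mask = 0xffffu << shift;
    return (word & ~mask) | ((value & 0xffffu) << shift);
}

int main() {
    std::uint32_t word = 0x11112222u;
    word = insert_u16(word, 0xBEEF, 2); // offset 2 selects the upper halfword
    std::printf("0x%08X\n", static_cast<unsigned>(word)); // prints 0xBEEF2222
}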
| @@ -0,0 +1,61 @@ | ||||
| // Copyright 2021 yuzu Emulator Project | ||||
| // Licensed under GPLv2 or any later version | ||||
| // Refer to the license.txt file included. | ||||
|  | ||||
| #include <string_view> | ||||
|  | ||||
| #include "shader_recompiler/backend/glsl/emit_context.h" | ||||
| #include "shader_recompiler/backend/glsl/emit_glsl_instructions.h" | ||||
| #include "shader_recompiler/frontend/ir/value.h" | ||||
|  | ||||
| namespace Shader::Backend::GLSL { | ||||
|  | ||||
| void EmitPhi(EmitContext& ctx, IR::Inst& phi) { | ||||
|     const size_t num_args{phi.NumArgs()}; | ||||
|     for (size_t i = 0; i < num_args; ++i) { | ||||
|         ctx.var_alloc.Consume(phi.Arg(i)); | ||||
|     } | ||||
|     if (!phi.Definition<Id>().is_valid) { | ||||
|         // The phi node wasn't forward defined | ||||
|         ctx.var_alloc.PhiDefine(phi, phi.Arg(0).Type()); | ||||
|     } | ||||
| } | ||||
|  | ||||
| void EmitVoid(EmitContext&) {} | ||||
|  | ||||
| void EmitReference(EmitContext& ctx, const IR::Value& value) { | ||||
|     ctx.var_alloc.Consume(value); | ||||
| } | ||||
|  | ||||
| void EmitPhiMove(EmitContext& ctx, const IR::Value& phi_value, const IR::Value& value) { | ||||
|     IR::Inst& phi{*phi_value.InstRecursive()}; | ||||
|     const auto phi_type{phi.Arg(0).Type()}; | ||||
|     if (!phi.Definition<Id>().is_valid) { | ||||
|         // The phi node wasn't forward defined | ||||
|         ctx.var_alloc.PhiDefine(phi, phi_type); | ||||
|     } | ||||
|     const auto phi_reg{ctx.var_alloc.Consume(IR::Value{&phi})}; | ||||
|     const auto val_reg{ctx.var_alloc.Consume(value)}; | ||||
|     if (phi_reg == val_reg) { | ||||
|         return; | ||||
|     } | ||||
|     ctx.Add("{}={};", phi_reg, val_reg); | ||||
| } | ||||
|  | ||||
| void EmitPrologue(EmitContext&) { | ||||
|     // TODO | ||||
| } | ||||
|  | ||||
| void EmitEpilogue(EmitContext&) { | ||||
|     // TODO | ||||
| } | ||||
|  | ||||
| void EmitEmitVertex(EmitContext& ctx, const IR::Value& stream) { | ||||
|     ctx.Add("EmitStreamVertex(int({}));", ctx.var_alloc.Consume(stream)); | ||||
| } | ||||
|  | ||||
| void EmitEndPrimitive(EmitContext& ctx, const IR::Value& stream) { | ||||
|     ctx.Add("EndStreamPrimitive(int({}));", ctx.var_alloc.Consume(stream)); | ||||
| } | ||||
|  | ||||
| } // namespace Shader::Backend::GLSL | ||||
|   | ||||
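EmitPhiMove lowers SSA phi nodes to plain assignments at the end of each predecessor block, and skips the statement entirely when both sides already share a variable. With invented register names, the emitted line is just an assignment (assumes a C++20 <format> library):

#include <format>
#include <iostream>
#include <string>

int main() {
    const std::string phi_reg = "u32_3"; // invented name for the phi's variable
    const std::string val_reg = "u32_9"; // invented name for the incoming value
    if (phi_reg != val_reg) {
        std::cout << std::format("{}={};", phi_reg, val_reg) << '\n'; // u32_3=u32_9;
    }
}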
| @@ -0,0 +1,32 @@ | ||||
| // Copyright 2021 yuzu Emulator Project | ||||
| // Licensed under GPLv2 or any later version | ||||
| // Refer to the license.txt file included. | ||||
|  | ||||
| #include <string_view> | ||||
|  | ||||
| #include "shader_recompiler/backend/glsl/emit_context.h" | ||||
| #include "shader_recompiler/backend/glsl/emit_glsl_instructions.h" | ||||
|  | ||||
| namespace Shader::Backend::GLSL { | ||||
|  | ||||
| void EmitUndefU1(EmitContext& ctx, IR::Inst& inst) { | ||||
|     ctx.AddU1("{}=false;", inst); | ||||
| } | ||||
|  | ||||
| void EmitUndefU8(EmitContext& ctx, IR::Inst& inst) { | ||||
|     ctx.AddU32("{}=0u;", inst); | ||||
| } | ||||
|  | ||||
| void EmitUndefU16(EmitContext& ctx, IR::Inst& inst) { | ||||
|     ctx.AddU32("{}=0u;", inst); | ||||
| } | ||||
|  | ||||
| void EmitUndefU32(EmitContext& ctx, IR::Inst& inst) { | ||||
|     ctx.AddU32("{}=0u;", inst); | ||||
| } | ||||
|  | ||||
| void EmitUndefU64(EmitContext& ctx, IR::Inst& inst) { | ||||
|     ctx.AddU64("{}=0u;", inst); | ||||
| } | ||||
|  | ||||
| } // namespace Shader::Backend::GLSL | ||||
|   | ||||