shader_recompiler, video_core: Resolve clang errors
Silences the following warnings-turned-errors:

-Wsign-conversion
-Wunused-private-field
-Wbraced-scalar-init
-Wunused-variable

And some other errors.
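For context, each of the listed diagnostics can be reproduced in a few lines. A hypothetical standalone file, compiled with the warnings promoted to errors, might look like:

```cpp
// warnings.cpp -- hypothetical reproduction of each listed diagnostic.
// Compile with:
//   clang++ -std=c++20 -Werror -Wsign-conversion -Wunused-private-field \
//           -Wbraced-scalar-init -Wunused-variable -c warnings.cpp

class Widget {
public:
    explicit Widget(int& counter) : counter_{counter} {}

private:
    int& counter_; // -Wunused-private-field: stored but never read
};

bool IsEnabled(unsigned flags) {
    return {flags != 0}; // -Wbraced-scalar-init: braces around scalar return
}

int Truncate(unsigned value) {
    int unused = 0; // -Wunused-variable: never referenced again
    return value;   // -Wsign-conversion: 'unsigned' to 'int'
}
```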
@@ -59,7 +59,7 @@ public:
     }
 
     std::string code;
-    RegAlloc reg_alloc{*this};
+    RegAlloc reg_alloc{};
     const Info& info;
     const Profile& profile;
     const RuntimeInfo& runtime_info;

@@ -86,7 +86,7 @@ struct ScalarF64 : Value {};
 
 class RegAlloc {
 public:
-    RegAlloc(EmitContext& ctx_) : ctx{ctx_} {}
+    RegAlloc() = default;
 
     Register Define(IR::Inst& inst);
 
@@ -142,7 +142,6 @@ private:
 
     void Free(Id id);
 
-    EmitContext& ctx;
     size_t num_used_registers{};
     size_t num_used_long_registers{};
    std::bitset<NUM_REGS> register_use{};
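The `EmitContext& ctx` member of `RegAlloc` was stored but never read, which clang reports under -Wunused-private-field (initialization in the constructor does not count as a use); with the field gone, the constructor has nothing left to initialize and can be defaulted. A minimal sketch of the pattern, with hypothetical names:

```cpp
// Hypothetical reduction: a private reference member that is written in the
// constructor but never read afterwards trips -Wunused-private-field.
class Before {
public:
    explicit Before(int& ctx) : ctx_{ctx} {}

private:
    int& ctx_; // warning: private field 'ctx_' is not used
};

class After {
public:
    After() = default; // no state left to initialize
};
```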
@@ -22,7 +22,7 @@ void Compare(EmitContext& ctx, IR::Inst& inst, std::string_view lhs, std::string
 }
 
 bool IsPrecise(const IR::Inst& inst) {
-    return {inst.Flags<IR::FpControl>().no_contraction};
+    return inst.Flags<IR::FpControl>().no_contraction;
 }
 } // Anonymous namespace
 
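Wrapping a scalar return value in braces list-initializes the `bool`, which clang flags with -Wbraced-scalar-init; returning the expression directly is equivalent. A minimal reproduction:

```cpp
// Compile with: clang++ -std=c++20 -Wbraced-scalar-init -c example.cpp
bool HasFlag(unsigned mask, unsigned bit) {
    return {(mask & bit) != 0}; // warning: braces around scalar initializer
}

bool HasFlagFixed(unsigned mask, unsigned bit) {
    return (mask & bit) != 0; // same value, no scalar list-initialization
}
```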
@@ -109,7 +109,7 @@ private:
             return;
         }
         if (offset.IsImmediate()) {
-            Add(spv::ImageOperandsMask::ConstOffset, ctx.SConst(offset.U32()));
+            Add(spv::ImageOperandsMask::ConstOffset, ctx.SConst(static_cast<s32>(offset.U32())));
             return;
         }
         IR::Inst* const inst{offset.InstRecursive()};
@@ -117,16 +117,21 @@ private:
             switch (inst->GetOpcode()) {
             case IR::Opcode::CompositeConstructU32x2:
                 Add(spv::ImageOperandsMask::ConstOffset,
-                    ctx.SConst(inst->Arg(0).U32(), inst->Arg(1).U32()));
+                    ctx.SConst(static_cast<s32>(inst->Arg(0).U32()),
+                               static_cast<s32>(inst->Arg(1).U32())));
                 return;
             case IR::Opcode::CompositeConstructU32x3:
                 Add(spv::ImageOperandsMask::ConstOffset,
-                    ctx.SConst(inst->Arg(0).U32(), inst->Arg(1).U32(), inst->Arg(2).U32()));
+                    ctx.SConst(static_cast<s32>(inst->Arg(0).U32()),
+                               static_cast<s32>(inst->Arg(1).U32()),
+                               static_cast<s32>(inst->Arg(2).U32())));
                 return;
             case IR::Opcode::CompositeConstructU32x4:
                 Add(spv::ImageOperandsMask::ConstOffset,
-                    ctx.SConst(inst->Arg(0).U32(), inst->Arg(1).U32(), inst->Arg(2).U32(),
-                               inst->Arg(3).U32()));
+                    ctx.SConst(static_cast<s32>(inst->Arg(0).U32()),
+                               static_cast<s32>(inst->Arg(1).U32()),
+                               static_cast<s32>(inst->Arg(2).U32()),
+                               static_cast<s32>(inst->Arg(3).U32())));
                 return;
             default:
                 break;
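`SConst` builds signed constants (SPIR-V image `ConstOffset` operands are signed), so handing it the raw `u32` immediates converts signedness implicitly and trips -Wsign-conversion; the casts keep the bit pattern and make the reinterpretation explicit. A reduced sketch, with a stand-in for `SConst` whose signed signature is an assumption here:

```cpp
#include <cstdint>

using s32 = std::int32_t;
using u32 = std::uint32_t;

// Stand-in for EmitContext::SConst, assumed to take signed components.
s32 SConst(s32 value) { return value; }

void EmitConstOffset(u32 raw) {
    // SConst(raw);                // -Wsign-conversion: 'u32' to 's32'
    SConst(static_cast<s32>(raw)); // same bits, intent made explicit
}
```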
@@ -67,7 +67,8 @@ constexpr OpcodeMeta META_TABLE[]{
 };
 constexpr size_t CalculateNumArgsOf(Opcode op) {
     const auto& arg_types{META_TABLE[static_cast<size_t>(op)].arg_types};
-    return std::distance(arg_types.begin(), std::ranges::find(arg_types, Type::Void));
+    return static_cast<size_t>(
+        std::distance(arg_types.begin(), std::ranges::find(arg_types, Type::Void)));
 }
 
 constexpr u8 NUM_ARGS[]{
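`std::distance` returns the signed `std::ptrdiff_t`, so returning it from a function declared `size_t` is another implicit sign conversion. The distance from `begin()` to a find result can never be negative, which makes the cast lossless. The same pattern in isolation:

```cpp
#include <algorithm>
#include <array>
#include <cstddef>
#include <iterator>

// Index of the first zero (or the array size if none): the distance is never
// negative here, so casting the signed ptrdiff_t result to size_t is safe.
constexpr std::size_t IndexOfFirstZero(const std::array<int, 8>& values) {
    return static_cast<std::size_t>(
        std::distance(values.begin(), std::ranges::find(values, 0)));
}

static_assert(IndexOfFirstZero({1, 2, 3, 0, 5, 6, 7, 8}) == 3);
```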
@@ -161,7 +161,6 @@ private:
     Environment& env;
     ObjectPool<Block>& block_pool;
     boost::container::small_vector<Function, 1> functions;
-    FunctionId current_function_id{0};
     Location program_start;
     bool exits_to_dispatcher{};
     Block* dispatch_block{};
@@ -313,9 +313,7 @@ bool NeedsLift(Node goto_stmt, Node label_stmt) noexcept {
 
 class GotoPass {
 public:
-    explicit GotoPass(Flow::CFG& cfg, ObjectPool<IR::Inst>& inst_pool_,
-                      ObjectPool<IR::Block>& block_pool_, ObjectPool<Statement>& stmt_pool)
-        : inst_pool{inst_pool_}, block_pool{block_pool_}, pool{stmt_pool} {
+    explicit GotoPass(Flow::CFG& cfg, ObjectPool<Statement>& stmt_pool) : pool{stmt_pool} {
         std::vector gotos{BuildTree(cfg)};
         for (const Node& goto_stmt : gotos | std::views::reverse) {
             RemoveGoto(goto_stmt);
@@ -616,8 +614,6 @@ private:
         return parent_tree.insert(std::next(loop), *new_goto);
     }
 
-    ObjectPool<IR::Inst>& inst_pool;
-    ObjectPool<IR::Block>& block_pool;
     ObjectPool<Statement>& pool;
     Statement root_stmt{FunctionTag{}};
 };
@@ -864,7 +860,6 @@ private:
     ObjectPool<IR::Block>& block_pool;
     Environment& env;
     IR::AbstractSyntaxList& syntax_list;
-    u32 loop_id{};
 
 // TODO: C++20 Remove this when all compilers support constexpr std::vector
 #if __cpp_lib_constexpr_vector >= 201907
@@ -878,7 +873,7 @@ private:
 IR::AbstractSyntaxList BuildASL(ObjectPool<IR::Inst>& inst_pool, ObjectPool<IR::Block>& block_pool,
                                 Environment& env, Flow::CFG& cfg) {
     ObjectPool<Statement> stmt_pool{64};
-    GotoPass goto_pass{cfg, inst_pool, block_pool, stmt_pool};
+    GotoPass goto_pass{cfg, stmt_pool};
     Statement& root{goto_pass.RootStatement()};
     IR::AbstractSyntaxList syntax_list;
     TranslatePass{inst_pool, block_pool, stmt_pool, env, root, syntax_list};
@@ -59,14 +59,14 @@ IR::U32U64 ApplyIntegerAtomOp(IR::IREmitter& ir, const IR::U32U64& offset, const
 IR::Value ApplyFpAtomOp(IR::IREmitter& ir, const IR::U64& offset, const IR::Value& op_b, AtomOp op,
                         AtomSize size) {
     static constexpr IR::FpControl f16_control{
-        .no_contraction{false},
-        .rounding{IR::FpRounding::RN},
-        .fmz_mode{IR::FmzMode::DontCare},
+        .no_contraction = false,
+        .rounding = IR::FpRounding::RN,
+        .fmz_mode = IR::FmzMode::DontCare,
     };
     static constexpr IR::FpControl f32_control{
-        .no_contraction{false},
-        .rounding{IR::FpRounding::RN},
-        .fmz_mode{IR::FmzMode::FTZ},
+        .no_contraction = false,
+        .rounding = IR::FpRounding::RN,
+        .fmz_mode = IR::FmzMode::FTZ,
     };
     switch (op) {
     case AtomOp::ADD:
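Brace-initializing scalar and enum members inside a designated initializer (`.field{value}`) is what clang flags with -Wbraced-scalar-init here; `.field = value` produces the same object without the diagnostic. A reduced sketch with hypothetical types:

```cpp
// Hypothetical reduction of the FpControl change.
enum class Rounding { RN, RZ };

struct FpControl {
    bool no_contraction;
    Rounding rounding;
};

constexpr FpControl braced{
    .no_contraction{false}, // may warn: braces around scalar initializer
    .rounding{Rounding::RN},
};

constexpr FpControl assigned{
    .no_contraction = false, // same value, no diagnostic
    .rounding = Rounding::RN,
};
```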
@@ -104,7 +104,9 @@ void I2F(TranslatorVisitor& v, u64 insn, IR::U32U64 src) {
         .rounding = CastFpRounding(i2f.fp_rounding),
         .fmz_mode = IR::FmzMode::DontCare,
     };
-    auto value{v.ir.ConvertIToF(dst_bitsize, conversion_src_bitsize, is_signed, src, fp_control)};
+    auto value{v.ir.ConvertIToF(static_cast<size_t>(dst_bitsize),
+                                static_cast<size_t>(conversion_src_bitsize), is_signed, src,
+                                fp_control)};
     if (i2f.neg != 0) {
         if (i2f.abs != 0 || !is_signed) {
             // We know the value is positive
@@ -80,10 +80,10 @@ void TranslatorVisitor::ALD(u64 insn) {
         for (u32 element = 0; element < num_elements; ++element) {
             if (ald.patch != 0) {
                 const IR::Patch patch{offset / 4 + element};
-                F(ald.dest_reg + element, ir.GetPatch(patch));
+                F(ald.dest_reg + static_cast<int>(element), ir.GetPatch(patch));
             } else {
                 const IR::Attribute attr{offset / 4 + element};
-                F(ald.dest_reg + element, ir.GetAttribute(attr, vertex));
+                F(ald.dest_reg + static_cast<int>(element), ir.GetAttribute(attr, vertex));
             }
         }
         return;
@@ -92,7 +92,7 @@ void TranslatorVisitor::ALD(u64 insn) {
         throw NotImplementedException("Indirect patch read");
     }
     HandleIndexed(*this, ald.index_reg, num_elements, [&](u32 element, IR::U32 final_offset) {
-        F(ald.dest_reg + element, ir.GetAttributeIndexed(final_offset, vertex));
+        F(ald.dest_reg + static_cast<int>(element), ir.GetAttributeIndexed(final_offset, vertex));
     });
 }
 
@@ -121,10 +121,10 @@ void TranslatorVisitor::AST(u64 insn) {
         for (u32 element = 0; element < num_elements; ++element) {
             if (ast.patch != 0) {
                 const IR::Patch patch{offset / 4 + element};
-                ir.SetPatch(patch, F(ast.src_reg + element));
+                ir.SetPatch(patch, F(ast.src_reg + static_cast<int>(element)));
             } else {
                 const IR::Attribute attr{offset / 4 + element};
-                ir.SetAttribute(attr, F(ast.src_reg + element), vertex);
+                ir.SetAttribute(attr, F(ast.src_reg + static_cast<int>(element)), vertex);
             }
         }
         return;
@@ -133,7 +133,7 @@ void TranslatorVisitor::AST(u64 insn) {
         throw NotImplementedException("Indexed tessellation patch store");
     }
     HandleIndexed(*this, ast.index_reg, num_elements, [&](u32 element, IR::U32 final_offset) {
-        ir.SetAttributeIndexed(final_offset, F(ast.src_reg + element), vertex);
+        ir.SetAttributeIndexed(final_offset, F(ast.src_reg + static_cast<int>(element)), vertex);
     });
 }
 
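The register arithmetic here mixes the unsigned loop counter with `IR::Reg`'s offset operator, which (as these casts suggest) takes a signed `int`; `element` is a small component index, so `static_cast<int>` cannot overflow. A stand-in sketch of the pattern, with the `IR::Reg` API assumed:

```cpp
#include <cstdint>

// Hypothetical stand-in for shader_recompiler's IR::Reg, assumed to offer only
// signed-offset arithmetic.
enum class Reg : std::uint32_t { R0 = 0 };

constexpr Reg operator+(Reg reg, int offset) {
    return static_cast<Reg>(static_cast<int>(reg) + offset);
}

void StoreElements(Reg base) {
    for (std::uint32_t element = 0; element < 4; ++element) {
        // base + element;                               // -Wsign-conversion: u32 -> int
        const Reg dest{base + static_cast<int>(element)}; // element < 4, cast is lossless
        (void)dest;
    }
}
```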
@@ -69,9 +69,6 @@ TextureType GetType(Type type) {
 }
 
 IR::Value MakeCoords(TranslatorVisitor& v, IR::Reg reg, Type type) {
-    const auto array{[&](int index) {
-        return v.ir.BitFieldExtract(v.X(reg + index), v.ir.Imm32(0), v.ir.Imm32(16));
-    }};
     switch (type) {
     case Type::_1D:
     case Type::BUFFER_1D:
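A named lambda is an ordinary local variable, so once nothing in `MakeCoords` calls `array`, clang reports it under -Wunused-variable and deleting it is the fix. Minimal reproduction:

```cpp
// Compile with: clang++ -std=c++20 -Wunused-variable -c example.cpp
int Pick(int which, int a, int b) {
    const auto helper{[&](int index) { return index * 2; }}; // warning: unused variable 'helper'
    return which != 0 ? a : b;
}
```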
@@ -160,10 +160,10 @@ unsigned SwizzleMask(u64 swizzle) {
 IR::Value MakeColor(IR::IREmitter& ir, IR::Reg reg, int num_regs) {
     std::array<IR::U32, 4> colors;
     for (int i = 0; i < num_regs; ++i) {
-        colors[i] = ir.GetReg(reg + i);
+        colors[static_cast<size_t>(i)] = ir.GetReg(reg + i);
     }
     for (int i = num_regs; i < 4; ++i) {
-        colors[i] = ir.Imm32(0);
+        colors[static_cast<size_t>(i)] = ir.Imm32(0);
     }
     return ir.CompositeConstruct(colors[0], colors[1], colors[2], colors[3]);
 }
@@ -211,12 +211,12 @@ void TranslatorVisitor::SULD(u64 insn) {
     if (is_typed) {
         const int num_regs{SizeInRegs(suld.size)};
         for (int i = 0; i < num_regs; ++i) {
-            X(dest_reg + i, IR::U32{ir.CompositeExtract(result, i)});
+            X(dest_reg + i, IR::U32{ir.CompositeExtract(result, static_cast<size_t>(i))});
         }
     } else {
         const unsigned mask{SwizzleMask(suld.swizzle)};
         const int bits{std::popcount(mask)};
-        if (!IR::IsAligned(dest_reg, bits == 3 ? 4 : bits)) {
+        if (!IR::IsAligned(dest_reg, bits == 3 ? 4 : static_cast<size_t>(bits))) {
             throw NotImplementedException("Unaligned destination register");
         }
         for (unsigned component = 0; component < 4; ++component) {
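`std::array::operator[]` takes the unsigned `size_type`, so subscripting with the `int` loop counter is yet another sign conversion; the loop bounds keep `i` non-negative, which makes the cast safe. The pattern in isolation (assuming the caller guarantees `num_regs <= 4`, as in `MakeColor`):

```cpp
#include <array>
#include <cstddef>

// Fill the first num_regs entries; operator[] wants an unsigned index, so the
// signed loop counter is cast explicitly. i >= 0 holds on every iteration.
std::array<int, 4> FillColors(int num_regs) {
    std::array<int, 4> colors{};
    for (int i = 0; i < num_regs; ++i) {
        colors[static_cast<std::size_t>(i)] = i + 1;
    }
    return colors;
}
```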
@@ -314,8 +314,8 @@ std::optional<StorageBufferAddr> Track(const IR::Value& value, const Bias* bias)
             return std::nullopt;
         }
         const StorageBufferAddr storage_buffer{
-            .index{index.U32()},
-            .offset{offset.U32()},
+            .index = index.U32(),
+            .offset = offset.U32(),
         };
         if (!Common::IsAligned(storage_buffer.offset, 16)) {
             // The SSBO pointer has to be aligned
@@ -484,7 +484,7 @@ void GlobalMemoryToStorageBufferPass(IR::Program& program) {
             .cbuf_index = storage_buffer.index,
             .cbuf_offset = storage_buffer.offset,
             .count = 1,
-            .is_written{info.writes.contains(storage_buffer)},
+            .is_written = info.writes.contains(storage_buffer),
         });
     }
     for (const StorageInst& storage_inst : info.to_replace) {
@@ -104,9 +104,7 @@ public:
 
     template <typename Spec>
     static auto MakeConfigureSpecFunc() {
-        return [](GraphicsPipeline* pipeline, bool is_indexed) {
-            pipeline->ConfigureImpl<Spec>(is_indexed);
-        };
+        return [](GraphicsPipeline* pl, bool is_indexed) { pl->ConfigureImpl<Spec>(is_indexed); };
     }
 
 private: