Run clang-format
@@ -321,11 +321,12 @@ Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, Id vertex) {
     case IR::Attribute::PositionY:
     case IR::Attribute::PositionZ:
     case IR::Attribute::PositionW:
-        return ctx.OpLoad(ctx.F32[1], ctx.need_input_position_indirect ?
-                                      AttrPointer(ctx, ctx.input_f32, vertex, ctx.input_position,
-                                                  ctx.u32_zero_value, ctx.Const(element))
-                                      : AttrPointer(ctx, ctx.input_f32, vertex, ctx.input_position,
-                                                    ctx.Const(element)));
+        return ctx.OpLoad(
+            ctx.F32[1],
+            ctx.need_input_position_indirect
+                ? AttrPointer(ctx, ctx.input_f32, vertex, ctx.input_position, ctx.u32_zero_value,
+                              ctx.Const(element))
+                : AttrPointer(ctx, ctx.input_f32, vertex, ctx.input_position, ctx.Const(element)));
     case IR::Attribute::InstanceId:
         if (ctx.profile.support_vertex_instance_id) {
             return ctx.OpBitcast(ctx.F32[1], ctx.OpLoad(ctx.U32[1], ctx.instance_id));
@@ -729,7 +729,7 @@ void EmitContext::DefineAttributeMemAccess(const Info& info) {
                     else
                         return OpAccessChain(input_f32, input_position, u32_zero_value,
                                              masked_index);
-                }  else {
+                } else {
                     if (is_array)
                         return OpAccessChain(input_f32, input_position, vertex, masked_index);
                     else
@@ -1390,7 +1390,8 @@ void EmitContext::DefineInputs(const IR::Program& program) {
                            static_cast<unsigned>(spv::BuiltIn::Position));
             Decorate(input_position_struct, spv::Decoration::Block);
         } else {
-            const spv::BuiltIn built_in{is_fragment ? spv::BuiltIn::FragCoord : spv::BuiltIn::Position};
+            const spv::BuiltIn built_in{is_fragment ? spv::BuiltIn::FragCoord
+                                                    : spv::BuiltIn::Position};
             input_position = DefineInput(*this, F32[4], true, built_in);
 
             if (profile.support_geometry_shader_passthrough) {
@@ -172,7 +172,10 @@ std::map<IR::Attribute, IR::Attribute> GenerateLegacyToGenericMappings(
     return mapping;
 }
 
-void EmitGeometryPassthrough(IR::IREmitter& ir, const IR::Program& program, const Shader::VaryingState &passthrough_mask, bool passthrough_position, std::optional<IR::Attribute> passthrough_layer_attr) {
+void EmitGeometryPassthrough(IR::IREmitter& ir, const IR::Program& program,
+                             const Shader::VaryingState& passthrough_mask,
+                             bool passthrough_position,
+                             std::optional<IR::Attribute> passthrough_layer_attr) {
     for (u32 i = 0; i < program.output_vertices; i++) {
         // Assign generics from input
         for (u32 j = 0; j < 32; j++) {
@@ -198,7 +201,8 @@ void EmitGeometryPassthrough(IR::IREmitter& ir, const IR::Program& program, cons
 
         if (passthrough_layer_attr) {
             // Assign layer
-            ir.SetAttribute(IR::Attribute::Layer, ir.GetAttribute(*passthrough_layer_attr), ir.Imm32(0));
+            ir.SetAttribute(IR::Attribute::Layer, ir.GetAttribute(*passthrough_layer_attr),
+                            ir.Imm32(0));
         }
 
         // Emit vertex
@@ -209,21 +213,23 @@ void EmitGeometryPassthrough(IR::IREmitter& ir, const IR::Program& program, cons
 
 u32 GetOutputTopologyVertices(OutputTopology output_topology) {
     switch (output_topology) {
-        case OutputTopology::PointList:
-            return 1;
-        case OutputTopology::LineStrip:
-            return 2;
-        default:
-            return 3;
+    case OutputTopology::PointList:
+        return 1;
+    case OutputTopology::LineStrip:
+        return 2;
+    default:
+        return 3;
     }
 }
 
 void LowerGeometryPassthrough(const IR::Program& program, const HostTranslateInfo& host_info) {
-    for (IR::Block *const block : program.blocks) {
-        for (IR::Inst &inst : block->Instructions()) {
+    for (IR::Block* const block : program.blocks) {
+        for (IR::Inst& inst : block->Instructions()) {
             if (inst.GetOpcode() == IR::Opcode::Epilogue) {
                 IR::IREmitter ir{*block, IR::Block::InstructionList::s_iterator_to(inst)};
-                EmitGeometryPassthrough(ir, program, program.info.passthrough, program.info.passthrough.AnyComponent(IR::Attribute::PositionX), {});
+                EmitGeometryPassthrough(
+                    ir, program, program.info.passthrough,
+                    program.info.passthrough.AnyComponent(IR::Attribute::PositionX), {});
             }
         }
     }
@@ -407,7 +413,6 @@ IR::Program GenerateGeometryPassthrough(ObjectPool<IR::Inst>& inst_pool,
     program.output_topology = output_topology;
     program.output_vertices = GetOutputTopologyVertices(output_topology);
 
-
     program.is_geometry_passthrough = false;
     program.info.loads.mask = source_program.info.stores.mask;
     program.info.stores.mask = source_program.info.stores.mask;
@@ -420,7 +425,8 @@ IR::Program GenerateGeometryPassthrough(ObjectPool<IR::Inst>& inst_pool,
     node.data.block = current_block;
 
     IR::IREmitter ir{*current_block};
-    EmitGeometryPassthrough(ir, program, program.info.stores, true, source_program.info.emulated_layer);
+    EmitGeometryPassthrough(ir, program, program.info.stores, true,
+                            source_program.info.emulated_layer);
 
     IR::Block* return_block{block_pool.Create(inst_pool)};
     IR::IREmitter{*return_block}.Epilogue();
@@ -15,8 +15,9 @@ struct HostTranslateInfo {
     bool needs_demote_reorder{}; ///< True when the device needs DemoteToHelperInvocation reordered
     bool support_snorm_render_buffer{};  ///< True when the device supports SNORM render buffers
     bool support_viewport_index_layer{}; ///< True when the device supports gl_Layer in VS
-    u32 min_ssbo_alignment{};  ///< Minimum alignment supported by the device for SSBOs
-    bool support_geometry_shader_passthrough{}; ///< True when the device supports geometry passthrough shaders
+    u32 min_ssbo_alignment{};            ///< Minimum alignment supported by the device for SSBOs
+    bool support_geometry_shader_passthrough{}; ///< True when the device supports geometry
+                                                ///< passthrough shaders
 };
 
 } // namespace Shader
@@ -538,7 +538,8 @@ void GlobalMemoryToStorageBufferPass(IR::Program& program, const HostTranslateIn
         const IR::U32 index{IR::Value{static_cast<u32>(info.set.index_of(it))}};
         IR::Block* const block{storage_inst.block};
         IR::Inst* const inst{storage_inst.inst};
-        const IR::U32 offset{StorageOffset(*block, *inst, storage_buffer, host_info.min_ssbo_alignment)};
+        const IR::U32 offset{
+            StorageOffset(*block, *inst, storage_buffer, host_info.min_ssbo_alignment)};
         Replace(*block, *inst, index, offset);
     }
 }
@@ -1941,7 +1941,8 @@ typename BufferCache<P>::Binding BufferCache<P>::StorageBufferBinding(GPUVAddr s
     const u32 alignment = runtime.GetStorageBufferAlignment();
 
     const GPUVAddr aligned_gpu_addr = Common::AlignDown(gpu_addr, alignment);
-    const u32 aligned_size = Common::AlignUp(static_cast<u32>(gpu_addr - aligned_gpu_addr) + size, alignment);
+    const u32 aligned_size =
+        Common::AlignUp(static_cast<u32>(gpu_addr - aligned_gpu_addr) + size, alignment);
 
     const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(aligned_gpu_addr);
     if (!cpu_addr || size == 0) {
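
Note: the re-wrapping in these hunks is consistent with clang-format using a 100-column limit, 4-space indentation, and unindented case labels. Those values are inferred from the diff rather than read from the repository's .clang-format, and the commit itself was presumably produced by running the clang-format tool over the tree. Purely as an illustration, a minimal LibFormat sketch that applies equivalent options programmatically (assumed style values, not the project's actual configuration):

#include <string>
#include <vector>

#include "clang/Format/Format.h"
#include "clang/Tooling/Core/Replacement.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"

int main() {
    // Deliberately mis-formatted input to run through LibFormat.
    const std::string code =
        "void f(){int value=0;switch(value){case 0:return;default:return;}}\n";

    // Start from the LLVM preset and override the options the hunks above appear
    // to reflect; the repository's real .clang-format may differ.
    clang::format::FormatStyle style = clang::format::getLLVMStyle();
    style.ColumnLimit = 100;        // long calls wrap at 100 columns
    style.IndentWidth = 4;          // 4-space indentation
    style.IndentCaseLabels = false; // case labels align with the switch

    // Format the whole buffer and apply the resulting replacements.
    const std::vector<clang::tooling::Range> ranges{
        clang::tooling::Range(0, static_cast<unsigned>(code.size()))};
    const clang::tooling::Replacements replacements =
        clang::format::reformat(style, code, ranges);
    llvm::Expected<std::string> formatted =
        clang::tooling::applyAllReplacements(code, replacements);
    if (!formatted) {
        llvm::errs() << llvm::toString(formatted.takeError()) << '\n';
        return 1;
    }
    llvm::outs() << *formatted;
    return 0;
}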