renderer_vulkan: Fix some validation errors
* Temporarily add glm until I figure out how to fix the alignment
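Editor's note on the alignment remark above (not part of the commit): push-constant blocks follow GLSL block layout rules, where matrix columns are padded to 16 bytes, so a host-side std::array<float, 3 * 2> never lines up with a shader-side mat3x2. Replacing the matrix with a mat4 mirrored by glm::mat4 gives the same 64-byte layout on both sides. A minimal illustrative sketch; the struct name is hypothetical, not from the commit:

    #include <glm/glm.hpp>

    // Host-side mirror of the DrawInfo push-constant block. With a mat4 the matrix
    // occupies exactly 64 bytes on both the C++ and the GLSL side, so no manual
    // padding is needed.
    struct PresentPushConstants {
        glm::mat4 modelview;   // matches "mat4 modelview_matrix"
        float i_resolution[4]; // vec4
        float o_resolution[4]; // vec4
        int screen_id_l;
        int screen_id_r;
        int layer;
        int reverse_interlaced;
    };
    static_assert(sizeof(PresentPushConstants) == 64 + 16 + 16 + 4 * sizeof(int));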
.gitmodules (vendored) | 3
@@ -64,3 +64,6 @@
 [submodule "glslang"]
     path = externals/glslang
     url = https://github.com/KhronosGroup/glslang
+[submodule "glm"]
+    path = externals/glm
+    url = https://github.com/g-truc/glm
@@ -9,6 +9,7 @@ cmake_policy(SET CMP0069 NEW)
 list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMakeModules")
 list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/externals/cmake-modules")
 include(DownloadExternals)
+include(GNUInstallDirs)
 include(CMakeDependentOption)
 
 project(citra LANGUAGES C CXX ASM)
@@ -157,7 +158,7 @@ message(STATUS "Target architecture: ${ARCHITECTURE}")
 # Configure C++ standard
 # ===========================
 
-set(CMAKE_CXX_STANDARD 23)
+set(CMAKE_CXX_STANDARD 20)
 set(CMAKE_CXX_STANDARD_REQUIRED ON)
 
 # set up output paths for executable binaries
externals/CMakeLists.txt (vendored) | 3
@@ -60,6 +60,9 @@ add_subdirectory(glad)
 # glslang
 add_subdirectory(glslang)
 
+# glm
+add_subdirectory(glm)
+
 # inih
 add_subdirectory(inih)
 
externals/glm (vendored submodule) | 1
Submodule externals/glm added at cc98465e35
@@ -103,6 +103,7 @@ else()
 
 if (MINGW)
     add_definitions(-DMINGW_HAS_SECURE_API)
+    add_compile_options("-Wa,-mbig-obj")
     if (COMPILE_WITH_DWARF)
         add_compile_options("-gdwarf")
     endif()
@@ -263,6 +263,10 @@ target_link_libraries(citra-qt PRIVATE audio_core common core input_common netwo
 target_link_libraries(citra-qt PRIVATE Boost::boost glad nihstro-headers Qt5::Widgets Qt5::Multimedia)
 target_link_libraries(citra-qt PRIVATE ${PLATFORM_LIBRARIES} Threads::Threads)
 
+if (NOT WIN32)
+    target_include_directories(citra-qt PRIVATE ${Qt5Gui_PRIVATE_INCLUDE_DIRS})
+endif()
+
 target_compile_definitions(citra-qt PRIVATE
     # Use QStringBuilder for string concatenation to reduce
     # the overall number of temporary strings created.
@@ -25,6 +25,10 @@
 #include "video_core/renderer_base.h"
 #include "video_core/video_core.h"
 
+#if !defined(WIN32)
+#include <qpa/qplatformnativeinterface.h>
+#endif
+
 EmuThread::EmuThread(Frontend::GraphicsContext& core_context) : core_context(core_context) {}
 
 EmuThread::~EmuThread() = default;
@@ -50,6 +54,7 @@ void EmuThread::run() {
     });
 
     emit LoadProgress(VideoCore::LoadCallbackStage::Complete, 0, 0);
+    emit HideLoadingScreen();
 
     core_context.MakeCurrent();
 
@@ -77,6 +77,7 @@ add_library(common STATIC
     logging/backend.h
     logging/filter.cpp
     logging/filter.h
+    logging/formatter.h
     logging/log.h
     logging/text_formatter.cpp
     logging/text_formatter.h
@@ -31,8 +31,10 @@
 #endif
 
 // 64 bit offsets for MSVC and MinGW. MinGW also needs this for using _wstat64
+#ifndef __MINGW64__
 #define stat _stat64
 #define fstat _fstat64
+#endif
 
 #else
 #ifdef __APPLE__
@@ -4,8 +4,8 @@
 
 #pragma once
 
-#include <type_traits>
 #include <fmt/format.h>
+#include <type_traits>
 
 // adapted from https://github.com/fmtlib/fmt/issues/2704
 // a generic formatter for enum classes
@@ -972,7 +972,7 @@ unsigned InterpreterMainLoop(ARMul_State* cpu) {
 
 // GCC and Clang have a C++ extension to support a lookup table of labels. Otherwise, fallback to a
 // clunky switch statement.
-#if defined __GNUC__ || defined __clang__
+#if defined __GNUC__ || (defined __clang__ && !defined _MSC_VER)
 #define GOTO_NEXT_INST \
     GDB_BP_CHECK; \
     if (num_instrs >= cpu->NumInstrsToExecute) \
@@ -514,33 +514,6 @@ bool MemorySystem::IsValidPhysicalAddress(const PAddr paddr) const {
     return GetPhysicalRef(paddr);
 }
 
-PAddr MemorySystem::ClampPhysicalAddress(PAddr base, PAddr address) const {
-    struct MemoryArea {
-        PAddr paddr_base;
-        u32 size;
-    };
-
-    constexpr std::array memory_areas = {
-        MemoryArea{VRAM_PADDR, VRAM_SIZE},
-        MemoryArea{DSP_RAM_PADDR, DSP_RAM_SIZE},
-        MemoryArea{FCRAM_PADDR, FCRAM_N3DS_SIZE},
-        MemoryArea{N3DS_EXTRA_RAM_PADDR, N3DS_EXTRA_RAM_SIZE},
-    };
-
-    const auto area =
-        std::ranges::find_if(memory_areas, [&](const MemoryArea& area) {
-            return base >= area.paddr_base && base <= area.paddr_base + area.size;
-        });
-
-    if (area == memory_areas.end()) {
-        LOG_ERROR(HW_Memory, "Unknown base address used for clamping {:#08X} at PC {:#08X}", base,
-                  Core::GetRunningCore().GetPC());
-        return address;
-    }
-
-    return std::clamp(address, area->paddr_base, area->paddr_base + area->size);
-}
-
 u8* MemorySystem::GetPointer(const VAddr vaddr) {
     u8* page_pointer = impl->current_page_table->pointers[vaddr >> CITRA_PAGE_BITS];
     if (page_pointer) {
@@ -594,23 +567,18 @@ u8* MemorySystem::GetPhysicalPointer(PAddr address) {
 }
 
 MemoryRef MemorySystem::GetPhysicalRef(PAddr address) const {
-    struct MemoryArea {
-        PAddr paddr_base;
-        u32 size;
-    };
-
     constexpr std::array memory_areas = {
-        MemoryArea{VRAM_PADDR, VRAM_SIZE},
-        MemoryArea{DSP_RAM_PADDR, DSP_RAM_SIZE},
-        MemoryArea{FCRAM_PADDR, FCRAM_N3DS_SIZE},
-        MemoryArea{N3DS_EXTRA_RAM_PADDR, N3DS_EXTRA_RAM_SIZE},
+        std::make_pair(VRAM_PADDR, VRAM_SIZE),
+        std::make_pair(DSP_RAM_PADDR, DSP_RAM_SIZE),
+        std::make_pair(FCRAM_PADDR, FCRAM_N3DS_SIZE),
+        std::make_pair(N3DS_EXTRA_RAM_PADDR, N3DS_EXTRA_RAM_SIZE),
     };
 
     const auto area =
-        std::ranges::find_if(memory_areas, [&](const MemoryArea& area) {
+        std::ranges::find_if(memory_areas, [&](const auto& area) {
             // Note: the region end check is inclusive because the user can pass in an address that
             // represents an open right bound
-            return address >= area.paddr_base && address <= area.paddr_base + area.size;
+            return address >= area.first && address <= area.first + area.second;
         });
 
     if (area == memory_areas.end()) {
@@ -619,10 +587,10 @@ MemoryRef MemorySystem::GetPhysicalRef(PAddr address) const {
         return nullptr;
     }
 
-    u32 offset_into_region = address - area->paddr_base;
+    u32 offset_into_region = address - area->first;
 
     std::shared_ptr<BackingMem> target_mem = nullptr;
-    switch (area->paddr_base) {
+    switch (area->first) {
     case VRAM_PADDR:
         target_mem = impl->vram_mem;
         break;
@@ -565,9 +565,6 @@ public:
     /// Returns true if the address refers to a valid memory region
     bool IsValidPhysicalAddress(PAddr paddr) const;
 
-    /// Clamps the address to the boundaries of the memory region pointed by base
-    PAddr ClampPhysicalAddress(PAddr base, PAddr address) const;
-
     /// Gets offset in FCRAM from a pointer inside FCRAM range
    u32 GetFCRAMOffset(const u8* pointer) const;
 
@@ -189,7 +189,7 @@ create_target_directory_groups(video_core)
 target_include_directories(video_core PRIVATE ../../externals/vulkan-headers/include)
 target_include_directories(video_core PRIVATE ../../externals/vma)
 target_link_libraries(video_core PUBLIC common core)
-target_link_libraries(video_core PRIVATE glad glslang SPIRV nihstro-headers Boost::serialization)
+target_link_libraries(video_core PRIVATE glad glm::glm SPIRV glslang nihstro-headers Boost::serialization)
 set_target_properties(video_core PROPERTIES INTERPROCEDURAL_OPTIMIZATION ${ENABLE_LTO})
 
 if (ARCHITECTURE_x86_64)
@@ -47,9 +47,9 @@ class RasterizerAccelerated;
 template <class T>
 class RasterizerCache : NonCopyable {
 public:
-    using TextureRuntime = typename T::Runtime;
-    using Surface = std::shared_ptr<typename T::Surface>;
-    using Watcher = SurfaceWatcher<typename T::Surface>;
+    using TextureRuntime = typename T::RuntimeType;
+    using Surface = std::shared_ptr<typename T::SurfaceType>;
+    using Watcher = SurfaceWatcher<typename T::SurfaceType>;
 
 private:
     /// Declare rasterizer interval types
@@ -755,7 +755,7 @@ auto RasterizerCache<T>::GetFillSurface(const GPU::Regs::MemoryFillConfig& confi
     params.type = SurfaceType::Fill;
     params.res_scale = std::numeric_limits<u16>::max();
 
-    Surface new_surface = std::make_shared<typename T::Surface>(params, runtime);
+    Surface new_surface = std::make_shared<typename T::SurfaceType>(params, runtime);
 
     std::memcpy(&new_surface->fill_data[0], &config.value_32bit, 4);
     if (config.fill_32bit) {
@@ -1211,7 +1211,7 @@ void RasterizerCache<T>::InvalidateRegion(PAddr addr, u32 size, const Surface& r
 
 template <class T>
 auto RasterizerCache<T>::CreateSurface(SurfaceParams& params) -> Surface {
-    Surface surface = std::make_shared<typename T::Surface>(params, runtime);
+    Surface surface = std::make_shared<typename T::SurfaceType>(params, runtime);
     surface->invalid_regions.insert(surface->GetInterval());
 
     return surface;
@@ -112,14 +112,15 @@ void Driver::ReportDriverInfo() {
 }
 
 void Driver::DeduceVendor() {
-    if (gpu_vendor.contains("NVIDIA")) {
+    if (gpu_vendor.find("NVIDIA") != gpu_vendor.npos) {
         vendor = Vendor::Nvidia;
-    } else if (gpu_vendor.contains("ATI") ||
-               gpu_vendor.contains("Advanced Micro Devices")) {
+    } else if ((gpu_vendor.find("ATI") != gpu_vendor.npos) ||
+               (gpu_vendor.find("AMD") != gpu_vendor.npos) ||
+               (gpu_vendor.find("Advanced Micro Devices") != gpu_vendor.npos)) {
         vendor = Vendor::AMD;
-    } else if (gpu_vendor.contains("Intel")) {
+    } else if (gpu_vendor.find("Intel") != gpu_vendor.npos) {
         vendor = Vendor::Intel;
-    } else if (gpu_vendor.contains("GDI Generic")) {
+    } else if (gpu_vendor.find("GDI Generic") != gpu_vendor.npos) {
         vendor = Vendor::Generic;
     }
 }
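Editor's note (not part of the commit): std::string::contains is a C++23 addition, so switching back to find() != npos here matches the CMAKE_CXX_STANDARD change from 23 to 20 earlier in the diff. An equivalent C++20 helper, for illustration only (the function name is hypothetical):

    #include <string>

    // C++20-compatible stand-in for the C++23 gpu_vendor.contains(needle) calls.
    bool VendorContains(const std::string& gpu_vendor, const char* needle) {
        return gpu_vendor.find(needle) != std::string::npos;
    }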
@@ -152,8 +152,8 @@ public:
 };
 
 struct Traits {
-    using Runtime = TextureRuntime;
-    using Surface = Surface;
+    using RuntimeType = TextureRuntime;
+    using SurfaceType = Surface;
 };
 
 using RasterizerCache = VideoCore::RasterizerCache<Traits>;
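Editor's note (not part of the commit): the members are presumably renamed because "using Surface = Surface;" inside Traits makes the alias refer back to the name it is shadowing, which compilers reject or warn about as changing the meaning of the name. Distinct trait names keep the indirection unambiguous; a reduced sketch with stand-in types:

    // Stand-in types; the real code uses TextureRuntime and Surface from the backend.
    struct DummyRuntime {};
    struct DummySurface {};

    struct ExampleTraits {
        using RuntimeType = DummyRuntime;  // was "using Runtime = TextureRuntime;"
        using SurfaceType = DummySurface;  // was the self-referencing "using Surface = Surface;"
    };

    template <class T>
    struct ExampleCache {
        using TextureRuntime = typename T::RuntimeType;
        using Surface = typename T::SurfaceType;
    };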
@@ -250,7 +250,7 @@ inline vk::CullModeFlags CullMode(Pica::RasterizerRegs::CullMode mode) {
         return vk::CullModeFlagBits::eNone;
     case Pica::RasterizerRegs::CullMode::KeepClockWise:
     case Pica::RasterizerRegs::CullMode::KeepCounterClockWise:
-        return vk::CullModeFlagBits::eBack;
+        return vk::CullModeFlagBits::eNone;
     }
 }
 
@@ -3,6 +3,8 @@
 // Refer to the license.txt file included.
 
 #define VULKAN_HPP_NO_CONSTRUCTORS
+#define GLM_FORCE_DEPTH_ZERO_TO_ONE
+#include <glm/gtc/matrix_transform.hpp>
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "core/core.h"
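Editor's note (not part of the commit): GLM_FORCE_DEPTH_ZERO_TO_ONE switches GLM's projection helpers from OpenGL's [-1, 1] clip-space depth to Vulkan's [0, 1] range, which the glm::ortho call later in this file relies on. Illustrative usage (the values are example numbers only):

    // Must be defined before including the GLM headers that build projection matrices.
    #define GLM_FORCE_DEPTH_ZERO_TO_ONE
    #include <glm/gtc/matrix_transform.hpp>

    // near = 0 and far = 1 now map directly onto Vulkan's depth range.
    const glm::mat4 proj = glm::ortho(0.0f, 400.0f, 240.0f, 0.0f, 0.0f, 1.0f);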
@@ -32,8 +34,8 @@ layout (location = 0) out vec2 frag_tex_coord;
 // The third column performs translation.
 // The third row could be used for projection, which we don't need in 2D. It hence is assumed to
 // implicitly be [0, 0, 1]
-layout (push_constant) uniform DrawInfo {
-    mat3x2 modelview_matrix;
+layout (push_constant, std140) uniform DrawInfo {
+    mat4 modelview_matrix;
     vec4 i_resolution;
     vec4 o_resolution;
     int screen_id_l;
@@ -42,39 +44,38 @@ layout (push_constant) uniform DrawInfo {
 };
 
 void main() {
-    // Multiply input position by the rotscale part of the matrix and then manually translate by
-    // the last column. This is equivalent to using a full 3x3 matrix and expanding the vector
-    // to `vec3(vert_position.xy, 1.0)`
-    gl_Position = vec4(mat2(modelview_matrix) * vert_position + modelview_matrix[2], 0.0, 1.0);
-    gl_Position.y = -gl_Position.y;
+    vec4 position = vec4(vert_position, 0.0, 1.0) * modelview_matrix;
+    gl_Position = vec4(position.x, -position.y, 0.0, 1.0);
     frag_tex_coord = vert_tex_coord;
 }
 )";
 
 constexpr std::string_view fragment_shader = R"(
-version 450 core
+#version 450 core
 #extension GL_ARB_separate_shader_objects : enable
 layout (location = 0) in vec2 frag_tex_coord;
 layout (location = 0) out vec4 color;
 
-layout (push_constant) uniform DrawInfo {
-    mat3x2 modelview_matrix;
+layout (push_constant, std140) uniform DrawInfo {
+    mat4 modelview_matrix;
     vec4 i_resolution;
     vec4 o_resolution;
     int screen_id_l;
     int screen_id_r;
     int layer;
+    int reverse_interlaced;
 };
 
-layout (set = 0, binding = 0) uniform sampler2D screen_textures[3];
+layout (set = 0, binding = 0) uniform texture2D screen_textures[3];
+layout (set = 0, binding = 1) uniform sampler screen_sampler;
 
 void main() {
-    color = texture(screen_textures[screen_id_l], frag_tex_coord);
+    color = texture(sampler2D(screen_textures[screen_id_l], screen_sampler), frag_tex_coord);
 }
 )";
 
 constexpr std::string_view fragment_shader_anaglyph = R"(
-version 450 core
+#version 450 core
 #extension GL_ARB_separate_shader_objects : enable
 layout (location = 0) in vec2 frag_tex_coord;
 layout (location = 0) out vec4 color;
@@ -91,32 +92,8 @@ const mat3 r = mat3(-0.011,-0.032,-0.007,
                     0.377, 0.761, 0.009,
                    -0.026,-0.093, 1.234);
 
-layout (push_constant) uniform DrawInfo {
-    mat3x2 modelview_matrix;
-    vec4 i_resolution;
-    vec4 o_resolution;
-    int screen_id_l;
-    int screen_id_r;
-    int layer;
-};
-
-layout (set = 0, binding = 0) uniform sampler2D screen_textures[3];
-
-void main() {
-    vec4 color_tex_l = texture(screen_textures[screen_id_l], frag_tex_coord);
-    vec4 color_tex_r = texture(screen_textures[screen_id_r], frag_tex_coord);
-    color = vec4(color_tex_l.rgb*l+color_tex_r.rgb*r, color_tex_l.a);
-}
-)";
-
-constexpr std::string_view fragment_shader_interlaced = R"(
-version 450 core
-#extension GL_ARB_separate_shader_objects : enable
-layout (location = 0) in vec2 frag_tex_coord;
-layout (location = 0) out vec4 color;
-
-layout (push_constant) uniform DrawInfo {
-    mat3x2 modelview_matrix;
+layout (push_constant, std140) uniform DrawInfo {
+    mat4 modelview_matrix;
     vec4 i_resolution;
     vec4 o_resolution;
     int screen_id_l;
@@ -125,14 +102,41 @@ layout (push_constant) uniform DrawInfo {
     int reverse_interlaced;
 };
 
-layout (set = 0, binding = 0) uniform sampler2D screen_textures[3];
+layout (set = 0, binding = 0) uniform texture2D screen_textures[3];
+layout (set = 0, binding = 1) uniform sampler screen_sampler;
 
+void main() {
+    vec4 color_tex_l = texture(sampler2D(screen_textures[screen_id_l], screen_sampler), frag_tex_coord);
+    vec4 color_tex_r = texture(sampler2D(screen_textures[screen_id_r], screen_sampler), frag_tex_coord);
+    color = vec4(color_tex_l.rgb*l+color_tex_r.rgb*r, color_tex_l.a);
+}
+)";
+
+constexpr std::string_view fragment_shader_interlaced = R"(
+#version 450 core
+#extension GL_ARB_separate_shader_objects : enable
+layout (location = 0) in vec2 frag_tex_coord;
+layout (location = 0) out vec4 color;
+
+layout (push_constant, std140) uniform DrawInfo {
+    mat4 modelview_matrix;
+    vec4 i_resolution;
+    vec4 o_resolution;
+    int screen_id_l;
+    int screen_id_r;
+    int layer;
+    int reverse_interlaced;
+};
+
+layout (set = 0, binding = 0) uniform texture2D screen_textures[3];
+layout (set = 0, binding = 1) uniform sampler screen_sampler;
+
 void main() {
     float screen_row = o_resolution.x * frag_tex_coord.x;
     if (int(screen_row) % 2 == reverse_interlaced)
-        color = texture(screen_textures[screen_id_l], frag_tex_coord);
+        color = texture(sampler2D(screen_textures[screen_id_l], screen_sampler), frag_tex_coord);
     else
-        color = texture(screen_textures[screen_id_r], frag_tex_coord);
+        color = texture(sampler2D(screen_textures[screen_id_r], screen_sampler), frag_tex_coord);
 }
 )";
 
@@ -194,15 +198,16 @@ RendererVulkan::RendererVulkan(Frontend::EmuWindow& window)
 
 RendererVulkan::~RendererVulkan() {
     vk::Device device = instance.GetDevice();
 
     device.destroyPipelineLayout(present_pipeline_layout);
+    device.destroyShaderModule(present_vertex_shader);
     device.destroyDescriptorSetLayout(present_descriptor_layout);
     device.destroyDescriptorUpdateTemplate(present_update_template);
-    device.destroyShaderModule(present_vertex_shader);
     for (u32 i = 0; i < PRESENT_PIPELINES; i++) {
         device.destroyPipeline(present_pipelines[i]);
         device.destroyShaderModule(present_shaders[i]);
     }
 
     for (std::size_t i = 0; i < present_samplers.size(); i++) {
         device.destroySampler(present_samplers[i]);
     }
@@ -268,27 +273,18 @@ void RendererVulkan::BeginRendering() {
     vk::CommandBuffer command_buffer = scheduler.GetRenderCommandBuffer();
     command_buffer.bindPipeline(vk::PipelineBindPoint::eGraphics, present_pipelines[current_pipeline]);
 
+    std::array<vk::DescriptorImageInfo, 4> present_textures;
     for (std::size_t i = 0; i < screen_infos.size(); i++) {
-        runtime.Transition(command_buffer, screen_infos[i].display_texture,
-                           vk::ImageLayout::eShaderReadOnlyOptimal, 0, 1);
+        const auto& info = screen_infos[i];
+        present_textures[i] = vk::DescriptorImageInfo{
+            .imageView = info.display_texture ? info.display_texture->image_view
+                                              : info.texture.alloc.image_view,
+            .imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal
+        };
     }
 
-    const std::array present_textures = {
-        vk::DescriptorImageInfo{
-            .sampler = present_samplers[current_sampler],
-            .imageView = screen_infos[0].display_texture.image_view,
-            .imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal
-        },
-        vk::DescriptorImageInfo{
-            .sampler = present_samplers[current_sampler],
-            .imageView = screen_infos[1].display_texture.image_view,
-            .imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal
-        },
-        vk::DescriptorImageInfo{
-            .sampler = present_samplers[current_sampler],
-            .imageView = screen_infos[2].display_texture.image_view,
-            .imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal
-        },
+    present_textures[3] = vk::DescriptorImageInfo{
+        .sampler = present_samplers[current_sampler]
     };
 
     const vk::DescriptorSetAllocateInfo alloc_info = {
@@ -299,7 +295,7 @@ void RendererVulkan::BeginRendering() {
 
     vk::Device device = instance.GetDevice();
     vk::DescriptorSet set = device.allocateDescriptorSets(alloc_info)[0];
-    device.updateDescriptorSetWithTemplate(set, present_update_template, present_textures.data());
+    device.updateDescriptorSetWithTemplate(set, present_update_template, present_textures[0]);
 
     command_buffer.bindDescriptorSets(vk::PipelineBindPoint::eGraphics, present_pipeline_layout,
                                       0, 1, &set, 0, nullptr);
@@ -398,35 +394,55 @@ void RendererVulkan::CompileShaders() {
 }
 
 void RendererVulkan::BuildLayouts() {
-    const vk::DescriptorSetLayoutBinding present_layout_binding = {
-        .binding = 0,
-        .descriptorType = vk::DescriptorType::eCombinedImageSampler,
-        .descriptorCount = 3,
-        .stageFlags = vk::ShaderStageFlagBits::eFragment
+    const std::array present_layout_bindings = {
+        vk::DescriptorSetLayoutBinding{
+            .binding = 0,
+            .descriptorType = vk::DescriptorType::eSampledImage,
+            .descriptorCount = 3,
+            .stageFlags = vk::ShaderStageFlagBits::eFragment
+        },
+        vk::DescriptorSetLayoutBinding{
+            .binding = 1,
+            .descriptorType = vk::DescriptorType::eSampler,
+            .descriptorCount = 1,
+            .stageFlags = vk::ShaderStageFlagBits::eFragment
+        }
     };
 
     const vk::DescriptorSetLayoutCreateInfo present_layout_info = {
-        .bindingCount = 1,
-        .pBindings = &present_layout_binding
-    };
-
-    const vk::DescriptorUpdateTemplateEntry update_template_entry = {
-        .dstBinding = 0,
-        .dstArrayElement = 0,
-        .descriptorCount = 3,
-        .descriptorType = vk::DescriptorType::eCombinedImageSampler,
-        .offset = 0,
-        .stride = sizeof(vk::DescriptorImageInfo)
-    };
-
-    const vk::DescriptorUpdateTemplateCreateInfo template_info = {
-        .descriptorUpdateEntryCount = 1,
-        .pDescriptorUpdateEntries = &update_template_entry,
-        .descriptorSetLayout = present_descriptor_layout
+        .bindingCount = static_cast<u32>(present_layout_bindings.size()),
+        .pBindings = present_layout_bindings.data()
     };
 
     vk::Device device = instance.GetDevice();
     present_descriptor_layout = device.createDescriptorSetLayout(present_layout_info);
+
+    const std::array update_template_entries = {
+        vk::DescriptorUpdateTemplateEntry{
+            .dstBinding = 0,
+            .dstArrayElement = 0,
+            .descriptorCount = 3,
+            .descriptorType = vk::DescriptorType::eSampledImage,
+            .offset = 0,
+            .stride = sizeof(vk::DescriptorImageInfo)
+        },
+        vk::DescriptorUpdateTemplateEntry{
+            .dstBinding = 1,
+            .dstArrayElement = 0,
+            .descriptorCount = 1,
+            .descriptorType = vk::DescriptorType::eSampler,
+            .offset = 3 * sizeof(vk::DescriptorImageInfo),
+            .stride = 0
+        }
+    };
+
+    const vk::DescriptorUpdateTemplateCreateInfo template_info = {
+        .descriptorUpdateEntryCount = static_cast<u32>(update_template_entries.size()),
+        .pDescriptorUpdateEntries = update_template_entries.data(),
+        .templateType = vk::DescriptorUpdateTemplateType::eDescriptorSet,
+        .descriptorSetLayout = present_descriptor_layout
+    };
+
     present_update_template = device.createDescriptorUpdateTemplate(template_info);
 
     const vk::PushConstantRange push_range = {
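Editor's note (not part of the commit): a descriptor update template reads all of its entries from one user-supplied block of memory. Here the renderer packs the three sampled-image infos and the sampler info into a single std::array<vk::DescriptorImageInfo, 4>, which is why the eSampler entry uses .offset = 3 * sizeof(vk::DescriptorImageInfo) with .stride = 0, and why BeginRendering() passes present_textures[0] as the template data. Layout sketch (assumes vulkan.hpp, illustrative alias name):

    #define VULKAN_HPP_NO_CONSTRUCTORS
    #include <array>
    #include <vulkan/vulkan.hpp>

    // Elements [0..2]: imageView/imageLayout consumed by the eSampledImage entry
    // (stride = sizeof(vk::DescriptorImageInfo)).
    // Element  [3]:    sampler consumed by the eSampler entry (offset = 3 * sizeof, stride = 0).
    using PresentDescriptorData = std::array<vk::DescriptorImageInfo, 4>;
    static_assert(sizeof(PresentDescriptorData) == 4 * sizeof(vk::DescriptorImageInfo));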
@@ -639,8 +655,22 @@ void RendererVulkan::DrawSingleScreenRotated(u32 screen_id, float x, float y, fl
                                  vk::ShaderStageFlagBits::eFragment | vk::ShaderStageFlagBits::eVertex,
                                  0, sizeof(draw_info), &draw_info);
 
+    const vk::ClearValue clear_value = {
+        .color = clear_color
+    };
+
+    const vk::RenderPassBeginInfo begin_info = {
+        .renderPass = renderpass_cache.GetPresentRenderpass(),
+        .framebuffer = swapchain.GetFramebuffer(),
+        .clearValueCount = 1,
+        .pClearValues = &clear_value,
+    };
+
+    command_buffer.beginRenderPass(begin_info, vk::SubpassContents::eInline);
+
     command_buffer.bindVertexBuffers(0, vertex_buffer.GetHandle(), {0});
     command_buffer.draw(4, 1, offset / sizeof(ScreenRectVertex), 0);
+    command_buffer.endRenderPass();
 }
 
 void RendererVulkan::DrawSingleScreen(u32 screen_id, float x, float y, float w, float h) {
@@ -675,8 +705,22 @@ void RendererVulkan::DrawSingleScreen(u32 screen_id, float x, float y, float w,
                                  vk::ShaderStageFlagBits::eFragment | vk::ShaderStageFlagBits::eVertex,
                                  0, sizeof(draw_info), &draw_info);
 
+    const vk::ClearValue clear_value = {
+        .color = clear_color
+    };
+
+    const vk::RenderPassBeginInfo begin_info = {
+        .renderPass = renderpass_cache.GetPresentRenderpass(),
+        .framebuffer = swapchain.GetFramebuffer(),
+        .clearValueCount = 1,
+        .pClearValues = &clear_value,
+    };
+
+    command_buffer.beginRenderPass(begin_info, vk::SubpassContents::eInline);
+
     command_buffer.bindVertexBuffers(0, vertex_buffer.GetHandle(), {0});
     command_buffer.draw(4, 1, offset / sizeof(ScreenRectVertex), 0);
+    command_buffer.endRenderPass();
 }
 
 void RendererVulkan::DrawSingleScreenStereoRotated(u32 screen_id_l, u32 screen_id_r,
@@ -778,8 +822,11 @@ void RendererVulkan::DrawScreens(const Layout::FramebufferLayout& layout, bool f
     const auto& bottom_screen = layout.bottom_screen;
 
     // Set projection matrix
-    draw_info.modelview =
-        MakeOrthographicMatrix(static_cast<float>(layout.width), static_cast<float>(layout.height), flipped);
+    //draw_info.modelview =
+    //    MakeOrthographicMatrix(static_cast<float>(layout.width), static_cast<float>(layout.height), flipped);
+    draw_info.modelview = glm::transpose(glm::ortho(0.f, static_cast<float>(layout.width),
+                                                    static_cast<float>(layout.height), 0.0f,
+                                                    0.f, 1.f));
 
     const bool stereo_single_screen =
         Settings::values.render_3d == Settings::StereoRenderOption::Anaglyph ||
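Editor's note (not part of the commit): glm::ortho produces a column-major matrix intended for matrix * vector multiplication, while the presentation vertex shader above multiplies vector * matrix; the glm::transpose call compensates for that order. An equivalent standalone helper, for illustration only (the function name is hypothetical):

    #include <glm/glm.hpp>
    #include <glm/gtc/matrix_transform.hpp>

    // Builds the projection used for presenting the screens; transposed because the
    // shader computes vec4(pos, 0.0, 1.0) * modelview_matrix (row-vector convention).
    glm::mat4 MakePresentProjection(float width, float height) {
        return glm::transpose(glm::ortho(0.0f, width, height, 0.0f, 0.0f, 1.0f));
    }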
@@ -920,6 +967,8 @@ void RendererVulkan::SwapBuffers() {
         swapchain.Create(layout.width, layout.height, false);
     }
 
+    swapchain.AcquireNextImage();
+
     const vk::Viewport viewport = {
         .x = 0.0f,
         .y = 0.0f,
@@ -934,21 +983,14 @@ void RendererVulkan::SwapBuffers() {
         .extent = {layout.width, layout.height}
     };
 
-    const vk::ClearValue clear_value = {
-        .color = clear_color
-    };
-
-    const vk::RenderPassBeginInfo begin_info = {
-        .renderPass = renderpass_cache.GetPresentRenderpass(),
-        .framebuffer = swapchain.GetFramebuffer(),
-        .clearValueCount = 1,
-        .pClearValues = &clear_value,
-    };
-
     vk::CommandBuffer command_buffer = scheduler.GetRenderCommandBuffer();
     command_buffer.setViewport(0, viewport);
     command_buffer.setScissor(0, scissor);
-    command_buffer.beginRenderPass(begin_info, vk::SubpassContents::eInline);
+    for (auto& info : screen_infos) {
+        auto alloc = info.display_texture ? info.display_texture : &info.texture.alloc;
+        runtime.Transition(command_buffer, *alloc, vk::ImageLayout::eShaderReadOnlyOptimal, 0, 1);
+    }
+
     DrawScreens(layout, false);
 
|
|||||||
vertex_buffer.Flush();
|
vertex_buffer.Flush();
|
||||||
rasterizer->FlushBuffers();
|
rasterizer->FlushBuffers();
|
||||||
|
|
||||||
command_buffer.endRenderPass();
|
|
||||||
scheduler.Submit(false, true, swapchain.GetAvailableSemaphore(), swapchain.GetPresentSemaphore());
|
scheduler.Submit(false, true, swapchain.GetAvailableSemaphore(), swapchain.GetPresentSemaphore());
|
||||||
|
swapchain.Present();
|
||||||
|
|
||||||
// Inform texture runtime about the switch
|
// Inform texture runtime about the switch
|
||||||
runtime.OnSlotSwitch(scheduler.GetCurrentSlotIndex());
|
runtime.OnSlotSwitch(scheduler.GetCurrentSlotIndex());
|
||||||
|
@@ -5,6 +5,7 @@
 #pragma once
 
 #include <array>
+#include <glm/glm.hpp>
 #include "common/common_types.h"
 #include "common/math_util.h"
 #include "core/hw/gpu.h"
@@ -30,7 +31,7 @@ struct TextureInfo {
 
 /// Structure used for storing information about the display target for each 3DS screen
 struct ScreenInfo {
-    ImageAlloc display_texture;
+    ImageAlloc* display_texture = nullptr;
     Common::Rectangle<float> display_texcoords;
     TextureInfo texture;
     vk::Sampler sampler;
@@ -38,7 +39,7 @@ struct ScreenInfo {
 
 // Uniform data used for presenting the 3DS screens
 struct PresentUniformData {
-    std::array<float, 3 * 2> modelview;
+    glm::mat4 modelview;
     Common::Vec4f i_resolution;
     Common::Vec4f o_resolution;
     int screen_id_l = 0;
@@ -44,7 +44,8 @@ constexpr vk::ImageUsageFlags GetImageUsage(vk::ImageAspectFlags aspect) {
     if (aspect & vk::ImageAspectFlagBits::eDepth) {
         return usage | vk::ImageUsageFlagBits::eDepthStencilAttachment;
     } else {
-        return usage | vk::ImageUsageFlagBits::eColorAttachment;
+        return usage | vk::ImageUsageFlagBits::eStorage |
+               vk::ImageUsageFlagBits::eColorAttachment;
     }
 };
 
|
|||||||
if (aspect & vk::ImageAspectFlagBits::eDepth) {
|
if (aspect & vk::ImageAspectFlagBits::eDepth) {
|
||||||
return usage | vk::FormatFeatureFlagBits::eDepthStencilAttachment;
|
return usage | vk::FormatFeatureFlagBits::eDepthStencilAttachment;
|
||||||
} else {
|
} else {
|
||||||
return usage | vk::FormatFeatureFlagBits::eColorAttachment;
|
return usage | vk::FormatFeatureFlagBits::eStorageImage |
|
||||||
|
vk::FormatFeatureFlagBits::eColorAttachment;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@@ -51,11 +51,12 @@ Instance::Instance(Frontend::EmuWindow& window) {
     surface = CreateSurface(instance, window);
 
     // TODO: GPU select dialog
-    physical_device = instance.enumeratePhysicalDevices()[0];
-    device_limits = physical_device.getProperties().limits;
+    auto physical_devices = instance.enumeratePhysicalDevices();
+    physical_device = physical_devices[0];
+    device_properties = physical_device.getProperties();
 
     // Create logical device
-    CreateDevice(true);
+    CreateDevice(false);
 }
 
 Instance::~Instance() {
@@ -154,11 +155,6 @@ bool Instance::CreateDevice(bool validation_enabled) {
         return false;
     }
 
-    // List available device extensions
-    for (const auto& extension : extension_list) {
-        LOG_INFO(Render_Vulkan, "Vulkan extension: {}", extension.extensionName);
-    }
-
     // Helper lambda for adding extensions
     std::array<const char*, 6> enabled_extensions;
     u32 enabled_extension_count = 0;
@@ -223,7 +219,6 @@ bool Instance::CreateDevice(bool validation_enabled) {
 
     static constexpr float queue_priorities[] = {1.0f};
 
-    const std::array layers = {"VK_LAYER_KHRONOS_validation"};
     const std::array queue_infos = {
         vk::DeviceQueueCreateInfo{
             .queueFamilyIndex = graphics_queue_family_index,
@@ -249,12 +244,6 @@ bool Instance::CreateDevice(bool validation_enabled) {
         device_info.queueCreateInfoCount = 2;
     }
 
-    // Enable debug layer on debug builds
-    if (validation_enabled) {
-        device_info.enabledLayerCount = static_cast<u32>(layers.size());
-        device_info.ppEnabledLayerNames = layers.data();
-    }
-
     // Create logical device
     device = physical_device.createDevice(device_info);
     VULKAN_HPP_DEFAULT_DISPATCHER.init(device);
@@ -98,7 +98,7 @@ public:
 
     /// Returns the minimum required alignment for uniforms
     vk::DeviceSize UniformMinAlignment() const {
-        return device_limits.minUniformBufferOffsetAlignment;
+        return device_properties.limits.minUniformBufferOffsetAlignment;
     }
 
 private:
@@ -116,7 +116,6 @@ private:
     vk::PhysicalDevice physical_device;
     vk::Instance instance;
     vk::SurfaceKHR surface;
-    vk::PhysicalDeviceLimits device_limits;
     vk::PhysicalDeviceProperties device_properties;
     VmaAllocator allocator;
 
@@ -139,6 +139,7 @@ PipelineCache::PipelineCache(const Instance& instance, TaskScheduler& scheduler,
     descriptor_dirty.fill(true);
 
     LoadDiskCache();
+    BuildLayout();
     trivial_vertex_shader = Compile(GenerateTrivialVertexShader(), vk::ShaderStageFlagBits::eVertex,
                                     instance.GetDevice(), ShaderOptimization::Debug);
 }
@@ -239,25 +240,21 @@ void PipelineCache::UseFragmentShader(const Pica::Regs& regs) {
 }
 
 void PipelineCache::BindTexture(u32 binding, vk::ImageView image_view) {
-    const DescriptorData data = {
-        .image_info = vk::DescriptorImageInfo{
-            .imageView = image_view,
-            .imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal
-        }
+    const vk::DescriptorImageInfo image_info = {
+        .imageView = image_view,
+        .imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal
     };
 
-    SetBinding(1, binding, data);
+    SetBinding(1, binding, DescriptorData{image_info});
 }
 
 void PipelineCache::BindStorageImage(u32 binding, vk::ImageView image_view) {
-    const DescriptorData data = {
-        .image_info = vk::DescriptorImageInfo{
-            .imageView = image_view,
-            .imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal
-        }
+    const vk::DescriptorImageInfo image_info = {
+        .imageView = image_view,
+        .imageLayout = vk::ImageLayout::eGeneral
     };
 
-    SetBinding(3, binding, data);
+    SetBinding(3, binding, DescriptorData{image_info});
 }
 
 void PipelineCache::BindBuffer(u32 binding, vk::Buffer buffer, u32 offset, u32 size) {
@@ -370,6 +367,7 @@ void PipelineCache::BuildLayout() {
         const vk::DescriptorUpdateTemplateCreateInfo template_info = {
             .descriptorUpdateEntryCount = set.binding_count,
             .pDescriptorUpdateEntries = update_entries.data(),
+            .templateType = vk::DescriptorUpdateTemplateType::eDescriptorSet,
             .descriptorSetLayout = descriptor_set_layouts[i]
         };
 
@@ -398,7 +396,7 @@ vk::Pipeline PipelineCache::BuildPipeline(const PipelineInfo& info) {
             continue;
         }
 
-        shader_stages[i] = vk::PipelineShaderStageCreateInfo{
+        shader_stages[shader_count++] = vk::PipelineShaderStageCreateInfo{
             .stage = ToVkShaderStage(i),
             .module = shader,
             .pName = "main"
@@ -569,6 +567,8 @@ vk::Pipeline PipelineCache::BuildPipeline(const PipelineInfo& info) {
     return VK_NULL_HANDLE;
 }
 
+static_assert(sizeof(vk::DescriptorBufferInfo) == sizeof(VkDescriptorBufferInfo));
+
 void PipelineCache::BindDescriptorSets() {
     vk::Device device = instance.GetDevice();
     for (u32 i = 0; i < RASTERIZER_SET_COUNT; i++) {
@@ -580,7 +580,7 @@ void PipelineCache::BindDescriptorSets() {
         };
 
         vk::DescriptorSet set = device.allocateDescriptorSets(alloc_info)[0];
-        device.updateDescriptorSetWithTemplate(set, update_templates[i], update_data[i].data());
+        device.updateDescriptorSetWithTemplate(set, update_templates[i], update_data[i][0]);
 
         descriptor_sets[i] = set;
         descriptor_dirty[i] = false;
@@ -600,6 +600,7 @@ void PipelineCache::LoadDiskCache() {
     FileUtil::IOFile cache_file{cache_path, "r"};
     if (!cache_file.IsOpen()) {
         LOG_INFO(Render_Vulkan, "No pipeline cache found");
+        return;
     }
 
     const u32 cache_file_size = cache_file.GetSize();
@@ -22,6 +22,7 @@
 
 #define VULKAN_HPP_NO_CONSTRUCTORS
 #include <vector>
+#include "common/assert.h"
 #include "common/logging/log.h"
 #include "core/frontend/emu_window.h"
 #include "video_core/renderer_vulkan/vk_common.h"
@@ -41,13 +42,16 @@ inline vk::SurfaceKHR CreateSurface(const vk::Instance& instance, const Frontend
 
         if (instance.createWin32SurfaceKHR(&win32_ci, nullptr, &surface) != vk::Result::eSuccess) {
             LOG_CRITICAL(Render_Vulkan, "Failed to initialize Win32 surface");
+            UNREACHABLE();
         }
     }
 #elif VK_USE_PLATFORM_XLIB_KHR
     if (window_info.type == Frontend::WindowSystemType::X11) {
-        const vk::XlibSurfaceCreateInfoKHR xlib_ci{{},
-            static_cast<Display*>(window_info.display_connection),
-            reinterpret_cast<Window>(window_info.render_surface)};
+        const vk::XlibSurfaceCreateInfoKHR xlib_ci = {
+            .dpy = static_cast<Display*>(window_info.display_connection),
+            .window = reinterpret_cast<Window>(window_info.render_surface)
+        };
+
         if (instance.createXlibSurfaceKHR(&xlib_ci, nullptr, &surface) != vk::Result::eSuccess) {
             LOG_ERROR(Render_Vulkan, "Failed to initialize Xlib surface");
             UNREACHABLE();
@@ -124,7 +124,7 @@ RasterizerVulkan::RasterizerVulkan(Frontend::EmuWindow& emu_window, const Instan
     default_texture = runtime.Allocate(1, 1, VideoCore::PixelFormat::RGBA8,
                                        VideoCore::TextureType::Texture2D);
     runtime.Transition(scheduler.GetUploadCommandBuffer(), default_texture,
-                       vk::ImageLayout::eShaderReadOnlyOptimal, 0, 1);
+                       vk::ImageLayout::eGeneral, 0, 1);
 
     uniform_block_data.lighting_lut_dirty.fill(true);
 
@@ -149,6 +149,14 @@ RasterizerVulkan::RasterizerVulkan(Frontend::EmuWindow& emu_window, const Instan
 
     // Since we don't have access to VK_EXT_descriptor_indexing we need to intiallize
     // all descriptor sets even the ones we don't use. Use default_texture for this
+    const u32 vs_uniform_size = sizeof(Pica::Shader::VSUniformData);
+    const u32 fs_uniform_size = sizeof(Pica::Shader::UniformData);
+    pipeline_cache.BindBuffer(0, uniform_buffer.GetHandle(), 0, vs_uniform_size);
+    pipeline_cache.BindBuffer(1, uniform_buffer.GetHandle(), vs_uniform_size, fs_uniform_size);
+    pipeline_cache.BindTexelBuffer(2, texture_lf_buffer.GetView());
+    pipeline_cache.BindTexelBuffer(3, texture_buffer.GetView(0));
+    pipeline_cache.BindTexelBuffer(4, texture_buffer.GetView(1));
+
     for (u32 i = 0; i < 4; i++) {
         pipeline_cache.BindTexture(i, default_texture.image_view);
         pipeline_cache.BindSampler(i, default_sampler);
@@ -584,48 +592,6 @@ bool RasterizerVulkan::Draw(bool accelerate, bool is_indexed) {
                                surfaces_rect.bottom, surfaces_rect.top))
     };
 
-    auto valid_surface = color_surface ? color_surface : depth_surface;
-    const FramebufferInfo framebuffer_info = {
-        .color = color_surface ? color_surface->alloc.image_view : VK_NULL_HANDLE,
-        .depth = depth_surface ? depth_surface->alloc.image_view : VK_NULL_HANDLE,
-        .renderpass = renderpass_cache.GetRenderpass(pipeline_info.color_attachment,
-                                                     pipeline_info.depth_attachment, false),
-        .width = valid_surface->GetScaledWidth(),
-        .height = valid_surface->GetScaledHeight()
-    };
-
-    auto [it, new_framebuffer] = framebuffers.try_emplace(framebuffer_info, vk::Framebuffer{});
-    if (new_framebuffer) {
-        it->second = CreateFramebuffer(framebuffer_info);
-    }
-
-    ImageAlloc color_alloc =
-        color_surface ? color_surface->alloc : ImageAlloc{};
-    ImageAlloc depth_alloc =
-        depth_surface ? depth_surface->alloc : ImageAlloc{};
-
-    vk::CommandBuffer command_buffer = scheduler.GetRenderCommandBuffer();
-    runtime.Transition(command_buffer, color_alloc,
-                       vk::ImageLayout::eColorAttachmentOptimal, 0, color_alloc.levels);
-    runtime.Transition(command_buffer, depth_alloc,
-                       vk::ImageLayout::eDepthStencilReadOnlyOptimal, 0, depth_alloc.levels);
-
-    const vk::RenderPassBeginInfo renderpass_begin = {
-        .renderPass =
-            renderpass_cache.GetRenderpass(pipeline_info.color_attachment,
-                                           pipeline_info.depth_attachment, false),
-        .framebuffer = it->second,
-        .renderArea = vk::Rect2D{
-            .offset = {static_cast<s32>(draw_rect.left), static_cast<s32>(draw_rect.bottom)},
-            .extent = {draw_rect.GetWidth(), draw_rect.GetHeight()}
-        },
-
-        .clearValueCount = 0,
-        .pClearValues = nullptr
-    };
-
-    renderpass_cache.EnterRenderpass(renderpass_begin);
-
     // Sync the viewport
     pipeline_cache.SetViewport(surfaces_rect.left + viewport_rect_unscaled.left * res_scale,
                                surfaces_rect.bottom + viewport_rect_unscaled.bottom * res_scale,
@@ -659,13 +625,6 @@ bool RasterizerVulkan::Draw(bool accelerate, bool is_indexed) {
         uniform_block_data.dirty = true;
     }
 
-    /*bool need_duplicate_texture = false;
-    auto CheckBarrier = [&need_duplicate_texture, &color_surface](vk::ImageView handle) {
-        if (color_surface && color_surface->alloc.image_view == handle) {
-            need_duplicate_texture = true;
-        }
-    };*/
-
     auto CheckBarrier = [this, &color_surface = color_surface](vk::ImageView image_view, u32 texture_index) {
         if (color_surface && color_surface->alloc.image_view == image_view) {
            //auto temp_tex = backend->CreateTexture(texture->GetInfo());
@@ -676,6 +635,8 @@ bool RasterizerVulkan::Draw(bool accelerate, bool is_indexed) {
         }
     };
 
+    vk::CommandBuffer command_buffer = scheduler.GetRenderCommandBuffer();
+
     // Sync and bind the texture surfaces
     const auto pica_textures = regs.texturing.GetTextures();
     for (unsigned texture_index = 0; texture_index < pica_textures.size(); ++texture_index) {
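Note: the CheckBarrier lambda guards against sampling the image that is currently bound as the colour attachment. The commit leaves the actual handling commented out, so the following is only a hedged sketch of one possible reaction, namely moving the aliased image into eGeneral (the layout case added to TextureRuntime::Transition later in this diff). TransitionToGeneral and its parameters are illustrative, and VULKAN_HPP_NO_CONSTRUCTORS is assumed so the vk:: structs accept designated initializers, as the diff itself uses.

#define VULKAN_HPP_NO_CONSTRUCTORS
#include <cstdint>
#include <vulkan/vulkan.hpp>

// Hypothetical sketch: switch an attachment that is also sampled into
// vk::ImageLayout::eGeneral so reads and writes can legally coexist.
void TransitionToGeneral(vk::CommandBuffer cmdbuf, vk::Image image,
                         vk::ImageLayout old_layout, std::uint32_t levels) {
    const vk::ImageMemoryBarrier barrier = {
        .srcAccessMask = vk::AccessFlagBits::eColorAttachmentWrite,
        .dstAccessMask = vk::AccessFlagBits::eShaderRead,
        .oldLayout = old_layout,
        .newLayout = vk::ImageLayout::eGeneral,
        .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .image = image,
        .subresourceRange = {vk::ImageAspectFlagBits::eColor, 0, levels, 0, 1},
    };
    // The access/stage pair mirrors the eGeneral case added to Transition below.
    cmdbuf.pipelineBarrier(vk::PipelineStageFlagBits::eColorAttachmentOutput,
                           vk::PipelineStageFlagBits::eFragmentShader,
                           vk::DependencyFlagBits::eByRegion, {}, {}, barrier);
}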
@@ -847,6 +808,47 @@ bool RasterizerVulkan::Draw(bool accelerate, bool is_indexed) {
     // Enable scissor test to prevent drawing outside of the framebuffer region
     pipeline_cache.SetScissor(draw_rect.left, draw_rect.bottom, draw_rect.GetWidth(), draw_rect.GetHeight());
 
+    auto valid_surface = color_surface ? color_surface : depth_surface;
+    const FramebufferInfo framebuffer_info = {
+        .color = color_surface ? color_surface->alloc.image_view : VK_NULL_HANDLE,
+        .depth = depth_surface ? depth_surface->alloc.image_view : VK_NULL_HANDLE,
+        .renderpass = renderpass_cache.GetRenderpass(pipeline_info.color_attachment,
+                                                     pipeline_info.depth_attachment, false),
+        .width = valid_surface->GetScaledWidth(),
+        .height = valid_surface->GetScaledHeight()
+    };
+
+    auto [it, new_framebuffer] = framebuffers.try_emplace(framebuffer_info, vk::Framebuffer{});
+    if (new_framebuffer) {
+        it->second = CreateFramebuffer(framebuffer_info);
+    }
+
+    if (color_surface) {
+        runtime.Transition(command_buffer, color_surface->alloc,
+                           vk::ImageLayout::eColorAttachmentOptimal,
+                           0, color_surface->alloc.levels);
+    }
+
+    if (depth_surface) {
+        runtime.Transition(command_buffer, depth_surface->alloc,
+                           vk::ImageLayout::eDepthStencilAttachmentOptimal,
+                           0, depth_surface->alloc.levels);
+    }
+
+    const vk::RenderPassBeginInfo renderpass_begin = {
+        .renderPass =
+            renderpass_cache.GetRenderpass(pipeline_info.color_attachment,
+                                           pipeline_info.depth_attachment, false),
+        .framebuffer = it->second,
+        .renderArea = vk::Rect2D{
+            .offset = {static_cast<s32>(draw_rect.left), static_cast<s32>(draw_rect.bottom)},
+            .extent = {draw_rect.GetWidth(), draw_rect.GetHeight()}
+        },
+
+        .clearValueCount = 0,
+        .pClearValues = nullptr
+    };
+
     // Draw the vertex batch
     bool succeeded = true;
     if (accelerate) {
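Note: framebuffers are now created lazily and cached by their FramebufferInfo key, and the surface transitions are guarded so a missing colour or depth attachment no longer produces a barrier on a null allocation. The snippet below is a minimal, self-contained sketch of the same try_emplace create-on-miss pattern; FramebufferKey and GetOrCreateFramebuffer are hypothetical stand-ins, not the commit's types.

#include <compare>
#include <cstdint>
#include <map>

// Simplified, hypothetical key; the real FramebufferInfo in this commit also
// carries the vk::ImageView handles and the render pass it was created against.
struct FramebufferKey {
    std::uint64_t color_view{};
    std::uint64_t depth_view{};
    std::uint32_t width{};
    std::uint32_t height{};
    auto operator<=>(const FramebufferKey&) const = default;
};

// Create-on-miss lookup mirroring the try_emplace pattern used in Draw():
// the map slot is default-constructed first, and the expensive framebuffer
// creation only runs when the key was not present yet.
template <typename Framebuffer, typename CreateFn>
Framebuffer& GetOrCreateFramebuffer(std::map<FramebufferKey, Framebuffer>& cache,
                                    const FramebufferKey& key, CreateFn&& create) {
    auto [it, inserted] = cache.try_emplace(key, Framebuffer{});
    if (inserted) {
        it->second = create(key);
    }
    return it->second;
}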
@@ -854,6 +856,7 @@ bool RasterizerVulkan::Draw(bool accelerate, bool is_indexed) {
     } else {
         pipeline_cache.UseTrivialVertexShader();
         pipeline_cache.UseTrivialGeometryShader();
+        pipeline_cache.BindPipeline(pipeline_info);
 
         // Bind the vertex buffer at the current mapped offset. This effectively means
         // that when base_vertex is zero the GPU will start drawing from the current mapped
@@ -872,8 +875,9 @@ bool RasterizerVulkan::Draw(bool accelerate, bool is_indexed) {
            std::memcpy(array_ptr, vertex_batch.data() + base_vertex, vertex_size);
            vertex_buffer.Commit(vertex_size);
 
-           pipeline_cache.BindPipeline(pipeline_info);
+           renderpass_cache.EnterRenderpass(renderpass_begin);
            command_buffer.draw(vertices, 1, base_vertex, 0);
+           renderpass_cache.ExitRenderpass();
        }
     }
 
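Note: in the software-vertex path the pipeline is now bound before the render pass is entered, and EnterRenderpass/ExitRenderpass bracket only the draw itself, so the vertex-buffer barrier recorded by StreamBuffer::Commit lands outside the pass (vkCmdPipelineBarrier inside a render pass is only legal with a matching self-dependency). A minimal sketch of such scoping follows; it assumes nothing about the real RenderpassCache beyond the Enter/Exit naming seen in this hunk.

#include <vulkan/vulkan.hpp>

// Hedged sketch, not the commit's implementation: open a render pass only when
// one is requested, and make sure it is closed before commands that are
// illegal inside a pass (copies, most barriers) get recorded.
class RenderpassScope {
public:
    void Enter(vk::CommandBuffer cmdbuf, const vk::RenderPassBeginInfo& begin_info) {
        // End any previously open pass first; render passes cannot nest.
        Exit(cmdbuf);
        cmdbuf.beginRenderPass(begin_info, vk::SubpassContents::eInline);
        active = true;
    }

    void Exit(vk::CommandBuffer cmdbuf) {
        if (active) {
            cmdbuf.endRenderPass();
            active = false;
        }
    }

private:
    bool active = false;
};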
@@ -1582,7 +1586,7 @@ bool RasterizerVulkan::AccelerateDisplay(const GPU::Regs::FramebufferConfig& con
         (float)src_rect.bottom / (float)scaled_height, (float)src_rect.left / (float)scaled_width,
         (float)src_rect.top / (float)scaled_height, (float)src_rect.right / (float)scaled_width);
 
-    screen_info.display_texture = src_surface->alloc;
+    screen_info.display_texture = &src_surface->alloc;
 
     return true;
 }
@@ -109,9 +109,9 @@ void RenderpassCache::CreatePresentRenderpass(vk::Format format) {
 vk::RenderPass RenderpassCache::GetRenderpass(VideoCore::PixelFormat color, VideoCore::PixelFormat depth,
                                               bool is_clear) const {
     const u32 color_index =
-        color == VideoCore::PixelFormat::Invalid ? 0 : static_cast<u32>(color);
+        color == VideoCore::PixelFormat::Invalid ? 0 : (static_cast<u32>(color) + 1);
     const u32 depth_index =
-        depth == VideoCore::PixelFormat::Invalid ? 0 : (static_cast<u32>(depth) - 13);
+        depth == VideoCore::PixelFormat::Invalid ? 0 : (static_cast<u32>(depth) - 14);
 
     ASSERT(color_index <= MAX_COLOR_FORMATS && depth_index <= MAX_DEPTH_FORMATS);
     return cached_renderpasses[color_index][depth_index][is_clear];
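Note: the renderpass table is indexed by (colour, depth, is_clear). The change shifts valid colour formats up by one so that slot 0 can mean "no colour attachment", and rebases the depth formats by 14 instead of 13. The helper below only illustrates that arithmetic; the enum values assumed here (colour formats starting at 0, first depth format at 14) are inferred from this hunk and not verified against the full PixelFormat enum.

#include <cstdint>

// Illustrative only: mirrors the index math above.
constexpr std::uint32_t ColorIndex(bool has_color, std::uint32_t format) {
    return has_color ? format + 1 : 0; // slot 0 is reserved for "no colour attachment"
}

constexpr std::uint32_t DepthIndex(bool has_depth, std::uint32_t format) {
    return has_depth ? format - 14 : 0; // assumed: first depth enumerator is 14
}

static_assert(ColorIndex(false, 0) == 0); // Invalid -> slot 0
static_assert(ColorIndex(true, 0) == 1);  // first colour format -> slot 1
static_assert(DepthIndex(true, 14) == 0); // assumed first depth format -> slot 0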
@@ -7,8 +7,8 @@
 #include "common/logging/log.h"
 #include "video_core/renderer_vulkan/vk_shader.h"
 #include <glslang/Public/ShaderLang.h>
-#include <glslang/SPIRV/GlslangToSpv.h>
 #include <glslang/Include/ResourceLimits.h>
+#include <SPIRV/GlslangToSpv.h>
 
 namespace Vulkan {
 
@@ -36,7 +36,7 @@ struct LightSrc {
     float dist_atten_scale;
 };
 
-layout (std140) uniform shader_data {
+layout (set = 0, binding = 1, std140) uniform shader_data {
     int framebuffer_scale;
     int alphatest_ref;
     float depth_scale;
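Note: unlike desktop OpenGL, Vulkan GLSL requires explicit set/binding qualifiers on uniform blocks, and the pair chosen here has to agree with the descriptor set layout built on the C++ side. Below is a hedged sketch of a matching binding; the real layout lives in the pipeline cache, which is not part of this hunk, MakeShaderDataLayout is a made-up name, and VULKAN_HPP_NO_CONSTRUCTORS is assumed so the vk:: structs accept designated initializers.

#define VULKAN_HPP_NO_CONSTRUCTORS
#include <vulkan/vulkan.hpp>

// Hypothetical layout for set 0: binding 1 must be a uniform buffer visible to
// the fragment stage to match "layout (set = 0, binding = 1, std140)".
vk::UniqueDescriptorSetLayout MakeShaderDataLayout(vk::Device device) {
    const vk::DescriptorSetLayoutBinding shader_data_binding = {
        .binding = 1,
        .descriptorType = vk::DescriptorType::eUniformBuffer,
        .descriptorCount = 1,
        .stageFlags = vk::ShaderStageFlagBits::eFragment,
    };
    const vk::DescriptorSetLayoutCreateInfo layout_info = {
        .bindingCount = 1,
        .pBindings = &shader_data_binding,
    };
    return device.createDescriptorSetLayoutUnique(layout_info);
}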
@@ -154,25 +154,27 @@ std::tuple<u8*, u32, bool> StreamBuffer::Map(u32 size, u32 alignment) {
 }
 
 void StreamBuffer::Commit(u32 size) {
-    vk::CommandBuffer command_buffer = scheduler.GetRenderCommandBuffer();
+    if (size > 0) {
+        vk::CommandBuffer command_buffer = scheduler.GetRenderCommandBuffer();
 
     auto [access_mask, stage_mask] = ToVkAccessStageFlags(usage);
     const vk::BufferMemoryBarrier buffer_barrier = {
         .srcAccessMask = vk::AccessFlagBits::eTransferWrite,
         .dstAccessMask = access_mask,
         .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
         .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
         .buffer = buffer,
         .offset = buffer_offset,
         .size = size
     };
 
     command_buffer.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer, stage_mask,
                                    vk::DependencyFlagBits::eByRegion, {}, buffer_barrier, {});
 
 
     buffer_offset += size;
     available_size -= size;
+    }
 }
 
 void StreamBuffer::Flush() {
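Note: the size > 0 guard matters because a vk::BufferMemoryBarrier must cover a non-zero range, so committing an empty batch previously recorded an invalid barrier and tripped validation. A hedged usage sketch of the Map/Commit pair follows; StreamBuffer is taken as a template parameter because only the shapes visible in this hunk (Map returning pointer/offset/invalidate, Commit taking a byte count) are assumed.

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Hedged sketch: upload a CPU-side batch through a mapped streaming buffer.
template <typename StreamBuffer>
void UploadVertices(StreamBuffer& vertex_buffer, const std::vector<std::byte>& batch) {
    const std::uint32_t size = static_cast<std::uint32_t>(batch.size());
    auto [ptr, offset, invalidated] = vertex_buffer.Map(size, /*alignment=*/4);
    if (size > 0) {
        std::memcpy(ptr, batch.data(), size);
    }
    // Commit(0) is now a no-op, so callers no longer need to special-case
    // empty batches to avoid a zero-sized vk::BufferMemoryBarrier.
    vertex_buffer.Commit(size);
}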
@@ -5,6 +5,7 @@
 #pragma once
 #include <array>
 #include <map>
+#include <span>
 #include "common/assert.h"
 #include "video_core/renderer_vulkan/vk_common.h"
 
@@ -84,7 +84,9 @@ void Swapchain::Create(u32 width, u32 height, bool vsync_enabled) {
         device.destroyImageView(image.image_view);
         device.destroyFramebuffer(image.framebuffer);
     }
 
     swapchain_images.clear();
+    swapchain_images.resize(images.size());
+
     std::ranges::transform(images, swapchain_images.begin(), [&](vk::Image image) -> Image {
         const vk::ImageViewCreateInfo view_info = {
@@ -11,7 +11,6 @@
 namespace Vulkan {
 
 TaskScheduler::TaskScheduler(const Instance& instance) : instance{instance} {
-
     vk::Device device = instance.GetDevice();
     const vk::CommandPoolCreateInfo command_pool_info = {
         .flags = vk::CommandPoolCreateFlagBits::eResetCommandBuffer,
@@ -191,7 +191,8 @@ ImageAlloc TextureRuntime::Allocate(u32 width, u32 height, VideoCore::PixelForma
         .image = image,
         .image_view = image_view,
         .allocation = allocation,
-        .levels = levels
+        .aspect = aspect,
+        .levels = levels,
     };
 }
 
@@ -200,7 +201,11 @@ void TextureRuntime::FormatConvert(VideoCore::PixelFormat format, bool upload,
     const VideoCore::SurfaceType type = VideoCore::GetFormatType(format);
     const vk::FormatFeatureFlagBits feature = ToVkFormatFeatures(type);
     if (!instance.IsFormatSupported(ToVkFormat(format), feature)) {
-        LOG_CRITICAL(Render_Vulkan, "Unimplemented format converion!");
+        if (format == VideoCore::PixelFormat::RGB8 && upload) {
+            return Pica::Texture::ConvertBGRToRGBA(source, dest);
+        }
+
+        LOG_CRITICAL(Render_Vulkan, "Unimplemented converion for format {}!", format);
         UNREACHABLE();
     }
 }
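Note: the fallback only triggers when instance.IsFormatSupported reports that the requested format lacks the needed features; three-component 24-bit formats such as RGB8 commonly fail that test on desktop drivers, hence the BGR to RGBA expansion on upload. Below is a sketch of how such a capability check can be written with Vulkan-Hpp; SupportsOptimalTiling is a hypothetical helper, not the commit's function.

#include <vulkan/vulkan.hpp>

// Hedged sketch of a format capability query similar in spirit to
// instance.IsFormatSupported: fetch the optimal-tiling features for the format
// and check that every required bit is present.
bool SupportsOptimalTiling(vk::PhysicalDevice physical_device, vk::Format format,
                           vk::FormatFeatureFlags required) {
    const vk::FormatProperties properties = physical_device.getFormatProperties(format);
    return (properties.optimalTilingFeatures & required) == required;
}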
@@ -431,6 +436,11 @@ void TextureRuntime::Transition(vk::CommandBuffer command_buffer, ImageAlloc& al
         info.access = vk::AccessFlagBits::eTransferWrite;
         info.stage = vk::PipelineStageFlagBits::eTransfer;
         break;
+    case vk::ImageLayout::eGeneral:
+        info.access = vk::AccessFlagBits::eInputAttachmentRead;
+        info.stage = vk::PipelineStageFlagBits::eColorAttachmentOutput |
+                     vk::PipelineStageFlagBits::eFragmentShader;
+        break;
     default:
         LOG_CRITICAL(Render_Vulkan, "Unhandled vulkan image layout {}\n", layout);
         UNREACHABLE();
@@ -467,7 +477,9 @@ void TextureRuntime::Transition(vk::CommandBuffer command_buffer, ImageAlloc& al
 Surface::Surface(VideoCore::SurfaceParams& params, TextureRuntime& runtime)
     : VideoCore::SurfaceBase<Surface>{params}, runtime{runtime}, instance{runtime.GetInstance()},
       scheduler{runtime.GetScheduler()} {
-    alloc = runtime.Allocate(GetScaledWidth(), GetScaledHeight(), params.pixel_format, texture_type);
+    if (params.pixel_format != VideoCore::PixelFormat::Invalid) {
+        alloc = runtime.Allocate(GetScaledWidth(), GetScaledHeight(), params.pixel_format, texture_type);
+    }
 }
 
 Surface::~Surface() {
@@ -44,6 +44,9 @@ public:
                    RenderpassCache& renderpass_cache);
     ~TextureRuntime();
 
+    TextureRuntime(const TextureRuntime&) = delete;
+    TextureRuntime& operator=(const TextureRuntime&) = delete;
+
     /// Maps an internal staging buffer of the provided size of pixel uploads/downloads
     StagingData FindStaging(u32 size, bool upload);
 
@@ -128,8 +131,8 @@ private:
 };
 
 struct Traits {
-    using Runtime = TextureRuntime;
-    using Surface = Surface;
+    using RuntimeType = TextureRuntime;
+    using SurfaceType = Surface;
 };
 
 using RasterizerCache = VideoCore::RasterizerCache<Traits>;
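Note: the rename also sidesteps `using Surface = Surface;`, which shadows the very name it refers to and is rejected by some compilers; presumably the templated RasterizerCache now looks the backend types up as RuntimeType/SurfaceType. A tiny self-contained illustration of that lookup follows; every name in it is a dummy, not the project's.

struct DummyRuntime {};
struct DummySurface {};

struct DummyTraits {
    using RuntimeType = DummyRuntime;
    using SurfaceType = DummySurface;
};

// A templated cache typically pulls its backend types out of the Traits
// struct by name, so the aliases must match the identifiers it expects.
template <class Traits>
class CacheSketch {
    using Runtime = typename Traits::RuntimeType;
    using Surface = typename Traits::SurfaceType;
};

// Explicit instantiation just checks that the names resolve.
template class CacheSketch<DummyTraits>;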
@@ -226,7 +226,7 @@ void ConvertBGRToRGB(std::span<const std::byte> source, std::span<std::byte> des
     for (std::size_t i = 0; i < source.size(); i += 3) {
         u32 bgr{};
         std::memcpy(&bgr, source.data() + i, 3);
-        const u32 rgb = std::byteswap(bgr << 8);
+        const u32 rgb = Common::swap32(bgr << 8);
         std::memcpy(dest.data(), &rgb, 3);
     }
 }
@@ -245,7 +245,7 @@ void ConvertBGRToRGBA(std::span<const std::byte> source, std::span<std::byte> de
 void ConvertABGRToRGBA(std::span<const std::byte> source, std::span<std::byte> dest) {
     for (u32 i = 0; i < source.size(); i += 4) {
         const u32 abgr = *reinterpret_cast<const u32*>(source.data() + i);
-        const u32 rgba = std::byteswap(abgr);
+        const u32 rgba = Common::swap32(abgr);
         std::memcpy(dest.data() + i, &rgba, 4);
     }
 }
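Note: the only functional change in these two hunks is replacing std::byteswap, a C++23 <bit> facility, with the project's existing Common::swap32 helper, presumably to stay within the older language standard; both reverse the four bytes of a 32-bit value. A small self-contained check of the arithmetic behind the (bgr << 8) trick, as an illustration:

#include <cstdint>

// Same byte reversal that Common::swap32 / std::byteswap perform, written out
// so the pre-shift in ConvertBGRToRGB is easy to follow on a little-endian machine.
constexpr std::uint32_t Swap32(std::uint32_t value) {
    return ((value & 0x000000FFu) << 24) | ((value & 0x0000FF00u) << 8) |
           ((value & 0x00FF0000u) >> 8) | ((value & 0xFF000000u) >> 24);
}

// Bytes B=0x11, G=0x22, R=0x33 read little-endian give bgr = 0x00332211.
static_assert(Swap32(0x00332211u << 8) == 0x00112233u);
// Storing 0x00112233 back little-endian writes the bytes 0x33, 0x22, 0x11,
// i.e. R, G, B: exactly the reordering the conversion needs.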
@@ -3,8 +3,8 @@
 // Refer to the license.txt file included.
 
 #include <system_error>
-#include <jwt/jwt.hpp>
 #include "common/logging/log.h"
+#include <jwt/jwt.hpp>
 #include "common/web_result.h"
 #include "web_service/verify_user_jwt.h"
 #include "web_service/web_backend.h"