renderer_vulkan: Implement vulkan instance creation and state manager
* This commit continues to improve the backend code and starts work on replacing the old OpenGLState class with one better suited to Vulkan. Instance creation is also finally done. The only thing left before working on the rasterizer is the command buffer manager.
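For reference, vulkan.hpp instance creation generally takes the shape below. This is an illustrative sketch only, not the commit's code; the application name, API version, and extension list are placeholders, and the real backend would also append the platform-specific surface extension reported by the frontend window.

#include <vector>
#include <vulkan/vulkan.hpp>

vk::UniqueInstance MakeInstance(bool enable_validation) {
    const vk::ApplicationInfo app_info("Citra", 1, "Citra", 1, VK_API_VERSION_1_1);

    // Validation layer is only requested when explicitly enabled.
    std::vector<const char*> layers;
    if (enable_validation) {
        layers.push_back("VK_LAYER_KHRONOS_validation");
    }

    // VK_KHR_surface is always needed for presentation; the window-system
    // specific extension (e.g. a Win32/Xlib/Wayland surface extension) is
    // omitted here for brevity.
    std::vector<const char*> extensions = {VK_KHR_SURFACE_EXTENSION_NAME};

    vk::InstanceCreateInfo instance_info;
    instance_info.setPApplicationInfo(&app_info);
    instance_info.setPEnabledLayerNames(layers);
    instance_info.setPEnabledExtensionNames(extensions);

    return vk::createInstanceUnique(instance_info);
}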
@@ -83,8 +83,8 @@ add_library(video_core STATIC
    renderer_vulkan/vk_rasterizer.h
    renderer_vulkan/vk_pipeline.cpp
    renderer_vulkan/vk_pipeline.h
    renderer_vulkan/vk_pipeline_manager.h
    renderer_vulkan/vk_pipeline_manager.cpp
    renderer_vulkan/vk_pipeline_manager.h
    renderer_vulkan/vk_shader_state.h
    renderer_vulkan/vk_state.cpp
    renderer_vulkan/vk_state.h
@@ -1,4 +1,4 @@
// Copyright 2014 Citra Emulator Project
// Copyright 2022 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

@@ -35,7 +35,7 @@
#include "video_core/renderer_opengl/renderer_opengl.h"
#include "video_core/video_core.h"

namespace OpenGL {
namespace Vulkan {
|
||||
|
||||
// If the size of this is too small, it ends up creating a soft cap on FPS as the renderer will have
|
||||
// to wait on available presentation frames. There doesn't seem to be much of a downside to a larger
|
||||
@@ -364,7 +364,7 @@ static std::array<GLfloat, 3 * 2> MakeOrthographicMatrix(const float width, cons
|
||||
return matrix;
|
||||
}
|
||||
|
||||
RendererOpenGL::RendererOpenGL(Frontend::EmuWindow& window)
|
||||
RendererVulkan::RendererVulkan(Frontend::EmuWindow& window)
|
||||
: RendererBase{window}, frame_dumper(Core::System::GetInstance().VideoDumper(), window) {
|
||||
|
||||
window.mailbox = std::make_unique<OGLTextureMailbox>();
|
||||
|
@@ -9,9 +9,8 @@
|
||||
#include "common/math_util.h"
|
||||
#include "core/hw/gpu.h"
|
||||
#include "video_core/renderer_base.h"
|
||||
#include "video_core/renderer_opengl/frame_dumper_opengl.h"
|
||||
#include "video_core/renderer_opengl/gl_resource_manager.h"
|
||||
#include "video_core/renderer_opengl/gl_state.h"
|
||||
#include "video_core/renderer_vulkan/vk_resource_cache.h"
|
||||
#include "video_core/renderer_vulkan/vk_state.h"
|
||||
|
||||
namespace Layout {
|
||||
struct FramebufferLayout;
|
||||
@@ -20,46 +19,28 @@ struct FramebufferLayout;
|
||||
namespace Frontend {
|
||||
|
||||
struct Frame {
|
||||
u32 width{}; /// Width of the frame (to detect resize)
|
||||
u32 height{}; /// Height of the frame
|
||||
bool color_reloaded = false; /// Texture attachment was recreated (ie: resized)
|
||||
OpenGL::OGLRenderbuffer color{}; /// Buffer shared between the render/present FBO
|
||||
OpenGL::OGLFramebuffer render{}; /// FBO created on the render thread
|
||||
OpenGL::OGLFramebuffer present{}; /// FBO created on the present thread
|
||||
GLsync render_fence{}; /// Fence created on the render thread
|
||||
GLsync present_fence{}; /// Fence created on the presentation thread
|
||||
u32 width = 0, height = 0;
|
||||
bool color_reloaded = false;
|
||||
Vulkan::VKTexture color;
|
||||
Vulkan::VKFramebuffer render, present;
|
||||
vk::UniqueFence render_fence, present_fence;
|
||||
};
|
||||
} // namespace Frontend
|
||||
|
||||
namespace Vulkan {
|
||||
|
||||
/// Structure used for storing information about the textures for each 3DS screen
|
||||
struct TextureInfo {
|
||||
OGLTexture resource;
|
||||
GLsizei width;
|
||||
GLsizei height;
|
||||
GPU::Regs::PixelFormat format;
|
||||
GLenum gl_format;
|
||||
GLenum gl_type;
|
||||
};
|
||||
|
||||
/// Structure used for storing information about the display target for each 3DS screen
|
||||
struct ScreenInfo {
|
||||
GLuint display_texture;
|
||||
u32 display_texture;
|
||||
Common::Rectangle<float> display_texcoords;
|
||||
TextureInfo texture;
|
||||
VKTexture texture;
|
||||
GPU::Regs::PixelFormat format;
|
||||
};
|
||||
|
||||
struct PresentationTexture {
|
||||
u32 width = 0;
|
||||
u32 height = 0;
|
||||
OGLTexture texture;
|
||||
};
|
||||
|
||||
class RendererOpenGL : public RendererBase {
|
||||
class RendererVulkan : public RendererBase {
|
||||
public:
|
||||
explicit RendererOpenGL(Frontend::EmuWindow& window);
|
||||
~RendererOpenGL() override;
|
||||
explicit RendererVulkan(Frontend::EmuWindow& window);
|
||||
~RendererVulkan() override;
|
||||
|
||||
/// Initialize the renderer
|
||||
VideoCore::ResultStatus Init() override;
|
||||
@@ -74,12 +55,6 @@ public:
|
||||
/// context
|
||||
void TryPresent(int timeout_ms) override;
|
||||
|
||||
/// Prepares for video dumping (e.g. create necessary buffers, etc)
|
||||
void PrepareVideoDumping() override;
|
||||
|
||||
/// Cleans up after video dumping is ended
|
||||
void CleanupVideoDumping() override;
|
||||
|
||||
private:
|
||||
void InitOpenGLObjects();
|
||||
void ReloadSampler();
|
||||
@@ -106,33 +81,16 @@ private:
|
||||
// Fills active OpenGL texture with the given RGB color.
|
||||
void LoadColorToActiveGLTexture(u8 color_r, u8 color_g, u8 color_b, const TextureInfo& texture);
|
||||
|
||||
OpenGLState state;
|
||||
VulkanState state;
|
||||
|
||||
// OpenGL object IDs
|
||||
OGLVertexArray vertex_array;
|
||||
OGLBuffer vertex_buffer;
|
||||
VKBuffer vertex_buffer;
|
||||
OGLProgram shader;
|
||||
OGLFramebuffer screenshot_framebuffer;
|
||||
VKFramebuffer screenshot_framebuffer;
|
||||
OGLSampler filter_sampler;
|
||||
|
||||
/// Display information for top and bottom screens respectively
|
||||
std::array<ScreenInfo, 3> screen_infos;
|
||||
|
||||
// Shader uniform location indices
|
||||
GLuint uniform_modelview_matrix;
|
||||
GLuint uniform_color_texture;
|
||||
GLuint uniform_color_texture_r;
|
||||
|
||||
// Shader uniform for Dolphin compatibility
|
||||
GLuint uniform_i_resolution;
|
||||
GLuint uniform_o_resolution;
|
||||
GLuint uniform_layer;
|
||||
|
||||
// Shader attribute input indices
|
||||
GLuint attrib_position;
|
||||
GLuint attrib_tex_coord;
|
||||
|
||||
FrameDumperOpenGL frame_dumper;
|
||||
};
|
||||
|
||||
} // namespace OpenGL
|
||||
|
@@ -1,304 +1,178 @@
|
||||
#include "vk_context.h"
|
||||
#include "vk_buffer.h"
|
||||
#include "vk_swapchain.h"
|
||||
#include "vk_texture.h"
|
||||
// Copyright 2022 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <fstream>
|
||||
#include <array>
|
||||
#include "common/logging/log.h"
|
||||
#include "video_core/renderer_vulkan/vk_instance.h"
|
||||
|
||||
namespace Vulkan {
|
||||
|
||||
VKInstance::~VKInstance() {
|
||||
|
||||
PipelineLayoutInfo::PipelineLayoutInfo(const std::shared_ptr<VkContext>& context) :
|
||||
context(context)
|
||||
{
|
||||
}
|
||||
|
||||
PipelineLayoutInfo::~PipelineLayoutInfo()
|
||||
{
|
||||
for (int i = 0; i < shader_stages.size(); i++)
|
||||
context->device->destroyShaderModule(shader_stages[i].module);
|
||||
bool VKInstance::Create(vk::Instance instance, vk::PhysicalDevice physical_device,
|
||||
vk::SurfaceKHR surface, bool enable_validation_layer) {
|
||||
this->instance = instance;
|
||||
this->physical_device = physical_device;
|
||||
|
||||
// Determine required extensions and features
|
||||
if (!FindExtensions() || !FindFeatures())
|
||||
return false;
|
||||
|
||||
// Create logical device
|
||||
return CreateDevice(surface, enable_validation_layer);
|
||||
}
|
||||
|
||||
void PipelineLayoutInfo::add_shader_module(std::string_view filepath, vk::ShaderStageFlagBits stage)
|
||||
{
|
||||
std::ifstream shaderfile(filepath.data(), std::ios::ate | std::ios::binary);
|
||||
|
||||
if (!shaderfile.is_open())
|
||||
throw std::runtime_error("[UTIL] Failed to open shader file!");
|
||||
|
||||
size_t size = shaderfile.tellg();
|
||||
std::vector<char> buffer(size);
|
||||
|
||||
shaderfile.seekg(0);
|
||||
shaderfile.read(buffer.data(), size);
|
||||
shaderfile.close();
|
||||
|
||||
auto module = context->device->createShaderModule({ {}, buffer.size(), reinterpret_cast<const uint32_t*>(buffer.data()) });
|
||||
shader_stages.emplace_back(vk::PipelineShaderStageCreateFlags(), stage, module, "main");
|
||||
}
|
||||
|
||||
void PipelineLayoutInfo::add_resource(Resource* resource, vk::DescriptorType type, vk::ShaderStageFlags stages, int binding, int group)
|
||||
{
|
||||
resource_types[group].first[binding] = resource;
|
||||
resource_types[group].second.emplace_back(binding, type, 1, stages);
|
||||
needed[type]++;
|
||||
}
|
||||
|
||||
VkContext::VkContext(vk::UniqueInstance&& instance_, VkWindow* window) :
|
||||
instance(std::move(instance_)), window(window)
|
||||
{
|
||||
create_devices();
|
||||
}
|
||||
|
||||
VkContext::~VkContext()
|
||||
{
|
||||
device->waitIdle();
|
||||
|
||||
for (int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++)
|
||||
for (int j = 0; j < descriptor_sets.size(); j++)
|
||||
device->destroyDescriptorSetLayout(descriptor_layouts[i][j]);
|
||||
}
|
||||
|
||||
void VkContext::create(SwapchainInfo& info)
|
||||
{
|
||||
swapchain_info = info;
|
||||
|
||||
// Initialize context
|
||||
create_renderpass();
|
||||
create_command_buffers();
|
||||
}
|
||||
|
||||
vk::CommandBuffer& VkContext::get_command_buffer()
|
||||
{
|
||||
return command_buffers[window->image_index].get();
|
||||
}
|
||||
|
||||
void VkContext::create_devices(int device_id)
|
||||
{
|
||||
// Pick a physical device
|
||||
auto physical_devices = instance->enumeratePhysicalDevices();
|
||||
physical_device = physical_devices.front();
|
||||
|
||||
// Get available queue family properties
|
||||
auto family_props = physical_device.getQueueFamilyProperties();
|
||||
|
||||
// Discover a queue with both graphics and compute capabilities
|
||||
vk::QueueFlags search = vk::QueueFlagBits::eGraphics | vk::QueueFlagBits::eCompute;
|
||||
for (size_t i = 0; i < family_props.size(); i++)
|
||||
{
|
||||
auto& family = family_props[i];
|
||||
if ((family.queueFlags & search) == search)
|
||||
queue_family = i;
|
||||
bool VKInstance::CreateDevice(vk::SurfaceKHR surface, bool validation_enabled) {
|
||||
// Can't create an instance without a valid surface
|
||||
if (!surface) {
|
||||
LOG_CRITICAL(Render_Vulkan, "Invalid surface provided during instance creation!");
|
||||
return false;
|
||||
}
|
||||
|
||||
if (queue_family == -1)
|
||||
throw std::runtime_error("[VK] Could not find appropriate queue families!\n");
|
||||
auto family_properties = physical_device.getQueueFamilyProperties();
|
||||
if (family_properties.empty()) {
|
||||
LOG_CRITICAL(Render_Vulkan, "Vulkan physical device reported no queues.");
|
||||
return false;
|
||||
}
|
||||
|
||||
const float default_queue_priority = 0.0f;
|
||||
std::array<const char*, 1> device_extensions = { VK_KHR_SWAPCHAIN_EXTENSION_NAME };
|
||||
// Search queue families for graphics and present queues
|
||||
graphics_queue_family_index = -1;
|
||||
present_queue_family_index = -1;
|
||||
for (int i = 0; i < family_properties.size(); i++) {
|
||||
// Check if queue supports graphics
|
||||
if (family_properties[i].queueFlags & vk::QueueFlagBits::eGraphics) {
|
||||
graphics_queue_family_index = i;
|
||||
|
||||
auto queue_info = vk::DeviceQueueCreateInfo({}, queue_family, 1, &default_queue_priority);
|
||||
// If this queue also supports presentation we are finished
|
||||
if (physical_device.getSurfaceSupportKHR(i, surface)) {
|
||||
present_queue_family_index = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
std::array<vk::PhysicalDeviceFeatures, 1> features = {};
|
||||
features[0].samplerAnisotropy = true;
|
||||
// Check if queue supports presentation
|
||||
if (physical_device.getSurfaceSupportKHR(i, surface)) {
|
||||
present_queue_family_index = i;
|
||||
}
|
||||
}
|
||||
|
||||
vk::DeviceCreateInfo device_info({}, 1, &queue_info, 0, nullptr, device_extensions.size(), device_extensions.data(), features.data());
|
||||
if (graphics_queue_family_index == -1 ||
|
||||
present_queue_family_index == -1) {
|
||||
LOG_CRITICAL(Render_Vulkan, "Unable to find graphics and/or present queues.");
|
||||
return false;
|
||||
}
|
||||
|
||||
static constexpr float queue_priorities[] = { 1.0f };
|
||||
|
||||
vk::DeviceCreateInfo device_info;
|
||||
device_info.setPEnabledExtensionNames(device_extensions);
|
||||
|
||||
// Create queue create info structs
|
||||
if (graphics_queue_family_index != present_queue_family_index) {
|
||||
std::array<vk::DeviceQueueCreateInfo, 2> queue_infos = {
|
||||
vk::DeviceQueueCreateInfo({}, graphics_queue_family_index, 1, queue_priorities),
|
||||
vk::DeviceQueueCreateInfo({}, present_queue_family_index, 1, queue_priorities)
|
||||
};
|
||||
|
||||
device_info.setQueueCreateInfos(queue_infos);
|
||||
}
|
||||
else {
|
||||
std::array<vk::DeviceQueueCreateInfo, 1> queue_infos = {
|
||||
vk::DeviceQueueCreateInfo({}, graphics_queue_family_index, 1, queue_priorities),
|
||||
};
|
||||
|
||||
device_info.setQueueCreateInfos(queue_infos);
|
||||
}
|
||||
|
||||
// Set device features
|
||||
device_info.setPEnabledFeatures(&device_features);
|
||||
|
||||
// Enable debug layer on debug builds
|
||||
if (validation_enabled) {
|
||||
std::array<const char*, 1> layer_names = { "VK_LAYER_KHRONOS_validation" };
|
||||
device_info.setPEnabledLayerNames(layer_names);
|
||||
}
|
||||
|
||||
// Create logical device
|
||||
device = physical_device.createDeviceUnique(device_info);
|
||||
|
||||
graphics_queue = device->getQueue(queue_family, 0);
|
||||
// Grab the graphics and present queues.
|
||||
graphics_queue = device->getQueue(graphics_queue_family_index, 0);
|
||||
present_queue = device->getQueue(present_queue_family_index, 0);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void VkContext::create_renderpass()
|
||||
bool VKInstance::FindFeatures()
|
||||
{
|
||||
// Color attachment
|
||||
vk::AttachmentReference color_attachment_ref(0, vk::ImageLayout::eColorAttachmentOptimal);
|
||||
vk::AttachmentReference depth_attachment_ref(1, vk::ImageLayout::eDepthStencilAttachmentOptimal);
|
||||
vk::AttachmentDescription attachments[2] =
|
||||
{
|
||||
{
|
||||
{},
|
||||
window->swapchain_info.surface_format.format,
|
||||
vk::SampleCountFlagBits::e1,
|
||||
vk::AttachmentLoadOp::eClear,
|
||||
vk::AttachmentStoreOp::eStore,
|
||||
vk::AttachmentLoadOp::eDontCare,
|
||||
vk::AttachmentStoreOp::eDontCare,
|
||||
vk::ImageLayout::eUndefined,
|
||||
vk::ImageLayout::ePresentSrcKHR
|
||||
},
|
||||
{
|
||||
{},
|
||||
window->swapchain_info.depth_format,
|
||||
vk::SampleCountFlagBits::e1,
|
||||
vk::AttachmentLoadOp::eClear,
|
||||
vk::AttachmentStoreOp::eDontCare,
|
||||
vk::AttachmentLoadOp::eDontCare,
|
||||
vk::AttachmentStoreOp::eDontCare,
|
||||
vk::ImageLayout::eUndefined,
|
||||
vk::ImageLayout::eDepthStencilAttachmentOptimal
|
||||
auto available_features = physical_device.getFeatures();
|
||||
|
||||
// Not having geometry shaders or wide lines will cause issues with rendering.
if (!available_features.geometryShader && !available_features.wideLines) {
    LOG_WARNING(Render_Vulkan, "Geometry shaders not available! Rendering will be limited");
}
|
||||
|
||||
// Enable some common features other emulators like Dolphin use
|
||||
device_features.dualSrcBlend = available_features.dualSrcBlend;
|
||||
device_features.geometryShader = available_features.geometryShader;
|
||||
device_features.samplerAnisotropy = available_features.samplerAnisotropy;
|
||||
device_features.logicOp = available_features.logicOp;
|
||||
device_features.fragmentStoresAndAtomics = available_features.fragmentStoresAndAtomics;
|
||||
device_features.sampleRateShading = available_features.sampleRateShading;
|
||||
device_features.largePoints = available_features.largePoints;
|
||||
device_features.shaderStorageImageMultisample = available_features.shaderStorageImageMultisample;
|
||||
device_features.occlusionQueryPrecise = available_features.occlusionQueryPrecise;
|
||||
device_features.shaderClipDistance = available_features.shaderClipDistance;
|
||||
device_features.depthClamp = available_features.depthClamp;
|
||||
device_features.textureCompressionBC = available_features.textureCompressionBC;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool VKInstance::FindExtensions()
|
||||
{
|
||||
auto extensions = physical_device.enumerateDeviceExtensionProperties();
|
||||
if (extensions.empty()) {
|
||||
LOG_CRITICAL(Render_Vulkan, "No extensions supported by device.");
|
||||
return false;
|
||||
}
|
||||
|
||||
// List available device extensions
|
||||
for (const auto& prop : extensions) {
|
||||
LOG_INFO(Render_Vulkan, "Vulkan extension: {}", prop.extensionName);
|
||||
}
|
||||
|
||||
// Helper lambda for adding extensions
|
||||
auto AddExtension = [&](const char* name, bool required) {
|
||||
auto result = std::find_if(extensions.begin(), extensions.end(), [&](const auto& prop) {
|
||||
return !std::strcmp(name, prop.extensionName);
|
||||
});
|
||||
|
||||
if (result != extensions.end()) {
|
||||
LOG_INFO(Render_Vulkan, "Enabling extension: {}", name);
|
||||
device_extensions.push_back(name);
|
||||
return true;
|
||||
}
|
||||
|
||||
if (required) {
|
||||
LOG_ERROR(Render_Vulkan, "Unable to find required extension {}.", name);
|
||||
}
|
||||
|
||||
return false;
|
||||
};
|
||||
|
||||
vk::SubpassDependency dependency
|
||||
(
|
||||
VK_SUBPASS_EXTERNAL,
|
||||
0,
|
||||
vk::PipelineStageFlagBits::eColorAttachmentOutput | vk::PipelineStageFlagBits::eEarlyFragmentTests,
|
||||
vk::PipelineStageFlagBits::eColorAttachmentOutput | vk::PipelineStageFlagBits::eEarlyFragmentTests,
|
||||
vk::AccessFlagBits::eNone,
|
||||
vk::AccessFlagBits::eColorAttachmentWrite | vk::AccessFlagBits::eDepthStencilAttachmentWrite,
|
||||
vk::DependencyFlagBits::eByRegion
|
||||
);
|
||||
|
||||
vk::SubpassDescription subpass({}, vk::PipelineBindPoint::eGraphics, {}, {}, 1, &color_attachment_ref, {}, &depth_attachment_ref);
|
||||
vk::RenderPassCreateInfo renderpass_info({}, 2, attachments, 1, &subpass, 1, &dependency);
|
||||
renderpass = device->createRenderPassUnique(renderpass_info);
|
||||
}
|
||||
|
||||
void VkContext::create_decriptor_sets(PipelineLayoutInfo &info)
|
||||
{
|
||||
std::vector<vk::DescriptorPoolSize> pool_sizes;
|
||||
pool_sizes.reserve(info.needed.size());
|
||||
for (const auto& [type, count] : info.needed)
|
||||
{
|
||||
pool_sizes.emplace_back(type, count * MAX_FRAMES_IN_FLIGHT);
|
||||
// The swapchain extension is required
|
||||
if (!AddExtension(VK_KHR_SWAPCHAIN_EXTENSION_NAME, true)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
for (const auto& [group, resource_info] : info.resource_types)
|
||||
{
|
||||
auto& bindings = resource_info.second;
|
||||
vk::DescriptorSetLayoutCreateInfo layout_info({}, bindings.size(), bindings.data());
|
||||
// Add more extensions in the future...
|
||||
|
||||
for (int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++)
|
||||
descriptor_layouts[i].push_back(device->createDescriptorSetLayout(layout_info));
|
||||
}
|
||||
|
||||
vk::DescriptorPoolCreateInfo pool_info({}, MAX_FRAMES_IN_FLIGHT * descriptor_layouts[0].size(), pool_sizes.size(), pool_sizes.data());
|
||||
descriptor_pool = device->createDescriptorPoolUnique(pool_info);
|
||||
|
||||
for (int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++)
|
||||
{
|
||||
vk::DescriptorSetAllocateInfo alloc_info(descriptor_pool.get(), descriptor_layouts[i]);
|
||||
descriptor_sets[i] = device->allocateDescriptorSets(alloc_info);
|
||||
|
||||
for (const auto& [group, resource_info] : info.resource_types)
|
||||
{
|
||||
auto& bindings = resource_info.second;
|
||||
std::array<vk::DescriptorImageInfo, MAX_BINDING_COUNT> image_infos;
|
||||
std::array<vk::DescriptorBufferInfo, MAX_BINDING_COUNT> buffer_infos;
|
||||
|
||||
std::vector<vk::WriteDescriptorSet> descriptor_writes;
|
||||
descriptor_writes.reserve(bindings.size());
|
||||
|
||||
auto& set = descriptor_sets[i][group];
|
||||
for (int j = 0; j < bindings.size(); j++)
|
||||
{
|
||||
switch (bindings[j].descriptorType)
|
||||
{
|
||||
case vk::DescriptorType::eCombinedImageSampler:
|
||||
{
|
||||
VkTexture* texture = reinterpret_cast<VkTexture*>(resource_info.first[j]);
|
||||
image_infos[j] = vk::DescriptorImageInfo(texture->texture_sampler.get(), texture->texture_view.get(),
|
||||
vk::ImageLayout::eShaderReadOnlyOptimal);
|
||||
descriptor_writes.emplace_back(set, j, 0, 1, vk::DescriptorType::eCombinedImageSampler, &image_infos[j]);
|
||||
break;
|
||||
}
|
||||
case vk::DescriptorType::eUniformTexelBuffer:
|
||||
case vk::DescriptorType::eStorageTexelBuffer:
|
||||
{
|
||||
Buffer* buffer = reinterpret_cast<Buffer*>(resource_info.first[j]);
|
||||
descriptor_writes.emplace_back(set, j, 0, 1, bindings[j].descriptorType, nullptr, nullptr, &buffer->buffer_view.get());
|
||||
break;
|
||||
}
|
||||
default:
|
||||
throw std::runtime_error("[VK] Unknown resource");
|
||||
}
|
||||
}
|
||||
|
||||
device->updateDescriptorSets(descriptor_writes, {});
|
||||
descriptor_writes.clear();
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void VkContext::create_graphics_pipeline(PipelineLayoutInfo& info)
|
||||
{
|
||||
create_decriptor_sets(info);
|
||||
|
||||
vk::PipelineVertexInputStateCreateInfo vertex_input_info
|
||||
(
|
||||
{},
|
||||
1,
|
||||
&Vertex::binding_desc,
|
||||
Vertex::attribute_desc.size(),
|
||||
Vertex::attribute_desc.data()
|
||||
);
|
||||
|
||||
vk::PipelineInputAssemblyStateCreateInfo input_assembly({}, vk::PrimitiveTopology::eTriangleList, VK_FALSE);
|
||||
vk::Viewport viewport(0, 0, window->swapchain_info.extent.width, window->swapchain_info.extent.height, 0, 1);
|
||||
vk::Rect2D scissor({ 0, 0 }, window->swapchain_info.extent);
|
||||
|
||||
vk::PipelineViewportStateCreateInfo viewport_state({}, 1, &viewport, 1, &scissor);
|
||||
vk::PipelineRasterizationStateCreateInfo rasterizer
|
||||
(
|
||||
{},
|
||||
VK_FALSE,
|
||||
VK_FALSE,
|
||||
vk::PolygonMode::eFill,
|
||||
vk::CullModeFlagBits::eNone,
|
||||
vk::FrontFace::eClockwise,
|
||||
VK_FALSE
|
||||
);
|
||||
|
||||
vk::PipelineMultisampleStateCreateInfo multisampling({}, vk::SampleCountFlagBits::e1, VK_FALSE);
|
||||
vk::PipelineColorBlendAttachmentState colorblend_attachment(VK_FALSE);
|
||||
colorblend_attachment.colorWriteMask = vk::ColorComponentFlagBits::eR | vk::ColorComponentFlagBits::eG |
|
||||
vk::ColorComponentFlagBits::eB | vk::ColorComponentFlagBits::eA;
|
||||
|
||||
vk::PipelineColorBlendStateCreateInfo color_blending({}, VK_FALSE, vk::LogicOp::eCopy, 1, &colorblend_attachment, {0});
|
||||
|
||||
vk::PipelineLayoutCreateInfo pipeline_layout_info({}, descriptor_layouts[0], {});
|
||||
pipeline_layout = device->createPipelineLayoutUnique(pipeline_layout_info);
|
||||
|
||||
vk::DynamicState dynamic_states[2] = { vk::DynamicState::eDepthCompareOp, vk::DynamicState::eLineWidth };
|
||||
vk::PipelineDynamicStateCreateInfo dynamic_info({}, 2, dynamic_states);
|
||||
|
||||
// Depth and stencil state containing depth and stencil compare and test operations
|
||||
// We only use depth tests and want depth tests and writes to be enabled and compare with less or equal
|
||||
vk::PipelineDepthStencilStateCreateInfo depth_info({}, VK_TRUE, VK_TRUE, vk::CompareOp::eGreaterOrEqual, VK_FALSE, VK_TRUE);
|
||||
depth_info.back.failOp = vk::StencilOp::eKeep;
|
||||
depth_info.back.passOp = vk::StencilOp::eKeep;
|
||||
depth_info.back.compareOp = vk::CompareOp::eAlways;
|
||||
depth_info.front = depth_info.back;
|
||||
|
||||
vk::GraphicsPipelineCreateInfo pipeline_info
|
||||
(
|
||||
{},
|
||||
info.shader_stages.size(),
|
||||
info.shader_stages.data(),
|
||||
&vertex_input_info,
|
||||
&input_assembly,
|
||||
nullptr,
|
||||
&viewport_state,&rasterizer,
|
||||
&multisampling,
|
||||
&depth_info,
|
||||
&color_blending,
|
||||
&dynamic_info,
|
||||
pipeline_layout.get(),
|
||||
renderpass.get()
|
||||
);
|
||||
|
||||
auto pipeline = device->createGraphicsPipelineUnique(nullptr, pipeline_info);
|
||||
if (pipeline.result == vk::Result::eSuccess)
|
||||
graphics_pipeline = std::move(pipeline.value);
|
||||
else
|
||||
throw std::runtime_error("[VK] Couldn't create graphics pipeline");
|
||||
}
|
||||
|
||||
void VkContext::create_command_buffers()
|
||||
{
|
||||
vk::CommandPoolCreateInfo pool_info(vk::CommandPoolCreateFlagBits::eResetCommandBuffer, queue_family);
|
||||
command_pool = device->createCommandPoolUnique(pool_info);
|
||||
|
||||
command_buffers.resize(window->swapchain_info.image_count);
|
||||
|
||||
vk::CommandBufferAllocateInfo alloc_info(command_pool.get(), vk::CommandBufferLevel::ePrimary, command_buffers.size());
|
||||
command_buffers = device->allocateCommandBuffersUnique(alloc_info);
|
||||
}
|
||||
} // namespace Vulkan
|
||||
|
@@ -12,15 +12,22 @@
|
||||
|
||||
namespace Vulkan {
|
||||
|
||||
// If the size of this is too small, it ends up creating a soft cap on FPS as the renderer will have
// to wait on available presentation frames. There doesn't seem to be much of a downside to a larger
// number, but 9 swap textures at 60 FPS presentation allows for 800% speed, so that's probably fine.
#ifdef ANDROID
// Reduce the size of swap_chain, since the UI only allows up to 200% speed.
constexpr std::size_t SWAP_CHAIN_SIZE = 6;
#else
constexpr std::size_t SWAP_CHAIN_SIZE = 9;
#endif
// Using multiple command buffers prevents stalling
constexpr u32 COMMAND_BUFFER_COUNT = 3;

struct FrameResources
{
    vk::CommandPool command_pool;
    std::array<vk::CommandBuffer, COMMAND_BUFFER_COUNT> command_buffers = {};
    vk::DescriptorPool descriptor_pool;
    vk::Fence fence;
    vk::Semaphore semaphore;
    u64 fence_counter = 0;
    bool init_command_buffer_used = false;
    bool semaphore_used = false;

    std::vector<std::function<void()>> cleanup_resources;
};
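The struct above groups everything the renderer needs per frame in flight. The command buffer manager that drives it is not part of this commit yet, but such resources are commonly cycled roughly as in this hypothetical sketch (BeginFrame and its exact form are assumptions, not the commit's code):

// Hypothetical sketch: before reusing a frame's resources, wait for the GPU
// to finish the work previously submitted with them, then reset the pool.
void BeginFrame(vk::Device device, FrameResources& frame) {
    (void)device.waitForFences(frame.fence, VK_TRUE, UINT64_MAX);
    device.resetFences(frame.fence);
    device.resetCommandPool(frame.command_pool, vk::CommandPoolResetFlags{});
}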
|
||||
|
||||
/// The global Vulkan instance
|
||||
class VKInstance
|
||||
@@ -30,41 +37,33 @@ public:
|
||||
~VKInstance();
|
||||
|
||||
/// Construct global Vulkan context
|
||||
void Create(vk::UniqueInstance instance, vk::PhysicalDevice gpu, vk::UniqueSurfaceKHR surface,
|
||||
bool enable_debug_reports, bool enable_validation_layer);
|
||||
bool Create(vk::Instance instance, vk::PhysicalDevice gpu,
|
||||
vk::SurfaceKHR surface, bool enable_validation_layer);
|
||||
|
||||
vk::Device& GetDevice() { return device.get(); }
|
||||
vk::PhysicalDevice& GetPhysicalDevice() { return physical_device; }
|
||||
|
||||
/// Get a valid command buffer for the current frame
|
||||
vk::CommandBuffer& GetCommandBuffer();
|
||||
|
||||
/// Feature support
|
||||
bool SupportsAnisotropicFiltering() const;
|
||||
|
||||
private:
|
||||
void CreateDevices(int device_id = 0);
|
||||
void CreateRenderpass();
|
||||
void CreateCommandBuffers();
|
||||
bool CreateDevice(vk::SurfaceKHR surface, bool validation_enabled);
|
||||
bool FindExtensions();
|
||||
bool FindFeatures();
|
||||
|
||||
public:
|
||||
// Queue family indexes
|
||||
u32 queue_family = -1;
|
||||
u32 present_queue_family_index{}, graphics_queue_family_index{};
|
||||
vk::Queue present_queue, graphics_queue;
|
||||
|
||||
// Core vulkan objects
|
||||
vk::UniqueInstance instance;
|
||||
vk::Instance instance;
|
||||
vk::PhysicalDevice physical_device;
|
||||
vk::UniqueDevice device;
|
||||
vk::Queue graphics_queue;
|
||||
|
||||
// Pipeline
|
||||
vk::UniqueDescriptorPool descriptor_pool;
|
||||
std::array<std::vector<vk::DescriptorSetLayout>, SWAP_CHAIN_SIZE> descriptor_layouts;
|
||||
std::array<std::vector<vk::DescriptorSet>, SWAP_CHAIN_SIZE> descriptor_sets;
|
||||
|
||||
// Command buffer
|
||||
vk::UniqueCommandPool command_pool;
|
||||
std::vector<vk::UniqueCommandBuffer> command_buffers;
|
||||
// Extensions and features
|
||||
std::vector<const char*> device_extensions;
|
||||
vk::PhysicalDeviceFeatures device_features{};
|
||||
};
|
||||
|
||||
extern std::unique_ptr<VKInstance> g_vk_instace;
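A hypothetical call site for the new interface, assuming the frontend has already created the vk::Instance, picked a physical device, and produced a window surface (the local variable names here are illustrative, not from the commit):

// Construct the global instance wrapper and create the logical device.
g_vk_instace = std::make_unique<VKInstance>();
if (!g_vk_instace->Create(instance, physical_device, surface, /*enable_validation_layer=*/true)) {
    LOG_CRITICAL(Render_Vulkan, "Failed to initialize the Vulkan device!");
}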
|
||||
|
@@ -6,7 +6,6 @@
|
||||
|
||||
#include <utility>
|
||||
#include <variant>
|
||||
#include "video_core/renderer_vulkan/vk_buffer.h"
|
||||
#include "video_core/renderer_vulkan/vk_texture.h"
|
||||
|
||||
namespace Vulkan {
|
||||
|
@@ -3,13 +3,11 @@
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <algorithm>
|
||||
#include <array>
|
||||
#include <atomic>
|
||||
#include <bitset>
|
||||
#include <cmath>
|
||||
#include <cstring>
|
||||
#include <iterator>
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
#include <unordered_set>
|
||||
#include <utility>
|
||||
@@ -40,19 +38,6 @@ namespace Vulkan {
|
||||
using SurfaceType = SurfaceParams::SurfaceType;
|
||||
using PixelFormat = SurfaceParams::PixelFormat;
|
||||
|
||||
const FormatTuple& GetFormatTuple(PixelFormat pixel_format) {
|
||||
const SurfaceType type = SurfaceParams::GetFormatType(pixel_format);
|
||||
if (type == SurfaceType::Color) {
|
||||
ASSERT(static_cast<std::size_t>(pixel_format) < fb_format_tuples.size());
|
||||
return fb_format_tuples[static_cast<unsigned int>(pixel_format)];
|
||||
} else if (type == SurfaceType::Depth || type == SurfaceType::DepthStencil) {
|
||||
std::size_t tuple_idx = static_cast<std::size_t>(pixel_format) - 14;
|
||||
ASSERT(tuple_idx < depth_format_tuples.size());
|
||||
return depth_format_tuples[tuple_idx];
|
||||
}
|
||||
return tex_tuple;
|
||||
}
|
||||
|
||||
template <typename Map, typename Interval>
|
||||
static constexpr auto RangeFromInterval(Map& map, const Interval& interval) {
|
||||
return boost::make_iterator_range(map.equal_range(interval));
|
||||
@@ -61,7 +46,7 @@ static constexpr auto RangeFromInterval(Map& map, const Interval& interval) {
|
||||
template <bool morton_to_gl, PixelFormat format>
|
||||
static void MortonCopyTile(u32 stride, u8* tile_buffer, u8* gpu_buffer) {
|
||||
constexpr u32 bytes_per_pixel = SurfaceParams::GetFormatBpp(format) / 8;
|
||||
constexpr u32 vk_bytes_per_pixel = CachedSurface::GetGLBytesPerPixel(format);
|
||||
constexpr u32 vk_bytes_per_pixel = CachedSurface::GetBytesPerPixel(format);
|
||||
for (u32 y = 0; y < 8; ++y) {
|
||||
for (u32 x = 0; x < 8; ++x) {
|
||||
u8* tile_ptr = tile_buffer + VideoCore::MortonInterleave(x, y) * bytes_per_pixel;
|
||||
@@ -90,7 +75,7 @@ static void MortonCopy(u32 stride, u32 height, u8* gpu_buffer, PAddr base, PAddr
|
||||
constexpr u32 bytes_per_pixel = SurfaceParams::GetFormatBpp(format) / 8;
|
||||
constexpr u32 tile_size = bytes_per_pixel * 64;
|
||||
|
||||
constexpr u32 gl_bytes_per_pixel = CachedSurface::GetGLBytesPerPixel(format);
|
||||
constexpr u32 gl_bytes_per_pixel = CachedSurface::GetBytesPerPixel(format);
|
||||
static_assert(gl_bytes_per_pixel >= bytes_per_pixel, "");
|
||||
gpu_buffer += gl_bytes_per_pixel - bytes_per_pixel;
|
||||
|
||||
@@ -220,67 +205,8 @@ VKTexture RasterizerCacheVulkan::AllocateSurfaceTexture(vk::Format format, u32 w
|
||||
return texture;
|
||||
}
|
||||
|
||||
static bool BlitTextures(GLuint src_tex, const Common::Rectangle<u32>& src_rect, GLuint dst_tex,
|
||||
const Common::Rectangle<u32>& dst_rect, SurfaceType type,
|
||||
GLuint read_fb_handle, GLuint draw_fb_handle) {
|
||||
OpenGLState prev_state = OpenGLState::GetCurState();
|
||||
SCOPE_EXIT({ prev_state.Apply(); });
|
||||
|
||||
OpenGLState state;
|
||||
state.draw.read_framebuffer = read_fb_handle;
|
||||
state.draw.draw_framebuffer = draw_fb_handle;
|
||||
state.Apply();
|
||||
|
||||
u32 buffers = 0;
|
||||
|
||||
if (type == SurfaceType::Color || type == SurfaceType::Texture) {
|
||||
glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, src_tex,
|
||||
0);
|
||||
glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0,
|
||||
0);
|
||||
|
||||
glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, dst_tex,
|
||||
0);
|
||||
glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0,
|
||||
0);
|
||||
|
||||
buffers = GL_COLOR_BUFFER_BIT;
|
||||
} else if (type == SurfaceType::Depth) {
|
||||
glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
|
||||
glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, src_tex, 0);
|
||||
glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0);
|
||||
|
||||
glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
|
||||
glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, dst_tex, 0);
|
||||
glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0);
|
||||
|
||||
buffers = GL_DEPTH_BUFFER_BIT;
|
||||
} else if (type == SurfaceType::DepthStencil) {
|
||||
glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
|
||||
glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D,
|
||||
src_tex, 0);
|
||||
|
||||
glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
|
||||
glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D,
|
||||
dst_tex, 0);
|
||||
|
||||
buffers = GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT;
|
||||
}
|
||||
|
||||
// TODO (wwylele): use GL_NEAREST for shadow map texture
|
||||
// Note: shadow map is treated as RGBA8 format in PICA, as well as in the rasterizer cache, but
|
||||
// doing linear interpolation componentwise would cause incorrect values. However, for a
|
||||
// well-programmed game this code path should be rarely executed for shadow map with
|
||||
// inconsistent scale.
|
||||
glBlitFramebuffer(src_rect.left, src_rect.bottom, src_rect.right, src_rect.top, dst_rect.left,
|
||||
dst_rect.bottom, dst_rect.right, dst_rect.top, buffers,
|
||||
buffers == GL_COLOR_BUFFER_BIT ? GL_LINEAR : GL_NEAREST);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool FillSurface(const Surface& surface, const u8* fill_data,
|
||||
const Common::Rectangle<u32>& fill_rect, GLuint draw_fb_handle) {
|
||||
/*static bool FillSurface(const Surface& surface, const u8* fill_data,
|
||||
const Common::Rectangle<u32>& fill_rect) {
|
||||
OpenGLState prev_state = OpenGLState::GetCurState();
|
||||
SCOPE_EXIT({ prev_state.Apply(); });
|
||||
|
||||
@@ -352,10 +278,10 @@ static bool FillSurface(const Surface& surface, const u8* fill_data,
|
||||
glClearBufferfi(GL_DEPTH_STENCIL, 0, value_float, value_int);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}*/
|
||||
|
||||
CachedSurface::~CachedSurface() {
|
||||
if (texture.handle) {
|
||||
if (texture.IsValid()) {
|
||||
auto tag = is_custom ? HostTextureTag{GetFormatTuple(PixelFormat::RGBA8),
|
||||
custom_tex_info.width, custom_tex_info.height}
|
||||
: HostTextureTag{GetFormatTuple(pixel_format), GetScaledWidth(),
|
||||
@@ -432,26 +358,25 @@ void RasterizerCacheVulkan::CopySurface(const Surface& src_surface, const Surfac
|
||||
return;
|
||||
}
|
||||
if (src_surface->CanSubRect(subrect_params)) {
|
||||
BlitTextures(src_surface->texture.handle, src_surface->GetScaledSubRect(subrect_params),
|
||||
dst_surface->texture.handle, dst_surface->GetScaledSubRect(subrect_params),
|
||||
src_surface->type, read_framebuffer.handle, draw_framebuffer.handle);
|
||||
auto srect = src_surface->GetScaledSubRect(subrect_params);
|
||||
auto drect = dst_surface->GetScaledSubRect(subrect_params);
|
||||
src_surface->texture.BlitTo(srect, dst_surface->texture, drect, src_surface->type);
|
||||
|
||||
return;
|
||||
}
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
MICROPROFILE_DEFINE(OpenGL_SurfaceLoad, "OpenGL", "Surface Load", MP_RGB(128, 192, 64));
|
||||
MICROPROFILE_DEFINE(Vulkan_SurfaceLoad, "Vulkan", "Surface Load", MP_RGB(128, 192, 64));
|
||||
void CachedSurface::LoadGLBuffer(PAddr load_start, PAddr load_end) {
|
||||
ASSERT(type != SurfaceType::Fill);
|
||||
const bool need_swap =
|
||||
GLES && (pixel_format == PixelFormat::RGBA8 || pixel_format == PixelFormat::RGB8);
|
||||
|
||||
const u8* const texture_src_data = VideoCore::g_memory->GetPhysicalPointer(addr);
|
||||
if (texture_src_data == nullptr)
|
||||
return;
|
||||
|
||||
if (gl_buffer.empty()) {
|
||||
gl_buffer.resize(width * height * GetGLBytesPerPixel(pixel_format));
|
||||
if (vk_buffer.empty()) {
|
||||
vk_buffer.resize(width * height * GetGLBytesPerPixel(pixel_format));
|
||||
}
|
||||
|
||||
// TODO: Should probably be done in ::Memory:: and check for other regions too
|
||||
@@ -461,34 +386,15 @@ void CachedSurface::LoadGLBuffer(PAddr load_start, PAddr load_end) {
|
||||
if (load_start < Memory::VRAM_VADDR && load_end > Memory::VRAM_VADDR)
|
||||
load_start = Memory::VRAM_VADDR;
|
||||
|
||||
MICROPROFILE_SCOPE(OpenGL_SurfaceLoad);
|
||||
MICROPROFILE_SCOPE(Vulkan_SurfaceLoad);
|
||||
|
||||
ASSERT(load_start >= addr && load_end <= end);
|
||||
const u32 start_offset = load_start - addr;
|
||||
|
||||
if (!is_tiled) {
|
||||
ASSERT(type == SurfaceType::Color);
|
||||
if (need_swap) {
|
||||
// TODO(liushuyu): check if the byteswap here is 100% correct
|
||||
// cannot fully test this
|
||||
if (pixel_format == PixelFormat::RGBA8) {
|
||||
for (std::size_t i = start_offset; i < load_end - addr; i += 4) {
|
||||
gl_buffer[i] = texture_src_data[i + 3];
|
||||
gl_buffer[i + 1] = texture_src_data[i + 2];
|
||||
gl_buffer[i + 2] = texture_src_data[i + 1];
|
||||
gl_buffer[i + 3] = texture_src_data[i];
|
||||
}
|
||||
} else if (pixel_format == PixelFormat::RGB8) {
|
||||
for (std::size_t i = start_offset; i < load_end - addr; i += 3) {
|
||||
gl_buffer[i] = texture_src_data[i + 2];
|
||||
gl_buffer[i + 1] = texture_src_data[i + 1];
|
||||
gl_buffer[i + 2] = texture_src_data[i];
|
||||
}
|
||||
}
|
||||
} else {
|
||||
std::memcpy(&gl_buffer[start_offset], texture_src_data + start_offset,
|
||||
std::memcpy(&vk_buffer[start_offset], texture_src_data + start_offset,
|
||||
load_end - load_start);
|
||||
}
|
||||
} else {
|
||||
if (type == SurfaceType::Texture) {
|
||||
Pica::Texture::TextureInfo tex_info{};
|
||||
@@ -507,11 +413,11 @@ void CachedSurface::LoadGLBuffer(PAddr load_start, PAddr load_end) {
|
||||
auto vec4 =
|
||||
Pica::Texture::LookupTexture(texture_src_data, x, height - 1 - y, tex_info);
|
||||
const std::size_t offset = (x + (width * y)) * 4;
|
||||
std::memcpy(&gl_buffer[offset], vec4.AsArray(), 4);
|
||||
std::memcpy(&vk_buffer[offset], vec4.AsArray(), 4);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
morton_to_gl_fns[static_cast<std::size_t>(pixel_format)](stride, height, &gl_buffer[0],
|
||||
morton_to_gpu_fns[static_cast<std::size_t>(pixel_format)](stride, height, &vk_buffer[0],
|
||||
addr, load_start, load_end);
|
||||
}
|
||||
}
|
||||
@@ -573,7 +479,7 @@ void CachedSurface::FlushGLBuffer(PAddr flush_start, PAddr flush_end) {
|
||||
flush_end - flush_start);
|
||||
}
|
||||
} else {
|
||||
gl_to_morton_fns[static_cast<std::size_t>(pixel_format)](stride, height, &gl_buffer[0],
|
||||
gpu_to_morton_fns[static_cast<std::size_t>(pixel_format)](stride, height, &gl_buffer[0],
|
||||
addr, flush_start, flush_end);
|
||||
}
|
||||
}
|
||||
|
@@ -10,22 +10,14 @@
|
||||
#include <mutex>
|
||||
#include <set>
|
||||
#include <tuple>
|
||||
#ifdef __GNUC__
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic ignored "-Wunused-local-typedefs"
|
||||
#endif
|
||||
#include <boost/icl/interval_map.hpp>
|
||||
#include <boost/icl/interval_set.hpp>
|
||||
#ifdef __GNUC__
|
||||
#pragma GCC diagnostic pop
|
||||
#endif
|
||||
#include <unordered_map>
|
||||
#include <vulkan/vulkan.hpp>
|
||||
#include <boost/functional/hash.hpp>
|
||||
#include "common/assert.h"
|
||||
#include "common/common_funcs.h"
|
||||
#include "common/common_types.h"
|
||||
#include "common/math_util.h"
|
||||
#include "core/custom_tex_cache.h"
|
||||
#include "video_core/renderer_vulkan/vk_surface_params.h"
|
||||
#include "video_core/renderer_vulkan/vk_texture.h"
|
||||
@@ -194,7 +186,7 @@ struct CachedSurface : SurfaceParams, std::enable_shared_from_this<CachedSurface
|
||||
bool is_custom = false;
|
||||
Core::CustomTexInfo custom_tex_info;
|
||||
|
||||
static constexpr unsigned int GetGLBytesPerPixel(PixelFormat format) {
|
||||
static constexpr unsigned int GetBytesPerPixel(PixelFormat format) {
|
||||
return format == PixelFormat::Invalid
|
||||
? 0
|
||||
: (format == PixelFormat::D24 || GetFormatType(format) == SurfaceType::Texture)
|
||||
@@ -205,17 +197,16 @@ struct CachedSurface : SurfaceParams, std::enable_shared_from_this<CachedSurface
|
||||
std::vector<u8> vk_buffer;
|
||||
|
||||
// Read/Write data in 3DS memory to/from gl_buffer
|
||||
void LoadGLBuffer(PAddr load_start, PAddr load_end);
|
||||
void FlushGLBuffer(PAddr flush_start, PAddr flush_end);
|
||||
void LoadGPUBuffer(PAddr load_start, PAddr load_end);
|
||||
void FlushGPUBuffer(PAddr flush_start, PAddr flush_end);
|
||||
|
||||
// Custom texture loading and dumping
|
||||
bool LoadCustomTexture(u64 tex_hash);
|
||||
void DumpTexture(VKTexture& target_tex, u64 tex_hash);
|
||||
|
||||
// Upload/Download data in vk_buffer in/to this surface's texture
|
||||
void UploadGLTexture(Common::Rectangle<u32> rect, GLuint read_fb_handle, GLuint draw_fb_handle);
|
||||
void DownloadGLTexture(const Common::Rectangle<u32>& rect, GLuint read_fb_handle,
|
||||
GLuint draw_fb_handle);
|
||||
void UploadGPUTexture(Common::Rectangle<u32> rect);
|
||||
void DownloadGPUTexture(const Common::Rectangle<u32>& rect);
|
||||
|
||||
std::shared_ptr<SurfaceWatcher> CreateWatcher() {
|
||||
auto watcher = std::make_shared<SurfaceWatcher>(weak_from_this());
|
||||
@@ -351,8 +342,8 @@ private:
|
||||
SurfaceMap dirty_regions;
|
||||
SurfaceSet remove_surfaces;
|
||||
|
||||
OGLFramebuffer read_framebuffer;
|
||||
OGLFramebuffer draw_framebuffer;
|
||||
VKFramebuffer read_framebuffer;
|
||||
VKFramebuffer draw_framebuffer;
|
||||
|
||||
u16 resolution_scale_factor;
|
||||
|
||||
|
@@ -27,10 +27,10 @@ bool VKResourceCache::Initialize()
|
||||
}};
|
||||
|
||||
std::array<vk::DescriptorSetLayoutBinding, 4> texture_set = {{
|
||||
{ 0, vk::DescriptorType::eSampledImage, 1, vk::ShaderStageFlagBits::eFragment }, // tex0
|
||||
{ 1, vk::DescriptorType::eSampledImage, 1, vk::ShaderStageFlagBits::eFragment }, // tex1
|
||||
{ 2, vk::DescriptorType::eSampledImage, 1, vk::ShaderStageFlagBits::eFragment }, // tex2
|
||||
{ 3, vk::DescriptorType::eSampledImage, 1, vk::ShaderStageFlagBits::eFragment }, // tex_cube
|
||||
{ 0, vk::DescriptorType::eCombinedImageSampler, 1, vk::ShaderStageFlagBits::eFragment }, // tex0
|
||||
{ 1, vk::DescriptorType::eCombinedImageSampler, 1, vk::ShaderStageFlagBits::eFragment }, // tex1
|
||||
{ 2, vk::DescriptorType::eCombinedImageSampler, 1, vk::ShaderStageFlagBits::eFragment }, // tex2
|
||||
{ 3, vk::DescriptorType::eCombinedImageSampler, 1, vk::ShaderStageFlagBits::eFragment }, // tex_cube
|
||||
}};
|
||||
|
||||
std::array<vk::DescriptorSetLayoutBinding, 3> lut_set = {{
|
||||
|
@@ -23,18 +23,18 @@ constexpr u32 DESCRIPTOR_SET_LAYOUT_COUNT = 3;
|
||||
class VKResourceCache
|
||||
{
|
||||
public:
|
||||
VKResourceCache() = default;
|
||||
~VKResourceCache();
|
||||
VKResourceCache() = default;
|
||||
~VKResourceCache();
|
||||
|
||||
// Perform at startup, create descriptor layouts, compiles all static shaders.
|
||||
bool Initialize();
|
||||
void Shutdown();
|
||||
// Perform at startup, create descriptor layouts, compiles all static shaders.
|
||||
bool Initialize();
|
||||
void Shutdown();
|
||||
|
||||
// Public interface.
|
||||
VKBuffer& GetTextureUploadBuffer() { return texture_upload_buffer; }
|
||||
vk::Sampler GetSampler(const SamplerInfo& info);
|
||||
vk::RenderPass GetRenderPass(vk::Format color_format, vk::Format depth_format, u32 multisamples, vk::AttachmentLoadOp load_op);
|
||||
vk::PipelineCache GetPipelineCache() const { return pipeline_cache.get(); }
|
||||
// Public interface.
|
||||
VKBuffer& GetTextureUploadBuffer() { return texture_upload_buffer; }
|
||||
vk::Sampler GetSampler(const SamplerInfo& info);
|
||||
vk::RenderPass GetRenderPass(vk::Format color_format, vk::Format depth_format, u32 multisamples, vk::AttachmentLoadOp load_op);
|
||||
vk::PipelineCache GetPipelineCache() const { return pipeline_cache.get(); }
|
||||
|
||||
private:
|
||||
// Dummy image for samplers that are unbound
|
||||
|
@@ -203,9 +203,11 @@ struct PicaFixedGSConfig : Common::HashableStruct<PicaGSConfigCommonRaw> {
|
||||
*/
|
||||
struct VKPipelineCacheKey {
|
||||
VKPipelineCacheKey(const Pica::Regs& regs, Pica::Shader::ShaderSetup& setup) :
|
||||
vertex_config(regs.vs, setup), fragment_config(PicaFSConfig::BuildFromRegs(regs)) {}
|
||||
vertex_config(regs.vs, setup), geometry_config(regs),
|
||||
fragment_config(PicaFSConfig::BuildFromRegs(regs)) {}
|
||||
|
||||
PicaVSConfig vertex_config;
|
||||
PicaFixedGSConfig geometry_config;
|
||||
PicaFSConfig fragment_config;
|
||||
};
|
||||
|
||||
|
@@ -1,452 +1,565 @@
|
||||
// Copyright 2015 Citra Emulator Project
|
||||
// Copyright 2022 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <glad/glad.h>
|
||||
#include "common/common_funcs.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "video_core/renderer_opengl/gl_state.h"
|
||||
#include "video_core/renderer_opengl/gl_vars.h"
|
||||
#include "video_core/renderer_vulkan/vk_state.h"
|
||||
|
||||
namespace OpenGL {
|
||||
namespace Vulkan {
|
||||
|
||||
OpenGLState OpenGLState::cur_state;
|
||||
std::unique_ptr<VulkanState> g_vk_state;
|
||||
|
||||
OpenGLState::OpenGLState() {
|
||||
// These all match default OpenGL values
|
||||
cull.enabled = false;
|
||||
cull.mode = GL_BACK;
|
||||
cull.front_face = GL_CCW;
|
||||
|
||||
depth.test_enabled = false;
|
||||
depth.test_func = GL_LESS;
|
||||
depth.write_mask = GL_TRUE;
|
||||
|
||||
color_mask.red_enabled = GL_TRUE;
|
||||
color_mask.green_enabled = GL_TRUE;
|
||||
color_mask.blue_enabled = GL_TRUE;
|
||||
color_mask.alpha_enabled = GL_TRUE;
|
||||
|
||||
stencil.test_enabled = false;
|
||||
stencil.test_func = GL_ALWAYS;
|
||||
stencil.test_ref = 0;
|
||||
stencil.test_mask = 0xFF;
|
||||
stencil.write_mask = 0xFF;
|
||||
stencil.action_depth_fail = GL_KEEP;
|
||||
stencil.action_depth_pass = GL_KEEP;
|
||||
stencil.action_stencil_fail = GL_KEEP;
|
||||
|
||||
blend.enabled = true;
|
||||
blend.rgb_equation = GL_FUNC_ADD;
|
||||
blend.a_equation = GL_FUNC_ADD;
|
||||
blend.src_rgb_func = GL_ONE;
|
||||
blend.dst_rgb_func = GL_ZERO;
|
||||
blend.src_a_func = GL_ONE;
|
||||
blend.dst_a_func = GL_ZERO;
|
||||
blend.color.red = 0.0f;
|
||||
blend.color.green = 0.0f;
|
||||
blend.color.blue = 0.0f;
|
||||
blend.color.alpha = 0.0f;
|
||||
|
||||
logic_op = GL_COPY;
|
||||
|
||||
for (auto& texture_unit : texture_units) {
|
||||
texture_unit.texture_2d = 0;
|
||||
texture_unit.sampler = 0;
|
||||
}
|
||||
|
||||
texture_cube_unit.texture_cube = 0;
|
||||
texture_cube_unit.sampler = 0;
|
||||
|
||||
texture_buffer_lut_lf.texture_buffer = 0;
|
||||
texture_buffer_lut_rg.texture_buffer = 0;
|
||||
texture_buffer_lut_rgba.texture_buffer = 0;
|
||||
|
||||
image_shadow_buffer = 0;
|
||||
image_shadow_texture_px = 0;
|
||||
image_shadow_texture_nx = 0;
|
||||
image_shadow_texture_py = 0;
|
||||
image_shadow_texture_ny = 0;
|
||||
image_shadow_texture_pz = 0;
|
||||
image_shadow_texture_nz = 0;
|
||||
|
||||
draw.read_framebuffer = 0;
|
||||
draw.draw_framebuffer = 0;
|
||||
draw.vertex_array = 0;
|
||||
draw.vertex_buffer = 0;
|
||||
draw.uniform_buffer = 0;
|
||||
draw.shader_program = 0;
|
||||
draw.program_pipeline = 0;
|
||||
|
||||
scissor.enabled = false;
|
||||
scissor.x = 0;
|
||||
scissor.y = 0;
|
||||
scissor.width = 0;
|
||||
scissor.height = 0;
|
||||
|
||||
viewport.x = 0;
|
||||
viewport.y = 0;
|
||||
viewport.width = 0;
|
||||
viewport.height = 0;
|
||||
|
||||
clip_distance = {};
|
||||
|
||||
renderbuffer = 0;
|
||||
// Define bitwise operators for DirtyState enum
DirtyState& operator|=(DirtyState& lhs, DirtyState rhs) {
    lhs = static_cast<DirtyState>(static_cast<unsigned>(lhs) | static_cast<unsigned>(rhs));
    return lhs;
}
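A matching bitwise AND is usually defined alongside so individual dirty bits can be tested; this companion operator is an assumption here, not shown in the hunk:

// Assumed companion operator for testing bits, mirroring the OR above, e.g.
// if ((dirty_flags & DirtyState::VertexBuffer) != DirtyState{}) { ... }
DirtyState operator&(DirtyState lhs, DirtyState rhs) {
    return static_cast<DirtyState>(static_cast<unsigned>(lhs) & static_cast<unsigned>(rhs));
}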
|
||||
|
||||
void OpenGLState::Apply() const {
|
||||
// Culling
|
||||
if (cull.enabled != cur_state.cull.enabled) {
|
||||
if (cull.enabled) {
|
||||
glEnable(GL_CULL_FACE);
|
||||
} else {
|
||||
glDisable(GL_CULL_FACE);
|
||||
}
|
||||
}
|
||||
void VulkanState::Create() {
|
||||
// Create a dummy texture which can be used in place of a real binding.
|
||||
VKTexture::Info info = {
|
||||
.width = 1,
|
||||
.height = 1,
|
||||
.format = vk::Format::eR8G8B8A8Unorm,
|
||||
.type = vk::ImageType::e2D,
|
||||
.view_type = vk::ImageViewType::e2D
|
||||
};
|
||||
|
||||
if (cull.mode != cur_state.cull.mode) {
|
||||
glCullFace(cull.mode);
|
||||
}
|
||||
dummy_texture.Create(info);
|
||||
//dummy_texture.TransitionLayout(vk::ImageLayout::eShaderReadOnlyOptimal);
|
||||
|
||||
if (cull.front_face != cur_state.cull.front_face) {
|
||||
glFrontFace(cull.front_face);
|
||||
}
|
||||
|
||||
// Depth test
|
||||
if (depth.test_enabled != cur_state.depth.test_enabled) {
|
||||
if (depth.test_enabled) {
|
||||
glEnable(GL_DEPTH_TEST);
|
||||
} else {
|
||||
glDisable(GL_DEPTH_TEST);
|
||||
}
|
||||
}
|
||||
|
||||
if (depth.test_func != cur_state.depth.test_func) {
|
||||
glDepthFunc(depth.test_func);
|
||||
}
|
||||
|
||||
// Depth mask
|
||||
if (depth.write_mask != cur_state.depth.write_mask) {
|
||||
glDepthMask(depth.write_mask);
|
||||
}
|
||||
|
||||
// Color mask
|
||||
if (color_mask.red_enabled != cur_state.color_mask.red_enabled ||
|
||||
color_mask.green_enabled != cur_state.color_mask.green_enabled ||
|
||||
color_mask.blue_enabled != cur_state.color_mask.blue_enabled ||
|
||||
color_mask.alpha_enabled != cur_state.color_mask.alpha_enabled) {
|
||||
glColorMask(color_mask.red_enabled, color_mask.green_enabled, color_mask.blue_enabled,
|
||||
color_mask.alpha_enabled);
|
||||
}
|
||||
|
||||
// Stencil test
|
||||
if (stencil.test_enabled != cur_state.stencil.test_enabled) {
|
||||
if (stencil.test_enabled) {
|
||||
glEnable(GL_STENCIL_TEST);
|
||||
} else {
|
||||
glDisable(GL_STENCIL_TEST);
|
||||
}
|
||||
}
|
||||
|
||||
if (stencil.test_func != cur_state.stencil.test_func ||
|
||||
stencil.test_ref != cur_state.stencil.test_ref ||
|
||||
stencil.test_mask != cur_state.stencil.test_mask) {
|
||||
glStencilFunc(stencil.test_func, stencil.test_ref, stencil.test_mask);
|
||||
}
|
||||
|
||||
if (stencil.action_depth_fail != cur_state.stencil.action_depth_fail ||
|
||||
stencil.action_depth_pass != cur_state.stencil.action_depth_pass ||
|
||||
stencil.action_stencil_fail != cur_state.stencil.action_stencil_fail) {
|
||||
glStencilOp(stencil.action_stencil_fail, stencil.action_depth_fail,
|
||||
stencil.action_depth_pass);
|
||||
}
|
||||
|
||||
// Stencil mask
|
||||
if (stencil.write_mask != cur_state.stencil.write_mask) {
|
||||
glStencilMask(stencil.write_mask);
|
||||
}
|
||||
|
||||
// Blending
|
||||
if (blend.enabled != cur_state.blend.enabled) {
|
||||
if (blend.enabled) {
|
||||
glEnable(GL_BLEND);
|
||||
} else {
|
||||
glDisable(GL_BLEND);
|
||||
}
|
||||
|
||||
// GLES does not support glLogicOp
|
||||
if (!GLES) {
|
||||
if (blend.enabled) {
|
||||
glDisable(GL_COLOR_LOGIC_OP);
|
||||
} else {
|
||||
glEnable(GL_COLOR_LOGIC_OP);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (blend.color.red != cur_state.blend.color.red ||
|
||||
blend.color.green != cur_state.blend.color.green ||
|
||||
blend.color.blue != cur_state.blend.color.blue ||
|
||||
blend.color.alpha != cur_state.blend.color.alpha) {
|
||||
glBlendColor(blend.color.red, blend.color.green, blend.color.blue, blend.color.alpha);
|
||||
}
|
||||
|
||||
if (blend.src_rgb_func != cur_state.blend.src_rgb_func ||
|
||||
blend.dst_rgb_func != cur_state.blend.dst_rgb_func ||
|
||||
blend.src_a_func != cur_state.blend.src_a_func ||
|
||||
blend.dst_a_func != cur_state.blend.dst_a_func) {
|
||||
glBlendFuncSeparate(blend.src_rgb_func, blend.dst_rgb_func, blend.src_a_func,
|
||||
blend.dst_a_func);
|
||||
}
|
||||
|
||||
if (blend.rgb_equation != cur_state.blend.rgb_equation ||
|
||||
blend.a_equation != cur_state.blend.a_equation) {
|
||||
glBlendEquationSeparate(blend.rgb_equation, blend.a_equation);
|
||||
}
|
||||
|
||||
// GLES does not support glLogicOp
|
||||
if (!GLES) {
|
||||
if (logic_op != cur_state.logic_op) {
|
||||
glLogicOp(logic_op);
|
||||
}
|
||||
}
|
||||
|
||||
// Textures
|
||||
for (u32 i = 0; i < texture_units.size(); ++i) {
|
||||
if (texture_units[i].texture_2d != cur_state.texture_units[i].texture_2d) {
|
||||
glActiveTexture(TextureUnits::PicaTexture(i).Enum());
|
||||
glBindTexture(GL_TEXTURE_2D, texture_units[i].texture_2d);
|
||||
}
|
||||
if (texture_units[i].sampler != cur_state.texture_units[i].sampler) {
|
||||
glBindSampler(i, texture_units[i].sampler);
|
||||
}
|
||||
}
|
||||
|
||||
if (texture_cube_unit.texture_cube != cur_state.texture_cube_unit.texture_cube) {
|
||||
glActiveTexture(TextureUnits::TextureCube.Enum());
|
||||
glBindTexture(GL_TEXTURE_CUBE_MAP, texture_cube_unit.texture_cube);
|
||||
}
|
||||
if (texture_cube_unit.sampler != cur_state.texture_cube_unit.sampler) {
|
||||
        glBindSampler(TextureUnits::TextureCube.id, texture_cube_unit.sampler);
    }

    // Texture buffer LUTs
    if (texture_buffer_lut_lf.texture_buffer != cur_state.texture_buffer_lut_lf.texture_buffer) {
        glActiveTexture(TextureUnits::TextureBufferLUT_LF.Enum());
        glBindTexture(GL_TEXTURE_BUFFER, texture_buffer_lut_lf.texture_buffer);
    }

    // Texture buffer LUTs
    if (texture_buffer_lut_rg.texture_buffer != cur_state.texture_buffer_lut_rg.texture_buffer) {
        glActiveTexture(TextureUnits::TextureBufferLUT_RG.Enum());
        glBindTexture(GL_TEXTURE_BUFFER, texture_buffer_lut_rg.texture_buffer);
    }

    // Texture buffer LUTs
    if (texture_buffer_lut_rgba.texture_buffer !=
        cur_state.texture_buffer_lut_rgba.texture_buffer) {
        glActiveTexture(TextureUnits::TextureBufferLUT_RGBA.Enum());
        glBindTexture(GL_TEXTURE_BUFFER, texture_buffer_lut_rgba.texture_buffer);
    }

    // Shadow Images
    if (image_shadow_buffer != cur_state.image_shadow_buffer) {
        glBindImageTexture(ImageUnits::ShadowBuffer, image_shadow_buffer, 0, GL_FALSE, 0,
                           GL_READ_WRITE, GL_R32UI);
    }

    if (image_shadow_texture_px != cur_state.image_shadow_texture_px) {
        glBindImageTexture(ImageUnits::ShadowTexturePX, image_shadow_texture_px, 0, GL_FALSE, 0,
                           GL_READ_ONLY, GL_R32UI);
    }

    if (image_shadow_texture_nx != cur_state.image_shadow_texture_nx) {
        glBindImageTexture(ImageUnits::ShadowTextureNX, image_shadow_texture_nx, 0, GL_FALSE, 0,
                           GL_READ_ONLY, GL_R32UI);
    }

    if (image_shadow_texture_py != cur_state.image_shadow_texture_py) {
        glBindImageTexture(ImageUnits::ShadowTexturePY, image_shadow_texture_py, 0, GL_FALSE, 0,
                           GL_READ_ONLY, GL_R32UI);
    }

    if (image_shadow_texture_ny != cur_state.image_shadow_texture_ny) {
        glBindImageTexture(ImageUnits::ShadowTextureNY, image_shadow_texture_ny, 0, GL_FALSE, 0,
                           GL_READ_ONLY, GL_R32UI);
    }

    if (image_shadow_texture_pz != cur_state.image_shadow_texture_pz) {
        glBindImageTexture(ImageUnits::ShadowTexturePZ, image_shadow_texture_pz, 0, GL_FALSE, 0,
                           GL_READ_ONLY, GL_R32UI);
    }

    if (image_shadow_texture_nz != cur_state.image_shadow_texture_nz) {
        glBindImageTexture(ImageUnits::ShadowTextureNZ, image_shadow_texture_nz, 0, GL_FALSE, 0,
                           GL_READ_ONLY, GL_R32UI);
    }

    // Framebuffer
    if (draw.read_framebuffer != cur_state.draw.read_framebuffer) {
        glBindFramebuffer(GL_READ_FRAMEBUFFER, draw.read_framebuffer);
    }
    if (draw.draw_framebuffer != cur_state.draw.draw_framebuffer) {
        glBindFramebuffer(GL_DRAW_FRAMEBUFFER, draw.draw_framebuffer);
    }

    // Vertex array
    if (draw.vertex_array != cur_state.draw.vertex_array) {
        glBindVertexArray(draw.vertex_array);
    }

    // Vertex buffer
    if (draw.vertex_buffer != cur_state.draw.vertex_buffer) {
        glBindBuffer(GL_ARRAY_BUFFER, draw.vertex_buffer);
    }

    // Uniform buffer
    if (draw.uniform_buffer != cur_state.draw.uniform_buffer) {
        glBindBuffer(GL_UNIFORM_BUFFER, draw.uniform_buffer);
    }

    // Shader program
    if (draw.shader_program != cur_state.draw.shader_program) {
        glUseProgram(draw.shader_program);
    }

    // Program pipeline
    if (draw.program_pipeline != cur_state.draw.program_pipeline) {
        glBindProgramPipeline(draw.program_pipeline);
    }

    // Scissor test
    if (scissor.enabled != cur_state.scissor.enabled) {
        if (scissor.enabled) {
            glEnable(GL_SCISSOR_TEST);
        } else {
            glDisable(GL_SCISSOR_TEST);
        }
    }

    if (scissor.x != cur_state.scissor.x || scissor.y != cur_state.scissor.y ||
        scissor.width != cur_state.scissor.width || scissor.height != cur_state.scissor.height) {
        glScissor(scissor.x, scissor.y, scissor.width, scissor.height);
    }

    if (viewport.x != cur_state.viewport.x || viewport.y != cur_state.viewport.y ||
        viewport.width != cur_state.viewport.width ||
        viewport.height != cur_state.viewport.height) {
        glViewport(viewport.x, viewport.y, viewport.width, viewport.height);
    }

    // Clip distance
    if (!GLES || GLAD_GL_EXT_clip_cull_distance) {
        for (size_t i = 0; i < clip_distance.size(); ++i) {
            if (clip_distance[i] != cur_state.clip_distance[i]) {
                if (clip_distance[i]) {
                    glEnable(GL_CLIP_DISTANCE0 + static_cast<GLenum>(i));
                } else {
                    glDisable(GL_CLIP_DISTANCE0 + static_cast<GLenum>(i));
                }
            }
        }
    }

    if (renderbuffer != cur_state.renderbuffer) {
        glBindRenderbuffer(GL_RENDERBUFFER, renderbuffer);
    }

    cur_state = *this;
    dirty_flags |= DirtyState::All;
}
OpenGLState& OpenGLState::ResetTexture(GLuint handle) {
    for (auto& unit : texture_units) {
        if (unit.texture_2d == handle) {
            unit.texture_2d = 0;
        }
    }
    if (texture_cube_unit.texture_cube == handle)
        texture_cube_unit.texture_cube = 0;
    if (texture_buffer_lut_lf.texture_buffer == handle)
        texture_buffer_lut_lf.texture_buffer = 0;
    if (texture_buffer_lut_rg.texture_buffer == handle)
        texture_buffer_lut_rg.texture_buffer = 0;
    if (texture_buffer_lut_rgba.texture_buffer == handle)
        texture_buffer_lut_rgba.texture_buffer = 0;
    if (image_shadow_buffer == handle)
        image_shadow_buffer = 0;
    if (image_shadow_texture_px == handle)
        image_shadow_texture_px = 0;
    if (image_shadow_texture_nx == handle)
        image_shadow_texture_nx = 0;
    if (image_shadow_texture_py == handle)
        image_shadow_texture_py = 0;
    if (image_shadow_texture_ny == handle)
        image_shadow_texture_ny = 0;
    if (image_shadow_texture_pz == handle)
        image_shadow_texture_pz = 0;
    if (image_shadow_texture_nz == handle)
        image_shadow_texture_nz = 0;
    return *this;
}

OpenGLState& OpenGLState::ResetSampler(GLuint handle) {
    for (auto& unit : texture_units) {
        if (unit.sampler == handle) {
            unit.sampler = 0;
        }
    }
    if (texture_cube_unit.sampler == handle) {
        texture_cube_unit.sampler = 0;
    }
    return *this;
}

OpenGLState& OpenGLState::ResetProgram(GLuint handle) {
    if (draw.shader_program == handle) {
        draw.shader_program = 0;
    }
    return *this;
}

OpenGLState& OpenGLState::ResetPipeline(GLuint handle) {
    if (draw.program_pipeline == handle) {
        draw.program_pipeline = 0;
    }
    return *this;
}

OpenGLState& OpenGLState::ResetBuffer(GLuint handle) {
    if (draw.vertex_buffer == handle) {
        draw.vertex_buffer = 0;
    }
    if (draw.uniform_buffer == handle) {
        draw.uniform_buffer = 0;
    }
    return *this;
}

OpenGLState& OpenGLState::ResetVertexArray(GLuint handle) {
    if (draw.vertex_array == handle) {
        draw.vertex_array = 0;
    }
    return *this;
}

OpenGLState& OpenGLState::ResetFramebuffer(GLuint handle) {
    if (draw.read_framebuffer == handle) {
        draw.read_framebuffer = 0;
    }
    if (draw.draw_framebuffer == handle) {
        draw.draw_framebuffer = 0;
    }
    return *this;
}

OpenGLState& OpenGLState::ResetRenderbuffer(GLuint handle) {
    if (renderbuffer == handle) {
        renderbuffer = 0;
    }
    return *this;
}

} // namespace OpenGL

void VulkanState::SetVertexBuffer(VKBuffer* buffer, vk::DeviceSize offset) {
    if (vertex_buffer == buffer) {
        return;
    }

    vertex_buffer = buffer;
    vertex_buffer_offset = offset;
    dirty_flags |= DirtyState::VertexBuffer;
}

void VulkanState::SetFramebuffer(VKFramebuffer* buffer) {
    // Should not be changed within a render pass.
    //ASSERT(!InRenderPass());
    //framebuffer = buffer;
}

void VulkanState::SetPipeline(const VKPipeline* new_pipeline) {
    if (new_pipeline == pipeline) {
        return;
    }

    pipeline = new_pipeline;
    dirty_flags |= DirtyState::Pipeline;
}

void VulkanState::SetUniformBuffer(UniformID id, VKBuffer* buffer, u32 offset, u32 size) {
    auto& binding = bindings.ubo[static_cast<u32>(id)];
    if (binding.buffer != buffer->GetBuffer() || binding.range != size) {
        binding.buffer = buffer->GetBuffer();
        binding.range = size;
        dirty_flags |= DirtyState::Uniform;
    }
}

void VulkanState::SetTexture(TextureID id, VKTexture* texture) {
    u32 index = static_cast<u32>(id);
    if (bindings.texture[index].imageView == texture->GetView()) {
        return;
    }

    bindings.texture[index].imageView = texture->GetView();
    bindings.texture[index].imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal;
    dirty_flags |= DirtyState::Texture;
}

void VulkanState::SetTexelBuffer(TexelBufferID id, VKBuffer* buffer) {
    u32 index = static_cast<u32>(id);
    if (bindings.lut[index].buffer == buffer->GetBuffer()) {
        return;
    }

    bindings.lut[index].buffer = buffer->GetBuffer();
    dirty_flags |= DirtyState::TexelBuffer;
}

void VulkanState::SetImageTexture(VKTexture* image) {
    // TODO
}

void VulkanState::BeginRenderPass() {
    if (InRenderPass())
        return;

    m_current_render_pass = m_framebuffer->GetLoadRenderPass();
    m_framebuffer_render_area = m_framebuffer->GetRect();

    VkRenderPassBeginInfo begin_info = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
                                        nullptr,
                                        m_current_render_pass,
                                        m_framebuffer->GetFB(),
                                        m_framebuffer_render_area,
                                        0,
                                        nullptr};

    vkCmdBeginRenderPass(g_command_buffer_mgr->GetCurrentCommandBuffer(), &begin_info,
                         VK_SUBPASS_CONTENTS_INLINE);
}
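For orientation, here is a minimal caller-side sketch of how a rasterizer might drive the VulkanState interface above for a single draw. The buffer, texture and pipeline objects as well as the uniform size are placeholders for whatever the caller owns; only the VulkanState methods themselves come from this change.

// Hypothetical usage sketch (not part of this change).
void SetupDraw(VulkanState& state, const VKPipeline* pipeline, VKBuffer* vertex_buffer,
               VKBuffer* uniform_buffer, VKTexture* texture) {
    state.SetPipeline(pipeline);
    state.SetVertexBuffer(vertex_buffer, 0);
    // Offset/size of the uniform data are illustrative values only
    state.SetUniformBuffer(UniformID::Pica, uniform_buffer, 0, 256);
    state.SetTexture(TextureID::Tex0, texture);
    state.Apply(); // flushes only the DirtyState bits set above into the command buffer
}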
|
||||
void StateTracker::BeginDiscardRenderPass()
|
||||
{
|
||||
if (InRenderPass())
|
||||
return;
|
||||
|
||||
m_current_render_pass = m_framebuffer->GetDiscardRenderPass();
|
||||
m_framebuffer_render_area = m_framebuffer->GetRect();
|
||||
|
||||
VkRenderPassBeginInfo begin_info = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
|
||||
nullptr,
|
||||
m_current_render_pass,
|
||||
m_framebuffer->GetFB(),
|
||||
m_framebuffer_render_area,
|
||||
0,
|
||||
nullptr};
|
||||
|
||||
vkCmdBeginRenderPass(g_command_buffer_mgr->GetCurrentCommandBuffer(), &begin_info,
|
||||
VK_SUBPASS_CONTENTS_INLINE);
|
||||
}
|
||||
|
||||
void StateTracker::EndRenderPass()
|
||||
{
|
||||
if (!InRenderPass())
|
||||
return;
|
||||
|
||||
vkCmdEndRenderPass(g_command_buffer_mgr->GetCurrentCommandBuffer());
|
||||
m_current_render_pass = VK_NULL_HANDLE;
|
||||
}
|
||||
|
||||
void StateTracker::BeginClearRenderPass(const VkRect2D& area, const VkClearValue* clear_values,
|
||||
u32 num_clear_values)
|
||||
{
|
||||
ASSERT(!InRenderPass());
|
||||
|
||||
m_current_render_pass = m_framebuffer->GetClearRenderPass();
|
||||
m_framebuffer_render_area = area;
|
||||
|
||||
VkRenderPassBeginInfo begin_info = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
|
||||
nullptr,
|
||||
m_current_render_pass,
|
||||
m_framebuffer->GetFB(),
|
||||
m_framebuffer_render_area,
|
||||
num_clear_values,
|
||||
clear_values};
|
||||
|
||||
vkCmdBeginRenderPass(g_command_buffer_mgr->GetCurrentCommandBuffer(), &begin_info,
|
||||
VK_SUBPASS_CONTENTS_INLINE);
|
||||
}
|
||||
|
||||
void StateTracker::SetViewport(const VkViewport& viewport)
|
||||
{
|
||||
if (memcmp(&m_viewport, &viewport, sizeof(viewport)) == 0)
|
||||
return;
|
||||
|
||||
m_viewport = viewport;
|
||||
m_dirty_flags |= DIRTY_FLAG_VIEWPORT;
|
||||
}
|
||||
|
||||
void StateTracker::SetScissor(const VkRect2D& scissor)
|
||||
{
|
||||
if (memcmp(&m_scissor, &scissor, sizeof(scissor)) == 0)
|
||||
return;
|
||||
|
||||
m_scissor = scissor;
|
||||
m_dirty_flags |= DIRTY_FLAG_SCISSOR;
|
||||
}
|
||||
|
||||
bool StateTracker::Bind()
|
||||
{
|
||||
// Must have a pipeline.
|
||||
if (!m_pipeline)
|
||||
return false;
|
||||
|
||||
// Check the render area if we were in a clear pass.
|
||||
if (m_current_render_pass == m_framebuffer->GetClearRenderPass() && !IsViewportWithinRenderArea())
|
||||
EndRenderPass();
|
||||
|
||||
// Get a new descriptor set if any parts have changed
|
||||
if (!UpdateDescriptorSet())
|
||||
{
|
||||
// We can fail to allocate descriptors if we exhaust the pool for this command buffer.
|
||||
WARN_LOG_FMT(VIDEO, "Failed to get a descriptor set, executing buffer");
|
||||
Renderer::GetInstance()->ExecuteCommandBuffer(false, false);
|
||||
if (!UpdateDescriptorSet())
|
||||
{
|
||||
// Something strange going on.
|
||||
ERROR_LOG_FMT(VIDEO, "Failed to get descriptor set, skipping draw");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// Start render pass if not already started
|
||||
if (!InRenderPass())
|
||||
BeginRenderPass();
|
||||
|
||||
// Re-bind parts of the pipeline
|
||||
const VkCommandBuffer command_buffer = g_command_buffer_mgr->GetCurrentCommandBuffer();
|
||||
if (m_dirty_flags & DIRTY_FLAG_VERTEX_BUFFER)
|
||||
vkCmdBindVertexBuffers(command_buffer, 0, 1, &m_vertex_buffer, &m_vertex_buffer_offset);
|
||||
|
||||
if (m_dirty_flags & DIRTY_FLAG_INDEX_BUFFER)
|
||||
vkCmdBindIndexBuffer(command_buffer, m_index_buffer, m_index_buffer_offset, m_index_type);
|
||||
|
||||
if (m_dirty_flags & DIRTY_FLAG_PIPELINE)
|
||||
vkCmdBindPipeline(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, m_pipeline->GetVkPipeline());
|
||||
|
||||
if (m_dirty_flags & DIRTY_FLAG_VIEWPORT)
|
||||
vkCmdSetViewport(command_buffer, 0, 1, &m_viewport);
|
||||
|
||||
if (m_dirty_flags & DIRTY_FLAG_SCISSOR)
|
||||
vkCmdSetScissor(command_buffer, 0, 1, &m_scissor);
|
||||
|
||||
m_dirty_flags &= ~(DIRTY_FLAG_VERTEX_BUFFER | DIRTY_FLAG_INDEX_BUFFER | DIRTY_FLAG_PIPELINE |
|
||||
DIRTY_FLAG_VIEWPORT | DIRTY_FLAG_SCISSOR);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool StateTracker::BindCompute()
|
||||
{
|
||||
if (!m_compute_shader)
|
||||
return false;
|
||||
|
||||
// Can't kick compute in a render pass.
|
||||
if (InRenderPass())
|
||||
EndRenderPass();
|
||||
|
||||
const VkCommandBuffer command_buffer = g_command_buffer_mgr->GetCurrentCommandBuffer();
|
||||
if (m_dirty_flags & DIRTY_FLAG_COMPUTE_SHADER)
|
||||
{
|
||||
vkCmdBindPipeline(command_buffer, VK_PIPELINE_BIND_POINT_COMPUTE,
|
||||
m_compute_shader->GetComputePipeline());
|
||||
}
|
||||
|
||||
if (!UpdateComputeDescriptorSet())
|
||||
{
|
||||
WARN_LOG_FMT(VIDEO, "Failed to get a compute descriptor set, executing buffer");
|
||||
Renderer::GetInstance()->ExecuteCommandBuffer(false, false);
|
||||
if (!UpdateComputeDescriptorSet())
|
||||
{
|
||||
// Something strange going on.
|
||||
ERROR_LOG_FMT(VIDEO, "Failed to get descriptor set, skipping dispatch");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
m_dirty_flags &= ~DIRTY_FLAG_COMPUTE_SHADER;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool StateTracker::IsWithinRenderArea(s32 x, s32 y, u32 width, u32 height) const
|
||||
{
|
||||
// Check that the viewport does not lie outside the render area.
|
||||
// If it does, we need to switch to a normal load/store render pass.
|
||||
s32 left = m_framebuffer_render_area.offset.x;
|
||||
s32 top = m_framebuffer_render_area.offset.y;
|
||||
s32 right = left + static_cast<s32>(m_framebuffer_render_area.extent.width);
|
||||
s32 bottom = top + static_cast<s32>(m_framebuffer_render_area.extent.height);
|
||||
s32 test_left = x;
|
||||
s32 test_top = y;
|
||||
s32 test_right = test_left + static_cast<s32>(width);
|
||||
s32 test_bottom = test_top + static_cast<s32>(height);
|
||||
return test_left >= left && test_right <= right && test_top >= top && test_bottom <= bottom;
|
||||
}
|
||||
|
||||
bool StateTracker::IsViewportWithinRenderArea() const
|
||||
{
|
||||
return IsWithinRenderArea(static_cast<s32>(m_viewport.x), static_cast<s32>(m_viewport.y),
|
||||
static_cast<u32>(m_viewport.width),
|
||||
static_cast<u32>(m_viewport.height));
|
||||
}
|
||||
|
||||
void StateTracker::EndClearRenderPass()
|
||||
{
|
||||
if (m_current_render_pass != m_framebuffer->GetClearRenderPass())
|
||||
return;
|
||||
|
||||
// End clear render pass. Bind() will call BeginRenderPass() which
|
||||
// will switch to the load/store render pass.
|
||||
EndRenderPass();
|
||||
}
|
||||
|
||||
bool StateTracker::UpdateDescriptorSet()
|
||||
{
|
||||
if (m_pipeline->GetUsage() == AbstractPipelineUsage::GX)
|
||||
return UpdateGXDescriptorSet();
|
||||
else
|
||||
return UpdateUtilityDescriptorSet();
|
||||
}
|
||||
|
||||
bool StateTracker::UpdateGXDescriptorSet()
|
||||
{
|
||||
const size_t MAX_DESCRIPTOR_WRITES = NUM_UBO_DESCRIPTOR_SET_BINDINGS + // UBO
|
||||
1 + // Samplers
|
||||
1; // SSBO
|
||||
std::array<VkWriteDescriptorSet, MAX_DESCRIPTOR_WRITES> writes;
|
||||
u32 num_writes = 0;
|
||||
|
||||
if (m_dirty_flags & DIRTY_FLAG_GX_UBOS || m_gx_descriptor_sets[0] == VK_NULL_HANDLE)
|
||||
{
|
||||
m_gx_descriptor_sets[0] = g_command_buffer_mgr->AllocateDescriptorSet(
|
||||
g_object_cache->GetDescriptorSetLayout(DESCRIPTOR_SET_LAYOUT_STANDARD_UNIFORM_BUFFERS));
|
||||
if (m_gx_descriptor_sets[0] == VK_NULL_HANDLE)
|
||||
return false;
|
||||
|
||||
for (size_t i = 0; i < NUM_UBO_DESCRIPTOR_SET_BINDINGS; i++)
|
||||
{
|
||||
if (i == UBO_DESCRIPTOR_SET_BINDING_GS &&
|
||||
!g_ActiveConfig.backend_info.bSupportsGeometryShaders)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
writes[num_writes++] = {VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
|
||||
nullptr,
|
||||
m_gx_descriptor_sets[0],
|
||||
static_cast<uint32_t>(i),
|
||||
0,
|
||||
1,
|
||||
VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC,
|
||||
nullptr,
|
||||
&m_bindings.gx_ubo_bindings[i],
|
||||
nullptr};
|
||||
}
|
||||
|
||||
m_dirty_flags = (m_dirty_flags & ~DIRTY_FLAG_GX_UBOS) | DIRTY_FLAG_DESCRIPTOR_SETS;
|
||||
}
|
||||
|
||||
if (m_dirty_flags & DIRTY_FLAG_GX_SAMPLERS || m_gx_descriptor_sets[1] == VK_NULL_HANDLE)
|
||||
{
|
||||
m_gx_descriptor_sets[1] = g_command_buffer_mgr->AllocateDescriptorSet(
|
||||
g_object_cache->GetDescriptorSetLayout(DESCRIPTOR_SET_LAYOUT_STANDARD_SAMPLERS));
|
||||
if (m_gx_descriptor_sets[1] == VK_NULL_HANDLE)
|
||||
return false;
|
||||
|
||||
writes[num_writes++] = {VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
|
||||
nullptr,
|
||||
m_gx_descriptor_sets[1],
|
||||
0,
|
||||
0,
|
||||
static_cast<u32>(NUM_PIXEL_SHADER_SAMPLERS),
|
||||
VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
|
||||
m_bindings.samplers.data(),
|
||||
nullptr,
|
||||
nullptr};
|
||||
m_dirty_flags = (m_dirty_flags & ~DIRTY_FLAG_GX_SAMPLERS) | DIRTY_FLAG_DESCRIPTOR_SETS;
|
||||
}
|
||||
|
||||
if (g_ActiveConfig.backend_info.bSupportsBBox &&
|
||||
(m_dirty_flags & DIRTY_FLAG_GX_SSBO || m_gx_descriptor_sets[2] == VK_NULL_HANDLE))
|
||||
{
|
||||
m_gx_descriptor_sets[2] =
|
||||
g_command_buffer_mgr->AllocateDescriptorSet(g_object_cache->GetDescriptorSetLayout(
|
||||
DESCRIPTOR_SET_LAYOUT_STANDARD_SHADER_STORAGE_BUFFERS));
|
||||
if (m_gx_descriptor_sets[2] == VK_NULL_HANDLE)
|
||||
return false;
|
||||
|
||||
writes[num_writes++] = {
|
||||
VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, nullptr, m_gx_descriptor_sets[2], 0, 0, 1,
|
||||
VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, nullptr, &m_bindings.ssbo, nullptr};
|
||||
m_dirty_flags = (m_dirty_flags & ~DIRTY_FLAG_GX_SSBO) | DIRTY_FLAG_DESCRIPTOR_SETS;
|
||||
}
|
||||
|
||||
if (num_writes > 0)
|
||||
vkUpdateDescriptorSets(g_vulkan_context->GetDevice(), num_writes, writes.data(), 0, nullptr);
|
||||
|
||||
if (m_dirty_flags & DIRTY_FLAG_DESCRIPTOR_SETS)
|
||||
{
|
||||
vkCmdBindDescriptorSets(g_command_buffer_mgr->GetCurrentCommandBuffer(),
|
||||
VK_PIPELINE_BIND_POINT_GRAPHICS, m_pipeline->GetVkPipelineLayout(), 0,
|
||||
g_ActiveConfig.backend_info.bSupportsBBox ?
|
||||
NUM_GX_DESCRIPTOR_SETS :
|
||||
(NUM_GX_DESCRIPTOR_SETS - 1),
|
||||
m_gx_descriptor_sets.data(),
|
||||
g_ActiveConfig.backend_info.bSupportsGeometryShaders ?
|
||||
NUM_UBO_DESCRIPTOR_SET_BINDINGS :
|
||||
(NUM_UBO_DESCRIPTOR_SET_BINDINGS - 1),
|
||||
m_bindings.gx_ubo_offsets.data());
|
||||
m_dirty_flags &= ~(DIRTY_FLAG_DESCRIPTOR_SETS | DIRTY_FLAG_GX_UBO_OFFSETS);
|
||||
}
|
||||
else if (m_dirty_flags & DIRTY_FLAG_GX_UBO_OFFSETS)
|
||||
{
|
||||
vkCmdBindDescriptorSets(g_command_buffer_mgr->GetCurrentCommandBuffer(),
|
||||
VK_PIPELINE_BIND_POINT_GRAPHICS, m_pipeline->GetVkPipelineLayout(), 0,
|
||||
1, m_gx_descriptor_sets.data(),
|
||||
g_ActiveConfig.backend_info.bSupportsGeometryShaders ?
|
||||
NUM_UBO_DESCRIPTOR_SET_BINDINGS :
|
||||
(NUM_UBO_DESCRIPTOR_SET_BINDINGS - 1),
|
||||
m_bindings.gx_ubo_offsets.data());
|
||||
m_dirty_flags &= ~DIRTY_FLAG_GX_UBO_OFFSETS;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool StateTracker::UpdateUtilityDescriptorSet()
|
||||
{
|
||||
// Max number of updates - UBO, Samplers, TexelBuffer
|
||||
std::array<VkWriteDescriptorSet, 3> dswrites;
|
||||
u32 writes = 0;
|
||||
|
||||
// Allocate descriptor sets.
|
||||
if (m_dirty_flags & DIRTY_FLAG_UTILITY_UBO || m_utility_descriptor_sets[0] == VK_NULL_HANDLE)
|
||||
{
|
||||
m_utility_descriptor_sets[0] = g_command_buffer_mgr->AllocateDescriptorSet(
|
||||
g_object_cache->GetDescriptorSetLayout(DESCRIPTOR_SET_LAYOUT_UTILITY_UNIFORM_BUFFER));
|
||||
if (!m_utility_descriptor_sets[0])
|
||||
return false;
|
||||
|
||||
dswrites[writes++] = {VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
|
||||
nullptr,
|
||||
m_utility_descriptor_sets[0],
|
||||
0,
|
||||
0,
|
||||
1,
|
||||
VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC,
|
||||
nullptr,
|
||||
&m_bindings.utility_ubo_binding,
|
||||
nullptr};
|
||||
|
||||
m_dirty_flags = (m_dirty_flags & ~DIRTY_FLAG_UTILITY_UBO) | DIRTY_FLAG_DESCRIPTOR_SETS;
|
||||
}
|
||||
|
||||
if (m_dirty_flags & DIRTY_FLAG_UTILITY_BINDINGS || m_utility_descriptor_sets[1] == VK_NULL_HANDLE)
|
||||
{
|
||||
m_utility_descriptor_sets[1] = g_command_buffer_mgr->AllocateDescriptorSet(
|
||||
g_object_cache->GetDescriptorSetLayout(DESCRIPTOR_SET_LAYOUT_UTILITY_SAMPLERS));
|
||||
if (!m_utility_descriptor_sets[1])
|
||||
return false;
|
||||
|
||||
dswrites[writes++] = {VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
|
||||
nullptr,
|
||||
m_utility_descriptor_sets[1],
|
||||
0,
|
||||
0,
|
||||
NUM_PIXEL_SHADER_SAMPLERS,
|
||||
VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
|
||||
m_bindings.samplers.data(),
|
||||
nullptr,
|
||||
nullptr};
|
||||
dswrites[writes++] = {VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
|
||||
nullptr,
|
||||
m_utility_descriptor_sets[1],
|
||||
8,
|
||||
0,
|
||||
1,
|
||||
VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
|
||||
nullptr,
|
||||
nullptr,
|
||||
m_bindings.texel_buffers.data()};
|
||||
|
||||
m_dirty_flags = (m_dirty_flags & ~DIRTY_FLAG_UTILITY_BINDINGS) | DIRTY_FLAG_DESCRIPTOR_SETS;
|
||||
}
|
||||
|
||||
if (writes > 0)
|
||||
vkUpdateDescriptorSets(g_vulkan_context->GetDevice(), writes, dswrites.data(), 0, nullptr);
|
||||
|
||||
if (m_dirty_flags & DIRTY_FLAG_DESCRIPTOR_SETS)
|
||||
{
|
||||
vkCmdBindDescriptorSets(g_command_buffer_mgr->GetCurrentCommandBuffer(),
|
||||
VK_PIPELINE_BIND_POINT_GRAPHICS, m_pipeline->GetVkPipelineLayout(), 0,
|
||||
NUM_UTILITY_DESCRIPTOR_SETS, m_utility_descriptor_sets.data(), 1,
|
||||
&m_bindings.utility_ubo_offset);
|
||||
m_dirty_flags &= ~(DIRTY_FLAG_DESCRIPTOR_SETS | DIRTY_FLAG_UTILITY_UBO_OFFSET);
|
||||
}
|
||||
else if (m_dirty_flags & DIRTY_FLAG_UTILITY_UBO_OFFSET)
|
||||
{
|
||||
vkCmdBindDescriptorSets(g_command_buffer_mgr->GetCurrentCommandBuffer(),
|
||||
VK_PIPELINE_BIND_POINT_GRAPHICS, m_pipeline->GetVkPipelineLayout(), 0,
|
||||
1, m_utility_descriptor_sets.data(), 1, &m_bindings.utility_ubo_offset);
|
||||
m_dirty_flags &= ~(DIRTY_FLAG_DESCRIPTOR_SETS | DIRTY_FLAG_UTILITY_UBO_OFFSET);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool StateTracker::UpdateComputeDescriptorSet()
|
||||
{
|
||||
// Max number of updates - UBO, Samplers, TexelBuffer, Image
|
||||
std::array<VkWriteDescriptorSet, 4> dswrites;
|
||||
|
||||
// Allocate descriptor sets.
|
||||
if (m_dirty_flags & DIRTY_FLAG_COMPUTE_BINDINGS)
|
||||
{
|
||||
m_compute_descriptor_set = g_command_buffer_mgr->AllocateDescriptorSet(
|
||||
g_object_cache->GetDescriptorSetLayout(DESCRIPTOR_SET_LAYOUT_COMPUTE));
|
||||
dswrites[0] = {VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
|
||||
nullptr,
|
||||
m_compute_descriptor_set,
|
||||
0,
|
||||
0,
|
||||
1,
|
||||
VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC,
|
||||
nullptr,
|
||||
&m_bindings.utility_ubo_binding,
|
||||
nullptr};
|
||||
dswrites[1] = {VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
|
||||
nullptr,
|
||||
m_compute_descriptor_set,
|
||||
1,
|
||||
0,
|
||||
NUM_COMPUTE_SHADER_SAMPLERS,
|
||||
VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
|
||||
m_bindings.samplers.data(),
|
||||
nullptr,
|
||||
nullptr};
|
||||
dswrites[2] = {VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
|
||||
nullptr,
|
||||
m_compute_descriptor_set,
|
||||
3,
|
||||
0,
|
||||
NUM_COMPUTE_TEXEL_BUFFERS,
|
||||
VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
|
||||
nullptr,
|
||||
nullptr,
|
||||
m_bindings.texel_buffers.data()};
|
||||
dswrites[3] = {VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
|
||||
nullptr,
|
||||
m_compute_descriptor_set,
|
||||
5,
|
||||
0,
|
||||
1,
|
||||
VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
|
||||
&m_bindings.image_texture,
|
||||
nullptr,
|
||||
nullptr};
|
||||
|
||||
vkUpdateDescriptorSets(g_vulkan_context->GetDevice(), static_cast<uint32_t>(dswrites.size()),
|
||||
dswrites.data(), 0, nullptr);
|
||||
m_dirty_flags =
|
||||
(m_dirty_flags & ~DIRTY_FLAG_COMPUTE_BINDINGS) | DIRTY_FLAG_COMPUTE_DESCRIPTOR_SET;
|
||||
}
|
||||
|
||||
if (m_dirty_flags & DIRTY_FLAG_COMPUTE_DESCRIPTOR_SET)
|
||||
{
|
||||
vkCmdBindDescriptorSets(g_command_buffer_mgr->GetCurrentCommandBuffer(),
|
||||
VK_PIPELINE_BIND_POINT_COMPUTE,
|
||||
g_object_cache->GetPipelineLayout(PIPELINE_LAYOUT_COMPUTE), 0, 1,
|
||||
&m_compute_descriptor_set, 1, &m_bindings.utility_ubo_offset);
|
||||
m_dirty_flags &= ~DIRTY_FLAG_COMPUTE_DESCRIPTOR_SET;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
} // namespace Vulkan
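The StateTracker functions above repeatedly guard on InRenderPass(), which is not defined anywhere in this hunk. A minimal sketch of the helper, assuming it only checks whether a render pass is currently being recorded:

// Hypothetical helper matching the usage above; assumes EndRenderPass() resets
// m_current_render_pass back to VK_NULL_HANDLE.
bool StateTracker::InRenderPass() const {
    return m_current_render_pass != VK_NULL_HANDLE;
}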
|
||||
|
@@ -5,145 +5,119 @@

#pragma once

#include <array>
#include <vulkan/vulkan.hpp>
#include "video_core/renderer_vulkan/vk_texture.h"
#include "video_core/renderer_vulkan/vk_pipeline.h"

namespace Vulkan {

namespace TextureUnits {

struct TextureUnit {
    GLint id;
    constexpr GLenum Enum() const {
        return static_cast<GLenum>(GL_TEXTURE0 + id);
    }
};

constexpr TextureUnit PicaTexture(int unit) {
    return TextureUnit{unit};
}

constexpr TextureUnit TextureCube{6};
constexpr TextureUnit TextureBufferLUT_LF{3};
constexpr TextureUnit TextureBufferLUT_RG{4};
constexpr TextureUnit TextureBufferLUT_RGBA{5};

} // namespace TextureUnits

namespace ImageUnits {
constexpr uint ShadowBuffer = 0;
constexpr uint ShadowTexturePX = 1;
constexpr uint ShadowTextureNX = 2;
constexpr uint ShadowTexturePY = 3;
constexpr uint ShadowTextureNY = 4;
constexpr uint ShadowTexturePZ = 5;
constexpr uint ShadowTextureNZ = 6;
} // namespace ImageUnits

class VulkanState {
public:
    struct Messenger {
        bool cull_state;
        bool depth_state;
        bool color_mask;
        bool stencil_state;
        bool logic_op;
        bool texture_state;
    };

    struct {
        bool enabled;
        vk::CullModeFlags mode;
        vk::FrontFace front_face;
    } cull;

    struct {
        bool test_enabled;
        vk::CompareOp test_func;
        bool write_mask;
    } depth;

    vk::ColorComponentFlags color_mask;

    struct {
        bool test_enabled;
        vk::CompareOp test_func;
        int test_ref;
        uint32_t test_mask, write_mask;
        vk::StencilOp action_stencil_fail;
        vk::StencilOp action_depth_fail;
        vk::StencilOp action_depth_pass;
    } stencil;

    vk::LogicOp logic_op;

    // 3 texture units - one for each that is used in PICA fragment shader emulation
    struct TextureUnit {
        uint texture_2d; // GL_TEXTURE_BINDING_2D
        uint sampler;    // GL_SAMPLER_BINDING
    };
    std::array<TextureUnit, 3> texture_units;

    struct {
        uint texture_cube; // GL_TEXTURE_BINDING_CUBE_MAP
        uint sampler;      // GL_SAMPLER_BINDING
    } texture_cube_unit;

    struct {
        uint texture_buffer; // GL_TEXTURE_BINDING_BUFFER
    } texture_buffer_lut_lf;

    struct {
        uint texture_buffer; // GL_TEXTURE_BINDING_BUFFER
    } texture_buffer_lut_rg;

    struct {
        uint texture_buffer; // GL_TEXTURE_BINDING_BUFFER
    } texture_buffer_lut_rgba;

    // GL_IMAGE_BINDING_NAME
    uint image_shadow_buffer;
    uint image_shadow_texture_px;
    uint image_shadow_texture_nx;
    uint image_shadow_texture_py;
    uint image_shadow_texture_ny;
    uint image_shadow_texture_pz;
    uint image_shadow_texture_nz;

    struct {
        uint read_framebuffer; // GL_READ_FRAMEBUFFER_BINDING
        uint draw_framebuffer; // GL_DRAW_FRAMEBUFFER_BINDING
        uint vertex_array;     // GL_VERTEX_ARRAY_BINDING
        uint vertex_buffer;    // GL_ARRAY_BUFFER_BINDING
        uint uniform_buffer;   // GL_UNIFORM_BUFFER_BINDING
        uint shader_program;   // GL_CURRENT_PROGRAM
        uint program_pipeline; // GL_PROGRAM_PIPELINE_BINDING
    } draw;

    struct {
        bool enabled; // GL_SCISSOR_TEST
        int x, y;
        std::size_t width, height;
    } scissor;

    struct {
        int x, y;
        std::size_t width, height;
    } viewport;

    std::array<bool, 2> clip_distance;

    VulkanState();

    /// Get the currently active OpenGL state
    static VulkanState GetCurState() {
        return cur_state;
    }

    /// Apply all dynamic state to the provided Vulkan command buffer
    void Apply(vk::CommandBuffer& command_buffer) const;

private:
    static VulkanState cur_state;
};

} // namespace OpenGL

enum class DirtyState {
    All,
    Framebuffer,
    Pipeline,
    Texture,
    Sampler,
    TexelBuffer,
    ImageTexture,
    Depth,
    Stencil,
    LogicOp,
    Viewport,
    Scissor,
    CullMode,
    VertexBuffer,
    Uniform
};

enum class UniformID {
    Pica = 0,
    Shader = 1
};

enum class TextureID {
    Tex0 = 0,
    Tex1 = 1,
    Tex2 = 2,
    TexCube = 3
};

enum class TexelBufferID {
    LF = 0,
    RG = 1,
    RGBA = 2
};

/// Tracks global Vulkan state
class VulkanState {
public:
    VulkanState() = default;
    ~VulkanState() = default;

    /// Initialize object to its initial state
    void Create();

    /// Configure drawing state
    void SetVertexBuffer(VKBuffer* buffer, vk::DeviceSize offset);
    void SetFramebuffer(VKFramebuffer* framebuffer);
    void SetPipeline(const VKPipeline* pipeline);

    /// Configure shader resources
    void SetUniformBuffer(UniformID id, VKBuffer* buffer, u32 offset, u32 size);
    void SetTexture(TextureID id, VKTexture* texture);
    void SetTexelBuffer(TexelBufferID id, VKBuffer* buffer);
    void SetImageTexture(VKTexture* image);

    /// Apply all dirty state to the current Vulkan command buffer
    void Apply();

private:
    // State which should be applied
    DirtyState dirty_flags;

    // Input assembly
    VKBuffer* vertex_buffer = nullptr;
    vk::DeviceSize vertex_buffer_offset = 0;

    // Pipeline state
    const VKPipeline* pipeline = nullptr;

    // Shader bindings
    struct {
        std::array<vk::DescriptorBufferInfo, 2> ubo;
        std::array<vk::DescriptorImageInfo, 4> texture;
        std::array<vk::DescriptorBufferInfo, 3> lut;
    } bindings = {};

    std::array<vk::DescriptorSet, 3> descriptor_sets = {};

    // Rasterization
    vk::Viewport viewport = {0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f};
    vk::CullModeFlags cull_mode = vk::CullModeFlagBits::eNone;
    vk::Rect2D scissor = {{0, 0}, {1, 1}};
    VKTexture dummy_texture;

    // Framebuffer
    VKFramebuffer* framebuffer = nullptr;
    vk::RenderPass current_render_pass = VK_NULL_HANDLE;
    vk::Rect2D framebuffer_render_area = {};
    vk::ColorComponentFlags color_mask;

    // Depth
    bool depth_enabled;
    vk::CompareOp test_func;

    // Stencil
    bool stencil_enabled;
    vk::StencilFaceFlags face_mask;
    vk::StencilOp fail_op, pass_op;
    vk::StencilOp depth_fail_op;
    vk::CompareOp compare_op;

    vk::LogicOp logic_op;
    std::array<bool, 2> clip_distance;
};

extern std::unique_ptr<VulkanState> g_vk_state;

} // namespace Vulkan
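Note that the implementation ORs DirtyState values into dirty_flags; with a scoped enum that only compiles if bitwise operators are provided and the enumerators carry distinct bit values. A minimal sketch of that plumbing follows, shown as an assumption rather than code from this change.

// Hypothetical flag operators for DirtyState; assumes the enumerators are
// (re)assigned power-of-two values when the enum is used as a bit mask.
constexpr DirtyState operator|(DirtyState lhs, DirtyState rhs) {
    return static_cast<DirtyState>(static_cast<u32>(lhs) | static_cast<u32>(rhs));
}

constexpr DirtyState& operator|=(DirtyState& lhs, DirtyState rhs) {
    return lhs = lhs | rhs;
}

constexpr bool IsDirty(DirtyState flags, DirtyState bit) {
    return (static_cast<u32>(flags) & static_cast<u32>(bit)) != 0;
}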
|
||||
|
@@ -267,4 +267,4 @@ public:
    SurfaceType type = SurfaceType::Invalid;
};

} // namespace OpenGL
} // namespace Vulkan
@@ -1,435 +1,348 @@
|
||||
#include "vk_swapchain.h"
|
||||
#include "vk_context.h"
|
||||
#include "vk_buffer.h"
|
||||
#include <fmt/core.h>
|
||||
// Copyright 2022 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
constexpr uint64_t MAX_UINT64 = ~0ULL;
|
||||
#include <algorithm>
|
||||
#include <array>
|
||||
#include <limits>
|
||||
#include <span>
|
||||
#include <vector>
|
||||
#include "common/logging/log.h"
|
||||
#include "video_core/renderer_vulkan/vk_swapchain.h"
|
||||
#include "video_core/renderer_vulkan/vk_instance.h"
|
||||
|
||||
VkWindow::VkWindow(int width, int height, std::string_view name) :
|
||||
width(width), height(height), name(name)
|
||||
{
|
||||
glfwInit();
|
||||
glfwWindowHint(GLFW_CLIENT_API, GL_FALSE);
|
||||
glfwWindowHint(GLFW_RESIZABLE, GLFW_TRUE);
|
||||
namespace Vulkan {
|
||||
|
||||
window = glfwCreateWindow(width, height, name.data(), nullptr, nullptr);
|
||||
glfwSetWindowUserPointer(window, this);
|
||||
glfwSetFramebufferSizeCallback(window, [](GLFWwindow* window, int width, int height)
|
||||
{
|
||||
auto my_window = reinterpret_cast<VkWindow*>(glfwGetWindowUserPointer(window));
|
||||
my_window->framebuffer_resized = true;
|
||||
my_window->width = width;
|
||||
my_window->height = height;
|
||||
});
|
||||
}
|
||||
struct SwapchainDetails {
|
||||
vk::SurfaceFormatKHR format;
|
||||
vk::PresentModeKHR present_mode;
|
||||
vk::Extent2D extent;
|
||||
vk::SurfaceTransformFlagBitsKHR transform;
|
||||
u32 image_count;
|
||||
};
|
||||
|
||||
VkWindow::~VkWindow()
|
||||
{
|
||||
auto& device = context->device;
|
||||
device->waitIdle();
|
||||
SwapchainDetails PopulateSwapchainDetails(vk::SurfaceKHR surface, u32 width, u32 height) {
|
||||
SwapchainDetails details;
|
||||
auto& gpu = g_vk_instace->GetPhysicalDevice();
|
||||
|
||||
buffers.clear();
|
||||
glfwDestroyWindow(window);
|
||||
glfwTerminate();
|
||||
}
|
||||
// Choose surface format
|
||||
auto formats = gpu.getSurfaceFormatsKHR(surface);
|
||||
details.format = formats[0];
|
||||
|
||||
bool VkWindow::should_close() const
|
||||
{
|
||||
return glfwWindowShouldClose(window);
|
||||
}
|
||||
|
||||
vk::Extent2D VkWindow::get_extent() const
|
||||
{
|
||||
return { width, height };
|
||||
}
|
||||
|
||||
void VkWindow::begin_frame()
|
||||
{
|
||||
// Poll for mouse events
|
||||
glfwPollEvents();
|
||||
|
||||
auto& device = context->device;
|
||||
if (auto result = device->waitForFences(flight_fences[current_frame].get(), true, MAX_UINT64); result != vk::Result::eSuccess)
|
||||
throw std::runtime_error("[VK] Failed waiting for flight fences");
|
||||
|
||||
device->resetFences(flight_fences[current_frame].get());
|
||||
try
|
||||
{
|
||||
vk::ResultValue result = device->acquireNextImageKHR(swapchain.get(), MAX_UINT64, image_semaphores[current_frame].get(), nullptr);
|
||||
image_index = result.value;
|
||||
if (formats.size() == 1 && formats[0].format == vk::Format::eUndefined) {
|
||||
details.format = { vk::Format::eB8G8R8A8Unorm };
|
||||
}
|
||||
catch (vk::OutOfDateKHRError err)
|
||||
{
|
||||
//recreateSwapChain();
|
||||
return;
|
||||
}
|
||||
catch (vk::SystemError err)
|
||||
{
|
||||
throw std::runtime_error("failed to acquire swap chain image!");
|
||||
}
|
||||
|
||||
// Start command buffer recording
|
||||
auto& command_buffer = context->get_command_buffer();
|
||||
command_buffer.begin({ vk::CommandBufferUsageFlagBits::eSimultaneousUse });
|
||||
|
||||
// Clear the screen
|
||||
vk::ClearValue clear_values[2];
|
||||
clear_values[0].color = { std::array<float, 4>{ 0.0f, 0.0f, 0.0f, 1.0f } };
|
||||
clear_values[1].depthStencil = vk::ClearDepthStencilValue(0.0f, 0.0f);
|
||||
|
||||
vk::Rect2D render_area({0, 0}, swapchain_info.extent);
|
||||
vk::RenderPassBeginInfo renderpass_info(context->renderpass.get(), buffers[current_frame].framebuffer, render_area, 2, clear_values);
|
||||
|
||||
command_buffer.beginRenderPass(renderpass_info, vk::SubpassContents::eInline);
|
||||
command_buffer.bindPipeline(vk::PipelineBindPoint::eGraphics, context->graphics_pipeline.get());
|
||||
command_buffer.bindDescriptorSets(vk::PipelineBindPoint::eGraphics, context->pipeline_layout.get(), 0,
|
||||
context->descriptor_sets[current_frame], {});
|
||||
command_buffer.setDepthCompareOp(vk::CompareOp::eGreaterOrEqual);
|
||||
}
|
||||
|
||||
void VkWindow::end_frame()
|
||||
{
|
||||
// Finish recording
|
||||
auto& command_buffer = context->get_command_buffer();
|
||||
command_buffer.endRenderPass();
|
||||
command_buffer.end();
|
||||
|
||||
std::array<vk::PipelineStageFlags, 1> wait_stages = { vk::PipelineStageFlagBits::eColorAttachmentOutput };
|
||||
std::array<vk::CommandBuffer, 1> command_buffers = { context->get_command_buffer() };
|
||||
|
||||
submit_info = vk::SubmitInfo(image_semaphores[current_frame].get(), wait_stages, command_buffers, render_semaphores[current_frame].get());
|
||||
context->graphics_queue.submit(submit_info, flight_fences[current_frame].get());
|
||||
|
||||
vk::PresentInfoKHR present_info(render_semaphores[current_frame].get(), swapchain.get(), image_index);
|
||||
vk::Result result;
|
||||
try
|
||||
{
|
||||
result = present_queue.presentKHR(present_info);
|
||||
}
|
||||
catch (vk::OutOfDateKHRError err)
|
||||
{
|
||||
result = vk::Result::eErrorOutOfDateKHR;
|
||||
}
|
||||
catch (vk::SystemError err)
|
||||
{
|
||||
throw std::runtime_error("failed to present swap chain image!");
|
||||
}
|
||||
|
||||
if (result == vk::Result::eErrorOutOfDateKHR || result == vk::Result::eSuboptimalKHR || framebuffer_resized)
|
||||
{
|
||||
framebuffer_resized = false;
|
||||
// recreate_swapchain();
|
||||
return;
|
||||
}
|
||||
|
||||
current_frame = (current_frame + 1) % MAX_FRAMES_IN_FLIGHT;
|
||||
}
|
||||
|
||||
std::shared_ptr<VkContext> VkWindow::create_context(bool validation)
|
||||
{
|
||||
vk::ApplicationInfo app_info("PS2 Emulator", 1, nullptr, 0, VK_API_VERSION_1_3);
|
||||
|
||||
uint32_t extension_count = 0U;
|
||||
const char** extension_list = glfwGetRequiredInstanceExtensions(&extension_count);
|
||||
|
||||
// Get required extensions
|
||||
std::vector<const char*> extensions(extension_list, extension_list + extension_count);
|
||||
extensions.push_back(VK_EXT_DEBUG_REPORT_EXTENSION_NAME);
|
||||
extensions.push_back(VK_EXT_DEBUG_UTILS_EXTENSION_NAME);
|
||||
|
||||
const char* layers[1] = { "VK_LAYER_KHRONOS_validation" };
|
||||
vk::InstanceCreateInfo instance_info({}, &app_info, {}, {}, extensions.size(), extensions.data());
|
||||
if (validation)
|
||||
{
|
||||
instance_info.enabledLayerCount = 1;
|
||||
instance_info.ppEnabledLayerNames = layers;
|
||||
}
|
||||
|
||||
auto instance = vk::createInstanceUnique(instance_info);
|
||||
|
||||
// Create a surface for our window
|
||||
VkSurfaceKHR surface_tmp;
|
||||
if (glfwCreateWindowSurface(instance.get(), window, nullptr, &surface_tmp) != VK_SUCCESS)
|
||||
throw std::runtime_error("[WINDOW] Could not create window surface\n");
|
||||
|
||||
surface = vk::UniqueSurfaceKHR(surface_tmp);
|
||||
|
||||
// Create context
|
||||
context = std::make_shared<VkContext>(std::move(instance), this);
|
||||
swapchain_info = get_swapchain_info();
|
||||
context->create(swapchain_info);
|
||||
|
||||
// Create swapchain
|
||||
create_present_queue();
|
||||
create_depth_buffer();
|
||||
create_swapchain();
|
||||
create_sync_objects();
|
||||
|
||||
return context;
|
||||
}
|
||||
|
||||
void VkWindow::create_present_queue()
|
||||
{
|
||||
auto& physical_device = context->physical_device;
|
||||
auto family_props = physical_device.getQueueFamilyProperties();
|
||||
|
||||
// Determine a queueFamilyIndex that supports present
|
||||
// first check if the graphicsQueueFamiliyIndex is good enough
|
||||
size_t present_queue_family = -1;
|
||||
if (physical_device.getSurfaceSupportKHR(context->queue_family, surface.get()))
|
||||
{
|
||||
present_queue_family = context->queue_family;
|
||||
}
|
||||
else
|
||||
{
|
||||
// The graphicsQueueFamilyIndex doesn't support present -> look for another family index that supports both
|
||||
// graphics and present
|
||||
vk::QueueFlags search = vk::QueueFlagBits::eGraphics | vk::QueueFlagBits::eCompute;
|
||||
for (size_t i = 0; i < family_props.size(); i++ )
|
||||
{
|
||||
if (((family_props[i].queueFlags & search) == search) && physical_device.getSurfaceSupportKHR(i, surface.get()))
|
||||
{
|
||||
context->queue_family = present_queue_family = i;
|
||||
else {
|
||||
for (const auto& format : formats) {
|
||||
if (format.colorSpace == vk::ColorSpaceKHR::eSrgbNonlinear &&
|
||||
format.format == vk::Format::eB8G8R8A8Unorm) {
|
||||
details.format = format;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (present_queue_family == -1)
|
||||
{
|
||||
// No single family index supports both graphics and present -> look for another
|
||||
// family index that supports present
|
||||
for (size_t i = 0; i < family_props.size(); i++ )
|
||||
{
|
||||
if (physical_device.getSurfaceSupportKHR(i, surface.get()))
|
||||
{
|
||||
present_queue_family = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (present_queue_family == -1)
|
||||
throw std::runtime_error("[VK] No present queue could be found");
|
||||
// Returns true if the given present mode is supported by the surface.
|
||||
auto modes = gpu.getSurfacePresentModesKHR(surface);
|
||||
auto ModePresent = [&modes](vk::PresentModeKHR check_mode) {
|
||||
auto it = std::find_if(modes.begin(), modes.end(), [check_mode](const auto& mode) {
|
||||
return check_mode == mode;
|
||||
});
|
||||
|
||||
// Get the queue
|
||||
present_queue = context->device->getQueue(present_queue_family, 0);
|
||||
return it != modes.end();
|
||||
};
|
||||
|
||||
// FIFO is guaranteed by the standard to be available
|
||||
details.present_mode = vk::PresentModeKHR::eFifo;
|
||||
|
||||
// Prefer Mailbox if present for lowest latency
|
||||
if (ModePresent(vk::PresentModeKHR::eMailbox)) {
|
||||
details.present_mode = vk::PresentModeKHR::eMailbox;
|
||||
}
|
||||
|
||||
// Query surface capabilities
|
||||
auto capabilities = gpu.getSurfaceCapabilitiesKHR(surface);
|
||||
details.extent = capabilities.currentExtent;
|
||||
|
||||
if (capabilities.currentExtent.width == std::numeric_limits<u32>::max()) {
|
||||
details.extent.width = std::clamp(width, capabilities.minImageExtent.width,
|
||||
capabilities.maxImageExtent.width);
|
||||
details.extent.height = std::clamp(height, capabilities.minImageExtent.height,
|
||||
capabilities.maxImageExtent.height);
|
||||
}
|
||||
|
||||
// Select the number of swapchain images; prefer one extra so there is always a spare buffer to work on
|
||||
details.image_count = capabilities.minImageCount + 1;
|
||||
if (capabilities.maxImageCount > 0) {
|
||||
details.image_count = std::min(details.image_count, capabilities.maxImageCount);
|
||||
}
|
||||
|
||||
// Prefer identity transform if possible
|
||||
details.transform = vk::SurfaceTransformFlagBitsKHR::eIdentity;
|
||||
if (!(capabilities.supportedTransforms & details.transform)) {
|
||||
details.transform = capabilities.currentTransform;
|
||||
}
|
||||
|
||||
return details;
|
||||
}
|
||||
|
||||
void VkWindow::create_swapchain(bool enable_vsync)
|
||||
{
|
||||
auto& physical_device = context->physical_device;
|
||||
VKSwapchain::VKSwapchain(vk::SurfaceKHR surface_) : surface(surface_) {
|
||||
|
||||
}
|
||||
|
||||
void VKSwapchain::Create(u32 width, u32 height, bool vsync_enabled) {
|
||||
is_outdated = false;
|
||||
is_suboptimal = false;
|
||||
|
||||
const auto gpu = g_vk_instace->GetPhysicalDevice();
|
||||
auto details = PopulateSwapchainDetails(surface, width, height);
|
||||
|
||||
// Store the old/current swap chain when recreating for resize
|
||||
vk::SwapchainKHR old_swapchain = swapchain.get();
|
||||
|
||||
// Figure out best swapchain create attributes
|
||||
auto capabilities = physical_device.getSurfaceCapabilitiesKHR(surface.get());
|
||||
|
||||
// Find the transformation of the surface, prefer a non-rotated transform
|
||||
auto pretransform = capabilities.supportedTransforms & vk::SurfaceTransformFlagBitsKHR::eIdentity ?
|
||||
vk::SurfaceTransformFlagBitsKHR::eIdentity :
|
||||
capabilities.currentTransform;
|
||||
|
||||
// Create the swapchain
|
||||
vk::SwapchainCreateInfoKHR swapchain_create_info
|
||||
// Now we can actually create the swap chain
|
||||
vk::SwapchainCreateInfoKHR swap_chain_info
|
||||
(
|
||||
{},
|
||||
surface.get(),
|
||||
swapchain_info.image_count,
|
||||
swapchain_info.surface_format.format,
|
||||
swapchain_info.surface_format.colorSpace,
|
||||
swapchain_info.extent,
|
||||
1,
|
||||
surface,
|
||||
details.image_count,
|
||||
details.format.format, details.format.colorSpace,
|
||||
details.extent, 1,
|
||||
vk::ImageUsageFlagBits::eColorAttachment,
|
||||
vk::SharingMode::eExclusive,
|
||||
0,
|
||||
nullptr,
|
||||
pretransform,
|
||||
0, nullptr,
|
||||
details.transform,
|
||||
vk::CompositeAlphaFlagBitsKHR::eOpaque,
|
||||
swapchain_info.present_mode,
|
||||
details.present_mode,
|
||||
VK_TRUE,
|
||||
old_swapchain
|
||||
);
|
||||
|
||||
auto& device = context->device;
|
||||
swapchain = device->createSwapchainKHRUnique(swapchain_create_info);
|
||||
|
||||
// If an existing swap chain is re-created, destroy the old swap chain
|
||||
// This also cleans up all the presentable images
|
||||
if (old_swapchain)
|
||||
std::array<uint32_t, 2> indices = {{
|
||||
g_vulkan_context->GetGraphicsQueueFamilyIndex(),
|
||||
g_vulkan_context->GetPresentQueueFamilyIndex(),
|
||||
}};
|
||||
if (g_vulkan_context->GetGraphicsQueueFamilyIndex() !=
|
||||
g_vulkan_context->GetPresentQueueFamilyIndex())
|
||||
{
|
||||
buffers.clear();
|
||||
device->destroySwapchainKHR(old_swapchain);
|
||||
swap_chain_info.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
|
||||
swap_chain_info.queueFamilyIndexCount = 2;
|
||||
swap_chain_info.pQueueFamilyIndices = indices.data();
|
||||
}
|
||||
|
||||
// Get the swap chain images
|
||||
auto images = device->getSwapchainImagesKHR(swapchain.get());
|
||||
|
||||
// Create the swapchain buffers containing the image and imageview
|
||||
buffers.resize(images.size());
|
||||
for (size_t i = 0; i < buffers.size(); i++)
|
||||
#ifdef SUPPORTS_VULKAN_EXCLUSIVE_FULLSCREEN
|
||||
if (m_fullscreen_supported)
|
||||
{
|
||||
vk::ImageViewCreateInfo color_attachment_view
|
||||
(
|
||||
{},
|
||||
images[i],
|
||||
vk::ImageViewType::e2D,
|
||||
swapchain_info.surface_format.format,
|
||||
{},
|
||||
{ vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 }
|
||||
);
|
||||
VkSurfaceFullScreenExclusiveInfoEXT fullscreen_support = {};
|
||||
swap_chain_info.pNext = &fullscreen_support;
|
||||
fullscreen_support.sType = VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_INFO_EXT;
|
||||
fullscreen_support.fullScreenExclusive = VK_FULL_SCREEN_EXCLUSIVE_APPLICATION_CONTROLLED_EXT;
|
||||
|
||||
auto image_view = device->createImageView(color_attachment_view);
|
||||
vk::ImageView attachments[] = { image_view, depth_buffer.view };
|
||||
auto platform_info = g_vulkan_context->GetPlatformExclusiveFullscreenInfo(m_wsi);
|
||||
fullscreen_support.pNext = &platform_info;
|
||||
|
||||
vk::FramebufferCreateInfo framebuffer_info
|
||||
(
|
||||
{},
|
||||
context->renderpass.get(),
|
||||
2,
|
||||
attachments,
|
||||
swapchain_info.extent.width,
|
||||
swapchain_info.extent.height,
|
||||
1
|
||||
);
|
||||
res = vkCreateSwapchainKHR(g_vulkan_context->GetDevice(), &swap_chain_info, nullptr,
|
||||
&m_swap_chain);
|
||||
if (res != VK_SUCCESS)
|
||||
{
|
||||
// Try without exclusive fullscreen.
|
||||
WARN_LOG_FMT(VIDEO, "Failed to create exclusive fullscreen swapchain, trying without.");
|
||||
swap_chain_info.pNext = nullptr;
|
||||
g_Config.backend_info.bSupportsExclusiveFullscreen = false;
|
||||
g_ActiveConfig.backend_info.bSupportsExclusiveFullscreen = false;
|
||||
m_fullscreen_supported = false;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
buffers[i].image = images[i];
|
||||
buffers[i].view = device->createImageView(color_attachment_view);
|
||||
buffers[i].framebuffer = device->createFramebuffer(framebuffer_info);
|
||||
buffers[i].device = &context->device.get();
|
||||
if (m_swap_chain == VK_NULL_HANDLE)
|
||||
{
|
||||
res = vkCreateSwapchainKHR(g_vulkan_context->GetDevice(), &swap_chain_info, nullptr,
|
||||
&m_swap_chain);
|
||||
}
|
||||
if (res != VK_SUCCESS)
|
||||
{
|
||||
LOG_VULKAN_ERROR(res, "vkCreateSwapchainKHR failed: ");
|
||||
return false;
|
||||
}
|
||||
|
||||
// Now destroy the old swap chain, since it's been recreated.
|
||||
// We can do this immediately since all work should have been completed before calling resize.
|
||||
if (old_swap_chain != VK_NULL_HANDLE)
|
||||
vkDestroySwapchainKHR(g_vulkan_context->GetDevice(), old_swap_chain, nullptr);
|
||||
|
||||
m_width = size.width;
|
||||
m_height = size.height;
|
||||
m_layers = image_layers;
|
||||
return true;
|
||||
}
|
||||
|
||||
void VKSwapchain::AcquireNextImage() {
|
||||
const auto result = g_vk_instace->GetDevice().acquireNextImageKHR(*swapchain,
|
||||
std::numeric_limits<u64>::max(), *present_semaphores[frame_index],
|
||||
VK_NULL_HANDLE, &image_index);
|
||||
|
||||
switch (result) {
|
||||
case vk::Result::eSuccess:
|
||||
break;
|
||||
case vk::Result::eSuboptimalKHR:
|
||||
is_suboptimal = true;
|
||||
break;
|
||||
case vk::Result::eErrorOutOfDateKHR:
|
||||
is_outdated = true;
|
||||
break;
|
||||
default:
|
||||
LOG_ERROR(Render_Vulkan, "acquireNextImageKHR returned unknown result");
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
vk::Framebuffer VkWindow::get_framebuffer(int index) const
|
||||
{
|
||||
return buffers[index].framebuffer;
|
||||
}
|
||||
void VKSwapchain::Present(vk::Semaphore render_semaphore) {
|
||||
const auto present_queue{device.GetPresentQueue()};
|
||||
|
||||
void VkWindow::create_depth_buffer()
|
||||
{
|
||||
auto& device = context->device;
|
||||
|
||||
// Create an optimal image used as the depth stencil attachment
|
||||
vk::ImageCreateInfo image
|
||||
(
|
||||
{},
|
||||
vk::ImageType::e2D,
|
||||
swapchain_info.depth_format,
|
||||
vk::Extent3D(swapchain_info.extent, 1),
|
||||
1, 1,
|
||||
vk::SampleCountFlagBits::e1,
|
||||
vk::ImageTiling::eOptimal,
|
||||
vk::ImageUsageFlagBits::eDepthStencilAttachment
|
||||
);
|
||||
vk::PresentInfoKHR present_info(
|
||||
|
||||
depth_buffer.image = device->createImage(image);
|
||||
|
||||
// Allocate memory for the image (device local) and bind it to our image
|
||||
auto requirements = device->getImageMemoryRequirements(depth_buffer.image);
|
||||
auto memory_type_index = Buffer::find_memory_type(requirements.memoryTypeBits, vk::MemoryPropertyFlagBits::eDeviceLocal, context);
|
||||
vk::MemoryAllocateInfo memory_alloc(requirements.size, memory_type_index);
|
||||
|
||||
depth_buffer.memory = device->allocateMemory(memory_alloc);
|
||||
device->bindImageMemory(depth_buffer.image, depth_buffer.memory, 0);
|
||||
|
||||
// Create a view for the depth stencil image
|
||||
vk::ImageViewCreateInfo depth_view
|
||||
(
|
||||
{},
|
||||
depth_buffer.image,
|
||||
vk::ImageViewType::e2D,
|
||||
swapchain_info.depth_format,
|
||||
{},
|
||||
vk::ImageSubresourceRange(vk::ImageAspectFlagBits::eDepth | vk::ImageAspectFlagBits::eStencil, 0, 1, 0, 1)
|
||||
);
|
||||
depth_buffer.view = device->createImageView(depth_view);
|
||||
}
|
||||
|
||||
SwapchainInfo VkWindow::get_swapchain_info() const
|
||||
{
|
||||
SwapchainInfo info;
|
||||
auto& physical_device = context->physical_device;
|
||||
|
||||
// Choose surface format
|
||||
auto formats = physical_device.getSurfaceFormatsKHR(surface.get());
|
||||
info.surface_format = formats[0];
|
||||
|
||||
if (formats.size() == 1 && formats[0].format == vk::Format::eUndefined)
|
||||
{
|
||||
info.surface_format = { vk::Format::eB8G8R8A8Unorm, vk::ColorSpaceKHR::eSrgbNonlinear };
|
||||
const VkPresentInfoKHR present_info{
|
||||
.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
|
||||
.pNext = nullptr,
|
||||
.waitSemaphoreCount = render_semaphore ? 1U : 0U,
|
||||
.pWaitSemaphores = &render_semaphore,
|
||||
.swapchainCount = 1,
|
||||
.pSwapchains = swapchain.address(),
|
||||
.pImageIndices = &image_index,
|
||||
.pResults = nullptr,
|
||||
};
|
||||
switch (const VkResult result = present_queue.Present(present_info)) {
|
||||
case VK_SUCCESS:
|
||||
break;
|
||||
case VK_SUBOPTIMAL_KHR:
|
||||
LOG_DEBUG(Render_Vulkan, "Suboptimal swapchain");
|
||||
break;
|
||||
case VK_ERROR_OUT_OF_DATE_KHR:
|
||||
is_outdated = true;
|
||||
break;
|
||||
default:
|
||||
LOG_CRITICAL(Render_Vulkan, "Failed to present with error {}", vk::ToString(result));
|
||||
break;
|
||||
}
|
||||
else
|
||||
{
|
||||
for (const auto& format : formats)
|
||||
{
|
||||
if (format.format == vk::Format::eB8G8R8A8Unorm &&
|
||||
format.colorSpace == vk::ColorSpaceKHR::eSrgbNonlinear)
|
||||
++frame_index;
|
||||
if (frame_index >= image_count) {
|
||||
frame_index = 0;
|
||||
}
|
||||
}
|
||||
|
||||
void VKSwapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities, u32 width,
|
||||
u32 height, bool srgb) {
|
||||
const auto physical_device{device.GetPhysical()};
|
||||
const auto formats{physical_device.GetSurfaceFormatsKHR(surface)};
|
||||
const auto present_modes{physical_device.GetSurfacePresentModesKHR(surface)};
|
||||
|
||||
const VkSurfaceFormatKHR surface_format{ChooseSwapSurfaceFormat(formats)};
|
||||
present_mode = ChooseSwapPresentMode(present_modes);
|
||||
|
||||
u32 requested_image_count{capabilities.minImageCount + 1};
|
||||
if (capabilities.maxImageCount > 0 && requested_image_count > capabilities.maxImageCount) {
|
||||
requested_image_count = capabilities.maxImageCount;
|
||||
}
|
||||
VkSwapchainCreateInfoKHR swapchain_ci{
|
||||
.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
|
||||
.pNext = nullptr,
|
||||
.flags = 0,
|
||||
.surface = surface,
|
||||
.minImageCount = requested_image_count,
|
||||
.imageFormat = surface_format.format,
|
||||
.imageColorSpace = surface_format.colorSpace,
|
||||
.imageExtent = {},
|
||||
.imageArrayLayers = 1,
|
||||
.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
|
||||
.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE,
|
||||
.queueFamilyIndexCount = 0,
|
||||
.pQueueFamilyIndices = nullptr,
|
||||
.preTransform = capabilities.currentTransform,
|
||||
.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
|
||||
.presentMode = present_mode,
|
||||
.clipped = VK_FALSE,
|
||||
.oldSwapchain = nullptr,
|
||||
};
|
||||
const u32 graphics_family{device.GetGraphicsFamily()};
|
||||
const u32 present_family{device.GetPresentFamily()};
|
||||
const std::array<u32, 2> queue_indices{graphics_family, present_family};
|
||||
if (graphics_family != present_family) {
|
||||
swapchain_ci.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
|
||||
swapchain_ci.queueFamilyIndexCount = static_cast<u32>(queue_indices.size());
|
||||
swapchain_ci.pQueueFamilyIndices = queue_indices.data();
|
||||
}
|
||||
static constexpr std::array view_formats{VK_FORMAT_B8G8R8A8_UNORM, VK_FORMAT_B8G8R8A8_SRGB};
|
||||
VkImageFormatListCreateInfo format_list{
|
||||
.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR,
|
||||
.pNext = nullptr,
|
||||
.viewFormatCount = static_cast<u32>(view_formats.size()),
|
||||
.pViewFormats = view_formats.data(),
|
||||
};
|
||||
if (device.IsKhrSwapchainMutableFormatEnabled()) {
|
||||
format_list.pNext = std::exchange(swapchain_ci.pNext, &format_list);
|
||||
swapchain_ci.flags |= VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR;
|
||||
}
|
||||
// Request the size again to reduce the possibility of a TOCTOU race condition.
|
||||
const auto updated_capabilities = physical_device.GetSurfaceCapabilitiesKHR(surface);
|
||||
swapchain_ci.imageExtent = ChooseSwapExtent(updated_capabilities, width, height);
|
||||
// Don't add code within this and the swapchain creation.
|
||||
swapchain = device.GetLogical().CreateSwapchainKHR(swapchain_ci);
|
||||
|
||||
extent = swapchain_ci.imageExtent;
|
||||
current_srgb = srgb;
|
||||
current_fps_unlocked = Settings::values.disable_fps_limit.GetValue();
|
||||
|
||||
images = swapchain.GetImages();
|
||||
image_count = static_cast<u32>(images.size());
|
||||
image_view_format = srgb ? VK_FORMAT_B8G8R8A8_SRGB : VK_FORMAT_B8G8R8A8_UNORM;
|
||||
}
|
||||
|
||||
void VKSwapchain::CreateImageViews() {
|
||||
VkImageViewCreateInfo ci{
|
||||
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
|
||||
.pNext = nullptr,
|
||||
.flags = 0,
|
||||
.image = {},
|
||||
.viewType = VK_IMAGE_VIEW_TYPE_2D,
|
||||
.format = image_view_format,
|
||||
.components =
|
||||
{
|
||||
info.surface_format = format;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
.r = VK_COMPONENT_SWIZZLE_IDENTITY,
|
||||
.g = VK_COMPONENT_SWIZZLE_IDENTITY,
|
||||
.b = VK_COMPONENT_SWIZZLE_IDENTITY,
|
||||
.a = VK_COMPONENT_SWIZZLE_IDENTITY,
|
||||
},
|
||||
.subresourceRange =
|
||||
{
|
||||
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
|
||||
.baseMipLevel = 0,
|
||||
.levelCount = 1,
|
||||
.baseArrayLayer = 0,
|
||||
.layerCount = 1,
|
||||
},
|
||||
};

                info.surface_format = format;
                break;
            }
        }
    }

    // Choose best present mode
    auto present_modes = physical_device.getSurfacePresentModesKHR(surface.get());
    info.present_mode = vk::PresentModeKHR::eFifo;

    // Query surface capabilities
    auto capabilities = physical_device.getSurfaceCapabilitiesKHR(surface.get());
    info.extent = capabilities.currentExtent;

    if (capabilities.currentExtent.width == std::numeric_limits<uint32_t>::max())
    {
        int width, height;
        glfwGetFramebufferSize(window, &width, &height);

        vk::Extent2D extent = { static_cast<uint32_t>(width), static_cast<uint32_t>(height) };
        extent.width = std::max(capabilities.minImageExtent.width, std::min(capabilities.maxImageExtent.width, extent.width));
        extent.height = std::max(capabilities.minImageExtent.height, std::min(capabilities.maxImageExtent.height, extent.height));

        info.extent = extent;
    }

    // Find a suitable depth (stencil) format that is supported by the device
    auto depth_formats = { vk::Format::eD32SfloatS8Uint, vk::Format::eD24UnormS8Uint, vk::Format::eD16UnormS8Uint,
                           vk::Format::eD32Sfloat, vk::Format::eD16Unorm };
    info.depth_format = vk::Format::eUndefined;

    for (auto& format : depth_formats)
    {
        auto format_props = physical_device.getFormatProperties(format);
        auto search = vk::FormatFeatureFlagBits::eDepthStencilAttachment;
        if ((format_props.optimalTilingFeatures & search) == search)
        {
            info.depth_format = format;
            break;
        }
    }

    if (info.depth_format == vk::Format::eUndefined)
        throw std::runtime_error("[VK] Couldn't find optimal depth format");

    // Determine the number of images
    info.image_count = capabilities.minImageCount + 1 > capabilities.maxImageCount &&
                               capabilities.maxImageCount > 0 ?
                           capabilities.maxImageCount :
                           capabilities.minImageCount + 1;

    return info;
}
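The image-count expression above requests one image more than the driver's minimum and caps it at the reported maximum (a maximum of zero means "no upper limit"). An equivalent, arguably easier to read formulation (sketch only, not part of this diff):

    uint32_t image_count = capabilities.minImageCount + 1;
    if (capabilities.maxImageCount > 0) {
        // maxImageCount == 0 means the driver imposes no upper limit
        image_count = std::min(image_count, capabilities.maxImageCount);
    }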

void VkWindow::create_sync_objects()
{
    auto& device = context->device;

    image_semaphores.resize(MAX_FRAMES_IN_FLIGHT);
    render_semaphores.resize(MAX_FRAMES_IN_FLIGHT);
    flight_fences.resize(MAX_FRAMES_IN_FLIGHT);

    for (int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++)
    {
        image_semaphores[i] = device->createSemaphoreUnique({});
        render_semaphores[i] = device->createSemaphoreUnique({});
        flight_fences[i] = device->createFenceUnique({ vk::FenceCreateFlagBits::eSignaled });
    }
}

    image_views.resize(image_count);
    for (std::size_t i = 0; i < image_count; i++) {
        ci.image = images[i];
        image_views[i] = device.GetLogical().CreateImageView(ci);
    }
}

void VKSwapchain::Destroy() {
    frame_index = 0;
    present_semaphores.clear();
    framebuffers.clear();
    image_views.clear();
    swapchain.reset();
}

bool VKSwapchain::NeedsPresentModeUpdate() const {
    // Mailbox is the ideal present mode for all scenarios. If it is not available,
    // a different present mode is needed to support unlocked FPS above the monitor's refresh rate.
    return present_mode != VK_PRESENT_MODE_MAILBOX_KHR && HasFpsUnlockChanged();
}
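The comment above summarizes the selection policy; a helper expressing it could look roughly like the following (a hedged sketch, ChoosePresentMode is a hypothetical name and not part of this diff). Mailbox is preferred whenever the driver offers it; without it, only immediate mode allows presenting above the refresh rate, while FIFO is the one mode the specification guarantees:

    vk::PresentModeKHR ChoosePresentMode(std::span<const vk::PresentModeKHR> modes, bool unlocked_fps) {
        const auto available = [&modes](vk::PresentModeKHR wanted) {
            return std::find(modes.begin(), modes.end(), wanted) != modes.end();
        };
        if (available(vk::PresentModeKHR::eMailbox)) {
            return vk::PresentModeKHR::eMailbox; // Works for both capped and uncapped FPS
        }
        if (unlocked_fps && available(vk::PresentModeKHR::eImmediate)) {
            return vk::PresentModeKHR::eImmediate; // Uncapped FPS without mailbox implies tearing
        }
        return vk::PresentModeKHR::eFifo; // Always available per the specification
    }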

} // namespace Vulkan
@@ -1,99 +1,63 @@

// Copyright 2022 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <vulkan/vulkan.hpp>

#include <string_view>
#include <memory>
#include "core/frontend/emu_window.h"
#include "video_core/renderer_vulkan/vk_texture.h"

class VkContext;
struct GLFWwindow;

struct SwapchainBuffer
{
    ~SwapchainBuffer()
    {
        device->destroyImageView(view);
        device->destroyFramebuffer(framebuffer);
    }
namespace Vulkan {

struct SwapChainImage {
    vk::Image image;
    vk::ImageView view;
    vk::Framebuffer framebuffer;
    vk::Device* device;
    VKTexture texture;
    VKFramebuffer framebuffer;
};

struct SwapchainInfo
{
    vk::Format depth_format;
    vk::SurfaceFormatKHR surface_format;
    vk::PresentModeKHR present_mode;
    vk::Extent2D extent;
    uint32_t image_count;
};

struct DepthBuffer : public NonCopyable
{
    ~DepthBuffer()
    {
        // Destroy depth buffer
        device.destroyImage(image);
        device.destroyImageView(view);
        device.freeMemory(memory);
    }

    vk::Device device;
    vk::Image image;
    vk::DeviceMemory memory;
    vk::ImageView view;
};

constexpr int MAX_FRAMES_IN_FLIGHT = 3;

class VkWindow
{
class VKSwapchain {
public:
    VkWindow(int width, int height, std::string_view name);
    ~VkWindow();
    VKSwapchain(vk::SurfaceKHR surface);
    ~VKSwapchain() = default;

    std::shared_ptr<VkContext> create_context(bool validation = true);
    bool should_close() const;
    vk::Extent2D get_extent() const;
    /// Creates (or recreates) the swapchain with a given size.
    void Create(u32 width, u32 height, bool vsync_enabled);

    void begin_frame();
    void end_frame();
    /// Acquires the next image in the swapchain, waits as needed.
    void AcquireNextImage();

    void destroy();
    vk::Framebuffer get_framebuffer(int index) const;
    /// Returns true when the swapchain needs to be recreated.
    bool NeedsRecreation() const { return IsSubOptimal(); }
    bool IsOutDated() const { return is_outdated; }
    bool IsSubOptimal() const { return is_suboptimal; }
    bool IsVSyncEnabled() const { return vsync_enabled; }
    u32 GetCurrentImageIndex() const { return image_index; }

    /// Get current swapchain state
    vk::Extent2D GetSize() const { return extent; }
    vk::SurfaceKHR GetSurface() const { return surface; }
    vk::SurfaceFormatKHR GetSurfaceFormat() const { return surface_format; }
    vk::Format GetTextureFormat() const { return texture_format; }
    vk::SwapchainKHR GetSwapChain() const { return swapchain.get(); }
    vk::Image GetCurrentImage() const { return swapchain_images[image_index].image; }

    /// Retrieve current texture and framebuffer
    VKTexture& GetCurrentTexture() { return swapchain_images[image_index].texture; }
    VKFramebuffer& GetCurrentFramebuffer() { return swapchain_images[image_index].framebuffer; }

private:
    void create_sync_objects();
    void create_swapchain(bool enable_vsync = false);
    SwapchainInfo get_swapchain_info() const;
    void create_depth_buffer();
    void create_present_queue();
    vk::SurfaceKHR surface;
    vk::SurfaceFormatKHR surface_format = {};
    vk::PresentModeKHR present_mode = vk::PresentModeKHR::eFifo;
    vk::Format texture_format = vk::Format::eUndefined;
    vk::Extent2D extent;
    bool vsync_enabled = false;
    bool is_outdated = false, is_suboptimal = false;

public:
    // Window attributes
    uint32_t width = 0, height = 0;
    bool framebuffer_resized = false;
    std::string_view name;

    // Context
    std::shared_ptr<VkContext> context;
    vk::Queue present_queue;

    // Swapchain objects
    vk::UniqueSurfaceKHR surface;
    vk::UniqueSwapchainKHR swapchain;
    std::vector<SwapchainBuffer> buffers;
    SwapchainInfo swapchain_info;
    uint32_t current_frame = 0, image_index = 0;
    uint32_t draw_batch = 0;

    // Depth buffer
    DepthBuffer depth_buffer;

    // Synchronization
    vk::SubmitInfo submit_info;
    std::vector<vk::UniqueSemaphore> image_semaphores;
    std::vector<vk::UniqueSemaphore> render_semaphores;
    std::vector<vk::UniqueFence> flight_fences;
    std::vector<SwapChainImage> swapchain_images;
    u32 image_index = 0, frame_index = 0;
};

} // namespace Vulkan
@@ -10,8 +10,7 @@

namespace Vulkan {

void VKTexture::Create(const Info& info)
{
void VKTexture::Create(const Info& info) {
    auto& device = g_vk_instace->GetDevice();
    texture_info = info;
@@ -66,58 +65,95 @@ void VKTexture::Create(const Info& info)
    texture_view = device.createImageViewUnique(view_info);
}

void VKTexture::TransitionLayout(vk::ImageLayout old_layout, vk::ImageLayout new_layout)
{
    auto& device = g_vk_instace->GetDevice();
    auto& queue = g_vk_instace->graphics_queue;
void VKTexture::TransitionLayout(vk::ImageLayout new_layout, vk::CommandBuffer& command_buffer) {
    struct LayoutInfo {
        vk::ImageLayout layout;
        vk::AccessFlags access;
        vk::PipelineStageFlags stage;
    };

    vk::CommandBufferAllocateInfo alloc_info(g_vk_instace->command_pool.get(), vk::CommandBufferLevel::ePrimary, 1);
    vk::CommandBuffer command_buffer = device.allocateCommandBuffers(alloc_info)[0];
    // Get optimal transition settings for every image layout. Settings taken from Dolphin
    auto layout_info = [&](vk::ImageLayout layout) -> LayoutInfo {
        LayoutInfo info = { .layout = layout };
        switch (layout) {
        case vk::ImageLayout::eUndefined:
            // Layout is undefined, therefore the contents are undefined and we don't care what happens to them.
            info.access = vk::AccessFlagBits::eNone;
            info.stage = vk::PipelineStageFlagBits::eTopOfPipe;
            break;

        case vk::ImageLayout::ePreinitialized:
            // Image has been pre-initialized by the host, so ensure all writes have completed.
            info.access = vk::AccessFlagBits::eHostWrite;
            info.stage = vk::PipelineStageFlagBits::eHost;
            break;

        case vk::ImageLayout::eColorAttachmentOptimal:
            // Image was being used as a color attachment, so ensure all writes have completed.
            info.access = vk::AccessFlagBits::eColorAttachmentRead | vk::AccessFlagBits::eColorAttachmentWrite;
            info.stage = vk::PipelineStageFlagBits::eColorAttachmentOutput;
            break;

        case vk::ImageLayout::eDepthStencilAttachmentOptimal:
            // Image was being used as a depth-stencil attachment, so ensure all writes have completed.
            info.access = vk::AccessFlagBits::eDepthStencilAttachmentRead | vk::AccessFlagBits::eDepthStencilAttachmentWrite;
            info.stage = vk::PipelineStageFlagBits::eEarlyFragmentTests | vk::PipelineStageFlagBits::eLateFragmentTests;
            break;

        case vk::ImageLayout::eShaderReadOnlyOptimal:
            // Image was being used as a shader resource, make sure all reads have finished.
            info.access = vk::AccessFlagBits::eShaderRead;
            info.stage = vk::PipelineStageFlagBits::eFragmentShader;
            break;

        case vk::ImageLayout::eTransferSrcOptimal:
            // Image was being used as a copy source, ensure all reads have finished.
            info.access = vk::AccessFlagBits::eTransferRead;
            info.stage = vk::PipelineStageFlagBits::eTransfer;
            break;

        case vk::ImageLayout::eTransferDstOptimal:
            // Image was being used as a copy destination, ensure all writes have finished.
            info.access = vk::AccessFlagBits::eTransferWrite;
            info.stage = vk::PipelineStageFlagBits::eTransfer;
            break;

        default:
            LOG_CRITICAL(Render_Vulkan, "Unhandled vulkan image layout {}\n", layout);
            break;
        }

        return info;
    };

    // Submit pipeline barrier
    LayoutInfo source = layout_info(texture_layout), dst = layout_info(new_layout);
    command_buffer.begin({vk::CommandBufferUsageFlagBits::eOneTimeSubmit});

    vk::ImageMemoryBarrier barrier({}, {}, old_layout, new_layout, VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED, texture.get(),
                                   vk::ImageSubresourceRange(vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1));
    vk::ImageMemoryBarrier barrier
    (
        source.access, dst.access,
        source.layout, dst.layout,
        VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED,
        texture.get(),
        vk::ImageSubresourceRange(vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1)
    );

    std::array<vk::ImageMemoryBarrier, 1> barriers = { barrier };

    vk::PipelineStageFlags source_stage, destination_stage;
    if (old_layout == vk::ImageLayout::eUndefined && new_layout == vk::ImageLayout::eTransferDstOptimal) {
        barrier.srcAccessMask = vk::AccessFlagBits::eNone;
        barrier.dstAccessMask = vk::AccessFlagBits::eTransferWrite;

        source_stage = vk::PipelineStageFlagBits::eTopOfPipe;
        destination_stage = vk::PipelineStageFlagBits::eTransfer;
    }
    else if (old_layout == vk::ImageLayout::eTransferDstOptimal && new_layout == vk::ImageLayout::eShaderReadOnlyOptimal) {
        barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite;
        barrier.dstAccessMask = vk::AccessFlagBits::eShaderRead;

        source_stage = vk::PipelineStageFlagBits::eTransfer;
        destination_stage = vk::PipelineStageFlagBits::eFragmentShader;
    }
    else {
        LOG_CRITICAL(Render_Vulkan, "Unsupported layout transition");
        UNREACHABLE();
    }

    command_buffer.pipelineBarrier(source_stage, destination_stage, vk::DependencyFlagBits::eByRegion, {}, {}, barriers);
    command_buffer.pipelineBarrier(source.stage, dst.stage, vk::DependencyFlagBits::eByRegion, {}, {}, barriers);
    command_buffer.end();

    vk::SubmitInfo submit_info({}, {}, {}, 1, &command_buffer);
    queue.submit(submit_info, nullptr);
    queue.waitIdle();

    device.freeCommandBuffers(g_vk_instace->command_pool.get(), command_buffer);
    // Update texture layout
    texture_layout = new_layout;
}
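With the new signature the barrier is only recorded into a caller-provided command buffer and the caller decides when to submit, so an upload can batch both transitions and the copy into a single submission instead of one submit-and-wait per barrier. A minimal usage sketch, mirroring what CopyPixels below does (staging and regions stand in for the buffer and copy regions prepared by the caller):

    // Inside a VKTexture member function, with command_buffer already in the recording state:
    TransitionLayout(vk::ImageLayout::eTransferDstOptimal, command_buffer);
    command_buffer.copyBufferToImage(staging.GetBuffer(), texture.get(), vk::ImageLayout::eTransferDstOptimal, regions);
    TransitionLayout(vk::ImageLayout::eShaderReadOnlyOptimal, command_buffer);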

void VKTexture::CopyPixels(std::span<u32> new_pixels)
{
void VKTexture::CopyPixels(std::span<u32> new_pixels) {
    auto& device = g_vk_instace->GetDevice();
    auto& queue = g_vk_instace->graphics_queue;

    // Transition image to transfer format
    TransitionLayout(vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal);

    // Copy pixels to staging buffer
    std::memcpy(g_vk_res_cache->GetTextureUploadBuffer().GetHostPointer(),
                new_pixels.data(), new_pixels.size() * channels);
@@ -132,8 +168,14 @@ void VKTexture::CopyPixels(std::span<u32> new_pixels)
                                 { texture_info.width, texture_info.height, 1 });
    std::array<vk::BufferImageCopy, 1> regions = { region };

    // Transition image to transfer format
    TransitionLayout(vk::ImageLayout::eTransferDstOptimal, command_buffer);

    auto& staging = g_vk_res_cache->GetTextureUploadBuffer();
    command_buffer.copyBufferToImage(staging.GetBuffer(), texture.get(), vk::ImageLayout::eTransferDstOptimal, regions);

    // Prepare for shader reads
    TransitionLayout(vk::ImageLayout::eShaderReadOnlyOptimal, command_buffer);
    command_buffer.end();

    vk::SubmitInfo submit_info({}, {}, {}, 1, &command_buffer);
@@ -142,13 +184,57 @@ void VKTexture::CopyPixels(std::span<u32> new_pixels)
    /// NOTE: Remove this when the renderer starts working, otherwise it will be very slow
    queue.waitIdle();
    device.freeCommandBuffers(g_vk_instace->command_pool.get(), command_buffer);

    // Prepare for shader reads
    TransitionLayout(vk::ImageLayout::eTransferDstOptimal, vk::ImageLayout::eShaderReadOnlyOptimal);
}

void VKFramebuffer::Create(const Info& info)
{
void VKTexture::BlitTo(Common::Rectangle<u32> srect, VKTexture& dest,
                       Common::Rectangle<u32> drect, SurfaceParams::SurfaceType type,
                       vk::CommandBuffer& command_buffer) {
    // Ensure both textures have the same dimensions
    assert(texture_info.width == dest.texture_info.width &&
           texture_info.height == dest.texture_info.height);

    vk::ImageAspectFlags image_aspect;
    switch (type) {
    case SurfaceParams::SurfaceType::Color:
    case SurfaceParams::SurfaceType::Texture:
        image_aspect = vk::ImageAspectFlagBits::eColor;
        break;
    case SurfaceParams::SurfaceType::Depth:
        image_aspect = vk::ImageAspectFlagBits::eDepth;
        break;
    case SurfaceParams::SurfaceType::DepthStencil:
        image_aspect = vk::ImageAspectFlagBits::eDepth | vk::ImageAspectFlagBits::eStencil;
        break;
    default:
        LOG_CRITICAL(Render_Vulkan, "Unhandled image blit aspect\n");
        UNREACHABLE();
    }

    // Define the region to blit
    vk::ImageSubresourceLayers layers(image_aspect, 0, 0, 1);

    std::array<vk::Offset3D, 2> src_offsets = { vk::Offset3D(srect.left, srect.bottom, 0), vk::Offset3D(srect.right, srect.top, 1) };
    std::array<vk::Offset3D, 2> dst_offsets = { vk::Offset3D(drect.left, drect.bottom, 0), vk::Offset3D(drect.right, drect.top, 1) };
    std::array<vk::ImageBlit, 1> regions = {{{layers, src_offsets, layers, dst_offsets}}};

    // Transition image layouts
    TransitionLayout(vk::ImageLayout::eTransferSrcOptimal, command_buffer);
    dest.TransitionLayout(vk::ImageLayout::eTransferDstOptimal, command_buffer);

    // Perform blit operation
    command_buffer.blitImage(texture.get(), vk::ImageLayout::eTransferSrcOptimal, dest.texture.get(),
                             vk::ImageLayout::eTransferDstOptimal, regions, vk::Filter::eNearest);
}
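As a usage sketch (a hypothetical call site, not part of this diff; it assumes a command buffer in the recording state and two color surfaces of equal size, and that Common::Rectangle takes left, top, right, bottom), a full-surface blit would look roughly like:

    const Common::Rectangle<u32> rect{0, texture_info.height, texture_info.width, 0};
    source_texture.BlitTo(rect, dest_texture, rect, SurfaceParams::SurfaceType::Color, command_buffer);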

void VKTexture::Fill(Common::Rectangle<u32> region, glm::vec4 color) {

}

void VKTexture::Fill(Common::Rectangle<u32> region, glm::vec2 depth_stencil) {

}

void VKFramebuffer::Create(const Info& info) {
    // Make sure that at least one attachment is valid
    assert(info.color || info.depth_stencil);
    attachments = { info.color, info.depth_stencil };
@@ -180,17 +266,14 @@ void VKFramebuffer::Create(const Info& info)
    framebuffer = g_vk_instace->GetDevice().createFramebufferUnique(framebuffer_info);
}

void VKFramebuffer::Prepare()
{
void VKFramebuffer::Prepare(vk::CommandBuffer& command_buffer) {
    // Transition attachments to their optimal layouts for rendering
    if (attachments[Attachments::Color]) {
        attachments[Attachments::Color]->TransitionLayout(vk::ImageLayout::eUndefined,
                                                          vk::ImageLayout::eColorAttachmentOptimal);
        attachments[Attachments::Color]->TransitionLayout(vk::ImageLayout::eColorAttachmentOptimal, command_buffer);
    }

    if (attachments[Attachments::DepthStencil]) {
        attachments[Attachments::DepthStencil]->TransitionLayout(vk::ImageLayout::eUndefined,
                                                                 vk::ImageLayout::eDepthStencilAttachmentOptimal);
        attachments[Attachments::DepthStencil]->TransitionLayout(vk::ImageLayout::eDepthStencilAttachmentOptimal, command_buffer);
    }
}
|
||||
|
||||
|
@@ -7,7 +7,10 @@
|
||||
#include <memory>
#include <span>
#include <functional>
#include <glm/glm.hpp>
#include "common/math_util.h"
#include "video_core/renderer_vulkan/vk_buffer.h"
#include "video_core/renderer_vulkan/vk_surface_params.h"

namespace Vulkan {

@@ -40,6 +43,7 @@ public:

    /// Create a new Vulkan texture object along with its sampler
    void Create(const Info& info);
    bool IsValid() { return !!texture; }

    /// Copies CPU side pixel data to the GPU texture buffer
    void CopyPixels(std::span<u32> pixels);
@@ -50,12 +54,21 @@ public:
    vk::Rect2D GetRect() const { return vk::Rect2D({}, { texture_info.width, texture_info.height }); }
    u32 GetSamples() const { return texture_info.multisamples; }

private:
    /// Used to transition the image to an optimal layout during transfers
    void TransitionLayout(vk::ImageLayout old_layout, vk::ImageLayout new_layout);
    void TransitionLayout(vk::ImageLayout new_layout, vk::CommandBuffer& command_buffer);

    /// Fill the texture with the values provided
    void Fill(Common::Rectangle<u32> region, glm::vec4 color);
    void Fill(Common::Rectangle<u32> region, glm::vec2 depth_stencil);

    /// Copy the current texture to another, optionally performing format conversions
    void BlitTo(Common::Rectangle<u32> source_rect, VKTexture& dest,
                Common::Rectangle<u32> dst_rect, SurfaceParams::SurfaceType type,
                vk::CommandBuffer& command_buffer);

private:
    Info texture_info;
    vk::ImageLayout texture_layout = vk::ImageLayout::eUndefined;
    vk::UniqueImage texture;
    vk::UniqueImageView texture_view;
    vk::UniqueDeviceMemory texture_memory;
@@ -82,7 +95,7 @@ public:
    void Create(const Info& info);

    /// Configure framebuffer for rendering
    void Prepare();
    void Prepare(vk::CommandBuffer& command_buffer);

    vk::Rect2D GetRect() const { return vk::Rect2D({}, { width, height }); }

@@ -92,4 +105,4 @@ private:
    std::array<VKTexture*, 2> attachments;
};

}
} // namespace Vulkan