renderer_vulkan: Use separate present thread
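The change moves presentation off the render thread and onto a dedicated worker owned by the new PresentMailbox: finished frames are pushed into a stop_token-aware queue, a std::jthread pops them, copies them to the swapchain, and recycles them into a free list. Below is a minimal standalone sketch of that producer/consumer pattern, assuming nothing beyond the standard library; the Frame struct and FrameQueue here are illustrative stand-ins, not Citra's actual classes.

```cpp
#include <array>
#include <condition_variable>
#include <deque>
#include <mutex>
#include <stop_token>
#include <thread>

struct Frame {
    int id{}; // placeholder for image/framebuffer handles
};

// Single queue type used both as the present queue and the free-frame pool.
class FrameQueue {
public:
    void Push(Frame* frame) {
        {
            std::scoped_lock lock{mutex};
            frames.push_back(frame);
        }
        cv.notify_one();
    }

    // Blocks until a frame is available or stop is requested; may return nullptr.
    Frame* PopWait(std::stop_token token) {
        std::unique_lock lock{mutex};
        cv.wait(lock, token, [this] { return !frames.empty(); });
        if (frames.empty()) {
            return nullptr; // stop was requested before any frame arrived
        }
        Frame* frame = frames.front();
        frames.pop_front();
        return frame;
    }

private:
    std::mutex mutex;
    std::condition_variable_any cv; // _any so it can wait on a stop_token
    std::deque<Frame*> frames;
};

int main() {
    FrameQueue present_queue;
    FrameQueue free_queue;
    std::array<Frame, 3> swap_chain{};
    for (Frame& frame : swap_chain) {
        free_queue.Push(&frame);
    }

    // Dedicated present thread; the jthread destructor requests stop and joins.
    std::jthread present_thread([&](std::stop_token token) {
        while (!token.stop_requested()) {
            Frame* frame = present_queue.PopWait(token);
            if (frame == nullptr) {
                continue; // stop requested
            }
            // The real code copies the frame to the swapchain image and presents it here.
            free_queue.Push(frame); // recycle the frame for the renderer
        }
    });

    // Render loop (producer): take a free frame, "render" into it, hand it off.
    for (int i = 0; i < 8; ++i) {
        Frame* frame = free_queue.PopWait(present_thread.get_stop_token());
        if (frame == nullptr) {
            break;
        }
        frame->id = i; // stand-in for recording and flushing GPU work
        present_queue.Push(frame);
    }
}
```

The design choice mirrored here is that the render thread never blocks on swapchain acquisition or vkQueuePresentKHR; it only waits when the free pool is empty, which bounds latency to the depth of the swap chain.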
@@ -313,23 +313,9 @@ private:
 class VulkanRenderWidget : public RenderWidget {
 public:
-    explicit VulkanRenderWidget(GRenderWindow* parent, bool is_secondary)
-        : RenderWidget(parent), is_secondary(is_secondary) {
+    explicit VulkanRenderWidget(GRenderWindow* parent) : RenderWidget(parent) {
         windowHandle()->setSurfaceType(QWindow::VulkanSurface);
     }
 
-    void Present() override {
-        if (!isVisible()) {
-            return;
-        }
-        if (!Core::System::GetInstance().IsPoweredOn()) {
-            return;
-        }
-        VideoCore::g_renderer->TryPresent(100, is_secondary);
-    }
-
-private:
-    bool is_secondary;
 };
 
 static Frontend::WindowSystemType GetWindowSystemType() {
@@ -677,7 +663,7 @@ bool GRenderWindow::InitializeOpenGL() {
 }
 
 bool GRenderWindow::InitializeVulkan() {
-    auto child = new VulkanRenderWidget(this, is_secondary);
+    auto child = new VulkanRenderWidget(this);
     child_widget = child;
     child_widget->windowHandle()->create();
     main_context = std::make_unique<DummyContext>();
@@ -13,8 +13,10 @@
 #include <mutex>
 #include <utility>
 
+#include "common/polyfill_thread.h"
+
 namespace Common {
-template <typename T>
+template <typename T, bool with_stop_token = false>
 class SPSCQueue {
 public:
     SPSCQueue() {
@@ -40,21 +42,19 @@ public:
     template <typename Arg>
     void Push(Arg&& t) {
         // create the element, add it to the queue
-        write_ptr->current = std::forward<Arg>(t);
+        write_ptr->current = std::move(t);
         // set the next pointer to a new element ptr
         // then advance the write pointer
         ElementPtr* new_ptr = new ElementPtr();
         write_ptr->next.store(new_ptr, std::memory_order_release);
         write_ptr = new_ptr;
+        ++size;
 
-        const size_t previous_size{size++};
-
-        // Acquire the mutex and then immediately release it as a fence.
+        // cv_mutex must be held or else there will be a missed wakeup if the other thread is in the
+        // line before cv.wait
         // TODO(bunnei): This can be replaced with C++20 waitable atomics when properly supported.
         // See discussion on https://github.com/yuzu-emu/yuzu/pull/3173 for details.
-        if (previous_size == 0) {
-            std::lock_guard lock{cv_mutex};
-        }
+        std::scoped_lock lock{cv_mutex};
         cv.notify_one();
     }
 
@@ -83,10 +83,27 @@ public:
         return true;
     }
 
-    T PopWait() {
+    void Wait() {
         if (Empty()) {
             std::unique_lock lock{cv_mutex};
-            cv.wait(lock, [this]() { return !Empty(); });
+            cv.wait(lock, [this] { return !Empty(); });
+        }
+    }
+
+    T PopWait() {
+        Wait();
+        T t;
+        Pop(t);
+        return t;
+    }
+
+    T PopWait(std::stop_token stop_token) {
+        if (Empty()) {
+            std::unique_lock lock{cv_mutex};
+            Common::CondvarWait(cv, lock, stop_token, [this] { return !Empty(); });
+        }
+        if (stop_token.stop_requested()) {
+            return T{};
         }
         T t;
         Pop(t);
@@ -105,7 +122,7 @@ private:
     // and a pointer to the next ElementPtr
     class ElementPtr {
     public:
-        ElementPtr() = default;
+        ElementPtr() {}
         ~ElementPtr() {
            ElementPtr* next_ptr = next.load();
 
@@ -121,13 +138,13 @@ private:
     ElementPtr* read_ptr;
     std::atomic_size_t size{0};
     std::mutex cv_mutex;
-    std::condition_variable cv;
+    std::conditional_t<with_stop_token, std::condition_variable_any, std::condition_variable> cv;
 };
 
 // a simple thread-safe,
 // single reader, multiple writer queue
 
-template <typename T>
+template <typename T, bool with_stop_token = false>
 class MPSCQueue {
 public:
     [[nodiscard]] std::size_t Size() const {
@@ -144,7 +161,7 @@ public:
 
     template <typename Arg>
     void Push(Arg&& t) {
-        std::lock_guard lock{write_lock};
+        std::scoped_lock lock{write_lock};
         spsc_queue.Push(t);
     }
 
@@ -156,17 +173,25 @@ public:
         return spsc_queue.Pop(t);
     }
 
+    void Wait() {
+        spsc_queue.Wait();
+    }
+
     T PopWait() {
         return spsc_queue.PopWait();
     }
 
+    T PopWait(std::stop_token stop_token) {
+        return spsc_queue.PopWait(stop_token);
+    }
+
     // not thread-safe
     void Clear() {
         spsc_queue.Clear();
     }
 
 private:
-    SPSCQueue<T> spsc_queue;
+    SPSCQueue<T, with_stop_token> spsc_queue;
     std::mutex write_lock;
 };
 } // namespace Common
@@ -11,7 +11,7 @@ namespace OpenGL {
 StreamBuffer::StreamBuffer(GLenum target, size_t size_)
     : gl_target{target}, buffer_size{size_}, slot_size{buffer_size / SYNC_POINTS},
       buffer_storage{bool(GLAD_GL_ARB_buffer_storage)} {
-    for (int i = 0; i < SYNC_POINTS; i++) {
+    for (u64 i = 0; i < SYNC_POINTS; i++) {
         fences[i].Create();
     }
 
@@ -44,13 +44,13 @@ std::tuple<u8*, u64, bool> StreamBuffer::Map(u64 size, u64 alignment) {
     }
 
     // Insert waiting slots for used memory
-    for (u32 i = Slot(used_iterator); i < Slot(iterator); i++) {
+    for (u64 i = Slot(used_iterator); i < Slot(iterator); i++) {
         fences[i].Create();
     }
     used_iterator = iterator;
 
     // Wait for new slots to end of buffer
-    for (u32 i = Slot(free_iterator) + 1; i <= Slot(iterator + size) && i < SYNC_POINTS; i++) {
+    for (u64 i = Slot(free_iterator) + 1; i <= Slot(iterator + size) && i < SYNC_POINTS; i++) {
         glClientWaitSync(fences[i].handle, GL_SYNC_FLUSH_COMMANDS_BIT, GL_TIMEOUT_IGNORED);
         fences[i].Release();
     }
@@ -69,7 +69,7 @@ std::tuple<u8*, u64, bool> StreamBuffer::Map(u64 size, u64 alignment) {
         invalidate = true;
 
         // Insert waiting slots in unused space at the end of the buffer
-        for (int i = Slot(used_iterator); i < SYNC_POINTS; i++) {
+        for (u64 i = Slot(used_iterator); i < SYNC_POINTS; i++) {
             fences[i].Create();
         }
 
@@ -77,7 +77,7 @@ std::tuple<u8*, u64, bool> StreamBuffer::Map(u64 size, u64 alignment) {
         used_iterator = iterator = 0; // offset 0 is always aligned
 
         // Wait for space at the start
-        for (int i = 0; i <= Slot(iterator + size); i++) {
+        for (u64 i = 0; i <= Slot(iterator + size); i++) {
             glClientWaitSync(fences[i].handle, GL_SYNC_FLUSH_COMMANDS_BIT, GL_TIMEOUT_IGNORED);
             fences[i].Release();
         }
@@ -28,7 +28,6 @@
 #include <vk_mem_alloc.h>
 
 MICROPROFILE_DEFINE(Vulkan_RenderFrame, "Vulkan", "Render Frame", MP_RGB(128, 128, 64));
-MICROPROFILE_DEFINE(Vulkan_SwapchainCopy, "Vulkan", "Swapchain Copy", MP_RGB(64, 64, 0));
 
 namespace Vulkan {
 
@@ -117,7 +116,7 @@ RendererVulkan::RendererVulkan(Memory::MemorySystem& memory_, Frontend::EmuWindo
     CompileShaders();
     BuildLayouts();
     BuildPipelines();
-    window.mailbox = std::make_unique<TextureMailbox>(instance, swapchain, renderpass_cache);
+    mailbox = std::make_unique<PresentMailbox>(instance, swapchain, scheduler, renderpass_cache);
 }
 
 RendererVulkan::~RendererVulkan() {
@@ -186,14 +185,13 @@ void RendererVulkan::PrepareRendertarget() {
 }
 
 void RendererVulkan::RenderToMailbox(const Layout::FramebufferLayout& layout,
-                                     std::unique_ptr<Frontend::TextureMailbox>& mailbox,
-                                     bool flipped) {
-    Frontend::Frame* frame = mailbox->GetRenderFrame();
+                                     std::unique_ptr<PresentMailbox>& mailbox, bool flipped) {
+    Frame* frame = mailbox->GetRenderFrame();
     MICROPROFILE_SCOPE(Vulkan_RenderFrame);
 
     const auto [width, height] = swapchain.GetExtent();
     if (width != frame->width || height != frame->height) {
-        mailbox->ReloadRenderFrame(frame, width, height);
+        mailbox->ReloadFrame(frame, width, height);
     }
 
     scheduler.Record([layout](vk::CommandBuffer cmdbuf) {
@@ -237,7 +235,7 @@ void RendererVulkan::RenderToMailbox(const Layout::FramebufferLayout& layout,
     DrawScreens(layout, flipped);
 
     scheduler.Flush(frame->render_ready);
-    scheduler.Record([&mailbox, frame](vk::CommandBuffer) { mailbox->ReleaseRenderFrame(frame); });
+    scheduler.Record([&mailbox, frame](vk::CommandBuffer) { mailbox->Present(frame); });
     scheduler.DispatchWork();
 }
 
@@ -956,165 +954,12 @@ void RendererVulkan::DrawScreens(const Layout::FramebufferLayout& layout, bool f
     scheduler.Record([](vk::CommandBuffer cmdbuf) { cmdbuf.endRenderPass(); });
 }
 
-void RendererVulkan::TryPresent(int timeout_ms, bool is_secondary) {
-    Frontend::Frame* frame = render_window.mailbox->TryGetPresentFrame(timeout_ms);
-    if (!frame) {
-        LOG_DEBUG(Render_Vulkan, "TryGetPresentFrame returned no frame to present");
-        return;
-    }
-
-#if ANDROID
-    // On Android swapchain invalidations are always due to surface changes.
-    // These are processed on the main thread so wait for it to recreate
-    // the swapchain for us.
-    std::unique_lock lock{swapchain_mutex};
-    swapchain_cv.wait(lock, [this]() { return !swapchain.NeedsRecreation(); });
-#endif
-
-    while (!swapchain.AcquireNextImage()) {
-#if ANDROID
-        swapchain_cv.wait(lock, [this]() { return !swapchain.NeedsRecreation(); });
-#else
-        std::scoped_lock lock{scheduler.QueueMutex()};
-        instance.GetGraphicsQueue().waitIdle();
-        swapchain.Create();
-#endif
-    }
-
-    {
-        MICROPROFILE_SCOPE(Vulkan_SwapchainCopy);
-        const vk::Image swapchain_image = swapchain.Image();
-
-        const vk::CommandBufferBeginInfo begin_info = {
-            .flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit,
-        };
-        const vk::CommandBuffer cmdbuf = frame->cmdbuf;
-        cmdbuf.begin(begin_info);
-
-        const auto [width, height] = swapchain.GetExtent();
-        const u32 copy_width = std::min(width, frame->width);
-        const u32 copy_height = std::min(height, frame->height);
-
-        const vk::ImageCopy image_copy = {
-            .srcSubresource{
-                .aspectMask = vk::ImageAspectFlagBits::eColor,
-                .mipLevel = 0,
-                .baseArrayLayer = 0,
-                .layerCount = 1,
-            },
-            .srcOffset = {0, 0, 0},
-            .dstSubresource{
-                .aspectMask = vk::ImageAspectFlagBits::eColor,
-                .mipLevel = 0,
-                .baseArrayLayer = 0,
-                .layerCount = 1,
-            },
-            .dstOffset = {0, 0, 0},
-            .extent = {copy_width, copy_height, 1},
-        };
-
-        const std::array pre_barriers{
-            vk::ImageMemoryBarrier{
-                .srcAccessMask = vk::AccessFlagBits::eNone,
-                .dstAccessMask = vk::AccessFlagBits::eTransferWrite,
-                .oldLayout = vk::ImageLayout::eUndefined,
-                .newLayout = vk::ImageLayout::eTransferDstOptimal,
-                .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
-                .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
-                .image = swapchain_image,
-                .subresourceRange{
-                    .aspectMask = vk::ImageAspectFlagBits::eColor,
-                    .baseMipLevel = 0,
-                    .levelCount = 1,
-                    .baseArrayLayer = 0,
-                    .layerCount = VK_REMAINING_ARRAY_LAYERS,
-                },
-            },
-            vk::ImageMemoryBarrier{
-                .srcAccessMask = vk::AccessFlagBits::eColorAttachmentWrite,
-                .dstAccessMask = vk::AccessFlagBits::eTransferRead,
-                .oldLayout = vk::ImageLayout::eTransferSrcOptimal,
-                .newLayout = vk::ImageLayout::eTransferSrcOptimal,
-                .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
-                .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
-                .image = frame->image,
-                .subresourceRange{
-                    .aspectMask = vk::ImageAspectFlagBits::eColor,
-                    .baseMipLevel = 0,
-                    .levelCount = 1,
-                    .baseArrayLayer = 0,
-                    .layerCount = VK_REMAINING_ARRAY_LAYERS,
-                },
-            },
-        };
-        const vk::ImageMemoryBarrier post_barrier{
-            .srcAccessMask = vk::AccessFlagBits::eTransferWrite,
-            .dstAccessMask = vk::AccessFlagBits::eNone,
-            .oldLayout = vk::ImageLayout::eTransferDstOptimal,
-            .newLayout = vk::ImageLayout::ePresentSrcKHR,
-            .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
-            .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
-            .image = swapchain_image,
-            .subresourceRange{
-                .aspectMask = vk::ImageAspectFlagBits::eColor,
-                .baseMipLevel = 0,
-                .levelCount = 1,
-                .baseArrayLayer = 0,
-                .layerCount = VK_REMAINING_ARRAY_LAYERS,
-            },
-        };
-
-        cmdbuf.pipelineBarrier(vk::PipelineStageFlagBits::eColorAttachmentOutput,
-                               vk::PipelineStageFlagBits::eTransfer,
-                               vk::DependencyFlagBits::eByRegion, {}, {}, pre_barriers);
-
-        cmdbuf.copyImage(frame->image, vk::ImageLayout::eTransferSrcOptimal, swapchain_image,
-                         vk::ImageLayout::eTransferDstOptimal, image_copy);
-
-        cmdbuf.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
-                               vk::PipelineStageFlagBits::eBottomOfPipe,
-                               vk::DependencyFlagBits::eByRegion, {}, {}, post_barrier);
-
-        cmdbuf.end();
-
-        static constexpr std::array<vk::PipelineStageFlags, 2> wait_stage_masks = {
-            vk::PipelineStageFlagBits::eAllCommands,
-            vk::PipelineStageFlagBits::eAllCommands,
-        };
-
-        const vk::Semaphore present_ready = swapchain.GetPresentReadySemaphore();
-        const vk::Semaphore image_acquired = swapchain.GetImageAcquiredSemaphore();
-        const std::array wait_semaphores = {image_acquired, frame->render_ready};
-
-        vk::SubmitInfo submit_info = {
-            .waitSemaphoreCount = static_cast<u32>(wait_semaphores.size()),
-            .pWaitSemaphores = wait_semaphores.data(),
-            .pWaitDstStageMask = wait_stage_masks.data(),
-            .commandBufferCount = 1u,
-            .pCommandBuffers = &cmdbuf,
-            .signalSemaphoreCount = 1,
-            .pSignalSemaphores = &present_ready,
-        };
-
-        try {
-            std::scoped_lock lock{scheduler.QueueMutex(), frame->fence_mutex};
-            instance.GetGraphicsQueue().submit(submit_info, frame->present_done);
-        } catch (vk::DeviceLostError& err) {
-            LOG_CRITICAL(Render_Vulkan, "Device lost during present submit: {}", err.what());
-            UNREACHABLE();
-        }
-    }
-
-    swapchain.Present();
-    render_window.mailbox->ReleasePresentFrame(frame);
-}
-
 void RendererVulkan::SwapBuffers() {
     const auto& layout = render_window.GetFramebufferLayout();
     PrepareRendertarget();
     RenderScreenshot();
 
-    RenderToMailbox(layout, render_window.mailbox, false);
+    RenderToMailbox(layout, mailbox, false);
 
     m_current_frame++;
 
@@ -1180,11 +1025,10 @@ void RendererVulkan::RenderScreenshot() {
     }
     vk::Image staging_image{unsafe_image};
-
-    Frontend::Frame frame{};
-    render_window.mailbox->ReloadRenderFrame(&frame, width, height);
+    Frame frame{};
+    mailbox->ReloadFrame(&frame, width, height);
 
     renderpass_cache.ExitRenderpass();
 
     scheduler.Record([this, framebuffer = frame.framebuffer, width = frame.width,
                       height = frame.height](vk::CommandBuffer cmdbuf) {
         const vk::ClearValue clear{.color = clear_color};
@@ -1343,12 +1187,7 @@ void RendererVulkan::RenderScreenshot() {
 void RendererVulkan::NotifySurfaceChanged() {
     scheduler.Finish();
     vk::SurfaceKHR new_surface = CreateSurface(instance.GetInstance(), render_window);
-    {
-        std::scoped_lock lock{swapchain_mutex};
-        swapchain.SetNeedsRecreation(true);
-        swapchain.Create(new_surface);
-        swapchain_cv.notify_one();
-    }
+    mailbox->UpdateSurface(new_surface);
 }
 
 void RendererVulkan::Report() const {
@@ -54,7 +54,7 @@ struct ScreenInfo {
     vk::ImageView image_view;
 };
 
-class RasterizerVulkan;
+class PresentMailbox;
 
 class RendererVulkan : public VideoCore::RendererBase {
     static constexpr std::size_t PRESENT_PIPELINES = 3;
@@ -70,7 +70,7 @@ public:
 
     void SwapBuffers() override;
     void NotifySurfaceChanged() override;
-    void TryPresent(int timeout_ms, bool is_secondary) override;
+    void TryPresent(int timeout_ms, bool is_secondary) override {}
     void PrepareVideoDumping() override {}
     void CleanupVideoDumping() override {}
     void Sync() override;
@@ -88,7 +88,7 @@ private:
     void PrepareRendertarget();
     void RenderScreenshot();
     void RenderToMailbox(const Layout::FramebufferLayout& layout,
-                         std::unique_ptr<Frontend::TextureMailbox>& mailbox, bool flipped);
+                         std::unique_ptr<PresentMailbox>& mailbox, bool flipped);
     void BeginRendering();
 
     /**
@@ -135,8 +135,7 @@ private:
     Swapchain swapchain;
     StreamBuffer vertex_buffer;
     RasterizerVulkan rasterizer;
-    std::mutex swapchain_mutex;
-    std::condition_variable swapchain_cv;
+    std::unique_ptr<PresentMailbox> mailbox;
 
     /// Present pipelines (Normal, Anaglyph, Interlaced)
     vk::PipelineLayout present_pipeline_layout;
@@ -5,6 +5,7 @@
 #include <utility>
 #include "common/microprofile.h"
 #include "common/settings.h"
+#include "common/thread.h"
 #include "video_core/renderer_vulkan/vk_instance.h"
 #include "video_core/renderer_vulkan/vk_renderpass_cache.h"
 #include "video_core/renderer_vulkan/vk_scheduler.h"
@@ -79,6 +80,7 @@ void Scheduler::DispatchWork() {
 }
 
 void Scheduler::WorkerThread(std::stop_token stop_token) {
+    Common::SetCurrentThreadName("VulkanWorker");
     do {
         std::unique_ptr<CommandChunk> work;
         {
@@ -30,6 +30,7 @@ Swapchain::~Swapchain() {
 }
 
 void Swapchain::Create(vk::SurfaceKHR new_surface) {
+    needs_recreation = true; ///< Set this for the present thread to wait on
     Destroy();
 
     if (new_surface) {
@@ -3,20 +3,24 @@
 // Refer to the license.txt file included.
 
 #include "common/microprofile.h"
+#include "common/thread.h"
 #include "video_core/renderer_vulkan/vk_instance.h"
 #include "video_core/renderer_vulkan/vk_renderpass_cache.h"
+#include "video_core/renderer_vulkan/vk_scheduler.h"
 #include "video_core/renderer_vulkan/vk_swapchain.h"
 #include "video_core/renderer_vulkan/vk_texture_mailbox.h"
 
 #include <vk_mem_alloc.h>
 
 MICROPROFILE_DEFINE(Vulkan_WaitPresent, "Vulkan", "Wait For Present", MP_RGB(128, 128, 128));
+MICROPROFILE_DEFINE(Vulkan_PresentFrame, "Vulkan", "Present Frame", MP_RGB(64, 64, 0));
 
 namespace Vulkan {
 
-TextureMailbox::TextureMailbox(const Instance& instance_, const Swapchain& swapchain_,
-                               const RenderpassCache& renderpass_cache_)
-    : instance{instance_}, swapchain{swapchain_}, renderpass_cache{renderpass_cache_} {
+PresentMailbox::PresentMailbox(const Instance& instance_, Swapchain& swapchain_,
+                               Scheduler& scheduler_, RenderpassCache& renderpass_cache_)
+    : instance{instance_}, swapchain{swapchain_}, scheduler{scheduler_},
+      renderpass_cache{renderpass_cache_}, graphics_queue{instance.GetGraphicsQueue()} {
 
     const vk::Device device = instance.GetDevice();
     const vk::CommandPoolCreateInfo pool_info = {
@@ -34,20 +38,19 @@ TextureMailbox::TextureMailbox(const Instance& instance_, const Swapc
     const std::vector command_buffers = device.allocateCommandBuffers(alloc_info);
 
     for (u32 i = 0; i < SWAP_CHAIN_SIZE; i++) {
-        Frontend::Frame& frame = swap_chain[i];
+        Frame& frame = swap_chain[i];
         frame.cmdbuf = command_buffers[i];
         frame.render_ready = device.createSemaphore({});
         frame.present_done = device.createFence({.flags = vk::FenceCreateFlagBits::eSignaled});
-        free_queue.push(&frame);
+        free_queue.Push(&frame);
     }
+
+    present_thread = std::jthread([this](std::stop_token token) { PresentThread(token); });
 }
 
-TextureMailbox::~TextureMailbox() {
-    std::scoped_lock lock{present_mutex, free_mutex};
-    free_queue = {};
-    present_queue = {};
-    present_cv.notify_all();
-    free_cv.notify_all();
+PresentMailbox::~PresentMailbox() {
+    free_queue.Clear();
+    present_queue.Clear();
 
     const vk::Device device = instance.GetDevice();
     device.destroyCommandPool(command_pool);
@@ -60,7 +63,7 @@ TextureMailbox::~TextureMailbox() {
     }
 }
 
-void TextureMailbox::ReloadRenderFrame(Frontend::Frame* frame, u32 width, u32 height) {
+void PresentMailbox::ReloadFrame(Frame* frame, u32 width, u32 height) {
     vk::Device device = instance.GetDevice();
     if (frame->framebuffer) {
         device.destroyFramebuffer(frame->framebuffer);
@@ -131,26 +134,16 @@ void TextureMailbox::ReloadRenderFrame(Frontend::Frame* frame, u32 width, u32 he
     frame->height = height;
 }
 
-Frontend::Frame* TextureMailbox::GetRenderFrame() {
+Frame* PresentMailbox::GetRenderFrame() {
     MICROPROFILE_SCOPE(Vulkan_WaitPresent);
 
-    Frontend::Frame* frame{};
-    {
-        std::unique_lock lock{free_mutex};
-        if (free_queue.empty()) {
-            free_cv.wait(lock, [&] { return !free_queue.empty(); });
-        }
-
-        frame = free_queue.front();
-        free_queue.pop();
-    }
-
-    std::scoped_lock lock{frame->fence_mutex};
+    Frame* frame = free_queue.PopWait();
 
     vk::Device device = instance.GetDevice();
     vk::Result result{};
 
     const auto Wait = [&]() {
+        std::scoped_lock lock{frame->fence_mutex};
         result = device.waitForFences(frame->present_done, false, std::numeric_limits<u64>::max());
         return result;
     };
@@ -173,31 +166,168 @@ Frontend::Frame* TextureMailbox::GetRenderFrame() {
     return frame;
 }
 
-void TextureMailbox::ReleaseRenderFrame(Frontend::Frame* frame) {
-    std::unique_lock lock{present_mutex};
-    present_queue.push(frame);
-    present_cv.notify_one();
+void PresentMailbox::UpdateSurface(vk::SurfaceKHR surface) {
+    std::scoped_lock lock{swapchain_mutex};
+    swapchain.Create(surface);
+    swapchain_cv.notify_one();
 }
 
-void TextureMailbox::ReleasePresentFrame(Frontend::Frame* frame) {
-    std::unique_lock lock{free_mutex};
-    free_queue.push(frame);
-    free_cv.notify_one();
+void PresentMailbox::Present(Frame* frame) {
+    present_queue.Push(frame);
 }
 
-Frontend::Frame* TextureMailbox::TryGetPresentFrame(int timeout_ms) {
-    std::unique_lock lock{present_mutex};
-    // Wait for new entries in the present_queue
-    present_cv.wait_for(lock, std::chrono::milliseconds(timeout_ms),
-                        [&] { return !present_queue.empty(); });
-    if (present_queue.empty()) {
-        LOG_DEBUG(Render_Vulkan, "Timed out waiting present frame");
-        return nullptr;
+void PresentMailbox::PresentThread(std::stop_token token) {
+    Common::SetCurrentThreadName("VulkanPresent");
+    do {
+        Frame* frame = present_queue.PopWait(token);
+        CopyToSwapchain(frame);
+        free_queue.Push(frame);
+    } while (!token.stop_requested());
+}
+
+void PresentMailbox::CopyToSwapchain(Frame* frame) {
+    MICROPROFILE_SCOPE(Vulkan_PresentFrame);
+
+#if ANDROID
+    // On Android swapchain invalidations are always due to surface changes.
+    // These are processed on the main thread so wait for it to recreate
+    // the swapchain for us.
+    std::unique_lock lock{swapchain_mutex};
+    swapchain_cv.wait(lock, [this]() { return !swapchain.NeedsRecreation(); });
+#endif
+
+    while (!swapchain.AcquireNextImage()) {
+#if ANDROID
+        swapchain_cv.wait(lock, [this]() { return !swapchain.NeedsRecreation(); });
+#else
+        std::scoped_lock lock{scheduler.QueueMutex()};
+        graphics_queue.waitIdle();
+        swapchain.Create();
+#endif
     }
 
-    Frontend::Frame* frame = present_queue.front();
-    present_queue.pop();
-    return frame;
+    const vk::Image swapchain_image = swapchain.Image();
+
+    const vk::CommandBufferBeginInfo begin_info = {
+        .flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit,
+    };
+    const vk::CommandBuffer cmdbuf = frame->cmdbuf;
+    cmdbuf.begin(begin_info);
+
+    const auto [width, height] = swapchain.GetExtent();
+    const u32 copy_width = std::min(width, frame->width);
+    const u32 copy_height = std::min(height, frame->height);
+
+    const vk::ImageCopy image_copy = {
+        .srcSubresource{
+            .aspectMask = vk::ImageAspectFlagBits::eColor,
+            .mipLevel = 0,
+            .baseArrayLayer = 0,
+            .layerCount = 1,
+        },
+        .srcOffset = {0, 0, 0},
+        .dstSubresource{
+            .aspectMask = vk::ImageAspectFlagBits::eColor,
+            .mipLevel = 0,
+            .baseArrayLayer = 0,
+            .layerCount = 1,
+        },
+        .dstOffset = {0, 0, 0},
+        .extent = {copy_width, copy_height, 1},
+    };
+
+    const std::array pre_barriers{
+        vk::ImageMemoryBarrier{
+            .srcAccessMask = vk::AccessFlagBits::eNone,
+            .dstAccessMask = vk::AccessFlagBits::eTransferWrite,
+            .oldLayout = vk::ImageLayout::eUndefined,
+            .newLayout = vk::ImageLayout::eTransferDstOptimal,
+            .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+            .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+            .image = swapchain_image,
+            .subresourceRange{
+                .aspectMask = vk::ImageAspectFlagBits::eColor,
+                .baseMipLevel = 0,
+                .levelCount = 1,
+                .baseArrayLayer = 0,
+                .layerCount = VK_REMAINING_ARRAY_LAYERS,
+            },
+        },
+        vk::ImageMemoryBarrier{
+            .srcAccessMask = vk::AccessFlagBits::eColorAttachmentWrite,
+            .dstAccessMask = vk::AccessFlagBits::eTransferRead,
+            .oldLayout = vk::ImageLayout::eTransferSrcOptimal,
+            .newLayout = vk::ImageLayout::eTransferSrcOptimal,
+            .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+            .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+            .image = frame->image,
+            .subresourceRange{
+                .aspectMask = vk::ImageAspectFlagBits::eColor,
+                .baseMipLevel = 0,
+                .levelCount = 1,
+                .baseArrayLayer = 0,
+                .layerCount = VK_REMAINING_ARRAY_LAYERS,
+            },
+        },
+    };
+    const vk::ImageMemoryBarrier post_barrier{
+        .srcAccessMask = vk::AccessFlagBits::eTransferWrite,
+        .dstAccessMask = vk::AccessFlagBits::eNone,
+        .oldLayout = vk::ImageLayout::eTransferDstOptimal,
+        .newLayout = vk::ImageLayout::ePresentSrcKHR,
+        .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+        .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+        .image = swapchain_image,
+        .subresourceRange{
+            .aspectMask = vk::ImageAspectFlagBits::eColor,
+            .baseMipLevel = 0,
+            .levelCount = 1,
+            .baseArrayLayer = 0,
+            .layerCount = VK_REMAINING_ARRAY_LAYERS,
+        },
+    };
+
+    cmdbuf.pipelineBarrier(vk::PipelineStageFlagBits::eColorAttachmentOutput,
+                           vk::PipelineStageFlagBits::eTransfer, vk::DependencyFlagBits::eByRegion,
+                           {}, {}, pre_barriers);
+
+    cmdbuf.copyImage(frame->image, vk::ImageLayout::eTransferSrcOptimal, swapchain_image,
+                     vk::ImageLayout::eTransferDstOptimal, image_copy);
+
+    cmdbuf.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
+                           vk::PipelineStageFlagBits::eBottomOfPipe,
+                           vk::DependencyFlagBits::eByRegion, {}, {}, post_barrier);
+
+    cmdbuf.end();
+
+    static constexpr std::array<vk::PipelineStageFlags, 2> wait_stage_masks = {
+        vk::PipelineStageFlagBits::eColorAttachmentOutput,
+        vk::PipelineStageFlagBits::eAllGraphics,
+    };
+
+    const vk::Semaphore present_ready = swapchain.GetPresentReadySemaphore();
+    const vk::Semaphore image_acquired = swapchain.GetImageAcquiredSemaphore();
+    const std::array wait_semaphores = {image_acquired, frame->render_ready};
+
+    vk::SubmitInfo submit_info = {
+        .waitSemaphoreCount = static_cast<u32>(wait_semaphores.size()),
+        .pWaitSemaphores = wait_semaphores.data(),
+        .pWaitDstStageMask = wait_stage_masks.data(),
+        .commandBufferCount = 1u,
+        .pCommandBuffers = &cmdbuf,
+        .signalSemaphoreCount = 1,
+        .pSignalSemaphores = &present_ready,
+    };
+
+    try {
+        std::scoped_lock lock{scheduler.QueueMutex(), frame->fence_mutex};
+        graphics_queue.submit(submit_info, frame->present_done);
+    } catch (vk::DeviceLostError& err) {
+        LOG_CRITICAL(Render_Vulkan, "Device lost during present submit: {}", err.what());
+        UNREACHABLE();
+    }
+
+    swapchain.Present();
 }
 
 } // namespace Vulkan
@@ -5,12 +5,18 @@
 #include <condition_variable>
 #include <mutex>
 #include <queue>
-#include "core/frontend/emu_window.h"
+#include "common/polyfill_thread.h"
+#include "common/threadsafe_queue.h"
 #include "video_core/renderer_vulkan/vk_common.h"
 
 VK_DEFINE_HANDLE(VmaAllocation)
 
-namespace Frontend {
+namespace Vulkan {
 
+class Instance;
+class Swapchain;
+class Scheduler;
+class RenderpassCache;
+
 struct Frame {
     u32 width{};
@@ -25,42 +31,36 @@ struct Frame {
     vk::CommandBuffer cmdbuf{};
 };
 
-} // namespace Frontend
-
-namespace Vulkan {
-
-class Instance;
-class Swapchain;
-class RenderpassCache;
-
-class TextureMailbox final : public Frontend::TextureMailbox {
-    static constexpr std::size_t SWAP_CHAIN_SIZE = 8;
+class PresentMailbox final {
+    static constexpr std::size_t SWAP_CHAIN_SIZE = 6;
 
 public:
-    TextureMailbox(const Instance& instance, const Swapchain& swapchain,
-                   const RenderpassCache& renderpass_cache);
-    ~TextureMailbox() override;
+    PresentMailbox(const Instance& instance, Swapchain& swapchain, Scheduler& scheduler,
+                   RenderpassCache& renderpass_cache);
+    ~PresentMailbox();
 
-    void ReloadRenderFrame(Frontend::Frame* frame, u32 width, u32 height) override;
-
-    Frontend::Frame* GetRenderFrame() override;
-    Frontend::Frame* TryGetPresentFrame(int timeout_ms) override;
-
-    void ReleaseRenderFrame(Frontend::Frame* frame) override;
-    void ReleasePresentFrame(Frontend::Frame* frame) override;
+    Frame* GetRenderFrame();
+    void UpdateSurface(vk::SurfaceKHR surface);
+    void ReloadFrame(Frame* frame, u32 width, u32 height);
+    void Present(Frame* frame);
+
+private:
+    void PresentThread(std::stop_token token);
+    void CopyToSwapchain(Frame* frame);
 
 private:
     const Instance& instance;
-    const Swapchain& swapchain;
-    const RenderpassCache& renderpass_cache;
+    Swapchain& swapchain;
+    Scheduler& scheduler;
+    RenderpassCache& renderpass_cache;
     vk::CommandPool command_pool;
-    std::mutex free_mutex;
-    std::mutex present_mutex;
-    std::condition_variable free_cv;
-    std::condition_variable present_cv;
-    std::array<Frontend::Frame, SWAP_CHAIN_SIZE> swap_chain{};
-    std::queue<Frontend::Frame*> free_queue{};
-    std::queue<Frontend::Frame*> present_queue{};
+    vk::Queue graphics_queue;
+    std::jthread present_thread;
+    std::array<Frame, SWAP_CHAIN_SIZE> swap_chain{};
+    Common::SPSCQueue<Frame*> free_queue{};
+    Common::SPSCQueue<Frame*, true> present_queue{};
+    std::mutex swapchain_mutex;
+    std::condition_variable swapchain_cv;
 };
 
 } // namespace Vulkan
Block a user