renderer_vulkan: Submit present frames from the EmuThread
* This commit also reworks scheduler synchronization somewhat to be more reliable. Flush is given an atomic_bool to signal when the submit is done
This commit is contained in:
@ -217,9 +217,8 @@ void RendererVulkan::RenderToMailbox(const Layout::FramebufferLayout& layout,
|
|||||||
|
|
||||||
DrawScreens(frame, layout, flipped);
|
DrawScreens(frame, layout, flipped);
|
||||||
|
|
||||||
scheduler.Flush(frame->render_ready);
|
scheduler.Flush(frame->render_ready, nullptr, &frame->is_submitted);
|
||||||
scheduler.Record([&mailbox, frame](vk::CommandBuffer) { mailbox->Present(frame); });
|
mailbox->Present(frame);
|
||||||
scheduler.DispatchWork();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void RendererVulkan::BeginRendering(Frame* frame) {
|
void RendererVulkan::BeginRendering(Frame* frame) {
|
||||||
|
@ -188,7 +188,8 @@ void DescriptorManager::BuildLayouts() {
|
|||||||
pipeline_layout = device.createPipelineLayout(layout_info);
|
pipeline_layout = device.createPipelineLayout(layout_info);
|
||||||
}
|
}
|
||||||
|
|
||||||
std::vector<vk::DescriptorSet> DescriptorManager::AllocateSets(vk::DescriptorSetLayout layout, u32 num_sets) {
|
std::vector<vk::DescriptorSet> DescriptorManager::AllocateSets(vk::DescriptorSetLayout layout,
|
||||||
|
u32 num_sets) {
|
||||||
static std::array<vk::DescriptorSetLayout, MAX_BATCH_SIZE> layouts;
|
static std::array<vk::DescriptorSetLayout, MAX_BATCH_SIZE> layouts;
|
||||||
layouts.fill(layout);
|
layouts.fill(layout);
|
||||||
|
|
||||||
|
@ -42,27 +42,16 @@ Scheduler::Scheduler(const Instance& instance, RenderpassCache& renderpass_cache
|
|||||||
|
|
||||||
Scheduler::~Scheduler() = default;
|
Scheduler::~Scheduler() = default;
|
||||||
|
|
||||||
void Scheduler::Flush(vk::Semaphore signal, vk::Semaphore wait) {
|
|
||||||
SubmitExecution(signal, wait);
|
|
||||||
}
|
|
||||||
|
|
||||||
void Scheduler::Finish(vk::Semaphore signal, vk::Semaphore wait) {
|
void Scheduler::Finish(vk::Semaphore signal, vk::Semaphore wait) {
|
||||||
const u64 presubmit_tick = CurrentTick();
|
const u64 presubmit_tick = CurrentTick();
|
||||||
SubmitExecution(signal, wait);
|
std::atomic_bool submit_done{false};
|
||||||
WaitWorker();
|
|
||||||
Wait(presubmit_tick);
|
|
||||||
}
|
|
||||||
|
|
||||||
void Scheduler::WaitWorker() {
|
|
||||||
if (!use_worker_thread) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
Flush(signal, wait, &submit_done);
|
||||||
|
if (use_worker_thread) {
|
||||||
MICROPROFILE_SCOPE(Vulkan_WaitForWorker);
|
MICROPROFILE_SCOPE(Vulkan_WaitForWorker);
|
||||||
DispatchWork();
|
submit_done.wait(false);
|
||||||
|
}
|
||||||
std::unique_lock lock{work_mutex};
|
Wait(presubmit_tick);
|
||||||
wait_cv.wait(lock, [this] { return work_queue.empty(); });
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void Scheduler::DispatchWork() {
|
void Scheduler::DispatchWork() {
|
||||||
@ -116,24 +105,23 @@ void Scheduler::AllocateWorkerCommandBuffers() {
|
|||||||
current_cmdbuf.begin(begin_info);
|
current_cmdbuf.begin(begin_info);
|
||||||
}
|
}
|
||||||
|
|
||||||
void Scheduler::SubmitExecution(vk::Semaphore signal_semaphore, vk::Semaphore wait_semaphore) {
|
void Scheduler::Flush(vk::Semaphore signal, vk::Semaphore wait, std::atomic_bool* submit_done) {
|
||||||
const vk::Semaphore handle = master_semaphore.Handle();
|
const vk::Semaphore handle = master_semaphore.Handle();
|
||||||
const u64 signal_value = master_semaphore.NextTick();
|
const u64 signal_value = master_semaphore.NextTick();
|
||||||
state = StateFlags::AllDirty;
|
state = StateFlags::AllDirty;
|
||||||
|
|
||||||
renderpass_cache.EndRendering();
|
renderpass_cache.EndRendering();
|
||||||
Record(
|
Record([signal, wait, handle, signal_value, submit_done, this](vk::CommandBuffer cmdbuf) {
|
||||||
[signal_semaphore, wait_semaphore, handle, signal_value, this](vk::CommandBuffer cmdbuf) {
|
|
||||||
MICROPROFILE_SCOPE(Vulkan_Submit);
|
MICROPROFILE_SCOPE(Vulkan_Submit);
|
||||||
cmdbuf.end();
|
cmdbuf.end();
|
||||||
|
|
||||||
const u32 num_signal_semaphores = signal_semaphore ? 2U : 1U;
|
const u32 num_signal_semaphores = signal ? 2U : 1U;
|
||||||
const std::array signal_values{signal_value, u64(0)};
|
const std::array signal_values{signal_value, u64(0)};
|
||||||
const std::array signal_semaphores{handle, signal_semaphore};
|
const std::array signal_semaphores{handle, signal};
|
||||||
|
|
||||||
const u32 num_wait_semaphores = wait_semaphore ? 2U : 1U;
|
const u32 num_wait_semaphores = wait ? 2U : 1U;
|
||||||
const std::array wait_values{signal_value - 1, u64(1)};
|
const std::array wait_values{signal_value - 1, u64(1)};
|
||||||
const std::array wait_semaphores{handle, wait_semaphore};
|
const std::array wait_semaphores{handle, wait};
|
||||||
|
|
||||||
static constexpr std::array<vk::PipelineStageFlags, 2> wait_stage_masks = {
|
static constexpr std::array<vk::PipelineStageFlags, 2> wait_stage_masks = {
|
||||||
vk::PipelineStageFlagBits::eAllCommands,
|
vk::PipelineStageFlagBits::eAllCommands,
|
||||||
@ -165,6 +153,11 @@ void Scheduler::SubmitExecution(vk::Semaphore signal_semaphore, vk::Semaphore wa
|
|||||||
LOG_CRITICAL(Render_Vulkan, "Device lost during submit: {}", err.what());
|
LOG_CRITICAL(Render_Vulkan, "Device lost during submit: {}", err.what());
|
||||||
UNREACHABLE();
|
UNREACHABLE();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (submit_done) {
|
||||||
|
*submit_done = true;
|
||||||
|
submit_done->notify_one();
|
||||||
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
if (!use_worker_thread) {
|
if (!use_worker_thread) {
|
||||||
|
@ -3,6 +3,7 @@
|
|||||||
|
|
||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
|
#include <atomic>
|
||||||
#include <condition_variable>
|
#include <condition_variable>
|
||||||
#include <cstddef>
|
#include <cstddef>
|
||||||
#include <memory>
|
#include <memory>
|
||||||
@ -38,15 +39,12 @@ public:
|
|||||||
~Scheduler();
|
~Scheduler();
|
||||||
|
|
||||||
/// Sends the current execution context to the GPU.
|
/// Sends the current execution context to the GPU.
|
||||||
void Flush(vk::Semaphore signal = nullptr, vk::Semaphore wait = nullptr);
|
void Flush(vk::Semaphore signal = nullptr, vk::Semaphore wait = nullptr,
|
||||||
|
std::atomic_bool* submit_done = nullptr);
|
||||||
|
|
||||||
/// Sends the current execution context to the GPU and waits for it to complete.
|
/// Sends the current execution context to the GPU and waits for it to complete.
|
||||||
void Finish(vk::Semaphore signal = nullptr, vk::Semaphore wait = nullptr);
|
void Finish(vk::Semaphore signal = nullptr, vk::Semaphore wait = nullptr);
|
||||||
|
|
||||||
/// Waits for the worker thread to finish executing everything. After this function returns it's
|
|
||||||
/// safe to touch worker resources.
|
|
||||||
void WaitWorker();
|
|
||||||
|
|
||||||
/// Sends currently recorded work to the worker thread.
|
/// Sends currently recorded work to the worker thread.
|
||||||
void DispatchWork();
|
void DispatchWork();
|
||||||
|
|
||||||
@ -199,8 +197,6 @@ private:
|
|||||||
|
|
||||||
void AllocateWorkerCommandBuffers();
|
void AllocateWorkerCommandBuffers();
|
||||||
|
|
||||||
void SubmitExecution(vk::Semaphore signal_semaphore, vk::Semaphore wait_semaphore);
|
|
||||||
|
|
||||||
void AcquireNewChunk();
|
void AcquireNewChunk();
|
||||||
|
|
||||||
private:
|
private:
|
||||||
|
@ -41,6 +41,7 @@ PresentMailbox::PresentMailbox(const Instance& instance_, Swapchain& swapchain_,
|
|||||||
|
|
||||||
for (u32 i = 0; i < SWAP_CHAIN_SIZE; i++) {
|
for (u32 i = 0; i < SWAP_CHAIN_SIZE; i++) {
|
||||||
Frame& frame = swap_chain[i];
|
Frame& frame = swap_chain[i];
|
||||||
|
frame.index = i;
|
||||||
frame.cmdbuf = command_buffers[i];
|
frame.cmdbuf = command_buffers[i];
|
||||||
frame.render_ready = device.createSemaphore({});
|
frame.render_ready = device.createSemaphore({});
|
||||||
frame.present_done = device.createFence({.flags = vk::FenceCreateFlagBits::eSignaled});
|
frame.present_done = device.createFence({.flags = vk::FenceCreateFlagBits::eSignaled});
|
||||||
@ -162,6 +163,7 @@ Frame* PresentMailbox::GetRenderFrame() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
device.resetFences(frame->present_done);
|
device.resetFences(frame->present_done);
|
||||||
|
frame->is_submitted = false;
|
||||||
return frame;
|
return frame;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -326,6 +328,9 @@ void PresentMailbox::CopyToSwapchain(Frame* frame) {
|
|||||||
.pSignalSemaphores = &present_ready,
|
.pSignalSemaphores = &present_ready,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// Ensure we won't wait on a semaphore that has no way of being signaled
|
||||||
|
frame->is_submitted.wait(false);
|
||||||
|
|
||||||
try {
|
try {
|
||||||
std::scoped_lock lock{scheduler.QueueMutex(), frame->fence_mutex};
|
std::scoped_lock lock{scheduler.QueueMutex(), frame->fence_mutex};
|
||||||
graphics_queue.submit(submit_info, frame->present_done);
|
graphics_queue.submit(submit_info, frame->present_done);
|
||||||
|
@ -2,6 +2,7 @@
|
|||||||
// Licensed under GPLv2 or any later version
|
// Licensed under GPLv2 or any later version
|
||||||
// Refer to the license.txt file included.
|
// Refer to the license.txt file included.
|
||||||
|
|
||||||
|
#include <atomic>
|
||||||
#include <condition_variable>
|
#include <condition_variable>
|
||||||
#include <mutex>
|
#include <mutex>
|
||||||
#include <queue>
|
#include <queue>
|
||||||
@ -21,6 +22,7 @@ class RenderpassCache;
|
|||||||
struct Frame {
|
struct Frame {
|
||||||
u32 width{};
|
u32 width{};
|
||||||
u32 height{};
|
u32 height{};
|
||||||
|
u32 index{};
|
||||||
VmaAllocation allocation{};
|
VmaAllocation allocation{};
|
||||||
vk::Framebuffer framebuffer{};
|
vk::Framebuffer framebuffer{};
|
||||||
vk::Image image{};
|
vk::Image image{};
|
||||||
@ -29,6 +31,7 @@ struct Frame {
|
|||||||
vk::Fence present_done{};
|
vk::Fence present_done{};
|
||||||
std::mutex fence_mutex{};
|
std::mutex fence_mutex{};
|
||||||
vk::CommandBuffer cmdbuf{};
|
vk::CommandBuffer cmdbuf{};
|
||||||
|
std::atomic_bool is_submitted{false};
|
||||||
};
|
};
|
||||||
|
|
||||||
class PresentMailbox final {
|
class PresentMailbox final {
|
||||||
|
Reference in New Issue
Block a user