renderer_vulkan: Revert some recent present changes
* Fixes the deadlocking on mingw until I rewrite presentation again
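For context: the reverted path synchronized presentation through a C++20 atomic flag — `Flush` took a `std::atomic_bool* submit_done`, and `CopyToSwapchain` blocked on `frame->is_submitted.wait(false)`. Below is a minimal sketch of that handshake (names like `FakeFrame`, `producer`, `consumer` are illustrative, not from the codebase). If the waiting side can reach `wait()` for a frame whose submit is never recorded, no notify ever arrives and the wait blocks forever — one plausible way such a scheme can hang:

```cpp
// Minimal sketch (not Citra's code) of a C++20 atomic wait/notify handshake.
#include <atomic>
#include <functional>
#include <thread>

struct FakeFrame {
    std::atomic_bool is_submitted{false};
};

void producer(FakeFrame& frame) {
    // ... record and submit GPU work here ...
    frame.is_submitted = true;       // publish completion
    frame.is_submitted.notify_one(); // wake any waiter (C++20)
}

void consumer(FakeFrame& frame) {
    // Blocks while the value still equals `false` (C++20 atomic wait).
    // If producer() is never invoked for this frame, this never returns.
    frame.is_submitted.wait(false);
    // ... safe to present the frame now ...
}

int main() {
    FakeFrame frame;
    std::jthread t{producer, std::ref(frame)};
    consumer(frame);
}
```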
@@ -217,8 +217,9 @@ void RendererVulkan::RenderToMailbox(const Layout::FramebufferLayout& layout,
     DrawScreens(frame, layout, flipped);
 
-    scheduler.Flush(&frame->is_submitted, frame->render_ready);
-    mailbox->Present(frame);
+    scheduler.Flush(frame->render_ready);
+    scheduler.Record([&mailbox, frame](vk::CommandBuffer) { mailbox->Present(frame); });
+    scheduler.DispatchWork();
 }
 
 void RendererVulkan::BeginRendering(Frame* frame) {
@@ -42,23 +42,106 @@ Scheduler::Scheduler(const Instance& instance, RenderpassCache& renderpass_cache
 
 Scheduler::~Scheduler() = default;
 
-void Scheduler::Flush(std::atomic_bool* submit_done, vk::Semaphore signal, vk::Semaphore wait) {
+void Scheduler::Flush(vk::Semaphore signal, vk::Semaphore wait) {
+    SubmitExecution(signal, wait);
+}
+
+void Scheduler::Finish(vk::Semaphore signal, vk::Semaphore wait) {
+    const u64 presubmit_tick = CurrentTick();
+    SubmitExecution(signal, wait);
+    WaitWorker();
+    Wait(presubmit_tick);
+}
+
+void Scheduler::WaitWorker() {
+    if (!use_worker_thread) {
+        return;
+    }
+
+    MICROPROFILE_SCOPE(Vulkan_WaitForWorker);
+    DispatchWork();
+
+    std::unique_lock lock{work_mutex};
+    wait_cv.wait(lock, [this] { return work_queue.empty(); });
+}
+
+void Scheduler::Wait(u64 tick) {
+    if (tick >= master_semaphore.CurrentTick()) {
+        // Make sure we are not waiting for the current tick without signalling
+        Flush();
+    }
+    master_semaphore.Wait(tick);
+}
+
+void Scheduler::DispatchWork() {
+    if (!use_worker_thread || chunk->Empty()) {
+        return;
+    }
+
+    {
+        std::scoped_lock lock{work_mutex};
+        work_queue.push(std::move(chunk));
+    }
+
+    work_cv.notify_one();
+    AcquireNewChunk();
+}
+
+void Scheduler::WorkerThread(std::stop_token stop_token) {
+    Common::SetCurrentThreadName("VulkanWorker");
+    do {
+        std::unique_ptr<CommandChunk> work;
+        bool has_submit{false};
+        {
+            std::unique_lock lock{work_mutex};
+            if (work_queue.empty()) {
+                wait_cv.notify_all();
+            }
+            Common::CondvarWait(work_cv, lock, stop_token, [&] { return !work_queue.empty(); });
+            if (stop_token.stop_requested()) {
+                continue;
+            }
+            work = std::move(work_queue.front());
+            work_queue.pop();
+
+            has_submit = work->HasSubmit();
+            work->ExecuteAll(current_cmdbuf);
+        }
+        if (has_submit) {
+            AllocateWorkerCommandBuffers();
+        }
+        std::scoped_lock reserve_lock{reserve_mutex};
+        chunk_reserve.push_back(std::move(work));
+    } while (!stop_token.stop_requested());
+}
+
+void Scheduler::AllocateWorkerCommandBuffers() {
+    const vk::CommandBufferBeginInfo begin_info = {
+        .flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit,
+    };
+
+    current_cmdbuf = command_pool.Commit();
+    current_cmdbuf.begin(begin_info);
+}
+
+void Scheduler::SubmitExecution(vk::Semaphore signal_semaphore, vk::Semaphore wait_semaphore) {
     const vk::Semaphore handle = master_semaphore.Handle();
     const u64 signal_value = master_semaphore.NextTick();
     state = StateFlags::AllDirty;
 
     renderpass_cache.EndRendering();
-    Record([signal, wait, handle, signal_value, submit_done, this](vk::CommandBuffer cmdbuf) {
+    Record(
+        [signal_semaphore, wait_semaphore, handle, signal_value, this](vk::CommandBuffer cmdbuf) {
         MICROPROFILE_SCOPE(Vulkan_Submit);
         cmdbuf.end();
 
-        const u32 num_signal_semaphores = signal ? 2U : 1U;
+        const u32 num_signal_semaphores = signal_semaphore ? 2U : 1U;
         const std::array signal_values{signal_value, u64(0)};
-        const std::array signal_semaphores{handle, signal};
+        const std::array signal_semaphores{handle, signal_semaphore};
 
-        const u32 num_wait_semaphores = wait ? 2U : 1U;
+        const u32 num_wait_semaphores = wait_semaphore ? 2U : 1U;
         const std::array wait_values{signal_value - 1, u64(1)};
-        const std::array wait_semaphores{handle, wait};
+        const std::array wait_semaphores{handle, wait_semaphore};
 
         static constexpr std::array<vk::PipelineStageFlags, 2> wait_stage_masks = {
             vk::PipelineStageFlagBits::eAllCommands,
@@ -90,11 +173,6 @@ void Scheduler::Flush(std::atomic_bool* submit_done, vk::Semaphore signal, vk::S
             LOG_CRITICAL(Render_Vulkan, "Device lost during submit: {}", err.what());
             UNREACHABLE();
         }
-
-        if (submit_done) {
-            *submit_done = true;
-            submit_done->notify_one();
-        }
     });
 
     if (!use_worker_thread) {
@@ -105,72 +183,6 @@ void Scheduler::Flush(std::atomic_bool* submit_done, vk::Semaphore signal, vk::S
     }
 }
 
-void Scheduler::Finish(vk::Semaphore signal, vk::Semaphore wait) {
-    const u64 presubmit_tick = CurrentTick();
-    std::atomic_bool submit_done{false};
-
-    Flush(&submit_done, signal, wait);
-    if (use_worker_thread) {
-        MICROPROFILE_SCOPE(Vulkan_WaitForWorker);
-        submit_done.wait(false);
-    }
-    Wait(presubmit_tick);
-}
-
-void Scheduler::Wait(u64 tick) {
-    if (tick >= master_semaphore.CurrentTick()) {
-        // Make sure we are not waiting for the current tick without signalling
-        Flush();
-    }
-    master_semaphore.Wait(tick);
-}
-
-void Scheduler::DispatchWork() {
-    if (!use_worker_thread || chunk->Empty()) {
-        return;
-    }
-
-    {
-        std::scoped_lock lock{work_mutex};
-        work_queue.push(std::move(chunk));
-    }
-
-    work_cv.notify_one();
-    AcquireNewChunk();
-}
-
-void Scheduler::WorkerThread(std::stop_token stop_token) {
-    Common::SetCurrentThreadName("VulkanWorker");
-    do {
-        std::unique_ptr<CommandChunk> work;
-        {
-            std::unique_lock lock{work_mutex};
-            Common::CondvarWait(work_cv, lock, stop_token, [&] { return !work_queue.empty(); });
-            if (stop_token.stop_requested()) {
-                continue;
-            }
-            work = std::move(work_queue.front());
-            work_queue.pop();
-        }
-        const bool has_submit = work->HasSubmit();
-        work->ExecuteAll(current_cmdbuf);
-        if (has_submit) {
-            AllocateWorkerCommandBuffers();
-        }
-        std::scoped_lock reserve_lock{reserve_mutex};
-        chunk_reserve.push_back(std::move(work));
-    } while (!stop_token.stop_requested());
-}
-
-void Scheduler::AllocateWorkerCommandBuffers() {
-    const vk::CommandBufferBeginInfo begin_info = {
-        .flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit,
-    };
-
-    current_cmdbuf = command_pool.Commit();
-    current_cmdbuf.begin(begin_info);
-}
-
 void Scheduler::AcquireNewChunk() {
     std::scoped_lock lock{reserve_mutex};
     if (chunk_reserve.empty()) {
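Aside on the tick logic in `Scheduler::Wait` above: `CurrentTick()` is the value the next submission will signal, so waiting for `tick >= CurrentTick()` without flushing first would block on a value no pending submit will ever produce; hence the `Flush()` before `master_semaphore.Wait(tick)`. A rough stand-in model of that counter (the `TickTimeline` class below is made up for illustration, assuming `MasterSemaphore` behaves like a monotonically increasing timeline):

```cpp
// Illustrative model (not the project's MasterSemaphore) of timeline ticks.
#include <algorithm>
#include <condition_variable>
#include <cstdint>
#include <mutex>

class TickTimeline {
public:
    // Value the *next* submission will signal. Unsynchronized for brevity;
    // assumed to be read only from the submitting thread.
    std::uint64_t CurrentTick() const { return next_tick_; }

    // Called on submit: hand out a fresh tick to be signaled later.
    std::uint64_t NextTick() { return next_tick_++; }

    // Called when the GPU (here: another thread) finishes a submission.
    void Signal(std::uint64_t tick) {
        std::scoped_lock lock{mutex_};
        signaled_ = std::max(signaled_, tick);
        cv_.notify_all();
    }

    // Block until `tick` has been signaled. Deadlocks if `tick` was never
    // handed to a submission -- which is what the Flush() guard prevents.
    void Wait(std::uint64_t tick) {
        std::unique_lock lock{mutex_};
        cv_.wait(lock, [&] { return signaled_ >= tick; });
    }

private:
    std::uint64_t next_tick_ = 1; // value the next submission will signal
    std::uint64_t signaled_ = 0;  // highest value signaled so far
    std::mutex mutex_;
    std::condition_variable cv_;
};
```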
@@ -39,12 +39,15 @@ public:
     ~Scheduler();
 
     /// Sends the current execution context to the GPU.
-    void Flush(std::atomic_bool* submit_done = nullptr, vk::Semaphore signal = nullptr,
-               vk::Semaphore wait = nullptr);
+    void Flush(vk::Semaphore signal = nullptr, vk::Semaphore wait = nullptr);
 
     /// Sends the current execution context to the GPU and waits for it to complete.
     void Finish(vk::Semaphore signal = nullptr, vk::Semaphore wait = nullptr);
 
+    /// Waits for the worker thread to finish executing everything. After this function returns
+    /// it's safe to touch worker resources.
+    void WaitWorker();
+
     /// Waits for the given tick to trigger on the GPU.
     void Wait(u64 tick);
 
@@ -190,6 +193,8 @@ private:
 
     void AllocateWorkerCommandBuffers();
 
+    void SubmitExecution(vk::Semaphore signal_semaphore, vk::Semaphore wait_semaphore);
+
     void AcquireNewChunk();
 
 private:
@@ -206,6 +211,7 @@ private:
     std::mutex work_mutex;
     std::mutex queue_mutex;
     std::condition_variable_any work_cv;
+    std::condition_variable wait_cv;
     std::jthread worker_thread;
     bool use_worker_thread;
 };
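Note on `work_cv` above: `std::condition_variable_any` is used (rather than `std::condition_variable`) because in C++20 it can wait on a `std::stop_token` directly, which is presumably what `Common::CondvarWait` wraps. A self-contained sketch of that pattern (the `Worker` function and `int` work items are illustrative):

```cpp
// Sketch of a stop_token-aware condition variable wait (C++20).
#include <condition_variable>
#include <mutex>
#include <queue>
#include <stop_token>
#include <thread>

std::mutex work_mutex;
std::condition_variable_any work_cv;
std::queue<int> work_queue;

void Worker(std::stop_token stop_token) {
    while (!stop_token.stop_requested()) {
        std::unique_lock lock{work_mutex};
        // Returns false if woken by a stop request with the predicate unmet.
        if (!work_cv.wait(lock, stop_token, [] { return !work_queue.empty(); })) {
            return; // shutting down
        }
        work_queue.pop(); // execute the work item here
    }
}

int main() {
    std::jthread worker{Worker}; // jthread passes its stop_token automatically
    {
        std::scoped_lock lock{work_mutex};
        work_queue.push(42);
    }
    work_cv.notify_one();
} // ~jthread requests stop, which also wakes the stop_token-aware wait
```

A `std::jthread` like `worker_thread` requests stop and joins on destruction, so the worker needs no separate shutdown flag.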
@@ -1668,8 +1668,8 @@ layout (set = 0, binding = 0, std140) uniform vs_config {
         if (used_regs[i]) {
             const auto flags = config.state.load_flags[i];
             const std::string_view prefix = MakeLoadPrefix(flags);
-            out += fmt::format("layout(location = {0}) in {1}vec4 vs_in_typed_reg{0};\n", i,
-                               prefix);
+            out +=
+                fmt::format("layout(location = {0}) in {1}vec4 vs_in_typed_reg{0};\n", i, prefix);
             out += fmt::format("vec4 vs_in_reg{0} = vec4(vs_in_typed_reg{0});\n", i);
         }
     }
@@ -163,7 +163,6 @@ Frame* PresentMailbox::GetRenderFrame() {
     }
 
     device.resetFences(frame->present_done);
-    frame->is_submitted = false;
     return frame;
 }
 
@@ -328,9 +327,6 @@ void PresentMailbox::CopyToSwapchain(Frame* frame) {
         .pSignalSemaphores = &present_ready,
     };
 
-    // Ensure we won't wait on a semaphore that has no way of being signaled
-    frame->is_submitted.wait(false);
-
     try {
         std::scoped_lock lock{scheduler.QueueMutex(), frame->fence_mutex};
         graphics_queue.submit(submit_info, frame->present_done);
@@ -30,7 +30,6 @@ struct Frame {
     vk::Fence present_done{};
     std::mutex fence_mutex{};
     vk::CommandBuffer cmdbuf{};
-    std::atomic_bool is_submitted{false};
 };
 
 class PresentMailbox final {