renderer_vulkan: Revert some recent present changes

* Fixes deadlocks on MinGW until I rewrite presentation again
GPUCode
2023-03-11 20:45:19 +02:00
parent c5f2267306
commit 72c1785bf0
6 changed files with 95 additions and 81 deletions
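For context on the deadlock mentioned in the commit message: the changes below remove a handshake in which the renderer flushed with a pointer to frame->is_submitted and the present mailbox blocked on that flag (frame->is_submitted.wait(false)) before submitting the swapchain copy. The following is a minimal sketch of that std::atomic_bool wait/notify handshake, written as a standalone program rather than the project's code; if the storing/notifying side never runs (for example, the submit callback is stuck behind a stalled worker thread), the waiting thread blocks forever, which is the kind of hang this revert sidesteps.

#include <atomic>
#include <thread>

int main() {
    // Stand-in for frame->is_submitted in the code removed below.
    std::atomic_bool is_submitted{false};

    // Stand-in for the present mailbox: block until the render submit is flagged.
    std::thread present_thread([&] {
        is_submitted.wait(false); // hangs forever if no one ever stores true and notifies
        // ...submit the swapchain copy here...
    });

    // Stand-in for the scheduler's submit callback: flag completion and wake the waiter.
    std::thread render_thread([&] {
        // ...queue submit of the rendered frame here...
        is_submitted = true;
        is_submitted.notify_one();
    });

    render_thread.join();
    present_thread.join();
}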


@ -217,8 +217,9 @@ void RendererVulkan::RenderToMailbox(const Layout::FramebufferLayout& layout,
DrawScreens(frame, layout, flipped);
scheduler.Flush(&frame->is_submitted, frame->render_ready);
mailbox->Present(frame);
scheduler.Flush(frame->render_ready);
scheduler.Record([&mailbox, frame](vk::CommandBuffer) { mailbox->Present(frame); });
scheduler.DispatchWork();
}
void RendererVulkan::BeginRendering(Frame* frame) {
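After the revert, RenderToMailbox no longer passes an atomic flag to Flush; it flushes with the frame's render_ready semaphore and records mailbox->Present as ordinary scheduler work, so ordering between the submit and the present comes from the FIFO work queue rather than from a cross-thread wait. A toy illustration of that ordering guarantee (hypothetical queue and lambdas, not the project's scheduler):

#include <functional>
#include <iostream>
#include <queue>

int main() {
    std::queue<std::function<void()>> work_queue;

    // Recorded in this order by the render thread (mirrors the restored RenderToMailbox).
    work_queue.push([] { std::cout << "submit rendering, signal render_ready\n"; });
    work_queue.push([] { std::cout << "mailbox->Present(frame)\n"; });

    // The worker drains the queue FIFO, so the present can never overtake the submit before it.
    while (!work_queue.empty()) {
        work_queue.front()();
        work_queue.pop();
    }
}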


@ -42,81 +42,29 @@ Scheduler::Scheduler(const Instance& instance, RenderpassCache& renderpass_cache
Scheduler::~Scheduler() = default;
void Scheduler::Flush(std::atomic_bool* submit_done, vk::Semaphore signal, vk::Semaphore wait) {
const vk::Semaphore handle = master_semaphore.Handle();
const u64 signal_value = master_semaphore.NextTick();
state = StateFlags::AllDirty;
renderpass_cache.EndRendering();
Record([signal, wait, handle, signal_value, submit_done, this](vk::CommandBuffer cmdbuf) {
MICROPROFILE_SCOPE(Vulkan_Submit);
cmdbuf.end();
const u32 num_signal_semaphores = signal ? 2U : 1U;
const std::array signal_values{signal_value, u64(0)};
const std::array signal_semaphores{handle, signal};
const u32 num_wait_semaphores = wait ? 2U : 1U;
const std::array wait_values{signal_value - 1, u64(1)};
const std::array wait_semaphores{handle, wait};
static constexpr std::array<vk::PipelineStageFlags, 2> wait_stage_masks = {
vk::PipelineStageFlagBits::eAllCommands,
vk::PipelineStageFlagBits::eColorAttachmentOutput,
};
const vk::TimelineSemaphoreSubmitInfoKHR timeline_si = {
.waitSemaphoreValueCount = num_wait_semaphores,
.pWaitSemaphoreValues = wait_values.data(),
.signalSemaphoreValueCount = num_signal_semaphores,
.pSignalSemaphoreValues = signal_values.data(),
};
const vk::SubmitInfo submit_info = {
.pNext = &timeline_si,
.waitSemaphoreCount = num_wait_semaphores,
.pWaitSemaphores = wait_semaphores.data(),
.pWaitDstStageMask = wait_stage_masks.data(),
.commandBufferCount = 1u,
.pCommandBuffers = &cmdbuf,
.signalSemaphoreCount = num_signal_semaphores,
.pSignalSemaphores = signal_semaphores.data(),
};
try {
std::scoped_lock lock{queue_mutex};
instance.GetGraphicsQueue().submit(submit_info);
} catch (vk::DeviceLostError& err) {
LOG_CRITICAL(Render_Vulkan, "Device lost during submit: {}", err.what());
UNREACHABLE();
}
if (submit_done) {
*submit_done = true;
submit_done->notify_one();
}
});
if (!use_worker_thread) {
AllocateWorkerCommandBuffers();
} else {
chunk->MarkSubmit();
DispatchWork();
}
void Scheduler::Flush(vk::Semaphore signal, vk::Semaphore wait) {
SubmitExecution(signal, wait);
}
void Scheduler::Finish(vk::Semaphore signal, vk::Semaphore wait) {
const u64 presubmit_tick = CurrentTick();
std::atomic_bool submit_done{false};
Flush(&submit_done, signal, wait);
if (use_worker_thread) {
MICROPROFILE_SCOPE(Vulkan_WaitForWorker);
submit_done.wait(false);
}
SubmitExecution(signal, wait);
WaitWorker();
Wait(presubmit_tick);
}
void Scheduler::WaitWorker() {
if (!use_worker_thread) {
return;
}
MICROPROFILE_SCOPE(Vulkan_WaitForWorker);
DispatchWork();
std::unique_lock lock{work_mutex};
wait_cv.wait(lock, [this] { return work_queue.empty(); });
}
void Scheduler::Wait(u64 tick) {
if (tick >= master_semaphore.CurrentTick()) {
// Make sure we are not waiting for the current tick without signalling
@ -143,17 +91,22 @@ void Scheduler::WorkerThread(std::stop_token stop_token) {
Common::SetCurrentThreadName("VulkanWorker");
do {
std::unique_ptr<CommandChunk> work;
bool has_submit{false};
{
std::unique_lock lock{work_mutex};
if (work_queue.empty()) {
wait_cv.notify_all();
}
Common::CondvarWait(work_cv, lock, stop_token, [&] { return !work_queue.empty(); });
if (stop_token.stop_requested()) {
continue;
}
work = std::move(work_queue.front());
work_queue.pop();
has_submit = work->HasSubmit();
work->ExecuteAll(current_cmdbuf);
}
const bool has_submit = work->HasSubmit();
work->ExecuteAll(current_cmdbuf);
if (has_submit) {
AllocateWorkerCommandBuffers();
}
@ -171,6 +124,65 @@ void Scheduler::AllocateWorkerCommandBuffers() {
current_cmdbuf.begin(begin_info);
}
void Scheduler::SubmitExecution(vk::Semaphore signal_semaphore, vk::Semaphore wait_semaphore) {
const vk::Semaphore handle = master_semaphore.Handle();
const u64 signal_value = master_semaphore.NextTick();
state = StateFlags::AllDirty;
renderpass_cache.EndRendering();
Record(
[signal_semaphore, wait_semaphore, handle, signal_value, this](vk::CommandBuffer cmdbuf) {
MICROPROFILE_SCOPE(Vulkan_Submit);
cmdbuf.end();
const u32 num_signal_semaphores = signal_semaphore ? 2U : 1U;
const std::array signal_values{signal_value, u64(0)};
const std::array signal_semaphores{handle, signal_semaphore};
const u32 num_wait_semaphores = wait_semaphore ? 2U : 1U;
const std::array wait_values{signal_value - 1, u64(1)};
const std::array wait_semaphores{handle, wait_semaphore};
static constexpr std::array<vk::PipelineStageFlags, 2> wait_stage_masks = {
vk::PipelineStageFlagBits::eAllCommands,
vk::PipelineStageFlagBits::eColorAttachmentOutput,
};
const vk::TimelineSemaphoreSubmitInfoKHR timeline_si = {
.waitSemaphoreValueCount = num_wait_semaphores,
.pWaitSemaphoreValues = wait_values.data(),
.signalSemaphoreValueCount = num_signal_semaphores,
.pSignalSemaphoreValues = signal_values.data(),
};
const vk::SubmitInfo submit_info = {
.pNext = &timeline_si,
.waitSemaphoreCount = num_wait_semaphores,
.pWaitSemaphores = wait_semaphores.data(),
.pWaitDstStageMask = wait_stage_masks.data(),
.commandBufferCount = 1u,
.pCommandBuffers = &cmdbuf,
.signalSemaphoreCount = num_signal_semaphores,
.pSignalSemaphores = signal_semaphores.data(),
};
try {
std::scoped_lock lock{queue_mutex};
instance.GetGraphicsQueue().submit(submit_info);
} catch (vk::DeviceLostError& err) {
LOG_CRITICAL(Render_Vulkan, "Device lost during submit: {}", err.what());
UNREACHABLE();
}
});
if (!use_worker_thread) {
AllocateWorkerCommandBuffers();
} else {
chunk->MarkSubmit();
DispatchWork();
}
}
void Scheduler::AcquireNewChunk() {
std::scoped_lock lock{reserve_mutex};
if (chunk_reserve.empty()) {
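The vk_scheduler.cpp changes above restore WaitWorker() together with the wait_cv condition variable: the worker thread notifies wait_cv whenever it finds the queue empty, and WaitWorker() blocks until that happens, which is how Finish() now waits for the CPU side before waiting on the GPU tick. A condensed, self-contained sketch of that drain pattern follows; the globals, WorkerLoop, and main are illustrative simplifications, not the project's code.

#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <stop_token>
#include <thread>

std::mutex work_mutex;
std::condition_variable_any work_cv; // wakes the worker when new work is queued
std::condition_variable wait_cv;     // wakes WaitWorker once the queue has drained
std::queue<std::function<void()>> work_queue;

// Caller side: block until the worker has picked up everything queued so far.
void WaitWorker() {
    std::unique_lock lock{work_mutex};
    wait_cv.wait(lock, [] { return work_queue.empty(); });
}

// Worker side: report an empty queue, then sleep until work arrives or a stop is requested.
void WorkerLoop(std::stop_token stop) {
    while (!stop.stop_requested()) {
        std::function<void()> work;
        {
            std::unique_lock lock{work_mutex};
            if (work_queue.empty()) {
                wait_cv.notify_all();
            }
            work_cv.wait(lock, stop, [] { return !work_queue.empty(); });
            if (stop.stop_requested()) {
                continue;
            }
            work = std::move(work_queue.front());
            work_queue.pop();
        }
        work(); // executed outside the lock, like ExecuteAll in the restored code
    }
}

int main() {
    std::jthread worker(WorkerLoop);
    {
        std::scoped_lock lock{work_mutex};
        work_queue.push([] { /* pretend to record and submit Vulkan commands */ });
    }
    work_cv.notify_one();
    WaitWorker(); // returns once the worker has drained the queue
}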


@ -39,12 +39,15 @@ public:
~Scheduler();
/// Sends the current execution context to the GPU.
void Flush(std::atomic_bool* submit_done = nullptr, vk::Semaphore signal = nullptr,
vk::Semaphore wait = nullptr);
void Flush(vk::Semaphore signal = nullptr, vk::Semaphore wait = nullptr);
/// Sends the current execution context to the GPU and waits for it to complete.
void Finish(vk::Semaphore signal = nullptr, vk::Semaphore wait = nullptr);
/// Waits for the worker thread to finish executing everything. After this function returns it's
/// safe to touch worker resources.
void WaitWorker();
/// Waits for the given tick to trigger on the GPU.
void Wait(u64 tick);
@ -190,6 +193,8 @@ private:
void AllocateWorkerCommandBuffers();
void SubmitExecution(vk::Semaphore signal_semaphore, vk::Semaphore wait_semaphore);
void AcquireNewChunk();
private:
@ -206,6 +211,7 @@ private:
std::mutex work_mutex;
std::mutex queue_mutex;
std::condition_variable_any work_cv;
std::condition_variable wait_cv;
std::jthread worker_thread;
bool use_worker_thread;
};


@ -1668,8 +1668,8 @@ layout (set = 0, binding = 0, std140) uniform vs_config {
if (used_regs[i]) {
const auto flags = config.state.load_flags[i];
const std::string_view prefix = MakeLoadPrefix(flags);
out += fmt::format("layout(location = {0}) in {1}vec4 vs_in_typed_reg{0};\n", i,
prefix);
out +=
fmt::format("layout(location = {0}) in {1}vec4 vs_in_typed_reg{0};\n", i, prefix);
out += fmt::format("vec4 vs_in_reg{0} = vec4(vs_in_typed_reg{0});\n", i);
}
}


@ -163,7 +163,6 @@ Frame* PresentMailbox::GetRenderFrame() {
}
device.resetFences(frame->present_done);
frame->is_submitted = false;
return frame;
}
@ -328,9 +327,6 @@ void PresentMailbox::CopyToSwapchain(Frame* frame) {
.pSignalSemaphores = &present_ready,
};
// Ensure we won't wait on a semaphore that has no way of being signaled
frame->is_submitted.wait(false);
try {
std::scoped_lock lock{scheduler.QueueMutex(), frame->fence_mutex};
graphics_queue.submit(submit_info, frame->present_done);


@ -30,7 +30,6 @@ struct Frame {
vk::Fence present_done{};
std::mutex fence_mutex{};
vk::CommandBuffer cmdbuf{};
std::atomic_bool is_submitted{false};
};
class PresentMailbox final {