renderer_vulkan: Small cleanups
@@ -217,7 +217,7 @@ void RendererVulkan::RenderToMailbox(const Layout::FramebufferLayout& layout,
 
     DrawScreens(frame, layout, flipped);
 
-    scheduler.Flush(frame->render_ready, nullptr, &frame->is_submitted);
+    scheduler.Flush(&frame->is_submitted, frame->render_ready);
 
     mailbox->Present(frame);
 }
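The present path now passes the submission flag first, and the wait semaphore argument disappears because it defaults to nullptr under the new order (see the header hunk below). A sketch of the call shapes this enables; only the first line matches this file, the rest are hypothetical:

```cpp
// Hypothetical call shapes under the new parameter order.
std::atomic_bool submitted{false};
scheduler.Flush(&submitted, frame->render_ready); // flag + signal semaphore
scheduler.Flush(&submitted);                      // flag only
scheduler.Flush();                                // neither
```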
@@ -210,10 +210,17 @@ void RasterizerVulkan::SetupVertexArray() {
         const PAddr data_addr =
             base_address + loader.data_offset + (vs_input_index_min * loader.byte_count);
         const u32 vertex_num = vs_input_index_max - vs_input_index_min + 1;
-        const u32 data_size = loader.byte_count * vertex_num;
+        u32 data_size = loader.byte_count * vertex_num;
         res_cache.FlushRegion(data_addr, data_size);
 
-        const u8* src_ptr = memory.GetPhysicalPointer(data_addr);
+        const MemoryRef src_ref = memory.GetPhysicalRef(data_addr);
+        if (src_ref.GetSize() < data_size) {
+            LOG_ERROR(Render_Vulkan,
+                      "Vertex buffer size {} exceeds available space {} at address {:#016X}",
+                      data_size, src_ref.GetSize(), data_addr);
+        }
+
+        const u8* src_ptr = src_ref.GetPtr();
         u8* dst_ptr = array_ptr + buffer_offset;
 
         // Align stride up if required by Vulkan implementation.
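GetPhysicalRef replaces the raw GetPhysicalPointer so the copy size can be checked against the memory actually backing the address; data_size loses its const, presumably so it can be clamped later. The hunk itself only logs, but a stand-alone sketch of the guard-and-clamp pattern looks like this (Span is a stand-in for Citra's MemoryRef):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>

// Span stands in for Citra's MemoryRef; the guard mirrors the new check.
struct Span {
    const std::uint8_t* ptr;
    std::size_t size;
};

void CopyGuarded(Span src, std::uint8_t* dst, std::uint32_t data_size) {
    if (src.size < data_size) {
        // The real code emits LOG_ERROR here; clamping keeps the copy in bounds.
        data_size = static_cast<std::uint32_t>(src.size);
    }
    std::memcpy(dst, src.ptr, data_size);
}
```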
@@ -42,65 +42,7 @@ Scheduler::Scheduler(const Instance& instance, RenderpassCache& renderpass_cache
 
 Scheduler::~Scheduler() = default;
 
-void Scheduler::Finish(vk::Semaphore signal, vk::Semaphore wait) {
-    const u64 presubmit_tick = CurrentTick();
-    std::atomic_bool submit_done{false};
-
-    Flush(signal, wait, &submit_done);
-    if (use_worker_thread) {
-        MICROPROFILE_SCOPE(Vulkan_WaitForWorker);
-        submit_done.wait(false);
-    }
-    Wait(presubmit_tick);
-}
-
-void Scheduler::DispatchWork() {
-    if (!use_worker_thread || chunk->Empty()) {
-        return;
-    }
-
-    {
-        std::scoped_lock lock{work_mutex};
-        work_queue.push(std::move(chunk));
-    }
-
-    work_cv.notify_one();
-    AcquireNewChunk();
-}
-
-void Scheduler::WorkerThread(std::stop_token stop_token) {
-    Common::SetCurrentThreadName("VulkanWorker");
-    do {
-        std::unique_ptr<CommandChunk> work;
-        {
-            std::unique_lock lock{work_mutex};
-            Common::CondvarWait(work_cv, lock, stop_token, [&] { return !work_queue.empty(); });
-            if (stop_token.stop_requested()) {
-                continue;
-            }
-            work = std::move(work_queue.front());
-            work_queue.pop();
-        }
-        const bool has_submit = work->HasSubmit();
-        work->ExecuteAll(current_cmdbuf);
-        if (has_submit) {
-            AllocateWorkerCommandBuffers();
-        }
-        std::scoped_lock reserve_lock{reserve_mutex};
-        chunk_reserve.push_back(std::move(work));
-    } while (!stop_token.stop_requested());
-}
-
-void Scheduler::AllocateWorkerCommandBuffers() {
-    const vk::CommandBufferBeginInfo begin_info = {
-        .flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit,
-    };
-
-    current_cmdbuf = command_pool.Commit();
-    current_cmdbuf.begin(begin_info);
-}
-
-void Scheduler::Flush(vk::Semaphore signal, vk::Semaphore wait, std::atomic_bool* submit_done) {
+void Scheduler::Flush(std::atomic_bool* submit_done, vk::Semaphore signal, vk::Semaphore wait) {
     const vk::Semaphore handle = master_semaphore.Handle();
     const u64 signal_value = master_semaphore.NextTick();
     state = StateFlags::AllDirty;
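Only the Flush signature changes here; the body, elided by the hunk, still signals the master timeline semaphore. A generic sketch of what such a submit looks like, not the literal Flush body (assumes the u64/u32 aliases of this codebase and VULKAN_HPP_NO_CONSTRUCTORS for designated initializers, which the surrounding code evidently uses):

```cpp
#include <array>

// Illustrative timeline-semaphore submit: a binary semaphore may ride along,
// pairing with a dummy value of 0 in the timeline value array.
void SubmitSketch(vk::Queue queue, vk::CommandBuffer cmdbuf, vk::Semaphore timeline,
                  u64 signal_value, vk::Semaphore signal /* may be null */) {
    const std::array signal_semaphores = {timeline, signal};
    const std::array<u64, 2> signal_values = {signal_value, 0};
    const u32 signal_count = signal ? 2u : 1u;

    const vk::TimelineSemaphoreSubmitInfo timeline_info = {
        .signalSemaphoreValueCount = signal_count,
        .pSignalSemaphoreValues = signal_values.data(),
    };
    const vk::SubmitInfo submit_info = {
        .pNext = &timeline_info,
        .commandBufferCount = 1u,
        .pCommandBuffers = &cmdbuf,
        .signalSemaphoreCount = signal_count,
        .pSignalSemaphores = signal_semaphores.data(),
    };
    queue.submit(submit_info);
}
```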
@@ -163,6 +105,72 @@ void Scheduler::Flush(vk::Semaphore signal, vk::Semaphore wait, std::atomic_bool
     }
 }
 
+void Scheduler::Finish(vk::Semaphore signal, vk::Semaphore wait) {
+    const u64 presubmit_tick = CurrentTick();
+    std::atomic_bool submit_done{false};
+
+    Flush(&submit_done, signal, wait);
+    if (use_worker_thread) {
+        MICROPROFILE_SCOPE(Vulkan_WaitForWorker);
+        submit_done.wait(false);
+    }
+    Wait(presubmit_tick);
+}
+
+void Scheduler::Wait(u64 tick) {
+    if (tick >= master_semaphore.CurrentTick()) {
+        // Make sure we are not waiting for the current tick without signalling
+        Flush();
+    }
+    master_semaphore.Wait(tick);
+}
+
+void Scheduler::DispatchWork() {
+    if (!use_worker_thread || chunk->Empty()) {
+        return;
+    }
+
+    {
+        std::scoped_lock lock{work_mutex};
+        work_queue.push(std::move(chunk));
+    }
+
+    work_cv.notify_one();
+    AcquireNewChunk();
+}
+
+void Scheduler::WorkerThread(std::stop_token stop_token) {
+    Common::SetCurrentThreadName("VulkanWorker");
+    do {
+        std::unique_ptr<CommandChunk> work;
+        {
+            std::unique_lock lock{work_mutex};
+            Common::CondvarWait(work_cv, lock, stop_token, [&] { return !work_queue.empty(); });
+            if (stop_token.stop_requested()) {
+                continue;
+            }
+            work = std::move(work_queue.front());
+            work_queue.pop();
+        }
+        const bool has_submit = work->HasSubmit();
+        work->ExecuteAll(current_cmdbuf);
+        if (has_submit) {
+            AllocateWorkerCommandBuffers();
+        }
+        std::scoped_lock reserve_lock{reserve_mutex};
+        chunk_reserve.push_back(std::move(work));
+    } while (!stop_token.stop_requested());
+}
+
+void Scheduler::AllocateWorkerCommandBuffers() {
+    const vk::CommandBufferBeginInfo begin_info = {
+        .flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit,
+    };
+
+    current_cmdbuf = command_pool.Commit();
+    current_cmdbuf.begin(begin_info);
+}
+
 void Scheduler::AcquireNewChunk() {
     std::scoped_lock lock{reserve_mutex};
     if (chunk_reserve.empty()) {
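Finish, Wait, DispatchWork, WorkerThread and AllocateWorkerCommandBuffers are re-inserted below Flush unchanged, except that Finish adopts the new argument order and the relocated Wait drops the header version's LOG_WARNING. For reference, the jthread/condition_variable_any pattern that WorkerThread relies on (Common::CondvarWait is a thin wrapper over a stop_token-aware wait), as a self-contained sketch:

```cpp
#include <condition_variable>
#include <mutex>
#include <queue>
#include <stop_token>
#include <thread>

// Stand-alone illustration of the same worker pattern: a std::jthread drains
// a queue, waking on a condition variable and exiting when stop is requested.
class Worker {
public:
    Worker() : thread{[this](std::stop_token token) { Run(token); }} {}

    void Push(int job) {
        {
            std::scoped_lock lock{mutex};
            queue.push(job);
        }
        cv.notify_one();
    }

private:
    void Run(std::stop_token token) {
        while (!token.stop_requested()) {
            int job;
            {
                std::unique_lock lock{mutex};
                // wait() returns false when stop was requested before the
                // predicate became true; bail out in that case.
                if (!cv.wait(lock, token, [&] { return !queue.empty(); })) {
                    return;
                }
                job = queue.front();
                queue.pop();
            }
            (void)job; // execute the job outside the lock, as WorkerThread does
        }
    }

    std::mutex mutex;
    std::condition_variable_any cv;
    std::queue<int> queue;
    std::jthread thread; // declared last: destroyed first, requesting stop
};
```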
@@ -39,12 +39,15 @@ public:
     ~Scheduler();
 
     /// Sends the current execution context to the GPU.
-    void Flush(vk::Semaphore signal = nullptr, vk::Semaphore wait = nullptr,
-               std::atomic_bool* submit_done = nullptr);
+    void Flush(std::atomic_bool* submit_done = nullptr, vk::Semaphore signal = nullptr,
+               vk::Semaphore wait = nullptr);
 
     /// Sends the current execution context to the GPU and waits for it to complete.
     void Finish(vk::Semaphore signal = nullptr, vk::Semaphore wait = nullptr);
 
+    /// Waits for the given tick to trigger on the GPU.
+    void Wait(u64 tick);
+
     /// Sends currently recorded work to the worker thread.
     void DispatchWork();
 
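The reorder pays off at call sites because defaulted parameters can only be omitted from the right. An illustrative before/after:

```cpp
// A caller that only wants the submit flag:
scheduler.Flush(nullptr, nullptr, &submit_done); // old: both defaults spelled out
scheduler.Flush(&submit_done);                   // new: defaults stay defaulted
```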
@@ -93,16 +96,6 @@ public:
         return master_semaphore.IsFree(tick);
     }
 
-    /// Waits for the given tick to trigger on the GPU.
-    void Wait(u64 tick) {
-        if (tick >= master_semaphore.CurrentTick()) {
-            // Make sure we are not waiting for the current tick without signalling
-            LOG_WARNING(Render_Vulkan, "Flushing current tick");
-            Flush();
-        }
-        master_semaphore.Wait(tick);
-    }
-
     /// Returns the master timeline semaphore.
     [[nodiscard]] MasterSemaphore& GetMasterSemaphore() noexcept {
         return master_semaphore;
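With the inline definition gone, the header keeps only the declaration added in the earlier hunk, and the body moves to the .cpp. A sketch of the plumbing it presumably bottoms out in, assuming MasterSemaphore wraps a Vulkan timeline semaphore (WaitForTick and its parameters are illustrative, not the actual interface):

```cpp
#include <limits>

// Blocks until the timeline semaphore reaches the requested tick.
void WaitForTick(vk::Device device, vk::Semaphore timeline, u64 tick) {
    const vk::SemaphoreWaitInfo wait_info = {
        .semaphoreCount = 1,
        .pSemaphores = &timeline,
        .pValues = &tick,
    };
    void(device.waitSemaphores(wait_info, std::numeric_limits<u64>::max()));
}
```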