yuzu-android/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp

// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later

#include <algorithm>
#include <limits>
#include <utility>
#include <vector>

#include <fmt/format.h>

#include "common/alignment.h"
#include "common/assert.h"
#include "common/bit_util.h"
#include "common/common_types.h"
#include "common/literals.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
#include "video_core/vulkan_common/vulkan_device.h"
#include "video_core/vulkan_common/vulkan_memory_allocator.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"

namespace Vulkan {
namespace {

using namespace Common::Literals;

// Maximum potential alignment of a Vulkan buffer
constexpr VkDeviceSize MAX_ALIGNMENT = 256;
// Stream buffer size in bytes
constexpr VkDeviceSize MAX_STREAM_BUFFER_SIZE = 128_MiB;
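
// Determines the stream buffer size. With a debugging tool attached (e.g. RenderDoc), the size
// is derived from the largest device-local host-visible heap; otherwise the fixed maximum is
// used. The result is aligned and clamped to MAX_STREAM_BUFFER_SIZE.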
size_t GetStreamBufferSize(const Device& device) {
    VkDeviceSize size{0};
    if (device.HasDebuggingToolAttached()) {
        ForEachDeviceLocalHostVisibleHeap(device, [&size](size_t index, VkMemoryHeap& heap) {
            size = std::max(size, heap.size);
        });
        // If rebar is not supported, cut the max heap size to 40%. This will allow 2 captures
        // to be loaded at the same time in RenderDoc. If rebar is supported, this shouldn't be
        // an issue as the heap will be much larger.
        if (size <= 256_MiB) {
            size = size * 40 / 100;
        }
    } else {
        size = MAX_STREAM_BUFFER_SIZE;
    }
    return std::min(Common::AlignUp(size, MAX_ALIGNMENT), MAX_STREAM_BUFFER_SIZE);
}

} // Anonymous namespace
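
// Creates the stream buffer up front and keeps it persistently mapped; the pool cannot
// operate without a host-visible mapping.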
StagingBufferPool::StagingBufferPool(const Device& device_, MemoryAllocator& memory_allocator_,
                                     Scheduler& scheduler_)
    : device{device_}, memory_allocator{memory_allocator_}, scheduler{scheduler_},
      stream_buffer_size{GetStreamBufferSize(device)}, region_size{stream_buffer_size /
                                                                   StagingBufferPool::NUM_SYNCS} {
    VkBufferCreateInfo stream_ci = {
        .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
        .pNext = nullptr,
        .flags = 0,
        .size = stream_buffer_size,
        .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT |
                 VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT,
        .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
        .queueFamilyIndexCount = 0,
        .pQueueFamilyIndices = nullptr,
    };
    if (device.IsExtTransformFeedbackSupported()) {
        stream_ci.usage |= VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT;
    }
    stream_buffer = memory_allocator.CreateBuffer(stream_ci, MemoryUsage::Stream);
    if (device.HasDebuggingToolAttached()) {
        stream_buffer.SetObjectNameEXT("Stream Buffer");
    }
    stream_pointer = stream_buffer.Mapped();
    ASSERT_MSG(!stream_pointer.empty(), "Stream buffer must be host visible!");
}

StagingBufferPool::~StagingBufferPool() = default;
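
// Hands out a staging buffer reference. Small non-deferred uploads are served from the
// stream (ring) buffer; everything else goes through the per-size-class cache.
// Sketch of a typical caller (illustrative, not from this file):
//   StagingBufferRef ref = pool.Request(copy_size, MemoryUsage::Upload, false);
//   std::memcpy(ref.mapped_span.data(), src, copy_size);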
StagingBufferRef StagingBufferPool::Request(size_t size, MemoryUsage usage, bool deferred) {
    if (!deferred && usage == MemoryUsage::Upload && size <= region_size) {
        return GetStreamBuffer(size);
    }
    return GetStagingBuffer(size, usage, deferred);
}
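
// Returns a deferred buffer to the pool; it becomes reclaimable once the GPU passes the
// tick recorded here.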
void StagingBufferPool::FreeDeferred(StagingBufferRef& ref) {
    auto& entries = GetCache(ref.usage)[ref.log2_level].entries;
    const auto is_this_one = [&ref](const StagingBuffer& entry) {
        return entry.index == ref.index;
    };
    auto it = std::find_if(entries.begin(), entries.end(), is_this_one);
    ASSERT(it != entries.end());
    ASSERT(it->deferred);
    it->tick = scheduler.CurrentTick();
    it->deferred = false;
}
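
// Called once per frame; rotates through the size levels so each level of every cache is
// garbage-collected once every NUM_LEVELS frames.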
void StagingBufferPool::TickFrame() {
    current_delete_level = (current_delete_level + 1) % NUM_LEVELS;

    ReleaseCache(MemoryUsage::DeviceLocal);
    ReleaseCache(MemoryUsage::Upload);
    ReleaseCache(MemoryUsage::Download);
}
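
// Sub-allocates from the stream buffer, treating it as a ring split into NUM_SYNCS regions
// whose last-use ticks are tracked in sync_ticks. If the regions the allocation would touch
// are still in flight on the GPU, falls back to a dedicated staging buffer instead of stalling.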
StagingBufferRef StagingBufferPool::GetStreamBuffer(size_t size) {
    if (AreRegionsActive(Region(free_iterator) + 1,
                         std::min(Region(iterator + size) + 1, NUM_SYNCS))) {
        // Avoid waiting for the previous usages to be free
        return GetStagingBuffer(size, MemoryUsage::Upload);
    }
    const u64 current_tick = scheduler.CurrentTick();
    std::fill(sync_ticks.begin() + Region(used_iterator), sync_ticks.begin() + Region(iterator),
              current_tick);
    used_iterator = iterator;
    free_iterator = std::max(free_iterator, iterator + size);

    if (iterator + size >= stream_buffer_size) {
        std::fill(sync_ticks.begin() + Region(used_iterator), sync_ticks.begin() + NUM_SYNCS,
                  current_tick);
        used_iterator = 0;
        iterator = 0;
        free_iterator = size;

        if (AreRegionsActive(0, Region(size) + 1)) {
            // Avoid waiting for the previous usages to be free
            return GetStagingBuffer(size, MemoryUsage::Upload);
        }
    }
    const size_t offset = iterator;
    iterator = Common::AlignUp(iterator + size, MAX_ALIGNMENT);
    return StagingBufferRef{
        .buffer = *stream_buffer,
        .offset = static_cast<VkDeviceSize>(offset),
        .mapped_span = stream_pointer.subspan(offset, size),
        .usage{},
        .log2_level{},
        .index{},
    };
}
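
// Returns true when any region in [region_begin, region_end) still has GPU work pending,
// i.e. its recorded tick is ahead of the last known GPU tick.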
bool StagingBufferPool::AreRegionsActive(size_t region_begin, size_t region_end) const {
    const u64 gpu_tick = scheduler.GetMasterSemaphore().KnownGpuTick();
    return std::any_of(sync_ticks.begin() + region_begin, sync_ticks.begin() + region_end,
                       [gpu_tick](u64 sync_tick) { return gpu_tick < sync_tick; });
}
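
// Prefers recycling a cached buffer of the right size class; only allocates when nothing
// free is available.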
StagingBufferRef StagingBufferPool::GetStagingBuffer(size_t size, MemoryUsage usage,
                                                     bool deferred) {
    if (const std::optional<StagingBufferRef> ref = TryGetReservedBuffer(size, usage, deferred)) {
        return *ref;
    }
    return CreateStagingBuffer(size, usage, deferred);
}
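
// Scans the size class for a free (non-deferred, GPU-retired) entry, starting at the last
// successful position and wrapping around once before giving up.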
std::optional<StagingBufferRef> StagingBufferPool::TryGetReservedBuffer(size_t size,
                                                                        MemoryUsage usage,
                                                                        bool deferred) {
    StagingBuffers& cache_level = GetCache(usage)[Common::Log2Ceil64(size)];

    const auto is_free = [this](const StagingBuffer& entry) {
        return !entry.deferred && scheduler.IsFree(entry.tick);
    };
    auto& entries = cache_level.entries;
    const auto hint_it = entries.begin() + cache_level.iterate_index;
    auto it = std::find_if(entries.begin() + cache_level.iterate_index, entries.end(), is_free);
    if (it == entries.end()) {
        it = std::find_if(entries.begin(), hint_it, is_free);
        if (it == hint_it) {
            return std::nullopt;
        }
    }
    cache_level.iterate_index = std::distance(entries.begin(), it) + 1;
    it->tick = deferred ? std::numeric_limits<u64>::max() : scheduler.CurrentTick();
    ASSERT(!it->deferred);
    it->deferred = deferred;
    return it->Ref();
}
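
// Allocates a fresh buffer rounded up to the next power of two and registers it in the
// cache so it can be recycled later. Deferred buffers are tagged with the maximum tick so
// they are never considered free until FreeDeferred is called.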
StagingBufferRef StagingBufferPool::CreateStagingBuffer(size_t size, MemoryUsage usage,
                                                        bool deferred) {
    const u32 log2 = Common::Log2Ceil64(size);
    VkBufferCreateInfo buffer_ci = {
        .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
        .pNext = nullptr,
        .flags = 0,
        .size = 1ULL << log2,
        .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
                 VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
                 VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
        .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
        .queueFamilyIndexCount = 0,
        .pQueueFamilyIndices = nullptr,
    };
    if (device.IsExtTransformFeedbackSupported()) {
        buffer_ci.usage |= VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT;
    }
    vk::Buffer buffer = memory_allocator.CreateBuffer(buffer_ci, usage);
    if (device.HasDebuggingToolAttached()) {
        ++buffer_index;
        buffer.SetObjectNameEXT(fmt::format("Staging Buffer {}", buffer_index).c_str());
    }
    const std::span<u8> mapped_span = buffer.Mapped();
    StagingBuffer& entry = GetCache(usage)[log2].entries.emplace_back(StagingBuffer{
        .buffer = std::move(buffer),
        .mapped_span = mapped_span,
        .usage = usage,
        .log2_level = log2,
        .index = unique_ids++,
        .tick = deferred ? std::numeric_limits<u64>::max() : scheduler.CurrentTick(),
        .deferred = deferred,
    });
    return entry.Ref();
}
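
// Maps a MemoryUsage to its dedicated cache; anything unexpected asserts and falls back to
// the upload cache.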
StagingBufferPool::StagingBuffersCache& StagingBufferPool::GetCache(MemoryUsage usage) {
    switch (usage) {
    case MemoryUsage::DeviceLocal:
        return device_local_cache;
    case MemoryUsage::Upload:
        return upload_cache;
    case MemoryUsage::Download:
        return download_cache;
    default:
        ASSERT_MSG(false, "Invalid memory usage={}", usage);
        return upload_cache;
    }
}
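
// Releases the size level selected for this frame in the given cache.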
void StagingBufferPool::ReleaseCache(MemoryUsage usage) {
    ReleaseLevel(GetCache(usage), current_delete_level);
}
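
// Erases up to deletions_per_tick retired buffers from one size level, resuming where the
// previous pass left off so the deletion cost stays bounded per frame.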
void StagingBufferPool::ReleaseLevel(StagingBuffersCache& cache, size_t log2) {
    constexpr size_t deletions_per_tick = 16;

    auto& staging = cache[log2];
    auto& entries = staging.entries;
    const size_t old_size = entries.size();

    const auto is_deletable = [this](const StagingBuffer& entry) {
        return scheduler.IsFree(entry.tick);
    };
    const size_t begin_offset = staging.delete_index;
    const size_t end_offset = std::min(begin_offset + deletions_per_tick, old_size);
    const auto begin = entries.begin() + begin_offset;
    const auto end = entries.begin() + end_offset;
    entries.erase(std::remove_if(begin, end, is_deletable), end);

    const size_t new_size = entries.size();
    staging.delete_index += deletions_per_tick;
    if (staging.delete_index >= new_size) {
        staging.delete_index = 0;
    }
    if (staging.iterate_index > new_size) {
        staging.iterate_index = 0;
    }
}

} // namespace Vulkan