Compare commits: android-24...android-24
8 commits:

- 9eb63f09cd
- f606a52325
- 213ec6ee83
- 97b3477bf0
- 1817cedc87
- ed59173a5c
- 8d666015ae
- bcd7fa563a
@@ -3,8 +3,10 @@
| [12461](https://github.com/yuzu-emu/yuzu//pull/12461) | [`acc26667b`](https://github.com/yuzu-emu/yuzu//pull/12461/files) | Rework Nvdec and VIC to fix out-of-order videos, and speed up decoding. | [Kelebek1](https://github.com/Kelebek1/) | Yes |
| [12749](https://github.com/yuzu-emu/yuzu//pull/12749) | [`aad4b0d6f`](https://github.com/yuzu-emu/yuzu//pull/12749/files) | general: workarounds for SMMU syncing issues | [liamwhite](https://github.com/liamwhite/) | Yes |
| [12756](https://github.com/yuzu-emu/yuzu//pull/12756) | [`4677fd3f6`](https://github.com/yuzu-emu/yuzu//pull/12756/files) | general: applet multiprocess | [liamwhite](https://github.com/liamwhite/) | Yes |
| [12873](https://github.com/yuzu-emu/yuzu//pull/12873) | [`7a4ea8991`](https://github.com/yuzu-emu/yuzu//pull/12873/files) | GPU: Implement channel scheduling. | [FernandoS27](https://github.com/FernandoS27/) | Yes |
| [12975](https://github.com/yuzu-emu/yuzu//pull/12975) | [`9ce43ee67`](https://github.com/yuzu-emu/yuzu//pull/12975/files) | Texture Cache: Fix untracking on GPU remap | [FernandoS27](https://github.com/FernandoS27/) | Yes |
| [12873](https://github.com/yuzu-emu/yuzu//pull/12873) | [`1f64c9adf`](https://github.com/yuzu-emu/yuzu//pull/12873/files) | GPU: Implement channel scheduling. | [FernandoS27](https://github.com/FernandoS27/) | Yes |
| [12955](https://github.com/yuzu-emu/yuzu//pull/12955) | [`8d2ad3d8f`](https://github.com/yuzu-emu/yuzu//pull/12955/files) | dmnt: cheat: Avoid invalidating cache on 32bit | [german77](https://github.com/german77/) | Yes |
| [12978](https://github.com/yuzu-emu/yuzu//pull/12978) | [`4eeac731f`](https://github.com/yuzu-emu/yuzu//pull/12978/files) | host_shaders: add vendor workaround for adreno drivers | [liamwhite](https://github.com/liamwhite/) | Yes |
| [12980](https://github.com/yuzu-emu/yuzu//pull/12980) | [`211544fbc`](https://github.com/yuzu-emu/yuzu//pull/12980/files) | dynarmic: Fix invalidation race | [merryhime](https://github.com/merryhime/) | Yes |
End of merge log. You can find the original README.md below the break.
Submodule externals/nx_tzdb/tzdb_to_nx updated: 9792969023...404d390045
@@ -65,7 +65,8 @@ void StandardVmCallbacks::MemoryWriteUnsafe(VAddr address, const void* data, u64
         return;
     }
 
-    if (system.ApplicationMemory().WriteBlock(address, data, size)) {
+    if (system.ApplicationMemory().WriteBlock(address, data, size) &&
+        system.ApplicationProcess()->Is64Bit()) {
         Core::InvalidateInstructionCacheRange(system.ApplicationProcess(), address, size);
     }
 }
@@ -885,9 +885,15 @@ void Config::Reload() {
 }
 
 void Config::ClearControlPlayerValues() const {
-    // Removes the entire [Controls] section
+    // If key is an empty string, all keys in the current group() are removed.
     const char* section = Settings::TranslateCategory(Settings::Category::Controls);
-    config->Delete(section, nullptr, true);
+    CSimpleIniA::TNamesDepend keys;
+    config->GetAllKeys(section, keys);
+    for (const auto& key : keys) {
+        if (std::string(config->GetValue(section, key.pItem)).empty()) {
+            config->Delete(section, key.pItem);
+        }
+    }
 }
 
 const std::string& Config::GetConfigFilePath() const {
@@ -7,7 +7,6 @@
 #include <memory>
 #include <mutex>
 #include <unordered_map>
-#include <utility>
 
 #include "common/assert.h"
 #include "common/fiber.h"
@@ -54,30 +53,29 @@ void Scheduler::Init() {
 }
 
 void Scheduler::Resume() {
-    while (UpdateHighestPriorityChannel()) {
-        impl->current_fifo->scheduled_count++;
-        Common::Fiber::YieldTo(impl->master_control, *impl->current_fifo->context);
-    }
-}
-
-bool Scheduler::UpdateHighestPriorityChannel() {
-    std::scoped_lock lk(impl->scheduling_guard);
-
-    // Clear needs to schedule state.
-    impl->must_reschedule = false;
-
-    // By default, we don't have a channel to schedule.
-    impl->current_fifo = nullptr;
-
-    // Check each level to see if we can schedule.
-    for (auto& level : impl->schedule_priority_queue) {
-        if (ScheduleLevel(level.second)) {
-            return true;
+    bool pending_work;
+    do {
+        pending_work = false;
+        {
+            std::unique_lock lk(impl->scheduling_guard);
+            impl->current_fifo = nullptr;
+            auto it = impl->schedule_priority_queue.begin();
+            while (it != impl->schedule_priority_queue.end()) {
+                pending_work = ScheduleLevel(it->second);
+                if (pending_work) {
+                    break;
+                }
+                it = std::next(it);
+            }
+            if (pending_work) {
+                impl->must_reschedule = false;
+            }
         }
-    }
-
-    // Nothing to schedule.
-    return false;
+        if (impl->current_fifo) {
+            impl->current_fifo->scheduled_count++;
+            Common::Fiber::YieldTo(impl->master_control, *impl->current_fifo->context);
+        }
+    } while (pending_work);
 }
 
 bool Scheduler::ScheduleLevel(std::list<size_t>& queue) {
@@ -85,48 +83,34 @@ bool Scheduler::ScheduleLevel(std::list<size_t>& queue) {
     size_t min_schedule_count = std::numeric_limits<size_t>::max();
     for (auto id : queue) {
         auto& fifo = impl->gpfifos[id];
-        std::scoped_lock lk(fifo.guard);
-        // With no pending work and nothing running, this channel can't be scheduled.
-        if (fifo.pending_work.empty() && !fifo.is_running) {
-            continue;
+        std::scoped_lock lk2(fifo.guard);
+        if (!fifo.pending_work.empty() || fifo.is_running) {
+            if (fifo.scheduled_count > min_schedule_count) {
+                continue;
+            }
+            if (fifo.scheduled_count < fifo.yield_count) {
+                fifo.scheduled_count++;
+                continue;
+            }
+            min_schedule_count = fifo.scheduled_count;
+            impl->current_fifo = &fifo;
+            found_anything = true;
         }
-
-        // Prioritize channels at current priority which have been run the least.
-        if (fifo.scheduled_count > min_schedule_count) {
-            continue;
-        }
-
-        // Try not to select the same channel we just yielded from.
-        if (fifo.scheduled_count < fifo.yield_count) {
-            fifo.scheduled_count++;
-            continue;
-        }
-
-        // Update best selection.
-        min_schedule_count = fifo.scheduled_count;
-        impl->current_fifo = &fifo;
-        found_anything = true;
     }
     return found_anything;
 }
 
 void Scheduler::ChangePriority(s32 channel_id, u32 new_priority) {
-    std::scoped_lock lk(impl->scheduling_guard);
-    // Ensure we are tracking this channel.
+    std::unique_lock lk(impl->scheduling_guard);
     auto fifo_it = impl->channel_gpfifo_ids.find(channel_id);
     if (fifo_it == impl->channel_gpfifo_ids.end()) {
         return;
     }
-
-    // Get the fifo and update its priority.
     const size_t fifo_id = fifo_it->second;
     auto& fifo = impl->gpfifos[fifo_id];
-    const auto old_priority = std::exchange(fifo.info->priority, new_priority);
-
-    // Create the new level if needed.
+    const auto old_priority = fifo.info->priority;
+    fifo.info->priority = new_priority;
     impl->schedule_priority_queue.try_emplace(new_priority);
-
-    // Remove the old level and add to the new level.
     impl->schedule_priority_queue[new_priority].push_back(fifo_id);
     impl->schedule_priority_queue[old_priority].remove_if(
         [fifo_id](size_t id) { return id == fifo_id; });
@@ -134,8 +118,6 @@ void Scheduler::ChangePriority(s32 channel_id, u32 new_priority) {
 
 void Scheduler::Yield() {
     ASSERT(impl->current_fifo != nullptr);
-
-    // Set yield count higher
     impl->current_fifo->yield_count = impl->current_fifo->scheduled_count + 1;
     Common::Fiber::YieldTo(impl->current_fifo->context, *impl->master_control);
     gpu.BindChannel(impl->current_fifo->bind_id);
@@ -144,73 +126,50 @@ void Scheduler::Yield() {
 void Scheduler::CheckStatus() {
     {
         std::unique_lock lk(impl->scheduling_guard);
-        // If no reschedule is needed, don't transfer control
        if (!impl->must_reschedule) {
             return;
         }
     }
-    // Transfer control to the scheduler
     Common::Fiber::YieldTo(impl->current_fifo->context, *impl->master_control);
     gpu.BindChannel(impl->current_fifo->bind_id);
 }
 
 void Scheduler::Push(s32 channel, CommandList&& entries) {
-    std::scoped_lock lk(impl->scheduling_guard);
-    // Get and ensure we have this channel.
+    std::unique_lock lk(impl->scheduling_guard);
     auto it = impl->channel_gpfifo_ids.find(channel);
     ASSERT(it != impl->channel_gpfifo_ids.end());
     auto gpfifo_id = it->second;
     auto& fifo = impl->gpfifos[gpfifo_id];
-
-    // Add the new new work to the channel.
     {
         std::scoped_lock lk2(fifo.guard);
         fifo.pending_work.emplace_back(std::move(entries));
     }
-
-    // If the current running FIFO is null or the one being pushed to then
-    // just return
-    if (impl->current_fifo == nullptr || impl->current_fifo == &fifo) {
-        return;
+    if (impl->current_fifo != nullptr && impl->current_fifo->info->priority < fifo.info->priority) {
+        impl->must_reschedule = true;
     }
-
-    // If the current fifo has higher or equal priority to the current fifo then return
-    if (impl->current_fifo->info->priority >= fifo.info->priority) {
-        return;
-    }
-
-    // Mark scheduler update as required.
-    impl->must_reschedule = true;
 }
 
 void Scheduler::ChannelLoop(size_t gpfifo_id, s32 channel_id) {
-    auto& fifo = impl->gpfifos[gpfifo_id];
-    auto* channel_state = fifo.info.get();
-    const auto SendToPuller = [&] {
-        std::scoped_lock lk(fifo.guard);
-        if (fifo.pending_work.empty()) {
-            // Stop if no work available.
-            fifo.is_running = false;
-            return false;
-        }
-        // Otherwise, send work to puller and mark as running.
-        CommandList&& entries = std::move(fifo.pending_work.front());
-        channel_state->dma_pusher->Push(std::move(entries));
-        fifo.pending_work.pop_front();
-        fifo.is_running = true;
-        // Succeed.
-        return true;
-    };
-    // Inform the GPU about the current channel.
     gpu.BindChannel(channel_id);
+    auto& fifo = impl->gpfifos[gpfifo_id];
     while (true) {
-        while (SendToPuller()) {
-            // Execute.
+        auto* channel_state = fifo.info.get();
+        fifo.guard.lock();
+        while (!fifo.pending_work.empty()) {
+            fifo.is_running = true;
+            {
+                CommandList&& entries = std::move(fifo.pending_work.front());
+                channel_state->dma_pusher->Push(std::move(entries));
+                fifo.pending_work.pop_front();
+            }
+            fifo.guard.unlock();
             channel_state->dma_pusher->DispatchCalls();
-            // Reschedule.
             CheckStatus();
+            fifo.guard.lock();
         }
-        // Return to host execution when all work is completed.
+        fifo.is_running = false;
+        fifo.guard.unlock();
         Common::Fiber::YieldTo(fifo.context, *impl->master_control);
+        // Inform the GPU about the current channel.
+        gpu.BindChannel(channel_id);
     }
 }
@@ -39,7 +39,6 @@ private:
     void ChannelLoop(size_t gpfifo_id, s32 channel_id);
     bool ScheduleLevel(std::list<size_t>& queue);
     void CheckStatus();
-    bool UpdateHighestPriorityChannel();
 
     struct SchedulerImpl;
     std::unique_ptr<SchedulerImpl> impl;
@@ -42,7 +42,6 @@ ImageInfo::ImageInfo(const TICEntry& config) noexcept {
         };
     }
     rescaleable = false;
-    is_sparse = config.is_sparse != 0;
     tile_width_spacing = config.tile_width_spacing;
     if (config.texture_type != TextureType::Texture2D &&
         config.texture_type != TextureType::Texture2DNoMipmap) {
@@ -41,7 +41,6 @@ struct ImageInfo {
     bool downscaleable = false;
     bool forced_flushed = false;
     bool dma_downloaded = false;
-    bool is_sparse = false;
 };
 
 } // namespace VideoCommon
@@ -600,17 +600,17 @@ void TextureCache<P>::UnmapGPUMemory(size_t as_id, GPUVAddr gpu_addr, size_t siz
                             [&](ImageId id, Image&) { deleted_images.push_back(id); });
     for (const ImageId id : deleted_images) {
         Image& image = slot_images[id];
-        if (False(image.flags & ImageFlagBits::CpuModified)) {
-            image.flags |= ImageFlagBits::CpuModified;
-            if (True(image.flags & ImageFlagBits::Tracked)) {
-                UntrackImage(image, id);
-            }
+        if (True(image.flags & ImageFlagBits::CpuModified)) {
+            continue;
         }
+
+        image.flags |= ImageFlagBits::CpuModified;
         if (True(image.flags & ImageFlagBits::Remapped)) {
             continue;
         }
         image.flags |= ImageFlagBits::Remapped;
         if (True(image.flags & ImageFlagBits::Tracked)) {
             UntrackImage(image, id);
         }
     }
 }
@@ -1469,8 +1469,7 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, DA
     const ImageId new_image_id = slot_images.insert(runtime, new_info, gpu_addr, cpu_addr);
     Image& new_image = slot_images[new_image_id];
 
-    if (!gpu_memory->IsContinuousRange(new_image.gpu_addr, new_image.guest_size_bytes) &&
-        new_info.is_sparse) {
+    if (!gpu_memory->IsContinuousRange(new_image.gpu_addr, new_image.guest_size_bytes)) {
        new_image.flags |= ImageFlagBits::Sparse;
     }
 