Compare commits: android-24...android-24 (7 commits)

| SHA1 |
|---|
| 7d950e8469 |
| 2a4e06bd70 |
| 60d345af8d |
| 8e883edd35 |
| 5a0ca3984e |
| 8ab059bbb7 |
| 9dbf753b4b |
@@ -2,9 +2,10 @@
 |----|----|----|----|----|
 | [12461](https://github.com/yuzu-emu/yuzu//pull/12461) | [`acc26667b`](https://github.com/yuzu-emu/yuzu//pull/12461/files) | Rework Nvdec and VIC to fix out-of-order videos, and speed up decoding. | [Kelebek1](https://github.com/Kelebek1/) | Yes |
 | [12749](https://github.com/yuzu-emu/yuzu//pull/12749) | [`aad4b0d6f`](https://github.com/yuzu-emu/yuzu//pull/12749/files) | general: workarounds for SMMU syncing issues | [liamwhite](https://github.com/liamwhite/) | Yes |
-| [12756](https://github.com/yuzu-emu/yuzu//pull/12756) | [`4677fd3f6`](https://github.com/yuzu-emu/yuzu//pull/12756/files) | general: applet multiprocess | [liamwhite](https://github.com/liamwhite/) | Yes |
-| [12873](https://github.com/yuzu-emu/yuzu//pull/12873) | [`7a4ea8991`](https://github.com/yuzu-emu/yuzu//pull/12873/files) | GPU: Implement channel scheduling. | [FernandoS27](https://github.com/FernandoS27/) | Yes |
-| [12975](https://github.com/yuzu-emu/yuzu//pull/12975) | [`9ce43ee67`](https://github.com/yuzu-emu/yuzu//pull/12975/files) | Texture Cache: Fix untracking on GPU remap | [FernandoS27](https://github.com/FernandoS27/) | Yes |
+| [12756](https://github.com/yuzu-emu/yuzu//pull/12756) | [`13fd37ef6`](https://github.com/yuzu-emu/yuzu//pull/12756/files) | general: applet multiprocess | [liamwhite](https://github.com/liamwhite/) | Yes |
+| [12949](https://github.com/yuzu-emu/yuzu//pull/12949) | [`5a64a77df`](https://github.com/yuzu-emu/yuzu//pull/12949/files) | service: add os types and multi wait API | [liamwhite](https://github.com/liamwhite/) | Yes |
+| [12955](https://github.com/yuzu-emu/yuzu//pull/12955) | [`8d2ad3d8f`](https://github.com/yuzu-emu/yuzu//pull/12955/files) | dmnt: cheat: Avoid invalidating cache on 32bit | [german77](https://github.com/german77/) | Yes |
+| [12969](https://github.com/yuzu-emu/yuzu//pull/12969) | [`5bf64e874`](https://github.com/yuzu-emu/yuzu//pull/12969/files) | service: bcat: Migrate and refractor service to new IPC | [german77](https://github.com/german77/) | Yes |
 
 End of merge log. You can find the original README.md below the break.
Submodule externals/dynarmic updated: ba8192d890...ca0e264f4f
Submodule externals/nx_tzdb/tzdb_to_nx updated: 9792969023...404d390045
@@ -104,14 +104,12 @@ std::shared_ptr<ILibraryAppletAccessor> CreateGuestApplet(Core::System& system,
 
     // TODO: enable other versions of applets
     enum : u8 {
-        Firmware1400 = 14,
-        Firmware1500 = 15,
-        Firmware1600 = 16,
-        Firmware1700 = 17,
+        Firmware1600 = 15,
+        Firmware1700 = 16,
     };
 
     auto process = std::make_unique<Process>(system);
-    if (!process->Initialize(program_id, Firmware1400, Firmware1700)) {
+    if (!process->Initialize(program_id, Firmware1600, Firmware1700)) {
         // Couldn't initialize the guest process
         return {};
     }
@@ -29,8 +29,8 @@ IBcatService::IBcatService(Core::System& system_, BcatBackend& backend_)
     }} {
     // clang-format off
     static const FunctionInfo functions[] = {
-        {10100, D<&IBcatService::RequestSyncDeliveryCache>, "RequestSyncDeliveryCache"},
-        {10101, D<&IBcatService::RequestSyncDeliveryCacheWithDirectoryName>, "RequestSyncDeliveryCacheWithDirectoryName"},
+        {10100, C<&IBcatService::RequestSyncDeliveryCache>, "RequestSyncDeliveryCache"},
+        {10101, C<&IBcatService::RequestSyncDeliveryCacheWithDirectoryName>, "RequestSyncDeliveryCacheWithDirectoryName"},
         {10200, nullptr, "CancelSyncDeliveryCacheRequest"},
         {20100, nullptr, "RequestSyncDeliveryCacheWithApplicationId"},
         {20101, nullptr, "RequestSyncDeliveryCacheWithApplicationIdAndDirectoryName"},
@@ -39,7 +39,7 @@ IBcatService::IBcatService(Core::System& system_, BcatBackend& backend_)
         {20400, nullptr, "RegisterSystemApplicationDeliveryTask"},
         {20401, nullptr, "UnregisterSystemApplicationDeliveryTask"},
         {20410, nullptr, "SetSystemApplicationDeliveryTaskTimer"},
-        {30100, D<&IBcatService::SetPassphrase>, "SetPassphrase"},
+        {30100, C<&IBcatService::SetPassphrase>, "SetPassphrase"},
         {30101, nullptr, "Unknown30101"},
         {30102, nullptr, "Unknown30102"},
         {30200, nullptr, "RegisterBackgroundDeliveryTask"},
@@ -47,11 +47,11 @@ IBcatService::IBcatService(Core::System& system_, BcatBackend& backend_)
         {30202, nullptr, "BlockDeliveryTask"},
         {30203, nullptr, "UnblockDeliveryTask"},
         {30210, nullptr, "SetDeliveryTaskTimer"},
-        {30300, D<&IBcatService::RegisterSystemApplicationDeliveryTasks>, "RegisterSystemApplicationDeliveryTasks"},
+        {30300, C<&IBcatService::RegisterSystemApplicationDeliveryTasks>, "RegisterSystemApplicationDeliveryTasks"},
         {90100, nullptr, "EnumerateBackgroundDeliveryTask"},
         {90101, nullptr, "Unknown90101"},
         {90200, nullptr, "GetDeliveryList"},
-        {90201, D<&IBcatService::ClearDeliveryCacheStorage>, "ClearDeliveryCacheStorage"},
+        {90201, C<&IBcatService::ClearDeliveryCacheStorage>, "ClearDeliveryCacheStorage"},
         {90202, nullptr, "ClearDeliveryTaskSubscriptionStatus"},
         {90300, nullptr, "GetPushNotificationLog"},
         {90301, nullptr, "Unknown90301"},
@@ -24,9 +24,9 @@ IDeliveryCacheDirectoryService::IDeliveryCacheDirectoryService(Core::System& sys
     : ServiceFramework{system_, "IDeliveryCacheDirectoryService"}, root(std::move(root_)) {
     // clang-format off
     static const FunctionInfo functions[] = {
-        {0, D<&IDeliveryCacheDirectoryService::Open>, "Open"},
-        {1, D<&IDeliveryCacheDirectoryService::Read>, "Read"},
-        {2, D<&IDeliveryCacheDirectoryService::GetCount>, "GetCount"},
+        {0, C<&IDeliveryCacheDirectoryService::Open>, "Open"},
+        {1, C<&IDeliveryCacheDirectoryService::Read>, "Read"},
+        {2, C<&IDeliveryCacheDirectoryService::GetCount>, "GetCount"},
     };
     // clang-format on
 
@@ -14,10 +14,10 @@ IDeliveryCacheFileService::IDeliveryCacheFileService(Core::System& system_,
     : ServiceFramework{system_, "IDeliveryCacheFileService"}, root(std::move(root_)) {
     // clang-format off
     static const FunctionInfo functions[] = {
-        {0, D<&IDeliveryCacheFileService::Open>, "Open"},
-        {1, D<&IDeliveryCacheFileService::Read>, "Read"},
-        {2, D<&IDeliveryCacheFileService::GetSize>, "GetSize"},
-        {3, D<&IDeliveryCacheFileService::GetDigest>, "GetDigest"},
+        {0, C<&IDeliveryCacheFileService::Open>, "Open"},
+        {1, C<&IDeliveryCacheFileService::Read>, "Read"},
+        {2, C<&IDeliveryCacheFileService::GetSize>, "GetSize"},
+        {3, C<&IDeliveryCacheFileService::GetDigest>, "GetDigest"},
     };
     // clang-format on
 
@@ -13,8 +13,8 @@ IDeliveryCacheProgressService::IDeliveryCacheProgressService(Core::System& syste
     : ServiceFramework{system_, "IDeliveryCacheProgressService"}, event{event_}, impl{impl_} {
     // clang-format off
    static const FunctionInfo functions[] = {
-        {0, D<&IDeliveryCacheProgressService::GetEvent>, "Get"},
-        {1, D<&IDeliveryCacheProgressService::GetImpl>, "Get"},
+        {0, C<&IDeliveryCacheProgressService::GetEvent>, "Get"},
+        {0, C<&IDeliveryCacheProgressService::GetImpl>, "Get"},
     };
     // clang-format on
 
@@ -14,9 +14,9 @@ IDeliveryCacheStorageService::IDeliveryCacheStorageService(Core::System& system_
     : ServiceFramework{system_, "IDeliveryCacheStorageService"}, root(std::move(root_)) {
     // clang-format off
     static const FunctionInfo functions[] = {
-        {0, D<&IDeliveryCacheStorageService::CreateFileService>, "CreateFileService"},
-        {1, D<&IDeliveryCacheStorageService::CreateDirectoryService>, "CreateDirectoryService"},
-        {10, D<&IDeliveryCacheStorageService::EnumerateDeliveryCacheDirectory>, "EnumerateDeliveryCacheDirectory"},
+        {0, C<&IDeliveryCacheStorageService::CreateFileService>, "CreateFileService"},
+        {1, C<&IDeliveryCacheStorageService::CreateDirectoryService>, "CreateDirectoryService"},
+        {2, C<&IDeliveryCacheStorageService::EnumerateDeliveryCacheDirectory>, "EnumerateDeliveryCacheDirectory"},
     };
     // clang-format on
 
@@ -18,9 +18,9 @@ IServiceCreator::IServiceCreator(Core::System& system_, const char* name_)
     : ServiceFramework{system_, name_}, fsc{system.GetFileSystemController()} {
     // clang-format off
     static const FunctionInfo functions[] = {
-        {0, D<&IServiceCreator::CreateBcatService>, "CreateBcatService"},
-        {1, D<&IServiceCreator::CreateDeliveryCacheStorageService>, "CreateDeliveryCacheStorageService"},
-        {2, D<&IServiceCreator::CreateDeliveryCacheStorageServiceWithApplicationId>, "CreateDeliveryCacheStorageServiceWithApplicationId"},
+        {0, C<&IServiceCreator::CreateBcatService>, "CreateBcatService"},
+        {1, C<&IServiceCreator::CreateDeliveryCacheStorageService>, "CreateDeliveryCacheStorageService"},
+        {2, C<&IServiceCreator::CreateDeliveryCacheStorageServiceWithApplicationId>, "CreateDeliveryCacheStorageServiceWithApplicationId"},
         {3, nullptr, "CreateDeliveryCacheProgressService"},
         {4, nullptr, "CreateDeliveryCacheProgressServiceWithApplicationId"},
     };
@@ -13,7 +13,6 @@
 #include "core/hle/service/nvdrv/nvdrv.h"
 #include "core/memory.h"
 #include "video_core/control/channel_state.h"
-#include "video_core/control/scheduler.h"
 #include "video_core/engines/puller.h"
 #include "video_core/gpu.h"
 #include "video_core/host1x/host1x.h"
@@ -34,7 +33,6 @@ nvhost_gpu::nvhost_gpu(Core::System& system_, EventInterface& events_interface_,
       syncpoint_manager{core_.GetSyncpointManager()}, nvmap{core.GetNvMapFile()},
       channel_state{system.GPU().AllocateChannel()} {
     channel_syncpoint = syncpoint_manager.AllocateSyncpoint(false);
-    channel_state->syncpoint_id = channel_syncpoint;
     sm_exception_breakpoint_int_report_event =
         events_interface.CreateEvent("GpuChannelSMExceptionBreakpointInt");
     sm_exception_breakpoint_pause_report_event =
@@ -159,9 +157,6 @@ NvResult nvhost_gpu::SetErrorNotifier(IoctlSetErrorNotifier& params) {
 
 NvResult nvhost_gpu::SetChannelPriority(IoctlChannelSetPriority& params) {
     channel_priority = params.priority;
-    if (channel_state->initialized) {
-        system.GPU().Scheduler().ChangePriority(channel_state->bind_id, channel_priority);
-    }
     LOG_DEBUG(Service_NVDRV, "(STUBBED) called, priority={:X}", channel_priority);
     return NvResult::Success;
 }
@@ -319,7 +314,6 @@ NvResult nvhost_gpu::GetWaitbase(IoctlGetWaitbase& params) {
 NvResult nvhost_gpu::ChannelSetTimeout(IoctlChannelSetTimeout& params) {
     LOG_INFO(Service_NVDRV, "called, timeout=0x{:X}", params.timeout);
 
-    channel_state->timeout = params.timeout;
     return NvResult::Success;
 }
 
@@ -327,7 +321,6 @@ NvResult nvhost_gpu::ChannelSetTimeslice(IoctlSetTimeslice& params) {
     LOG_INFO(Service_NVDRV, "called, timeslice=0x{:X}", params.timeslice);
 
     channel_timeslice = params.timeslice;
-    channel_state->timeslice = params.timeslice;
 
     return NvResult::Success;
 }
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (system.ApplicationMemory().WriteBlock(address, data, size)) {
|
if (system.ApplicationMemory().WriteBlock(address, data, size) &&
|
||||||
|
system.ApplicationProcess()->Is64Bit()) {
|
||||||
Core::InvalidateInstructionCacheRange(system.ApplicationProcess(), address, size);
|
Core::InvalidateInstructionCacheRange(system.ApplicationProcess(), address, size);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -885,9 +885,15 @@ void Config::Reload() {
 }
 
 void Config::ClearControlPlayerValues() const {
-    // Removes the entire [Controls] section
+    // If key is an empty string, all keys in the current group() are removed.
     const char* section = Settings::TranslateCategory(Settings::Category::Controls);
-    config->Delete(section, nullptr, true);
+    CSimpleIniA::TNamesDepend keys;
+    config->GetAllKeys(section, keys);
+    for (const auto& key : keys) {
+        if (std::string(config->GetValue(section, key.pItem)).empty()) {
+            config->Delete(section, key.pItem);
+        }
+    }
 }
 
 const std::string& Config::GetConfigFilePath() const {
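
Note on the ClearControlPlayerValues hunk above: instead of dropping the whole [Controls] section in one call, the new code enumerates the section's keys and deletes only those whose stored value is empty. A minimal, self-contained sketch of that simpleini pattern follows; the section and key names below are made up for illustration and are not taken from the emulator's config.

```cpp
// Sketch: delete only the empty-valued keys of a section with simpleini.
// Assumes the simpleini header is available; names are illustrative.
#include <SimpleIni.h>
#include <cstdio>
#include <string>

int main() {
    CSimpleIniA ini;
    ini.SetValue("Controls", "player_0_button_a", "engine:button"); // keeps its value
    ini.SetValue("Controls", "player_0_button_b", "");              // empty value

    // Collect every key in the section, then remove the ones whose value is empty.
    CSimpleIniA::TNamesDepend keys;
    ini.GetAllKeys("Controls", keys);
    for (const auto& key : keys) {
        if (std::string(ini.GetValue("Controls", key.pItem, "")).empty()) {
            ini.Delete("Controls", key.pItem);
        }
    }

    std::printf("button_a: %s\n", ini.GetValue("Controls", "player_0_button_a", "<missing>"));
    std::printf("button_b: %s\n", ini.GetValue("Controls", "player_0_button_b", "<missing>"));
    return 0;
}
```

Under this pattern, keys that still carry a value survive the clear, which appears to be the point of the change.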
@@ -45,12 +45,6 @@ struct ChannelState {
     void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);
 
     s32 bind_id = -1;
-    /// Scheduling info
-    u32 syncpoint_id = 0xFFFF;
-    u32 priority = 0;
-    u32 timeslice = 0;
-    u32 timeout = 0;
-
     /// 3D engine
     std::unique_ptr<Engines::Maxwell3D> maxwell_3d;
     /// 2D engine
@@ -1,245 +1,32 @@
 // SPDX-FileCopyrightText: 2021 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-3.0-or-later
 
-#include <atomic>
-#include <deque>
-#include <map>
 #include <memory>
-#include <mutex>
-#include <unordered_map>
-#include <utility>
 
 #include "common/assert.h"
-#include "common/fiber.h"
+#include "video_core/control/channel_state.h"
 #include "video_core/control/scheduler.h"
-#include "video_core/dma_pusher.h"
 #include "video_core/gpu.h"
 
 namespace Tegra::Control {
-
-struct GPFifoContext {
-    bool is_active;
-    bool is_running;
-    std::shared_ptr<Common::Fiber> context;
-    std::deque<CommandList> pending_work;
-    std::mutex guard;
-    s32 bind_id;
-    std::shared_ptr<ChannelState> info;
-    size_t yield_count;
-    size_t scheduled_count;
-};
-
-struct Scheduler::SchedulerImpl {
-    // Fifos
-    std::map<u32, std::list<size_t>, std::greater<u32>> schedule_priority_queue;
-    std::unordered_map<s32, size_t> channel_gpfifo_ids;
-    std::deque<GPFifoContext> gpfifos;
-    std::deque<size_t> free_fifos;
-
-    // Scheduling
-    std::mutex scheduling_guard;
-    std::shared_ptr<Common::Fiber> master_control;
-    bool must_reschedule{};
-    GPFifoContext* current_fifo{};
-};
-
-Scheduler::Scheduler(GPU& gpu_) : gpu{gpu_} {
-    impl = std::make_unique<SchedulerImpl>();
-}
+Scheduler::Scheduler(GPU& gpu_) : gpu{gpu_} {}
 
 Scheduler::~Scheduler() = default;
 
-void Scheduler::Init() {
-    impl->master_control = Common::Fiber::ThreadToFiber();
-}
-
-void Scheduler::Resume() {
-    while (UpdateHighestPriorityChannel()) {
-        impl->current_fifo->scheduled_count++;
-        Common::Fiber::YieldTo(impl->master_control, *impl->current_fifo->context);
-    }
-}
-
-bool Scheduler::UpdateHighestPriorityChannel() {
-    std::scoped_lock lk(impl->scheduling_guard);
-
-    // Clear needs to schedule state.
-    impl->must_reschedule = false;
-
-    // By default, we don't have a channel to schedule.
-    impl->current_fifo = nullptr;
-
-    // Check each level to see if we can schedule.
-    for (auto& level : impl->schedule_priority_queue) {
-        if (ScheduleLevel(level.second)) {
-            return true;
-        }
-    }
-
-    // Nothing to schedule.
-    return false;
-}
-
-bool Scheduler::ScheduleLevel(std::list<size_t>& queue) {
-    bool found_anything = false;
-    size_t min_schedule_count = std::numeric_limits<size_t>::max();
-    for (auto id : queue) {
-        auto& fifo = impl->gpfifos[id];
-        std::scoped_lock lk(fifo.guard);
-
-        // With no pending work and nothing running, this channel can't be scheduled.
-        if (fifo.pending_work.empty() && !fifo.is_running) {
-            continue;
-        }
-        // Prioritize channels at current priority which have been run the least.
-        if (fifo.scheduled_count > min_schedule_count) {
-            continue;
-        }
-
-        // Try not to select the same channel we just yielded from.
-        if (fifo.scheduled_count < fifo.yield_count) {
-            fifo.scheduled_count++;
-            continue;
-        }
-
-        // Update best selection.
-        min_schedule_count = fifo.scheduled_count;
-        impl->current_fifo = &fifo;
-        found_anything = true;
-    }
-    return found_anything;
-}
-
-void Scheduler::ChangePriority(s32 channel_id, u32 new_priority) {
-    std::scoped_lock lk(impl->scheduling_guard);
-    // Ensure we are tracking this channel.
-    auto fifo_it = impl->channel_gpfifo_ids.find(channel_id);
-    if (fifo_it == impl->channel_gpfifo_ids.end()) {
-        return;
-    }
-
-    // Get the fifo and update its priority.
-    const size_t fifo_id = fifo_it->second;
-    auto& fifo = impl->gpfifos[fifo_id];
-    const auto old_priority = std::exchange(fifo.info->priority, new_priority);
-
-    // Create the new level if needed.
-    impl->schedule_priority_queue.try_emplace(new_priority);
-
-    // Remove the old level and add to the new level.
-    impl->schedule_priority_queue[new_priority].push_back(fifo_id);
-    impl->schedule_priority_queue[old_priority].remove_if(
-        [fifo_id](size_t id) { return id == fifo_id; });
-}
-
-void Scheduler::Yield() {
-    ASSERT(impl->current_fifo != nullptr);
-
-    // Set yield count higher
-    impl->current_fifo->yield_count = impl->current_fifo->scheduled_count + 1;
-    Common::Fiber::YieldTo(impl->current_fifo->context, *impl->master_control);
-    gpu.BindChannel(impl->current_fifo->bind_id);
-}
-
-void Scheduler::CheckStatus() {
-    {
-        std::unique_lock lk(impl->scheduling_guard);
-        // If no reschedule is needed, don't transfer control
-        if (!impl->must_reschedule) {
-            return;
-        }
-    }
-    // Transfer control to the scheduler
-    Common::Fiber::YieldTo(impl->current_fifo->context, *impl->master_control);
-    gpu.BindChannel(impl->current_fifo->bind_id);
-}
-
 void Scheduler::Push(s32 channel, CommandList&& entries) {
-    std::scoped_lock lk(impl->scheduling_guard);
-    // Get and ensure we have this channel.
-    auto it = impl->channel_gpfifo_ids.find(channel);
-    ASSERT(it != impl->channel_gpfifo_ids.end());
-    auto gpfifo_id = it->second;
-    auto& fifo = impl->gpfifos[gpfifo_id];
-    // Add the new new work to the channel.
-    {
-        std::scoped_lock lk2(fifo.guard);
-        fifo.pending_work.emplace_back(std::move(entries));
-    }
-
-    // If the current running FIFO is null or the one being pushed to then
-    // just return
-    if (impl->current_fifo == nullptr || impl->current_fifo == &fifo) {
-        return;
-    }
-
-    // If the current fifo has higher or equal priority to the current fifo then return
-    if (impl->current_fifo->info->priority >= fifo.info->priority) {
-        return;
-    }
-    // Mark scheduler update as required.
-    impl->must_reschedule = true;
-}
-
-void Scheduler::ChannelLoop(size_t gpfifo_id, s32 channel_id) {
-    auto& fifo = impl->gpfifos[gpfifo_id];
-    auto* channel_state = fifo.info.get();
-    const auto SendToPuller = [&] {
-        std::scoped_lock lk(fifo.guard);
-        if (fifo.pending_work.empty()) {
-            // Stop if no work available.
-            fifo.is_running = false;
-            return false;
-        }
-        // Otherwise, send work to puller and mark as running.
-        CommandList&& entries = std::move(fifo.pending_work.front());
-        channel_state->dma_pusher->Push(std::move(entries));
-        fifo.pending_work.pop_front();
-        fifo.is_running = true;
-        // Succeed.
-        return true;
-    };
-    // Inform the GPU about the current channel.
-    gpu.BindChannel(channel_id);
-    while (true) {
-        while (SendToPuller()) {
-            // Execute.
-            channel_state->dma_pusher->DispatchCalls();
-            // Reschedule.
-            CheckStatus();
-        }
-        // Return to host execution when all work is completed.
-        Common::Fiber::YieldTo(fifo.context, *impl->master_control);
-        // Inform the GPU about the current channel.
-        gpu.BindChannel(channel_id);
-    }
+    std::unique_lock lk(scheduling_guard);
+    auto it = channels.find(channel);
+    ASSERT(it != channels.end());
+    auto channel_state = it->second;
+    gpu.BindChannel(channel_state->bind_id);
+    channel_state->dma_pusher->Push(std::move(entries));
+    channel_state->dma_pusher->DispatchCalls();
 }
 
 void Scheduler::DeclareChannel(std::shared_ptr<ChannelState> new_channel) {
     s32 channel = new_channel->bind_id;
-    std::unique_lock lk(impl->scheduling_guard);
-    size_t new_fifo_id;
-    if (!impl->free_fifos.empty()) {
-        new_fifo_id = impl->free_fifos.front();
-        impl->free_fifos.pop_front();
-    } else {
-        new_fifo_id = impl->gpfifos.size();
-        impl->gpfifos.emplace_back();
-    }
-    auto& new_fifo = impl->gpfifos[new_fifo_id];
-    impl->channel_gpfifo_ids[channel] = new_fifo_id;
-    new_fifo.is_active = true;
-    new_fifo.bind_id = channel;
-    new_fifo.pending_work.clear();
-    new_fifo.info = new_channel;
-    new_fifo.scheduled_count = 0;
-    new_fifo.yield_count = 0;
-    new_fifo.is_running = false;
-    impl->schedule_priority_queue.try_emplace(new_channel->priority);
-    impl->schedule_priority_queue[new_channel->priority].push_back(new_fifo_id);
-    std::function<void()> callback = std::bind(&Scheduler::ChannelLoop, this, new_fifo_id, channel);
-    new_fifo.context = std::make_shared<Common::Fiber>(std::move(callback));
+    std::unique_lock lk(scheduling_guard);
+    channels.emplace(channel, new_channel);
 }
 
 } // namespace Tegra::Control
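
Note on the scheduler.cpp hunk above: the removed implementation picked the next channel by walking priority buckets from highest to lowest and, within a bucket, preferring the channel with pending work that had been scheduled the fewest times, while skipping a channel that had just asked to yield. A simplified, self-contained sketch of that selection policy follows; the types and names are illustrative and are not the emulator's.

```cpp
// Sketch of the channel-selection policy from the removed Scheduler:
// priority buckets iterated from highest to lowest, least-scheduled channel wins,
// and a channel that just yielded is skipped once. Illustrative only.
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>
#include <limits>
#include <list>
#include <map>

struct Fifo {
    int id;
    bool has_work = false;
    std::size_t yield_count = 0;
    std::size_t scheduled_count = 0;
};

// Priority buckets, iterated from the highest priority value downwards.
using PriorityQueue = std::map<std::uint32_t, std::list<Fifo*>, std::greater<std::uint32_t>>;

Fifo* PickNext(PriorityQueue& queue) {
    for (auto& level : queue) {
        Fifo* best = nullptr;
        std::size_t min_scheduled = std::numeric_limits<std::size_t>::max();
        for (Fifo* fifo : level.second) {
            if (!fifo->has_work) {
                continue; // Nothing pending on this channel.
            }
            if (fifo->scheduled_count > min_scheduled) {
                continue; // Another channel at this level has run less often.
            }
            if (fifo->scheduled_count < fifo->yield_count) {
                fifo->scheduled_count++; // Channel just yielded; give others a turn.
                continue;
            }
            min_scheduled = fifo->scheduled_count;
            best = fifo;
        }
        if (best != nullptr) {
            best->scheduled_count++;
            return best; // The highest non-empty priority level wins.
        }
    }
    return nullptr; // Nothing to schedule at any level.
}

int main() {
    Fifo a{0, true}, b{1, true}, c{2, true};
    PriorityQueue queue;
    queue[2].push_back(&a); // high priority
    queue[2].push_back(&c); // high priority
    queue[1].push_back(&b); // low priority, starved while level 2 has work
    for (int i = 0; i < 4; ++i) {
        if (Fifo* next = PickNext(queue)) {
            std::cout << "run channel " << next->id << '\n'; // channels 0 and 2 alternate
        }
    }
    return 0;
}
```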
@@ -3,11 +3,10 @@
 
 #pragma once
 
-#include <list>
 #include <memory>
+#include <mutex>
+#include <unordered_map>
 
-#include "common/common_types.h"
-#include "video_core/control/channel_state.h"
 #include "video_core/dma_pusher.h"
 
 namespace Tegra {
@@ -23,27 +22,13 @@ public:
     explicit Scheduler(GPU& gpu_);
     ~Scheduler();
 
-    void Init();
-
-    void Resume();
-
-    void Yield();
-
     void Push(s32 channel, CommandList&& entries);
 
     void DeclareChannel(std::shared_ptr<ChannelState> new_channel);
 
-    void ChangePriority(s32 channel_id, u32 new_priority);
-
 private:
-    void ChannelLoop(size_t gpfifo_id, s32 channel_id);
-    bool ScheduleLevel(std::list<size_t>& queue);
-    void CheckStatus();
-    bool UpdateHighestPriorityChannel();
-
-    struct SchedulerImpl;
-    std::unique_ptr<SchedulerImpl> impl;
-
+    std::unordered_map<s32, std::shared_ptr<ChannelState>> channels;
+    std::mutex scheduling_guard;
     GPU& gpu;
 };
 
@@ -6,7 +6,6 @@
 #include "common/settings.h"
 #include "core/core.h"
 #include "video_core/control/channel_state.h"
-#include "video_core/control/scheduler.h"
 #include "video_core/dma_pusher.h"
 #include "video_core/engines/fermi_2d.h"
 #include "video_core/engines/kepler_compute.h"
@@ -15,8 +14,6 @@
 #include "video_core/engines/maxwell_dma.h"
 #include "video_core/engines/puller.h"
 #include "video_core/gpu.h"
-#include "video_core/host1x/host1x.h"
-#include "video_core/host1x/syncpoint_manager.h"
 #include "video_core/memory_manager.h"
 #include "video_core/rasterizer_interface.h"
 
@@ -63,14 +60,11 @@ void Puller::ProcessBindMethod(const MethodCall& method_call) {
 }
 
 void Puller::ProcessFenceActionMethod() {
-    auto& syncpoint_manager = gpu.Host1x().GetSyncpointManager();
     switch (regs.fence_action.op) {
     case Puller::FenceOperation::Acquire:
-        while (regs.fence_value >
-               syncpoint_manager.GetGuestSyncpointValue(regs.fence_action.syncpoint_id)) {
+        // UNIMPLEMENTED_MSG("Channel Scheduling pending.");
+        // WaitFence(regs.fence_action.syncpoint_id, regs.fence_value);
         rasterizer->ReleaseFences();
-            gpu.Scheduler().Yield();
-        }
         break;
     case Puller::FenceOperation::Increment:
         rasterizer->SignalSyncPoint(regs.fence_action.syncpoint_id);
@@ -387,14 +387,6 @@ std::shared_ptr<Control::ChannelState> GPU::AllocateChannel() {
     return impl->AllocateChannel();
 }
 
-Tegra::Control::Scheduler& GPU::Scheduler() {
-    return *impl->scheduler;
-}
-
-const Tegra::Control::Scheduler& GPU::Scheduler() const {
-    return *impl->scheduler;
-}
-
 void GPU::InitChannel(Control::ChannelState& to_init) {
     impl->InitChannel(to_init);
 }
@@ -124,8 +124,7 @@ class KeplerCompute;
 
 namespace Control {
 struct ChannelState;
-class Scheduler;
-} // namespace Control
+}
 
 namespace Host1x {
 class Host1x;
@@ -205,12 +204,6 @@ public:
     /// Returns a const reference to the shader notifier.
     [[nodiscard]] const VideoCore::ShaderNotify& ShaderNotify() const;
 
-    /// Returns GPU Channel Scheduler.
-    [[nodiscard]] Tegra::Control::Scheduler& Scheduler();
-
-    /// Returns GPU Channel Scheduler.
-    [[nodiscard]] const Tegra::Control::Scheduler& Scheduler() const;
-
     [[nodiscard]] u64 GetTicks() const;
 
     [[nodiscard]] bool IsAsync() const;
@@ -34,15 +34,13 @@ static void RunThread(std::stop_token stop_token, Core::System& system,
 
     CommandDataContainer next;
 
-    scheduler.Init();
-
     while (!stop_token.stop_requested()) {
         state.queue.PopWait(next, stop_token);
         if (stop_token.stop_requested()) {
             break;
         }
-        if (std::holds_alternative<SubmitListCommand>(next.data)) {
-            scheduler.Resume();
+        if (auto* submit_list = std::get_if<SubmitListCommand>(&next.data)) {
+            scheduler.Push(submit_list->channel, std::move(submit_list->entries));
         } else if (std::holds_alternative<GPUTickCommand>(next.data)) {
             system.GPU().TickWork();
         } else if (const auto* flush = std::get_if<FlushRegionCommand>(&next.data)) {
@@ -69,16 +67,14 @@ ThreadManager::~ThreadManager() = default;
 
 void ThreadManager::StartThread(VideoCore::RendererBase& renderer,
                                 Core::Frontend::GraphicsContext& context,
-                                Tegra::Control::Scheduler& scheduler_) {
+                                Tegra::Control::Scheduler& scheduler) {
     rasterizer = renderer.ReadRasterizer();
-    scheduler = &scheduler_;
     thread = std::jthread(RunThread, std::ref(system), std::ref(renderer), std::ref(context),
-                          std::ref(scheduler_), std::ref(state));
+                          std::ref(scheduler), std::ref(state));
 }
 
 void ThreadManager::SubmitList(s32 channel, Tegra::CommandList&& entries) {
-    scheduler->Push(channel, std::move(entries));
-    PushCommand(SubmitListCommand());
+    PushCommand(SubmitListCommand(channel, std::move(entries)));
 }
 
 void ThreadManager::FlushRegion(DAddr addr, u64 size) {
|
|||||||
namespace VideoCommon::GPUThread {
|
namespace VideoCommon::GPUThread {
|
||||||
|
|
||||||
/// Command to signal to the GPU thread that a command list is ready for processing
|
/// Command to signal to the GPU thread that a command list is ready for processing
|
||||||
struct SubmitListCommand final {};
|
struct SubmitListCommand final {
|
||||||
|
explicit SubmitListCommand(s32 channel_, Tegra::CommandList&& entries_)
|
||||||
|
: channel{channel_}, entries{std::move(entries_)} {}
|
||||||
|
|
||||||
|
s32 channel;
|
||||||
|
Tegra::CommandList entries;
|
||||||
|
};
|
||||||
|
|
||||||
/// Command to signal to the GPU thread to flush a region
|
/// Command to signal to the GPU thread to flush a region
|
||||||
struct FlushRegionCommand final {
|
struct FlushRegionCommand final {
|
||||||
@ -118,7 +124,6 @@ public:
|
|||||||
private:
|
private:
|
||||||
/// Pushes a command to be executed by the GPU thread
|
/// Pushes a command to be executed by the GPU thread
|
||||||
u64 PushCommand(CommandData&& command_data, bool block = false);
|
u64 PushCommand(CommandData&& command_data, bool block = false);
|
||||||
Tegra::Control::Scheduler* scheduler;
|
|
||||||
|
|
||||||
Core::System& system;
|
Core::System& system;
|
||||||
const bool is_async;
|
const bool is_async;
|
||||||
|
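
Note on the gpu_thread hunks above: SubmitListCommand now carries its payload (channel id and command list), so the GPU thread pulls the work straight out of the queued command with std::get_if instead of going through a scheduler pointer kept on ThreadManager. A minimal, self-contained sketch of that command-queue pattern follows; the types are illustrative and are not the emulator's.

```cpp
// Sketch: a variant-based command queue where the submit command carries its payload.
// Illustrative types only; not the emulator's CommandDataContainer machinery.
#include <cstdint>
#include <iostream>
#include <queue>
#include <utility>
#include <variant>
#include <vector>

using CommandList = std::vector<std::uint64_t>;

struct SubmitListCommand {
    std::int32_t channel;
    CommandList entries;
};

struct TickCommand {};

using CommandData = std::variant<SubmitListCommand, TickCommand>;

int main() {
    std::queue<CommandData> queue;
    queue.push(SubmitListCommand{3, {0xdead, 0xbeef}});
    queue.push(TickCommand{});

    while (!queue.empty()) {
        CommandData next = std::move(queue.front());
        queue.pop();
        if (auto* submit = std::get_if<SubmitListCommand>(&next)) {
            // The payload travels with the command, so the worker needs no side
            // channel (for example a scheduler pointer) to locate the work.
            std::cout << "submit on channel " << submit->channel << " with "
                      << submit->entries.size() << " entries\n";
        } else if (std::get_if<TickCommand>(&next)) {
            std::cout << "tick\n";
        }
    }
    return 0;
}
```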
@@ -19,13 +19,15 @@ layout (push_constant) uniform PushConstants {
 // Any member of a push constant block that is declared as an
 // array must only be accessed with dynamically uniform indices.
 ScreenRectVertex GetVertex(int index) {
-    if (index < 1) {
+    switch (index) {
+    case 0:
+    default:
         return vertices[0];
-    } else if (index < 2) {
+    case 1:
         return vertices[1];
-    } else if (index < 3) {
+    case 2:
         return vertices[2];
-    } else {
+    case 3:
         return vertices[3];
     }
 }
@@ -42,7 +42,6 @@ ImageInfo::ImageInfo(const TICEntry& config) noexcept {
         };
     }
     rescaleable = false;
-    is_sparse = config.is_sparse != 0;
     tile_width_spacing = config.tile_width_spacing;
     if (config.texture_type != TextureType::Texture2D &&
         config.texture_type != TextureType::Texture2DNoMipmap) {
@@ -41,7 +41,6 @@ struct ImageInfo {
     bool downscaleable = false;
     bool forced_flushed = false;
     bool dma_downloaded = false;
-    bool is_sparse = false;
 };
 
 } // namespace VideoCommon
@@ -600,17 +600,17 @@ void TextureCache<P>::UnmapGPUMemory(size_t as_id, GPUVAddr gpu_addr, size_t siz
         [&](ImageId id, Image&) { deleted_images.push_back(id); });
     for (const ImageId id : deleted_images) {
         Image& image = slot_images[id];
-        if (False(image.flags & ImageFlagBits::CpuModified)) {
-            image.flags |= ImageFlagBits::CpuModified;
-            if (True(image.flags & ImageFlagBits::Tracked)) {
-                UntrackImage(image, id);
-            }
+        if (True(image.flags & ImageFlagBits::CpuModified)) {
+            continue;
         }
+        image.flags |= ImageFlagBits::CpuModified;
         if (True(image.flags & ImageFlagBits::Remapped)) {
             continue;
         }
         image.flags |= ImageFlagBits::Remapped;
+        if (True(image.flags & ImageFlagBits::Tracked)) {
+            UntrackImage(image, id);
+        }
     }
 }
@@ -1469,8 +1469,7 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, DA
     const ImageId new_image_id = slot_images.insert(runtime, new_info, gpu_addr, cpu_addr);
     Image& new_image = slot_images[new_image_id];
 
-    if (!gpu_memory->IsContinuousRange(new_image.gpu_addr, new_image.guest_size_bytes) &&
-        new_info.is_sparse) {
+    if (!gpu_memory->IsContinuousRange(new_image.gpu_addr, new_image.guest_size_bytes)) {
         new_image.flags |= ImageFlagBits::Sparse;
     }