Compare commits
5 Commits
android-17 ... android-17

| SHA1 |
|---|
| bcd1f34ce9 |
| 1b18205166 |
| 83236d44ab |
| 6e215fbf25 |
| 0b0f4c0586 |
@@ -1,7 +1,9 @@
| Pull Request | Commit | Title | Author | Merged? |
|----|----|----|----|----|
| [12466](https://github.com/yuzu-emu/yuzu//pull/12466) | [`ddda76f9b`](https://github.com/yuzu-emu/yuzu//pull/12466/files) | core: track separate heap allocation for linux | [liamwhite](https://github.com/liamwhite/) | Yes |
| [12454](https://github.com/yuzu-emu/yuzu//pull/12454) | [`3a4e7d45f`](https://github.com/yuzu-emu/yuzu//pull/12454/files) | core_timing: minor refactors | [liamwhite](https://github.com/liamwhite/) | Yes |
| [12466](https://github.com/yuzu-emu/yuzu//pull/12466) | [`adb2af0a2`](https://github.com/yuzu-emu/yuzu//pull/12466/files) | core: track separate heap allocation for linux | [liamwhite](https://github.com/liamwhite/) | Yes |
| [12479](https://github.com/yuzu-emu/yuzu//pull/12479) | [`20e040723`](https://github.com/yuzu-emu/yuzu//pull/12479/files) | video_core: Fix buffer_row_length for linear compressed textures | [GPUCode](https://github.com/GPUCode/) | Yes |
| [12487](https://github.com/yuzu-emu/yuzu//pull/12487) | [`d0c60605a`](https://github.com/yuzu-emu/yuzu//pull/12487/files) | shader_recompiler: use default value for clip distances array | [liamwhite](https://github.com/liamwhite/) | Yes |

End of merge log. You can find the original README.md below the break.
@@ -18,9 +18,7 @@ constexpr auto INCREMENT_TIME{5ms};
DeviceSession::DeviceSession(Core::System& system_)
: system{system_}, thread_event{Core::Timing::CreateEvent(
"AudioOutSampleTick",
[this](std::uintptr_t, s64 time, std::chrono::nanoseconds) {
return ThreadFunc();
})} {}
[this](s64 time, std::chrono::nanoseconds) { return ThreadFunc(); })} {}

DeviceSession::~DeviceSession() {
Finalize();
@@ -1,7 +1,7 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include <algorithm>
#include <fstream>
#include <vector>

#include "common/heap_tracker.h"
@@ -11,11 +11,25 @@ namespace Common {

namespace {

constexpr s64 MaxResidentMapCount = 0x8000;
s64 GetMaxPermissibleResidentMapCount() {
// Default value.
s64 value = 65530;

// Try to read how many mappings we can make.
std::ifstream s("/proc/sys/vm/max_map_count");
s >> value;

// Print, for debug.
LOG_INFO(HW_Memory, "Current maximum map count: {}", value);

// Allow 20000 maps for other code and to account for split inaccuracy.
return std::max<s64>(value - 20000, 0);
}

} // namespace

HeapTracker::HeapTracker(Common::HostMemory& buffer) : m_buffer(buffer) {}
HeapTracker::HeapTracker(Common::HostMemory& buffer)
: m_buffer(buffer), m_max_resident_map_count(GetMaxPermissibleResidentMapCount()) {}
HeapTracker::~HeapTracker() = default;

void HeapTracker::Map(size_t virtual_offset, size_t host_offset, size_t length,
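For readers unfamiliar with the limit probed above: Linux caps the number of distinct memory mappings a process may hold (`vm.max_map_count`, kernel default 65530), and the tracker budgets against that cap. Below is a minimal, standalone sketch of the same probe — not the emulator's code verbatim — keeping the 20000-map headroom from the hunk as an illustrative constant:

```cpp
#include <algorithm>
#include <cstdint>
#include <fstream>
#include <iostream>

// Read the per-process mapping limit and leave headroom for mappings
// created outside the tracker (code, libraries, allocator arenas, ...).
std::int64_t GetMaxPermissibleResidentMapCount() {
    std::int64_t value = 65530; // kernel default if the sysctl cannot be read
    if (std::int64_t read_value = 0; std::ifstream{"/proc/sys/vm/max_map_count"} >> read_value) {
        value = read_value;
    }
    return std::max<std::int64_t>(value - 20000, 0);
}

int main() {
    std::cout << "usable resident maps: " << GetMaxPermissibleResidentMapCount() << '\n';
}
```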
@@ -74,8 +88,8 @@ void HeapTracker::Unmap(size_t virtual_offset, size_t size, bool is_separate_hea
}

// Erase from map.
it = m_mappings.erase(it);
ASSERT(--m_map_count >= 0);
it = m_mappings.erase(it);

// Free the item.
delete item;
@@ -94,8 +108,8 @@ void HeapTracker::Protect(size_t virtual_offset, size_t size, MemoryPermission p
this->SplitHeapMap(virtual_offset, size);

// Declare tracking variables.
const VAddr end = virtual_offset + size;
VAddr cur = virtual_offset;
VAddr end = virtual_offset + size;

while (cur < end) {
VAddr next = cur;
@@ -167,7 +181,7 @@ bool HeapTracker::DeferredMapSeparateHeap(size_t virtual_offset) {
it->tick = m_tick++;

// Check if we need to rebuild.
if (m_resident_map_count > MaxResidentMapCount) {
if (m_resident_map_count > m_max_resident_map_count) {
rebuild_required = true;
}
@@ -193,8 +207,12 @@ void HeapTracker::RebuildSeparateHeapAddressSpace() {

ASSERT(!m_resident_mappings.empty());

// Unmap so we have at least 4 maps available.
const size_t desired_count = std::min(m_resident_map_count, MaxResidentMapCount - 4);
// Dump half of the mappings.
//
// Despite being worse in theory, this has proven to be better in practice than more
// regularly dumping a smaller amount, because it significantly reduces average case
// lock contention.
const size_t desired_count = std::min(m_resident_map_count, m_max_resident_map_count) / 2;
const size_t evict_count = m_resident_map_count - desired_count;
auto it = m_resident_mappings.begin();
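The rebuild pass above now caps resident mappings at the value read from the kernel and, once the cap is hit, evicts down to half of it in one go rather than trimming a few entries at a time, trading a worse single rebuild for less frequent lock contention. A minimal sketch of that arithmetic over an LRU-ordered container (illustrative names, not the HeapTracker API):

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <map>

// Mappings keyed by last-use tick, so begin() is the least recently used.
using LruMappings = std::multimap<std::uint64_t, std::size_t /*virtual_offset*/>;

std::size_t EvictToHalf(LruMappings& resident, std::size_t max_resident) {
    // Keep at most half of the allowed maps; drop everything older than that.
    const std::size_t desired = std::min(resident.size(), max_resident) / 2;
    const std::size_t evict_count = resident.size() - desired;
    auto it = resident.begin();
    for (std::size_t i = 0; i < evict_count; ++i) {
        it = resident.erase(it); // a real tracker would also unmap the range here
    }
    return evict_count;
}

int main() {
    LruMappings resident;
    for (std::uint64_t tick = 0; tick < 100; ++tick) {
        resident.emplace(tick, tick * 0x1000);
    }
    std::cout << "evicted " << EvictToHalf(resident, 80) << ", kept " << resident.size() << '\n';
}
```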
@@ -247,8 +265,8 @@ void HeapTracker::SplitHeapMapLocked(VAddr offset) {

// If resident, also insert into resident map.
if (right->is_resident) {
m_resident_mappings.insert(*right);
m_resident_map_count++;
m_resident_mappings.insert(*right);
}
}
@@ -86,6 +86,7 @@ private:

private:
Common::HostMemory& m_buffer;
const s64 m_max_resident_map_count;

std::shared_mutex m_rebuild_lock{};
std::mutex m_lock{};
@@ -29,7 +29,6 @@ std::shared_ptr<EventType> CreateEvent(std::string name, TimedCallback&& callbac
struct CoreTiming::Event {
s64 time;
u64 fifo_order;
std::uintptr_t user_data;
std::weak_ptr<EventType> type;
s64 reschedule_time;
heap_t::handle_type handle{};
@@ -67,17 +66,15 @@ void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
event_fifo_id = 0;
shutting_down = false;
cpu_ticks = 0;
const auto empty_timed_callback = [](std::uintptr_t, u64, std::chrono::nanoseconds)
-> std::optional<std::chrono::nanoseconds> { return std::nullopt; };
ev_lost = CreateEvent("_lost_event", empty_timed_callback);
if (is_multicore) {
timer_thread = std::make_unique<std::jthread>(ThreadEntry, std::ref(*this));
}
}

void CoreTiming::ClearPendingEvents() {
std::scoped_lock lock{basic_lock};
std::scoped_lock lock{advance_lock, basic_lock};
event_queue.clear();
event.Set();
}

void CoreTiming::Pause(bool is_paused) {
@@ -119,14 +116,12 @@ bool CoreTiming::HasPendingEvents() const {
}

void CoreTiming::ScheduleEvent(std::chrono::nanoseconds ns_into_future,
const std::shared_ptr<EventType>& event_type,
std::uintptr_t user_data, bool absolute_time) {
const std::shared_ptr<EventType>& event_type, bool absolute_time) {
{
std::scoped_lock scope{basic_lock};
const auto next_time{absolute_time ? ns_into_future : GetGlobalTimeNs() + ns_into_future};

auto h{event_queue.emplace(
Event{next_time.count(), event_fifo_id++, user_data, event_type, 0})};
auto h{event_queue.emplace(Event{next_time.count(), event_fifo_id++, event_type, 0})};
(*h).handle = h;
}
@@ -136,13 +131,13 @@ void CoreTiming::ScheduleEvent(std::chrono::nanoseconds ns_into_future,
void CoreTiming::ScheduleLoopingEvent(std::chrono::nanoseconds start_time,
std::chrono::nanoseconds resched_time,
const std::shared_ptr<EventType>& event_type,
std::uintptr_t user_data, bool absolute_time) {
bool absolute_time) {
{
std::scoped_lock scope{basic_lock};
const auto next_time{absolute_time ? start_time : GetGlobalTimeNs() + start_time};

auto h{event_queue.emplace(Event{next_time.count(), event_fifo_id++, user_data, event_type,
resched_time.count()})};
auto h{event_queue.emplace(
Event{next_time.count(), event_fifo_id++, event_type, resched_time.count()})};
(*h).handle = h;
}
@@ -150,14 +145,14 @@ void CoreTiming::ScheduleLoopingEvent(std::chrono::nanoseconds start_time,
}

void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type,
std::uintptr_t user_data, bool wait) {
UnscheduleEventType type) {
{
std::scoped_lock lk{basic_lock};

std::vector<heap_t::handle_type> to_remove;
for (auto itr = event_queue.begin(); itr != event_queue.end(); itr++) {
const Event& e = *itr;
if (e.type.lock().get() == event_type.get() && e.user_data == user_data) {
if (e.type.lock().get() == event_type.get()) {
to_remove.push_back(itr->handle);
}
}
@@ -165,10 +160,12 @@ void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type,
for (auto h : to_remove) {
event_queue.erase(h);
}

event_type->sequence_number++;
}

// Force any in-progress events to finish
if (wait) {
if (type == UnscheduleEventType::Wait) {
std::scoped_lock lk{advance_lock};
}
}
@@ -208,28 +205,31 @@ std::optional<s64> CoreTiming::Advance() {
const Event& evt = event_queue.top();

if (const auto event_type{evt.type.lock()}) {
if (evt.reschedule_time == 0) {
const auto evt_user_data = evt.user_data;
const auto evt_time = evt.time;
const auto evt_time = evt.time;
const auto evt_sequence_num = event_type->sequence_number;

if (evt.reschedule_time == 0) {
event_queue.pop();

basic_lock.unlock();

event_type->callback(
evt_user_data, evt_time,
std::chrono::nanoseconds{GetGlobalTimeNs().count() - evt_time});
evt_time, std::chrono::nanoseconds{GetGlobalTimeNs().count() - evt_time});

basic_lock.lock();
} else {
basic_lock.unlock();

const auto new_schedule_time{event_type->callback(
evt.user_data, evt.time,
std::chrono::nanoseconds{GetGlobalTimeNs().count() - evt.time})};
evt_time, std::chrono::nanoseconds{GetGlobalTimeNs().count() - evt_time})};

basic_lock.lock();

if (evt_sequence_num != event_type->sequence_number) {
// Heap handle is invalidated after external modification.
continue;
}

const auto next_schedule_time{new_schedule_time.has_value()
? new_schedule_time.value().count()
: evt.reschedule_time};
@@ -241,8 +241,8 @@ std::optional<s64> CoreTiming::Advance() {
next_time = pause_end_time + next_schedule_time;
}

event_queue.update(evt.handle, Event{next_time, event_fifo_id++, evt.user_data,
evt.type, next_schedule_time, evt.handle});
event_queue.update(evt.handle, Event{next_time, event_fifo_id++, evt.type,
next_schedule_time, evt.handle});
}
}
@@ -22,17 +22,25 @@ namespace Core::Timing {

/// A callback that may be scheduled for a particular core timing event.
using TimedCallback = std::function<std::optional<std::chrono::nanoseconds>(
std::uintptr_t user_data, s64 time, std::chrono::nanoseconds ns_late)>;
s64 time, std::chrono::nanoseconds ns_late)>;

/// Contains the characteristics of a particular event.
struct EventType {
explicit EventType(TimedCallback&& callback_, std::string&& name_)
: callback{std::move(callback_)}, name{std::move(name_)} {}
: callback{std::move(callback_)}, name{std::move(name_)}, sequence_number{0} {}

/// The event's callback function.
TimedCallback callback;
/// A pointer to the name of the event.
const std::string name;
/// A monotonic sequence number, incremented when this event is
/// changed externally.
size_t sequence_number;
};

enum class UnscheduleEventType {
Wait,
NoWait,
};

/**
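The new sequence_number is what lets Advance() notice that an event was unscheduled while its callback ran outside the lock: the runner snapshots the counter before invoking the callback and skips rescheduling if it has changed, since the heap handle is no longer valid. A self-contained sketch of that guard (simplified and single-threaded; the types and names are illustrative, not the CoreTiming implementation):

```cpp
#include <cstddef>
#include <functional>
#include <iostream>
#include <optional>

struct TinyEvent {
    std::function<std::optional<int>()> callback; // returns a reschedule delay, or nullopt
    std::size_t sequence_number = 0;              // bumped whenever the event is unscheduled
};

void RunOnce(TinyEvent& event) {
    const std::size_t seq = event.sequence_number; // snapshot before running the callback
    const auto resched = event.callback();
    if (seq != event.sequence_number) {
        std::cout << "event was unscheduled during its callback; not rescheduling\n";
        return;
    }
    if (resched) {
        std::cout << "reschedule in " << *resched << " ns\n";
    }
}

int main() {
    TinyEvent event;
    // The callback cancels its own event, as UnscheduleEvent() might from another thread.
    event.callback = [&event]() -> std::optional<int> {
        ++event.sequence_number;
        return 100;
    };
    RunOnce(event); // takes the "not rescheduling" branch
}
```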
@@ -89,23 +97,17 @@ public:

/// Schedules an event in core timing
void ScheduleEvent(std::chrono::nanoseconds ns_into_future,
const std::shared_ptr<EventType>& event_type, std::uintptr_t user_data = 0,
bool absolute_time = false);
const std::shared_ptr<EventType>& event_type, bool absolute_time = false);

/// Schedules an event which will automatically re-schedule itself with the given time, until
/// unscheduled
void ScheduleLoopingEvent(std::chrono::nanoseconds start_time,
std::chrono::nanoseconds resched_time,
const std::shared_ptr<EventType>& event_type,
std::uintptr_t user_data = 0, bool absolute_time = false);
bool absolute_time = false);

void UnscheduleEvent(const std::shared_ptr<EventType>& event_type, std::uintptr_t user_data,
bool wait = true);

void UnscheduleEventWithoutWait(const std::shared_ptr<EventType>& event_type,
std::uintptr_t user_data) {
UnscheduleEvent(event_type, user_data, false);
}
void UnscheduleEvent(const std::shared_ptr<EventType>& event_type,
UnscheduleEventType type = UnscheduleEventType::Wait);

void AddTicks(u64 ticks_to_add);
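UnscheduleEventType replaces the old `bool wait` parameter and the separate UnscheduleEventWithoutWait helper: Wait briefly acquires the lock held while callbacks execute, so the call returns only after any in-flight callback has finished, while NoWait merely removes the queue entry. A small, self-contained sketch of that synchronization pattern (a toy model, not the CoreTiming implementation):

```cpp
#include <chrono>
#include <iostream>
#include <mutex>
#include <thread>

enum class UnscheduleEventType { Wait, NoWait };

struct TinyTimer {
    std::mutex advance_lock; // held for the duration of a callback

    void RunCallbackOnce() {
        std::scoped_lock lk{advance_lock};
        std::this_thread::sleep_for(std::chrono::milliseconds(50)); // stand-in for callback work
    }

    void Unschedule(UnscheduleEventType type) {
        // Queue removal elided; only the completion guarantee is modeled.
        if (type == UnscheduleEventType::Wait) {
            std::scoped_lock lk{advance_lock}; // blocks until a running callback returns
        }
    }
};

int main() {
    TinyTimer timer;
    std::thread worker{[&timer] { timer.RunCallbackOnce(); }};
    std::this_thread::sleep_for(std::chrono::milliseconds(5)); // let the callback start first
    timer.Unschedule(UnscheduleEventType::Wait);
    std::cout << "unschedule returned after the callback completed\n";
    worker.join();
}
```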
@@ -158,7 +160,6 @@ private:
heap_t event_queue;
u64 event_fifo_id = 0;

std::shared_ptr<EventType> ev_lost;
Common::Event event{};
Common::Event pause_event{};
mutable std::mutex basic_lock;
@@ -10,15 +10,15 @@ namespace Kernel {

void KHardwareTimer::Initialize() {
// Create the timing callback to register with CoreTiming.
m_event_type = Core::Timing::CreateEvent(
"KHardwareTimer::Callback", [](std::uintptr_t timer_handle, s64, std::chrono::nanoseconds) {
reinterpret_cast<KHardwareTimer*>(timer_handle)->DoTask();
return std::nullopt;
});
m_event_type = Core::Timing::CreateEvent("KHardwareTimer::Callback",
[this](s64, std::chrono::nanoseconds) {
this->DoTask();
return std::nullopt;
});
}

void KHardwareTimer::Finalize() {
m_kernel.System().CoreTiming().UnscheduleEvent(m_event_type, reinterpret_cast<uintptr_t>(this));
m_kernel.System().CoreTiming().UnscheduleEvent(m_event_type);
m_wakeup_time = std::numeric_limits<s64>::max();
m_event_type.reset();
}
@@ -57,13 +57,12 @@ void KHardwareTimer::EnableInterrupt(s64 wakeup_time) {

m_wakeup_time = wakeup_time;
m_kernel.System().CoreTiming().ScheduleEvent(std::chrono::nanoseconds{m_wakeup_time},
m_event_type, reinterpret_cast<uintptr_t>(this),
true);
m_event_type, true);
}

void KHardwareTimer::DisableInterrupt() {
m_kernel.System().CoreTiming().UnscheduleEventWithoutWait(m_event_type,
reinterpret_cast<uintptr_t>(this));
m_kernel.System().CoreTiming().UnscheduleEvent(m_event_type,
Core::Timing::UnscheduleEventType::NoWait);
m_wakeup_time = std::numeric_limits<s64>::max();
}
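The KHardwareTimer change, and the HID, Nvnflinger, cheat-engine, and freezer hunks that follow, all apply the same pattern: instead of smuggling an object pointer through a std::uintptr_t user_data argument and a reinterpret_cast inside the callback, the callback is a lambda that captures `this` (or the shared resource) directly. A generic before/after sketch of that migration, with illustrative names only:

```cpp
#include <cstdint>
#include <functional>
#include <iostream>

struct Device {
    void DoTask() { std::cout << "task\n"; }
};

int main() {
    Device device;

    // Before: the object pointer travels through an opaque integer argument.
    std::function<void(std::uintptr_t)> old_style = [](std::uintptr_t user_data) {
        reinterpret_cast<Device*>(user_data)->DoTask();
    };
    old_style(reinterpret_cast<std::uintptr_t>(&device));

    // After: the lambda captures the object, so no cast or extra parameter is needed.
    std::function<void()> new_style = [&device] { device.DoTask(); };
    new_style();
}
```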
@@ -238,7 +238,7 @@ struct KernelCore::Impl {
void InitializePreemption(KernelCore& kernel) {
preemption_event = Core::Timing::CreateEvent(
"PreemptionCallback",
[this, &kernel](std::uintptr_t, s64 time,
[this, &kernel](s64 time,
std::chrono::nanoseconds) -> std::optional<std::chrono::nanoseconds> {
{
KScopedSchedulerLock lock(kernel);
@@ -49,10 +49,10 @@ HidBus::HidBus(Core::System& system_)
// Register update callbacks
hidbus_update_event = Core::Timing::CreateEvent(
"Hidbus::UpdateCallback",
[this](std::uintptr_t user_data, s64 time,
[this](s64 time,
std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {
const auto guard = LockService();
UpdateHidbus(user_data, ns_late);
UpdateHidbus(ns_late);
return std::nullopt;
});
@@ -61,10 +61,10 @@ HidBus::HidBus(Core::System& system_)
}

HidBus::~HidBus() {
system.CoreTiming().UnscheduleEvent(hidbus_update_event, 0);
system.CoreTiming().UnscheduleEvent(hidbus_update_event);
}

void HidBus::UpdateHidbus(std::uintptr_t user_data, std::chrono::nanoseconds ns_late) {
void HidBus::UpdateHidbus(std::chrono::nanoseconds ns_late) {
if (is_hidbus_enabled) {
for (std::size_t i = 0; i < devices.size(); ++i) {
if (!devices[i].is_device_initializated) {
@@ -108,7 +108,7 @@ private:
void DisableJoyPollingReceiveMode(HLERequestContext& ctx);
void SetStatusManagerType(HLERequestContext& ctx);

void UpdateHidbus(std::uintptr_t user_data, std::chrono::nanoseconds ns_late);
void UpdateHidbus(std::chrono::nanoseconds ns_late);
std::optional<std::size_t> GetDeviceIndexFromHandle(BusHandle handle) const;

template <typename T>
@@ -227,8 +227,7 @@ void ResourceManager::EnableTouchScreen(u64 aruid, bool is_enabled) {
applet_resource->EnableTouchScreen(aruid, is_enabled);
}

void ResourceManager::UpdateControllers(std::uintptr_t user_data,
std::chrono::nanoseconds ns_late) {
void ResourceManager::UpdateControllers(std::chrono::nanoseconds ns_late) {
auto& core_timing = system.CoreTiming();
debug_pad->OnUpdate(core_timing);
digitizer->OnUpdate(core_timing);
@@ -241,20 +240,19 @@ void ResourceManager::UpdateControllers(std::uintptr_t user_data,
capture_button->OnUpdate(core_timing);
}

void ResourceManager::UpdateNpad(std::uintptr_t user_data, std::chrono::nanoseconds ns_late) {
void ResourceManager::UpdateNpad(std::chrono::nanoseconds ns_late) {
auto& core_timing = system.CoreTiming();
npad->OnUpdate(core_timing);
}

void ResourceManager::UpdateMouseKeyboard(std::uintptr_t user_data,
std::chrono::nanoseconds ns_late) {
void ResourceManager::UpdateMouseKeyboard(std::chrono::nanoseconds ns_late) {
auto& core_timing = system.CoreTiming();
mouse->OnUpdate(core_timing);
debug_mouse->OnUpdate(core_timing);
keyboard->OnUpdate(core_timing);
}

void ResourceManager::UpdateMotion(std::uintptr_t user_data, std::chrono::nanoseconds ns_late) {
void ResourceManager::UpdateMotion(std::chrono::nanoseconds ns_late) {
auto& core_timing = system.CoreTiming();
six_axis->OnUpdate(core_timing);
seven_six_axis->OnUpdate(core_timing);
@@ -273,34 +271,34 @@ IAppletResource::IAppletResource(Core::System& system_, std::shared_ptr<Resource
// Register update callbacks
npad_update_event = Core::Timing::CreateEvent(
"HID::UpdatePadCallback",
[this, resource](std::uintptr_t user_data, s64 time, std::chrono::nanoseconds ns_late)
-> std::optional<std::chrono::nanoseconds> {
[this, resource](
s64 time, std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {
const auto guard = LockService();
resource->UpdateNpad(user_data, ns_late);
resource->UpdateNpad(ns_late);
return std::nullopt;
});
default_update_event = Core::Timing::CreateEvent(
"HID::UpdateDefaultCallback",
[this, resource](std::uintptr_t user_data, s64 time, std::chrono::nanoseconds ns_late)
-> std::optional<std::chrono::nanoseconds> {
[this, resource](
s64 time, std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {
const auto guard = LockService();
resource->UpdateControllers(user_data, ns_late);
resource->UpdateControllers(ns_late);
return std::nullopt;
});
mouse_keyboard_update_event = Core::Timing::CreateEvent(
"HID::UpdateMouseKeyboardCallback",
[this, resource](std::uintptr_t user_data, s64 time, std::chrono::nanoseconds ns_late)
-> std::optional<std::chrono::nanoseconds> {
[this, resource](
s64 time, std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {
const auto guard = LockService();
resource->UpdateMouseKeyboard(user_data, ns_late);
resource->UpdateMouseKeyboard(ns_late);
return std::nullopt;
});
motion_update_event = Core::Timing::CreateEvent(
"HID::UpdateMotionCallback",
[this, resource](std::uintptr_t user_data, s64 time, std::chrono::nanoseconds ns_late)
-> std::optional<std::chrono::nanoseconds> {
[this, resource](
s64 time, std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {
const auto guard = LockService();
resource->UpdateMotion(user_data, ns_late);
resource->UpdateMotion(ns_late);
return std::nullopt;
});
@@ -314,10 +312,10 @@ IAppletResource::IAppletResource(Core::System& system_, std::shared_ptr<Resource
}

IAppletResource::~IAppletResource() {
system.CoreTiming().UnscheduleEvent(npad_update_event, 0);
system.CoreTiming().UnscheduleEvent(default_update_event, 0);
system.CoreTiming().UnscheduleEvent(mouse_keyboard_update_event, 0);
system.CoreTiming().UnscheduleEvent(motion_update_event, 0);
system.CoreTiming().UnscheduleEvent(npad_update_event);
system.CoreTiming().UnscheduleEvent(default_update_event);
system.CoreTiming().UnscheduleEvent(mouse_keyboard_update_event);
system.CoreTiming().UnscheduleEvent(motion_update_event);
resource_manager->FreeAppletResourceId(aruid);
}
@@ -81,10 +81,10 @@ public:
void EnablePadInput(u64 aruid, bool is_enabled);
void EnableTouchScreen(u64 aruid, bool is_enabled);

void UpdateControllers(std::uintptr_t user_data, std::chrono::nanoseconds ns_late);
void UpdateNpad(std::uintptr_t user_data, std::chrono::nanoseconds ns_late);
void UpdateMouseKeyboard(std::uintptr_t user_data, std::chrono::nanoseconds ns_late);
void UpdateMotion(std::uintptr_t user_data, std::chrono::nanoseconds ns_late);
void UpdateControllers(std::chrono::nanoseconds ns_late);
void UpdateNpad(std::chrono::nanoseconds ns_late);
void UpdateMouseKeyboard(std::chrono::nanoseconds ns_late);
void UpdateMotion(std::chrono::nanoseconds ns_late);

private:
Result CreateAppletResourceImpl(u64 aruid);
@@ -67,7 +67,7 @@ Nvnflinger::Nvnflinger(Core::System& system_, HosBinderDriverServer& hos_binder_
// Schedule the screen composition events
multi_composition_event = Core::Timing::CreateEvent(
"ScreenComposition",
[this](std::uintptr_t, s64 time,
[this](s64 time,
std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {
vsync_signal.Set();
return std::chrono::nanoseconds(GetNextTicks());
@@ -75,7 +75,7 @@ Nvnflinger::Nvnflinger(Core::System& system_, HosBinderDriverServer& hos_binder_

single_composition_event = Core::Timing::CreateEvent(
"ScreenComposition",
[this](std::uintptr_t, s64 time,
[this](s64 time,
std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {
const auto lock_guard = Lock();
Compose();
@@ -93,11 +93,11 @@ Nvnflinger::Nvnflinger(Core::System& system_, HosBinderDriverServer& hos_binder_

Nvnflinger::~Nvnflinger() {
if (system.IsMulticore()) {
system.CoreTiming().UnscheduleEvent(multi_composition_event, {});
system.CoreTiming().UnscheduleEvent(multi_composition_event);
vsync_thread.request_stop();
vsync_signal.Set();
} else {
system.CoreTiming().UnscheduleEvent(single_composition_event, {});
system.CoreTiming().UnscheduleEvent(single_composition_event);
}

ShutdownLayers();
@@ -190,15 +190,15 @@ CheatEngine::CheatEngine(System& system_, std::vector<CheatEntry> cheats_,
}

CheatEngine::~CheatEngine() {
core_timing.UnscheduleEvent(event, 0);
core_timing.UnscheduleEvent(event);
}

void CheatEngine::Initialize() {
event = Core::Timing::CreateEvent(
"CheatEngine::FrameCallback::" + Common::HexToString(metadata.main_nso_build_id),
[this](std::uintptr_t user_data, s64 time,
[this](s64 time,
std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {
FrameCallback(user_data, ns_late);
FrameCallback(ns_late);
return std::nullopt;
});
core_timing.ScheduleLoopingEvent(CHEAT_ENGINE_NS, CHEAT_ENGINE_NS, event);
@@ -239,7 +239,7 @@ void CheatEngine::Reload(std::vector<CheatEntry> reload_cheats) {

MICROPROFILE_DEFINE(Cheat_Engine, "Add-Ons", "Cheat Engine", MP_RGB(70, 200, 70));

void CheatEngine::FrameCallback(std::uintptr_t, std::chrono::nanoseconds ns_late) {
void CheatEngine::FrameCallback(std::chrono::nanoseconds ns_late) {
if (is_pending_reload.exchange(false)) {
vm.LoadProgram(cheats);
}
@@ -70,7 +70,7 @@ public:
void Reload(std::vector<CheatEntry> reload_cheats);

private:
void FrameCallback(std::uintptr_t user_data, std::chrono::nanoseconds ns_late);
void FrameCallback(std::chrono::nanoseconds ns_late);

DmntCheatVm vm;
CheatProcessMetadata metadata;
@@ -51,18 +51,17 @@ void MemoryWriteWidth(Core::Memory::Memory& memory, u32 width, VAddr addr, u64 v

Freezer::Freezer(Core::Timing::CoreTiming& core_timing_, Core::Memory::Memory& memory_)
: core_timing{core_timing_}, memory{memory_} {
event = Core::Timing::CreateEvent(
"MemoryFreezer::FrameCallback",
[this](std::uintptr_t user_data, s64 time,
std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {
FrameCallback(user_data, ns_late);
return std::nullopt;
});
event = Core::Timing::CreateEvent("MemoryFreezer::FrameCallback",
[this](s64 time, std::chrono::nanoseconds ns_late)
-> std::optional<std::chrono::nanoseconds> {
FrameCallback(ns_late);
return std::nullopt;
});
core_timing.ScheduleEvent(memory_freezer_ns, event);
}

Freezer::~Freezer() {
core_timing.UnscheduleEvent(event, 0);
core_timing.UnscheduleEvent(event);
}

void Freezer::SetActive(bool is_active) {
@@ -159,7 +158,7 @@ Freezer::Entries::const_iterator Freezer::FindEntry(VAddr address) const {
[address](const Entry& entry) { return entry.address == address; });
}

void Freezer::FrameCallback(std::uintptr_t, std::chrono::nanoseconds ns_late) {
void Freezer::FrameCallback(std::chrono::nanoseconds ns_late) {
if (!IsActive()) {
LOG_DEBUG(Common_Memory, "Memory freezer has been deactivated, ending callback events.");
return;
@@ -77,7 +77,7 @@ private:
Entries::iterator FindEntry(VAddr address);
Entries::const_iterator FindEntry(VAddr address) const;

void FrameCallback(std::uintptr_t user_data, std::chrono::nanoseconds ns_late);
void FrameCallback(std::chrono::nanoseconds ns_late);
void FillEntryReads();

std::atomic_bool active{false};
@@ -96,9 +96,9 @@ Id ImageType(EmitContext& ctx, const ImageDescriptor& desc, Id sampled_type) {
}

Id DefineVariable(EmitContext& ctx, Id type, std::optional<spv::BuiltIn> builtin,
spv::StorageClass storage_class) {
spv::StorageClass storage_class, std::optional<Id> initializer = std::nullopt) {
const Id pointer_type{ctx.TypePointer(storage_class, type)};
const Id id{ctx.AddGlobalVariable(pointer_type, storage_class)};
const Id id{ctx.AddGlobalVariable(pointer_type, storage_class, initializer)};
if (builtin) {
ctx.Decorate(id, spv::Decoration::BuiltIn, *builtin);
}
@@ -144,11 +144,12 @@ Id DefineInput(EmitContext& ctx, Id type, bool per_invocation,
}

Id DefineOutput(EmitContext& ctx, Id type, std::optional<u32> invocations,
std::optional<spv::BuiltIn> builtin = std::nullopt) {
std::optional<spv::BuiltIn> builtin = std::nullopt,
std::optional<Id> initializer = std::nullopt) {
if (invocations && ctx.stage == Stage::TessellationControl) {
type = ctx.TypeArray(type, ctx.Const(*invocations));
}
return DefineVariable(ctx, type, builtin, spv::StorageClass::Output);
return DefineVariable(ctx, type, builtin, spv::StorageClass::Output, initializer);
}

void DefineGenericOutput(EmitContext& ctx, size_t index, std::optional<u32> invocations) {
@@ -811,10 +812,14 @@ void EmitContext::DefineAttributeMemAccess(const Info& info) {
labels.push_back(OpLabel());
}
if (info.stores.ClipDistances()) {
literals.push_back(static_cast<u32>(IR::Attribute::ClipDistance0) >> 2);
labels.push_back(OpLabel());
literals.push_back(static_cast<u32>(IR::Attribute::ClipDistance4) >> 2);
labels.push_back(OpLabel());
if (profile.max_user_clip_distances >= 4) {
literals.push_back(static_cast<u32>(IR::Attribute::ClipDistance0) >> 2);
labels.push_back(OpLabel());
}
if (profile.max_user_clip_distances >= 8) {
literals.push_back(static_cast<u32>(IR::Attribute::ClipDistance4) >> 2);
labels.push_back(OpLabel());
}
}
OpSelectionMerge(end_block, spv::SelectionControlMask::MaskNone);
OpSwitch(compare_index, default_label, literals, labels);
@@ -843,17 +848,21 @@ void EmitContext::DefineAttributeMemAccess(const Info& info) {
++label_index;
}
if (info.stores.ClipDistances()) {
AddLabel(labels[label_index]);
const Id pointer{OpAccessChain(output_f32, clip_distances, masked_index)};
OpStore(pointer, store_value);
OpReturn();
++label_index;
AddLabel(labels[label_index]);
const Id fixed_index{OpIAdd(U32[1], masked_index, Const(4U))};
const Id pointer2{OpAccessChain(output_f32, clip_distances, fixed_index)};
OpStore(pointer2, store_value);
OpReturn();
++label_index;
if (profile.max_user_clip_distances >= 4) {
AddLabel(labels[label_index]);
const Id pointer{OpAccessChain(output_f32, clip_distances, masked_index)};
OpStore(pointer, store_value);
OpReturn();
++label_index;
}
if (profile.max_user_clip_distances >= 8) {
AddLabel(labels[label_index]);
const Id fixed_index{OpIAdd(U32[1], masked_index, Const(4U))};
const Id pointer{OpAccessChain(output_f32, clip_distances, fixed_index)};
OpStore(pointer, store_value);
OpReturn();
++label_index;
}
}
AddLabel(end_block);
OpUnreachable();
@@ -1532,9 +1541,16 @@ void EmitContext::DefineOutputs(const IR::Program& program) {
if (stage == Stage::Fragment) {
throw NotImplementedException("Storing ClipDistance in fragment stage");
}
const Id type{TypeArray(
F32[1], Const(std::min(info.used_clip_distances, profile.max_user_clip_distances)))};
clip_distances = DefineOutput(*this, type, invocations, spv::BuiltIn::ClipDistance);
if (profile.max_user_clip_distances > 0) {
const u32 used{std::min(profile.max_user_clip_distances, 8u)};
const std::array<Id, 8> zero{f32_zero_value, f32_zero_value, f32_zero_value,
f32_zero_value, f32_zero_value, f32_zero_value,
f32_zero_value, f32_zero_value};
const Id type{TypeArray(F32[1], Const(used))};
const Id initializer{ConstantComposite(type, std::span(zero).subspan(0, used))};
clip_distances =
DefineOutput(*this, type, invocations, spv::BuiltIn::ClipDistance, initializer);
}
}
if (info.stores[IR::Attribute::Layer] &&
(profile.support_viewport_index_layer_non_geometry || stage == Stage::Geometry)) {
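The DefineOutputs change above sizes the ClipDistance output by the profile's limit and gives it an all-zero initializer, so lanes the shader never writes still hold a defined value. A standalone sketch of the clamping and subspan selection involved, using plain floats in place of SPIR-V Ids (illustrative only, not the emitter's API):

```cpp
#include <algorithm>
#include <array>
#include <cstdint>
#include <iostream>
#include <span>

int main() {
    const std::uint32_t max_user_clip_distances = 6; // stand-in for the profile limit
    const std::uint32_t used = std::min(max_user_clip_distances, 8u);

    // Up to eight zero-valued elements; only the first `used` feed the initializer.
    const std::array<float, 8> zero{};
    const std::span<const float> initializer = std::span(zero).subspan(0, used);

    std::cout << "clip distance array size: " << initializer.size() << '\n';
    for (float v : initializer) {
        std::cout << v << ' ';
    }
    std::cout << '\n';
}
```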
@@ -16,20 +16,16 @@

namespace {
// Numbers are chosen randomly to make sure the correct one is given.
constexpr std::array<u64, 5> CB_IDS{{42, 144, 93, 1026, UINT64_C(0xFFFF7FFFF7FFFF)}};
constexpr std::array<u64, 5> calls_order{{2, 0, 1, 4, 3}};
std::array<s64, 5> delays{};

std::bitset<CB_IDS.size()> callbacks_ran_flags;
std::bitset<5> callbacks_ran_flags;
u64 expected_callback = 0;

template <unsigned int IDX>
std::optional<std::chrono::nanoseconds> HostCallbackTemplate(std::uintptr_t user_data, s64 time,
std::optional<std::chrono::nanoseconds> HostCallbackTemplate(s64 time,
std::chrono::nanoseconds ns_late) {
static_assert(IDX < CB_IDS.size(), "IDX out of range");
static_assert(IDX < callbacks_ran_flags.size(), "IDX out of range");
callbacks_ran_flags.set(IDX);
REQUIRE(CB_IDS[IDX] == user_data);
REQUIRE(CB_IDS[IDX] == CB_IDS[calls_order[expected_callback]]);
delays[IDX] = ns_late.count();
++expected_callback;
return std::nullopt;
@@ -76,7 +72,7 @@ TEST_CASE("CoreTiming[BasicOrder]", "[core]") {
const u64 order = calls_order[i];
const auto future_ns = std::chrono::nanoseconds{static_cast<s64>(i * one_micro + 100)};

core_timing.ScheduleEvent(future_ns, events[order], CB_IDS[order]);
core_timing.ScheduleEvent(future_ns, events[order]);
}
/// test pause
REQUIRE(callbacks_ran_flags.none());
@@ -118,7 +114,7 @@ TEST_CASE("CoreTiming[BasicOrderNoPausing]", "[core]") {
for (std::size_t i = 0; i < events.size(); i++) {
const u64 order = calls_order[i];
const auto future_ns = std::chrono::nanoseconds{static_cast<s64>(i * one_micro + 100)};
core_timing.ScheduleEvent(future_ns, events[order], CB_IDS[order]);
core_timing.ScheduleEvent(future_ns, events[order]);
}

const u64 end = core_timing.GetGlobalTimeNs().count();