core_timing: Use better reference tracking for EventType. (#3159)

* core_timing: Use better reference tracking for EventType.

- Moves ownership of the event to the caller, ensuring we don't fire events for destroyed objects.
- Removes the need for unique names - we won't be using this for save states anyway.
bunnei 2019-11-26 21:48:56 -05:00 committed by GitHub
parent 31daaa7911
commit ec0ce96c56
17 changed files with 103 additions and 161 deletions
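The bullet points above describe the new ownership model: Core::Timing::CreateEvent returns a std::shared_ptr<Core::Timing::EventType> that the registering object keeps alive, while CoreTiming itself only holds a std::weak_ptr, so a pending callback is skipped once its owner is gone. A minimal caller-side sketch of that pattern follows; the MyDevice class and its tick interval are hypothetical and only illustrate the API shown in the diff below.

#include <memory>
#include "core/core_timing.h"

// Hypothetical consumer of the new API: the owner holds the shared_ptr, so the
// scheduler's weak_ptr expires as soon as MyDevice is destroyed and any still
// pending callback is skipped instead of touching freed memory.
class MyDevice {
public:
    explicit MyDevice(Core::Timing::CoreTiming& core_timing) : core_timing{core_timing} {
        tick_event = Core::Timing::CreateEvent(
            "MyDevice::Tick", [this](u64 userdata, s64 cycles_late) { OnTick(cycles_late); });
        core_timing.ScheduleEvent(tick_interval, tick_event);
    }

    ~MyDevice() {
        // Unscheduling is still polite, but forgetting it no longer calls into a dead object.
        core_timing.UnscheduleEvent(tick_event, 0);
    }

private:
    void OnTick(s64 cycles_late) {
        // Re-arm the event, compensating for how late the callback fired.
        core_timing.ScheduleEvent(tick_interval - cycles_late, tick_event);
    }

    static constexpr s64 tick_interval = 50000; // arbitrary cycle count for the example

    std::shared_ptr<Core::Timing::EventType> tick_event;
    Core::Timing::CoreTiming& core_timing;
};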


@@ -37,7 +37,7 @@ Stream::Stream(Core::Timing::CoreTiming& core_timing, u32 sample_rate, Format fo
     : sample_rate{sample_rate}, format{format}, release_callback{std::move(release_callback)},
       sink_stream{sink_stream}, core_timing{core_timing}, name{std::move(name_)} {
-    release_event = core_timing.RegisterEvent(
+    release_event = Core::Timing::CreateEvent(
         name, [this](u64 userdata, s64 cycles_late) { ReleaseActiveBuffer(); });
 }


@@ -103,7 +103,8 @@ private:
     float game_volume = 1.0f; ///< The volume the game currently has set
     ReleaseCallback release_callback; ///< Buffer release callback for the stream
     State state{State::Stopped}; ///< Playback state of the stream
-    Core::Timing::EventType* release_event{}; ///< Core timing release event for the stream
+    std::shared_ptr<Core::Timing::EventType>
+        release_event; ///< Core timing release event for the stream
     BufferPtr active_buffer; ///< Actively playing buffer in the stream
     std::queue<BufferPtr> queued_buffers; ///< Buffers queued to be played in the stream
     std::queue<BufferPtr> released_buffers; ///< Buffers recently released from the stream


@@ -17,11 +17,15 @@ namespace Core::Timing {
 constexpr int MAX_SLICE_LENGTH = 10000;
+std::shared_ptr<EventType> CreateEvent(std::string name, TimedCallback&& callback) {
+    return std::make_shared<EventType>(std::move(callback), std::move(name));
+}
 struct CoreTiming::Event {
     s64 time;
     u64 fifo_order;
     u64 userdata;
-    const EventType* type;
+    std::weak_ptr<EventType> type;
     // Sort by time, unless the times are the same, in which case sort by
     // the order added to the queue
@@ -54,36 +58,15 @@ void CoreTiming::Initialize() {
     event_fifo_id = 0;
     const auto empty_timed_callback = [](u64, s64) {};
-    ev_lost = RegisterEvent("_lost_event", empty_timed_callback);
+    ev_lost = CreateEvent("_lost_event", empty_timed_callback);
 }
 void CoreTiming::Shutdown() {
     ClearPendingEvents();
-    UnregisterAllEvents();
 }
-EventType* CoreTiming::RegisterEvent(const std::string& name, TimedCallback callback) {
-    std::lock_guard guard{inner_mutex};
-    // check for existing type with same name.
-    // we want event type names to remain unique so that we can use them for serialization.
-    ASSERT_MSG(event_types.find(name) == event_types.end(),
-               "CoreTiming Event \"{}\" is already registered. Events should only be registered "
-               "during Init to avoid breaking save states.",
-               name.c_str());
-    auto info = event_types.emplace(name, EventType{callback, nullptr});
-    EventType* event_type = &info.first->second;
-    event_type->name = &info.first->first;
-    return event_type;
-}
-void CoreTiming::UnregisterAllEvents() {
-    ASSERT_MSG(event_queue.empty(), "Cannot unregister events with events pending");
-    event_types.clear();
-}
-void CoreTiming::ScheduleEvent(s64 cycles_into_future, const EventType* event_type, u64 userdata) {
-    ASSERT(event_type != nullptr);
+void CoreTiming::ScheduleEvent(s64 cycles_into_future, const std::shared_ptr<EventType>& event_type,
+                               u64 userdata) {
     std::lock_guard guard{inner_mutex};
     const s64 timeout = GetTicks() + cycles_into_future;
@@ -93,13 +76,15 @@ void CoreTiming::ScheduleEvent(s64 cycles_into_future, const EventType* event_ty
     }
     event_queue.emplace_back(Event{timeout, event_fifo_id++, userdata, event_type});
     std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
 }
-void CoreTiming::UnscheduleEvent(const EventType* event_type, u64 userdata) {
+void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u64 userdata) {
     std::lock_guard guard{inner_mutex};
     const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
-        return e.type == event_type && e.userdata == userdata;
+        return e.type.lock().get() == event_type.get() && e.userdata == userdata;
     });
     // Removing random items breaks the invariant so we have to re-establish it.
@@ -130,10 +115,12 @@ void CoreTiming::ClearPendingEvents() {
     event_queue.clear();
 }
-void CoreTiming::RemoveEvent(const EventType* event_type) {
+void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
     std::lock_guard guard{inner_mutex};
-    const auto itr = std::remove_if(event_queue.begin(), event_queue.end(),
-                                    [&](const Event& e) { return e.type == event_type; });
+    const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
+        return e.type.lock().get() == event_type.get();
+    });
     // Removing random items breaks the invariant so we have to re-establish it.
     if (itr != event_queue.end()) {
@@ -181,7 +168,11 @@ void CoreTiming::Advance() {
         std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>());
         event_queue.pop_back();
         inner_mutex.unlock();
-        evt.type->callback(evt.userdata, global_timer - evt.time);
+        if (auto event_type{evt.type.lock()}) {
+            event_type->callback(evt.userdata, global_timer - evt.time);
+        }
         inner_mutex.lock();
     }
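For readers less familiar with the weak_ptr idiom that the Advance() change above relies on, here is a standalone sketch (plain standard C++, not yuzu code) of why an expired owner simply means the callback is skipped:

#include <cassert>
#include <memory>

int main() {
    // Stands in for an EventType owned by some emulated device.
    auto owner = std::make_shared<int>(42);

    // What CoreTiming::Event::type now stores: a non-owning reference.
    std::weak_ptr<int> queued = owner;

    if (auto alive = queued.lock()) {
        assert(*alive == 42); // owner still alive: the callback would fire
    }

    owner.reset(); // the owning object is destroyed

    assert(queued.lock() == nullptr); // expired: Advance() skips the callback
    return 0;
}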


@@ -6,11 +6,12 @@
 #include <chrono>
 #include <functional>
+#include <memory>
 #include <mutex>
 #include <optional>
 #include <string>
-#include <unordered_map>
 #include <vector>
 #include "common/common_types.h"
 #include "common/threadsafe_queue.h"
@@ -21,10 +22,13 @@ using TimedCallback = std::function<void(u64 userdata, s64 cycles_late)>;
 /// Contains the characteristics of a particular event.
 struct EventType {
+    EventType(TimedCallback&& callback, std::string&& name)
+        : callback{std::move(callback)}, name{std::move(name)} {}
     /// The event's callback function.
     TimedCallback callback;
     /// A pointer to the name of the event.
-    const std::string* name;
+    const std::string name;
 };
 /**
@@ -57,31 +61,17 @@ public:
     /// Tears down all timing related functionality.
     void Shutdown();
-    /// Registers a core timing event with the given name and callback.
-    ///
-    /// @param name The name of the core timing event to register.
-    /// @param callback The callback to execute for the event.
-    ///
-    /// @returns An EventType instance representing the registered event.
-    ///
-    /// @pre The name of the event being registered must be unique among all
-    ///      registered events.
-    ///
-    EventType* RegisterEvent(const std::string& name, TimedCallback callback);
-    /// Unregisters all registered events thus far. Note: not thread unsafe
-    void UnregisterAllEvents();
     /// After the first Advance, the slice lengths and the downcount will be reduced whenever an
     /// event is scheduled earlier than the current values.
     ///
     /// Scheduling from a callback will not update the downcount until the Advance() completes.
-    void ScheduleEvent(s64 cycles_into_future, const EventType* event_type, u64 userdata = 0);
+    void ScheduleEvent(s64 cycles_into_future, const std::shared_ptr<EventType>& event_type,
+                       u64 userdata = 0);
-    void UnscheduleEvent(const EventType* event_type, u64 userdata);
+    void UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u64 userdata);
     /// We only permit one event of each type in the queue at a time.
-    void RemoveEvent(const EventType* event_type);
+    void RemoveEvent(const std::shared_ptr<EventType>& event_type);
     void ForceExceptionCheck(s64 cycles);
@@ -148,13 +138,18 @@ private:
     std::vector<Event> event_queue;
     u64 event_fifo_id = 0;
-    // Stores each element separately as a linked list node so pointers to elements
-    // remain stable regardless of rehashes/resizing.
-    std::unordered_map<std::string, EventType> event_types;
-    EventType* ev_lost = nullptr;
+    std::shared_ptr<EventType> ev_lost;
     std::mutex inner_mutex;
 };
+/// Creates a core timing event with the given name and callback.
+///
+/// @param name The name of the core timing event to create.
+/// @param callback The callback to execute for the event.
+///
+/// @returns An EventType instance representing the created event.
+///
+std::shared_ptr<EventType> CreateEvent(std::string name, TimedCallback&& callback);
 } // namespace Core::Timing


@@ -11,8 +11,7 @@
 namespace Core::Hardware {
 InterruptManager::InterruptManager(Core::System& system_in) : system(system_in) {
-    gpu_interrupt_event =
-        system.CoreTiming().RegisterEvent("GPUInterrupt", [this](u64 message, s64) {
+    gpu_interrupt_event = Core::Timing::CreateEvent("GPUInterrupt", [this](u64 message, s64) {
         auto nvdrv = system.ServiceManager().GetService<Service::Nvidia::NVDRV>("nvdrv");
         const u32 syncpt = static_cast<u32>(message >> 32);
         const u32 value = static_cast<u32>(message);


@@ -4,6 +4,8 @@
 #pragma once
+#include <memory>
 #include "common/common_types.h"
 namespace Core {
@@ -25,7 +27,7 @@
 private:
     Core::System& system;
-    Core::Timing::EventType* gpu_interrupt_event{};
+    std::shared_ptr<Core::Timing::EventType> gpu_interrupt_event;
 };
 } // namespace Core::Hardware


@@ -139,12 +139,12 @@ struct KernelCore::Impl {
     void InitializeThreads() {
         thread_wakeup_event_type =
-            system.CoreTiming().RegisterEvent("ThreadWakeupCallback", ThreadWakeupCallback);
+            Core::Timing::CreateEvent("ThreadWakeupCallback", ThreadWakeupCallback);
     }
     void InitializePreemption() {
-        preemption_event = system.CoreTiming().RegisterEvent(
-            "PreemptionCallback", [this](u64 userdata, s64 cycles_late) {
+        preemption_event =
+            Core::Timing::CreateEvent("PreemptionCallback", [this](u64 userdata, s64 cycles_late) {
                 global_scheduler.PreemptThreads();
                 s64 time_interval = Core::Timing::msToCycles(std::chrono::milliseconds(10));
                 system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
@@ -166,8 +166,9 @@ struct KernelCore::Impl {
     std::shared_ptr<ResourceLimit> system_resource_limit;
-    Core::Timing::EventType* thread_wakeup_event_type = nullptr;
-    Core::Timing::EventType* preemption_event = nullptr;
+    std::shared_ptr<Core::Timing::EventType> thread_wakeup_event_type;
+    std::shared_ptr<Core::Timing::EventType> preemption_event;
     // TODO(yuriks): This can be removed if Thread objects are explicitly pooled in the future,
     // allowing us to simply use a pool index or similar.
     Kernel::HandleTable thread_wakeup_callback_handle_table;
@@ -269,7 +270,7 @@ u64 KernelCore::CreateNewUserProcessID() {
     return impl->next_user_process_id++;
 }
-Core::Timing::EventType* KernelCore::ThreadWakeupCallbackEventType() const {
+const std::shared_ptr<Core::Timing::EventType>& KernelCore::ThreadWakeupCallbackEventType() const {
     return impl->thread_wakeup_event_type;
 }


@@ -113,7 +113,7 @@ private:
     u64 CreateNewThreadID();
     /// Retrieves the event type used for thread wakeup callbacks.
-    Core::Timing::EventType* ThreadWakeupCallbackEventType() const;
+    const std::shared_ptr<Core::Timing::EventType>& ThreadWakeupCallbackEventType() const;
     /// Provides a reference to the thread wakeup callback handle table.
     Kernel::HandleTable& ThreadWakeupCallbackHandleTable();


@@ -77,15 +77,14 @@ IAppletResource::IAppletResource(Core::System& system)
     GetController<Controller_Stubbed>(HidController::Unknown3).SetCommonHeaderOffset(0x5000);
     // Register update callbacks
-    auto& core_timing = system.CoreTiming();
     pad_update_event =
-        core_timing.RegisterEvent("HID::UpdatePadCallback", [this](u64 userdata, s64 cycles_late) {
+        Core::Timing::CreateEvent("HID::UpdatePadCallback", [this](u64 userdata, s64 cycles_late) {
             UpdateControllers(userdata, cycles_late);
         });
     // TODO(shinyquagsire23): Other update callbacks? (accel, gyro?)
-    core_timing.ScheduleEvent(pad_update_ticks, pad_update_event);
+    system.CoreTiming().ScheduleEvent(pad_update_ticks, pad_update_event);
     ReloadInputDevices();
 }


@@ -69,7 +69,7 @@ private:
     std::shared_ptr<Kernel::SharedMemory> shared_mem;
-    Core::Timing::EventType* pad_update_event;
+    std::shared_ptr<Core::Timing::EventType> pad_update_event;
     Core::System& system;
     std::array<std::unique_ptr<ControllerBase>, static_cast<size_t>(HidController::MaxControllers)>


@@ -37,8 +37,8 @@ NVFlinger::NVFlinger(Core::System& system) : system(system) {
     displays.emplace_back(4, "Null", system);
     // Schedule the screen composition events
-    composition_event = system.CoreTiming().RegisterEvent(
-        "ScreenComposition", [this](u64 userdata, s64 cycles_late) {
+    composition_event =
+        Core::Timing::CreateEvent("ScreenComposition", [this](u64 userdata, s64 cycles_late) {
             Compose();
             const auto ticks =
                 Settings::values.force_30fps_mode ? frame_ticks_30fps : GetNextTicks();


@@ -103,7 +103,7 @@ private:
     u32 swap_interval = 1;
     /// Event that handles screen composition.
-    Core::Timing::EventType* composition_event;
+    std::shared_ptr<Core::Timing::EventType> composition_event;
     Core::System& system;
 };


@@ -186,7 +186,7 @@ CheatEngine::~CheatEngine() {
 }
 void CheatEngine::Initialize() {
-    event = core_timing.RegisterEvent(
+    event = Core::Timing::CreateEvent(
         "CheatEngine::FrameCallback::" + Common::HexToString(metadata.main_nso_build_id),
         [this](u64 userdata, s64 cycles_late) { FrameCallback(userdata, cycles_late); });
     core_timing.ScheduleEvent(CHEAT_ENGINE_TICKS, event);


@@ -5,6 +5,7 @@
 #pragma once
 #include <atomic>
+#include <memory>
 #include <vector>
 #include "common/common_types.h"
 #include "core/memory/dmnt_cheat_types.h"
@@ -78,7 +79,7 @@ private:
     std::vector<CheatEntry> cheats;
     std::atomic_bool is_pending_reload{false};
-    Core::Timing::EventType* event{};
+    std::shared_ptr<Core::Timing::EventType> event;
     Core::Timing::CoreTiming& core_timing;
     Core::System& system;
 };


@@ -54,7 +54,7 @@ void MemoryWriteWidth(u32 width, VAddr addr, u64 value) {
 } // Anonymous namespace
 Freezer::Freezer(Core::Timing::CoreTiming& core_timing) : core_timing(core_timing) {
-    event = core_timing.RegisterEvent(
+    event = Core::Timing::CreateEvent(
         "MemoryFreezer::FrameCallback",
         [this](u64 userdata, s64 cycles_late) { FrameCallback(userdata, cycles_late); });
     core_timing.ScheduleEvent(MEMORY_FREEZER_TICKS, event);


@@ -5,6 +5,7 @@
 #pragma once
 #include <atomic>
+#include <memory>
 #include <mutex>
 #include <optional>
 #include <vector>
@@ -75,7 +76,7 @@ private:
     mutable std::mutex entries_mutex;
     std::vector<Entry> entries;
-    Core::Timing::EventType* event;
+    std::shared_ptr<Core::Timing::EventType> event;
     Core::Timing::CoreTiming& core_timing;
 };


@@ -7,7 +7,9 @@
 #include <array>
 #include <bitset>
 #include <cstdlib>
+#include <memory>
 #include <string>
 #include "common/file_util.h"
 #include "core/core.h"
 #include "core/core_timing.h"
@@ -65,11 +67,16 @@ TEST_CASE("CoreTiming[BasicOrder]", "[core]") {
     ScopeInit guard;
     auto& core_timing = guard.core_timing;
-    Core::Timing::EventType* cb_a = core_timing.RegisterEvent("callbackA", CallbackTemplate<0>);
-    Core::Timing::EventType* cb_b = core_timing.RegisterEvent("callbackB", CallbackTemplate<1>);
-    Core::Timing::EventType* cb_c = core_timing.RegisterEvent("callbackC", CallbackTemplate<2>);
-    Core::Timing::EventType* cb_d = core_timing.RegisterEvent("callbackD", CallbackTemplate<3>);
-    Core::Timing::EventType* cb_e = core_timing.RegisterEvent("callbackE", CallbackTemplate<4>);
+    std::shared_ptr<Core::Timing::EventType> cb_a =
+        Core::Timing::CreateEvent("callbackA", CallbackTemplate<0>);
+    std::shared_ptr<Core::Timing::EventType> cb_b =
+        Core::Timing::CreateEvent("callbackB", CallbackTemplate<1>);
+    std::shared_ptr<Core::Timing::EventType> cb_c =
+        Core::Timing::CreateEvent("callbackC", CallbackTemplate<2>);
+    std::shared_ptr<Core::Timing::EventType> cb_d =
+        Core::Timing::CreateEvent("callbackD", CallbackTemplate<3>);
+    std::shared_ptr<Core::Timing::EventType> cb_e =
+        Core::Timing::CreateEvent("callbackE", CallbackTemplate<4>);
     // Enter slice 0
     core_timing.ResetRun();
@@ -99,8 +106,8 @@ TEST_CASE("CoreTiming[FairSharing]", "[core]") {
     ScopeInit guard;
     auto& core_timing = guard.core_timing;
-    Core::Timing::EventType* empty_callback =
-        core_timing.RegisterEvent("empty_callback", EmptyCallback);
+    std::shared_ptr<Core::Timing::EventType> empty_callback =
+        Core::Timing::CreateEvent("empty_callback", EmptyCallback);
     callbacks_done = 0;
     u64 MAX_CALLBACKS = 10;
@@ -133,8 +140,10 @@ TEST_CASE("Core::Timing[PredictableLateness]", "[core]") {
     ScopeInit guard;
     auto& core_timing = guard.core_timing;
-    Core::Timing::EventType* cb_a = core_timing.RegisterEvent("callbackA", CallbackTemplate<0>);
-    Core::Timing::EventType* cb_b = core_timing.RegisterEvent("callbackB", CallbackTemplate<1>);
+    std::shared_ptr<Core::Timing::EventType> cb_a =
+        Core::Timing::CreateEvent("callbackA", CallbackTemplate<0>);
+    std::shared_ptr<Core::Timing::EventType> cb_b =
+        Core::Timing::CreateEvent("callbackB", CallbackTemplate<1>);
     // Enter slice 0
     core_timing.ResetRun();
@@ -145,60 +154,3 @@ TEST_CASE("Core::Timing[PredictableLateness]", "[core]") {
     AdvanceAndCheck(core_timing, 0, 0, 10, -10); // (100 - 10)
     AdvanceAndCheck(core_timing, 1, 1, 50, -50);
 }
-namespace ChainSchedulingTest {
-static int reschedules = 0;
-static void RescheduleCallback(Core::Timing::CoreTiming& core_timing, u64 userdata,
-                               s64 cycles_late) {
-    --reschedules;
-    REQUIRE(reschedules >= 0);
-    REQUIRE(lateness == cycles_late);
-    if (reschedules > 0) {
-        core_timing.ScheduleEvent(1000, reinterpret_cast<Core::Timing::EventType*>(userdata),
-                                  userdata);
-    }
-}
-} // namespace ChainSchedulingTest
-TEST_CASE("CoreTiming[ChainScheduling]", "[core]") {
-    using namespace ChainSchedulingTest;
-    ScopeInit guard;
-    auto& core_timing = guard.core_timing;
-    Core::Timing::EventType* cb_a = core_timing.RegisterEvent("callbackA", CallbackTemplate<0>);
-    Core::Timing::EventType* cb_b = core_timing.RegisterEvent("callbackB", CallbackTemplate<1>);
-    Core::Timing::EventType* cb_c = core_timing.RegisterEvent("callbackC", CallbackTemplate<2>);
-    Core::Timing::EventType* cb_rs = core_timing.RegisterEvent(
-        "callbackReschedule", [&core_timing](u64 userdata, s64 cycles_late) {
-            RescheduleCallback(core_timing, userdata, cycles_late);
-        });
-    // Enter slice 0
-    core_timing.ResetRun();
-    core_timing.ScheduleEvent(800, cb_a, CB_IDS[0]);
-    core_timing.ScheduleEvent(1000, cb_b, CB_IDS[1]);
-    core_timing.ScheduleEvent(2200, cb_c, CB_IDS[2]);
-    core_timing.ScheduleEvent(1000, cb_rs, reinterpret_cast<u64>(cb_rs));
-    REQUIRE(800 == core_timing.GetDowncount());
-    reschedules = 3;
-    AdvanceAndCheck(core_timing, 0, 0); // cb_a
-    AdvanceAndCheck(core_timing, 1, 1); // cb_b, cb_rs
-    REQUIRE(2 == reschedules);
-    core_timing.AddTicks(core_timing.GetDowncount());
-    core_timing.Advance(); // cb_rs
-    core_timing.SwitchContext(3);
-    REQUIRE(1 == reschedules);
-    REQUIRE(200 == core_timing.GetDowncount());
-    AdvanceAndCheck(core_timing, 2, 3); // cb_c
-    core_timing.AddTicks(core_timing.GetDowncount());
-    core_timing.Advance(); // cb_rs
-    REQUIRE(0 == reschedules);
-}