diff --git a/src/common/fiber.cpp b/src/common/fiber.cpp
index f97ad433b..1c1d09ccb 100644
--- a/src/common/fiber.cpp
+++ b/src/common/fiber.cpp
@@ -54,9 +54,7 @@ Fiber::Fiber(std::function<void(void*)>&& entry_point_func, void* start_paramete
     impl->handle = CreateFiber(default_stack_size, &FiberStartFunc, this);
 }
 
-Fiber::Fiber() {
-    impl = std::make_unique<FiberImpl>();
-}
+Fiber::Fiber() : impl{std::make_unique<FiberImpl>()} {}
 
 Fiber::~Fiber() {
     if (released) {
@@ -116,8 +114,8 @@ std::shared_ptr<Fiber> Fiber::ThreadToFiber() {
 
 struct Fiber::FiberImpl {
     alignas(64) std::array<u8, default_stack_size> stack;
-    u8* stack_limit;
     alignas(64) std::array<u8, default_stack_size> rewind_stack;
+    u8* stack_limit;
     u8* rewind_stack_limit;
     boost::context::detail::fcontext_t context;
     boost::context::detail::fcontext_t rewind_context;
@@ -168,9 +166,7 @@ void Fiber::SetRewindPoint(std::function<void(void*)>&& rewind_func, void* start
     rewind_parameter = start_parameter;
 }
 
-Fiber::Fiber() {
-    impl = std::make_unique<FiberImpl>();
-}
+Fiber::Fiber() : impl{std::make_unique<FiberImpl>()} {}
 
 Fiber::~Fiber() {
     if (released) {
diff --git a/src/common/spin_lock.cpp b/src/common/spin_lock.cpp
index c7b46aac6..c1524220f 100644
--- a/src/common/spin_lock.cpp
+++ b/src/common/spin_lock.cpp
@@ -20,7 +20,7 @@
 
 namespace {
 
-void thread_pause() {
+void ThreadPause() {
 #if __x86_64__
     _mm_pause();
 #elif __aarch64__ && _MSC_VER
@@ -30,13 +30,13 @@ void thread_pause() {
 #endif
 }
 
-} // namespace
+} // Anonymous namespace
 
 namespace Common {
 
 void SpinLock::lock() {
     while (lck.test_and_set(std::memory_order_acquire)) {
-        thread_pause();
+        ThreadPause();
     }
 }
 
diff --git a/src/common/spin_lock.h b/src/common/spin_lock.h
index 70282a961..1df5528c4 100644
--- a/src/common/spin_lock.h
+++ b/src/common/spin_lock.h
@@ -8,6 +8,11 @@
 
 namespace Common {
 
+/**
+ * SpinLock class
+ * A lock, similar to a mutex, that forces a thread to spin-wait instead of
+ * calling the supervisor. Should be used on short sequences of code.
+ */
 class SpinLock {
 public:
     void lock();
diff --git a/src/common/x64/native_clock.cpp b/src/common/x64/native_clock.cpp
index f1bc60fd2..424b39b1f 100644
--- a/src/common/x64/native_clock.cpp
+++ b/src/common/x64/native_clock.cpp
@@ -3,6 +3,7 @@
 // Refer to the license.txt file included.
 #include <chrono>
+#include <mutex>
 #include <thread>
 
 #ifdef _MSC_VER
@@ -52,7 +53,7 @@ NativeClock::NativeClock(u64 emulated_cpu_frequency, u64 emulated_clock_frequenc
 }
 
 u64 NativeClock::GetRTSC() {
-    rtsc_serialize.lock();
+    std::scoped_lock scope{rtsc_serialize};
     _mm_mfence();
     const u64 current_measure = __rdtsc();
     u64 diff = current_measure - last_measure;
@@ -61,7 +62,6 @@ u64 NativeClock::GetRTSC() {
         last_measure = current_measure;
     }
     accumulated_ticks += diff;
-    rtsc_serialize.unlock();
     /// The clock cannot be more precise than the guest timer, remove the lower bits
     return accumulated_ticks & inaccuracy_mask;
 }
diff --git a/src/core/arm/arm_interface.h b/src/core/arm/arm_interface.h
index 0c1d6ac39..1f24051e4 100644
--- a/src/core/arm/arm_interface.h
+++ b/src/core/arm/arm_interface.h
@@ -148,7 +148,7 @@ public:
      */
     virtual void SetTPIDR_EL0(u64 value) = 0;
 
-    virtual void ChangeProcessorId(std::size_t new_core_id) = 0;
+    virtual void ChangeProcessorID(std::size_t new_core_id) = 0;
 
     virtual void SaveContext(ThreadContext32& ctx) = 0;
     virtual void SaveContext(ThreadContext64& ctx) = 0;
diff --git a/src/core/arm/cpu_interrupt_handler.h b/src/core/arm/cpu_interrupt_handler.h
index 91c31a271..3d062d326 100644
--- a/src/core/arm/cpu_interrupt_handler.h
+++ b/src/core/arm/cpu_interrupt_handler.h
@@ -23,7 +23,7 @@ public:
     CPUInterruptHandler(CPUInterruptHandler&&) = default;
     CPUInterruptHandler& operator=(CPUInterruptHandler&&) = default;
 
-    constexpr bool IsInterrupted() const {
+    bool IsInterrupted() const {
         return is_interrupted;
     }
 
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index cfda12098..0d4ab95b7 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -107,7 +107,7 @@ public:
     u64 GetTicksRemaining() override {
         if (parent.uses_wall_clock) {
             if (!parent.interrupt_handlers[parent.core_index].IsInterrupted()) {
-                return 1000U;
+                return minimum_run_cycles;
             }
             return 0U;
         }
@@ -116,6 +116,7 @@ public:
 
     ARM_Dynarmic_32& parent;
     std::size_t num_interpreted_instructions{};
+    static constexpr u64 minimum_run_cycles = 1000U;
 };
 
 std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable& page_table,
@@ -214,7 +215,7 @@ void ARM_Dynarmic_32::SetTPIDR_EL0(u64 value) {
     cp15->uprw = static_cast<u32>(value);
 }
 
-void ARM_Dynarmic_32::ChangeProcessorId(std::size_t new_core_id) {
+void ARM_Dynarmic_32::ChangeProcessorID(std::size_t new_core_id) {
     jit->ChangeProcessorID(new_core_id);
 }
 
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.h b/src/core/arm/dynarmic/arm_dynarmic_32.h
index d9c0bfede..2bab31b92 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.h
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.h
@@ -47,7 +47,7 @@ public:
     void SetTlsAddress(VAddr address) override;
     void SetTPIDR_EL0(u64 value) override;
     u64 GetTPIDR_EL0() const override;
-    void ChangeProcessorId(std::size_t new_core_id) override;
+    void ChangeProcessorID(std::size_t new_core_id) override;
 
     void SaveContext(ThreadContext32& ctx) override;
     void SaveContext(ThreadContext64& ctx) override {}
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index 35a99e28a..790981034 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -144,7 +144,7 @@ public:
     u64 GetTicksRemaining() override {
         if (parent.uses_wall_clock) {
             if (!parent.interrupt_handlers[parent.core_index].IsInterrupted()) {
-                return 1000U;
+                return minimum_run_cycles;
             }
             return 0U;
         }
@@ -159,6 +159,7 @@ public:
     std::size_t num_interpreted_instructions = 0;
     u64 tpidrro_el0 = 0;
     u64 tpidr_el0 = 0;
+    static constexpr u64 minimum_run_cycles = 1000U;
 };
 
 std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable& page_table,
@@ -271,7 +272,7 @@ void ARM_Dynarmic_64::SetTPIDR_EL0(u64 value) {
     cb->tpidr_el0 = value;
 }
 
-void ARM_Dynarmic_64::ChangeProcessorId(std::size_t new_core_id) {
+void ARM_Dynarmic_64::ChangeProcessorID(std::size_t new_core_id) {
     jit->ChangeProcessorID(new_core_id);
 }
 
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.h b/src/core/arm/dynarmic/arm_dynarmic_64.h
index c74fcbcea..403c55961 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.h
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.h
@@ -45,7 +45,7 @@ public:
     void SetTlsAddress(VAddr address) override;
     void SetTPIDR_EL0(u64 value) override;
     u64 GetTPIDR_EL0() const override;
-    void ChangeProcessorId(std::size_t new_core_id) override;
+    void ChangeProcessorID(std::size_t new_core_id) override;
 
     void SaveContext(ThreadContext32& ctx) override {}
     void SaveContext(ThreadContext64& ctx) override;
diff --git a/src/core/arm/unicorn/arm_unicorn.cpp b/src/core/arm/unicorn/arm_unicorn.cpp
index 35e8f42e8..1df3f3ed1 100644
--- a/src/core/arm/unicorn/arm_unicorn.cpp
+++ b/src/core/arm/unicorn/arm_unicorn.cpp
@@ -159,7 +159,7 @@ void ARM_Unicorn::SetTPIDR_EL0(u64 value) {
     CHECKED(uc_reg_write(uc, UC_ARM64_REG_TPIDR_EL0, &value));
 }
 
-void ARM_Unicorn::ChangeProcessorId(std::size_t new_core_id) {
+void ARM_Unicorn::ChangeProcessorID(std::size_t new_core_id) {
     core_index = new_core_id;
 }
 
diff --git a/src/core/arm/unicorn/arm_unicorn.h b/src/core/arm/unicorn/arm_unicorn.h
index 8ace8b86f..810aff311 100644
--- a/src/core/arm/unicorn/arm_unicorn.h
+++ b/src/core/arm/unicorn/arm_unicorn.h
@@ -36,7 +36,7 @@ public:
     void SetTlsAddress(VAddr address) override;
     void SetTPIDR_EL0(u64 value) override;
     u64 GetTPIDR_EL0() const override;
-    void ChangeProcessorId(std::size_t new_core_id) override;
+    void ChangeProcessorID(std::size_t new_core_id) override;
     void PrepareReschedule() override;
     void ClearExclusiveState() override;
     void ExecuteInstructions(std::size_t num_instructions);
diff --git a/src/core/core.cpp b/src/core/core.cpp
index 8256ec0fc..1a243c515 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -443,7 +443,7 @@ bool System::IsPoweredOn() const {
 }
 
 void System::PrepareReschedule() {
-    // impl->CurrentPhysicalCore().Stop();
+    // Deprecated, does nothing, kept for backward compatibility.
 }
 
 void System::PrepareReschedule(const u32 core_index) {
diff --git a/src/core/core.h b/src/core/core.h
index 133ecb8e1..5c6cfbffe 100644
--- a/src/core/core.h
+++ b/src/core/core.h
@@ -138,13 +138,13 @@ public:
 
     /**
     * Run the OS and Application
-     * This function will start emulation and run the competent devices
+     * This function will start emulation and run the relevant devices
      */
     ResultStatus Run();
 
     /**
     * Pause the OS and Application
-     * This function will pause emulation and stop the competent devices
+     * This function will pause emulation and stop the relevant devices
      */
     ResultStatus Pause();
 
diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp
index 1aa89a1cc..5c83c41a4 100644
--- a/src/core/core_timing.cpp
+++ b/src/core/core_timing.cpp
@@ -45,9 +45,9 @@ CoreTiming::CoreTiming() {
 CoreTiming::~CoreTiming() = default;
 
 void CoreTiming::ThreadEntry(CoreTiming& instance) {
-    std::string name = "yuzu:HostTiming";
-    MicroProfileOnThreadCreate(name.c_str());
-    Common::SetCurrentThreadName(name.c_str());
+    constexpr char name[] = "yuzu:HostTiming";
+    MicroProfileOnThreadCreate(name);
+    Common::SetCurrentThreadName(name);
     Common::SetCurrentThreadPriority(Common::ThreadPriority::VeryHigh);
     instance.on_thread_init();
     instance.ThreadLoop();
@@ -108,18 +108,19 @@ bool CoreTiming::HasPendingEvents() const {
 void CoreTiming::ScheduleEvent(s64 ns_into_future, const std::shared_ptr<EventType>& event_type,
                                u64 userdata) {
-    basic_lock.lock();
-    const u64 timeout = static_cast<u64>(GetGlobalTimeNs().count() + ns_into_future);
+    {
+        std::scoped_lock scope{basic_lock};
+        const u64 timeout = static_cast<u64>(GetGlobalTimeNs().count() + ns_into_future);
 
-    event_queue.emplace_back(Event{timeout, event_fifo_id++, userdata, event_type});
+        event_queue.emplace_back(Event{timeout, event_fifo_id++, userdata, event_type});
 
-    std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
-    basic_lock.unlock();
+        std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
+    }
 
     event.Set();
 }
 
 void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u64 userdata) {
-    basic_lock.lock();
+    std::scoped_lock scope{basic_lock};
     const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
         return e.type.lock().get() == event_type.get() && e.userdata == userdata;
     });
@@ -129,7 +130,6 @@ void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u
         event_queue.erase(itr, event_queue.end());
         std::make_heap(event_queue.begin(), event_queue.end(), std::greater<>());
     }
-    basic_lock.unlock();
 }
 
 void CoreTiming::AddTicks(u64 ticks) {
@@ -187,8 +187,8 @@ void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
 }
 
 std::optional<s64> CoreTiming::Advance() {
-    advance_lock.lock();
-    basic_lock.lock();
+    std::scoped_lock advance_scope{advance_lock};
+    std::scoped_lock basic_scope{basic_lock};
     global_timer = GetGlobalTimeNs().count();
 
     while (!event_queue.empty() && event_queue.front().time <= global_timer) {
@@ -207,12 +207,8 @@ std::optional<s64> CoreTiming::Advance() {
 
     if (!event_queue.empty()) {
         const s64 next_time = event_queue.front().time - global_timer;
-        basic_lock.unlock();
-        advance_lock.unlock();
         return next_time;
     } else {
-        basic_lock.unlock();
-        advance_lock.unlock();
         return std::nullopt;
     }
 }
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index dbb75416d..1f2af7a1b 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -472,16 +472,12 @@ const Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() const {
 }
 
 void KernelCore::InvalidateAllInstructionCaches() {
-    if (!IsMulticore()) {
-        auto& threads = GlobalScheduler().GetThreadList();
-        for (auto& thread : threads) {
-            if (!thread->IsHLEThread()) {
-                auto& arm_interface = thread->ArmInterface();
-                arm_interface.ClearInstructionCache();
-            }
+    auto& threads = GlobalScheduler().GetThreadList();
+    for (auto& thread : threads) {
+        if (!thread->IsHLEThread()) {
+            auto& arm_interface = thread->ArmInterface();
+            arm_interface.ClearInstructionCache();
         }
-    } else {
-        UNIMPLEMENTED_MSG("Cache Invalidation unimplemented for multicore");
     }
 }
 
diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp
index c82c60a16..c6bbdb080 100644
--- a/src/core/hle/kernel/physical_core.cpp
+++ b/src/core/hle/kernel/physical_core.cpp
@@ -37,6 +37,10 @@ void PhysicalCore::Shutdown() {
     scheduler.Shutdown();
 }
 
+bool PhysicalCore::IsInterrupted() const {
+    return interrupt_handler.IsInterrupted();
+}
+
 void PhysicalCore::Interrupt() {
     guard->lock();
     interrupt_handler.SetInterrupt(true);
diff --git a/src/core/hle/kernel/physical_core.h b/src/core/hle/kernel/physical_core.h
index 85f6dec05..d7a7a951c 100644
--- a/src/core/hle/kernel/physical_core.h
+++ b/src/core/hle/kernel/physical_core.h
@@ -7,8 +7,6 @@
 #include <cstddef>
 #include <memory>
 
-#include "core/arm/cpu_interrupt_handler.h"
-
 namespace Common {
 class SpinLock;
 }
@@ -19,6 +17,7 @@ class Scheduler;
 
 namespace Core {
 class ARM_Interface;
+class CPUInterruptHandler;
 class ExclusiveMonitor;
 class System;
 } // namespace Core
@@ -45,9 +44,7 @@ public:
     void ClearInterrupt();
 
     /// Check if this core is interrupted
-    bool IsInterrupted() const {
-        return interrupt_handler.IsInterrupted();
-    }
+    bool IsInterrupted() const;
 
     // Shutdown this physical core.
     void Shutdown();
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 61b8a396a..2b12c0dbf 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -658,7 +658,7 @@ void Scheduler::Reload() {
         cpu_core.LoadContext(thread->GetContext64());
         cpu_core.SetTlsAddress(thread->GetTLSAddress());
         cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
-        cpu_core.ChangeProcessorId(this->core_id);
+        cpu_core.ChangeProcessorID(this->core_id);
         cpu_core.ClearExclusiveState();
     }
 }
@@ -691,7 +691,7 @@ void Scheduler::SwitchContextStep2() {
         cpu_core.LoadContext(new_thread->GetContext64());
         cpu_core.SetTlsAddress(new_thread->GetTLSAddress());
         cpu_core.SetTPIDR_EL0(new_thread->GetTPIDR_EL0());
-        cpu_core.ChangeProcessorId(this->core_id);
+        cpu_core.ChangeProcessorID(this->core_id);
         cpu_core.ClearExclusiveState();
     }
 }
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index 348107160..b3b4b5169 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -240,6 +240,10 @@ public:
         return switch_fiber;
     }
 
+    const std::shared_ptr<Common::Fiber>& ControlContext() const {
+        return switch_fiber;
+    }
+
 private:
     friend class GlobalScheduler;
 
diff --git a/src/tests/common/fibers.cpp b/src/tests/common/fibers.cpp
index 12536b6d8..4fd92428f 100644
--- a/src/tests/common/fibers.cpp
+++ b/src/tests/common/fibers.cpp
@@ -68,7 +68,7 @@ static void ThreadStart1(u32 id, TestControl1& test_control) {
  * doing all the work required.
  */
 TEST_CASE("Fibers::Setup", "[common]") {
-    constexpr u32 num_threads = 7;
+    constexpr std::size_t num_threads = 7;
     TestControl1 test_control{};
     test_control.thread_fibers.resize(num_threads);
     test_control.work_fibers.resize(num_threads);
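Note (reviewer illustration, not part of the patch): the recurring change in native_clock.cpp and
core_timing.cpp above replaces explicit lock()/unlock() pairs on Common::SpinLock with
std::scoped_lock, so the lock is released on every return path. Below is a minimal, self-contained
sketch of that pattern. The lock() loop mirrors what the spin_lock.cpp hunks show; unlock() via
atomic_flag::clear, the u64 alias, and the AccumulateTicks() function are assumptions for
illustration only, not the emulator's actual code.

    #include <atomic>
    #include <cstdint>
    #include <mutex>

    using u64 = std::uint64_t; // stand-in for the codebase's u64 alias (assumption)

    class SpinLock {
    public:
        void lock() {
            // Spin until the flag is acquired; the patch's ThreadPause() adds a CPU
            // pause/yield hint inside this loop to ease contention on the core.
            while (lck.test_and_set(std::memory_order_acquire)) {
            }
        }
        void unlock() {
            lck.clear(std::memory_order_release);
        }

    private:
        std::atomic_flag lck = ATOMIC_FLAG_INIT;
    };

    SpinLock rtsc_serialize;
    u64 accumulated_ticks = 0;

    u64 AccumulateTicks(u64 ticks) {
        // RAII guard: acquired here, released automatically on every return path,
        // which the removed manual unlock() calls could not guarantee.
        std::scoped_lock lock{rtsc_serialize};
        accumulated_ticks += ticks;
        return accumulated_ticks;
    }

std::scoped_lock only requires lock()/unlock() (BasicLockable), which is why it can guard a
SpinLock exactly as it would a std::mutex.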