Merge pull request #8549 from liamwhite/kscheduler-sc
kernel: use KScheduler from Mesosphere
src/core/arm/arm_interface.cpp
		| @@ -154,9 +154,10 @@ void ARM_Interface::Run() { | ||||
|             break; | ||||
|         } | ||||
|  | ||||
|         // Handle syscalls and scheduling (this may change the current thread) | ||||
|         // Handle syscalls and scheduling (this may change the current thread/core) | ||||
|         if (Has(hr, svc_call)) { | ||||
|             Kernel::Svc::Call(system, GetSvcNumber()); | ||||
|             break; | ||||
|         } | ||||
|         if (Has(hr, break_loop) || !uses_wall_clock) { | ||||
|             break; | ||||
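The loop above polls the halt reason returned by the CPU core and dispatches on individual flags. A minimal sketch of that flag test follows; the enum values are illustrative assumptions, since yuzu's real HaltReason definitions live in the ARM interface and are not shown in this diff:

#include <cstdint>

// Illustrative halt-reason flags; the concrete values are assumptions.
enum class HaltReason : std::uint64_t {
    SvcCall   = 1ULL << 0,
    BreakLoop = 1ULL << 1,
};

constexpr HaltReason operator&(HaltReason lhs, HaltReason rhs) {
    return static_cast<HaltReason>(static_cast<std::uint64_t>(lhs) &
                                   static_cast<std::uint64_t>(rhs));
}

// Mirrors the Has(hr, flag) helper used in ARM_Interface::Run above.
constexpr bool Has(HaltReason hr, HaltReason flag) {
    return (hr & flag) != HaltReason{};
}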
src/core/cpu_manager.cpp
| @@ -8,6 +8,7 @@ | ||||
| #include "core/core.h" | ||||
| #include "core/core_timing.h" | ||||
| #include "core/cpu_manager.h" | ||||
| #include "core/hle/kernel/k_interrupt_manager.h" | ||||
| #include "core/hle/kernel/k_scheduler.h" | ||||
| #include "core/hle/kernel/k_thread.h" | ||||
| #include "core/hle/kernel/kernel.h" | ||||
| @@ -49,14 +50,6 @@ void CpuManager::GuestThreadFunction() { | ||||
|     } | ||||
| } | ||||
|  | ||||
| void CpuManager::GuestRewindFunction() { | ||||
|     if (is_multicore) { | ||||
|         MultiCoreRunGuestLoop(); | ||||
|     } else { | ||||
|         SingleCoreRunGuestLoop(); | ||||
|     } | ||||
| } | ||||
|  | ||||
| void CpuManager::IdleThreadFunction() { | ||||
|     if (is_multicore) { | ||||
|         MultiCoreRunIdleThread(); | ||||
| @@ -69,21 +62,21 @@ void CpuManager::ShutdownThreadFunction() { | ||||
|     ShutdownThread(); | ||||
| } | ||||
|  | ||||
| void CpuManager::HandleInterrupt() { | ||||
|     auto& kernel = system.Kernel(); | ||||
|     auto core_index = kernel.CurrentPhysicalCoreIndex(); | ||||
|  | ||||
|     Kernel::KInterruptManager::HandleInterrupt(kernel, static_cast<s32>(core_index)); | ||||
| } | ||||
|  | ||||
| /////////////////////////////////////////////////////////////////////////////// | ||||
| ///                             MultiCore                                   /// | ||||
| /////////////////////////////////////////////////////////////////////////////// | ||||
|  | ||||
| void CpuManager::MultiCoreRunGuestThread() { | ||||
|     // Similar to UserModeThreadStarter in HOS | ||||
|     auto& kernel = system.Kernel(); | ||||
|     kernel.CurrentScheduler()->OnThreadStart(); | ||||
|     auto* thread = kernel.CurrentScheduler()->GetSchedulerCurrentThread(); | ||||
|     auto& host_context = thread->GetHostContext(); | ||||
|     host_context->SetRewindPoint([this] { GuestRewindFunction(); }); | ||||
|     MultiCoreRunGuestLoop(); | ||||
| } | ||||
|  | ||||
| void CpuManager::MultiCoreRunGuestLoop() { | ||||
|     auto& kernel = system.Kernel(); | ||||
|  | ||||
|     while (true) { | ||||
|         auto* physical_core = &kernel.CurrentPhysicalCore(); | ||||
| @@ -91,18 +84,26 @@ void CpuManager::MultiCoreRunGuestLoop() { | ||||
|             physical_core->Run(); | ||||
|             physical_core = &kernel.CurrentPhysicalCore(); | ||||
|         } | ||||
|         { | ||||
|             Kernel::KScopedDisableDispatch dd(kernel); | ||||
|             physical_core->ArmInterface().ClearExclusiveState(); | ||||
|         } | ||||
|  | ||||
|         HandleInterrupt(); | ||||
|     } | ||||
| } | ||||
|  | ||||
| void CpuManager::MultiCoreRunIdleThread() { | ||||
|     // Not accurate to HOS. Remove this entire method when singlecore is removed. | ||||
|     // See notes in KScheduler::ScheduleImpl for more information about why this | ||||
|     // is inaccurate. | ||||
|  | ||||
|     auto& kernel = system.Kernel(); | ||||
|     kernel.CurrentScheduler()->OnThreadStart(); | ||||
|  | ||||
|     while (true) { | ||||
|         Kernel::KScopedDisableDispatch dd(kernel); | ||||
|         kernel.CurrentPhysicalCore().Idle(); | ||||
|         auto& physical_core = kernel.CurrentPhysicalCore(); | ||||
|         if (!physical_core.IsInterrupted()) { | ||||
|             physical_core.Idle(); | ||||
|         } | ||||
|  | ||||
|         HandleInterrupt(); | ||||
|     } | ||||
| } | ||||
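MultiCoreRunIdleThread now brackets the idle wait with KScopedDisableDispatch, so a reschedule cannot preempt the idle loop mid-step. A rough sketch of what such an RAII guard does, assuming yuzu's KernelCore and GetCurrentThread declarations are in scope (the real guard may additionally trigger a reschedule when the count returns to zero):

class ScopedDisableDispatch {
public:
    explicit ScopedDisableDispatch(Kernel::KernelCore& kernel) : m_kernel{kernel} {
        // Raise the disable-dispatch count; the scheduler will not switch
        // this thread out while the count is nonzero.
        Kernel::GetCurrentThread(m_kernel).DisableDispatch();
    }
    ~ScopedDisableDispatch() {
        // Dropping the count back down may allow a pending reschedule to run.
        Kernel::GetCurrentThread(m_kernel).EnableDispatch();
    }
    ScopedDisableDispatch(const ScopedDisableDispatch&) = delete;
    ScopedDisableDispatch& operator=(const ScopedDisableDispatch&) = delete;

private:
    Kernel::KernelCore& m_kernel;
};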
|  | ||||
| @@ -113,49 +114,40 @@ void CpuManager::MultiCoreRunIdleThread() { | ||||
| void CpuManager::SingleCoreRunGuestThread() { | ||||
|     auto& kernel = system.Kernel(); | ||||
|     kernel.CurrentScheduler()->OnThreadStart(); | ||||
|     auto* thread = kernel.CurrentScheduler()->GetSchedulerCurrentThread(); | ||||
|     auto& host_context = thread->GetHostContext(); | ||||
|     host_context->SetRewindPoint([this] { GuestRewindFunction(); }); | ||||
|     SingleCoreRunGuestLoop(); | ||||
| } | ||||
|  | ||||
| void CpuManager::SingleCoreRunGuestLoop() { | ||||
|     auto& kernel = system.Kernel(); | ||||
|     while (true) { | ||||
|         auto* physical_core = &kernel.CurrentPhysicalCore(); | ||||
|         if (!physical_core->IsInterrupted()) { | ||||
|             physical_core->Run(); | ||||
|             physical_core = &kernel.CurrentPhysicalCore(); | ||||
|         } | ||||
|  | ||||
|         kernel.SetIsPhantomModeForSingleCore(true); | ||||
|         system.CoreTiming().Advance(); | ||||
|         kernel.SetIsPhantomModeForSingleCore(false); | ||||
|         physical_core->ArmInterface().ClearExclusiveState(); | ||||
|  | ||||
|         PreemptSingleCore(); | ||||
|         auto& scheduler = kernel.Scheduler(current_core); | ||||
|         scheduler.RescheduleCurrentCore(); | ||||
|         HandleInterrupt(); | ||||
|     } | ||||
| } | ||||
|  | ||||
| void CpuManager::SingleCoreRunIdleThread() { | ||||
|     auto& kernel = system.Kernel(); | ||||
|     kernel.CurrentScheduler()->OnThreadStart(); | ||||
|  | ||||
|     while (true) { | ||||
|         auto& physical_core = kernel.CurrentPhysicalCore(); | ||||
|         PreemptSingleCore(false); | ||||
|         system.CoreTiming().AddTicks(1000U); | ||||
|         idle_count++; | ||||
|         auto& scheduler = physical_core.Scheduler(); | ||||
|         scheduler.RescheduleCurrentCore(); | ||||
|         HandleInterrupt(); | ||||
|     } | ||||
| } | ||||
|  | ||||
| void CpuManager::PreemptSingleCore(bool from_running_enviroment) { | ||||
|     { | ||||
| void CpuManager::PreemptSingleCore(bool from_running_environment) { | ||||
|     auto& kernel = system.Kernel(); | ||||
|         auto& scheduler = kernel.Scheduler(current_core); | ||||
|         Kernel::KThread* current_thread = scheduler.GetSchedulerCurrentThread(); | ||||
|         if (idle_count >= 4 || from_running_enviroment) { | ||||
|             if (!from_running_enviroment) { | ||||
|  | ||||
|     if (idle_count >= 4 || from_running_environment) { | ||||
|         if (!from_running_environment) { | ||||
|             system.CoreTiming().Idle(); | ||||
|             idle_count = 0; | ||||
|         } | ||||
| @@ -165,28 +157,30 @@ void CpuManager::PreemptSingleCore(bool from_running_enviroment) { | ||||
|     } | ||||
|     current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES); | ||||
|     system.CoreTiming().ResetTicks(); | ||||
|         scheduler.Unload(scheduler.GetSchedulerCurrentThread()); | ||||
|     kernel.Scheduler(current_core).PreemptSingleCore(); | ||||
|  | ||||
|         auto& next_scheduler = kernel.Scheduler(current_core); | ||||
|         Common::Fiber::YieldTo(current_thread->GetHostContext(), *next_scheduler.ControlContext()); | ||||
|     } | ||||
|  | ||||
|     // May have changed scheduler | ||||
|     { | ||||
|         auto& scheduler = system.Kernel().Scheduler(current_core); | ||||
|         scheduler.Reload(scheduler.GetSchedulerCurrentThread()); | ||||
|         if (!scheduler.IsIdle()) { | ||||
|     // We've now been scheduled again, and we may have exchanged schedulers. | ||||
|     // Reload the scheduler in case it's different. | ||||
|     if (!kernel.Scheduler(current_core).IsIdle()) { | ||||
|         idle_count = 0; | ||||
|     } | ||||
| } | ||||
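PreemptSingleCore advances current_core round-robin so that, in single-core mode, each guest core gets a slice of the one host thread. A standalone illustration of the rotation (NUM_CPU_CORES is 4 on the emulated hardware):

#include <atomic>
#include <cstddef>
#include <cstdio>

int main() {
    constexpr std::size_t NUM_CPU_CORES = 4;
    std::atomic<std::size_t> current_core{0};

    for (int slice = 0; slice < 8; ++slice) {
        // Same update as PreemptSingleCore: advance to the next core, wrapping.
        current_core.store((current_core + 1) % NUM_CPU_CORES);
        std::printf("now emulating core %zu\n", current_core.load());
    }
    // Prints 1 2 3 0 1 2 3 0: every guest core is visited in turn.
}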
|  | ||||
| void CpuManager::GuestActivate() { | ||||
|     // Similar to the HorizonKernelMain callback in HOS | ||||
|     auto& kernel = system.Kernel(); | ||||
|     auto* scheduler = kernel.CurrentScheduler(); | ||||
|  | ||||
|     scheduler->Activate(); | ||||
|     UNREACHABLE(); | ||||
| } | ||||
|  | ||||
| void CpuManager::ShutdownThread() { | ||||
|     auto& kernel = system.Kernel(); | ||||
|     auto* thread = kernel.GetCurrentEmuThread(); | ||||
|     auto core = is_multicore ? kernel.CurrentPhysicalCoreIndex() : 0; | ||||
|     auto* current_thread = kernel.GetCurrentEmuThread(); | ||||
|  | ||||
|     Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context); | ||||
|     Common::Fiber::YieldTo(thread->GetHostContext(), *core_data[core].host_context); | ||||
|     UNREACHABLE(); | ||||
| } | ||||
|  | ||||
| @@ -218,9 +212,12 @@ void CpuManager::RunThread(std::size_t core) { | ||||
|         system.GPU().ObtainContext(); | ||||
|     } | ||||
|  | ||||
|     auto* current_thread = system.Kernel().CurrentScheduler()->GetIdleThread(); | ||||
|     Kernel::SetCurrentThread(system.Kernel(), current_thread); | ||||
|     Common::Fiber::YieldTo(data.host_context, *current_thread->GetHostContext()); | ||||
|     auto& kernel = system.Kernel(); | ||||
|     auto& scheduler = *kernel.CurrentScheduler(); | ||||
|     auto* thread = scheduler.GetSchedulerCurrentThread(); | ||||
|     Kernel::SetCurrentThread(kernel, thread); | ||||
|  | ||||
|     Common::Fiber::YieldTo(data.host_context, *thread->GetHostContext()); | ||||
| } | ||||
|  | ||||
| } // namespace Core | ||||
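RunThread is the host-thread bootstrap: after setting the current thread, it yields from the host context into the emu thread's fiber and only regains control when something yields back. A sketch of the handoff pattern, using Common::Fiber's API as it appears in this patch (the include path is an assumption):

#include <memory>
#include "common/fiber.h" // assumed path for yuzu's fiber wrapper

// A fiber wraps a callable; YieldTo suspends the first context and resumes
// the second. Control returns here only when some fiber yields back into
// host_context (e.g. CpuManager::ShutdownThread above).
void EnterGuest(std::shared_ptr<Common::Fiber>& host_context,
                Kernel::KThread* thread) {
    Common::Fiber::YieldTo(host_context, *thread->GetHostContext());
}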
src/core/cpu_manager.h
| @@ -50,7 +50,10 @@ public: | ||||
|     void Initialize(); | ||||
|     void Shutdown(); | ||||
|  | ||||
|     std::function<void()> GetGuestThreadStartFunc() { | ||||
|     std::function<void()> GetGuestActivateFunc() { | ||||
|         return [this] { GuestActivate(); }; | ||||
|     } | ||||
|     std::function<void()> GetGuestThreadFunc() { | ||||
|         return [this] { GuestThreadFunction(); }; | ||||
|     } | ||||
|     std::function<void()> GetIdleThreadStartFunc() { | ||||
| @@ -68,20 +71,19 @@ public: | ||||
|  | ||||
| private: | ||||
|     void GuestThreadFunction(); | ||||
|     void GuestRewindFunction(); | ||||
|     void IdleThreadFunction(); | ||||
|     void ShutdownThreadFunction(); | ||||
|  | ||||
|     void MultiCoreRunGuestThread(); | ||||
|     void MultiCoreRunGuestLoop(); | ||||
|     void MultiCoreRunIdleThread(); | ||||
|  | ||||
|     void SingleCoreRunGuestThread(); | ||||
|     void SingleCoreRunGuestLoop(); | ||||
|     void SingleCoreRunIdleThread(); | ||||
|  | ||||
|     static void ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager, std::size_t core); | ||||
|  | ||||
|     void GuestActivate(); | ||||
|     void HandleInterrupt(); | ||||
|     void ShutdownThread(); | ||||
|     void RunThread(std::size_t core); | ||||
|  | ||||
src/core/hle/kernel/global_scheduler_context.cpp
| @@ -41,12 +41,7 @@ void GlobalSchedulerContext::PreemptThreads() { | ||||
|     ASSERT(IsLocked()); | ||||
|     for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { | ||||
|         const u32 priority = preemption_priorities[core_id]; | ||||
|         kernel.Scheduler(core_id).RotateScheduledQueue(core_id, priority); | ||||
|  | ||||
|         // Signal an interrupt occurred. For core 3, this is a certainty, as preemption will result | ||||
|         // in the rotator thread being scheduled. For cores 0-2, this is to simulate or system | ||||
|         // interrupts that may have occurred. | ||||
|         kernel.PhysicalCore(core_id).Interrupt(); | ||||
|         KScheduler::RotateScheduledQueue(kernel, core_id, priority); | ||||
|     } | ||||
| } | ||||
|  | ||||
src/core/hle/kernel/k_interrupt_manager.cpp
| @@ -6,6 +6,7 @@ | ||||
| #include "core/hle/kernel/k_scheduler.h" | ||||
| #include "core/hle/kernel/k_thread.h" | ||||
| #include "core/hle/kernel/kernel.h" | ||||
| #include "core/hle/kernel/physical_core.h" | ||||
|  | ||||
| namespace Kernel::KInterruptManager { | ||||
|  | ||||
| @@ -15,6 +16,9 @@ void HandleInterrupt(KernelCore& kernel, s32 core_id) { | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     // Acknowledge the interrupt. | ||||
|     kernel.PhysicalCore(core_id).ClearInterrupt(); | ||||
|  | ||||
|     auto& current_thread = GetCurrentThread(kernel); | ||||
|  | ||||
|     // If the user disable count is set, we may need to pin the current thread. | ||||
| @@ -27,6 +31,9 @@ void HandleInterrupt(KernelCore& kernel, s32 core_id) { | ||||
|         // Set the interrupt flag for the thread. | ||||
|         GetCurrentThread(kernel).SetInterruptFlag(); | ||||
|     } | ||||
|  | ||||
|     // Request interrupt scheduling. | ||||
|     kernel.CurrentScheduler()->RequestScheduleOnInterrupt(); | ||||
| } | ||||
|  | ||||
| } // namespace Kernel::KInterruptManager | ||||
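Stitched together with the CpuManager changes above, the interrupt path after this patch is: the core loop notices IsInterrupted(), CpuManager::HandleInterrupt forwards to KInterruptManager::HandleInterrupt, which acknowledges the interrupt, optionally pins the current thread, and finally asks the core-local scheduler to reschedule. A condensed, non-authoritative restatement (the pinning branch that depends on the user disable count is elided):

void OnCoreInterrupted(Kernel::KernelCore& kernel, s32 core_id) {
    // 1. Acknowledge: clear the pending-interrupt flag on the physical core.
    kernel.PhysicalCore(core_id).ClearInterrupt();

    // 2. Mark the interrupted thread so in-flight SVCs can observe it.
    Kernel::GetCurrentThread(kernel).SetInterruptFlag();

    // 3. Ask the core-local scheduler to run as soon as dispatch allows.
    kernel.CurrentScheduler()->RequestScheduleOnInterrupt();
}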
src/core/hle/kernel/k_scheduler.cpp
| @@ -27,69 +27,185 @@ static void IncrementScheduledCount(Kernel::KThread* thread) { | ||||
|     } | ||||
| } | ||||
|  | ||||
| void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule) { | ||||
|     auto scheduler = kernel.CurrentScheduler(); | ||||
| KScheduler::KScheduler(KernelCore& kernel_) : kernel{kernel_} { | ||||
|     m_switch_fiber = std::make_shared<Common::Fiber>([this] { | ||||
|         while (true) { | ||||
|             ScheduleImplFiber(); | ||||
|         } | ||||
|     }); | ||||
|  | ||||
|     u32 current_core{0xF}; | ||||
|     bool must_context_switch{}; | ||||
|     if (scheduler) { | ||||
|         current_core = scheduler->core_id; | ||||
|         // TODO(bunnei): Should be set to true when we deprecate single core | ||||
|         must_context_switch = !kernel.IsPhantomModeForSingleCore(); | ||||
|     m_state.needs_scheduling = true; | ||||
| } | ||||
|  | ||||
|     while (cores_pending_reschedule != 0) { | ||||
|         const auto core = static_cast<u32>(std::countr_zero(cores_pending_reschedule)); | ||||
|         ASSERT(core < Core::Hardware::NUM_CPU_CORES); | ||||
|         if (!must_context_switch || core != current_core) { | ||||
|             auto& phys_core = kernel.PhysicalCore(core); | ||||
|             phys_core.Interrupt(); | ||||
|         } | ||||
|         cores_pending_reschedule &= ~(1ULL << core); | ||||
| KScheduler::~KScheduler() = default; | ||||
|  | ||||
| void KScheduler::SetInterruptTaskRunnable() { | ||||
|     m_state.interrupt_task_runnable = true; | ||||
|     m_state.needs_scheduling = true; | ||||
| } | ||||
|  | ||||
|     for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; ++core_id) { | ||||
|         if (kernel.PhysicalCore(core_id).IsInterrupted()) { | ||||
|             KInterruptManager::HandleInterrupt(kernel, static_cast<s32>(core_id)); | ||||
| void KScheduler::RequestScheduleOnInterrupt() { | ||||
|     m_state.needs_scheduling = true; | ||||
|  | ||||
|     if (CanSchedule(kernel)) { | ||||
|         ScheduleOnInterrupt(); | ||||
|     } | ||||
| } | ||||
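RequestScheduleOnInterrupt only schedules immediately when CanSchedule holds; otherwise the raised needs_scheduling flag is picked up later, when dispatch is re-enabled. The new CanSchedule body is not shown in this hunk, but judging from the implementation this patch removes further down, it plausibly remains a disable-count check:

// Sketch based on the removed KScheduler::CanSchedule below; treat the
// exact threshold as an assumption about the new implementation.
static bool CanSchedule(Kernel::KernelCore& kernel) {
    return Kernel::GetCurrentThread(kernel).GetDisableDispatchCount() <= 1;
}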
|  | ||||
|     if (must_context_switch) { | ||||
|         auto core_scheduler = kernel.CurrentScheduler(); | ||||
|         kernel.ExitSVCProfile(); | ||||
|         core_scheduler->RescheduleCurrentCore(); | ||||
|         kernel.EnterSVCProfile(); | ||||
| void KScheduler::DisableScheduling(KernelCore& kernel) { | ||||
|     ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0); | ||||
|     GetCurrentThread(kernel).DisableDispatch(); | ||||
| } | ||||
|  | ||||
| void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) { | ||||
|     ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 1); | ||||
|  | ||||
|     auto* scheduler{kernel.CurrentScheduler()}; | ||||
|  | ||||
|     if (!scheduler || kernel.IsPhantomModeForSingleCore()) { | ||||
|         KScheduler::RescheduleCores(kernel, cores_needing_scheduling); | ||||
|         KScheduler::RescheduleCurrentHLEThread(kernel); | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     scheduler->RescheduleOtherCores(cores_needing_scheduling); | ||||
|  | ||||
|     if (GetCurrentThread(kernel).GetDisableDispatchCount() > 1) { | ||||
|         GetCurrentThread(kernel).EnableDispatch(); | ||||
|     } else { | ||||
|         scheduler->RescheduleCurrentCore(); | ||||
|     } | ||||
| } | ||||
|  | ||||
| void KScheduler::RescheduleCurrentHLEThread(KernelCore& kernel) { | ||||
|     // HACK: we cannot schedule from this thread, it is not a core thread | ||||
|     ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1); | ||||
|  | ||||
|     // Special case to ensure dummy threads that are waiting block | ||||
|     // Special case to ensure that waiting dummy threads actually block | ||||
|  | ||||
|     ASSERT(GetCurrentThread(kernel).GetState() != ThreadState::Waiting); | ||||
|     GetCurrentThread(kernel).EnableDispatch(); | ||||
| } | ||||
|  | ||||
| u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) { | ||||
|     if (IsSchedulerUpdateNeeded(kernel)) { | ||||
|         return UpdateHighestPriorityThreadsImpl(kernel); | ||||
|     } else { | ||||
|         return 0; | ||||
|     } | ||||
| } | ||||
|  | ||||
| void KScheduler::Schedule() { | ||||
|     ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1); | ||||
|     ASSERT(m_core_id == GetCurrentCoreId(kernel)); | ||||
|  | ||||
|     ScheduleImpl(); | ||||
| } | ||||
|  | ||||
| void KScheduler::ScheduleOnInterrupt() { | ||||
|     GetCurrentThread(kernel).DisableDispatch(); | ||||
|     Schedule(); | ||||
|     GetCurrentThread(kernel).EnableDispatch(); | ||||
| } | ||||
|  | ||||
| void KScheduler::PreemptSingleCore() { | ||||
|     GetCurrentThread(kernel).DisableDispatch(); | ||||
|  | ||||
|     auto* thread = GetCurrentThreadPointer(kernel); | ||||
|     auto& previous_scheduler = kernel.Scheduler(thread->GetCurrentCore()); | ||||
|     previous_scheduler.Unload(thread); | ||||
|  | ||||
|     Common::Fiber::YieldTo(thread->GetHostContext(), *m_switch_fiber); | ||||
|  | ||||
|     GetCurrentThread(kernel).EnableDispatch(); | ||||
| } | ||||
|  | ||||
| void KScheduler::RescheduleCurrentCore() { | ||||
|     ASSERT(!kernel.IsPhantomModeForSingleCore()); | ||||
|     ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1); | ||||
|  | ||||
|     GetCurrentThread(kernel).EnableDispatch(); | ||||
|  | ||||
|     if (m_state.needs_scheduling.load()) { | ||||
|         // Disable interrupts, and then check again if rescheduling is needed. | ||||
|         // KScopedInterruptDisable intr_disable; | ||||
|  | ||||
|         kernel.CurrentScheduler()->RescheduleCurrentCoreImpl(); | ||||
|     } | ||||
| } | ||||
|  | ||||
| void KScheduler::RescheduleCurrentCoreImpl() { | ||||
|     // Check that scheduling is needed. | ||||
|     if (m_state.needs_scheduling.load()) [[likely]] { | ||||
|         GetCurrentThread(kernel).DisableDispatch(); | ||||
|         Schedule(); | ||||
|         GetCurrentThread(kernel).EnableDispatch(); | ||||
|     } | ||||
| } | ||||
|  | ||||
| void KScheduler::Initialize(KThread* main_thread, KThread* idle_thread, s32 core_id) { | ||||
|     // Set core ID/idle thread/interrupt task manager. | ||||
|     m_core_id = core_id; | ||||
|     m_idle_thread = idle_thread; | ||||
|     // m_state.idle_thread_stack = m_idle_thread->GetStackTop(); | ||||
|     // m_state.interrupt_task_manager = &kernel.GetInterruptTaskManager(); | ||||
|  | ||||
|     // Insert the main thread into the priority queue. | ||||
|     // { | ||||
|     //     KScopedSchedulerLock lk{kernel}; | ||||
|     //     GetPriorityQueue(kernel).PushBack(GetCurrentThreadPointer(kernel)); | ||||
|     //     SetSchedulerUpdateNeeded(kernel); | ||||
|     // } | ||||
|  | ||||
|     // Bind interrupt handler. | ||||
|     // kernel.GetInterruptManager().BindHandler( | ||||
|     //     GetSchedulerInterruptHandler(kernel), KInterruptName::Scheduler, m_core_id, | ||||
|     //     KInterruptController::PriorityLevel::Scheduler, false, false); | ||||
|  | ||||
|     // Set the current thread. | ||||
|     m_current_thread = main_thread; | ||||
| } | ||||
|  | ||||
| void KScheduler::Activate() { | ||||
|     ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1); | ||||
|  | ||||
|     // m_state.should_count_idle = KTargetSystem::IsDebugMode(); | ||||
|     m_is_active = true; | ||||
|     RescheduleCurrentCore(); | ||||
| } | ||||
|  | ||||
| void KScheduler::OnThreadStart() { | ||||
|     GetCurrentThread(kernel).EnableDispatch(); | ||||
| } | ||||
|  | ||||
| u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) { | ||||
|     KScopedSpinLock lk{guard}; | ||||
|     if (KThread* prev_highest_thread = state.highest_priority_thread; | ||||
|         prev_highest_thread != highest_thread) { | ||||
|         if (prev_highest_thread != nullptr) { | ||||
|     if (KThread* prev_highest_thread = m_state.highest_priority_thread; | ||||
|         prev_highest_thread != highest_thread) [[likely]] { | ||||
|         if (prev_highest_thread != nullptr) [[likely]] { | ||||
|             IncrementScheduledCount(prev_highest_thread); | ||||
|             prev_highest_thread->SetLastScheduledTick(system.CoreTiming().GetCPUTicks()); | ||||
|             prev_highest_thread->SetLastScheduledTick(kernel.System().CoreTiming().GetCPUTicks()); | ||||
|         } | ||||
|         if (state.should_count_idle) { | ||||
|             if (highest_thread != nullptr) { | ||||
|         if (m_state.should_count_idle) { | ||||
|             if (highest_thread != nullptr) [[likely]] { | ||||
|                 if (KProcess* process = highest_thread->GetOwnerProcess(); process != nullptr) { | ||||
|                     process->SetRunningThread(core_id, highest_thread, state.idle_count); | ||||
|                     process->SetRunningThread(m_core_id, highest_thread, m_state.idle_count); | ||||
|                 } | ||||
|             } else { | ||||
|                 state.idle_count++; | ||||
|                 m_state.idle_count++; | ||||
|             } | ||||
|         } | ||||
|  | ||||
|         state.highest_priority_thread = highest_thread; | ||||
|         state.needs_scheduling.store(true); | ||||
|         return (1ULL << core_id); | ||||
|         m_state.highest_priority_thread = highest_thread; | ||||
|         m_state.needs_scheduling = true; | ||||
|         return (1ULL << m_core_id); | ||||
|     } else { | ||||
|         return 0; | ||||
|     } | ||||
| } | ||||
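UpdateHighestPriorityThread reports a change by returning a one-bit mask, 1ULL << core_id. Callers OR these masks into a cores_needing_scheduling value and then peel bits off with std::countr_zero, exactly as the RescheduleCores code elsewhere in this diff does. A runnable illustration of the convention:

#include <bit>
#include <cstdint>
#include <cstdio>

int main() {
    std::uint64_t cores_needing_scheduling = 0;
    cores_needing_scheduling |= (1ULL << 0); // core 0's top thread changed
    cores_needing_scheduling |= (1ULL << 3); // core 3's top thread changed

    while (cores_needing_scheduling != 0) {
        const int core = std::countr_zero(cores_needing_scheduling);
        std::printf("interrupt core %d\n", core); // prints cores 0 and 3
        cores_needing_scheduling &= ~(1ULL << core);
    }
}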
|  | ||||
| u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { | ||||
|     ASSERT(kernel.GlobalSchedulerContext().IsLocked()); | ||||
|     ASSERT(IsSchedulerLockedByCurrentThread(kernel)); | ||||
|  | ||||
|     // Clear that we need to update. | ||||
|     ClearSchedulerUpdateNeeded(kernel); | ||||
| @@ -98,18 +214,20 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { | ||||
|     KThread* top_threads[Core::Hardware::NUM_CPU_CORES]; | ||||
|     auto& priority_queue = GetPriorityQueue(kernel); | ||||
|  | ||||
|     /// We want to go over all cores, finding the highest priority thread and determining if | ||||
|     /// scheduling is needed for that core. | ||||
|     // We want to go over all cores, finding the highest priority thread and determining if | ||||
|     // scheduling is needed for that core. | ||||
|     for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { | ||||
|         KThread* top_thread = priority_queue.GetScheduledFront(static_cast<s32>(core_id)); | ||||
|         if (top_thread != nullptr) { | ||||
|             // If the thread has no waiters, we need to check if the process has a thread pinned. | ||||
|             if (top_thread->GetNumKernelWaiters() == 0) { | ||||
|                 if (KProcess* parent = top_thread->GetOwnerProcess(); parent != nullptr) { | ||||
|             // We need to check if the thread's process has a pinned thread. | ||||
|             if (KProcess* parent = top_thread->GetOwnerProcess()) { | ||||
|                 // Check that there's a pinned thread other than the current top thread. | ||||
|                 if (KThread* pinned = parent->GetPinnedThread(static_cast<s32>(core_id)); | ||||
|                     pinned != nullptr && pinned != top_thread) { | ||||
|                         // We prefer our parent's pinned thread if possible. However, we also don't | ||||
|                         // want to schedule un-runnable threads. | ||||
|                     // We need to prefer threads with kernel waiters to the pinned thread. | ||||
|                     if (top_thread->GetNumKernelWaiters() == | ||||
|                         0 /* && top_thread != parent->GetExceptionThread() */) { | ||||
|                         // If the pinned thread is runnable, use it. | ||||
|                         if (pinned->GetRawState() == ThreadState::Runnable) { | ||||
|                             top_thread = pinned; | ||||
|                         } else { | ||||
| @@ -129,7 +247,8 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { | ||||
|  | ||||
|     // Idle cores are bad. We're going to try to migrate threads to each idle core in turn. | ||||
|     while (idle_cores != 0) { | ||||
|         const auto core_id = static_cast<u32>(std::countr_zero(idle_cores)); | ||||
|         const s32 core_id = static_cast<s32>(std::countr_zero(idle_cores)); | ||||
|  | ||||
|         if (KThread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) { | ||||
|             s32 migration_candidates[Core::Hardware::NUM_CPU_CORES]; | ||||
|             size_t num_candidates = 0; | ||||
| @@ -150,7 +269,6 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { | ||||
|                     // The suggested thread isn't bound to its core, so we can migrate it! | ||||
|                     suggested->SetActiveCore(core_id); | ||||
|                     priority_queue.ChangeCore(suggested_core, suggested); | ||||
|  | ||||
|                     top_threads[core_id] = suggested; | ||||
|                     cores_needing_scheduling |= | ||||
|                         kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]); | ||||
| @@ -183,7 +301,6 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { | ||||
|                         // Perform the migration. | ||||
|                         suggested->SetActiveCore(core_id); | ||||
|                         priority_queue.ChangeCore(candidate_core, suggested); | ||||
|  | ||||
|                         top_threads[core_id] = suggested; | ||||
|                         cores_needing_scheduling |= | ||||
|                             kernel.Scheduler(core_id).UpdateHighestPriorityThread( | ||||
| @@ -200,24 +317,210 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { | ||||
|     return cores_needing_scheduling; | ||||
| } | ||||
|  | ||||
| void KScheduler::SwitchThread(KThread* next_thread) { | ||||
|     KProcess* const cur_process = kernel.CurrentProcess(); | ||||
|     KThread* const cur_thread = GetCurrentThreadPointer(kernel); | ||||
|  | ||||
|     // We never want to schedule a null thread, so use the idle thread if we don't have a next. | ||||
|     if (next_thread == nullptr) { | ||||
|         next_thread = m_idle_thread; | ||||
|     } | ||||
|  | ||||
|     if (next_thread->GetCurrentCore() != m_core_id) { | ||||
|         next_thread->SetCurrentCore(m_core_id); | ||||
|     } | ||||
|  | ||||
|     // If we're not actually switching thread, there's nothing to do. | ||||
|     if (next_thread == cur_thread) { | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     // Next thread is now known not to be nullptr, and must not be dispatchable. | ||||
|     ASSERT(next_thread->GetDisableDispatchCount() == 1); | ||||
|     ASSERT(!next_thread->IsDummyThread()); | ||||
|  | ||||
|     // Update the CPU time tracking variables. | ||||
|     const s64 prev_tick = m_last_context_switch_time; | ||||
|     const s64 cur_tick = kernel.System().CoreTiming().GetCPUTicks(); | ||||
|     const s64 tick_diff = cur_tick - prev_tick; | ||||
|     cur_thread->AddCpuTime(m_core_id, tick_diff); | ||||
|     if (cur_process != nullptr) { | ||||
|         cur_process->UpdateCPUTimeTicks(tick_diff); | ||||
|     } | ||||
|     m_last_context_switch_time = cur_tick; | ||||
|  | ||||
|     // Update our previous thread. | ||||
|     if (cur_process != nullptr) { | ||||
|         if (!cur_thread->IsTerminationRequested() && cur_thread->GetActiveCore() == m_core_id) | ||||
|             [[likely]] { | ||||
|             m_state.prev_thread = cur_thread; | ||||
|         } else { | ||||
|             m_state.prev_thread = nullptr; | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     // Switch the current process, if we're switching processes. | ||||
|     // if (KProcess *next_process = next_thread->GetOwnerProcess(); next_process != cur_process) { | ||||
|     //     KProcess::Switch(cur_process, next_process); | ||||
|     // } | ||||
|  | ||||
|     // Set the new thread. | ||||
|     SetCurrentThread(kernel, next_thread); | ||||
|     m_current_thread = next_thread; | ||||
|  | ||||
|     // Set the new Thread Local region. | ||||
|     // cpu::SwitchThreadLocalRegion(GetInteger(next_thread->GetThreadLocalRegionAddress())); | ||||
| } | ||||
|  | ||||
| void KScheduler::ScheduleImpl() { | ||||
|     // First, clear the needs scheduling bool. | ||||
|     m_state.needs_scheduling.store(false, std::memory_order_seq_cst); | ||||
|  | ||||
|     // Load the appropriate thread pointers for scheduling. | ||||
|     KThread* const cur_thread{GetCurrentThreadPointer(kernel)}; | ||||
|     KThread* highest_priority_thread{m_state.highest_priority_thread}; | ||||
|  | ||||
|     // Check whether there are runnable interrupt tasks. | ||||
|     if (m_state.interrupt_task_runnable) { | ||||
|         // The interrupt task is runnable. | ||||
|         // We want to switch to the interrupt task/idle thread. | ||||
|         highest_priority_thread = nullptr; | ||||
|     } | ||||
|  | ||||
|     // If there aren't, we want to check if the highest priority thread is the same as the current | ||||
|     // thread. | ||||
|     if (highest_priority_thread == cur_thread) { | ||||
|         // If they're the same, then we can just return. | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     // The highest priority thread is not the same as the current thread. | ||||
|     // Jump to the switcher and continue executing from there. | ||||
|     m_switch_cur_thread = cur_thread; | ||||
|     m_switch_highest_priority_thread = highest_priority_thread; | ||||
|     m_switch_from_schedule = true; | ||||
|     Common::Fiber::YieldTo(cur_thread->host_context, *m_switch_fiber); | ||||
|  | ||||
|     // Returning from ScheduleImpl occurs after this thread has been scheduled again. | ||||
| } | ||||
|  | ||||
| void KScheduler::ScheduleImplFiber() { | ||||
|     KThread* const cur_thread{m_switch_cur_thread}; | ||||
|     KThread* highest_priority_thread{m_switch_highest_priority_thread}; | ||||
|  | ||||
|     // If we're not coming from scheduling (i.e., we came from SC preemption), | ||||
|     // we should restart the scheduling loop directly. Not accurate to HOS. | ||||
|     if (!m_switch_from_schedule) { | ||||
|         goto retry; | ||||
|     } | ||||
|  | ||||
|     // Mark that we are not coming from scheduling anymore. | ||||
|     m_switch_from_schedule = false; | ||||
|  | ||||
|     // Save the original thread context. | ||||
|     Unload(cur_thread); | ||||
|  | ||||
|     // The current thread's context has been entirely taken care of. | ||||
|     // Now we want to loop until we successfully switch the thread context. | ||||
|     while (true) { | ||||
|         // We're starting to try to do the context switch. | ||||
|         // Check if the highest priority thread is null. | ||||
|         if (!highest_priority_thread) { | ||||
|             // The next thread is nullptr! | ||||
|  | ||||
|             // Switch to the idle thread. Note: HOS treats idling as a special case for | ||||
|             // performance. This is not *required* for yuzu's purposes, and for singlecore | ||||
|             // compatibility, we can just move the logic that would go here into the execution | ||||
|             // of the idle thread. If we ever remove singlecore, we should implement this | ||||
|             // accurately to HOS. | ||||
|             highest_priority_thread = m_idle_thread; | ||||
|         } | ||||
|  | ||||
|         // We want to try to lock the highest priority thread's context. | ||||
|         // Try to take it. | ||||
|         while (!highest_priority_thread->context_guard.try_lock()) { | ||||
|             // The highest priority thread's context is already locked. | ||||
|             // Check if we need scheduling. If we don't, we can retry directly. | ||||
|             if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) { | ||||
|                 // If we do, another core is interfering, and we must start again. | ||||
|                 goto retry; | ||||
|             } | ||||
|         } | ||||
|  | ||||
|         // It's time to switch the thread. | ||||
|         // Switch to the highest priority thread. | ||||
|         SwitchThread(highest_priority_thread); | ||||
|  | ||||
|         // Check if we need scheduling. If we do, then we can't complete the switch and should | ||||
|         // retry. | ||||
|         if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) { | ||||
|             // Our switch failed. | ||||
|             // We should unlock the thread context, and then retry. | ||||
|             highest_priority_thread->context_guard.unlock(); | ||||
|             goto retry; | ||||
|         } else { | ||||
|             break; | ||||
|         } | ||||
|  | ||||
|     retry: | ||||
|  | ||||
|         // We failed to successfully do the context switch, and need to retry. | ||||
|         // Clear needs_scheduling. | ||||
|         m_state.needs_scheduling.store(false, std::memory_order_seq_cst); | ||||
|  | ||||
|         // Refresh the highest priority thread. | ||||
|         highest_priority_thread = m_state.highest_priority_thread; | ||||
|     } | ||||
|  | ||||
|     // Reload the guest thread context. | ||||
|     Reload(highest_priority_thread); | ||||
|  | ||||
|     // Reload the host thread. | ||||
|     Common::Fiber::YieldTo(m_switch_fiber, *highest_priority_thread->host_context); | ||||
| } | ||||
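The heart of ScheduleImplFiber is a lock-or-retry loop: take the next thread's context lock unless another core raises needs_scheduling first, in which case restart with a fresh candidate. The same shape can be modeled self-containedly with a mutex and an atomic flag (a toy model, not yuzu's types):

#include <atomic>
#include <mutex>

struct ToyThread {
    std::mutex context_guard;
};

ToyThread g_candidate;
ToyThread* PickNext() { // stand-in for reading m_state.highest_priority_thread
    return &g_candidate;
}

ToyThread* AcquireNextThread(std::atomic<bool>& needs_scheduling) {
    while (true) {
        needs_scheduling.store(false, std::memory_order_seq_cst);
        ToyThread* candidate = PickNext();

        // Spin for the context lock, bailing out if another core signals.
        bool locked = true;
        while (!candidate->context_guard.try_lock()) {
            if (needs_scheduling.load(std::memory_order_seq_cst)) {
                locked = false;
                break;
            }
        }
        if (!locked) {
            continue; // interfered with before we held the lock: retry
        }
        if (needs_scheduling.load(std::memory_order_seq_cst)) {
            candidate->context_guard.unlock(); // interfered with after locking
            continue;
        }
        return candidate; // lock held, no interference: safe to switch
    }
}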
|  | ||||
| void KScheduler::Unload(KThread* thread) { | ||||
|     auto& cpu_core = kernel.System().ArmInterface(m_core_id); | ||||
|     cpu_core.SaveContext(thread->GetContext32()); | ||||
|     cpu_core.SaveContext(thread->GetContext64()); | ||||
|     // Save the TPIDR_EL0 system register in case it was modified. | ||||
|     thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0()); | ||||
|     cpu_core.ClearExclusiveState(); | ||||
|  | ||||
|     // Check if the thread is terminated by checking the DPC flags. | ||||
|     if ((thread->GetStackParameters().dpc_flags & static_cast<u32>(DpcFlag::Terminated)) == 0) { | ||||
|         // The thread isn't terminated, so we want to unlock it. | ||||
|         thread->context_guard.unlock(); | ||||
|     } | ||||
| } | ||||
|  | ||||
| void KScheduler::Reload(KThread* thread) { | ||||
|     auto& cpu_core = kernel.System().ArmInterface(m_core_id); | ||||
|     cpu_core.LoadContext(thread->GetContext32()); | ||||
|     cpu_core.LoadContext(thread->GetContext64()); | ||||
|     cpu_core.SetTlsAddress(thread->GetTLSAddress()); | ||||
|     cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0()); | ||||
|     cpu_core.LoadWatchpointArray(thread->GetOwnerProcess()->GetWatchpoints()); | ||||
|     cpu_core.ClearExclusiveState(); | ||||
| } | ||||
|  | ||||
| void KScheduler::ClearPreviousThread(KernelCore& kernel, KThread* thread) { | ||||
|     ASSERT(kernel.GlobalSchedulerContext().IsLocked()); | ||||
|     ASSERT(IsSchedulerLockedByCurrentThread(kernel)); | ||||
|     for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; ++i) { | ||||
|         // Get an atomic reference to the core scheduler's previous thread. | ||||
|         std::atomic_ref<KThread*> prev_thread(kernel.Scheduler(static_cast<s32>(i)).prev_thread); | ||||
|         static_assert(std::atomic_ref<KThread*>::is_always_lock_free); | ||||
|         auto& prev_thread{kernel.Scheduler(i).m_state.prev_thread}; | ||||
|  | ||||
|         // Atomically clear the previous thread if it's our target. | ||||
|         KThread* compare = thread; | ||||
|         prev_thread.compare_exchange_strong(compare, nullptr); | ||||
|         prev_thread.compare_exchange_strong(compare, nullptr, std::memory_order_seq_cst); | ||||
|     } | ||||
| } | ||||
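The compare-exchange above clears each core's prev_thread only if it still points at the departing thread, leaving unrelated values untouched. A minimal, runnable demonstration of that clear-if-equal idiom:

#include <atomic>
#include <cassert>

struct Thread {};

int main() {
    Thread a, b;
    std::atomic<Thread*> prev_thread{&a};

    Thread* expected = &b; // does not match the stored value: no effect
    prev_thread.compare_exchange_strong(expected, nullptr);
    assert(prev_thread.load() == &a);

    expected = &a;         // matches: the pointer is cleared
    prev_thread.compare_exchange_strong(expected, nullptr);
    assert(prev_thread.load() == nullptr);
}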
|  | ||||
| void KScheduler::OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state) { | ||||
|     ASSERT(kernel.GlobalSchedulerContext().IsLocked()); | ||||
|     ASSERT(IsSchedulerLockedByCurrentThread(kernel)); | ||||
|  | ||||
|     // Check if the state has changed, because if it hasn't there's nothing to do. | ||||
|     const auto cur_state = thread->GetRawState(); | ||||
|     const ThreadState cur_state = thread->GetRawState(); | ||||
|     if (cur_state == old_state) { | ||||
|         return; | ||||
|     } | ||||
| @@ -237,12 +540,12 @@ void KScheduler::OnThreadStateChanged(KernelCore& kernel, KThread* thread, Threa | ||||
| } | ||||
|  | ||||
| void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s32 old_priority) { | ||||
|     ASSERT(kernel.GlobalSchedulerContext().IsLocked()); | ||||
|     ASSERT(IsSchedulerLockedByCurrentThread(kernel)); | ||||
|  | ||||
|     // If the thread is runnable, we want to change its priority in the queue. | ||||
|     if (thread->GetRawState() == ThreadState::Runnable) { | ||||
|         GetPriorityQueue(kernel).ChangePriority(old_priority, | ||||
|                                                 thread == kernel.GetCurrentEmuThread(), thread); | ||||
|                                                 thread == GetCurrentThreadPointer(kernel), thread); | ||||
|         IncrementScheduledCount(thread); | ||||
|         SetSchedulerUpdateNeeded(kernel); | ||||
|     } | ||||
| @@ -250,7 +553,7 @@ void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s3 | ||||
|  | ||||
| void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread, | ||||
|                                              const KAffinityMask& old_affinity, s32 old_core) { | ||||
|     ASSERT(kernel.GlobalSchedulerContext().IsLocked()); | ||||
|     ASSERT(IsSchedulerLockedByCurrentThread(kernel)); | ||||
|  | ||||
|     // If the thread is runnable, we want to change its affinity in the queue. | ||||
|     if (thread->GetRawState() == ThreadState::Runnable) { | ||||
| @@ -260,15 +563,14 @@ void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread | ||||
|     } | ||||
| } | ||||
|  | ||||
| void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) { | ||||
|     ASSERT(system.GlobalSchedulerContext().IsLocked()); | ||||
| void KScheduler::RotateScheduledQueue(KernelCore& kernel, s32 core_id, s32 priority) { | ||||
|     ASSERT(IsSchedulerLockedByCurrentThread(kernel)); | ||||
|  | ||||
|     // Get a reference to the priority queue. | ||||
|     auto& kernel = system.Kernel(); | ||||
|     auto& priority_queue = GetPriorityQueue(kernel); | ||||
|  | ||||
|     // Rotate the front of the queue to the end. | ||||
|     KThread* top_thread = priority_queue.GetScheduledFront(cpu_core_id, priority); | ||||
|     KThread* top_thread = priority_queue.GetScheduledFront(core_id, priority); | ||||
|     KThread* next_thread = nullptr; | ||||
|     if (top_thread != nullptr) { | ||||
|         next_thread = priority_queue.MoveToScheduledBack(top_thread); | ||||
| @@ -280,7 +582,7 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) { | ||||
|  | ||||
|     // While we have a suggested thread, try to migrate it! | ||||
|     { | ||||
|         KThread* suggested = priority_queue.GetSuggestedFront(cpu_core_id, priority); | ||||
|         KThread* suggested = priority_queue.GetSuggestedFront(core_id, priority); | ||||
|         while (suggested != nullptr) { | ||||
|             // Check if the suggested thread is the top thread on its core. | ||||
|             const s32 suggested_core = suggested->GetActiveCore(); | ||||
| @@ -301,7 +603,7 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) { | ||||
|                 // to the front of the queue. | ||||
|                 if (top_on_suggested_core == nullptr || | ||||
|                     top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) { | ||||
|                     suggested->SetActiveCore(cpu_core_id); | ||||
|                     suggested->SetActiveCore(core_id); | ||||
|                     priority_queue.ChangeCore(suggested_core, suggested, true); | ||||
|                     IncrementScheduledCount(suggested); | ||||
|                     break; | ||||
| @@ -309,22 +611,21 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) { | ||||
|             } | ||||
|  | ||||
|             // Get the next suggestion. | ||||
|             suggested = priority_queue.GetSamePriorityNext(cpu_core_id, suggested); | ||||
|             suggested = priority_queue.GetSamePriorityNext(core_id, suggested); | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     // Now that we might have migrated a thread with the same priority, check if we can do better. | ||||
|  | ||||
|     { | ||||
|         KThread* best_thread = priority_queue.GetScheduledFront(cpu_core_id); | ||||
|         KThread* best_thread = priority_queue.GetScheduledFront(core_id); | ||||
|         if (best_thread == GetCurrentThreadPointer(kernel)) { | ||||
|             best_thread = priority_queue.GetScheduledNext(cpu_core_id, best_thread); | ||||
|             best_thread = priority_queue.GetScheduledNext(core_id, best_thread); | ||||
|         } | ||||
|  | ||||
|         // If the best thread we can choose has a priority the same or worse than ours, try to | ||||
|         // migrate a higher priority thread. | ||||
|         if (best_thread != nullptr && best_thread->GetPriority() >= priority) { | ||||
|             KThread* suggested = priority_queue.GetSuggestedFront(cpu_core_id); | ||||
|             KThread* suggested = priority_queue.GetSuggestedFront(core_id); | ||||
|             while (suggested != nullptr) { | ||||
|                 // If the suggestion's priority is the same as ours, don't bother. | ||||
|                 if (suggested->GetPriority() >= best_thread->GetPriority()) { | ||||
| @@ -343,7 +644,7 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) { | ||||
|                     if (top_on_suggested_core == nullptr || | ||||
|                         top_on_suggested_core->GetPriority() >= | ||||
|                             HighestCoreMigrationAllowedPriority) { | ||||
|                         suggested->SetActiveCore(cpu_core_id); | ||||
|                         suggested->SetActiveCore(core_id); | ||||
|                         priority_queue.ChangeCore(suggested_core, suggested, true); | ||||
|                         IncrementScheduledCount(suggested); | ||||
|                         break; | ||||
| @@ -351,7 +652,7 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) { | ||||
|                 } | ||||
|  | ||||
|                 // Get the next suggestion. | ||||
|                 suggested = priority_queue.GetSuggestedNext(cpu_core_id, suggested); | ||||
|                 suggested = priority_queue.GetSuggestedNext(core_id, suggested); | ||||
|             } | ||||
|         } | ||||
|     } | ||||
| @@ -360,64 +661,6 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) { | ||||
|     SetSchedulerUpdateNeeded(kernel); | ||||
| } | ||||
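The rotation itself is simple: the thread at the front of one (core, priority) queue moves to the back, and whoever is now in front becomes the next candidate. A toy illustration of MoveToScheduledBack's effect:

#include <cstdio>
#include <deque>

int main() {
    std::deque<int> queue{10, 11, 12}; // thread ids at one (core, priority)

    // Rotate the front of the queue to the end, as RotateScheduledQueue does.
    const int top = queue.front();
    queue.pop_front();
    queue.push_back(top);

    std::printf("next thread: %d\n", queue.front()); // prints 11
}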
|  | ||||
| bool KScheduler::CanSchedule(KernelCore& kernel) { | ||||
|     return kernel.GetCurrentEmuThread()->GetDisableDispatchCount() <= 1; | ||||
| } | ||||
|  | ||||
| bool KScheduler::IsSchedulerUpdateNeeded(const KernelCore& kernel) { | ||||
|     return kernel.GlobalSchedulerContext().scheduler_update_needed.load(std::memory_order_acquire); | ||||
| } | ||||
|  | ||||
| void KScheduler::SetSchedulerUpdateNeeded(KernelCore& kernel) { | ||||
|     kernel.GlobalSchedulerContext().scheduler_update_needed.store(true, std::memory_order_release); | ||||
| } | ||||
|  | ||||
| void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) { | ||||
|     kernel.GlobalSchedulerContext().scheduler_update_needed.store(false, std::memory_order_release); | ||||
| } | ||||
|  | ||||
| void KScheduler::DisableScheduling(KernelCore& kernel) { | ||||
|     // If we are shutting down the kernel, none of this is relevant anymore. | ||||
|     if (kernel.IsShuttingDown()) { | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 0); | ||||
|     GetCurrentThreadPointer(kernel)->DisableDispatch(); | ||||
| } | ||||
|  | ||||
| void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) { | ||||
|     // If we are shutting down the kernel, none of this is relevant anymore. | ||||
|     if (kernel.IsShuttingDown()) { | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     auto* current_thread = GetCurrentThreadPointer(kernel); | ||||
|  | ||||
|     ASSERT(current_thread->GetDisableDispatchCount() >= 1); | ||||
|  | ||||
|     if (current_thread->GetDisableDispatchCount() > 1) { | ||||
|         current_thread->EnableDispatch(); | ||||
|     } else { | ||||
|         RescheduleCores(kernel, cores_needing_scheduling); | ||||
|     } | ||||
|  | ||||
|     // Special case to ensure dummy threads that are waiting block. | ||||
|     current_thread->IfDummyThreadTryWait(); | ||||
| } | ||||
|  | ||||
| u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) { | ||||
|     if (IsSchedulerUpdateNeeded(kernel)) { | ||||
|         return UpdateHighestPriorityThreadsImpl(kernel); | ||||
|     } else { | ||||
|         return 0; | ||||
|     } | ||||
| } | ||||
|  | ||||
| KSchedulerPriorityQueue& KScheduler::GetPriorityQueue(KernelCore& kernel) { | ||||
|     return kernel.GlobalSchedulerContext().priority_queue; | ||||
| } | ||||
|  | ||||
| void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) { | ||||
|     // Validate preconditions. | ||||
|     ASSERT(CanSchedule(kernel)); | ||||
| @@ -437,7 +680,7 @@ void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) { | ||||
|  | ||||
|     // Perform the yield. | ||||
|     { | ||||
|         KScopedSchedulerLock lock(kernel); | ||||
|         KScopedSchedulerLock sl{kernel}; | ||||
|  | ||||
|         const auto cur_state = cur_thread.GetRawState(); | ||||
|         if (cur_state == ThreadState::Runnable) { | ||||
| @@ -476,7 +719,7 @@ void KScheduler::YieldWithCoreMigration(KernelCore& kernel) { | ||||
|  | ||||
|     // Perform the yield. | ||||
|     { | ||||
|         KScopedSchedulerLock lock(kernel); | ||||
|         KScopedSchedulerLock sl{kernel}; | ||||
|  | ||||
|         const auto cur_state = cur_thread.GetRawState(); | ||||
|         if (cur_state == ThreadState::Runnable) { | ||||
| @@ -496,7 +739,7 @@ void KScheduler::YieldWithCoreMigration(KernelCore& kernel) { | ||||
|  | ||||
|                 if (KThread* running_on_suggested_core = | ||||
|                         (suggested_core >= 0) | ||||
|                             ? kernel.Scheduler(suggested_core).state.highest_priority_thread | ||||
|                             ? kernel.Scheduler(suggested_core).m_state.highest_priority_thread | ||||
|                             : nullptr; | ||||
|                     running_on_suggested_core != suggested) { | ||||
|                     // If the current thread's priority is higher than our suggestion's we prefer | ||||
| @@ -564,7 +807,7 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) { | ||||
|  | ||||
|     // Perform the yield. | ||||
|     { | ||||
|         KScopedSchedulerLock lock(kernel); | ||||
|         KScopedSchedulerLock sl{kernel}; | ||||
|  | ||||
|         const auto cur_state = cur_thread.GetRawState(); | ||||
|         if (cur_state == ThreadState::Runnable) { | ||||
| @@ -621,223 +864,19 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) { | ||||
|     } | ||||
| } | ||||
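For orientation, these three yield entry points back the special timeout values of svcSleepThread on HOS. The constant-to-yield mapping below is an assumption based on Atmosphere's SVC definitions, not something this diff shows:

// Hypothetical dispatch; the specific constants are assumptions.
void SleepThread(Kernel::KernelCore& kernel, s64 timeout_ns) {
    if (timeout_ns == 0) {
        Kernel::KScheduler::YieldWithoutCoreMigration(kernel);
    } else if (timeout_ns == -1) {
        Kernel::KScheduler::YieldWithCoreMigration(kernel);
    } else if (timeout_ns == -2) {
        Kernel::KScheduler::YieldToAnyThread(kernel);
    } else {
        // Positive timeouts perform a real timed sleep (not shown here).
    }
}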
|  | ||||
| KScheduler::KScheduler(Core::System& system_, s32 core_id_) : system{system_}, core_id{core_id_} { | ||||
|     switch_fiber = std::make_shared<Common::Fiber>([this] { SwitchToCurrent(); }); | ||||
|     state.needs_scheduling.store(true); | ||||
|     state.interrupt_task_thread_runnable = false; | ||||
|     state.should_count_idle = false; | ||||
|     state.idle_count = 0; | ||||
|     state.idle_thread_stack = nullptr; | ||||
|     state.highest_priority_thread = nullptr; | ||||
| } | ||||
|  | ||||
| void KScheduler::Finalize() { | ||||
|     if (idle_thread) { | ||||
|         idle_thread->Close(); | ||||
|         idle_thread = nullptr; | ||||
| void KScheduler::RescheduleOtherCores(u64 cores_needing_scheduling) { | ||||
|     if (const u64 core_mask = cores_needing_scheduling & ~(1ULL << m_core_id); core_mask != 0) { | ||||
|         RescheduleCores(kernel, core_mask); | ||||
|     } | ||||
| } | ||||
|  | ||||
| KScheduler::~KScheduler() { | ||||
|     ASSERT(!idle_thread); | ||||
| } | ||||
|  | ||||
| KThread* KScheduler::GetSchedulerCurrentThread() const { | ||||
|     if (auto result = current_thread.load(); result) { | ||||
|         return result; | ||||
|     } | ||||
|     return idle_thread; | ||||
| } | ||||
|  | ||||
| u64 KScheduler::GetLastContextSwitchTicks() const { | ||||
|     return last_context_switch_time; | ||||
| } | ||||
|  | ||||
| void KScheduler::RescheduleCurrentCore() { | ||||
|     ASSERT(GetCurrentThread(system.Kernel()).GetDisableDispatchCount() == 1); | ||||
|  | ||||
|     auto& phys_core = system.Kernel().PhysicalCore(core_id); | ||||
|     if (phys_core.IsInterrupted()) { | ||||
|         phys_core.ClearInterrupt(); | ||||
|     } | ||||
|  | ||||
|     guard.Lock(); | ||||
|     if (state.needs_scheduling.load()) { | ||||
|         Schedule(); | ||||
|     } else { | ||||
|         GetCurrentThread(system.Kernel()).EnableDispatch(); | ||||
|         guard.Unlock(); | ||||
| void KScheduler::RescheduleCores(KernelCore& kernel, u64 core_mask) { | ||||
|     // Send IPI | ||||
|     for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { | ||||
|         if (core_mask & (1ULL << i)) { | ||||
|             kernel.PhysicalCore(i).Interrupt(); | ||||
|         } | ||||
|     } | ||||
|  | ||||
| void KScheduler::OnThreadStart() { | ||||
|     SwitchContextStep2(); | ||||
| } | ||||
|  | ||||
| void KScheduler::Unload(KThread* thread) { | ||||
|     ASSERT(thread); | ||||
|  | ||||
|     LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr"); | ||||
|  | ||||
|     if (thread->IsCallingSvc()) { | ||||
|         thread->ClearIsCallingSvc(); | ||||
|     } | ||||
|  | ||||
|     auto& physical_core = system.Kernel().PhysicalCore(core_id); | ||||
|     if (!physical_core.IsInitialized()) { | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     Core::ARM_Interface& cpu_core = physical_core.ArmInterface(); | ||||
|     cpu_core.SaveContext(thread->GetContext32()); | ||||
|     cpu_core.SaveContext(thread->GetContext64()); | ||||
|     // Save the TPIDR_EL0 system register in case it was modified. | ||||
|     thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0()); | ||||
|     cpu_core.ClearExclusiveState(); | ||||
|  | ||||
|     if (!thread->IsTerminationRequested() && thread->GetActiveCore() == core_id) { | ||||
|         prev_thread = thread; | ||||
|     } else { | ||||
|         prev_thread = nullptr; | ||||
|     } | ||||
|  | ||||
|     thread->context_guard.unlock(); | ||||
| } | ||||
|  | ||||
| void KScheduler::Reload(KThread* thread) { | ||||
|     LOG_TRACE(Kernel, "core {}, reload thread {}", core_id, thread->GetName()); | ||||
|  | ||||
|     Core::ARM_Interface& cpu_core = system.ArmInterface(core_id); | ||||
|     cpu_core.LoadContext(thread->GetContext32()); | ||||
|     cpu_core.LoadContext(thread->GetContext64()); | ||||
|     cpu_core.LoadWatchpointArray(thread->GetOwnerProcess()->GetWatchpoints()); | ||||
|     cpu_core.SetTlsAddress(thread->GetTLSAddress()); | ||||
|     cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0()); | ||||
|     cpu_core.ClearExclusiveState(); | ||||
| } | ||||
|  | ||||
| void KScheduler::SwitchContextStep2() { | ||||
|     // Load context of new thread | ||||
|     Reload(GetCurrentThreadPointer(system.Kernel())); | ||||
|  | ||||
|     RescheduleCurrentCore(); | ||||
| } | ||||
|  | ||||
| void KScheduler::Schedule() { | ||||
|     ASSERT(GetCurrentThread(system.Kernel()).GetDisableDispatchCount() == 1); | ||||
|     this->ScheduleImpl(); | ||||
| } | ||||
|  | ||||
| void KScheduler::ScheduleImpl() { | ||||
|     KThread* previous_thread = GetCurrentThreadPointer(system.Kernel()); | ||||
|     KThread* next_thread = state.highest_priority_thread; | ||||
|  | ||||
|     state.needs_scheduling.store(false); | ||||
|  | ||||
|     // We never want to schedule a null thread, so use the idle thread if we don't have a next. | ||||
|     if (next_thread == nullptr) { | ||||
|         next_thread = idle_thread; | ||||
|     } | ||||
|  | ||||
|     if (next_thread->GetCurrentCore() != core_id) { | ||||
|         next_thread->SetCurrentCore(core_id); | ||||
|     } | ||||
|  | ||||
|     // We never want to schedule a dummy thread, as these are only used by host threads for locking. | ||||
|     if (next_thread->GetThreadType() == ThreadType::Dummy) { | ||||
|         ASSERT_MSG(false, "Dummy threads should never be scheduled!"); | ||||
|         next_thread = idle_thread; | ||||
|     } | ||||
|  | ||||
|     // If we're not actually switching thread, there's nothing to do. | ||||
|     if (next_thread == current_thread.load()) { | ||||
|         previous_thread->EnableDispatch(); | ||||
|         guard.Unlock(); | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     // Update the CPU time tracking variables. | ||||
|     KProcess* const previous_process = system.Kernel().CurrentProcess(); | ||||
|     UpdateLastContextSwitchTime(previous_thread, previous_process); | ||||
|  | ||||
|     // Save context for previous thread | ||||
|     Unload(previous_thread); | ||||
|  | ||||
|     std::shared_ptr<Common::Fiber>* old_context; | ||||
|     old_context = &previous_thread->GetHostContext(); | ||||
|  | ||||
|     // Set the new thread. | ||||
|     SetCurrentThread(system.Kernel(), next_thread); | ||||
|     current_thread.store(next_thread); | ||||
|  | ||||
|     guard.Unlock(); | ||||
|  | ||||
|     Common::Fiber::YieldTo(*old_context, *switch_fiber); | ||||
|     /// When a thread wakes up, the scheduler may have changed to other in another core. | ||||
|     auto& next_scheduler = *system.Kernel().CurrentScheduler(); | ||||
|     next_scheduler.SwitchContextStep2(); | ||||
| } | ||||
|  | ||||
| void KScheduler::SwitchToCurrent() { | ||||
|     while (true) { | ||||
|         { | ||||
|             KScopedSpinLock lk{guard}; | ||||
|             current_thread.store(state.highest_priority_thread); | ||||
|             state.needs_scheduling.store(false); | ||||
|         } | ||||
|         const auto is_switch_pending = [this] { | ||||
|             KScopedSpinLock lk{guard}; | ||||
|             return state.needs_scheduling.load(); | ||||
|         }; | ||||
|         do { | ||||
|             auto next_thread = current_thread.load(); | ||||
|             if (next_thread != nullptr) { | ||||
|                 const auto locked = next_thread->context_guard.try_lock(); | ||||
|                 if (state.needs_scheduling.load()) { | ||||
|                     next_thread->context_guard.unlock(); | ||||
|                     break; | ||||
|                 } | ||||
|                 if (next_thread->GetActiveCore() != core_id) { | ||||
|                     next_thread->context_guard.unlock(); | ||||
|                     break; | ||||
|                 } | ||||
|                 if (!locked) { | ||||
|                     continue; | ||||
|                 } | ||||
|             } | ||||
|             auto thread = next_thread ? next_thread : idle_thread; | ||||
|             SetCurrentThread(system.Kernel(), thread); | ||||
|             Common::Fiber::YieldTo(switch_fiber, *thread->GetHostContext()); | ||||
|         } while (!is_switch_pending()); | ||||
|     } | ||||
| } | ||||
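SwitchToCurrent commits to a candidate only after opportunistically taking its context_guard and then re-checking needs_scheduling and the core assignment; if either check fails it backs out and rescans. A distilled sketch of that revalidate-after-try_lock pattern, with hypothetical standalone types standing in for KThread and the scheduler state:

    #include <atomic>
    #include <mutex>

    struct Thread {
        std::mutex context_guard;
        int active_core{0};
    };

    std::atomic<bool> needs_scheduling{false};

    // Returns true with context_guard held when `next` is safe to run on
    // `core_id`; otherwise backs out so the caller can retry or rescan.
    bool TryCommitSwitch(Thread& next, int core_id) {
        if (!next.context_guard.try_lock()) {
            return false; // another core owns the context; spin and retry
        }
        if (needs_scheduling.load() || next.active_core != core_id) {
            next.context_guard.unlock(); // state changed under us; back out
            return false;
        }
        return true;
    }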
|  | ||||
| void KScheduler::UpdateLastContextSwitchTime(KThread* thread, KProcess* process) { | ||||
|     const u64 prev_switch_ticks = last_context_switch_time; | ||||
|     const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks(); | ||||
|     const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks; | ||||
|  | ||||
|     if (thread != nullptr) { | ||||
|         thread->AddCpuTime(core_id, update_ticks); | ||||
|     } | ||||
|  | ||||
|     if (process != nullptr) { | ||||
|         process->UpdateCPUTimeTicks(update_ticks); | ||||
|     } | ||||
|  | ||||
|     last_context_switch_time = most_recent_switch_ticks; | ||||
| } | ||||
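The accounting above charges the outgoing thread and process the tick delta since the previous switch, and the stored timestamp is simply the raw tick count. A small sketch of the same arithmetic with hypothetical free-standing state (ChargeElapsedTicks is illustrative, not part of the codebase):

    #include <cstdint>
    #include <cstdio>

    uint64_t last_context_switch_time = 0; // raw tick count at the last switch

    // Returns the ticks to charge to the outgoing thread and its process.
    uint64_t ChargeElapsedTicks(uint64_t now_ticks) {
        const uint64_t update_ticks = now_ticks - last_context_switch_time;
        last_context_switch_time = now_ticks;
        return update_ticks;
    }

    int main() {
        ChargeElapsedTicks(1000); // first switch observed at tick 1000
        // Second switch at tick 1450 charges the 450 elapsed ticks.
        std::printf("%llu\n", static_cast<unsigned long long>(ChargeElapsedTicks(1450)));
    }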
|  | ||||
| void KScheduler::Initialize() { | ||||
|     idle_thread = KThread::Create(system.Kernel()); | ||||
|     ASSERT(KThread::InitializeIdleThread(system, idle_thread, core_id).IsSuccess()); | ||||
|     idle_thread->SetName(fmt::format("IdleThread:{}", core_id)); | ||||
|     idle_thread->EnableDispatch(); | ||||
| } | ||||
|  | ||||
| KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel) | ||||
|     : KScopedLock(kernel.GlobalSchedulerContext().SchedulerLock()) {} | ||||
|  | ||||
| KScopedSchedulerLock::~KScopedSchedulerLock() = default; | ||||
|  | ||||
| } // namespace Kernel | ||||
|   | ||||
| @@ -11,6 +11,7 @@ | ||||
| #include "core/hle/kernel/k_scheduler_lock.h" | ||||
| #include "core/hle/kernel/k_scoped_lock.h" | ||||
| #include "core/hle/kernel/k_spin_lock.h" | ||||
| #include "core/hle/kernel/k_thread.h" | ||||
|  | ||||
| namespace Common { | ||||
| class Fiber; | ||||
| @@ -23,184 +24,150 @@ class System; | ||||
| namespace Kernel { | ||||
|  | ||||
| class KernelCore; | ||||
| class KInterruptTaskManager; | ||||
| class KProcess; | ||||
| class SchedulerLock; | ||||
| class KThread; | ||||
| class KScopedDisableDispatch; | ||||
| class KScopedSchedulerLock; | ||||
| class KScopedSchedulerLockAndSleep; | ||||
|  | ||||
| class KScheduler final { | ||||
| public: | ||||
|     explicit KScheduler(Core::System& system_, s32 core_id_); | ||||
|     YUZU_NON_COPYABLE(KScheduler); | ||||
|     YUZU_NON_MOVEABLE(KScheduler); | ||||
|  | ||||
|     using LockType = KAbstractSchedulerLock<KScheduler>; | ||||
|  | ||||
|     explicit KScheduler(KernelCore& kernel); | ||||
|     ~KScheduler(); | ||||
|  | ||||
|     void Finalize(); | ||||
|  | ||||
|     /// Reschedules to the next available thread (call after current thread is suspended) | ||||
|     void RescheduleCurrentCore(); | ||||
|  | ||||
|     /// Reschedules cores pending reschedule, to be called on EnableScheduling. | ||||
|     static void RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule); | ||||
|  | ||||
|     /// The next two are for single-core only. | ||||
|     /// Unload current thread before preempting core. | ||||
|     void Initialize(KThread* main_thread, KThread* idle_thread, s32 core_id); | ||||
|     void Activate(); | ||||
|     void OnThreadStart(); | ||||
|     void Unload(KThread* thread); | ||||
|  | ||||
|     /// Reload current thread after core preemption. | ||||
|     void Reload(KThread* thread); | ||||
|  | ||||
|     /// Gets the current running thread | ||||
|     [[nodiscard]] KThread* GetSchedulerCurrentThread() const; | ||||
|     void SetInterruptTaskRunnable(); | ||||
|     void RequestScheduleOnInterrupt(); | ||||
|     void PreemptSingleCore(); | ||||
|  | ||||
|     /// Gets the idle thread | ||||
|     [[nodiscard]] KThread* GetIdleThread() const { | ||||
|         return idle_thread; | ||||
|     u64 GetIdleCount() { | ||||
|         return m_state.idle_count; | ||||
|     } | ||||
|  | ||||
|     /// Returns true if the scheduler is idle | ||||
|     [[nodiscard]] bool IsIdle() const { | ||||
|         return GetSchedulerCurrentThread() == idle_thread; | ||||
|     KThread* GetIdleThread() const { | ||||
|         return m_idle_thread; | ||||
|     } | ||||
|  | ||||
|     /// Gets the timestamp for the last context switch in ticks. | ||||
|     [[nodiscard]] u64 GetLastContextSwitchTicks() const; | ||||
|  | ||||
|     [[nodiscard]] bool ContextSwitchPending() const { | ||||
|         return state.needs_scheduling.load(std::memory_order_relaxed); | ||||
|     bool IsIdle() const { | ||||
|         return m_current_thread.load() == m_idle_thread; | ||||
|     } | ||||
|  | ||||
|     void Initialize(); | ||||
|  | ||||
|     void OnThreadStart(); | ||||
|  | ||||
|     [[nodiscard]] std::shared_ptr<Common::Fiber>& ControlContext() { | ||||
|         return switch_fiber; | ||||
|     KThread* GetPreviousThread() const { | ||||
|         return m_state.prev_thread; | ||||
|     } | ||||
|  | ||||
|     [[nodiscard]] const std::shared_ptr<Common::Fiber>& ControlContext() const { | ||||
|         return switch_fiber; | ||||
|     KThread* GetSchedulerCurrentThread() const { | ||||
|         return m_current_thread.load(); | ||||
|     } | ||||
|  | ||||
|     [[nodiscard]] u64 UpdateHighestPriorityThread(KThread* highest_thread); | ||||
|     s64 GetLastContextSwitchTime() const { | ||||
|         return m_last_context_switch_time; | ||||
|     } | ||||
|  | ||||
|     /** | ||||
|      * Takes a thread and moves it to the back of its priority list. | ||||
|      * | ||||
|      * @note This operation can be redundant, in which case no scheduling change occurs. | ||||
|      */ | ||||
|     static void YieldWithoutCoreMigration(KernelCore& kernel); | ||||
|     // Static public API. | ||||
|     static bool CanSchedule(KernelCore& kernel) { | ||||
|         return GetCurrentThread(kernel).GetDisableDispatchCount() == 0; | ||||
|     } | ||||
|     static bool IsSchedulerLockedByCurrentThread(KernelCore& kernel) { | ||||
|         return kernel.GlobalSchedulerContext().scheduler_lock.IsLockedByCurrentThread(); | ||||
|     } | ||||
|  | ||||
|     /** | ||||
|      * Takes a thread and moves it to the back of its priority list. | ||||
|      * Afterwards, it tries to pick a suggested thread from the suggested queue that has worse | ||||
|      * time or a better priority than the next thread in the core. | ||||
|      * | ||||
|      * @note This operation can be redundant, in which case no scheduling change occurs. | ||||
|      */ | ||||
|     static void YieldWithCoreMigration(KernelCore& kernel); | ||||
|     static bool IsSchedulerUpdateNeeded(KernelCore& kernel) { | ||||
|         return kernel.GlobalSchedulerContext().scheduler_update_needed; | ||||
|     } | ||||
|     static void SetSchedulerUpdateNeeded(KernelCore& kernel) { | ||||
|         kernel.GlobalSchedulerContext().scheduler_update_needed = true; | ||||
|     } | ||||
|     static void ClearSchedulerUpdateNeeded(KernelCore& kernel) { | ||||
|         kernel.GlobalSchedulerContext().scheduler_update_needed = false; | ||||
|     } | ||||
|  | ||||
|     /** | ||||
|      * Takes a thread and moves it out of the scheduling queue | ||||
|      * and into the suggested queue. If no thread can then be scheduled on that core, | ||||
|      * a suggested thread is obtained instead. | ||||
|      * | ||||
|      * @note This operation can be redundant, in which case no scheduling change occurs. | ||||
|      */ | ||||
|     static void YieldToAnyThread(KernelCore& kernel); | ||||
|     static void DisableScheduling(KernelCore& kernel); | ||||
|     static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling); | ||||
|  | ||||
|     static u64 UpdateHighestPriorityThreads(KernelCore& kernel); | ||||
|  | ||||
|     static void ClearPreviousThread(KernelCore& kernel, KThread* thread); | ||||
|  | ||||
|     /// Notify the scheduler a thread's status has changed. | ||||
|     static void OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state); | ||||
|  | ||||
|     /// Notify the scheduler a thread's priority has changed. | ||||
|     static void OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s32 old_priority); | ||||
|  | ||||
|     /// Notify the scheduler a thread's core and/or affinity mask has changed. | ||||
|     static void OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread, | ||||
|                                             const KAffinityMask& old_affinity, s32 old_core); | ||||
|  | ||||
|     static bool CanSchedule(KernelCore& kernel); | ||||
|     static bool IsSchedulerUpdateNeeded(const KernelCore& kernel); | ||||
|     static void SetSchedulerUpdateNeeded(KernelCore& kernel); | ||||
|     static void ClearSchedulerUpdateNeeded(KernelCore& kernel); | ||||
|     static void DisableScheduling(KernelCore& kernel); | ||||
|     static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling); | ||||
|     [[nodiscard]] static u64 UpdateHighestPriorityThreads(KernelCore& kernel); | ||||
|     static void RotateScheduledQueue(KernelCore& kernel, s32 core_id, s32 priority); | ||||
|     static void RescheduleCores(KernelCore& kernel, u64 cores_needing_scheduling); | ||||
|  | ||||
|     static void YieldWithoutCoreMigration(KernelCore& kernel); | ||||
|     static void YieldWithCoreMigration(KernelCore& kernel); | ||||
|     static void YieldToAnyThread(KernelCore& kernel); | ||||
|  | ||||
| private: | ||||
|     friend class GlobalSchedulerContext; | ||||
|     // Static private API. | ||||
|     static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel) { | ||||
|         return kernel.GlobalSchedulerContext().priority_queue; | ||||
|     } | ||||
|     static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel); | ||||
|  | ||||
|     /** | ||||
|      * Takes care of selecting the new scheduled threads in three steps: | ||||
|      * | ||||
|      * 1. First, a thread is selected from the top of the priority queue. If no thread | ||||
|      *    is obtained, we move to step two; otherwise, we are done. | ||||
|      * | ||||
|      * 2. Second, we try to get a suggested thread that's not assigned to any core, or | ||||
|      *    that is not the top thread on its core. | ||||
|      * | ||||
|      * 3. Third, if no suggested thread is found, we do a second pass and pick a running | ||||
|      *    thread on another core and swap it with that core's current thread. | ||||
|      * | ||||
|      * Returns the cores needing scheduling. | ||||
|      */ | ||||
|     [[nodiscard]] static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel); | ||||
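As a rough illustration of the three-step selection described in the docstring above, the sketch below models those steps over hypothetical simplified queues; the real KSchedulerPriorityQueue, affinity handling, and the step-3 swap are substantially more involved:

    #include <vector>

    struct Thread {
        int priority;
        int assigned_core; // -1: not currently assigned to any core
    };

    struct CoreQueue {
        std::vector<Thread*> scheduled; // sorted by priority, best first
        std::vector<Thread*> suggested; // candidates this core may steal
    };

    Thread* SelectThread(const CoreQueue& q, int core) {
        // 1. Take the top of this core's own priority queue if present.
        if (!q.scheduled.empty()) {
            return q.scheduled.front();
        }
        // 2. Otherwise, prefer a suggested thread not pinned to any core.
        for (Thread* t : q.suggested) {
            if (t->assigned_core == -1) {
                return t;
            }
        }
        // 3. Second pass: take a thread running on another core (the real
        //    implementation swaps it with that core's current thread).
        for (Thread* t : q.suggested) {
            if (t->assigned_core != core) {
                return t;
            }
        }
        return nullptr; // nothing runnable: schedule the idle thread
    }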
|     static void RescheduleCurrentHLEThread(KernelCore& kernel); | ||||
|  | ||||
|     [[nodiscard]] static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel); | ||||
|  | ||||
|     void RotateScheduledQueue(s32 cpu_core_id, s32 priority); | ||||
|     // Instanced private API. | ||||
|     void ScheduleImpl(); | ||||
|     void ScheduleImplFiber(); | ||||
|     void SwitchThread(KThread* next_thread); | ||||
|  | ||||
|     void Schedule(); | ||||
|     void ScheduleOnInterrupt(); | ||||
|  | ||||
|     /// Switches the CPU's active thread context to that of the specified thread | ||||
|     void ScheduleImpl(); | ||||
|     void RescheduleOtherCores(u64 cores_needing_scheduling); | ||||
|     void RescheduleCurrentCore(); | ||||
|     void RescheduleCurrentCoreImpl(); | ||||
|  | ||||
|     /// When a thread wakes up, it must run this through its new scheduler. | ||||
|     void SwitchContextStep2(); | ||||
|     u64 UpdateHighestPriorityThread(KThread* thread); | ||||
|  | ||||
|     /** | ||||
|      * Called on every context switch to update the internal timestamp | ||||
|      * This also updates the running time ticks for the given thread and | ||||
|      * process using the following difference: | ||||
|      * | ||||
|      * ticks += most_recent_ticks - last_context_switch_ticks | ||||
|      * | ||||
|      * The internal tick timestamp for the scheduler is simply the | ||||
|      * most recent tick count retrieved. No special arithmetic is | ||||
|      * applied to it. | ||||
|      */ | ||||
|     void UpdateLastContextSwitchTime(KThread* thread, KProcess* process); | ||||
|  | ||||
|     void SwitchToCurrent(); | ||||
|  | ||||
|     KThread* prev_thread{}; | ||||
|     std::atomic<KThread*> current_thread{}; | ||||
|  | ||||
|     KThread* idle_thread{}; | ||||
|  | ||||
|     std::shared_ptr<Common::Fiber> switch_fiber{}; | ||||
| private: | ||||
|     friend class KScopedDisableDispatch; | ||||
|  | ||||
|     struct SchedulingState { | ||||
|         std::atomic<bool> needs_scheduling{}; | ||||
|         bool interrupt_task_thread_runnable{}; | ||||
|         bool should_count_idle{}; | ||||
|         u64 idle_count{}; | ||||
|         KThread* highest_priority_thread{}; | ||||
|         void* idle_thread_stack{}; | ||||
|         std::atomic<bool> needs_scheduling{false}; | ||||
|         bool interrupt_task_runnable{false}; | ||||
|         bool should_count_idle{false}; | ||||
|         u64 idle_count{0}; | ||||
|         KThread* highest_priority_thread{nullptr}; | ||||
|         void* idle_thread_stack{nullptr}; | ||||
|         std::atomic<KThread*> prev_thread{nullptr}; | ||||
|         KInterruptTaskManager* interrupt_task_manager{nullptr}; | ||||
|     }; | ||||
|  | ||||
|     SchedulingState state; | ||||
|     KernelCore& kernel; | ||||
|     SchedulingState m_state; | ||||
|     bool m_is_active{false}; | ||||
|     s32 m_core_id{0}; | ||||
|     s64 m_last_context_switch_time{0}; | ||||
|     KThread* m_idle_thread{nullptr}; | ||||
|     std::atomic<KThread*> m_current_thread{nullptr}; | ||||
|  | ||||
|     Core::System& system; | ||||
|     u64 last_context_switch_time{}; | ||||
|     const s32 core_id; | ||||
|  | ||||
|     KSpinLock guard{}; | ||||
|     std::shared_ptr<Common::Fiber> m_switch_fiber{}; | ||||
|     KThread* m_switch_cur_thread{}; | ||||
|     KThread* m_switch_highest_priority_thread{}; | ||||
|     bool m_switch_from_schedule{}; | ||||
| }; | ||||
|  | ||||
| class [[nodiscard]] KScopedSchedulerLock : KScopedLock<GlobalSchedulerContext::LockType> { | ||||
| class KScopedSchedulerLock : public KScopedLock<KScheduler::LockType> { | ||||
| public: | ||||
|     explicit KScopedSchedulerLock(KernelCore& kernel); | ||||
|     ~KScopedSchedulerLock(); | ||||
|     explicit KScopedSchedulerLock(KernelCore& kernel) | ||||
|         : KScopedLock(kernel.GlobalSchedulerContext().scheduler_lock) {} | ||||
|     ~KScopedSchedulerLock() = default; | ||||
| }; | ||||
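KScopedSchedulerLock is now a thin wrapper over KScopedLock instantiated with the global scheduler lock. A minimal sketch of the underlying RAII pattern, assuming the wrapped type exposes Lock() and Unlock() as KAbstractSchedulerLock does (ScopedLockSketch is a hypothetical name):

    // Lock on construction, unlock on destruction, non-copyable.
    template <typename T>
    class ScopedLockSketch {
    public:
        explicit ScopedLockSketch(T& lock) : m_lock{lock} {
            m_lock.Lock();
        }
        ~ScopedLockSketch() {
            m_lock.Unlock();
        }

        ScopedLockSketch(const ScopedLockSketch&) = delete;
        ScopedLockSketch& operator=(const ScopedLockSketch&) = delete;

    private:
        T& m_lock;
    };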
|  | ||||
| } // namespace Kernel | ||||
|   | ||||
| @@ -5,9 +5,11 @@ | ||||
|  | ||||
| #include <atomic> | ||||
| #include "common/assert.h" | ||||
| #include "core/hle/kernel/k_interrupt_manager.h" | ||||
| #include "core/hle/kernel/k_spin_lock.h" | ||||
| #include "core/hle/kernel/k_thread.h" | ||||
| #include "core/hle/kernel/kernel.h" | ||||
| #include "core/hle/kernel/physical_core.h" | ||||
|  | ||||
| namespace Kernel { | ||||
|  | ||||
|   | ||||
| @@ -258,7 +258,18 @@ Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_ | ||||
| } | ||||
|  | ||||
| Result KThread::InitializeDummyThread(KThread* thread) { | ||||
|     return thread->Initialize({}, {}, {}, DummyThreadPriority, 3, {}, ThreadType::Dummy); | ||||
|     // Initialize the thread. | ||||
|     R_TRY(thread->Initialize({}, {}, {}, DummyThreadPriority, 3, {}, ThreadType::Dummy)); | ||||
|  | ||||
|     // Initialize emulation parameters. | ||||
|     thread->stack_parameters.disable_count = 0; | ||||
|  | ||||
|     return ResultSuccess; | ||||
| } | ||||
|  | ||||
| Result KThread::InitializeMainThread(Core::System& system, KThread* thread, s32 virt_core) { | ||||
|     return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main, | ||||
|                             system.GetCpuManager().GetGuestActivateFunc()); | ||||
| } | ||||
|  | ||||
| Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) { | ||||
| @@ -277,7 +288,7 @@ Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThr | ||||
|                                      KProcess* owner) { | ||||
|     system.Kernel().GlobalSchedulerContext().AddThread(thread); | ||||
|     return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner, | ||||
|                             ThreadType::User, system.GetCpuManager().GetGuestThreadStartFunc()); | ||||
|                             ThreadType::User, system.GetCpuManager().GetGuestThreadFunc()); | ||||
| } | ||||
|  | ||||
| void KThread::PostDestroy(uintptr_t arg) { | ||||
| @@ -1058,6 +1069,8 @@ void KThread::Exit() { | ||||
|         // Register the thread as a work task. | ||||
|         KWorkerTaskManager::AddTask(kernel, KWorkerTaskManager::WorkerType::Exit, this); | ||||
|     } | ||||
|  | ||||
|     UNREACHABLE_MSG("KThread::Exit() would return"); | ||||
| } | ||||
|  | ||||
| Result KThread::Sleep(s64 timeout) { | ||||
| @@ -1093,6 +1106,8 @@ void KThread::IfDummyThreadTryWait() { | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     ASSERT(!kernel.IsPhantomModeForSingleCore()); | ||||
|  | ||||
|     // Block until we are no longer waiting. | ||||
|     std::unique_lock lk(dummy_wait_lock); | ||||
|     dummy_wait_cv.wait( | ||||
| @@ -1197,16 +1212,13 @@ KScopedDisableDispatch::~KScopedDisableDispatch() { | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     // Skip the reschedule if single-core, as dispatch tracking is disabled here. | ||||
|     if (!Settings::values.use_multi_core.GetValue()) { | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) { | ||||
|         auto scheduler = kernel.CurrentScheduler(); | ||||
|         auto* scheduler = kernel.CurrentScheduler(); | ||||
|  | ||||
|         if (scheduler) { | ||||
|         if (scheduler && !kernel.IsPhantomModeForSingleCore()) { | ||||
|             scheduler->RescheduleCurrentCore(); | ||||
|         } else { | ||||
|             KScheduler::RescheduleCurrentHLEThread(kernel); | ||||
|         } | ||||
|     } else { | ||||
|         GetCurrentThread(kernel).EnableDispatch(); | ||||
|   | ||||
| @@ -413,6 +413,9 @@ public: | ||||
|  | ||||
|     [[nodiscard]] static Result InitializeDummyThread(KThread* thread); | ||||
|  | ||||
|     [[nodiscard]] static Result InitializeMainThread(Core::System& system, KThread* thread, | ||||
|                                                      s32 virt_core); | ||||
|  | ||||
|     [[nodiscard]] static Result InitializeIdleThread(Core::System& system, KThread* thread, | ||||
|                                                      s32 virt_core); | ||||
|  | ||||
| @@ -480,39 +483,16 @@ public: | ||||
|         return per_core_priority_queue_entry[core]; | ||||
|     } | ||||
|  | ||||
|     [[nodiscard]] bool IsKernelThread() const { | ||||
|         return GetActiveCore() == 3; | ||||
|     } | ||||
|  | ||||
|     [[nodiscard]] bool IsDispatchTrackingDisabled() const { | ||||
|         return is_single_core || IsKernelThread(); | ||||
|     } | ||||
|  | ||||
|     [[nodiscard]] s32 GetDisableDispatchCount() const { | ||||
|         if (IsDispatchTrackingDisabled()) { | ||||
|             // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch. | ||||
|             return 1; | ||||
|         } | ||||
|  | ||||
|         return this->GetStackParameters().disable_count; | ||||
|     } | ||||
|  | ||||
|     void DisableDispatch() { | ||||
|         if (IsDispatchTrackingDisabled()) { | ||||
|             // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch. | ||||
|             return; | ||||
|         } | ||||
|  | ||||
|         ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0); | ||||
|         this->GetStackParameters().disable_count++; | ||||
|     } | ||||
|  | ||||
|     void EnableDispatch() { | ||||
|         if (IsDispatchTrackingDisabled()) { | ||||
|             // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch. | ||||
|             return; | ||||
|         } | ||||
|  | ||||
|         ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0); | ||||
|         this->GetStackParameters().disable_count--; | ||||
|     } | ||||
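The removed special cases above collapse dispatch tracking into a plain per-thread counter: DisableDispatch increments, EnableDispatch decrements, and scheduling is permitted only while the count is zero. A sketch of that counting scheme, using hypothetical simplified types rather than the real KThread:

    #include <cassert>

    struct StackParameters {
        int disable_count{0};
    };

    struct ThreadSketch {
        StackParameters params;

        void DisableDispatch() {
            assert(params.disable_count >= 0);
            ++params.disable_count; // nested disables stack up
        }
        void EnableDispatch() {
            assert(params.disable_count > 0);
            --params.disable_count;
        }
        bool CanSchedule() const {
            return params.disable_count == 0; // dispatch allowed only at zero
        }
    };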
|   | ||||
| @@ -64,8 +64,6 @@ struct KernelCore::Impl { | ||||
|  | ||||
|         is_phantom_mode_for_singlecore = false; | ||||
|  | ||||
|         InitializePhysicalCores(); | ||||
|  | ||||
|         // Derive the initial memory layout from the emulated board | ||||
|         Init::InitializeSlabResourceCounts(kernel); | ||||
|         DeriveInitialMemoryLayout(); | ||||
| @@ -75,9 +73,9 @@ struct KernelCore::Impl { | ||||
|         InitializeSystemResourceLimit(kernel, system.CoreTiming()); | ||||
|         InitializeMemoryLayout(); | ||||
|         Init::InitializeKPageBufferSlabHeap(system); | ||||
|         InitializeSchedulers(); | ||||
|         InitializeShutdownThreads(); | ||||
|         InitializePreemption(kernel); | ||||
|         InitializePhysicalCores(); | ||||
|  | ||||
|         RegisterHostThread(); | ||||
|     } | ||||
| @@ -136,7 +134,6 @@ struct KernelCore::Impl { | ||||
|                 shutdown_threads[core_id] = nullptr; | ||||
|             } | ||||
|  | ||||
|             schedulers[core_id]->Finalize(); | ||||
|             schedulers[core_id].reset(); | ||||
|         } | ||||
|  | ||||
| @@ -199,14 +196,21 @@ struct KernelCore::Impl { | ||||
|         exclusive_monitor = | ||||
|             Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES); | ||||
|         for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { | ||||
|             schedulers[i] = std::make_unique<Kernel::KScheduler>(system, i); | ||||
|             cores.emplace_back(i, system, *schedulers[i], interrupts); | ||||
|         } | ||||
|     } | ||||
|             const s32 core{static_cast<s32>(i)}; | ||||
|  | ||||
|     void InitializeSchedulers() { | ||||
|         for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { | ||||
|             cores[i].Scheduler().Initialize(); | ||||
|             schedulers[i] = std::make_unique<Kernel::KScheduler>(system.Kernel()); | ||||
|             cores.emplace_back(i, system, *schedulers[i], interrupts); | ||||
|  | ||||
|             auto* main_thread{Kernel::KThread::Create(system.Kernel())}; | ||||
|             main_thread->SetName(fmt::format("MainThread:{}", core)); | ||||
|             main_thread->SetCurrentCore(core); | ||||
|             ASSERT(Kernel::KThread::InitializeMainThread(system, main_thread, core).IsSuccess()); | ||||
|  | ||||
|             auto* idle_thread{Kernel::KThread::Create(system.Kernel())}; | ||||
|             idle_thread->SetCurrentCore(core); | ||||
|             ASSERT(Kernel::KThread::InitializeIdleThread(system, idle_thread, core).IsSuccess()); | ||||
|  | ||||
|             schedulers[i]->Initialize(main_thread, idle_thread, core); | ||||
|         } | ||||
|     } | ||||
|  | ||||
| @@ -1109,10 +1113,11 @@ void KernelCore::Suspend(bool suspended) { | ||||
| } | ||||
|  | ||||
| void KernelCore::ShutdownCores() { | ||||
|     KScopedSchedulerLock lk{*this}; | ||||
|  | ||||
|     for (auto* thread : impl->shutdown_threads) { | ||||
|         void(thread->Run()); | ||||
|     } | ||||
|     InterruptAllPhysicalCores(); | ||||
| } | ||||
|  | ||||
| bool KernelCore::IsMulticore() const { | ||||
|   | ||||
| @@ -43,6 +43,7 @@ void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) { | ||||
|  | ||||
| void PhysicalCore::Run() { | ||||
|     arm_interface->Run(); | ||||
|     arm_interface->ClearExclusiveState(); | ||||
| } | ||||
|  | ||||
| void PhysicalCore::Idle() { | ||||
|   | ||||
| @@ -887,7 +887,7 @@ static Result GetInfo(Core::System& system, u64* result, u64 info_id, Handle han | ||||
|         const auto* const current_thread = GetCurrentThreadPointer(system.Kernel()); | ||||
|         const bool same_thread = current_thread == thread.GetPointerUnsafe(); | ||||
|  | ||||
|         const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTicks(); | ||||
|         const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTime(); | ||||
|         u64 out_ticks = 0; | ||||
|         if (same_thread && info_sub_id == 0xFFFFFFFFFFFFFFFF) { | ||||
|             const u64 thread_ticks = current_thread->GetCpuTime(); | ||||
| @@ -3026,11 +3026,6 @@ void Call(Core::System& system, u32 immediate) { | ||||
|     } | ||||
|  | ||||
|     kernel.ExitSVCProfile(); | ||||
|  | ||||
|     if (!thread->IsCallingSvc()) { | ||||
|         auto* host_context = thread->GetHostContext().get(); | ||||
|         host_context->Rewind(); | ||||
|     } | ||||
| } | ||||
|  | ||||
| } // namespace Kernel::Svc | ||||
|   | ||||