Merge pull request #9666 from liamwhite/wait-for-me
kernel: fix incorrect locking order in suspension
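
The hunks below make two related changes. KThread now records explicitly whether its address key was set by kernel code (KLightLock stores the host address of its tag) or by user code (condition variables store a guest VAddr), instead of classifying the key by comparing it against the guest kernel virtual address range. And KernelCore::Suspend now waits for the application process to stop by re-scanning every core's current thread under the scheduler lock, replacing the per-thread KThread::WaitUntilSuspended busy-wait. The sketch below is not part of the diff and not yuzu code; the names and window values are invented, and it only illustrates why a range check cannot classify a host pointer: the address of an emulator-side object has no particular relationship to the emulated kernel's virtual address window.

    // Illustrative sketch (invented names, placeholder constants).
    #include <cstdint>
    #include <cstdio>

    // Stand-ins for a guest kernel virtual-address window.
    constexpr std::uint64_t GuestKernelVaBase = 0xFFFF'0000'0000'0000ULL;
    constexpr std::uint64_t GuestKernelVaLast = 0xFFFF'FFFF'FFFF'FFFFULL;

    constexpr bool LooksLikeGuestKernelAddress(std::uint64_t key) {
        return GuestKernelVaBase <= key && key <= GuestKernelVaLast;
    }

    int main() {
        std::uint32_t host_object{}; // stands in for a KLightLock's tag
        const auto key = reinterpret_cast<std::uintptr_t>(&host_object);

        // On a typical host this prints "no" even though the key was produced by
        // kernel-side (emulator) code; hence the explicit address_key_is_kernel flag.
        std::printf("host key 0x%llx in guest kernel window: %s\n",
                    static_cast<unsigned long long>(key),
                    LooksLikeGuestKernelAddress(key) ? "yes" : "no");
    }
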
@@ -171,7 +171,7 @@ Result KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value)
         R_UNLESS(owner_thread != nullptr, ResultInvalidHandle);
 
         // Update the lock.
-        cur_thread->SetAddressKey(addr, value);
+        cur_thread->SetUserAddressKey(addr, value);
         owner_thread->AddWaiter(cur_thread);
 
         // Begin waiting.

@@ -68,7 +68,7 @@ bool KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {
 
         // Add the current thread as a waiter on the owner.
         KThread* owner_thread = reinterpret_cast<KThread*>(_owner & ~1ULL);
-        cur_thread->SetAddressKey(reinterpret_cast<uintptr_t>(std::addressof(tag)));
+        cur_thread->SetKernelAddressKey(reinterpret_cast<uintptr_t>(std::addressof(tag)));
         owner_thread->AddWaiter(cur_thread);
 
         // Begin waiting to hold the lock.

@@ -67,9 +67,9 @@ constexpr size_t KernelPageBufferAdditionalSize = 0x33C000;
 constexpr std::size_t KernelResourceSize = KernelPageTableHeapSize + KernelInitialPageHeapSize +
                                            KernelSlabHeapSize + KernelPageBufferHeapSize;
 
-constexpr bool IsKernelAddressKey(VAddr key) {
-    return KernelVirtualAddressSpaceBase <= key && key <= KernelVirtualAddressSpaceLast;
-}
+//! NB: Use KThread::GetAddressKeyIsKernel().
+//! See explanation for deviation of GetAddressKey.
+bool IsKernelAddressKey(VAddr key) = delete;
 
 constexpr bool IsKernelAddress(VAddr address) {
     return KernelVirtualAddressSpaceBase <= address && address < KernelVirtualAddressSpaceEnd;

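Note that the helper is poisoned with = delete rather than removed outright: the declaration stays in the header, but any call site that still uses it fails to compile, which lets the //! NB comment point stragglers at KThread::GetAddressKeyIsKernel(). A minimal standalone illustration of the same C++11 mechanism, with a hypothetical function name:

    // Deleting a free function keeps its name visible but makes calls ill-formed.
    bool IsSpecialKey(unsigned long key) = delete;

    int main() {
        // bool b = IsSpecialKey(42); // error: use of deleted function 'IsSpecialKey'
        return 0;
    }
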
@@ -330,7 +330,7 @@ void KThread::Finalize() {
             KThread* const waiter = std::addressof(*it);
 
             // The thread shouldn't be a kernel waiter.
-            ASSERT(!IsKernelAddressKey(waiter->GetAddressKey()));
+            ASSERT(!waiter->GetAddressKeyIsKernel());
 
             // Clear the lock owner.
             waiter->SetLockOwner(nullptr);
@@ -763,19 +763,6 @@ void KThread::Continue() {
     KScheduler::OnThreadStateChanged(kernel, this, old_state);
 }
 
-void KThread::WaitUntilSuspended() {
-    // Make sure we have a suspend requested.
-    ASSERT(IsSuspendRequested());
-
-    // Loop until the thread is not executing on any core.
-    for (std::size_t i = 0; i < static_cast<std::size_t>(Core::Hardware::NUM_CPU_CORES); ++i) {
-        KThread* core_thread{};
-        do {
-            core_thread = kernel.Scheduler(i).GetSchedulerCurrentThread();
-        } while (core_thread == this);
-    }
-}
-
 Result KThread::SetActivity(Svc::ThreadActivity activity) {
     // Lock ourselves.
     KScopedLightLock lk(activity_pause_lock);
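
The removed helper visited each core once, in order, and only spun while this thread happened to be that core's current thread at the moment it was checked. Its replacement lives in KernelCore::Suspend (last hunk below) and instead repeats a scan of all cores under the scheduler lock until one complete pass finds nothing left to wait for. The sketch below is not yuzu code and makes no claim about the precise interleaving being fixed; with invented names, it only contrasts the two loop shapes:

    #include <array>
    #include <atomic>
    #include <cstddef>

    constexpr std::size_t NumCores = 4;
    std::array<std::atomic<const void*>, NumCores> current_item{};

    // Shape of the removed helper: one ordered pass, spinning per core.
    void WaitSinglePassPerCore(const void* item) {
        for (std::size_t i = 0; i < NumCores; ++i) {
            while (current_item[i].load() == item) {
                // spin until this particular core stops running the item
            }
        }
    }

    // Shape of the replacement: rescan everything until a whole pass is clean
    // (the real code additionally takes the scheduler lock around each pass
    // and compares owning processes rather than individual threads).
    void WaitUntilCleanPass(const void* item) {
        bool seen = true;
        while (seen) {
            seen = false;
            for (std::size_t i = 0; i < NumCores; ++i) {
                if (current_item[i].load() == item) {
                    seen = true;
                }
            }
        }
    }
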
@@ -897,7 +884,7 @@ void KThread::AddWaiterImpl(KThread* thread) {
     }
 
     // Keep track of how many kernel waiters we have.
-    if (IsKernelAddressKey(thread->GetAddressKey())) {
+    if (thread->GetAddressKeyIsKernel()) {
         ASSERT((num_kernel_waiters++) >= 0);
         KScheduler::SetSchedulerUpdateNeeded(kernel);
     }
@@ -911,7 +898,7 @@ void KThread::RemoveWaiterImpl(KThread* thread) {
     ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 
     // Keep track of how many kernel waiters we have.
-    if (IsKernelAddressKey(thread->GetAddressKey())) {
+    if (thread->GetAddressKeyIsKernel()) {
         ASSERT((num_kernel_waiters--) > 0);
         KScheduler::SetSchedulerUpdateNeeded(kernel);
     }
@@ -987,7 +974,7 @@ KThread* KThread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
             KThread* thread = std::addressof(*it);
 
             // Keep track of how many kernel waiters we have.
-            if (IsKernelAddressKey(thread->GetAddressKey())) {
+            if (thread->GetAddressKeyIsKernel()) {
                 ASSERT((num_kernel_waiters--) > 0);
                 KScheduler::SetSchedulerUpdateNeeded(kernel);
             }

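The three hunks above are the consumers of the new flag: when a waiter is added or removed, the kernel-waiter counter is adjusted based on what the waiter itself recorded when its key was set, and a scheduler update is requested. A compressed sketch of that bookkeeping pattern, with invented names and a plain assert standing in for the kernel's ASSERT macro:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    struct Waiter {
        bool key_is_kernel{}; // recorded when the waiter's address key was set
    };

    struct Owner {
        std::vector<Waiter*> waiters;
        int num_kernel_waiters{};
        bool scheduler_update_needed{};

        void AddWaiter(Waiter* w) {
            waiters.push_back(w);
            if (w->key_is_kernel) {
                assert(num_kernel_waiters >= 0);
                ++num_kernel_waiters;
                scheduler_update_needed = true;
            }
        }

        void RemoveWaiter(Waiter* w) {
            waiters.erase(std::remove(waiters.begin(), waiters.end(), w), waiters.end());
            if (w->key_is_kernel) {
                assert(num_kernel_waiters > 0);
                --num_kernel_waiters;
                scheduler_update_needed = true;
            }
        }
    };

Storing the classification on the waiter keeps the add and remove paths symmetric without re-deriving it from the key's value.
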
@@ -214,8 +214,6 @@ public:
 
     void Continue();
 
-    void WaitUntilSuspended();
-
     constexpr void SetSyncedIndex(s32 index) {
         synced_index = index;
     }
@@ -607,13 +605,30 @@ public:
         return address_key_value;
     }
 
-    void SetAddressKey(VAddr key) {
-        address_key = key;
+    [[nodiscard]] bool GetAddressKeyIsKernel() const {
+        return address_key_is_kernel;
     }
 
-    void SetAddressKey(VAddr key, u32 val) {
+    //! NB: intentional deviation from official kernel.
+    //
+    // Separate SetAddressKey into user and kernel versions
+    // to cope with arbitrary host pointers making their way
+    // into things.
+
+    void SetUserAddressKey(VAddr key) {
+        address_key = key;
+        address_key_is_kernel = false;
+    }
+
+    void SetUserAddressKey(VAddr key, u32 val) {
         address_key = key;
         address_key_value = val;
+        address_key_is_kernel = false;
+    }
+
+    void SetKernelAddressKey(VAddr key) {
+        address_key = key;
+        address_key_is_kernel = true;
     }
 
     void ClearWaitQueue() {
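
For orientation, the split introduced here is exercised by the first two hunks of this diff: the condition-variable path passes a guest address supplied by the guest and uses the user variant, while KLightLock passes the host address of its tag and uses the kernel variant. A hedged sketch of the call-site shapes, with simplified types and invented names rather than the real signatures:

    #include <cstdint>

    using VAddr = std::uint64_t; // guest virtual address

    struct ThreadSketch {
        VAddr address_key{};
        std::uint32_t address_key_value{};
        bool address_key_is_kernel{};

        void SetUserAddressKey(VAddr key, std::uint32_t val) {
            address_key = key;
            address_key_value = val;
            address_key_is_kernel = false;
        }
        void SetKernelAddressKey(VAddr key) {
            address_key = key;
            address_key_is_kernel = true;
        }
    };

    // Guest-facing wait (condition variable): the key is a guest address,
    // so it is a user key.
    void WaitForGuestAddress(ThreadSketch& waiter, VAddr guest_addr, std::uint32_t tag) {
        waiter.SetUserAddressKey(guest_addr, tag);
    }

    // Emulator-internal lock: the key is the host address of the lock's tag,
    // so it is a kernel key.
    void WaitForHostLockTag(ThreadSketch& waiter, const std::uint32_t* host_tag) {
        waiter.SetKernelAddressKey(static_cast<VAddr>(reinterpret_cast<std::uintptr_t>(host_tag)));
    }
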
@@ -772,6 +787,7 @@ private:
     bool debug_attached{};
     s8 priority_inheritance_count{};
     bool resource_limit_release_hint{};
+    bool address_key_is_kernel{};
     StackParameters stack_parameters{};
     Common::SpinLock context_guard{};

@@ -1198,27 +1198,34 @@ void KernelCore::Suspend(bool suspended) {
     const bool should_suspend{exception_exited || suspended};
     const auto activity = should_suspend ? ProcessActivity::Paused : ProcessActivity::Runnable;
 
-    std::vector<KScopedAutoObject<KThread>> process_threads;
-    {
-        KScopedSchedulerLock sl{*this};
-
-        if (auto* process = CurrentProcess(); process != nullptr) {
-            process->SetActivity(activity);
-
-            if (!should_suspend) {
-                // Runnable now; no need to wait.
-                return;
-            }
-
-            for (auto* thread : process->GetThreadList()) {
-                process_threads.emplace_back(thread);
-            }
-        }
-    }
-
-    // Wait for execution to stop.
-    for (auto& thread : process_threads) {
-        thread->WaitUntilSuspended();
-    }
+    //! This refers to the application process, not the current process.
+    KScopedAutoObject<KProcess> process = CurrentProcess();
+    if (process.IsNull()) {
+        return;
+    }
+
+    // Set the new activity.
+    process->SetActivity(activity);
+
+    // Wait for process execution to stop.
+    bool must_wait{should_suspend};
+
+    // KernelCore::Suspend must be called from locked context, or we
+    // could race another call to SetActivity, interfering with waiting.
+    while (must_wait) {
+        KScopedSchedulerLock sl{*this};
+
+        // Assume that all threads have finished running.
+        must_wait = false;
+
+        for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
+            if (Scheduler(i).GetSchedulerCurrentThread()->GetOwnerProcess() ==
+                process.GetPointerUnsafe()) {
+                // A thread has not finished running yet.
+                // Continue waiting.
+                must_wait = true;
+            }
+        }
+    }
 }
 
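The rewritten Suspend above replaces the collect-then-spin approach with a loop that repeatedly takes the scheduler lock, assumes the pass will be clean, and re-scans every core for a thread still owned by the application process; it also documents that the caller must hold the appropriate lock so a concurrent SetActivity cannot interleave with the wait. Below is a small, self-contained analogue of that wait pattern, with invented names and a std::mutex standing in for the scheduler lock; it illustrates the shape of the loop, not yuzu's scheduler:

    #include <array>
    #include <chrono>
    #include <cstddef>
    #include <mutex>
    #include <thread>

    constexpr std::size_t NumCores = 4;

    std::mutex sched_lock;                     // stands in for the scheduler lock
    std::array<int, NumCores> current_owner{}; // process id currently running on each core

    void WaitForProcessToStop(int pid) {
        // (The real code first marks the process paused so its threads stop
        // being picked up by the scheduler; that part is omitted here.)
        bool must_wait = true;
        while (must_wait) {
            const std::scoped_lock lk{sched_lock};
            must_wait = false; // assume a clean pass until a core proves otherwise
            for (std::size_t i = 0; i < NumCores; ++i) {
                if (current_owner[i] == pid) {
                    must_wait = true; // still running somewhere; scan again
                }
            }
        }
    }

    int main() {
        current_owner = {1, 2, 1, 3};

        // Simulate the scheduler switching process 1's threads off their cores.
        std::thread scheduler([] {
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
            const std::scoped_lock lk{sched_lock};
            for (auto& owner : current_owner) {
                if (owner == 1) {
                    owner = 0;
                }
            }
        });

        WaitForProcessToStop(1); // returns once no core is running process 1
        scheduler.join();
    }

Each pass drops and re-takes the lock, so whoever is responsible for moving the remaining work off the cores can still make progress between passes; the scheduler lock scope inside the while loop serves the same purpose in the real code.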