Kernel: Actually wake up the requested number of threads in Semaphore::Release.
Also properly keep track of the data in guest memory; this fixes managing the semaphore from userland. It was found that these Semaphores are actually Condition Variables, with Release(1) and Release(-1) being equivalent to notify_one and notify_all, respectively. We should change the name of the class to reflect this.
parent 1bbe9309da
commit db3a525166
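For context, a minimal host-side sketch of the analogy described in the commit message, written against std::condition_variable. The CondvarAnalogy type and its Release helper below are hypothetical illustrations of the Release(1) ≈ notify_one and Release(-1) ≈ notify_all equivalence, not the kernel object's real interface:

#include <condition_variable>
#include <mutex>

// Hypothetical sketch only: maps the guest "semaphore" Release semantics onto a
// std::condition_variable, mirroring the notify_one/notify_all equivalence above.
struct CondvarAnalogy {
    std::condition_variable cv;
    std::mutex mutex;

    void Release(int target) {
        std::lock_guard<std::mutex> lock{mutex};
        if (target == -1)
            cv.notify_all(); // Release(-1): wake every thread waiting on the object
        else
            cv.notify_one(); // Release(1): wake a single waiting thread
    }
};

In the diff below, Release(target) additionally writes the new count back to guest memory through SetAvailableCount, which is what lets userland observe and manage the value directly.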
@@ -273,9 +273,8 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeSemaphore::GetChildren() cons
     std::vector<std::unique_ptr<WaitTreeItem>> list(WaitTreeWaitObject::GetChildren());
 
     const auto& semaphore = static_cast<const Kernel::Semaphore&>(object);
-    list.push_back(
-        std::make_unique<WaitTreeText>(tr("available count = %1").arg(semaphore.available_count)));
-    list.push_back(std::make_unique<WaitTreeText>(tr("max count = %1").arg(semaphore.max_count)));
+    list.push_back(std::make_unique<WaitTreeText>(
+        tr("available count = %1").arg(semaphore.GetAvailableCount())));
     return list;
 }
 
@@ -18,9 +18,6 @@ ResultVal<SharedPtr<Semaphore>> Semaphore::Create(VAddr guest_addr, VAddr mutex_
                                                   std::string name) {
     SharedPtr<Semaphore> semaphore(new Semaphore);
 
-    // When the semaphore is created, some slots are reserved for other threads,
-    // and the rest is reserved for the caller thread;
-    semaphore->available_count = Memory::Read32(guest_addr);
     semaphore->name = std::move(name);
     semaphore->guest_addr = guest_addr;
     semaphore->mutex_addr = mutex_addr;
@@ -32,34 +29,36 @@ ResultVal<SharedPtr<Semaphore>> Semaphore::Create(VAddr guest_addr, VAddr mutex_
 }
 
 bool Semaphore::ShouldWait(Thread* thread) const {
-    return available_count <= 0;
+    return GetAvailableCount() <= 0;
 }
 
 void Semaphore::Acquire(Thread* thread) {
-    if (available_count <= 0)
+    if (GetAvailableCount() <= 0)
         return;
 
-    --available_count;
-    UpdateGuestState();
+    SetAvailableCount(GetAvailableCount() - 1);
 }
 
 ResultCode Semaphore::Release(s32 target) {
-    ++available_count;
-    UpdateGuestState();
-
     if (target == -1) {
         // When -1, wake up all waiting threads
+        SetAvailableCount(GetWaitingThreads().size());
         WakeupAllWaitingThreads();
     } else {
         // Otherwise, wake up just a single thread
+        SetAvailableCount(target);
         WakeupWaitingThread(GetHighestPriorityReadyThread());
     }
 
     return RESULT_SUCCESS;
 }
 
-void Semaphore::UpdateGuestState() {
-    Memory::Write32(guest_addr, available_count);
+s32 Semaphore::GetAvailableCount() const {
+    return Memory::Read32(guest_addr);
+}
+
+void Semaphore::SetAvailableCount(s32 value) const {
+    Memory::Write32(guest_addr, value);
 }
 
 } // namespace Kernel
@@ -13,6 +13,7 @@
 
 namespace Kernel {
 
+// TODO(Subv): This is actually a Condition Variable.
 class Semaphore final : public WaitObject {
 public:
     /**
@@ -39,8 +40,9 @@ public:
         return HANDLE_TYPE;
     }
 
-    s32 max_count;       ///< Maximum number of simultaneous holders the semaphore can have
-    s32 available_count; ///< Number of free slots left in the semaphore
+    s32 GetAvailableCount() const;
+    void SetAvailableCount(s32 value) const;
+
     std::string name;    ///< Name of semaphore (optional)
     VAddr guest_addr;    ///< Address of the guest semaphore value
     VAddr mutex_addr;    ///< (optional) Address of guest mutex value associated with this semaphore,
@@ -59,9 +61,6 @@ public:
 private:
     Semaphore();
     ~Semaphore() override;
-
-    /// Updates the state of the object tracking this semaphore in guest memory
-    void UpdateGuestState();
 };
 
 } // namespace Kernel
@@ -501,7 +501,7 @@ static ResultCode WaitProcessWideKeyAtomic(VAddr mutex_addr, VAddr semaphore_add
         semaphore->name = Common::StringFromFormat("semaphore-%llx", semaphore_addr);
     }
 
-    ASSERT(semaphore->available_count == 0);
+    ASSERT(semaphore->GetAvailableCount() == 0);
     ASSERT(semaphore->mutex_addr == mutex_addr);
 
     auto wakeup_callback = [mutex, nano_seconds](ThreadWakeupReason reason,