Kernel: Corrections to Scheduling.

Fernando Sahmkow 2020-03-03 15:50:38 -04:00
parent b4dc01f16a
commit 07993ac8c8
5 changed files with 23 additions and 19 deletions

View File

@@ -154,7 +154,7 @@ void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
basic_lock.unlock();
}
std::optional<u64> CoreTiming::Advance() {
std::optional<s64> CoreTiming::Advance() {
advance_lock.lock();
basic_lock.lock();
global_timer = GetGlobalTimeNs().count();
@@ -170,10 +170,11 @@ std::optional<u64> CoreTiming::Advance() {
}
basic_lock.lock();
global_timer = GetGlobalTimeNs().count();
}
if (!event_queue.empty()) {
const u64 next_time = event_queue.front().time - global_timer;
const s64 next_time = event_queue.front().time - global_timer;
basic_lock.unlock();
advance_lock.unlock();
return next_time;
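
Side note on the u64 -> s64 change above: Advance() hands back the distance from now to the next queued event, computed as event_queue.front().time - global_timer. Once the global timer can run past the front event's deadline, that difference may be negative; with the old unsigned type it would wrap to an enormous positive value and the timing thread would sleep far too long. A minimal, self-contained sketch of the wrap hazard (illustrative values, not emulator code):

#include <cstdint>
#include <iostream>

int main() {
    const std::uint64_t event_time = 1'000; // event was due at t = 1000 ns
    const std::uint64_t now = 1'500;        // but we are already at t = 1500 ns

    const std::uint64_t unsigned_delta = event_time - now; // wraps to ~1.8e19
    const std::int64_t signed_delta =
        static_cast<std::int64_t>(event_time) - static_cast<std::int64_t>(now); // -500

    std::cout << unsigned_delta << '\n'; // huge value: a caller would wait "forever"
    std::cout << signed_delta << '\n';   // -500: a caller can tell the event is overdue
}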
@@ -191,8 +192,10 @@ void CoreTiming::ThreadLoop() {
paused_set = false;
const auto next_time = Advance();
if (next_time) {
if (*next_time > 0) {
std::chrono::nanoseconds next_time_ns = std::chrono::nanoseconds(*next_time);
event.WaitFor(next_time_ns);
}
} else {
wait_set = true;
event.Wait();
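
The ThreadLoop hunk above pairs with the signed return type: the loop now sleeps only when the next event is genuinely in the future, a zero or negative delta (an event already due) falls straight through to the next Advance() call, and an empty optional still means "no events queued, block until woken". A rough, self-contained sketch of that wait pattern, using stand-in names (WakeEvent, TimerLoop, advance) rather than the emulator's own classes:

#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <mutex>
#include <optional>

// Stand-in for the auto-reset event the real loop waits on.
struct WakeEvent {
    std::mutex m;
    std::condition_variable cv;

    void WaitFor(std::chrono::nanoseconds ns) {
        std::unique_lock lk{m};
        cv.wait_for(lk, ns);
    }
    void Wait() {
        std::unique_lock lk{m};
        cv.wait(lk);
    }
};

void TimerLoop(const bool& shutting_down, WakeEvent& event,
               std::optional<std::int64_t> (*advance)()) {
    while (!shutting_down) {
        const auto next_time = advance(); // fires every event that is already due
        if (next_time) {
            if (*next_time > 0) {
                // Next event lies in the future: sleep until then (or until woken early).
                event.WaitFor(std::chrono::nanoseconds(*next_time));
            }
            // *next_time <= 0: an event is already overdue, loop again without sleeping.
        } else {
            // Nothing queued at all: block until new work is scheduled.
            event.Wait();
        }
    }
}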

View File

@@ -110,7 +110,7 @@ public:
std::chrono::nanoseconds GetGlobalTimeNs() const;
/// Checks for events manually and returns time in nanoseconds for next event, threadsafe.
std::optional<u64> Advance();
std::optional<s64> Advance();
private:
struct Event;

View File

@@ -47,13 +47,13 @@ u32 GlobalScheduler::SelectThreads() {
ASSERT(is_locked);
const auto update_thread = [](Thread* thread, Scheduler& sched) {
sched.guard.lock();
if (thread != sched.selected_thread.get()) {
if (thread != sched.selected_thread_set.get()) {
if (thread == nullptr) {
++sched.idle_selection_count;
}
sched.selected_thread = SharedFrom(thread);
sched.selected_thread_set = SharedFrom(thread);
}
const bool reschedule_pending = sched.selected_thread != sched.current_thread;
const bool reschedule_pending = sched.selected_thread_set != sched.current_thread;
sched.is_context_switch_pending = reschedule_pending;
std::atomic_thread_fence(std::memory_order_seq_cst);
sched.guard.unlock();
@@ -118,6 +118,8 @@ u32 GlobalScheduler::SelectThreads() {
suggested);
top_threads[candidate_core] = next;
break;
} else {
suggested = nullptr;
}
}
}
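
The added else branch in SelectThreads resets suggested whenever the current candidate is not taken, so the code that runs after the loop sees "no candidate" instead of the last rejected pointer. An illustrative reset-on-reject pattern with made-up types (not the scheduler's real loop):

#include <array>

struct Candidate {
    bool usable;
};

Candidate* PickCandidate(std::array<Candidate, 4>& candidates) {
    Candidate* suggested = nullptr;
    for (auto& c : candidates) {
        suggested = &c;
        if (suggested->usable) {
            break;               // accepted: keep the pointer
        } else {
            suggested = nullptr; // rejected: make sure fall-through code sees "none"
        }
    }
    return suggested;
}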
@@ -590,7 +592,7 @@ void Scheduler::OnThreadStart() {
}
void Scheduler::SwitchContextStep2() {
Thread* previous_thread = current_thread.get();
Thread* previous_thread = current_thread_prev.get();
Thread* new_thread = selected_thread.get();
// Load context of new thread
@@ -606,8 +608,6 @@ void Scheduler::SwitchContextStep2() {
"Thread must be ready to become running.");
// Cancel any outstanding wakeup events for this thread
current_thread = SharedFrom(new_thread);
new_thread->SetStatus(ThreadStatus::Running);
new_thread->SetIsRunning(true);
auto* const thread_owner_process = current_thread->GetOwnerProcess();
@@ -622,21 +622,21 @@ void Scheduler::SwitchContextStep2() {
cpu_core.SetTPIDR_EL0(new_thread->GetTPIDR_EL0());
cpu_core.ClearExclusiveState();
}
} else {
current_thread = nullptr;
// Note: We do not reset the current process and current page table when idling because
// technically we haven't changed processes, our threads are just paused.
}
guard.unlock();
TryDoContextSwitch();
}
void Scheduler::SwitchContext() {
Thread* previous_thread = current_thread.get();
current_thread_prev = current_thread;
selected_thread = selected_thread_set;
Thread* previous_thread = current_thread_prev.get();
Thread* new_thread = selected_thread.get();
current_thread = selected_thread;
is_context_switch_pending = false;
if (new_thread == previous_thread) {
guard.unlock();
if (new_thread == previous_thread) {
return;
}
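
Taken together, the scheduler.cpp hunks separate "what the global scheduler picked" from "what this core is acting on": GlobalScheduler::SelectThreads now publishes its choice into selected_thread_set, and Scheduler::SwitchContext copies it into selected_thread and current_thread in one place, with current_thread_prev holding a reference to the outgoing thread so SwitchContextStep2 can still reach it after current_thread has already been updated. A compact sketch of that snapshot idea, using illustrative types only (the real code threads its guard lock across both switch steps rather than scoping it like this):

#include <memory>
#include <mutex>

struct Thread {};

struct CoreScheduler {
    std::mutex guard;
    std::shared_ptr<Thread> current_thread;
    std::shared_ptr<Thread> selected_thread;      // what this core acts on
    std::shared_ptr<Thread> current_thread_prev;  // outgoing thread of the last switch
    std::shared_ptr<Thread> selected_thread_set;  // written by the global selector

    void SwitchContext() {
        std::lock_guard lk{guard};
        current_thread_prev = current_thread;   // remember who we are leaving
        selected_thread = selected_thread_set;  // take a stable snapshot of the selection
        current_thread = selected_thread;
        // ... the actual register/page-table switch would follow, using
        //     current_thread_prev as the outgoing thread ...
    }
};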

View File

@@ -249,6 +249,8 @@ private:
std::shared_ptr<Thread> current_thread = nullptr;
std::shared_ptr<Thread> selected_thread = nullptr;
std::shared_ptr<Thread> current_thread_prev = nullptr;
std::shared_ptr<Thread> selected_thread_set = nullptr;
std::shared_ptr<Thread> idle_thread = nullptr;
Core::System& system;
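
These are the member declarations backing the changes above: selected_thread_set is the slot GlobalScheduler::SelectThreads writes into, current_thread_prev remembers the outgoing thread across the two-step context switch, and both sit alongside the existing current_thread and selected_thread pointers (see the snapshot sketch after the scheduler.cpp hunks).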

View File

@@ -316,7 +316,6 @@ static ResultCode ConnectToNamedPort32(Core::System& system, Handle* out_handle,
/// Makes a blocking IPC call to an OS service.
static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
std::lock_guard lock{HLE::g_hle_lock};
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
std::shared_ptr<ClientSession> session = handle_table.Get<ClientSession>(handle);
if (!session) {