2022-04-23 04:59:50 -04:00
|
|
|
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
|
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
2020-12-02 21:08:35 -05:00
|
|
|
|
2021-01-15 02:02:57 -05:00
|
|
|
#include <bit>
|
|
|
|
|
2020-12-02 21:08:35 -05:00
|
|
|
#include "common/assert.h"
|
|
|
|
#include "common/bit_util.h"
|
|
|
|
#include "common/fiber.h"
|
|
|
|
#include "common/logging/log.h"
|
|
|
|
#include "core/arm/arm_interface.h"
|
|
|
|
#include "core/core.h"
|
|
|
|
#include "core/core_timing.h"
|
|
|
|
#include "core/cpu_manager.h"
|
2021-12-30 00:40:38 -05:00
|
|
|
#include "core/hle/kernel/k_interrupt_manager.h"
|
2021-04-24 01:04:28 -04:00
|
|
|
#include "core/hle/kernel/k_process.h"
|
2020-12-03 19:43:18 -05:00
|
|
|
#include "core/hle/kernel/k_scheduler.h"
|
2020-12-04 00:56:02 -05:00
|
|
|
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
|
2020-12-31 02:01:08 -05:00
|
|
|
#include "core/hle/kernel/k_thread.h"
|
2020-12-02 21:08:35 -05:00
|
|
|
#include "core/hle/kernel/kernel.h"
|
|
|
|
#include "core/hle/kernel/physical_core.h"
|
|
|
|
|
|
|
|
namespace Kernel {
|
|
|
|
|
2020-12-31 02:01:08 -05:00
|
|
|
/// Bumps the scheduled count of the thread's owning process, if it has one.
/// Kernel-internal threads with no owner process are ignored.
static void IncrementScheduledCount(Kernel::KThread* thread) {
    auto* const owner = thread->GetOwnerProcess();
    if (owner != nullptr) {
        owner->IncrementScheduledCount();
    }
}
|
|
|
|
|
2023-03-07 10:49:41 -05:00
|
|
|
KScheduler::KScheduler(KernelCore& kernel) : m_kernel{kernel} {
    // Create the switch fiber: the dedicated host context that performs thread
    // context switches. It loops forever; each iteration performs one switch
    // attempt via ScheduleImplFiber(), and control re-enters it via YieldTo.
    m_switch_fiber = std::make_shared<Common::Fiber>([this] {
        while (true) {
            ScheduleImplFiber();
        }
    });

    // Start with scheduling requested so the first activation picks a thread.
    m_state.needs_scheduling = true;
}
|
|
|
|
|
|
|
|
// No explicit teardown needed; members (fiber, state) clean up via their own destructors.
KScheduler::~KScheduler() = default;
|
|
|
|
|
|
|
|
// Marks the interrupt task as runnable and requests a reschedule, so the next
// Schedule() on this core will switch away to service it.
void KScheduler::SetInterruptTaskRunnable() {
    m_state.interrupt_task_runnable = true;
    m_state.needs_scheduling = true;
}
|
|
|
|
|
|
|
|
// Requests a reschedule from interrupt context; performs it immediately if the
// current thread is allowed to schedule right now.
void KScheduler::RequestScheduleOnInterrupt() {
    m_state.needs_scheduling = true;

    // Only schedule immediately when dispatch is not disabled; otherwise the
    // pending needs_scheduling flag will be honored later.
    if (CanSchedule(m_kernel)) {
        ScheduleOnInterrupt();
    }
}
|
2021-12-30 00:40:38 -05:00
|
|
|
|
2022-06-26 18:52:16 -04:00
|
|
|
// Increments the current thread's dispatch-disable count, preventing it from
// being switched out until a matching EnableScheduling call.
void KScheduler::DisableScheduling(KernelCore& kernel) {
    ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
    GetCurrentThread(kernel).DisableDispatch();
}
|
|
|
|
|
|
|
|
// Re-enables dispatch for the current thread and kicks scheduling on all cores
// named in the cores_needing_scheduling bitmask.
void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
    ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 1);

    auto* scheduler{kernel.CurrentScheduler()};

    // With no current scheduler (HLE/dummy-thread host context) or in phantom
    // single-core mode, fall back to the HLE reschedule path.
    if (!scheduler || kernel.IsPhantomModeForSingleCore()) {
        KScheduler::RescheduleCores(kernel, cores_needing_scheduling);
        KScheduler::RescheduleCurrentHLEThread(kernel);
        return;
    }

    // Kick the other cores first, then handle our own core.
    scheduler->RescheduleOtherCores(cores_needing_scheduling);

    if (GetCurrentThread(kernel).GetDisableDispatchCount() > 1) {
        // Still nested inside another dispatch-disable scope; just decrement.
        GetCurrentThread(kernel).EnableDispatch();
    } else {
        // Outermost scope: actually perform the reschedule for this core
        // (RescheduleCurrentCore itself re-enables dispatch).
        scheduler->RescheduleCurrentCore();
    }
}
|
|
|
|
|
2022-07-11 10:13:13 -04:00
|
|
|
// Reschedule path for HLE/dummy host threads that do not run on a core thread
// and therefore cannot enter the normal scheduling fiber.
void KScheduler::RescheduleCurrentHLEThread(KernelCore& kernel) {
    // HACK: we cannot schedule from this thread, it is not a core thread
    ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1);

    // Ensure dummy threads that are waiting block.
    GetCurrentThread(kernel).DummyThreadBeginWait();

    // After the (potential) wait, the thread must no longer be in Waiting state.
    ASSERT(GetCurrentThread(kernel).GetState() != ThreadState::Waiting);
    GetCurrentThread(kernel).EnableDispatch();
}
|
|
|
|
|
2022-06-26 18:52:16 -04:00
|
|
|
// Recomputes the highest-priority thread for every core, but only when an
// update has been requested; returns the bitmask of cores needing scheduling.
u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
    // Fast path: no update requested, so no core needs scheduling.
    if (!IsSchedulerUpdateNeeded(kernel)) {
        return 0;
    }
    return UpdateHighestPriorityThreadsImpl(kernel);
}
|
|
|
|
|
|
|
|
// Performs a scheduling pass on this core. Must be called with dispatch
// disabled exactly once, from the core this scheduler belongs to.
void KScheduler::Schedule() {
    ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() == 1);
    ASSERT(m_core_id == GetCurrentCoreId(m_kernel));

    ScheduleImpl();
}
|
|
|
|
|
|
|
|
// Schedules from interrupt context, bracketing the pass with a
// dispatch-disable/enable pair on the current thread.
void KScheduler::ScheduleOnInterrupt() {
    GetCurrentThread(m_kernel).DisableDispatch();
    Schedule();
    GetCurrentThread(m_kernel).EnableDispatch();
}
|
|
|
|
|
2022-07-07 12:34:46 -04:00
|
|
|
// Preempts the current thread in single-core mode: unloads its context from
// the scheduler it was running on and yields to the switch fiber. Execution
// resumes after YieldTo when this thread is scheduled again.
void KScheduler::PreemptSingleCore() {
    GetCurrentThread(m_kernel).DisableDispatch();

    auto* thread = GetCurrentThreadPointer(m_kernel);
    // Unload from the scheduler of the core the thread is currently on, which
    // may differ from this scheduler's core in single-core mode.
    auto& previous_scheduler = m_kernel.Scheduler(thread->GetCurrentCore());
    previous_scheduler.Unload(thread);

    // Hand control to the switch fiber; we return here once rescheduled.
    Common::Fiber::YieldTo(thread->GetHostContext(), *m_switch_fiber);

    GetCurrentThread(m_kernel).EnableDispatch();
}
|
|
|
|
|
2022-06-26 18:52:16 -04:00
|
|
|
// Re-enables dispatch and, if scheduling was requested, performs a reschedule
// on the current core. Not valid in phantom single-core mode.
void KScheduler::RescheduleCurrentCore() {
    ASSERT(!m_kernel.IsPhantomModeForSingleCore());
    ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() == 1);

    GetCurrentThread(m_kernel).EnableDispatch();

    if (m_state.needs_scheduling.load()) {
        // Disable interrupts, and then check again if rescheduling is needed.
        // KScopedInterruptDisable intr_disable;

        m_kernel.CurrentScheduler()->RescheduleCurrentCoreImpl();
    }
}
|
2021-12-30 00:40:38 -05:00
|
|
|
|
2022-06-26 18:52:16 -04:00
|
|
|
// Inner reschedule for the current core: re-checks the needs_scheduling flag
// and runs a scheduling pass with dispatch disabled around it.
void KScheduler::RescheduleCurrentCoreImpl() {
    // Check that scheduling is needed.
    if (m_state.needs_scheduling.load()) [[likely]] {
        GetCurrentThread(m_kernel).DisableDispatch();
        Schedule();
        GetCurrentThread(m_kernel).EnableDispatch();
    }
}
|
|
|
|
|
2022-07-05 23:27:25 -04:00
|
|
|
// One-time setup of this scheduler: records its core id, idle thread, and the
// initial current thread. The commented-out sections mirror the HOS-accurate
// initialization (priority-queue insertion, interrupt handler binding) that
// yuzu does not currently perform here.
void KScheduler::Initialize(KThread* main_thread, KThread* idle_thread, s32 core_id) {
    // Set core ID/idle thread/interrupt task manager.
    m_core_id = core_id;
    m_idle_thread = idle_thread;
    // m_state.idle_thread_stack = m_idle_thread->GetStackTop();
    // m_state.interrupt_task_manager = std::addressof(kernel.GetInterruptTaskManager());

    // Insert the main thread into the priority queue.
    // {
    //     KScopedSchedulerLock lk{m_kernel};
    //     GetPriorityQueue(m_kernel).PushBack(GetCurrentThreadPointer(m_kernel));
    //     SetSchedulerUpdateNeeded(m_kernel);
    // }

    // Bind interrupt handler.
    // kernel.GetInterruptManager().BindHandler(
    //     GetSchedulerInterruptHandler(m_kernel), KInterruptName::Scheduler, m_core_id,
    //     KInterruptController::PriorityLevel::Scheduler, false, false);

    // Set the current thread.
    m_current_thread = main_thread;
}
|
|
|
|
|
|
|
|
// Activates the scheduler and performs the first reschedule for this core.
// Requires the caller to hold exactly one dispatch-disable reference.
void KScheduler::Activate() {
    ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() == 1);

    // m_state.should_count_idle = KTargetSystem::IsDebugMode();
    m_is_active = true;
    RescheduleCurrentCore();
}
|
|
|
|
|
2022-07-05 23:27:25 -04:00
|
|
|
// Called when a thread begins executing for the first time: releases the
// dispatch-disable reference it was created/scheduled with.
void KScheduler::OnThreadStart() {
    GetCurrentThread(m_kernel).EnableDispatch();
}
|
|
|
|
|
2020-12-31 02:01:08 -05:00
|
|
|
// Updates this core's cached highest-priority thread. Returns this core's bit
// (1 << core_id) if the thread changed and scheduling is now needed, else 0.
u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
    if (KThread* prev_highest_thread = m_state.highest_priority_thread;
        prev_highest_thread != highest_thread) [[likely]] {
        if (prev_highest_thread != nullptr) [[likely]] {
            // The outgoing top thread gets its scheduled count bumped and its
            // last-scheduled tick stamped with the current CPU tick count.
            IncrementScheduledCount(prev_highest_thread);
            prev_highest_thread->SetLastScheduledTick(m_kernel.System().CoreTiming().GetCPUTicks());
        }
        if (m_state.should_count_idle) {
            // Idle accounting: record the running thread on its process, or
            // count an idle period when there is no incoming thread.
            if (highest_thread != nullptr) [[likely]] {
                if (KProcess* process = highest_thread->GetOwnerProcess(); process != nullptr) {
                    process->SetRunningThread(m_core_id, highest_thread, m_state.idle_count);
                }
            } else {
                m_state.idle_count++;
            }
        }

        m_state.highest_priority_thread = highest_thread;
        m_state.needs_scheduling = true;
        return (1ULL << m_core_id);
    } else {
        // No change; no scheduling required on this core.
        return 0;
    }
}
|
|
|
|
|
2020-12-05 03:02:30 -05:00
|
|
|
// Recomputes the top thread for every core from the global priority queue,
// honoring pinned threads, then migrates suggested threads onto idle cores.
// Must be called with the scheduler lock held. Returns the bitmask of cores
// whose top thread changed (and therefore need scheduling).
u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
    ASSERT(IsSchedulerLockedByCurrentThread(kernel));

    // Clear that we need to update.
    ClearSchedulerUpdateNeeded(kernel);

    u64 cores_needing_scheduling = 0, idle_cores = 0;
    KThread* top_threads[Core::Hardware::NUM_CPU_CORES];
    auto& priority_queue = GetPriorityQueue(kernel);

    // We want to go over all cores, finding the highest priority thread and determining if
    // scheduling is needed for that core.
    for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
        KThread* top_thread = priority_queue.GetScheduledFront(static_cast<s32>(core_id));
        if (top_thread != nullptr) {
            // We need to check if the thread's process has a pinned thread.
            if (KProcess* parent = top_thread->GetOwnerProcess()) {
                // Check that there's a pinned thread other than the current top thread.
                if (KThread* pinned = parent->GetPinnedThread(static_cast<s32>(core_id));
                    pinned != nullptr && pinned != top_thread) {
                    // We need to prefer threads with kernel waiters to the pinned thread.
                    if (top_thread->GetNumKernelWaiters() ==
                        0 /* && top_thread != parent->GetExceptionThread() */) {
                        // If the pinned thread is runnable, use it.
                        if (pinned->GetRawState() == ThreadState::Runnable) {
                            top_thread = pinned;
                        } else {
                            top_thread = nullptr;
                        }
                    }
                }
            }
        } else {
            // No runnable thread for this core; remember it as idle.
            idle_cores |= (1ULL << core_id);
        }

        top_threads[core_id] = top_thread;
        cores_needing_scheduling |=
            kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]);
    }

    // Idle cores are bad. We're going to try to migrate threads to each idle core in turn.
    while (idle_cores != 0) {
        // Pick the lowest-numbered idle core from the bitmask.
        const s32 core_id = static_cast<s32>(std::countr_zero(idle_cores));

        if (KThread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) {
            s32 migration_candidates[Core::Hardware::NUM_CPU_CORES];
            size_t num_candidates = 0;

            // While we have a suggested thread, try to migrate it!
            while (suggested != nullptr) {
                // Check if the suggested thread is the top thread on its core.
                const s32 suggested_core = suggested->GetActiveCore();
                if (KThread* top_thread =
                        (suggested_core >= 0) ? top_threads[suggested_core] : nullptr;
                    top_thread != suggested) {
                    // Make sure we're not dealing with threads too high priority for migration.
                    if (top_thread != nullptr &&
                        top_thread->GetPriority() < HighestCoreMigrationAllowedPriority) {
                        break;
                    }

                    // The suggested thread isn't bound to its core, so we can migrate it!
                    suggested->SetActiveCore(core_id);
                    priority_queue.ChangeCore(suggested_core, suggested);
                    top_threads[core_id] = suggested;
                    cores_needing_scheduling |=
                        kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]);
                    break;
                }

                // Note this core as a candidate for migration.
                ASSERT(num_candidates < Core::Hardware::NUM_CPU_CORES);
                migration_candidates[num_candidates++] = suggested_core;
                suggested = priority_queue.GetSuggestedNext(core_id, suggested);
            }

            // If suggested is nullptr, we failed to migrate a specific thread. So let's try all our
            // candidate cores' top threads.
            if (suggested == nullptr) {
                for (size_t i = 0; i < num_candidates; i++) {
                    // Check if there's some other thread that can run on the candidate core.
                    const s32 candidate_core = migration_candidates[i];
                    suggested = top_threads[candidate_core];
                    if (KThread* next_on_candidate_core =
                            priority_queue.GetScheduledNext(candidate_core, suggested);
                        next_on_candidate_core != nullptr) {
                        // The candidate core can run some other thread! We'll migrate its current
                        // top thread to us.
                        top_threads[candidate_core] = next_on_candidate_core;
                        cores_needing_scheduling |=
                            kernel.Scheduler(candidate_core)
                                .UpdateHighestPriorityThread(top_threads[candidate_core]);

                        // Perform the migration.
                        suggested->SetActiveCore(core_id);
                        priority_queue.ChangeCore(candidate_core, suggested);
                        top_threads[core_id] = suggested;
                        cores_needing_scheduling |=
                            kernel.Scheduler(core_id).UpdateHighestPriorityThread(
                                top_threads[core_id]);
                        break;
                    }
                }
            }
        }

        // This idle core has been handled; clear its bit.
        idle_cores &= ~(1ULL << core_id);
    }

    // HACK: any waiting dummy threads can wake up now.
    kernel.GlobalSchedulerContext().WakeupWaitingDummyThreads();

    // HACK: if we are a dummy thread, and we need to go sleep, indicate
    // that for when the lock is released.
    KThread* const cur_thread = GetCurrentThreadPointer(kernel);
    if (cur_thread->IsDummyThread() && cur_thread->GetState() != ThreadState::Runnable) {
        cur_thread->RequestDummyThreadWait();
    }

    return cores_needing_scheduling;
}
|
|
|
|
|
2022-06-26 18:52:16 -04:00
|
|
|
// Performs the bookkeeping half of a context switch on this core: CPU-time
// accounting, previous-thread tracking, and installing next_thread as current.
// The actual host/guest context swap is handled by the caller (switch fiber).
void KScheduler::SwitchThread(KThread* next_thread) {
    KProcess* const cur_process = GetCurrentProcessPointer(m_kernel);
    KThread* const cur_thread = GetCurrentThreadPointer(m_kernel);

    // We never want to schedule a null thread, so use the idle thread if we don't have a next.
    if (next_thread == nullptr) {
        next_thread = m_idle_thread;
    }

    // Record that the incoming thread now runs on this core.
    if (next_thread->GetCurrentCore() != m_core_id) {
        next_thread->SetCurrentCore(m_core_id);
    }

    // If we're not actually switching thread, there's nothing to do.
    if (next_thread == cur_thread) {
        return;
    }

    // Next thread is now known not to be nullptr, and must not be dispatchable.
    ASSERT(next_thread->GetDisableDispatchCount() == 1);
    ASSERT(!next_thread->IsDummyThread());

    // Update the CPU time tracking variables.
    const s64 prev_tick = m_last_context_switch_time;
    const s64 cur_tick = m_kernel.System().CoreTiming().GetCPUTicks();
    const s64 tick_diff = cur_tick - prev_tick;
    cur_thread->AddCpuTime(m_core_id, tick_diff);
    if (cur_process != nullptr) {
        cur_process->UpdateCPUTimeTicks(tick_diff);
    }
    m_last_context_switch_time = cur_tick;

    // Update our previous thread.
    if (cur_process != nullptr) {
        // Only track it as the previous thread if it isn't terminating and is
        // still affiliated with this core.
        if (!cur_thread->IsTerminationRequested() && cur_thread->GetActiveCore() == m_core_id)
            [[likely]] {
            m_state.prev_thread = cur_thread;
        } else {
            m_state.prev_thread = nullptr;
        }
    }

    // Switch the current process, if we're switching processes.
    // if (KProcess *next_process = next_thread->GetOwnerProcess(); next_process != cur_process) {
    //     KProcess::Switch(cur_process, next_process);
    // }

    // Set the new thread.
    SetCurrentThread(m_kernel, next_thread);
    m_current_thread = next_thread;

    // Set the new Thread Local region.
    // cpu::SwitchThreadLocalRegion(GetInteger(next_thread->GetThreadLocalRegionAddress()));
}
|
|
|
|
|
|
|
|
// Core scheduling pass: decides whether a context switch is required and, if
// so, hands control to the switch fiber. Returns immediately when the current
// thread is already the highest-priority choice.
void KScheduler::ScheduleImpl() {
    // First, clear the needs scheduling bool.
    m_state.needs_scheduling.store(false, std::memory_order_relaxed);
    // Full fence so the clear is globally visible before we read thread state.
    std::atomic_thread_fence(std::memory_order_seq_cst);

    // Load the appropriate thread pointers for scheduling.
    KThread* const cur_thread{GetCurrentThreadPointer(m_kernel)};
    KThread* highest_priority_thread{m_state.highest_priority_thread};

    // Check whether there are runnable interrupt tasks.
    if (m_state.interrupt_task_runnable) {
        // The interrupt task is runnable.
        // We want to switch to the interrupt task/idle thread.
        highest_priority_thread = nullptr;
    }

    // If there aren't, we want to check if the highest priority thread is the same as the current
    // thread.
    if (highest_priority_thread == cur_thread) {
        // If they're the same, then we can just issue a memory barrier and return.
        std::atomic_thread_fence(std::memory_order_seq_cst);
        return;
    }

    // The highest priority thread is not the same as the current thread.
    // Jump to the switcher and continue executing from there.
    m_switch_cur_thread = cur_thread;
    m_switch_highest_priority_thread = highest_priority_thread;
    m_switch_from_schedule = true;
    Common::Fiber::YieldTo(cur_thread->host_context, *m_switch_fiber);

    // Returning from ScheduleImpl occurs after this thread has been scheduled again.
}
|
|
|
|
|
2022-07-05 23:27:25 -04:00
|
|
|
// Body of the switch fiber: unloads the outgoing thread, then loops locking
// and switching to the highest-priority thread, retrying whenever another core
// requests scheduling mid-switch. Ends by yielding into the new thread's host
// context. The goto-based retry structure is intentionally order-sensitive.
void KScheduler::ScheduleImplFiber() {
    KThread* const cur_thread{m_switch_cur_thread};
    KThread* highest_priority_thread{m_switch_highest_priority_thread};

    // If we're not coming from scheduling (i.e., we came from SC preemption),
    // we should restart the scheduling loop directly. Not accurate to HOS.
    if (!m_switch_from_schedule) {
        goto retry;
    }

    // Mark that we are not coming from scheduling anymore.
    m_switch_from_schedule = false;

    // Save the original thread context.
    Unload(cur_thread);

    // The current thread's context has been entirely taken care of.
    // Now we want to loop until we successfully switch the thread context.
    while (true) {
        // We're starting to try to do the context switch.
        // Check if the highest priority thread is null.
        if (!highest_priority_thread) {
            // The next thread is nullptr!

            // Switch to the idle thread. Note: HOS treats idling as a special case for
            // performance. This is not *required* for yuzu's purposes, and for singlecore
            // compatibility, we can just move the logic that would go here into the execution
            // of the idle thread. If we ever remove singlecore, we should implement this
            // accurately to HOS.
            highest_priority_thread = m_idle_thread;
        }

        // We want to try to lock the highest priority thread's context.
        // Try to take it.
        while (!highest_priority_thread->context_guard.try_lock()) {
            // The highest priority thread's context is already locked.
            // Check if we need scheduling. If we don't, we can retry directly.
            if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) {
                // If we do, another core is interfering, and we must start again.
                goto retry;
            }
        }

        // It's time to switch the thread.
        // Switch to the highest priority thread.
        SwitchThread(highest_priority_thread);

        // Check if we need scheduling. If we do, then we can't complete the switch and should
        // retry.
        if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) {
            // Our switch failed.
            // We should unlock the thread context, and then retry.
            highest_priority_thread->context_guard.unlock();
            goto retry;
        } else {
            break;
        }

    retry:

        // We failed to successfully do the context switch, and need to retry.
        // Clear needs_scheduling.
        m_state.needs_scheduling.store(false, std::memory_order_relaxed);
        std::atomic_thread_fence(std::memory_order_seq_cst);

        // Refresh the highest priority thread.
        highest_priority_thread = m_state.highest_priority_thread;
    }

    // Reload the guest thread context.
    Reload(highest_priority_thread);

    // Reload the host thread.
    Common::Fiber::YieldTo(m_switch_fiber, *highest_priority_thread->host_context);
}
|
|
|
|
|
|
|
|
// Saves the given thread's guest CPU context out of this core's ARM interface
// and releases its context guard (unless it is being terminated).
void KScheduler::Unload(KThread* thread) {
    auto& cpu_core = m_kernel.System().ArmInterface(m_core_id);
    cpu_core.SaveContext(thread->GetContext32());
    cpu_core.SaveContext(thread->GetContext64());
    // Save the TPIDR_EL0 system register in case it was modified.
    thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
    cpu_core.ClearExclusiveState();

    // Check if the thread is terminated by checking the DPC flags.
    if ((thread->GetStackParameters().dpc_flags & static_cast<u32>(DpcFlag::Terminated)) == 0) {
        // The thread isn't terminated, so we want to unlock it.
        thread->context_guard.unlock();
    }
}
|
|
|
|
|
|
|
|
// Loads the given thread's guest CPU context (registers, TLS, TPIDR_EL0,
// watchpoints) into this core's ARM interface, ready for execution.
void KScheduler::Reload(KThread* thread) {
    auto& cpu_core = m_kernel.System().ArmInterface(m_core_id);
    cpu_core.LoadContext(thread->GetContext32());
    cpu_core.LoadContext(thread->GetContext64());
    cpu_core.SetTlsAddress(thread->GetTLSAddress());
    cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
    cpu_core.LoadWatchpointArray(thread->GetOwnerProcess()->GetWatchpoints());
    // Drop any pending exclusive-monitor state from the previous thread.
    cpu_core.ClearExclusiveState();
}
|
|
|
|
|
2021-01-20 16:42:27 -05:00
|
|
|
// Clears the given thread from every core scheduler's previous-thread slot.
// Must be called with the scheduler lock held.
void KScheduler::ClearPreviousThread(KernelCore& kernel, KThread* thread) {
    ASSERT(IsSchedulerLockedByCurrentThread(kernel));

    // Visit each core's scheduler state in turn.
    for (size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; ++core) {
        auto& slot = kernel.Scheduler(core).m_state.prev_thread;

        // Atomically clear the slot, but only if it still holds our target.
        KThread* expected = thread;
        slot.compare_exchange_strong(expected, nullptr, std::memory_order_seq_cst);
    }
}
|
|
|
|
|
2020-12-31 02:01:08 -05:00
|
|
|
// Keeps the global priority queue in sync with a thread's state transitions:
// removes it when it leaves Runnable, inserts it when it becomes Runnable.
// Must be called with the scheduler lock held.
void KScheduler::OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state) {
    ASSERT(IsSchedulerLockedByCurrentThread(kernel));

    // Check if the state has changed, because if it hasn't there's nothing to do.
    const ThreadState cur_state = thread->GetRawState();
    if (cur_state == old_state) {
        return;
    }

    // Update the priority queues.
    if (old_state == ThreadState::Runnable) {
        // If we were previously runnable, then we're not runnable now, and we should remove.
        GetPriorityQueue(kernel).Remove(thread);
        IncrementScheduledCount(thread);
        SetSchedulerUpdateNeeded(kernel);

        if (thread->IsDummyThread()) {
            // HACK: if this is a dummy thread, it should no longer wake up when the
            // scheduler lock is released.
            kernel.GlobalSchedulerContext().UnregisterDummyThreadForWakeup(thread);
        }
    } else if (cur_state == ThreadState::Runnable) {
        // If we're now runnable, then we weren't previously, and we should add.
        GetPriorityQueue(kernel).PushBack(thread);
        IncrementScheduledCount(thread);
        SetSchedulerUpdateNeeded(kernel);

        if (thread->IsDummyThread()) {
            // HACK: if this is a dummy thread, it should wake up when the scheduler
            // lock is released.
            kernel.GlobalSchedulerContext().RegisterDummyThreadForWakeup(thread);
        }
    }
}
|
|
|
|
|
2020-12-31 02:01:08 -05:00
|
|
|
// Repositions a runnable thread in the priority queue after its priority
// changed, then requests a scheduler update. Must hold the scheduler lock.
void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s32 old_priority) {
    ASSERT(IsSchedulerLockedByCurrentThread(kernel));

    // If the thread is runnable, we want to change its priority in the queue.
    if (thread->GetRawState() == ThreadState::Runnable) {
        GetPriorityQueue(kernel).ChangePriority(old_priority,
                                                thread == GetCurrentThreadPointer(kernel), thread);
        IncrementScheduledCount(thread);
        SetSchedulerUpdateNeeded(kernel);
    }
}
|
|
|
|
|
2020-12-31 02:01:08 -05:00
|
|
|
void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread,
|
2020-12-05 03:02:30 -05:00
|
|
|
const KAffinityMask& old_affinity, s32 old_core) {
|
2022-06-26 18:52:16 -04:00
|
|
|
ASSERT(IsSchedulerLockedByCurrentThread(kernel));
|
2020-12-02 21:08:35 -05:00
|
|
|
|
2020-12-05 02:37:35 -05:00
|
|
|
// If the thread is runnable, we want to change its affinity in the queue.
|
2020-12-28 16:16:43 -05:00
|
|
|
if (thread->GetRawState() == ThreadState::Runnable) {
|
2020-12-02 21:08:35 -05:00
|
|
|
GetPriorityQueue(kernel).ChangeAffinityMask(old_core, old_affinity, thread);
|
|
|
|
IncrementScheduledCount(thread);
|
|
|
|
SetSchedulerUpdateNeeded(kernel);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-06-26 18:52:16 -04:00
|
|
|
// Rotates the scheduled queue for (core_id, priority): the thread at the front is moved
// to the back, and a suggested thread from another core may be migrated to this core if
// migration rules allow it. Requires the scheduler lock to be held (asserted below).
// NOTE(review): presumably invoked on timeslice expiry to round-robin same-priority
// threads — confirm against callers.
void KScheduler::RotateScheduledQueue(KernelCore& kernel, s32 core_id, s32 priority) {
    ASSERT(IsSchedulerLockedByCurrentThread(kernel));

    // Get a reference to the priority queue.
    auto& priority_queue = GetPriorityQueue(kernel);

    // Rotate the front of the queue to the end.
    KThread* top_thread = priority_queue.GetScheduledFront(core_id, priority);
    KThread* next_thread = nullptr;
    if (top_thread != nullptr) {
        next_thread = priority_queue.MoveToScheduledBack(top_thread);
        if (next_thread != top_thread) {
            // The rotation actually changed the queue order; both threads' processes
            // get their scheduled counts bumped.
            IncrementScheduledCount(top_thread);
            IncrementScheduledCount(next_thread);
        }
    }

    // While we have a suggested thread, try to migrate it!
    {
        // Only suggestions at exactly this priority are considered in this pass
        // (see GetSamePriorityNext below).
        KThread* suggested = priority_queue.GetSuggestedFront(core_id, priority);
        while (suggested != nullptr) {
            // Check if the suggested thread is the top thread on its core.
            const s32 suggested_core = suggested->GetActiveCore();
            if (KThread* top_on_suggested_core =
                    (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
                                          : nullptr;
                top_on_suggested_core != suggested) {
                // If the next thread is a new thread that has been waiting longer than our
                // suggestion, we prefer it to our suggestion.
                if (top_thread != next_thread && next_thread != nullptr &&
                    next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick()) {
                    suggested = nullptr;
                    break;
                }

                // If we're allowed to do a migration, do one.
                // NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the suggestion
                // to the front of the queue.
                if (top_on_suggested_core == nullptr ||
                    top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) {
                    suggested->SetActiveCore(core_id);
                    priority_queue.ChangeCore(suggested_core, suggested, true);
                    IncrementScheduledCount(suggested);
                    break;
                }
            }

            // Get the next suggestion.
            suggested = priority_queue.GetSamePriorityNext(core_id, suggested);
        }
    }

    // Now that we might have migrated a thread with the same priority, check if we can do better.
    {
        KThread* best_thread = priority_queue.GetScheduledFront(core_id);
        if (best_thread == GetCurrentThreadPointer(kernel)) {
            // Skip ourselves; we want the best thread other than the caller.
            best_thread = priority_queue.GetScheduledNext(core_id, best_thread);
        }

        // If the best thread we can choose has a priority the same or worse than ours, try to
        // migrate a higher priority thread.
        if (best_thread != nullptr && best_thread->GetPriority() >= priority) {
            KThread* suggested = priority_queue.GetSuggestedFront(core_id);
            while (suggested != nullptr) {
                // If the suggestion's priority is the same as ours, don't bother.
                // (Suggestions are iterated best-priority-first, so we can stop entirely.)
                if (suggested->GetPriority() >= best_thread->GetPriority()) {
                    break;
                }

                // Check if the suggested thread is the top thread on its core.
                const s32 suggested_core = suggested->GetActiveCore();
                if (KThread* top_on_suggested_core =
                        (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
                                              : nullptr;
                    top_on_suggested_core != suggested) {
                    // If we're allowed to do a migration, do one.
                    // NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the
                    // suggestion to the front of the queue.
                    if (top_on_suggested_core == nullptr ||
                        top_on_suggested_core->GetPriority() >=
                            HighestCoreMigrationAllowedPriority) {
                        suggested->SetActiveCore(core_id);
                        priority_queue.ChangeCore(suggested_core, suggested, true);
                        IncrementScheduledCount(suggested);
                        break;
                    }
                }

                // Get the next suggestion.
                suggested = priority_queue.GetSuggestedNext(core_id, suggested);
            }
        }
    }

    // After a rotation, we need a scheduler update.
    SetSchedulerUpdateNeeded(kernel);
}
|
|
|
|
|
2021-01-20 16:42:27 -05:00
|
|
|
// Yields the calling thread to other runnable threads of the same priority on its own
// core, without considering migration from other cores. No-op if nothing has been
// scheduled since the thread's last yield.
void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) {
    // Check that yielding is currently possible and that a process is active.
    ASSERT(CanSchedule(kernel));
    ASSERT(GetCurrentProcessPointer(kernel) != nullptr);

    // Look up the thread performing the yield and its owning process.
    KThread& current_thread = GetCurrentThread(kernel);
    KProcess& current_process = GetCurrentProcess(kernel);

    // A matching yield count means nothing has been scheduled since our last yield,
    // so this request can be skipped entirely.
    if (current_thread.GetYieldScheduleCount() == current_process.GetScheduledCount()) {
        return;
    }

    auto& priority_queue = GetPriorityQueue(kernel);

    // Perform the yield under the scheduler lock.
    {
        KScopedSchedulerLock lock{kernel};

        const auto thread_state = current_thread.GetRawState();
        if (thread_state != ThreadState::Runnable) {
            return;
        }

        // Rotate ourselves to the back of our scheduled queue.
        KThread* const front_thread =
            priority_queue.MoveToScheduledBack(std::addressof(current_thread));
        IncrementScheduledCount(std::addressof(current_thread));

        if (front_thread == std::addressof(current_thread)) {
            // We are still at the front (no other runnable thread at this priority);
            // record the scheduled count so future yields are skipped until the
            // process is scheduled again.
            current_thread.SetYieldScheduleCount(current_process.GetScheduledCount());
        } else {
            // Another thread is now at the front, so a scheduler update is required.
            SetSchedulerUpdateNeeded(kernel);
        }
    }
}
|
|
|
|
|
2021-01-20 16:42:27 -05:00
|
|
|
// Yields the calling thread and additionally tries to migrate a suggested thread from
// another core onto this core if that would improve scheduling. No-op if nothing has
// been scheduled since the thread's last yield. Takes the scheduler lock internally.
void KScheduler::YieldWithCoreMigration(KernelCore& kernel) {
    // Validate preconditions.
    ASSERT(CanSchedule(kernel));
    ASSERT(GetCurrentProcessPointer(kernel) != nullptr);

    // Get the current thread and process.
    KThread& cur_thread = GetCurrentThread(kernel);
    KProcess& cur_process = GetCurrentProcess(kernel);

    // If the thread's yield count matches, there's nothing for us to do.
    if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
        return;
    }

    // Get a reference to the priority queue.
    auto& priority_queue = GetPriorityQueue(kernel);

    // Perform the yield.
    {
        KScopedSchedulerLock sl{kernel};

        const auto cur_state = cur_thread.GetRawState();
        if (cur_state == ThreadState::Runnable) {
            // Get the current active core.
            const s32 core_id = cur_thread.GetActiveCore();

            // Put the current thread at the back of the queue.
            KThread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
            IncrementScheduledCount(std::addressof(cur_thread));

            // While we have a suggested thread, try to migrate it!
            bool recheck = false;
            KThread* suggested = priority_queue.GetSuggestedFront(core_id);
            while (suggested != nullptr) {
                // Check if the suggested thread is the thread running on its core.
                const s32 suggested_core = suggested->GetActiveCore();

                // Read the thread selected to run on the suggested core directly from that
                // core's scheduler state (the scheduler lock is held here).
                if (KThread* running_on_suggested_core =
                        (suggested_core >= 0)
                            ? kernel.Scheduler(suggested_core).m_state.highest_priority_thread
                            : nullptr;
                    running_on_suggested_core != suggested) {
                    // If the current thread's priority is higher than our suggestion's we prefer
                    // the next thread to the suggestion. We also prefer the next thread when the
                    // current thread's priority is equal to the suggestions, but the next thread
                    // has been waiting longer.
                    if ((suggested->GetPriority() > cur_thread.GetPriority()) ||
                        (suggested->GetPriority() == cur_thread.GetPriority() &&
                         next_thread != std::addressof(cur_thread) &&
                         next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick())) {
                        suggested = nullptr;
                        break;
                    }

                    // If we're allowed to do a migration, do one.
                    // NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the
                    // suggestion to the front of the queue.
                    if (running_on_suggested_core == nullptr ||
                        running_on_suggested_core->GetPriority() >=
                            HighestCoreMigrationAllowedPriority) {
                        suggested->SetActiveCore(core_id);
                        priority_queue.ChangeCore(suggested_core, suggested, true);
                        IncrementScheduledCount(suggested);
                        break;
                    } else {
                        // We couldn't perform a migration, but we should check again on a future
                        // yield.
                        recheck = true;
                    }
                }

                // Get the next suggestion.
                suggested = priority_queue.GetSuggestedNext(core_id, suggested);
            }

            // If we still have a suggestion or the next thread is different, we have an update to
            // perform.
            if (suggested != nullptr || next_thread != std::addressof(cur_thread)) {
                SetSchedulerUpdateNeeded(kernel);
            } else if (!recheck) {
                // Otherwise if we don't need to re-check, set the thread's yield count so that we
                // won't waste work until the process is scheduled again.
                cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
            }
        }
    }
}
|
|
|
|
|
2021-01-20 16:42:27 -05:00
|
|
|
// Yields the calling thread to any other thread: the thread is moved off its core
// (active core set to -1), and if the core's scheduled queue becomes empty a suggested
// thread from another core may be migrated in. No-op if nothing has been scheduled
// since the thread's last yield. Takes the scheduler lock internally.
void KScheduler::YieldToAnyThread(KernelCore& kernel) {
    // Validate preconditions.
    ASSERT(CanSchedule(kernel));
    ASSERT(GetCurrentProcessPointer(kernel) != nullptr);

    // Get the current thread and process.
    KThread& cur_thread = GetCurrentThread(kernel);
    KProcess& cur_process = GetCurrentProcess(kernel);

    // If the thread's yield count matches, there's nothing for us to do.
    if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
        return;
    }

    // Get a reference to the priority queue.
    auto& priority_queue = GetPriorityQueue(kernel);

    // Perform the yield.
    {
        KScopedSchedulerLock sl{kernel};

        const auto cur_state = cur_thread.GetRawState();
        if (cur_state == ThreadState::Runnable) {
            // Get the current active core.
            const s32 core_id = cur_thread.GetActiveCore();

            // Migrate the current thread to core -1.
            // This removes it from this core's scheduled queue, leaving it only as a
            // suggestion for cores in its affinity mask.
            cur_thread.SetActiveCore(-1);
            priority_queue.ChangeCore(core_id, std::addressof(cur_thread));
            IncrementScheduledCount(std::addressof(cur_thread));

            // If there's nothing scheduled, we can try to perform a migration.
            if (priority_queue.GetScheduledFront(core_id) == nullptr) {
                // While we have a suggested thread, try to migrate it!
                KThread* suggested = priority_queue.GetSuggestedFront(core_id);
                while (suggested != nullptr) {
                    // Check if the suggested thread is the top thread on its core.
                    const s32 suggested_core = suggested->GetActiveCore();
                    if (KThread* top_on_suggested_core =
                            (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
                                                  : nullptr;
                        top_on_suggested_core != suggested) {
                        // If we're allowed to do a migration, do one.
                        if (top_on_suggested_core == nullptr ||
                            top_on_suggested_core->GetPriority() >=
                                HighestCoreMigrationAllowedPriority) {
                            suggested->SetActiveCore(core_id);
                            priority_queue.ChangeCore(suggested_core, suggested);
                            IncrementScheduledCount(suggested);
                        }

                        // Regardless of whether we migrated, we had a candidate, so we're done.
                        break;
                    }

                    // Get the next suggestion.
                    suggested = priority_queue.GetSuggestedNext(core_id, suggested);
                }

                // If the suggestion is different from the current thread, we need to perform an
                // update.
                if (suggested != std::addressof(cur_thread)) {
                    SetSchedulerUpdateNeeded(kernel);
                } else {
                    // Otherwise, set the thread's yield count so that we won't waste work until the
                    // process is scheduled again.
                    cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
                }
            } else {
                // Otherwise, we have an update to perform.
                SetSchedulerUpdateNeeded(kernel);
            }
        }
    }
}
|
|
|
|
|
2022-06-26 18:52:16 -04:00
|
|
|
// Interrupts every core (other than this scheduler's own) whose bit is set in
// cores_needing_scheduling, so those cores re-run thread selection.
void KScheduler::RescheduleOtherCores(u64 cores_needing_scheduling) {
    // Drop our own core from the mask; we handle our own rescheduling locally.
    const u64 other_cores = cores_needing_scheduling & ~(1ULL << m_core_id);
    if (other_cores != 0) {
        RescheduleCores(m_kernel, other_cores);
    }
}
|
|
|
|
|
2022-06-26 18:52:16 -04:00
|
|
|
// Sends an IPI (interrupt) to every physical core whose bit is set in core_mask.
void KScheduler::RescheduleCores(KernelCore& kernel, u64 core_mask) {
    // Walk all cores in ascending order and interrupt the selected ones.
    for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; ++core) {
        if (((core_mask >> core) & 1) != 0) {
            kernel.PhysicalCore(core).Interrupt();
        }
    }
}
|
|
|
|
|
|
|
|
} // namespace Kernel
|