// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#pragma once

#include <atomic>

#include "common/common_types.h"
#include "common/spin_lock.h"
#include "core/hle/kernel/global_scheduler_context.h"
#include "core/hle/kernel/k_priority_queue.h"
#include "core/hle/kernel/k_scheduler_lock.h"
#include "core/hle/kernel/k_scoped_lock.h"

namespace Common {
class Fiber;
}

namespace Core {
class System;
}

namespace Kernel {

class KernelCore;
class Process;
class SchedulerLock;
class KThread;

class KScheduler final {
|
2019-03-29 17:01:17 -04:00
|
|
|
public:
|
2020-12-02 21:08:35 -05:00
|
|
|
explicit KScheduler(Core::System& system, std::size_t core_id);
|
|
|
|
~KScheduler();
|
2019-03-29 17:01:17 -04:00
|
|
|
|
|
|
|
/// Reschedules to the next available thread (call after current thread is suspended)
|
2020-12-02 21:08:35 -05:00
|
|
|
void RescheduleCurrentCore();
|
|
|
|
|
|
|
|
/// Reschedules cores pending reschedule, to be called on EnableScheduling.
|
|
|
|
static void RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule,
|
|
|
|
Core::EmuThreadHandle global_thread);
|
2019-03-29 17:01:17 -04:00
|
|
|
|
2020-03-10 11:50:33 -04:00
|
|
|
/// The next two are for SingleCore Only.
|
|
|
|
/// Unload current thread before preempting core.
|
2020-12-31 02:01:08 -05:00
|
|
|
void Unload(KThread* thread);
|
2020-12-02 21:08:35 -05:00
|
|
|
|
2020-03-10 11:50:33 -04:00
|
|
|
/// Reload current thread after core preemption.
|
2020-12-31 02:01:08 -05:00
|
|
|
void Reload(KThread* thread);
|
2020-03-10 11:50:33 -04:00
|
|
|
|
2019-03-29 17:01:17 -04:00
|
|
|
/// Gets the current running thread
|
2020-12-31 02:01:08 -05:00
|
|
|
[[nodiscard]] KThread* GetCurrentThread() const;
|
2019-03-29 17:01:17 -04:00
|
|
|
|
|
|
|
/// Gets the timestamp for the last context switch in ticks.
|
2020-12-05 03:02:30 -05:00
|
|
|
[[nodiscard]] u64 GetLastContextSwitchTicks() const;
|
2019-03-29 17:01:17 -04:00
|
|
|
|
2020-12-05 03:02:30 -05:00
|
|
|
[[nodiscard]] bool ContextSwitchPending() const {
|
|
|
|
return state.needs_scheduling.load(std::memory_order_relaxed);
|
2019-03-29 17:01:17 -04:00
|
|
|
}
|
2018-02-18 14:58:40 -05:00
|
|
|
|
2020-02-24 21:04:12 -05:00
|
|
|
void Initialize();
|
|
|
|
|
|
|
|
void OnThreadStart();
|
|
|
|
|
2020-12-05 03:02:30 -05:00
|
|
|
[[nodiscard]] std::shared_ptr<Common::Fiber>& ControlContext() {
|
2020-03-10 11:50:33 -04:00
|
|
|
return switch_fiber;
|
|
|
|
}
|
|
|
|
|
2020-12-05 03:02:30 -05:00
|
|
|
[[nodiscard]] const std::shared_ptr<Common::Fiber>& ControlContext() const {
|
2020-06-27 18:20:06 -04:00
|
|
|
return switch_fiber;
|
|
|
|
}
|
|
|
|
|
2020-12-31 02:01:08 -05:00
|
|
|
[[nodiscard]] u64 UpdateHighestPriorityThread(KThread* highest_thread);
|
2020-12-02 21:08:35 -05:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Takes a thread and moves it to the back of the it's priority list.
|
|
|
|
*
|
|
|
|
* @note This operation can be redundant and no scheduling is changed if marked as so.
|
|
|
|
*/
|
|
|
|
void YieldWithoutCoreMigration();
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Takes a thread and moves it to the back of the it's priority list.
|
|
|
|
* Afterwards, tries to pick a suggested thread from the suggested queue that has worse time or
|
|
|
|
* a better priority than the next thread in the core.
|
|
|
|
*
|
|
|
|
* @note This operation can be redundant and no scheduling is changed if marked as so.
|
|
|
|
*/
|
|
|
|
void YieldWithCoreMigration();
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Takes a thread and moves it out of the scheduling queue.
|
|
|
|
* and into the suggested queue. If no thread can be scheduled afterwards in that core,
|
|
|
|
* a suggested thread is obtained instead.
|
|
|
|
*
|
|
|
|
* @note This operation can be redundant and no scheduling is changed if marked as so.
|
|
|
|
*/
|
|
|
|
void YieldToAnyThread();
|
|
|
|
|
|
|
|
/// Notify the scheduler a thread's status has changed.
|
2020-12-31 02:01:08 -05:00
|
|
|
static void OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state);
|
2020-12-02 21:08:35 -05:00
|
|
|
|
|
|
|
/// Notify the scheduler a thread's priority has changed.
|
2020-12-31 02:01:08 -05:00
|
|
|
static void OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s32 old_priority);
|
2020-12-02 21:08:35 -05:00
|
|
|
|
|
|
|
/// Notify the scheduler a thread's core and/or affinity mask has changed.
|
2020-12-31 02:01:08 -05:00
|
|
|
static void OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread,
|
2020-12-02 21:08:35 -05:00
|
|
|
const KAffinityMask& old_affinity, s32 old_core);
|
|
|
|
|
2020-12-05 03:02:30 -05:00
|
|
|
static bool CanSchedule(KernelCore& kernel);
|
|
|
|
static bool IsSchedulerUpdateNeeded(const KernelCore& kernel);
|
|
|
|
static void SetSchedulerUpdateNeeded(KernelCore& kernel);
|
|
|
|
static void ClearSchedulerUpdateNeeded(KernelCore& kernel);
|
|
|
|
static void DisableScheduling(KernelCore& kernel);
|
|
|
|
static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling,
|
|
|
|
Core::EmuThreadHandle global_thread);
|
|
|
|
[[nodiscard]] static u64 UpdateHighestPriorityThreads(KernelCore& kernel);
|
|
|
|
|
2020-12-02 21:08:35 -05:00
|
|
|
private:
|
2020-12-05 03:02:30 -05:00
|
|
|
friend class GlobalSchedulerContext;
|
|
|
|
|
2020-12-02 21:08:35 -05:00
|
|
|
/**
|
|
|
|
* Takes care of selecting the new scheduled threads in three steps:
|
|
|
|
*
|
|
|
|
* 1. First a thread is selected from the top of the priority queue. If no thread
|
|
|
|
* is obtained then we move to step two, else we are done.
|
|
|
|
*
|
|
|
|
* 2. Second we try to get a suggested thread that's not assigned to any core or
|
|
|
|
* that is not the top thread in that core.
|
|
|
|
*
|
|
|
|
* 3. Third is no suggested thread is found, we do a second pass and pick a running
|
|
|
|
* thread in another core and swap it with its current thread.
|
|
|
|
*
|
|
|
|
* returns the cores needing scheduling.
|
|
|
|
*/
|
2020-12-05 03:02:30 -05:00
|
|
|
[[nodiscard]] static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel);
|
2020-12-02 21:08:35 -05:00
|
|
|
|
2020-12-05 03:02:30 -05:00
|
|
|
[[nodiscard]] static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel);
|
2020-12-02 21:08:35 -05:00
|
|
|
|
2020-12-05 03:02:30 -05:00
|
|
|
void RotateScheduledQueue(s32 core_id, s32 priority);
|
2020-12-02 21:08:35 -05:00
|
|
|
|
|
|
|
void Schedule() {
|
|
|
|
ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
|
|
|
|
this->ScheduleImpl();
|
|
|
|
}
|
2019-10-27 22:01:45 -04:00
|
|
|
|
|
|
|
/// Switches the CPU's active thread context to that of the specified thread
|
2020-12-02 21:08:35 -05:00
|
|
|
void ScheduleImpl();
|
2018-02-18 14:58:40 -05:00
|
|
|
|
2020-02-24 21:04:12 -05:00
|
|
|
/// When a thread wakes up, it must run this through it's new scheduler
|
|
|
|
void SwitchContextStep2();
|
|
|
|
|
2018-10-25 18:42:50 -04:00
|
|
|
/**
|
|
|
|
* Called on every context switch to update the internal timestamp
|
|
|
|
* This also updates the running time ticks for the given thread and
|
|
|
|
* process using the following difference:
|
|
|
|
*
|
|
|
|
* ticks += most_recent_ticks - last_context_switch_ticks
|
|
|
|
*
|
|
|
|
* The internal tick timestamp for the scheduler is simply the
|
|
|
|
* most recent tick count retrieved. No special arithmetic is
|
|
|
|
* applied to it.
|
|
|
|
*/
|
2020-12-31 02:01:08 -05:00
|
|
|
void UpdateLastContextSwitchTime(KThread* thread, Process* process);
|
2018-10-25 18:42:50 -04:00
|
|
|
|
2020-03-06 08:52:24 -05:00
|
|
|
static void OnSwitch(void* this_scheduler);
|
|
|
|
void SwitchToCurrent();
|
|
|
|
|
2020-12-31 02:01:08 -05:00
|
|
|
KThread* current_thread{};
|
|
|
|
KThread* idle_thread{};
|
2020-12-02 21:08:35 -05:00
|
|
|
|
|
|
|
std::shared_ptr<Common::Fiber> switch_fiber{};
|
2018-02-18 14:58:40 -05:00
|
|
|
|
2020-12-02 21:08:35 -05:00
|
|
|
struct SchedulingState {
|
|
|
|
std::atomic<bool> needs_scheduling;
|
|
|
|
bool interrupt_task_thread_runnable{};
|
|
|
|
bool should_count_idle{};
|
|
|
|
u64 idle_count{};
|
2020-12-31 02:01:08 -05:00
|
|
|
KThread* highest_priority_thread{};
|
2020-12-02 21:08:35 -05:00
|
|
|
void* idle_thread_stack{};
|
|
|
|
};
|
|
|
|
|
|
|
|
SchedulingState state;
|
2020-03-06 08:52:24 -05:00
|
|
|
|
2019-03-29 17:01:17 -04:00
|
|
|
Core::System& system;
|
2020-12-02 21:08:35 -05:00
|
|
|
u64 last_context_switch_time{};
|
2019-11-12 03:32:53 -05:00
|
|
|
const std::size_t core_id;
|
2018-05-07 22:12:45 -04:00
|
|
|
|
2020-02-24 21:04:12 -05:00
|
|
|
Common::SpinLock guard{};
|
2018-02-18 14:58:40 -05:00
|
|
|
};
/// RAII helper that acquires the global scheduler lock on construction and
/// releases it on destruction, scoping critical sections over scheduler state.
class KScopedSchedulerLock : KScopedLock<GlobalSchedulerContext::LockType> {
public:
    explicit KScopedSchedulerLock(KernelCore& kernel);
    ~KScopedSchedulerLock();
};

} // namespace Kernel