// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <array>
#include <atomic>
#include <bitset>
#include <functional>
#include <memory>
#include <thread>
#include <unordered_set>
#include <utility>

#include "common/assert.h"
#include "common/logging/log.h"
#include "common/microprofile.h"
#include "common/thread.h"
#include "common/thread_worker.h"
#include "core/arm/arm_interface.h"
#include "core/arm/cpu_interrupt_handler.h"
#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/core_timing_util.h"
#include "core/cpu_manager.h"
#include "core/device_memory.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/init/init_slab_setup.h"
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_slab_heap.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/service_thread.h"
#include "core/hle/kernel/time_manager.h"
#include "core/hle/result.h"
#include "core/hle/service/sm/sm.h"
#include "core/memory.h"

MICROPROFILE_DEFINE(Kernel_SVC, "Kernel", "SVC", MP_RGB(70, 200, 70));

namespace Kernel {

struct KernelCore::Impl {
    explicit Impl(Core::System& system_, KernelCore& kernel_)
        : time_manager{system_}, object_list_container{kernel_}, system{system_} {}

    void SetMulticore(bool is_multi) {
        is_multicore = is_multi;
    }
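
    // Note: the initialization order below is load-bearing. The memory layout must
    // be derived before slab heaps and the system resource limit can be placed into
    // it, and the schedulers must exist before the per-core suspend threads that
    // run on them.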
    void Initialize(KernelCore& kernel) {
        global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel);
        global_handle_table = std::make_unique<Kernel::KHandleTable>(kernel);
        global_handle_table->Initialize(KHandleTable::MaxTableSize);

        is_phantom_mode_for_singlecore = false;

        InitializePhysicalCores();

        // Derive the initial memory layout from the emulated board
        Init::InitializeSlabResourceCounts(kernel);
        KMemoryLayout memory_layout;
        DeriveInitialMemoryLayout(memory_layout);
        Init::InitializeSlabHeaps(system, memory_layout);

        // Initialize kernel memory and resources.
        InitializeSystemResourceLimit(kernel, system.CoreTiming(), memory_layout);
        InitializeMemoryLayout(memory_layout);
        InitializePageSlab();
        InitializeSchedulers();
        InitializeSuspendThreads();
        InitializePreemption(kernel);

        RegisterHostThread();
    }
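
    // Binds each emulated core to the current process: assumes MakeCurrentProcess()
    // has already run, since the cores take their 32/64-bit mode and page table from
    // current_process.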
    void InitializeCores() {
        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
            cores[core_id].Initialize(current_process->Is64BitProcess());
            system.Memory().SetCurrentPageTable(*current_process, core_id);
        }
    }
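
    // Tears kernel state down in roughly the reverse order of Initialize(): sessions
    // and ports first, then containers and threads, the handle table, and finally
    // any objects that were still registered at shutdown.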
    void Shutdown() {
        process_list.clear();

        // Close all open server ports.
        std::unordered_set<KServerPort*> server_ports_;
        {
            std::lock_guard lk(server_ports_lock);
            server_ports_ = server_ports;
            server_ports.clear();
        }
        for (auto* server_port : server_ports_) {
            server_port->Close();
        }
        // Close all open server sessions.
        std::unordered_set<KServerSession*> server_sessions_;
        {
            std::lock_guard lk(server_sessions_lock);
            server_sessions_ = server_sessions;
            server_sessions.clear();
        }
        for (auto* server_session : server_sessions_) {
            server_session->Close();
        }

        // Ensure that the object list container is finalized and properly shut down.
        object_list_container.Finalize();

        // Ensure all service threads gracefully shut down.
        service_threads.clear();

        next_object_id = 0;
        next_kernel_process_id = KProcess::InitialKIPIDMin;
        next_user_process_id = KProcess::ProcessIDMin;
        next_thread_id = 1;

        cores.clear();

        global_handle_table->Finalize();
        global_handle_table.reset();

        preemption_event = nullptr;

        for (auto& iter : named_ports) {
            iter.second->Close();
        }
        named_ports.clear();

        exclusive_monitor.reset();

        // Clean up persistent kernel objects
        auto CleanupObject = [](KAutoObject* obj) {
            if (obj) {
                obj->Close();
                obj = nullptr;
            }
        };
        CleanupObject(hid_shared_mem);
        CleanupObject(font_shared_mem);
        CleanupObject(irs_shared_mem);
        CleanupObject(time_shared_mem);
        CleanupObject(system_resource_limit);

        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
            if (suspend_threads[core_id]) {
                suspend_threads[core_id]->Close();
                suspend_threads[core_id] = nullptr;
            }

            schedulers[core_id]->Finalize();
            schedulers[core_id].reset();
        }

        // Reset the next host thread ID; 0-3 IDs represent core threads, >3 represent others
        next_host_thread_id = Core::Hardware::NUM_CPU_CORES;

        // Close kernel objects that were not freed on shutdown
        {
            std::lock_guard lk(registered_in_use_objects_lock);
            if (registered_in_use_objects.size()) {
                for (auto& object : registered_in_use_objects) {
                    object->Close();
                }
                registered_in_use_objects.clear();
            }
        }

        // Shut down all processes.
        if (current_process) {
            current_process->Finalize();
            current_process->Close();
            current_process = nullptr;
        }

        // Warn about kernel objects that were not freed on shutdown
        {
            std::lock_guard lk(registered_objects_lock);
            if (registered_objects.size()) {
                LOG_WARNING(Kernel, "{} kernel objects were dangling on shutdown!",
                            registered_objects.size());
                registered_objects.clear();
            }
        }
    }
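
    // Creates the exclusive monitor plus one scheduler and one physical core per
    // emulated CPU core.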
    void InitializePhysicalCores() {
        exclusive_monitor =
            Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES);
        for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
            schedulers[i] = std::make_unique<Kernel::KScheduler>(system, i);
            cores.emplace_back(i, system, *schedulers[i], interrupts);
        }
    }

    void InitializeSchedulers() {
        for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
            cores[i].Scheduler().Initialize();
        }
    }

    // Creates the default system resource limit
    void InitializeSystemResourceLimit(KernelCore& kernel,
                                       const Core::Timing::CoreTiming& core_timing,
                                       const KMemoryLayout& memory_layout) {
        system_resource_limit = KResourceLimit::Create(system.Kernel());
        system_resource_limit->Initialize(&core_timing);

        const auto [total_size, kernel_size] = memory_layout.GetTotalAndKernelMemorySizes();

        // If setting the default system values fails, then something seriously wrong has occurred.
        ASSERT(system_resource_limit->SetLimitValue(LimitableResource::PhysicalMemory, total_size)
                   .IsSuccess());
        ASSERT(system_resource_limit->SetLimitValue(LimitableResource::Threads, 800).IsSuccess());
        ASSERT(system_resource_limit->SetLimitValue(LimitableResource::Events, 900).IsSuccess());
        ASSERT(system_resource_limit->SetLimitValue(LimitableResource::TransferMemory, 200)
                   .IsSuccess());
        ASSERT(system_resource_limit->SetLimitValue(LimitableResource::Sessions, 1133).IsSuccess());
        system_resource_limit->Reserve(LimitableResource::PhysicalMemory, kernel_size);

        // Reserve secure applet memory, introduced in firmware 5.0.0
        constexpr u64 secure_applet_memory_size{4_MiB};
        ASSERT(system_resource_limit->Reserve(LimitableResource::PhysicalMemory,
                                              secure_applet_memory_size));

        // This memory seems to be reserved on hardware, but is not reserved/used by yuzu.
        // Likely Horizon OS reserved memory
        // TODO(ameerj): Derive the memory rather than hardcode it.
        constexpr u64 unknown_reserved_memory{0x2f896000};
        ASSERT(system_resource_limit->Reserve(LimitableResource::PhysicalMemory,
                                              unknown_reserved_memory));
    }
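
    // Registers a core-timing event that takes the scheduler lock, preempts the
    // running threads every 10 ms, and then reschedules itself so preemption keeps
    // firing for the lifetime of the kernel.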
    void InitializePreemption(KernelCore& kernel) {
        preemption_event = Core::Timing::CreateEvent(
            "PreemptionCallback", [this, &kernel](std::uintptr_t, std::chrono::nanoseconds) {
                {
                    KScopedSchedulerLock lock(kernel);
                    global_scheduler_context->PreemptThreads();
                }
                const auto time_interval = std::chrono::nanoseconds{
                    Core::Timing::msToCycles(std::chrono::milliseconds(10))};
                system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
            });

        const auto time_interval =
            std::chrono::nanoseconds{Core::Timing::msToCycles(std::chrono::milliseconds(10))};
        system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
    }

    void InitializeSuspendThreads() {
        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
            suspend_threads[core_id] = KThread::Create(system.Kernel());
            ASSERT(KThread::InitializeHighPriorityThread(system, suspend_threads[core_id], {}, {},
                                                         core_id)
                       .IsSuccess());
            suspend_threads[core_id]->SetName(fmt::format("SuspendThread:{}", core_id));
        }
    }

    void MakeCurrentProcess(KProcess* process) {
        current_process = process;
    }
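
    // Host thread IDs are handed out lazily through a thread_local sentinel below:
    // the first NUM_CPU_CORES IDs are reserved for the emulated CPU core threads,
    // and every other host thread receives the next free ID on first use.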
    static inline thread_local u32 host_thread_id = UINT32_MAX;

    /// Gets the host thread ID for the caller, allocating a new one if this is the first time
    u32 GetHostThreadId(std::size_t core_id) {
        if (host_thread_id == UINT32_MAX) {
            // The first four slots are reserved for CPU core threads
            ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
            host_thread_id = static_cast<u32>(core_id);
        }
        return host_thread_id;
    }

    /// Gets the host thread ID for the caller, allocating a new one if this is the first time
    u32 GetHostThreadId() {
        if (host_thread_id == UINT32_MAX) {
            host_thread_id = next_host_thread_id++;
        }
        return host_thread_id;
    }

    // Gets the dummy KThread for the caller, allocating a new one if this is the first time
    KThread* GetHostDummyThread() {
        auto make_thread = [this]() {
            std::lock_guard lk(dummy_thread_lock);
            auto& thread = dummy_threads.emplace_back(std::make_unique<KThread>(system.Kernel()));
            KAutoObject::Create(thread.get());
            ASSERT(KThread::InitializeDummyThread(thread.get()).IsSuccess());
            thread->SetName(fmt::format("DummyThread:{}", GetHostThreadId()));
            return thread.get();
        };

        thread_local KThread* saved_thread = make_thread();
        return saved_thread;
    }

    /// Registers a CPU core thread by allocating a host thread ID for it
    void RegisterCoreThread(std::size_t core_id) {
        ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
        const auto this_id = GetHostThreadId(core_id);
        if (!is_multicore) {
            single_core_thread_id = this_id;
        }
    }

    /// Registers a new host thread by allocating a host thread ID for it
    void RegisterHostThread() {
        [[maybe_unused]] const auto this_id = GetHostThreadId();
        [[maybe_unused]] const auto dummy_thread = GetHostDummyThread();
    }

    [[nodiscard]] u32 GetCurrentHostThreadID() {
        const auto this_id = GetHostThreadId();
        if (!is_multicore && single_core_thread_id == this_id) {
            return static_cast<u32>(system.GetCpuManager().CurrentCore());
        }
        return this_id;
    }

    bool IsPhantomModeForSingleCore() const {
        return is_phantom_mode_for_singlecore;
    }

    void SetIsPhantomModeForSingleCore(bool value) {
        ASSERT(!is_multicore);
        is_phantom_mode_for_singlecore = value;
    }
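
    // Maps the calling host thread to an emulated thread: core threads resolve to
    // their scheduler's current thread, while any other host thread falls back to
    // its per-thread dummy KThread.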
    KThread* GetCurrentEmuThread() {
        const auto thread_id = GetCurrentHostThreadID();
        if (thread_id >= Core::Hardware::NUM_CPU_CORES) {
            return GetHostDummyThread();
        }
        return schedulers[thread_id]->GetCurrentThread();
    }
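
    // Builds the virtual and physical memory region trees the way the real kernel
    // would at boot: root regions first, then the kernel/code/misc/stack/slab/temp
    // regions, auto-mapped devices, the linear DRAM mapping, and finally the pool
    // partitions.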
    void DeriveInitialMemoryLayout(KMemoryLayout& memory_layout) {
        // Insert the root region for the virtual memory tree, from which all other regions will
        // derive.
        memory_layout.GetVirtualMemoryRegionTree().InsertDirectly(
            KernelVirtualAddressSpaceBase,
            KernelVirtualAddressSpaceBase + KernelVirtualAddressSpaceSize - 1);

        // Insert the root region for the physical memory tree, from which all other regions will
        // derive.
        memory_layout.GetPhysicalMemoryRegionTree().InsertDirectly(
            KernelPhysicalAddressSpaceBase,
            KernelPhysicalAddressSpaceBase + KernelPhysicalAddressSpaceSize - 1);

        // Save start and end for ease of use.
        const VAddr code_start_virt_addr = KernelVirtualAddressCodeBase;
        const VAddr code_end_virt_addr = KernelVirtualAddressCodeEnd;

        // Setup the containing kernel region.
        constexpr size_t KernelRegionSize = 1_GiB;
        constexpr size_t KernelRegionAlign = 1_GiB;
        constexpr VAddr kernel_region_start =
            Common::AlignDown(code_start_virt_addr, KernelRegionAlign);
        size_t kernel_region_size = KernelRegionSize;
        if (!(kernel_region_start + KernelRegionSize - 1 <= KernelVirtualAddressSpaceLast)) {
            kernel_region_size = KernelVirtualAddressSpaceEnd - kernel_region_start;
        }
        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
            kernel_region_start, kernel_region_size, KMemoryRegionType_Kernel));

        // Setup the code region.
        constexpr size_t CodeRegionAlign = PageSize;
        constexpr VAddr code_region_start =
            Common::AlignDown(code_start_virt_addr, CodeRegionAlign);
        constexpr VAddr code_region_end = Common::AlignUp(code_end_virt_addr, CodeRegionAlign);
        constexpr size_t code_region_size = code_region_end - code_region_start;
        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
            code_region_start, code_region_size, KMemoryRegionType_KernelCode));

        // Setup board-specific device physical regions.
        Init::SetupDevicePhysicalMemoryRegions(memory_layout);

        // Determine the amount of space needed for the misc region.
        size_t misc_region_needed_size;
        {
            // Each core has a one page stack for all three stack types (Main, Idle, Exception).
            misc_region_needed_size = Core::Hardware::NUM_CPU_CORES * (3 * (PageSize + PageSize));

            // Account for each auto-map device.
            for (const auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
                if (region.HasTypeAttribute(KMemoryRegionAttr_ShouldKernelMap)) {
                    // Check that the region is valid.
                    ASSERT(region.GetEndAddress() != 0);

                    // Account for the region.
                    misc_region_needed_size +=
                        PageSize + (Common::AlignUp(region.GetLastAddress(), PageSize) -
                                    Common::AlignDown(region.GetAddress(), PageSize));
                }
            }

            // Multiply the needed size by three, to account for the need for guard space.
            misc_region_needed_size *= 3;
        }

        // Decide on the actual size for the misc region.
        constexpr size_t MiscRegionAlign = KernelAslrAlignment;
        constexpr size_t MiscRegionMinimumSize = 32_MiB;
        const size_t misc_region_size = Common::AlignUp(
            std::max(misc_region_needed_size, MiscRegionMinimumSize), MiscRegionAlign);
        ASSERT(misc_region_size > 0);

        // Setup the misc region.
        const VAddr misc_region_start =
            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
                misc_region_size, MiscRegionAlign, KMemoryRegionType_Kernel);
        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
            misc_region_start, misc_region_size, KMemoryRegionType_KernelMisc));

        // Setup the stack region.
        constexpr size_t StackRegionSize = 14_MiB;
        constexpr size_t StackRegionAlign = KernelAslrAlignment;
        const VAddr stack_region_start =
            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
                StackRegionSize, StackRegionAlign, KMemoryRegionType_Kernel);
        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
            stack_region_start, StackRegionSize, KMemoryRegionType_KernelStack));

        // Determine the size of the resource region.
        const size_t resource_region_size = memory_layout.GetResourceRegionSizeForInit();

        // Determine the size of the slab region.
        const size_t slab_region_size =
            Common::AlignUp(Init::CalculateTotalSlabHeapSize(system.Kernel()), PageSize);
        ASSERT(slab_region_size <= resource_region_size);

        // Setup the slab region.
        const PAddr code_start_phys_addr = KernelPhysicalAddressCodeBase;
        const PAddr code_end_phys_addr = code_start_phys_addr + code_region_size;
        const PAddr slab_start_phys_addr = code_end_phys_addr;
        const PAddr slab_end_phys_addr = slab_start_phys_addr + slab_region_size;
        constexpr size_t SlabRegionAlign = KernelAslrAlignment;
        const size_t slab_region_needed_size =
            Common::AlignUp(code_end_phys_addr + slab_region_size, SlabRegionAlign) -
            Common::AlignDown(code_end_phys_addr, SlabRegionAlign);
        const VAddr slab_region_start =
            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
                slab_region_needed_size, SlabRegionAlign, KMemoryRegionType_Kernel) +
            (code_end_phys_addr % SlabRegionAlign);
        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
            slab_region_start, slab_region_size, KMemoryRegionType_KernelSlab));

        // Setup the temp region.
        constexpr size_t TempRegionSize = 128_MiB;
        constexpr size_t TempRegionAlign = KernelAslrAlignment;
        const VAddr temp_region_start =
            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
                TempRegionSize, TempRegionAlign, KMemoryRegionType_Kernel);
        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(temp_region_start, TempRegionSize,
                                                                 KMemoryRegionType_KernelTemp));

        // Automatically map in devices that have auto-map attributes.
        for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
            // We only care about kernel regions.
            if (!region.IsDerivedFrom(KMemoryRegionType_Kernel)) {
                continue;
            }

            // Check whether we should map the region.
            if (!region.HasTypeAttribute(KMemoryRegionAttr_ShouldKernelMap)) {
                continue;
            }

            // If this region has already been mapped, no need to consider it.
            if (region.HasTypeAttribute(KMemoryRegionAttr_DidKernelMap)) {
                continue;
            }

            // Check that the region is valid.
            ASSERT(region.GetEndAddress() != 0);

            // Set the attribute to note we've mapped this region.
            region.SetTypeAttribute(KMemoryRegionAttr_DidKernelMap);

            // Create a virtual pair region and insert it into the tree.
            const PAddr map_phys_addr = Common::AlignDown(region.GetAddress(), PageSize);
            const size_t map_size =
                Common::AlignUp(region.GetEndAddress(), PageSize) - map_phys_addr;
            const VAddr map_virt_addr =
                memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
                    map_size, PageSize, KMemoryRegionType_KernelMisc, PageSize);
            ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
                map_virt_addr, map_size, KMemoryRegionType_KernelMiscMappedDevice));
            region.SetPairAddress(map_virt_addr + region.GetAddress() - map_phys_addr);
        }

        Init::SetupDramPhysicalMemoryRegions(memory_layout);

        // Insert a physical region for the kernel code region.
        ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
            code_start_phys_addr, code_region_size, KMemoryRegionType_DramKernelCode));

        // Insert a physical region for the kernel slab region.
        ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
            slab_start_phys_addr, slab_region_size, KMemoryRegionType_DramKernelSlab));

        // Determine size available for kernel page table heaps, requiring > 8 MB.
        const PAddr resource_end_phys_addr = slab_start_phys_addr + resource_region_size;
        const size_t page_table_heap_size = resource_end_phys_addr - slab_end_phys_addr;
        ASSERT(page_table_heap_size / 4_MiB > 2);

        // Insert a physical region for the kernel page table heap region
        ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
            slab_end_phys_addr, page_table_heap_size, KMemoryRegionType_DramKernelPtHeap));

        // All DRAM regions that we haven't tagged by this point will be mapped under the linear
        // mapping. Tag them.
        for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
            if (region.GetType() == KMemoryRegionType_Dram) {
                // Check that the region is valid.
                ASSERT(region.GetEndAddress() != 0);

                // Set the linear map attribute.
                region.SetTypeAttribute(KMemoryRegionAttr_LinearMapped);
            }
        }

        // Get the linear region extents.
        const auto linear_extents =
            memory_layout.GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
                KMemoryRegionAttr_LinearMapped);
        ASSERT(linear_extents.GetEndAddress() != 0);

        // Setup the linear mapping region.
        constexpr size_t LinearRegionAlign = 1_GiB;
        const PAddr aligned_linear_phys_start =
            Common::AlignDown(linear_extents.GetAddress(), LinearRegionAlign);
        const size_t linear_region_size =
            Common::AlignUp(linear_extents.GetEndAddress(), LinearRegionAlign) -
            aligned_linear_phys_start;
        const VAddr linear_region_start =
            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
                linear_region_size, LinearRegionAlign, KMemoryRegionType_None, LinearRegionAlign);

        const u64 linear_region_phys_to_virt_diff = linear_region_start - aligned_linear_phys_start;

        // Map and create regions for all the linearly-mapped data.
        {
            PAddr cur_phys_addr = 0;
            u64 cur_size = 0;
            for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
                if (!region.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) {
                    continue;
                }

                ASSERT(region.GetEndAddress() != 0);

                if (cur_size == 0) {
                    cur_phys_addr = region.GetAddress();
                    cur_size = region.GetSize();
                } else if (cur_phys_addr + cur_size == region.GetAddress()) {
                    cur_size += region.GetSize();
                } else {
                    cur_phys_addr = region.GetAddress();
                    cur_size = region.GetSize();
                }

                const VAddr region_virt_addr =
                    region.GetAddress() + linear_region_phys_to_virt_diff;
                ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
                    region_virt_addr, region.GetSize(),
                    GetTypeForVirtualLinearMapping(region.GetType())));
                region.SetPairAddress(region_virt_addr);

                KMemoryRegion* virt_region =
                    memory_layout.GetVirtualMemoryRegionTree().FindModifiable(region_virt_addr);
                ASSERT(virt_region != nullptr);
                virt_region->SetPairAddress(region.GetAddress());
            }
        }

        // Insert regions for the initial page table region.
        ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
            resource_end_phys_addr, KernelPageTableHeapSize, KMemoryRegionType_DramKernelInitPt));
        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
            resource_end_phys_addr + linear_region_phys_to_virt_diff, KernelPageTableHeapSize,
            KMemoryRegionType_VirtualDramKernelInitPt));

        // All linear-mapped DRAM regions that we haven't tagged by this point will be allocated to
        // some pool partition. Tag them.
        for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
            if (region.GetType() == (KMemoryRegionType_Dram | KMemoryRegionAttr_LinearMapped)) {
                region.SetType(KMemoryRegionType_DramPoolPartition);
            }
        }

        // Setup all other memory regions needed to arrange the pool partitions.
        Init::SetupPoolPartitionMemoryRegions(memory_layout);

        // Cache all linear regions in their own trees for faster access, later.
        memory_layout.InitializeLinearMemoryRegionTrees(aligned_linear_phys_start,
                                                        linear_region_start);
    }
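
    // Carves the derived pools into the memory managers and creates the shared
    // memory blocks (HID, Font, IRS, Time) that system services expect, laid out
    // back to back at the start of the system pool.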
    void InitializeMemoryLayout(const KMemoryLayout& memory_layout) {
        const auto system_pool = memory_layout.GetKernelSystemPoolRegionPhysicalExtents();
        const auto applet_pool = memory_layout.GetKernelAppletPoolRegionPhysicalExtents();
        const auto application_pool = memory_layout.GetKernelApplicationPoolRegionPhysicalExtents();

        // Initialize memory managers
        memory_manager = std::make_unique<KMemoryManager>();
        memory_manager->InitializeManager(KMemoryManager::Pool::Application,
                                          application_pool.GetAddress(),
                                          application_pool.GetEndAddress());
        memory_manager->InitializeManager(KMemoryManager::Pool::Applet, applet_pool.GetAddress(),
                                          applet_pool.GetEndAddress());
        memory_manager->InitializeManager(KMemoryManager::Pool::System, system_pool.GetAddress(),
                                          system_pool.GetEndAddress());

        // Setup memory regions for emulated processes
        // TODO(bunnei): These should not be hardcoded regions initialized within the kernel
        constexpr std::size_t hid_size{0x40000};
        constexpr std::size_t font_size{0x1100000};
        constexpr std::size_t irs_size{0x8000};
        constexpr std::size_t time_size{0x1000};

        const PAddr hid_phys_addr{system_pool.GetAddress()};
        const PAddr font_phys_addr{system_pool.GetAddress() + hid_size};
        const PAddr irs_phys_addr{system_pool.GetAddress() + hid_size + font_size};
        const PAddr time_phys_addr{system_pool.GetAddress() + hid_size + font_size + irs_size};

        hid_shared_mem = KSharedMemory::Create(system.Kernel());
        font_shared_mem = KSharedMemory::Create(system.Kernel());
        irs_shared_mem = KSharedMemory::Create(system.Kernel());
        time_shared_mem = KSharedMemory::Create(system.Kernel());

        hid_shared_mem->Initialize(system.DeviceMemory(), nullptr,
                                   {hid_phys_addr, hid_size / PageSize},
                                   Svc::MemoryPermission::None, Svc::MemoryPermission::Read,
                                   hid_phys_addr, hid_size, "HID:SharedMemory");
        font_shared_mem->Initialize(system.DeviceMemory(), nullptr,
                                    {font_phys_addr, font_size / PageSize},
                                    Svc::MemoryPermission::None, Svc::MemoryPermission::Read,
                                    font_phys_addr, font_size, "Font:SharedMemory");
        irs_shared_mem->Initialize(system.DeviceMemory(), nullptr,
                                   {irs_phys_addr, irs_size / PageSize},
                                   Svc::MemoryPermission::None, Svc::MemoryPermission::Read,
                                   irs_phys_addr, irs_size, "IRS:SharedMemory");
        time_shared_mem->Initialize(system.DeviceMemory(), nullptr,
                                    {time_phys_addr, time_size / PageSize},
                                    Svc::MemoryPermission::None, Svc::MemoryPermission::Read,
                                    time_phys_addr, time_size, "Time:SharedMemory");
    }

    void InitializePageSlab() {
        // Allocate slab heaps
        user_slab_heap_pages =
            std::make_unique<KSlabHeap<Page>>(KSlabHeap<Page>::AllocationType::Guest);

        // TODO(ameerj): This should be derived, not hardcoded within the kernel
        constexpr u64 user_slab_heap_size{0x3de000};
        // Reserve slab heaps
        ASSERT(
            system_resource_limit->Reserve(LimitableResource::PhysicalMemory, user_slab_heap_size));
        // Initialize slab heap
        user_slab_heap_pages->Initialize(
            system.DeviceMemory().GetPointer(Core::DramMemoryMap::SlabHeapBase),
            user_slab_heap_size);
    }
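
    // Looks up the factory registered for a named service and lets it create the
    // client port, remembering the paired server port so Shutdown() can close it.
    // A hypothetical registration/lookup pair, matching the call shape below (the
    // "foo:" name is illustrative only):
    //
    //   kernel.RegisterNamedService("foo:", [](Service::SM::ServiceManager& sm,
    //                                          Core::System& sys) -> KClientPort& {
    //       /* construct the service and return its client port */
    //   });
    //   KClientPort* port = kernel.CreateNamedServicePort("foo:");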
    KClientPort* CreateNamedServicePort(std::string name) {
        auto search = service_interface_factory.find(name);
        if (search == service_interface_factory.end()) {
            UNIMPLEMENTED();
            return {};
        }

        KClientPort* port = &search->second(system.ServiceManager(), system);
        {
            std::lock_guard lk(server_ports_lock);
            server_ports.insert(&port->GetParent()->GetServerPort());
        }
        return port;
    }

    std::mutex server_ports_lock;
    std::mutex server_sessions_lock;
    std::mutex registered_objects_lock;
    std::mutex registered_in_use_objects_lock;
    std::mutex dummy_thread_lock;

    std::atomic<u32> next_object_id{0};
    std::atomic<u64> next_kernel_process_id{KProcess::InitialKIPIDMin};
    std::atomic<u64> next_user_process_id{KProcess::ProcessIDMin};
    std::atomic<u64> next_thread_id{1};

    // Lists all processes that exist in the current session.
    std::vector<KProcess*> process_list;
    KProcess* current_process{};
    std::unique_ptr<Kernel::GlobalSchedulerContext> global_scheduler_context;
    Kernel::TimeManager time_manager;

    Init::KSlabResourceCounts slab_resource_counts{};
    KResourceLimit* system_resource_limit{};

    std::shared_ptr<Core::Timing::EventType> preemption_event;

    // This is the kernel's handle table (the supervisor handle table), which
    // stores all kernel objects in place.
    std::unique_ptr<KHandleTable> global_handle_table;

    KAutoObjectWithListContainer object_list_container;

    /// Map of named ports managed by the kernel, which can be retrieved using
    /// the ConnectToPort SVC.
    std::unordered_map<std::string, ServiceInterfaceFactory> service_interface_factory;
    NamedPortTable named_ports;
    std::unordered_set<KServerPort*> server_ports;
    std::unordered_set<KServerSession*> server_sessions;
    std::unordered_set<KAutoObject*> registered_objects;
    std::unordered_set<KAutoObject*> registered_in_use_objects;

    std::unique_ptr<Core::ExclusiveMonitor> exclusive_monitor;
    std::vector<Kernel::PhysicalCore> cores;

    // Next host thread ID to use, 0-3 IDs represent core threads, >3 represent others
    std::atomic<u32> next_host_thread_id{Core::Hardware::NUM_CPU_CORES};

    // Kernel memory management
    std::unique_ptr<KMemoryManager> memory_manager;
    std::unique_ptr<KSlabHeap<Page>> user_slab_heap_pages;

    // Shared memory for services
    Kernel::KSharedMemory* hid_shared_mem{};
    Kernel::KSharedMemory* font_shared_mem{};
    Kernel::KSharedMemory* irs_shared_mem{};
    Kernel::KSharedMemory* time_shared_mem{};

    // Threads used for services
    std::unordered_set<std::shared_ptr<Kernel::ServiceThread>> service_threads;

    std::array<KThread*, Core::Hardware::NUM_CPU_CORES> suspend_threads;
    std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
    std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};

    // Specifically tracked to be automatically destroyed with kernel
    std::vector<std::unique_ptr<KThread>> dummy_threads;

    bool is_multicore{};
    bool is_phantom_mode_for_singlecore{};
    u32 single_core_thread_id{};

    std::array<u64, Core::Hardware::NUM_CPU_CORES> svc_ticks{};

    // System context
    Core::System& system;
};
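
// KernelCore itself is a thin pimpl facade: the public member functions below simply
// forward to the Impl defined above.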
KernelCore::KernelCore(Core::System& system) : impl{std::make_unique<Impl>(system, *this)} {}
KernelCore::~KernelCore() = default;

void KernelCore::SetMulticore(bool is_multicore) {
    impl->SetMulticore(is_multicore);
}

void KernelCore::Initialize() {
    slab_heap_container = std::make_unique<SlabHeapContainer>();
    impl->Initialize(*this);
}

void KernelCore::InitializeCores() {
    impl->InitializeCores();
}

void KernelCore::Shutdown() {
    impl->Shutdown();
}

const KResourceLimit* KernelCore::GetSystemResourceLimit() const {
    return impl->system_resource_limit;
}

KResourceLimit* KernelCore::GetSystemResourceLimit() {
    return impl->system_resource_limit;
}

KScopedAutoObject<KThread> KernelCore::RetrieveThreadFromGlobalHandleTable(Handle handle) const {
    return impl->global_handle_table->GetObject<KThread>(handle);
}

void KernelCore::AppendNewProcess(KProcess* process) {
    impl->process_list.push_back(process);
}

void KernelCore::MakeCurrentProcess(KProcess* process) {
    impl->MakeCurrentProcess(process);
}

KProcess* KernelCore::CurrentProcess() {
    return impl->current_process;
}

const KProcess* KernelCore::CurrentProcess() const {
    return impl->current_process;
}

const std::vector<KProcess*>& KernelCore::GetProcessList() const {
    return impl->process_list;
}

Kernel::GlobalSchedulerContext& KernelCore::GlobalSchedulerContext() {
    return *impl->global_scheduler_context;
}

const Kernel::GlobalSchedulerContext& KernelCore::GlobalSchedulerContext() const {
    return *impl->global_scheduler_context;
}

Kernel::KScheduler& KernelCore::Scheduler(std::size_t id) {
    return *impl->schedulers[id];
}

const Kernel::KScheduler& KernelCore::Scheduler(std::size_t id) const {
    return *impl->schedulers[id];
}

Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) {
    return impl->cores[id];
}

const Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) const {
    return impl->cores[id];
}

size_t KernelCore::CurrentPhysicalCoreIndex() const {
    const u32 core_id = impl->GetCurrentHostThreadID();
    if (core_id >= Core::Hardware::NUM_CPU_CORES) {
        return Core::Hardware::NUM_CPU_CORES - 1;
    }
    return core_id;
}

Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() {
    u32 core_id = impl->GetCurrentHostThreadID();
    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
    return impl->cores[core_id];
}

const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
    u32 core_id = impl->GetCurrentHostThreadID();
    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
    return impl->cores[core_id];
}

Kernel::KScheduler* KernelCore::CurrentScheduler() {
    u32 core_id = impl->GetCurrentHostThreadID();
    if (core_id >= Core::Hardware::NUM_CPU_CORES) {
        // This is expected when not called from a guest thread
        return {};
    }
    return impl->schedulers[core_id].get();
}

std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts() {
    return impl->interrupts;
}

const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts()
    const {
    return impl->interrupts;
}

Kernel::TimeManager& KernelCore::TimeManager() {
    return impl->time_manager;
}

const Kernel::TimeManager& KernelCore::TimeManager() const {
    return impl->time_manager;
}

Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() {
    return *impl->exclusive_monitor;
}

const Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() const {
    return *impl->exclusive_monitor;
}

KAutoObjectWithListContainer& KernelCore::ObjectListContainer() {
    return impl->object_list_container;
}

const KAutoObjectWithListContainer& KernelCore::ObjectListContainer() const {
    return impl->object_list_container;
}

void KernelCore::InvalidateAllInstructionCaches() {
    for (auto& physical_core : impl->cores) {
        physical_core.ArmInterface().ClearInstructionCache();
    }
}

void KernelCore::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) {
    for (auto& physical_core : impl->cores) {
        if (!physical_core.IsInitialized()) {
            continue;
        }
        physical_core.ArmInterface().InvalidateCacheRange(addr, size);
    }
}

void KernelCore::PrepareReschedule(std::size_t id) {
    // TODO: Reimplement this
}

void KernelCore::RegisterNamedService(std::string name, ServiceInterfaceFactory&& factory) {
    impl->service_interface_factory.emplace(std::move(name), factory);
}

KClientPort* KernelCore::CreateNamedServicePort(std::string name) {
    return impl->CreateNamedServicePort(std::move(name));
}

void KernelCore::RegisterServerSession(KServerSession* server_session) {
    std::lock_guard lk(impl->server_sessions_lock);
    impl->server_sessions.insert(server_session);
}

void KernelCore::UnregisterServerSession(KServerSession* server_session) {
    std::lock_guard lk(impl->server_sessions_lock);
    impl->server_sessions.erase(server_session);
}

void KernelCore::RegisterKernelObject(KAutoObject* object) {
    std::lock_guard lk(impl->registered_objects_lock);
    impl->registered_objects.insert(object);
}

void KernelCore::UnregisterKernelObject(KAutoObject* object) {
    std::lock_guard lk(impl->registered_objects_lock);
    impl->registered_objects.erase(object);
}

void KernelCore::RegisterInUseObject(KAutoObject* object) {
    std::lock_guard lk(impl->registered_in_use_objects_lock);
    impl->registered_in_use_objects.insert(object);
}

void KernelCore::UnregisterInUseObject(KAutoObject* object) {
    std::lock_guard lk(impl->registered_in_use_objects_lock);
    impl->registered_in_use_objects.erase(object);
}

bool KernelCore::IsValidNamedPort(NamedPortTable::const_iterator port) const {
    return port != impl->named_ports.cend();
}

u32 KernelCore::CreateNewObjectID() {
    return impl->next_object_id++;
}

u64 KernelCore::CreateNewThreadID() {
    return impl->next_thread_id++;
}

u64 KernelCore::CreateNewKernelProcessID() {
    return impl->next_kernel_process_id++;
}

u64 KernelCore::CreateNewUserProcessID() {
    return impl->next_user_process_id++;
}

KHandleTable& KernelCore::GlobalHandleTable() {
    return *impl->global_handle_table;
}

const KHandleTable& KernelCore::GlobalHandleTable() const {
    return *impl->global_handle_table;
}

void KernelCore::RegisterCoreThread(std::size_t core_id) {
    impl->RegisterCoreThread(core_id);
}

void KernelCore::RegisterHostThread() {
    impl->RegisterHostThread();
}

u32 KernelCore::GetCurrentHostThreadID() const {
    return impl->GetCurrentHostThreadID();
}

KThread* KernelCore::GetCurrentEmuThread() const {
    return impl->GetCurrentEmuThread();
}

KMemoryManager& KernelCore::MemoryManager() {
    return *impl->memory_manager;
}

const KMemoryManager& KernelCore::MemoryManager() const {
    return *impl->memory_manager;
}

KSlabHeap<Page>& KernelCore::GetUserSlabHeapPages() {
    return *impl->user_slab_heap_pages;
}

const KSlabHeap<Page>& KernelCore::GetUserSlabHeapPages() const {
    return *impl->user_slab_heap_pages;
}

Kernel::KSharedMemory& KernelCore::GetHidSharedMem() {
    return *impl->hid_shared_mem;
}

const Kernel::KSharedMemory& KernelCore::GetHidSharedMem() const {
    return *impl->hid_shared_mem;
}

Kernel::KSharedMemory& KernelCore::GetFontSharedMem() {
    return *impl->font_shared_mem;
}

const Kernel::KSharedMemory& KernelCore::GetFontSharedMem() const {
    return *impl->font_shared_mem;
}

Kernel::KSharedMemory& KernelCore::GetIrsSharedMem() {
    return *impl->irs_shared_mem;
}

const Kernel::KSharedMemory& KernelCore::GetIrsSharedMem() const {
    return *impl->irs_shared_mem;
}

Kernel::KSharedMemory& KernelCore::GetTimeSharedMem() {
    return *impl->time_shared_mem;
}

const Kernel::KSharedMemory& KernelCore::GetTimeSharedMem() const {
    return *impl->time_shared_mem;
}
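
// Note the inversion below: when suspension is requested, the per-core suspend
// threads are set Runnable (so they can occupy the cores), and set back to Waiting
// when emulation resumes.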
void KernelCore::Suspend(bool in_suspension) {
    const bool should_suspend = exception_exited || in_suspension;
    {
        KScopedSchedulerLock lock(*this);
        const auto state = should_suspend ? ThreadState::Runnable : ThreadState::Waiting;
        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
            impl->suspend_threads[core_id]->SetState(state);
            impl->suspend_threads[core_id]->SetWaitReasonForDebugging(
                ThreadWaitReasonForDebugging::Suspended);
        }
    }
}

bool KernelCore::IsMulticore() const {
    return impl->is_multicore;
}

void KernelCore::ExceptionalExit() {
    exception_exited = true;
    Suspend(true);
}

void KernelCore::EnterSVCProfile() {
    impl->svc_ticks[CurrentPhysicalCoreIndex()] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
}

void KernelCore::ExitSVCProfile() {
    MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]);
}

std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) {
    auto service_thread = std::make_shared<Kernel::ServiceThread>(*this, 1, name);
    impl->service_threads.emplace(service_thread);
    return service_thread;
}

void KernelCore::ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread) {
    if (auto strong_ptr = service_thread.lock()) {
        impl->service_threads.erase(strong_ptr);
    }
}

Init::KSlabResourceCounts& KernelCore::SlabResourceCounts() {
    return impl->slab_resource_counts;
}

const Init::KSlabResourceCounts& KernelCore::SlabResourceCounts() const {
    return impl->slab_resource_counts;
}

bool KernelCore::IsPhantomModeForSingleCore() const {
    return impl->IsPhantomModeForSingleCore();
}

void KernelCore::SetIsPhantomModeForSingleCore(bool value) {
    impl->SetIsPhantomModeForSingleCore(value);
}

Core::System& KernelCore::System() {
    return impl->system;
}

const Core::System& KernelCore::System() const {
    return impl->system;
}

} // namespace Kernel