Diffstat (limited to '')
 src/core/hle/kernel/kernel.cpp | 593 ++++++++++++++++++++++++++---------------
 1 file changed, 364 insertions(+), 229 deletions(-)
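This change removes the kernel's dedicated ServiceThread machinery: HLE services now run through Service::ServerManager instances that are handed to the kernel via KernelCore::RunServer and torn down in CloseServices. Alongside that, the "current process" is renamed to the "application process" throughout, raw VAddr/PAddr usages migrate to the typed KVirtualAddress/KPhysicalAddress/KProcessAddress wrappers (hence the added GetInteger() calls), kernel objects such as the main/idle threads, the system resource limit, and the hack shared memories are now explicitly registered with the kernel, per-object naming moves into a new KObjectNameGlobalData, and per-type slab heaps are exposed through KernelCore::SlabHeap<T>().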
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 1fb25f221..f33600ca5 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -29,18 +29,20 @@
#include "core/hle/kernel/k_hardware_timer.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
+#include "core/hle/kernel/k_object_name.h"
#include "core/hle/kernel/k_page_buffer.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_system_resource.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_worker_task_manager.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h"
-#include "core/hle/kernel/service_thread.h"
#include "core/hle/result.h"
+#include "core/hle/service/server_manager.h"
#include "core/hle/service/sm/sm.h"
#include "core/memory.h"
@@ -54,9 +56,7 @@ struct KernelCore::Impl {
static constexpr size_t BlockInfoSlabHeapSize = 4000;
static constexpr size_t ReservedDynamicPageCount = 64;
- explicit Impl(Core::System& system_, KernelCore& kernel_)
- : service_threads_manager{1, "ServiceThreadsManager"},
- service_thread_barrier{2}, system{system_} {}
+ explicit Impl(Core::System& system_, KernelCore& kernel_) : system{system_} {}
void SetMulticore(bool is_multi) {
is_multicore = is_multi;
@@ -84,6 +84,7 @@ struct KernelCore::Impl {
InitializeShutdownThreads();
InitializePhysicalCores();
InitializePreemption(kernel);
+ InitializeGlobalData(kernel);
// Initialize the Dynamic Slab Heaps.
{
@@ -94,21 +95,19 @@ struct KernelCore::Impl {
pt_heap_region.GetSize());
}
- InitializeHackSharedMemory();
+ InitializeHackSharedMemory(kernel);
RegisterHostThread(nullptr);
-
- default_service_thread = &CreateServiceThread(kernel, "DefaultServiceThread");
}
void InitializeCores() {
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
- cores[core_id]->Initialize((*current_process).Is64BitProcess());
- system.Memory().SetCurrentPageTable(*current_process, core_id);
+ cores[core_id]->Initialize((*application_process).Is64BitProcess());
+ system.ApplicationMemory().SetCurrentPageTable(*application_process, core_id);
}
}
- void CloseCurrentProcess() {
- KProcess* old_process = current_process.exchange(nullptr);
+ void CloseApplicationProcess() {
+ KProcess* old_process = application_process.exchange(nullptr);
if (old_process == nullptr) {
return;
}
@@ -138,11 +137,6 @@ struct KernelCore::Impl {
preemption_event = nullptr;
- for (auto& iter : named_ports) {
- iter.second->Close();
- }
- named_ports.clear();
-
exclusive_monitor.reset();
// Cleanup persistent kernel objects
@@ -182,7 +176,7 @@ struct KernelCore::Impl {
}
}
- CloseCurrentProcess();
+ CloseApplicationProcess();
// Track kernel objects that were not freed on shutdown
{
@@ -194,6 +188,8 @@ struct KernelCore::Impl {
}
}
+ object_name_global_data.reset();
+
// Ensure that the object list container is finalized and properly shutdown.
global_object_list_container->Finalize();
global_object_list_container.reset();
@@ -203,13 +199,14 @@ struct KernelCore::Impl {
}
void CloseServices() {
- // Ensures all service threads gracefully shutdown.
- ClearServiceThreads();
+ // Ensures all servers gracefully shutdown.
+ std::scoped_lock lk{server_lock};
+ server_managers.clear();
}
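Note: the new RunServer (later in this diff) takes the same server_lock and returns early once is_shutting_down is set, so clearing server_managers here cannot race a manager being registered mid-shutdown.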
void InitializePhysicalCores() {
exclusive_monitor =
- Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES);
+ Core::MakeExclusiveMonitor(system.ApplicationMemory(), Core::Hardware::NUM_CPU_CORES);
for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
const s32 core{static_cast<s32>(i)};
@@ -217,13 +214,14 @@ struct KernelCore::Impl {
cores[i] = std::make_unique<Kernel::PhysicalCore>(i, system, *schedulers[i]);
auto* main_thread{Kernel::KThread::Create(system.Kernel())};
- main_thread->SetName(fmt::format("MainThread:{}", core));
main_thread->SetCurrentCore(core);
ASSERT(Kernel::KThread::InitializeMainThread(system, main_thread, core).IsSuccess());
+ KThread::Register(system.Kernel(), main_thread);
auto* idle_thread{Kernel::KThread::Create(system.Kernel())};
idle_thread->SetCurrentCore(core);
ASSERT(Kernel::KThread::InitializeIdleThread(system, idle_thread, core).IsSuccess());
+ KThread::Register(system.Kernel(), idle_thread);
schedulers[i]->Initialize(main_thread, idle_thread, core);
}
@@ -234,6 +232,7 @@ struct KernelCore::Impl {
const Core::Timing::CoreTiming& core_timing) {
system_resource_limit = KResourceLimit::Create(system.Kernel());
system_resource_limit->Initialize(&core_timing);
+ KResourceLimit::Register(kernel, system_resource_limit);
const auto sizes{memory_layout->GetTotalAndKernelMemorySizes()};
const auto total_size{sizes.first};
@@ -275,9 +274,9 @@ struct KernelCore::Impl {
system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event);
}
- void InitializeResourceManagers(KernelCore& kernel, VAddr address, size_t size) {
+ void InitializeResourceManagers(KernelCore& kernel, KVirtualAddress address, size_t size) {
// Ensure that the buffer is suitable for our use.
- ASSERT(Common::IsAligned(address, PageSize));
+ ASSERT(Common::IsAligned(GetInteger(address), PageSize));
ASSERT(Common::IsAligned(size, PageSize));
// Ensure that we have space for our reference counts.
@@ -359,55 +358,52 @@ struct KernelCore::Impl {
ASSERT(KThread::InitializeHighPriorityThread(system, shutdown_threads[core_id], {}, {},
core_id)
.IsSuccess());
- shutdown_threads[core_id]->SetName(fmt::format("SuspendThread:{}", core_id));
+ KThread::Register(system.Kernel(), shutdown_threads[core_id]);
}
}
- void MakeCurrentProcess(KProcess* process) {
- current_process = process;
+ void InitializeGlobalData(KernelCore& kernel) {
+ object_name_global_data = std::make_unique<KObjectNameGlobalData>(kernel);
}
- static inline thread_local u32 host_thread_id = UINT32_MAX;
+ void MakeApplicationProcess(KProcess* process) {
+ application_process = process;
+ }
- /// Gets the host thread ID for the caller, allocating a new one if this is the first time
- u32 GetHostThreadId(std::size_t core_id) {
- if (host_thread_id == UINT32_MAX) {
- // The first four slots are reserved for CPU core threads
- ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
- host_thread_id = static_cast<u32>(core_id);
- }
+ static inline thread_local u8 host_thread_id = UINT8_MAX;
+
+ /// Sets the host thread ID for the caller.
+ u32 SetHostThreadId(std::size_t core_id) {
+ // This should only be called during core init.
+ ASSERT(host_thread_id == UINT8_MAX);
+
+ // The first four slots are reserved for CPU core threads
+ ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
+ host_thread_id = static_cast<u8>(core_id);
return host_thread_id;
}
- /// Gets the host thread ID for the caller, allocating a new one if this is the first time
- u32 GetHostThreadId() {
- if (host_thread_id == UINT32_MAX) {
- host_thread_id = next_host_thread_id++;
- }
+ /// Gets the host thread ID for the caller
+ u32 GetHostThreadId() const {
return host_thread_id;
}
// Gets the dummy KThread for the caller, allocating a new one if this is the first time
KThread* GetHostDummyThread(KThread* existing_thread) {
- auto initialize = [this](KThread* thread) {
+ const auto initialize{[](KThread* thread) {
ASSERT(KThread::InitializeDummyThread(thread, nullptr).IsSuccess());
- thread->SetName(fmt::format("DummyThread:{}", GetHostThreadId()));
return thread;
- };
+ }};
thread_local KThread raw_thread{system.Kernel()};
- thread_local KThread* thread = nullptr;
- if (thread == nullptr) {
- thread = (existing_thread == nullptr) ? initialize(&raw_thread) : existing_thread;
- }
-
+ thread_local KThread* thread = existing_thread ? existing_thread : initialize(&raw_thread);
return thread;
}
/// Registers a CPU core thread by allocating a host thread ID for it
void RegisterCoreThread(std::size_t core_id) {
ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
- const auto this_id = GetHostThreadId(core_id);
+ const auto this_id = SetHostThreadId(core_id);
if (!is_multicore) {
single_core_thread_id = this_id;
}
@@ -415,7 +411,6 @@ struct KernelCore::Impl {
/// Registers a new host thread by allocating a host thread ID for it
void RegisterHostThread(KThread* existing_thread) {
- [[maybe_unused]] const auto this_id = GetHostThreadId();
[[maybe_unused]] const auto dummy_thread = GetHostDummyThread(existing_thread);
}
@@ -445,11 +440,9 @@ struct KernelCore::Impl {
static inline thread_local KThread* current_thread{nullptr};
KThread* GetCurrentEmuThread() {
- const auto thread_id = GetCurrentHostThreadID();
- if (thread_id >= Core::Hardware::NUM_CPU_CORES) {
- return GetHostDummyThread(nullptr);
+ if (!current_thread) {
+ current_thread = GetHostDummyThread(nullptr);
}
-
return current_thread;
}
@@ -473,29 +466,30 @@ struct KernelCore::Impl {
KernelPhysicalAddressSpaceBase + KernelPhysicalAddressSpaceSize - 1);
// Save start and end for ease of use.
- const VAddr code_start_virt_addr = KernelVirtualAddressCodeBase;
- const VAddr code_end_virt_addr = KernelVirtualAddressCodeEnd;
+ constexpr KVirtualAddress code_start_virt_addr = KernelVirtualAddressCodeBase;
+ constexpr KVirtualAddress code_end_virt_addr = KernelVirtualAddressCodeEnd;
// Setup the containing kernel region.
constexpr size_t KernelRegionSize = 1_GiB;
constexpr size_t KernelRegionAlign = 1_GiB;
- constexpr VAddr kernel_region_start =
- Common::AlignDown(code_start_virt_addr, KernelRegionAlign);
+ constexpr KVirtualAddress kernel_region_start =
+ Common::AlignDown(GetInteger(code_start_virt_addr), KernelRegionAlign);
size_t kernel_region_size = KernelRegionSize;
if (!(kernel_region_start + KernelRegionSize - 1 <= KernelVirtualAddressSpaceLast)) {
- kernel_region_size = KernelVirtualAddressSpaceEnd - kernel_region_start;
+ kernel_region_size = KernelVirtualAddressSpaceEnd - GetInteger(kernel_region_start);
}
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
- kernel_region_start, kernel_region_size, KMemoryRegionType_Kernel));
+ GetInteger(kernel_region_start), kernel_region_size, KMemoryRegionType_Kernel));
// Setup the code region.
constexpr size_t CodeRegionAlign = PageSize;
- constexpr VAddr code_region_start =
- Common::AlignDown(code_start_virt_addr, CodeRegionAlign);
- constexpr VAddr code_region_end = Common::AlignUp(code_end_virt_addr, CodeRegionAlign);
+ constexpr KVirtualAddress code_region_start =
+ Common::AlignDown(GetInteger(code_start_virt_addr), CodeRegionAlign);
+ constexpr KVirtualAddress code_region_end =
+ Common::AlignUp(GetInteger(code_end_virt_addr), CodeRegionAlign);
constexpr size_t code_region_size = code_region_end - code_region_start;
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
- code_region_start, code_region_size, KMemoryRegionType_KernelCode));
+ GetInteger(code_region_start), code_region_size, KMemoryRegionType_KernelCode));
// Setup board-specific device physical regions.
Init::SetupDevicePhysicalMemoryRegions(*memory_layout);
@@ -531,11 +525,11 @@ struct KernelCore::Impl {
ASSERT(misc_region_size > 0);
// Setup the misc region.
- const VAddr misc_region_start =
+ const KVirtualAddress misc_region_start =
memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
misc_region_size, MiscRegionAlign, KMemoryRegionType_Kernel);
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
- misc_region_start, misc_region_size, KMemoryRegionType_KernelMisc));
+ GetInteger(misc_region_start), misc_region_size, KMemoryRegionType_KernelMisc));
// Determine if we'll use extra thread resources.
const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit();
@@ -543,11 +537,11 @@ struct KernelCore::Impl {
// Setup the stack region.
constexpr size_t StackRegionSize = 14_MiB;
constexpr size_t StackRegionAlign = KernelAslrAlignment;
- const VAddr stack_region_start =
+ const KVirtualAddress stack_region_start =
memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
StackRegionSize, StackRegionAlign, KMemoryRegionType_Kernel);
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
- stack_region_start, StackRegionSize, KMemoryRegionType_KernelStack));
+ GetInteger(stack_region_start), StackRegionSize, KMemoryRegionType_KernelStack));
// Determine the size of the resource region.
const size_t resource_region_size =
@@ -559,29 +553,29 @@ struct KernelCore::Impl {
ASSERT(slab_region_size <= resource_region_size);
// Setup the slab region.
- const PAddr code_start_phys_addr = KernelPhysicalAddressCodeBase;
- const PAddr code_end_phys_addr = code_start_phys_addr + code_region_size;
- const PAddr slab_start_phys_addr = code_end_phys_addr;
- const PAddr slab_end_phys_addr = slab_start_phys_addr + slab_region_size;
+ const KPhysicalAddress code_start_phys_addr = KernelPhysicalAddressCodeBase;
+ const KPhysicalAddress code_end_phys_addr = code_start_phys_addr + code_region_size;
+ const KPhysicalAddress slab_start_phys_addr = code_end_phys_addr;
+ const KPhysicalAddress slab_end_phys_addr = slab_start_phys_addr + slab_region_size;
constexpr size_t SlabRegionAlign = KernelAslrAlignment;
const size_t slab_region_needed_size =
- Common::AlignUp(code_end_phys_addr + slab_region_size, SlabRegionAlign) -
- Common::AlignDown(code_end_phys_addr, SlabRegionAlign);
- const VAddr slab_region_start =
+ Common::AlignUp(GetInteger(code_end_phys_addr) + slab_region_size, SlabRegionAlign) -
+ Common::AlignDown(GetInteger(code_end_phys_addr), SlabRegionAlign);
+ const KVirtualAddress slab_region_start =
memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
slab_region_needed_size, SlabRegionAlign, KMemoryRegionType_Kernel) +
- (code_end_phys_addr % SlabRegionAlign);
+ (GetInteger(code_end_phys_addr) % SlabRegionAlign);
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
- slab_region_start, slab_region_size, KMemoryRegionType_KernelSlab));
+ GetInteger(slab_region_start), slab_region_size, KMemoryRegionType_KernelSlab));
// Setup the temp region.
constexpr size_t TempRegionSize = 128_MiB;
constexpr size_t TempRegionAlign = KernelAslrAlignment;
- const VAddr temp_region_start =
+ const KVirtualAddress temp_region_start =
memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
TempRegionSize, TempRegionAlign, KMemoryRegionType_Kernel);
- ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(temp_region_start, TempRegionSize,
- KMemoryRegionType_KernelTemp));
+ ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
+ GetInteger(temp_region_start), TempRegionSize, KMemoryRegionType_KernelTemp));
// Automatically map in devices that have auto-map attributes.
for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
@@ -607,35 +601,37 @@ struct KernelCore::Impl {
region.SetTypeAttribute(KMemoryRegionAttr_DidKernelMap);
// Create a virtual pair region and insert it into the tree.
- const PAddr map_phys_addr = Common::AlignDown(region.GetAddress(), PageSize);
+ const KPhysicalAddress map_phys_addr = Common::AlignDown(region.GetAddress(), PageSize);
const size_t map_size =
- Common::AlignUp(region.GetEndAddress(), PageSize) - map_phys_addr;
- const VAddr map_virt_addr =
+ Common::AlignUp(region.GetEndAddress(), PageSize) - GetInteger(map_phys_addr);
+ const KVirtualAddress map_virt_addr =
memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
map_size, PageSize, KMemoryRegionType_KernelMisc, PageSize);
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
- map_virt_addr, map_size, KMemoryRegionType_KernelMiscMappedDevice));
- region.SetPairAddress(map_virt_addr + region.GetAddress() - map_phys_addr);
+ GetInteger(map_virt_addr), map_size, KMemoryRegionType_KernelMiscMappedDevice));
+ region.SetPairAddress(GetInteger(map_virt_addr) + region.GetAddress() -
+ GetInteger(map_phys_addr));
}
Init::SetupDramPhysicalMemoryRegions(*memory_layout);
// Insert a physical region for the kernel code region.
ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
- code_start_phys_addr, code_region_size, KMemoryRegionType_DramKernelCode));
+ GetInteger(code_start_phys_addr), code_region_size, KMemoryRegionType_DramKernelCode));
// Insert a physical region for the kernel slab region.
ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
- slab_start_phys_addr, slab_region_size, KMemoryRegionType_DramKernelSlab));
+ GetInteger(slab_start_phys_addr), slab_region_size, KMemoryRegionType_DramKernelSlab));
// Determine size available for kernel page table heaps, requiring > 8 MB.
- const PAddr resource_end_phys_addr = slab_start_phys_addr + resource_region_size;
+ const KPhysicalAddress resource_end_phys_addr = slab_start_phys_addr + resource_region_size;
const size_t page_table_heap_size = resource_end_phys_addr - slab_end_phys_addr;
ASSERT(page_table_heap_size / 4_MiB > 2);
// Insert a physical region for the kernel page table heap region
ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
- slab_end_phys_addr, page_table_heap_size, KMemoryRegionType_DramKernelPtHeap));
+ GetInteger(slab_end_phys_addr), page_table_heap_size,
+ KMemoryRegionType_DramKernelPtHeap));
// All DRAM regions that we haven't tagged by this point will be mapped under the linear
// mapping. Tag them.
@@ -657,20 +653,21 @@ struct KernelCore::Impl {
// Setup the linear mapping region.
constexpr size_t LinearRegionAlign = 1_GiB;
- const PAddr aligned_linear_phys_start =
+ const KPhysicalAddress aligned_linear_phys_start =
Common::AlignDown(linear_extents.GetAddress(), LinearRegionAlign);
const size_t linear_region_size =
Common::AlignUp(linear_extents.GetEndAddress(), LinearRegionAlign) -
- aligned_linear_phys_start;
- const VAddr linear_region_start =
+ GetInteger(aligned_linear_phys_start);
+ const KVirtualAddress linear_region_start =
memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
linear_region_size, LinearRegionAlign, KMemoryRegionType_None, LinearRegionAlign);
- const u64 linear_region_phys_to_virt_diff = linear_region_start - aligned_linear_phys_start;
+ const u64 linear_region_phys_to_virt_diff =
+ GetInteger(linear_region_start) - GetInteger(aligned_linear_phys_start);
// Map and create regions for all the linearly-mapped data.
{
- PAddr cur_phys_addr = 0;
+ KPhysicalAddress cur_phys_addr = 0;
u64 cur_size = 0;
for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
if (!region.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) {
@@ -689,15 +686,16 @@ struct KernelCore::Impl {
cur_size = region.GetSize();
}
- const VAddr region_virt_addr =
+ const KVirtualAddress region_virt_addr =
region.GetAddress() + linear_region_phys_to_virt_diff;
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
- region_virt_addr, region.GetSize(),
+ GetInteger(region_virt_addr), region.GetSize(),
GetTypeForVirtualLinearMapping(region.GetType())));
- region.SetPairAddress(region_virt_addr);
+ region.SetPairAddress(GetInteger(region_virt_addr));
KMemoryRegion* virt_region =
- memory_layout->GetVirtualMemoryRegionTree().FindModifiable(region_virt_addr);
+ memory_layout->GetVirtualMemoryRegionTree().FindModifiable(
+ GetInteger(region_virt_addr));
ASSERT(virt_region != nullptr);
virt_region->SetPairAddress(region.GetAddress());
}
@@ -705,10 +703,11 @@ struct KernelCore::Impl {
// Insert regions for the initial page table region.
ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
- resource_end_phys_addr, KernelPageTableHeapSize, KMemoryRegionType_DramKernelInitPt));
+ GetInteger(resource_end_phys_addr), KernelPageTableHeapSize,
+ KMemoryRegionType_DramKernelInitPt));
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
- resource_end_phys_addr + linear_region_phys_to_virt_diff, KernelPageTableHeapSize,
- KMemoryRegionType_VirtualDramKernelInitPt));
+ GetInteger(resource_end_phys_addr) + linear_region_phys_to_virt_diff,
+ KernelPageTableHeapSize, KMemoryRegionType_VirtualDramKernelInitPt));
// All linear-mapped DRAM regions that we haven't tagged by this point will be allocated to
// some pool partition. Tag them.
@@ -734,7 +733,7 @@ struct KernelCore::Impl {
memory_manager->Initialize(management_region.GetAddress(), management_region.GetSize());
}
- void InitializeHackSharedMemory() {
+ void InitializeHackSharedMemory(KernelCore& kernel) {
// Setup memory regions for emulated processes
// TODO(bunnei): These should not be hardcoded regions initialized within the kernel
constexpr std::size_t hid_size{0x40000};
@@ -750,65 +749,24 @@ struct KernelCore::Impl {
hidbus_shared_mem = KSharedMemory::Create(system.Kernel());
hid_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None,
- Svc::MemoryPermission::Read, hid_size, "HID:SharedMemory");
- font_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None,
- Svc::MemoryPermission::Read, font_size, "Font:SharedMemory");
- irs_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None,
- Svc::MemoryPermission::Read, irs_size, "IRS:SharedMemory");
- time_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None,
- Svc::MemoryPermission::Read, time_size, "Time:SharedMemory");
- hidbus_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None,
- Svc::MemoryPermission::Read, hidbus_size,
- "HidBus:SharedMemory");
- }
-
- KClientPort* CreateNamedServicePort(std::string name) {
- auto search = service_interface_factory.find(name);
- if (search == service_interface_factory.end()) {
- UNIMPLEMENTED();
- return {};
- }
-
- return &search->second(system.ServiceManager(), system);
- }
-
- void RegisterNamedServiceHandler(std::string name, KServerPort* server_port) {
- auto search = service_interface_handlers.find(name);
- if (search == service_interface_handlers.end()) {
- return;
- }
-
- search->second(system.ServiceManager(), server_port);
- }
+ Svc::MemoryPermission::Read, hid_size);
+ KSharedMemory::Register(kernel, hid_shared_mem);
- Kernel::ServiceThread& CreateServiceThread(KernelCore& kernel, const std::string& name) {
- auto* ptr = new ServiceThread(kernel, name);
-
- service_threads_manager.QueueWork(
- [this, ptr]() { service_threads.emplace(ptr, std::unique_ptr<ServiceThread>(ptr)); });
-
- return *ptr;
- }
-
- void ReleaseServiceThread(Kernel::ServiceThread& service_thread) {
- auto* ptr = &service_thread;
+ font_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None,
+ Svc::MemoryPermission::Read, font_size);
+ KSharedMemory::Register(kernel, font_shared_mem);
- if (ptr == default_service_thread) {
- // Nothing to do here, the service is using default_service_thread, which will be
- // released on shutdown.
- return;
- }
+ irs_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None,
+ Svc::MemoryPermission::Read, irs_size);
+ KSharedMemory::Register(kernel, irs_shared_mem);
- service_threads_manager.QueueWork([this, ptr]() { service_threads.erase(ptr); });
- }
+ time_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None,
+ Svc::MemoryPermission::Read, time_size);
+ KSharedMemory::Register(kernel, time_shared_mem);
- void ClearServiceThreads() {
- service_threads_manager.QueueWork([this] {
- service_threads.clear();
- default_service_thread = nullptr;
- service_thread_barrier.Sync();
- });
- service_thread_barrier.Sync();
+ hidbus_shared_mem->Initialize(system.DeviceMemory(), nullptr, Svc::MemoryPermission::None,
+ Svc::MemoryPermission::Read, hidbus_size);
+ KSharedMemory::Register(kernel, hidbus_shared_mem);
}
std::mutex registered_objects_lock;
@@ -821,7 +779,7 @@ struct KernelCore::Impl {
// Lists all processes that exist in the current session.
std::vector<KProcess*> process_list;
- std::atomic<KProcess*> current_process{};
+ std::atomic<KProcess*> application_process{};
std::unique_ptr<Kernel::GlobalSchedulerContext> global_scheduler_context;
std::unique_ptr<Kernel::KHardwareTimer> hardware_timer;
@@ -838,14 +796,14 @@ struct KernelCore::Impl {
std::unique_ptr<KAutoObjectWithListContainer> global_object_list_container;
- /// Map of named ports managed by the kernel, which can be retrieved using
- /// the ConnectToPort SVC.
- std::unordered_map<std::string, ServiceInterfaceFactory> service_interface_factory;
- std::unordered_map<std::string, ServiceInterfaceHandlerFn> service_interface_handlers;
- NamedPortTable named_ports;
+ std::unique_ptr<KObjectNameGlobalData> object_name_global_data;
+
std::unordered_set<KAutoObject*> registered_objects;
std::unordered_set<KAutoObject*> registered_in_use_objects;
+ std::mutex server_lock;
+ std::vector<std::unique_ptr<Service::ServerManager>> server_managers;
+
std::unique_ptr<Core::ExclusiveMonitor> exclusive_monitor;
std::array<std::unique_ptr<Kernel::PhysicalCore>, Core::Hardware::NUM_CPU_CORES> cores;
@@ -880,12 +838,6 @@ struct KernelCore::Impl {
// Memory layout
std::unique_ptr<KMemoryLayout> memory_layout;
- // Threads used for services
- std::unordered_map<ServiceThread*, std::unique_ptr<ServiceThread>> service_threads;
- ServiceThread* default_service_thread{};
- Common::ThreadWorker service_threads_manager;
- Common::Barrier service_thread_barrier;
-
std::array<KThread*, Core::Hardware::NUM_CPU_CORES> shutdown_threads{};
std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
@@ -941,20 +893,20 @@ void KernelCore::AppendNewProcess(KProcess* process) {
impl->process_list.push_back(process);
}
-void KernelCore::MakeCurrentProcess(KProcess* process) {
- impl->MakeCurrentProcess(process);
+void KernelCore::MakeApplicationProcess(KProcess* process) {
+ impl->MakeApplicationProcess(process);
}
-KProcess* KernelCore::CurrentProcess() {
- return impl->current_process;
+KProcess* KernelCore::ApplicationProcess() {
+ return impl->application_process;
}
-const KProcess* KernelCore::CurrentProcess() const {
- return impl->current_process;
+const KProcess* KernelCore::ApplicationProcess() const {
+ return impl->application_process;
}
-void KernelCore::CloseCurrentProcess() {
- impl->CloseCurrentProcess();
+void KernelCore::CloseApplicationProcess() {
+ impl->CloseApplicationProcess();
}
const std::vector<KProcess*>& KernelCore::GetProcessList() const {
@@ -1002,7 +954,7 @@ const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
}
Kernel::KScheduler* KernelCore::CurrentScheduler() {
- u32 core_id = impl->GetCurrentHostThreadID();
+ const u32 core_id = impl->GetCurrentHostThreadID();
if (core_id >= Core::Hardware::NUM_CPU_CORES) {
// This is expected when called from not a guest thread
return {};
@@ -1036,12 +988,12 @@ void KernelCore::InvalidateAllInstructionCaches() {
}
}
-void KernelCore::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) {
+void KernelCore::InvalidateCpuInstructionCacheRange(KProcessAddress addr, std::size_t size) {
for (auto& physical_core : impl->cores) {
if (!physical_core->IsInitialized()) {
continue;
}
- physical_core->ArmInterface().InvalidateCacheRange(addr, size);
+ physical_core->ArmInterface().InvalidateCacheRange(GetInteger(addr), size);
}
}
@@ -1049,23 +1001,6 @@ void KernelCore::PrepareReschedule(std::size_t id) {
// TODO: Reimplement, this
}
-void KernelCore::RegisterNamedService(std::string name, ServiceInterfaceFactory&& factory) {
- impl->service_interface_factory.emplace(std::move(name), factory);
-}
-
-void KernelCore::RegisterInterfaceForNamedService(std::string name,
- ServiceInterfaceHandlerFn&& handler) {
- impl->service_interface_handlers.emplace(std::move(name), handler);
-}
-
-KClientPort* KernelCore::CreateNamedServicePort(std::string name) {
- return impl->CreateNamedServicePort(std::move(name));
-}
-
-void KernelCore::RegisterNamedServiceHandler(std::string name, KServerPort* server_port) {
- impl->RegisterNamedServiceHandler(std::move(name), server_port);
-}
-
void KernelCore::RegisterKernelObject(KAutoObject* object) {
std::scoped_lock lk{impl->registered_objects_lock};
impl->registered_objects.insert(object);
@@ -1086,8 +1021,19 @@ void KernelCore::UnregisterInUseObject(KAutoObject* object) {
impl->registered_in_use_objects.erase(object);
}
-bool KernelCore::IsValidNamedPort(NamedPortTable::const_iterator port) const {
- return port != impl->named_ports.cend();
+void KernelCore::RunServer(std::unique_ptr<Service::ServerManager>&& server_manager) {
+ auto* manager = server_manager.get();
+
+ {
+ std::scoped_lock lk{impl->server_lock};
+ if (impl->is_shutting_down) {
+ return;
+ }
+
+ impl->server_managers.emplace_back(std::move(server_manager));
+ }
+
+ manager->LoopProcess();
}
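A minimal usage sketch for the new entry point (hypothetical caller; the ServerManager constructor and the registration step are assumptions, not shown in this diff):

    // A service front-end hands ownership of its manager to the kernel, which
    // stores it (under server_lock) and then blocks in LoopProcess() until
    // CloseServices() clears the list.
    void LoopProcess(Core::System& system) {
        auto server_manager = std::make_unique<Service::ServerManager>(system);
        // ... register named ports/sessions on *server_manager here ...
        system.Kernel().RunServer(std::move(server_manager));
    }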
u32 KernelCore::CreateNewObjectID() {
@@ -1126,6 +1072,99 @@ void KernelCore::RegisterHostThread(KThread* existing_thread) {
}
}
+static std::jthread RunHostThreadFunc(KernelCore& kernel, KProcess* process,
+ std::string&& thread_name, std::function<void()>&& func) {
+ // Reserve a new thread from the process resource limit.
+ KScopedResourceReservation thread_reservation(process, LimitableResource::ThreadCountMax);
+ ASSERT(thread_reservation.Succeeded());
+
+ // Initialize the thread.
+ KThread* thread = KThread::Create(kernel);
+ ASSERT(R_SUCCEEDED(KThread::InitializeDummyThread(thread, process)));
+
+ // Commit the thread reservation.
+ thread_reservation.Commit();
+
+ // Register the thread.
+ KThread::Register(kernel, thread);
+
+ return std::jthread(
+ [&kernel, thread, thread_name{std::move(thread_name)}, func{std::move(func)}] {
+ // Set the thread name.
+ Common::SetCurrentThreadName(thread_name.c_str());
+
+ // Set the thread as current.
+ kernel.RegisterHostThread(thread);
+
+ // Run the callback.
+ func();
+
+ // Close the thread.
+ // This will free the process if it is the last reference.
+ thread->Close();
+ });
+}
+
+std::jthread KernelCore::RunOnHostCoreProcess(std::string&& process_name,
+ std::function<void()> func) {
+ // Make a new process.
+ KProcess* process = KProcess::Create(*this);
+ ASSERT(R_SUCCEEDED(KProcess::Initialize(process, System(), "", KProcess::ProcessType::Userland,
+ GetSystemResourceLimit())));
+
+ // Ensure that we don't hold onto any extra references.
+ SCOPE_EXIT({ process->Close(); });
+
+ // Register the new process.
+ KProcess::Register(*this, process);
+
+ // Run the host thread.
+ return RunHostThreadFunc(*this, process, std::move(process_name), std::move(func));
+}
+
+std::jthread KernelCore::RunOnHostCoreThread(std::string&& thread_name,
+ std::function<void()> func) {
+ // Get the current process.
+ KProcess* process = GetCurrentProcessPointer(*this);
+
+ // Run the host thread.
+ return RunHostThreadFunc(*this, process, std::move(thread_name), std::move(func));
+}
+
+void KernelCore::RunOnGuestCoreProcess(std::string&& process_name, std::function<void()> func) {
+ constexpr s32 ServiceThreadPriority = 16;
+ constexpr s32 ServiceThreadCore = 3;
+
+ // Make a new process.
+ KProcess* process = KProcess::Create(*this);
+ ASSERT(R_SUCCEEDED(KProcess::Initialize(process, System(), "", KProcess::ProcessType::Userland,
+ GetSystemResourceLimit())));
+
+ // Ensure that we don't hold onto any extra references.
+ SCOPE_EXIT({ process->Close(); });
+
+ // Register the new process.
+ KProcess::Register(*this, process);
+
+ // Reserve a new thread from the process resource limit.
+ KScopedResourceReservation thread_reservation(process, LimitableResource::ThreadCountMax);
+ ASSERT(thread_reservation.Succeeded());
+
+ // Initialize the thread.
+ KThread* thread = KThread::Create(*this);
+ ASSERT(R_SUCCEEDED(KThread::InitializeServiceThread(
+ System(), thread, std::move(func), ServiceThreadPriority, ServiceThreadCore, process)));
+
+ // Commit the thread reservation.
+ thread_reservation.Commit();
+
+ // Register the new thread.
+ KThread::Register(*this, thread);
+
+ // Begin running the thread.
+ ASSERT(R_SUCCEEDED(thread->Run()));
+}
+
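A hypothetical call site for the guest-core variant (sketch; the process name and lambda body are illustrative only):

    // Spawns an emulated process whose std::function body runs as a guest
    // KThread at priority 16 on core 3, per the constants above.
    kernel.RunOnGuestCoreProcess("MyService", [] { /* service main loop */ });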
u32 KernelCore::GetCurrentHostThreadID() const {
return impl->GetCurrentHostThreadID();
}
@@ -1138,6 +1177,10 @@ void KernelCore::SetCurrentEmuThread(KThread* thread) {
impl->SetCurrentEmuThread(thread);
}
+KObjectNameGlobalData& KernelCore::ObjectNameGlobalData() {
+ return *impl->object_name_global_data;
+}
+
KMemoryManager& KernelCore::MemoryManager() {
return *impl->memory_manager;
}
@@ -1146,6 +1189,14 @@ const KMemoryManager& KernelCore::MemoryManager() const {
return *impl->memory_manager;
}
+KSystemResource& KernelCore::GetAppSystemResource() {
+ return *impl->app_system_resource;
+}
+
+const KSystemResource& KernelCore::GetAppSystemResource() const {
+ return *impl->app_system_resource;
+}
+
KSystemResource& KernelCore::GetSystemSystemResource() {
return *impl->sys_system_resource;
}
@@ -1194,32 +1245,39 @@ const Kernel::KSharedMemory& KernelCore::GetHidBusSharedMem() const {
return *impl->hidbus_shared_mem;
}
-void KernelCore::Suspend(bool suspended) {
+void KernelCore::SuspendApplication(bool suspended) {
const bool should_suspend{exception_exited || suspended};
const auto activity = should_suspend ? ProcessActivity::Paused : ProcessActivity::Runnable;
- std::vector<KScopedAutoObject<KThread>> process_threads;
- {
- KScopedSchedulerLock sl{*this};
+ // Get the application process.
+ KScopedAutoObject<KProcess> process = ApplicationProcess();
+ if (process.IsNull()) {
+ return;
+ }
- if (auto* process = CurrentProcess(); process != nullptr) {
- process->SetActivity(activity);
+ // Set the new activity.
+ process->SetActivity(activity);
- if (!should_suspend) {
- // Runnable now; no need to wait.
- return;
- }
+ // Wait for process execution to stop.
+ bool must_wait{should_suspend};
+
+ // KernelCore::SuspendApplication must be called from locked context,
+ // or we could race another call to SetActivity, interfering with waiting.
+ while (must_wait) {
+ KScopedSchedulerLock sl{*this};
+
+ // Assume that all threads have finished running.
+ must_wait = false;
- for (auto* thread : process->GetThreadList()) {
- process_threads.emplace_back(thread);
+ for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
+ if (Scheduler(i).GetSchedulerCurrentThread()->GetOwnerProcess() ==
+ process.GetPointerUnsafe()) {
+ // A thread has not finished running yet.
+ // Continue waiting.
+ must_wait = true;
}
}
}
-
- // Wait for execution to stop.
- for (auto& thread : process_threads) {
- thread->WaitUntilSuspended();
- }
}
void KernelCore::ShutdownCores() {
@@ -1238,9 +1296,9 @@ bool KernelCore::IsShuttingDown() const {
return impl->IsShuttingDown();
}
-void KernelCore::ExceptionalExit() {
+void KernelCore::ExceptionalExitApplication() {
exception_exited = true;
- Suspend(true);
+ SuspendApplication(true);
}
void KernelCore::EnterSVCProfile() {
@@ -1251,18 +1309,6 @@ void KernelCore::ExitSVCProfile() {
MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]);
}
-Kernel::ServiceThread& KernelCore::CreateServiceThread(const std::string& name) {
- return impl->CreateServiceThread(*this, name);
-}
-
-Kernel::ServiceThread& KernelCore::GetDefaultServiceThread() const {
- return *impl->default_service_thread;
-}
-
-void KernelCore::ReleaseServiceThread(Kernel::ServiceThread& service_thread) {
- impl->ReleaseServiceThread(service_thread);
-}
-
Init::KSlabResourceCounts& KernelCore::SlabResourceCounts() {
return impl->slab_resource_counts;
}
@@ -1299,4 +1345,93 @@ const Core::System& KernelCore::System() const {
return impl->system;
}
+struct KernelCore::SlabHeapContainer {
+ KSlabHeap<KClientSession> client_session;
+ KSlabHeap<KEvent> event;
+ KSlabHeap<KPort> port;
+ KSlabHeap<KProcess> process;
+ KSlabHeap<KResourceLimit> resource_limit;
+ KSlabHeap<KSession> session;
+ KSlabHeap<KSharedMemory> shared_memory;
+ KSlabHeap<KSharedMemoryInfo> shared_memory_info;
+ KSlabHeap<KThread> thread;
+ KSlabHeap<KTransferMemory> transfer_memory;
+ KSlabHeap<KCodeMemory> code_memory;
+ KSlabHeap<KDeviceAddressSpace> device_address_space;
+ KSlabHeap<KPageBuffer> page_buffer;
+ KSlabHeap<KThreadLocalPage> thread_local_page;
+ KSlabHeap<KObjectName> object_name;
+ KSlabHeap<KSessionRequest> session_request;
+ KSlabHeap<KSecureSystemResource> secure_system_resource;
+ KSlabHeap<KThread::LockWithPriorityInheritanceInfo> lock_info;
+ KSlabHeap<KEventInfo> event_info;
+ KSlabHeap<KDebug> debug;
+};
+
+template <typename T>
+KSlabHeap<T>& KernelCore::SlabHeap() {
+ if constexpr (std::is_same_v<T, KClientSession>) {
+ return slab_heap_container->client_session;
+ } else if constexpr (std::is_same_v<T, KEvent>) {
+ return slab_heap_container->event;
+ } else if constexpr (std::is_same_v<T, KPort>) {
+ return slab_heap_container->port;
+ } else if constexpr (std::is_same_v<T, KProcess>) {
+ return slab_heap_container->process;
+ } else if constexpr (std::is_same_v<T, KResourceLimit>) {
+ return slab_heap_container->resource_limit;
+ } else if constexpr (std::is_same_v<T, KSession>) {
+ return slab_heap_container->session;
+ } else if constexpr (std::is_same_v<T, KSharedMemory>) {
+ return slab_heap_container->shared_memory;
+ } else if constexpr (std::is_same_v<T, KSharedMemoryInfo>) {
+ return slab_heap_container->shared_memory_info;
+ } else if constexpr (std::is_same_v<T, KThread>) {
+ return slab_heap_container->thread;
+ } else if constexpr (std::is_same_v<T, KTransferMemory>) {
+ return slab_heap_container->transfer_memory;
+ } else if constexpr (std::is_same_v<T, KCodeMemory>) {
+ return slab_heap_container->code_memory;
+ } else if constexpr (std::is_same_v<T, KDeviceAddressSpace>) {
+ return slab_heap_container->device_address_space;
+ } else if constexpr (std::is_same_v<T, KPageBuffer>) {
+ return slab_heap_container->page_buffer;
+ } else if constexpr (std::is_same_v<T, KThreadLocalPage>) {
+ return slab_heap_container->thread_local_page;
+ } else if constexpr (std::is_same_v<T, KObjectName>) {
+ return slab_heap_container->object_name;
+ } else if constexpr (std::is_same_v<T, KSessionRequest>) {
+ return slab_heap_container->session_request;
+ } else if constexpr (std::is_same_v<T, KSecureSystemResource>) {
+ return slab_heap_container->secure_system_resource;
+ } else if constexpr (std::is_same_v<T, KThread::LockWithPriorityInheritanceInfo>) {
+ return slab_heap_container->lock_info;
+ } else if constexpr (std::is_same_v<T, KEventInfo>) {
+ return slab_heap_container->event_info;
+ } else if constexpr (std::is_same_v<T, KDebug>) {
+ return slab_heap_container->debug;
+ }
+}
+
+template KSlabHeap<KClientSession>& KernelCore::SlabHeap();
+template KSlabHeap<KEvent>& KernelCore::SlabHeap();
+template KSlabHeap<KPort>& KernelCore::SlabHeap();
+template KSlabHeap<KProcess>& KernelCore::SlabHeap();
+template KSlabHeap<KResourceLimit>& KernelCore::SlabHeap();
+template KSlabHeap<KSession>& KernelCore::SlabHeap();
+template KSlabHeap<KSharedMemory>& KernelCore::SlabHeap();
+template KSlabHeap<KSharedMemoryInfo>& KernelCore::SlabHeap();
+template KSlabHeap<KThread>& KernelCore::SlabHeap();
+template KSlabHeap<KTransferMemory>& KernelCore::SlabHeap();
+template KSlabHeap<KCodeMemory>& KernelCore::SlabHeap();
+template KSlabHeap<KDeviceAddressSpace>& KernelCore::SlabHeap();
+template KSlabHeap<KPageBuffer>& KernelCore::SlabHeap();
+template KSlabHeap<KThreadLocalPage>& KernelCore::SlabHeap();
+template KSlabHeap<KObjectName>& KernelCore::SlabHeap();
+template KSlabHeap<KSessionRequest>& KernelCore::SlabHeap();
+template KSlabHeap<KSecureSystemResource>& KernelCore::SlabHeap();
+template KSlabHeap<KThread::LockWithPriorityInheritanceInfo>& KernelCore::SlabHeap();
+template KSlabHeap<KEventInfo>& KernelCore::SlabHeap();
+template KSlabHeap<KDebug>& KernelCore::SlabHeap();
+
} // namespace Kernel
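A sketch of why the explicit instantiations above matter (hypothetical call site; the exact KSlabHeap allocation interface is an assumption about the surrounding codebase, not something this diff shows):

    // SlabHeap<T>() is defined in this .cpp file, so every T used from other
    // translation units needs an explicit instantiation like those above.
    KSlabHeap<KEvent>& heap = kernel.SlabHeap<KEvent>();
    KEvent* event = heap.Allocate(); // assumed allocation call
    // ... use the object, then return it to its slab ...
    heap.Free(event);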