Diffstat (limited to 'src/core/hle')
-rw-r--r--  src/core/hle/kernel/hle_ipc.cpp | 30
-rw-r--r--  src/core/hle/kernel/hle_ipc.h | 8
-rw-r--r--  src/core/hle/kernel/init/init_slab_setup.cpp | 2
-rw-r--r--  src/core/hle/kernel/k_auto_object.h | 20
-rw-r--r--  src/core/hle/kernel/k_capabilities.cpp | 358
-rw-r--r--  src/core/hle/kernel/k_capabilities.h | 295
-rw-r--r--  src/core/hle/kernel/k_code_memory.cpp | 12
-rw-r--r--  src/core/hle/kernel/k_condition_variable.cpp | 2
-rw-r--r--  src/core/hle/kernel/k_device_address_space.cpp | 150
-rw-r--r--  src/core/hle/kernel/k_device_address_space.h | 60
-rw-r--r--  src/core/hle/kernel/k_light_lock.cpp | 2
-rw-r--r--  src/core/hle/kernel/k_memory_layout.h | 6
-rw-r--r--  src/core/hle/kernel/k_page_table.cpp | 544
-rw-r--r--  src/core/hle/kernel/k_page_table.h | 86
-rw-r--r--  src/core/hle/kernel/k_priority_queue.h | 54
-rw-r--r--  src/core/hle/kernel/k_process.cpp | 40
-rw-r--r--  src/core/hle/kernel/k_scoped_lock.h | 11
-rw-r--r--  src/core/hle/kernel/k_shared_memory.cpp | 6
-rw-r--r--  src/core/hle/kernel/k_thread.cpp | 21
-rw-r--r--  src/core/hle/kernel/k_thread.h | 32
-rw-r--r--  src/core/hle/kernel/k_thread_local_page.h | 6
-rw-r--r--  src/core/hle/kernel/kernel.cpp | 39
-rw-r--r--  src/core/hle/kernel/kernel.h | 4
-rw-r--r--  src/core/hle/kernel/svc.cpp | 2686
-rw-r--r--  src/core/hle/kernel/svc.h | 156
-rw-r--r--  src/core/hle/kernel/svc/svc_activity.cpp | 44
-rw-r--r--  src/core/hle/kernel/svc/svc_address_arbiter.cpp | 113
-rw-r--r--  src/core/hle/kernel/svc/svc_address_translation.cpp | 6
-rw-r--r--  src/core/hle/kernel/svc/svc_cache.cpp | 31
-rw-r--r--  src/core/hle/kernel/svc/svc_code_memory.cpp | 154
-rw-r--r--  src/core/hle/kernel/svc/svc_condition_variable.cpp | 69
-rw-r--r--  src/core/hle/kernel/svc/svc_debug.cpp | 6
-rw-r--r--  src/core/hle/kernel/svc/svc_debug_string.cpp | 25
-rw-r--r--  src/core/hle/kernel/svc/svc_device_address_space.cpp | 6
-rw-r--r--  src/core/hle/kernel/svc/svc_event.cpp | 111
-rw-r--r--  src/core/hle/kernel/svc/svc_exception.cpp | 121
-rw-r--r--  src/core/hle/kernel/svc/svc_info.cpp | 282
-rw-r--r--  src/core/hle/kernel/svc/svc_interrupt_event.cpp | 6
-rw-r--r--  src/core/hle/kernel/svc/svc_io_pool.cpp | 6
-rw-r--r--  src/core/hle/kernel/svc/svc_ipc.cpp | 89
-rw-r--r--  src/core/hle/kernel/svc/svc_kernel_debug.cpp | 19
-rw-r--r--  src/core/hle/kernel/svc/svc_light_ipc.cpp | 6
-rw-r--r--  src/core/hle/kernel/svc/svc_lock.cpp | 57
-rw-r--r--  src/core/hle/kernel/svc/svc_memory.cpp | 189
-rw-r--r--  src/core/hle/kernel/svc/svc_physical_memory.cpp | 137
-rw-r--r--  src/core/hle/kernel/svc/svc_port.cpp | 71
-rw-r--r--  src/core/hle/kernel/svc/svc_power_management.cpp | 6
-rw-r--r--  src/core/hle/kernel/svc/svc_process.cpp | 124
-rw-r--r--  src/core/hle/kernel/svc/svc_process_memory.cpp | 274
-rw-r--r--  src/core/hle/kernel/svc/svc_processor.cpp | 21
-rw-r--r--  src/core/hle/kernel/svc/svc_query_memory.cpp | 55
-rw-r--r--  src/core/hle/kernel/svc/svc_register.cpp | 6
-rw-r--r--  src/core/hle/kernel/svc/svc_resource_limit.cpp | 95
-rw-r--r--  src/core/hle/kernel/svc/svc_secure_monitor_call.cpp | 6
-rw-r--r--  src/core/hle/kernel/svc/svc_session.cpp | 103
-rw-r--r--  src/core/hle/kernel/svc/svc_shared_memory.cpp | 106
-rw-r--r--  src/core/hle/kernel/svc/svc_synchronization.cpp | 139
-rw-r--r--  src/core/hle/kernel/svc/svc_thread.cpp | 396
-rw-r--r--  src/core/hle/kernel/svc/svc_thread_profiler.cpp | 6
-rw-r--r--  src/core/hle/kernel/svc/svc_tick.cpp | 33
-rw-r--r--  src/core/hle/kernel/svc/svc_transfer_memory.cpp | 79
-rw-r--r--  src/core/hle/kernel/svc_types.h | 19
-rw-r--r--  src/core/hle/kernel/svc_version.h | 58
-rw-r--r--  src/core/hle/kernel/svc_wrap.h | 8
-rw-r--r--  src/core/hle/service/am/am.cpp | 2
-rw-r--r--  src/core/hle/service/audio/audren_u.cpp | 2
-rw-r--r--  src/core/hle/service/audio/hwopus.cpp | 2
-rw-r--r--  src/core/hle/service/es/es.cpp | 2
-rw-r--r--  src/core/hle/service/filesystem/fsp_srv.cpp | 11
-rw-r--r--  src/core/hle/service/glue/arp.cpp | 3
-rw-r--r--  src/core/hle/service/hid/controllers/npad.cpp | 8
-rw-r--r--  src/core/hle/service/hid/controllers/npad.h | 3
-rw-r--r--  src/core/hle/service/hid/hid.cpp | 4
-rw-r--r--  src/core/hle/service/hid/hidbus/hidbus_base.h | 3
-rw-r--r--  src/core/hle/service/hid/hidbus/ringcon.cpp | 2
-rw-r--r--  src/core/hle/service/hid/hidbus/ringcon.h | 3
-rw-r--r--  src/core/hle/service/hid/hidbus/starlink.cpp | 2
-rw-r--r--  src/core/hle/service/hid/hidbus/starlink.h | 2
-rw-r--r--  src/core/hle/service/hid/hidbus/stubbed.cpp | 2
-rw-r--r--  src/core/hle/service/hid/hidbus/stubbed.h | 2
-rw-r--r--  src/core/hle/service/jit/jit.cpp | 4
-rw-r--r--  src/core/hle/service/ldn/ldn.cpp | 4
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvdevice.h | 10
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp | 8
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvdisp_disp0.h | 10
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp | 26
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h | 28
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp | 21
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_ctrl.h | 22
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp | 31
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h | 32
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp | 35
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_gpu.h | 36
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp | 8
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_nvdec.h | 10
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp | 17
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h | 14
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp | 10
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h | 12
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_vic.cpp | 8
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_vic.h | 10
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvmap.cpp | 20
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvmap.h | 22
-rw-r--r--  src/core/hle/service/nvdrv/nvdrv.cpp | 8
-rw-r--r--  src/core/hle/service/nvdrv/nvdrv.h | 12
-rw-r--r--  src/core/hle/service/nvflinger/buffer_queue_producer.cpp | 4
-rw-r--r--  src/core/hle/service/nvflinger/graphic_buffer_producer.cpp | 2
-rw-r--r--  src/core/hle/service/nvflinger/graphic_buffer_producer.h | 4
-rw-r--r--  src/core/hle/service/nvflinger/parcel.h | 87
-rw-r--r--  src/core/hle/service/prepo/prepo.cpp | 8
-rw-r--r--  src/core/hle/service/sockets/bsd.cpp | 15
-rw-r--r--  src/core/hle/service/sockets/bsd.h | 23
-rw-r--r--  src/core/hle/service/sockets/sfdnsres.cpp | 2
-rw-r--r--  src/core/hle/service/ssl/ssl.cpp | 8
-rw-r--r--  src/core/hle/service/vi/vi.cpp | 4
115 files changed, 5033 insertions, 3307 deletions
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index 738b6d0f1..494151eef 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -11,6 +11,7 @@
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/logging/log.h"
+#include "common/scratch_buffer.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_auto_object.h"
@@ -325,7 +326,7 @@ Result HLERequestContext::WriteToOutgoingCommandBuffer(KThread& requesting_threa
return ResultSuccess;
}
-std::vector<u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) const {
+std::vector<u8> HLERequestContext::ReadBufferCopy(std::size_t buffer_index) const {
const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
BufferDescriptorA()[buffer_index].Size()};
if (is_buffer_a) {
@@ -345,6 +346,33 @@ std::vector<u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) const {
}
}
+std::span<const u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) const {
+ static thread_local std::array<Common::ScratchBuffer<u8>, 2> read_buffer_a;
+ static thread_local std::array<Common::ScratchBuffer<u8>, 2> read_buffer_x;
+
+ const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
+ BufferDescriptorA()[buffer_index].Size()};
+ if (is_buffer_a) {
+ ASSERT_OR_EXECUTE_MSG(
+ BufferDescriptorA().size() > buffer_index, { return {}; },
+ "BufferDescriptorA invalid buffer_index {}", buffer_index);
+ auto& read_buffer = read_buffer_a[buffer_index];
+ read_buffer.resize_destructive(BufferDescriptorA()[buffer_index].Size());
+ memory.ReadBlock(BufferDescriptorA()[buffer_index].Address(), read_buffer.data(),
+ read_buffer.size());
+ return read_buffer;
+ } else {
+ ASSERT_OR_EXECUTE_MSG(
+ BufferDescriptorX().size() > buffer_index, { return {}; },
+ "BufferDescriptorX invalid buffer_index {}", buffer_index);
+ auto& read_buffer = read_buffer_x[buffer_index];
+ read_buffer.resize_destructive(BufferDescriptorX()[buffer_index].Size());
+ memory.ReadBlock(BufferDescriptorX()[buffer_index].Address(), read_buffer.data(),
+ read_buffer.size());
+ return read_buffer;
+ }
+}
+
std::size_t HLERequestContext::WriteBuffer(const void* buffer, std::size_t size,
std::size_t buffer_index) const {
if (size == 0) {
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h
index e252b5f4b..5bf4f171b 100644
--- a/src/core/hle/kernel/hle_ipc.h
+++ b/src/core/hle/kernel/hle_ipc.h
@@ -7,6 +7,7 @@
#include <functional>
#include <memory>
#include <optional>
+#include <span>
#include <string>
#include <type_traits>
#include <vector>
@@ -270,8 +271,11 @@ public:
return domain_message_header.has_value();
}
- /// Helper function to read a buffer using the appropriate buffer descriptor
- [[nodiscard]] std::vector<u8> ReadBuffer(std::size_t buffer_index = 0) const;
+ /// Helper function to get a span of a buffer using the appropriate buffer descriptor
+ [[nodiscard]] std::span<const u8> ReadBuffer(std::size_t buffer_index = 0) const;
+
+ /// Helper function to read a copy of a buffer using the appropriate buffer descriptor
+ [[nodiscard]] std::vector<u8> ReadBufferCopy(std::size_t buffer_index = 0) const;
/// Helper function to write a buffer using the appropriate buffer descriptor
std::size_t WriteBuffer(const void* buffer, std::size_t size,
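A note on the two accessors above: the span returned by the new ReadBuffer is backed by a thread_local Common::ScratchBuffer in hle_ipc.cpp, so it remains valid only until the same thread services another read of the same buffer index; ReadBufferCopy keeps the old owning-vector behaviour for data that must outlive the request. A minimal standalone sketch of that trade-off (ReadView and ReadCopy below are hypothetical stand-ins, not yuzu code):

    #include <cstdio>
    #include <span>
    #include <vector>

    // Hypothetical stand-in for ReadBuffer(): each call reuses (and may reallocate)
    // the same per-thread storage, invalidating spans handed out earlier.
    std::span<const unsigned char> ReadView(const unsigned char* src, std::size_t size) {
        static thread_local std::vector<unsigned char> scratch;
        scratch.assign(src, src + size);
        return scratch;
    }

    // Hypothetical stand-in for ReadBufferCopy(): the caller owns the bytes.
    std::vector<unsigned char> ReadCopy(const unsigned char* src, std::size_t size) {
        return std::vector<unsigned char>(src, src + size);
    }

    int main() {
        const unsigned char first[] = {1, 2, 3};
        const unsigned char second[] = {9, 9, 9, 9};

        // Fine: the view is consumed before the next read on this thread.
        auto view = ReadView(first, sizeof(first));
        std::printf("%zu bytes viewed\n", view.size());

        // Safe across further reads: the copy owns its storage.
        auto copy = ReadCopy(first, sizeof(first));
        (void)ReadView(second, sizeof(second)); // would invalidate `view`, but `copy` is unaffected
        std::printf("%zu bytes copied\n", copy.size());
        return 0;
    }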
diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp
index 7b363eb1e..571acf4b2 100644
--- a/src/core/hle/kernel/init/init_slab_setup.cpp
+++ b/src/core/hle/kernel/init/init_slab_setup.cpp
@@ -11,6 +11,7 @@
#include "core/hle/kernel/init/init_slab_setup.h"
#include "core/hle/kernel/k_code_memory.h"
#include "core/hle/kernel/k_debug.h"
+#include "core/hle/kernel/k_device_address_space.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_event_info.h"
#include "core/hle/kernel/k_memory_layout.h"
@@ -43,6 +44,7 @@ namespace Kernel::Init {
HANDLER(KSharedMemoryInfo, (SLAB_COUNT(KSharedMemory) * 8), ##__VA_ARGS__) \
HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ##__VA_ARGS__) \
HANDLER(KCodeMemory, (SLAB_COUNT(KCodeMemory)), ##__VA_ARGS__) \
+ HANDLER(KDeviceAddressSpace, (SLAB_COUNT(KDeviceAddressSpace)), ##__VA_ARGS__) \
HANDLER(KSession, (SLAB_COUNT(KSession)), ##__VA_ARGS__) \
HANDLER(KThreadLocalPage, \
(SLAB_COUNT(KProcess) + (SLAB_COUNT(KProcess) + SLAB_COUNT(KThread)) / 8), \
diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h
index 2827763d5..e8118c2b8 100644
--- a/src/core/hle/kernel/k_auto_object.h
+++ b/src/core/hle/kernel/k_auto_object.h
@@ -24,9 +24,7 @@ private:
friend class ::Kernel::KClassTokenGenerator; \
static constexpr inline auto ObjectType = ::Kernel::KClassTokenGenerator::ObjectType::CLASS; \
static constexpr inline const char* const TypeName = #CLASS; \
- static constexpr inline ClassTokenType ClassToken() { \
- return ::Kernel::ClassToken<CLASS>; \
- } \
+ static constexpr inline ClassTokenType ClassToken() { return ::Kernel::ClassToken<CLASS>; } \
\
public: \
YUZU_NON_COPYABLE(CLASS); \
@@ -37,15 +35,9 @@ public:
constexpr ClassTokenType Token = ClassToken(); \
return TypeObj(TypeName, Token); \
} \
- static constexpr const char* GetStaticTypeName() { \
- return TypeName; \
- } \
- virtual TypeObj GetTypeObj() ATTRIBUTE { \
- return GetStaticTypeObj(); \
- } \
- virtual const char* GetTypeName() ATTRIBUTE { \
- return GetStaticTypeName(); \
- } \
+ static constexpr const char* GetStaticTypeName() { return TypeName; } \
+ virtual TypeObj GetTypeObj() ATTRIBUTE { return GetStaticTypeObj(); } \
+ virtual const char* GetTypeName() ATTRIBUTE { return GetStaticTypeName(); } \
\
private: \
constexpr bool operator!=(const TypeObj& rhs)
@@ -245,8 +237,8 @@ public:
}
template <typename U>
- requires(std::derived_from<T, U> ||
- std::derived_from<U, T>) constexpr KScopedAutoObject(KScopedAutoObject<U>&& rhs) {
+ requires(std::derived_from<T, U> || std::derived_from<U, T>)
+ constexpr KScopedAutoObject(KScopedAutoObject<U>&& rhs) {
if constexpr (std::derived_from<U, T>) {
// Upcast.
m_obj = rhs.m_obj;
diff --git a/src/core/hle/kernel/k_capabilities.cpp b/src/core/hle/kernel/k_capabilities.cpp
new file mode 100644
index 000000000..64f1d7371
--- /dev/null
+++ b/src/core/hle/kernel/k_capabilities.cpp
@@ -0,0 +1,358 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/hardware_properties.h"
+#include "core/hle/kernel/k_capabilities.h"
+#include "core/hle/kernel/k_memory_layout.h"
+#include "core/hle/kernel/k_page_table.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/svc_results.h"
+#include "core/hle/kernel/svc_version.h"
+
+namespace Kernel {
+
+Result KCapabilities::InitializeForKIP(std::span<const u32> kern_caps, KPageTable* page_table) {
+ // We're initializing an initial process.
+ m_svc_access_flags.reset();
+ m_irq_access_flags.reset();
+ m_debug_capabilities = 0;
+ m_handle_table_size = 0;
+ m_intended_kernel_version = 0;
+ m_program_type = 0;
+
+ // Initial processes may run on all cores.
+ constexpr u64 VirtMask = Core::Hardware::VirtualCoreMask;
+ constexpr u64 PhysMask = Core::Hardware::ConvertVirtualCoreMaskToPhysical(VirtMask);
+
+ m_core_mask = VirtMask;
+ m_phys_core_mask = PhysMask;
+
+ // Initial processes may use any user priority they like.
+ m_priority_mask = ~0xFULL;
+
+ // Here, Nintendo sets the kernel version to the current kernel version.
+ // We will follow suit and set the version to the highest supported kernel version.
+ KernelVersion intended_kernel_version{};
+ intended_kernel_version.major_version.Assign(Svc::SupportedKernelMajorVersion);
+ intended_kernel_version.minor_version.Assign(Svc::SupportedKernelMinorVersion);
+ m_intended_kernel_version = intended_kernel_version.raw;
+
+ // Parse the capabilities array.
+ R_RETURN(this->SetCapabilities(kern_caps, page_table));
+}
+
+Result KCapabilities::InitializeForUser(std::span<const u32> user_caps, KPageTable* page_table) {
+ // We're initializing a user process.
+ m_svc_access_flags.reset();
+ m_irq_access_flags.reset();
+ m_debug_capabilities = 0;
+ m_handle_table_size = 0;
+ m_intended_kernel_version = 0;
+ m_program_type = 0;
+
+ // User processes must specify what cores/priorities they can use.
+ m_core_mask = 0;
+ m_priority_mask = 0;
+
+ // Parse the user capabilities array.
+ R_RETURN(this->SetCapabilities(user_caps, page_table));
+}
+
+Result KCapabilities::SetCorePriorityCapability(const u32 cap) {
+ // We can't set core/priority if we've already set them.
+ R_UNLESS(m_core_mask == 0, ResultInvalidArgument);
+ R_UNLESS(m_priority_mask == 0, ResultInvalidArgument);
+
+ // Validate the core/priority.
+ CorePriority pack{cap};
+ const u32 min_core = pack.minimum_core_id;
+ const u32 max_core = pack.maximum_core_id;
+ const u32 max_prio = pack.lowest_thread_priority;
+ const u32 min_prio = pack.highest_thread_priority;
+
+ R_UNLESS(min_core <= max_core, ResultInvalidCombination);
+ R_UNLESS(min_prio <= max_prio, ResultInvalidCombination);
+ R_UNLESS(max_core < Core::Hardware::NumVirtualCores, ResultInvalidCoreId);
+
+ ASSERT(max_prio < Common::BitSize<u64>());
+
+ // Set core mask.
+ for (auto core_id = min_core; core_id <= max_core; core_id++) {
+ m_core_mask |= (1ULL << core_id);
+ }
+ ASSERT((m_core_mask & Core::Hardware::VirtualCoreMask) == m_core_mask);
+
+ // Set physical core mask.
+ m_phys_core_mask = Core::Hardware::ConvertVirtualCoreMaskToPhysical(m_core_mask);
+
+ // Set priority mask.
+ for (auto prio = min_prio; prio <= max_prio; prio++) {
+ m_priority_mask |= (1ULL << prio);
+ }
+
+ // We must have some core/priority we can use.
+ R_UNLESS(m_core_mask != 0, ResultInvalidArgument);
+ R_UNLESS(m_priority_mask != 0, ResultInvalidArgument);
+
+ // Processes must not have access to kernel thread priorities.
+ R_UNLESS((m_priority_mask & 0xF) == 0, ResultInvalidArgument);
+
+ R_SUCCEED();
+}
+
+Result KCapabilities::SetSyscallMaskCapability(const u32 cap, u32& set_svc) {
+ // Validate the index.
+ SyscallMask pack{cap};
+ const u32 mask = pack.mask;
+ const u32 index = pack.index;
+
+ const u32 index_flag = (1U << index);
+ R_UNLESS((set_svc & index_flag) == 0, ResultInvalidCombination);
+ set_svc |= index_flag;
+
+ // Set SVCs.
+ for (size_t i = 0; i < decltype(SyscallMask::mask)::bits; i++) {
+ const u32 svc_id = static_cast<u32>(decltype(SyscallMask::mask)::bits * index + i);
+ if (mask & (1U << i)) {
+ R_UNLESS(this->SetSvcAllowed(svc_id), ResultOutOfRange);
+ }
+ }
+
+ R_SUCCEED();
+}
+
+Result KCapabilities::MapRange_(const u32 cap, const u32 size_cap, KPageTable* page_table) {
+ const auto range_pack = MapRange{cap};
+ const auto size_pack = MapRangeSize{size_cap};
+
+ // Get/validate address/size
+ const u64 phys_addr = range_pack.address.Value() * PageSize;
+
+ // Validate reserved bits are unused.
+ R_UNLESS(size_pack.reserved.Value() == 0, ResultOutOfRange);
+
+ const size_t num_pages = size_pack.pages;
+ const size_t size = num_pages * PageSize;
+ R_UNLESS(num_pages != 0, ResultInvalidSize);
+ R_UNLESS(phys_addr < phys_addr + size, ResultInvalidAddress);
+ R_UNLESS(((phys_addr + size - 1) & ~PhysicalMapAllowedMask) == 0, ResultInvalidAddress);
+
+ // Do the mapping.
+ [[maybe_unused]] const KMemoryPermission perm = range_pack.read_only.Value()
+ ? KMemoryPermission::UserRead
+ : KMemoryPermission::UserReadWrite;
+ if (MapRangeSize{size_cap}.normal) {
+ // R_RETURN(page_table->MapStatic(phys_addr, size, perm));
+ } else {
+ // R_RETURN(page_table->MapIo(phys_addr, size, perm));
+ }
+
+ UNIMPLEMENTED();
+ R_SUCCEED();
+}
+
+Result KCapabilities::MapIoPage_(const u32 cap, KPageTable* page_table) {
+ // Get/validate address/size
+ const u64 phys_addr = MapIoPage{cap}.address.Value() * PageSize;
+ const size_t num_pages = 1;
+ const size_t size = num_pages * PageSize;
+ R_UNLESS(num_pages != 0, ResultInvalidSize);
+ R_UNLESS(phys_addr < phys_addr + size, ResultInvalidAddress);
+ R_UNLESS(((phys_addr + size - 1) & ~PhysicalMapAllowedMask) == 0, ResultInvalidAddress);
+
+ // Do the mapping.
+ // R_RETURN(page_table->MapIo(phys_addr, size, KMemoryPermission_UserReadWrite));
+
+ UNIMPLEMENTED();
+ R_SUCCEED();
+}
+
+template <typename F>
+Result KCapabilities::ProcessMapRegionCapability(const u32 cap, F f) {
+ // Define the allowed memory regions.
+ constexpr std::array<KMemoryRegionType, 4> MemoryRegions{
+ KMemoryRegionType_None,
+ KMemoryRegionType_KernelTraceBuffer,
+ KMemoryRegionType_OnMemoryBootImage,
+ KMemoryRegionType_DTB,
+ };
+
+ // Extract regions/read only.
+ const MapRegion pack{cap};
+ const std::array<RegionType, 3> types{pack.region0, pack.region1, pack.region2};
+ const std::array<u32, 3> ro{pack.read_only0, pack.read_only1, pack.read_only2};
+
+ for (size_t i = 0; i < types.size(); i++) {
+ const auto type = types[i];
+ const auto perm = ro[i] ? KMemoryPermission::UserRead : KMemoryPermission::UserReadWrite;
+ switch (type) {
+ case RegionType::NoMapping:
+ break;
+ case RegionType::KernelTraceBuffer:
+ case RegionType::OnMemoryBootImage:
+ case RegionType::DTB:
+ R_TRY(f(MemoryRegions[static_cast<u32>(type)], perm));
+ break;
+ default:
+ R_THROW(ResultNotFound);
+ }
+ }
+
+ R_SUCCEED();
+}
+
+Result KCapabilities::MapRegion_(const u32 cap, KPageTable* page_table) {
+ // Map each region into the process's page table.
+ R_RETURN(ProcessMapRegionCapability(
+ cap, [](KMemoryRegionType region_type, KMemoryPermission perm) -> Result {
+ // R_RETURN(page_table->MapRegion(region_type, perm));
+ UNIMPLEMENTED();
+ R_SUCCEED();
+ }));
+}
+
+Result KCapabilities::CheckMapRegion(KernelCore& kernel, const u32 cap) {
+ // Check that each region has a physical backing store.
+ R_RETURN(ProcessMapRegionCapability(
+ cap, [&](KMemoryRegionType region_type, KMemoryPermission perm) -> Result {
+ R_UNLESS(kernel.MemoryLayout().GetPhysicalMemoryRegionTree().FindFirstDerived(
+ region_type) != nullptr,
+ ResultOutOfRange);
+ R_SUCCEED();
+ }));
+}
+
+Result KCapabilities::SetInterruptPairCapability(const u32 cap) {
+ // Extract interrupts.
+ const InterruptPair pack{cap};
+ const std::array<u32, 2> ids{pack.interrupt_id0, pack.interrupt_id1};
+
+ for (size_t i = 0; i < ids.size(); i++) {
+ if (ids[i] != PaddingInterruptId) {
+ UNIMPLEMENTED();
+ // R_UNLESS(Kernel::GetInterruptManager().IsInterruptDefined(ids[i]), ResultOutOfRange);
+ // R_UNLESS(this->SetInterruptPermitted(ids[i]), ResultOutOfRange);
+ }
+ }
+
+ R_SUCCEED();
+}
+
+Result KCapabilities::SetProgramTypeCapability(const u32 cap) {
+ // Validate.
+ const ProgramType pack{cap};
+ R_UNLESS(pack.reserved == 0, ResultReservedUsed);
+
+ m_program_type = pack.type;
+ R_SUCCEED();
+}
+
+Result KCapabilities::SetKernelVersionCapability(const u32 cap) {
+ // Ensure we haven't set our version before.
+ R_UNLESS(KernelVersion{m_intended_kernel_version}.major_version == 0, ResultInvalidArgument);
+
+ // Set, ensure that we set a valid version.
+ m_intended_kernel_version = cap;
+ R_UNLESS(KernelVersion{m_intended_kernel_version}.major_version != 0, ResultInvalidArgument);
+
+ R_SUCCEED();
+}
+
+Result KCapabilities::SetHandleTableCapability(const u32 cap) {
+ // Validate.
+ const HandleTable pack{cap};
+ R_UNLESS(pack.reserved == 0, ResultReservedUsed);
+
+ m_handle_table_size = pack.size;
+ R_SUCCEED();
+}
+
+Result KCapabilities::SetDebugFlagsCapability(const u32 cap) {
+ // Validate.
+ const DebugFlags pack{cap};
+ R_UNLESS(pack.reserved == 0, ResultReservedUsed);
+
+ DebugFlags debug_capabilities{m_debug_capabilities};
+ debug_capabilities.allow_debug.Assign(pack.allow_debug);
+ debug_capabilities.force_debug.Assign(pack.force_debug);
+ m_debug_capabilities = debug_capabilities.raw;
+
+ R_SUCCEED();
+}
+
+Result KCapabilities::SetCapability(const u32 cap, u32& set_flags, u32& set_svc,
+ KPageTable* page_table) {
+ // Validate this is a capability we can act on.
+ const auto type = GetCapabilityType(cap);
+ R_UNLESS(type != CapabilityType::Invalid, ResultInvalidArgument);
+
+ // If the type is padding, we have no work to do.
+ R_SUCCEED_IF(type == CapabilityType::Padding);
+
+ // Check that we haven't already processed this capability.
+ const auto flag = GetCapabilityFlag(type);
+ R_UNLESS(((set_flags & InitializeOnceFlags) & flag) == 0, ResultInvalidCombination);
+ set_flags |= flag;
+
+ // Process the capability.
+ switch (type) {
+ case CapabilityType::CorePriority:
+ R_RETURN(this->SetCorePriorityCapability(cap));
+ case CapabilityType::SyscallMask:
+ R_RETURN(this->SetSyscallMaskCapability(cap, set_svc));
+ case CapabilityType::MapIoPage:
+ R_RETURN(this->MapIoPage_(cap, page_table));
+ case CapabilityType::MapRegion:
+ R_RETURN(this->MapRegion_(cap, page_table));
+ case CapabilityType::InterruptPair:
+ R_RETURN(this->SetInterruptPairCapability(cap));
+ case CapabilityType::ProgramType:
+ R_RETURN(this->SetProgramTypeCapability(cap));
+ case CapabilityType::KernelVersion:
+ R_RETURN(this->SetKernelVersionCapability(cap));
+ case CapabilityType::HandleTable:
+ R_RETURN(this->SetHandleTableCapability(cap));
+ case CapabilityType::DebugFlags:
+ R_RETURN(this->SetDebugFlagsCapability(cap));
+ default:
+ R_THROW(ResultInvalidArgument);
+ }
+}
+
+Result KCapabilities::SetCapabilities(std::span<const u32> caps, KPageTable* page_table) {
+ u32 set_flags = 0, set_svc = 0;
+
+ for (size_t i = 0; i < caps.size(); i++) {
+ const u32 cap{caps[i]};
+
+ if (GetCapabilityType(cap) == CapabilityType::MapRange) {
+ // Check that the pair cap exists.
+ R_UNLESS((++i) < caps.size(), ResultInvalidCombination);
+
+ // Check the pair cap is a map range cap.
+ const u32 size_cap{caps[i]};
+ R_UNLESS(GetCapabilityType(size_cap) == CapabilityType::MapRange,
+ ResultInvalidCombination);
+
+ // Map the range.
+ R_TRY(this->MapRange_(cap, size_cap, page_table));
+ } else {
+ R_TRY(this->SetCapability(cap, set_flags, set_svc, page_table));
+ }
+ }
+
+ R_SUCCEED();
+}
+
+Result KCapabilities::CheckCapabilities(KernelCore& kernel, std::span<const u32> caps) {
+ for (auto cap : caps) {
+ // Check the capability refers to a valid region.
+ if (GetCapabilityType(cap) == CapabilityType::MapRegion) {
+ R_TRY(CheckMapRegion(kernel, cap));
+ }
+ }
+
+ R_SUCCEED();
+}
+
+} // namespace Kernel
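The capability descriptors parsed above are self-identifying: the type is encoded in the number of trailing one-bits of the word, which is exactly what GetCapabilityType's expression (~value & (value + 1)) - 1 recovers. A standalone demonstration, not part of the patch (GetCapabilityTypeRaw and the sample descriptor values are made up for illustration):

    #include <cstdint>
    #include <cstdio>

    constexpr std::uint32_t GetCapabilityTypeRaw(std::uint32_t value) {
        // Isolate the lowest clear bit of value, then turn it into a mask of the
        // trailing ones; that mask is the CapabilityType enumerator value.
        return (~value & (value + 1)) - 1;
    }

    int main() {
        // A CorePriority descriptor ends in three one-bits: type == (1 << 3) - 1 == 7.
        constexpr std::uint32_t core_priority_cap = 0x00000017; // ...10111
        static_assert(GetCapabilityTypeRaw(core_priority_cap) == (1u << 3) - 1);

        // A SyscallMask descriptor ends in four one-bits: type == (1 << 4) - 1 == 15.
        constexpr std::uint32_t syscall_mask_cap = 0x0000002F; // ...101111
        static_assert(GetCapabilityTypeRaw(syscall_mask_cap) == (1u << 4) - 1);

        std::printf("%u %u\n", static_cast<unsigned>(GetCapabilityTypeRaw(core_priority_cap)),
                    static_cast<unsigned>(GetCapabilityTypeRaw(syscall_mask_cap)));
        return 0;
    }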
diff --git a/src/core/hle/kernel/k_capabilities.h b/src/core/hle/kernel/k_capabilities.h
new file mode 100644
index 000000000..cd96f8d23
--- /dev/null
+++ b/src/core/hle/kernel/k_capabilities.h
@@ -0,0 +1,295 @@
+
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <bitset>
+#include <span>
+
+#include "common/bit_field.h"
+#include "common/common_types.h"
+
+#include "core/hle/kernel/svc_types.h"
+#include "core/hle/result.h"
+
+namespace Kernel {
+
+class KPageTable;
+class KernelCore;
+
+class KCapabilities {
+public:
+ constexpr explicit KCapabilities() = default;
+
+ Result InitializeForKIP(std::span<const u32> kern_caps, KPageTable* page_table);
+ Result InitializeForUser(std::span<const u32> user_caps, KPageTable* page_table);
+
+ static Result CheckCapabilities(KernelCore& kernel, std::span<const u32> user_caps);
+
+ constexpr u64 GetCoreMask() const {
+ return m_core_mask;
+ }
+
+ constexpr u64 GetPhysicalCoreMask() const {
+ return m_phys_core_mask;
+ }
+
+ constexpr u64 GetPriorityMask() const {
+ return m_priority_mask;
+ }
+
+ constexpr s32 GetHandleTableSize() const {
+ return m_handle_table_size;
+ }
+
+ constexpr const Svc::SvcAccessFlagSet& GetSvcPermissions() const {
+ return m_svc_access_flags;
+ }
+
+ constexpr bool IsPermittedSvc(u32 id) const {
+ return (id < m_svc_access_flags.size()) && m_svc_access_flags[id];
+ }
+
+ constexpr bool IsPermittedInterrupt(u32 id) const {
+ return (id < m_irq_access_flags.size()) && m_irq_access_flags[id];
+ }
+
+ constexpr bool IsPermittedDebug() const {
+ return DebugFlags{m_debug_capabilities}.allow_debug.Value() != 0;
+ }
+
+ constexpr bool CanForceDebug() const {
+ return DebugFlags{m_debug_capabilities}.force_debug.Value() != 0;
+ }
+
+ constexpr u32 GetIntendedKernelMajorVersion() const {
+ return KernelVersion{m_intended_kernel_version}.major_version;
+ }
+
+ constexpr u32 GetIntendedKernelMinorVersion() const {
+ return KernelVersion{m_intended_kernel_version}.minor_version;
+ }
+
+private:
+ static constexpr size_t InterruptIdCount = 0x400;
+ using InterruptFlagSet = std::bitset<InterruptIdCount>;
+
+ enum class CapabilityType : u32 {
+ CorePriority = (1U << 3) - 1,
+ SyscallMask = (1U << 4) - 1,
+ MapRange = (1U << 6) - 1,
+ MapIoPage = (1U << 7) - 1,
+ MapRegion = (1U << 10) - 1,
+ InterruptPair = (1U << 11) - 1,
+ ProgramType = (1U << 13) - 1,
+ KernelVersion = (1U << 14) - 1,
+ HandleTable = (1U << 15) - 1,
+ DebugFlags = (1U << 16) - 1,
+
+ Invalid = 0U,
+ Padding = ~0U,
+ };
+
+ using RawCapabilityValue = u32;
+
+ static constexpr CapabilityType GetCapabilityType(const RawCapabilityValue value) {
+ return static_cast<CapabilityType>((~value & (value + 1)) - 1);
+ }
+
+ static constexpr u32 GetCapabilityFlag(CapabilityType type) {
+ return static_cast<u32>(type) + 1;
+ }
+
+ template <CapabilityType Type>
+ static constexpr inline u32 CapabilityFlag = static_cast<u32>(Type) + 1;
+
+ template <CapabilityType Type>
+ static constexpr inline u32 CapabilityId = std::countr_zero(CapabilityFlag<Type>);
+
+ union CorePriority {
+ static_assert(CapabilityId<CapabilityType::CorePriority> + 1 == 4);
+
+ RawCapabilityValue raw;
+ BitField<0, 4, CapabilityType> id;
+ BitField<4, 6, u32> lowest_thread_priority;
+ BitField<10, 6, u32> highest_thread_priority;
+ BitField<16, 8, u32> minimum_core_id;
+ BitField<24, 8, u32> maximum_core_id;
+ };
+
+ union SyscallMask {
+ static_assert(CapabilityId<CapabilityType::SyscallMask> + 1 == 5);
+
+ RawCapabilityValue raw;
+ BitField<0, 5, CapabilityType> id;
+ BitField<5, 24, u32> mask;
+ BitField<29, 3, u32> index;
+ };
+
+ // #undef MESOSPHERE_ENABLE_LARGE_PHYSICAL_ADDRESS_CAPABILITIES
+ static constexpr u64 PhysicalMapAllowedMask = (1ULL << 36) - 1;
+
+ union MapRange {
+ static_assert(CapabilityId<CapabilityType::MapRange> + 1 == 7);
+
+ RawCapabilityValue raw;
+ BitField<0, 7, CapabilityType> id;
+ BitField<7, 24, u32> address;
+ BitField<31, 1, u32> read_only;
+ };
+
+ union MapRangeSize {
+ static_assert(CapabilityId<CapabilityType::MapRange> + 1 == 7);
+
+ RawCapabilityValue raw;
+ BitField<0, 7, CapabilityType> id;
+ BitField<7, 20, u32> pages;
+ BitField<27, 4, u32> reserved;
+ BitField<31, 1, u32> normal;
+ };
+
+ union MapIoPage {
+ static_assert(CapabilityId<CapabilityType::MapIoPage> + 1 == 8);
+
+ RawCapabilityValue raw;
+ BitField<0, 8, CapabilityType> id;
+ BitField<8, 24, u32> address;
+ };
+
+ enum class RegionType : u32 {
+ NoMapping = 0,
+ KernelTraceBuffer = 1,
+ OnMemoryBootImage = 2,
+ DTB = 3,
+ };
+
+ union MapRegion {
+ static_assert(CapabilityId<CapabilityType::MapRegion> + 1 == 11);
+
+ RawCapabilityValue raw;
+ BitField<0, 11, CapabilityType> id;
+ BitField<11, 6, RegionType> region0;
+ BitField<17, 1, u32> read_only0;
+ BitField<18, 6, RegionType> region1;
+ BitField<24, 1, u32> read_only1;
+ BitField<25, 6, RegionType> region2;
+ BitField<31, 1, u32> read_only2;
+ };
+
+ union InterruptPair {
+ static_assert(CapabilityId<CapabilityType::InterruptPair> + 1 == 12);
+
+ RawCapabilityValue raw;
+ BitField<0, 12, CapabilityType> id;
+ BitField<12, 10, u32> interrupt_id0;
+ BitField<22, 10, u32> interrupt_id1;
+ };
+
+ union ProgramType {
+ static_assert(CapabilityId<CapabilityType::ProgramType> + 1 == 14);
+
+ RawCapabilityValue raw;
+ BitField<0, 14, CapabilityType> id;
+ BitField<14, 3, u32> type;
+ BitField<17, 15, u32> reserved;
+ };
+
+ union KernelVersion {
+ static_assert(CapabilityId<CapabilityType::KernelVersion> + 1 == 15);
+
+ RawCapabilityValue raw;
+ BitField<0, 15, CapabilityType> id;
+ BitField<15, 4, u32> major_version;
+ BitField<19, 13, u32> minor_version;
+ };
+
+ union HandleTable {
+ static_assert(CapabilityId<CapabilityType::HandleTable> + 1 == 16);
+
+ RawCapabilityValue raw;
+ BitField<0, 16, CapabilityType> id;
+ BitField<16, 10, u32> size;
+ BitField<26, 6, u32> reserved;
+ };
+
+ union DebugFlags {
+ static_assert(CapabilityId<CapabilityType::DebugFlags> + 1 == 17);
+
+ RawCapabilityValue raw;
+ BitField<0, 17, CapabilityType> id;
+ BitField<17, 1, u32> allow_debug;
+ BitField<18, 1, u32> force_debug;
+ BitField<19, 13, u32> reserved;
+ };
+
+ static_assert(sizeof(CorePriority) == 4);
+ static_assert(sizeof(SyscallMask) == 4);
+ static_assert(sizeof(MapRange) == 4);
+ static_assert(sizeof(MapRangeSize) == 4);
+ static_assert(sizeof(MapIoPage) == 4);
+ static_assert(sizeof(MapRegion) == 4);
+ static_assert(sizeof(InterruptPair) == 4);
+ static_assert(sizeof(ProgramType) == 4);
+ static_assert(sizeof(KernelVersion) == 4);
+ static_assert(sizeof(HandleTable) == 4);
+ static_assert(sizeof(DebugFlags) == 4);
+
+ static constexpr u32 InitializeOnceFlags =
+ CapabilityFlag<CapabilityType::CorePriority> | CapabilityFlag<CapabilityType::ProgramType> |
+ CapabilityFlag<CapabilityType::KernelVersion> |
+ CapabilityFlag<CapabilityType::HandleTable> | CapabilityFlag<CapabilityType::DebugFlags>;
+
+ static const u32 PaddingInterruptId = 0x3FF;
+ static_assert(PaddingInterruptId < InterruptIdCount);
+
+private:
+ constexpr bool SetSvcAllowed(u32 id) {
+ if (id < m_svc_access_flags.size()) [[likely]] {
+ m_svc_access_flags[id] = true;
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ constexpr bool SetInterruptPermitted(u32 id) {
+ if (id < m_irq_access_flags.size()) [[likely]] {
+ m_irq_access_flags[id] = true;
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ Result SetCorePriorityCapability(const u32 cap);
+ Result SetSyscallMaskCapability(const u32 cap, u32& set_svc);
+ Result MapRange_(const u32 cap, const u32 size_cap, KPageTable* page_table);
+ Result MapIoPage_(const u32 cap, KPageTable* page_table);
+ Result MapRegion_(const u32 cap, KPageTable* page_table);
+ Result SetInterruptPairCapability(const u32 cap);
+ Result SetProgramTypeCapability(const u32 cap);
+ Result SetKernelVersionCapability(const u32 cap);
+ Result SetHandleTableCapability(const u32 cap);
+ Result SetDebugFlagsCapability(const u32 cap);
+
+ template <typename F>
+ static Result ProcessMapRegionCapability(const u32 cap, F f);
+ static Result CheckMapRegion(KernelCore& kernel, const u32 cap);
+
+ Result SetCapability(const u32 cap, u32& set_flags, u32& set_svc, KPageTable* page_table);
+ Result SetCapabilities(std::span<const u32> caps, KPageTable* page_table);
+
+private:
+ Svc::SvcAccessFlagSet m_svc_access_flags{};
+ InterruptFlagSet m_irq_access_flags{};
+ u64 m_core_mask{};
+ u64 m_phys_core_mask{};
+ u64 m_priority_mask{};
+ u32 m_debug_capabilities{};
+ s32 m_handle_table_size{};
+ u32 m_intended_kernel_version{};
+ u32 m_program_type{};
+};
+
+} // namespace Kernel
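For reference, the CorePriority word declared above packs both priority bounds and the core-id range alongside the type tag. A plain-shift decode under the same layout (CorePriorityFields, DecodeCorePriority, and kSample are illustrative names, not yuzu code; the offsets mirror the BitField declarations shown above):

    #include <cstdint>

    struct CorePriorityFields {
        std::uint32_t lowest_thread_priority;  // bits 4..9
        std::uint32_t highest_thread_priority; // bits 10..15
        std::uint32_t minimum_core_id;         // bits 16..23
        std::uint32_t maximum_core_id;         // bits 24..31
    };

    constexpr CorePriorityFields DecodeCorePriority(std::uint32_t raw) {
        return CorePriorityFields{
            (raw >> 4) & 0x3F,
            (raw >> 10) & 0x3F,
            (raw >> 16) & 0xFF,
            (raw >> 24) & 0xFF,
        };
    }

    // Example: cores 0..3 with priorities 28..59; the low nibble 0b0111 is the
    // CorePriority type tag (three trailing one-bits).
    constexpr std::uint32_t kSample = (3u << 24) | (0u << 16) | (28u << 10) | (59u << 4) | 0x7u;
    static_assert(DecodeCorePriority(kSample).maximum_core_id == 3);
    static_assert(DecodeCorePriority(kSample).minimum_core_id == 0);
    static_assert(DecodeCorePriority(kSample).highest_thread_priority == 28);
    static_assert(DecodeCorePriority(kSample).lowest_thread_priority == 59);

    int main() {
        return 0;
    }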
diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp
index d9da1e600..884eba001 100644
--- a/src/core/hle/kernel/k_code_memory.cpp
+++ b/src/core/hle/kernel/k_code_memory.cpp
@@ -74,7 +74,7 @@ Result KCodeMemory::Map(VAddr address, size_t size) {
R_UNLESS(!m_is_mapped, ResultInvalidState);
// Map the memory.
- R_TRY(kernel.CurrentProcess()->PageTable().MapPages(
+ R_TRY(kernel.CurrentProcess()->PageTable().MapPageGroup(
address, *m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));
// Mark ourselves as mapped.
@@ -91,8 +91,8 @@ Result KCodeMemory::Unmap(VAddr address, size_t size) {
KScopedLightLock lk(m_lock);
// Unmap the memory.
- R_TRY(kernel.CurrentProcess()->PageTable().UnmapPages(address, *m_page_group,
- KMemoryState::CodeOut));
+ R_TRY(kernel.CurrentProcess()->PageTable().UnmapPageGroup(address, *m_page_group,
+ KMemoryState::CodeOut));
// Mark ourselves as unmapped.
m_is_mapped = false;
@@ -125,8 +125,8 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission
}
// Map the memory.
- R_TRY(
- m_owner->PageTable().MapPages(address, *m_page_group, KMemoryState::GeneratedCode, k_perm));
+ R_TRY(m_owner->PageTable().MapPageGroup(address, *m_page_group, KMemoryState::GeneratedCode,
+ k_perm));
// Mark ourselves as mapped.
m_is_owner_mapped = true;
@@ -142,7 +142,7 @@ Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
KScopedLightLock lk(m_lock);
// Unmap the memory.
- R_TRY(m_owner->PageTable().UnmapPages(address, *m_page_group, KMemoryState::GeneratedCode));
+ R_TRY(m_owner->PageTable().UnmapPageGroup(address, *m_page_group, KMemoryState::GeneratedCode));
// Mark ourselves as unmapped.
m_is_owner_mapped = false;
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index 124149697..0c6b20db3 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -171,7 +171,7 @@ Result KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value)
R_UNLESS(owner_thread != nullptr, ResultInvalidHandle);
// Update the lock.
- cur_thread->SetAddressKey(addr, value);
+ cur_thread->SetUserAddressKey(addr, value);
owner_thread->AddWaiter(cur_thread);
// Begin waiting.
diff --git a/src/core/hle/kernel/k_device_address_space.cpp b/src/core/hle/kernel/k_device_address_space.cpp
new file mode 100644
index 000000000..27659ea3b
--- /dev/null
+++ b/src/core/hle/kernel/k_device_address_space.cpp
@@ -0,0 +1,150 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/assert.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_device_address_space.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel {
+
+KDeviceAddressSpace::KDeviceAddressSpace(KernelCore& kernel_)
+ : KAutoObjectWithSlabHeapAndContainer(kernel_), m_lock(kernel_), m_is_initialized(false) {}
+KDeviceAddressSpace::~KDeviceAddressSpace() = default;
+
+void KDeviceAddressSpace::Initialize() {
+ // This just forwards to the device page table manager.
+ // KDevicePageTable::Initialize();
+}
+
+// Member functions.
+Result KDeviceAddressSpace::Initialize(u64 address, u64 size) {
+ // Initialize the device page table.
+ // R_TRY(m_table.Initialize(address, size));
+
+ // Set member variables.
+ m_space_address = address;
+ m_space_size = size;
+ m_is_initialized = true;
+
+ R_SUCCEED();
+}
+
+void KDeviceAddressSpace::Finalize() {
+ // Finalize the table.
+ // m_table.Finalize();
+}
+
+Result KDeviceAddressSpace::Attach(Svc::DeviceName device_name) {
+ // Lock the address space.
+ KScopedLightLock lk(m_lock);
+
+ // Attach.
+ // R_RETURN(m_table.Attach(device_name, m_space_address, m_space_size));
+ R_SUCCEED();
+}
+
+Result KDeviceAddressSpace::Detach(Svc::DeviceName device_name) {
+ // Lock the address space.
+ KScopedLightLock lk(m_lock);
+
+ // Detach.
+ // R_RETURN(m_table.Detach(device_name));
+ R_SUCCEED();
+}
+
+Result KDeviceAddressSpace::Map(KPageTable* page_table, VAddr process_address, size_t size,
+ u64 device_address, u32 option, bool is_aligned) {
+ // Check that the address falls within the space.
+ R_UNLESS((m_space_address <= device_address &&
+ device_address + size - 1 <= m_space_address + m_space_size - 1),
+ ResultInvalidCurrentMemory);
+
+ // Decode the option.
+ const Svc::MapDeviceAddressSpaceOption option_pack{option};
+ const auto device_perm = option_pack.permission.Value();
+ const auto flags = option_pack.flags.Value();
+ const auto reserved = option_pack.reserved.Value();
+
+ // Validate the option.
+ // TODO: It is likely that this check for flags == none is only on NX board.
+ R_UNLESS(flags == Svc::MapDeviceAddressSpaceFlag::None, ResultInvalidEnumValue);
+ R_UNLESS(reserved == 0, ResultInvalidEnumValue);
+
+ // Lock the address space.
+ KScopedLightLock lk(m_lock);
+
+ // Lock the page table to prevent concurrent device mapping operations.
+ // KScopedLightLock pt_lk = page_table->AcquireDeviceMapLock();
+
+ // Lock the pages.
+ bool is_io{};
+ R_TRY(page_table->LockForMapDeviceAddressSpace(std::addressof(is_io), process_address, size,
+ ConvertToKMemoryPermission(device_perm),
+ is_aligned, true));
+
+ // Ensure that if we fail, we don't keep unmapped pages locked.
+ ON_RESULT_FAILURE {
+ ASSERT(page_table->UnlockForDeviceAddressSpace(process_address, size) == ResultSuccess);
+ };
+
+ // Check that the io status is allowable.
+ if (is_io) {
+ R_UNLESS(static_cast<u32>(flags & Svc::MapDeviceAddressSpaceFlag::NotIoRegister) == 0,
+ ResultInvalidCombination);
+ }
+
+ // Map the pages.
+ {
+ // Perform the mapping.
+ // R_TRY(m_table.Map(page_table, process_address, size, device_address, device_perm,
+ // is_aligned, is_io));
+
+ // Ensure that we unmap the pages if we fail to update the protections.
+ // NOTE: Nintendo does not check the result of this unmap call.
+ // ON_RESULT_FAILURE { m_table.Unmap(device_address, size); };
+
+ // Update the protections in accordance with how much we mapped.
+ // R_TRY(page_table->UnlockForDeviceAddressSpacePartialMap(process_address, size));
+ }
+
+ // We succeeded.
+ R_SUCCEED();
+}
+
+Result KDeviceAddressSpace::Unmap(KPageTable* page_table, VAddr process_address, size_t size,
+ u64 device_address) {
+ // Check that the address falls within the space.
+ R_UNLESS((m_space_address <= device_address &&
+ device_address + size - 1 <= m_space_address + m_space_size - 1),
+ ResultInvalidCurrentMemory);
+
+ // Lock the address space.
+ KScopedLightLock lk(m_lock);
+
+ // Lock the page table to prevent concurrent device mapping operations.
+ // KScopedLightLock pt_lk = page_table->AcquireDeviceMapLock();
+
+ // Lock the pages.
+ R_TRY(page_table->LockForUnmapDeviceAddressSpace(process_address, size, true));
+
+ // Unmap the pages.
+ {
+ // If we fail to unmap, we want to do a partial unlock.
+ // ON_RESULT_FAILURE {
+ // ASSERT(page_table->UnlockForDeviceAddressSpacePartialMap(process_address, size) ==
+ // ResultSuccess);
+ // };
+
+ // Perform the unmap.
+ // R_TRY(m_table.Unmap(page_table, process_address, size, device_address));
+ }
+
+ // Unlock the pages.
+ ASSERT(page_table->UnlockForDeviceAddressSpace(process_address, size) == ResultSuccess);
+
+ R_SUCCEED();
+}
+
+} // namespace Kernel
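Both Map and Unmap above guard the device address against the space with an inclusive-end comparison. A standalone sketch of why that form is used (ContainsRange is an illustrative name, not yuzu code):

    #include <cstdint>

    // Inclusive-end containment check, mirroring the R_UNLESS condition above; the
    // "last byte" form avoids computing an end address that could wrap past the top
    // of the address space.
    constexpr bool ContainsRange(std::uint64_t space_address, std::uint64_t space_size,
                                 std::uint64_t device_address, std::uint64_t size) {
        return space_address <= device_address &&
               device_address + size - 1 <= space_address + space_size - 1;
    }

    static_assert(ContainsRange(0x1000, 0x1000, 0x1800, 0x800));  // ends exactly at the last byte
    static_assert(!ContainsRange(0x1000, 0x1000, 0x1800, 0x801)); // one byte past the space

    int main() {
        return 0;
    }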
diff --git a/src/core/hle/kernel/k_device_address_space.h b/src/core/hle/kernel/k_device_address_space.h
new file mode 100644
index 000000000..4709df995
--- /dev/null
+++ b/src/core/hle/kernel/k_device_address_space.h
@@ -0,0 +1,60 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <string>
+
+#include "common/common_types.h"
+#include "core/hle/kernel/k_page_table.h"
+#include "core/hle/kernel/slab_helpers.h"
+#include "core/hle/result.h"
+
+namespace Kernel {
+
+class KDeviceAddressSpace final
+ : public KAutoObjectWithSlabHeapAndContainer<KDeviceAddressSpace, KAutoObjectWithList> {
+ KERNEL_AUTOOBJECT_TRAITS(KDeviceAddressSpace, KAutoObject);
+
+public:
+ explicit KDeviceAddressSpace(KernelCore& kernel);
+ ~KDeviceAddressSpace();
+
+ Result Initialize(u64 address, u64 size);
+ void Finalize();
+
+ bool IsInitialized() const {
+ return m_is_initialized;
+ }
+ static void PostDestroy(uintptr_t arg) {}
+
+ Result Attach(Svc::DeviceName device_name);
+ Result Detach(Svc::DeviceName device_name);
+
+ Result MapByForce(KPageTable* page_table, VAddr process_address, size_t size,
+ u64 device_address, u32 option) {
+ R_RETURN(this->Map(page_table, process_address, size, device_address, option, false));
+ }
+
+ Result MapAligned(KPageTable* page_table, VAddr process_address, size_t size,
+ u64 device_address, u32 option) {
+ R_RETURN(this->Map(page_table, process_address, size, device_address, option, true));
+ }
+
+ Result Unmap(KPageTable* page_table, VAddr process_address, size_t size, u64 device_address);
+
+ static void Initialize();
+
+private:
+ Result Map(KPageTable* page_table, VAddr process_address, size_t size, u64 device_address,
+ u32 option, bool is_aligned);
+
+private:
+ KLightLock m_lock;
+ // KDevicePageTable m_table;
+ u64 m_space_address{};
+ u64 m_space_size{};
+ bool m_is_initialized{};
+};
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_light_lock.cpp b/src/core/hle/kernel/k_light_lock.cpp
index 43185320d..d791acbe3 100644
--- a/src/core/hle/kernel/k_light_lock.cpp
+++ b/src/core/hle/kernel/k_light_lock.cpp
@@ -68,7 +68,7 @@ bool KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {
// Add the current thread as a waiter on the owner.
KThread* owner_thread = reinterpret_cast<KThread*>(_owner & ~1ULL);
- cur_thread->SetAddressKey(reinterpret_cast<uintptr_t>(std::addressof(tag)));
+ cur_thread->SetKernelAddressKey(reinterpret_cast<uintptr_t>(std::addressof(tag)));
owner_thread->AddWaiter(cur_thread);
// Begin waiting to hold the lock.
diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h
index fd6e1d3e6..17fa1a6ed 100644
--- a/src/core/hle/kernel/k_memory_layout.h
+++ b/src/core/hle/kernel/k_memory_layout.h
@@ -67,9 +67,9 @@ constexpr size_t KernelPageBufferAdditionalSize = 0x33C000;
constexpr std::size_t KernelResourceSize = KernelPageTableHeapSize + KernelInitialPageHeapSize +
KernelSlabHeapSize + KernelPageBufferHeapSize;
-constexpr bool IsKernelAddressKey(VAddr key) {
- return KernelVirtualAddressSpaceBase <= key && key <= KernelVirtualAddressSpaceLast;
-}
+//! NB: Use KThread::GetAddressKeyIsKernel().
+//! See explanation for deviation of GetAddressKey.
+bool IsKernelAddressKey(VAddr key) = delete;
constexpr bool IsKernelAddress(VAddr address) {
return KernelVirtualAddressSpaceBase <= address && address < KernelVirtualAddressSpaceEnd;
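A side note on the deleted IsKernelAddressKey above: deleting the free function keeps the name declared, so any remaining caller fails to compile with a "use of deleted function" diagnostic rather than an unknown-identifier error. A minimal standalone illustration (the example function name is made up):

    #include <cstdint>

    // Declared but deleted: calling it is a compile-time error.
    bool IsKernelAddressKeyExample(std::uint64_t key) = delete;

    int main() {
        // IsKernelAddressKeyExample(0); // uncommenting this fails with "use of deleted function"
        return 0;
    }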
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 9c7ac22dc..2e13d5d0d 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -435,6 +435,9 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t si
KPageGroup pg{m_kernel, m_block_info_manager};
AddRegionToPages(src_address, num_pages, pg);
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
// Reprotect the source as kernel-read/not mapped.
const auto new_perm = static_cast<KMemoryPermission>(KMemoryPermission::KernelRead |
KMemoryPermission::NotMapped);
@@ -447,7 +450,10 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t si
});
// Map the alias pages.
- R_TRY(MapPages(dst_address, pg, new_perm));
+ const KPageProperties dst_properties = {new_perm, false, false,
+ DisableMergeAttribute::DisableHead};
+ R_TRY(
+ this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_properties, false));
// We successfully mapped the alias pages, so we don't need to unprotect the src pages on
// failure.
@@ -1881,7 +1887,8 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
R_SUCCEED();
}
-Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size) {
+Result KPageTable::MapMemory(KProcessAddress dst_address, KProcessAddress src_address,
+ size_t size) {
// Lock the table.
KScopedLightLock lk(m_general_lock);
@@ -1902,53 +1909,73 @@ Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size)
KMemoryAttribute::None));
// Create an update allocator for the source.
- Result src_allocator_result{ResultSuccess};
+ Result src_allocator_result;
KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
m_memory_block_slab_manager,
num_src_allocator_blocks);
R_TRY(src_allocator_result);
// Create an update allocator for the destination.
- Result dst_allocator_result{ResultSuccess};
+ Result dst_allocator_result;
KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
m_memory_block_slab_manager,
num_dst_allocator_blocks);
R_TRY(dst_allocator_result);
// Map the memory.
- KPageGroup page_linked_list{m_kernel, m_block_info_manager};
- const size_t num_pages{size / PageSize};
- const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
- KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
- const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked;
-
- AddRegionToPages(src_address, num_pages, page_linked_list);
{
+ // Determine the number of pages being operated on.
+ const size_t num_pages = size / PageSize;
+
+ // Create page groups for the memory being unmapped.
+ KPageGroup pg{m_kernel, m_block_info_manager};
+
+ // Create the page group representing the source.
+ R_TRY(this->MakePageGroup(pg, src_address, num_pages));
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
// Reprotect the source as kernel-read/not mapped.
- auto block_guard = detail::ScopeExit([&] {
- Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
- OperationType::ChangePermissions);
- });
- R_TRY(Operate(src_address, num_pages, new_src_perm, OperationType::ChangePermissions));
- R_TRY(MapPages(dst_address, page_linked_list, KMemoryPermission::UserReadWrite));
+ const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
+ KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
+ const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked;
+ const KPageProperties src_properties = {new_src_perm, false, false,
+ DisableMergeAttribute::DisableHeadBodyTail};
+ R_TRY(this->Operate(src_address, num_pages, src_properties.perm,
+ OperationType::ChangePermissions));
- block_guard.Cancel();
- }
+ // Ensure that we unprotect the source pages on failure.
+ ON_RESULT_FAILURE {
+ const KPageProperties unprotect_properties = {
+ KMemoryPermission::UserReadWrite, false, false,
+ DisableMergeAttribute::EnableHeadBodyTail};
+ ASSERT(this->Operate(src_address, num_pages, unprotect_properties.perm,
+ OperationType::ChangePermissions) == ResultSuccess);
+ };
- // Apply the memory block updates.
- m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state,
- new_src_perm, new_src_attr,
- KMemoryBlockDisableMergeAttribute::Locked,
- KMemoryBlockDisableMergeAttribute::None);
- m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
- KMemoryState::Stack, KMemoryPermission::UserReadWrite,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
- KMemoryBlockDisableMergeAttribute::None);
+ // Map the alias pages.
+ const KPageProperties dst_map_properties = {KMemoryPermission::UserReadWrite, false, false,
+ DisableMergeAttribute::DisableHead};
+ R_TRY(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_map_properties,
+ false));
+
+ // Apply the memory block updates.
+ m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages,
+ src_state, new_src_perm, new_src_attr,
+ KMemoryBlockDisableMergeAttribute::Locked,
+ KMemoryBlockDisableMergeAttribute::None);
+ m_memory_block_manager.Update(
+ std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::Stack,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal, KMemoryBlockDisableMergeAttribute::None);
+ }
R_SUCCEED();
}
-Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size) {
+Result KPageTable::UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address,
+ size_t size) {
// Lock the table.
KScopedLightLock lk(m_general_lock);
@@ -1970,108 +1997,208 @@ Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size
KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None));
// Create an update allocator for the source.
- Result src_allocator_result{ResultSuccess};
+ Result src_allocator_result;
KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
m_memory_block_slab_manager,
num_src_allocator_blocks);
R_TRY(src_allocator_result);
// Create an update allocator for the destination.
- Result dst_allocator_result{ResultSuccess};
+ Result dst_allocator_result;
KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
m_memory_block_slab_manager,
num_dst_allocator_blocks);
R_TRY(dst_allocator_result);
- KPageGroup src_pages{m_kernel, m_block_info_manager};
- KPageGroup dst_pages{m_kernel, m_block_info_manager};
- const size_t num_pages{size / PageSize};
+ // Unmap the memory.
+ {
+ // Determine the number of pages being operated on.
+ const size_t num_pages = size / PageSize;
- AddRegionToPages(src_address, num_pages, src_pages);
- AddRegionToPages(dst_address, num_pages, dst_pages);
+ // Create page groups for the memory being unmapped.
+ KPageGroup pg{m_kernel, m_block_info_manager};
- R_UNLESS(dst_pages.IsEquivalentTo(src_pages), ResultInvalidMemoryRegion);
+ // Create the page group representing the destination.
+ R_TRY(this->MakePageGroup(pg, dst_address, num_pages));
- {
- auto block_guard = detail::ScopeExit([&] { MapPages(dst_address, dst_pages, dst_perm); });
+ // Ensure the page group is valid for the source.
+ R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), ResultInvalidMemoryRegion);
- R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap));
- R_TRY(Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
- OperationType::ChangePermissions));
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
- block_guard.Cancel();
- }
+ // Unmap the aliased copy of the pages.
+ const KPageProperties dst_unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_TRY(
+ this->Operate(dst_address, num_pages, dst_unmap_properties.perm, OperationType::Unmap));
+
+ // Ensure that we re-map the aliased pages on failure.
+ ON_RESULT_FAILURE {
+ this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg);
+ };
- // Apply the memory block updates.
- m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
- KMemoryBlockDisableMergeAttribute::None,
- KMemoryBlockDisableMergeAttribute::Locked);
- m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
- KMemoryState::None, KMemoryPermission::None,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
- KMemoryBlockDisableMergeAttribute::Normal);
+ // Try to set the permissions for the source pages back to what they should be.
+ const KPageProperties src_properties = {KMemoryPermission::UserReadWrite, false, false,
+ DisableMergeAttribute::EnableAndMergeHeadBodyTail};
+ R_TRY(this->Operate(src_address, num_pages, src_properties.perm,
+ OperationType::ChangePermissions));
+
+ // Apply the memory block updates.
+ m_memory_block_manager.Update(
+ std::addressof(src_allocator), src_address, num_pages, src_state,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked);
+ m_memory_block_manager.Update(
+ std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal);
+ }
R_SUCCEED();
}
-Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,
- KMemoryPermission perm) {
+Result KPageTable::AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
+ size_t num_pages, KMemoryPermission perm) {
ASSERT(this->IsLockedByCurrentThread());
- VAddr cur_addr{addr};
+ // Create a page group to hold the pages we allocate.
+ KPageGroup pg{m_kernel, m_block_info_manager};
- for (const auto& node : page_linked_list) {
- if (const auto result{
- Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())};
- result.IsError()) {
- const size_t num_pages{(addr - cur_addr) / PageSize};
+ // Allocate the pages.
+ R_TRY(
+ m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, m_allocate_option));
- ASSERT(Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap)
- .IsSuccess());
+ // Ensure that the page group is closed when we're done working with it.
+ SCOPE_EXIT({ pg.Close(); });
- R_RETURN(result);
+ // Clear all pages.
+ for (const auto& it : pg) {
+ std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value,
+ it.GetSize());
+ }
+
+ // Map the pages.
+ R_RETURN(this->Operate(address, num_pages, pg, OperationType::MapGroup));
+}
+
+Result KPageTable::MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
+ const KPageGroup& pg, const KPageProperties properties,
+ bool reuse_ll) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ // Note the current address, so that we can iterate.
+ const KProcessAddress start_address = address;
+ KProcessAddress cur_address = address;
+
+ // Ensure that we clean up on failure.
+ ON_RESULT_FAILURE {
+ ASSERT(!reuse_ll);
+ if (cur_address != start_address) {
+ const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ ASSERT(this->Operate(start_address, (cur_address - start_address) / PageSize,
+ unmap_properties.perm, OperationType::Unmap) == ResultSuccess);
}
+ };
- cur_addr += node.GetNumPages() * PageSize;
+ // Iterate, mapping all pages in the group.
+ for (const auto& block : pg) {
+ // Map and advance.
+ const KPageProperties cur_properties =
+ (cur_address == start_address)
+ ? properties
+ : KPageProperties{properties.perm, properties.io, properties.uncached,
+ DisableMergeAttribute::None};
+ this->Operate(cur_address, block.GetNumPages(), cur_properties.perm, OperationType::Map,
+ block.GetAddress());
+ cur_address += block.GetSize();
}
+ // We succeeded!
R_SUCCEED();
}
-Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state,
- KMemoryPermission perm) {
- // Check that the map is in range.
- const size_t num_pages{page_linked_list.GetNumPages()};
- const size_t size{num_pages * PageSize};
- R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
+void KPageTable::RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
+ const KPageGroup& pg) {
+ ASSERT(this->IsLockedByCurrentThread());
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
+ // Note the current address, so that we can iterate.
+ const KProcessAddress start_address = address;
+ const KProcessAddress last_address = start_address + size - 1;
+ const KProcessAddress end_address = last_address + 1;
- // Check the memory state.
- R_TRY(this->CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free,
- KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::None, KMemoryAttribute::None));
+ // Iterate over the memory.
+ auto pg_it = pg.begin();
+ ASSERT(pg_it != pg.end());
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager);
+ KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
+ size_t pg_pages = pg_it->GetNumPages();
- // Map the pages.
- R_TRY(MapPages(address, page_linked_list, perm));
+ auto it = m_memory_block_manager.FindIterator(start_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != m_memory_block_manager.end());
- // Update the blocks.
- m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
- KMemoryBlockDisableMergeAttribute::None);
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
- R_SUCCEED();
+ // Determine the range to map.
+ KProcessAddress map_address = std::max<VAddr>(info.GetAddress(), start_address);
+ const KProcessAddress map_end_address = std::min<VAddr>(info.GetEndAddress(), end_address);
+ ASSERT(map_end_address != map_address);
+
+ // Determine if we should disable head merge.
+ const bool disable_head_merge =
+ info.GetAddress() >= start_address &&
+ True(info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Normal);
+ const KPageProperties map_properties = {
+ info.GetPermission(), false, false,
+ disable_head_merge ? DisableMergeAttribute::DisableHead : DisableMergeAttribute::None};
+
+ // While we have pages to map, map them.
+ size_t map_pages = (map_end_address - map_address) / PageSize;
+ while (map_pages > 0) {
+ // Check if we're at the end of the physical block.
+ if (pg_pages == 0) {
+ // Ensure there are more pages to map.
+ ASSERT(pg_it != pg.end());
+
+ // Advance our physical block.
+ ++pg_it;
+ pg_phys_addr = pg_it->GetAddress();
+ pg_pages = pg_it->GetNumPages();
+ }
+
+ // Map whatever we can.
+ const size_t cur_pages = std::min(pg_pages, map_pages);
+ ASSERT(this->Operate(map_address, map_pages, map_properties.perm, OperationType::Map,
+ pg_phys_addr) == ResultSuccess);
+
+ // Advance.
+ map_address += cur_pages * PageSize;
+ map_pages -= cur_pages;
+
+ pg_phys_addr += cur_pages * PageSize;
+ pg_pages -= cur_pages;
+ }
+
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ ++it;
+ }
+
+ // Check that we re-mapped precisely the page group.
+ ASSERT((++pg_it) == pg.end());
}
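RemapPageGroup consumes two block lists in lockstep: virtual-memory blocks from the block manager on one side, physical extents from the page group on the other, mapping min(pages remaining on both sides) at each step and advancing whichever side ran out. A tiny standalone sketch of that iteration pattern follows, with plain integers standing in for the kernel block types; the values are made up for illustration.

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
    // Pages per virtual memory block and per physical page-group block.
    // Both sides describe the same total number of pages (10 here).
    const std::vector<std::size_t> virt_blocks{3, 5, 2};
    const std::vector<std::size_t> phys_blocks{4, 4, 2};

    std::size_t pi = 0;
    std::size_t phys_left = phys_blocks[pi];
    for (std::size_t virt_left : virt_blocks) {
        while (virt_left > 0) {
            if (phys_left == 0) {
                phys_left = phys_blocks[++pi]; // advance the physical block
            }
            const std::size_t cur = std::min(phys_left, virt_left);
            std::printf("map %zu pages\n", cur); // stands in for Operate(..., Map, ...)
            virt_left -= cur;
            phys_left -= cur;
        }
    }
}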
-Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
- bool is_pa_valid, VAddr region_start, size_t region_num_pages,
+Result KPageTable::MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+ KPhysicalAddress phys_addr, bool is_pa_valid,
+ KProcessAddress region_start, size_t region_num_pages,
KMemoryState state, KMemoryPermission perm) {
ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize);
@@ -2084,26 +2211,30 @@ Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment,
KScopedLightLock lk(m_general_lock);
// Find a random address to map at.
- VAddr addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0,
- this->GetNumGuardPages());
+ KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment,
+ 0, this->GetNumGuardPages());
R_UNLESS(addr != 0, ResultOutOfMemory);
ASSERT(Common::IsAligned(addr, alignment));
ASSERT(this->CanContain(addr, num_pages * PageSize, state));
ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free,
KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::None, KMemoryAttribute::None)
- .IsSuccess());
+ KMemoryAttribute::None, KMemoryAttribute::None) == ResultSuccess);
// Create an update allocator.
- Result allocator_result{ResultSuccess};
+ Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
m_memory_block_slab_manager);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
// Perform mapping operation.
if (is_pa_valid) {
- R_TRY(this->Operate(addr, num_pages, perm, OperationType::Map, phys_addr));
+ const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
+ R_TRY(this->Operate(addr, num_pages, properties.perm, OperationType::Map, phys_addr));
} else {
- UNIMPLEMENTED();
+ R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), addr, num_pages, perm));
}
// Update the blocks.
@@ -2116,28 +2247,45 @@ Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment,
R_SUCCEED();
}
-Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {
- ASSERT(this->IsLockedByCurrentThread());
+Result KPageTable::MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
+ KMemoryPermission perm) {
+ // Check that the map is in range.
+ const size_t size = num_pages * PageSize;
+ R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
- VAddr cur_addr{addr};
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
- for (const auto& node : page_linked_list) {
- if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None,
- OperationType::Unmap)};
- result.IsError()) {
- R_RETURN(result);
- }
+ // Check the memory state.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+ KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryAttribute::None));
- cur_addr += node.GetNumPages() * PageSize;
- }
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Map the pages.
+ R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), address, num_pages, perm));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
R_SUCCEED();
}
-Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state) {
+Result KPageTable::UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) {
// Check that the unmap is in range.
- const size_t num_pages{page_linked_list.GetNumPages()};
- const size_t size{num_pages * PageSize};
+ const size_t size = num_pages * PageSize;
R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
// Lock the table.
@@ -2151,13 +2299,18 @@ Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemo
KMemoryAttribute::None));
// Create an update allocator.
- Result allocator_result{ResultSuccess};
+ Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
// Perform the unmap.
- R_TRY(UnmapPages(address, page_linked_list));
+ const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(address, num_pages, unmap_properties.perm, OperationType::Unmap));
// Update the blocks.
m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
@@ -2168,29 +2321,130 @@ Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemo
R_SUCCEED();
}
-Result KPageTable::UnmapPages(VAddr address, size_t num_pages, KMemoryState state) {
- // Check that the unmap is in range.
+Result KPageTable::MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
+ KProcessAddress region_start, size_t region_num_pages,
+ KMemoryState state, KMemoryPermission perm) {
+ ASSERT(!this->IsLockedByCurrentThread());
+
+ // Ensure this is a valid map request.
+ const size_t num_pages = pg.GetNumPages();
+ R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state),
+ ResultInvalidCurrentMemory);
+ R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Find a random address to map at.
+ KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, PageSize,
+ 0, this->GetNumGuardPages());
+ R_UNLESS(addr != 0, ResultOutOfMemory);
+ ASSERT(this->CanContain(addr, num_pages * PageSize, state));
+ ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::None, KMemoryAttribute::None) == ResultSuccess);
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Perform mapping operation.
+ const KPageProperties properties = {perm, state == KMemoryState::Io, false,
+ DisableMergeAttribute::DisableHead};
+ R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // We successfully mapped the pages.
+ *out_addr = addr;
+ R_SUCCEED();
+}
+
+Result KPageTable::MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMemoryState state,
+ KMemoryPermission perm) {
+ ASSERT(!this->IsLockedByCurrentThread());
+
+ // Ensure this is a valid map request.
+ const size_t num_pages = pg.GetNumPages();
const size_t size = num_pages * PageSize;
- R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+ R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory);
// Lock the table.
KScopedLightLock lk(m_general_lock);
- // Check the memory state.
- size_t num_allocator_blocks{};
+ // Check if state allows us to map.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), addr, size,
+ KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Perform mapping operation.
+ const KPageProperties properties = {perm, state == KMemoryState::Io, false,
+ DisableMergeAttribute::DisableHead};
+ R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // We successfully mapped the pages.
+ R_SUCCEED();
+}
+
+Result KPageTable::UnmapPageGroup(KProcessAddress address, const KPageGroup& pg,
+ KMemoryState state) {
+ ASSERT(!this->IsLockedByCurrentThread());
+
+ // Ensure this is a valid unmap request.
+ const size_t num_pages = pg.GetNumPages();
+ const size_t size = num_pages * PageSize;
+ R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check if state allows us to unmap.
+ size_t num_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
KMemoryState::All, state, KMemoryPermission::None,
KMemoryPermission::None, KMemoryAttribute::All,
KMemoryAttribute::None));
+ // Check that the page group is valid.
+ R_UNLESS(this->IsValidPageGroup(pg, address, num_pages), ResultInvalidCurrentMemory);
+
// Create an update allocator.
- Result allocator_result{ResultSuccess};
+ Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
- // Perform the unmap.
- R_TRY(Operate(address, num_pages, KMemoryPermission::None, OperationType::Unmap));
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Perform unmapping operation.
+ const KPageProperties properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(address, num_pages, properties.perm, OperationType::Unmap));
// Update the blocks.
m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
@@ -2550,54 +2804,6 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
}
}
-ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_t align,
- bool is_map_only, VAddr region_start,
- size_t region_num_pages, KMemoryState state,
- KMemoryPermission perm, PAddr map_addr) {
- KScopedLightLock lk(m_general_lock);
-
- R_UNLESS(CanContain(region_start, region_num_pages * PageSize, state),
- ResultInvalidCurrentMemory);
- R_UNLESS(region_num_pages > needed_num_pages, ResultOutOfMemory);
- const VAddr addr{
- AllocateVirtualMemory(region_start, region_num_pages, needed_num_pages, align)};
- R_UNLESS(addr, ResultOutOfMemory);
-
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager);
-
- if (is_map_only) {
- R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
- } else {
- // Create a page group tohold the pages we allocate.
- KPageGroup pg{m_kernel, m_block_info_manager};
-
- R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
- &pg, needed_num_pages,
- KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));
-
- // Ensure that the page group is closed when we're done working with it.
- SCOPE_EXIT({ pg.Close(); });
-
- // Clear all pages.
- for (const auto& it : pg) {
- std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()),
- m_heap_fill_value, it.GetSize());
- }
-
- R_TRY(Operate(addr, needed_num_pages, pg, OperationType::MapGroup));
- }
-
- // Update the blocks.
- m_memory_block_manager.Update(std::addressof(allocator), addr, needed_num_pages, state, perm,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
- KMemoryBlockDisableMergeAttribute::None);
-
- return addr;
-}
-
Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size,
KMemoryPermission perm, bool is_aligned,
bool check_heap) {
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 0a454b05b..367dab613 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -24,12 +24,36 @@ class System;
namespace Kernel {
+enum class DisableMergeAttribute : u8 {
+ None = (0U << 0),
+ DisableHead = (1U << 0),
+ DisableHeadAndBody = (1U << 1),
+ EnableHeadAndBody = (1U << 2),
+ DisableTail = (1U << 3),
+ EnableTail = (1U << 4),
+ EnableAndMergeHeadBodyTail = (1U << 5),
+ EnableHeadBodyTail = EnableHeadAndBody | EnableTail,
+ DisableHeadBodyTail = DisableHeadAndBody | DisableTail,
+};
+
+struct KPageProperties {
+ KMemoryPermission perm;
+ bool io;
+ bool uncached;
+ DisableMergeAttribute disable_merge_attributes;
+};
+static_assert(std::is_trivial_v<KPageProperties>);
+static_assert(sizeof(KPageProperties) == sizeof(u32));
+
class KBlockInfoManager;
class KMemoryBlockManager;
class KResourceLimit;
class KSystemResource;
class KPageTable final {
+protected:
+ struct PageLinkedList;
+
public:
enum class ICacheInvalidationStrategy : u32 { InvalidateRange, InvalidateAll };
@@ -57,27 +81,12 @@ public:
Result UnmapPhysicalMemory(VAddr addr, size_t size);
Result MapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
Result UnmapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
- Result MapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state,
- KMemoryPermission perm);
- Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
- KMemoryState state, KMemoryPermission perm) {
- R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
- this->GetRegionAddress(state),
- this->GetRegionSize(state) / PageSize, state, perm));
- }
- Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state);
- Result UnmapPages(VAddr address, size_t num_pages, KMemoryState state);
Result SetProcessMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm);
KMemoryInfo QueryInfo(VAddr addr);
Result SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission perm);
Result SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr);
Result SetMaxHeapSize(size_t size);
Result SetHeapSize(VAddr* out, size_t size);
- ResultVal<VAddr> AllocateAndMapMemory(size_t needed_num_pages, size_t align, bool is_map_only,
- VAddr region_start, size_t region_num_pages,
- KMemoryState state, KMemoryPermission perm,
- PAddr map_addr = 0);
-
Result LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size,
KMemoryPermission perm, bool is_aligned, bool check_heap);
Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size, bool check_heap);
@@ -113,6 +122,40 @@ public:
bool CanContain(VAddr addr, size_t size, KMemoryState state) const;
+ Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+ KPhysicalAddress phys_addr, KProcessAddress region_start,
+ size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
+ R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start,
+ region_num_pages, state, perm));
+ }
+
+ Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+ KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
+ R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
+ this->GetRegionAddress(state),
+ this->GetRegionSize(state) / PageSize, state, perm));
+ }
+
+ Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state,
+ KMemoryPermission perm) {
+ R_RETURN(this->MapPages(out_addr, num_pages, PageSize, 0, false,
+ this->GetRegionAddress(state),
+ this->GetRegionSize(state) / PageSize, state, perm));
+ }
+
+ Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
+ KMemoryPermission perm);
+ Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state);
+
+ Result MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
+ KProcessAddress region_start, size_t region_num_pages, KMemoryState state,
+ KMemoryPermission perm);
+ Result MapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state,
+ KMemoryPermission perm);
+ Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state);
+ void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
+ const KPageGroup& pg);
+
protected:
struct PageLinkedList {
private:
@@ -166,11 +209,9 @@ private:
static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr =
KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
- Result MapPages(VAddr addr, const KPageGroup& page_linked_list, KMemoryPermission perm);
- Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
- bool is_pa_valid, VAddr region_start, size_t region_num_pages,
- KMemoryState state, KMemoryPermission perm);
- Result UnmapPages(VAddr addr, const KPageGroup& page_linked_list);
+ Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+ KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start,
+ size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
bool IsRegionContiguous(VAddr addr, u64 size) const;
void AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list);
KMemoryInfo QueryInfoImpl(VAddr addr);
@@ -265,6 +306,11 @@ private:
void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, VAddr address,
size_t size, KMemoryPermission prot_perm);
+ Result AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
+ size_t num_pages, KMemoryPermission perm);
+ Result MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
+ const KPageGroup& pg, const KPageProperties properties, bool reuse_ll);
+
mutable KLightLock m_general_lock;
mutable KLightLock m_map_physical_memory_lock;
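The header now exposes several thin MapPages overloads that all funnel into one private implementation, differing only in whether a caller supplies a physical address and in which region defaults get filled in. The following is a small self-contained sketch of that forwarding pattern; the types and member names are illustrative only, not the real kernel API.

#include <cstddef>
#include <cstdint>
#include <cstdio>

using Address = std::uint64_t;

class PageTableSketch {
public:
    // Map at a caller-chosen physical address inside an explicit region.
    bool MapPages(Address* out, std::size_t num_pages, Address phys, Address region_start,
                  std::size_t region_pages) {
        return this->MapPagesImpl(out, num_pages, phys, /*pa_valid=*/true, region_start,
                                  region_pages);
    }

    // Allocate fresh pages somewhere in the default region.
    bool MapPages(Address* out, std::size_t num_pages) {
        return this->MapPagesImpl(out, num_pages, 0, /*pa_valid=*/false, m_default_region_start,
                                  m_default_region_pages);
    }

private:
    bool MapPagesImpl(Address* out, std::size_t num_pages, Address phys, bool pa_valid,
                      Address region_start, std::size_t region_pages) {
        if (num_pages > region_pages) {
            return false;
        }
        *out = region_start; // pretend the first fit is at the region start
        std::printf("mapped %zu pages (pa_valid=%d, phys=0x%llx)\n", num_pages, pa_valid,
                    static_cast<unsigned long long>(phys));
        return true;
    }

    Address m_default_region_start{0x10000000};
    std::size_t m_default_region_pages{0x10000};
};

The allocating overload (no physical address) is the one KProcess::AllocateMainThreadStack uses further down in this diff to allocate and map the main thread stack.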
diff --git a/src/core/hle/kernel/k_priority_queue.h b/src/core/hle/kernel/k_priority_queue.h
index cb2512b0b..645c5b531 100644
--- a/src/core/hle/kernel/k_priority_queue.h
+++ b/src/core/hle/kernel/k_priority_queue.h
@@ -17,35 +17,41 @@ namespace Kernel {
class KThread;
template <typename T>
-concept KPriorityQueueAffinityMask = !std::is_reference_v<T> && requires(T & t) {
- { t.GetAffinityMask() } -> Common::ConvertibleTo<u64>;
- {t.SetAffinityMask(0)};
+concept KPriorityQueueAffinityMask = !
+std::is_reference_v<T>&& requires(T& t) {
+ { t.GetAffinityMask() } -> Common::ConvertibleTo<u64>;
+ { t.SetAffinityMask(0) };
- { t.GetAffinity(0) } -> std::same_as<bool>;
- {t.SetAffinity(0, false)};
- {t.SetAll()};
-};
+ { t.GetAffinity(0) } -> std::same_as<bool>;
+ { t.SetAffinity(0, false) };
+ { t.SetAll() };
+ };
template <typename T>
-concept KPriorityQueueMember = !std::is_reference_v<T> && requires(T & t) {
- {typename T::QueueEntry()};
- {(typename T::QueueEntry()).Initialize()};
- {(typename T::QueueEntry()).SetPrev(std::addressof(t))};
- {(typename T::QueueEntry()).SetNext(std::addressof(t))};
- { (typename T::QueueEntry()).GetNext() } -> std::same_as<T*>;
- { (typename T::QueueEntry()).GetPrev() } -> std::same_as<T*>;
- { t.GetPriorityQueueEntry(0) } -> std::same_as<typename T::QueueEntry&>;
-
- {t.GetAffinityMask()};
- { std::remove_cvref_t<decltype(t.GetAffinityMask())>() } -> KPriorityQueueAffinityMask;
-
- { t.GetActiveCore() } -> Common::ConvertibleTo<s32>;
- { t.GetPriority() } -> Common::ConvertibleTo<s32>;
- { t.IsDummyThread() } -> Common::ConvertibleTo<bool>;
-};
+concept KPriorityQueueMember = !
+std::is_reference_v<T>&& requires(T& t) {
+ { typename T::QueueEntry() };
+ { (typename T::QueueEntry()).Initialize() };
+ { (typename T::QueueEntry()).SetPrev(std::addressof(t)) };
+ { (typename T::QueueEntry()).SetNext(std::addressof(t)) };
+ { (typename T::QueueEntry()).GetNext() } -> std::same_as<T*>;
+ { (typename T::QueueEntry()).GetPrev() } -> std::same_as<T*>;
+ {
+ t.GetPriorityQueueEntry(0)
+ } -> std::same_as<typename T::QueueEntry&>;
+
+ { t.GetAffinityMask() };
+ {
+ std::remove_cvref_t<decltype(t.GetAffinityMask())>()
+ } -> KPriorityQueueAffinityMask;
+
+ { t.GetActiveCore() } -> Common::ConvertibleTo<s32>;
+ { t.GetPriority() } -> Common::ConvertibleTo<s32>;
+ { t.IsDummyThread() } -> Common::ConvertibleTo<bool>;
+ };
template <typename Member, size_t NumCores_, int LowestPriority, int HighestPriority>
-requires KPriorityQueueMember<Member>
+ requires KPriorityQueueMember<Member>
class KPriorityQueue {
public:
using AffinityMaskType = std::remove_cv_t<
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index a1abf5d68..e201bb0cd 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -417,9 +417,8 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
}
void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
- AllocateMainThreadStack(stack_size);
+ ASSERT(AllocateMainThreadStack(stack_size) == ResultSuccess);
resource_limit->Reserve(LimitableResource::ThreadCountMax, 1);
- resource_limit->Reserve(LimitableResource::PhysicalMemoryMax, main_thread_stack_size);
const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)};
ASSERT(!page_table.SetMaxHeapSize(heap_capacity).IsError());
@@ -675,20 +674,31 @@ void KProcess::ChangeState(State new_state) {
}
Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
- ASSERT(stack_size);
-
- // The kernel always ensures that the given stack size is page aligned.
- main_thread_stack_size = Common::AlignUp(stack_size, PageSize);
-
- const VAddr start{page_table.GetStackRegionStart()};
- const std::size_t size{page_table.GetStackRegionEnd() - start};
-
- CASCADE_RESULT(main_thread_stack_top,
- page_table.AllocateAndMapMemory(
- main_thread_stack_size / PageSize, PageSize, false, start, size / PageSize,
- KMemoryState::Stack, KMemoryPermission::UserReadWrite));
+ // Ensure that we haven't already allocated stack.
+ ASSERT(main_thread_stack_size == 0);
+
+ // Ensure that we're allocating a valid stack.
+ stack_size = Common::AlignUp(stack_size, PageSize);
+ // R_UNLESS(stack_size + image_size <= m_max_process_memory, ResultOutOfMemory);
+ R_UNLESS(stack_size + image_size >= image_size, ResultOutOfMemory);
+
+ // Place a tentative reservation of memory for our new stack.
+ KScopedResourceReservation mem_reservation(this, Svc::LimitableResource::PhysicalMemoryMax,
+ stack_size);
+ R_UNLESS(mem_reservation.Succeeded(), ResultLimitReached);
+
+ // Allocate and map our stack.
+ if (stack_size) {
+ KProcessAddress stack_bottom;
+ R_TRY(page_table.MapPages(std::addressof(stack_bottom), stack_size / PageSize,
+ KMemoryState::Stack, KMemoryPermission::UserReadWrite));
+
+ main_thread_stack_top = stack_bottom + stack_size;
+ main_thread_stack_size = stack_size;
+ }
- main_thread_stack_top += main_thread_stack_size;
+ // We succeeded! Commit our memory reservation.
+ mem_reservation.Commit();
R_SUCCEED();
}
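AllocateMainThreadStack now follows a reserve, map, commit sequence: memory is tentatively charged against the process resource limit, and the charge is only kept once the stack pages were actually mapped. Below is a minimal sketch of that pattern, with a hypothetical ScopedReservation standing in for KScopedResourceReservation; it models the idea, not the real API.

#include <cstddef>

// Hypothetical ScopedReservation: charge an amount against a limit up front,
// roll the charge back unless Commit() is called before the scope ends.
class ScopedReservation {
public:
    ScopedReservation(std::size_t* used, std::size_t limit, std::size_t amount)
        : m_used(used), m_amount(amount), m_ok(*used + amount <= limit) {
        if (m_ok) {
            *m_used += m_amount;
        }
    }
    ~ScopedReservation() {
        if (m_ok && !m_committed) {
            *m_used -= m_amount; // never committed: release the charge
        }
    }
    bool Succeeded() const {
        return m_ok;
    }
    void Commit() {
        m_committed = true;
    }

private:
    std::size_t* m_used;
    std::size_t m_amount;
    bool m_ok;
    bool m_committed{false};
};

// Shape of the new AllocateMainThreadStack: reserve, then map, then commit.
bool AllocateStack(std::size_t* used, std::size_t limit, std::size_t stack_size) {
    ScopedReservation reservation(used, limit, stack_size);
    if (!reservation.Succeeded()) {
        return false; // ResultLimitReached in the real code
    }
    const bool mapped = true; // stands in for page_table.MapPages(...)
    if (!mapped) {
        return false; // reservation destructor rolls the charge back
    }
    reservation.Commit(); // keep the charge now that the stack is mapped
    return true;
}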
diff --git a/src/core/hle/kernel/k_scoped_lock.h b/src/core/hle/kernel/k_scoped_lock.h
index 857e21156..59b3e32ae 100644
--- a/src/core/hle/kernel/k_scoped_lock.h
+++ b/src/core/hle/kernel/k_scoped_lock.h
@@ -9,13 +9,14 @@
namespace Kernel {
template <typename T>
-concept KLockable = !std::is_reference_v<T> && requires(T & t) {
- { t.Lock() } -> std::same_as<void>;
- { t.Unlock() } -> std::same_as<void>;
-};
+concept KLockable = !
+std::is_reference_v<T>&& requires(T& t) {
+ { t.Lock() } -> std::same_as<void>;
+ { t.Unlock() } -> std::same_as<void>;
+ };
template <typename T>
-requires KLockable<T>
+ requires KLockable<T>
class [[nodiscard]] KScopedLock {
public:
explicit KScopedLock(T* l) : lock_ptr(l) {
diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp
index 3cf2b5d91..df505edfe 100644
--- a/src/core/hle/kernel/k_shared_memory.cpp
+++ b/src/core/hle/kernel/k_shared_memory.cpp
@@ -94,15 +94,15 @@ Result KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t m
R_UNLESS(map_perm == test_perm, ResultInvalidNewMemoryPermission);
}
- return target_process.PageTable().MapPages(address, *page_group, KMemoryState::Shared,
- ConvertToKMemoryPermission(map_perm));
+ return target_process.PageTable().MapPageGroup(address, *page_group, KMemoryState::Shared,
+ ConvertToKMemoryPermission(map_perm));
}
Result KSharedMemory::Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size) {
// Validate the size.
R_UNLESS(size == unmap_size, ResultInvalidSize);
- return target_process.PageTable().UnmapPages(address, *page_group, KMemoryState::Shared);
+ return target_process.PageTable().UnmapPageGroup(address, *page_group, KMemoryState::Shared);
}
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 21207fe99..84ff3c64b 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -330,7 +330,7 @@ void KThread::Finalize() {
KThread* const waiter = std::addressof(*it);
// The thread shouldn't be a kernel waiter.
- ASSERT(!IsKernelAddressKey(waiter->GetAddressKey()));
+ ASSERT(!waiter->GetAddressKeyIsKernel());
// Clear the lock owner.
waiter->SetLockOwner(nullptr);
@@ -763,19 +763,6 @@ void KThread::Continue() {
KScheduler::OnThreadStateChanged(kernel, this, old_state);
}
-void KThread::WaitUntilSuspended() {
- // Make sure we have a suspend requested.
- ASSERT(IsSuspendRequested());
-
- // Loop until the thread is not executing on any core.
- for (std::size_t i = 0; i < static_cast<std::size_t>(Core::Hardware::NUM_CPU_CORES); ++i) {
- KThread* core_thread{};
- do {
- core_thread = kernel.Scheduler(i).GetSchedulerCurrentThread();
- } while (core_thread == this);
- }
-}
-
Result KThread::SetActivity(Svc::ThreadActivity activity) {
// Lock ourselves.
KScopedLightLock lk(activity_pause_lock);
@@ -897,7 +884,7 @@ void KThread::AddWaiterImpl(KThread* thread) {
}
// Keep track of how many kernel waiters we have.
- if (IsKernelAddressKey(thread->GetAddressKey())) {
+ if (thread->GetAddressKeyIsKernel()) {
ASSERT((num_kernel_waiters++) >= 0);
KScheduler::SetSchedulerUpdateNeeded(kernel);
}
@@ -911,7 +898,7 @@ void KThread::RemoveWaiterImpl(KThread* thread) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Keep track of how many kernel waiters we have.
- if (IsKernelAddressKey(thread->GetAddressKey())) {
+ if (thread->GetAddressKeyIsKernel()) {
ASSERT((num_kernel_waiters--) > 0);
KScheduler::SetSchedulerUpdateNeeded(kernel);
}
@@ -987,7 +974,7 @@ KThread* KThread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
KThread* thread = std::addressof(*it);
// Keep track of how many kernel waiters we have.
- if (IsKernelAddressKey(thread->GetAddressKey())) {
+ if (thread->GetAddressKeyIsKernel()) {
ASSERT((num_kernel_waiters--) > 0);
KScheduler::SetSchedulerUpdateNeeded(kernel);
}
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index 7cd94a340..8b8dc51be 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -214,8 +214,6 @@ public:
void Continue();
- void WaitUntilSuspended();
-
constexpr void SetSyncedIndex(s32 index) {
synced_index = index;
}
@@ -607,13 +605,30 @@ public:
return address_key_value;
}
- void SetAddressKey(VAddr key) {
+ [[nodiscard]] bool GetAddressKeyIsKernel() const {
+ return address_key_is_kernel;
+ }
+
+ //! NB: intentional deviation from official kernel.
+ //
+ // Separate SetAddressKey into user and kernel versions
+ // to cope with arbitrary host pointers making their way
+ // into things.
+
+ void SetUserAddressKey(VAddr key) {
address_key = key;
+ address_key_is_kernel = false;
}
- void SetAddressKey(VAddr key, u32 val) {
+ void SetUserAddressKey(VAddr key, u32 val) {
address_key = key;
address_key_value = val;
+ address_key_is_kernel = false;
+ }
+
+ void SetKernelAddressKey(VAddr key) {
+ address_key = key;
+ address_key_is_kernel = true;
}
void ClearWaitQueue() {
@@ -662,7 +677,7 @@ private:
union SyncObjectBuffer {
std::array<KSynchronizationObject*, Svc::ArgumentHandleCountMax> sync_objects{};
std::array<Handle,
- Svc::ArgumentHandleCountMax*(sizeof(KSynchronizationObject*) / sizeof(Handle))>
+ Svc::ArgumentHandleCountMax * (sizeof(KSynchronizationObject*) / sizeof(Handle))>
handles;
constexpr SyncObjectBuffer() {}
};
@@ -683,10 +698,8 @@ private:
};
template <typename T>
- requires(
- std::same_as<T, KThread> ||
- std::same_as<T, RedBlackKeyType>) static constexpr int Compare(const T& lhs,
- const KThread& rhs) {
+ requires(std::same_as<T, KThread> || std::same_as<T, RedBlackKeyType>)
+ static constexpr int Compare(const T& lhs, const KThread& rhs) {
const u64 l_key = lhs.GetConditionVariableKey();
const u64 r_key = rhs.GetConditionVariableKey();
@@ -772,6 +785,7 @@ private:
bool debug_attached{};
s8 priority_inheritance_count{};
bool resource_limit_release_hint{};
+ bool address_key_is_kernel{};
StackParameters stack_parameters{};
Common::SpinLock context_guard{};
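The new address_key_is_kernel flag replaces the old IsKernelAddressKey(addr) range check, which stops working once arbitrary host pointers can appear as keys; only kernel-keyed waiters feed the num_kernel_waiters bookkeeping that forces a scheduler update. A compact sketch of that distinction follows, using illustrative types rather than the real KThread.

#include <cassert>
#include <cstdint>
#include <initializer_list>

struct ThreadModel {
    std::uint64_t address_key{};
    bool address_key_is_kernel{};

    void SetUserAddressKey(std::uint64_t key) {
        address_key = key;
        address_key_is_kernel = false;
    }
    void SetKernelAddressKey(std::uint64_t key) {
        address_key = key;
        address_key_is_kernel = true;
    }
};

int main() {
    int num_kernel_waiters = 0;
    ThreadModel user_waiter;
    ThreadModel kernel_waiter;
    user_waiter.SetUserAddressKey(0x20000000);
    kernel_waiter.SetKernelAddressKey(0x7fff00001000);

    // Mirrors AddWaiterImpl: only kernel-keyed waiters bump the counter.
    for (const ThreadModel* t : {&user_waiter, &kernel_waiter}) {
        if (t->address_key_is_kernel) {
            ++num_kernel_waiters;
        }
    }
    assert(num_kernel_waiters == 1);
    return 0;
}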
diff --git a/src/core/hle/kernel/k_thread_local_page.h b/src/core/hle/kernel/k_thread_local_page.h
index fe0cff084..71254eb55 100644
--- a/src/core/hle/kernel/k_thread_local_page.h
+++ b/src/core/hle/kernel/k_thread_local_page.h
@@ -70,10 +70,8 @@ public:
}
template <typename T>
- requires(std::same_as<T, KThreadLocalPage> ||
- std::same_as<T, RedBlackKeyType>) static constexpr int Compare(const T& lhs,
- const KThreadLocalPage&
- rhs) {
+ requires(std::same_as<T, KThreadLocalPage> || std::same_as<T, RedBlackKeyType>)
+ static constexpr int Compare(const T& lhs, const KThreadLocalPage& rhs) {
const VAddr lval = GetRedBlackKey(lhs);
const VAddr rval = GetRedBlackKey(rhs);
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 1fb25f221..d9eafe261 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -1198,28 +1198,35 @@ void KernelCore::Suspend(bool suspended) {
const bool should_suspend{exception_exited || suspended};
const auto activity = should_suspend ? ProcessActivity::Paused : ProcessActivity::Runnable;
- std::vector<KScopedAutoObject<KThread>> process_threads;
- {
- KScopedSchedulerLock sl{*this};
+ //! This refers to the application process, not the current process.
+ KScopedAutoObject<KProcess> process = CurrentProcess();
+ if (process.IsNull()) {
+ return;
+ }
- if (auto* process = CurrentProcess(); process != nullptr) {
- process->SetActivity(activity);
+ // Set the new activity.
+ process->SetActivity(activity);
- if (!should_suspend) {
- // Runnable now; no need to wait.
- return;
- }
+ // Wait for process execution to stop.
+ bool must_wait{should_suspend};
+
+ // KernelCore::Suspend must be called from locked context, or we
+ // could race another call to SetActivity, interfering with waiting.
+ while (must_wait) {
+ KScopedSchedulerLock sl{*this};
+
+ // Assume that all threads have finished running.
+ must_wait = false;
- for (auto* thread : process->GetThreadList()) {
- process_threads.emplace_back(thread);
+ for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
+ if (Scheduler(i).GetSchedulerCurrentThread()->GetOwnerProcess() ==
+ process.GetPointerUnsafe()) {
+ // A thread has not finished running yet.
+ // Continue waiting.
+ must_wait = true;
}
}
}
-
- // Wait for execution to stop.
- for (auto& thread : process_threads) {
- thread->WaitUntilSuspended();
- }
}
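Suspend now waits by repeatedly checking, under the scheduler lock, whether any core is still running a thread owned by the application process, instead of blocking on each thread individually (the removed WaitUntilSuspended path). A stripped-down sketch of that check is below; the types are hypothetical and the real loop re-takes the scheduler lock between passes so the scheduler can switch the remaining threads out.

#include <array>

struct ProcessTag {};

// One entry per CPU core: which process owns the thread currently scheduled there.
struct CoreStatus {
    const ProcessTag* running_owner{nullptr};
};

// True while at least one core still runs a thread of the suspended process.
bool StillRunning(const std::array<CoreStatus, 4>& cores, const ProcessTag* process) {
    for (const CoreStatus& core : cores) {
        if (core.running_owner == process) {
            return true;
        }
    }
    return false;
}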
void KernelCore::ShutdownCores() {
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 8d22f8d2c..5f52e1e95 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -35,6 +35,7 @@ class GlobalSchedulerContext;
class KAutoObjectWithListContainer;
class KClientSession;
class KDebug;
+class KDeviceAddressSpace;
class KDynamicPageManager;
class KEvent;
class KEventInfo;
@@ -359,6 +360,8 @@ public:
return slab_heap_container->transfer_memory;
} else if constexpr (std::is_same_v<T, KCodeMemory>) {
return slab_heap_container->code_memory;
+ } else if constexpr (std::is_same_v<T, KDeviceAddressSpace>) {
+ return slab_heap_container->device_address_space;
} else if constexpr (std::is_same_v<T, KPageBuffer>) {
return slab_heap_container->page_buffer;
} else if constexpr (std::is_same_v<T, KThreadLocalPage>) {
@@ -431,6 +434,7 @@ private:
KSlabHeap<KThread> thread;
KSlabHeap<KTransferMemory> transfer_memory;
KSlabHeap<KCodeMemory> code_memory;
+ KSlabHeap<KDeviceAddressSpace> device_address_space;
KSlabHeap<KPageBuffer> page_buffer;
KSlabHeap<KThreadLocalPage> thread_local_page;
KSlabHeap<KSessionRequest> session_request;
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index aca442196..4cb6f40a0 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -1,2697 +1,16 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
-#include <algorithm>
-#include <cinttypes>
-#include <iterator>
-#include <mutex>
-#include <vector>
-
-#include "common/alignment.h"
-#include "common/assert.h"
-#include "common/common_funcs.h"
-#include "common/fiber.h"
-#include "common/logging/log.h"
-#include "common/scope_exit.h"
-#include "core/core.h"
-#include "core/core_timing.h"
-#include "core/debugger/debugger.h"
-#include "core/hle/kernel/k_client_port.h"
-#include "core/hle/kernel/k_client_session.h"
-#include "core/hle/kernel/k_code_memory.h"
-#include "core/hle/kernel/k_event.h"
-#include "core/hle/kernel/k_handle_table.h"
-#include "core/hle/kernel/k_memory_block.h"
-#include "core/hle/kernel/k_memory_layout.h"
-#include "core/hle/kernel/k_page_table.h"
-#include "core/hle/kernel/k_port.h"
+#include "common/common_types.h"
#include "core/hle/kernel/k_process.h"
-#include "core/hle/kernel/k_readable_event.h"
-#include "core/hle/kernel/k_resource_limit.h"
-#include "core/hle/kernel/k_scheduler.h"
-#include "core/hle/kernel/k_scoped_resource_reservation.h"
-#include "core/hle/kernel/k_session.h"
-#include "core/hle/kernel/k_shared_memory.h"
-#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread.h"
-#include "core/hle/kernel/k_thread_queue.h"
-#include "core/hle/kernel/k_transfer_memory.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/svc.h"
-#include "core/hle/kernel/svc_results.h"
-#include "core/hle/kernel/svc_types.h"
#include "core/hle/kernel/svc_wrap.h"
#include "core/hle/result.h"
-#include "core/memory.h"
-#include "core/reporter.h"
namespace Kernel::Svc {
namespace {
-// Checks if address + size is greater than the given address
-// This can return false if the size causes an overflow of a 64-bit type
-// or if the given size is zero.
-constexpr bool IsValidAddressRange(VAddr address, u64 size) {
- return address + size > address;
-}
-
-// Helper function that performs the common sanity checks for svcMapMemory
-// and svcUnmapMemory. This is doable, as both functions perform their sanitizing
-// in the same order.
-Result MapUnmapMemorySanityChecks(const KPageTable& manager, VAddr dst_addr, VAddr src_addr,
- u64 size) {
- if (!Common::Is4KBAligned(dst_addr)) {
- LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr);
- return ResultInvalidAddress;
- }
-
- if (!Common::Is4KBAligned(src_addr)) {
- LOG_ERROR(Kernel_SVC, "Source address is not aligned to 4KB, 0x{:016X}", src_addr);
- return ResultInvalidSize;
- }
-
- if (size == 0) {
- LOG_ERROR(Kernel_SVC, "Size is 0");
- return ResultInvalidSize;
- }
-
- if (!Common::Is4KBAligned(size)) {
- LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:016X}", size);
- return ResultInvalidSize;
- }
-
- if (!IsValidAddressRange(dst_addr, size)) {
- LOG_ERROR(Kernel_SVC,
- "Destination is not a valid address range, addr=0x{:016X}, size=0x{:016X}",
- dst_addr, size);
- return ResultInvalidCurrentMemory;
- }
-
- if (!IsValidAddressRange(src_addr, size)) {
- LOG_ERROR(Kernel_SVC, "Source is not a valid address range, addr=0x{:016X}, size=0x{:016X}",
- src_addr, size);
- return ResultInvalidCurrentMemory;
- }
-
- if (!manager.IsInsideAddressSpace(src_addr, size)) {
- LOG_ERROR(Kernel_SVC,
- "Source is not within the address space, addr=0x{:016X}, size=0x{:016X}",
- src_addr, size);
- return ResultInvalidCurrentMemory;
- }
-
- if (manager.IsOutsideStackRegion(dst_addr, size)) {
- LOG_ERROR(Kernel_SVC,
- "Destination is not within the stack region, addr=0x{:016X}, size=0x{:016X}",
- dst_addr, size);
- return ResultInvalidMemoryRegion;
- }
-
- if (manager.IsInsideHeapRegion(dst_addr, size)) {
- LOG_ERROR(Kernel_SVC,
- "Destination does not fit within the heap region, addr=0x{:016X}, "
- "size=0x{:016X}",
- dst_addr, size);
- return ResultInvalidMemoryRegion;
- }
-
- if (manager.IsInsideAliasRegion(dst_addr, size)) {
- LOG_ERROR(Kernel_SVC,
- "Destination does not fit within the map region, addr=0x{:016X}, "
- "size=0x{:016X}",
- dst_addr, size);
- return ResultInvalidMemoryRegion;
- }
-
- return ResultSuccess;
-}
-
-enum class ResourceLimitValueType {
- CurrentValue,
- LimitValue,
- PeakValue,
-};
-
-} // Anonymous namespace
-
-/// Set the process heap to a given Size. It can both extend and shrink the heap.
-static Result SetHeapSize(Core::System& system, VAddr* out_address, u64 size) {
- LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", size);
-
- // Validate size.
- R_UNLESS(Common::IsAligned(size, HeapSizeAlignment), ResultInvalidSize);
- R_UNLESS(size < MainMemorySizeMax, ResultInvalidSize);
-
- // Set the heap size.
- R_TRY(system.Kernel().CurrentProcess()->PageTable().SetHeapSize(out_address, size));
-
- return ResultSuccess;
-}
-
-static Result SetHeapSize32(Core::System& system, u32* heap_addr, u32 heap_size) {
- VAddr temp_heap_addr{};
- const Result result{SetHeapSize(system, &temp_heap_addr, heap_size)};
- *heap_addr = static_cast<u32>(temp_heap_addr);
- return result;
-}
-
-constexpr bool IsValidSetMemoryPermission(MemoryPermission perm) {
- switch (perm) {
- case MemoryPermission::None:
- case MemoryPermission::Read:
- case MemoryPermission::ReadWrite:
- return true;
- default:
- return false;
- }
-}
-
-static Result SetMemoryPermission(Core::System& system, VAddr address, u64 size,
- MemoryPermission perm) {
- LOG_DEBUG(Kernel_SVC, "called, address=0x{:016X}, size=0x{:X}, perm=0x{:08X", address, size,
- perm);
-
- // Validate address / size.
- R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
- R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
- R_UNLESS(size > 0, ResultInvalidSize);
- R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
-
- // Validate the permission.
- R_UNLESS(IsValidSetMemoryPermission(perm), ResultInvalidNewMemoryPermission);
-
- // Validate that the region is in range for the current process.
- auto& page_table = system.Kernel().CurrentProcess()->PageTable();
- R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
-
- // Set the memory attribute.
- return page_table.SetMemoryPermission(address, size, perm);
-}
-
-static Result SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask,
- u32 attr) {
- LOG_DEBUG(Kernel_SVC,
- "called, address=0x{:016X}, size=0x{:X}, mask=0x{:08X}, attribute=0x{:08X}", address,
- size, mask, attr);
-
- // Validate address / size.
- R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
- R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
- R_UNLESS(size > 0, ResultInvalidSize);
- R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
-
- // Validate the attribute and mask.
- constexpr u32 SupportedMask = static_cast<u32>(MemoryAttribute::Uncached);
- R_UNLESS((mask | attr) == mask, ResultInvalidCombination);
- R_UNLESS((mask | attr | SupportedMask) == SupportedMask, ResultInvalidCombination);
-
- // Validate that the region is in range for the current process.
- auto& page_table{system.Kernel().CurrentProcess()->PageTable()};
- R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
-
- // Set the memory attribute.
- return page_table.SetMemoryAttribute(address, size, mask, attr);
-}
-
-static Result SetMemoryAttribute32(Core::System& system, u32 address, u32 size, u32 mask,
- u32 attr) {
- return SetMemoryAttribute(system, address, size, mask, attr);
-}
-
-/// Maps a memory range into a different range.
-static Result MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
- LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
- src_addr, size);
-
- auto& page_table{system.Kernel().CurrentProcess()->PageTable()};
-
- if (const Result result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)};
- result.IsError()) {
- return result;
- }
-
- return page_table.MapMemory(dst_addr, src_addr, size);
-}
-
-static Result MapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
- return MapMemory(system, dst_addr, src_addr, size);
-}
-
-/// Unmaps a region that was previously mapped with svcMapMemory
-static Result UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
- LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
- src_addr, size);
-
- auto& page_table{system.Kernel().CurrentProcess()->PageTable()};
-
- if (const Result result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)};
- result.IsError()) {
- return result;
- }
-
- return page_table.UnmapMemory(dst_addr, src_addr, size);
-}
-
-static Result UnmapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
- return UnmapMemory(system, dst_addr, src_addr, size);
-}
-
-template <typename T>
-Result CreateSession(Core::System& system, Handle* out_server, Handle* out_client, u64 name) {
- auto& process = *system.CurrentProcess();
- auto& handle_table = process.GetHandleTable();
-
- // Declare the session we're going to allocate.
- T* session;
-
- // Reserve a new session from the process resource limit.
- // FIXME: LimitableResource_SessionCountMax
- KScopedResourceReservation session_reservation(&process, LimitableResource::SessionCountMax);
- if (session_reservation.Succeeded()) {
- session = T::Create(system.Kernel());
- } else {
- return ResultLimitReached;
-
- // // We couldn't reserve a session. Check that we support dynamically expanding the
- // // resource limit.
- // R_UNLESS(process.GetResourceLimit() ==
- // &system.Kernel().GetSystemResourceLimit(), ResultLimitReached);
- // R_UNLESS(KTargetSystem::IsDynamicResourceLimitsEnabled(), ResultLimitReached());
-
- // // Try to allocate a session from unused slab memory.
- // session = T::CreateFromUnusedSlabMemory();
- // R_UNLESS(session != nullptr, ResultLimitReached);
- // ON_RESULT_FAILURE { session->Close(); };
-
- // // If we're creating a KSession, we want to add two KSessionRequests to the heap, to
- // // prevent request exhaustion.
- // // NOTE: Nintendo checks if session->DynamicCast<KSession *>() != nullptr, but there's
- // // no reason to not do this statically.
- // if constexpr (std::same_as<T, KSession>) {
- // for (size_t i = 0; i < 2; i++) {
- // KSessionRequest* request = KSessionRequest::CreateFromUnusedSlabMemory();
- // R_UNLESS(request != nullptr, ResultLimitReached);
- // request->Close();
- // }
- // }
-
- // We successfully allocated a session, so add the object we allocated to the resource
- // limit.
- // system.Kernel().GetSystemResourceLimit().Reserve(LimitableResource::SessionCountMax, 1);
- }
-
- // Check that we successfully created a session.
- R_UNLESS(session != nullptr, ResultOutOfResource);
-
- // Initialize the session.
- session->Initialize(nullptr, fmt::format("{}", name));
-
- // Commit the session reservation.
- session_reservation.Commit();
-
- // Ensure that we clean up the session (and its only references are handle table) on function
- // end.
- SCOPE_EXIT({
- session->GetClientSession().Close();
- session->GetServerSession().Close();
- });
-
- // Register the session.
- T::Register(system.Kernel(), session);
-
- // Add the server session to the handle table.
- R_TRY(handle_table.Add(out_server, &session->GetServerSession()));
-
- // Add the client session to the handle table.
- const auto result = handle_table.Add(out_client, &session->GetClientSession());
-
- if (!R_SUCCEEDED(result)) {
- // Ensure that we maintaing a clean handle state on exit.
- handle_table.Remove(*out_server);
- }
-
- return result;
-}
-
-static Result CreateSession(Core::System& system, Handle* out_server, Handle* out_client,
- u32 is_light, u64 name) {
- if (is_light) {
- // return CreateSession<KLightSession>(system, out_server, out_client, name);
- return ResultUnknown;
- } else {
- return CreateSession<KSession>(system, out_server, out_client, name);
- }
-}
-
-/// Connect to an OS service given the port name, returns the handle to the port to out
-static Result ConnectToNamedPort(Core::System& system, Handle* out, VAddr port_name_address) {
- auto& memory = system.Memory();
- if (!memory.IsValidVirtualAddress(port_name_address)) {
- LOG_ERROR(Kernel_SVC,
- "Port Name Address is not a valid virtual address, port_name_address=0x{:016X}",
- port_name_address);
- return ResultNotFound;
- }
-
- static constexpr std::size_t PortNameMaxLength = 11;
- // Read 1 char beyond the max allowed port name to detect names that are too long.
- const std::string port_name = memory.ReadCString(port_name_address, PortNameMaxLength + 1);
- if (port_name.size() > PortNameMaxLength) {
- LOG_ERROR(Kernel_SVC, "Port name is too long, expected {} but got {}", PortNameMaxLength,
- port_name.size());
- return ResultOutOfRange;
- }
-
- LOG_TRACE(Kernel_SVC, "called port_name={}", port_name);
-
- // Get the current handle table.
- auto& kernel = system.Kernel();
- auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
-
- // Find the client port.
- auto port = kernel.CreateNamedServicePort(port_name);
- if (!port) {
- LOG_ERROR(Kernel_SVC, "tried to connect to unknown port: {}", port_name);
- return ResultNotFound;
- }
-
- // Reserve a handle for the port.
- // NOTE: Nintendo really does write directly to the output handle here.
- R_TRY(handle_table.Reserve(out));
- auto handle_guard = SCOPE_GUARD({ handle_table.Unreserve(*out); });
-
- // Create a session.
- KClientSession* session{};
- R_TRY(port->CreateSession(std::addressof(session)));
-
- kernel.RegisterNamedServiceHandler(port_name, &port->GetParent()->GetServerPort());
-
- // Register the session in the table, close the extra reference.
- handle_table.Register(*out, session);
- session->Close();
-
- // We succeeded.
- handle_guard.Cancel();
- return ResultSuccess;
-}
-
-static Result ConnectToNamedPort32(Core::System& system, Handle* out_handle,
- u32 port_name_address) {
-
- return ConnectToNamedPort(system, out_handle, port_name_address);
-}
-
-/// Makes a blocking IPC call to a service.
-static Result SendSyncRequest(Core::System& system, Handle handle) {
- auto& kernel = system.Kernel();
-
- // Create the wait queue.
- KThreadQueue wait_queue(kernel);
-
- // Get the client session from its handle.
- KScopedAutoObject session =
- kernel.CurrentProcess()->GetHandleTable().GetObject<KClientSession>(handle);
- R_UNLESS(session.IsNotNull(), ResultInvalidHandle);
-
- LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
-
- return session->SendSyncRequest();
-}
-
-static Result SendSyncRequest32(Core::System& system, Handle handle) {
- return SendSyncRequest(system, handle);
-}
-
-static Result ReplyAndReceive(Core::System& system, s32* out_index, Handle* handles,
- s32 num_handles, Handle reply_target, s64 timeout_ns) {
- auto& kernel = system.Kernel();
- auto& handle_table = GetCurrentThread(kernel).GetOwnerProcess()->GetHandleTable();
-
- // Convert handle list to object table.
- std::vector<KSynchronizationObject*> objs(num_handles);
- R_UNLESS(
- handle_table.GetMultipleObjects<KSynchronizationObject>(objs.data(), handles, num_handles),
- ResultInvalidHandle);
-
- // Ensure handles are closed when we're done.
- SCOPE_EXIT({
- for (auto i = 0; i < num_handles; ++i) {
- objs[i]->Close();
- }
- });
-
- // Reply to the target, if one is specified.
- if (reply_target != InvalidHandle) {
- KScopedAutoObject session = handle_table.GetObject<KServerSession>(reply_target);
- R_UNLESS(session.IsNotNull(), ResultInvalidHandle);
-
- // If we fail to reply, we want to set the output index to -1.
- // ON_RESULT_FAILURE { *out_index = -1; };
-
- // Send the reply.
- // R_TRY(session->SendReply());
-
- Result rc = session->SendReply();
- if (!R_SUCCEEDED(rc)) {
- *out_index = -1;
- return rc;
- }
- }
-
- // Wait for a message.
- while (true) {
- // Wait for an object.
- s32 index;
- Result result = KSynchronizationObject::Wait(kernel, &index, objs.data(),
- static_cast<s32>(objs.size()), timeout_ns);
- if (result == ResultTimedOut) {
- return result;
- }
-
- // Receive the request.
- if (R_SUCCEEDED(result)) {
- KServerSession* session = objs[index]->DynamicCast<KServerSession*>();
- if (session != nullptr) {
- result = session->ReceiveRequest();
- if (result == ResultNotFound) {
- continue;
- }
- }
- }
-
- *out_index = index;
- return result;
- }
-}
-
-/// Get the ID for the specified thread.
-static Result GetThreadId(Core::System& system, u64* out_thread_id, Handle thread_handle) {
- // Get the thread from its handle.
- KScopedAutoObject thread =
- system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle);
- R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
-
- // Get the thread's id.
- *out_thread_id = thread->GetId();
- return ResultSuccess;
-}
-
-static Result GetThreadId32(Core::System& system, u32* out_thread_id_low, u32* out_thread_id_high,
- Handle thread_handle) {
- u64 out_thread_id{};
- const Result result{GetThreadId(system, &out_thread_id, thread_handle)};
-
- *out_thread_id_low = static_cast<u32>(out_thread_id >> 32);
- *out_thread_id_high = static_cast<u32>(out_thread_id & std::numeric_limits<u32>::max());
-
- return result;
-}
-
-/// Gets the ID of the specified process or a specified thread's owning process.
-static Result GetProcessId(Core::System& system, u64* out_process_id, Handle handle) {
- LOG_DEBUG(Kernel_SVC, "called handle=0x{:08X}", handle);
-
- // Get the object from the handle table.
- KScopedAutoObject obj =
- system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KAutoObject>(
- static_cast<Handle>(handle));
- R_UNLESS(obj.IsNotNull(), ResultInvalidHandle);
-
- // Get the process from the object.
- KProcess* process = nullptr;
- if (KProcess* p = obj->DynamicCast<KProcess*>(); p != nullptr) {
- // The object is a process, so we can use it directly.
- process = p;
- } else if (KThread* t = obj->DynamicCast<KThread*>(); t != nullptr) {
- // The object is a thread, so we want to use its parent.
- process = reinterpret_cast<KThread*>(obj.GetPointerUnsafe())->GetOwnerProcess();
- } else {
- // TODO(bunnei): This should also handle debug objects before returning.
- UNIMPLEMENTED_MSG("Debug objects not implemented");
- }
-
- // Make sure the target process exists.
- R_UNLESS(process != nullptr, ResultInvalidHandle);
-
- // Get the process id.
- *out_process_id = process->GetId();
-
- return ResultSuccess;
-}
-
-static Result GetProcessId32(Core::System& system, u32* out_process_id_low,
- u32* out_process_id_high, Handle handle) {
- u64 out_process_id{};
- const auto result = GetProcessId(system, &out_process_id, handle);
- *out_process_id_low = static_cast<u32>(out_process_id);
- *out_process_id_high = static_cast<u32>(out_process_id >> 32);
- return result;
-}
-
-/// Wait for the given handles to synchronize, timeout after the specified nanoseconds
-static Result WaitSynchronization(Core::System& system, s32* index, VAddr handles_address,
- s32 num_handles, s64 nano_seconds) {
- LOG_TRACE(Kernel_SVC, "called handles_address=0x{:X}, num_handles={}, nano_seconds={}",
- handles_address, num_handles, nano_seconds);
-
- // Ensure number of handles is valid.
- R_UNLESS(0 <= num_handles && num_handles <= ArgumentHandleCountMax, ResultOutOfRange);
-
- auto& kernel = system.Kernel();
- std::vector<KSynchronizationObject*> objs(num_handles);
- const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
- Handle* handles = system.Memory().GetPointer<Handle>(handles_address);
-
- // Copy user handles.
- if (num_handles > 0) {
- // Convert the handles to objects.
- R_UNLESS(handle_table.GetMultipleObjects<KSynchronizationObject>(objs.data(), handles,
- num_handles),
- ResultInvalidHandle);
- for (const auto& obj : objs) {
- kernel.RegisterInUseObject(obj);
- }
- }
-
- // Ensure handles are closed when we're done.
- SCOPE_EXIT({
- for (s32 i = 0; i < num_handles; ++i) {
- kernel.UnregisterInUseObject(objs[i]);
- objs[i]->Close();
- }
- });
-
- return KSynchronizationObject::Wait(kernel, index, objs.data(), static_cast<s32>(objs.size()),
- nano_seconds);
-}
-
-static Result WaitSynchronization32(Core::System& system, u32 timeout_low, u32 handles_address,
- s32 num_handles, u32 timeout_high, s32* index) {
- const s64 nano_seconds{(static_cast<s64>(timeout_high) << 32) | static_cast<s64>(timeout_low)};
- return WaitSynchronization(system, index, handles_address, num_handles, nano_seconds);
-}
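
// [Editor's note] WaitSynchronization above (and several SVCs further down) leans on
// SCOPE_EXIT / SCOPE_GUARD to release every object it opened on all return paths. A minimal
// sketch of such a scope-exit helper, assuming C++17 class template argument deduction; this
// is illustrative only, not yuzu's Common::ScopeExit:
#include <utility>

template <typename F>
class ScopeExit {
public:
    explicit ScopeExit(F&& f) : func_{std::forward<F>(f)} {}
    ~ScopeExit() { func_(); } // runs on every path out of the enclosing scope
    ScopeExit(const ScopeExit&) = delete;
    ScopeExit& operator=(const ScopeExit&) = delete;

private:
    F func_;
};

// Usage sketch: ScopeExit cleanup{[&] { for (auto* obj : objs) obj->Close(); }};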
-
-/// Resumes a thread waiting on WaitSynchronization
-static Result CancelSynchronization(Core::System& system, Handle handle) {
- LOG_TRACE(Kernel_SVC, "called handle=0x{:X}", handle);
-
- // Get the thread from its handle.
- KScopedAutoObject thread =
- system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(handle);
- R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
-
- // Cancel the thread's wait.
- thread->WaitCancel();
- return ResultSuccess;
-}
-
-static Result CancelSynchronization32(Core::System& system, Handle handle) {
- return CancelSynchronization(system, handle);
-}
-
-/// Attempts to lock a mutex
-static Result ArbitrateLock(Core::System& system, Handle thread_handle, VAddr address, u32 tag) {
- LOG_TRACE(Kernel_SVC, "called thread_handle=0x{:08X}, address=0x{:X}, tag=0x{:08X}",
- thread_handle, address, tag);
-
- // Validate the input address.
- if (IsKernelAddress(address)) {
- LOG_ERROR(Kernel_SVC, "Attempting to arbitrate a lock on a kernel address (address={:08X})",
- address);
- return ResultInvalidCurrentMemory;
- }
- if (!Common::IsAligned(address, sizeof(u32))) {
- LOG_ERROR(Kernel_SVC, "Input address must be 4 byte aligned (address: {:08X})", address);
- return ResultInvalidAddress;
- }
-
- return system.Kernel().CurrentProcess()->WaitForAddress(thread_handle, address, tag);
-}
-
-static Result ArbitrateLock32(Core::System& system, Handle thread_handle, u32 address, u32 tag) {
- return ArbitrateLock(system, thread_handle, address, tag);
-}
-
-/// Unlock a mutex
-static Result ArbitrateUnlock(Core::System& system, VAddr address) {
- LOG_TRACE(Kernel_SVC, "called address=0x{:X}", address);
-
- // Validate the input address.
- if (IsKernelAddress(address)) {
- LOG_ERROR(Kernel_SVC,
- "Attempting to arbitrate an unlock on a kernel address (address={:08X})",
- address);
- return ResultInvalidCurrentMemory;
- }
- if (!Common::IsAligned(address, sizeof(u32))) {
- LOG_ERROR(Kernel_SVC, "Input address must be 4 byte aligned (address: {:08X})", address);
- return ResultInvalidAddress;
- }
-
- return system.Kernel().CurrentProcess()->SignalToAddress(address);
-}
-
-static Result ArbitrateUnlock32(Core::System& system, u32 address) {
- return ArbitrateUnlock(system, address);
-}
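
// [Editor's note] ArbitrateLock/ArbitrateUnlock and the arbiter/condition-variable SVCs below
// repeat the same two checks on the user-supplied address: it must not be a kernel address and
// it must be 4-byte aligned. A minimal standalone sketch of that validation; the helper name
// and the kernel_address_base parameter are hypothetical stand-ins for IsKernelAddress() and
// Common::IsAligned():
#include <cstdint>

static bool IsValidUserAtomicAddress(uint64_t address, uint64_t kernel_address_base) {
    const bool is_kernel_address = address >= kernel_address_base; // stand-in for IsKernelAddress()
    const bool is_aligned = (address % sizeof(uint32_t)) == 0;     // stand-in for Common::IsAligned()
    return !is_kernel_address && is_aligned;
}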
-
-/// Break program execution
-static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
- BreakReason break_reason =
- static_cast<BreakReason>(reason & ~static_cast<u32>(BreakReason::NotificationOnlyFlag));
- bool notification_only = (reason & static_cast<u32>(BreakReason::NotificationOnlyFlag)) != 0;
-
- bool has_dumped_buffer{};
- std::vector<u8> debug_buffer;
-
- const auto handle_debug_buffer = [&](VAddr addr, u64 sz) {
- if (sz == 0 || addr == 0 || has_dumped_buffer) {
- return;
- }
-
- auto& memory = system.Memory();
-
-        // This is typically an error code, so assume that's what it is
- if (sz == sizeof(u32)) {
- LOG_CRITICAL(Debug_Emulated, "debug_buffer_err_code={:X}", memory.Read32(addr));
- } else {
- // We don't know what's in here so we'll hexdump it
- debug_buffer.resize(sz);
- memory.ReadBlock(addr, debug_buffer.data(), sz);
- std::string hexdump;
- for (std::size_t i = 0; i < debug_buffer.size(); i++) {
- hexdump += fmt::format("{:02X} ", debug_buffer[i]);
-                if ((i + 1) % 16 == 0) {
- hexdump += '\n';
- }
- }
- LOG_CRITICAL(Debug_Emulated, "debug_buffer=\n{}", hexdump);
- }
- has_dumped_buffer = true;
- };
- switch (break_reason) {
- case BreakReason::Panic:
- LOG_CRITICAL(Debug_Emulated, "Userspace PANIC! info1=0x{:016X}, info2=0x{:016X}", info1,
- info2);
- handle_debug_buffer(info1, info2);
- break;
- case BreakReason::Assert:
- LOG_CRITICAL(Debug_Emulated, "Userspace Assertion failed! info1=0x{:016X}, info2=0x{:016X}",
- info1, info2);
- handle_debug_buffer(info1, info2);
- break;
- case BreakReason::User:
- LOG_WARNING(Debug_Emulated, "Userspace Break! 0x{:016X} with size 0x{:016X}", info1, info2);
- handle_debug_buffer(info1, info2);
- break;
- case BreakReason::PreLoadDll:
- LOG_INFO(Debug_Emulated,
- "Userspace Attempting to load an NRO at 0x{:016X} with size 0x{:016X}", info1,
- info2);
- break;
- case BreakReason::PostLoadDll:
- LOG_INFO(Debug_Emulated, "Userspace Loaded an NRO at 0x{:016X} with size 0x{:016X}", info1,
- info2);
- break;
- case BreakReason::PreUnloadDll:
- LOG_INFO(Debug_Emulated,
- "Userspace Attempting to unload an NRO at 0x{:016X} with size 0x{:016X}", info1,
- info2);
- break;
- case BreakReason::PostUnloadDll:
- LOG_INFO(Debug_Emulated, "Userspace Unloaded an NRO at 0x{:016X} with size 0x{:016X}",
- info1, info2);
- break;
- case BreakReason::CppException:
- LOG_CRITICAL(Debug_Emulated, "Signalling debugger. Uncaught C++ exception encountered.");
- break;
- default:
- LOG_WARNING(
- Debug_Emulated,
- "Signalling debugger, Unknown break reason {:#X}, info1=0x{:016X}, info2=0x{:016X}",
- reason, info1, info2);
- handle_debug_buffer(info1, info2);
- break;
- }
-
- system.GetReporter().SaveSvcBreakReport(reason, notification_only, info1, info2,
- has_dumped_buffer ? std::make_optional(debug_buffer)
- : std::nullopt);
-
- if (!notification_only) {
- LOG_CRITICAL(
- Debug_Emulated,
- "Emulated program broke execution! reason=0x{:016X}, info1=0x{:016X}, info2=0x{:016X}",
- reason, info1, info2);
-
- handle_debug_buffer(info1, info2);
-
- auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
- const auto thread_processor_id = current_thread->GetActiveCore();
- system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace();
- }
-
- if (system.DebuggerEnabled()) {
- auto* thread = system.Kernel().GetCurrentEmuThread();
- system.GetDebugger().NotifyThreadStopped(thread);
- thread->RequestSuspend(Kernel::SuspendType::Debug);
- }
-}
-
-static void Break32(Core::System& system, u32 reason, u32 info1, u32 info2) {
- Break(system, reason, info1, info2);
-}
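
// [Editor's note] Break()'s debug-buffer handler above hex-dumps an arbitrary buffer at 16
// bytes per line. A minimal standalone sketch of the same formatting using only the standard
// library (the emulator version reads guest memory and formats through fmt):
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

static std::string HexDump(const std::vector<uint8_t>& data) {
    std::string out;
    char byte[4]{};
    for (std::size_t i = 0; i < data.size(); ++i) {
        std::snprintf(byte, sizeof(byte), "%02X ", static_cast<unsigned>(data[i]));
        out += byte;
        if ((i + 1) % 16 == 0) { // break the line after every 16th byte
            out += '\n';
        }
    }
    return out;
}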
-
-/// Used to output a message on a debug hardware unit - does nothing on a retail unit
-static void OutputDebugString(Core::System& system, VAddr address, u64 len) {
- if (len == 0) {
- return;
- }
-
- std::string str(len, '\0');
- system.Memory().ReadBlock(address, str.data(), str.size());
- LOG_DEBUG(Debug_Emulated, "{}", str);
-}
-
-static void OutputDebugString32(Core::System& system, u32 address, u32 len) {
- OutputDebugString(system, address, len);
-}
-
-/// Gets system/memory information for the current process
-static Result GetInfo(Core::System& system, u64* result, u64 info_id, Handle handle,
- u64 info_sub_id) {
- LOG_TRACE(Kernel_SVC, "called info_id=0x{:X}, info_sub_id=0x{:X}, handle=0x{:08X}", info_id,
- info_sub_id, handle);
-
- const auto info_id_type = static_cast<InfoType>(info_id);
-
- switch (info_id_type) {
- case InfoType::CoreMask:
- case InfoType::PriorityMask:
- case InfoType::AliasRegionAddress:
- case InfoType::AliasRegionSize:
- case InfoType::HeapRegionAddress:
- case InfoType::HeapRegionSize:
- case InfoType::AslrRegionAddress:
- case InfoType::AslrRegionSize:
- case InfoType::StackRegionAddress:
- case InfoType::StackRegionSize:
- case InfoType::TotalMemorySize:
- case InfoType::UsedMemorySize:
- case InfoType::SystemResourceSizeTotal:
- case InfoType::SystemResourceSizeUsed:
- case InfoType::ProgramId:
- case InfoType::UserExceptionContextAddress:
- case InfoType::TotalNonSystemMemorySize:
- case InfoType::UsedNonSystemMemorySize:
- case InfoType::IsApplication:
- case InfoType::FreeThreadCount: {
- if (info_sub_id != 0) {
- LOG_ERROR(Kernel_SVC, "Info sub id is non zero! info_id={}, info_sub_id={}", info_id,
- info_sub_id);
- return ResultInvalidEnumValue;
- }
-
- const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
- KScopedAutoObject process = handle_table.GetObject<KProcess>(handle);
- if (process.IsNull()) {
- LOG_ERROR(Kernel_SVC, "Process is not valid! info_id={}, info_sub_id={}, handle={:08X}",
- info_id, info_sub_id, handle);
- return ResultInvalidHandle;
- }
-
- switch (info_id_type) {
- case InfoType::CoreMask:
- *result = process->GetCoreMask();
- return ResultSuccess;
-
- case InfoType::PriorityMask:
- *result = process->GetPriorityMask();
- return ResultSuccess;
-
- case InfoType::AliasRegionAddress:
- *result = process->PageTable().GetAliasRegionStart();
- return ResultSuccess;
-
- case InfoType::AliasRegionSize:
- *result = process->PageTable().GetAliasRegionSize();
- return ResultSuccess;
-
- case InfoType::HeapRegionAddress:
- *result = process->PageTable().GetHeapRegionStart();
- return ResultSuccess;
-
- case InfoType::HeapRegionSize:
- *result = process->PageTable().GetHeapRegionSize();
- return ResultSuccess;
-
- case InfoType::AslrRegionAddress:
- *result = process->PageTable().GetAliasCodeRegionStart();
- return ResultSuccess;
-
- case InfoType::AslrRegionSize:
- *result = process->PageTable().GetAliasCodeRegionSize();
- return ResultSuccess;
-
- case InfoType::StackRegionAddress:
- *result = process->PageTable().GetStackRegionStart();
- return ResultSuccess;
-
- case InfoType::StackRegionSize:
- *result = process->PageTable().GetStackRegionSize();
- return ResultSuccess;
-
- case InfoType::TotalMemorySize:
- *result = process->GetTotalPhysicalMemoryAvailable();
- return ResultSuccess;
-
- case InfoType::UsedMemorySize:
- *result = process->GetTotalPhysicalMemoryUsed();
- return ResultSuccess;
-
- case InfoType::SystemResourceSizeTotal:
- *result = process->GetSystemResourceSize();
- return ResultSuccess;
-
- case InfoType::SystemResourceSizeUsed:
- LOG_WARNING(Kernel_SVC, "(STUBBED) Attempted to query system resource usage");
- *result = process->GetSystemResourceUsage();
- return ResultSuccess;
-
- case InfoType::ProgramId:
- *result = process->GetProgramID();
- return ResultSuccess;
-
- case InfoType::UserExceptionContextAddress:
- *result = process->GetProcessLocalRegionAddress();
- return ResultSuccess;
-
- case InfoType::TotalNonSystemMemorySize:
- *result = process->GetTotalPhysicalMemoryAvailableWithoutSystemResource();
- return ResultSuccess;
-
- case InfoType::UsedNonSystemMemorySize:
- *result = process->GetTotalPhysicalMemoryUsedWithoutSystemResource();
- return ResultSuccess;
-
- case InfoType::FreeThreadCount:
- *result = process->GetFreeThreadCount();
- return ResultSuccess;
-
- default:
- break;
- }
-
- LOG_ERROR(Kernel_SVC, "Unimplemented svcGetInfo id=0x{:016X}", info_id);
- return ResultInvalidEnumValue;
- }
-
- case InfoType::DebuggerAttached:
- *result = 0;
- return ResultSuccess;
-
- case InfoType::ResourceLimit: {
- if (handle != 0) {
- LOG_ERROR(Kernel, "Handle is non zero! handle={:08X}", handle);
- return ResultInvalidHandle;
- }
-
- if (info_sub_id != 0) {
- LOG_ERROR(Kernel, "Info sub id is non zero! info_id={}, info_sub_id={}", info_id,
- info_sub_id);
- return ResultInvalidCombination;
- }
-
- KProcess* const current_process = system.Kernel().CurrentProcess();
- KHandleTable& handle_table = current_process->GetHandleTable();
- const auto resource_limit = current_process->GetResourceLimit();
- if (!resource_limit) {
- *result = Svc::InvalidHandle;
- // Yes, the kernel considers this a successful operation.
- return ResultSuccess;
- }
-
- Handle resource_handle{};
- R_TRY(handle_table.Add(&resource_handle, resource_limit));
-
- *result = resource_handle;
- return ResultSuccess;
- }
-
- case InfoType::RandomEntropy:
- if (handle != 0) {
- LOG_ERROR(Kernel_SVC, "Process Handle is non zero, expected 0 result but got {:016X}",
- handle);
- return ResultInvalidHandle;
- }
-
- if (info_sub_id >= KProcess::RANDOM_ENTROPY_SIZE) {
- LOG_ERROR(Kernel_SVC, "Entropy size is out of range, expected {} but got {}",
- KProcess::RANDOM_ENTROPY_SIZE, info_sub_id);
- return ResultInvalidCombination;
- }
-
- *result = system.Kernel().CurrentProcess()->GetRandomEntropy(info_sub_id);
- return ResultSuccess;
-
- case InfoType::InitialProcessIdRange:
- LOG_WARNING(Kernel_SVC,
- "(STUBBED) Attempted to query privileged process id bounds, returned 0");
- *result = 0;
- return ResultSuccess;
-
- case InfoType::ThreadTickCount: {
- constexpr u64 num_cpus = 4;
- if (info_sub_id != 0xFFFFFFFFFFFFFFFF && info_sub_id >= num_cpus) {
- LOG_ERROR(Kernel_SVC, "Core count is out of range, expected {} but got {}", num_cpus,
- info_sub_id);
- return ResultInvalidCombination;
- }
-
- KScopedAutoObject thread =
- system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(
- static_cast<Handle>(handle));
- if (thread.IsNull()) {
- LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}",
- static_cast<Handle>(handle));
- return ResultInvalidHandle;
- }
-
- const auto& core_timing = system.CoreTiming();
- const auto& scheduler = *system.Kernel().CurrentScheduler();
- const auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
- const bool same_thread = current_thread == thread.GetPointerUnsafe();
-
- const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTime();
- u64 out_ticks = 0;
- if (same_thread && info_sub_id == 0xFFFFFFFFFFFFFFFF) {
- const u64 thread_ticks = current_thread->GetCpuTime();
-
- out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks);
- } else if (same_thread && info_sub_id == system.Kernel().CurrentPhysicalCoreIndex()) {
- out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks;
- }
-
- *result = out_ticks;
- return ResultSuccess;
- }
- case InfoType::IdleTickCount: {
- // Verify the input handle is invalid.
- R_UNLESS(handle == InvalidHandle, ResultInvalidHandle);
-
- // Verify the requested core is valid.
- const bool core_valid =
- (info_sub_id == 0xFFFFFFFFFFFFFFFF) ||
- (info_sub_id == static_cast<u64>(system.Kernel().CurrentPhysicalCoreIndex()));
- R_UNLESS(core_valid, ResultInvalidCombination);
-
- // Get the idle tick count.
- *result = system.Kernel().CurrentScheduler()->GetIdleThread()->GetCpuTime();
- return ResultSuccess;
- }
- case InfoType::MesosphereCurrentProcess: {
- // Verify the input handle is invalid.
- R_UNLESS(handle == InvalidHandle, ResultInvalidHandle);
-
- // Verify the sub-type is valid.
- R_UNLESS(info_sub_id == 0, ResultInvalidCombination);
-
- // Get the handle table.
- KProcess* current_process = system.Kernel().CurrentProcess();
- KHandleTable& handle_table = current_process->GetHandleTable();
-
- // Get a new handle for the current process.
- Handle tmp;
- R_TRY(handle_table.Add(&tmp, current_process));
-
- // Set the output.
- *result = tmp;
-
- // We succeeded.
- return ResultSuccess;
- }
- default:
- LOG_ERROR(Kernel_SVC, "Unimplemented svcGetInfo id=0x{:016X}", info_id);
- return ResultInvalidEnumValue;
- }
-}
-
-static Result GetInfo32(Core::System& system, u32* result_low, u32* result_high, u32 sub_id_low,
- u32 info_id, u32 handle, u32 sub_id_high) {
- const u64 sub_id{u64{sub_id_low} | (u64{sub_id_high} << 32)};
- u64 res_value{};
-
- const Result result{GetInfo(system, &res_value, info_id, handle, sub_id)};
- *result_high = static_cast<u32>(res_value >> 32);
- *result_low = static_cast<u32>(res_value & std::numeric_limits<u32>::max());
-
- return result;
-}
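
// [Editor's note] The *32 wrappers in this file hand a 64-bit result back to the AArch32
// caller as two 32-bit halves. A minimal standalone sketch of the split and the matching
// recombination; the helper names are hypothetical:
#include <cstdint>

static void SplitU64(uint64_t value, uint32_t* out_low, uint32_t* out_high) {
    *out_low = static_cast<uint32_t>(value);        // bits 0..31
    *out_high = static_cast<uint32_t>(value >> 32); // bits 32..63
}

static uint64_t JoinU64(uint32_t low, uint32_t high) {
    return static_cast<uint64_t>(low) | (static_cast<uint64_t>(high) << 32);
}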
-
-/// Maps memory at a desired address
-static Result MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
- LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
-
- if (!Common::Is4KBAligned(addr)) {
- LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, 0x{:016X}", addr);
- return ResultInvalidAddress;
- }
-
- if (!Common::Is4KBAligned(size)) {
- LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:X}", size);
- return ResultInvalidSize;
- }
-
- if (size == 0) {
- LOG_ERROR(Kernel_SVC, "Size is zero");
- return ResultInvalidSize;
- }
-
- if (!(addr < addr + size)) {
- LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address");
- return ResultInvalidMemoryRegion;
- }
-
- KProcess* const current_process{system.Kernel().CurrentProcess()};
- auto& page_table{current_process->PageTable()};
-
- if (current_process->GetSystemResourceSize() == 0) {
- LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
- return ResultInvalidState;
- }
-
- if (!page_table.IsInsideAddressSpace(addr, size)) {
- LOG_ERROR(Kernel_SVC,
- "Address is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr,
- size);
- return ResultInvalidMemoryRegion;
- }
-
- if (page_table.IsOutsideAliasRegion(addr, size)) {
- LOG_ERROR(Kernel_SVC,
- "Address is not within the alias region, addr=0x{:016X}, size=0x{:016X}", addr,
- size);
- return ResultInvalidMemoryRegion;
- }
-
- return page_table.MapPhysicalMemory(addr, size);
-}
-
-static Result MapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
- return MapPhysicalMemory(system, addr, size);
-}
-
-/// Unmaps memory previously mapped via MapPhysicalMemory
-static Result UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
- LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
-
- if (!Common::Is4KBAligned(addr)) {
- LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, 0x{:016X}", addr);
- return ResultInvalidAddress;
- }
-
- if (!Common::Is4KBAligned(size)) {
- LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:X}", size);
- return ResultInvalidSize;
- }
-
- if (size == 0) {
- LOG_ERROR(Kernel_SVC, "Size is zero");
- return ResultInvalidSize;
- }
-
- if (!(addr < addr + size)) {
- LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address");
- return ResultInvalidMemoryRegion;
- }
-
- KProcess* const current_process{system.Kernel().CurrentProcess()};
- auto& page_table{current_process->PageTable()};
-
- if (current_process->GetSystemResourceSize() == 0) {
- LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
- return ResultInvalidState;
- }
-
- if (!page_table.IsInsideAddressSpace(addr, size)) {
- LOG_ERROR(Kernel_SVC,
- "Address is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr,
- size);
- return ResultInvalidMemoryRegion;
- }
-
- if (page_table.IsOutsideAliasRegion(addr, size)) {
- LOG_ERROR(Kernel_SVC,
- "Address is not within the alias region, addr=0x{:016X}, size=0x{:016X}", addr,
- size);
- return ResultInvalidMemoryRegion;
- }
-
- return page_table.UnmapPhysicalMemory(addr, size);
-}
-
-static Result UnmapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
- return UnmapPhysicalMemory(system, addr, size);
-}
-
-/// Sets the thread activity
-static Result SetThreadActivity(Core::System& system, Handle thread_handle,
- ThreadActivity thread_activity) {
- LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", thread_handle,
- thread_activity);
-
- // Validate the activity.
- constexpr auto IsValidThreadActivity = [](ThreadActivity activity) {
- return activity == ThreadActivity::Runnable || activity == ThreadActivity::Paused;
- };
- R_UNLESS(IsValidThreadActivity(thread_activity), ResultInvalidEnumValue);
-
- // Get the thread from its handle.
- KScopedAutoObject thread =
- system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle);
- R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
-
- // Check that the activity is being set on a non-current thread for the current process.
- R_UNLESS(thread->GetOwnerProcess() == system.Kernel().CurrentProcess(), ResultInvalidHandle);
- R_UNLESS(thread.GetPointerUnsafe() != GetCurrentThreadPointer(system.Kernel()), ResultBusy);
-
- // Set the activity.
- R_TRY(thread->SetActivity(thread_activity));
-
- return ResultSuccess;
-}
-
-static Result SetThreadActivity32(Core::System& system, Handle thread_handle,
- Svc::ThreadActivity thread_activity) {
- return SetThreadActivity(system, thread_handle, thread_activity);
-}
-
-/// Gets the thread context
-static Result GetThreadContext(Core::System& system, VAddr out_context, Handle thread_handle) {
- LOG_DEBUG(Kernel_SVC, "called, out_context=0x{:08X}, thread_handle=0x{:X}", out_context,
- thread_handle);
-
- auto& kernel = system.Kernel();
-
- // Get the thread from its handle.
- KScopedAutoObject thread =
- kernel.CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle);
- R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
-
-    // Require that the handle refers to a non-current thread in the current process.
- const auto* current_process = kernel.CurrentProcess();
- R_UNLESS(current_process == thread->GetOwnerProcess(), ResultInvalidId);
-
- // Verify that the thread isn't terminated.
- R_UNLESS(thread->GetState() != ThreadState::Terminated, ResultTerminationRequested);
-
-    // Check that the thread is not the current one.
-    // NOTE: Nintendo does not check this, and thus the following loop will deadlock.
- R_UNLESS(thread.GetPointerUnsafe() != GetCurrentThreadPointer(kernel), ResultInvalidId);
-
- // Try to get the thread context until the thread isn't current on any core.
- while (true) {
- KScopedSchedulerLock sl{kernel};
-
- // TODO(bunnei): Enforce that thread is suspended for debug here.
-
- // If the thread's raw state isn't runnable, check if it's current on some core.
- if (thread->GetRawState() != ThreadState::Runnable) {
- bool current = false;
- for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
- if (thread.GetPointerUnsafe() == kernel.Scheduler(i).GetSchedulerCurrentThread()) {
- current = true;
- break;
- }
- }
-
- // If the thread is current, retry until it isn't.
- if (current) {
- continue;
- }
- }
-
- // Get the thread context.
- std::vector<u8> context;
- R_TRY(thread->GetThreadContext3(context));
-
- // Copy the thread context to user space.
- system.Memory().WriteBlock(out_context, context.data(), context.size());
-
- return ResultSuccess;
- }
-
- return ResultSuccess;
-}
-
-static Result GetThreadContext32(Core::System& system, u32 out_context, Handle thread_handle) {
- return GetThreadContext(system, out_context, thread_handle);
-}
-
-/// Gets the priority for the specified thread
-static Result GetThreadPriority(Core::System& system, u32* out_priority, Handle handle) {
- LOG_TRACE(Kernel_SVC, "called");
-
- // Get the thread from its handle.
- KScopedAutoObject thread =
- system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(handle);
- R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
-
- // Get the thread's priority.
- *out_priority = thread->GetPriority();
- return ResultSuccess;
-}
-
-static Result GetThreadPriority32(Core::System& system, u32* out_priority, Handle handle) {
- return GetThreadPriority(system, out_priority, handle);
-}
-
-/// Sets the priority for the specified thread
-static Result SetThreadPriority(Core::System& system, Handle thread_handle, u32 priority) {
- // Get the current process.
- KProcess& process = *system.Kernel().CurrentProcess();
-
- // Validate the priority.
- R_UNLESS(HighestThreadPriority <= priority && priority <= LowestThreadPriority,
- ResultInvalidPriority);
- R_UNLESS(process.CheckThreadPriority(priority), ResultInvalidPriority);
-
- // Get the thread from its handle.
- KScopedAutoObject thread = process.GetHandleTable().GetObject<KThread>(thread_handle);
- R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
-
- // Set the thread priority.
- thread->SetBasePriority(priority);
- return ResultSuccess;
-}
-
-static Result SetThreadPriority32(Core::System& system, Handle thread_handle, u32 priority) {
- return SetThreadPriority(system, thread_handle, priority);
-}
-
-/// Get which CPU core is executing the current thread
-static u32 GetCurrentProcessorNumber(Core::System& system) {
- LOG_TRACE(Kernel_SVC, "called");
- return static_cast<u32>(system.CurrentPhysicalCore().CoreIndex());
-}
-
-static u32 GetCurrentProcessorNumber32(Core::System& system) {
- return GetCurrentProcessorNumber(system);
-}
-
-namespace {
-
-constexpr bool IsValidSharedMemoryPermission(Svc::MemoryPermission perm) {
- switch (perm) {
- case Svc::MemoryPermission::Read:
- case Svc::MemoryPermission::ReadWrite:
- return true;
- default:
- return false;
- }
-}
-
-[[maybe_unused]] constexpr bool IsValidRemoteSharedMemoryPermission(Svc::MemoryPermission perm) {
- return IsValidSharedMemoryPermission(perm) || perm == Svc::MemoryPermission::DontCare;
-}
-
-constexpr bool IsValidProcessMemoryPermission(Svc::MemoryPermission perm) {
- switch (perm) {
- case Svc::MemoryPermission::None:
- case Svc::MemoryPermission::Read:
- case Svc::MemoryPermission::ReadWrite:
- case Svc::MemoryPermission::ReadExecute:
- return true;
- default:
- return false;
- }
-}
-
-constexpr bool IsValidMapCodeMemoryPermission(Svc::MemoryPermission perm) {
- return perm == Svc::MemoryPermission::ReadWrite;
-}
-
-constexpr bool IsValidMapToOwnerCodeMemoryPermission(Svc::MemoryPermission perm) {
- return perm == Svc::MemoryPermission::Read || perm == Svc::MemoryPermission::ReadExecute;
-}
-
-constexpr bool IsValidUnmapCodeMemoryPermission(Svc::MemoryPermission perm) {
- return perm == Svc::MemoryPermission::None;
-}
-
-constexpr bool IsValidUnmapFromOwnerCodeMemoryPermission(Svc::MemoryPermission perm) {
- return perm == Svc::MemoryPermission::None;
-}
-
-} // Anonymous namespace
-
-static Result MapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address, u64 size,
- Svc::MemoryPermission map_perm) {
- LOG_TRACE(Kernel_SVC,
- "called, shared_memory_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}",
- shmem_handle, address, size, map_perm);
-
- // Validate the address/size.
- R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
- R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
- R_UNLESS(size > 0, ResultInvalidSize);
- R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
-
- // Validate the permission.
- R_UNLESS(IsValidSharedMemoryPermission(map_perm), ResultInvalidNewMemoryPermission);
-
- // Get the current process.
- auto& process = *system.Kernel().CurrentProcess();
- auto& page_table = process.PageTable();
-
- // Get the shared memory.
- KScopedAutoObject shmem = process.GetHandleTable().GetObject<KSharedMemory>(shmem_handle);
- R_UNLESS(shmem.IsNotNull(), ResultInvalidHandle);
-
- // Verify that the mapping is in range.
- R_UNLESS(page_table.CanContain(address, size, KMemoryState::Shared), ResultInvalidMemoryRegion);
-
- // Add the shared memory to the process.
- R_TRY(process.AddSharedMemory(shmem.GetPointerUnsafe(), address, size));
-
- // Ensure that we clean up the shared memory if we fail to map it.
- auto guard =
- SCOPE_GUARD({ process.RemoveSharedMemory(shmem.GetPointerUnsafe(), address, size); });
-
- // Map the shared memory.
- R_TRY(shmem->Map(process, address, size, map_perm));
-
- // We succeeded.
- guard.Cancel();
- return ResultSuccess;
-}
-
-static Result MapSharedMemory32(Core::System& system, Handle shmem_handle, u32 address, u32 size,
- Svc::MemoryPermission map_perm) {
- return MapSharedMemory(system, shmem_handle, address, size, map_perm);
-}
-
-static Result UnmapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address,
- u64 size) {
- // Validate the address/size.
- R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
- R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
- R_UNLESS(size > 0, ResultInvalidSize);
- R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
-
- // Get the current process.
- auto& process = *system.Kernel().CurrentProcess();
- auto& page_table = process.PageTable();
-
- // Get the shared memory.
- KScopedAutoObject shmem = process.GetHandleTable().GetObject<KSharedMemory>(shmem_handle);
- R_UNLESS(shmem.IsNotNull(), ResultInvalidHandle);
-
- // Verify that the mapping is in range.
- R_UNLESS(page_table.CanContain(address, size, KMemoryState::Shared), ResultInvalidMemoryRegion);
-
- // Unmap the shared memory.
- R_TRY(shmem->Unmap(process, address, size));
-
- // Remove the shared memory from the process.
- process.RemoveSharedMemory(shmem.GetPointerUnsafe(), address, size);
-
- return ResultSuccess;
-}
-
-static Result UnmapSharedMemory32(Core::System& system, Handle shmem_handle, u32 address,
- u32 size) {
- return UnmapSharedMemory(system, shmem_handle, address, size);
-}
-
-static Result SetProcessMemoryPermission(Core::System& system, Handle process_handle, VAddr address,
- u64 size, Svc::MemoryPermission perm) {
- LOG_TRACE(Kernel_SVC,
- "called, process_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}",
- process_handle, address, size, perm);
-
- // Validate the address/size.
- R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
- R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
- R_UNLESS(size > 0, ResultInvalidSize);
- R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
- R_UNLESS(address == static_cast<uintptr_t>(address), ResultInvalidCurrentMemory);
- R_UNLESS(size == static_cast<size_t>(size), ResultInvalidCurrentMemory);
-
- // Validate the memory permission.
- R_UNLESS(IsValidProcessMemoryPermission(perm), ResultInvalidNewMemoryPermission);
-
- // Get the process from its handle.
- KScopedAutoObject process =
- system.CurrentProcess()->GetHandleTable().GetObject<KProcess>(process_handle);
- R_UNLESS(process.IsNotNull(), ResultInvalidHandle);
-
- // Validate that the address is in range.
- auto& page_table = process->PageTable();
- R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
-
- // Set the memory permission.
- return page_table.SetProcessMemoryPermission(address, size, perm);
-}
-
-static Result MapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
- VAddr src_address, u64 size) {
- LOG_TRACE(Kernel_SVC,
- "called, dst_address=0x{:X}, process_handle=0x{:X}, src_address=0x{:X}, size=0x{:X}",
- dst_address, process_handle, src_address, size);
-
- // Validate the address/size.
- R_UNLESS(Common::IsAligned(dst_address, PageSize), ResultInvalidAddress);
- R_UNLESS(Common::IsAligned(src_address, PageSize), ResultInvalidAddress);
- R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
- R_UNLESS(size > 0, ResultInvalidSize);
- R_UNLESS((dst_address < dst_address + size), ResultInvalidCurrentMemory);
- R_UNLESS((src_address < src_address + size), ResultInvalidCurrentMemory);
-
- // Get the processes.
- KProcess* dst_process = system.CurrentProcess();
- KScopedAutoObject src_process =
- dst_process->GetHandleTable().GetObjectWithoutPseudoHandle<KProcess>(process_handle);
- R_UNLESS(src_process.IsNotNull(), ResultInvalidHandle);
-
- // Get the page tables.
- auto& dst_pt = dst_process->PageTable();
- auto& src_pt = src_process->PageTable();
-
- // Validate that the mapping is in range.
- R_UNLESS(src_pt.Contains(src_address, size), ResultInvalidCurrentMemory);
- R_UNLESS(dst_pt.CanContain(dst_address, size, KMemoryState::SharedCode),
- ResultInvalidMemoryRegion);
-
- // Create a new page group.
- KPageGroup pg{system.Kernel(), dst_pt.GetBlockInfoManager()};
- R_TRY(src_pt.MakeAndOpenPageGroup(
- std::addressof(pg), src_address, size / PageSize, KMemoryState::FlagCanMapProcess,
- KMemoryState::FlagCanMapProcess, KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::All, KMemoryAttribute::None));
-
- // Map the group.
- R_TRY(dst_pt.MapPages(dst_address, pg, KMemoryState::SharedCode,
- KMemoryPermission::UserReadWrite));
-
- return ResultSuccess;
-}
-
-static Result UnmapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
- VAddr src_address, u64 size) {
- LOG_TRACE(Kernel_SVC,
- "called, dst_address=0x{:X}, process_handle=0x{:X}, src_address=0x{:X}, size=0x{:X}",
- dst_address, process_handle, src_address, size);
-
- // Validate the address/size.
- R_UNLESS(Common::IsAligned(dst_address, PageSize), ResultInvalidAddress);
- R_UNLESS(Common::IsAligned(src_address, PageSize), ResultInvalidAddress);
- R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
- R_UNLESS(size > 0, ResultInvalidSize);
- R_UNLESS((dst_address < dst_address + size), ResultInvalidCurrentMemory);
- R_UNLESS((src_address < src_address + size), ResultInvalidCurrentMemory);
-
- // Get the processes.
- KProcess* dst_process = system.CurrentProcess();
- KScopedAutoObject src_process =
- dst_process->GetHandleTable().GetObjectWithoutPseudoHandle<KProcess>(process_handle);
- R_UNLESS(src_process.IsNotNull(), ResultInvalidHandle);
-
- // Get the page tables.
- auto& dst_pt = dst_process->PageTable();
- auto& src_pt = src_process->PageTable();
-
- // Validate that the mapping is in range.
- R_UNLESS(src_pt.Contains(src_address, size), ResultInvalidCurrentMemory);
- R_UNLESS(dst_pt.CanContain(dst_address, size, KMemoryState::SharedCode),
- ResultInvalidMemoryRegion);
-
- // Unmap the memory.
- R_TRY(dst_pt.UnmapProcessMemory(dst_address, size, src_pt, src_address));
-
- return ResultSuccess;
-}
-
-static Result CreateCodeMemory(Core::System& system, Handle* out, VAddr address, size_t size) {
- LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, size=0x{:X}", address, size);
-
- // Get kernel instance.
- auto& kernel = system.Kernel();
-
- // Validate address / size.
- R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
- R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
- R_UNLESS(size > 0, ResultInvalidSize);
- R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
-
-    // Create the code memory.
-    KCodeMemory* code_mem = KCodeMemory::Create(kernel);
- R_UNLESS(code_mem != nullptr, ResultOutOfResource);
-
- // Verify that the region is in range.
- R_UNLESS(system.CurrentProcess()->PageTable().Contains(address, size),
- ResultInvalidCurrentMemory);
-
- // Initialize the code memory.
- R_TRY(code_mem->Initialize(system.DeviceMemory(), address, size));
-
- // Register the code memory.
- KCodeMemory::Register(kernel, code_mem);
-
- // Add the code memory to the handle table.
- R_TRY(system.CurrentProcess()->GetHandleTable().Add(out, code_mem));
-
- code_mem->Close();
-
- return ResultSuccess;
-}
-
-static Result CreateCodeMemory32(Core::System& system, Handle* out, u32 address, u32 size) {
- return CreateCodeMemory(system, out, address, size);
-}
-
-static Result ControlCodeMemory(Core::System& system, Handle code_memory_handle, u32 operation,
- VAddr address, size_t size, Svc::MemoryPermission perm) {
- LOG_TRACE(Kernel_SVC,
- "called, code_memory_handle=0x{:X}, operation=0x{:X}, address=0x{:X}, size=0x{:X}, "
- "permission=0x{:X}",
- code_memory_handle, operation, address, size, perm);
-
- // Validate the address / size.
- R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
- R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
- R_UNLESS(size > 0, ResultInvalidSize);
- R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
-
- // Get the code memory from its handle.
- KScopedAutoObject code_mem =
- system.CurrentProcess()->GetHandleTable().GetObject<KCodeMemory>(code_memory_handle);
- R_UNLESS(code_mem.IsNotNull(), ResultInvalidHandle);
-
- // NOTE: Here, Atmosphere extends the SVC to allow code memory operations on one's own process.
- // This enables homebrew usage of these SVCs for JIT.
-
- // Perform the operation.
- switch (static_cast<CodeMemoryOperation>(operation)) {
- case CodeMemoryOperation::Map: {
- // Check that the region is in range.
- R_UNLESS(
- system.CurrentProcess()->PageTable().CanContain(address, size, KMemoryState::CodeOut),
- ResultInvalidMemoryRegion);
-
- // Check the memory permission.
- R_UNLESS(IsValidMapCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission);
-
- // Map the memory.
- R_TRY(code_mem->Map(address, size));
- } break;
- case CodeMemoryOperation::Unmap: {
- // Check that the region is in range.
- R_UNLESS(
- system.CurrentProcess()->PageTable().CanContain(address, size, KMemoryState::CodeOut),
- ResultInvalidMemoryRegion);
-
- // Check the memory permission.
- R_UNLESS(IsValidUnmapCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission);
-
- // Unmap the memory.
- R_TRY(code_mem->Unmap(address, size));
- } break;
- case CodeMemoryOperation::MapToOwner: {
- // Check that the region is in range.
- R_UNLESS(code_mem->GetOwner()->PageTable().CanContain(address, size,
- KMemoryState::GeneratedCode),
- ResultInvalidMemoryRegion);
-
- // Check the memory permission.
- R_UNLESS(IsValidMapToOwnerCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission);
-
- // Map the memory to its owner.
- R_TRY(code_mem->MapToOwner(address, size, perm));
- } break;
- case CodeMemoryOperation::UnmapFromOwner: {
- // Check that the region is in range.
- R_UNLESS(code_mem->GetOwner()->PageTable().CanContain(address, size,
- KMemoryState::GeneratedCode),
- ResultInvalidMemoryRegion);
-
- // Check the memory permission.
- R_UNLESS(IsValidUnmapFromOwnerCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission);
-
- // Unmap the memory from its owner.
- R_TRY(code_mem->UnmapFromOwner(address, size));
- } break;
- default:
- return ResultInvalidEnumValue;
- }
-
- return ResultSuccess;
-}
-
-static Result ControlCodeMemory32(Core::System& system, Handle code_memory_handle, u32 operation,
- u64 address, u64 size, Svc::MemoryPermission perm) {
- return ControlCodeMemory(system, code_memory_handle, operation, address, size, perm);
-}
-
-static Result QueryProcessMemory(Core::System& system, VAddr memory_info_address,
- VAddr page_info_address, Handle process_handle, VAddr address) {
- LOG_TRACE(Kernel_SVC, "called process=0x{:08X} address={:X}", process_handle, address);
- const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
- KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle);
- if (process.IsNull()) {
- LOG_ERROR(Kernel_SVC, "Process handle does not exist, process_handle=0x{:08X}",
- process_handle);
- return ResultInvalidHandle;
- }
-
- auto& memory{system.Memory()};
- const auto memory_info{process->PageTable().QueryInfo(address).GetSvcMemoryInfo()};
-
- memory.Write64(memory_info_address + 0x00, memory_info.base_address);
- memory.Write64(memory_info_address + 0x08, memory_info.size);
- memory.Write32(memory_info_address + 0x10, static_cast<u32>(memory_info.state) & 0xff);
- memory.Write32(memory_info_address + 0x14, static_cast<u32>(memory_info.attribute));
- memory.Write32(memory_info_address + 0x18, static_cast<u32>(memory_info.permission));
- memory.Write32(memory_info_address + 0x1c, memory_info.ipc_count);
- memory.Write32(memory_info_address + 0x20, memory_info.device_count);
- memory.Write32(memory_info_address + 0x24, 0);
-
- // Page info appears to be currently unused by the kernel and is always set to zero.
- memory.Write32(page_info_address, 0);
-
- return ResultSuccess;
-}
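
// [Editor's note] QueryProcessMemory above serializes the MemoryInfo fields one at a time at
// fixed offsets. A sketch of the guest-visible layout those offsets imply; the struct is
// illustrative and not yuzu's actual Svc::MemoryInfo declaration:
#include <cstdint>

struct MemoryInfoLayout {
    uint64_t base_address; // +0x00
    uint64_t size;         // +0x08
    uint32_t state;        // +0x10 (low 8 bits of the memory state)
    uint32_t attribute;    // +0x14
    uint32_t permission;   // +0x18
    uint32_t ipc_count;    // +0x1c
    uint32_t device_count; // +0x20
    uint32_t padding;      // +0x24
};
static_assert(sizeof(MemoryInfoLayout) == 0x28, "must match the offsets written above");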
-
-static Result QueryMemory(Core::System& system, VAddr memory_info_address, VAddr page_info_address,
- VAddr query_address) {
- LOG_TRACE(Kernel_SVC,
- "called, memory_info_address=0x{:016X}, page_info_address=0x{:016X}, "
- "query_address=0x{:016X}",
- memory_info_address, page_info_address, query_address);
-
- return QueryProcessMemory(system, memory_info_address, page_info_address, CurrentProcess,
- query_address);
-}
-
-static Result QueryMemory32(Core::System& system, u32 memory_info_address, u32 page_info_address,
- u32 query_address) {
- return QueryMemory(system, memory_info_address, page_info_address, query_address);
-}
-
-static Result MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
- u64 src_address, u64 size) {
- LOG_DEBUG(Kernel_SVC,
- "called. process_handle=0x{:08X}, dst_address=0x{:016X}, "
- "src_address=0x{:016X}, size=0x{:016X}",
- process_handle, dst_address, src_address, size);
-
- if (!Common::Is4KBAligned(src_address)) {
- LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).",
- src_address);
- return ResultInvalidAddress;
- }
-
- if (!Common::Is4KBAligned(dst_address)) {
- LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).",
- dst_address);
- return ResultInvalidAddress;
- }
-
- if (size == 0 || !Common::Is4KBAligned(size)) {
- LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X})", size);
- return ResultInvalidSize;
- }
-
- if (!IsValidAddressRange(dst_address, size)) {
- LOG_ERROR(Kernel_SVC,
- "Destination address range overflows the address space (dst_address=0x{:016X}, "
- "size=0x{:016X}).",
- dst_address, size);
- return ResultInvalidCurrentMemory;
- }
-
- if (!IsValidAddressRange(src_address, size)) {
- LOG_ERROR(Kernel_SVC,
- "Source address range overflows the address space (src_address=0x{:016X}, "
- "size=0x{:016X}).",
- src_address, size);
- return ResultInvalidCurrentMemory;
- }
-
- const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
- KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle);
- if (process.IsNull()) {
- LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).",
- process_handle);
- return ResultInvalidHandle;
- }
-
- auto& page_table = process->PageTable();
- if (!page_table.IsInsideAddressSpace(src_address, size)) {
- LOG_ERROR(Kernel_SVC,
- "Source address range is not within the address space (src_address=0x{:016X}, "
- "size=0x{:016X}).",
- src_address, size);
- return ResultInvalidCurrentMemory;
- }
-
- if (!page_table.IsInsideASLRRegion(dst_address, size)) {
- LOG_ERROR(Kernel_SVC,
- "Destination address range is not within the ASLR region (dst_address=0x{:016X}, "
- "size=0x{:016X}).",
- dst_address, size);
- return ResultInvalidMemoryRegion;
- }
-
- return page_table.MapCodeMemory(dst_address, src_address, size);
-}
-
-static Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
- u64 src_address, u64 size) {
- LOG_DEBUG(Kernel_SVC,
- "called. process_handle=0x{:08X}, dst_address=0x{:016X}, src_address=0x{:016X}, "
- "size=0x{:016X}",
- process_handle, dst_address, src_address, size);
-
- if (!Common::Is4KBAligned(dst_address)) {
- LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).",
- dst_address);
- return ResultInvalidAddress;
- }
-
- if (!Common::Is4KBAligned(src_address)) {
- LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).",
- src_address);
- return ResultInvalidAddress;
- }
-
- if (size == 0 || !Common::Is4KBAligned(size)) {
- LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X}).", size);
- return ResultInvalidSize;
- }
-
- if (!IsValidAddressRange(dst_address, size)) {
- LOG_ERROR(Kernel_SVC,
- "Destination address range overflows the address space (dst_address=0x{:016X}, "
- "size=0x{:016X}).",
- dst_address, size);
- return ResultInvalidCurrentMemory;
- }
-
- if (!IsValidAddressRange(src_address, size)) {
- LOG_ERROR(Kernel_SVC,
- "Source address range overflows the address space (src_address=0x{:016X}, "
- "size=0x{:016X}).",
- src_address, size);
- return ResultInvalidCurrentMemory;
- }
-
- const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
- KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle);
- if (process.IsNull()) {
- LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).",
- process_handle);
- return ResultInvalidHandle;
- }
-
- auto& page_table = process->PageTable();
- if (!page_table.IsInsideAddressSpace(src_address, size)) {
- LOG_ERROR(Kernel_SVC,
- "Source address range is not within the address space (src_address=0x{:016X}, "
- "size=0x{:016X}).",
- src_address, size);
- return ResultInvalidCurrentMemory;
- }
-
- if (!page_table.IsInsideASLRRegion(dst_address, size)) {
- LOG_ERROR(Kernel_SVC,
- "Destination address range is not within the ASLR region (dst_address=0x{:016X}, "
- "size=0x{:016X}).",
- dst_address, size);
- return ResultInvalidMemoryRegion;
- }
-
- return page_table.UnmapCodeMemory(dst_address, src_address, size,
- KPageTable::ICacheInvalidationStrategy::InvalidateAll);
-}
-
-/// Exits the current process
-static void ExitProcess(Core::System& system) {
- auto* current_process = system.Kernel().CurrentProcess();
-
- LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID());
- ASSERT_MSG(current_process->GetState() == KProcess::State::Running,
- "Process has already exited");
-
- system.Exit();
-}
-
-static void ExitProcess32(Core::System& system) {
- ExitProcess(system);
-}
-
-namespace {
-
-constexpr bool IsValidVirtualCoreId(int32_t core_id) {
- return (0 <= core_id && core_id < static_cast<int32_t>(Core::Hardware::NUM_CPU_CORES));
-}
-
-} // Anonymous namespace
-
-/// Creates a new thread
-static Result CreateThread(Core::System& system, Handle* out_handle, VAddr entry_point, u64 arg,
- VAddr stack_bottom, u32 priority, s32 core_id) {
- LOG_DEBUG(Kernel_SVC,
- "called entry_point=0x{:08X}, arg=0x{:08X}, stack_bottom=0x{:08X}, "
- "priority=0x{:08X}, core_id=0x{:08X}",
- entry_point, arg, stack_bottom, priority, core_id);
-
- // Adjust core id, if it's the default magic.
- auto& kernel = system.Kernel();
- auto& process = *kernel.CurrentProcess();
- if (core_id == IdealCoreUseProcessValue) {
- core_id = process.GetIdealCoreId();
- }
-
- // Validate arguments.
- if (!IsValidVirtualCoreId(core_id)) {
- LOG_ERROR(Kernel_SVC, "Invalid Core ID specified (id={})", core_id);
- return ResultInvalidCoreId;
- }
- if (((1ULL << core_id) & process.GetCoreMask()) == 0) {
- LOG_ERROR(Kernel_SVC, "Core ID doesn't fall within allowable cores (id={})", core_id);
- return ResultInvalidCoreId;
- }
-
- if (HighestThreadPriority > priority || priority > LowestThreadPriority) {
- LOG_ERROR(Kernel_SVC, "Invalid priority specified (priority={})", priority);
- return ResultInvalidPriority;
- }
- if (!process.CheckThreadPriority(priority)) {
- LOG_ERROR(Kernel_SVC, "Invalid allowable thread priority (priority={})", priority);
- return ResultInvalidPriority;
- }
-
- // Reserve a new thread from the process resource limit (waiting up to 100ms).
- KScopedResourceReservation thread_reservation(
- kernel.CurrentProcess(), LimitableResource::ThreadCountMax, 1,
- system.CoreTiming().GetGlobalTimeNs().count() + 100000000);
- if (!thread_reservation.Succeeded()) {
- LOG_ERROR(Kernel_SVC, "Could not reserve a new thread");
- return ResultLimitReached;
- }
-
- // Create the thread.
- KThread* thread = KThread::Create(kernel);
- if (!thread) {
- LOG_ERROR(Kernel_SVC, "Unable to create new threads. Thread creation limit reached.");
- return ResultOutOfResource;
- }
- SCOPE_EXIT({ thread->Close(); });
-
- // Initialize the thread.
- {
- KScopedLightLock lk{process.GetStateLock()};
- R_TRY(KThread::InitializeUserThread(system, thread, entry_point, arg, stack_bottom,
- priority, core_id, &process));
- }
-
- // Set the thread name for debugging purposes.
- thread->SetName(fmt::format("thread[entry_point={:X}, handle={:X}]", entry_point, *out_handle));
-
- // Commit the thread reservation.
- thread_reservation.Commit();
-
- // Register the new thread.
- KThread::Register(kernel, thread);
-
- // Add the thread to the handle table.
- R_TRY(process.GetHandleTable().Add(out_handle, thread));
-
- return ResultSuccess;
-}
-
-static Result CreateThread32(Core::System& system, Handle* out_handle, u32 priority,
- u32 entry_point, u32 arg, u32 stack_top, s32 processor_id) {
- return CreateThread(system, out_handle, entry_point, arg, stack_top, priority, processor_id);
-}
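
// [Editor's note] CreateThread above (and CreateTransferMemory below) reserves a slot from the
// process resource limit first and only Commit()s it once initialization succeeds; an early
// R_TRY failure lets the reservation roll back automatically. A minimal sketch of that
// reserve/commit/rollback pattern using a plain atomic counter, not the kernel's
// KScopedResourceReservation:
#include <atomic>

class ScopedReservation {
public:
    explicit ScopedReservation(std::atomic<int>& free_slots) : free_slots_{free_slots} {
        reserved_ = free_slots_.fetch_sub(1) > 0; // try to take one slot
        if (!reserved_) {
            free_slots_.fetch_add(1); // nothing was available: undo the decrement
        }
    }
    ~ScopedReservation() {
        if (reserved_ && !committed_) {
            free_slots_.fetch_add(1); // never committed: give the slot back
        }
    }
    bool Succeeded() const { return reserved_; }
    void Commit() { committed_ = true; } // keep the slot permanently

private:
    std::atomic<int>& free_slots_;
    bool reserved_{};
    bool committed_{};
};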
-
-/// Starts the thread for the provided handle
-static Result StartThread(Core::System& system, Handle thread_handle) {
- LOG_DEBUG(Kernel_SVC, "called thread=0x{:08X}", thread_handle);
-
- // Get the thread from its handle.
- KScopedAutoObject thread =
- system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle);
- R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
-
- // Try to start the thread.
- R_TRY(thread->Run());
-
- // If we succeeded, persist a reference to the thread.
- thread->Open();
- system.Kernel().RegisterInUseObject(thread.GetPointerUnsafe());
-
- return ResultSuccess;
-}
-
-static Result StartThread32(Core::System& system, Handle thread_handle) {
- return StartThread(system, thread_handle);
-}
-
-/// Exits the current thread
-static void ExitThread(Core::System& system) {
- LOG_DEBUG(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC());
-
- auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
- system.GlobalSchedulerContext().RemoveThread(current_thread);
- current_thread->Exit();
- system.Kernel().UnregisterInUseObject(current_thread);
-}
-
-static void ExitThread32(Core::System& system) {
- ExitThread(system);
-}
-
-/// Sleep the current thread
-static void SleepThread(Core::System& system, s64 nanoseconds) {
- auto& kernel = system.Kernel();
- const auto yield_type = static_cast<Svc::YieldType>(nanoseconds);
-
- LOG_TRACE(Kernel_SVC, "called nanoseconds={}", nanoseconds);
-
-    // When the input timeout is positive, sleep.
- if (nanoseconds > 0) {
- // Convert the timeout from nanoseconds to ticks.
- // NOTE: Nintendo does not use this conversion logic in WaitSynchronization...
-
- // Sleep.
- // NOTE: Nintendo does not check the result of this sleep.
- static_cast<void>(GetCurrentThread(kernel).Sleep(nanoseconds));
- } else if (yield_type == Svc::YieldType::WithoutCoreMigration) {
- KScheduler::YieldWithoutCoreMigration(kernel);
- } else if (yield_type == Svc::YieldType::WithCoreMigration) {
- KScheduler::YieldWithCoreMigration(kernel);
- } else if (yield_type == Svc::YieldType::ToAnyThread) {
- KScheduler::YieldToAnyThread(kernel);
- } else {
- // Nintendo does nothing at all if an otherwise invalid value is passed.
- ASSERT_MSG(false, "Unimplemented sleep yield type '{:016X}'!", nanoseconds);
- }
-}
-
-static void SleepThread32(Core::System& system, u32 nanoseconds_low, u32 nanoseconds_high) {
- const auto nanoseconds = static_cast<s64>(u64{nanoseconds_low} | (u64{nanoseconds_high} << 32));
- SleepThread(system, nanoseconds);
-}
-
-/// Wait process wide key atomic
-static Result WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_key, u32 tag,
- s64 timeout_ns) {
- LOG_TRACE(Kernel_SVC, "called address={:X}, cv_key={:X}, tag=0x{:08X}, timeout_ns={}", address,
- cv_key, tag, timeout_ns);
-
- // Validate input.
- if (IsKernelAddress(address)) {
- LOG_ERROR(Kernel_SVC, "Attempted to wait on kernel address (address={:08X})", address);
- return ResultInvalidCurrentMemory;
- }
- if (!Common::IsAligned(address, sizeof(s32))) {
- LOG_ERROR(Kernel_SVC, "Address must be 4 byte aligned (address={:08X})", address);
- return ResultInvalidAddress;
- }
-
- // Convert timeout from nanoseconds to ticks.
- s64 timeout{};
- if (timeout_ns > 0) {
- const s64 offset_tick(timeout_ns);
- if (offset_tick > 0) {
- timeout = offset_tick + 2;
- if (timeout <= 0) {
- timeout = std::numeric_limits<s64>::max();
- }
- } else {
- timeout = std::numeric_limits<s64>::max();
- }
- } else {
- timeout = timeout_ns;
- }
-
- // Wait on the condition variable.
- return system.Kernel().CurrentProcess()->WaitConditionVariable(
- address, Common::AlignDown(cv_key, sizeof(u32)), tag, timeout);
-}
-
-static Result WaitProcessWideKeyAtomic32(Core::System& system, u32 address, u32 cv_key, u32 tag,
- u32 timeout_ns_low, u32 timeout_ns_high) {
- const auto timeout_ns = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32));
- return WaitProcessWideKeyAtomic(system, address, cv_key, tag, timeout_ns);
-}
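
// [Editor's note] WaitProcessWideKeyAtomic above and WaitForAddress below share the same
// timeout handling: non-positive values pass through unchanged, while positive values receive
// a small offset and saturate at the maximum s64 value. A minimal sketch of that conversion,
// written to check for overflow before adding rather than after:
#include <cstdint>
#include <limits>

static int64_t ConvertTimeout(int64_t timeout_ns) {
    if (timeout_ns <= 0) {
        return timeout_ns; // zero and negative timeouts are passed through unchanged
    }
    if (timeout_ns > std::numeric_limits<int64_t>::max() - 2) {
        return std::numeric_limits<int64_t>::max(); // would overflow: wait as long as possible
    }
    return timeout_ns + 2; // pad by a small offset, as the SVCs above and below do
}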
-
-/// Signal process wide key
-static void SignalProcessWideKey(Core::System& system, VAddr cv_key, s32 count) {
- LOG_TRACE(Kernel_SVC, "called, cv_key=0x{:X}, count=0x{:08X}", cv_key, count);
-
- // Signal the condition variable.
- return system.Kernel().CurrentProcess()->SignalConditionVariable(
- Common::AlignDown(cv_key, sizeof(u32)), count);
-}
-
-static void SignalProcessWideKey32(Core::System& system, u32 cv_key, s32 count) {
- SignalProcessWideKey(system, cv_key, count);
-}
-
-namespace {
-
-constexpr bool IsValidSignalType(Svc::SignalType type) {
- switch (type) {
- case Svc::SignalType::Signal:
- case Svc::SignalType::SignalAndIncrementIfEqual:
- case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
- return true;
- default:
- return false;
- }
-}
-
-constexpr bool IsValidArbitrationType(Svc::ArbitrationType type) {
- switch (type) {
- case Svc::ArbitrationType::WaitIfLessThan:
- case Svc::ArbitrationType::DecrementAndWaitIfLessThan:
- case Svc::ArbitrationType::WaitIfEqual:
- return true;
- default:
- return false;
- }
-}
-
-} // namespace
-
-// Wait for an address (via Address Arbiter)
-static Result WaitForAddress(Core::System& system, VAddr address, Svc::ArbitrationType arb_type,
- s32 value, s64 timeout_ns) {
- LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, arb_type=0x{:X}, value=0x{:X}, timeout_ns={}",
- address, arb_type, value, timeout_ns);
-
- // Validate input.
- if (IsKernelAddress(address)) {
- LOG_ERROR(Kernel_SVC, "Attempting to wait on kernel address (address={:08X})", address);
- return ResultInvalidCurrentMemory;
- }
- if (!Common::IsAligned(address, sizeof(s32))) {
- LOG_ERROR(Kernel_SVC, "Wait address must be 4 byte aligned (address={:08X})", address);
- return ResultInvalidAddress;
- }
- if (!IsValidArbitrationType(arb_type)) {
- LOG_ERROR(Kernel_SVC, "Invalid arbitration type specified (type={})", arb_type);
- return ResultInvalidEnumValue;
- }
-
- // Convert timeout from nanoseconds to ticks.
- s64 timeout{};
- if (timeout_ns > 0) {
- const s64 offset_tick(timeout_ns);
- if (offset_tick > 0) {
- timeout = offset_tick + 2;
- if (timeout <= 0) {
- timeout = std::numeric_limits<s64>::max();
- }
- } else {
- timeout = std::numeric_limits<s64>::max();
- }
- } else {
- timeout = timeout_ns;
- }
-
- return system.Kernel().CurrentProcess()->WaitAddressArbiter(address, arb_type, value, timeout);
-}
-
-static Result WaitForAddress32(Core::System& system, u32 address, Svc::ArbitrationType arb_type,
- s32 value, u32 timeout_ns_low, u32 timeout_ns_high) {
- const auto timeout = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32));
- return WaitForAddress(system, address, arb_type, value, timeout);
-}
-
-// Signals to an address (via Address Arbiter)
-static Result SignalToAddress(Core::System& system, VAddr address, Svc::SignalType signal_type,
- s32 value, s32 count) {
- LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, signal_type=0x{:X}, value=0x{:X}, count=0x{:X}",
- address, signal_type, value, count);
-
- // Validate input.
- if (IsKernelAddress(address)) {
- LOG_ERROR(Kernel_SVC, "Attempting to signal to a kernel address (address={:08X})", address);
- return ResultInvalidCurrentMemory;
- }
- if (!Common::IsAligned(address, sizeof(s32))) {
- LOG_ERROR(Kernel_SVC, "Signaled address must be 4 byte aligned (address={:08X})", address);
- return ResultInvalidAddress;
- }
- if (!IsValidSignalType(signal_type)) {
- LOG_ERROR(Kernel_SVC, "Invalid signal type specified (type={})", signal_type);
- return ResultInvalidEnumValue;
- }
-
- return system.Kernel().CurrentProcess()->SignalAddressArbiter(address, signal_type, value,
- count);
-}
-
-static void SynchronizePreemptionState(Core::System& system) {
- auto& kernel = system.Kernel();
-
- // Lock the scheduler.
- KScopedSchedulerLock sl{kernel};
-
- // If the current thread is pinned, unpin it.
- KProcess* cur_process = system.Kernel().CurrentProcess();
- const auto core_id = GetCurrentCoreId(kernel);
-
- if (cur_process->GetPinnedThread(core_id) == GetCurrentThreadPointer(kernel)) {
- // Clear the current thread's interrupt flag.
- GetCurrentThread(kernel).ClearInterruptFlag();
-
- // Unpin the current thread.
- cur_process->UnpinCurrentThread(core_id);
- }
-}
-
-static Result SignalToAddress32(Core::System& system, u32 address, Svc::SignalType signal_type,
- s32 value, s32 count) {
- return SignalToAddress(system, address, signal_type, value, count);
-}
-
-static void KernelDebug([[maybe_unused]] Core::System& system,
- [[maybe_unused]] u32 kernel_debug_type, [[maybe_unused]] u64 param1,
- [[maybe_unused]] u64 param2, [[maybe_unused]] u64 param3) {
- // Intentionally do nothing, as this does nothing in released kernel binaries.
-}
-
-static void ChangeKernelTraceState([[maybe_unused]] Core::System& system,
- [[maybe_unused]] u32 trace_state) {
- // Intentionally do nothing, as this does nothing in released kernel binaries.
-}
-
-/// This returns the total CPU ticks elapsed since the CPU was powered-on
-static u64 GetSystemTick(Core::System& system) {
- LOG_TRACE(Kernel_SVC, "called");
-
- auto& core_timing = system.CoreTiming();
-
- // Returns the value of cntpct_el0 (https://switchbrew.org/wiki/SVC#svcGetSystemTick)
- const u64 result{core_timing.GetClockTicks()};
-
- if (!system.Kernel().IsMulticore()) {
- core_timing.AddTicks(400U);
- }
-
- return result;
-}
-
-static void GetSystemTick32(Core::System& system, u32* time_low, u32* time_high) {
- const auto time = GetSystemTick(system);
- *time_low = static_cast<u32>(time);
- *time_high = static_cast<u32>(time >> 32);
-}
-
-/// Close a handle
-static Result CloseHandle(Core::System& system, Handle handle) {
- LOG_TRACE(Kernel_SVC, "Closing handle 0x{:08X}", handle);
-
- // Remove the handle.
- R_UNLESS(system.Kernel().CurrentProcess()->GetHandleTable().Remove(handle),
- ResultInvalidHandle);
-
- return ResultSuccess;
-}
-
-static Result CloseHandle32(Core::System& system, Handle handle) {
- return CloseHandle(system, handle);
-}
-
-/// Clears the signaled state of an event or process.
-static Result ResetSignal(Core::System& system, Handle handle) {
- LOG_DEBUG(Kernel_SVC, "called handle 0x{:08X}", handle);
-
- // Get the current handle table.
- const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
-
- // Try to reset as readable event.
- {
- KScopedAutoObject readable_event = handle_table.GetObject<KReadableEvent>(handle);
- if (readable_event.IsNotNull()) {
- return readable_event->Reset();
- }
- }
-
- // Try to reset as process.
- {
- KScopedAutoObject process = handle_table.GetObject<KProcess>(handle);
- if (process.IsNotNull()) {
- return process->Reset();
- }
- }
-
- LOG_ERROR(Kernel_SVC, "invalid handle (0x{:08X})", handle);
-
- return ResultInvalidHandle;
-}
-
-static Result ResetSignal32(Core::System& system, Handle handle) {
- return ResetSignal(system, handle);
-}
-
-namespace {
-
-constexpr bool IsValidTransferMemoryPermission(MemoryPermission perm) {
- switch (perm) {
- case MemoryPermission::None:
- case MemoryPermission::Read:
- case MemoryPermission::ReadWrite:
- return true;
- default:
- return false;
- }
-}
-
-} // Anonymous namespace
-
-/// Creates a TransferMemory object
-static Result CreateTransferMemory(Core::System& system, Handle* out, VAddr address, u64 size,
- MemoryPermission map_perm) {
- auto& kernel = system.Kernel();
-
- // Validate the size.
- R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
- R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
- R_UNLESS(size > 0, ResultInvalidSize);
- R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
-
- // Validate the permissions.
- R_UNLESS(IsValidTransferMemoryPermission(map_perm), ResultInvalidNewMemoryPermission);
-
- // Get the current process and handle table.
- auto& process = *kernel.CurrentProcess();
- auto& handle_table = process.GetHandleTable();
-
- // Reserve a new transfer memory from the process resource limit.
- KScopedResourceReservation trmem_reservation(kernel.CurrentProcess(),
- LimitableResource::TransferMemoryCountMax);
- R_UNLESS(trmem_reservation.Succeeded(), ResultLimitReached);
-
- // Create the transfer memory.
- KTransferMemory* trmem = KTransferMemory::Create(kernel);
- R_UNLESS(trmem != nullptr, ResultOutOfResource);
-
- // Ensure the only reference is in the handle table when we're done.
- SCOPE_EXIT({ trmem->Close(); });
-
- // Ensure that the region is in range.
- R_UNLESS(process.PageTable().Contains(address, size), ResultInvalidCurrentMemory);
-
- // Initialize the transfer memory.
- R_TRY(trmem->Initialize(address, size, map_perm));
-
- // Commit the reservation.
- trmem_reservation.Commit();
-
- // Register the transfer memory.
- KTransferMemory::Register(kernel, trmem);
-
- // Add the transfer memory to the handle table.
- R_TRY(handle_table.Add(out, trmem));
-
- return ResultSuccess;
-}
-
-static Result CreateTransferMemory32(Core::System& system, Handle* out, u32 address, u32 size,
- MemoryPermission map_perm) {
- return CreateTransferMemory(system, out, address, size, map_perm);
-}
-
-static Result GetThreadCoreMask(Core::System& system, Handle thread_handle, s32* out_core_id,
- u64* out_affinity_mask) {
- LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle);
-
- // Get the thread from its handle.
- KScopedAutoObject thread =
- system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle);
- R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
-
- // Get the core mask.
- R_TRY(thread->GetCoreMask(out_core_id, out_affinity_mask));
-
- return ResultSuccess;
-}
-
-static Result GetThreadCoreMask32(Core::System& system, Handle thread_handle, s32* out_core_id,
- u32* out_affinity_mask_low, u32* out_affinity_mask_high) {
- u64 out_affinity_mask{};
- const auto result = GetThreadCoreMask(system, thread_handle, out_core_id, &out_affinity_mask);
- *out_affinity_mask_high = static_cast<u32>(out_affinity_mask >> 32);
- *out_affinity_mask_low = static_cast<u32>(out_affinity_mask);
- return result;
-}
-
-static Result SetThreadCoreMask(Core::System& system, Handle thread_handle, s32 core_id,
- u64 affinity_mask) {
- // Determine the core id/affinity mask.
- if (core_id == IdealCoreUseProcessValue) {
- core_id = system.Kernel().CurrentProcess()->GetIdealCoreId();
- affinity_mask = (1ULL << core_id);
- } else {
- // Validate the affinity mask.
- const u64 process_core_mask = system.Kernel().CurrentProcess()->GetCoreMask();
- R_UNLESS((affinity_mask | process_core_mask) == process_core_mask, ResultInvalidCoreId);
- R_UNLESS(affinity_mask != 0, ResultInvalidCombination);
-
- // Validate the core id.
- if (IsValidVirtualCoreId(core_id)) {
- R_UNLESS(((1ULL << core_id) & affinity_mask) != 0, ResultInvalidCombination);
- } else {
- R_UNLESS(core_id == IdealCoreNoUpdate || core_id == IdealCoreDontCare,
- ResultInvalidCoreId);
- }
- }
-
- // Get the thread from its handle.
- KScopedAutoObject thread =
- system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle);
- R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
-
- // Set the core mask.
- R_TRY(thread->SetCoreMask(core_id, affinity_mask));
-
- return ResultSuccess;
-}
-
-static Result SetThreadCoreMask32(Core::System& system, Handle thread_handle, s32 core_id,
- u32 affinity_mask_low, u32 affinity_mask_high) {
- const auto affinity_mask = u64{affinity_mask_low} | (u64{affinity_mask_high} << 32);
- return SetThreadCoreMask(system, thread_handle, core_id, affinity_mask);
-}
-
-static Result SignalEvent(Core::System& system, Handle event_handle) {
- LOG_DEBUG(Kernel_SVC, "called, event_handle=0x{:08X}", event_handle);
-
- // Get the current handle table.
- const KHandleTable& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
-
- // Get the event.
- KScopedAutoObject event = handle_table.GetObject<KEvent>(event_handle);
- R_UNLESS(event.IsNotNull(), ResultInvalidHandle);
-
- return event->Signal();
-}
-
-static Result SignalEvent32(Core::System& system, Handle event_handle) {
- return SignalEvent(system, event_handle);
-}
-
-static Result ClearEvent(Core::System& system, Handle event_handle) {
- LOG_TRACE(Kernel_SVC, "called, event_handle=0x{:08X}", event_handle);
-
- // Get the current handle table.
- const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
-
- // Try to clear the writable event.
- {
- KScopedAutoObject event = handle_table.GetObject<KEvent>(event_handle);
- if (event.IsNotNull()) {
- return event->Clear();
- }
- }
-
- // Try to clear the readable event.
- {
- KScopedAutoObject readable_event = handle_table.GetObject<KReadableEvent>(event_handle);
- if (readable_event.IsNotNull()) {
- return readable_event->Clear();
- }
- }
-
- LOG_ERROR(Kernel_SVC, "Event handle does not exist, event_handle=0x{:08X}", event_handle);
-
- return ResultInvalidHandle;
-}
-
-static Result ClearEvent32(Core::System& system, Handle event_handle) {
- return ClearEvent(system, event_handle);
-}
-
-static Result CreateEvent(Core::System& system, Handle* out_write, Handle* out_read) {
- LOG_DEBUG(Kernel_SVC, "called");
-
- // Get the kernel reference and handle table.
- auto& kernel = system.Kernel();
- auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
-
- // Reserve a new event from the process resource limit
- KScopedResourceReservation event_reservation(kernel.CurrentProcess(),
- LimitableResource::EventCountMax);
- R_UNLESS(event_reservation.Succeeded(), ResultLimitReached);
-
- // Create a new event.
- KEvent* event = KEvent::Create(kernel);
- R_UNLESS(event != nullptr, ResultOutOfResource);
-
- // Initialize the event.
- event->Initialize(kernel.CurrentProcess());
-
- // Commit the thread reservation.
- event_reservation.Commit();
-
-    // Ensure that we clean up the event (and that its only references are in the handle table) on function end.
- SCOPE_EXIT({
- event->GetReadableEvent().Close();
- event->Close();
- });
-
- // Register the event.
- KEvent::Register(kernel, event);
-
- // Add the event to the handle table.
- R_TRY(handle_table.Add(out_write, event));
-
-    // Ensure that we maintain a clean handle state on exit.
- auto handle_guard = SCOPE_GUARD({ handle_table.Remove(*out_write); });
-
- // Add the readable event to the handle table.
- R_TRY(handle_table.Add(out_read, std::addressof(event->GetReadableEvent())));
-
- // We succeeded.
- handle_guard.Cancel();
- return ResultSuccess;
-}
-
-static Result CreateEvent32(Core::System& system, Handle* out_write, Handle* out_read) {
- return CreateEvent(system, out_write, out_read);
-}
-
-static Result GetProcessInfo(Core::System& system, u64* out, Handle process_handle, u32 type) {
- LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, type=0x{:X}", process_handle, type);
-
- const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
- KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle);
- if (process.IsNull()) {
- LOG_ERROR(Kernel_SVC, "Process handle does not exist, process_handle=0x{:08X}",
- process_handle);
- return ResultInvalidHandle;
- }
-
- const auto info_type = static_cast<ProcessInfoType>(type);
- if (info_type != ProcessInfoType::ProcessState) {
- LOG_ERROR(Kernel_SVC, "Expected info_type to be ProcessState but got {} instead", type);
- return ResultInvalidEnumValue;
- }
-
- *out = static_cast<u64>(process->GetState());
- return ResultSuccess;
-}
-
-static Result CreateResourceLimit(Core::System& system, Handle* out_handle) {
- LOG_DEBUG(Kernel_SVC, "called");
-
- // Create a new resource limit.
- auto& kernel = system.Kernel();
- KResourceLimit* resource_limit = KResourceLimit::Create(kernel);
- R_UNLESS(resource_limit != nullptr, ResultOutOfResource);
-
- // Ensure we don't leak a reference to the limit.
- SCOPE_EXIT({ resource_limit->Close(); });
-
- // Initialize the resource limit.
- resource_limit->Initialize(&system.CoreTiming());
-
- // Register the limit.
- KResourceLimit::Register(kernel, resource_limit);
-
- // Add the limit to the handle table.
- R_TRY(kernel.CurrentProcess()->GetHandleTable().Add(out_handle, resource_limit));
-
- return ResultSuccess;
-}
-
-static Result GetResourceLimitLimitValue(Core::System& system, u64* out_limit_value,
- Handle resource_limit_handle, LimitableResource which) {
- LOG_DEBUG(Kernel_SVC, "called, resource_limit_handle={:08X}, which={}", resource_limit_handle,
- which);
-
- // Validate the resource.
- R_UNLESS(IsValidResourceType(which), ResultInvalidEnumValue);
-
- // Get the resource limit.
- auto& kernel = system.Kernel();
- KScopedAutoObject resource_limit =
- kernel.CurrentProcess()->GetHandleTable().GetObject<KResourceLimit>(resource_limit_handle);
- R_UNLESS(resource_limit.IsNotNull(), ResultInvalidHandle);
-
- // Get the limit value.
- *out_limit_value = resource_limit->GetLimitValue(which);
-
- return ResultSuccess;
-}
-
-static Result GetResourceLimitCurrentValue(Core::System& system, u64* out_current_value,
- Handle resource_limit_handle, LimitableResource which) {
- LOG_DEBUG(Kernel_SVC, "called, resource_limit_handle={:08X}, which={}", resource_limit_handle,
- which);
-
- // Validate the resource.
- R_UNLESS(IsValidResourceType(which), ResultInvalidEnumValue);
-
- // Get the resource limit.
- auto& kernel = system.Kernel();
- KScopedAutoObject resource_limit =
- kernel.CurrentProcess()->GetHandleTable().GetObject<KResourceLimit>(resource_limit_handle);
- R_UNLESS(resource_limit.IsNotNull(), ResultInvalidHandle);
-
- // Get the current value.
- *out_current_value = resource_limit->GetCurrentValue(which);
-
- return ResultSuccess;
-}
-
-static Result SetResourceLimitLimitValue(Core::System& system, Handle resource_limit_handle,
- LimitableResource which, u64 limit_value) {
- LOG_DEBUG(Kernel_SVC, "called, resource_limit_handle={:08X}, which={}, limit_value={}",
- resource_limit_handle, which, limit_value);
-
- // Validate the resource.
- R_UNLESS(IsValidResourceType(which), ResultInvalidEnumValue);
-
- // Get the resource limit.
- auto& kernel = system.Kernel();
- KScopedAutoObject resource_limit =
- kernel.CurrentProcess()->GetHandleTable().GetObject<KResourceLimit>(resource_limit_handle);
- R_UNLESS(resource_limit.IsNotNull(), ResultInvalidHandle);
-
- // Set the limit value.
- R_TRY(resource_limit->SetLimitValue(which, limit_value));
-
- return ResultSuccess;
-}
-
-static Result GetProcessList(Core::System& system, u32* out_num_processes, VAddr out_process_ids,
- u32 out_process_ids_size) {
- LOG_DEBUG(Kernel_SVC, "called. out_process_ids=0x{:016X}, out_process_ids_size={}",
- out_process_ids, out_process_ids_size);
-
- // If the supplied size is negative or greater than INT32_MAX / sizeof(u64), bail.
- if ((out_process_ids_size & 0xF0000000) != 0) {
- LOG_ERROR(Kernel_SVC,
- "Supplied size outside [0, 0x0FFFFFFF] range. out_process_ids_size={}",
- out_process_ids_size);
- return ResultOutOfRange;
- }
-
- const auto& kernel = system.Kernel();
- const auto total_copy_size = out_process_ids_size * sizeof(u64);
-
- if (out_process_ids_size > 0 && !kernel.CurrentProcess()->PageTable().IsInsideAddressSpace(
- out_process_ids, total_copy_size)) {
- LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}",
- out_process_ids, out_process_ids + total_copy_size);
- return ResultInvalidCurrentMemory;
- }
-
- auto& memory = system.Memory();
- const auto& process_list = kernel.GetProcessList();
- const auto num_processes = process_list.size();
- const auto copy_amount = std::min(std::size_t{out_process_ids_size}, num_processes);
-
- for (std::size_t i = 0; i < copy_amount; ++i) {
- memory.Write64(out_process_ids, process_list[i]->GetProcessID());
- out_process_ids += sizeof(u64);
- }
-
- *out_num_processes = static_cast<u32>(num_processes);
- return ResultSuccess;
-}
-
-static Result GetThreadList(Core::System& system, u32* out_num_threads, VAddr out_thread_ids,
- u32 out_thread_ids_size, Handle debug_handle) {
- // TODO: Handle this case when debug events are supported.
- UNIMPLEMENTED_IF(debug_handle != InvalidHandle);
-
- LOG_DEBUG(Kernel_SVC, "called. out_thread_ids=0x{:016X}, out_thread_ids_size={}",
- out_thread_ids, out_thread_ids_size);
-
- // If the size is negative or larger than INT32_MAX / sizeof(u64)
- if ((out_thread_ids_size & 0xF0000000) != 0) {
- LOG_ERROR(Kernel_SVC, "Supplied size outside [0, 0x0FFFFFFF] range. size={}",
- out_thread_ids_size);
- return ResultOutOfRange;
- }
-
- auto* const current_process = system.Kernel().CurrentProcess();
- const auto total_copy_size = out_thread_ids_size * sizeof(u64);
-
- if (out_thread_ids_size > 0 &&
- !current_process->PageTable().IsInsideAddressSpace(out_thread_ids, total_copy_size)) {
- LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}",
- out_thread_ids, out_thread_ids + total_copy_size);
- return ResultInvalidCurrentMemory;
- }
-
- auto& memory = system.Memory();
- const auto& thread_list = current_process->GetThreadList();
- const auto num_threads = thread_list.size();
- const auto copy_amount = std::min(std::size_t{out_thread_ids_size}, num_threads);
-
- auto list_iter = thread_list.cbegin();
- for (std::size_t i = 0; i < copy_amount; ++i, ++list_iter) {
- memory.Write64(out_thread_ids, (*list_iter)->GetThreadID());
- out_thread_ids += sizeof(u64);
- }
-
- *out_num_threads = static_cast<u32>(num_threads);
- return ResultSuccess;
-}
-
-static Result FlushProcessDataCache32(Core::System& system, Handle process_handle, u64 address,
- u64 size) {
- // Validate address/size.
- R_UNLESS(size > 0, ResultInvalidSize);
- R_UNLESS(address == static_cast<uintptr_t>(address), ResultInvalidCurrentMemory);
- R_UNLESS(size == static_cast<size_t>(size), ResultInvalidCurrentMemory);
-
- // Get the process from its handle.
- KScopedAutoObject process =
- system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KProcess>(process_handle);
- R_UNLESS(process.IsNotNull(), ResultInvalidHandle);
-
- // Verify the region is within range.
- auto& page_table = process->PageTable();
- R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
-
- // Perform the operation.
- R_RETURN(system.Memory().FlushDataCache(*process, address, size));
-}
-
-namespace {
struct FunctionDef {
using Func = void(Core::System&);
@@ -2699,6 +18,7 @@ struct FunctionDef {
Func* func;
const char* name;
};
+
} // namespace
static const FunctionDef SVC_Table_32[] = {
diff --git a/src/core/hle/kernel/svc.h b/src/core/hle/kernel/svc.h
index 13f061b83..b599f9a3d 100644
--- a/src/core/hle/kernel/svc.h
+++ b/src/core/hle/kernel/svc.h
@@ -4,6 +4,8 @@
#pragma once
#include "common/common_types.h"
+#include "core/hle/kernel/svc_types.h"
+#include "core/hle/result.h"
namespace Core {
class System;
@@ -13,4 +15,158 @@ namespace Kernel::Svc {
void Call(Core::System& system, u32 immediate);
+Result SetHeapSize(Core::System& system, VAddr* out_address, u64 size);
+Result SetMemoryPermission(Core::System& system, VAddr address, u64 size, MemoryPermission perm);
+Result SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask, u32 attr);
+Result MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size);
+Result UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size);
+Result QueryMemory(Core::System& system, VAddr memory_info_address, VAddr page_info_address,
+ VAddr query_address);
+void ExitProcess(Core::System& system);
+Result CreateThread(Core::System& system, Handle* out_handle, VAddr entry_point, u64 arg,
+ VAddr stack_bottom, u32 priority, s32 core_id);
+Result StartThread(Core::System& system, Handle thread_handle);
+void ExitThread(Core::System& system);
+void SleepThread(Core::System& system, s64 nanoseconds);
+Result GetThreadPriority(Core::System& system, u32* out_priority, Handle handle);
+Result SetThreadPriority(Core::System& system, Handle thread_handle, u32 priority);
+Result GetThreadCoreMask(Core::System& system, Handle thread_handle, s32* out_core_id,
+ u64* out_affinity_mask);
+Result SetThreadCoreMask(Core::System& system, Handle thread_handle, s32 core_id,
+ u64 affinity_mask);
+u32 GetCurrentProcessorNumber(Core::System& system);
+Result SignalEvent(Core::System& system, Handle event_handle);
+Result ClearEvent(Core::System& system, Handle event_handle);
+Result MapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address, u64 size,
+ MemoryPermission map_perm);
+Result UnmapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address, u64 size);
+Result CreateTransferMemory(Core::System& system, Handle* out, VAddr address, u64 size,
+ MemoryPermission map_perm);
+Result CloseHandle(Core::System& system, Handle handle);
+Result ResetSignal(Core::System& system, Handle handle);
+Result WaitSynchronization(Core::System& system, s32* index, VAddr handles_address, s32 num_handles,
+ s64 nano_seconds);
+Result CancelSynchronization(Core::System& system, Handle handle);
+Result ArbitrateLock(Core::System& system, Handle thread_handle, VAddr address, u32 tag);
+Result ArbitrateUnlock(Core::System& system, VAddr address);
+Result WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_key, u32 tag,
+ s64 timeout_ns);
+void SignalProcessWideKey(Core::System& system, VAddr cv_key, s32 count);
+u64 GetSystemTick(Core::System& system);
+Result ConnectToNamedPort(Core::System& system, Handle* out, VAddr port_name_address);
+Result SendSyncRequest(Core::System& system, Handle handle);
+Result GetProcessId(Core::System& system, u64* out_process_id, Handle handle);
+Result GetThreadId(Core::System& system, u64* out_thread_id, Handle thread_handle);
+void Break(Core::System& system, u32 reason, u64 info1, u64 info2);
+void OutputDebugString(Core::System& system, VAddr address, u64 len);
+Result GetInfo(Core::System& system, u64* result, u64 info_id, Handle handle, u64 info_sub_id);
+Result MapPhysicalMemory(Core::System& system, VAddr addr, u64 size);
+Result UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size);
+Result GetResourceLimitLimitValue(Core::System& system, u64* out_limit_value,
+ Handle resource_limit_handle, LimitableResource which);
+Result GetResourceLimitCurrentValue(Core::System& system, u64* out_current_value,
+ Handle resource_limit_handle, LimitableResource which);
+Result SetThreadActivity(Core::System& system, Handle thread_handle,
+ ThreadActivity thread_activity);
+Result GetThreadContext(Core::System& system, VAddr out_context, Handle thread_handle);
+Result WaitForAddress(Core::System& system, VAddr address, ArbitrationType arb_type, s32 value,
+ s64 timeout_ns);
+Result SignalToAddress(Core::System& system, VAddr address, SignalType signal_type, s32 value,
+ s32 count);
+void SynchronizePreemptionState(Core::System& system);
+void KernelDebug(Core::System& system, u32 kernel_debug_type, u64 param1, u64 param2, u64 param3);
+void ChangeKernelTraceState(Core::System& system, u32 trace_state);
+Result CreateSession(Core::System& system, Handle* out_server, Handle* out_client, u32 is_light,
+ u64 name);
+Result ReplyAndReceive(Core::System& system, s32* out_index, Handle* handles, s32 num_handles,
+ Handle reply_target, s64 timeout_ns);
+Result CreateEvent(Core::System& system, Handle* out_write, Handle* out_read);
+Result CreateCodeMemory(Core::System& system, Handle* out, VAddr address, size_t size);
+Result ControlCodeMemory(Core::System& system, Handle code_memory_handle, u32 operation,
+ VAddr address, size_t size, MemoryPermission perm);
+Result GetProcessList(Core::System& system, u32* out_num_processes, VAddr out_process_ids,
+ u32 out_process_ids_size);
+Result GetThreadList(Core::System& system, u32* out_num_threads, VAddr out_thread_ids,
+ u32 out_thread_ids_size, Handle debug_handle);
+Result SetProcessMemoryPermission(Core::System& system, Handle process_handle, VAddr address,
+ u64 size, MemoryPermission perm);
+Result MapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
+ VAddr src_address, u64 size);
+Result UnmapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
+ VAddr src_address, u64 size);
+Result QueryProcessMemory(Core::System& system, VAddr memory_info_address, VAddr page_info_address,
+ Handle process_handle, VAddr address);
+Result MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
+ u64 src_address, u64 size);
+Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
+ u64 src_address, u64 size);
+Result GetProcessInfo(Core::System& system, u64* out, Handle process_handle, u32 type);
+Result CreateResourceLimit(Core::System& system, Handle* out_handle);
+Result SetResourceLimitLimitValue(Core::System& system, Handle resource_limit_handle,
+ LimitableResource which, u64 limit_value);
+
+// 32-bit ABI variants of the calls above.
+
+Result SetHeapSize32(Core::System& system, u32* heap_addr, u32 heap_size);
+Result SetMemoryAttribute32(Core::System& system, u32 address, u32 size, u32 mask, u32 attr);
+Result MapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size);
+Result UnmapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size);
+Result QueryMemory32(Core::System& system, u32 memory_info_address, u32 page_info_address,
+ u32 query_address);
+void ExitProcess32(Core::System& system);
+Result CreateThread32(Core::System& system, Handle* out_handle, u32 priority, u32 entry_point,
+ u32 arg, u32 stack_top, s32 processor_id);
+Result StartThread32(Core::System& system, Handle thread_handle);
+void ExitThread32(Core::System& system);
+void SleepThread32(Core::System& system, u32 nanoseconds_low, u32 nanoseconds_high);
+Result GetThreadPriority32(Core::System& system, u32* out_priority, Handle handle);
+Result SetThreadPriority32(Core::System& system, Handle thread_handle, u32 priority);
+Result GetThreadCoreMask32(Core::System& system, Handle thread_handle, s32* out_core_id,
+ u32* out_affinity_mask_low, u32* out_affinity_mask_high);
+Result SetThreadCoreMask32(Core::System& system, Handle thread_handle, s32 core_id,
+ u32 affinity_mask_low, u32 affinity_mask_high);
+u32 GetCurrentProcessorNumber32(Core::System& system);
+Result SignalEvent32(Core::System& system, Handle event_handle);
+Result ClearEvent32(Core::System& system, Handle event_handle);
+Result MapSharedMemory32(Core::System& system, Handle shmem_handle, u32 address, u32 size,
+ MemoryPermission map_perm);
+Result UnmapSharedMemory32(Core::System& system, Handle shmem_handle, u32 address, u32 size);
+Result CreateTransferMemory32(Core::System& system, Handle* out, u32 address, u32 size,
+ MemoryPermission map_perm);
+Result CloseHandle32(Core::System& system, Handle handle);
+Result ResetSignal32(Core::System& system, Handle handle);
+Result WaitSynchronization32(Core::System& system, u32 timeout_low, u32 handles_address,
+ s32 num_handles, u32 timeout_high, s32* index);
+Result CancelSynchronization32(Core::System& system, Handle handle);
+Result ArbitrateLock32(Core::System& system, Handle thread_handle, u32 address, u32 tag);
+Result ArbitrateUnlock32(Core::System& system, u32 address);
+Result WaitProcessWideKeyAtomic32(Core::System& system, u32 address, u32 cv_key, u32 tag,
+ u32 timeout_ns_low, u32 timeout_ns_high);
+void SignalProcessWideKey32(Core::System& system, u32 cv_key, s32 count);
+void GetSystemTick32(Core::System& system, u32* time_low, u32* time_high);
+Result ConnectToNamedPort32(Core::System& system, Handle* out_handle, u32 port_name_address);
+Result SendSyncRequest32(Core::System& system, Handle handle);
+Result GetProcessId32(Core::System& system, u32* out_process_id_low, u32* out_process_id_high,
+ Handle handle);
+Result GetThreadId32(Core::System& system, u32* out_thread_id_low, u32* out_thread_id_high,
+ Handle thread_handle);
+void Break32(Core::System& system, u32 reason, u32 info1, u32 info2);
+void OutputDebugString32(Core::System& system, u32 address, u32 len);
+Result GetInfo32(Core::System& system, u32* result_low, u32* result_high, u32 sub_id_low,
+ u32 info_id, u32 handle, u32 sub_id_high);
+Result MapPhysicalMemory32(Core::System& system, u32 addr, u32 size);
+Result UnmapPhysicalMemory32(Core::System& system, u32 addr, u32 size);
+Result SetThreadActivity32(Core::System& system, Handle thread_handle,
+ ThreadActivity thread_activity);
+Result GetThreadContext32(Core::System& system, u32 out_context, Handle thread_handle);
+Result WaitForAddress32(Core::System& system, u32 address, ArbitrationType arb_type, s32 value,
+ u32 timeout_ns_low, u32 timeout_ns_high);
+Result SignalToAddress32(Core::System& system, u32 address, SignalType signal_type, s32 value,
+ s32 count);
+Result CreateEvent32(Core::System& system, Handle* out_write, Handle* out_read);
+Result CreateCodeMemory32(Core::System& system, Handle* out, u32 address, u32 size);
+Result ControlCodeMemory32(Core::System& system, Handle code_memory_handle, u32 operation,
+ u64 address, u64 size, MemoryPermission perm);
+Result FlushProcessDataCache32(Core::System& system, Handle process_handle, u64 address, u64 size);
+
} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_activity.cpp b/src/core/hle/kernel/svc/svc_activity.cpp
new file mode 100644
index 000000000..8774a5c98
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_activity.cpp
@@ -0,0 +1,44 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/core.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/svc.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel::Svc {
+
+/// Sets the thread activity
+Result SetThreadActivity(Core::System& system, Handle thread_handle,
+ ThreadActivity thread_activity) {
+ LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", thread_handle,
+ thread_activity);
+
+ // Validate the activity.
+ constexpr auto IsValidThreadActivity = [](ThreadActivity activity) {
+ return activity == ThreadActivity::Runnable || activity == ThreadActivity::Paused;
+ };
+ R_UNLESS(IsValidThreadActivity(thread_activity), ResultInvalidEnumValue);
+
+ // Get the thread from its handle.
+ KScopedAutoObject thread =
+ system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle);
+ R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
+
+ // Check that the activity is being set on a non-current thread for the current process.
+ R_UNLESS(thread->GetOwnerProcess() == system.Kernel().CurrentProcess(), ResultInvalidHandle);
+ R_UNLESS(thread.GetPointerUnsafe() != GetCurrentThreadPointer(system.Kernel()), ResultBusy);
+
+ // Set the activity.
+ R_TRY(thread->SetActivity(thread_activity));
+
+ return ResultSuccess;
+}
+
+Result SetThreadActivity32(Core::System& system, Handle thread_handle,
+ ThreadActivity thread_activity) {
+ return SetThreadActivity(system, thread_handle, thread_activity);
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_address_arbiter.cpp b/src/core/hle/kernel/svc/svc_address_arbiter.cpp
new file mode 100644
index 000000000..842107726
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_address_arbiter.cpp
@@ -0,0 +1,113 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/core.h"
+#include "core/hle/kernel/k_memory_layout.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/svc.h"
+#include "core/hle/kernel/svc_results.h"
+#include "core/hle/kernel/svc_types.h"
+
+namespace Kernel::Svc {
+namespace {
+
+constexpr bool IsValidSignalType(Svc::SignalType type) {
+ switch (type) {
+ case Svc::SignalType::Signal:
+ case Svc::SignalType::SignalAndIncrementIfEqual:
+ case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
+ return true;
+ default:
+ return false;
+ }
+}
+
+constexpr bool IsValidArbitrationType(Svc::ArbitrationType type) {
+ switch (type) {
+ case Svc::ArbitrationType::WaitIfLessThan:
+ case Svc::ArbitrationType::DecrementAndWaitIfLessThan:
+ case Svc::ArbitrationType::WaitIfEqual:
+ return true;
+ default:
+ return false;
+ }
+}
+
+} // namespace
+
+// Wait for an address (via Address Arbiter)
+Result WaitForAddress(Core::System& system, VAddr address, ArbitrationType arb_type, s32 value,
+ s64 timeout_ns) {
+ LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, arb_type=0x{:X}, value=0x{:X}, timeout_ns={}",
+ address, arb_type, value, timeout_ns);
+
+ // Validate input.
+ if (IsKernelAddress(address)) {
+ LOG_ERROR(Kernel_SVC, "Attempting to wait on kernel address (address={:08X})", address);
+ return ResultInvalidCurrentMemory;
+ }
+ if (!Common::IsAligned(address, sizeof(s32))) {
+ LOG_ERROR(Kernel_SVC, "Wait address must be 4 byte aligned (address={:08X})", address);
+ return ResultInvalidAddress;
+ }
+ if (!IsValidArbitrationType(arb_type)) {
+ LOG_ERROR(Kernel_SVC, "Invalid arbitration type specified (type={})", arb_type);
+ return ResultInvalidEnumValue;
+ }
+
+ // Convert timeout from nanoseconds to ticks.
+ s64 timeout{};
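+    // Positive timeouts are padded by two ticks and saturate to the maximum s64 value on overflow; zero and negative values are passed through unchanged.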
+ if (timeout_ns > 0) {
+ const s64 offset_tick(timeout_ns);
+ if (offset_tick > 0) {
+ timeout = offset_tick + 2;
+ if (timeout <= 0) {
+ timeout = std::numeric_limits<s64>::max();
+ }
+ } else {
+ timeout = std::numeric_limits<s64>::max();
+ }
+ } else {
+ timeout = timeout_ns;
+ }
+
+ return system.Kernel().CurrentProcess()->WaitAddressArbiter(address, arb_type, value, timeout);
+}
+
+Result WaitForAddress32(Core::System& system, u32 address, ArbitrationType arb_type, s32 value,
+ u32 timeout_ns_low, u32 timeout_ns_high) {
+ const auto timeout = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32));
+ return WaitForAddress(system, address, arb_type, value, timeout);
+}
+
+// Signals to an address (via Address Arbiter)
+Result SignalToAddress(Core::System& system, VAddr address, SignalType signal_type, s32 value,
+ s32 count) {
+ LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, signal_type=0x{:X}, value=0x{:X}, count=0x{:X}",
+ address, signal_type, value, count);
+
+ // Validate input.
+ if (IsKernelAddress(address)) {
+ LOG_ERROR(Kernel_SVC, "Attempting to signal to a kernel address (address={:08X})", address);
+ return ResultInvalidCurrentMemory;
+ }
+ if (!Common::IsAligned(address, sizeof(s32))) {
+ LOG_ERROR(Kernel_SVC, "Signaled address must be 4 byte aligned (address={:08X})", address);
+ return ResultInvalidAddress;
+ }
+ if (!IsValidSignalType(signal_type)) {
+ LOG_ERROR(Kernel_SVC, "Invalid signal type specified (type={})", signal_type);
+ return ResultInvalidEnumValue;
+ }
+
+ return system.Kernel().CurrentProcess()->SignalAddressArbiter(address, signal_type, value,
+ count);
+}
+
+Result SignalToAddress32(Core::System& system, u32 address, SignalType signal_type, s32 value,
+ s32 count) {
+ return SignalToAddress(system, address, signal_type, value, count);
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_address_translation.cpp b/src/core/hle/kernel/svc/svc_address_translation.cpp
new file mode 100644
index 000000000..299e22ae6
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_address_translation.cpp
@@ -0,0 +1,6 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_cache.cpp b/src/core/hle/kernel/svc/svc_cache.cpp
new file mode 100644
index 000000000..42167d35b
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_cache.cpp
@@ -0,0 +1,31 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/core.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/svc.h"
+#include "core/hle/kernel/svc_results.h"
+#include "core/hle/kernel/svc_types.h"
+
+namespace Kernel::Svc {
+
+Result FlushProcessDataCache32(Core::System& system, Handle process_handle, u64 address, u64 size) {
+ // Validate address/size.
+ R_UNLESS(size > 0, ResultInvalidSize);
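+    // The round-trip casts reject values that do not fit in the host's uintptr_t/size_t types.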
+ R_UNLESS(address == static_cast<uintptr_t>(address), ResultInvalidCurrentMemory);
+ R_UNLESS(size == static_cast<size_t>(size), ResultInvalidCurrentMemory);
+
+ // Get the process from its handle.
+ KScopedAutoObject process =
+ system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KProcess>(process_handle);
+ R_UNLESS(process.IsNotNull(), ResultInvalidHandle);
+
+ // Verify the region is within range.
+ auto& page_table = process->PageTable();
+ R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Perform the operation.
+ R_RETURN(system.Memory().FlushDataCache(*process, address, size));
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_code_memory.cpp b/src/core/hle/kernel/svc/svc_code_memory.cpp
new file mode 100644
index 000000000..4cb21e101
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_code_memory.cpp
@@ -0,0 +1,154 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/core.h"
+#include "core/hle/kernel/k_code_memory.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+namespace {
+
+constexpr bool IsValidMapCodeMemoryPermission(MemoryPermission perm) {
+ return perm == MemoryPermission::ReadWrite;
+}
+
+constexpr bool IsValidMapToOwnerCodeMemoryPermission(MemoryPermission perm) {
+ return perm == MemoryPermission::Read || perm == MemoryPermission::ReadExecute;
+}
+
+constexpr bool IsValidUnmapCodeMemoryPermission(MemoryPermission perm) {
+ return perm == MemoryPermission::None;
+}
+
+constexpr bool IsValidUnmapFromOwnerCodeMemoryPermission(MemoryPermission perm) {
+ return perm == MemoryPermission::None;
+}
+
+} // namespace
+
+Result CreateCodeMemory(Core::System& system, Handle* out, VAddr address, size_t size) {
+ LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, size=0x{:X}", address, size);
+
+ // Get kernel instance.
+ auto& kernel = system.Kernel();
+
+ // Validate address / size.
+ R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
+ R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
+ R_UNLESS(size > 0, ResultInvalidSize);
+ R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
+
+ // Create the code memory.
+
+ KCodeMemory* code_mem = KCodeMemory::Create(kernel);
+ R_UNLESS(code_mem != nullptr, ResultOutOfResource);
+
+ // Verify that the region is in range.
+ R_UNLESS(system.CurrentProcess()->PageTable().Contains(address, size),
+ ResultInvalidCurrentMemory);
+
+ // Initialize the code memory.
+ R_TRY(code_mem->Initialize(system.DeviceMemory(), address, size));
+
+ // Register the code memory.
+ KCodeMemory::Register(kernel, code_mem);
+
+ // Add the code memory to the handle table.
+ R_TRY(system.CurrentProcess()->GetHandleTable().Add(out, code_mem));
+
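+    // Close the creation reference; on success, the handle table now holds the only reference to the code memory.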
+ code_mem->Close();
+
+ return ResultSuccess;
+}
+
+Result CreateCodeMemory32(Core::System& system, Handle* out, u32 address, u32 size) {
+ return CreateCodeMemory(system, out, address, size);
+}
+
+Result ControlCodeMemory(Core::System& system, Handle code_memory_handle, u32 operation,
+ VAddr address, size_t size, MemoryPermission perm) {
+
+ LOG_TRACE(Kernel_SVC,
+ "called, code_memory_handle=0x{:X}, operation=0x{:X}, address=0x{:X}, size=0x{:X}, "
+ "permission=0x{:X}",
+ code_memory_handle, operation, address, size, perm);
+
+ // Validate the address / size.
+ R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
+ R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
+ R_UNLESS(size > 0, ResultInvalidSize);
+ R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
+
+ // Get the code memory from its handle.
+ KScopedAutoObject code_mem =
+ system.CurrentProcess()->GetHandleTable().GetObject<KCodeMemory>(code_memory_handle);
+ R_UNLESS(code_mem.IsNotNull(), ResultInvalidHandle);
+
+ // NOTE: Here, Atmosphere extends the SVC to allow code memory operations on one's own process.
+ // This enables homebrew usage of these SVCs for JIT.
+
+ // Perform the operation.
+ switch (static_cast<CodeMemoryOperation>(operation)) {
+ case CodeMemoryOperation::Map: {
+ // Check that the region is in range.
+ R_UNLESS(
+ system.CurrentProcess()->PageTable().CanContain(address, size, KMemoryState::CodeOut),
+ ResultInvalidMemoryRegion);
+
+ // Check the memory permission.
+ R_UNLESS(IsValidMapCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission);
+
+ // Map the memory.
+ R_TRY(code_mem->Map(address, size));
+ } break;
+ case CodeMemoryOperation::Unmap: {
+ // Check that the region is in range.
+ R_UNLESS(
+ system.CurrentProcess()->PageTable().CanContain(address, size, KMemoryState::CodeOut),
+ ResultInvalidMemoryRegion);
+
+ // Check the memory permission.
+ R_UNLESS(IsValidUnmapCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission);
+
+ // Unmap the memory.
+ R_TRY(code_mem->Unmap(address, size));
+ } break;
+ case CodeMemoryOperation::MapToOwner: {
+ // Check that the region is in range.
+ R_UNLESS(code_mem->GetOwner()->PageTable().CanContain(address, size,
+ KMemoryState::GeneratedCode),
+ ResultInvalidMemoryRegion);
+
+ // Check the memory permission.
+ R_UNLESS(IsValidMapToOwnerCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission);
+
+ // Map the memory to its owner.
+ R_TRY(code_mem->MapToOwner(address, size, perm));
+ } break;
+ case CodeMemoryOperation::UnmapFromOwner: {
+ // Check that the region is in range.
+ R_UNLESS(code_mem->GetOwner()->PageTable().CanContain(address, size,
+ KMemoryState::GeneratedCode),
+ ResultInvalidMemoryRegion);
+
+ // Check the memory permission.
+ R_UNLESS(IsValidUnmapFromOwnerCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission);
+
+ // Unmap the memory from its owner.
+ R_TRY(code_mem->UnmapFromOwner(address, size));
+ } break;
+ default:
+ return ResultInvalidEnumValue;
+ }
+
+ return ResultSuccess;
+}
+
+Result ControlCodeMemory32(Core::System& system, Handle code_memory_handle, u32 operation,
+ u64 address, u64 size, MemoryPermission perm) {
+ return ControlCodeMemory(system, code_memory_handle, operation, address, size, perm);
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_condition_variable.cpp b/src/core/hle/kernel/svc/svc_condition_variable.cpp
new file mode 100644
index 000000000..d6cfc87c5
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_condition_variable.cpp
@@ -0,0 +1,69 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/core.h"
+#include "core/hle/kernel/k_memory_layout.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/svc.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel::Svc {
+
+/// Wait process wide key atomic
+Result WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_key, u32 tag,
+ s64 timeout_ns) {
+ LOG_TRACE(Kernel_SVC, "called address={:X}, cv_key={:X}, tag=0x{:08X}, timeout_ns={}", address,
+ cv_key, tag, timeout_ns);
+
+ // Validate input.
+ if (IsKernelAddress(address)) {
+ LOG_ERROR(Kernel_SVC, "Attempted to wait on kernel address (address={:08X})", address);
+ return ResultInvalidCurrentMemory;
+ }
+ if (!Common::IsAligned(address, sizeof(s32))) {
+ LOG_ERROR(Kernel_SVC, "Address must be 4 byte aligned (address={:08X})", address);
+ return ResultInvalidAddress;
+ }
+
+ // Convert timeout from nanoseconds to ticks.
+ s64 timeout{};
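+    // As in WaitForAddress, a positive timeout is padded by two ticks and saturates to the maximum s64 value; zero and negative values pass through unchanged.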
+ if (timeout_ns > 0) {
+ const s64 offset_tick(timeout_ns);
+ if (offset_tick > 0) {
+ timeout = offset_tick + 2;
+ if (timeout <= 0) {
+ timeout = std::numeric_limits<s64>::max();
+ }
+ } else {
+ timeout = std::numeric_limits<s64>::max();
+ }
+ } else {
+ timeout = timeout_ns;
+ }
+
+ // Wait on the condition variable.
+ return system.Kernel().CurrentProcess()->WaitConditionVariable(
+ address, Common::AlignDown(cv_key, sizeof(u32)), tag, timeout);
+}
+
+Result WaitProcessWideKeyAtomic32(Core::System& system, u32 address, u32 cv_key, u32 tag,
+ u32 timeout_ns_low, u32 timeout_ns_high) {
+ const auto timeout_ns = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32));
+ return WaitProcessWideKeyAtomic(system, address, cv_key, tag, timeout_ns);
+}
+
+/// Signal process wide key
+void SignalProcessWideKey(Core::System& system, VAddr cv_key, s32 count) {
+ LOG_TRACE(Kernel_SVC, "called, cv_key=0x{:X}, count=0x{:08X}", cv_key, count);
+
+ // Signal the condition variable.
+ return system.Kernel().CurrentProcess()->SignalConditionVariable(
+ Common::AlignDown(cv_key, sizeof(u32)), count);
+}
+
+void SignalProcessWideKey32(Core::System& system, u32 cv_key, s32 count) {
+ SignalProcessWideKey(system, cv_key, count);
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_debug.cpp b/src/core/hle/kernel/svc/svc_debug.cpp
new file mode 100644
index 000000000..299e22ae6
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_debug.cpp
@@ -0,0 +1,6 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_debug_string.cpp b/src/core/hle/kernel/svc/svc_debug_string.cpp
new file mode 100644
index 000000000..486e62cc4
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_debug_string.cpp
@@ -0,0 +1,25 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/core.h"
+#include "core/hle/kernel/svc.h"
+#include "core/memory.h"
+
+namespace Kernel::Svc {
+
+/// Used to output a message on a debug hardware unit - does nothing on a retail unit
+void OutputDebugString(Core::System& system, VAddr address, u64 len) {
+ if (len == 0) {
+ return;
+ }
+
+ std::string str(len, '\0');
+ system.Memory().ReadBlock(address, str.data(), str.size());
+ LOG_DEBUG(Debug_Emulated, "{}", str);
+}
+
+void OutputDebugString32(Core::System& system, u32 address, u32 len) {
+ OutputDebugString(system, address, len);
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_device_address_space.cpp b/src/core/hle/kernel/svc/svc_device_address_space.cpp
new file mode 100644
index 000000000..299e22ae6
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_device_address_space.cpp
@@ -0,0 +1,6 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_event.cpp b/src/core/hle/kernel/svc/svc_event.cpp
new file mode 100644
index 000000000..885f02f50
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_event.cpp
@@ -0,0 +1,111 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/scope_exit.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_event.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_scoped_resource_reservation.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+
+Result SignalEvent(Core::System& system, Handle event_handle) {
+ LOG_DEBUG(Kernel_SVC, "called, event_handle=0x{:08X}", event_handle);
+
+ // Get the current handle table.
+ const KHandleTable& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
+
+ // Get the event.
+ KScopedAutoObject event = handle_table.GetObject<KEvent>(event_handle);
+ R_UNLESS(event.IsNotNull(), ResultInvalidHandle);
+
+ return event->Signal();
+}
+
+Result SignalEvent32(Core::System& system, Handle event_handle) {
+ return SignalEvent(system, event_handle);
+}
+
+Result ClearEvent(Core::System& system, Handle event_handle) {
+ LOG_TRACE(Kernel_SVC, "called, event_handle=0x{:08X}", event_handle);
+
+ // Get the current handle table.
+ const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
+
+ // Try to clear the writable event.
+ {
+ KScopedAutoObject event = handle_table.GetObject<KEvent>(event_handle);
+ if (event.IsNotNull()) {
+ return event->Clear();
+ }
+ }
+
+ // Try to clear the readable event.
+ {
+ KScopedAutoObject readable_event = handle_table.GetObject<KReadableEvent>(event_handle);
+ if (readable_event.IsNotNull()) {
+ return readable_event->Clear();
+ }
+ }
+
+ LOG_ERROR(Kernel_SVC, "Event handle does not exist, event_handle=0x{:08X}", event_handle);
+
+ return ResultInvalidHandle;
+}
+
+Result ClearEvent32(Core::System& system, Handle event_handle) {
+ return ClearEvent(system, event_handle);
+}
+
+Result CreateEvent(Core::System& system, Handle* out_write, Handle* out_read) {
+ LOG_DEBUG(Kernel_SVC, "called");
+
+ // Get the kernel reference and handle table.
+ auto& kernel = system.Kernel();
+ auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
+
+ // Reserve a new event from the process resource limit
+ KScopedResourceReservation event_reservation(kernel.CurrentProcess(),
+ LimitableResource::EventCountMax);
+ R_UNLESS(event_reservation.Succeeded(), ResultLimitReached);
+
+ // Create a new event.
+ KEvent* event = KEvent::Create(kernel);
+ R_UNLESS(event != nullptr, ResultOutOfResource);
+
+ // Initialize the event.
+ event->Initialize(kernel.CurrentProcess());
+
+ // Commit the thread reservation.
+ event_reservation.Commit();
+
+    // Ensure that we clean up the event (and that its only references are in the handle table) on function end.
+ SCOPE_EXIT({
+ event->GetReadableEvent().Close();
+ event->Close();
+ });
+
+ // Register the event.
+ KEvent::Register(kernel, event);
+
+ // Add the event to the handle table.
+ R_TRY(handle_table.Add(out_write, event));
+
+    // Ensure that we maintain a clean handle state on exit.
+ auto handle_guard = SCOPE_GUARD({ handle_table.Remove(*out_write); });
+
+ // Add the readable event to the handle table.
+ R_TRY(handle_table.Add(out_read, std::addressof(event->GetReadableEvent())));
+
+ // We succeeded.
+ handle_guard.Cancel();
+ return ResultSuccess;
+}
+
+Result CreateEvent32(Core::System& system, Handle* out_write, Handle* out_read) {
+ return CreateEvent(system, out_write, out_read);
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_exception.cpp b/src/core/hle/kernel/svc/svc_exception.cpp
new file mode 100644
index 000000000..fb9f133c1
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_exception.cpp
@@ -0,0 +1,121 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/core.h"
+#include "core/debugger/debugger.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/svc.h"
+#include "core/hle/kernel/svc_types.h"
+#include "core/memory.h"
+#include "core/reporter.h"
+
+namespace Kernel::Svc {
+
+/// Break program execution
+void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
+ BreakReason break_reason =
+ static_cast<BreakReason>(reason & ~static_cast<u32>(BreakReason::NotificationOnlyFlag));
+ bool notification_only = (reason & static_cast<u32>(BreakReason::NotificationOnlyFlag)) != 0;
+
+ bool has_dumped_buffer{};
+ std::vector<u8> debug_buffer;
+
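+    // Dumps the user-supplied buffer at most once: a 4-byte buffer is logged as an error code, anything else is hexdumped.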
+ const auto handle_debug_buffer = [&](VAddr addr, u64 sz) {
+ if (sz == 0 || addr == 0 || has_dumped_buffer) {
+ return;
+ }
+
+ auto& memory = system.Memory();
+
+        // This is typically an error code, so we assume that is the case.
+ if (sz == sizeof(u32)) {
+ LOG_CRITICAL(Debug_Emulated, "debug_buffer_err_code={:X}", memory.Read32(addr));
+ } else {
+ // We don't know what's in here so we'll hexdump it
+ debug_buffer.resize(sz);
+ memory.ReadBlock(addr, debug_buffer.data(), sz);
+ std::string hexdump;
+ for (std::size_t i = 0; i < debug_buffer.size(); i++) {
+ hexdump += fmt::format("{:02X} ", debug_buffer[i]);
+ if (i != 0 && i % 16 == 0) {
+ hexdump += '\n';
+ }
+ }
+ LOG_CRITICAL(Debug_Emulated, "debug_buffer=\n{}", hexdump);
+ }
+ has_dumped_buffer = true;
+ };
+ switch (break_reason) {
+ case BreakReason::Panic:
+ LOG_CRITICAL(Debug_Emulated, "Userspace PANIC! info1=0x{:016X}, info2=0x{:016X}", info1,
+ info2);
+ handle_debug_buffer(info1, info2);
+ break;
+ case BreakReason::Assert:
+ LOG_CRITICAL(Debug_Emulated, "Userspace Assertion failed! info1=0x{:016X}, info2=0x{:016X}",
+ info1, info2);
+ handle_debug_buffer(info1, info2);
+ break;
+ case BreakReason::User:
+ LOG_WARNING(Debug_Emulated, "Userspace Break! 0x{:016X} with size 0x{:016X}", info1, info2);
+ handle_debug_buffer(info1, info2);
+ break;
+ case BreakReason::PreLoadDll:
+ LOG_INFO(Debug_Emulated,
+ "Userspace Attempting to load an NRO at 0x{:016X} with size 0x{:016X}", info1,
+ info2);
+ break;
+ case BreakReason::PostLoadDll:
+ LOG_INFO(Debug_Emulated, "Userspace Loaded an NRO at 0x{:016X} with size 0x{:016X}", info1,
+ info2);
+ break;
+ case BreakReason::PreUnloadDll:
+ LOG_INFO(Debug_Emulated,
+ "Userspace Attempting to unload an NRO at 0x{:016X} with size 0x{:016X}", info1,
+ info2);
+ break;
+ case BreakReason::PostUnloadDll:
+ LOG_INFO(Debug_Emulated, "Userspace Unloaded an NRO at 0x{:016X} with size 0x{:016X}",
+ info1, info2);
+ break;
+ case BreakReason::CppException:
+ LOG_CRITICAL(Debug_Emulated, "Signalling debugger. Uncaught C++ exception encountered.");
+ break;
+ default:
+ LOG_WARNING(
+ Debug_Emulated,
+ "Signalling debugger, Unknown break reason {:#X}, info1=0x{:016X}, info2=0x{:016X}",
+ reason, info1, info2);
+ handle_debug_buffer(info1, info2);
+ break;
+ }
+
+ system.GetReporter().SaveSvcBreakReport(reason, notification_only, info1, info2,
+ has_dumped_buffer ? std::make_optional(debug_buffer)
+ : std::nullopt);
+
+ if (!notification_only) {
+ LOG_CRITICAL(
+ Debug_Emulated,
+ "Emulated program broke execution! reason=0x{:016X}, info1=0x{:016X}, info2=0x{:016X}",
+ reason, info1, info2);
+
+ handle_debug_buffer(info1, info2);
+
+ auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
+ const auto thread_processor_id = current_thread->GetActiveCore();
+ system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace();
+ }
+
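+    // If a debugger is attached, notify it of the stop and suspend the current thread.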
+ if (system.DebuggerEnabled()) {
+ auto* thread = system.Kernel().GetCurrentEmuThread();
+ system.GetDebugger().NotifyThreadStopped(thread);
+ thread->RequestSuspend(Kernel::SuspendType::Debug);
+ }
+}
+
+void Break32(Core::System& system, u32 reason, u32 info1, u32 info2) {
+ Break(system, reason, info1, info2);
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_info.cpp b/src/core/hle/kernel/svc/svc_info.cpp
new file mode 100644
index 000000000..df5dd85a4
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_info.cpp
@@ -0,0 +1,282 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/core.h"
+#include "core/core_timing.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_resource_limit.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+
+/// Gets system/memory information for the current process
+Result GetInfo(Core::System& system, u64* result, u64 info_id, Handle handle, u64 info_sub_id) {
+ LOG_TRACE(Kernel_SVC, "called info_id=0x{:X}, info_sub_id=0x{:X}, handle=0x{:08X}", info_id,
+ info_sub_id, handle);
+
+ const auto info_id_type = static_cast<InfoType>(info_id);
+
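+    // The first group of ids is queried against the process named by the handle; the remaining ids are handled individually below.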
+ switch (info_id_type) {
+ case InfoType::CoreMask:
+ case InfoType::PriorityMask:
+ case InfoType::AliasRegionAddress:
+ case InfoType::AliasRegionSize:
+ case InfoType::HeapRegionAddress:
+ case InfoType::HeapRegionSize:
+ case InfoType::AslrRegionAddress:
+ case InfoType::AslrRegionSize:
+ case InfoType::StackRegionAddress:
+ case InfoType::StackRegionSize:
+ case InfoType::TotalMemorySize:
+ case InfoType::UsedMemorySize:
+ case InfoType::SystemResourceSizeTotal:
+ case InfoType::SystemResourceSizeUsed:
+ case InfoType::ProgramId:
+ case InfoType::UserExceptionContextAddress:
+ case InfoType::TotalNonSystemMemorySize:
+ case InfoType::UsedNonSystemMemorySize:
+ case InfoType::IsApplication:
+ case InfoType::FreeThreadCount: {
+ if (info_sub_id != 0) {
+ LOG_ERROR(Kernel_SVC, "Info sub id is non zero! info_id={}, info_sub_id={}", info_id,
+ info_sub_id);
+ return ResultInvalidEnumValue;
+ }
+
+ const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
+ KScopedAutoObject process = handle_table.GetObject<KProcess>(handle);
+ if (process.IsNull()) {
+ LOG_ERROR(Kernel_SVC, "Process is not valid! info_id={}, info_sub_id={}, handle={:08X}",
+ info_id, info_sub_id, handle);
+ return ResultInvalidHandle;
+ }
+
+ switch (info_id_type) {
+ case InfoType::CoreMask:
+ *result = process->GetCoreMask();
+ return ResultSuccess;
+
+ case InfoType::PriorityMask:
+ *result = process->GetPriorityMask();
+ return ResultSuccess;
+
+ case InfoType::AliasRegionAddress:
+ *result = process->PageTable().GetAliasRegionStart();
+ return ResultSuccess;
+
+ case InfoType::AliasRegionSize:
+ *result = process->PageTable().GetAliasRegionSize();
+ return ResultSuccess;
+
+ case InfoType::HeapRegionAddress:
+ *result = process->PageTable().GetHeapRegionStart();
+ return ResultSuccess;
+
+ case InfoType::HeapRegionSize:
+ *result = process->PageTable().GetHeapRegionSize();
+ return ResultSuccess;
+
+ case InfoType::AslrRegionAddress:
+ *result = process->PageTable().GetAliasCodeRegionStart();
+ return ResultSuccess;
+
+ case InfoType::AslrRegionSize:
+ *result = process->PageTable().GetAliasCodeRegionSize();
+ return ResultSuccess;
+
+ case InfoType::StackRegionAddress:
+ *result = process->PageTable().GetStackRegionStart();
+ return ResultSuccess;
+
+ case InfoType::StackRegionSize:
+ *result = process->PageTable().GetStackRegionSize();
+ return ResultSuccess;
+
+ case InfoType::TotalMemorySize:
+ *result = process->GetTotalPhysicalMemoryAvailable();
+ return ResultSuccess;
+
+ case InfoType::UsedMemorySize:
+ *result = process->GetTotalPhysicalMemoryUsed();
+ return ResultSuccess;
+
+ case InfoType::SystemResourceSizeTotal:
+ *result = process->GetSystemResourceSize();
+ return ResultSuccess;
+
+ case InfoType::SystemResourceSizeUsed:
+ LOG_WARNING(Kernel_SVC, "(STUBBED) Attempted to query system resource usage");
+ *result = process->GetSystemResourceUsage();
+ return ResultSuccess;
+
+ case InfoType::ProgramId:
+ *result = process->GetProgramID();
+ return ResultSuccess;
+
+ case InfoType::UserExceptionContextAddress:
+ *result = process->GetProcessLocalRegionAddress();
+ return ResultSuccess;
+
+ case InfoType::TotalNonSystemMemorySize:
+ *result = process->GetTotalPhysicalMemoryAvailableWithoutSystemResource();
+ return ResultSuccess;
+
+ case InfoType::UsedNonSystemMemorySize:
+ *result = process->GetTotalPhysicalMemoryUsedWithoutSystemResource();
+ return ResultSuccess;
+
+ case InfoType::FreeThreadCount:
+ *result = process->GetFreeThreadCount();
+ return ResultSuccess;
+
+ default:
+ break;
+ }
+
+ LOG_ERROR(Kernel_SVC, "Unimplemented svcGetInfo id=0x{:016X}", info_id);
+ return ResultInvalidEnumValue;
+ }
+
+ case InfoType::DebuggerAttached:
+ *result = 0;
+ return ResultSuccess;
+
+ case InfoType::ResourceLimit: {
+ if (handle != 0) {
+ LOG_ERROR(Kernel, "Handle is non zero! handle={:08X}", handle);
+ return ResultInvalidHandle;
+ }
+
+ if (info_sub_id != 0) {
+ LOG_ERROR(Kernel, "Info sub id is non zero! info_id={}, info_sub_id={}", info_id,
+ info_sub_id);
+ return ResultInvalidCombination;
+ }
+
+ KProcess* const current_process = system.Kernel().CurrentProcess();
+ KHandleTable& handle_table = current_process->GetHandleTable();
+ const auto resource_limit = current_process->GetResourceLimit();
+ if (!resource_limit) {
+ *result = Svc::InvalidHandle;
+ // Yes, the kernel considers this a successful operation.
+ return ResultSuccess;
+ }
+
+ Handle resource_handle{};
+ R_TRY(handle_table.Add(&resource_handle, resource_limit));
+
+ *result = resource_handle;
+ return ResultSuccess;
+ }
+
+ case InfoType::RandomEntropy:
+ if (handle != 0) {
+ LOG_ERROR(Kernel_SVC, "Process Handle is non zero, expected 0 result but got {:016X}",
+ handle);
+ return ResultInvalidHandle;
+ }
+
+ if (info_sub_id >= KProcess::RANDOM_ENTROPY_SIZE) {
+ LOG_ERROR(Kernel_SVC, "Entropy size is out of range, expected {} but got {}",
+ KProcess::RANDOM_ENTROPY_SIZE, info_sub_id);
+ return ResultInvalidCombination;
+ }
+
+ *result = system.Kernel().CurrentProcess()->GetRandomEntropy(info_sub_id);
+ return ResultSuccess;
+
+ case InfoType::InitialProcessIdRange:
+ LOG_WARNING(Kernel_SVC,
+ "(STUBBED) Attempted to query privileged process id bounds, returned 0");
+ *result = 0;
+ return ResultSuccess;
+
+ case InfoType::ThreadTickCount: {
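+        // The emulated hardware exposes four CPU cores.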
+ constexpr u64 num_cpus = 4;
+ if (info_sub_id != 0xFFFFFFFFFFFFFFFF && info_sub_id >= num_cpus) {
+ LOG_ERROR(Kernel_SVC, "Core count is out of range, expected {} but got {}", num_cpus,
+ info_sub_id);
+ return ResultInvalidCombination;
+ }
+
+ KScopedAutoObject thread =
+ system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(
+ static_cast<Handle>(handle));
+ if (thread.IsNull()) {
+ LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}",
+ static_cast<Handle>(handle));
+ return ResultInvalidHandle;
+ }
+
+ const auto& core_timing = system.CoreTiming();
+ const auto& scheduler = *system.Kernel().CurrentScheduler();
+ const auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
+ const bool same_thread = current_thread == thread.GetPointerUnsafe();
+
+ const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTime();
+ u64 out_ticks = 0;
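+        // Accumulated CPU time only covers completed time slices, so for the calling thread we
+        // also add the ticks elapsed since the last context switch on this core.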
+ if (same_thread && info_sub_id == 0xFFFFFFFFFFFFFFFF) {
+ const u64 thread_ticks = current_thread->GetCpuTime();
+
+ out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks);
+ } else if (same_thread && info_sub_id == system.Kernel().CurrentPhysicalCoreIndex()) {
+ out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks;
+ }
+
+ *result = out_ticks;
+ return ResultSuccess;
+ }
+ case InfoType::IdleTickCount: {
+ // Verify the input handle is invalid.
+ R_UNLESS(handle == InvalidHandle, ResultInvalidHandle);
+
+ // Verify the requested core is valid.
+ const bool core_valid =
+ (info_sub_id == 0xFFFFFFFFFFFFFFFF) ||
+ (info_sub_id == static_cast<u64>(system.Kernel().CurrentPhysicalCoreIndex()));
+ R_UNLESS(core_valid, ResultInvalidCombination);
+
+ // Get the idle tick count.
+ *result = system.Kernel().CurrentScheduler()->GetIdleThread()->GetCpuTime();
+ return ResultSuccess;
+ }
+ case InfoType::MesosphereCurrentProcess: {
+ // Verify the input handle is invalid.
+ R_UNLESS(handle == InvalidHandle, ResultInvalidHandle);
+
+ // Verify the sub-type is valid.
+ R_UNLESS(info_sub_id == 0, ResultInvalidCombination);
+
+ // Get the handle table.
+ KProcess* current_process = system.Kernel().CurrentProcess();
+ KHandleTable& handle_table = current_process->GetHandleTable();
+
+ // Get a new handle for the current process.
+ Handle tmp;
+ R_TRY(handle_table.Add(&tmp, current_process));
+
+ // Set the output.
+ *result = tmp;
+
+ // We succeeded.
+ return ResultSuccess;
+ }
+ default:
+ LOG_ERROR(Kernel_SVC, "Unimplemented svcGetInfo id=0x{:016X}", info_id);
+ return ResultInvalidEnumValue;
+ }
+}
+
+Result GetInfo32(Core::System& system, u32* result_low, u32* result_high, u32 sub_id_low,
+ u32 info_id, u32 handle, u32 sub_id_high) {
+ const u64 sub_id{u64{sub_id_low} | (u64{sub_id_high} << 32)};
+ u64 res_value{};
+
+ const Result result{GetInfo(system, &res_value, info_id, handle, sub_id)};
+ *result_high = static_cast<u32>(res_value >> 32);
+ *result_low = static_cast<u32>(res_value & std::numeric_limits<u32>::max());
+
+ return result;
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_interrupt_event.cpp b/src/core/hle/kernel/svc/svc_interrupt_event.cpp
new file mode 100644
index 000000000..299e22ae6
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_interrupt_event.cpp
@@ -0,0 +1,6 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_io_pool.cpp b/src/core/hle/kernel/svc/svc_io_pool.cpp
new file mode 100644
index 000000000..299e22ae6
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_io_pool.cpp
@@ -0,0 +1,6 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_ipc.cpp b/src/core/hle/kernel/svc/svc_ipc.cpp
new file mode 100644
index 000000000..dbb68e89a
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_ipc.cpp
@@ -0,0 +1,89 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/scope_exit.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_client_session.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_server_session.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+
+/// Makes a blocking IPC call to a service.
+Result SendSyncRequest(Core::System& system, Handle handle) {
+ auto& kernel = system.Kernel();
+
+ // Get the client session from its handle.
+ KScopedAutoObject session =
+ kernel.CurrentProcess()->GetHandleTable().GetObject<KClientSession>(handle);
+ R_UNLESS(session.IsNotNull(), ResultInvalidHandle);
+
+ LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
+
+ return session->SendSyncRequest();
+}
+
+Result SendSyncRequest32(Core::System& system, Handle handle) {
+ return SendSyncRequest(system, handle);
+}
+
+Result ReplyAndReceive(Core::System& system, s32* out_index, Handle* handles, s32 num_handles,
+ Handle reply_target, s64 timeout_ns) {
+ auto& kernel = system.Kernel();
+ auto& handle_table = GetCurrentThread(kernel).GetOwnerProcess()->GetHandleTable();
+
+ // Convert handle list to object table.
+ std::vector<KSynchronizationObject*> objs(num_handles);
+ R_UNLESS(
+ handle_table.GetMultipleObjects<KSynchronizationObject>(objs.data(), handles, num_handles),
+ ResultInvalidHandle);
+
+ // Ensure handles are closed when we're done.
+ SCOPE_EXIT({
+ for (auto i = 0; i < num_handles; ++i) {
+ objs[i]->Close();
+ }
+ });
+
+ // Reply to the target, if one is specified.
+ if (reply_target != InvalidHandle) {
+ KScopedAutoObject session = handle_table.GetObject<KServerSession>(reply_target);
+ R_UNLESS(session.IsNotNull(), ResultInvalidHandle);
+
+ // If we fail to reply, we want to set the output index to -1.
+ ON_RESULT_FAILURE {
+ *out_index = -1;
+ };
+
+ // Send the reply.
+ R_TRY(session->SendReply());
+ }
+
+ // Wait for a message.
+ while (true) {
+ // Wait for an object.
+ s32 index;
+ Result result = KSynchronizationObject::Wait(kernel, &index, objs.data(),
+ static_cast<s32>(objs.size()), timeout_ns);
+ if (result == ResultTimedOut) {
+ return result;
+ }
+
+ // Receive the request.
+ if (R_SUCCEEDED(result)) {
+ KServerSession* session = objs[index]->DynamicCast<KServerSession*>();
+ if (session != nullptr) {
+ result = session->ReceiveRequest();
+ if (result == ResultNotFound) {
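+                    // No request was available to receive; go back to waiting.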
+ continue;
+ }
+ }
+ }
+
+ *out_index = index;
+ return result;
+ }
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_kernel_debug.cpp b/src/core/hle/kernel/svc/svc_kernel_debug.cpp
new file mode 100644
index 000000000..454255e7a
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_kernel_debug.cpp
@@ -0,0 +1,19 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+
+void KernelDebug([[maybe_unused]] Core::System& system, [[maybe_unused]] u32 kernel_debug_type,
+ [[maybe_unused]] u64 param1, [[maybe_unused]] u64 param2,
+ [[maybe_unused]] u64 param3) {
+ // Intentionally do nothing, as this does nothing in released kernel binaries.
+}
+
+void ChangeKernelTraceState([[maybe_unused]] Core::System& system,
+ [[maybe_unused]] u32 trace_state) {
+ // Intentionally do nothing, as this does nothing in released kernel binaries.
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_light_ipc.cpp b/src/core/hle/kernel/svc/svc_light_ipc.cpp
new file mode 100644
index 000000000..299e22ae6
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_light_ipc.cpp
@@ -0,0 +1,6 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_lock.cpp b/src/core/hle/kernel/svc/svc_lock.cpp
new file mode 100644
index 000000000..45f2a6553
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_lock.cpp
@@ -0,0 +1,57 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/core.h"
+#include "core/hle/kernel/k_memory_layout.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+
+/// Attempts to lock a mutex
+Result ArbitrateLock(Core::System& system, Handle thread_handle, VAddr address, u32 tag) {
+ LOG_TRACE(Kernel_SVC, "called thread_handle=0x{:08X}, address=0x{:X}, tag=0x{:08X}",
+ thread_handle, address, tag);
+
+ // Validate the input address.
+ if (IsKernelAddress(address)) {
+ LOG_ERROR(Kernel_SVC, "Attempting to arbitrate a lock on a kernel address (address={:08X})",
+ address);
+ return ResultInvalidCurrentMemory;
+ }
+ if (!Common::IsAligned(address, sizeof(u32))) {
+ LOG_ERROR(Kernel_SVC, "Input address must be 4 byte aligned (address: {:08X})", address);
+ return ResultInvalidAddress;
+ }
+
+ return system.Kernel().CurrentProcess()->WaitForAddress(thread_handle, address, tag);
+}
+
+Result ArbitrateLock32(Core::System& system, Handle thread_handle, u32 address, u32 tag) {
+ return ArbitrateLock(system, thread_handle, address, tag);
+}
+
+/// Unlock a mutex
+Result ArbitrateUnlock(Core::System& system, VAddr address) {
+ LOG_TRACE(Kernel_SVC, "called address=0x{:X}", address);
+
+ // Validate the input address.
+ if (IsKernelAddress(address)) {
+ LOG_ERROR(Kernel_SVC,
+ "Attempting to arbitrate an unlock on a kernel address (address={:08X})",
+ address);
+ return ResultInvalidCurrentMemory;
+ }
+ if (!Common::IsAligned(address, sizeof(u32))) {
+ LOG_ERROR(Kernel_SVC, "Input address must be 4 byte aligned (address: {:08X})", address);
+ return ResultInvalidAddress;
+ }
+
+ return system.Kernel().CurrentProcess()->SignalToAddress(address);
+}
+
+Result ArbitrateUnlock32(Core::System& system, u32 address) {
+ return ArbitrateUnlock(system, address);
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_memory.cpp b/src/core/hle/kernel/svc/svc_memory.cpp
new file mode 100644
index 000000000..f78b1239b
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_memory.cpp
@@ -0,0 +1,189 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/core.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+namespace {
+
+constexpr bool IsValidSetMemoryPermission(MemoryPermission perm) {
+ switch (perm) {
+ case MemoryPermission::None:
+ case MemoryPermission::Read:
+ case MemoryPermission::ReadWrite:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Checks if address + size is greater than the given address
+// This can return false if the size causes an overflow of a 64-bit type
+// or if the given size is zero.
+constexpr bool IsValidAddressRange(VAddr address, u64 size) {
+ return address + size > address;
+}
+
+// Helper function that performs the common sanity checks for svcMapMemory
+// and svcUnmapMemory. This is doable, as both functions perform their sanitizing
+// in the same order.
+Result MapUnmapMemorySanityChecks(const KPageTable& manager, VAddr dst_addr, VAddr src_addr,
+ u64 size) {
+ if (!Common::Is4KBAligned(dst_addr)) {
+ LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr);
+ return ResultInvalidAddress;
+ }
+
+ if (!Common::Is4KBAligned(src_addr)) {
+ LOG_ERROR(Kernel_SVC, "Source address is not aligned to 4KB, 0x{:016X}", src_addr);
+ return ResultInvalidSize;
+ }
+
+ if (size == 0) {
+ LOG_ERROR(Kernel_SVC, "Size is 0");
+ return ResultInvalidSize;
+ }
+
+ if (!Common::Is4KBAligned(size)) {
+ LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:016X}", size);
+ return ResultInvalidSize;
+ }
+
+ if (!IsValidAddressRange(dst_addr, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Destination is not a valid address range, addr=0x{:016X}, size=0x{:016X}",
+ dst_addr, size);
+ return ResultInvalidCurrentMemory;
+ }
+
+ if (!IsValidAddressRange(src_addr, size)) {
+ LOG_ERROR(Kernel_SVC, "Source is not a valid address range, addr=0x{:016X}, size=0x{:016X}",
+ src_addr, size);
+ return ResultInvalidCurrentMemory;
+ }
+
+ if (!manager.IsInsideAddressSpace(src_addr, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Source is not within the address space, addr=0x{:016X}, size=0x{:016X}",
+ src_addr, size);
+ return ResultInvalidCurrentMemory;
+ }
+
+ if (manager.IsOutsideStackRegion(dst_addr, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Destination is not within the stack region, addr=0x{:016X}, size=0x{:016X}",
+ dst_addr, size);
+ return ResultInvalidMemoryRegion;
+ }
+
+ if (manager.IsInsideHeapRegion(dst_addr, size)) {
+        LOG_ERROR(Kernel_SVC,
+                  "Destination overlaps the heap region, addr=0x{:016X}, size=0x{:016X}",
+                  dst_addr, size);
+ return ResultInvalidMemoryRegion;
+ }
+
+ if (manager.IsInsideAliasRegion(dst_addr, size)) {
+        LOG_ERROR(Kernel_SVC,
+                  "Destination overlaps the map region, addr=0x{:016X}, size=0x{:016X}",
+                  dst_addr, size);
+ return ResultInvalidMemoryRegion;
+ }
+
+ return ResultSuccess;
+}
+
+} // namespace
+
+Result SetMemoryPermission(Core::System& system, VAddr address, u64 size, MemoryPermission perm) {
+ LOG_DEBUG(Kernel_SVC, "called, address=0x{:016X}, size=0x{:X}, perm=0x{:08X", address, size,
+ perm);
+
+ // Validate address / size.
+ R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
+ R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
+ R_UNLESS(size > 0, ResultInvalidSize);
+ R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
+
+ // Validate the permission.
+ R_UNLESS(IsValidSetMemoryPermission(perm), ResultInvalidNewMemoryPermission);
+
+ // Validate that the region is in range for the current process.
+ auto& page_table = system.Kernel().CurrentProcess()->PageTable();
+ R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Set the memory attribute.
+ return page_table.SetMemoryPermission(address, size, perm);
+}
+
+Result SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask, u32 attr) {
+ LOG_DEBUG(Kernel_SVC,
+ "called, address=0x{:016X}, size=0x{:X}, mask=0x{:08X}, attribute=0x{:08X}", address,
+ size, mask, attr);
+
+ // Validate address / size.
+ R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
+ R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
+ R_UNLESS(size > 0, ResultInvalidSize);
+ R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
+
+ // Validate the attribute and mask.
+ constexpr u32 SupportedMask = static_cast<u32>(MemoryAttribute::Uncached);
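+    // The requested attribute bits must be a subset of the mask, and only the Uncached
+    // attribute may be set or cleared here.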
+ R_UNLESS((mask | attr) == mask, ResultInvalidCombination);
+ R_UNLESS((mask | attr | SupportedMask) == SupportedMask, ResultInvalidCombination);
+
+ // Validate that the region is in range for the current process.
+ auto& page_table{system.Kernel().CurrentProcess()->PageTable()};
+ R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Set the memory attribute.
+ return page_table.SetMemoryAttribute(address, size, mask, attr);
+}
+
+Result SetMemoryAttribute32(Core::System& system, u32 address, u32 size, u32 mask, u32 attr) {
+ return SetMemoryAttribute(system, address, size, mask, attr);
+}
+
+/// Maps a memory range into a different range.
+Result MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
+ LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
+ src_addr, size);
+
+ auto& page_table{system.Kernel().CurrentProcess()->PageTable()};
+
+ if (const Result result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)};
+ result.IsError()) {
+ return result;
+ }
+
+ return page_table.MapMemory(dst_addr, src_addr, size);
+}
+
+Result MapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
+ return MapMemory(system, dst_addr, src_addr, size);
+}
+
+/// Unmaps a region that was previously mapped with svcMapMemory
+Result UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
+ LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
+ src_addr, size);
+
+ auto& page_table{system.Kernel().CurrentProcess()->PageTable()};
+
+ if (const Result result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)};
+ result.IsError()) {
+ return result;
+ }
+
+ return page_table.UnmapMemory(dst_addr, src_addr, size);
+}
+
+Result UnmapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
+ return UnmapMemory(system, dst_addr, src_addr, size);
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_physical_memory.cpp b/src/core/hle/kernel/svc/svc_physical_memory.cpp
new file mode 100644
index 000000000..0fc262203
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_physical_memory.cpp
@@ -0,0 +1,137 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/core.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+
+/// Sets the process heap to a given size. It can both extend and shrink the heap.
+Result SetHeapSize(Core::System& system, VAddr* out_address, u64 size) {
+ LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", size);
+
+ // Validate size.
+ R_UNLESS(Common::IsAligned(size, HeapSizeAlignment), ResultInvalidSize);
+ R_UNLESS(size < MainMemorySizeMax, ResultInvalidSize);
+
+ // Set the heap size.
+ R_TRY(system.Kernel().CurrentProcess()->PageTable().SetHeapSize(out_address, size));
+
+ return ResultSuccess;
+}
+
+Result SetHeapSize32(Core::System& system, u32* heap_addr, u32 heap_size) {
+ VAddr temp_heap_addr{};
+ const Result result{SetHeapSize(system, &temp_heap_addr, heap_size)};
+ *heap_addr = static_cast<u32>(temp_heap_addr);
+ return result;
+}
+
+/// Maps memory at a desired address
+Result MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
+ LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
+
+ if (!Common::Is4KBAligned(addr)) {
+ LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, 0x{:016X}", addr);
+ return ResultInvalidAddress;
+ }
+
+ if (!Common::Is4KBAligned(size)) {
+ LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:X}", size);
+ return ResultInvalidSize;
+ }
+
+ if (size == 0) {
+ LOG_ERROR(Kernel_SVC, "Size is zero");
+ return ResultInvalidSize;
+ }
+
+ if (!(addr < addr + size)) {
+ LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address");
+ return ResultInvalidMemoryRegion;
+ }
+
+ KProcess* const current_process{system.Kernel().CurrentProcess()};
+ auto& page_table{current_process->PageTable()};
+
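+    // Mapping additional physical memory requires the process to have reserved a nonzero
+    // system resource size.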
+ if (current_process->GetSystemResourceSize() == 0) {
+ LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
+ return ResultInvalidState;
+ }
+
+ if (!page_table.IsInsideAddressSpace(addr, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Address is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr,
+ size);
+ return ResultInvalidMemoryRegion;
+ }
+
+ if (page_table.IsOutsideAliasRegion(addr, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Address is not within the alias region, addr=0x{:016X}, size=0x{:016X}", addr,
+ size);
+ return ResultInvalidMemoryRegion;
+ }
+
+ return page_table.MapPhysicalMemory(addr, size);
+}
+
+Result MapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
+ return MapPhysicalMemory(system, addr, size);
+}
+
+/// Unmaps memory previously mapped via MapPhysicalMemory
+Result UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
+ LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
+
+ if (!Common::Is4KBAligned(addr)) {
+ LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, 0x{:016X}", addr);
+ return ResultInvalidAddress;
+ }
+
+ if (!Common::Is4KBAligned(size)) {
+ LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:X}", size);
+ return ResultInvalidSize;
+ }
+
+ if (size == 0) {
+ LOG_ERROR(Kernel_SVC, "Size is zero");
+ return ResultInvalidSize;
+ }
+
+ if (!(addr < addr + size)) {
+ LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address");
+ return ResultInvalidMemoryRegion;
+ }
+
+ KProcess* const current_process{system.Kernel().CurrentProcess()};
+ auto& page_table{current_process->PageTable()};
+
+ if (current_process->GetSystemResourceSize() == 0) {
+ LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
+ return ResultInvalidState;
+ }
+
+ if (!page_table.IsInsideAddressSpace(addr, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Address is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr,
+ size);
+ return ResultInvalidMemoryRegion;
+ }
+
+ if (page_table.IsOutsideAliasRegion(addr, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Address is not within the alias region, addr=0x{:016X}, size=0x{:016X}", addr,
+ size);
+ return ResultInvalidMemoryRegion;
+ }
+
+ return page_table.UnmapPhysicalMemory(addr, size);
+}
+
+Result UnmapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
+ return UnmapPhysicalMemory(system, addr, size);
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_port.cpp b/src/core/hle/kernel/svc/svc_port.cpp
new file mode 100644
index 000000000..cdfe0dd16
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_port.cpp
@@ -0,0 +1,71 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/scope_exit.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_client_port.h"
+#include "core/hle/kernel/k_client_session.h"
+#include "core/hle/kernel/k_port.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+
+/// Connects to an OS service given the port name, writing a client session handle to out
+Result ConnectToNamedPort(Core::System& system, Handle* out, VAddr port_name_address) {
+ auto& memory = system.Memory();
+ if (!memory.IsValidVirtualAddress(port_name_address)) {
+ LOG_ERROR(Kernel_SVC,
+ "Port Name Address is not a valid virtual address, port_name_address=0x{:016X}",
+ port_name_address);
+ return ResultNotFound;
+ }
+
+ static constexpr std::size_t PortNameMaxLength = 11;
+ // Read 1 char beyond the max allowed port name to detect names that are too long.
+ const std::string port_name = memory.ReadCString(port_name_address, PortNameMaxLength + 1);
+ if (port_name.size() > PortNameMaxLength) {
+ LOG_ERROR(Kernel_SVC, "Port name is too long, expected {} but got {}", PortNameMaxLength,
+ port_name.size());
+ return ResultOutOfRange;
+ }
+
+ LOG_TRACE(Kernel_SVC, "called port_name={}", port_name);
+
+ // Get the current handle table.
+ auto& kernel = system.Kernel();
+ auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
+
+ // Find the client port.
+ auto port = kernel.CreateNamedServicePort(port_name);
+ if (!port) {
+ LOG_ERROR(Kernel_SVC, "tried to connect to unknown port: {}", port_name);
+ return ResultNotFound;
+ }
+
+ // Reserve a handle for the port.
+ // NOTE: Nintendo really does write directly to the output handle here.
+ R_TRY(handle_table.Reserve(out));
+ auto handle_guard = SCOPE_GUARD({ handle_table.Unreserve(*out); });
+
+ // Create a session.
+ KClientSession* session{};
+ R_TRY(port->CreateSession(std::addressof(session)));
+
+ kernel.RegisterNamedServiceHandler(port_name, &port->GetParent()->GetServerPort());
+
+ // Register the session in the table, close the extra reference.
+ handle_table.Register(*out, session);
+ session->Close();
+
+ // We succeeded.
+ handle_guard.Cancel();
+ return ResultSuccess;
+}
+
+Result ConnectToNamedPort32(Core::System& system, Handle* out_handle, u32 port_name_address) {
+ return ConnectToNamedPort(system, out_handle, port_name_address);
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_power_management.cpp b/src/core/hle/kernel/svc/svc_power_management.cpp
new file mode 100644
index 000000000..299e22ae6
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_power_management.cpp
@@ -0,0 +1,6 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_process.cpp b/src/core/hle/kernel/svc/svc_process.cpp
new file mode 100644
index 000000000..d6c8b4561
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_process.cpp
@@ -0,0 +1,124 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/core.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+
+/// Exits the current process
+void ExitProcess(Core::System& system) {
+ auto* current_process = system.Kernel().CurrentProcess();
+
+ LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID());
+ ASSERT_MSG(current_process->GetState() == KProcess::State::Running,
+ "Process has already exited");
+
+ system.Exit();
+}
+
+void ExitProcess32(Core::System& system) {
+ ExitProcess(system);
+}
+
+/// Gets the ID of the specified process or a specified thread's owning process.
+Result GetProcessId(Core::System& system, u64* out_process_id, Handle handle) {
+ LOG_DEBUG(Kernel_SVC, "called handle=0x{:08X}", handle);
+
+ // Get the object from the handle table.
+ KScopedAutoObject obj =
+ system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KAutoObject>(
+ static_cast<Handle>(handle));
+ R_UNLESS(obj.IsNotNull(), ResultInvalidHandle);
+
+ // Get the process from the object.
+ KProcess* process = nullptr;
+ if (KProcess* p = obj->DynamicCast<KProcess*>(); p != nullptr) {
+ // The object is a process, so we can use it directly.
+ process = p;
+ } else if (KThread* t = obj->DynamicCast<KThread*>(); t != nullptr) {
+ // The object is a thread, so we want to use its parent.
+        process = t->GetOwnerProcess();
+ } else {
+ // TODO(bunnei): This should also handle debug objects before returning.
+ UNIMPLEMENTED_MSG("Debug objects not implemented");
+ }
+
+ // Make sure the target process exists.
+ R_UNLESS(process != nullptr, ResultInvalidHandle);
+
+ // Get the process id.
+ *out_process_id = process->GetId();
+
+ return ResultSuccess;
+}
+
+Result GetProcessId32(Core::System& system, u32* out_process_id_low, u32* out_process_id_high,
+ Handle handle) {
+ u64 out_process_id{};
+ const auto result = GetProcessId(system, &out_process_id, handle);
+ *out_process_id_low = static_cast<u32>(out_process_id);
+ *out_process_id_high = static_cast<u32>(out_process_id >> 32);
+ return result;
+}
+
+Result GetProcessList(Core::System& system, u32* out_num_processes, VAddr out_process_ids,
+ u32 out_process_ids_size) {
+ LOG_DEBUG(Kernel_SVC, "called. out_process_ids=0x{:016X}, out_process_ids_size={}",
+ out_process_ids, out_process_ids_size);
+
+    // If the supplied size is greater than 0x0FFFFFFF (INT32_MAX / sizeof(u64)), bail.
+ if ((out_process_ids_size & 0xF0000000) != 0) {
+ LOG_ERROR(Kernel_SVC,
+ "Supplied size outside [0, 0x0FFFFFFF] range. out_process_ids_size={}",
+ out_process_ids_size);
+ return ResultOutOfRange;
+ }
+
+ const auto& kernel = system.Kernel();
+ const auto total_copy_size = out_process_ids_size * sizeof(u64);
+
+ if (out_process_ids_size > 0 && !kernel.CurrentProcess()->PageTable().IsInsideAddressSpace(
+ out_process_ids, total_copy_size)) {
+ LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}",
+ out_process_ids, out_process_ids + total_copy_size);
+ return ResultInvalidCurrentMemory;
+ }
+
+ auto& memory = system.Memory();
+ const auto& process_list = kernel.GetProcessList();
+ const auto num_processes = process_list.size();
+ const auto copy_amount = std::min(std::size_t{out_process_ids_size}, num_processes);
+
+ for (std::size_t i = 0; i < copy_amount; ++i) {
+ memory.Write64(out_process_ids, process_list[i]->GetProcessID());
+ out_process_ids += sizeof(u64);
+ }
+
+ *out_num_processes = static_cast<u32>(num_processes);
+ return ResultSuccess;
+}
+
+Result GetProcessInfo(Core::System& system, u64* out, Handle process_handle, u32 type) {
+ LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, type=0x{:X}", process_handle, type);
+
+ const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
+ KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle);
+ if (process.IsNull()) {
+ LOG_ERROR(Kernel_SVC, "Process handle does not exist, process_handle=0x{:08X}",
+ process_handle);
+ return ResultInvalidHandle;
+ }
+
+ const auto info_type = static_cast<ProcessInfoType>(type);
+ if (info_type != ProcessInfoType::ProcessState) {
+ LOG_ERROR(Kernel_SVC, "Expected info_type to be ProcessState but got {} instead", type);
+ return ResultInvalidEnumValue;
+ }
+
+ *out = static_cast<u64>(process->GetState());
+ return ResultSuccess;
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_process_memory.cpp b/src/core/hle/kernel/svc/svc_process_memory.cpp
new file mode 100644
index 000000000..b6ac43af2
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_process_memory.cpp
@@ -0,0 +1,274 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/core.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+namespace {
+
+constexpr bool IsValidAddressRange(VAddr address, u64 size) {
+ return address + size > address;
+}
+
+constexpr bool IsValidProcessMemoryPermission(Svc::MemoryPermission perm) {
+ switch (perm) {
+ case Svc::MemoryPermission::None:
+ case Svc::MemoryPermission::Read:
+ case Svc::MemoryPermission::ReadWrite:
+ case Svc::MemoryPermission::ReadExecute:
+ return true;
+ default:
+ return false;
+ }
+}
+
+} // namespace
+
+Result SetProcessMemoryPermission(Core::System& system, Handle process_handle, VAddr address,
+ u64 size, Svc::MemoryPermission perm) {
+ LOG_TRACE(Kernel_SVC,
+ "called, process_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}",
+ process_handle, address, size, perm);
+
+ // Validate the address/size.
+ R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
+ R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
+ R_UNLESS(size > 0, ResultInvalidSize);
+ R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
+ R_UNLESS(address == static_cast<uintptr_t>(address), ResultInvalidCurrentMemory);
+ R_UNLESS(size == static_cast<size_t>(size), ResultInvalidCurrentMemory);
+
+ // Validate the memory permission.
+ R_UNLESS(IsValidProcessMemoryPermission(perm), ResultInvalidNewMemoryPermission);
+
+ // Get the process from its handle.
+ KScopedAutoObject process =
+ system.CurrentProcess()->GetHandleTable().GetObject<KProcess>(process_handle);
+ R_UNLESS(process.IsNotNull(), ResultInvalidHandle);
+
+ // Validate that the address is in range.
+ auto& page_table = process->PageTable();
+ R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Set the memory permission.
+ return page_table.SetProcessMemoryPermission(address, size, perm);
+}
+
+Result MapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
+ VAddr src_address, u64 size) {
+ LOG_TRACE(Kernel_SVC,
+ "called, dst_address=0x{:X}, process_handle=0x{:X}, src_address=0x{:X}, size=0x{:X}",
+ dst_address, process_handle, src_address, size);
+
+ // Validate the address/size.
+ R_UNLESS(Common::IsAligned(dst_address, PageSize), ResultInvalidAddress);
+ R_UNLESS(Common::IsAligned(src_address, PageSize), ResultInvalidAddress);
+ R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
+ R_UNLESS(size > 0, ResultInvalidSize);
+ R_UNLESS((dst_address < dst_address + size), ResultInvalidCurrentMemory);
+ R_UNLESS((src_address < src_address + size), ResultInvalidCurrentMemory);
+
+ // Get the processes.
+ KProcess* dst_process = system.CurrentProcess();
+ KScopedAutoObject src_process =
+ dst_process->GetHandleTable().GetObjectWithoutPseudoHandle<KProcess>(process_handle);
+ R_UNLESS(src_process.IsNotNull(), ResultInvalidHandle);
+
+ // Get the page tables.
+ auto& dst_pt = dst_process->PageTable();
+ auto& src_pt = src_process->PageTable();
+
+ // Validate that the mapping is in range.
+ R_UNLESS(src_pt.Contains(src_address, size), ResultInvalidCurrentMemory);
+ R_UNLESS(dst_pt.CanContain(dst_address, size, KMemoryState::SharedCode),
+ ResultInvalidMemoryRegion);
+
+ // Create a new page group.
+ KPageGroup pg{system.Kernel(), dst_pt.GetBlockInfoManager()};
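+    // Open the source pages; they must be in a state that can be mapped into another process
+    // and must have no memory attributes set.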
+ R_TRY(src_pt.MakeAndOpenPageGroup(
+ std::addressof(pg), src_address, size / PageSize, KMemoryState::FlagCanMapProcess,
+ KMemoryState::FlagCanMapProcess, KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::All, KMemoryAttribute::None));
+
+ // Map the group.
+ R_TRY(dst_pt.MapPageGroup(dst_address, pg, KMemoryState::SharedCode,
+ KMemoryPermission::UserReadWrite));
+
+ return ResultSuccess;
+}
+
+Result UnmapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
+ VAddr src_address, u64 size) {
+ LOG_TRACE(Kernel_SVC,
+ "called, dst_address=0x{:X}, process_handle=0x{:X}, src_address=0x{:X}, size=0x{:X}",
+ dst_address, process_handle, src_address, size);
+
+ // Validate the address/size.
+ R_UNLESS(Common::IsAligned(dst_address, PageSize), ResultInvalidAddress);
+ R_UNLESS(Common::IsAligned(src_address, PageSize), ResultInvalidAddress);
+ R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
+ R_UNLESS(size > 0, ResultInvalidSize);
+ R_UNLESS((dst_address < dst_address + size), ResultInvalidCurrentMemory);
+ R_UNLESS((src_address < src_address + size), ResultInvalidCurrentMemory);
+
+ // Get the processes.
+ KProcess* dst_process = system.CurrentProcess();
+ KScopedAutoObject src_process =
+ dst_process->GetHandleTable().GetObjectWithoutPseudoHandle<KProcess>(process_handle);
+ R_UNLESS(src_process.IsNotNull(), ResultInvalidHandle);
+
+ // Get the page tables.
+ auto& dst_pt = dst_process->PageTable();
+ auto& src_pt = src_process->PageTable();
+
+ // Validate that the mapping is in range.
+ R_UNLESS(src_pt.Contains(src_address, size), ResultInvalidCurrentMemory);
+ R_UNLESS(dst_pt.CanContain(dst_address, size, KMemoryState::SharedCode),
+ ResultInvalidMemoryRegion);
+
+ // Unmap the memory.
+ R_TRY(dst_pt.UnmapProcessMemory(dst_address, size, src_pt, src_address));
+
+ return ResultSuccess;
+}
+
+Result MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
+ u64 src_address, u64 size) {
+ LOG_DEBUG(Kernel_SVC,
+ "called. process_handle=0x{:08X}, dst_address=0x{:016X}, "
+ "src_address=0x{:016X}, size=0x{:016X}",
+ process_handle, dst_address, src_address, size);
+
+ if (!Common::Is4KBAligned(src_address)) {
+ LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).",
+ src_address);
+ return ResultInvalidAddress;
+ }
+
+ if (!Common::Is4KBAligned(dst_address)) {
+ LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).",
+ dst_address);
+ return ResultInvalidAddress;
+ }
+
+ if (size == 0 || !Common::Is4KBAligned(size)) {
+ LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X})", size);
+ return ResultInvalidSize;
+ }
+
+ if (!IsValidAddressRange(dst_address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Destination address range overflows the address space (dst_address=0x{:016X}, "
+ "size=0x{:016X}).",
+ dst_address, size);
+ return ResultInvalidCurrentMemory;
+ }
+
+ if (!IsValidAddressRange(src_address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Source address range overflows the address space (src_address=0x{:016X}, "
+ "size=0x{:016X}).",
+ src_address, size);
+ return ResultInvalidCurrentMemory;
+ }
+
+ const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
+ KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle);
+ if (process.IsNull()) {
+ LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).",
+ process_handle);
+ return ResultInvalidHandle;
+ }
+
+ auto& page_table = process->PageTable();
+ if (!page_table.IsInsideAddressSpace(src_address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Source address range is not within the address space (src_address=0x{:016X}, "
+ "size=0x{:016X}).",
+ src_address, size);
+ return ResultInvalidCurrentMemory;
+ }
+
+ if (!page_table.IsInsideASLRRegion(dst_address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Destination address range is not within the ASLR region (dst_address=0x{:016X}, "
+ "size=0x{:016X}).",
+ dst_address, size);
+ return ResultInvalidMemoryRegion;
+ }
+
+ return page_table.MapCodeMemory(dst_address, src_address, size);
+}
+
+Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
+ u64 src_address, u64 size) {
+ LOG_DEBUG(Kernel_SVC,
+ "called. process_handle=0x{:08X}, dst_address=0x{:016X}, src_address=0x{:016X}, "
+ "size=0x{:016X}",
+ process_handle, dst_address, src_address, size);
+
+ if (!Common::Is4KBAligned(dst_address)) {
+ LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).",
+ dst_address);
+ return ResultInvalidAddress;
+ }
+
+ if (!Common::Is4KBAligned(src_address)) {
+ LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).",
+ src_address);
+ return ResultInvalidAddress;
+ }
+
+ if (size == 0 || !Common::Is4KBAligned(size)) {
+ LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X}).", size);
+ return ResultInvalidSize;
+ }
+
+ if (!IsValidAddressRange(dst_address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Destination address range overflows the address space (dst_address=0x{:016X}, "
+ "size=0x{:016X}).",
+ dst_address, size);
+ return ResultInvalidCurrentMemory;
+ }
+
+ if (!IsValidAddressRange(src_address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Source address range overflows the address space (src_address=0x{:016X}, "
+ "size=0x{:016X}).",
+ src_address, size);
+ return ResultInvalidCurrentMemory;
+ }
+
+ const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
+ KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle);
+ if (process.IsNull()) {
+ LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).",
+ process_handle);
+ return ResultInvalidHandle;
+ }
+
+ auto& page_table = process->PageTable();
+ if (!page_table.IsInsideAddressSpace(src_address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Source address range is not within the address space (src_address=0x{:016X}, "
+ "size=0x{:016X}).",
+ src_address, size);
+ return ResultInvalidCurrentMemory;
+ }
+
+ if (!page_table.IsInsideASLRRegion(dst_address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Destination address range is not within the ASLR region (dst_address=0x{:016X}, "
+ "size=0x{:016X}).",
+ dst_address, size);
+ return ResultInvalidMemoryRegion;
+ }
+
+ return page_table.UnmapCodeMemory(dst_address, src_address, size,
+ KPageTable::ICacheInvalidationStrategy::InvalidateAll);
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_processor.cpp b/src/core/hle/kernel/svc/svc_processor.cpp
new file mode 100644
index 000000000..8561cf74f
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_processor.cpp
@@ -0,0 +1,21 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/logging/log.h"
+#include "core/core.h"
+#include "core/hle/kernel/physical_core.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+
+/// Get which CPU core is executing the current thread
+u32 GetCurrentProcessorNumber(Core::System& system) {
+ LOG_TRACE(Kernel_SVC, "called");
+ return static_cast<u32>(system.CurrentPhysicalCore().CoreIndex());
+}
+
+u32 GetCurrentProcessorNumber32(Core::System& system) {
+ return GetCurrentProcessorNumber(system);
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_query_memory.cpp b/src/core/hle/kernel/svc/svc_query_memory.cpp
new file mode 100644
index 000000000..aac3b2eca
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_query_memory.cpp
@@ -0,0 +1,55 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/core.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+
+Result QueryMemory(Core::System& system, VAddr memory_info_address, VAddr page_info_address,
+ VAddr query_address) {
+ LOG_TRACE(Kernel_SVC,
+ "called, memory_info_address=0x{:016X}, page_info_address=0x{:016X}, "
+ "query_address=0x{:016X}",
+ memory_info_address, page_info_address, query_address);
+
+ return QueryProcessMemory(system, memory_info_address, page_info_address, CurrentProcess,
+ query_address);
+}
+
+Result QueryMemory32(Core::System& system, u32 memory_info_address, u32 page_info_address,
+ u32 query_address) {
+ return QueryMemory(system, memory_info_address, page_info_address, query_address);
+}
+
+Result QueryProcessMemory(Core::System& system, VAddr memory_info_address, VAddr page_info_address,
+ Handle process_handle, VAddr address) {
+ LOG_TRACE(Kernel_SVC, "called process=0x{:08X} address={:X}", process_handle, address);
+ const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
+ KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle);
+ if (process.IsNull()) {
+ LOG_ERROR(Kernel_SVC, "Process handle does not exist, process_handle=0x{:08X}",
+ process_handle);
+ return ResultInvalidHandle;
+ }
+
+ auto& memory{system.Memory()};
+ const auto memory_info{process->PageTable().QueryInfo(address).GetSvcMemoryInfo()};
+
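+    // Write out the MemoryInfo fields at their ABI-defined offsets.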
+ memory.Write64(memory_info_address + 0x00, memory_info.base_address);
+ memory.Write64(memory_info_address + 0x08, memory_info.size);
+ memory.Write32(memory_info_address + 0x10, static_cast<u32>(memory_info.state) & 0xff);
+ memory.Write32(memory_info_address + 0x14, static_cast<u32>(memory_info.attribute));
+ memory.Write32(memory_info_address + 0x18, static_cast<u32>(memory_info.permission));
+ memory.Write32(memory_info_address + 0x1c, memory_info.ipc_count);
+ memory.Write32(memory_info_address + 0x20, memory_info.device_count);
+ memory.Write32(memory_info_address + 0x24, 0);
+
+ // Page info appears to be currently unused by the kernel and is always set to zero.
+ memory.Write32(page_info_address, 0);
+
+ return ResultSuccess;
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_register.cpp b/src/core/hle/kernel/svc/svc_register.cpp
new file mode 100644
index 000000000..299e22ae6
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_register.cpp
@@ -0,0 +1,6 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_resource_limit.cpp b/src/core/hle/kernel/svc/svc_resource_limit.cpp
new file mode 100644
index 000000000..679ba10fa
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_resource_limit.cpp
@@ -0,0 +1,95 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/scope_exit.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_resource_limit.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+
+Result CreateResourceLimit(Core::System& system, Handle* out_handle) {
+ LOG_DEBUG(Kernel_SVC, "called");
+
+ // Create a new resource limit.
+ auto& kernel = system.Kernel();
+ KResourceLimit* resource_limit = KResourceLimit::Create(kernel);
+ R_UNLESS(resource_limit != nullptr, ResultOutOfResource);
+
+ // Ensure we don't leak a reference to the limit.
+ SCOPE_EXIT({ resource_limit->Close(); });
+
+ // Initialize the resource limit.
+ resource_limit->Initialize(&system.CoreTiming());
+
+ // Register the limit.
+ KResourceLimit::Register(kernel, resource_limit);
+
+ // Add the limit to the handle table.
+ R_TRY(kernel.CurrentProcess()->GetHandleTable().Add(out_handle, resource_limit));
+
+ return ResultSuccess;
+}
+
+Result GetResourceLimitLimitValue(Core::System& system, u64* out_limit_value,
+ Handle resource_limit_handle, LimitableResource which) {
+ LOG_DEBUG(Kernel_SVC, "called, resource_limit_handle={:08X}, which={}", resource_limit_handle,
+ which);
+
+ // Validate the resource.
+ R_UNLESS(IsValidResourceType(which), ResultInvalidEnumValue);
+
+ // Get the resource limit.
+ auto& kernel = system.Kernel();
+ KScopedAutoObject resource_limit =
+ kernel.CurrentProcess()->GetHandleTable().GetObject<KResourceLimit>(resource_limit_handle);
+ R_UNLESS(resource_limit.IsNotNull(), ResultInvalidHandle);
+
+ // Get the limit value.
+ *out_limit_value = resource_limit->GetLimitValue(which);
+
+ return ResultSuccess;
+}
+
+Result GetResourceLimitCurrentValue(Core::System& system, u64* out_current_value,
+ Handle resource_limit_handle, LimitableResource which) {
+ LOG_DEBUG(Kernel_SVC, "called, resource_limit_handle={:08X}, which={}", resource_limit_handle,
+ which);
+
+ // Validate the resource.
+ R_UNLESS(IsValidResourceType(which), ResultInvalidEnumValue);
+
+ // Get the resource limit.
+ auto& kernel = system.Kernel();
+ KScopedAutoObject resource_limit =
+ kernel.CurrentProcess()->GetHandleTable().GetObject<KResourceLimit>(resource_limit_handle);
+ R_UNLESS(resource_limit.IsNotNull(), ResultInvalidHandle);
+
+ // Get the current value.
+ *out_current_value = resource_limit->GetCurrentValue(which);
+
+ return ResultSuccess;
+}
+
+Result SetResourceLimitLimitValue(Core::System& system, Handle resource_limit_handle,
+ LimitableResource which, u64 limit_value) {
+ LOG_DEBUG(Kernel_SVC, "called, resource_limit_handle={:08X}, which={}, limit_value={}",
+ resource_limit_handle, which, limit_value);
+
+ // Validate the resource.
+ R_UNLESS(IsValidResourceType(which), ResultInvalidEnumValue);
+
+ // Get the resource limit.
+ auto& kernel = system.Kernel();
+ KScopedAutoObject resource_limit =
+ kernel.CurrentProcess()->GetHandleTable().GetObject<KResourceLimit>(resource_limit_handle);
+ R_UNLESS(resource_limit.IsNotNull(), ResultInvalidHandle);
+
+ // Set the limit value.
+ R_TRY(resource_limit->SetLimitValue(which, limit_value));
+
+ return ResultSuccess;
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_secure_monitor_call.cpp b/src/core/hle/kernel/svc/svc_secure_monitor_call.cpp
new file mode 100644
index 000000000..299e22ae6
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_secure_monitor_call.cpp
@@ -0,0 +1,6 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_session.cpp b/src/core/hle/kernel/svc/svc_session.cpp
new file mode 100644
index 000000000..dac8ce33c
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_session.cpp
@@ -0,0 +1,103 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/scope_exit.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_scoped_resource_reservation.h"
+#include "core/hle/kernel/k_session.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+namespace {
+
+template <typename T>
+Result CreateSession(Core::System& system, Handle* out_server, Handle* out_client, u64 name) {
+ auto& process = *system.CurrentProcess();
+ auto& handle_table = process.GetHandleTable();
+
+ // Declare the session we're going to allocate.
+ T* session;
+
+ // Reserve a new session from the process resource limit.
+ // FIXME: LimitableResource_SessionCountMax
+ KScopedResourceReservation session_reservation(&process, LimitableResource::SessionCountMax);
+ if (session_reservation.Succeeded()) {
+ session = T::Create(system.Kernel());
+ } else {
+ return ResultLimitReached;
+
+ // // We couldn't reserve a session. Check that we support dynamically expanding the
+ // // resource limit.
+ // R_UNLESS(process.GetResourceLimit() ==
+ // &system.Kernel().GetSystemResourceLimit(), ResultLimitReached);
+ // R_UNLESS(KTargetSystem::IsDynamicResourceLimitsEnabled(), ResultLimitReached());
+
+ // // Try to allocate a session from unused slab memory.
+ // session = T::CreateFromUnusedSlabMemory();
+ // R_UNLESS(session != nullptr, ResultLimitReached);
+ // ON_RESULT_FAILURE { session->Close(); };
+
+ // // If we're creating a KSession, we want to add two KSessionRequests to the heap, to
+ // // prevent request exhaustion.
+ // // NOTE: Nintendo checks if session->DynamicCast<KSession *>() != nullptr, but there's
+ // // no reason to not do this statically.
+ // if constexpr (std::same_as<T, KSession>) {
+ // for (size_t i = 0; i < 2; i++) {
+ // KSessionRequest* request = KSessionRequest::CreateFromUnusedSlabMemory();
+ // R_UNLESS(request != nullptr, ResultLimitReached);
+ // request->Close();
+ // }
+ // }
+
+ // We successfully allocated a session, so add the object we allocated to the resource
+ // limit.
+ // system.Kernel().GetSystemResourceLimit().Reserve(LimitableResource::SessionCountMax, 1);
+ }
+
+ // Check that we successfully created a session.
+ R_UNLESS(session != nullptr, ResultOutOfResource);
+
+ // Initialize the session.
+ session->Initialize(nullptr, fmt::format("{}", name));
+
+ // Commit the session reservation.
+ session_reservation.Commit();
+
+    // Ensure that we clean up the session; once this function ends, the handle table should hold
+    // its only remaining references.
+ SCOPE_EXIT({
+ session->GetClientSession().Close();
+ session->GetServerSession().Close();
+ });
+
+ // Register the session.
+ T::Register(system.Kernel(), session);
+
+ // Add the server session to the handle table.
+ R_TRY(handle_table.Add(out_server, &session->GetServerSession()));
+
+ // Add the client session to the handle table.
+ const auto result = handle_table.Add(out_client, &session->GetClientSession());
+
+ if (!R_SUCCEEDED(result)) {
+        // Ensure that we maintain a clean handle state on exit.
+ handle_table.Remove(*out_server);
+ }
+
+ return result;
+}
+
+} // namespace
+
+Result CreateSession(Core::System& system, Handle* out_server, Handle* out_client, u32 is_light,
+ u64 name) {
+ if (is_light) {
+ // return CreateSession<KLightSession>(system, out_server, out_client, name);
+ return ResultUnknown;
+ } else {
+ return CreateSession<KSession>(system, out_server, out_client, name);
+ }
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_shared_memory.cpp b/src/core/hle/kernel/svc/svc_shared_memory.cpp
new file mode 100644
index 000000000..d465bcbe7
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_shared_memory.cpp
@@ -0,0 +1,106 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/scope_exit.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_shared_memory.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+namespace {
+
+constexpr bool IsValidSharedMemoryPermission(MemoryPermission perm) {
+ switch (perm) {
+ case MemoryPermission::Read:
+ case MemoryPermission::ReadWrite:
+ return true;
+ default:
+ return false;
+ }
+}
+
+[[maybe_unused]] constexpr bool IsValidRemoteSharedMemoryPermission(MemoryPermission perm) {
+ return IsValidSharedMemoryPermission(perm) || perm == MemoryPermission::DontCare;
+}
+
+} // namespace
+
+Result MapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address, u64 size,
+ Svc::MemoryPermission map_perm) {
+ LOG_TRACE(Kernel_SVC,
+ "called, shared_memory_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}",
+ shmem_handle, address, size, map_perm);
+
+ // Validate the address/size.
+ R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
+ R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
+ R_UNLESS(size > 0, ResultInvalidSize);
+ R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
+
+ // Validate the permission.
+ R_UNLESS(IsValidSharedMemoryPermission(map_perm), ResultInvalidNewMemoryPermission);
+
+ // Get the current process.
+ auto& process = *system.Kernel().CurrentProcess();
+ auto& page_table = process.PageTable();
+
+ // Get the shared memory.
+ KScopedAutoObject shmem = process.GetHandleTable().GetObject<KSharedMemory>(shmem_handle);
+ R_UNLESS(shmem.IsNotNull(), ResultInvalidHandle);
+
+ // Verify that the mapping is in range.
+ R_UNLESS(page_table.CanContain(address, size, KMemoryState::Shared), ResultInvalidMemoryRegion);
+
+ // Add the shared memory to the process.
+ R_TRY(process.AddSharedMemory(shmem.GetPointerUnsafe(), address, size));
+
+ // Ensure that we clean up the shared memory if we fail to map it.
+ auto guard =
+ SCOPE_GUARD({ process.RemoveSharedMemory(shmem.GetPointerUnsafe(), address, size); });
+
+ // Map the shared memory.
+ R_TRY(shmem->Map(process, address, size, map_perm));
+
+ // We succeeded.
+ guard.Cancel();
+ return ResultSuccess;
+}
+
+Result MapSharedMemory32(Core::System& system, Handle shmem_handle, u32 address, u32 size,
+ Svc::MemoryPermission map_perm) {
+ return MapSharedMemory(system, shmem_handle, address, size, map_perm);
+}
+
+Result UnmapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address, u64 size) {
+ // Validate the address/size.
+ R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
+ R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
+ R_UNLESS(size > 0, ResultInvalidSize);
+ R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
+
+ // Get the current process.
+ auto& process = *system.Kernel().CurrentProcess();
+ auto& page_table = process.PageTable();
+
+ // Get the shared memory.
+ KScopedAutoObject shmem = process.GetHandleTable().GetObject<KSharedMemory>(shmem_handle);
+ R_UNLESS(shmem.IsNotNull(), ResultInvalidHandle);
+
+ // Verify that the mapping is in range.
+ R_UNLESS(page_table.CanContain(address, size, KMemoryState::Shared), ResultInvalidMemoryRegion);
+
+ // Unmap the shared memory.
+ R_TRY(shmem->Unmap(process, address, size));
+
+ // Remove the shared memory from the process.
+ process.RemoveSharedMemory(shmem.GetPointerUnsafe(), address, size);
+
+ return ResultSuccess;
+}
+
+Result UnmapSharedMemory32(Core::System& system, Handle shmem_handle, u32 address, u32 size) {
+ return UnmapSharedMemory(system, shmem_handle, address, size);
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_synchronization.cpp b/src/core/hle/kernel/svc/svc_synchronization.cpp
new file mode 100644
index 000000000..1bf6a612a
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_synchronization.cpp
@@ -0,0 +1,139 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/scope_exit.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_readable_event.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+
+/// Close a handle
+Result CloseHandle(Core::System& system, Handle handle) {
+ LOG_TRACE(Kernel_SVC, "Closing handle 0x{:08X}", handle);
+
+ // Remove the handle.
+ R_UNLESS(system.Kernel().CurrentProcess()->GetHandleTable().Remove(handle),
+ ResultInvalidHandle);
+
+ return ResultSuccess;
+}
+
+Result CloseHandle32(Core::System& system, Handle handle) {
+ return CloseHandle(system, handle);
+}
+
+/// Clears the signaled state of an event or process.
+Result ResetSignal(Core::System& system, Handle handle) {
+ LOG_DEBUG(Kernel_SVC, "called handle 0x{:08X}", handle);
+
+ // Get the current handle table.
+ const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
+
+ // Try to reset as readable event.
+ {
+ KScopedAutoObject readable_event = handle_table.GetObject<KReadableEvent>(handle);
+ if (readable_event.IsNotNull()) {
+ return readable_event->Reset();
+ }
+ }
+
+ // Try to reset as process.
+ {
+ KScopedAutoObject process = handle_table.GetObject<KProcess>(handle);
+ if (process.IsNotNull()) {
+ return process->Reset();
+ }
+ }
+
+ LOG_ERROR(Kernel_SVC, "invalid handle (0x{:08X})", handle);
+
+ return ResultInvalidHandle;
+}
+
+Result ResetSignal32(Core::System& system, Handle handle) {
+ return ResetSignal(system, handle);
+}
+
+/// Wait for the given handles to synchronize, timeout after the specified nanoseconds
+Result WaitSynchronization(Core::System& system, s32* index, VAddr handles_address, s32 num_handles,
+ s64 nano_seconds) {
+ LOG_TRACE(Kernel_SVC, "called handles_address=0x{:X}, num_handles={}, nano_seconds={}",
+ handles_address, num_handles, nano_seconds);
+
+ // Ensure number of handles is valid.
+ R_UNLESS(0 <= num_handles && num_handles <= ArgumentHandleCountMax, ResultOutOfRange);
+
+ auto& kernel = system.Kernel();
+ std::vector<KSynchronizationObject*> objs(num_handles);
+ const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
+ Handle* handles = system.Memory().GetPointer<Handle>(handles_address);
+
+ // Copy user handles.
+ if (num_handles > 0) {
+ // Convert the handles to objects.
+ R_UNLESS(handle_table.GetMultipleObjects<KSynchronizationObject>(objs.data(), handles,
+ num_handles),
+ ResultInvalidHandle);
+ for (const auto& obj : objs) {
+ kernel.RegisterInUseObject(obj);
+ }
+ }
+
+ // Ensure handles are closed when we're done.
+ SCOPE_EXIT({
+ for (s32 i = 0; i < num_handles; ++i) {
+ kernel.UnregisterInUseObject(objs[i]);
+ objs[i]->Close();
+ }
+ });
+
+ return KSynchronizationObject::Wait(kernel, index, objs.data(), static_cast<s32>(objs.size()),
+ nano_seconds);
+}
+
+Result WaitSynchronization32(Core::System& system, u32 timeout_low, u32 handles_address,
+ s32 num_handles, u32 timeout_high, s32* index) {
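+    // Combine the timeout's high and low words into a signed 64-bit nanosecond count.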
+ const s64 nano_seconds{(static_cast<s64>(timeout_high) << 32) | static_cast<s64>(timeout_low)};
+ return WaitSynchronization(system, index, handles_address, num_handles, nano_seconds);
+}
+
+/// Resumes a thread waiting on WaitSynchronization
+Result CancelSynchronization(Core::System& system, Handle handle) {
+ LOG_TRACE(Kernel_SVC, "called handle=0x{:X}", handle);
+
+ // Get the thread from its handle.
+ KScopedAutoObject thread =
+ system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(handle);
+ R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
+
+ // Cancel the thread's wait.
+ thread->WaitCancel();
+ return ResultSuccess;
+}
+
+Result CancelSynchronization32(Core::System& system, Handle handle) {
+ return CancelSynchronization(system, handle);
+}
+
+void SynchronizePreemptionState(Core::System& system) {
+ auto& kernel = system.Kernel();
+
+ // Lock the scheduler.
+ KScopedSchedulerLock sl{kernel};
+
+ // If the current thread is pinned, unpin it.
+ KProcess* cur_process = system.Kernel().CurrentProcess();
+ const auto core_id = GetCurrentCoreId(kernel);
+
+ if (cur_process->GetPinnedThread(core_id) == GetCurrentThreadPointer(kernel)) {
+ // Clear the current thread's interrupt flag.
+ GetCurrentThread(kernel).ClearInterruptFlag();
+
+ // Unpin the current thread.
+ cur_process->UnpinCurrentThread(core_id);
+ }
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_thread.cpp b/src/core/hle/kernel/svc/svc_thread.cpp
new file mode 100644
index 000000000..dd9f8e8b1
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_thread.cpp
@@ -0,0 +1,396 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/scope_exit.h"
+#include "core/core.h"
+#include "core/core_timing.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_scoped_resource_reservation.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+namespace {
+
+constexpr bool IsValidVirtualCoreId(int32_t core_id) {
+ return (0 <= core_id && core_id < static_cast<int32_t>(Core::Hardware::NUM_CPU_CORES));
+}
+
+} // Anonymous namespace
+
+/// Creates a new thread
+Result CreateThread(Core::System& system, Handle* out_handle, VAddr entry_point, u64 arg,
+ VAddr stack_bottom, u32 priority, s32 core_id) {
+ LOG_DEBUG(Kernel_SVC,
+ "called entry_point=0x{:08X}, arg=0x{:08X}, stack_bottom=0x{:08X}, "
+ "priority=0x{:08X}, core_id=0x{:08X}",
+ entry_point, arg, stack_bottom, priority, core_id);
+
+ // Adjust core id, if it's the default magic.
+ auto& kernel = system.Kernel();
+ auto& process = *kernel.CurrentProcess();
+ if (core_id == IdealCoreUseProcessValue) {
+ core_id = process.GetIdealCoreId();
+ }
+
+ // Validate arguments.
+ if (!IsValidVirtualCoreId(core_id)) {
+ LOG_ERROR(Kernel_SVC, "Invalid Core ID specified (id={})", core_id);
+ return ResultInvalidCoreId;
+ }
+ if (((1ULL << core_id) & process.GetCoreMask()) == 0) {
+ LOG_ERROR(Kernel_SVC, "Core ID doesn't fall within allowable cores (id={})", core_id);
+ return ResultInvalidCoreId;
+ }
+
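+    // Valid priorities run from HighestThreadPriority (numerically smallest) to LowestThreadPriority.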
+ if (HighestThreadPriority > priority || priority > LowestThreadPriority) {
+ LOG_ERROR(Kernel_SVC, "Invalid priority specified (priority={})", priority);
+ return ResultInvalidPriority;
+ }
+ if (!process.CheckThreadPriority(priority)) {
+ LOG_ERROR(Kernel_SVC, "Invalid allowable thread priority (priority={})", priority);
+ return ResultInvalidPriority;
+ }
+
+ // Reserve a new thread from the process resource limit (waiting up to 100ms).
+ KScopedResourceReservation thread_reservation(
+ kernel.CurrentProcess(), LimitableResource::ThreadCountMax, 1,
+ system.CoreTiming().GetGlobalTimeNs().count() + 100000000);
+ if (!thread_reservation.Succeeded()) {
+ LOG_ERROR(Kernel_SVC, "Could not reserve a new thread");
+ return ResultLimitReached;
+ }
+
+ // Create the thread.
+ KThread* thread = KThread::Create(kernel);
+ if (!thread) {
+ LOG_ERROR(Kernel_SVC, "Unable to create new threads. Thread creation limit reached.");
+ return ResultOutOfResource;
+ }
+ SCOPE_EXIT({ thread->Close(); });
+
+ // Initialize the thread.
+ {
+ KScopedLightLock lk{process.GetStateLock()};
+ R_TRY(KThread::InitializeUserThread(system, thread, entry_point, arg, stack_bottom,
+ priority, core_id, &process));
+ }
+
+ // Set the thread name for debugging purposes.
+ thread->SetName(fmt::format("thread[entry_point={:X}, handle={:X}]", entry_point, *out_handle));
+
+ // Commit the thread reservation.
+ thread_reservation.Commit();
+
+ // Register the new thread.
+ KThread::Register(kernel, thread);
+
+ // Add the thread to the handle table.
+ R_TRY(process.GetHandleTable().Add(out_handle, thread));
+
+ return ResultSuccess;
+}
+
+Result CreateThread32(Core::System& system, Handle* out_handle, u32 priority, u32 entry_point,
+ u32 arg, u32 stack_top, s32 processor_id) {
+ return CreateThread(system, out_handle, entry_point, arg, stack_top, priority, processor_id);
+}
+
+/// Starts the thread for the provided handle
+Result StartThread(Core::System& system, Handle thread_handle) {
+ LOG_DEBUG(Kernel_SVC, "called thread=0x{:08X}", thread_handle);
+
+ // Get the thread from its handle.
+ KScopedAutoObject thread =
+ system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle);
+ R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
+
+ // Try to start the thread.
+ R_TRY(thread->Run());
+
+ // If we succeeded, persist a reference to the thread.
+ thread->Open();
+ system.Kernel().RegisterInUseObject(thread.GetPointerUnsafe());
+
+ return ResultSuccess;
+}
+
+Result StartThread32(Core::System& system, Handle thread_handle) {
+ return StartThread(system, thread_handle);
+}
+
+/// Called when a thread exits
+void ExitThread(Core::System& system) {
+ LOG_DEBUG(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC());
+
+ auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
+ system.GlobalSchedulerContext().RemoveThread(current_thread);
+ current_thread->Exit();
+ system.Kernel().UnregisterInUseObject(current_thread);
+}
+
+void ExitThread32(Core::System& system) {
+ ExitThread(system);
+}
+
+/// Sleep the current thread
+void SleepThread(Core::System& system, s64 nanoseconds) {
+ auto& kernel = system.Kernel();
+ const auto yield_type = static_cast<Svc::YieldType>(nanoseconds);
+
+ LOG_TRACE(Kernel_SVC, "called nanoseconds={}", nanoseconds);
+
+ // When the input tick is positive, sleep.
+ if (nanoseconds > 0) {
+ // Convert the timeout from nanoseconds to ticks.
+ // NOTE: Nintendo does not use this conversion logic in WaitSynchronization...
+
+ // Sleep.
+ // NOTE: Nintendo does not check the result of this sleep.
+ static_cast<void>(GetCurrentThread(kernel).Sleep(nanoseconds));
+ } else if (yield_type == Svc::YieldType::WithoutCoreMigration) {
+ KScheduler::YieldWithoutCoreMigration(kernel);
+ } else if (yield_type == Svc::YieldType::WithCoreMigration) {
+ KScheduler::YieldWithCoreMigration(kernel);
+ } else if (yield_type == Svc::YieldType::ToAnyThread) {
+ KScheduler::YieldToAnyThread(kernel);
+ } else {
+ // Nintendo does nothing at all if an otherwise invalid value is passed.
+ ASSERT_MSG(false, "Unimplemented sleep yield type '{:016X}'!", nanoseconds);
+ }
+}
+
+void SleepThread32(Core::System& system, u32 nanoseconds_low, u32 nanoseconds_high) {
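+    // Combine the nanosecond count's low and high words into a signed 64-bit value.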
+ const auto nanoseconds = static_cast<s64>(u64{nanoseconds_low} | (u64{nanoseconds_high} << 32));
+ SleepThread(system, nanoseconds);
+}
+
+/// Gets the thread context
+Result GetThreadContext(Core::System& system, VAddr out_context, Handle thread_handle) {
+ LOG_DEBUG(Kernel_SVC, "called, out_context=0x{:08X}, thread_handle=0x{:X}", out_context,
+ thread_handle);
+
+ auto& kernel = system.Kernel();
+
+ // Get the thread from its handle.
+ KScopedAutoObject thread =
+ kernel.CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle);
+ R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
+
+ // Require the handle be to a non-current thread in the current process.
+ const auto* current_process = kernel.CurrentProcess();
+ R_UNLESS(current_process == thread->GetOwnerProcess(), ResultInvalidId);
+
+ // Verify that the thread isn't terminated.
+ R_UNLESS(thread->GetState() != ThreadState::Terminated, ResultTerminationRequested);
+
+    // Check that the thread is not the current one.
+    // NOTE: Nintendo does not check this, and thus the following loop will deadlock.
+ R_UNLESS(thread.GetPointerUnsafe() != GetCurrentThreadPointer(kernel), ResultInvalidId);
+
+ // Try to get the thread context until the thread isn't current on any core.
+ while (true) {
+ KScopedSchedulerLock sl{kernel};
+
+ // TODO(bunnei): Enforce that thread is suspended for debug here.
+
+ // If the thread's raw state isn't runnable, check if it's current on some core.
+ if (thread->GetRawState() != ThreadState::Runnable) {
+ bool current = false;
+ for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
+ if (thread.GetPointerUnsafe() == kernel.Scheduler(i).GetSchedulerCurrentThread()) {
+ current = true;
+ break;
+ }
+ }
+
+ // If the thread is current, retry until it isn't.
+ if (current) {
+ continue;
+ }
+ }
+
+ // Get the thread context.
+ std::vector<u8> context;
+ R_TRY(thread->GetThreadContext3(context));
+
+ // Copy the thread context to user space.
+ system.Memory().WriteBlock(out_context, context.data(), context.size());
+
+ return ResultSuccess;
+ }
+
+ return ResultSuccess;
+}
+
+Result GetThreadContext32(Core::System& system, u32 out_context, Handle thread_handle) {
+ return GetThreadContext(system, out_context, thread_handle);
+}
+
+/// Gets the priority for the specified thread
+Result GetThreadPriority(Core::System& system, u32* out_priority, Handle handle) {
+ LOG_TRACE(Kernel_SVC, "called");
+
+ // Get the thread from its handle.
+ KScopedAutoObject thread =
+ system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(handle);
+ R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
+
+ // Get the thread's priority.
+ *out_priority = thread->GetPriority();
+ return ResultSuccess;
+}
+
+Result GetThreadPriority32(Core::System& system, u32* out_priority, Handle handle) {
+ return GetThreadPriority(system, out_priority, handle);
+}
+
+/// Sets the priority for the specified thread
+Result SetThreadPriority(Core::System& system, Handle thread_handle, u32 priority) {
+ // Get the current process.
+ KProcess& process = *system.Kernel().CurrentProcess();
+
+ // Validate the priority.
+ R_UNLESS(HighestThreadPriority <= priority && priority <= LowestThreadPriority,
+ ResultInvalidPriority);
+ R_UNLESS(process.CheckThreadPriority(priority), ResultInvalidPriority);
+
+ // Get the thread from its handle.
+ KScopedAutoObject thread = process.GetHandleTable().GetObject<KThread>(thread_handle);
+ R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
+
+ // Set the thread priority.
+ thread->SetBasePriority(priority);
+ return ResultSuccess;
+}
+
+Result SetThreadPriority32(Core::System& system, Handle thread_handle, u32 priority) {
+ return SetThreadPriority(system, thread_handle, priority);
+}
+
+Result GetThreadList(Core::System& system, u32* out_num_threads, VAddr out_thread_ids,
+ u32 out_thread_ids_size, Handle debug_handle) {
+ // TODO: Handle this case when debug events are supported.
+ UNIMPLEMENTED_IF(debug_handle != InvalidHandle);
+
+ LOG_DEBUG(Kernel_SVC, "called. out_thread_ids=0x{:016X}, out_thread_ids_size={}",
+ out_thread_ids, out_thread_ids_size);
+
+    // Reject sizes with any of the top four bits set (i.e., larger than INT32_MAX / sizeof(u64) entries).
+ if ((out_thread_ids_size & 0xF0000000) != 0) {
+ LOG_ERROR(Kernel_SVC, "Supplied size outside [0, 0x0FFFFFFF] range. size={}",
+ out_thread_ids_size);
+ return ResultOutOfRange;
+ }
+
+ auto* const current_process = system.Kernel().CurrentProcess();
+ const auto total_copy_size = out_thread_ids_size * sizeof(u64);
+
+ if (out_thread_ids_size > 0 &&
+ !current_process->PageTable().IsInsideAddressSpace(out_thread_ids, total_copy_size)) {
+ LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}",
+ out_thread_ids, out_thread_ids + total_copy_size);
+ return ResultInvalidCurrentMemory;
+ }
+
+ auto& memory = system.Memory();
+ const auto& thread_list = current_process->GetThreadList();
+ const auto num_threads = thread_list.size();
+ const auto copy_amount = std::min(std::size_t{out_thread_ids_size}, num_threads);
+
+ auto list_iter = thread_list.cbegin();
+ for (std::size_t i = 0; i < copy_amount; ++i, ++list_iter) {
+ memory.Write64(out_thread_ids, (*list_iter)->GetThreadID());
+ out_thread_ids += sizeof(u64);
+ }
+
+ *out_num_threads = static_cast<u32>(num_threads);
+ return ResultSuccess;
+}
+
+Result GetThreadCoreMask(Core::System& system, Handle thread_handle, s32* out_core_id,
+ u64* out_affinity_mask) {
+ LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle);
+
+ // Get the thread from its handle.
+ KScopedAutoObject thread =
+ system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle);
+ R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
+
+ // Get the core mask.
+ R_TRY(thread->GetCoreMask(out_core_id, out_affinity_mask));
+
+ return ResultSuccess;
+}
+
+Result GetThreadCoreMask32(Core::System& system, Handle thread_handle, s32* out_core_id,
+ u32* out_affinity_mask_low, u32* out_affinity_mask_high) {
+ u64 out_affinity_mask{};
+ const auto result = GetThreadCoreMask(system, thread_handle, out_core_id, &out_affinity_mask);
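+    // Split the 64-bit affinity mask across the two 32-bit outputs.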
+ *out_affinity_mask_high = static_cast<u32>(out_affinity_mask >> 32);
+ *out_affinity_mask_low = static_cast<u32>(out_affinity_mask);
+ return result;
+}
+
+Result SetThreadCoreMask(Core::System& system, Handle thread_handle, s32 core_id,
+ u64 affinity_mask) {
+ // Determine the core id/affinity mask.
+ if (core_id == IdealCoreUseProcessValue) {
+ core_id = system.Kernel().CurrentProcess()->GetIdealCoreId();
+ affinity_mask = (1ULL << core_id);
+ } else {
+ // Validate the affinity mask.
+ const u64 process_core_mask = system.Kernel().CurrentProcess()->GetCoreMask();
+ R_UNLESS((affinity_mask | process_core_mask) == process_core_mask, ResultInvalidCoreId);
+ R_UNLESS(affinity_mask != 0, ResultInvalidCombination);
+
+ // Validate the core id.
+ if (IsValidVirtualCoreId(core_id)) {
+ R_UNLESS(((1ULL << core_id) & affinity_mask) != 0, ResultInvalidCombination);
+ } else {
+ R_UNLESS(core_id == IdealCoreNoUpdate || core_id == IdealCoreDontCare,
+ ResultInvalidCoreId);
+ }
+ }
+
+ // Get the thread from its handle.
+ KScopedAutoObject thread =
+ system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle);
+ R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
+
+ // Set the core mask.
+ R_TRY(thread->SetCoreMask(core_id, affinity_mask));
+
+ return ResultSuccess;
+}
+
+Result SetThreadCoreMask32(Core::System& system, Handle thread_handle, s32 core_id,
+ u32 affinity_mask_low, u32 affinity_mask_high) {
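+    // Reassemble the 64-bit affinity mask from its 32-bit halves.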
+ const auto affinity_mask = u64{affinity_mask_low} | (u64{affinity_mask_high} << 32);
+ return SetThreadCoreMask(system, thread_handle, core_id, affinity_mask);
+}
+
+/// Get the ID for the specified thread.
+Result GetThreadId(Core::System& system, u64* out_thread_id, Handle thread_handle) {
+ // Get the thread from its handle.
+ KScopedAutoObject thread =
+ system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle);
+ R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);
+
+ // Get the thread's id.
+ *out_thread_id = thread->GetId();
+ return ResultSuccess;
+}
+
+Result GetThreadId32(Core::System& system, u32* out_thread_id_low, u32* out_thread_id_high,
+ Handle thread_handle) {
+ u64 out_thread_id{};
+ const Result result{GetThreadId(system, &out_thread_id, thread_handle)};
+
+ *out_thread_id_low = static_cast<u32>(out_thread_id >> 32);
+ *out_thread_id_high = static_cast<u32>(out_thread_id & std::numeric_limits<u32>::max());
+
+ return result;
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_thread_profiler.cpp b/src/core/hle/kernel/svc/svc_thread_profiler.cpp
new file mode 100644
index 000000000..299e22ae6
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_thread_profiler.cpp
@@ -0,0 +1,6 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_tick.cpp b/src/core/hle/kernel/svc/svc_tick.cpp
new file mode 100644
index 000000000..e9b4fd5a6
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_tick.cpp
@@ -0,0 +1,33 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/core.h"
+#include "core/core_timing.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+
+/// This returns the total CPU ticks elapsed since the CPU was powered-on
+u64 GetSystemTick(Core::System& system) {
+ LOG_TRACE(Kernel_SVC, "called");
+
+ auto& core_timing = system.CoreTiming();
+
+ // Returns the value of cntpct_el0 (https://switchbrew.org/wiki/SVC#svcGetSystemTick)
+ const u64 result{core_timing.GetClockTicks()};
+
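+    // When multicore emulation is disabled, advance the emulated clock by a fixed tick count per call.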
+ if (!system.Kernel().IsMulticore()) {
+ core_timing.AddTicks(400U);
+ }
+
+ return result;
+}
+
+void GetSystemTick32(Core::System& system, u32* time_low, u32* time_high) {
+ const auto time = GetSystemTick(system);
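+    // Split the 64-bit tick count into low and high words for the 32-bit ABI.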
+ *time_low = static_cast<u32>(time);
+ *time_high = static_cast<u32>(time >> 32);
+}
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc/svc_transfer_memory.cpp b/src/core/hle/kernel/svc/svc_transfer_memory.cpp
new file mode 100644
index 000000000..b14ae24a1
--- /dev/null
+++ b/src/core/hle/kernel/svc/svc_transfer_memory.cpp
@@ -0,0 +1,79 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/scope_exit.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_scoped_resource_reservation.h"
+#include "core/hle/kernel/k_transfer_memory.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Kernel::Svc {
+namespace {
+
+constexpr bool IsValidTransferMemoryPermission(MemoryPermission perm) {
+ switch (perm) {
+ case MemoryPermission::None:
+ case MemoryPermission::Read:
+ case MemoryPermission::ReadWrite:
+ return true;
+ default:
+ return false;
+ }
+}
+
+} // Anonymous namespace
+
+/// Creates a TransferMemory object
+Result CreateTransferMemory(Core::System& system, Handle* out, VAddr address, u64 size,
+ MemoryPermission map_perm) {
+ auto& kernel = system.Kernel();
+
+ // Validate the size.
+ R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
+ R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
+ R_UNLESS(size > 0, ResultInvalidSize);
+ R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
+
+ // Validate the permissions.
+ R_UNLESS(IsValidTransferMemoryPermission(map_perm), ResultInvalidNewMemoryPermission);
+
+ // Get the current process and handle table.
+ auto& process = *kernel.CurrentProcess();
+ auto& handle_table = process.GetHandleTable();
+
+ // Reserve a new transfer memory from the process resource limit.
+ KScopedResourceReservation trmem_reservation(kernel.CurrentProcess(),
+ LimitableResource::TransferMemoryCountMax);
+ R_UNLESS(trmem_reservation.Succeeded(), ResultLimitReached);
+
+ // Create the transfer memory.
+ KTransferMemory* trmem = KTransferMemory::Create(kernel);
+ R_UNLESS(trmem != nullptr, ResultOutOfResource);
+
+ // Ensure the only reference is in the handle table when we're done.
+ SCOPE_EXIT({ trmem->Close(); });
+
+ // Ensure that the region is in range.
+ R_UNLESS(process.PageTable().Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Initialize the transfer memory.
+ R_TRY(trmem->Initialize(address, size, map_perm));
+
+ // Commit the reservation.
+ trmem_reservation.Commit();
+
+ // Register the transfer memory.
+ KTransferMemory::Register(kernel, trmem);
+
+ // Add the transfer memory to the handle table.
+ R_TRY(handle_table.Add(out, trmem));
+
+ return ResultSuccess;
+}
+
+Result CreateTransferMemory32(Core::System& system, Handle* out, u32 address, u32 size,
+ MemoryPermission map_perm) {
+ return CreateTransferMemory(system, out, address, size, map_perm);
+}
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h
index 33eebcef6..e90c35601 100644
--- a/src/core/hle/kernel/svc_types.h
+++ b/src/core/hle/kernel/svc_types.h
@@ -3,6 +3,9 @@
#pragma once
+#include <bitset>
+
+#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
@@ -496,6 +499,19 @@ enum class MemoryMapping : u32 {
Memory = 2,
};
+enum class MapDeviceAddressSpaceFlag : u32 {
+ None = (0U << 0),
+ NotIoRegister = (1U << 0),
+};
+DECLARE_ENUM_FLAG_OPERATORS(MapDeviceAddressSpaceFlag);
+
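+// Option word for MapDeviceAddressSpace: bits 0-15 hold the memory permission, bit 16 the flag, bits 17-31 are reserved.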
+union MapDeviceAddressSpaceOption {
+ u32 raw;
+ BitField<0, 16, MemoryPermission> permission;
+ BitField<16, 1, MapDeviceAddressSpaceFlag> flags;
+ BitField<17, 15, u32> reserved;
+};
+
enum class KernelDebugType : u32 {
Thread = 0,
ThreadCallStack = 1,
@@ -592,4 +608,7 @@ struct CreateProcessParameter {
};
static_assert(sizeof(CreateProcessParameter) == 0x30);
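+// Total number of supervisor call IDs; SvcAccessFlagSet holds one flag bit per SVC.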
+constexpr size_t NumSupervisorCalls = 0xC0;
+using SvcAccessFlagSet = std::bitset<NumSupervisorCalls>;
+
} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc_version.h b/src/core/hle/kernel/svc_version.h
new file mode 100644
index 000000000..e4f47b34b
--- /dev/null
+++ b/src/core/hle/kernel/svc_version.h
@@ -0,0 +1,58 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "common/bit_field.h"
+#include "common/common_types.h"
+#include "common/literals.h"
+
+namespace Kernel::Svc {
+
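+// Helpers for converting between SDK and SVC version numbers (SVC major = SDK major + 4, minor is unchanged).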
+constexpr inline u32 ConvertToSvcMajorVersion(u32 sdk) {
+ return sdk + 4;
+}
+constexpr inline u32 ConvertToSdkMajorVersion(u32 svc) {
+ return svc - 4;
+}
+
+constexpr inline u32 ConvertToSvcMinorVersion(u32 sdk) {
+ return sdk;
+}
+constexpr inline u32 ConvertToSdkMinorVersion(u32 svc) {
+ return svc;
+}
+
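+// Kernel versions are packed as a 4-bit minor version and a 13-bit major version.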
+union KernelVersion {
+ u32 value;
+ BitField<0, 4, u32> minor_version;
+ BitField<4, 13, u32> major_version;
+};
+
+constexpr inline u32 EncodeKernelVersion(u32 major, u32 minor) {
+ return decltype(KernelVersion::minor_version)::FormatValue(minor) |
+ decltype(KernelVersion::major_version)::FormatValue(major);
+}
+
+constexpr inline u32 GetKernelMajorVersion(u32 encoded) {
+ return std::bit_cast<decltype(KernelVersion::major_version)>(encoded).Value();
+}
+
+constexpr inline u32 GetKernelMinorVersion(u32 encoded) {
+ return std::bit_cast<decltype(KernelVersion::minor_version)>(encoded).Value();
+}
+
+// Nintendo doesn't support programs targeting SVC versions < 3.0.
+constexpr inline u32 RequiredKernelMajorVersion = 3;
+constexpr inline u32 RequiredKernelMinorVersion = 0;
+constexpr inline u32 RequiredKernelVersion =
+ EncodeKernelVersion(RequiredKernelMajorVersion, RequiredKernelMinorVersion);
+
+// This is the highest SVC version supported, to be updated on new kernel releases.
+// NOTE: Official kernel versions have SVC major = SDK major + 4, SVC minor = SDK minor.
+constexpr inline u32 SupportedKernelMajorVersion = ConvertToSvcMajorVersion(15);
+constexpr inline u32 SupportedKernelMinorVersion = ConvertToSvcMinorVersion(3);
+constexpr inline u32 SupportedKernelVersion =
+ EncodeKernelVersion(SupportedKernelMajorVersion, SupportedKernelMinorVersion);
+
+} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index 1ea8c7fbc..052be40dd 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -172,11 +172,11 @@ void SvcWrap64(Core::System& system) {
}
// Used by GetResourceLimitLimitValue.
-template <Result func(Core::System&, u64*, Handle, LimitableResource)>
+template <Result func(Core::System&, u64*, Handle, Svc::LimitableResource)>
void SvcWrap64(Core::System& system) {
u64 param_1 = 0;
const u32 retval = func(system, &param_1, static_cast<Handle>(Param(system, 1)),
- static_cast<LimitableResource>(Param(system, 2)))
+ static_cast<Svc::LimitableResource>(Param(system, 2)))
.raw;
system.CurrentArmInterface().SetReg(1, param_1);
@@ -189,10 +189,10 @@ void SvcWrap64(Core::System& system) {
}
// Used by SetResourceLimitLimitValue
-template <Result func(Core::System&, Handle, LimitableResource, u64)>
+template <Result func(Core::System&, Handle, Svc::LimitableResource, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<Handle>(Param(system, 0)),
- static_cast<LimitableResource>(Param(system, 1)), Param(system, 2))
+ static_cast<Svc::LimitableResource>(Param(system, 1)), Param(system, 2))
.raw);
}
diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp
index 22999c942..ebcf6e164 100644
--- a/src/core/hle/service/am/am.cpp
+++ b/src/core/hle/service/am/am.cpp
@@ -1124,7 +1124,7 @@ void IStorageAccessor::Write(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const u64 offset{rp.Pop<u64>()};
- const std::vector<u8> data{ctx.ReadBuffer()};
+ const auto data{ctx.ReadBuffer()};
const std::size_t size{std::min<u64>(data.size(), backing.GetSize() - offset)};
LOG_DEBUG(Service_AM, "called, offset={}, size={}", offset, size);
diff --git a/src/core/hle/service/audio/audren_u.cpp b/src/core/hle/service/audio/audren_u.cpp
index 3a1c231b6..0ee28752c 100644
--- a/src/core/hle/service/audio/audren_u.cpp
+++ b/src/core/hle/service/audio/audren_u.cpp
@@ -112,7 +112,7 @@ private:
void RequestUpdate(Kernel::HLERequestContext& ctx) {
LOG_TRACE(Service_Audio, "called");
- std::vector<u8> input{ctx.ReadBuffer(0)};
+ const auto input{ctx.ReadBuffer(0)};
// These buffers are written manually to avoid an issue with WriteBuffer throwing errors for
// checking size 0. Performance size is 0 for most games.
diff --git a/src/core/hle/service/audio/hwopus.cpp b/src/core/hle/service/audio/hwopus.cpp
index 825fb8bcc..e01f87356 100644
--- a/src/core/hle/service/audio/hwopus.cpp
+++ b/src/core/hle/service/audio/hwopus.cpp
@@ -93,7 +93,7 @@ private:
ctx.WriteBuffer(samples);
}
- bool DecodeOpusData(u32& consumed, u32& sample_count, const std::vector<u8>& input,
+ bool DecodeOpusData(u32& consumed, u32& sample_count, std::span<const u8> input,
std::vector<opus_int16>& output, u64* out_performance_time) const {
const auto start_time = std::chrono::steady_clock::now();
const std::size_t raw_output_sz = output.size() * sizeof(opus_int16);
diff --git a/src/core/hle/service/es/es.cpp b/src/core/hle/service/es/es.cpp
index d183e5829..fb8686859 100644
--- a/src/core/hle/service/es/es.cpp
+++ b/src/core/hle/service/es/es.cpp
@@ -122,7 +122,7 @@ private:
void ImportTicket(Kernel::HLERequestContext& ctx) {
const auto ticket = ctx.ReadBuffer();
- const auto cert = ctx.ReadBuffer(1);
+ [[maybe_unused]] const auto cert = ctx.ReadBuffer(1);
if (ticket.size() < sizeof(Core::Crypto::Ticket)) {
LOG_ERROR(Service_ETicket, "The input buffer is not large enough!");
diff --git a/src/core/hle/service/filesystem/fsp_srv.cpp b/src/core/hle/service/filesystem/fsp_srv.cpp
index fbb16a7da..447d624e1 100644
--- a/src/core/hle/service/filesystem/fsp_srv.cpp
+++ b/src/core/hle/service/filesystem/fsp_srv.cpp
@@ -190,7 +190,7 @@ private:
return;
}
- const std::vector<u8> data = ctx.ReadBuffer();
+ const auto data = ctx.ReadBuffer();
ASSERT_MSG(
static_cast<s64>(data.size()) <= length,
@@ -401,11 +401,8 @@ public:
}
void RenameFile(Kernel::HLERequestContext& ctx) {
- std::vector<u8> buffer = ctx.ReadBuffer(0);
- const std::string src_name = Common::StringFromBuffer(buffer);
-
- buffer = ctx.ReadBuffer(1);
- const std::string dst_name = Common::StringFromBuffer(buffer);
+ const std::string src_name = Common::StringFromBuffer(ctx.ReadBuffer(0));
+ const std::string dst_name = Common::StringFromBuffer(ctx.ReadBuffer(1));
LOG_DEBUG(Service_FS, "called. file '{}' to file '{}'", src_name, dst_name);
@@ -1086,7 +1083,7 @@ void FSP_SRV::GetGlobalAccessLogMode(Kernel::HLERequestContext& ctx) {
}
void FSP_SRV::OutputAccessLogToSdCard(Kernel::HLERequestContext& ctx) {
- const auto raw = ctx.ReadBuffer();
+ const auto raw = ctx.ReadBufferCopy();
auto log = Common::StringFromFixedZeroTerminatedBuffer(
reinterpret_cast<const char*>(raw.data()), raw.size());
diff --git a/src/core/hle/service/glue/arp.cpp b/src/core/hle/service/glue/arp.cpp
index 49b6d45fe..ce21b69e3 100644
--- a/src/core/hle/service/glue/arp.cpp
+++ b/src/core/hle/service/glue/arp.cpp
@@ -228,7 +228,8 @@ private:
return;
}
- control = ctx.ReadBuffer();
+ // TODO: Can this be a span?
+ control = ctx.ReadBufferCopy();
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
diff --git a/src/core/hle/service/hid/controllers/npad.cpp b/src/core/hle/service/hid/controllers/npad.cpp
index 5713f1288..513ea485a 100644
--- a/src/core/hle/service/hid/controllers/npad.cpp
+++ b/src/core/hle/service/hid/controllers/npad.cpp
@@ -428,6 +428,9 @@ void Controller_NPad::RequestPadStateUpdate(Core::HID::NpadIdType npad_id) {
return;
}
+    // This call is unique to yuzu; it is required for the turbo buttons to work properly.
+ controller.device->TurboButtonUpdate();
+
auto& pad_entry = controller.npad_pad_state;
auto& trigger_entry = controller.npad_trigger_state;
const auto button_state = controller.device->GetNpadButtons();
@@ -755,11 +758,12 @@ Core::HID::NpadStyleTag Controller_NPad::GetSupportedStyleSet() const {
return hid_core.GetSupportedStyleTag();
}
-void Controller_NPad::SetSupportedNpadIdTypes(u8* data, std::size_t length) {
+void Controller_NPad::SetSupportedNpadIdTypes(std::span<const u8> data) {
+ const auto length = data.size();
ASSERT(length > 0 && (length % sizeof(u32)) == 0);
supported_npad_id_types.clear();
supported_npad_id_types.resize(length / sizeof(u32));
- std::memcpy(supported_npad_id_types.data(), data, length);
+ std::memcpy(supported_npad_id_types.data(), data.data(), length);
}
void Controller_NPad::GetSupportedNpadIdTypes(u32* data, std::size_t max_length) {
diff --git a/src/core/hle/service/hid/controllers/npad.h b/src/core/hle/service/hid/controllers/npad.h
index 1a589cca2..1f7d33459 100644
--- a/src/core/hle/service/hid/controllers/npad.h
+++ b/src/core/hle/service/hid/controllers/npad.h
@@ -6,6 +6,7 @@
#include <array>
#include <atomic>
#include <mutex>
+#include <span>
#include "common/bit_field.h"
#include "common/common_types.h"
@@ -95,7 +96,7 @@ public:
void SetSupportedStyleSet(Core::HID::NpadStyleTag style_set);
Core::HID::NpadStyleTag GetSupportedStyleSet() const;
- void SetSupportedNpadIdTypes(u8* data, std::size_t length);
+ void SetSupportedNpadIdTypes(std::span<const u8> data);
void GetSupportedNpadIdTypes(u32* data, std::size_t max_length);
std::size_t GetSupportedNpadIdTypesSize() const;
diff --git a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp
index bf28440c6..f15f1a6bb 100644
--- a/src/core/hle/service/hid/hid.cpp
+++ b/src/core/hle/service/hid/hid.cpp
@@ -1026,7 +1026,7 @@ void Hid::SetSupportedNpadIdType(Kernel::HLERequestContext& ctx) {
const auto applet_resource_user_id{rp.Pop<u64>()};
applet_resource->GetController<Controller_NPad>(HidController::NPad)
- .SetSupportedNpadIdTypes(ctx.ReadBuffer().data(), ctx.GetReadBufferSize());
+ .SetSupportedNpadIdTypes(ctx.ReadBuffer());
LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
@@ -2104,7 +2104,7 @@ void Hid::WritePalmaRgbLedPatternEntry(Kernel::HLERequestContext& ctx) {
const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()};
const auto unknown{rp.Pop<u64>()};
- const auto buffer = ctx.ReadBuffer();
+ [[maybe_unused]] const auto buffer = ctx.ReadBuffer();
LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}, unknown={}",
connection_handle.npad_id, unknown);
diff --git a/src/core/hle/service/hid/hidbus/hidbus_base.h b/src/core/hle/service/hid/hidbus/hidbus_base.h
index d3960f506..65e301137 100644
--- a/src/core/hle/service/hid/hidbus/hidbus_base.h
+++ b/src/core/hle/service/hid/hidbus/hidbus_base.h
@@ -4,6 +4,7 @@
#pragma once
#include <array>
+#include <span>
#include "common/common_types.h"
#include "core/hle/result.h"
@@ -150,7 +151,7 @@ public:
}
// Assigns a command from data
- virtual bool SetCommand(const std::vector<u8>& data) {
+ virtual bool SetCommand(std::span<const u8> data) {
return {};
}
diff --git a/src/core/hle/service/hid/hidbus/ringcon.cpp b/src/core/hle/service/hid/hidbus/ringcon.cpp
index 78ed47014..35847cbdd 100644
--- a/src/core/hle/service/hid/hidbus/ringcon.cpp
+++ b/src/core/hle/service/hid/hidbus/ringcon.cpp
@@ -116,7 +116,7 @@ std::vector<u8> RingController::GetReply() const {
}
}
-bool RingController::SetCommand(const std::vector<u8>& data) {
+bool RingController::SetCommand(std::span<const u8> data) {
if (data.size() < 4) {
LOG_ERROR(Service_HID, "Command size not supported {}", data.size());
command = RingConCommands::Error;
diff --git a/src/core/hle/service/hid/hidbus/ringcon.h b/src/core/hle/service/hid/hidbus/ringcon.h
index 845ce85a5..c2fb386b1 100644
--- a/src/core/hle/service/hid/hidbus/ringcon.h
+++ b/src/core/hle/service/hid/hidbus/ringcon.h
@@ -4,6 +4,7 @@
#pragma once
#include <array>
+#include <span>
#include "common/common_types.h"
#include "core/hle/service/hid/hidbus/hidbus_base.h"
@@ -31,7 +32,7 @@ public:
u8 GetDeviceId() const override;
// Assigns a command from data
- bool SetCommand(const std::vector<u8>& data) override;
+ bool SetCommand(std::span<const u8> data) override;
// Returns a reply from a command
std::vector<u8> GetReply() const override;
diff --git a/src/core/hle/service/hid/hidbus/starlink.cpp b/src/core/hle/service/hid/hidbus/starlink.cpp
index dd439f60a..d0e760314 100644
--- a/src/core/hle/service/hid/hidbus/starlink.cpp
+++ b/src/core/hle/service/hid/hidbus/starlink.cpp
@@ -42,7 +42,7 @@ std::vector<u8> Starlink::GetReply() const {
return {};
}
-bool Starlink::SetCommand(const std::vector<u8>& data) {
+bool Starlink::SetCommand(std::span<const u8> data) {
LOG_ERROR(Service_HID, "Command not implemented");
return false;
}
diff --git a/src/core/hle/service/hid/hidbus/starlink.h b/src/core/hle/service/hid/hidbus/starlink.h
index 0b1b7ba49..07c800e6e 100644
--- a/src/core/hle/service/hid/hidbus/starlink.h
+++ b/src/core/hle/service/hid/hidbus/starlink.h
@@ -29,7 +29,7 @@ public:
u8 GetDeviceId() const override;
// Assigns a command from data
- bool SetCommand(const std::vector<u8>& data) override;
+ bool SetCommand(std::span<const u8> data) override;
// Returns a reply from a command
std::vector<u8> GetReply() const override;
diff --git a/src/core/hle/service/hid/hidbus/stubbed.cpp b/src/core/hle/service/hid/hidbus/stubbed.cpp
index e477443e3..07632c872 100644
--- a/src/core/hle/service/hid/hidbus/stubbed.cpp
+++ b/src/core/hle/service/hid/hidbus/stubbed.cpp
@@ -43,7 +43,7 @@ std::vector<u8> HidbusStubbed::GetReply() const {
return {};
}
-bool HidbusStubbed::SetCommand(const std::vector<u8>& data) {
+bool HidbusStubbed::SetCommand(std::span<const u8> data) {
LOG_ERROR(Service_HID, "Command not implemented");
return false;
}
diff --git a/src/core/hle/service/hid/hidbus/stubbed.h b/src/core/hle/service/hid/hidbus/stubbed.h
index 91165ceff..38eaa0ecc 100644
--- a/src/core/hle/service/hid/hidbus/stubbed.h
+++ b/src/core/hle/service/hid/hidbus/stubbed.h
@@ -29,7 +29,7 @@ public:
u8 GetDeviceId() const override;
// Assigns a command from data
- bool SetCommand(const std::vector<u8>& data) override;
+ bool SetCommand(std::span<const u8> data) override;
// Returns a reply from a command
std::vector<u8> GetReply() const override;
diff --git a/src/core/hle/service/jit/jit.cpp b/src/core/hle/service/jit/jit.cpp
index 8f2920c51..1295a44c7 100644
--- a/src/core/hle/service/jit/jit.cpp
+++ b/src/core/hle/service/jit/jit.cpp
@@ -62,7 +62,7 @@ public:
const auto parameters{rp.PopRaw<InputParameters>()};
// Optional input/output buffers
- std::vector<u8> input_buffer{ctx.CanReadBuffer() ? ctx.ReadBuffer() : std::vector<u8>()};
+ const auto input_buffer{ctx.CanReadBuffer() ? ctx.ReadBuffer() : std::span<const u8>()};
std::vector<u8> output_buffer(ctx.CanWriteBuffer() ? ctx.GetWriteBufferSize() : 0);
// Function call prototype:
@@ -132,7 +132,7 @@ public:
const auto command{rp.PopRaw<u64>()};
// Optional input/output buffers
- std::vector<u8> input_buffer{ctx.CanReadBuffer() ? ctx.ReadBuffer() : std::vector<u8>()};
+ const auto input_buffer{ctx.CanReadBuffer() ? ctx.ReadBuffer() : std::span<const u8>()};
std::vector<u8> output_buffer(ctx.CanWriteBuffer() ? ctx.GetWriteBufferSize() : 0);
// Function call prototype:
diff --git a/src/core/hle/service/ldn/ldn.cpp b/src/core/hle/service/ldn/ldn.cpp
index c49c61cff..e5099d61f 100644
--- a/src/core/hle/service/ldn/ldn.cpp
+++ b/src/core/hle/service/ldn/ldn.cpp
@@ -412,7 +412,7 @@ public:
}
void SetAdvertiseData(Kernel::HLERequestContext& ctx) {
- std::vector<u8> read_buffer = ctx.ReadBuffer();
+ const auto read_buffer = ctx.ReadBuffer();
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(lan_discovery.SetAdvertiseData(read_buffer));
@@ -464,7 +464,7 @@ public:
parameters.security_config.passphrase_size,
parameters.security_config.security_mode, parameters.local_communication_version);
- const std::vector<u8> read_buffer = ctx.ReadBuffer();
+ const auto read_buffer = ctx.ReadBuffer();
if (read_buffer.size() != sizeof(NetworkInfo)) {
LOG_ERROR(Frontend, "NetworkInfo doesn't match read_buffer size!");
IPC::ResponseBuilder rb{ctx, 2};
diff --git a/src/core/hle/service/nvdrv/devices/nvdevice.h b/src/core/hle/service/nvdrv/devices/nvdevice.h
index 204b0e757..c562e04d2 100644
--- a/src/core/hle/service/nvdrv/devices/nvdevice.h
+++ b/src/core/hle/service/nvdrv/devices/nvdevice.h
@@ -3,7 +3,9 @@
#pragma once
+#include <span>
#include <vector>
+
#include "common/common_types.h"
#include "core/hle/service/nvdrv/nvdata.h"
@@ -31,7 +33,7 @@ public:
* @param output A buffer where the output data will be written to.
* @returns The result code of the ioctl.
*/
- virtual NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
+ virtual NvResult Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input,
std::vector<u8>& output) = 0;
/**
@@ -42,8 +44,8 @@ public:
* @param output A buffer where the output data will be written to.
* @returns The result code of the ioctl.
*/
- virtual NvResult Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
- const std::vector<u8>& inline_input, std::vector<u8>& output) = 0;
+ virtual NvResult Ioctl2(DeviceFD fd, Ioctl command, std::span<const u8> input,
+ std::span<const u8> inline_input, std::vector<u8>& output) = 0;
/**
* Handles an ioctl3 request.
@@ -53,7 +55,7 @@ public:
* @param inline_output A buffer where the inlined output data will be written to.
* @returns The result code of the ioctl.
*/
- virtual NvResult Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
+ virtual NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input,
std::vector<u8>& output, std::vector<u8>& inline_output) = 0;
/**
diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp
index 4122fc98d..5a5b2e305 100644
--- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp
@@ -17,19 +17,19 @@ nvdisp_disp0::nvdisp_disp0(Core::System& system_, NvCore::Container& core)
: nvdevice{system_}, container{core}, nvmap{core.GetNvMapFile()} {}
nvdisp_disp0::~nvdisp_disp0() = default;
-NvResult nvdisp_disp0::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
+NvResult nvdisp_disp0::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input,
std::vector<u8>& output) {
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
return NvResult::NotImplemented;
}
-NvResult nvdisp_disp0::Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
- const std::vector<u8>& inline_input, std::vector<u8>& output) {
+NvResult nvdisp_disp0::Ioctl2(DeviceFD fd, Ioctl command, std::span<const u8> input,
+ std::span<const u8> inline_input, std::vector<u8>& output) {
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
return NvResult::NotImplemented;
}
-NvResult nvdisp_disp0::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
+NvResult nvdisp_disp0::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input,
std::vector<u8>& output, std::vector<u8>& inline_output) {
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
return NvResult::NotImplemented;
diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h
index 04217ab12..81bd7960a 100644
--- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h
+++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h
@@ -25,12 +25,12 @@ public:
explicit nvdisp_disp0(Core::System& system_, NvCore::Container& core);
~nvdisp_disp0() override;
- NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
+ NvResult Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input,
std::vector<u8>& output) override;
- NvResult Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
- const std::vector<u8>& inline_input, std::vector<u8>& output) override;
- NvResult Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
- std::vector<u8>& output, std::vector<u8>& inline_output) override;
+ NvResult Ioctl2(DeviceFD fd, Ioctl command, std::span<const u8> input,
+ std::span<const u8> inline_input, std::vector<u8>& output) override;
+ NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::vector<u8>& output,
+ std::vector<u8>& inline_output) override;
void OnOpen(DeviceFD fd) override;
void OnClose(DeviceFD fd) override;
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
index b635e6ed1..681bd0867 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
@@ -27,7 +27,7 @@ nvhost_as_gpu::nvhost_as_gpu(Core::System& system_, Module& module_, NvCore::Con
nvhost_as_gpu::~nvhost_as_gpu() = default;
-NvResult nvhost_as_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
+NvResult nvhost_as_gpu::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input,
std::vector<u8>& output) {
switch (command.group) {
case 'A':
@@ -60,13 +60,13 @@ NvResult nvhost_as_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>
return NvResult::NotImplemented;
}
-NvResult nvhost_as_gpu::Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
- const std::vector<u8>& inline_input, std::vector<u8>& output) {
+NvResult nvhost_as_gpu::Ioctl2(DeviceFD fd, Ioctl command, std::span<const u8> input,
+ std::span<const u8> inline_input, std::vector<u8>& output) {
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
return NvResult::NotImplemented;
}
-NvResult nvhost_as_gpu::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
+NvResult nvhost_as_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input,
std::vector<u8>& output, std::vector<u8>& inline_output) {
switch (command.group) {
case 'A':
@@ -87,7 +87,7 @@ NvResult nvhost_as_gpu::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>
void nvhost_as_gpu::OnOpen(DeviceFD fd) {}
void nvhost_as_gpu::OnClose(DeviceFD fd) {}
-NvResult nvhost_as_gpu::AllocAsEx(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_as_gpu::AllocAsEx(std::span<const u8> input, std::vector<u8>& output) {
IoctlAllocAsEx params{};
std::memcpy(&params, input.data(), input.size());
@@ -141,7 +141,7 @@ NvResult nvhost_as_gpu::AllocAsEx(const std::vector<u8>& input, std::vector<u8>&
return NvResult::Success;
}
-NvResult nvhost_as_gpu::AllocateSpace(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_as_gpu::AllocateSpace(std::span<const u8> input, std::vector<u8>& output) {
IoctlAllocSpace params{};
std::memcpy(&params, input.data(), input.size());
@@ -220,7 +220,7 @@ void nvhost_as_gpu::FreeMappingLocked(u64 offset) {
mapping_map.erase(offset);
}
-NvResult nvhost_as_gpu::FreeSpace(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_as_gpu::FreeSpace(std::span<const u8> input, std::vector<u8>& output) {
IoctlFreeSpace params{};
std::memcpy(&params, input.data(), input.size());
@@ -266,7 +266,7 @@ NvResult nvhost_as_gpu::FreeSpace(const std::vector<u8>& input, std::vector<u8>&
return NvResult::Success;
}
-NvResult nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_as_gpu::Remap(std::span<const u8> input, std::vector<u8>& output) {
const auto num_entries = input.size() / sizeof(IoctlRemapEntry);
LOG_DEBUG(Service_NVDRV, "called, num_entries=0x{:X}", num_entries);
@@ -320,7 +320,7 @@ NvResult nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& out
return NvResult::Success;
}
-NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_as_gpu::MapBufferEx(std::span<const u8> input, std::vector<u8>& output) {
IoctlMapBufferEx params{};
std::memcpy(&params, input.data(), input.size());
@@ -424,7 +424,7 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
return NvResult::Success;
}
-NvResult nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_as_gpu::UnmapBuffer(std::span<const u8> input, std::vector<u8>& output) {
IoctlUnmapBuffer params{};
std::memcpy(&params, input.data(), input.size());
@@ -463,7 +463,7 @@ NvResult nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8
return NvResult::Success;
}
-NvResult nvhost_as_gpu::BindChannel(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_as_gpu::BindChannel(std::span<const u8> input, std::vector<u8>& output) {
IoctlBindChannel params{};
std::memcpy(&params, input.data(), input.size());
LOG_DEBUG(Service_NVDRV, "called, fd={:X}", params.fd);
@@ -492,7 +492,7 @@ void nvhost_as_gpu::GetVARegionsImpl(IoctlGetVaRegions& params) {
};
}
-NvResult nvhost_as_gpu::GetVARegions(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_as_gpu::GetVARegions(std::span<const u8> input, std::vector<u8>& output) {
IoctlGetVaRegions params{};
std::memcpy(&params, input.data(), input.size());
@@ -511,7 +511,7 @@ NvResult nvhost_as_gpu::GetVARegions(const std::vector<u8>& input, std::vector<u
return NvResult::Success;
}
-NvResult nvhost_as_gpu::GetVARegions(const std::vector<u8>& input, std::vector<u8>& output,
+NvResult nvhost_as_gpu::GetVARegions(std::span<const u8> input, std::vector<u8>& output,
std::vector<u8>& inline_output) {
IoctlGetVaRegions params{};
std::memcpy(&params, input.data(), input.size());
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
index 86fe71c75..1aba8d579 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
@@ -47,12 +47,12 @@ public:
explicit nvhost_as_gpu(Core::System& system_, Module& module, NvCore::Container& core);
~nvhost_as_gpu() override;
- NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
+ NvResult Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input,
std::vector<u8>& output) override;
- NvResult Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
- const std::vector<u8>& inline_input, std::vector<u8>& output) override;
- NvResult Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
- std::vector<u8>& output, std::vector<u8>& inline_output) override;
+ NvResult Ioctl2(DeviceFD fd, Ioctl command, std::span<const u8> input,
+ std::span<const u8> inline_input, std::vector<u8>& output) override;
+ NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::vector<u8>& output,
+ std::vector<u8>& inline_output) override;
void OnOpen(DeviceFD fd) override;
void OnClose(DeviceFD fd) override;
@@ -138,17 +138,17 @@ private:
static_assert(sizeof(IoctlGetVaRegions) == 16 + sizeof(VaRegion) * 2,
"IoctlGetVaRegions is incorrect size");
- NvResult AllocAsEx(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult AllocateSpace(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult Remap(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult MapBufferEx(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult FreeSpace(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult BindChannel(const std::vector<u8>& input, std::vector<u8>& output);
+ NvResult AllocAsEx(std::span<const u8> input, std::vector<u8>& output);
+ NvResult AllocateSpace(std::span<const u8> input, std::vector<u8>& output);
+ NvResult Remap(std::span<const u8> input, std::vector<u8>& output);
+ NvResult MapBufferEx(std::span<const u8> input, std::vector<u8>& output);
+ NvResult UnmapBuffer(std::span<const u8> input, std::vector<u8>& output);
+ NvResult FreeSpace(std::span<const u8> input, std::vector<u8>& output);
+ NvResult BindChannel(std::span<const u8> input, std::vector<u8>& output);
void GetVARegionsImpl(IoctlGetVaRegions& params);
- NvResult GetVARegions(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult GetVARegions(const std::vector<u8>& input, std::vector<u8>& output,
+ NvResult GetVARegions(std::span<const u8> input, std::vector<u8>& output);
+ NvResult GetVARegions(std::span<const u8> input, std::vector<u8>& output,
std::vector<u8>& inline_output);
void FreeMappingLocked(u64 offset);
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
index eee11fab8..0cdde82a7 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
@@ -34,7 +34,7 @@ nvhost_ctrl::~nvhost_ctrl() {
}
}
-NvResult nvhost_ctrl::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
+NvResult nvhost_ctrl::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input,
std::vector<u8>& output) {
switch (command.group) {
case 0x0:
@@ -63,13 +63,13 @@ NvResult nvhost_ctrl::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>&
return NvResult::NotImplemented;
}
-NvResult nvhost_ctrl::Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
- const std::vector<u8>& inline_input, std::vector<u8>& output) {
+NvResult nvhost_ctrl::Ioctl2(DeviceFD fd, Ioctl command, std::span<const u8> input,
+ std::span<const u8> inline_input, std::vector<u8>& output) {
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
return NvResult::NotImplemented;
}
-NvResult nvhost_ctrl::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
+NvResult nvhost_ctrl::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input,
std::vector<u8>& output, std::vector<u8>& inline_outpu) {
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
return NvResult::NotImplemented;
@@ -79,7 +79,7 @@ void nvhost_ctrl::OnOpen(DeviceFD fd) {}
void nvhost_ctrl::OnClose(DeviceFD fd) {}
-NvResult nvhost_ctrl::NvOsGetConfigU32(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_ctrl::NvOsGetConfigU32(std::span<const u8> input, std::vector<u8>& output) {
IocGetConfigParams params{};
std::memcpy(&params, input.data(), sizeof(params));
LOG_TRACE(Service_NVDRV, "called, setting={}!{}", params.domain_str.data(),
@@ -87,7 +87,7 @@ NvResult nvhost_ctrl::NvOsGetConfigU32(const std::vector<u8>& input, std::vector
return NvResult::ConfigVarNotFound; // Returns error on production mode
}
-NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& output,
+NvResult nvhost_ctrl::IocCtrlEventWait(std::span<const u8> input, std::vector<u8>& output,
bool is_allocation) {
IocCtrlEventWaitParams params{};
std::memcpy(&params, input.data(), sizeof(params));
@@ -231,7 +231,7 @@ NvResult nvhost_ctrl::FreeEvent(u32 slot) {
return NvResult::Success;
}
-NvResult nvhost_ctrl::IocCtrlEventRegister(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_ctrl::IocCtrlEventRegister(std::span<const u8> input, std::vector<u8>& output) {
IocCtrlEventRegisterParams params{};
std::memcpy(&params, input.data(), sizeof(params));
const u32 event_id = params.user_event_id;
@@ -252,8 +252,7 @@ NvResult nvhost_ctrl::IocCtrlEventRegister(const std::vector<u8>& input, std::ve
return NvResult::Success;
}
-NvResult nvhost_ctrl::IocCtrlEventUnregister(const std::vector<u8>& input,
- std::vector<u8>& output) {
+NvResult nvhost_ctrl::IocCtrlEventUnregister(std::span<const u8> input, std::vector<u8>& output) {
IocCtrlEventUnregisterParams params{};
std::memcpy(&params, input.data(), sizeof(params));
const u32 event_id = params.user_event_id & 0x00FF;
@@ -263,7 +262,7 @@ NvResult nvhost_ctrl::IocCtrlEventUnregister(const std::vector<u8>& input,
return FreeEvent(event_id);
}
-NvResult nvhost_ctrl::IocCtrlEventUnregisterBatch(const std::vector<u8>& input,
+NvResult nvhost_ctrl::IocCtrlEventUnregisterBatch(std::span<const u8> input,
std::vector<u8>& output) {
IocCtrlEventUnregisterBatchParams params{};
std::memcpy(&params, input.data(), sizeof(params));
@@ -282,7 +281,7 @@ NvResult nvhost_ctrl::IocCtrlEventUnregisterBatch(const std::vector<u8>& input,
return NvResult::Success;
}
-NvResult nvhost_ctrl::IocCtrlClearEventWait(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_ctrl::IocCtrlClearEventWait(std::span<const u8> input, std::vector<u8>& output) {
IocCtrlEventClearParams params{};
std::memcpy(&params, input.data(), sizeof(params));
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h
index 0b56d7070..dd2e7888a 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h
@@ -25,12 +25,12 @@ public:
NvCore::Container& core);
~nvhost_ctrl() override;
- NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
+ NvResult Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input,
std::vector<u8>& output) override;
- NvResult Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
- const std::vector<u8>& inline_input, std::vector<u8>& output) override;
- NvResult Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
- std::vector<u8>& output, std::vector<u8>& inline_output) override;
+ NvResult Ioctl2(DeviceFD fd, Ioctl command, std::span<const u8> input,
+ std::span<const u8> inline_input, std::vector<u8>& output) override;
+ NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::vector<u8>& output,
+ std::vector<u8>& inline_output) override;
void OnOpen(DeviceFD fd) override;
void OnClose(DeviceFD fd) override;
@@ -186,13 +186,13 @@ private:
static_assert(sizeof(IocCtrlEventUnregisterBatchParams) == 8,
"IocCtrlEventKill is incorrect size");
- NvResult NvOsGetConfigU32(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& output,
+ NvResult NvOsGetConfigU32(std::span<const u8> input, std::vector<u8>& output);
+ NvResult IocCtrlEventWait(std::span<const u8> input, std::vector<u8>& output,
bool is_allocation);
- NvResult IocCtrlEventRegister(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult IocCtrlEventUnregister(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult IocCtrlEventUnregisterBatch(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult IocCtrlClearEventWait(const std::vector<u8>& input, std::vector<u8>& output);
+ NvResult IocCtrlEventRegister(std::span<const u8> input, std::vector<u8>& output);
+ NvResult IocCtrlEventUnregister(std::span<const u8> input, std::vector<u8>& output);
+ NvResult IocCtrlEventUnregisterBatch(std::span<const u8> input, std::vector<u8>& output);
+ NvResult IocCtrlClearEventWait(std::span<const u8> input, std::vector<u8>& output);
NvResult FreeEvent(u32 slot);
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
index b97813fbc..be3c083db 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
@@ -21,7 +21,7 @@ nvhost_ctrl_gpu::~nvhost_ctrl_gpu() {
events_interface.FreeEvent(unknown_event);
}
-NvResult nvhost_ctrl_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
+NvResult nvhost_ctrl_gpu::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input,
std::vector<u8>& output) {
switch (command.group) {
case 'G':
@@ -53,13 +53,13 @@ NvResult nvhost_ctrl_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u
return NvResult::NotImplemented;
}
-NvResult nvhost_ctrl_gpu::Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
- const std::vector<u8>& inline_input, std::vector<u8>& output) {
+NvResult nvhost_ctrl_gpu::Ioctl2(DeviceFD fd, Ioctl command, std::span<const u8> input,
+ std::span<const u8> inline_input, std::vector<u8>& output) {
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
return NvResult::NotImplemented;
}
-NvResult nvhost_ctrl_gpu::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
+NvResult nvhost_ctrl_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input,
std::vector<u8>& output, std::vector<u8>& inline_output) {
switch (command.group) {
case 'G':
@@ -82,8 +82,7 @@ NvResult nvhost_ctrl_gpu::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u
void nvhost_ctrl_gpu::OnOpen(DeviceFD fd) {}
void nvhost_ctrl_gpu::OnClose(DeviceFD fd) {}
-NvResult nvhost_ctrl_gpu::GetCharacteristics(const std::vector<u8>& input,
- std::vector<u8>& output) {
+NvResult nvhost_ctrl_gpu::GetCharacteristics(std::span<const u8> input, std::vector<u8>& output) {
LOG_DEBUG(Service_NVDRV, "called");
IoctlCharacteristics params{};
std::memcpy(&params, input.data(), input.size());
@@ -128,7 +127,7 @@ NvResult nvhost_ctrl_gpu::GetCharacteristics(const std::vector<u8>& input,
return NvResult::Success;
}
-NvResult nvhost_ctrl_gpu::GetCharacteristics(const std::vector<u8>& input, std::vector<u8>& output,
+NvResult nvhost_ctrl_gpu::GetCharacteristics(std::span<const u8> input, std::vector<u8>& output,
std::vector<u8>& inline_output) {
LOG_DEBUG(Service_NVDRV, "called");
IoctlCharacteristics params{};
@@ -176,7 +175,7 @@ NvResult nvhost_ctrl_gpu::GetCharacteristics(const std::vector<u8>& input, std::
return NvResult::Success;
}
-NvResult nvhost_ctrl_gpu::GetTPCMasks(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_ctrl_gpu::GetTPCMasks(std::span<const u8> input, std::vector<u8>& output) {
IoctlGpuGetTpcMasksArgs params{};
std::memcpy(&params, input.data(), input.size());
LOG_DEBUG(Service_NVDRV, "called, mask_buffer_size=0x{:X}", params.mask_buffer_size);
@@ -187,7 +186,7 @@ NvResult nvhost_ctrl_gpu::GetTPCMasks(const std::vector<u8>& input, std::vector<
return NvResult::Success;
}
-NvResult nvhost_ctrl_gpu::GetTPCMasks(const std::vector<u8>& input, std::vector<u8>& output,
+NvResult nvhost_ctrl_gpu::GetTPCMasks(std::span<const u8> input, std::vector<u8>& output,
std::vector<u8>& inline_output) {
IoctlGpuGetTpcMasksArgs params{};
std::memcpy(&params, input.data(), input.size());
@@ -200,7 +199,7 @@ NvResult nvhost_ctrl_gpu::GetTPCMasks(const std::vector<u8>& input, std::vector<
return NvResult::Success;
}
-NvResult nvhost_ctrl_gpu::GetActiveSlotMask(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_ctrl_gpu::GetActiveSlotMask(std::span<const u8> input, std::vector<u8>& output) {
LOG_DEBUG(Service_NVDRV, "called");
IoctlActiveSlotMask params{};
@@ -213,7 +212,7 @@ NvResult nvhost_ctrl_gpu::GetActiveSlotMask(const std::vector<u8>& input, std::v
return NvResult::Success;
}
-NvResult nvhost_ctrl_gpu::ZCullGetCtxSize(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_ctrl_gpu::ZCullGetCtxSize(std::span<const u8> input, std::vector<u8>& output) {
LOG_DEBUG(Service_NVDRV, "called");
IoctlZcullGetCtxSize params{};
@@ -225,7 +224,7 @@ NvResult nvhost_ctrl_gpu::ZCullGetCtxSize(const std::vector<u8>& input, std::vec
return NvResult::Success;
}
-NvResult nvhost_ctrl_gpu::ZCullGetInfo(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_ctrl_gpu::ZCullGetInfo(std::span<const u8> input, std::vector<u8>& output) {
LOG_DEBUG(Service_NVDRV, "called");
IoctlNvgpuGpuZcullGetInfoArgs params{};
@@ -248,7 +247,7 @@ NvResult nvhost_ctrl_gpu::ZCullGetInfo(const std::vector<u8>& input, std::vector
return NvResult::Success;
}
-NvResult nvhost_ctrl_gpu::ZBCSetTable(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_ctrl_gpu::ZBCSetTable(std::span<const u8> input, std::vector<u8>& output) {
LOG_WARNING(Service_NVDRV, "(STUBBED) called");
IoctlZbcSetTable params{};
@@ -264,7 +263,7 @@ NvResult nvhost_ctrl_gpu::ZBCSetTable(const std::vector<u8>& input, std::vector<
return NvResult::Success;
}
-NvResult nvhost_ctrl_gpu::ZBCQueryTable(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_ctrl_gpu::ZBCQueryTable(std::span<const u8> input, std::vector<u8>& output) {
LOG_WARNING(Service_NVDRV, "(STUBBED) called");
IoctlZbcQueryTable params{};
@@ -274,7 +273,7 @@ NvResult nvhost_ctrl_gpu::ZBCQueryTable(const std::vector<u8>& input, std::vecto
return NvResult::Success;
}
-NvResult nvhost_ctrl_gpu::FlushL2(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_ctrl_gpu::FlushL2(std::span<const u8> input, std::vector<u8>& output) {
LOG_WARNING(Service_NVDRV, "(STUBBED) called");
IoctlFlushL2 params{};
@@ -284,7 +283,7 @@ NvResult nvhost_ctrl_gpu::FlushL2(const std::vector<u8>& input, std::vector<u8>&
return NvResult::Success;
}
-NvResult nvhost_ctrl_gpu::GetGpuTime(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_ctrl_gpu::GetGpuTime(std::span<const u8> input, std::vector<u8>& output) {
LOG_DEBUG(Service_NVDRV, "called");
IoctlGetGpuTime params{};
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
index 1e8f254e2..b9333d9d3 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
@@ -21,12 +21,12 @@ public:
explicit nvhost_ctrl_gpu(Core::System& system_, EventInterface& events_interface_);
~nvhost_ctrl_gpu() override;
- NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
+ NvResult Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input,
std::vector<u8>& output) override;
- NvResult Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
- const std::vector<u8>& inline_input, std::vector<u8>& output) override;
- NvResult Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
- std::vector<u8>& output, std::vector<u8>& inline_output) override;
+ NvResult Ioctl2(DeviceFD fd, Ioctl command, std::span<const u8> input,
+ std::span<const u8> inline_input, std::vector<u8>& output) override;
+ NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::vector<u8>& output,
+ std::vector<u8>& inline_output) override;
void OnOpen(DeviceFD fd) override;
void OnClose(DeviceFD fd) override;
@@ -151,21 +151,21 @@ private:
};
static_assert(sizeof(IoctlGetGpuTime) == 0x10, "IoctlGetGpuTime is incorrect size");
- NvResult GetCharacteristics(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult GetCharacteristics(const std::vector<u8>& input, std::vector<u8>& output,
+ NvResult GetCharacteristics(std::span<const u8> input, std::vector<u8>& output);
+ NvResult GetCharacteristics(std::span<const u8> input, std::vector<u8>& output,
std::vector<u8>& inline_output);
- NvResult GetTPCMasks(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult GetTPCMasks(const std::vector<u8>& input, std::vector<u8>& output,
+ NvResult GetTPCMasks(std::span<const u8> input, std::vector<u8>& output);
+ NvResult GetTPCMasks(std::span<const u8> input, std::vector<u8>& output,
std::vector<u8>& inline_output);
- NvResult GetActiveSlotMask(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult ZCullGetCtxSize(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult ZCullGetInfo(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult ZBCSetTable(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult ZBCQueryTable(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult FlushL2(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult GetGpuTime(const std::vector<u8>& input, std::vector<u8>& output);
+ NvResult GetActiveSlotMask(std::span<const u8> input, std::vector<u8>& output);
+ NvResult ZCullGetCtxSize(std::span<const u8> input, std::vector<u8>& output);
+ NvResult ZCullGetInfo(std::span<const u8> input, std::vector<u8>& output);
+ NvResult ZBCSetTable(std::span<const u8> input, std::vector<u8>& output);
+ NvResult ZBCQueryTable(std::span<const u8> input, std::vector<u8>& output);
+ NvResult FlushL2(std::span<const u8> input, std::vector<u8>& output);
+ NvResult GetGpuTime(std::span<const u8> input, std::vector<u8>& output);
EventInterface& events_interface;
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
index e123564c6..d2308fffc 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
@@ -46,7 +46,7 @@ nvhost_gpu::~nvhost_gpu() {
syncpoint_manager.FreeSyncpoint(channel_syncpoint);
}
-NvResult nvhost_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
+NvResult nvhost_gpu::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input,
std::vector<u8>& output) {
switch (command.group) {
case 0x0:
@@ -98,8 +98,8 @@ NvResult nvhost_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& i
return NvResult::NotImplemented;
};
-NvResult nvhost_gpu::Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
- const std::vector<u8>& inline_input, std::vector<u8>& output) {
+NvResult nvhost_gpu::Ioctl2(DeviceFD fd, Ioctl command, std::span<const u8> input,
+ std::span<const u8> inline_input, std::vector<u8>& output) {
switch (command.group) {
case 'H':
switch (command.cmd) {
@@ -112,7 +112,7 @@ NvResult nvhost_gpu::Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& i
return NvResult::NotImplemented;
}
-NvResult nvhost_gpu::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
+NvResult nvhost_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input,
std::vector<u8>& output, std::vector<u8>& inline_output) {
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
return NvResult::NotImplemented;
@@ -121,7 +121,7 @@ NvResult nvhost_gpu::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& i
void nvhost_gpu::OnOpen(DeviceFD fd) {}
void nvhost_gpu::OnClose(DeviceFD fd) {}
-NvResult nvhost_gpu::SetNVMAPfd(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_gpu::SetNVMAPfd(std::span<const u8> input, std::vector<u8>& output) {
IoctlSetNvmapFD params{};
std::memcpy(&params, input.data(), input.size());
LOG_DEBUG(Service_NVDRV, "called, fd={}", params.nvmap_fd);
@@ -130,7 +130,7 @@ NvResult nvhost_gpu::SetNVMAPfd(const std::vector<u8>& input, std::vector<u8>& o
return NvResult::Success;
}
-NvResult nvhost_gpu::SetClientData(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_gpu::SetClientData(std::span<const u8> input, std::vector<u8>& output) {
LOG_DEBUG(Service_NVDRV, "called");
IoctlClientData params{};
@@ -139,7 +139,7 @@ NvResult nvhost_gpu::SetClientData(const std::vector<u8>& input, std::vector<u8>
return NvResult::Success;
}
-NvResult nvhost_gpu::GetClientData(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_gpu::GetClientData(std::span<const u8> input, std::vector<u8>& output) {
LOG_DEBUG(Service_NVDRV, "called");
IoctlClientData params{};
@@ -149,7 +149,7 @@ NvResult nvhost_gpu::GetClientData(const std::vector<u8>& input, std::vector<u8>
return NvResult::Success;
}
-NvResult nvhost_gpu::ZCullBind(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_gpu::ZCullBind(std::span<const u8> input, std::vector<u8>& output) {
std::memcpy(&zcull_params, input.data(), input.size());
LOG_DEBUG(Service_NVDRV, "called, gpu_va={:X}, mode={:X}", zcull_params.gpu_va,
zcull_params.mode);
@@ -158,7 +158,7 @@ NvResult nvhost_gpu::ZCullBind(const std::vector<u8>& input, std::vector<u8>& ou
return NvResult::Success;
}
-NvResult nvhost_gpu::SetErrorNotifier(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_gpu::SetErrorNotifier(std::span<const u8> input, std::vector<u8>& output) {
IoctlSetErrorNotifier params{};
std::memcpy(&params, input.data(), input.size());
LOG_WARNING(Service_NVDRV, "(STUBBED) called, offset={:X}, size={:X}, mem={:X}", params.offset,
@@ -168,14 +168,14 @@ NvResult nvhost_gpu::SetErrorNotifier(const std::vector<u8>& input, std::vector<
return NvResult::Success;
}
-NvResult nvhost_gpu::SetChannelPriority(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_gpu::SetChannelPriority(std::span<const u8> input, std::vector<u8>& output) {
std::memcpy(&channel_priority, input.data(), input.size());
LOG_DEBUG(Service_NVDRV, "(STUBBED) called, priority={:X}", channel_priority);
return NvResult::Success;
}
-NvResult nvhost_gpu::AllocGPFIFOEx2(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_gpu::AllocGPFIFOEx2(std::span<const u8> input, std::vector<u8>& output) {
IoctlAllocGpfifoEx2 params{};
std::memcpy(&params, input.data(), input.size());
LOG_WARNING(Service_NVDRV,
@@ -197,7 +197,7 @@ NvResult nvhost_gpu::AllocGPFIFOEx2(const std::vector<u8>& input, std::vector<u8
return NvResult::Success;
}
-NvResult nvhost_gpu::AllocateObjectContext(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_gpu::AllocateObjectContext(std::span<const u8> input, std::vector<u8>& output) {
IoctlAllocObjCtx params{};
std::memcpy(&params, input.data(), input.size());
LOG_WARNING(Service_NVDRV, "(STUBBED) called, class_num={:X}, flags={:X}", params.class_num,
@@ -293,7 +293,7 @@ NvResult nvhost_gpu::SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, std::vector<u8>
return NvResult::Success;
}
-NvResult nvhost_gpu::SubmitGPFIFOBase(const std::vector<u8>& input, std::vector<u8>& output,
+NvResult nvhost_gpu::SubmitGPFIFOBase(std::span<const u8> input, std::vector<u8>& output,
bool kickoff) {
if (input.size() < sizeof(IoctlSubmitGpfifo)) {
UNIMPLEMENTED();
@@ -314,8 +314,7 @@ NvResult nvhost_gpu::SubmitGPFIFOBase(const std::vector<u8>& input, std::vector<
return SubmitGPFIFOImpl(params, output, std::move(entries));
}
-NvResult nvhost_gpu::SubmitGPFIFOBase(const std::vector<u8>& input,
- const std::vector<u8>& input_inline,
+NvResult nvhost_gpu::SubmitGPFIFOBase(std::span<const u8> input, std::span<const u8> input_inline,
std::vector<u8>& output) {
if (input.size() < sizeof(IoctlSubmitGpfifo)) {
UNIMPLEMENTED();
@@ -328,7 +327,7 @@ NvResult nvhost_gpu::SubmitGPFIFOBase(const std::vector<u8>& input,
return SubmitGPFIFOImpl(params, output, std::move(entries));
}
-NvResult nvhost_gpu::GetWaitbase(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_gpu::GetWaitbase(std::span<const u8> input, std::vector<u8>& output) {
IoctlGetWaitbase params{};
std::memcpy(&params, input.data(), sizeof(IoctlGetWaitbase));
LOG_INFO(Service_NVDRV, "called, unknown=0x{:X}", params.unknown);
@@ -338,7 +337,7 @@ NvResult nvhost_gpu::GetWaitbase(const std::vector<u8>& input, std::vector<u8>&
return NvResult::Success;
}
-NvResult nvhost_gpu::ChannelSetTimeout(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_gpu::ChannelSetTimeout(std::span<const u8> input, std::vector<u8>& output) {
IoctlChannelSetTimeout params{};
std::memcpy(&params, input.data(), sizeof(IoctlChannelSetTimeout));
LOG_INFO(Service_NVDRV, "called, timeout=0x{:X}", params.timeout);
@@ -346,7 +345,7 @@ NvResult nvhost_gpu::ChannelSetTimeout(const std::vector<u8>& input, std::vector
return NvResult::Success;
}
-NvResult nvhost_gpu::ChannelSetTimeslice(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_gpu::ChannelSetTimeslice(std::span<const u8> input, std::vector<u8>& output) {
IoctlSetTimeslice params{};
std::memcpy(&params, input.data(), sizeof(IoctlSetTimeslice));
LOG_INFO(Service_NVDRV, "called, timeslice=0x{:X}", params.timeslice);
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
index 1e4ecd55b..3ca58202d 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
@@ -40,12 +40,12 @@ public:
NvCore::Container& core);
~nvhost_gpu() override;
- NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
+ NvResult Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input,
std::vector<u8>& output) override;
- NvResult Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
- const std::vector<u8>& inline_input, std::vector<u8>& output) override;
- NvResult Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
- std::vector<u8>& output, std::vector<u8>& inline_output) override;
+ NvResult Ioctl2(DeviceFD fd, Ioctl command, std::span<const u8> input,
+ std::span<const u8> inline_input, std::vector<u8>& output) override;
+ NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::vector<u8>& output,
+ std::vector<u8>& inline_output) override;
void OnOpen(DeviceFD fd) override;
void OnClose(DeviceFD fd) override;
@@ -186,23 +186,23 @@ private:
u32_le channel_priority{};
u32_le channel_timeslice{};
- NvResult SetNVMAPfd(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult SetClientData(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult GetClientData(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult ZCullBind(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult SetErrorNotifier(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult SetChannelPriority(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult AllocGPFIFOEx2(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult AllocateObjectContext(const std::vector<u8>& input, std::vector<u8>& output);
+ NvResult SetNVMAPfd(std::span<const u8> input, std::vector<u8>& output);
+ NvResult SetClientData(std::span<const u8> input, std::vector<u8>& output);
+ NvResult GetClientData(std::span<const u8> input, std::vector<u8>& output);
+ NvResult ZCullBind(std::span<const u8> input, std::vector<u8>& output);
+ NvResult SetErrorNotifier(std::span<const u8> input, std::vector<u8>& output);
+ NvResult SetChannelPriority(std::span<const u8> input, std::vector<u8>& output);
+ NvResult AllocGPFIFOEx2(std::span<const u8> input, std::vector<u8>& output);
+ NvResult AllocateObjectContext(std::span<const u8> input, std::vector<u8>& output);
NvResult SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, std::vector<u8>& output,
Tegra::CommandList&& entries);
- NvResult SubmitGPFIFOBase(const std::vector<u8>& input, std::vector<u8>& output,
+ NvResult SubmitGPFIFOBase(std::span<const u8> input, std::vector<u8>& output,
bool kickoff = false);
- NvResult SubmitGPFIFOBase(const std::vector<u8>& input, const std::vector<u8>& input_inline,
+ NvResult SubmitGPFIFOBase(std::span<const u8> input, std::span<const u8> input_inline,
std::vector<u8>& output);
- NvResult GetWaitbase(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult ChannelSetTimeout(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult ChannelSetTimeslice(const std::vector<u8>& input, std::vector<u8>& output);
+ NvResult GetWaitbase(std::span<const u8> input, std::vector<u8>& output);
+ NvResult ChannelSetTimeout(std::span<const u8> input, std::vector<u8>& output);
+ NvResult ChannelSetTimeslice(std::span<const u8> input, std::vector<u8>& output);
EventInterface& events_interface;
NvCore::Container& core;
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
index 1703f9cc3..0c7aee1b8 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
@@ -15,7 +15,7 @@ nvhost_nvdec::nvhost_nvdec(Core::System& system_, NvCore::Container& core_)
: nvhost_nvdec_common{system_, core_, NvCore::ChannelType::NvDec} {}
nvhost_nvdec::~nvhost_nvdec() = default;
-NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
+NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input,
std::vector<u8>& output) {
switch (command.group) {
case 0x0:
@@ -55,13 +55,13 @@ NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>&
return NvResult::NotImplemented;
}
-NvResult nvhost_nvdec::Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
- const std::vector<u8>& inline_input, std::vector<u8>& output) {
+NvResult nvhost_nvdec::Ioctl2(DeviceFD fd, Ioctl command, std::span<const u8> input,
+ std::span<const u8> inline_input, std::vector<u8>& output) {
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
return NvResult::NotImplemented;
}
-NvResult nvhost_nvdec::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
+NvResult nvhost_nvdec::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input,
std::vector<u8>& output, std::vector<u8>& inline_output) {
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
return NvResult::NotImplemented;
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h
index c1b4e53e8..0d615bbcb 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h
@@ -13,12 +13,12 @@ public:
explicit nvhost_nvdec(Core::System& system_, NvCore::Container& core);
~nvhost_nvdec() override;
- NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
+ NvResult Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input,
std::vector<u8>& output) override;
- NvResult Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
- const std::vector<u8>& inline_input, std::vector<u8>& output) override;
- NvResult Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
- std::vector<u8>& output, std::vector<u8>& inline_output) override;
+ NvResult Ioctl2(DeviceFD fd, Ioctl command, std::span<const u8> input,
+ std::span<const u8> inline_input, std::vector<u8>& output) override;
+ NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::vector<u8>& output,
+ std::vector<u8>& inline_output) override;
void OnOpen(DeviceFD fd) override;
void OnClose(DeviceFD fd) override;
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
index 99eede702..7bcef105b 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
@@ -23,7 +23,7 @@ namespace {
// Copies count amount of type T from the input vector into the dst vector.
// Returns the number of bytes written into dst.
template <typename T>
-std::size_t SliceVectors(const std::vector<u8>& input, std::vector<T>& dst, std::size_t count,
+std::size_t SliceVectors(std::span<const u8> input, std::vector<T>& dst, std::size_t count,
std::size_t offset) {
if (dst.empty()) {
return 0;
@@ -63,7 +63,7 @@ nvhost_nvdec_common::~nvhost_nvdec_common() {
core.Host1xDeviceFile().syncpts_accumulated.push_back(channel_syncpoint);
}
-NvResult nvhost_nvdec_common::SetNVMAPfd(const std::vector<u8>& input) {
+NvResult nvhost_nvdec_common::SetNVMAPfd(std::span<const u8> input) {
IoctlSetNvmapFD params{};
std::memcpy(&params, input.data(), sizeof(IoctlSetNvmapFD));
LOG_DEBUG(Service_NVDRV, "called, fd={}", params.nvmap_fd);
@@ -72,7 +72,7 @@ NvResult nvhost_nvdec_common::SetNVMAPfd(const std::vector<u8>& input) {
return NvResult::Success;
}
-NvResult nvhost_nvdec_common::Submit(DeviceFD fd, const std::vector<u8>& input,
+NvResult nvhost_nvdec_common::Submit(DeviceFD fd, std::span<const u8> input,
std::vector<u8>& output) {
IoctlSubmit params{};
std::memcpy(&params, input.data(), sizeof(IoctlSubmit));
@@ -121,7 +121,7 @@ NvResult nvhost_nvdec_common::Submit(DeviceFD fd, const std::vector<u8>& input,
return NvResult::Success;
}
-NvResult nvhost_nvdec_common::GetSyncpoint(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_nvdec_common::GetSyncpoint(std::span<const u8> input, std::vector<u8>& output) {
IoctlGetSyncpoint params{};
std::memcpy(&params, input.data(), sizeof(IoctlGetSyncpoint));
LOG_DEBUG(Service_NVDRV, "called GetSyncpoint, id={}", params.param);
@@ -133,7 +133,7 @@ NvResult nvhost_nvdec_common::GetSyncpoint(const std::vector<u8>& input, std::ve
return NvResult::Success;
}
-NvResult nvhost_nvdec_common::GetWaitbase(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_nvdec_common::GetWaitbase(std::span<const u8> input, std::vector<u8>& output) {
IoctlGetWaitbase params{};
LOG_CRITICAL(Service_NVDRV, "called WAITBASE");
std::memcpy(&params, input.data(), sizeof(IoctlGetWaitbase));
@@ -142,7 +142,7 @@ NvResult nvhost_nvdec_common::GetWaitbase(const std::vector<u8>& input, std::vec
return NvResult::Success;
}
-NvResult nvhost_nvdec_common::MapBuffer(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_nvdec_common::MapBuffer(std::span<const u8> input, std::vector<u8>& output) {
IoctlMapBuffer params{};
std::memcpy(&params, input.data(), sizeof(IoctlMapBuffer));
std::vector<MapBufferEntry> cmd_buffer_handles(params.num_entries);
@@ -159,7 +159,7 @@ NvResult nvhost_nvdec_common::MapBuffer(const std::vector<u8>& input, std::vecto
return NvResult::Success;
}
-NvResult nvhost_nvdec_common::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_nvdec_common::UnmapBuffer(std::span<const u8> input, std::vector<u8>& output) {
IoctlMapBuffer params{};
std::memcpy(&params, input.data(), sizeof(IoctlMapBuffer));
std::vector<MapBufferEntry> cmd_buffer_handles(params.num_entries);
@@ -173,8 +173,7 @@ NvResult nvhost_nvdec_common::UnmapBuffer(const std::vector<u8>& input, std::vec
return NvResult::Success;
}
-NvResult nvhost_nvdec_common::SetSubmitTimeout(const std::vector<u8>& input,
- std::vector<u8>& output) {
+NvResult nvhost_nvdec_common::SetSubmitTimeout(std::span<const u8> input, std::vector<u8>& output) {
std::memcpy(&submit_timeout, input.data(), input.size());
LOG_WARNING(Service_NVDRV, "(STUBBED) called");
return NvResult::Success;
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
index fe76100c8..5af26a26f 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
@@ -107,13 +107,13 @@ protected:
static_assert(sizeof(IoctlMapBuffer) == 0x0C, "IoctlMapBuffer is incorrect size");
/// Ioctl command implementations
- NvResult SetNVMAPfd(const std::vector<u8>& input);
- NvResult Submit(DeviceFD fd, const std::vector<u8>& input, std::vector<u8>& output);
- NvResult GetSyncpoint(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult GetWaitbase(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult MapBuffer(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult SetSubmitTimeout(const std::vector<u8>& input, std::vector<u8>& output);
+ NvResult SetNVMAPfd(std::span<const u8> input);
+ NvResult Submit(DeviceFD fd, std::span<const u8> input, std::vector<u8>& output);
+ NvResult GetSyncpoint(std::span<const u8> input, std::vector<u8>& output);
+ NvResult GetWaitbase(std::span<const u8> input, std::vector<u8>& output);
+ NvResult MapBuffer(std::span<const u8> input, std::vector<u8>& output);
+ NvResult UnmapBuffer(std::span<const u8> input, std::vector<u8>& output);
+ NvResult SetSubmitTimeout(std::span<const u8> input, std::vector<u8>& output);
Kernel::KEvent* QueryEvent(u32 event_id) override;
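Note on the SliceVectors helper changed in the nvhost_nvdec_common.cpp hunk above: it now reads from a std::span<const u8> instead of a const std::vector<u8>&. Its full body is not part of this diff, so the stand-alone sketch below only illustrates the behaviour its comment describes ("copies count amount of type T from the input ... into dst"), with an assumed bounds guard and a caller-sized destination; it is not the real implementation.

#include <cstdint>
#include <cstring>
#include <span>
#include <vector>

template <typename T>
std::size_t SliceVectorsSketch(std::span<const std::uint8_t> input, std::vector<T>& dst,
                               std::size_t count, std::size_t offset) {
    // Copies `count` elements of type T from `input`, starting at byte `offset`, into `dst`.
    if (dst.empty()) {
        return 0;
    }
    const std::size_t bytes = count * sizeof(T);
    if (offset + bytes > input.size() || dst.size() < count) {
        return 0; // assumed guard; the real helper may handle short input differently
    }
    std::memcpy(dst.data(), input.data() + offset, bytes);
    return bytes;
}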
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp
index bdbc2f9e1..39f30e7c8 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp
@@ -12,7 +12,7 @@ namespace Service::Nvidia::Devices {
nvhost_nvjpg::nvhost_nvjpg(Core::System& system_) : nvdevice{system_} {}
nvhost_nvjpg::~nvhost_nvjpg() = default;
-NvResult nvhost_nvjpg::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
+NvResult nvhost_nvjpg::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input,
std::vector<u8>& output) {
switch (command.group) {
case 'H':
@@ -31,13 +31,13 @@ NvResult nvhost_nvjpg::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>&
return NvResult::NotImplemented;
}
-NvResult nvhost_nvjpg::Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
- const std::vector<u8>& inline_input, std::vector<u8>& output) {
+NvResult nvhost_nvjpg::Ioctl2(DeviceFD fd, Ioctl command, std::span<const u8> input,
+ std::span<const u8> inline_input, std::vector<u8>& output) {
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
return NvResult::NotImplemented;
}
-NvResult nvhost_nvjpg::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
+NvResult nvhost_nvjpg::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input,
std::vector<u8>& output, std::vector<u8>& inline_output) {
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
return NvResult::NotImplemented;
@@ -46,7 +46,7 @@ NvResult nvhost_nvjpg::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>&
void nvhost_nvjpg::OnOpen(DeviceFD fd) {}
void nvhost_nvjpg::OnClose(DeviceFD fd) {}
-NvResult nvhost_nvjpg::SetNVMAPfd(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_nvjpg::SetNVMAPfd(std::span<const u8> input, std::vector<u8>& output) {
IoctlSetNvmapFD params{};
std::memcpy(&params, input.data(), input.size());
LOG_DEBUG(Service_NVDRV, "called, fd={}", params.nvmap_fd);
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h
index 440e7d371..41b57e872 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h
@@ -15,12 +15,12 @@ public:
explicit nvhost_nvjpg(Core::System& system_);
~nvhost_nvjpg() override;
- NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
+ NvResult Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input,
std::vector<u8>& output) override;
- NvResult Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
- const std::vector<u8>& inline_input, std::vector<u8>& output) override;
- NvResult Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
- std::vector<u8>& output, std::vector<u8>& inline_output) override;
+ NvResult Ioctl2(DeviceFD fd, Ioctl command, std::span<const u8> input,
+ std::span<const u8> inline_input, std::vector<u8>& output) override;
+ NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::vector<u8>& output,
+ std::vector<u8>& inline_output) override;
void OnOpen(DeviceFD fd) override;
void OnClose(DeviceFD fd) override;
@@ -33,7 +33,7 @@ private:
s32_le nvmap_fd{};
- NvResult SetNVMAPfd(const std::vector<u8>& input, std::vector<u8>& output);
+ NvResult SetNVMAPfd(std::span<const u8> input, std::vector<u8>& output);
};
} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
index 73f97136e..b0ea402a7 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
@@ -15,7 +15,7 @@ nvhost_vic::nvhost_vic(Core::System& system_, NvCore::Container& core_)
nvhost_vic::~nvhost_vic() = default;
-NvResult nvhost_vic::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
+NvResult nvhost_vic::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input,
std::vector<u8>& output) {
switch (command.group) {
case 0x0:
@@ -55,13 +55,13 @@ NvResult nvhost_vic::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& i
return NvResult::NotImplemented;
}
-NvResult nvhost_vic::Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
- const std::vector<u8>& inline_input, std::vector<u8>& output) {
+NvResult nvhost_vic::Ioctl2(DeviceFD fd, Ioctl command, std::span<const u8> input,
+ std::span<const u8> inline_input, std::vector<u8>& output) {
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
return NvResult::NotImplemented;
}
-NvResult nvhost_vic::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
+NvResult nvhost_vic::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input,
std::vector<u8>& output, std::vector<u8>& inline_output) {
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
return NvResult::NotImplemented;
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.h b/src/core/hle/service/nvdrv/devices/nvhost_vic.h
index f164caafb..b5e350a83 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_vic.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.h
@@ -12,12 +12,12 @@ public:
explicit nvhost_vic(Core::System& system_, NvCore::Container& core);
~nvhost_vic();
- NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
+ NvResult Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input,
std::vector<u8>& output) override;
- NvResult Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
- const std::vector<u8>& inline_input, std::vector<u8>& output) override;
- NvResult Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
- std::vector<u8>& output, std::vector<u8>& inline_output) override;
+ NvResult Ioctl2(DeviceFD fd, Ioctl command, std::span<const u8> input,
+ std::span<const u8> inline_input, std::vector<u8>& output) override;
+ NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::vector<u8>& output,
+ std::vector<u8>& inline_output) override;
void OnOpen(DeviceFD fd) override;
void OnClose(DeviceFD fd) override;
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp
index fa29db758..29c1e0f01 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp
@@ -25,7 +25,7 @@ nvmap::nvmap(Core::System& system_, NvCore::Container& container_)
nvmap::~nvmap() = default;
-NvResult nvmap::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
+NvResult nvmap::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input,
std::vector<u8>& output) {
switch (command.group) {
case 0x1:
@@ -54,13 +54,13 @@ NvResult nvmap::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
return NvResult::NotImplemented;
}
-NvResult nvmap::Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
- const std::vector<u8>& inline_input, std::vector<u8>& output) {
+NvResult nvmap::Ioctl2(DeviceFD fd, Ioctl command, std::span<const u8> input,
+ std::span<const u8> inline_input, std::vector<u8>& output) {
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
return NvResult::NotImplemented;
}
-NvResult nvmap::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
+NvResult nvmap::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input,
std::vector<u8>& output, std::vector<u8>& inline_output) {
UNIMPLEMENTED_MSG("Unimplemented ioctl={:08X}", command.raw);
return NvResult::NotImplemented;
@@ -69,7 +69,7 @@ NvResult nvmap::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
void nvmap::OnOpen(DeviceFD fd) {}
void nvmap::OnClose(DeviceFD fd) {}
-NvResult nvmap::IocCreate(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvmap::IocCreate(std::span<const u8> input, std::vector<u8>& output) {
IocCreateParams params;
std::memcpy(&params, input.data(), sizeof(params));
LOG_DEBUG(Service_NVDRV, "called, size=0x{:08X}", params.size);
@@ -89,7 +89,7 @@ NvResult nvmap::IocCreate(const std::vector<u8>& input, std::vector<u8>& output)
return NvResult::Success;
}
-NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvmap::IocAlloc(std::span<const u8> input, std::vector<u8>& output) {
IocAllocParams params;
std::memcpy(&params, input.data(), sizeof(params));
LOG_DEBUG(Service_NVDRV, "called, addr={:X}", params.address);
@@ -137,7 +137,7 @@ NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output)
return result;
}
-NvResult nvmap::IocGetId(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvmap::IocGetId(std::span<const u8> input, std::vector<u8>& output) {
IocGetIdParams params;
std::memcpy(&params, input.data(), sizeof(params));
@@ -161,7 +161,7 @@ NvResult nvmap::IocGetId(const std::vector<u8>& input, std::vector<u8>& output)
return NvResult::Success;
}
-NvResult nvmap::IocFromId(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvmap::IocFromId(std::span<const u8> input, std::vector<u8>& output) {
IocFromIdParams params;
std::memcpy(&params, input.data(), sizeof(params));
@@ -192,7 +192,7 @@ NvResult nvmap::IocFromId(const std::vector<u8>& input, std::vector<u8>& output)
return NvResult::Success;
}
-NvResult nvmap::IocParam(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvmap::IocParam(std::span<const u8> input, std::vector<u8>& output) {
enum class ParamTypes { Size = 1, Alignment = 2, Base = 3, Heap = 4, Kind = 5, Compr = 6 };
IocParamParams params;
@@ -241,7 +241,7 @@ NvResult nvmap::IocParam(const std::vector<u8>& input, std::vector<u8>& output)
return NvResult::Success;
}
-NvResult nvmap::IocFree(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvmap::IocFree(std::span<const u8> input, std::vector<u8>& output) {
IocFreeParams params;
std::memcpy(&params, input.data(), sizeof(params));
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.h b/src/core/hle/service/nvdrv/devices/nvmap.h
index e9bfd0358..82bd3b118 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.h
+++ b/src/core/hle/service/nvdrv/devices/nvmap.h
@@ -26,12 +26,12 @@ public:
nvmap(const nvmap&) = delete;
nvmap& operator=(const nvmap&) = delete;
- NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
+ NvResult Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input,
std::vector<u8>& output) override;
- NvResult Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
- const std::vector<u8>& inline_input, std::vector<u8>& output) override;
- NvResult Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
- std::vector<u8>& output, std::vector<u8>& inline_output) override;
+ NvResult Ioctl2(DeviceFD fd, Ioctl command, std::span<const u8> input,
+ std::span<const u8> inline_input, std::vector<u8>& output) override;
+ NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::vector<u8>& output,
+ std::vector<u8>& inline_output) override;
void OnOpen(DeviceFD fd) override;
void OnClose(DeviceFD fd) override;
@@ -106,12 +106,12 @@ private:
};
static_assert(sizeof(IocGetIdParams) == 8, "IocGetIdParams has wrong size");
- NvResult IocCreate(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult IocAlloc(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult IocGetId(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult IocFromId(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult IocParam(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult IocFree(const std::vector<u8>& input, std::vector<u8>& output);
+ NvResult IocCreate(std::span<const u8> input, std::vector<u8>& output);
+ NvResult IocAlloc(std::span<const u8> input, std::vector<u8>& output);
+ NvResult IocGetId(std::span<const u8> input, std::vector<u8>& output);
+ NvResult IocFromId(std::span<const u8> input, std::vector<u8>& output);
+ NvResult IocParam(std::span<const u8> input, std::vector<u8>& output);
+ NvResult IocFree(std::span<const u8> input, std::vector<u8>& output);
NvCore::Container& container;
NvCore::NvMap& file;
diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp
index 6fc8565c0..52d27e755 100644
--- a/src/core/hle/service/nvdrv/nvdrv.cpp
+++ b/src/core/hle/service/nvdrv/nvdrv.cpp
@@ -124,7 +124,7 @@ DeviceFD Module::Open(const std::string& device_name) {
return fd;
}
-NvResult Module::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
+NvResult Module::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input,
std::vector<u8>& output) {
if (fd < 0) {
LOG_ERROR(Service_NVDRV, "Invalid DeviceFD={}!", fd);
@@ -141,8 +141,8 @@ NvResult Module::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input
return itr->second->Ioctl1(fd, command, input, output);
}
-NvResult Module::Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
- const std::vector<u8>& inline_input, std::vector<u8>& output) {
+NvResult Module::Ioctl2(DeviceFD fd, Ioctl command, std::span<const u8> input,
+ std::span<const u8> inline_input, std::vector<u8>& output) {
if (fd < 0) {
LOG_ERROR(Service_NVDRV, "Invalid DeviceFD={}!", fd);
return NvResult::InvalidState;
@@ -158,7 +158,7 @@ NvResult Module::Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input
return itr->second->Ioctl2(fd, command, input, inline_input, output);
}
-NvResult Module::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
+NvResult Module::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input,
std::vector<u8>& output, std::vector<u8>& inline_output) {
if (fd < 0) {
LOG_ERROR(Service_NVDRV, "Invalid DeviceFD={}!", fd);
diff --git a/src/core/hle/service/nvdrv/nvdrv.h b/src/core/hle/service/nvdrv/nvdrv.h
index f3c81bd88..b09b6e585 100644
--- a/src/core/hle/service/nvdrv/nvdrv.h
+++ b/src/core/hle/service/nvdrv/nvdrv.h
@@ -7,6 +7,7 @@
#include <functional>
#include <list>
#include <memory>
+#include <span>
#include <string>
#include <unordered_map>
#include <vector>
@@ -79,14 +80,13 @@ public:
DeviceFD Open(const std::string& device_name);
/// Sends an ioctl command to the specified file descriptor.
- NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
- std::vector<u8>& output);
+ NvResult Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input, std::vector<u8>& output);
- NvResult Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
- const std::vector<u8>& inline_input, std::vector<u8>& output);
+ NvResult Ioctl2(DeviceFD fd, Ioctl command, std::span<const u8> input,
+ std::span<const u8> inline_input, std::vector<u8>& output);
- NvResult Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
- std::vector<u8>& output, std::vector<u8>& inline_output);
+ NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::vector<u8>& output,
+ std::vector<u8>& inline_output);
/// Closes a device file descriptor and returns operation success.
NvResult Close(DeviceFD fd);
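A const std::vector<u8> binds implicitly to std::span<const u8>, so the new Module::Ioctl1/Ioctl2/Ioctl3 signatures stay source compatible with existing callers while letting non-owning views (for example an IPC read buffer) be passed without copying into a vector first. A minimal stand-alone illustration follows; IoctlLike is a made-up stand-in, not part of the Module API:

#include <cstdint>
#include <span>
#include <vector>

using u8 = std::uint8_t;

int IoctlLike(std::span<const u8> input, std::vector<u8>& output) {
    output.assign(input.begin(), input.end()); // echo the request back, purely for demonstration
    return 0;
}

int main() {
    const std::vector<u8> request{1, 2, 3, 4};
    std::vector<u8> response;
    IoctlLike(request, response);                     // implicit vector -> span conversion
    IoctlLike(std::span{request}.first(2), response); // copy-free subspan of the same buffer
}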
diff --git a/src/core/hle/service/nvflinger/buffer_queue_producer.cpp b/src/core/hle/service/nvflinger/buffer_queue_producer.cpp
index e601b5da1..bcbe05b0d 100644
--- a/src/core/hle/service/nvflinger/buffer_queue_producer.cpp
+++ b/src/core/hle/service/nvflinger/buffer_queue_producer.cpp
@@ -815,8 +815,8 @@ Status BufferQueueProducer::SetPreallocatedBuffer(s32 slot,
void BufferQueueProducer::Transact(Kernel::HLERequestContext& ctx, TransactionId code, u32 flags) {
Status status{Status::NoError};
- Parcel parcel_in{ctx.ReadBuffer()};
- Parcel parcel_out{};
+ InputParcel parcel_in{ctx.ReadBuffer()};
+ OutputParcel parcel_out{};
switch (code) {
case TransactionId::Connect: {
diff --git a/src/core/hle/service/nvflinger/graphic_buffer_producer.cpp b/src/core/hle/service/nvflinger/graphic_buffer_producer.cpp
index 4043c91f1..769e8c0a3 100644
--- a/src/core/hle/service/nvflinger/graphic_buffer_producer.cpp
+++ b/src/core/hle/service/nvflinger/graphic_buffer_producer.cpp
@@ -9,7 +9,7 @@
namespace Service::android {
-QueueBufferInput::QueueBufferInput(Parcel& parcel) {
+QueueBufferInput::QueueBufferInput(InputParcel& parcel) {
parcel.ReadFlattened(*this);
}
diff --git a/src/core/hle/service/nvflinger/graphic_buffer_producer.h b/src/core/hle/service/nvflinger/graphic_buffer_producer.h
index 6ea327bbe..2969f0fd5 100644
--- a/src/core/hle/service/nvflinger/graphic_buffer_producer.h
+++ b/src/core/hle/service/nvflinger/graphic_buffer_producer.h
@@ -14,11 +14,11 @@
namespace Service::android {
-class Parcel;
+class InputParcel;
#pragma pack(push, 1)
struct QueueBufferInput final {
- explicit QueueBufferInput(Parcel& parcel);
+ explicit QueueBufferInput(InputParcel& parcel);
void Deflate(s64* timestamp_, bool* is_auto_timestamp_, Common::Rectangle<s32>* crop_,
NativeWindowScalingMode* scaling_mode_, NativeWindowTransform* transform_,
diff --git a/src/core/hle/service/nvflinger/parcel.h b/src/core/hle/service/nvflinger/parcel.h
index f3fa2587d..d1b6201e0 100644
--- a/src/core/hle/service/nvflinger/parcel.h
+++ b/src/core/hle/service/nvflinger/parcel.h
@@ -4,6 +4,7 @@
#pragma once
#include <memory>
+#include <span>
#include <vector>
#include "common/alignment.h"
@@ -12,18 +13,17 @@
namespace Service::android {
-class Parcel final {
-public:
- static constexpr std::size_t DefaultBufferSize = 0x40;
-
- Parcel() : buffer(DefaultBufferSize) {}
-
- template <typename T>
- explicit Parcel(const T& out_data) : buffer(DefaultBufferSize) {
- Write(out_data);
- }
+struct ParcelHeader {
+ u32 data_size;
+ u32 data_offset;
+ u32 objects_size;
+ u32 objects_offset;
+};
+static_assert(sizeof(ParcelHeader) == 16, "ParcelHeader has wrong size");
- explicit Parcel(std::vector<u8> in_data) : buffer(std::move(in_data)) {
+class InputParcel final {
+public:
+ explicit InputParcel(std::span<const u8> in_data) : read_buffer(std::move(in_data)) {
DeserializeHeader();
[[maybe_unused]] const std::u16string token = ReadInterfaceToken();
}
@@ -31,9 +31,9 @@ public:
template <typename T>
void Read(T& val) {
static_assert(std::is_trivially_copyable_v<T>, "T must be trivially copyable.");
- ASSERT(read_index + sizeof(T) <= buffer.size());
+ ASSERT(read_index + sizeof(T) <= read_buffer.size());
- std::memcpy(&val, buffer.data() + read_index, sizeof(T));
+ std::memcpy(&val, read_buffer.data() + read_index, sizeof(T));
read_index += sizeof(T);
read_index = Common::AlignUp(read_index, 4);
}
@@ -62,10 +62,10 @@ public:
template <typename T>
T ReadUnaligned() {
static_assert(std::is_trivially_copyable_v<T>, "T must be trivially copyable.");
- ASSERT(read_index + sizeof(T) <= buffer.size());
+ ASSERT(read_index + sizeof(T) <= read_buffer.size());
T val;
- std::memcpy(&val, buffer.data() + read_index, sizeof(T));
+ std::memcpy(&val, read_buffer.data() + read_index, sizeof(T));
read_index += sizeof(T);
return val;
}
@@ -101,6 +101,31 @@ public:
return token;
}
+ void DeserializeHeader() {
+ ASSERT(read_buffer.size() > sizeof(ParcelHeader));
+
+ ParcelHeader header{};
+ std::memcpy(&header, read_buffer.data(), sizeof(ParcelHeader));
+
+ read_index = header.data_offset;
+ }
+
+private:
+ std::span<const u8> read_buffer;
+ std::size_t read_index = 0;
+};
+
+class OutputParcel final {
+public:
+ static constexpr std::size_t DefaultBufferSize = 0x40;
+
+ OutputParcel() : buffer(DefaultBufferSize) {}
+
+ template <typename T>
+ explicit OutputParcel(const T& out_data) : buffer(DefaultBufferSize) {
+ Write(out_data);
+ }
+
template <typename T>
void Write(const T& val) {
static_assert(std::is_trivially_copyable_v<T>, "T must be trivially copyable.");
@@ -133,40 +158,20 @@ public:
WriteObject(ptr.get());
}
- void DeserializeHeader() {
- ASSERT(buffer.size() > sizeof(Header));
-
- Header header{};
- std::memcpy(&header, buffer.data(), sizeof(Header));
-
- read_index = header.data_offset;
- }
-
std::vector<u8> Serialize() const {
- ASSERT(read_index == 0);
-
- Header header{};
- header.data_size = static_cast<u32>(write_index - sizeof(Header));
- header.data_offset = sizeof(Header);
+ ParcelHeader header{};
+ header.data_size = static_cast<u32>(write_index - sizeof(ParcelHeader));
+ header.data_offset = sizeof(ParcelHeader);
header.objects_size = 4;
- header.objects_offset = static_cast<u32>(sizeof(Header) + header.data_size);
- std::memcpy(buffer.data(), &header, sizeof(Header));
+ header.objects_offset = static_cast<u32>(sizeof(ParcelHeader) + header.data_size);
+ std::memcpy(buffer.data(), &header, sizeof(ParcelHeader));
return buffer;
}
private:
- struct Header {
- u32 data_size;
- u32 data_offset;
- u32 objects_size;
- u32 objects_offset;
- };
- static_assert(sizeof(Header) == 16, "ParcelHeader has wrong size");
-
mutable std::vector<u8> buffer;
- std::size_t read_index = 0;
- std::size_t write_index = sizeof(Header);
+ std::size_t write_index = sizeof(ParcelHeader);
};
} // namespace Service::android
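The former Parcel class is split here: InputParcel is a read-only reader over a std::span<const u8> supplied by the caller (which must outlive the parcel, since no copy is taken), while OutputParcel keeps the owning write buffer and the Serialize() path. A short usage sketch under those assumptions, where ctx stands in for the HLE request context and the u32 payload is illustrative only:

const std::span<const u8> request_bytes = ctx.ReadBuffer(); // non-owning view over the request
InputParcel parcel_in{request_bytes};                       // header deserialized, no copy made

u32 value{};
parcel_in.Read(value);                                      // assumed payload layout

OutputParcel parcel_out{};
parcel_out.Write(value);
ctx.WriteBuffer(parcel_out.Serialize());                    // Serialize() prepends the ParcelHeader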
diff --git a/src/core/hle/service/prepo/prepo.cpp b/src/core/hle/service/prepo/prepo.cpp
index 78f897d3e..01040b32a 100644
--- a/src/core/hle/service/prepo/prepo.cpp
+++ b/src/core/hle/service/prepo/prepo.cpp
@@ -63,7 +63,7 @@ private:
return ctx.ReadBuffer(1);
}
- return std::vector<u8>{};
+ return std::span<const u8>{};
}();
LOG_DEBUG(Service_PREPO,
@@ -90,7 +90,7 @@ private:
return ctx.ReadBuffer(1);
}
- return std::vector<u8>{};
+ return std::span<const u8>{};
}();
LOG_DEBUG(Service_PREPO,
@@ -142,7 +142,7 @@ private:
return ctx.ReadBuffer(1);
}
- return std::vector<u8>{};
+ return std::span<const u8>{};
}();
LOG_DEBUG(Service_PREPO, "called, title_id={:016X}, data1_size={:016X}, data2_size={:016X}",
@@ -166,7 +166,7 @@ private:
return ctx.ReadBuffer(1);
}
- return std::vector<u8>{};
+ return std::span<const u8>{};
}();
LOG_DEBUG(Service_PREPO,
diff --git a/src/core/hle/service/sockets/bsd.cpp b/src/core/hle/service/sockets/bsd.cpp
index 9e94a462f..bdb499268 100644
--- a/src/core/hle/service/sockets/bsd.cpp
+++ b/src/core/hle/service/sockets/bsd.cpp
@@ -208,7 +208,6 @@ void BSD::Bind(Kernel::HLERequestContext& ctx) {
const s32 fd = rp.Pop<s32>();
LOG_DEBUG(Service, "called. fd={} addrlen={}", fd, ctx.GetReadBufferSize());
-
BuildErrnoResponse(ctx, BindImpl(fd, ctx.ReadBuffer()));
}
@@ -312,7 +311,7 @@ void BSD::SetSockOpt(Kernel::HLERequestContext& ctx) {
const u32 level = rp.Pop<u32>();
const OptName optname = static_cast<OptName>(rp.Pop<u32>());
- const std::vector<u8> buffer = ctx.ReadBuffer();
+ const auto buffer = ctx.ReadBuffer();
const u8* optval = buffer.empty() ? nullptr : buffer.data();
size_t optlen = buffer.size();
@@ -489,7 +488,7 @@ std::pair<s32, Errno> BSD::SocketImpl(Domain domain, Type type, Protocol protoco
return {fd, Errno::SUCCESS};
}
-std::pair<s32, Errno> BSD::PollImpl(std::vector<u8>& write_buffer, std::vector<u8> read_buffer,
+std::pair<s32, Errno> BSD::PollImpl(std::vector<u8>& write_buffer, std::span<const u8> read_buffer,
s32 nfds, s32 timeout) {
if (write_buffer.size() < nfds * sizeof(PollFD)) {
return {-1, Errno::INVAL};
@@ -584,7 +583,7 @@ std::pair<s32, Errno> BSD::AcceptImpl(s32 fd, std::vector<u8>& write_buffer) {
return {new_fd, Errno::SUCCESS};
}
-Errno BSD::BindImpl(s32 fd, const std::vector<u8>& addr) {
+Errno BSD::BindImpl(s32 fd, std::span<const u8> addr) {
if (!IsFileDescriptorValid(fd)) {
return Errno::BADF;
}
@@ -595,7 +594,7 @@ Errno BSD::BindImpl(s32 fd, const std::vector<u8>& addr) {
return Translate(file_descriptors[fd]->socket->Bind(Translate(addr_in)));
}
-Errno BSD::ConnectImpl(s32 fd, const std::vector<u8>& addr) {
+Errno BSD::ConnectImpl(s32 fd, std::span<const u8> addr) {
if (!IsFileDescriptorValid(fd)) {
return Errno::BADF;
}
@@ -800,15 +799,15 @@ std::pair<s32, Errno> BSD::RecvFromImpl(s32 fd, u32 flags, std::vector<u8>& mess
return {ret, bsd_errno};
}
-std::pair<s32, Errno> BSD::SendImpl(s32 fd, u32 flags, const std::vector<u8>& message) {
+std::pair<s32, Errno> BSD::SendImpl(s32 fd, u32 flags, std::span<const u8> message) {
if (!IsFileDescriptorValid(fd)) {
return {-1, Errno::BADF};
}
return Translate(file_descriptors[fd]->socket->Send(message, flags));
}
-std::pair<s32, Errno> BSD::SendToImpl(s32 fd, u32 flags, const std::vector<u8>& message,
- const std::vector<u8>& addr) {
+std::pair<s32, Errno> BSD::SendToImpl(s32 fd, u32 flags, std::span<const u8> message,
+ std::span<const u8> addr) {
if (!IsFileDescriptorValid(fd)) {
return {-1, Errno::BADF};
}
diff --git a/src/core/hle/service/sockets/bsd.h b/src/core/hle/service/sockets/bsd.h
index 81e855e0f..56bb3f8b1 100644
--- a/src/core/hle/service/sockets/bsd.h
+++ b/src/core/hle/service/sockets/bsd.h
@@ -4,6 +4,7 @@
#pragma once
#include <memory>
+#include <span>
#include <vector>
#include "common/common_types.h"
@@ -44,7 +45,7 @@ private:
s32 nfds;
s32 timeout;
- std::vector<u8> read_buffer;
+ std::span<const u8> read_buffer;
std::vector<u8> write_buffer;
s32 ret{};
Errno bsd_errno{};
@@ -65,7 +66,7 @@ private:
void Response(Kernel::HLERequestContext& ctx);
s32 fd;
- std::vector<u8> addr;
+ std::span<const u8> addr;
Errno bsd_errno{};
};
@@ -98,7 +99,7 @@ private:
s32 fd;
u32 flags;
- std::vector<u8> message;
+ std::span<const u8> message;
s32 ret{};
Errno bsd_errno{};
};
@@ -109,8 +110,8 @@ private:
s32 fd;
u32 flags;
- std::vector<u8> message;
- std::vector<u8> addr;
+ std::span<const u8> message;
+ std::span<const u8> addr;
s32 ret{};
Errno bsd_errno{};
};
@@ -143,11 +144,11 @@ private:
void ExecuteWork(Kernel::HLERequestContext& ctx, Work work);
std::pair<s32, Errno> SocketImpl(Domain domain, Type type, Protocol protocol);
- std::pair<s32, Errno> PollImpl(std::vector<u8>& write_buffer, std::vector<u8> read_buffer,
+ std::pair<s32, Errno> PollImpl(std::vector<u8>& write_buffer, std::span<const u8> read_buffer,
s32 nfds, s32 timeout);
std::pair<s32, Errno> AcceptImpl(s32 fd, std::vector<u8>& write_buffer);
- Errno BindImpl(s32 fd, const std::vector<u8>& addr);
- Errno ConnectImpl(s32 fd, const std::vector<u8>& addr);
+ Errno BindImpl(s32 fd, std::span<const u8> addr);
+ Errno ConnectImpl(s32 fd, std::span<const u8> addr);
Errno GetPeerNameImpl(s32 fd, std::vector<u8>& write_buffer);
Errno GetSockNameImpl(s32 fd, std::vector<u8>& write_buffer);
Errno ListenImpl(s32 fd, s32 backlog);
@@ -157,9 +158,9 @@ private:
std::pair<s32, Errno> RecvImpl(s32 fd, u32 flags, std::vector<u8>& message);
std::pair<s32, Errno> RecvFromImpl(s32 fd, u32 flags, std::vector<u8>& message,
std::vector<u8>& addr);
- std::pair<s32, Errno> SendImpl(s32 fd, u32 flags, const std::vector<u8>& message);
- std::pair<s32, Errno> SendToImpl(s32 fd, u32 flags, const std::vector<u8>& message,
- const std::vector<u8>& addr);
+ std::pair<s32, Errno> SendImpl(s32 fd, u32 flags, std::span<const u8> message);
+ std::pair<s32, Errno> SendToImpl(s32 fd, u32 flags, std::span<const u8> message,
+ std::span<const u8> addr);
Errno CloseImpl(s32 fd);
s32 FindFreeFileDescriptorHandle() noexcept;
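
The bsd.cpp and bsd.h hunks above replace owning std::vector<u8> parameters and members with non-owning std::span<const u8> views, so the IPC read buffer no longer has to be copied before it reaches the Impl helpers. A minimal standalone sketch of why the existing call sites keep compiling (HexDump and Example are hypothetical names, not part of this change); the one caveat is that a stored span, like the ones now held by the Work structs, is only valid while the underlying storage, here the request's read buffer, stays alive:

    #include <array>
    #include <cstdint>
    #include <cstdio>
    #include <span>
    #include <vector>

    using u8 = std::uint8_t; // stand-in for common/common_types.h

    // Read-only, non-owning view: callers can pass vectors, arrays, or
    // subranges without allocating or copying.
    void HexDump(std::span<const u8> data) {
        for (const u8 byte : data) {
            std::printf("%02x", static_cast<unsigned>(byte));
        }
        std::printf("\n");
    }

    void Example() {
        const std::vector<u8> from_vector{0x12, 0x34};
        const std::array<u8, 2> from_array{0x56, 0x78};
        HexDump(from_vector);                     // converts implicitly from a vector
        HexDump(from_array);                      // converts implicitly from an array
        HexDump(std::span{from_vector}.first(1)); // cheap subview, still no copy
    }
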
diff --git a/src/core/hle/service/sockets/sfdnsres.cpp b/src/core/hle/service/sockets/sfdnsres.cpp
index 097c37d7a..e96eda7f3 100644
--- a/src/core/hle/service/sockets/sfdnsres.cpp
+++ b/src/core/hle/service/sockets/sfdnsres.cpp
@@ -243,4 +243,4 @@ void SFDNSRES::GetAddrInfoRequestWithOptions(Kernel::HLERequestContext& ctx) {
rb.Push(0);
}
-} // namespace Service::Sockets
\ No newline at end of file
+} // namespace Service::Sockets
diff --git a/src/core/hle/service/ssl/ssl.cpp b/src/core/hle/service/ssl/ssl.cpp
index 3735e0452..dcf47083f 100644
--- a/src/core/hle/service/ssl/ssl.cpp
+++ b/src/core/hle/service/ssl/ssl.cpp
@@ -101,7 +101,7 @@ private:
void ImportServerPki(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto certificate_format = rp.PopEnum<CertificateFormat>();
- const auto pkcs_12_certificates = ctx.ReadBuffer(0);
+ [[maybe_unused]] const auto pkcs_12_certificates = ctx.ReadBuffer(0);
constexpr u64 server_id = 0;
@@ -113,13 +113,13 @@ private:
}
void ImportClientPki(Kernel::HLERequestContext& ctx) {
- const auto pkcs_12_certificate = ctx.ReadBuffer(0);
- const auto ascii_password = [&ctx] {
+ [[maybe_unused]] const auto pkcs_12_certificate = ctx.ReadBuffer(0);
+ [[maybe_unused]] const auto ascii_password = [&ctx] {
if (ctx.CanReadBuffer(1)) {
return ctx.ReadBuffer(1);
}
- return std::vector<u8>{};
+ return std::span<const u8>{};
}();
constexpr u64 client_id = 0;
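
The ImportClientPki fallback works because a default-constructed std::span<const u8> is a valid empty view (null data, zero size), the span analogue of the empty std::vector<u8> the old lambda returned, so both branches of the lambda agree on the return type now that ctx.ReadBuffer() hands out a span. A small sketch of the same pattern (ReadOptionalBuffer is a hypothetical helper, not yuzu API):

    #include <cstdint>
    #include <span>

    using u8 = std::uint8_t;

    // Returns the optional payload if it is present, otherwise an empty view;
    // neither branch allocates.
    std::span<const u8> ReadOptionalBuffer(bool present, std::span<const u8> payload) {
        return present ? payload : std::span<const u8>{};
    }
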
diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp
index bb283e74e..2fb631183 100644
--- a/src/core/hle/service/vi/vi.cpp
+++ b/src/core/hle/service/vi/vi.cpp
@@ -603,7 +603,7 @@ private:
return;
}
- const auto parcel = android::Parcel{NativeWindow{*buffer_queue_id}};
+ const auto parcel = android::OutputParcel{NativeWindow{*buffer_queue_id}};
const auto buffer_size = ctx.WriteBuffer(parcel.Serialize());
IPC::ResponseBuilder rb{ctx, 4};
@@ -649,7 +649,7 @@ private:
return;
}
- const auto parcel = android::Parcel{NativeWindow{*buffer_queue_id}};
+ const auto parcel = android::OutputParcel{NativeWindow{*buffer_queue_id}};
const auto buffer_size = ctx.WriteBuffer(parcel.Serialize());
IPC::ResponseBuilder rb{ctx, 6};
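
The vi.cpp hunks only swap the type name at the call sites; the parcel is still constructed from a NativeWindow and flattened with Serialize() before being handed to ctx.WriteBuffer(). A hedged sketch of that write-side pattern, assuming android::OutputParcel keeps the construct-then-Serialize interface shown above (the MakeParcelBytes helper is hypothetical):

    // Assumes the yuzu vi/nvnflinger headers that declare android::OutputParcel.
    // Builds a write-side parcel around a flattenable object and returns its
    // serialized bytes, as the two call sites above do with NativeWindow.
    template <typename Flattenable>
    auto MakeParcelBytes(const Flattenable& object) {
        const android::OutputParcel parcel{object};
        return parcel.Serialize();
    }
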