Diffstat
-rw-r--r-- src/CMakeLists.txt | 11
-rw-r--r-- src/audio_core/device/device_session.cpp | 7
-rw-r--r-- src/audio_core/renderer/adsp/adsp.cpp | 2
-rw-r--r-- src/audio_core/renderer/adsp/audio_renderer.cpp | 2
-rw-r--r-- src/audio_core/renderer/adsp/audio_renderer.h | 1
-rw-r--r-- src/audio_core/renderer/adsp/command_list_processor.cpp | 2
-rw-r--r-- src/audio_core/renderer/system.cpp | 3
-rw-r--r-- src/audio_core/renderer/system_manager.cpp | 19
-rw-r--r-- src/audio_core/renderer/system_manager.h | 7
-rw-r--r-- src/audio_core/sink/cubeb_sink.cpp | 4
-rw-r--r-- src/audio_core/sink/sdl2_sink.cpp | 2
-rw-r--r-- src/audio_core/sink/sink_stream.cpp | 67
-rw-r--r-- src/audio_core/sink/sink_stream.h | 29
-rw-r--r-- src/common/CMakeLists.txt | 4
-rw-r--r-- src/common/bounded_threadsafe_queue.h | 319
-rw-r--r-- src/common/container_hash.h | 92
-rw-r--r-- src/common/intrusive_red_black_tree.h | 8
-rw-r--r-- src/common/logging/backend.cpp | 16
-rw-r--r-- src/common/range_map.h | 6
-rw-r--r-- src/common/string_util.cpp | 14
-rw-r--r-- src/common/string_util.h | 8
-rw-r--r-- src/common/telemetry.cpp | 1
-rw-r--r-- src/common/typed_address.h | 315
-rw-r--r-- src/common/x64/cpu_detect.cpp | 1
-rw-r--r-- src/common/x64/cpu_detect.h | 1
-rw-r--r-- src/common/x64/cpu_wait.cpp | 69
-rw-r--r-- src/common/x64/cpu_wait.h | 10
-rw-r--r-- src/common/x64/native_clock.cpp | 13
-rw-r--r-- src/common/zstd_compression.cpp | 2
-rw-r--r-- src/core/CMakeLists.txt | 1
-rw-r--r-- src/core/arm/arm_interface.cpp | 12
-rw-r--r-- src/core/arm/arm_interface.h | 8
-rw-r--r-- src/core/arm/dynarmic/arm_dynarmic_32.cpp | 35
-rw-r--r-- src/core/arm/dynarmic/arm_dynarmic_32.h | 6
-rw-r--r-- src/core/arm/dynarmic/arm_dynarmic_64.cpp | 14
-rw-r--r-- src/core/arm/dynarmic/arm_dynarmic_64.h | 6
-rw-r--r-- src/core/core.cpp | 10
-rw-r--r-- src/core/core.h | 8
-rw-r--r-- src/core/core_timing.cpp | 8
-rw-r--r-- src/core/debugger/gdbstub.cpp | 70
-rw-r--r-- src/core/device_memory.h | 14
-rw-r--r-- src/core/frontend/applets/applet.h | 14
-rw-r--r-- src/core/frontend/applets/cabinet.cpp | 2
-rw-r--r-- src/core/frontend/applets/cabinet.h | 4
-rw-r--r-- src/core/frontend/applets/controller.cpp | 4
-rw-r--r-- src/core/frontend/applets/controller.h | 6
-rw-r--r-- src/core/frontend/applets/error.cpp | 2
-rw-r--r-- src/core/frontend/applets/error.h | 4
-rw-r--r-- src/core/frontend/applets/general_frontend.cpp | 4
-rw-r--r-- src/core/frontend/applets/general_frontend.h | 8
-rw-r--r-- src/core/frontend/applets/mii_edit.cpp | 2
-rw-r--r-- src/core/frontend/applets/mii_edit.h | 5
-rw-r--r-- src/core/frontend/applets/profile_select.cpp | 5
-rw-r--r-- src/core/frontend/applets/profile_select.h | 19
-rw-r--r-- src/core/frontend/applets/software_keyboard.cpp | 2
-rw-r--r-- src/core/frontend/applets/software_keyboard.h | 5
-rw-r--r-- src/core/frontend/applets/web_browser.cpp | 2
-rw-r--r-- src/core/frontend/applets/web_browser.h | 5
-rw-r--r-- src/core/hle/kernel/board/nintendo/nx/k_memory_layout.cpp | 12
-rw-r--r-- src/core/hle/kernel/board/nintendo/nx/k_memory_layout.h | 4
-rw-r--r-- src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp | 12
-rw-r--r-- src/core/hle/kernel/board/nintendo/nx/k_system_control.h | 4
-rw-r--r-- src/core/hle/kernel/code_set.h | 6
-rw-r--r-- src/core/hle/kernel/init/init_slab_setup.cpp | 21
-rw-r--r-- src/core/hle/kernel/initial_process.h | 2
-rw-r--r-- src/core/hle/kernel/k_address_arbiter.cpp | 88
-rw-r--r-- src/core/hle/kernel/k_address_arbiter.h | 14
-rw-r--r-- src/core/hle/kernel/k_client_session.cpp | 3
-rw-r--r-- src/core/hle/kernel/k_code_memory.cpp | 11
-rw-r--r-- src/core/hle/kernel/k_code_memory.h | 16
-rw-r--r-- src/core/hle/kernel/k_condition_variable.cpp | 55
-rw-r--r-- src/core/hle/kernel/k_condition_variable.h | 8
-rw-r--r-- src/core/hle/kernel/k_device_address_space.cpp | 8
-rw-r--r-- src/core/hle/kernel/k_device_address_space.h | 13
-rw-r--r-- src/core/hle/kernel/k_dynamic_page_manager.h | 19
-rw-r--r-- src/core/hle/kernel/k_dynamic_slab_heap.h | 6
-rw-r--r-- src/core/hle/kernel/k_memory_block.h | 22
-rw-r--r-- src/core/hle/kernel/k_memory_block_manager.cpp | 68
-rw-r--r-- src/core/hle/kernel/k_memory_block_manager.h | 32
-rw-r--r-- src/core/hle/kernel/k_memory_layout.cpp | 13
-rw-r--r-- src/core/hle/kernel/k_memory_layout.h | 78
-rw-r--r-- src/core/hle/kernel/k_memory_manager.cpp | 51
-rw-r--r-- src/core/hle/kernel/k_memory_manager.h | 48
-rw-r--r-- src/core/hle/kernel/k_memory_region.h | 8
-rw-r--r-- src/core/hle/kernel/k_page_buffer.cpp | 4
-rw-r--r-- src/core/hle/kernel/k_page_buffer.h | 2
-rw-r--r-- src/core/hle/kernel/k_page_group.h | 2
-rw-r--r-- src/core/hle/kernel/k_page_heap.cpp | 40
-rw-r--r-- src/core/hle/kernel/k_page_heap.h | 48
-rw-r--r-- src/core/hle/kernel/k_page_table.cpp | 406
-rw-r--r-- src/core/hle/kernel/k_page_table.h | 243
-rw-r--r-- src/core/hle/kernel/k_page_table_manager.h | 14
-rw-r--r-- src/core/hle/kernel/k_page_table_slab_heap.h | 12
-rw-r--r-- src/core/hle/kernel/k_process.cpp | 53
-rw-r--r-- src/core/hle/kernel/k_process.h | 50
-rw-r--r-- src/core/hle/kernel/k_scheduler.cpp | 2
-rw-r--r-- src/core/hle/kernel/k_server_session.cpp | 4
-rw-r--r-- src/core/hle/kernel/k_session_request.cpp | 15
-rw-r--r-- src/core/hle/kernel/k_session_request.h | 55
-rw-r--r-- src/core/hle/kernel/k_shared_memory.cpp | 5
-rw-r--r-- src/core/hle/kernel/k_shared_memory.h | 8
-rw-r--r-- src/core/hle/kernel/k_system_resource.h | 2
-rw-r--r-- src/core/hle/kernel/k_thread.cpp | 39
-rw-r--r-- src/core/hle/kernel/k_thread.h | 62
-rw-r--r-- src/core/hle/kernel/k_thread_local_page.cpp | 6
-rw-r--r-- src/core/hle/kernel/k_thread_local_page.h | 25
-rw-r--r-- src/core/hle/kernel/k_transfer_memory.cpp | 2
-rw-r--r-- src/core/hle/kernel/k_transfer_memory.h | 6
-rw-r--r-- src/core/hle/kernel/k_typed_address.h | 12
-rw-r--r-- src/core/hle/kernel/kernel.cpp | 112
-rw-r--r-- src/core/hle/kernel/kernel.h | 3
-rw-r--r-- src/core/hle/kernel/memory_types.h | 4
-rw-r--r-- src/core/hle/kernel/svc/svc_address_arbiter.cpp | 8
-rw-r--r-- src/core/hle/kernel/svc/svc_cache.cpp | 2
-rw-r--r-- src/core/hle/kernel/svc/svc_code_memory.cpp | 4
-rw-r--r-- src/core/hle/kernel/svc/svc_condition_variable.cpp | 4
-rw-r--r-- src/core/hle/kernel/svc/svc_debug_string.cpp | 5
-rw-r--r-- src/core/hle/kernel/svc/svc_exception.cpp | 4
-rw-r--r-- src/core/hle/kernel/svc/svc_info.cpp | 10
-rw-r--r-- src/core/hle/kernel/svc/svc_ipc.cpp | 6
-rw-r--r-- src/core/hle/kernel/svc/svc_lock.cpp | 4
-rw-r--r-- src/core/hle/kernel/svc/svc_memory.cpp | 13
-rw-r--r-- src/core/hle/kernel/svc/svc_physical_memory.cpp | 6
-rw-r--r-- src/core/hle/kernel/svc/svc_port.cpp | 8
-rw-r--r-- src/core/hle/kernel/svc/svc_process.cpp | 4
-rw-r--r-- src/core/hle/kernel/svc/svc_process_memory.cpp | 12
-rw-r--r-- src/core/hle/kernel/svc/svc_query_memory.cpp | 6
-rw-r--r-- src/core/hle/kernel/svc/svc_shared_memory.cpp | 4
-rw-r--r-- src/core/hle/kernel/svc/svc_synchronization.cpp | 5
-rw-r--r-- src/core/hle/kernel/svc/svc_thread.cpp | 12
-rw-r--r-- src/core/hle/kernel/svc/svc_transfer_memory.cpp | 2
-rw-r--r-- src/core/hle/kernel/svc_types.h | 4
-rw-r--r-- src/core/hle/service/am/am.cpp | 17
-rw-r--r-- src/core/hle/service/am/applets/applet_cabinet.cpp | 5
-rw-r--r-- src/core/hle/service/am/applets/applet_cabinet.h | 1
-rw-r--r-- src/core/hle/service/am/applets/applet_controller.cpp | 17
-rw-r--r-- src/core/hle/service/am/applets/applet_controller.h | 10
-rw-r--r-- src/core/hle/service/am/applets/applet_error.cpp | 5
-rw-r--r-- src/core/hle/service/am/applets/applet_error.h | 1
-rw-r--r-- src/core/hle/service/am/applets/applet_general_backend.cpp | 15
-rw-r--r-- src/core/hle/service/am/applets/applet_general_backend.h | 3
-rw-r--r-- src/core/hle/service/am/applets/applet_mii_edit.cpp | 5
-rw-r--r-- src/core/hle/service/am/applets/applet_mii_edit.h | 1
-rw-r--r-- src/core/hle/service/am/applets/applet_profile_select.cpp | 57
-rw-r--r-- src/core/hle/service/am/applets/applet_profile_select.h | 103
-rw-r--r-- src/core/hle/service/am/applets/applet_software_keyboard.cpp | 5
-rw-r--r-- src/core/hle/service/am/applets/applet_software_keyboard.h | 1
-rw-r--r-- src/core/hle/service/am/applets/applet_web_browser.cpp | 5
-rw-r--r-- src/core/hle/service/am/applets/applet_web_browser.h | 1
-rw-r--r-- src/core/hle/service/am/applets/applets.h | 1
-rw-r--r-- src/core/hle/service/hid/controllers/console_sixaxis.cpp | 5
-rw-r--r-- src/core/hle/service/hid/controllers/console_sixaxis.h | 6
-rw-r--r-- src/core/hle/service/hid/controllers/npad.cpp | 41
-rw-r--r-- src/core/hle/service/hid/controllers/npad.h | 4
-rw-r--r-- src/core/hle/service/hid/controllers/palma.cpp | 2
-rw-r--r-- src/core/hle/service/hid/controllers/palma.h | 6
-rw-r--r-- src/core/hle/service/hid/hid.cpp | 41
-rw-r--r-- src/core/hle/service/hid/hid.h | 1
-rw-r--r-- src/core/hle/service/hid/hidbus/hidbus_base.cpp | 2
-rw-r--r-- src/core/hle/service/hid/hidbus/hidbus_base.h | 6
-rw-r--r-- src/core/hle/service/hid/hidbus/ringcon.cpp | 4
-rw-r--r-- src/core/hle/service/hid/irsensor/image_transfer_processor.cpp | 20
-rw-r--r-- src/core/hle/service/hid/irsensor/image_transfer_processor.h | 6
-rw-r--r-- src/core/hle/service/hle_ipc.cpp | 3
-rw-r--r-- src/core/hle/service/jit/jit.cpp | 10
-rw-r--r-- src/core/hle/service/ldr/ldr.cpp | 15
-rw-r--r-- src/core/hle/service/mii/mii_manager.cpp | 35
-rw-r--r-- src/core/hle/service/mii/mii_manager.h | 7
-rw-r--r-- src/core/hle/service/mii/types.h | 60
-rw-r--r-- src/core/hle/service/nfc/nfc_device.cpp | 16
-rw-r--r-- src/core/hle/service/nfc/nfc_device.h | 1
-rw-r--r-- src/core/hle/service/nfp/amiibo_crypto.cpp | 10
-rw-r--r-- src/core/hle/service/nfp/amiibo_crypto.h | 3
-rw-r--r-- src/core/hle/service/nfp/nfp_device.cpp | 139
-rw-r--r-- src/core/hle/service/nfp/nfp_device.h | 4
-rw-r--r-- src/core/hle/service/nfp/nfp_types.h | 13
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp | 4
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp | 4
-rw-r--r-- src/core/internal_network/socket_proxy.h | 3
-rw-r--r-- src/core/internal_network/sockets.h | 13
-rw-r--r-- src/core/loader/deconstructed_rom_directory.cpp | 2
-rw-r--r-- src/core/loader/kip.cpp | 2
-rw-r--r-- src/core/loader/nso.cpp | 2
-rw-r--r-- src/core/memory.cpp | 327
-rw-r--r-- src/core/memory.h | 122
-rw-r--r-- src/core/memory/cheat_engine.cpp | 10
-rw-r--r-- src/core/reporter.cpp | 6
-rw-r--r-- src/tests/CMakeLists.txt | 1
-rw-r--r-- src/tests/common/container_hash.cpp | 44
-rw-r--r-- src/tests/common/range_map.cpp | 20
-rw-r--r-- src/video_core/buffer_cache/buffer_cache.h | 2
-rw-r--r-- src/video_core/gpu_thread.cpp | 6
-rw-r--r-- src/video_core/gpu_thread.h | 4
-rw-r--r-- src/video_core/macro/macro.cpp | 6
-rw-r--r-- src/video_core/memory_manager.cpp | 36
-rw-r--r-- src/video_core/memory_manager.h | 12
-rw-r--r-- src/video_core/renderer_vulkan/maxwell_to_vk.cpp | 7
-rw-r--r-- src/video_core/renderer_vulkan/vk_scheduler.cpp | 85
-rw-r--r-- src/video_core/renderer_vulkan/vk_scheduler.h | 6
-rw-r--r-- src/video_core/texture_cache/format_lookup_table.cpp | 62
-rw-r--r-- src/video_core/texture_cache/texture_cache.h | 95
-rw-r--r-- src/video_core/textures/texture.cpp | 14
-rw-r--r-- src/video_core/textures/texture.h | 70
-rw-r--r-- src/video_core/video_core.cpp | 2
-rw-r--r-- src/video_core/vulkan_common/vulkan_device.cpp | 6
-rw-r--r-- src/video_core/vulkan_common/vulkan_wrapper.cpp | 2
-rw-r--r-- src/web_service/verify_login.cpp | 2
-rw-r--r-- src/yuzu/applets/qt_amiibo_settings.cpp | 11
-rw-r--r-- src/yuzu/applets/qt_amiibo_settings.h | 2
-rw-r--r-- src/yuzu/applets/qt_controller.cpp | 17
-rw-r--r-- src/yuzu/applets/qt_controller.h | 4
-rw-r--r-- src/yuzu/applets/qt_controller.ui | 8
-rw-r--r-- src/yuzu/applets/qt_error.cpp | 11
-rw-r--r-- src/yuzu/applets/qt_error.h | 2
-rw-r--r-- src/yuzu/applets/qt_profile_select.cpp | 96
-rw-r--r-- src/yuzu/applets/qt_profile_select.h | 13
-rw-r--r-- src/yuzu/applets/qt_software_keyboard.h | 4
-rw-r--r-- src/yuzu/applets/qt_web_browser.cpp | 11
-rw-r--r-- src/yuzu/applets/qt_web_browser.h | 2
-rw-r--r-- src/yuzu/configuration/configure_general.ui | 2
-rw-r--r-- src/yuzu/configuration/configure_input.cpp | 3
-rw-r--r-- src/yuzu/main.cpp | 183
-rw-r--r-- src/yuzu/main.h | 21
-rw-r--r-- src/yuzu_cmd/default_ini.h | 2
224 files changed, 3424 insertions, 1959 deletions
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 0eca8e90e..312a49f42 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -126,6 +126,17 @@ else()
add_compile_options("-stdlib=libc++")
endif()
+ # GCC bugs
+ if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL "12" AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
+ # These diagnostics would be great if they worked, but are just completely broken
+ # and produce bogus errors on external libraries like fmt.
+ add_compile_options(
+ -Wno-array-bounds
+ -Wno-stringop-overread
+ -Wno-stringop-overflow
+ )
+ endif()
+
# Set file offset size to 64 bits.
#
# On modern Unixes, this is typically already the case. The lone exception is
diff --git a/src/audio_core/device/device_session.cpp b/src/audio_core/device/device_session.cpp
index 5a327a606..b5c0ef0e6 100644
--- a/src/audio_core/device/device_session.cpp
+++ b/src/audio_core/device/device_session.cpp
@@ -93,7 +93,7 @@ void DeviceSession::AppendBuffers(std::span<const AudioBuffer> buffers) const {
stream->AppendBuffer(new_buffer, samples);
} else {
std::vector<s16> samples(buffer.size / sizeof(s16));
- system.Memory().ReadBlockUnsafe(buffer.samples, samples.data(), buffer.size);
+ system.ApplicationMemory().ReadBlockUnsafe(buffer.samples, samples.data(), buffer.size);
stream->AppendBuffer(new_buffer, samples);
}
}
@@ -102,7 +102,7 @@ void DeviceSession::AppendBuffers(std::span<const AudioBuffer> buffers) const {
void DeviceSession::ReleaseBuffer(const AudioBuffer& buffer) const {
if (type == Sink::StreamType::In) {
auto samples{stream->ReleaseBuffer(buffer.size / sizeof(s16))};
- system.Memory().WriteBlockUnsafe(buffer.samples, samples.data(), buffer.size);
+ system.ApplicationMemory().WriteBlockUnsafe(buffer.samples, samples.data(), buffer.size);
}
}
@@ -121,8 +121,7 @@ u64 DeviceSession::GetPlayedSampleCount() const {
}
std::optional<std::chrono::nanoseconds> DeviceSession::ThreadFunc() {
- // Add 5ms of samples at a 48K sample rate.
- played_sample_count += 48'000 * INCREMENT_TIME / 1s;
+ played_sample_count = stream->GetExpectedPlayedSampleCount();
if (type == Sink::StreamType::Out) {
system.AudioCore().GetAudioManager().SetEvent(Event::Type::AudioOutManager, true);
} else {
diff --git a/src/audio_core/renderer/adsp/adsp.cpp b/src/audio_core/renderer/adsp/adsp.cpp
index a28395663..74772fc50 100644
--- a/src/audio_core/renderer/adsp/adsp.cpp
+++ b/src/audio_core/renderer/adsp/adsp.cpp
@@ -13,7 +13,7 @@
namespace AudioCore::AudioRenderer::ADSP {
ADSP::ADSP(Core::System& system_, Sink::Sink& sink_)
- : system{system_}, memory{system.Memory()}, sink{sink_} {}
+ : system{system_}, memory{system.ApplicationMemory()}, sink{sink_} {}
ADSP::~ADSP() {
ClearCommandBuffers();
diff --git a/src/audio_core/renderer/adsp/audio_renderer.cpp b/src/audio_core/renderer/adsp/audio_renderer.cpp
index 42b4b167a..503f40349 100644
--- a/src/audio_core/renderer/adsp/audio_renderer.cpp
+++ b/src/audio_core/renderer/adsp/audio_renderer.cpp
@@ -189,6 +189,8 @@ void AudioRenderer::ThreadFunc() {
max_time = std::min(command_buffer.time_limit, max_time);
command_list_processor.SetProcessTimeMax(max_time);
+ streams[index]->WaitFreeSpace();
+
// Process the command list
{
MICROPROFILE_SCOPE(Audio_Renderer);
diff --git a/src/audio_core/renderer/adsp/audio_renderer.h b/src/audio_core/renderer/adsp/audio_renderer.h
index 151f38c1b..85ce6a269 100644
--- a/src/audio_core/renderer/adsp/audio_renderer.h
+++ b/src/audio_core/renderer/adsp/audio_renderer.h
@@ -10,6 +10,7 @@
#include "audio_core/renderer/adsp/command_buffer.h"
#include "audio_core/renderer/adsp/command_list_processor.h"
#include "common/common_types.h"
+#include "common/polyfill_thread.h"
#include "common/reader_writer_queue.h"
#include "common/thread.h"
diff --git a/src/audio_core/renderer/adsp/command_list_processor.cpp b/src/audio_core/renderer/adsp/command_list_processor.cpp
index e3bf2d7ec..7a300d216 100644
--- a/src/audio_core/renderer/adsp/command_list_processor.cpp
+++ b/src/audio_core/renderer/adsp/command_list_processor.cpp
@@ -17,7 +17,7 @@ namespace AudioCore::AudioRenderer::ADSP {
void CommandListProcessor::Initialize(Core::System& system_, CpuAddr buffer, u64 size,
Sink::SinkStream* stream_) {
system = &system_;
- memory = &system->Memory();
+ memory = &system->ApplicationMemory();
stream = stream_;
header = reinterpret_cast<CommandListHeader*>(buffer);
commands = reinterpret_cast<u8*>(buffer + sizeof(CommandListHeader));
diff --git a/src/audio_core/renderer/system.cpp b/src/audio_core/renderer/system.cpp
index 28f063641..ad869facb 100644
--- a/src/audio_core/renderer/system.cpp
+++ b/src/audio_core/renderer/system.cpp
@@ -127,8 +127,7 @@ Result System::Initialize(const AudioRendererParameterInternal& params,
render_device = params.rendering_device;
execution_mode = params.execution_mode;
- core.Memory().ZeroBlock(*core.ApplicationProcess(), transfer_memory->GetSourceAddress(),
- transfer_memory_size);
+ core.ApplicationMemory().ZeroBlock(transfer_memory->GetSourceAddress(), transfer_memory_size);
// Note: We're not actually using the transfer memory because it's a pain to code for.
// Allocate the memory normally instead and hope the game doesn't try to read anything back
diff --git a/src/audio_core/renderer/system_manager.cpp b/src/audio_core/renderer/system_manager.cpp
index ce631f810..07d8ed093 100644
--- a/src/audio_core/renderer/system_manager.cpp
+++ b/src/audio_core/renderer/system_manager.cpp
@@ -15,14 +15,9 @@ MICROPROFILE_DEFINE(Audio_RenderSystemManager, "Audio", "Render System Manager",
MP_RGB(60, 19, 97));
namespace AudioCore::AudioRenderer {
-constexpr std::chrono::nanoseconds RENDER_TIME{5'000'000UL};
SystemManager::SystemManager(Core::System& core_)
- : core{core_}, adsp{core.AudioCore().GetADSP()}, mailbox{adsp.GetRenderMailbox()},
- thread_event{Core::Timing::CreateEvent(
- "AudioRendererSystemManager", [this](std::uintptr_t, s64 time, std::chrono::nanoseconds) {
- return ThreadFunc2(time);
- })} {}
+ : core{core_}, adsp{core.AudioCore().GetADSP()}, mailbox{adsp.GetRenderMailbox()} {}
SystemManager::~SystemManager() {
Stop();
@@ -33,8 +28,6 @@ bool SystemManager::InitializeUnsafe() {
if (adsp.Start()) {
active = true;
thread = std::jthread([this](std::stop_token stop_token) { ThreadFunc(); });
- core.CoreTiming().ScheduleLoopingEvent(std::chrono::nanoseconds(0), RENDER_TIME,
- thread_event);
}
}
@@ -45,7 +38,6 @@ void SystemManager::Stop() {
if (!active) {
return;
}
- core.CoreTiming().UnscheduleEvent(thread_event, {});
active = false;
update.store(true);
update.notify_all();
@@ -111,16 +103,7 @@ void SystemManager::ThreadFunc() {
adsp.Signal();
adsp.Wait();
-
- update.wait(false);
- update.store(false);
}
}
-std::optional<std::chrono::nanoseconds> SystemManager::ThreadFunc2(s64 time) {
- update.store(true);
- update.notify_all();
- return std::nullopt;
-}
-
} // namespace AudioCore::AudioRenderer
diff --git a/src/audio_core/renderer/system_manager.h b/src/audio_core/renderer/system_manager.h
index 415ddb74f..1f0bbd8b4 100644
--- a/src/audio_core/renderer/system_manager.h
+++ b/src/audio_core/renderer/system_manager.h
@@ -68,11 +68,6 @@ private:
*/
void ThreadFunc();
- /**
- * Signalling core timing thread to run ThreadFunc.
- */
- std::optional<std::chrono::nanoseconds> ThreadFunc2(s64 time);
-
enum class StreamState {
Filling,
Steady,
@@ -95,8 +90,6 @@ private:
ADSP::ADSP& adsp;
/// AudioRenderer mailbox for communication
ADSP::AudioRenderer_Mailbox* mailbox{};
- /// Core timing event to signal main thread
- std::shared_ptr<Core::Timing::EventType> thread_event;
/// Atomic for main thread to wait on
std::atomic<bool> update{};
};
diff --git a/src/audio_core/sink/cubeb_sink.cpp b/src/audio_core/sink/cubeb_sink.cpp
index 9133f5388..9a0801888 100644
--- a/src/audio_core/sink/cubeb_sink.cpp
+++ b/src/audio_core/sink/cubeb_sink.cpp
@@ -101,8 +101,6 @@ public:
~CubebSinkStream() override {
LOG_DEBUG(Service_Audio, "Destructing cubeb stream {}", name);
- Unstall();
-
if (!ctx) {
return;
}
@@ -143,8 +141,6 @@ public:
* Stop the sink stream.
*/
void Stop() override {
- Unstall();
-
if (!ctx || paused) {
return;
}
diff --git a/src/audio_core/sink/sdl2_sink.cpp b/src/audio_core/sink/sdl2_sink.cpp
index c138dc628..ee1a0652f 100644
--- a/src/audio_core/sink/sdl2_sink.cpp
+++ b/src/audio_core/sink/sdl2_sink.cpp
@@ -88,7 +88,6 @@ public:
* Finalize the sink stream.
*/
void Finalize() override {
- Unstall();
if (device == 0) {
return;
}
@@ -116,7 +115,6 @@ public:
* Stop the sink stream.
*/
void Stop() override {
- Unstall();
if (device == 0 || paused) {
return;
}
diff --git a/src/audio_core/sink/sink_stream.cpp b/src/audio_core/sink/sink_stream.cpp
index 39a21b0f0..13ba26e74 100644
--- a/src/audio_core/sink/sink_stream.cpp
+++ b/src/audio_core/sink/sink_stream.cpp
@@ -14,6 +14,8 @@
#include "common/fixed_point.h"
#include "common/settings.h"
#include "core/core.h"
+#include "core/core_timing.h"
+#include "core/core_timing_util.h"
namespace AudioCore::Sink {
@@ -149,10 +151,6 @@ void SinkStream::ProcessAudioIn(std::span<const s16> input_buffer, std::size_t n
return;
}
- if (queued_buffers > max_queue_size) {
- Stall();
- }
-
while (frames_written < num_frames) {
// If the playing buffer has been consumed or has no frames, we need a new one
if (playing_buffer.consumed || playing_buffer.frames == 0) {
@@ -187,10 +185,6 @@ void SinkStream::ProcessAudioIn(std::span<const s16> input_buffer, std::size_t n
}
std::memcpy(&last_frame[0], &input_buffer[(frames_written - 1) * frame_size], frame_size_bytes);
-
- if (queued_buffers <= max_queue_size) {
- Unstall();
- }
}
void SinkStream::ProcessAudioOutAndRender(std::span<s16> output_buffer, std::size_t num_frames) {
@@ -198,10 +192,15 @@ void SinkStream::ProcessAudioOutAndRender(std::span<s16> output_buffer, std::siz
const std::size_t frame_size = num_channels;
const std::size_t frame_size_bytes = frame_size * sizeof(s16);
size_t frames_written{0};
+ size_t actual_frames_written{0};
// If we're paused or going to shut down, we don't want to consume buffers as coretiming is
// paused and we'll desync, so just play silence.
if (system.IsPaused() || system.IsShuttingDown()) {
+ if (system.IsShuttingDown()) {
+ release_cv.notify_one();
+ }
+
static constexpr std::array<s16, 6> silence{};
for (size_t i = frames_written; i < num_frames; i++) {
std::memcpy(&output_buffer[i * frame_size], &silence[0], frame_size_bytes);
@@ -209,20 +208,6 @@ void SinkStream::ProcessAudioOutAndRender(std::span<s16> output_buffer, std::siz
return;
}
- // Due to many frames being queued up with nvdec (5 frames or so?), a lot of buffers also get
- // queued up (30+) but not all at once, which causes constant stalling here, so just let the
- // video play out without attempting to stall.
- // Can hopefully remove this later with a more complete NVDEC implementation.
- const auto nvdec_active{system.AudioCore().IsNVDECActive()};
-
- // Core timing cannot be paused in single-core mode, so Stall ends up being called over and over
- // and never recovers to a normal state, so just skip attempting to sync things on single-core.
- if (system.IsMulticore() && !nvdec_active && queued_buffers > max_queue_size) {
- Stall();
- } else if (system.IsMulticore() && queued_buffers <= max_queue_size) {
- Unstall();
- }
-
while (frames_written < num_frames) {
// If the playing buffer has been consumed or has no frames, we need a new one
if (playing_buffer.consumed || playing_buffer.frames == 0) {
@@ -237,6 +222,10 @@ void SinkStream::ProcessAudioOutAndRender(std::span<s16> output_buffer, std::siz
}
// Successfully dequeued a new buffer.
queued_buffers--;
+
+ { std::unique_lock lk{release_mutex}; }
+
+ release_cv.notify_one();
}
// Get the minimum frames available between the currently playing buffer, and the
@@ -248,6 +237,7 @@ void SinkStream::ProcessAudioOutAndRender(std::span<s16> output_buffer, std::siz
frames_available * frame_size);
frames_written += frames_available;
+ actual_frames_written += frames_available;
playing_buffer.frames_played += frames_available;
// If that's all the frames in the current buffer, add its samples and mark it as
@@ -260,26 +250,29 @@ void SinkStream::ProcessAudioOutAndRender(std::span<s16> output_buffer, std::siz
std::memcpy(&last_frame[0], &output_buffer[(frames_written - 1) * frame_size],
frame_size_bytes);
- if (system.IsMulticore() && queued_buffers <= max_queue_size) {
- Unstall();
+ {
+ std::scoped_lock lk{sample_count_lock};
+ last_sample_count_update_time = system.CoreTiming().GetGlobalTimeNs();
+ min_played_sample_count = max_played_sample_count;
+ max_played_sample_count += actual_frames_written;
}
}
-void SinkStream::Stall() {
- std::scoped_lock lk{stall_guard};
- if (stalled_lock) {
- return;
- }
- stalled_lock = system.StallApplication();
+u64 SinkStream::GetExpectedPlayedSampleCount() {
+ std::scoped_lock lk{sample_count_lock};
+ auto cur_time{system.CoreTiming().GetGlobalTimeNs()};
+ auto time_delta{cur_time - last_sample_count_update_time};
+ auto exp_played_sample_count{min_played_sample_count +
+ (TargetSampleRate * time_delta) / std::chrono::seconds{1}};
+
+ // Add 15ms of latency in sample reporting to allow for some leeway in scheduler timings
+ return std::min<u64>(exp_played_sample_count, max_played_sample_count) + TargetSampleCount * 3;
}
-void SinkStream::Unstall() {
- std::scoped_lock lk{stall_guard};
- if (!stalled_lock) {
- return;
- }
- system.UnstallApplication();
- stalled_lock.unlock();
+void SinkStream::WaitFreeSpace() {
+ std::unique_lock lk{release_mutex};
+ release_cv.wait(
+ lk, [this]() { return queued_buffers < max_queue_size || system.IsShuttingDown(); });
}
} // namespace AudioCore::Sink
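
Note: the replacement for Stall()/Unstall() above estimates playback progress instead of stalling the guest. GetExpectedPlayedSampleCount() interpolates between the sample counts recorded at the last audio callback using elapsed core-timing time. Below is a minimal standalone sketch of that interpolation, assuming the usual audio_core constants (48 kHz target rate, 240 samples per 5 ms frame); it is illustrative only and not part of the patch.

#include <algorithm>
#include <chrono>
#include <cstdint>

// Assumed constants; in the real code these come from audio_core's common definitions.
constexpr std::uint64_t TargetSampleRate = 48'000;
constexpr std::uint64_t TargetSampleCount = 240;

// min_played/max_played and last_update mirror the members updated under
// sample_count_lock in ProcessAudioOutAndRender() above.
std::uint64_t ExpectedPlayedSamples(std::uint64_t min_played, std::uint64_t max_played,
                                    std::chrono::nanoseconds last_update,
                                    std::chrono::nanoseconds now) {
    const auto delta = now - last_update;
    // Advance the last known count by elapsed wall-clock time at the target rate...
    const std::uint64_t estimate =
        min_played + (TargetSampleRate * delta) / std::chrono::seconds{1};
    // ...clamp to what the sink can actually have consumed, then add ~15 ms
    // (three 5 ms frames) of slack for scheduler jitter, as the comment above notes.
    return std::min(estimate, max_played) + TargetSampleCount * 3;
}
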
diff --git a/src/audio_core/sink/sink_stream.h b/src/audio_core/sink/sink_stream.h
index 5fea72ab7..21b5b40a1 100644
--- a/src/audio_core/sink/sink_stream.h
+++ b/src/audio_core/sink/sink_stream.h
@@ -5,6 +5,7 @@
#include <array>
#include <atomic>
+#include <chrono>
#include <memory>
#include <mutex>
#include <span>
@@ -14,6 +15,7 @@
#include "common/common_types.h"
#include "common/reader_writer_queue.h"
#include "common/ring_buffer.h"
+#include "common/thread.h"
namespace Core {
class System;
@@ -53,9 +55,7 @@ struct SinkBuffer {
class SinkStream {
public:
explicit SinkStream(Core::System& system_, StreamType type_) : system{system_}, type{type_} {}
- virtual ~SinkStream() {
- Unstall();
- }
+ virtual ~SinkStream() {}
/**
* Finalize the sink stream.
@@ -201,14 +201,16 @@ public:
void ProcessAudioOutAndRender(std::span<s16> output_buffer, std::size_t num_frames);
/**
- * Stall core processes if the audio thread falls too far behind.
+ * Get the total number of samples expected to have been played by this stream.
+ *
+ * @return The number of samples.
*/
- void Stall();
+ u64 GetExpectedPlayedSampleCount();
/**
- * Unstall core processes.
+ * Waits for free space in the sample ring buffer
*/
- void Unstall();
+ void WaitFreeSpace();
protected:
/// Core system
@@ -237,12 +239,21 @@ private:
std::atomic<u32> queued_buffers{};
/// The ring size for audio out buffers (usually 4, rarely 2 or 8)
u32 max_queue_size{};
+ /// Locks access to sample count tracking info
+ std::mutex sample_count_lock;
+ /// Minimum number of total samples that have been played since the last callback
+ u64 min_played_sample_count{};
+ /// Maximum number of total samples that can be played since the last callback
+ u64 max_played_sample_count{};
+ /// The time the two above tracking variables were last written to
+ std::chrono::nanoseconds last_sample_count_update_time{};
/// Set by the audio render/in/out system which uses this stream
f32 system_volume{1.0f};
/// Set via IAudioDevice service calls
f32 device_volume{1.0f};
- std::mutex stall_guard;
- std::unique_lock<std::mutex> stalled_lock;
+ /// Signalled when ring buffer entries are consumed
+ std::condition_variable release_cv;
+ std::mutex release_mutex;
};
using SinkStreamPtr = std::unique_ptr<SinkStream>;
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index 61ab68864..13ed68b3f 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -38,6 +38,7 @@ add_library(common STATIC
common_precompiled_headers.h
common_types.h
concepts.h
+ container_hash.h
demangle.cpp
demangle.h
div_ceil.h
@@ -132,6 +133,7 @@ add_library(common STATIC
time_zone.h
tiny_mt.h
tree.h
+ typed_address.h
uint128.h
unique_function.h
uuid.cpp
@@ -158,6 +160,8 @@ if(ARCHITECTURE_x86_64)
PRIVATE
x64/cpu_detect.cpp
x64/cpu_detect.h
+ x64/cpu_wait.cpp
+ x64/cpu_wait.h
x64/native_clock.cpp
x64/native_clock.h
x64/xbyak_abi.h
diff --git a/src/common/bounded_threadsafe_queue.h b/src/common/bounded_threadsafe_queue.h
index 21217801e..bd87aa09b 100644
--- a/src/common/bounded_threadsafe_queue.h
+++ b/src/common/bounded_threadsafe_queue.h
@@ -1,158 +1,249 @@
-// SPDX-FileCopyrightText: Copyright (c) 2020 Erik Rigtorp <erik@rigtorp.se>
-// SPDX-License-Identifier: MIT
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <atomic>
-#include <bit>
#include <condition_variable>
-#include <memory>
+#include <cstddef>
#include <mutex>
#include <new>
-#include <stop_token>
-#include <type_traits>
-#include <utility>
+
+#include "common/polyfill_thread.h"
namespace Common {
-#if defined(__cpp_lib_hardware_interference_size)
-constexpr size_t hardware_interference_size = std::hardware_destructive_interference_size;
-#else
-constexpr size_t hardware_interference_size = 64;
-#endif
+namespace detail {
+constexpr size_t DefaultCapacity = 0x1000;
+} // namespace detail
+
+template <typename T, size_t Capacity = detail::DefaultCapacity>
+class SPSCQueue {
+ static_assert((Capacity & (Capacity - 1)) == 0, "Capacity must be a power of two.");
-template <typename T, size_t capacity = 0x400>
-class MPSCQueue {
public:
- explicit MPSCQueue() : allocator{std::allocator<Slot<T>>()} {
- // Allocate one extra slot to prevent false sharing on the last slot
- slots = allocator.allocate(capacity + 1);
- // Allocators are not required to honor alignment for over-aligned types
- // (see http://eel.is/c++draft/allocator.requirements#10) so we verify
- // alignment here
- if (reinterpret_cast<uintptr_t>(slots) % alignof(Slot<T>) != 0) {
- allocator.deallocate(slots, capacity + 1);
- throw std::bad_alloc();
- }
- for (size_t i = 0; i < capacity; ++i) {
- std::construct_at(&slots[i]);
- }
- static_assert(std::has_single_bit(capacity), "capacity must be an integer power of 2");
- static_assert(alignof(Slot<T>) == hardware_interference_size,
- "Slot must be aligned to cache line boundary to prevent false sharing");
- static_assert(sizeof(Slot<T>) % hardware_interference_size == 0,
- "Slot size must be a multiple of cache line size to prevent "
- "false sharing between adjacent slots");
- static_assert(sizeof(MPSCQueue) % hardware_interference_size == 0,
- "Queue size must be a multiple of cache line size to "
- "prevent false sharing between adjacent queues");
- }
-
- ~MPSCQueue() noexcept {
- for (size_t i = 0; i < capacity; ++i) {
- std::destroy_at(&slots[i]);
- }
- allocator.deallocate(slots, capacity + 1);
+ template <typename... Args>
+ bool TryEmplace(Args&&... args) {
+ return Emplace<PushMode::Try>(std::forward<Args>(args)...);
}
- // The queue must be both non-copyable and non-movable
- MPSCQueue(const MPSCQueue&) = delete;
- MPSCQueue& operator=(const MPSCQueue&) = delete;
+ template <typename... Args>
+ void EmplaceWait(Args&&... args) {
+ Emplace<PushMode::Wait>(std::forward<Args>(args)...);
+ }
- MPSCQueue(MPSCQueue&&) = delete;
- MPSCQueue& operator=(MPSCQueue&&) = delete;
+ bool TryPop(T& t) {
+ return Pop<PopMode::Try>(t);
+ }
- void Push(const T& v) noexcept {
- static_assert(std::is_nothrow_copy_constructible_v<T>,
- "T must be nothrow copy constructible");
- emplace(v);
+ void PopWait(T& t) {
+ Pop<PopMode::Wait>(t);
}
- template <typename P, typename = std::enable_if_t<std::is_nothrow_constructible_v<T, P&&>>>
- void Push(P&& v) noexcept {
- emplace(std::forward<P>(v));
+ void PopWait(T& t, std::stop_token stop_token) {
+ Pop<PopMode::WaitWithStopToken>(t, stop_token);
}
- void Pop(T& v, std::stop_token stop) noexcept {
- auto const tail = tail_.fetch_add(1);
- auto& slot = slots[idx(tail)];
- if (!slot.turn.test()) {
- std::unique_lock lock{cv_mutex};
- cv.wait(lock, stop, [&slot] { return slot.turn.test(); });
- }
- v = slot.move();
- slot.destroy();
- slot.turn.clear();
- slot.turn.notify_one();
+ T PopWait() {
+ T t;
+ Pop<PopMode::Wait>(t);
+ return t;
+ }
+
+ T PopWait(std::stop_token stop_token) {
+ T t;
+ Pop<PopMode::WaitWithStopToken>(t, stop_token);
+ return t;
}
private:
- template <typename U = T>
- struct Slot {
- ~Slot() noexcept {
- if (turn.test()) {
- destroy();
+ enum class PushMode {
+ Try,
+ Wait,
+ Count,
+ };
+
+ enum class PopMode {
+ Try,
+ Wait,
+ WaitWithStopToken,
+ Count,
+ };
+
+ template <PushMode Mode, typename... Args>
+ bool Emplace(Args&&... args) {
+ const size_t write_index = m_write_index.load(std::memory_order::relaxed);
+
+ if constexpr (Mode == PushMode::Try) {
+ // Check if we have free slots to write to.
+ if ((write_index - m_read_index.load(std::memory_order::acquire)) == Capacity) {
+ return false;
}
+ } else if constexpr (Mode == PushMode::Wait) {
+ // Wait until we have free slots to write to.
+ std::unique_lock lock{producer_cv_mutex};
+ producer_cv.wait(lock, [this, write_index] {
+ return (write_index - m_read_index.load(std::memory_order::acquire)) < Capacity;
+ });
+ } else {
+ static_assert(Mode < PushMode::Count, "Invalid PushMode.");
}
- template <typename... Args>
- void construct(Args&&... args) noexcept {
- static_assert(std::is_nothrow_constructible_v<U, Args&&...>,
- "T must be nothrow constructible with Args&&...");
- std::construct_at(reinterpret_cast<U*>(&storage), std::forward<Args>(args)...);
- }
+ // Determine the position to write to.
+ const size_t pos = write_index % Capacity;
- void destroy() noexcept {
- static_assert(std::is_nothrow_destructible_v<U>, "T must be nothrow destructible");
- std::destroy_at(reinterpret_cast<U*>(&storage));
- }
+ // Emplace into the queue.
+ std::construct_at(std::addressof(m_data[pos]), std::forward<Args>(args)...);
+
+ // Increment the write index.
+ ++m_write_index;
+
+ // Notify the consumer that we have pushed into the queue.
+ std::scoped_lock lock{consumer_cv_mutex};
+ consumer_cv.notify_one();
+
+ return true;
+ }
+
+ template <PopMode Mode>
+ bool Pop(T& t, [[maybe_unused]] std::stop_token stop_token = {}) {
+ const size_t read_index = m_read_index.load(std::memory_order::relaxed);
- U&& move() noexcept {
- return reinterpret_cast<U&&>(storage);
+ if constexpr (Mode == PopMode::Try) {
+ // Check if the queue is empty.
+ if (read_index == m_write_index.load(std::memory_order::acquire)) {
+ return false;
+ }
+ } else if constexpr (Mode == PopMode::Wait) {
+ // Wait until the queue is not empty.
+ std::unique_lock lock{consumer_cv_mutex};
+ consumer_cv.wait(lock, [this, read_index] {
+ return read_index != m_write_index.load(std::memory_order::acquire);
+ });
+ } else if constexpr (Mode == PopMode::WaitWithStopToken) {
+ // Wait until the queue is not empty.
+ std::unique_lock lock{consumer_cv_mutex};
+ Common::CondvarWait(consumer_cv, lock, stop_token, [this, read_index] {
+ return read_index != m_write_index.load(std::memory_order::acquire);
+ });
+ if (stop_token.stop_requested()) {
+ return false;
+ }
+ } else {
+ static_assert(Mode < PopMode::Count, "Invalid PopMode.");
}
- // Align to avoid false sharing between adjacent slots
- alignas(hardware_interference_size) std::atomic_flag turn{};
- struct aligned_store {
- struct type {
- alignas(U) unsigned char data[sizeof(U)];
- };
- };
- typename aligned_store::type storage;
- };
+ // Determine the position to read from.
+ const size_t pos = read_index % Capacity;
+
+ // Pop the data off the queue, moving it.
+ t = std::move(m_data[pos]);
+
+ // Increment the read index.
+ ++m_read_index;
+
+ // Notify the producer that we have popped off the queue.
+ std::scoped_lock lock{producer_cv_mutex};
+ producer_cv.notify_one();
+
+ return true;
+ }
+ alignas(128) std::atomic_size_t m_read_index{0};
+ alignas(128) std::atomic_size_t m_write_index{0};
+
+ std::array<T, Capacity> m_data;
+
+ std::condition_variable_any producer_cv;
+ std::mutex producer_cv_mutex;
+ std::condition_variable_any consumer_cv;
+ std::mutex consumer_cv_mutex;
+};
+
+template <typename T, size_t Capacity = detail::DefaultCapacity>
+class MPSCQueue {
+public:
template <typename... Args>
- void emplace(Args&&... args) noexcept {
- static_assert(std::is_nothrow_constructible_v<T, Args&&...>,
- "T must be nothrow constructible with Args&&...");
- auto const head = head_.fetch_add(1);
- auto& slot = slots[idx(head)];
- slot.turn.wait(true);
- slot.construct(std::forward<Args>(args)...);
- slot.turn.test_and_set();
- cv.notify_one();
+ bool TryEmplace(Args&&... args) {
+ std::scoped_lock lock{write_mutex};
+ return spsc_queue.TryEmplace(std::forward<Args>(args)...);
}
- constexpr size_t idx(size_t i) const noexcept {
- return i & mask;
+ template <typename... Args>
+ void EmplaceWait(Args&&... args) {
+ std::scoped_lock lock{write_mutex};
+ spsc_queue.EmplaceWait(std::forward<Args>(args)...);
}
- static constexpr size_t mask = capacity - 1;
+ bool TryPop(T& t) {
+ return spsc_queue.TryPop(t);
+ }
- // Align to avoid false sharing between head_ and tail_
- alignas(hardware_interference_size) std::atomic<size_t> head_{0};
- alignas(hardware_interference_size) std::atomic<size_t> tail_{0};
+ void PopWait(T& t) {
+ spsc_queue.PopWait(t);
+ }
- std::mutex cv_mutex;
- std::condition_variable_any cv;
+ void PopWait(T& t, std::stop_token stop_token) {
+ spsc_queue.PopWait(t, stop_token);
+ }
+
+ T PopWait() {
+ return spsc_queue.PopWait();
+ }
- Slot<T>* slots;
- [[no_unique_address]] std::allocator<Slot<T>> allocator;
+ T PopWait(std::stop_token stop_token) {
+ return spsc_queue.PopWait(stop_token);
+ }
- static_assert(std::is_nothrow_copy_assignable_v<T> || std::is_nothrow_move_assignable_v<T>,
- "T must be nothrow copy or move assignable");
+private:
+ SPSCQueue<T, Capacity> spsc_queue;
+ std::mutex write_mutex;
+};
- static_assert(std::is_nothrow_destructible_v<T>, "T must be nothrow destructible");
+template <typename T, size_t Capacity = detail::DefaultCapacity>
+class MPMCQueue {
+public:
+ template <typename... Args>
+ bool TryEmplace(Args&&... args) {
+ std::scoped_lock lock{write_mutex};
+ return spsc_queue.TryEmplace(std::forward<Args>(args)...);
+ }
+
+ template <typename... Args>
+ void EmplaceWait(Args&&... args) {
+ std::scoped_lock lock{write_mutex};
+ spsc_queue.EmplaceWait(std::forward<Args>(args)...);
+ }
+
+ bool TryPop(T& t) {
+ std::scoped_lock lock{read_mutex};
+ return spsc_queue.TryPop(t);
+ }
+
+ void PopWait(T& t) {
+ std::scoped_lock lock{read_mutex};
+ spsc_queue.PopWait(t);
+ }
+
+ void PopWait(T& t, std::stop_token stop_token) {
+ std::scoped_lock lock{read_mutex};
+ spsc_queue.PopWait(t, stop_token);
+ }
+
+ T PopWait() {
+ std::scoped_lock lock{read_mutex};
+ return spsc_queue.PopWait();
+ }
+
+ T PopWait(std::stop_token stop_token) {
+ std::scoped_lock lock{read_mutex};
+ return spsc_queue.PopWait(stop_token);
+ }
+
+private:
+ SPSCQueue<T, Capacity> spsc_queue;
+ std::mutex write_mutex;
+ std::mutex read_mutex;
};
} // namespace Common
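
For reference, a hedged usage sketch of the rewritten queue (not part of the patch): a single consumer drains it with the stop_token-aware PopWait() while producers call EmplaceWait(), which now blocks instead of spinning when the ring is full. This mirrors how the logging backend below moves to EmplaceWait()/TryPop(). The element type and capacity are arbitrary examples.

#include <stop_token>
#include <thread>

#include "common/bounded_threadsafe_queue.h"

void Example() {
    // Capacity must be a power of two (see the static_assert above).
    Common::MPSCQueue<int, 0x100> queue;

    std::jthread consumer([&queue](std::stop_token stop_token) {
        int item{};
        while (!stop_token.stop_requested()) {
            // Blocks until an element arrives or stop is requested.
            queue.PopWait(item, stop_token);
            if (stop_token.stop_requested()) {
                break;
            }
            // ... handle item ...
        }
        // Drain anything left without blocking.
        while (queue.TryPop(item)) {
        }
    });

    // Producer side: only blocks when the ring is full.
    queue.EmplaceWait(1);
    queue.EmplaceWait(2);
}
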
diff --git a/src/common/container_hash.h b/src/common/container_hash.h
new file mode 100644
index 000000000..a5e357745
--- /dev/null
+++ b/src/common/container_hash.h
@@ -0,0 +1,92 @@
+// SPDX-FileCopyrightText: 2005-2014 Daniel James
+// SPDX-FileCopyrightText: 2016 Austin Appleby
+// SPDX-License-Identifier: BSL-1.0
+
+#include <array>
+#include <climits>
+#include <cstdint>
+#include <limits>
+#include <type_traits>
+#include <vector>
+
+namespace Common {
+
+namespace detail {
+
+template <typename T>
+ requires std::is_unsigned_v<T>
+inline std::size_t HashValue(T val) {
+ const unsigned int size_t_bits = std::numeric_limits<std::size_t>::digits;
+ const unsigned int length =
+ (std::numeric_limits<T>::digits - 1) / static_cast<unsigned int>(size_t_bits);
+
+ std::size_t seed = 0;
+
+ for (unsigned int i = length * size_t_bits; i > 0; i -= size_t_bits) {
+ seed ^= static_cast<size_t>(val >> i) + (seed << 6) + (seed >> 2);
+ }
+
+ seed ^= static_cast<size_t>(val) + (seed << 6) + (seed >> 2);
+
+ return seed;
+}
+
+template <size_t Bits>
+struct HashCombineImpl {
+ template <typename T>
+ static inline T fn(T seed, T value) {
+ seed ^= value + 0x9e3779b9 + (seed << 6) + (seed >> 2);
+ return seed;
+ }
+};
+
+template <>
+struct HashCombineImpl<64> {
+ static inline std::uint64_t fn(std::uint64_t h, std::uint64_t k) {
+ const std::uint64_t m = (std::uint64_t(0xc6a4a793) << 32) + 0x5bd1e995;
+ const int r = 47;
+
+ k *= m;
+ k ^= k >> r;
+ k *= m;
+
+ h ^= k;
+ h *= m;
+
+ // Completely arbitrary number, to prevent 0's
+ // from hashing to 0.
+ h += 0xe6546b64;
+
+ return h;
+ }
+};
+
+} // namespace detail
+
+template <typename T>
+inline void HashCombine(std::size_t& seed, const T& v) {
+ seed = detail::HashCombineImpl<sizeof(std::size_t) * CHAR_BIT>::fn(seed, detail::HashValue(v));
+}
+
+template <typename It>
+inline std::size_t HashRange(It first, It last) {
+ std::size_t seed = 0;
+
+ for (; first != last; ++first) {
+ HashCombine<typename std::iterator_traits<It>::value_type>(seed, *first);
+ }
+
+ return seed;
+}
+
+template <typename T, size_t Size>
+std::size_t HashValue(const std::array<T, Size>& v) {
+ return HashRange(v.cbegin(), v.cend());
+}
+
+template <typename T, typename Allocator>
+std::size_t HashValue(const std::vector<T, Allocator>& v) {
+ return HashRange(v.cbegin(), v.cend());
+}
+
+} // namespace Common
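
A small illustrative sketch (not part of the patch) of how the new helpers compose: HashValue() hashes an entire std::array or std::vector of unsigned integers by folding each element into a seed with HashCombine(), the same entry points the new src/tests/common/container_hash.cpp test is expected to cover. The function names below are hypothetical call sites.

#include <array>
#include <cstddef>
#include <cstdint>

#include "common/container_hash.h"

std::size_t HashKey(const std::array<std::uint32_t, 4>& key) {
    // Folds every element of the array into one seed.
    return Common::HashValue(key);
}

std::size_t HashPair(std::uint64_t a, std::uint64_t b) {
    std::size_t seed = 0;
    // HashCombine mixes each value into the running seed (64-bit MurmurHash-style step).
    Common::HashCombine(seed, a);
    Common::HashCombine(seed, b);
    return seed;
}
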
diff --git a/src/common/intrusive_red_black_tree.h b/src/common/intrusive_red_black_tree.h
index 5f6b34e82..bc2940fa0 100644
--- a/src/common/intrusive_red_black_tree.h
+++ b/src/common/intrusive_red_black_tree.h
@@ -96,10 +96,6 @@ public:
return m_node == rhs.m_node;
}
- constexpr bool operator!=(const Iterator& rhs) const {
- return !(*this == rhs);
- }
-
constexpr pointer operator->() const {
return m_node;
}
@@ -324,10 +320,6 @@ public:
return m_impl == rhs.m_impl;
}
- constexpr bool operator!=(const Iterator& rhs) const {
- return !(*this == rhs);
- }
-
constexpr pointer operator->() const {
return Traits::GetParent(std::addressof(*m_impl));
}
diff --git a/src/common/logging/backend.cpp b/src/common/logging/backend.cpp
index 2a3bded40..f96c7c222 100644
--- a/src/common/logging/backend.cpp
+++ b/src/common/logging/backend.cpp
@@ -28,7 +28,7 @@
#ifdef _WIN32
#include "common/string_util.h"
#endif
-#include "common/threadsafe_queue.h"
+#include "common/bounded_threadsafe_queue.h"
namespace Common::Log {
@@ -204,11 +204,11 @@ public:
void PushEntry(Class log_class, Level log_level, const char* filename, unsigned int line_num,
const char* function, std::string&& message) {
- if (!filter.CheckMessage(log_class, log_level))
+ if (!filter.CheckMessage(log_class, log_level)) {
return;
- const Entry& entry =
- CreateEntry(log_class, log_level, filename, line_num, function, std::move(message));
- message_queue.Push(entry);
+ }
+ message_queue.EmplaceWait(
+ CreateEntry(log_class, log_level, filename, line_num, function, std::move(message)));
}
private:
@@ -225,7 +225,7 @@ private:
ForEachBackend([&entry](Backend& backend) { backend.Write(entry); });
};
while (!stop_token.stop_requested()) {
- entry = message_queue.PopWait(stop_token);
+ message_queue.PopWait(entry, stop_token);
if (entry.filename != nullptr) {
write_logs();
}
@@ -233,7 +233,7 @@ private:
// Drain the logging queue. Only writes out up to MAX_LOGS_TO_WRITE to prevent a
// case where a system is repeatedly spamming logs even on close.
int max_logs_to_write = filter.IsDebug() ? INT_MAX : 100;
- while (max_logs_to_write-- && message_queue.Pop(entry)) {
+ while (max_logs_to_write-- && message_queue.TryPop(entry)) {
write_logs();
}
});
@@ -273,7 +273,7 @@ private:
ColorConsoleBackend color_console_backend{};
FileBackend file_backend;
- MPSCQueue<Entry, true> message_queue{};
+ MPSCQueue<Entry> message_queue{};
std::chrono::steady_clock::time_point time_origin{std::chrono::steady_clock::now()};
std::jthread backend_thread;
};
diff --git a/src/common/range_map.h b/src/common/range_map.h
index 79c7ef547..ab73993e3 100644
--- a/src/common/range_map.h
+++ b/src/common/range_map.h
@@ -38,12 +38,12 @@ public:
Map(address, address_end, null_value);
}
- [[nodiscard]] size_t GetContinousSizeFrom(KeyTBase address) const {
+ [[nodiscard]] size_t GetContinuousSizeFrom(KeyTBase address) const {
const KeyT new_address = static_cast<KeyT>(address);
if (new_address < 0) {
return 0;
}
- return ContinousSizeInternal(new_address);
+ return ContinuousSizeInternal(new_address);
}
[[nodiscard]] ValueT GetValueAt(KeyT address) const {
@@ -59,7 +59,7 @@ private:
using IteratorType = typename MapType::iterator;
using ConstIteratorType = typename MapType::const_iterator;
- size_t ContinousSizeInternal(KeyT address) const {
+ size_t ContinuousSizeInternal(KeyT address) const {
const auto it = GetFirstElementBeforeOrOn(address);
if (it == container.end() || it->second == null_value) {
return 0;
diff --git a/src/common/string_util.cpp b/src/common/string_util.cpp
index e0b6180c5..feab1653d 100644
--- a/src/common/string_util.cpp
+++ b/src/common/string_util.cpp
@@ -125,18 +125,18 @@ std::string ReplaceAll(std::string result, const std::string& src, const std::st
return result;
}
-std::string UTF16ToUTF8(const std::u16string& input) {
+std::string UTF16ToUTF8(std::u16string_view input) {
std::wstring_convert<std::codecvt_utf8_utf16<char16_t>, char16_t> convert;
- return convert.to_bytes(input);
+ return convert.to_bytes(input.data(), input.data() + input.size());
}
-std::u16string UTF8ToUTF16(const std::string& input) {
+std::u16string UTF8ToUTF16(std::string_view input) {
std::wstring_convert<std::codecvt_utf8_utf16<char16_t>, char16_t> convert;
- return convert.from_bytes(input);
+ return convert.from_bytes(input.data(), input.data() + input.size());
}
#ifdef _WIN32
-static std::wstring CPToUTF16(u32 code_page, const std::string& input) {
+static std::wstring CPToUTF16(u32 code_page, std::string_view input) {
const auto size =
MultiByteToWideChar(code_page, 0, input.data(), static_cast<int>(input.size()), nullptr, 0);
@@ -154,7 +154,7 @@ static std::wstring CPToUTF16(u32 code_page, const std::string& input) {
return output;
}
-std::string UTF16ToUTF8(const std::wstring& input) {
+std::string UTF16ToUTF8(std::wstring_view input) {
const auto size = WideCharToMultiByte(CP_UTF8, 0, input.data(), static_cast<int>(input.size()),
nullptr, 0, nullptr, nullptr);
if (size == 0) {
@@ -172,7 +172,7 @@ std::string UTF16ToUTF8(const std::wstring& input) {
return output;
}
-std::wstring UTF8ToUTF16W(const std::string& input) {
+std::wstring UTF8ToUTF16W(std::string_view input) {
return CPToUTF16(CP_UTF8, input);
}
diff --git a/src/common/string_util.h b/src/common/string_util.h
index f8aecc875..c351f1a0c 100644
--- a/src/common/string_util.h
+++ b/src/common/string_util.h
@@ -36,12 +36,12 @@ bool SplitPath(const std::string& full_path, std::string* _pPath, std::string* _
[[nodiscard]] std::string ReplaceAll(std::string result, const std::string& src,
const std::string& dest);
-[[nodiscard]] std::string UTF16ToUTF8(const std::u16string& input);
-[[nodiscard]] std::u16string UTF8ToUTF16(const std::string& input);
+[[nodiscard]] std::string UTF16ToUTF8(std::u16string_view input);
+[[nodiscard]] std::u16string UTF8ToUTF16(std::string_view input);
#ifdef _WIN32
-[[nodiscard]] std::string UTF16ToUTF8(const std::wstring& input);
-[[nodiscard]] std::wstring UTF8ToUTF16W(const std::string& str);
+[[nodiscard]] std::string UTF16ToUTF8(std::wstring_view input);
+[[nodiscard]] std::wstring UTF8ToUTF16W(std::string_view str);
#endif
diff --git a/src/common/telemetry.cpp b/src/common/telemetry.cpp
index d26394359..91352912d 100644
--- a/src/common/telemetry.cpp
+++ b/src/common/telemetry.cpp
@@ -97,6 +97,7 @@ void AppendCPUInfo(FieldCollection& fc) {
add_field("CPU_Extension_x64_PCLMULQDQ", caps.pclmulqdq);
add_field("CPU_Extension_x64_POPCNT", caps.popcnt);
add_field("CPU_Extension_x64_SHA", caps.sha);
+ add_field("CPU_Extension_x64_WAITPKG", caps.waitpkg);
#else
fc.AddField(FieldType::UserSystem, "CPU_Model", "Other");
#endif
diff --git a/src/common/typed_address.h b/src/common/typed_address.h
new file mode 100644
index 000000000..64f4a07c2
--- /dev/null
+++ b/src/common/typed_address.h
@@ -0,0 +1,315 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <compare>
+#include <type_traits>
+#include <fmt/format.h>
+
+#include "common/common_types.h"
+
+namespace Common {
+
+template <bool Virtual, typename T>
+class TypedAddress {
+public:
+ // Constructors.
+ constexpr inline TypedAddress() : m_address(0) {}
+ constexpr inline TypedAddress(uint64_t a) : m_address(a) {}
+
+ template <typename U>
+ constexpr inline explicit TypedAddress(const U* ptr)
+ : m_address(reinterpret_cast<uint64_t>(ptr)) {}
+
+ // Copy constructor.
+ constexpr inline TypedAddress(const TypedAddress& rhs) = default;
+
+ // Assignment operator.
+ constexpr inline TypedAddress& operator=(const TypedAddress& rhs) = default;
+
+ // Arithmetic operators.
+ template <typename I>
+ constexpr inline TypedAddress operator+(I rhs) const {
+ static_assert(std::is_integral_v<I>);
+ return m_address + rhs;
+ }
+
+ constexpr inline TypedAddress operator+(TypedAddress rhs) const {
+ return m_address + rhs.m_address;
+ }
+
+ constexpr inline TypedAddress operator++() {
+ return ++m_address;
+ }
+
+ constexpr inline TypedAddress operator++(int) {
+ return m_address++;
+ }
+
+ template <typename I>
+ constexpr inline TypedAddress operator-(I rhs) const {
+ static_assert(std::is_integral_v<I>);
+ return m_address - rhs;
+ }
+
+ constexpr inline ptrdiff_t operator-(TypedAddress rhs) const {
+ return m_address - rhs.m_address;
+ }
+
+ constexpr inline TypedAddress operator--() {
+ return --m_address;
+ }
+
+ constexpr inline TypedAddress operator--(int) {
+ return m_address--;
+ }
+
+ template <typename I>
+ constexpr inline TypedAddress operator+=(I rhs) {
+ static_assert(std::is_integral_v<I>);
+ m_address += rhs;
+ return *this;
+ }
+
+ template <typename I>
+ constexpr inline TypedAddress operator-=(I rhs) {
+ static_assert(std::is_integral_v<I>);
+ m_address -= rhs;
+ return *this;
+ }
+
+ // Logical operators.
+ constexpr inline uint64_t operator&(uint64_t mask) const {
+ return m_address & mask;
+ }
+
+ constexpr inline uint64_t operator|(uint64_t mask) const {
+ return m_address | mask;
+ }
+
+ template <typename I>
+ constexpr inline TypedAddress operator|=(I rhs) {
+ static_assert(std::is_integral_v<I>);
+ m_address |= rhs;
+ return *this;
+ }
+
+ constexpr inline uint64_t operator<<(int shift) const {
+ return m_address << shift;
+ }
+
+ constexpr inline uint64_t operator>>(int shift) const {
+ return m_address >> shift;
+ }
+
+ template <typename U>
+ constexpr inline size_t operator/(U size) const {
+ return m_address / size;
+ }
+
+ constexpr explicit operator bool() const {
+ return m_address != 0;
+ }
+
+ // constexpr inline uint64_t operator%(U align) const { return m_address % align; }
+
+ // Comparison operators.
+ constexpr bool operator==(const TypedAddress&) const = default;
+ constexpr auto operator<=>(const TypedAddress&) const = default;
+
+ // For convenience, also define comparison operators versus uint64_t.
+ constexpr inline bool operator==(uint64_t rhs) const {
+ return m_address == rhs;
+ }
+
+ // Allow getting the address explicitly, for use in accessors.
+ constexpr inline uint64_t GetValue() const {
+ return m_address;
+ }
+
+private:
+ uint64_t m_address{};
+};
+
+struct PhysicalAddressTag {};
+struct VirtualAddressTag {};
+struct ProcessAddressTag {};
+
+using PhysicalAddress = TypedAddress<false, PhysicalAddressTag>;
+using VirtualAddress = TypedAddress<true, VirtualAddressTag>;
+using ProcessAddress = TypedAddress<true, ProcessAddressTag>;
+
+// Define accessors.
+template <typename T>
+concept IsTypedAddress = std::same_as<T, PhysicalAddress> || std::same_as<T, VirtualAddress> ||
+ std::same_as<T, ProcessAddress>;
+
+template <typename T>
+constexpr inline T Null = [] {
+ if constexpr (std::is_same<T, uint64_t>::value) {
+ return 0;
+ } else {
+ static_assert(std::is_same<T, PhysicalAddress>::value ||
+ std::is_same<T, VirtualAddress>::value ||
+ std::is_same<T, ProcessAddress>::value);
+ return T(0);
+ }
+}();
+
+// Basic type validations.
+static_assert(sizeof(PhysicalAddress) == sizeof(uint64_t));
+static_assert(sizeof(VirtualAddress) == sizeof(uint64_t));
+static_assert(sizeof(ProcessAddress) == sizeof(uint64_t));
+
+static_assert(std::is_trivially_copyable_v<PhysicalAddress>);
+static_assert(std::is_trivially_copyable_v<VirtualAddress>);
+static_assert(std::is_trivially_copyable_v<ProcessAddress>);
+
+static_assert(std::is_trivially_copy_constructible_v<PhysicalAddress>);
+static_assert(std::is_trivially_copy_constructible_v<VirtualAddress>);
+static_assert(std::is_trivially_copy_constructible_v<ProcessAddress>);
+
+static_assert(std::is_trivially_move_constructible_v<PhysicalAddress>);
+static_assert(std::is_trivially_move_constructible_v<VirtualAddress>);
+static_assert(std::is_trivially_move_constructible_v<ProcessAddress>);
+
+static_assert(std::is_trivially_copy_assignable_v<PhysicalAddress>);
+static_assert(std::is_trivially_copy_assignable_v<VirtualAddress>);
+static_assert(std::is_trivially_copy_assignable_v<ProcessAddress>);
+
+static_assert(std::is_trivially_move_assignable_v<PhysicalAddress>);
+static_assert(std::is_trivially_move_assignable_v<VirtualAddress>);
+static_assert(std::is_trivially_move_assignable_v<ProcessAddress>);
+
+static_assert(std::is_trivially_destructible_v<PhysicalAddress>);
+static_assert(std::is_trivially_destructible_v<VirtualAddress>);
+static_assert(std::is_trivially_destructible_v<ProcessAddress>);
+
+static_assert(Null<uint64_t> == 0);
+static_assert(Null<PhysicalAddress> == Null<uint64_t>);
+static_assert(Null<VirtualAddress> == Null<uint64_t>);
+static_assert(Null<ProcessAddress> == Null<uint64_t>);
+
+// Constructor/assignment validations.
+static_assert([] {
+ const PhysicalAddress a(5);
+ PhysicalAddress b(a);
+ return b;
+}() == PhysicalAddress(5));
+static_assert([] {
+ const PhysicalAddress a(5);
+ PhysicalAddress b(10);
+ b = a;
+ return b;
+}() == PhysicalAddress(5));
+
+// Arithmetic validations.
+static_assert(PhysicalAddress(10) + 5 == PhysicalAddress(15));
+static_assert(PhysicalAddress(10) - 5 == PhysicalAddress(5));
+static_assert([] {
+ PhysicalAddress v(10);
+ v += 5;
+ return v;
+}() == PhysicalAddress(15));
+static_assert([] {
+ PhysicalAddress v(10);
+ v -= 5;
+ return v;
+}() == PhysicalAddress(5));
+static_assert(PhysicalAddress(10)++ == PhysicalAddress(10));
+static_assert(++PhysicalAddress(10) == PhysicalAddress(11));
+static_assert(PhysicalAddress(10)-- == PhysicalAddress(10));
+static_assert(--PhysicalAddress(10) == PhysicalAddress(9));
+
+// Logical validations.
+static_assert((PhysicalAddress(0b11111111) >> 1) == 0b01111111);
+static_assert((PhysicalAddress(0b10101010) >> 1) == 0b01010101);
+static_assert((PhysicalAddress(0b11111111) << 1) == 0b111111110);
+static_assert((PhysicalAddress(0b01010101) << 1) == 0b10101010);
+static_assert((PhysicalAddress(0b11111111) & 0b01010101) == 0b01010101);
+static_assert((PhysicalAddress(0b11111111) & 0b10101010) == 0b10101010);
+static_assert((PhysicalAddress(0b01010101) & 0b10101010) == 0b00000000);
+static_assert((PhysicalAddress(0b00000000) | 0b01010101) == 0b01010101);
+static_assert((PhysicalAddress(0b11111111) | 0b01010101) == 0b11111111);
+static_assert((PhysicalAddress(0b10101010) | 0b01010101) == 0b11111111);
+
+// Comparisons.
+static_assert(PhysicalAddress(0) == PhysicalAddress(0));
+static_assert(PhysicalAddress(0) != PhysicalAddress(1));
+static_assert(PhysicalAddress(0) < PhysicalAddress(1));
+static_assert(PhysicalAddress(0) <= PhysicalAddress(1));
+static_assert(PhysicalAddress(1) > PhysicalAddress(0));
+static_assert(PhysicalAddress(1) >= PhysicalAddress(0));
+
+static_assert(!(PhysicalAddress(0) == PhysicalAddress(1)));
+static_assert(!(PhysicalAddress(0) != PhysicalAddress(0)));
+static_assert(!(PhysicalAddress(1) < PhysicalAddress(0)));
+static_assert(!(PhysicalAddress(1) <= PhysicalAddress(0)));
+static_assert(!(PhysicalAddress(0) > PhysicalAddress(1)));
+static_assert(!(PhysicalAddress(0) >= PhysicalAddress(1)));
+
+} // namespace Common
+
+template <bool Virtual, typename T>
+constexpr inline uint64_t GetInteger(Common::TypedAddress<Virtual, T> address) {
+ return address.GetValue();
+}
+
+template <>
+struct fmt::formatter<Common::PhysicalAddress> {
+ constexpr auto parse(fmt::format_parse_context& ctx) {
+ return ctx.begin();
+ }
+ template <typename FormatContext>
+ auto format(const Common::PhysicalAddress& addr, FormatContext& ctx) {
+ return fmt::format_to(ctx.out(), "{:#x}", static_cast<u64>(addr.GetValue()));
+ }
+};
+
+template <>
+struct fmt::formatter<Common::ProcessAddress> {
+ constexpr auto parse(fmt::format_parse_context& ctx) {
+ return ctx.begin();
+ }
+ template <typename FormatContext>
+ auto format(const Common::ProcessAddress& addr, FormatContext& ctx) {
+ return fmt::format_to(ctx.out(), "{:#x}", static_cast<u64>(addr.GetValue()));
+ }
+};
+
+template <>
+struct fmt::formatter<Common::VirtualAddress> {
+ constexpr auto parse(fmt::format_parse_context& ctx) {
+ return ctx.begin();
+ }
+ template <typename FormatContext>
+ auto format(const Common::VirtualAddress& addr, FormatContext& ctx) {
+ return fmt::format_to(ctx.out(), "{:#x}", static_cast<u64>(addr.GetValue()));
+ }
+};
+
+namespace std {
+
+template <>
+struct hash<Common::PhysicalAddress> {
+ size_t operator()(const Common::PhysicalAddress& k) const noexcept {
+ return k.GetValue();
+ }
+};
+
+template <>
+struct hash<Common::ProcessAddress> {
+ size_t operator()(const Common::ProcessAddress& k) const noexcept {
+ return k.GetValue();
+ }
+};
+
+template <>
+struct hash<Common::VirtualAddress> {
+ size_t operator()(const Common::VirtualAddress& k) const noexcept {
+ return k.GetValue();
+ }
+};
+
+} // namespace std
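The compile-time checks above pin down the behavior of the address wrappers. As a quick orientation, here is a minimal usage sketch (not part of the patch; MapPage is a hypothetical helper, and the sketch assumes the wrapper types do not convert into each other implicitly, which is the point of keeping them distinct):

#include <cstdint>
#include "common/typed_address.h"

// Hypothetical helper, used only to show that the parameter kinds cannot be swapped.
inline void MapPage(Common::VirtualAddress va, Common::PhysicalAddress pa) {
    (void)va;
    (void)pa;
}

inline void Example() {
    const Common::VirtualAddress va(0x80000000);
    const Common::PhysicalAddress pa(0x40000000);
    MapPage(va, pa);                          // OK: argument kinds match the signature
    // MapPage(pa, va);                       // rejected at compile time under the assumption above
    const std::uint64_t raw = GetInteger(va); // explicit escape hatch back to a raw integer
    (void)raw;
}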
diff --git a/src/common/x64/cpu_detect.cpp b/src/common/x64/cpu_detect.cpp
index e54383a4a..72ed6e96c 100644
--- a/src/common/x64/cpu_detect.cpp
+++ b/src/common/x64/cpu_detect.cpp
@@ -144,6 +144,7 @@ static CPUCaps Detect() {
caps.bmi2 = Common::Bit<8>(cpu_id[1]);
caps.sha = Common::Bit<29>(cpu_id[1]);
+ caps.waitpkg = Common::Bit<5>(cpu_id[2]);
caps.gfni = Common::Bit<8>(cpu_id[2]);
__cpuidex(cpu_id, 0x00000007, 0x00000001);
diff --git a/src/common/x64/cpu_detect.h b/src/common/x64/cpu_detect.h
index ca8db19d6..8253944d6 100644
--- a/src/common/x64/cpu_detect.h
+++ b/src/common/x64/cpu_detect.h
@@ -67,6 +67,7 @@ struct CPUCaps {
bool pclmulqdq : 1;
bool popcnt : 1;
bool sha : 1;
+ bool waitpkg : 1;
};
/**
diff --git a/src/common/x64/cpu_wait.cpp b/src/common/x64/cpu_wait.cpp
new file mode 100644
index 000000000..cfeef6a3d
--- /dev/null
+++ b/src/common/x64/cpu_wait.cpp
@@ -0,0 +1,69 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <thread>
+
+#ifdef _MSC_VER
+#include <intrin.h>
+#endif
+
+#include "common/x64/cpu_detect.h"
+#include "common/x64/cpu_wait.h"
+
+namespace Common::X64 {
+
+#ifdef _MSC_VER
+__forceinline static u64 FencedRDTSC() {
+ _mm_lfence();
+ _ReadWriteBarrier();
+ const u64 result = __rdtsc();
+ _mm_lfence();
+ _ReadWriteBarrier();
+ return result;
+}
+
+__forceinline static void TPAUSE() {
+ // 100,000 cycles is a reasonable amount of time to wait to save on CPU resources.
+ // For reference:
+ // At 1 GHz, 100K cycles is 100us
+ // At 2 GHz, 100K cycles is 50us
+ // At 4 GHz, 100K cycles is 25us
+ static constexpr auto PauseCycles = 100'000;
+ _tpause(0, FencedRDTSC() + PauseCycles);
+}
+#else
+static u64 FencedRDTSC() {
+ u64 eax;
+ u64 edx;
+ asm volatile("lfence\n\t"
+ "rdtsc\n\t"
+ "lfence\n\t"
+ : "=a"(eax), "=d"(edx));
+ return (edx << 32) | eax;
+}
+
+static void TPAUSE() {
+ // 100,000 cycles is a reasonable amount of time to wait to save on CPU resources.
+ // For reference:
+ // At 1 GHz, 100K cycles is 100us
+ // At 2 GHz, 100K cycles is 50us
+ // At 4 GHz, 100K cycles is 25us
+ static constexpr auto PauseCycles = 100'000;
+ const auto tsc = FencedRDTSC() + PauseCycles;
+ const auto eax = static_cast<u32>(tsc & 0xFFFFFFFF);
+ const auto edx = static_cast<u32>(tsc >> 32);
+ asm volatile("tpause %0" : : "r"(0), "d"(edx), "a"(eax));
+}
+#endif
+
+void MicroSleep() {
+ static const bool has_waitpkg = GetCPUCaps().waitpkg;
+
+ if (has_waitpkg) {
+ TPAUSE();
+ } else {
+ std::this_thread::yield();
+ }
+}
+
+} // namespace Common::X64
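MicroSleep() gives callers one entry point that issues TPAUSE when the CPU advertises WAITPKG and falls back to a scheduler yield otherwise. A minimal sketch of the kind of low-cost polling loop it is meant for (illustrative only, not taken from this patch):

#include <atomic>
#include "common/x64/cpu_wait.h"

// Spin until another thread publishes 'flag', without burning a full core while waiting.
inline void WaitUntilSet(std::atomic<bool>& flag) {
    while (!flag.load(std::memory_order_acquire)) {
        Common::X64::MicroSleep(); // TPAUSE for ~100K cycles if available, otherwise yield
    }
}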
diff --git a/src/common/x64/cpu_wait.h b/src/common/x64/cpu_wait.h
new file mode 100644
index 000000000..99d3757a7
--- /dev/null
+++ b/src/common/x64/cpu_wait.h
@@ -0,0 +1,10 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+namespace Common::X64 {
+
+void MicroSleep();
+
+} // namespace Common::X64
diff --git a/src/common/x64/native_clock.cpp b/src/common/x64/native_clock.cpp
index 76c66e7ee..277b00662 100644
--- a/src/common/x64/native_clock.cpp
+++ b/src/common/x64/native_clock.cpp
@@ -27,16 +27,13 @@ __forceinline static u64 FencedRDTSC() {
}
#else
static u64 FencedRDTSC() {
- u64 result;
+ u64 eax;
+ u64 edx;
asm volatile("lfence\n\t"
"rdtsc\n\t"
- "shl $32, %%rdx\n\t"
- "or %%rdx, %0\n\t"
- "lfence"
- : "=a"(result)
- :
- : "rdx", "memory", "cc");
- return result;
+ "lfence\n\t"
+ : "=a"(eax), "=d"(edx));
+ return (edx << 32) | eax;
}
#endif
diff --git a/src/common/zstd_compression.cpp b/src/common/zstd_compression.cpp
index b71a41b78..cb6ec171b 100644
--- a/src/common/zstd_compression.cpp
+++ b/src/common/zstd_compression.cpp
@@ -33,7 +33,7 @@ std::vector<u8> CompressDataZSTDDefault(const u8* source, std::size_t source_siz
std::vector<u8> DecompressDataZSTD(std::span<const u8> compressed) {
const std::size_t decompressed_size =
- ZSTD_getDecompressedSize(compressed.data(), compressed.size());
+ ZSTD_getFrameContentSize(compressed.data(), compressed.size());
std::vector<u8> decompressed(decompressed_size);
const std::size_t uncompressed_result_size = ZSTD_decompress(
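For context on the swapped API: ZSTD_getFrameContentSize() can also report ZSTD_CONTENTSIZE_UNKNOWN or ZSTD_CONTENTSIZE_ERROR rather than a usable size. A sketch of a fully checked caller (illustrative only, not what the patch itself does):

#include <cstddef>
#include <vector>
#include <zstd.h>

std::vector<unsigned char> DecompressChecked(const unsigned char* src, std::size_t src_size) {
    const unsigned long long content_size = ZSTD_getFrameContentSize(src, src_size);
    if (content_size == ZSTD_CONTENTSIZE_UNKNOWN || content_size == ZSTD_CONTENTSIZE_ERROR) {
        return {}; // size not recoverable from the frame header
    }
    std::vector<unsigned char> out(static_cast<std::size_t>(content_size));
    const std::size_t written = ZSTD_decompress(out.data(), out.size(), src, src_size);
    if (ZSTD_isError(written)) {
        return {};
    }
    out.resize(written);
    return out;
}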
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 378e6c023..4e677f287 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -278,6 +278,7 @@ add_library(core STATIC
hle/kernel/k_trace.h
hle/kernel/k_transfer_memory.cpp
hle/kernel/k_transfer_memory.h
+ hle/kernel/k_typed_address.h
hle/kernel/k_worker_task.h
hle/kernel/k_worker_task_manager.cpp
hle/kernel/k_worker_task_manager.h
diff --git a/src/core/arm/arm_interface.cpp b/src/core/arm/arm_interface.cpp
index 4a331d4c1..d30914b7a 100644
--- a/src/core/arm/arm_interface.cpp
+++ b/src/core/arm/arm_interface.cpp
@@ -44,7 +44,7 @@ void ARM_Interface::SymbolicateBacktrace(Core::System& system, std::vector<Backt
std::map<std::string, Symbols::Symbols> symbols;
for (const auto& module : modules) {
symbols.insert_or_assign(
- module.second, Symbols::GetSymbols(module.first, system.Memory(),
+ module.second, Symbols::GetSymbols(module.first, system.ApplicationMemory(),
system.ApplicationProcess()->Is64BitProcess()));
}
@@ -168,21 +168,21 @@ void ARM_Interface::LoadWatchpointArray(const WatchpointArray& wp) {
}
const Kernel::DebugWatchpoint* ARM_Interface::MatchingWatchpoint(
- VAddr addr, u64 size, Kernel::DebugWatchpointType access_type) const {
+ u64 addr, u64 size, Kernel::DebugWatchpointType access_type) const {
if (!watchpoints) {
return nullptr;
}
- const VAddr start_address{addr};
- const VAddr end_address{addr + size};
+ const u64 start_address{addr};
+ const u64 end_address{addr + size};
for (size_t i = 0; i < Core::Hardware::NUM_WATCHPOINTS; i++) {
const auto& watch{(*watchpoints)[i]};
- if (end_address <= watch.start_address) {
+ if (end_address <= GetInteger(watch.start_address)) {
continue;
}
- if (start_address >= watch.end_address) {
+ if (start_address >= GetInteger(watch.end_address)) {
continue;
}
if ((access_type & watch.type) == Kernel::DebugWatchpointType::None) {
diff --git a/src/core/arm/arm_interface.h b/src/core/arm/arm_interface.h
index c40771c97..8e40702cc 100644
--- a/src/core/arm/arm_interface.h
+++ b/src/core/arm/arm_interface.h
@@ -78,7 +78,7 @@ public:
* @param addr Start address of the cache range to clear
* @param size Size of the cache range to clear, starting at addr
*/
- virtual void InvalidateCacheRange(VAddr addr, std::size_t size) = 0;
+ virtual void InvalidateCacheRange(u64 addr, std::size_t size) = 0;
/**
* Notifies CPU emulation that the current page table has changed.
@@ -149,9 +149,9 @@ public:
*/
virtual void SetPSTATE(u32 pstate) = 0;
- virtual VAddr GetTlsAddress() const = 0;
+ virtual u64 GetTlsAddress() const = 0;
- virtual void SetTlsAddress(VAddr address) = 0;
+ virtual void SetTlsAddress(u64 address) = 0;
/**
* Gets the value within the TPIDR_EL0 (read/write software thread ID) register.
@@ -214,7 +214,7 @@ protected:
static void SymbolicateBacktrace(Core::System& system, std::vector<BacktraceEntry>& out);
const Kernel::DebugWatchpoint* MatchingWatchpoint(
- VAddr addr, u64 size, Kernel::DebugWatchpointType access_type) const;
+ u64 addr, u64 size, Kernel::DebugWatchpointType access_type) const;
virtual Dynarmic::HaltReason RunJit() = 0;
virtual Dynarmic::HaltReason StepJit() = 0;
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index 2a7570073..dfdcbe35a 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -5,7 +5,6 @@
#include <memory>
#include <dynarmic/interface/A32/a32.h>
#include <dynarmic/interface/A32/config.h>
-#include <dynarmic/interface/A32/context.h>
#include "common/assert.h"
#include "common/literals.h"
#include "common/logging/log.h"
@@ -28,8 +27,8 @@ using namespace Common::Literals;
class DynarmicCallbacks32 : public Dynarmic::A32::UserCallbacks {
public:
explicit DynarmicCallbacks32(ARM_Dynarmic_32& parent_)
- : parent{parent_},
- memory(parent.system.Memory()), debugger_enabled{parent.system.DebuggerEnabled()},
+ : parent{parent_}, memory(parent.system.ApplicationMemory()),
+ debugger_enabled{parent.system.DebuggerEnabled()},
check_memory_access{debugger_enabled ||
!Settings::values.cpuopt_ignore_memory_aborts.GetValue()} {}
@@ -155,7 +154,7 @@ public:
return std::max<s64>(parent.system.CoreTiming().GetDowncount(), 0);
}
- bool CheckMemoryAccess(VAddr addr, u64 size, Kernel::DebugWatchpointType type) {
+ bool CheckMemoryAccess(u64 addr, u64 size, Kernel::DebugWatchpointType type) {
if (!check_memory_access) {
return true;
}
@@ -397,7 +396,7 @@ u64 ARM_Dynarmic_32::GetTlsAddress() const {
return cp15->uro;
}
-void ARM_Dynarmic_32::SetTlsAddress(VAddr address) {
+void ARM_Dynarmic_32::SetTlsAddress(u64 address) {
cp15->uro = static_cast<u32>(address);
}
@@ -410,21 +409,19 @@ void ARM_Dynarmic_32::SetTPIDR_EL0(u64 value) {
}
void ARM_Dynarmic_32::SaveContext(ThreadContext32& ctx) {
- Dynarmic::A32::Context context;
- jit.load()->SaveContext(context);
- ctx.cpu_registers = context.Regs();
- ctx.extension_registers = context.ExtRegs();
- ctx.cpsr = context.Cpsr();
- ctx.fpscr = context.Fpscr();
+ Dynarmic::A32::Jit* j = jit.load();
+ ctx.cpu_registers = j->Regs();
+ ctx.extension_registers = j->ExtRegs();
+ ctx.cpsr = j->Cpsr();
+ ctx.fpscr = j->Fpscr();
}
void ARM_Dynarmic_32::LoadContext(const ThreadContext32& ctx) {
- Dynarmic::A32::Context context;
- context.Regs() = ctx.cpu_registers;
- context.ExtRegs() = ctx.extension_registers;
- context.SetCpsr(ctx.cpsr);
- context.SetFpscr(ctx.fpscr);
- jit.load()->LoadContext(context);
+ Dynarmic::A32::Jit* j = jit.load();
+ j->Regs() = ctx.cpu_registers;
+ j->ExtRegs() = ctx.extension_registers;
+ j->SetCpsr(ctx.cpsr);
+ j->SetFpscr(ctx.fpscr);
}
void ARM_Dynarmic_32::SignalInterrupt() {
@@ -439,7 +436,7 @@ void ARM_Dynarmic_32::ClearInstructionCache() {
jit.load()->ClearCache();
}
-void ARM_Dynarmic_32::InvalidateCacheRange(VAddr addr, std::size_t size) {
+void ARM_Dynarmic_32::InvalidateCacheRange(u64 addr, std::size_t size) {
jit.load()->InvalidateCacheRange(static_cast<u32>(addr), size);
}
@@ -468,7 +465,7 @@ void ARM_Dynarmic_32::PageTableChanged(Common::PageTable& page_table,
std::vector<ARM_Interface::BacktraceEntry> ARM_Dynarmic_32::GetBacktrace(Core::System& system,
u64 fp, u64 lr, u64 pc) {
std::vector<BacktraceEntry> out;
- auto& memory = system.Memory();
+ auto& memory = system.ApplicationMemory();
out.push_back({"", 0, pc, 0, ""});
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.h b/src/core/arm/dynarmic/arm_dynarmic_32.h
index d24ba2289..bce695daf 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.h
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.h
@@ -41,8 +41,8 @@ public:
void SetVectorReg(int index, u128 value) override;
u32 GetPSTATE() const override;
void SetPSTATE(u32 pstate) override;
- VAddr GetTlsAddress() const override;
- void SetTlsAddress(VAddr address) override;
+ u64 GetTlsAddress() const override;
+ void SetTlsAddress(u64 address) override;
void SetTPIDR_EL0(u64 value) override;
u64 GetTPIDR_EL0() const override;
@@ -60,7 +60,7 @@ public:
void ClearExclusiveState() override;
void ClearInstructionCache() override;
- void InvalidateCacheRange(VAddr addr, std::size_t size) override;
+ void InvalidateCacheRange(u64 addr, std::size_t size) override;
void PageTableChanged(Common::PageTable& new_page_table,
std::size_t new_address_space_size_in_bits) override;
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index 7229fdc2a..bbbcb4f9d 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -28,8 +28,8 @@ using namespace Common::Literals;
class DynarmicCallbacks64 : public Dynarmic::A64::UserCallbacks {
public:
explicit DynarmicCallbacks64(ARM_Dynarmic_64& parent_)
- : parent{parent_},
- memory(parent.system.Memory()), debugger_enabled{parent.system.DebuggerEnabled()},
+ : parent{parent_}, memory(parent.system.ApplicationMemory()),
+ debugger_enabled{parent.system.DebuggerEnabled()},
check_memory_access{debugger_enabled ||
!Settings::values.cpuopt_ignore_memory_aborts.GetValue()} {}
@@ -117,7 +117,7 @@ public:
}
void InstructionCacheOperationRaised(Dynarmic::A64::InstructionCacheOperation op,
- VAddr value) override {
+ u64 value) override {
switch (op) {
case Dynarmic::A64::InstructionCacheOperation::InvalidateByVAToPoU: {
static constexpr u64 ICACHE_LINE_SIZE = 64;
@@ -199,7 +199,7 @@ public:
return parent.system.CoreTiming().GetClockTicks();
}
- bool CheckMemoryAccess(VAddr addr, u64 size, Kernel::DebugWatchpointType type) {
+ bool CheckMemoryAccess(u64 addr, u64 size, Kernel::DebugWatchpointType type) {
if (!check_memory_access) {
return true;
}
@@ -452,7 +452,7 @@ u64 ARM_Dynarmic_64::GetTlsAddress() const {
return cb->tpidrro_el0;
}
-void ARM_Dynarmic_64::SetTlsAddress(VAddr address) {
+void ARM_Dynarmic_64::SetTlsAddress(u64 address) {
cb->tpidrro_el0 = address;
}
@@ -500,7 +500,7 @@ void ARM_Dynarmic_64::ClearInstructionCache() {
jit.load()->ClearCache();
}
-void ARM_Dynarmic_64::InvalidateCacheRange(VAddr addr, std::size_t size) {
+void ARM_Dynarmic_64::InvalidateCacheRange(u64 addr, std::size_t size) {
jit.load()->InvalidateCacheRange(addr, size);
}
@@ -529,7 +529,7 @@ void ARM_Dynarmic_64::PageTableChanged(Common::PageTable& page_table,
std::vector<ARM_Interface::BacktraceEntry> ARM_Dynarmic_64::GetBacktrace(Core::System& system,
u64 fp, u64 lr, u64 pc) {
std::vector<BacktraceEntry> out;
- auto& memory = system.Memory();
+ auto& memory = system.ApplicationMemory();
out.push_back({"", 0, pc, 0, ""});
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.h b/src/core/arm/dynarmic/arm_dynarmic_64.h
index ed1a5eb96..e83599e82 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.h
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.h
@@ -38,8 +38,8 @@ public:
void SetVectorReg(int index, u128 value) override;
u32 GetPSTATE() const override;
void SetPSTATE(u32 pstate) override;
- VAddr GetTlsAddress() const override;
- void SetTlsAddress(VAddr address) override;
+ u64 GetTlsAddress() const override;
+ void SetTlsAddress(u64 address) override;
void SetTPIDR_EL0(u64 value) override;
u64 GetTPIDR_EL0() const override;
@@ -53,7 +53,7 @@ public:
void ClearExclusiveState() override;
void ClearInstructionCache() override;
- void InvalidateCacheRange(VAddr addr, std::size_t size) override;
+ void InvalidateCacheRange(u64 addr, std::size_t size) override;
void PageTableChanged(Common::PageTable& new_page_table,
std::size_t new_address_space_size_in_bits) override;
diff --git a/src/core/core.cpp b/src/core/core.cpp
index d2b597068..caa6a77be 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -293,6 +293,7 @@ struct System::Impl {
ASSERT(Kernel::KProcess::Initialize(main_process, system, "main",
Kernel::KProcess::ProcessType::Userland, resource_limit)
.IsSuccess());
+ kernel.MakeApplicationProcess(main_process);
const auto [load_result, load_parameters] = app_loader->Load(*main_process, system);
if (load_result != Loader::ResultStatus::Success) {
LOG_CRITICAL(Core, "Failed to load ROM (Error {})!", load_result);
@@ -302,7 +303,6 @@ struct System::Impl {
static_cast<u32>(SystemResultStatus::ErrorLoader) + static_cast<u32>(load_result));
}
AddGlueRegistrationForProcess(*app_loader, *main_process);
- kernel.MakeApplicationProcess(main_process);
kernel.InitializeCores();
// Initialize cheat engine
@@ -564,7 +564,7 @@ void System::InvalidateCpuInstructionCaches() {
impl->kernel.InvalidateAllInstructionCaches();
}
-void System::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) {
+void System::InvalidateCpuInstructionCacheRange(u64 addr, std::size_t size) {
impl->kernel.InvalidateCpuInstructionCacheRange(addr, size);
}
@@ -681,11 +681,11 @@ const ExclusiveMonitor& System::Monitor() const {
return impl->kernel.GetExclusiveMonitor();
}
-Memory::Memory& System::Memory() {
+Memory::Memory& System::ApplicationMemory() {
return impl->memory;
}
-const Core::Memory::Memory& System::Memory() const {
+const Core::Memory::Memory& System::ApplicationMemory() const {
return impl->memory;
}
@@ -794,7 +794,7 @@ FileSys::VirtualFilesystem System::GetFilesystem() const {
}
void System::RegisterCheatList(const std::vector<Memory::CheatEntry>& list,
- const std::array<u8, 32>& build_id, VAddr main_region_begin,
+ const std::array<u8, 32>& build_id, u64 main_region_begin,
u64 main_region_size) {
impl->cheat_engine = std::make_unique<Memory::CheatEngine>(*this, list, build_id);
impl->cheat_engine->SetMainMemoryParameters(main_region_begin, main_region_size);
diff --git a/src/core/core.h b/src/core/core.h
index 5843696d4..4a5aba032 100644
--- a/src/core/core.h
+++ b/src/core/core.h
@@ -172,7 +172,7 @@ public:
*/
void InvalidateCpuInstructionCaches();
- void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size);
+ void InvalidateCpuInstructionCacheRange(u64 addr, std::size_t size);
/// Shutdown the main emulated process.
void ShutdownMainProcess();
@@ -256,10 +256,10 @@ public:
[[nodiscard]] const ExclusiveMonitor& Monitor() const;
/// Gets a mutable reference to the system memory instance.
- [[nodiscard]] Core::Memory::Memory& Memory();
+ [[nodiscard]] Core::Memory::Memory& ApplicationMemory();
/// Gets a constant reference to the system memory instance.
- [[nodiscard]] const Core::Memory::Memory& Memory() const;
+ [[nodiscard]] const Core::Memory::Memory& ApplicationMemory() const;
/// Gets a mutable reference to the GPU interface
[[nodiscard]] Tegra::GPU& GPU();
@@ -353,7 +353,7 @@ public:
[[nodiscard]] FileSys::VirtualFilesystem GetFilesystem() const;
void RegisterCheatList(const std::vector<Memory::CheatEntry>& list,
- const std::array<u8, 0x20>& build_id, VAddr main_region_begin,
+ const std::array<u8, 0x20>& build_id, u64 main_region_begin,
u64 main_region_size);
void SetAppletFrontendSet(Service::AM::Applets::AppletFrontendSet&& set);
diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp
index cd4df4522..4f2692b05 100644
--- a/src/core/core_timing.cpp
+++ b/src/core/core_timing.cpp
@@ -10,6 +10,10 @@
#include "common/windows/timer_resolution.h"
#endif
+#ifdef ARCHITECTURE_x86_64
+#include "common/x64/cpu_wait.h"
+#endif
+
#include "common/microprofile.h"
#include "core/core_timing.h"
#include "core/core_timing_util.h"
@@ -269,7 +273,11 @@ void CoreTiming::ThreadLoop() {
if (wait_time >= timer_resolution_ns) {
Common::Windows::SleepForOneTick();
} else {
+#ifdef ARCHITECTURE_x86_64
+ Common::X64::MicroSleep();
+#else
std::this_thread::yield();
+#endif
}
}
diff --git a/src/core/debugger/gdbstub.cpp b/src/core/debugger/gdbstub.cpp
index b2fe6bd7d..e2a13bbd2 100644
--- a/src/core/debugger/gdbstub.cpp
+++ b/src/core/debugger/gdbstub.cpp
@@ -118,14 +118,14 @@ void GDBStub::Watchpoint(Kernel::KThread* thread, const Kernel::DebugWatchpoint&
switch (watch.type) {
case Kernel::DebugWatchpointType::Read:
- SendReply(fmt::format("{}rwatch:{:x};", status, watch.start_address));
+ SendReply(fmt::format("{}rwatch:{:x};", status, GetInteger(watch.start_address)));
break;
case Kernel::DebugWatchpointType::Write:
- SendReply(fmt::format("{}watch:{:x};", status, watch.start_address));
+ SendReply(fmt::format("{}watch:{:x};", status, GetInteger(watch.start_address)));
break;
case Kernel::DebugWatchpointType::ReadOrWrite:
default:
- SendReply(fmt::format("{}awatch:{:x};", status, watch.start_address));
+ SendReply(fmt::format("{}awatch:{:x};", status, GetInteger(watch.start_address)));
break;
}
}
@@ -261,9 +261,9 @@ void GDBStub::ExecuteCommand(std::string_view packet, std::vector<DebuggerAction
const size_t addr{static_cast<size_t>(strtoll(command.data(), nullptr, 16))};
const size_t size{static_cast<size_t>(strtoll(command.data() + sep, nullptr, 16))};
- if (system.Memory().IsValidVirtualAddressRange(addr, size)) {
+ if (system.ApplicationMemory().IsValidVirtualAddressRange(addr, size)) {
std::vector<u8> mem(size);
- system.Memory().ReadBlock(addr, mem.data(), size);
+ system.ApplicationMemory().ReadBlock(addr, mem.data(), size);
SendReply(Common::HexToString(mem));
} else {
@@ -281,8 +281,8 @@ void GDBStub::ExecuteCommand(std::string_view packet, std::vector<DebuggerAction
const auto mem_substr{std::string_view(command).substr(mem_sep)};
const auto mem{Common::HexStringToVector(mem_substr, false)};
- if (system.Memory().IsValidVirtualAddressRange(addr, size)) {
- system.Memory().WriteBlock(addr, mem.data(), size);
+ if (system.ApplicationMemory().IsValidVirtualAddressRange(addr, size)) {
+ system.ApplicationMemory().WriteBlock(addr, mem.data(), size);
system.InvalidateCpuInstructionCacheRange(addr, size);
SendReply(GDB_STUB_REPLY_OK);
} else {
@@ -325,7 +325,7 @@ void GDBStub::HandleBreakpointInsert(std::string_view command) {
const size_t addr{static_cast<size_t>(strtoll(command.data() + addr_sep, nullptr, 16))};
const size_t size{static_cast<size_t>(strtoll(command.data() + size_sep, nullptr, 16))};
- if (!system.Memory().IsValidVirtualAddressRange(addr, size)) {
+ if (!system.ApplicationMemory().IsValidVirtualAddressRange(addr, size)) {
SendReply(GDB_STUB_REPLY_ERR);
return;
}
@@ -334,22 +334,22 @@ void GDBStub::HandleBreakpointInsert(std::string_view command) {
switch (type) {
case BreakpointType::Software:
- replaced_instructions[addr] = system.Memory().Read32(addr);
- system.Memory().Write32(addr, arch->BreakpointInstruction());
+ replaced_instructions[addr] = system.ApplicationMemory().Read32(addr);
+ system.ApplicationMemory().Write32(addr, arch->BreakpointInstruction());
system.InvalidateCpuInstructionCacheRange(addr, sizeof(u32));
success = true;
break;
case BreakpointType::WriteWatch:
- success = system.ApplicationProcess()->InsertWatchpoint(system, addr, size,
+ success = system.ApplicationProcess()->InsertWatchpoint(addr, size,
Kernel::DebugWatchpointType::Write);
break;
case BreakpointType::ReadWatch:
- success = system.ApplicationProcess()->InsertWatchpoint(system, addr, size,
+ success = system.ApplicationProcess()->InsertWatchpoint(addr, size,
Kernel::DebugWatchpointType::Read);
break;
case BreakpointType::AccessWatch:
success = system.ApplicationProcess()->InsertWatchpoint(
- system, addr, size, Kernel::DebugWatchpointType::ReadOrWrite);
+ addr, size, Kernel::DebugWatchpointType::ReadOrWrite);
break;
case BreakpointType::Hardware:
default:
@@ -372,7 +372,7 @@ void GDBStub::HandleBreakpointRemove(std::string_view command) {
const size_t addr{static_cast<size_t>(strtoll(command.data() + addr_sep, nullptr, 16))};
const size_t size{static_cast<size_t>(strtoll(command.data() + size_sep, nullptr, 16))};
- if (!system.Memory().IsValidVirtualAddressRange(addr, size)) {
+ if (!system.ApplicationMemory().IsValidVirtualAddressRange(addr, size)) {
SendReply(GDB_STUB_REPLY_ERR);
return;
}
@@ -383,7 +383,7 @@ void GDBStub::HandleBreakpointRemove(std::string_view command) {
case BreakpointType::Software: {
const auto orig_insn{replaced_instructions.find(addr)};
if (orig_insn != replaced_instructions.end()) {
- system.Memory().Write32(addr, orig_insn->second);
+ system.ApplicationMemory().Write32(addr, orig_insn->second);
system.InvalidateCpuInstructionCacheRange(addr, sizeof(u32));
replaced_instructions.erase(addr);
success = true;
@@ -391,16 +391,16 @@ void GDBStub::HandleBreakpointRemove(std::string_view command) {
break;
}
case BreakpointType::WriteWatch:
- success = system.ApplicationProcess()->RemoveWatchpoint(system, addr, size,
+ success = system.ApplicationProcess()->RemoveWatchpoint(addr, size,
Kernel::DebugWatchpointType::Write);
break;
case BreakpointType::ReadWatch:
- success = system.ApplicationProcess()->RemoveWatchpoint(system, addr, size,
+ success = system.ApplicationProcess()->RemoveWatchpoint(addr, size,
Kernel::DebugWatchpointType::Read);
break;
case BreakpointType::AccessWatch:
success = system.ApplicationProcess()->RemoveWatchpoint(
- system, addr, size, Kernel::DebugWatchpointType::ReadOrWrite);
+ addr, size, Kernel::DebugWatchpointType::ReadOrWrite);
break;
case BreakpointType::Hardware:
default:
@@ -483,9 +483,9 @@ static std::optional<std::string> GetNameFromThreadType64(Core::Memory::Memory&
static std::optional<std::string> GetThreadName(Core::System& system,
const Kernel::KThread* thread) {
if (system.ApplicationProcess()->Is64BitProcess()) {
- return GetNameFromThreadType64(system.Memory(), thread);
+ return GetNameFromThreadType64(system.ApplicationMemory(), thread);
} else {
- return GetNameFromThreadType32(system.Memory(), thread);
+ return GetNameFromThreadType32(system.ApplicationMemory(), thread);
}
}
@@ -554,8 +554,9 @@ void GDBStub::HandleQuery(std::string_view command) {
if (main != modules.end()) {
SendReply(fmt::format("TextSeg={:x}", main->first));
} else {
- SendReply(fmt::format("TextSeg={:x}",
- system.ApplicationProcess()->PageTable().GetCodeRegionStart()));
+ SendReply(fmt::format(
+ "TextSeg={:x}",
+ GetInteger(system.ApplicationProcess()->PageTable().GetCodeRegionStart())));
}
} else if (command.starts_with("Xfer:libraries:read::")) {
Loader::AppLoader::Modules modules;
@@ -757,17 +758,20 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
reply = fmt::format("Process: {:#x} ({})\n"
"Program Id: {:#018x}\n",
process->GetProcessId(), process->GetName(), process->GetProgramId());
- reply +=
- fmt::format("Layout:\n"
- " Alias: {:#012x} - {:#012x}\n"
- " Heap: {:#012x} - {:#012x}\n"
- " Aslr: {:#012x} - {:#012x}\n"
- " Stack: {:#012x} - {:#012x}\n"
- "Modules:\n",
- page_table.GetAliasRegionStart(), page_table.GetAliasRegionEnd(),
- page_table.GetHeapRegionStart(), page_table.GetHeapRegionEnd(),
- page_table.GetAliasCodeRegionStart(), page_table.GetAliasCodeRegionEnd(),
- page_table.GetStackRegionStart(), page_table.GetStackRegionEnd());
+ reply += fmt::format("Layout:\n"
+ " Alias: {:#012x} - {:#012x}\n"
+ " Heap: {:#012x} - {:#012x}\n"
+ " Aslr: {:#012x} - {:#012x}\n"
+ " Stack: {:#012x} - {:#012x}\n"
+ "Modules:\n",
+ GetInteger(page_table.GetAliasRegionStart()),
+ GetInteger(page_table.GetAliasRegionEnd()),
+ GetInteger(page_table.GetHeapRegionStart()),
+ GetInteger(page_table.GetHeapRegionEnd()),
+ GetInteger(page_table.GetAliasCodeRegionStart()),
+ GetInteger(page_table.GetAliasCodeRegionEnd()),
+ GetInteger(page_table.GetStackRegionStart()),
+ GetInteger(page_table.GetStackRegionEnd()));
for (const auto& [vaddr, name] : modules) {
reply += fmt::format(" {:#012x} - {:#012x} {}\n", vaddr,
diff --git a/src/core/device_memory.h b/src/core/device_memory.h
index 90510733c..13388b73e 100644
--- a/src/core/device_memory.h
+++ b/src/core/device_memory.h
@@ -3,8 +3,8 @@
#pragma once
-#include "common/common_types.h"
#include "common/host_memory.h"
+#include "common/typed_address.h"
namespace Core {
@@ -25,20 +25,22 @@ public:
DeviceMemory(const DeviceMemory&) = delete;
template <typename T>
- PAddr GetPhysicalAddr(const T* ptr) const {
+ Common::PhysicalAddress GetPhysicalAddr(const T* ptr) const {
return (reinterpret_cast<uintptr_t>(ptr) -
reinterpret_cast<uintptr_t>(buffer.BackingBasePointer())) +
DramMemoryMap::Base;
}
template <typename T>
- T* GetPointer(PAddr addr) {
- return reinterpret_cast<T*>(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base));
+ T* GetPointer(Common::PhysicalAddress addr) {
+ return reinterpret_cast<T*>(buffer.BackingBasePointer() +
+ (GetInteger(addr) - DramMemoryMap::Base));
}
template <typename T>
- const T* GetPointer(PAddr addr) const {
- return reinterpret_cast<T*>(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base));
+ const T* GetPointer(Common::PhysicalAddress addr) const {
+ return reinterpret_cast<T*>(buffer.BackingBasePointer() +
+ (GetInteger(addr) - DramMemoryMap::Base));
}
Common::HostMemory buffer;
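The two accessors are inverses of each other: GetPhysicalAddr() maps a host pointer into emulated DRAM space and GetPointer() maps it back. A small sketch of that round trip (illustrative only; the function name is made up for this example):

#include <cstdint>
#include "core/device_memory.h"

// Returns true when a host pointer survives the pointer -> physical address -> pointer round trip.
inline bool RoundTrips(Core::DeviceMemory& device_memory, std::uint8_t* host_ptr) {
    const Common::PhysicalAddress paddr = device_memory.GetPhysicalAddr(host_ptr);
    return device_memory.GetPointer<std::uint8_t>(paddr) == host_ptr;
}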
diff --git a/src/core/frontend/applets/applet.h b/src/core/frontend/applets/applet.h
new file mode 100644
index 000000000..77fffe306
--- /dev/null
+++ b/src/core/frontend/applets/applet.h
@@ -0,0 +1,14 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+namespace Core::Frontend {
+
+class Applet {
+public:
+ virtual ~Applet() = default;
+ virtual void Close() const = 0;
+};
+
+} // namespace Core::Frontend
diff --git a/src/core/frontend/applets/cabinet.cpp b/src/core/frontend/applets/cabinet.cpp
index 26c7fefe3..2d501eeae 100644
--- a/src/core/frontend/applets/cabinet.cpp
+++ b/src/core/frontend/applets/cabinet.cpp
@@ -10,6 +10,8 @@ namespace Core::Frontend {
CabinetApplet::~CabinetApplet() = default;
+void DefaultCabinetApplet::Close() const {}
+
void DefaultCabinetApplet::ShowCabinetApplet(
const CabinetCallback& callback, const CabinetParameters& parameters,
std::shared_ptr<Service::NFP::NfpDevice> nfp_device) const {
diff --git a/src/core/frontend/applets/cabinet.h b/src/core/frontend/applets/cabinet.h
index c28a235c1..74dc5a4f6 100644
--- a/src/core/frontend/applets/cabinet.h
+++ b/src/core/frontend/applets/cabinet.h
@@ -4,6 +4,7 @@
#pragma once
#include <functional>
+#include "core/frontend/applets/applet.h"
#include "core/hle/service/nfp/nfp_types.h"
namespace Service::NFP {
@@ -20,7 +21,7 @@ struct CabinetParameters {
using CabinetCallback = std::function<void(bool, const std::string&)>;
-class CabinetApplet {
+class CabinetApplet : public Applet {
public:
virtual ~CabinetApplet();
virtual void ShowCabinetApplet(const CabinetCallback& callback,
@@ -30,6 +31,7 @@ public:
class DefaultCabinetApplet final : public CabinetApplet {
public:
+ void Close() const override;
void ShowCabinetApplet(const CabinetCallback& callback, const CabinetParameters& parameters,
std::shared_ptr<Service::NFP::NfpDevice> nfp_device) const override;
};
diff --git a/src/core/frontend/applets/controller.cpp b/src/core/frontend/applets/controller.cpp
index 52919484e..3300d4f79 100644
--- a/src/core/frontend/applets/controller.cpp
+++ b/src/core/frontend/applets/controller.cpp
@@ -16,6 +16,8 @@ DefaultControllerApplet::DefaultControllerApplet(HID::HIDCore& hid_core_) : hid_
DefaultControllerApplet::~DefaultControllerApplet() = default;
+void DefaultControllerApplet::Close() const {}
+
void DefaultControllerApplet::ReconfigureControllers(ReconfigureCallback callback,
const ControllerParameters& parameters) const {
LOG_INFO(Service_HID, "called, deducing the best configuration based on the given parameters!");
@@ -69,7 +71,7 @@ void DefaultControllerApplet::ReconfigureControllers(ReconfigureCallback callbac
}
}
- callback();
+ callback(true);
}
} // namespace Core::Frontend
diff --git a/src/core/frontend/applets/controller.h b/src/core/frontend/applets/controller.h
index adb2feefd..19a2db6bf 100644
--- a/src/core/frontend/applets/controller.h
+++ b/src/core/frontend/applets/controller.h
@@ -7,6 +7,7 @@
#include <vector>
#include "common/common_types.h"
+#include "core/frontend/applets/applet.h"
namespace Core::HID {
class HIDCore;
@@ -34,9 +35,9 @@ struct ControllerParameters {
bool allow_gamecube_controller{};
};
-class ControllerApplet {
+class ControllerApplet : public Applet {
public:
- using ReconfigureCallback = std::function<void()>;
+ using ReconfigureCallback = std::function<void(bool)>;
virtual ~ControllerApplet();
@@ -49,6 +50,7 @@ public:
explicit DefaultControllerApplet(HID::HIDCore& hid_core_);
~DefaultControllerApplet() override;
+ void Close() const override;
void ReconfigureControllers(ReconfigureCallback callback,
const ControllerParameters& parameters) const override;
diff --git a/src/core/frontend/applets/error.cpp b/src/core/frontend/applets/error.cpp
index 69c2b2b4d..2e6f7a3d9 100644
--- a/src/core/frontend/applets/error.cpp
+++ b/src/core/frontend/applets/error.cpp
@@ -8,6 +8,8 @@ namespace Core::Frontend {
ErrorApplet::~ErrorApplet() = default;
+void DefaultErrorApplet::Close() const {}
+
void DefaultErrorApplet::ShowError(Result error, FinishedCallback finished) const {
LOG_CRITICAL(Service_Fatal, "Application requested error display: {:04}-{:04} (raw={:08X})",
error.module.Value(), error.description.Value(), error.raw);
diff --git a/src/core/frontend/applets/error.h b/src/core/frontend/applets/error.h
index 884f2f653..3a12196ce 100644
--- a/src/core/frontend/applets/error.h
+++ b/src/core/frontend/applets/error.h
@@ -6,11 +6,12 @@
#include <chrono>
#include <functional>
+#include "core/frontend/applets/applet.h"
#include "core/hle/result.h"
namespace Core::Frontend {
-class ErrorApplet {
+class ErrorApplet : public Applet {
public:
using FinishedCallback = std::function<void()>;
@@ -28,6 +29,7 @@ public:
class DefaultErrorApplet final : public ErrorApplet {
public:
+ void Close() const override;
void ShowError(Result error, FinishedCallback finished) const override;
void ShowErrorWithTimestamp(Result error, std::chrono::seconds time,
FinishedCallback finished) const override;
diff --git a/src/core/frontend/applets/general_frontend.cpp b/src/core/frontend/applets/general_frontend.cpp
index 29a00fb6f..b4b213a31 100644
--- a/src/core/frontend/applets/general_frontend.cpp
+++ b/src/core/frontend/applets/general_frontend.cpp
@@ -10,6 +10,8 @@ ParentalControlsApplet::~ParentalControlsApplet() = default;
DefaultParentalControlsApplet::~DefaultParentalControlsApplet() = default;
+void DefaultParentalControlsApplet::Close() const {}
+
void DefaultParentalControlsApplet::VerifyPIN(std::function<void(bool)> finished,
bool suspend_future_verification_temporarily) {
LOG_INFO(Service_AM,
@@ -39,6 +41,8 @@ PhotoViewerApplet::~PhotoViewerApplet() = default;
DefaultPhotoViewerApplet::~DefaultPhotoViewerApplet() = default;
+void DefaultPhotoViewerApplet::Close() const {}
+
void DefaultPhotoViewerApplet::ShowPhotosForApplication(u64 title_id,
std::function<void()> finished) const {
LOG_INFO(Service_AM,
diff --git a/src/core/frontend/applets/general_frontend.h b/src/core/frontend/applets/general_frontend.h
index cbec8b4ad..319838ac7 100644
--- a/src/core/frontend/applets/general_frontend.h
+++ b/src/core/frontend/applets/general_frontend.h
@@ -6,9 +6,11 @@
#include <functional>
#include "common/common_types.h"
+#include "core/frontend/applets/applet.h"
+
namespace Core::Frontend {
-class ParentalControlsApplet {
+class ParentalControlsApplet : public Applet {
public:
virtual ~ParentalControlsApplet();
@@ -33,6 +35,7 @@ class DefaultParentalControlsApplet final : public ParentalControlsApplet {
public:
~DefaultParentalControlsApplet() override;
+ void Close() const override;
void VerifyPIN(std::function<void(bool)> finished,
bool suspend_future_verification_temporarily) override;
void VerifyPINForSettings(std::function<void(bool)> finished) override;
@@ -40,7 +43,7 @@ public:
void ChangePIN(std::function<void()> finished) override;
};
-class PhotoViewerApplet {
+class PhotoViewerApplet : public Applet {
public:
virtual ~PhotoViewerApplet();
@@ -52,6 +55,7 @@ class DefaultPhotoViewerApplet final : public PhotoViewerApplet {
public:
~DefaultPhotoViewerApplet() override;
+ void Close() const override;
void ShowPhotosForApplication(u64 title_id, std::function<void()> finished) const override;
void ShowAllPhotos(std::function<void()> finished) const override;
};
diff --git a/src/core/frontend/applets/mii_edit.cpp b/src/core/frontend/applets/mii_edit.cpp
index bc8c57067..2988c3e72 100644
--- a/src/core/frontend/applets/mii_edit.cpp
+++ b/src/core/frontend/applets/mii_edit.cpp
@@ -8,6 +8,8 @@ namespace Core::Frontend {
MiiEditApplet::~MiiEditApplet() = default;
+void DefaultMiiEditApplet::Close() const {}
+
void DefaultMiiEditApplet::ShowMiiEdit(const MiiEditCallback& callback) const {
LOG_WARNING(Service_AM, "(STUBBED) called");
diff --git a/src/core/frontend/applets/mii_edit.h b/src/core/frontend/applets/mii_edit.h
index d828f06ec..9d86ee658 100644
--- a/src/core/frontend/applets/mii_edit.h
+++ b/src/core/frontend/applets/mii_edit.h
@@ -5,9 +5,11 @@
#include <functional>
+#include "core/frontend/applets/applet.h"
+
namespace Core::Frontend {
-class MiiEditApplet {
+class MiiEditApplet : public Applet {
public:
using MiiEditCallback = std::function<void()>;
@@ -18,6 +20,7 @@ public:
class DefaultMiiEditApplet final : public MiiEditApplet {
public:
+ void Close() const override;
void ShowMiiEdit(const MiiEditCallback& callback) const override;
};
diff --git a/src/core/frontend/applets/profile_select.cpp b/src/core/frontend/applets/profile_select.cpp
index da4cfbf87..c18f17a36 100644
--- a/src/core/frontend/applets/profile_select.cpp
+++ b/src/core/frontend/applets/profile_select.cpp
@@ -9,7 +9,10 @@ namespace Core::Frontend {
ProfileSelectApplet::~ProfileSelectApplet() = default;
-void DefaultProfileSelectApplet::SelectProfile(SelectProfileCallback callback) const {
+void DefaultProfileSelectApplet::Close() const {}
+
+void DefaultProfileSelectApplet::SelectProfile(SelectProfileCallback callback,
+ const ProfileSelectParameters& parameters) const {
Service::Account::ProfileManager manager;
callback(manager.GetUser(Settings::values.current_user.GetValue()).value_or(Common::UUID{}));
LOG_INFO(Service_ACC, "called, selecting current user instead of prompting...");
diff --git a/src/core/frontend/applets/profile_select.h b/src/core/frontend/applets/profile_select.h
index 138429533..92e2737ea 100644
--- a/src/core/frontend/applets/profile_select.h
+++ b/src/core/frontend/applets/profile_select.h
@@ -5,22 +5,35 @@
#include <functional>
#include <optional>
+
#include "common/uuid.h"
+#include "core/frontend/applets/applet.h"
+#include "core/hle/service/am/applets/applet_profile_select.h"
namespace Core::Frontend {
-class ProfileSelectApplet {
+struct ProfileSelectParameters {
+ Service::AM::Applets::UiMode mode;
+ std::array<Common::UUID, 8> invalid_uid_list;
+ Service::AM::Applets::UiSettingsDisplayOptions display_options;
+ Service::AM::Applets::UserSelectionPurpose purpose;
+};
+
+class ProfileSelectApplet : public Applet {
public:
using SelectProfileCallback = std::function<void(std::optional<Common::UUID>)>;
virtual ~ProfileSelectApplet();
- virtual void SelectProfile(SelectProfileCallback callback) const = 0;
+ virtual void SelectProfile(SelectProfileCallback callback,
+ const ProfileSelectParameters& parameters) const = 0;
};
class DefaultProfileSelectApplet final : public ProfileSelectApplet {
public:
- void SelectProfile(SelectProfileCallback callback) const override;
+ void Close() const override;
+ void SelectProfile(SelectProfileCallback callback,
+ const ProfileSelectParameters& parameters) const override;
};
} // namespace Core::Frontend
diff --git a/src/core/frontend/applets/software_keyboard.cpp b/src/core/frontend/applets/software_keyboard.cpp
index a3720f4d7..7655d215b 100644
--- a/src/core/frontend/applets/software_keyboard.cpp
+++ b/src/core/frontend/applets/software_keyboard.cpp
@@ -13,6 +13,8 @@ SoftwareKeyboardApplet::~SoftwareKeyboardApplet() = default;
DefaultSoftwareKeyboardApplet::~DefaultSoftwareKeyboardApplet() = default;
+void DefaultSoftwareKeyboardApplet::Close() const {}
+
void DefaultSoftwareKeyboardApplet::InitializeKeyboard(
bool is_inline, KeyboardInitializeParameters initialize_parameters,
SubmitNormalCallback submit_normal_callback_, SubmitInlineCallback submit_inline_callback_) {
diff --git a/src/core/frontend/applets/software_keyboard.h b/src/core/frontend/applets/software_keyboard.h
index 8aef103d3..8ed96da24 100644
--- a/src/core/frontend/applets/software_keyboard.h
+++ b/src/core/frontend/applets/software_keyboard.h
@@ -7,6 +7,7 @@
#include "common/common_types.h"
+#include "core/frontend/applets/applet.h"
#include "core/hle/service/am/applets/applet_software_keyboard_types.h"
namespace Core::Frontend {
@@ -52,7 +53,7 @@ struct InlineTextParameters {
s32 cursor_position;
};
-class SoftwareKeyboardApplet {
+class SoftwareKeyboardApplet : public Applet {
public:
using SubmitInlineCallback =
std::function<void(Service::AM::Applets::SwkbdReplyType, std::u16string, s32)>;
@@ -84,6 +85,8 @@ class DefaultSoftwareKeyboardApplet final : public SoftwareKeyboardApplet {
public:
~DefaultSoftwareKeyboardApplet() override;
+ void Close() const override;
+
void InitializeKeyboard(bool is_inline, KeyboardInitializeParameters initialize_parameters,
SubmitNormalCallback submit_normal_callback_,
SubmitInlineCallback submit_inline_callback_) override;
diff --git a/src/core/frontend/applets/web_browser.cpp b/src/core/frontend/applets/web_browser.cpp
index b09cb7102..6e703ef06 100644
--- a/src/core/frontend/applets/web_browser.cpp
+++ b/src/core/frontend/applets/web_browser.cpp
@@ -10,6 +10,8 @@ WebBrowserApplet::~WebBrowserApplet() = default;
DefaultWebBrowserApplet::~DefaultWebBrowserApplet() = default;
+void DefaultWebBrowserApplet::Close() const {}
+
void DefaultWebBrowserApplet::OpenLocalWebPage(const std::string& local_url,
ExtractROMFSCallback extract_romfs_callback,
OpenWebPageCallback callback) const {
diff --git a/src/core/frontend/applets/web_browser.h b/src/core/frontend/applets/web_browser.h
index 4f72284ad..178bbdd3f 100644
--- a/src/core/frontend/applets/web_browser.h
+++ b/src/core/frontend/applets/web_browser.h
@@ -5,11 +5,12 @@
#include <functional>
+#include "core/frontend/applets/applet.h"
#include "core/hle/service/am/applets/applet_web_browser_types.h"
namespace Core::Frontend {
-class WebBrowserApplet {
+class WebBrowserApplet : public Applet {
public:
using ExtractROMFSCallback = std::function<void()>;
using OpenWebPageCallback =
@@ -29,6 +30,8 @@ class DefaultWebBrowserApplet final : public WebBrowserApplet {
public:
~DefaultWebBrowserApplet() override;
+ void Close() const override;
+
void OpenLocalWebPage(const std::string& local_url, ExtractROMFSCallback extract_romfs_callback,
OpenWebPageCallback callback) const override;
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.cpp b/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.cpp
index 098ba6eac..24eb3f886 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.cpp
+++ b/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.cpp
@@ -76,22 +76,24 @@ void SetupDevicePhysicalMemoryRegions(KMemoryLayout& memory_layout) {
void SetupDramPhysicalMemoryRegions(KMemoryLayout& memory_layout) {
const size_t intended_memory_size = KSystemControl::Init::GetIntendedMemorySize();
- const PAddr physical_memory_base_address =
+ const KPhysicalAddress physical_memory_base_address =
KSystemControl::Init::GetKernelPhysicalBaseAddress(DramPhysicalAddress);
// Insert blocks into the tree.
ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
- physical_memory_base_address, intended_memory_size, KMemoryRegionType_Dram));
+ GetInteger(physical_memory_base_address), intended_memory_size, KMemoryRegionType_Dram));
ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
- physical_memory_base_address, ReservedEarlyDramSize, KMemoryRegionType_DramReservedEarly));
+ GetInteger(physical_memory_base_address), ReservedEarlyDramSize,
+ KMemoryRegionType_DramReservedEarly));
// Insert the KTrace block at the end of Dram, if KTrace is enabled.
static_assert(!IsKTraceEnabled || KTraceBufferSize > 0);
if constexpr (IsKTraceEnabled) {
- const PAddr ktrace_buffer_phys_addr =
+ const KPhysicalAddress ktrace_buffer_phys_addr =
physical_memory_base_address + intended_memory_size - KTraceBufferSize;
ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
- ktrace_buffer_phys_addr, KTraceBufferSize, KMemoryRegionType_KernelTraceBuffer));
+ GetInteger(ktrace_buffer_phys_addr), KTraceBufferSize,
+ KMemoryRegionType_KernelTraceBuffer));
}
}
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.h b/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.h
index d02ee61c3..f8fee4f5b 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.h
+++ b/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.h
@@ -3,10 +3,10 @@
#pragma once
-#include "common/common_types.h"
+#include "core/hle/kernel/k_typed_address.h"
namespace Kernel {
-constexpr inline PAddr MainMemoryAddress = 0x80000000;
+constexpr inline KPhysicalAddress MainMemoryAddress = 0x80000000;
} // namespace Kernel
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
index 5b8a248c8..36d0d20d2 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
@@ -35,11 +35,11 @@ namespace {
using namespace Common::Literals;
u32 GetMemorySizeForInit() {
- return Settings::values.use_extended_memory_layout ? Smc::MemorySize_6GB : Smc::MemorySize_4GB;
+ return Settings::values.use_extended_memory_layout ? Smc::MemorySize_8GB : Smc::MemorySize_4GB;
}
Smc::MemoryArrangement GetMemoryArrangeForInit() {
- return Settings::values.use_extended_memory_layout ? Smc::MemoryArrangement_6GB
+ return Settings::values.use_extended_memory_layout ? Smc::MemoryArrangement_8GB
: Smc::MemoryArrangement_4GB;
}
} // namespace
@@ -61,7 +61,7 @@ size_t KSystemControl::Init::GetIntendedMemorySize() {
}
}
-PAddr KSystemControl::Init::GetKernelPhysicalBaseAddress(u64 base_address) {
+KPhysicalAddress KSystemControl::Init::GetKernelPhysicalBaseAddress(KPhysicalAddress base_address) {
const size_t real_dram_size = KSystemControl::Init::GetRealMemorySize();
const size_t intended_dram_size = KSystemControl::Init::GetIntendedMemorySize();
if (intended_dram_size * 2 < real_dram_size) {
@@ -91,7 +91,8 @@ std::size_t KSystemControl::Init::GetApplicationPoolSize() {
case Smc::MemoryArrangement_6GBForAppletDev:
return 3285_MiB;
case Smc::MemoryArrangement_8GB:
- return 4916_MiB;
+ // Real kernel sets this to 4916_MiB. We are not debugging applets.
+ return 6547_MiB;
}
}();
@@ -115,7 +116,8 @@ size_t KSystemControl::Init::GetAppletPoolSize() {
case Smc::MemoryArrangement_6GBForAppletDev:
return 2193_MiB;
case Smc::MemoryArrangement_8GB:
- return 2193_MiB;
+ // Real kernel sets this to 2193_MiB. We are not debugging applets.
+ return 562_MiB;
}
}();
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
index 4b717d091..b477e8193 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
@@ -3,7 +3,7 @@
#pragma once
-#include "common/common_types.h"
+#include "core/hle/kernel/k_typed_address.h"
namespace Kernel::Board::Nintendo::Nx {
@@ -18,7 +18,7 @@ public:
// Initialization.
static std::size_t GetRealMemorySize();
static std::size_t GetIntendedMemorySize();
- static PAddr GetKernelPhysicalBaseAddress(u64 base_address);
+ static KPhysicalAddress GetKernelPhysicalBaseAddress(KPhysicalAddress base_address);
static bool ShouldIncreaseThreadResourceLimit();
static std::size_t GetApplicationPoolSize();
static std::size_t GetAppletPoolSize();
diff --git a/src/core/hle/kernel/code_set.h b/src/core/hle/kernel/code_set.h
index 5220dbcb6..af1af2b78 100644
--- a/src/core/hle/kernel/code_set.h
+++ b/src/core/hle/kernel/code_set.h
@@ -5,7 +5,7 @@
#include <cstddef>
-#include "common/common_types.h"
+#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/physical_memory.h"
namespace Kernel {
@@ -36,7 +36,7 @@ struct CodeSet final {
std::size_t offset = 0;
/// The address to map this segment to.
- VAddr addr = 0;
+ KProcessAddress addr = 0;
/// The size of this segment in bytes.
u32 size = 0;
@@ -82,7 +82,7 @@ struct CodeSet final {
std::array<Segment, 3> segments;
/// The entry point address for this code set.
- VAddr entrypoint = 0;
+ KProcessAddress entrypoint = 0;
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp
index 5e4090e2b..1f2db673c 100644
--- a/src/core/hle/kernel/init/init_slab_setup.cpp
+++ b/src/core/hle/kernel/init/init_slab_setup.cpp
@@ -4,7 +4,6 @@
#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_funcs.h"
-#include "common/common_types.h"
#include "core/core.h"
#include "core/device_memory.h"
#include "core/hardware_properties.h"
@@ -30,6 +29,7 @@
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_local_page.h"
#include "core/hle/kernel/k_transfer_memory.h"
+#include "core/hle/kernel/k_typed_address.h"
namespace Kernel::Init {
@@ -104,17 +104,18 @@ static_assert(KernelPageBufferAdditionalSize ==
/// Helper function to translate from the slab virtual address to the reserved location in physical
/// memory.
-static PAddr TranslateSlabAddrToPhysical(KMemoryLayout& memory_layout, VAddr slab_addr) {
- slab_addr -= memory_layout.GetSlabRegionAddress();
- return slab_addr + Core::DramMemoryMap::SlabHeapBase;
+static KPhysicalAddress TranslateSlabAddrToPhysical(KMemoryLayout& memory_layout,
+ KVirtualAddress slab_addr) {
+ slab_addr -= GetInteger(memory_layout.GetSlabRegionAddress());
+ return GetInteger(slab_addr) + Core::DramMemoryMap::SlabHeapBase;
}
template <typename T>
-VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAddr address,
- size_t num_objects) {
+KVirtualAddress InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout,
+ KVirtualAddress address, size_t num_objects) {
const size_t size = Common::AlignUp(sizeof(T) * num_objects, alignof(void*));
- VAddr start = Common::AlignUp(address, alignof(T));
+ KVirtualAddress start = Common::AlignUp(GetInteger(address), alignof(T));
// This should use the virtual memory address passed in, but currently, we do not setup the
// kernel virtual memory layout. Instead, we simply map these at a region of physical memory
@@ -195,7 +196,7 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
auto& kernel = system.Kernel();
// Get the start of the slab region, since that's where we'll be working.
- VAddr address = memory_layout.GetSlabRegionAddress();
+ KVirtualAddress address = memory_layout.GetSlabRegionAddress();
// Initialize slab type array to be in sorted order.
std::array<KSlabType, KSlabType_Count> slab_types;
@@ -228,7 +229,7 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
}
// Track the gaps, so that we can free them to the unused slab tree.
- VAddr gap_start = address;
+ KVirtualAddress gap_start = address;
size_t gap_size = 0;
for (size_t i = 0; i < slab_gaps.size(); i++) {
@@ -280,7 +281,7 @@ void KPageBufferSlabHeap::Initialize(Core::System& system) {
// Allocate memory for the slab.
constexpr auto AllocateOption = KMemoryManager::EncodeOption(
KMemoryManager::Pool::System, KMemoryManager::Direction::FromFront);
- const PAddr slab_address =
+ const KPhysicalAddress slab_address =
kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption);
ASSERT(slab_address != 0);
diff --git a/src/core/hle/kernel/initial_process.h b/src/core/hle/kernel/initial_process.h
index af0fb23b6..82195f4f7 100644
--- a/src/core/hle/kernel/initial_process.h
+++ b/src/core/hle/kernel/initial_process.h
@@ -14,7 +14,7 @@ using namespace Common::Literals;
constexpr std::size_t InitialProcessBinarySizeMax = 12_MiB;
-static inline PAddr GetInitialProcessBinaryPhysicalAddress() {
+static inline KPhysicalAddress GetInitialProcessBinaryPhysicalAddress() {
return Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetKernelPhysicalBaseAddress(
MainMemoryAddress);
}
diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp
index 30a4e6edb..78d43d729 100644
--- a/src/core/hle/kernel/k_address_arbiter.cpp
+++ b/src/core/hle/kernel/k_address_arbiter.cpp
@@ -8,6 +8,7 @@
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_queue.h"
+#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
#include "core/memory.h"
@@ -20,12 +21,12 @@ KAddressArbiter::~KAddressArbiter() = default;
namespace {
-bool ReadFromUser(Core::System& system, s32* out, VAddr address) {
- *out = system.Memory().Read32(address);
+bool ReadFromUser(KernelCore& kernel, s32* out, KProcessAddress address) {
+ *out = GetCurrentMemory(kernel).Read32(GetInteger(address));
return true;
}
-bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) {
+bool DecrementIfLessThan(Core::System& system, s32* out, KProcessAddress address, s32 value) {
auto& monitor = system.Monitor();
const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
@@ -34,22 +35,30 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu
// TODO(bunnei): We should call CanAccessAtomic(..) here.
- // Load the value from the address.
- const s32 current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
+ s32 current_value{};
+
+ while (true) {
+ // Load the value from the address.
+ current_value =
+ static_cast<s32>(monitor.ExclusiveRead32(current_core, GetInteger(address)));
- // Compare it to the desired one.
- if (current_value < value) {
- // If less than, we want to try to decrement.
- const s32 decrement_value = current_value - 1;
+ // Compare it to the desired one.
+ if (current_value < value) {
+ // If less than, we want to try to decrement.
+ const s32 decrement_value = current_value - 1;
+
+ // Decrement and try to store.
+ if (monitor.ExclusiveWrite32(current_core, GetInteger(address),
+ static_cast<u32>(decrement_value))) {
+ break;
+ }
- // Decrement and try to store.
- if (!monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(decrement_value))) {
// If we failed to store, try again.
- DecrementIfLessThan(system, out, address, value);
+ } else {
+ // Otherwise, clear our exclusive hold and finish.
+ monitor.ClearExclusive(current_core);
+ break;
}
- } else {
- // Otherwise, clear our exclusive hold and finish
- monitor.ClearExclusive(current_core);
}
// We're done.
@@ -57,7 +66,8 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu
return true;
}
-bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) {
+bool UpdateIfEqual(Core::System& system, s32* out, KProcessAddress address, s32 value,
+ s32 new_value) {
auto& monitor = system.Monitor();
const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
@@ -66,21 +76,29 @@ bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32
// TODO(bunnei): We should call CanAccessAtomic(..) here.
- // Load the value from the address.
- const s32 current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
+ s32 current_value{};
- // Compare it to the desired one.
- if (current_value == value) {
- // If equal, we want to try to write the new value.
+ // Load the value from the address.
+ while (true) {
+ current_value =
+ static_cast<s32>(monitor.ExclusiveRead32(current_core, GetInteger(address)));
+
+ // Compare it to the desired one.
+ if (current_value == value) {
+ // If equal, we want to try to write the new value.
+
+ // Try to store.
+ if (monitor.ExclusiveWrite32(current_core, GetInteger(address),
+ static_cast<u32>(new_value))) {
+ break;
+ }
- // Try to store.
- if (!monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(new_value))) {
// If we failed to store, try again.
- UpdateIfEqual(system, out, address, value, new_value);
+ } else {
+ // Otherwise, clear our exclusive hold and finish.
+ monitor.ClearExclusive(current_core);
+ break;
}
- } else {
- // Otherwise, clear our exclusive hold and finish.
- monitor.ClearExclusive(current_core);
}
// We're done.
@@ -110,7 +128,7 @@ private:
} // namespace
-Result KAddressArbiter::Signal(VAddr addr, s32 count) {
+Result KAddressArbiter::Signal(uint64_t addr, s32 count) {
// Perform signaling.
s32 num_waiters{};
{
@@ -133,7 +151,7 @@ Result KAddressArbiter::Signal(VAddr addr, s32 count) {
R_SUCCEED();
}
-Result KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count) {
+Result KAddressArbiter::SignalAndIncrementIfEqual(uint64_t addr, s32 value, s32 count) {
// Perform signaling.
s32 num_waiters{};
{
@@ -162,7 +180,7 @@ Result KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 cou
R_SUCCEED();
}
-Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count) {
+Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(uint64_t addr, s32 value, s32 count) {
// Perform signaling.
s32 num_waiters{};
{
@@ -203,7 +221,7 @@ Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 val
if (value != new_value) {
succeeded = UpdateIfEqual(m_system, std::addressof(user_value), addr, value, new_value);
} else {
- succeeded = ReadFromUser(m_system, std::addressof(user_value), addr);
+ succeeded = ReadFromUser(m_kernel, std::addressof(user_value), addr);
}
R_UNLESS(succeeded, ResultInvalidCurrentMemory);
@@ -225,7 +243,7 @@ Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 val
R_SUCCEED();
}
-Result KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
+Result KAddressArbiter::WaitIfLessThan(uint64_t addr, s32 value, bool decrement, s64 timeout) {
// Prepare to wait.
KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
KHardwareTimer* timer{};
@@ -246,7 +264,7 @@ Result KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s6
if (decrement) {
succeeded = DecrementIfLessThan(m_system, std::addressof(user_value), addr, value);
} else {
- succeeded = ReadFromUser(m_system, std::addressof(user_value), addr);
+ succeeded = ReadFromUser(m_kernel, std::addressof(user_value), addr);
}
if (!succeeded) {
@@ -280,7 +298,7 @@ Result KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s6
return cur_thread->GetWaitResult();
}
-Result KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
+Result KAddressArbiter::WaitIfEqual(uint64_t addr, s32 value, s64 timeout) {
// Prepare to wait.
KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
KHardwareTimer* timer{};
@@ -297,7 +315,7 @@ Result KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
// Read the value from userspace.
s32 user_value{};
- if (!ReadFromUser(m_system, std::addressof(user_value), addr)) {
+ if (!ReadFromUser(m_kernel, std::addressof(user_value), addr)) {
slp.CancelSleep();
R_THROW(ResultInvalidCurrentMemory);
}
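For readers skimming the hunks above: the recursive retry in DecrementIfLessThan/UpdateIfEqual is replaced by a loop that repeats the exclusive read-modify-write until the store succeeds. A minimal sketch of the same shape using std::atomic, for illustration only (the emulator goes through its exclusive monitor, not std::atomic):

#include <atomic>
#include <cstdint>

// Sketch: retry the read-modify-write until the compare-exchange succeeds,
// instead of recursing on failure as the removed code did.
// Returns the value observed before any decrement.
inline std::int32_t DecrementIfLessThanSketch(std::atomic<std::int32_t>& word, std::int32_t value) {
    std::int32_t current = word.load(std::memory_order_relaxed);
    while (current < value) {
        // On failure, compare_exchange_weak reloads `current` and the predicate is re-checked.
        if (word.compare_exchange_weak(current, current - 1, std::memory_order_seq_cst)) {
            break;
        }
    }
    return current;
}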
diff --git a/src/core/hle/kernel/k_address_arbiter.h b/src/core/hle/kernel/k_address_arbiter.h
index 9a8c1ae94..3b70e1ab2 100644
--- a/src/core/hle/kernel/k_address_arbiter.h
+++ b/src/core/hle/kernel/k_address_arbiter.h
@@ -25,7 +25,7 @@ public:
explicit KAddressArbiter(Core::System& system);
~KAddressArbiter();
- Result SignalToAddress(VAddr addr, Svc::SignalType type, s32 value, s32 count) {
+ Result SignalToAddress(uint64_t addr, Svc::SignalType type, s32 value, s32 count) {
switch (type) {
case Svc::SignalType::Signal:
R_RETURN(this->Signal(addr, count));
@@ -38,7 +38,7 @@ public:
}
}
- Result WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value, s64 timeout) {
+ Result WaitForAddress(uint64_t addr, Svc::ArbitrationType type, s32 value, s64 timeout) {
switch (type) {
case Svc::ArbitrationType::WaitIfLessThan:
R_RETURN(WaitIfLessThan(addr, value, false, timeout));
@@ -52,11 +52,11 @@ public:
}
private:
- Result Signal(VAddr addr, s32 count);
- Result SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count);
- Result SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count);
- Result WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout);
- Result WaitIfEqual(VAddr addr, s32 value, s64 timeout);
+ Result Signal(uint64_t addr, s32 count);
+ Result SignalAndIncrementIfEqual(uint64_t addr, s32 value, s32 count);
+ Result SignalAndModifyByWaitingCountIfEqual(uint64_t addr, s32 value, s32 count);
+ Result WaitIfLessThan(uint64_t addr, s32 value, bool decrement, s64 timeout);
+ Result WaitIfEqual(uint64_t addr, s32 value, s64 timeout);
private:
ThreadTree m_tree;
diff --git a/src/core/hle/kernel/k_client_session.cpp b/src/core/hle/kernel/k_client_session.cpp
index d998b2be2..72b66270d 100644
--- a/src/core/hle/kernel/k_client_session.cpp
+++ b/src/core/hle/kernel/k_client_session.cpp
@@ -29,7 +29,8 @@ Result KClientSession::SendSyncRequest() {
SCOPE_EXIT({ request->Close(); });
// Initialize the request.
- request->Initialize(nullptr, GetCurrentThread(m_kernel).GetTlsAddress(), MessageBufferSize);
+ request->Initialize(nullptr, GetInteger(GetCurrentThread(m_kernel).GetTlsAddress()),
+ MessageBufferSize);
// Send the request.
R_RETURN(m_parent->GetServerSession().OnRequest(request));
diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp
index 89df6b5d8..3583bee44 100644
--- a/src/core/hle/kernel/k_code_memory.cpp
+++ b/src/core/hle/kernel/k_code_memory.cpp
@@ -19,7 +19,8 @@ namespace Kernel {
KCodeMemory::KCodeMemory(KernelCore& kernel)
: KAutoObjectWithSlabHeapAndContainer{kernel}, m_lock(kernel) {}
-Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, size_t size) {
+Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, KProcessAddress addr,
+ size_t size) {
// Set members.
m_owner = GetCurrentProcessPointer(m_kernel);
@@ -63,7 +64,7 @@ void KCodeMemory::Finalize() {
m_owner->Close();
}
-Result KCodeMemory::Map(VAddr address, size_t size) {
+Result KCodeMemory::Map(KProcessAddress address, size_t size) {
// Validate the size.
R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
@@ -83,7 +84,7 @@ Result KCodeMemory::Map(VAddr address, size_t size) {
R_SUCCEED();
}
-Result KCodeMemory::Unmap(VAddr address, size_t size) {
+Result KCodeMemory::Unmap(KProcessAddress address, size_t size) {
// Validate the size.
R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
@@ -100,7 +101,7 @@ Result KCodeMemory::Unmap(VAddr address, size_t size) {
R_SUCCEED();
}
-Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) {
+Result KCodeMemory::MapToOwner(KProcessAddress address, size_t size, Svc::MemoryPermission perm) {
// Validate the size.
R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
@@ -134,7 +135,7 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission
R_SUCCEED();
}
-Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
+Result KCodeMemory::UnmapFromOwner(KProcessAddress address, size_t size) {
// Validate the size.
R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
diff --git a/src/core/hle/kernel/k_code_memory.h b/src/core/hle/kernel/k_code_memory.h
index 23cbb283b..26fe6b3dc 100644
--- a/src/core/hle/kernel/k_code_memory.h
+++ b/src/core/hle/kernel/k_code_memory.h
@@ -5,12 +5,12 @@
#include <optional>
-#include "common/common_types.h"
#include "core/device_memory.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/kernel/svc_types.h"
#include "core/hle/result.h"
@@ -31,13 +31,13 @@ class KCodeMemory final
public:
explicit KCodeMemory(KernelCore& kernel);
- Result Initialize(Core::DeviceMemory& device_memory, VAddr address, size_t size);
+ Result Initialize(Core::DeviceMemory& device_memory, KProcessAddress address, size_t size);
void Finalize() override;
- Result Map(VAddr address, size_t size);
- Result Unmap(VAddr address, size_t size);
- Result MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm);
- Result UnmapFromOwner(VAddr address, size_t size);
+ Result Map(KProcessAddress address, size_t size);
+ Result Unmap(KProcessAddress address, size_t size);
+ Result MapToOwner(KProcessAddress address, size_t size, Svc::MemoryPermission perm);
+ Result UnmapFromOwner(KProcessAddress address, size_t size);
bool IsInitialized() const override {
return m_is_initialized;
@@ -47,7 +47,7 @@ public:
KProcess* GetOwner() const override {
return m_owner;
}
- VAddr GetSourceAddress() const {
+ KProcessAddress GetSourceAddress() const {
return m_address;
}
size_t GetSize() const {
@@ -57,7 +57,7 @@ public:
private:
std::optional<KPageGroup> m_page_group{};
KProcess* m_owner{};
- VAddr m_address{};
+ KProcessAddress m_address{};
KLightLock m_lock;
bool m_is_initialized{};
bool m_is_owner_mapped{};
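The signature changes above swap raw VAddr for strongly typed addresses. A hypothetical sketch of the shape such a wrapper provides; the real definitions live in k_typed_address.h (and common/typed_address.h) and may differ:

#include <compare>
#include <cstdint>

// Hypothetical stand-in for a typed address such as KProcessAddress.
class SketchProcessAddress {
public:
    constexpr SketchProcessAddress() = default;
    constexpr SketchProcessAddress(std::uint64_t address) : m_address(address) {}

    constexpr SketchProcessAddress operator+(std::uint64_t rhs) const { return m_address + rhs; }
    constexpr std::uint64_t operator-(SketchProcessAddress rhs) const { return m_address - rhs.m_address; }
    constexpr auto operator<=>(const SketchProcessAddress&) const = default;

    constexpr std::uint64_t GetValue() const { return m_address; }

private:
    std::uint64_t m_address{};
};

// GetInteger is the explicit escape hatch back to a raw integer, used at the
// boundaries seen throughout this patch (memory accessors, alignment checks, monitor calls).
constexpr std::uint64_t GetInteger(SketchProcessAddress addr) { return addr.GetValue(); }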
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index 58b8609d8..efbac0e6a 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -18,36 +18,41 @@ namespace Kernel {
namespace {
-bool ReadFromUser(Core::System& system, u32* out, VAddr address) {
- *out = system.Memory().Read32(address);
+bool ReadFromUser(KernelCore& kernel, u32* out, KProcessAddress address) {
+ *out = GetCurrentMemory(kernel).Read32(GetInteger(address));
return true;
}
-bool WriteToUser(Core::System& system, VAddr address, const u32* p) {
- system.Memory().Write32(address, *p);
+bool WriteToUser(KernelCore& kernel, KProcessAddress address, const u32* p) {
+ GetCurrentMemory(kernel).Write32(GetInteger(address), *p);
return true;
}
-bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero,
+bool UpdateLockAtomic(Core::System& system, u32* out, KProcessAddress address, u32 if_zero,
u32 new_orr_mask) {
auto& monitor = system.Monitor();
const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
- // Load the value from the address.
- const auto expected = monitor.ExclusiveRead32(current_core, address);
+ u32 expected{};
- // Orr in the new mask.
- u32 value = expected | new_orr_mask;
+ while (true) {
+ // Load the value from the address.
+ expected = monitor.ExclusiveRead32(current_core, GetInteger(address));
- // If the value is zero, use the if_zero value, otherwise use the newly orr'd value.
- if (!expected) {
- value = if_zero;
- }
+ // Orr in the new mask.
+ u32 value = expected | new_orr_mask;
+
+ // If the value is zero, use the if_zero value, otherwise use the newly orr'd value.
+ if (!expected) {
+ value = if_zero;
+ }
+
+ // Try to store.
+ if (monitor.ExclusiveWrite32(current_core, GetInteger(address), value)) {
+ break;
+ }
- // Try to store.
- if (!monitor.ExclusiveWrite32(current_core, address, value)) {
// If we failed to store, try again.
- return UpdateLockAtomic(system, out, address, if_zero, new_orr_mask);
}
// We're done.
@@ -102,7 +107,7 @@ KConditionVariable::KConditionVariable(Core::System& system)
KConditionVariable::~KConditionVariable() = default;
-Result KConditionVariable::SignalToAddress(VAddr addr) {
+Result KConditionVariable::SignalToAddress(KProcessAddress addr) {
KThread* owner_thread = GetCurrentThreadPointer(m_kernel);
// Signal the address.
@@ -128,7 +133,7 @@ Result KConditionVariable::SignalToAddress(VAddr addr) {
// Write the value to userspace.
Result result{ResultSuccess};
- if (WriteToUser(m_system, addr, std::addressof(next_value))) [[likely]] {
+ if (WriteToUser(m_kernel, addr, std::addressof(next_value))) [[likely]] {
result = ResultSuccess;
} else {
result = ResultInvalidCurrentMemory;
@@ -143,7 +148,7 @@ Result KConditionVariable::SignalToAddress(VAddr addr) {
}
}
-Result KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
+Result KConditionVariable::WaitForAddress(Handle handle, KProcessAddress addr, u32 value) {
KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(m_kernel);
@@ -157,7 +162,7 @@ Result KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value)
// Read the tag from userspace.
u32 test_tag{};
- R_UNLESS(ReadFromUser(m_system, std::addressof(test_tag), addr),
+ R_UNLESS(ReadFromUser(m_kernel, std::addressof(test_tag), addr),
ResultInvalidCurrentMemory);
// If the tag isn't the handle (with wait mask), we're done.
@@ -191,7 +196,7 @@ void KConditionVariable::SignalImpl(KThread* thread) {
ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
// Update the tag.
- VAddr address = thread->GetAddressKey();
+ KProcessAddress address = thread->GetAddressKey();
u32 own_tag = thread->GetAddressKeyValue();
u32 prev_tag{};
@@ -257,12 +262,12 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
// If we have no waiters, clear the has waiter flag.
if (it == m_tree.end() || it->GetConditionVariableKey() != cv_key) {
const u32 has_waiter_flag{};
- WriteToUser(m_system, cv_key, std::addressof(has_waiter_flag));
+ WriteToUser(m_kernel, cv_key, std::addressof(has_waiter_flag));
}
}
}
-Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
+Result KConditionVariable::Wait(KProcessAddress addr, u64 key, u32 value, s64 timeout) {
// Prepare to wait.
KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
KHardwareTimer* timer{};
@@ -301,12 +306,12 @@ Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
// Write to the cv key.
{
const u32 has_waiter_flag = 1;
- WriteToUser(m_system, key, std::addressof(has_waiter_flag));
+ WriteToUser(m_kernel, key, std::addressof(has_waiter_flag));
std::atomic_thread_fence(std::memory_order_seq_cst);
}
// Write the value to userspace.
- if (!WriteToUser(m_system, addr, std::addressof(next_value))) {
+ if (!WriteToUser(m_kernel, addr, std::addressof(next_value))) {
slp.CancelSleep();
R_THROW(ResultInvalidCurrentMemory);
}
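The UpdateLockAtomic change above follows the same loop-until-stored shape. An illustration of its semantics with std::atomic (take if_zero when the word is zero, otherwise OR in the waiter mask), assuming a plain CAS captures the intent of the exclusive monitor here:

#include <atomic>
#include <cstdint>

// Returns the value observed before the update, retrying until the store lands.
inline std::uint32_t UpdateLockAtomicSketch(std::atomic<std::uint32_t>& word, std::uint32_t if_zero,
                                            std::uint32_t new_orr_mask) {
    std::uint32_t expected = word.load(std::memory_order_relaxed);
    while (true) {
        const std::uint32_t desired = (expected == 0) ? if_zero : (expected | new_orr_mask);
        if (word.compare_exchange_weak(expected, desired, std::memory_order_seq_cst)) {
            return expected;
        }
        // On failure, compare_exchange_weak reloaded `expected`; loop and recompute.
    }
}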
diff --git a/src/core/hle/kernel/k_condition_variable.h b/src/core/hle/kernel/k_condition_variable.h
index fbd2c1fc0..8c2f3ae51 100644
--- a/src/core/hle/kernel/k_condition_variable.h
+++ b/src/core/hle/kernel/k_condition_variable.h
@@ -4,10 +4,10 @@
#pragma once
#include "common/assert.h"
-#include "common/common_types.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/result.h"
@@ -25,12 +25,12 @@ public:
~KConditionVariable();
// Arbitration
- Result SignalToAddress(VAddr addr);
- Result WaitForAddress(Handle handle, VAddr addr, u32 value);
+ Result SignalToAddress(KProcessAddress addr);
+ Result WaitForAddress(Handle handle, KProcessAddress addr, u32 value);
// Condition variable
void Signal(u64 cv_key, s32 count);
- Result Wait(VAddr addr, u64 key, u32 value, s64 timeout);
+ Result Wait(KProcessAddress addr, u64 key, u32 value, s64 timeout);
private:
void SignalImpl(KThread* thread);
diff --git a/src/core/hle/kernel/k_device_address_space.cpp b/src/core/hle/kernel/k_device_address_space.cpp
index a2fc4fe1f..f48896715 100644
--- a/src/core/hle/kernel/k_device_address_space.cpp
+++ b/src/core/hle/kernel/k_device_address_space.cpp
@@ -54,8 +54,8 @@ Result KDeviceAddressSpace::Detach(Svc::DeviceName device_name) {
R_SUCCEED();
}
-Result KDeviceAddressSpace::Map(KPageTable* page_table, VAddr process_address, size_t size,
- u64 device_address, u32 option, bool is_aligned) {
+Result KDeviceAddressSpace::Map(KPageTable* page_table, KProcessAddress process_address,
+ size_t size, u64 device_address, u32 option, bool is_aligned) {
// Check that the address falls within the space.
R_UNLESS((m_space_address <= device_address &&
device_address + size - 1 <= m_space_address + m_space_size - 1),
@@ -113,8 +113,8 @@ Result KDeviceAddressSpace::Map(KPageTable* page_table, VAddr process_address, s
R_SUCCEED();
}
-Result KDeviceAddressSpace::Unmap(KPageTable* page_table, VAddr process_address, size_t size,
- u64 device_address) {
+Result KDeviceAddressSpace::Unmap(KPageTable* page_table, KProcessAddress process_address,
+ size_t size, u64 device_address) {
// Check that the address falls within the space.
R_UNLESS((m_space_address <= device_address &&
device_address + size - 1 <= m_space_address + m_space_size - 1),
diff --git a/src/core/hle/kernel/k_device_address_space.h b/src/core/hle/kernel/k_device_address_space.h
index b4a014c38..18556e3cc 100644
--- a/src/core/hle/kernel/k_device_address_space.h
+++ b/src/core/hle/kernel/k_device_address_space.h
@@ -5,8 +5,8 @@
#include <string>
-#include "common/common_types.h"
#include "core/hle/kernel/k_page_table.h"
+#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
@@ -31,23 +31,24 @@ public:
Result Attach(Svc::DeviceName device_name);
Result Detach(Svc::DeviceName device_name);
- Result MapByForce(KPageTable* page_table, VAddr process_address, size_t size,
+ Result MapByForce(KPageTable* page_table, KProcessAddress process_address, size_t size,
u64 device_address, u32 option) {
R_RETURN(this->Map(page_table, process_address, size, device_address, option, false));
}
- Result MapAligned(KPageTable* page_table, VAddr process_address, size_t size,
+ Result MapAligned(KPageTable* page_table, KProcessAddress process_address, size_t size,
u64 device_address, u32 option) {
R_RETURN(this->Map(page_table, process_address, size, device_address, option, true));
}
- Result Unmap(KPageTable* page_table, VAddr process_address, size_t size, u64 device_address);
+ Result Unmap(KPageTable* page_table, KProcessAddress process_address, size_t size,
+ u64 device_address);
static void Initialize();
private:
- Result Map(KPageTable* page_table, VAddr process_address, size_t size, u64 device_address,
- u32 option, bool is_aligned);
+ Result Map(KPageTable* page_table, KProcessAddress process_address, size_t size,
+ u64 device_address, u32 option, bool is_aligned);
private:
KLightLock m_lock;
diff --git a/src/core/hle/kernel/k_dynamic_page_manager.h b/src/core/hle/kernel/k_dynamic_page_manager.h
index ac80d60a1..ad11e84b7 100644
--- a/src/core/hle/kernel/k_dynamic_page_manager.h
+++ b/src/core/hle/kernel/k_dynamic_page_manager.h
@@ -6,9 +6,9 @@
#include <vector>
#include "common/alignment.h"
-#include "common/common_types.h"
#include "core/hle/kernel/k_page_bitmap.h"
#include "core/hle/kernel/k_spin_lock.h"
+#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/memory_types.h"
#include "core/hle/kernel/svc_results.h"
@@ -26,23 +26,23 @@ public:
KDynamicPageManager() = default;
template <typename T>
- T* GetPointer(VAddr addr) {
+ T* GetPointer(KVirtualAddress addr) {
return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address));
}
template <typename T>
- const T* GetPointer(VAddr addr) const {
+ const T* GetPointer(KVirtualAddress addr) const {
return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address));
}
- Result Initialize(VAddr memory, size_t size, size_t align) {
+ Result Initialize(KVirtualAddress memory, size_t size, size_t align) {
// We need to have positive size.
R_UNLESS(size > 0, ResultOutOfMemory);
m_backing_memory.resize(size);
// Set addresses.
m_address = memory;
- m_aligned_address = Common::AlignDown(memory, align);
+ m_aligned_address = Common::AlignDown(GetInteger(memory), align);
// Calculate extents.
const size_t managed_size = m_address + size - m_aligned_address;
@@ -79,7 +79,7 @@ public:
R_SUCCEED();
}
- VAddr GetAddress() const {
+ KVirtualAddress GetAddress() const {
return m_address;
}
size_t GetSize() const {
@@ -145,7 +145,8 @@ public:
KScopedSpinLock lk(m_lock);
// Set the bit for the free page.
- size_t offset = (reinterpret_cast<uintptr_t>(pb) - m_aligned_address) / sizeof(PageBuffer);
+ size_t offset =
+ (reinterpret_cast<uint64_t>(pb) - GetInteger(m_aligned_address)) / sizeof(PageBuffer);
m_page_bitmap.SetBit(offset);
// Decrement our used count.
@@ -158,8 +159,8 @@ private:
size_t m_used{};
size_t m_peak{};
size_t m_count{};
- VAddr m_address{};
- VAddr m_aligned_address{};
+ KVirtualAddress m_address{};
+ KVirtualAddress m_aligned_address{};
size_t m_size{};
// TODO(bunnei): Back by host memory until we emulate kernel virtual address space.
diff --git a/src/core/hle/kernel/k_dynamic_slab_heap.h b/src/core/hle/kernel/k_dynamic_slab_heap.h
index 3a0ddd050..76ed4cac1 100644
--- a/src/core/hle/kernel/k_dynamic_slab_heap.h
+++ b/src/core/hle/kernel/k_dynamic_slab_heap.h
@@ -19,7 +19,7 @@ class KDynamicSlabHeap : protected impl::KSlabHeapImpl {
public:
constexpr KDynamicSlabHeap() = default;
- constexpr VAddr GetAddress() const {
+ constexpr KVirtualAddress GetAddress() const {
return m_address;
}
constexpr size_t GetSize() const {
@@ -35,7 +35,7 @@ public:
return m_count.load();
}
- constexpr bool IsInRange(VAddr addr) const {
+ constexpr bool IsInRange(KVirtualAddress addr) const {
return this->GetAddress() <= addr && addr <= this->GetAddress() + this->GetSize() - 1;
}
@@ -115,7 +115,7 @@ private:
std::atomic<size_t> m_used{};
std::atomic<size_t> m_peak{};
std::atomic<size_t> m_count{};
- VAddr m_address{};
+ KVirtualAddress m_address{};
size_t m_size{};
};
diff --git a/src/core/hle/kernel/k_memory_block.h b/src/core/hle/kernel/k_memory_block.h
index e01929da6..41a29da24 100644
--- a/src/core/hle/kernel/k_memory_block.h
+++ b/src/core/hle/kernel/k_memory_block.h
@@ -5,8 +5,8 @@
#include "common/alignment.h"
#include "common/assert.h"
-#include "common/common_types.h"
#include "common/intrusive_red_black_tree.h"
+#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/memory_types.h"
#include "core/hle/kernel/svc_types.h"
@@ -282,7 +282,7 @@ class KMemoryBlock : public Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock>
private:
u16 m_device_disable_merge_left_count{};
u16 m_device_disable_merge_right_count{};
- VAddr m_address{};
+ KProcessAddress m_address{};
size_t m_num_pages{};
KMemoryState m_memory_state{KMemoryState::None};
u16 m_ipc_lock_count{};
@@ -306,7 +306,7 @@ public:
}
public:
- constexpr VAddr GetAddress() const {
+ constexpr KProcessAddress GetAddress() const {
return m_address;
}
@@ -318,11 +318,11 @@ public:
return this->GetNumPages() * PageSize;
}
- constexpr VAddr GetEndAddress() const {
+ constexpr KProcessAddress GetEndAddress() const {
return this->GetAddress() + this->GetSize();
}
- constexpr VAddr GetLastAddress() const {
+ constexpr KProcessAddress GetLastAddress() const {
return this->GetEndAddress() - 1;
}
@@ -348,7 +348,7 @@ public:
constexpr KMemoryInfo GetMemoryInfo() const {
return {
- .m_address = this->GetAddress(),
+ .m_address = GetInteger(this->GetAddress()),
.m_size = this->GetSize(),
.m_state = m_memory_state,
.m_device_disable_merge_left_count = m_device_disable_merge_left_count,
@@ -366,12 +366,12 @@ public:
public:
explicit KMemoryBlock() = default;
- constexpr KMemoryBlock(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p,
+ constexpr KMemoryBlock(KProcessAddress addr, size_t np, KMemoryState ms, KMemoryPermission p,
KMemoryAttribute attr)
: Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock>(), m_address(addr), m_num_pages(np),
m_memory_state(ms), m_permission(p), m_attribute(attr) {}
- constexpr void Initialize(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p,
+ constexpr void Initialize(KProcessAddress addr, size_t np, KMemoryState ms, KMemoryPermission p,
KMemoryAttribute attr) {
m_device_disable_merge_left_count = 0;
m_device_disable_merge_right_count = 0;
@@ -408,7 +408,7 @@ public:
KMemoryBlockDisableMergeAttribute::None;
}
- constexpr bool Contains(VAddr addr) const {
+ constexpr bool Contains(KProcessAddress addr) const {
return this->GetAddress() <= addr && addr <= this->GetEndAddress();
}
@@ -443,10 +443,10 @@ public:
}
}
- constexpr void Split(KMemoryBlock* block, VAddr addr) {
+ constexpr void Split(KMemoryBlock* block, KProcessAddress addr) {
ASSERT(this->GetAddress() < addr);
ASSERT(this->Contains(addr));
- ASSERT(Common::IsAligned(addr, PageSize));
+ ASSERT(Common::IsAligned(GetInteger(addr), PageSize));
block->m_address = m_address;
block->m_num_pages = (addr - this->GetAddress()) / PageSize;
diff --git a/src/core/hle/kernel/k_memory_block_manager.cpp b/src/core/hle/kernel/k_memory_block_manager.cpp
index cf4c1e371..ab75f550e 100644
--- a/src/core/hle/kernel/k_memory_block_manager.cpp
+++ b/src/core/hle/kernel/k_memory_block_manager.cpp
@@ -7,7 +7,8 @@ namespace Kernel {
KMemoryBlockManager::KMemoryBlockManager() = default;
-Result KMemoryBlockManager::Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager) {
+Result KMemoryBlockManager::Initialize(KProcessAddress st, KProcessAddress nd,
+ KMemoryBlockSlabManager* slab_manager) {
// Allocate a block to encapsulate the address space, insert it into the tree.
KMemoryBlock* start_block = slab_manager->Allocate();
R_UNLESS(start_block != nullptr, ResultOutOfResource);
@@ -15,8 +16,8 @@ Result KMemoryBlockManager::Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManag
// Set our start and end.
m_start_address = st;
m_end_address = nd;
- ASSERT(Common::IsAligned(m_start_address, PageSize));
- ASSERT(Common::IsAligned(m_end_address, PageSize));
+ ASSERT(Common::IsAligned(GetInteger(m_start_address), PageSize));
+ ASSERT(Common::IsAligned(GetInteger(m_end_address), PageSize));
// Initialize and insert the block.
start_block->Initialize(m_start_address, (m_end_address - m_start_address) / PageSize,
@@ -40,12 +41,13 @@ void KMemoryBlockManager::Finalize(KMemoryBlockSlabManager* slab_manager,
ASSERT(m_memory_block_tree.empty());
}
-VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, size_t region_num_pages,
- size_t num_pages, size_t alignment, size_t offset,
- size_t guard_pages) const {
+KProcessAddress KMemoryBlockManager::FindFreeArea(KProcessAddress region_start,
+ size_t region_num_pages, size_t num_pages,
+ size_t alignment, size_t offset,
+ size_t guard_pages) const {
if (num_pages > 0) {
- const VAddr region_end = region_start + region_num_pages * PageSize;
- const VAddr region_last = region_end - 1;
+ const KProcessAddress region_end = region_start + region_num_pages * PageSize;
+ const KProcessAddress region_last = region_end - 1;
for (const_iterator it = this->FindIterator(region_start); it != m_memory_block_tree.cend();
it++) {
const KMemoryInfo info = it->GetMemoryInfo();
@@ -56,17 +58,19 @@ VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, size_t region_num_pa
continue;
}
- VAddr area = (info.GetAddress() <= region_start) ? region_start : info.GetAddress();
+ KProcessAddress area =
+ (info.GetAddress() <= GetInteger(region_start)) ? region_start : info.GetAddress();
area += guard_pages * PageSize;
- const VAddr offset_area = Common::AlignDown(area, alignment) + offset;
+ const KProcessAddress offset_area =
+ Common::AlignDown(GetInteger(area), alignment) + offset;
area = (area <= offset_area) ? offset_area : offset_area + alignment;
- const VAddr area_end = area + num_pages * PageSize + guard_pages * PageSize;
- const VAddr area_last = area_end - 1;
+ const KProcessAddress area_end = area + num_pages * PageSize + guard_pages * PageSize;
+ const KProcessAddress area_last = area_end - 1;
- if (info.GetAddress() <= area && area < area_last && area_last <= region_last &&
- area_last <= info.GetLastAddress()) {
+ if (info.GetAddress() <= GetInteger(area) && area < area_last &&
+ area_last <= region_last && area_last <= info.GetLastAddress()) {
return area;
}
}
@@ -76,7 +80,7 @@ VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, size_t region_num_pa
}
void KMemoryBlockManager::CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator,
- VAddr address, size_t num_pages) {
+ KProcessAddress address, size_t num_pages) {
// Find the iterator now that we've updated.
iterator it = this->FindIterator(address);
if (address != m_start_address) {
@@ -104,18 +108,18 @@ void KMemoryBlockManager::CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator*
}
}
-void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
- size_t num_pages, KMemoryState state, KMemoryPermission perm,
- KMemoryAttribute attr,
+void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator,
+ KProcessAddress address, size_t num_pages, KMemoryState state,
+ KMemoryPermission perm, KMemoryAttribute attr,
KMemoryBlockDisableMergeAttribute set_disable_attr,
KMemoryBlockDisableMergeAttribute clear_disable_attr) {
// Ensure for auditing that we never end up with an invalid tree.
KScopedMemoryBlockManagerAuditor auditor(this);
- ASSERT(Common::IsAligned(address, PageSize));
+ ASSERT(Common::IsAligned(GetInteger(address), PageSize));
ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
KMemoryAttribute::None);
- VAddr cur_address = address;
+ KProcessAddress cur_address = address;
size_t remaining_pages = num_pages;
iterator it = this->FindIterator(address);
@@ -168,17 +172,17 @@ void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator,
}
void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator,
- VAddr address, size_t num_pages, KMemoryState test_state,
- KMemoryPermission test_perm, KMemoryAttribute test_attr,
- KMemoryState state, KMemoryPermission perm,
- KMemoryAttribute attr) {
+ KProcessAddress address, size_t num_pages,
+ KMemoryState test_state, KMemoryPermission test_perm,
+ KMemoryAttribute test_attr, KMemoryState state,
+ KMemoryPermission perm, KMemoryAttribute attr) {
// Ensure for auditing that we never end up with an invalid tree.
KScopedMemoryBlockManagerAuditor auditor(this);
- ASSERT(Common::IsAligned(address, PageSize));
+ ASSERT(Common::IsAligned(GetInteger(address), PageSize));
ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
KMemoryAttribute::None);
- VAddr cur_address = address;
+ KProcessAddress cur_address = address;
size_t remaining_pages = num_pages;
iterator it = this->FindIterator(address);
@@ -230,18 +234,18 @@ void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allo
this->CoalesceForUpdate(allocator, address, num_pages);
}
-void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
- size_t num_pages, MemoryBlockLockFunction lock_func,
- KMemoryPermission perm) {
+void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator,
+ KProcessAddress address, size_t num_pages,
+ MemoryBlockLockFunction lock_func, KMemoryPermission perm) {
// Ensure for auditing that we never end up with an invalid tree.
KScopedMemoryBlockManagerAuditor auditor(this);
- ASSERT(Common::IsAligned(address, PageSize));
+ ASSERT(Common::IsAligned(GetInteger(address), PageSize));
- VAddr cur_address = address;
+ KProcessAddress cur_address = address;
size_t remaining_pages = num_pages;
iterator it = this->FindIterator(address);
- const VAddr end_address = address + (num_pages * PageSize);
+ const KProcessAddress end_address = address + (num_pages * PageSize);
while (remaining_pages > 0) {
const size_t remaining_size = remaining_pages * PageSize;
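The FindFreeArea arithmetic above (align the candidate down, add the offset, bump by one alignment if that lands before the candidate) can be checked in isolation. A small sketch on raw integers, assuming offset < alignment:

#include <cstdint>

// Pick the smallest value >= area that is congruent to `offset` modulo `alignment`.
constexpr std::uint64_t AlignDownSketch(std::uint64_t value, std::uint64_t align) {
    return value - (value % align);
}

constexpr std::uint64_t AlignCandidate(std::uint64_t area, std::uint64_t alignment,
                                       std::uint64_t offset) {
    const std::uint64_t offset_area = AlignDownSketch(area, alignment) + offset;
    return (area <= offset_area) ? offset_area : offset_area + alignment;
}

static_assert(AlignCandidate(0x5000, 0x4000, 0x1000) == 0x5000);
static_assert(AlignCandidate(0x6000, 0x4000, 0x1000) == 0x9000);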
diff --git a/src/core/hle/kernel/k_memory_block_manager.h b/src/core/hle/kernel/k_memory_block_manager.h
index d382722a6..7c0bd16f0 100644
--- a/src/core/hle/kernel/k_memory_block_manager.h
+++ b/src/core/hle/kernel/k_memory_block_manager.h
@@ -7,9 +7,9 @@
#include <functional>
#include "common/common_funcs.h"
-#include "common/common_types.h"
#include "core/hle/kernel/k_dynamic_resource_manager.h"
#include "core/hle/kernel/k_memory_block.h"
+#include "core/hle/kernel/k_typed_address.h"
namespace Kernel {
@@ -85,9 +85,10 @@ public:
public:
KMemoryBlockManager();
- using HostUnmapCallback = std::function<void(VAddr, u64)>;
+ using HostUnmapCallback = std::function<void(Common::ProcessAddress, u64)>;
- Result Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager);
+ Result Initialize(KProcessAddress st, KProcessAddress nd,
+ KMemoryBlockSlabManager* slab_manager);
void Finalize(KMemoryBlockSlabManager* slab_manager, HostUnmapCallback&& host_unmap_callback);
iterator end() {
@@ -100,27 +101,28 @@ public:
return m_memory_block_tree.cend();
}
- VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
- size_t alignment, size_t offset, size_t guard_pages) const;
+ KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
+ size_t num_pages, size_t alignment, size_t offset,
+ size_t guard_pages) const;
- void Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages,
- KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr,
+ void Update(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
+ size_t num_pages, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr,
KMemoryBlockDisableMergeAttribute set_disable_attr,
KMemoryBlockDisableMergeAttribute clear_disable_attr);
- void UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages,
- MemoryBlockLockFunction lock_func, KMemoryPermission perm);
+ void UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
+ size_t num_pages, MemoryBlockLockFunction lock_func, KMemoryPermission perm);
- void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
+ void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm,
KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm,
KMemoryAttribute attr);
- iterator FindIterator(VAddr address) const {
+ iterator FindIterator(KProcessAddress address) const {
return m_memory_block_tree.find(KMemoryBlock(
address, 1, KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None));
}
- const KMemoryBlock* FindBlock(VAddr address) const {
+ const KMemoryBlock* FindBlock(KProcessAddress address) const {
if (const_iterator it = this->FindIterator(address); it != m_memory_block_tree.end()) {
return std::addressof(*it);
}
@@ -132,12 +134,12 @@ public:
bool CheckState() const;
private:
- void CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
+ void CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
size_t num_pages);
MemoryBlockTree m_memory_block_tree;
- VAddr m_start_address{};
- VAddr m_end_address{};
+ KProcessAddress m_start_address{};
+ KProcessAddress m_end_address{};
};
class KScopedMemoryBlockManagerAuditor {
diff --git a/src/core/hle/kernel/k_memory_layout.cpp b/src/core/hle/kernel/k_memory_layout.cpp
index 9ff751119..af40092c0 100644
--- a/src/core/hle/kernel/k_memory_layout.cpp
+++ b/src/core/hle/kernel/k_memory_layout.cpp
@@ -85,7 +85,8 @@ bool KMemoryRegionTree::Insert(u64 address, size_t size, u32 type_id, u32 new_at
return true;
}
-VAddr KMemoryRegionTree::GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id) {
+KVirtualAddress KMemoryRegionTree::GetRandomAlignedRegion(size_t size, size_t alignment,
+ u32 type_id) {
// We want to find the total extents of the type id.
const auto extents = this->GetDerivedRegionExtents(static_cast<KMemoryRegionType>(type_id));
@@ -130,11 +131,13 @@ KMemoryLayout::KMemoryLayout()
m_virtual_linear_tree{m_memory_region_allocator}, m_physical_linear_tree{
m_memory_region_allocator} {}
-void KMemoryLayout::InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start,
- VAddr linear_virtual_start) {
+void KMemoryLayout::InitializeLinearMemoryRegionTrees(KPhysicalAddress aligned_linear_phys_start,
+ KVirtualAddress linear_virtual_start) {
// Set static differences.
- m_linear_phys_to_virt_diff = linear_virtual_start - aligned_linear_phys_start;
- m_linear_virt_to_phys_diff = aligned_linear_phys_start - linear_virtual_start;
+ m_linear_phys_to_virt_diff =
+ GetInteger(linear_virtual_start) - GetInteger(aligned_linear_phys_start);
+ m_linear_virt_to_phys_diff =
+ GetInteger(aligned_linear_phys_start) - GetInteger(linear_virtual_start);
// Initialize linear trees.
for (auto& region : GetPhysicalMemoryRegionTree()) {
diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h
index 551b7a0e4..54a71df56 100644
--- a/src/core/hle/kernel/k_memory_layout.h
+++ b/src/core/hle/kernel/k_memory_layout.h
@@ -10,6 +10,7 @@
#include "core/device_memory.h"
#include "core/hle/kernel/k_memory_region.h"
#include "core/hle/kernel/k_memory_region_type.h"
+#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/memory_types.h"
namespace Kernel {
@@ -69,10 +70,11 @@ constexpr std::size_t KernelResourceSize = KernelPageTableHeapSize + KernelIniti
//! NB: Use KThread::GetAddressKeyIsKernel().
//! See explanation for deviation of GetAddressKey.
-bool IsKernelAddressKey(VAddr key) = delete;
+bool IsKernelAddressKey(KProcessAddress key) = delete;
-constexpr bool IsKernelAddress(VAddr address) {
- return KernelVirtualAddressSpaceBase <= address && address < KernelVirtualAddressSpaceEnd;
+constexpr bool IsKernelAddress(KProcessAddress address) {
+ return KernelVirtualAddressSpaceBase <= GetInteger(address) &&
+ address < KernelVirtualAddressSpaceEnd;
}
class KMemoryLayout final {
@@ -104,38 +106,38 @@ public:
return m_physical_linear_tree;
}
- VAddr GetLinearVirtualAddress(PAddr address) const {
- return address + m_linear_phys_to_virt_diff;
+ KVirtualAddress GetLinearVirtualAddress(KPhysicalAddress address) const {
+ return GetInteger(address) + m_linear_phys_to_virt_diff;
}
- PAddr GetLinearPhysicalAddress(VAddr address) const {
- return address + m_linear_virt_to_phys_diff;
+ KPhysicalAddress GetLinearPhysicalAddress(KVirtualAddress address) const {
+ return GetInteger(address) + m_linear_virt_to_phys_diff;
}
- const KMemoryRegion* FindVirtual(VAddr address) const {
+ const KMemoryRegion* FindVirtual(KVirtualAddress address) const {
return Find(address, GetVirtualMemoryRegionTree());
}
- const KMemoryRegion* FindPhysical(PAddr address) const {
+ const KMemoryRegion* FindPhysical(KPhysicalAddress address) const {
return Find(address, GetPhysicalMemoryRegionTree());
}
- const KMemoryRegion* FindVirtualLinear(VAddr address) const {
+ const KMemoryRegion* FindVirtualLinear(KVirtualAddress address) const {
return Find(address, GetVirtualLinearMemoryRegionTree());
}
- const KMemoryRegion* FindPhysicalLinear(PAddr address) const {
+ const KMemoryRegion* FindPhysicalLinear(KPhysicalAddress address) const {
return Find(address, GetPhysicalLinearMemoryRegionTree());
}
- VAddr GetMainStackTopAddress(s32 core_id) const {
+ KVirtualAddress GetMainStackTopAddress(s32 core_id) const {
return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscMainStack);
}
- VAddr GetIdleStackTopAddress(s32 core_id) const {
+ KVirtualAddress GetIdleStackTopAddress(s32 core_id) const {
return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscIdleStack);
}
- VAddr GetExceptionStackTopAddress(s32 core_id) const {
+ KVirtualAddress GetExceptionStackTopAddress(s32 core_id) const {
return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscExceptionStack);
}
- VAddr GetSlabRegionAddress() const {
+ KVirtualAddress GetSlabRegionAddress() const {
return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelSlab))
.GetAddress();
}
@@ -143,10 +145,10 @@ public:
const KMemoryRegion& GetDeviceRegion(KMemoryRegionType type) const {
return Dereference(GetPhysicalMemoryRegionTree().FindFirstDerived(type));
}
- PAddr GetDevicePhysicalAddress(KMemoryRegionType type) const {
+ KPhysicalAddress GetDevicePhysicalAddress(KMemoryRegionType type) const {
return GetDeviceRegion(type).GetAddress();
}
- VAddr GetDeviceVirtualAddress(KMemoryRegionType type) const {
+ KVirtualAddress GetDeviceVirtualAddress(KMemoryRegionType type) const {
return GetDeviceRegion(type).GetPairAddress();
}
@@ -175,11 +177,11 @@ public:
KMemoryRegionType_VirtualDramKernelSecureAppletMemory));
}
- const KMemoryRegion& GetVirtualLinearRegion(VAddr address) const {
+ const KMemoryRegion& GetVirtualLinearRegion(KVirtualAddress address) const {
return Dereference(FindVirtualLinear(address));
}
- const KMemoryRegion& GetPhysicalLinearRegion(PAddr address) const {
+ const KMemoryRegion& GetPhysicalLinearRegion(KPhysicalAddress address) const {
return Dereference(FindPhysicalLinear(address));
}
@@ -193,29 +195,32 @@ public:
return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_DTB);
}
- bool IsHeapPhysicalAddress(const KMemoryRegion*& region, PAddr address) const {
+ bool IsHeapPhysicalAddress(const KMemoryRegion*& region, KPhysicalAddress address) const {
return IsTypedAddress(region, address, GetPhysicalLinearMemoryRegionTree(),
KMemoryRegionType_DramUserPool);
}
- bool IsHeapVirtualAddress(const KMemoryRegion*& region, VAddr address) const {
+ bool IsHeapVirtualAddress(const KMemoryRegion*& region, KVirtualAddress address) const {
return IsTypedAddress(region, address, GetVirtualLinearMemoryRegionTree(),
KMemoryRegionType_VirtualDramUserPool);
}
- bool IsHeapPhysicalAddress(const KMemoryRegion*& region, PAddr address, size_t size) const {
+ bool IsHeapPhysicalAddress(const KMemoryRegion*& region, KPhysicalAddress address,
+ size_t size) const {
return IsTypedAddress(region, address, size, GetPhysicalLinearMemoryRegionTree(),
KMemoryRegionType_DramUserPool);
}
- bool IsHeapVirtualAddress(const KMemoryRegion*& region, VAddr address, size_t size) const {
+ bool IsHeapVirtualAddress(const KMemoryRegion*& region, KVirtualAddress address,
+ size_t size) const {
return IsTypedAddress(region, address, size, GetVirtualLinearMemoryRegionTree(),
KMemoryRegionType_VirtualDramUserPool);
}
- bool IsLinearMappedPhysicalAddress(const KMemoryRegion*& region, PAddr address) const {
+ bool IsLinearMappedPhysicalAddress(const KMemoryRegion*& region,
+ KPhysicalAddress address) const {
return IsTypedAddress(region, address, GetPhysicalLinearMemoryRegionTree(),
static_cast<KMemoryRegionType>(KMemoryRegionAttr_LinearMapped));
}
- bool IsLinearMappedPhysicalAddress(const KMemoryRegion*& region, PAddr address,
+ bool IsLinearMappedPhysicalAddress(const KMemoryRegion*& region, KPhysicalAddress address,
size_t size) const {
return IsTypedAddress(region, address, size, GetPhysicalLinearMemoryRegionTree(),
static_cast<KMemoryRegionType>(KMemoryRegionAttr_LinearMapped));
@@ -234,8 +239,8 @@ public:
return std::make_pair(total_size, kernel_size);
}
- void InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start,
- VAddr linear_virtual_start);
+ void InitializeLinearMemoryRegionTrees(KPhysicalAddress aligned_linear_phys_start,
+ KVirtualAddress linear_virtual_start);
static size_t GetResourceRegionSizeForInit(bool use_extra_resource);
auto GetKernelRegionExtents() const {
@@ -261,8 +266,8 @@ public:
auto GetLinearRegionVirtualExtents() const {
const auto physical = GetLinearRegionPhysicalExtents();
- return KMemoryRegion(GetLinearVirtualAddress(physical.GetAddress()),
- GetLinearVirtualAddress(physical.GetLastAddress()), 0,
+ return KMemoryRegion(GetInteger(GetLinearVirtualAddress(physical.GetAddress())),
+ GetInteger(GetLinearVirtualAddress(physical.GetLastAddress())), 0,
KMemoryRegionType_None);
}
@@ -334,12 +339,12 @@ private:
static bool IsTypedAddress(const KMemoryRegion*& region, AddressType address,
const KMemoryRegionTree& tree, KMemoryRegionType type) {
// Check if the cached region already contains the address.
- if (region != nullptr && region->Contains(address)) {
+ if (region != nullptr && region->Contains(GetInteger(address))) {
return true;
}
// Find the containing region, and update the cache.
- if (const KMemoryRegion* found = tree.Find(address);
+ if (const KMemoryRegion* found = tree.Find(GetInteger(address));
found != nullptr && found->IsDerivedFrom(type)) {
region = found;
return true;
@@ -352,11 +357,12 @@ private:
static bool IsTypedAddress(const KMemoryRegion*& region, AddressType address, size_t size,
const KMemoryRegionTree& tree, KMemoryRegionType type) {
// Get the end of the checked region.
- const u64 last_address = address + size - 1;
+ const u64 last_address = GetInteger(address) + size - 1;
// Walk the tree to verify the region is correct.
- const KMemoryRegion* cur =
- (region != nullptr && region->Contains(address)) ? region : tree.Find(address);
+ const KMemoryRegion* cur = (region != nullptr && region->Contains(GetInteger(address)))
+ ? region
+ : tree.Find(GetInteger(address));
while (cur != nullptr && cur->IsDerivedFrom(type)) {
if (last_address <= cur->GetLastAddress()) {
region = cur;
@@ -370,7 +376,7 @@ private:
template <typename AddressType>
static const KMemoryRegion* Find(AddressType address, const KMemoryRegionTree& tree) {
- return tree.Find(address);
+ return tree.Find(GetInteger(address));
}
static KMemoryRegion& Dereference(KMemoryRegion* region) {
@@ -383,7 +389,7 @@ private:
return *region;
}
- VAddr GetStackTopAddress(s32 core_id, KMemoryRegionType type) const {
+ KVirtualAddress GetStackTopAddress(s32 core_id, KMemoryRegionType type) const {
const auto& region = Dereference(
GetVirtualMemoryRegionTree().FindByTypeAndAttribute(type, static_cast<u32>(core_id)));
ASSERT(region.GetEndAddress() != 0);
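The stored phys-to-virt and virt-to-phys differences above rely on well-defined unsigned wraparound, so each translation stays a single addition even though one difference is the negation of the other. A minimal sketch with raw u64 values:

#include <cstdint>

struct LinearMapSketch {
    std::uint64_t phys_to_virt_diff{};
    std::uint64_t virt_to_phys_diff{};

    void Initialize(std::uint64_t aligned_linear_phys_start, std::uint64_t linear_virtual_start) {
        // Both subtractions may "underflow"; unsigned wraparound makes the round trip exact.
        phys_to_virt_diff = linear_virtual_start - aligned_linear_phys_start;
        virt_to_phys_diff = aligned_linear_phys_start - linear_virtual_start;
    }

    std::uint64_t ToVirtual(std::uint64_t phys) const { return phys + phys_to_virt_diff; }
    std::uint64_t ToPhysical(std::uint64_t virt) const { return virt + virt_to_phys_diff; }
};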
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index cd6ea388e..74d8169e0 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -5,7 +5,6 @@
#include "common/alignment.h"
#include "common/assert.h"
-#include "common/common_types.h"
#include "common/scope_exit.h"
#include "core/core.h"
#include "core/device_memory.h"
@@ -44,10 +43,10 @@ KMemoryManager::KMemoryManager(Core::System& system)
KLightLock{system.Kernel()},
} {}
-void KMemoryManager::Initialize(VAddr management_region, size_t management_region_size) {
+void KMemoryManager::Initialize(KVirtualAddress management_region, size_t management_region_size) {
// Clear the management region to zero.
- const VAddr management_region_end = management_region + management_region_size;
+ const KVirtualAddress management_region_end = management_region + management_region_size;
// std::memset(GetVoidPointer(management_region), 0, management_region_size);
// Reset our manager count.
@@ -56,7 +55,7 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
// Traverse the virtual memory layout tree, initializing each manager as appropriate.
while (m_num_managers != MaxManagerCount) {
// Locate the region that should initialize the current manager.
- PAddr region_address = 0;
+ KPhysicalAddress region_address = 0;
size_t region_size = 0;
Pool region_pool = Pool::Count;
for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
@@ -70,8 +69,8 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
continue;
}
- const PAddr cur_start = it.GetAddress();
- const PAddr cur_end = it.GetEndAddress();
+ const KPhysicalAddress cur_start = it.GetAddress();
+ const KPhysicalAddress cur_end = it.GetEndAddress();
// Validate the region.
ASSERT(cur_end != 0);
@@ -119,17 +118,17 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
// Free each region to its corresponding heap.
size_t reserved_sizes[MaxManagerCount] = {};
- const PAddr ini_start = GetInitialProcessBinaryPhysicalAddress();
- const PAddr ini_end = ini_start + InitialProcessBinarySizeMax;
- const PAddr ini_last = ini_end - 1;
+ const KPhysicalAddress ini_start = GetInitialProcessBinaryPhysicalAddress();
+ const KPhysicalAddress ini_end = ini_start + InitialProcessBinarySizeMax;
+ const KPhysicalAddress ini_last = ini_end - 1;
for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
// Get the manager for the region.
auto& manager = m_managers[it.GetAttributes()];
- const PAddr cur_start = it.GetAddress();
- const PAddr cur_last = it.GetLastAddress();
- const PAddr cur_end = it.GetEndAddress();
+ const KPhysicalAddress cur_start = it.GetAddress();
+ const KPhysicalAddress cur_last = it.GetLastAddress();
+ const KPhysicalAddress cur_end = it.GetEndAddress();
if (cur_start <= ini_start && ini_last <= cur_last) {
// Free memory before the ini to the heap.
@@ -175,7 +174,8 @@ void KMemoryManager::FinalizeOptimizedMemory(u64 process_id, Pool pool) {
UNREACHABLE();
}
-PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) {
+KPhysicalAddress KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages,
+ u32 option) {
// Early return if we're allocating no pages.
if (num_pages == 0) {
return 0;
@@ -190,7 +190,7 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p
// Loop, trying to iterate from each block.
Impl* chosen_manager = nullptr;
- PAddr allocated_block = 0;
+ KPhysicalAddress allocated_block = 0;
for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr;
chosen_manager = this->GetNextManager(chosen_manager, dir)) {
allocated_block = chosen_manager->AllocateAligned(heap_index, num_pages, align_pages);
@@ -239,7 +239,7 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,
cur_manager = this->GetNextManager(cur_manager, dir)) {
while (num_pages >= pages_per_alloc) {
// Allocate a block.
- PAddr allocated_block = cur_manager->AllocateBlock(index, random);
+ KPhysicalAddress allocated_block = cur_manager->AllocateBlock(index, random);
if (allocated_block == 0) {
break;
}
@@ -286,7 +286,7 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op
// Open the first reference to the pages.
for (const auto& block : *out) {
- PAddr cur_address = block.GetAddress();
+ KPhysicalAddress cur_address = block.GetAddress();
size_t remaining_pages = block.GetNumPages();
while (remaining_pages > 0) {
// Get the manager for the current address.
@@ -337,7 +337,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
// Iterate over the allocated blocks.
for (const auto& block : *out) {
// Get the block extents.
- const PAddr block_address = block.GetAddress();
+ const KPhysicalAddress block_address = block.GetAddress();
const size_t block_pages = block.GetNumPages();
// If it has no pages, we don't need to do anything.
@@ -348,7 +348,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
// Fill all the pages that we need to fill.
bool any_new = false;
{
- PAddr cur_address = block_address;
+ KPhysicalAddress cur_address = block_address;
size_t remaining_pages = block_pages;
while (remaining_pages > 0) {
// Get the manager for the current address.
@@ -369,7 +369,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
// If there are new pages, update tracking for the allocation.
if (any_new) {
// Update tracking for the allocation.
- PAddr cur_address = block_address;
+ KPhysicalAddress cur_address = block_address;
size_t remaining_pages = block_pages;
while (remaining_pages > 0) {
// Get the manager for the current address.
@@ -400,8 +400,9 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
R_SUCCEED();
}
-size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr management,
- VAddr management_end, Pool p) {
+size_t KMemoryManager::Impl::Initialize(KPhysicalAddress address, size_t size,
+ KVirtualAddress management, KVirtualAddress management_end,
+ Pool p) {
// Calculate management sizes.
const size_t ref_count_size = (size / PageSize) * sizeof(u16);
const size_t optimize_map_size = CalculateOptimizedProcessOverheadSize(size);
@@ -417,7 +418,7 @@ size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr manage
m_management_region = management;
m_page_reference_counts.resize(
Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetIntendedMemorySize() / PageSize);
- ASSERT(Common::IsAligned(m_management_region, PageSize));
+ ASSERT(Common::IsAligned(GetInteger(m_management_region), PageSize));
// Initialize the manager's KPageHeap.
m_heap.Initialize(address, size, management + manager_size, page_heap_size);
@@ -425,15 +426,15 @@ size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr manage
return total_management_size;
}
-void KMemoryManager::Impl::TrackUnoptimizedAllocation(PAddr block, size_t num_pages) {
+void KMemoryManager::Impl::TrackUnoptimizedAllocation(KPhysicalAddress block, size_t num_pages) {
UNREACHABLE();
}
-void KMemoryManager::Impl::TrackOptimizedAllocation(PAddr block, size_t num_pages) {
+void KMemoryManager::Impl::TrackOptimizedAllocation(KPhysicalAddress block, size_t num_pages) {
UNREACHABLE();
}
-bool KMemoryManager::Impl::ProcessOptimizedAllocation(PAddr block, size_t num_pages,
+bool KMemoryManager::Impl::ProcessOptimizedAllocation(KPhysicalAddress block, size_t num_pages,
u8 fill_pattern) {
UNREACHABLE();
}
diff --git a/src/core/hle/kernel/k_memory_manager.h b/src/core/hle/kernel/k_memory_manager.h
index 401d4e644..7e4b41319 100644
--- a/src/core/hle/kernel/k_memory_manager.h
+++ b/src/core/hle/kernel/k_memory_manager.h
@@ -7,10 +7,10 @@
#include <tuple>
#include "common/common_funcs.h"
-#include "common/common_types.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_page_heap.h"
+#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/result.h"
namespace Core {
@@ -50,21 +50,21 @@ public:
explicit KMemoryManager(Core::System& system);
- void Initialize(VAddr management_region, size_t management_region_size);
+ void Initialize(KVirtualAddress management_region, size_t management_region_size);
Result InitializeOptimizedMemory(u64 process_id, Pool pool);
void FinalizeOptimizedMemory(u64 process_id, Pool pool);
- PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
+ KPhysicalAddress AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
Result AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option);
Result AllocateForProcess(KPageGroup* out, size_t num_pages, u32 option, u64 process_id,
u8 fill_pattern);
- Pool GetPool(PAddr address) const {
+ Pool GetPool(KPhysicalAddress address) const {
return this->GetManager(address).GetPool();
}
- void Open(PAddr address, size_t num_pages) {
+ void Open(KPhysicalAddress address, size_t num_pages) {
// Repeatedly open references until we've done so for all pages.
while (num_pages) {
auto& manager = this->GetManager(address);
@@ -80,7 +80,7 @@ public:
}
}
- void OpenFirst(PAddr address, size_t num_pages) {
+ void OpenFirst(KPhysicalAddress address, size_t num_pages) {
// Repeatedly open references until we've done so for all pages.
while (num_pages) {
auto& manager = this->GetManager(address);
@@ -96,7 +96,7 @@ public:
}
}
- void Close(PAddr address, size_t num_pages) {
+ void Close(KPhysicalAddress address, size_t num_pages) {
// Repeatedly close references until we've done so for all pages.
while (num_pages) {
auto& manager = this->GetManager(address);
@@ -199,16 +199,16 @@ private:
public:
Impl() = default;
- size_t Initialize(PAddr address, size_t size, VAddr management, VAddr management_end,
- Pool p);
+ size_t Initialize(KPhysicalAddress address, size_t size, KVirtualAddress management,
+ KVirtualAddress management_end, Pool p);
- PAddr AllocateBlock(s32 index, bool random) {
+ KPhysicalAddress AllocateBlock(s32 index, bool random) {
return m_heap.AllocateBlock(index, random);
}
- PAddr AllocateAligned(s32 index, size_t num_pages, size_t align_pages) {
+ KPhysicalAddress AllocateAligned(s32 index, size_t num_pages, size_t align_pages) {
return m_heap.AllocateAligned(index, num_pages, align_pages);
}
- void Free(PAddr addr, size_t num_pages) {
+ void Free(KPhysicalAddress addr, size_t num_pages) {
m_heap.Free(addr, num_pages);
}
@@ -220,10 +220,10 @@ private:
UNIMPLEMENTED();
}
- void TrackUnoptimizedAllocation(PAddr block, size_t num_pages);
- void TrackOptimizedAllocation(PAddr block, size_t num_pages);
+ void TrackUnoptimizedAllocation(KPhysicalAddress block, size_t num_pages);
+ void TrackOptimizedAllocation(KPhysicalAddress block, size_t num_pages);
- bool ProcessOptimizedAllocation(PAddr block, size_t num_pages, u8 fill_pattern);
+ bool ProcessOptimizedAllocation(KPhysicalAddress block, size_t num_pages, u8 fill_pattern);
constexpr Pool GetPool() const {
return m_pool;
@@ -231,7 +231,7 @@ private:
constexpr size_t GetSize() const {
return m_heap.GetSize();
}
- constexpr PAddr GetEndAddress() const {
+ constexpr KPhysicalAddress GetEndAddress() const {
return m_heap.GetEndAddress();
}
@@ -243,10 +243,10 @@ private:
UNIMPLEMENTED();
}
- constexpr size_t GetPageOffset(PAddr address) const {
+ constexpr size_t GetPageOffset(KPhysicalAddress address) const {
return m_heap.GetPageOffset(address);
}
- constexpr size_t GetPageOffsetToEnd(PAddr address) const {
+ constexpr size_t GetPageOffsetToEnd(KPhysicalAddress address) const {
return m_heap.GetPageOffsetToEnd(address);
}
@@ -263,7 +263,7 @@ private:
return m_prev;
}
- void OpenFirst(PAddr address, size_t num_pages) {
+ void OpenFirst(KPhysicalAddress address, size_t num_pages) {
size_t index = this->GetPageOffset(address);
const size_t end = index + num_pages;
while (index < end) {
@@ -274,7 +274,7 @@ private:
}
}
- void Open(PAddr address, size_t num_pages) {
+ void Open(KPhysicalAddress address, size_t num_pages) {
size_t index = this->GetPageOffset(address);
const size_t end = index + num_pages;
while (index < end) {
@@ -285,7 +285,7 @@ private:
}
}
- void Close(PAddr address, size_t num_pages) {
+ void Close(KPhysicalAddress address, size_t num_pages) {
size_t index = this->GetPageOffset(address);
const size_t end = index + num_pages;
@@ -323,18 +323,18 @@ private:
KPageHeap m_heap;
std::vector<RefCount> m_page_reference_counts;
- VAddr m_management_region{};
+ KVirtualAddress m_management_region{};
Pool m_pool{};
Impl* m_next{};
Impl* m_prev{};
};
private:
- Impl& GetManager(PAddr address) {
+ Impl& GetManager(KPhysicalAddress address) {
return m_managers[m_memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
}
- const Impl& GetManager(PAddr address) const {
+ const Impl& GetManager(KPhysicalAddress address) const {
return m_managers[m_memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
}
diff --git a/src/core/hle/kernel/k_memory_region.h b/src/core/hle/kernel/k_memory_region.h
index cfe86fb82..e3044f022 100644
--- a/src/core/hle/kernel/k_memory_region.h
+++ b/src/core/hle/kernel/k_memory_region.h
@@ -5,9 +5,9 @@
#include "common/assert.h"
#include "common/common_funcs.h"
-#include "common/common_types.h"
#include "common/intrusive_red_black_tree.h"
#include "core/hle/kernel/k_memory_region_type.h"
+#include "core/hle/kernel/k_typed_address.h"
namespace Kernel {
@@ -243,10 +243,10 @@ public:
void InsertDirectly(u64 address, u64 last_address, u32 attr = 0, u32 type_id = 0);
bool Insert(u64 address, size_t size, u32 type_id, u32 new_attr = 0, u32 old_attr = 0);
- VAddr GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id);
+ KVirtualAddress GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id);
- VAddr GetRandomAlignedRegionWithGuard(size_t size, size_t alignment, u32 type_id,
- size_t guard_size) {
+ KVirtualAddress GetRandomAlignedRegionWithGuard(size_t size, size_t alignment, u32 type_id,
+ size_t guard_size) {
return this->GetRandomAlignedRegion(size + 2 * guard_size, alignment, type_id) + guard_size;
}
diff --git a/src/core/hle/kernel/k_page_buffer.cpp b/src/core/hle/kernel/k_page_buffer.cpp
index 0c16dded4..e9830e6d9 100644
--- a/src/core/hle/kernel/k_page_buffer.cpp
+++ b/src/core/hle/kernel/k_page_buffer.cpp
@@ -10,8 +10,8 @@
namespace Kernel {
-KPageBuffer* KPageBuffer::FromPhysicalAddress(Core::System& system, PAddr phys_addr) {
- ASSERT(Common::IsAligned(phys_addr, PageSize));
+KPageBuffer* KPageBuffer::FromPhysicalAddress(Core::System& system, KPhysicalAddress phys_addr) {
+ ASSERT(Common::IsAligned(GetInteger(phys_addr), PageSize));
return system.DeviceMemory().GetPointer<KPageBuffer>(phys_addr);
}
diff --git a/src/core/hle/kernel/k_page_buffer.h b/src/core/hle/kernel/k_page_buffer.h
index b7a3ccb4a..f6a7f1e39 100644
--- a/src/core/hle/kernel/k_page_buffer.h
+++ b/src/core/hle/kernel/k_page_buffer.h
@@ -26,7 +26,7 @@ public:
explicit KPageBuffer(KernelCore&) {}
KPageBuffer() = default;
- static KPageBuffer* FromPhysicalAddress(Core::System& system, PAddr phys_addr);
+ static KPageBuffer* FromPhysicalAddress(Core::System& system, KPhysicalAddress phys_addr);
private:
alignas(PageSize) std::array<u8, PageSize> m_buffer{};
diff --git a/src/core/hle/kernel/k_page_group.h b/src/core/hle/kernel/k_page_group.h
index c07f17663..b32909f05 100644
--- a/src/core/hle/kernel/k_page_group.h
+++ b/src/core/hle/kernel/k_page_group.h
@@ -22,7 +22,7 @@ public:
constexpr explicit KBlockInfo() : m_next(nullptr) {}
constexpr void Initialize(KPhysicalAddress addr, size_t np) {
- ASSERT(Common::IsAligned(addr, PageSize));
+ ASSERT(Common::IsAligned(GetInteger(addr), PageSize));
ASSERT(static_cast<u32>(np) == np);
m_page_index = static_cast<u32>(addr / PageSize);
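The hunks above all follow one pattern: helpers such as Common::IsAligned operate on raw integers, so the strongly typed addresses introduced by this change are unwrapped with GetInteger() at those call sites. As a rough illustration of why that unwrapping is needed, here is a minimal, self-contained C++ sketch of a typed physical-address wrapper. It is not the actual k_typed_address.h; only the KPhysicalAddress and GetInteger names are taken from the diff, and everything else (the member layout, the helper, the example addresses) is assumed for illustration.

// Illustrative sketch only -- not the real k_typed_address.h.
#include <cstdint>
#include <cstdio>

using u64 = std::uint64_t;

// A thin wrapper that keeps physical addresses distinct from plain integers.
class KPhysicalAddress {
public:
    constexpr KPhysicalAddress() = default;
    constexpr explicit KPhysicalAddress(u64 value) : m_value(value) {}

    // Arithmetic stays in the address domain.
    constexpr KPhysicalAddress operator+(u64 offset) const {
        return KPhysicalAddress(m_value + offset);
    }
    constexpr u64 operator-(KPhysicalAddress rhs) const {
        return m_value - rhs.m_value;
    }

    constexpr u64 GetValue() const {
        return m_value;
    }

private:
    u64 m_value{};
};

// Free function used to unwrap the type where a raw integer is required.
constexpr u64 GetInteger(KPhysicalAddress address) {
    return address.GetValue();
}

// Stand-in for Common::IsAligned, which takes integers, not typed addresses.
constexpr bool IsAligned(u64 value, u64 alignment) {
    return (value % alignment) == 0;
}

int main() {
    constexpr u64 PageSize = 0x1000;
    const KPhysicalAddress addr{0x8000'0000};

    // The pattern seen throughout the diff: unwrap explicitly at the call site.
    std::printf("aligned: %d\n", static_cast<int>(IsAligned(GetInteger(addr), PageSize)));
}

This is the same shape as the converted assertions in the diff, e.g. ASSERT(Common::IsAligned(GetInteger(phys_addr), PageSize)) in k_page_buffer.cpp.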
diff --git a/src/core/hle/kernel/k_page_heap.cpp b/src/core/hle/kernel/k_page_heap.cpp
index 7b02c7d8b..95762b5a2 100644
--- a/src/core/hle/kernel/k_page_heap.cpp
+++ b/src/core/hle/kernel/k_page_heap.cpp
@@ -6,14 +6,14 @@
namespace Kernel {
-void KPageHeap::Initialize(PAddr address, size_t size, VAddr management_address,
- size_t management_size, const size_t* block_shifts,
- size_t num_block_shifts) {
+void KPageHeap::Initialize(KPhysicalAddress address, size_t size,
+ KVirtualAddress management_address, size_t management_size,
+ const size_t* block_shifts, size_t num_block_shifts) {
// Check our assumptions.
- ASSERT(Common::IsAligned(address, PageSize));
+ ASSERT(Common::IsAligned(GetInteger(address), PageSize));
ASSERT(Common::IsAligned(size, PageSize));
ASSERT(0 < num_block_shifts && num_block_shifts <= NumMemoryBlockPageShifts);
- const VAddr management_end = management_address + management_size;
+ const KVirtualAddress management_end = management_address + management_size;
// Set our members.
m_heap_address = address;
@@ -31,7 +31,7 @@ void KPageHeap::Initialize(PAddr address, size_t size, VAddr management_address,
}
// Ensure we didn't overextend our bounds.
- ASSERT(VAddr(cur_bitmap_storage) <= management_end);
+ ASSERT(KVirtualAddress(cur_bitmap_storage) <= management_end);
}
size_t KPageHeap::GetNumFreePages() const {
@@ -44,11 +44,11 @@ size_t KPageHeap::GetNumFreePages() const {
return num_free;
}
-PAddr KPageHeap::AllocateByLinearSearch(s32 index) {
+KPhysicalAddress KPageHeap::AllocateByLinearSearch(s32 index) {
const size_t needed_size = m_blocks[index].GetSize();
for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) {
- if (const PAddr addr = m_blocks[i].PopBlock(false); addr != 0) {
+ if (const KPhysicalAddress addr = m_blocks[i].PopBlock(false); addr != 0) {
if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) {
this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
}
@@ -59,7 +59,7 @@ PAddr KPageHeap::AllocateByLinearSearch(s32 index) {
return 0;
}
-PAddr KPageHeap::AllocateByRandom(s32 index, size_t num_pages, size_t align_pages) {
+KPhysicalAddress KPageHeap::AllocateByRandom(s32 index, size_t num_pages, size_t align_pages) {
// Get the size and required alignment.
const size_t needed_size = num_pages * PageSize;
const size_t align_size = align_pages * PageSize;
@@ -110,7 +110,7 @@ PAddr KPageHeap::AllocateByRandom(s32 index, size_t num_pages, size_t align_page
}
// Pop a block from the index we selected.
- if (PAddr addr = m_blocks[index].PopBlock(true); addr != 0) {
+ if (KPhysicalAddress addr = m_blocks[index].PopBlock(true); addr != 0) {
// Determine how much size we have left over.
if (const size_t leftover_size = m_blocks[index].GetSize() - needed_size;
leftover_size > 0) {
@@ -141,13 +141,13 @@ PAddr KPageHeap::AllocateByRandom(s32 index, size_t num_pages, size_t align_page
return 0;
}
-void KPageHeap::FreeBlock(PAddr block, s32 index) {
+void KPageHeap::FreeBlock(KPhysicalAddress block, s32 index) {
do {
block = m_blocks[index++].PushBlock(block);
} while (block != 0);
}
-void KPageHeap::Free(PAddr addr, size_t num_pages) {
+void KPageHeap::Free(KPhysicalAddress addr, size_t num_pages) {
// Freeing no pages is a no-op.
if (num_pages == 0) {
return;
@@ -155,16 +155,16 @@ void KPageHeap::Free(PAddr addr, size_t num_pages) {
// Find the largest block size that we can free, and free as many as possible.
s32 big_index = static_cast<s32>(m_num_blocks) - 1;
- const PAddr start = addr;
- const PAddr end = addr + num_pages * PageSize;
- PAddr before_start = start;
- PAddr before_end = start;
- PAddr after_start = end;
- PAddr after_end = end;
+ const KPhysicalAddress start = addr;
+ const KPhysicalAddress end = addr + num_pages * PageSize;
+ KPhysicalAddress before_start = start;
+ KPhysicalAddress before_end = start;
+ KPhysicalAddress after_start = end;
+ KPhysicalAddress after_end = end;
while (big_index >= 0) {
const size_t block_size = m_blocks[big_index].GetSize();
- const PAddr big_start = Common::AlignUp(start, block_size);
- const PAddr big_end = Common::AlignDown(end, block_size);
+ const KPhysicalAddress big_start = Common::AlignUp(GetInteger(start), block_size);
+ const KPhysicalAddress big_end = Common::AlignDown(GetInteger(end), block_size);
if (big_start < big_end) {
// Free as many big blocks as we can.
for (auto block = big_start; block < big_end; block += block_size) {
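The Free() hunk above carves a freed range into the largest blocks that fit: the interior of the range, aligned to the current big block size, is freed wholesale, while the unaligned remainders before and after fall through to smaller block sizes on later iterations. A small worked sketch of that alignment arithmetic, using stand-in AlignUp/AlignDown helpers and made-up addresses (all names and values here are illustrative, not taken from the source), might look like this:

#include <cstdint>
#include <cstdio>

using u64 = std::uint64_t;

// Stand-ins for Common::AlignUp / Common::AlignDown on raw integers.
// Both assume a power-of-two alignment.
constexpr u64 AlignUp(u64 value, u64 alignment) {
    return (value + alignment - 1) & ~(alignment - 1);
}
constexpr u64 AlignDown(u64 value, u64 alignment) {
    return value & ~(alignment - 1);
}

int main() {
    constexpr u64 PageSize = 0x1000;
    const u64 start = 0x8000'3000;          // not aligned to the big block size
    const u64 end = 0x8080'0000 + PageSize; // one page past a 2 MiB boundary
    const u64 block_size = 0x20'0000;       // 2 MiB big blocks

    // The aligned interior can be freed as whole big blocks...
    const u64 big_start = AlignUp(start, block_size);
    const u64 big_end = AlignDown(end, block_size);

    // ...while the unaligned remainders are handled with smaller block sizes.
    std::printf("big blocks: [%#llx, %#llx)\n",
                static_cast<unsigned long long>(big_start),
                static_cast<unsigned long long>(big_end));
    std::printf("leftover:   [%#llx, %#llx) and [%#llx, %#llx)\n",
                static_cast<unsigned long long>(start),
                static_cast<unsigned long long>(big_start),
                static_cast<unsigned long long>(big_end),
                static_cast<unsigned long long>(end));
}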
diff --git a/src/core/hle/kernel/k_page_heap.h b/src/core/hle/kernel/k_page_heap.h
index 9021edcf7..c55225bac 100644
--- a/src/core/hle/kernel/k_page_heap.h
+++ b/src/core/hle/kernel/k_page_heap.h
@@ -8,8 +8,8 @@
#include "common/alignment.h"
#include "common/common_funcs.h"
-#include "common/common_types.h"
#include "core/hle/kernel/k_page_bitmap.h"
+#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/memory_types.h"
namespace Kernel {
@@ -18,24 +18,24 @@ class KPageHeap {
public:
KPageHeap() = default;
- constexpr PAddr GetAddress() const {
+ constexpr KPhysicalAddress GetAddress() const {
return m_heap_address;
}
constexpr size_t GetSize() const {
return m_heap_size;
}
- constexpr PAddr GetEndAddress() const {
+ constexpr KPhysicalAddress GetEndAddress() const {
return this->GetAddress() + this->GetSize();
}
- constexpr size_t GetPageOffset(PAddr block) const {
+ constexpr size_t GetPageOffset(KPhysicalAddress block) const {
return (block - this->GetAddress()) / PageSize;
}
- constexpr size_t GetPageOffsetToEnd(PAddr block) const {
+ constexpr size_t GetPageOffsetToEnd(KPhysicalAddress block) const {
return (this->GetEndAddress() - block) / PageSize;
}
- void Initialize(PAddr heap_address, size_t heap_size, VAddr management_address,
- size_t management_size) {
+ void Initialize(KPhysicalAddress heap_address, size_t heap_size,
+ KVirtualAddress management_address, size_t management_size) {
return this->Initialize(heap_address, heap_size, management_address, management_size,
MemoryBlockPageShifts.data(), NumMemoryBlockPageShifts);
}
@@ -53,7 +53,7 @@ public:
m_initial_used_size = m_heap_size - free_size - reserved_size;
}
- PAddr AllocateBlock(s32 index, bool random) {
+ KPhysicalAddress AllocateBlock(s32 index, bool random) {
if (random) {
const size_t block_pages = m_blocks[index].GetNumPages();
return this->AllocateByRandom(index, block_pages, block_pages);
@@ -62,12 +62,12 @@ public:
}
}
- PAddr AllocateAligned(s32 index, size_t num_pages, size_t align_pages) {
+ KPhysicalAddress AllocateAligned(s32 index, size_t num_pages, size_t align_pages) {
// TODO: linear search support?
return this->AllocateByRandom(index, num_pages, align_pages);
}
- void Free(PAddr addr, size_t num_pages);
+ void Free(KPhysicalAddress addr, size_t num_pages);
static size_t CalculateManagementOverheadSize(size_t region_size) {
return CalculateManagementOverheadSize(region_size, MemoryBlockPageShifts.data(),
@@ -125,24 +125,25 @@ private:
return this->GetNumFreeBlocks() * this->GetNumPages();
}
- u64* Initialize(PAddr addr, size_t size, size_t bs, size_t nbs, u64* bit_storage) {
+ u64* Initialize(KPhysicalAddress addr, size_t size, size_t bs, size_t nbs,
+ u64* bit_storage) {
// Set shifts.
m_block_shift = bs;
m_next_block_shift = nbs;
// Align up the address.
- PAddr end = addr + size;
+ KPhysicalAddress end = addr + size;
const size_t align = (m_next_block_shift != 0) ? (u64(1) << m_next_block_shift)
: (u64(1) << m_block_shift);
- addr = Common::AlignDown(addr, align);
- end = Common::AlignUp(end, align);
+ addr = Common::AlignDown(GetInteger(addr), align);
+ end = Common::AlignUp(GetInteger(end), align);
m_heap_address = addr;
m_end_offset = (end - addr) / (u64(1) << m_block_shift);
return m_bitmap.Initialize(bit_storage, m_end_offset);
}
- PAddr PushBlock(PAddr address) {
+ KPhysicalAddress PushBlock(KPhysicalAddress address) {
// Set the bit for the free block.
size_t offset = (address - m_heap_address) >> this->GetShift();
m_bitmap.SetBit(offset);
@@ -161,7 +162,7 @@ private:
return {};
}
- PAddr PopBlock(bool random) {
+ KPhysicalAddress PopBlock(bool random) {
// Find a free block.
s64 soffset = m_bitmap.FindFreeBlock(random);
if (soffset < 0) {
@@ -187,18 +188,19 @@ private:
private:
KPageBitmap m_bitmap;
- PAddr m_heap_address{};
+ KPhysicalAddress m_heap_address{};
uintptr_t m_end_offset{};
size_t m_block_shift{};
size_t m_next_block_shift{};
};
private:
- void Initialize(PAddr heap_address, size_t heap_size, VAddr management_address,
- size_t management_size, const size_t* block_shifts, size_t num_block_shifts);
+ void Initialize(KPhysicalAddress heap_address, size_t heap_size,
+ KVirtualAddress management_address, size_t management_size,
+ const size_t* block_shifts, size_t num_block_shifts);
size_t GetNumFreePages() const;
- void FreeBlock(PAddr block, s32 index);
+ void FreeBlock(KPhysicalAddress block, s32 index);
static constexpr size_t NumMemoryBlockPageShifts{7};
static constexpr std::array<size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{
@@ -206,14 +208,14 @@ private:
};
private:
- PAddr AllocateByLinearSearch(s32 index);
- PAddr AllocateByRandom(s32 index, size_t num_pages, size_t align_pages);
+ KPhysicalAddress AllocateByLinearSearch(s32 index);
+ KPhysicalAddress AllocateByRandom(s32 index, size_t num_pages, size_t align_pages);
static size_t CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts,
size_t num_block_shifts);
private:
- PAddr m_heap_address{};
+ KPhysicalAddress m_heap_address{};
size_t m_heap_size{};
size_t m_initial_used_size{};
size_t m_num_blocks{};
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 2e13d5d0d..02b5cada4 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -106,9 +106,10 @@ KPageTable::~KPageTable() = default;
Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
bool enable_das_merge, bool from_back,
- KMemoryManager::Pool pool, VAddr code_addr,
+ KMemoryManager::Pool pool, KProcessAddress code_addr,
size_t code_size, KSystemResource* system_resource,
- KResourceLimit* resource_limit) {
+ KResourceLimit* resource_limit,
+ Core::Memory::Memory& memory) {
const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) {
return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type);
@@ -117,10 +118,13 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type);
};
+ // Set the tracking memory
+ m_memory = std::addressof(memory);
+
// Set our width and heap/alias sizes
m_address_space_width = GetAddressSpaceWidthFromType(as_type);
- const VAddr start = 0;
- const VAddr end{1ULL << m_address_space_width};
+ const KProcessAddress start = 0;
+ const KProcessAddress end{1ULL << m_address_space_width};
size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)};
size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)};
@@ -135,8 +139,8 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
// Set code regions and determine remaining
constexpr size_t RegionAlignment{2_MiB};
- VAddr process_code_start{};
- VAddr process_code_end{};
+ KProcessAddress process_code_start{};
+ KProcessAddress process_code_end{};
size_t stack_region_size{};
size_t kernel_map_region_size{};
@@ -149,8 +153,8 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit);
m_alias_code_region_start = m_code_region_start;
m_alias_code_region_end = m_code_region_end;
- process_code_start = Common::AlignDown(code_addr, RegionAlignment);
- process_code_end = Common::AlignUp(code_addr + code_size, RegionAlignment);
+ process_code_start = Common::AlignDown(GetInteger(code_addr), RegionAlignment);
+ process_code_end = Common::AlignUp(GetInteger(code_addr) + code_size, RegionAlignment);
} else {
stack_region_size = 0;
kernel_map_region_size = 0;
@@ -178,7 +182,7 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
m_resource_limit = resource_limit;
// Determine the region we can place our undetermineds in
- VAddr alloc_start{};
+ KProcessAddress alloc_start{};
size_t alloc_size{};
if ((process_code_start - m_code_region_start) >= (end - process_code_end)) {
alloc_start = m_code_region_start;
@@ -292,7 +296,7 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
: KMemoryManager::Direction::FromFront);
// Ensure that we selected regions inside our address space
- auto IsInAddressSpace = [&](VAddr addr) {
+ auto IsInAddressSpace = [&](KProcessAddress addr) {
return m_address_space_start <= addr && addr <= m_address_space_end;
};
ASSERT(IsInAddressSpace(m_alias_region_start));
@@ -305,14 +309,14 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
ASSERT(IsInAddressSpace(m_kernel_map_region_end));
// Ensure that we selected regions that don't overlap
- const VAddr alias_start{m_alias_region_start};
- const VAddr alias_last{m_alias_region_end - 1};
- const VAddr heap_start{m_heap_region_start};
- const VAddr heap_last{m_heap_region_end - 1};
- const VAddr stack_start{m_stack_region_start};
- const VAddr stack_last{m_stack_region_end - 1};
- const VAddr kmap_start{m_kernel_map_region_start};
- const VAddr kmap_last{m_kernel_map_region_end - 1};
+ const KProcessAddress alias_start{m_alias_region_start};
+ const KProcessAddress alias_last{m_alias_region_end - 1};
+ const KProcessAddress heap_start{m_heap_region_start};
+ const KProcessAddress heap_last{m_heap_region_end - 1};
+ const KProcessAddress stack_start{m_stack_region_start};
+ const KProcessAddress stack_last{m_stack_region_end - 1};
+ const KProcessAddress kmap_start{m_kernel_map_region_start};
+ const KProcessAddress kmap_last{m_kernel_map_region_end - 1};
ASSERT(alias_last < heap_start || heap_last < alias_start);
ASSERT(alias_last < stack_start || stack_last < alias_start);
ASSERT(alias_last < kmap_start || kmap_last < alias_start);
@@ -334,9 +338,10 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
void KPageTable::Finalize() {
// Finalize memory blocks.
- m_memory_block_manager.Finalize(m_memory_block_slab_manager, [&](VAddr addr, u64 size) {
- m_system.Memory().UnmapRegion(*m_page_table_impl, addr, size);
- });
+ m_memory_block_manager.Finalize(m_memory_block_slab_manager,
+ [&](KProcessAddress addr, u64 size) {
+ m_memory->UnmapRegion(*m_page_table_impl, addr, size);
+ });
// Release any insecure mapped memory.
if (m_mapped_insecure_memory) {
@@ -352,7 +357,7 @@ void KPageTable::Finalize() {
m_page_table_impl.reset();
}
-Result KPageTable::MapProcessCode(VAddr addr, size_t num_pages, KMemoryState state,
+Result KPageTable::MapProcessCode(KProcessAddress addr, size_t num_pages, KMemoryState state,
KMemoryPermission perm) {
const u64 size{num_pages * PageSize};
@@ -388,7 +393,8 @@ Result KPageTable::MapProcessCode(VAddr addr, size_t num_pages, KMemoryState sta
R_SUCCEED();
}
-Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size) {
+Result KPageTable::MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address,
+ size_t size) {
// Validate the mapping request.
R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
ResultInvalidMemoryRegion);
@@ -473,7 +479,8 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t si
R_SUCCEED();
}
-Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size,
+Result KPageTable::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address,
+ size_t size,
ICacheInvalidationStrategy icache_invalidation_strategy) {
// Validate the mapping request.
R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
@@ -525,7 +532,7 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t
SCOPE_EXIT({
if (reprotected_pages && any_code_pages) {
if (icache_invalidation_strategy == ICacheInvalidationStrategy::InvalidateRange) {
- m_system.InvalidateCpuInstructionCacheRange(dst_address, size);
+ m_system.InvalidateCpuInstructionCacheRange(GetInteger(dst_address), size);
} else {
m_system.InvalidateCpuInstructionCaches();
}
@@ -575,9 +582,10 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t
R_SUCCEED();
}
-VAddr KPageTable::FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
- size_t alignment, size_t offset, size_t guard_pages) {
- VAddr address = 0;
+KProcessAddress KPageTable::FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
+ size_t num_pages, size_t alignment, size_t offset,
+ size_t guard_pages) {
+ KProcessAddress address = 0;
if (num_pages <= region_num_pages) {
if (this->IsAslrEnabled()) {
@@ -593,7 +601,7 @@ VAddr KPageTable::FindFreeArea(VAddr region_start, size_t region_num_pages, size
return address;
}
-Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
+Result KPageTable::MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages) {
ASSERT(this->IsLockedByCurrentThread());
const size_t size = num_pages * PageSize;
@@ -604,11 +612,11 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
// Begin traversal.
Common::PageTable::TraversalContext context;
Common::PageTable::TraversalEntry next_entry;
- R_UNLESS(m_page_table_impl->BeginTraversal(next_entry, context, addr),
+ R_UNLESS(m_page_table_impl->BeginTraversal(next_entry, context, GetInteger(addr)),
ResultInvalidCurrentMemory);
// Prepare tracking variables.
- PAddr cur_addr = next_entry.phys_addr;
+ KPhysicalAddress cur_addr = next_entry.phys_addr;
size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1));
size_t tot_size = cur_size;
@@ -646,7 +654,7 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
R_SUCCEED();
}
-bool KPageTable::IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages) {
+bool KPageTable::IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, size_t num_pages) {
ASSERT(this->IsLockedByCurrentThread());
const size_t size = num_pages * PageSize;
@@ -659,7 +667,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_p
// We're going to validate that the group we'd expect is the group we see.
auto cur_it = pg.begin();
- PAddr cur_block_address = cur_it->GetAddress();
+ KPhysicalAddress cur_block_address = cur_it->GetAddress();
size_t cur_block_pages = cur_it->GetNumPages();
auto UpdateCurrentIterator = [&]() {
@@ -677,12 +685,12 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_p
// Begin traversal.
Common::PageTable::TraversalContext context;
Common::PageTable::TraversalEntry next_entry;
- if (!m_page_table_impl->BeginTraversal(next_entry, context, addr)) {
+ if (!m_page_table_impl->BeginTraversal(next_entry, context, GetInteger(addr))) {
return false;
}
// Prepare tracking variables.
- PAddr cur_addr = next_entry.phys_addr;
+ KPhysicalAddress cur_addr = next_entry.phys_addr;
size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1));
size_t tot_size = cur_size;
@@ -734,8 +742,8 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_p
return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize);
}
-Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table,
- VAddr src_addr) {
+Result KPageTable::UnmapProcessMemory(KProcessAddress dst_addr, size_t size,
+ KPageTable& src_page_table, KProcessAddress src_addr) {
// Acquire the table locks.
KScopedLightLockPair lk(src_page_table.m_general_lock, m_general_lock);
@@ -774,8 +782,8 @@ Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& s
}
Result KPageTable::SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed,
- VAddr address, size_t size, KMemoryPermission test_perm,
- KMemoryState dst_state) {
+ KProcessAddress address, size_t size,
+ KMemoryPermission test_perm, KMemoryState dst_state) {
// Validate pre-conditions.
ASSERT(this->IsLockedByCurrentThread());
ASSERT(test_perm == KMemoryPermission::UserReadWrite ||
@@ -790,10 +798,10 @@ Result KPageTable::SetupForIpcClient(PageLinkedList* page_list, size_t* out_bloc
: KMemoryPermission::UserRead;
// Get aligned extents.
- const VAddr aligned_src_start = Common::AlignDown((address), PageSize);
- const VAddr aligned_src_end = Common::AlignUp((address) + size, PageSize);
- const VAddr mapping_src_start = Common::AlignUp((address), PageSize);
- const VAddr mapping_src_end = Common::AlignDown((address) + size, PageSize);
+ const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(address), PageSize);
+ const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(address) + size, PageSize);
+ const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(address), PageSize);
+ const KProcessAddress mapping_src_end = Common::AlignDown(GetInteger(address) + size, PageSize);
const auto aligned_src_last = (aligned_src_end)-1;
const auto mapping_src_last = (mapping_src_end)-1;
@@ -840,14 +848,15 @@ Result KPageTable::SetupForIpcClient(PageLinkedList* page_list, size_t* out_bloc
test_attr_mask, KMemoryAttribute::None));
if (mapping_src_start < mapping_src_end && (mapping_src_start) < info.GetEndAddress() &&
- info.GetAddress() < (mapping_src_end)) {
- const auto cur_start =
- info.GetAddress() >= (mapping_src_start) ? info.GetAddress() : (mapping_src_start);
+ info.GetAddress() < GetInteger(mapping_src_end)) {
+ const auto cur_start = info.GetAddress() >= GetInteger(mapping_src_start)
+ ? info.GetAddress()
+ : (mapping_src_start);
const auto cur_end = mapping_src_last >= info.GetLastAddress() ? info.GetEndAddress()
: (mapping_src_end);
const size_t cur_size = cur_end - cur_start;
- if (info.GetAddress() < (mapping_src_start)) {
+ if (info.GetAddress() < GetInteger(mapping_src_start)) {
++blocks_needed;
}
if (mapping_src_last < info.GetLastAddress()) {
@@ -882,30 +891,32 @@ Result KPageTable::SetupForIpcClient(PageLinkedList* page_list, size_t* out_bloc
R_SUCCEED();
}
-Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_addr,
- KMemoryPermission test_perm, KMemoryState dst_state,
- KPageTable& src_page_table, bool send) {
+Result KPageTable::SetupForIpcServer(KProcessAddress* out_addr, size_t size,
+ KProcessAddress src_addr, KMemoryPermission test_perm,
+ KMemoryState dst_state, KPageTable& src_page_table,
+ bool send) {
ASSERT(this->IsLockedByCurrentThread());
ASSERT(src_page_table.IsLockedByCurrentThread());
// Check that we can theoretically map.
- const VAddr region_start = m_alias_region_start;
+ const KProcessAddress region_start = m_alias_region_start;
const size_t region_size = m_alias_region_end - m_alias_region_start;
R_UNLESS(size < region_size, ResultOutOfAddressSpace);
// Get aligned source extents.
- const VAddr src_start = src_addr;
- const VAddr src_end = src_addr + size;
- const VAddr aligned_src_start = Common::AlignDown((src_start), PageSize);
- const VAddr aligned_src_end = Common::AlignUp((src_start) + size, PageSize);
- const VAddr mapping_src_start = Common::AlignUp((src_start), PageSize);
- const VAddr mapping_src_end = Common::AlignDown((src_start) + size, PageSize);
+ const KProcessAddress src_start = src_addr;
+ const KProcessAddress src_end = src_addr + size;
+ const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(src_start), PageSize);
+ const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(src_start) + size, PageSize);
+ const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(src_start), PageSize);
+ const KProcessAddress mapping_src_end =
+ Common::AlignDown(GetInteger(src_start) + size, PageSize);
const size_t aligned_src_size = aligned_src_end - aligned_src_start;
const size_t mapping_src_size =
(mapping_src_start < mapping_src_end) ? (mapping_src_end - mapping_src_start) : 0;
// Select a random address to map at.
- VAddr dst_addr =
+ KProcessAddress dst_addr =
this->FindFreeArea(region_start, region_size / PageSize, aligned_src_size / PageSize,
PageSize, 0, this->GetNumGuardPages());
@@ -930,9 +941,9 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
// Ensure that we manage page references correctly.
- PAddr start_partial_page = 0;
- PAddr end_partial_page = 0;
- VAddr cur_mapped_addr = dst_addr;
+ KPhysicalAddress start_partial_page = 0;
+ KPhysicalAddress end_partial_page = 0;
+ KProcessAddress cur_mapped_addr = dst_addr;
// If the partial pages are mapped, an extra reference will have been opened. Otherwise, they'll
// be freed on scope exit.
@@ -977,11 +988,12 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
// Begin traversal.
Common::PageTable::TraversalContext context;
Common::PageTable::TraversalEntry next_entry;
- bool traverse_valid = src_impl.BeginTraversal(next_entry, context, aligned_src_start);
+ bool traverse_valid =
+ src_impl.BeginTraversal(next_entry, context, GetInteger(aligned_src_start));
ASSERT(traverse_valid);
// Prepare tracking variables.
- PAddr cur_block_addr = next_entry.phys_addr;
+ KPhysicalAddress cur_block_addr = next_entry.phys_addr;
size_t cur_block_size =
next_entry.block_size - ((cur_block_addr) & (next_entry.block_size - 1));
size_t tot_block_size = cur_block_size;
@@ -989,7 +1001,7 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
// Map the start page, if we have one.
if (start_partial_page != 0) {
// Ensure the page holds correct data.
- const VAddr start_partial_virt =
+ const KVirtualAddress start_partial_virt =
GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), start_partial_page);
if (send) {
const size_t partial_offset = src_start - aligned_src_start;
@@ -1002,21 +1014,22 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
clear_size = 0;
}
- std::memset(m_system.Memory().GetPointer<void>(start_partial_virt), fill_val,
+ std::memset(m_memory->GetPointer<void>(GetInteger(start_partial_virt)), fill_val,
partial_offset);
std::memcpy(
- m_system.Memory().GetPointer<void>(start_partial_virt + partial_offset),
- m_system.Memory().GetPointer<void>(
- GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), cur_block_addr) +
- partial_offset),
+ m_memory->GetPointer<void>(GetInteger(start_partial_virt) + partial_offset),
+ m_memory->GetPointer<void>(GetInteger(GetHeapVirtualAddress(
+ m_system.Kernel().MemoryLayout(), cur_block_addr)) +
+ partial_offset),
copy_size);
if (clear_size > 0) {
- std::memset(m_system.Memory().GetPointer<void>(start_partial_virt + partial_offset +
- copy_size),
+ std::memset(m_memory->GetPointer<void>(GetInteger(start_partial_virt) +
+ partial_offset + copy_size),
fill_val, clear_size);
}
} else {
- std::memset(m_system.Memory().GetPointer<void>(start_partial_virt), fill_val, PageSize);
+ std::memset(m_memory->GetPointer<void>(GetInteger(start_partial_virt)), fill_val,
+ PageSize);
}
// Map the page.
@@ -1061,7 +1074,8 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
}
// Handle the last direct-mapped page.
- if (const VAddr mapped_block_end = aligned_src_start + tot_block_size - cur_block_size;
+ if (const KProcessAddress mapped_block_end =
+ aligned_src_start + tot_block_size - cur_block_size;
mapped_block_end < mapping_src_end) {
const size_t last_block_size = mapping_src_end - mapped_block_end;
@@ -1084,18 +1098,19 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
// Map the end page, if we have one.
if (end_partial_page != 0) {
// Ensure the page holds correct data.
- const VAddr end_partial_virt =
+ const KVirtualAddress end_partial_virt =
GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), end_partial_page);
if (send) {
const size_t copy_size = src_end - mapping_src_end;
- std::memcpy(m_system.Memory().GetPointer<void>(end_partial_virt),
- m_system.Memory().GetPointer<void>(GetHeapVirtualAddress(
- m_system.Kernel().MemoryLayout(), cur_block_addr)),
+ std::memcpy(m_memory->GetPointer<void>(GetInteger(end_partial_virt)),
+ m_memory->GetPointer<void>(GetInteger(GetHeapVirtualAddress(
+ m_system.Kernel().MemoryLayout(), cur_block_addr))),
copy_size);
- std::memset(m_system.Memory().GetPointer<void>(end_partial_virt + copy_size), fill_val,
- PageSize - copy_size);
+ std::memset(m_memory->GetPointer<void>(GetInteger(end_partial_virt) + copy_size),
+ fill_val, PageSize - copy_size);
} else {
- std::memset(m_system.Memory().GetPointer<void>(end_partial_virt), fill_val, PageSize);
+ std::memset(m_memory->GetPointer<void>(GetInteger(end_partial_virt)), fill_val,
+ PageSize);
}
// Map the page.
@@ -1116,7 +1131,7 @@ Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_add
R_SUCCEED();
}
-Result KPageTable::SetupForIpc(VAddr* out_dst_addr, size_t size, VAddr src_addr,
+Result KPageTable::SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr,
KPageTable& src_page_table, KMemoryPermission test_perm,
KMemoryState dst_state, bool send) {
// For convenience, alias this.
@@ -1142,8 +1157,8 @@ Result KPageTable::SetupForIpc(VAddr* out_dst_addr, size_t size, VAddr src_addr,
R_TRY(allocator_result);
// Get the mapped extents.
- const VAddr src_map_start = Common::AlignUp((src_addr), PageSize);
- const VAddr src_map_end = Common::AlignDown((src_addr) + size, PageSize);
+ const KProcessAddress src_map_start = Common::AlignUp(GetInteger(src_addr), PageSize);
+ const KProcessAddress src_map_end = Common::AlignDown(GetInteger(src_addr) + size, PageSize);
const size_t src_map_size = src_map_end - src_map_start;
// Ensure that we clean up appropriately if we fail after this.
@@ -1172,7 +1187,8 @@ Result KPageTable::SetupForIpc(VAddr* out_dst_addr, size_t size, VAddr src_addr,
R_SUCCEED();
}
-Result KPageTable::CleanupForIpcServer(VAddr address, size_t size, KMemoryState dst_state) {
+Result KPageTable::CleanupForIpcServer(KProcessAddress address, size_t size,
+ KMemoryState dst_state) {
// Validate the address.
R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
@@ -1196,8 +1212,8 @@ Result KPageTable::CleanupForIpcServer(VAddr address, size_t size, KMemoryState
KScopedPageTableUpdater updater(this);
// Get aligned extents.
- const VAddr aligned_start = Common::AlignDown((address), PageSize);
- const VAddr aligned_end = Common::AlignUp((address) + size, PageSize);
+ const KProcessAddress aligned_start = Common::AlignDown(GetInteger(address), PageSize);
+ const KProcessAddress aligned_end = Common::AlignUp(GetInteger(address) + size, PageSize);
const size_t aligned_size = aligned_end - aligned_start;
const size_t aligned_num_pages = aligned_size / PageSize;
@@ -1211,22 +1227,23 @@ Result KPageTable::CleanupForIpcServer(VAddr address, size_t size, KMemoryState
KMemoryBlockDisableMergeAttribute::Normal);
// Release from the resource limit as relevant.
- const VAddr mapping_start = Common::AlignUp((address), PageSize);
- const VAddr mapping_end = Common::AlignDown((address) + size, PageSize);
+ const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize);
+ const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize);
const size_t mapping_size = (mapping_start < mapping_end) ? mapping_end - mapping_start : 0;
m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, aligned_size - mapping_size);
R_SUCCEED();
}
-Result KPageTable::CleanupForIpcClient(VAddr address, size_t size, KMemoryState dst_state) {
+Result KPageTable::CleanupForIpcClient(KProcessAddress address, size_t size,
+ KMemoryState dst_state) {
// Validate the address.
R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
// Get aligned source extents.
- const VAddr mapping_start = Common::AlignUp((address), PageSize);
- const VAddr mapping_end = Common::AlignDown((address) + size, PageSize);
- const VAddr mapping_last = mapping_end - 1;
+ const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize);
+ const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize);
+ const KProcessAddress mapping_last = mapping_end - 1;
const size_t mapping_size = (mapping_start < mapping_end) ? (mapping_end - mapping_start) : 0;
// If nothing was mapped, we're actually done immediately.
@@ -1279,7 +1296,7 @@ Result KPageTable::CleanupForIpcClient(VAddr address, size_t size, KMemoryState
KMemoryInfo cur_info = start_it->GetMemoryInfo();
// Create tracking variables.
- VAddr cur_address = cur_info.GetAddress();
+ KProcessAddress cur_address = cur_info.GetAddress();
size_t cur_size = cur_info.GetSize();
bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
@@ -1352,7 +1369,7 @@ Result KPageTable::CleanupForIpcClient(VAddr address, size_t size, KMemoryState
.IsSuccess());
// Create tracking variables.
- VAddr cur_address = cur_info.GetAddress();
+ KProcessAddress cur_address = cur_info.GetAddress();
size_t cur_size = cur_info.GetSize();
bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
@@ -1439,16 +1456,16 @@ Result KPageTable::CleanupForIpcClient(VAddr address, size_t size, KMemoryState
}
void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLinkedList* page_list,
- VAddr address, size_t size,
+ KProcessAddress address, size_t size,
KMemoryPermission prot_perm) {
ASSERT(this->IsLockedByCurrentThread());
- ASSERT(Common::IsAligned(address, PageSize));
+ ASSERT(Common::IsAligned(GetInteger(address), PageSize));
ASSERT(Common::IsAligned(size, PageSize));
// Get the mapped extents.
- const VAddr src_map_start = address;
- const VAddr src_map_end = address + size;
- const VAddr src_map_last = src_map_end - 1;
+ const KProcessAddress src_map_start = address;
+ const KProcessAddress src_map_end = address + size;
+ const KProcessAddress src_map_last = src_map_end - 1;
// This function is only invoked when there's something to do.
ASSERT(src_map_end > src_map_start);
@@ -1458,8 +1475,9 @@ void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLi
while (true) {
const KMemoryInfo info = it->GetMemoryInfo();
- const auto cur_start =
- info.GetAddress() >= src_map_start ? info.GetAddress() : src_map_start;
+ const auto cur_start = info.GetAddress() >= GetInteger(src_map_start)
+ ? info.GetAddress()
+ : GetInteger(src_map_start);
const auto cur_end =
src_map_last <= info.GetLastAddress() ? src_map_end : info.GetEndAddress();
@@ -1469,7 +1487,7 @@ void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLi
(info.GetIpcLockCount() != 0 &&
(info.GetOriginalPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm)) {
// Check if we actually need to fix the protections on the block.
- if (cur_end == src_map_end || info.GetAddress() <= src_map_start ||
+ if (cur_end == src_map_end || info.GetAddress() <= GetInteger(src_map_start) ||
(info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) {
ASSERT(Operate(cur_start, (cur_end - cur_start) / PageSize, info.GetPermission(),
OperationType::ChangePermissions)
@@ -1488,15 +1506,15 @@ void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLi
}
}
-Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
+Result KPageTable::MapPhysicalMemory(KProcessAddress address, size_t size) {
// Lock the physical memory lock.
KScopedLightLock phys_lk(m_map_physical_memory_lock);
// Calculate the last address for convenience.
- const VAddr last_address = address + size - 1;
+ const KProcessAddress last_address = address + size - 1;
// Define iteration variables.
- VAddr cur_address;
+ KProcessAddress cur_address;
size_t mapped_size;
// The entire mapping process can be retried.
@@ -1528,7 +1546,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
// Track the memory if it's mapped.
if (info.GetState() != KMemoryState::Free) {
- mapped_size += VAddr(info.GetEndAddress()) - cur_address;
+ mapped_size += KProcessAddress(info.GetEndAddress()) - cur_address;
}
// Advance.
@@ -1581,7 +1599,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
const bool is_free = info.GetState() == KMemoryState::Free;
if (is_free) {
- if (info.GetAddress() < address) {
+ if (info.GetAddress() < GetInteger(address)) {
++num_allocator_blocks;
}
if (last_address < info.GetLastAddress()) {
@@ -1599,7 +1617,8 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
// Track the memory if it's mapped.
if (!is_free) {
- checked_mapped_size += VAddr(info.GetEndAddress()) - cur_address;
+ checked_mapped_size +=
+ KProcessAddress(info.GetEndAddress()) - cur_address;
}
// Advance.
@@ -1627,7 +1646,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
// Prepare to iterate over the memory.
auto pg_it = pg.begin();
- PAddr pg_phys_addr = pg_it->GetAddress();
+ KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
size_t pg_pages = pg_it->GetNumPages();
// Reset the current tracking address, and make sure we clean up on failure.
@@ -1635,7 +1654,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
cur_address = address;
ON_RESULT_FAILURE {
if (cur_address > address) {
- const VAddr last_unmap_address = cur_address - 1;
+ const KProcessAddress last_unmap_address = cur_address - 1;
// Iterate, unmapping the pages.
cur_address = address;
@@ -1652,7 +1671,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
if (info.GetState() == KMemoryState::Free) {
// Determine the range to unmap.
const size_t cur_pages =
- std::min(VAddr(info.GetEndAddress()) - cur_address,
+ std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
last_unmap_address + 1 - cur_address) /
PageSize;
@@ -1695,9 +1714,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
// If it's unmapped, we need to map it.
if (info.GetState() == KMemoryState::Free) {
// Determine the range to map.
- size_t map_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
- last_address + 1 - cur_address) /
- PageSize;
+ size_t map_pages =
+ std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
+ last_address + 1 - cur_address) /
+ PageSize;
// While we have pages to map, map them.
while (map_pages > 0) {
@@ -1754,7 +1774,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
}
}
-Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
+Result KPageTable::UnmapPhysicalMemory(KProcessAddress address, size_t size) {
// Lock the physical memory lock.
KScopedLightLock phys_lk(m_map_physical_memory_lock);
@@ -1762,13 +1782,13 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
KScopedLightLock lk(m_general_lock);
// Calculate the last address for convenience.
- const VAddr last_address = address + size - 1;
+ const KProcessAddress last_address = address + size - 1;
// Define iteration variables.
- VAddr map_start_address = 0;
- VAddr map_last_address = 0;
+ KProcessAddress map_start_address = 0;
+ KProcessAddress map_last_address = 0;
- VAddr cur_address;
+ KProcessAddress cur_address;
size_t mapped_size;
size_t num_allocator_blocks = 0;
@@ -1801,7 +1821,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
map_last_address =
(last_address >= info.GetLastAddress()) ? info.GetLastAddress() : last_address;
- if (info.GetAddress() < address) {
+ if (info.GetAddress() < GetInteger(address)) {
++num_allocator_blocks;
}
if (last_address < info.GetLastAddress()) {
@@ -1854,7 +1874,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
// If the memory state is normal, we need to unmap it.
if (info.GetState() == KMemoryState::Normal) {
// Determine the range to unmap.
- const size_t cur_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
+ const size_t cur_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
last_address + 1 - cur_address) /
PageSize;
@@ -2144,13 +2164,14 @@ void KPageTable::RemapPageGroup(PageLinkedList* page_list, KProcessAddress addre
const KMemoryInfo info = it->GetMemoryInfo();
// Determine the range to map.
- KProcessAddress map_address = std::max<VAddr>(info.GetAddress(), start_address);
- const KProcessAddress map_end_address = std::min<VAddr>(info.GetEndAddress(), end_address);
+ KProcessAddress map_address = std::max<KProcessAddress>(info.GetAddress(), start_address);
+ const KProcessAddress map_end_address =
+ std::min<KProcessAddress>(info.GetEndAddress(), end_address);
ASSERT(map_end_address != map_address);
// Determine if we should disable head merge.
const bool disable_head_merge =
- info.GetAddress() >= start_address &&
+ info.GetAddress() >= GetInteger(start_address) &&
True(info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Normal);
const KPageProperties map_properties = {
info.GetPermission(), false, false,
@@ -2214,7 +2235,7 @@ Result KPageTable::MapPages(KProcessAddress* out_addr, size_t num_pages, size_t
KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment,
0, this->GetNumGuardPages());
R_UNLESS(addr != 0, ResultOutOfMemory);
- ASSERT(Common::IsAligned(addr, alignment));
+ ASSERT(Common::IsAligned(GetInteger(addr), alignment));
ASSERT(this->CanContain(addr, num_pages * PageSize, state));
ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free,
KMemoryPermission::None, KMemoryPermission::None,
@@ -2455,7 +2476,7 @@ Result KPageTable::UnmapPageGroup(KProcessAddress address, const KPageGroup& pg,
R_SUCCEED();
}
-Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
+Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr) {
@@ -2480,7 +2501,7 @@ Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t n
R_SUCCEED();
}
-Result KPageTable::SetProcessMemoryPermission(VAddr addr, size_t size,
+Result KPageTable::SetProcessMemoryPermission(KProcessAddress addr, size_t size,
Svc::MemoryPermission svc_perm) {
const size_t num_pages = size / PageSize;
@@ -2541,23 +2562,23 @@ Result KPageTable::SetProcessMemoryPermission(VAddr addr, size_t size,
// Ensure cache coherency, if we're setting pages as executable.
if (is_x) {
- m_system.InvalidateCpuInstructionCacheRange(addr, size);
+ m_system.InvalidateCpuInstructionCacheRange(GetInteger(addr), size);
}
R_SUCCEED();
}
-KMemoryInfo KPageTable::QueryInfoImpl(VAddr addr) {
+KMemoryInfo KPageTable::QueryInfoImpl(KProcessAddress addr) {
KScopedLightLock lk(m_general_lock);
return m_memory_block_manager.FindBlock(addr)->GetMemoryInfo();
}
-KMemoryInfo KPageTable::QueryInfo(VAddr addr) {
+KMemoryInfo KPageTable::QueryInfo(KProcessAddress addr) {
if (!Contains(addr, 1)) {
return {
- .m_address = m_address_space_end,
- .m_size = 0 - m_address_space_end,
+ .m_address = GetInteger(m_address_space_end),
+ .m_size = 0 - GetInteger(m_address_space_end),
.m_state = static_cast<KMemoryState>(Svc::MemoryState::Inaccessible),
.m_device_disable_merge_left_count = 0,
.m_device_disable_merge_right_count = 0,
@@ -2574,7 +2595,8 @@ KMemoryInfo KPageTable::QueryInfo(VAddr addr) {
return QueryInfoImpl(addr);
}
-Result KPageTable::SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm) {
+Result KPageTable::SetMemoryPermission(KProcessAddress addr, size_t size,
+ Svc::MemoryPermission svc_perm) {
const size_t num_pages = size / PageSize;
// Lock the table.
@@ -2611,7 +2633,7 @@ Result KPageTable::SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermi
R_SUCCEED();
}
-Result KPageTable::SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr) {
+Result KPageTable::SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr) {
const size_t num_pages = size / PageSize;
ASSERT((static_cast<KMemoryAttribute>(mask) | KMemoryAttribute::SetMask) ==
KMemoryAttribute::SetMask);
@@ -2666,12 +2688,12 @@ Result KPageTable::SetMaxHeapSize(size_t size) {
R_SUCCEED();
}
-Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
+Result KPageTable::SetHeapSize(u64* out, size_t size) {
// Lock the physical memory mutex.
KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
// Try to perform a reduction in heap, instead of an extension.
- VAddr cur_address{};
+ KProcessAddress cur_address{};
size_t allocation_size{};
{
// Lock the table.
@@ -2722,11 +2744,11 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
m_current_heap_end = m_heap_region_start + size;
// Set the output.
- *out = m_heap_region_start;
+ *out = GetInteger(m_heap_region_start);
R_SUCCEED();
} else if (size == GetHeapSize()) {
// The size requested is exactly the current size.
- *out = m_heap_region_start;
+ *out = GetInteger(m_heap_region_start);
R_SUCCEED();
} else {
// We have to allocate memory. Determine how much to allocate and where while the table
@@ -2780,7 +2802,7 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
// Clear all the newly allocated pages.
for (size_t cur_page = 0; cur_page < num_pages; ++cur_page) {
- std::memset(m_system.Memory().GetPointer(m_current_heap_end + (cur_page * PageSize)), 0,
+ std::memset(m_memory->GetPointer(m_current_heap_end + (cur_page * PageSize)), 0,
PageSize);
}
@@ -2799,14 +2821,14 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
m_current_heap_end = m_heap_region_start + size;
// Set the output.
- *out = m_heap_region_start;
+ *out = GetInteger(m_heap_region_start);
R_SUCCEED();
}
}
-Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size,
- KMemoryPermission perm, bool is_aligned,
- bool check_heap) {
+Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address,
+ size_t size, KMemoryPermission perm,
+ bool is_aligned, bool check_heap) {
// Lightly validate the range before doing anything else.
const size_t num_pages = size / PageSize;
R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
@@ -2842,7 +2864,8 @@ Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address,
R_SUCCEED();
}
-Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size, bool check_heap) {
+Result KPageTable::LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size,
+ bool check_heap) {
// Lightly validate the range before doing anything else.
const size_t num_pages = size / PageSize;
R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
@@ -2876,7 +2899,7 @@ Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size, bo
R_SUCCEED();
}
-Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, size_t size) {
+Result KPageTable::UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) {
// Lightly validate the range before doing anything else.
const size_t num_pages = size / PageSize;
R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
@@ -2904,7 +2927,8 @@ Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, size_t size) {
R_SUCCEED();
}
-Result KPageTable::LockForIpcUserBuffer(PAddr* out, VAddr address, size_t size) {
+Result KPageTable::LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address,
+ size_t size) {
R_RETURN(this->LockMemoryAndOpen(
nullptr, out, address, size, KMemoryState::FlagCanIpcUserBuffer,
KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::All,
@@ -2913,7 +2937,7 @@ Result KPageTable::LockForIpcUserBuffer(PAddr* out, VAddr address, size_t size)
KMemoryAttribute::Locked));
}
-Result KPageTable::UnlockForIpcUserBuffer(VAddr address, size_t size) {
+Result KPageTable::UnlockForIpcUserBuffer(KProcessAddress address, size_t size) {
R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanIpcUserBuffer,
KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::None,
KMemoryPermission::None, KMemoryAttribute::All,
@@ -2921,7 +2945,7 @@ Result KPageTable::UnlockForIpcUserBuffer(VAddr address, size_t size) {
KMemoryAttribute::Locked, nullptr));
}
-Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size) {
+Result KPageTable::LockForCodeMemory(KPageGroup* out, KProcessAddress addr, size_t size) {
R_RETURN(this->LockMemoryAndOpen(
out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
@@ -2929,17 +2953,17 @@ Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size) {
KMemoryAttribute::Locked));
}
-Result KPageTable::UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg) {
+Result KPageTable::UnlockForCodeMemory(KProcessAddress addr, size_t size, const KPageGroup& pg) {
R_RETURN(this->UnlockMemory(
addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All,
KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg));
}
-bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const {
- auto start_ptr = m_system.DeviceMemory().GetPointer<u8>(addr);
+bool KPageTable::IsRegionContiguous(KProcessAddress addr, u64 size) const {
+ auto start_ptr = m_system.DeviceMemory().GetPointer<u8>(GetInteger(addr));
for (u64 offset{}; offset < size; offset += PageSize) {
- if (start_ptr != m_system.DeviceMemory().GetPointer<u8>(addr + offset)) {
+ if (start_ptr != m_system.DeviceMemory().GetPointer<u8>(GetInteger(addr) + offset)) {
return false;
}
start_ptr += PageSize;
@@ -2947,18 +2971,19 @@ bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const {
return true;
}
-void KPageTable::AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list) {
- VAddr addr{start};
+void KPageTable::AddRegionToPages(KProcessAddress start, size_t num_pages,
+ KPageGroup& page_linked_list) {
+ KProcessAddress addr{start};
while (addr < start + (num_pages * PageSize)) {
- const PAddr paddr{GetPhysicalAddr(addr)};
+ const KPhysicalAddress paddr{GetPhysicalAddr(addr)};
ASSERT(paddr != 0);
page_linked_list.AddBlock(paddr, 1);
addr += PageSize;
}
}
-VAddr KPageTable::AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages,
- size_t align) {
+KProcessAddress KPageTable::AllocateVirtualMemory(KProcessAddress start, size_t region_num_pages,
+ u64 needed_num_pages, size_t align) {
if (m_enable_aslr) {
UNIMPLEMENTED();
}
@@ -2966,11 +2991,11 @@ VAddr KPageTable::AllocateVirtualMemory(VAddr start, size_t region_num_pages, u6
IsKernel() ? 1 : 4);
}
-Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group,
+Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, const KPageGroup& page_group,
OperationType operation) {
ASSERT(this->IsLockedByCurrentThread());
- ASSERT(Common::IsAligned(addr, PageSize));
+ ASSERT(Common::IsAligned(GetInteger(addr), PageSize));
ASSERT(num_pages > 0);
ASSERT(num_pages == page_group.GetNumPages());
@@ -2983,7 +3008,7 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_
const size_t size{node.GetNumPages() * PageSize};
// Map the pages.
- m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress());
+ m_memory->MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress());
addr += size;
}
@@ -3001,12 +3026,12 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_
R_SUCCEED();
}
-Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm,
- OperationType operation, PAddr map_addr) {
+Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, KMemoryPermission perm,
+ OperationType operation, KPhysicalAddress map_addr) {
ASSERT(this->IsLockedByCurrentThread());
ASSERT(num_pages > 0);
- ASSERT(Common::IsAligned(addr, PageSize));
+ ASSERT(Common::IsAligned(GetInteger(addr), PageSize));
ASSERT(ContainsPages(addr, num_pages));
switch (operation) {
@@ -3016,14 +3041,14 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm,
SCOPE_EXIT({ pages_to_close.CloseAndReset(); });
this->AddRegionToPages(addr, num_pages, pages_to_close);
- m_system.Memory().UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize);
+ m_memory->UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize);
break;
}
case OperationType::MapFirst:
case OperationType::Map: {
ASSERT(map_addr);
- ASSERT(Common::IsAligned(map_addr, PageSize));
- m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr);
+ ASSERT(Common::IsAligned(GetInteger(map_addr), PageSize));
+ m_memory->MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr);
// Open references to pages, if we should.
if (IsHeapPhysicalAddress(m_kernel.MemoryLayout(), map_addr)) {
@@ -3060,7 +3085,7 @@ void KPageTable::FinalizeUpdate(PageLinkedList* page_list) {
}
}
-VAddr KPageTable::GetRegionAddress(KMemoryState state) const {
+KProcessAddress KPageTable::GetRegionAddress(KMemoryState state) const {
switch (state) {
case KMemoryState::Free:
case KMemoryState::Kernel:
@@ -3132,11 +3157,11 @@ size_t KPageTable::GetRegionSize(KMemoryState state) const {
}
}
-bool KPageTable::CanContain(VAddr addr, size_t size, KMemoryState state) const {
- const VAddr end = addr + size;
- const VAddr last = end - 1;
+bool KPageTable::CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
+ const KProcessAddress end = addr + size;
+ const KProcessAddress last = end - 1;
- const VAddr region_start = this->GetRegionAddress(state);
+ const KProcessAddress region_start = this->GetRegionAddress(state);
const size_t region_size = this->GetRegionSize(state);
const bool is_in_region =
@@ -3191,21 +3216,21 @@ Result KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_
R_SUCCEED();
}
-Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size,
- KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask,
+Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr,
+ size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr) const {
ASSERT(this->IsLockedByCurrentThread());
// Get information about the first block.
- const VAddr last_addr = addr + size - 1;
+ const KProcessAddress last_addr = addr + size - 1;
KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
KMemoryInfo info = it->GetMemoryInfo();
// If the start address isn't aligned, we need a block.
const size_t blocks_for_start_align =
- (Common::AlignDown(addr, PageSize) != info.GetAddress()) ? 1 : 0;
+ (Common::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0;
while (true) {
// Validate against the provided masks.
@@ -3224,7 +3249,7 @@ Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr a
// If the end address isn't aligned, we need a block.
const size_t blocks_for_end_align =
- (Common::AlignUp(addr + size, PageSize) != info.GetEndAddress()) ? 1 : 0;
+ (Common::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0;
if (out_blocks_needed != nullptr) {
*out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
@@ -3235,20 +3260,20 @@ Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr a
Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
KMemoryAttribute* out_attr, size_t* out_blocks_needed,
- VAddr addr, size_t size, KMemoryState state_mask,
+ KProcessAddress addr, size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
ASSERT(this->IsLockedByCurrentThread());
// Get information about the first block.
- const VAddr last_addr = addr + size - 1;
+ const KProcessAddress last_addr = addr + size - 1;
KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
KMemoryInfo info = it->GetMemoryInfo();
// If the start address isn't aligned, we need a block.
const size_t blocks_for_start_align =
- (Common::AlignDown(addr, PageSize) != info.GetAddress()) ? 1 : 0;
+ (Common::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0;
// Validate all blocks in the range have correct state.
const KMemoryState first_state = info.m_state;
@@ -3277,7 +3302,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
// If the end address isn't aligned, we need a block.
const size_t blocks_for_end_align =
- (Common::AlignUp(addr + size, PageSize) != info.GetEndAddress()) ? 1 : 0;
+ (Common::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0;
// Write output state.
if (out_state != nullptr) {
@@ -3295,11 +3320,12 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
R_SUCCEED();
}
-Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
- KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr,
- KMemoryPermission new_perm, KMemoryAttribute lock_attr) {
+Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_KPhysicalAddress,
+ KProcessAddress addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr, KMemoryPermission new_perm,
+ KMemoryAttribute lock_attr) {
// Validate basic preconditions.
ASSERT((lock_attr & attr) == KMemoryAttribute::None);
ASSERT((lock_attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
@@ -3329,8 +3355,8 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr
attr_mask, attr));
// Get the physical address, if we're supposed to.
- if (out_paddr != nullptr) {
- ASSERT(this->GetPhysicalAddressLocked(out_paddr, addr));
+ if (out_KPhysicalAddress != nullptr) {
+ ASSERT(this->GetPhysicalAddressLocked(out_KPhysicalAddress, addr));
}
// Make the page group, if we're supposed to.
@@ -3361,7 +3387,7 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr
R_SUCCEED();
}
-Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask,
+Result KPageTable::UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr, KMemoryPermission new_perm,
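
The hunks above are mechanical but follow one rule worth making explicit: KProcessAddress and KPhysicalAddress are strong wrappers around a 64-bit value, so call sites that used to hand a raw VAddr to integer-only helpers (Common::IsAligned, Common::AlignDown, DeviceMemory::GetPointer) now unwrap it with GetInteger() first. A minimal, self-contained sketch of that wrapper idea, assuming hypothetical names (TypedAddress, ProcessTag, IsAligned) rather than the real k_typed_address.h contents:

#include <cassert>
#include <cstdint>

using u64 = std::uint64_t;

// Illustrative only: a strongly typed address wrapper in the spirit of
// KProcessAddress/KPhysicalAddress. The real k_typed_address.h is richer.
template <typename Tag>
class TypedAddress {
public:
    constexpr TypedAddress() = default;
    constexpr TypedAddress(u64 value) : m_value{value} {}

    constexpr TypedAddress operator+(u64 offset) const {
        return TypedAddress{m_value + offset};
    }
    constexpr u64 operator-(TypedAddress rhs) const {
        return m_value - rhs.m_value;
    }

    // Explicit escape hatch for integer-only helpers (alignment checks,
    // DeviceMemory::GetPointer, the ARM interface), mirroring the
    // GetInteger(addr) calls in the hunks above.
    friend constexpr u64 GetInteger(TypedAddress addr) {
        return addr.m_value;
    }

private:
    u64 m_value{};
};

struct ProcessTag {};
using ProcessAddress = TypedAddress<ProcessTag>;

constexpr bool IsAligned(u64 value, u64 align) {
    return (value & (align - 1)) == 0;
}

int main() {
    constexpr ProcessAddress addr{0x8000000};
    static_assert(IsAligned(GetInteger(addr), 0x1000));
    assert((addr + 0x10) - addr == 0x10);
    return 0;
}

The upshot in the diff is that every unwrap is explicit and grep-able, instead of an implicit integer conversion.
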
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 5c5356338..022d15f35 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -6,7 +6,6 @@
#include <memory>
#include "common/common_funcs.h"
-#include "common/common_types.h"
#include "common/page_table.h"
#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/k_dynamic_resource_manager.h"
@@ -15,6 +14,7 @@
#include "core/hle/kernel/k_memory_block_manager.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
+#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/result.h"
#include "core/memory.h"
@@ -65,45 +65,48 @@ public:
Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
bool enable_das_merge, bool from_back, KMemoryManager::Pool pool,
- VAddr code_addr, size_t code_size, KSystemResource* system_resource,
- KResourceLimit* resource_limit);
+ KProcessAddress code_addr, size_t code_size,
+ KSystemResource* system_resource, KResourceLimit* resource_limit,
+ Core::Memory::Memory& memory);
void Finalize();
- Result MapProcessCode(VAddr addr, size_t pages_count, KMemoryState state,
+ Result MapProcessCode(KProcessAddress addr, size_t pages_count, KMemoryState state,
KMemoryPermission perm);
- Result MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size);
- Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size,
+ Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
+ Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
ICacheInvalidationStrategy icache_invalidation_strategy);
- Result UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table,
- VAddr src_addr);
- Result MapPhysicalMemory(VAddr addr, size_t size);
- Result UnmapPhysicalMemory(VAddr addr, size_t size);
- Result MapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
- Result UnmapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
- Result SetProcessMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm);
- KMemoryInfo QueryInfo(VAddr addr);
- Result SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission perm);
- Result SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr);
+ Result UnmapProcessMemory(KProcessAddress dst_addr, size_t size, KPageTable& src_page_table,
+ KProcessAddress src_addr);
+ Result MapPhysicalMemory(KProcessAddress addr, size_t size);
+ Result UnmapPhysicalMemory(KProcessAddress addr, size_t size);
+ Result MapMemory(KProcessAddress dst_addr, KProcessAddress src_addr, size_t size);
+ Result UnmapMemory(KProcessAddress dst_addr, KProcessAddress src_addr, size_t size);
+ Result SetProcessMemoryPermission(KProcessAddress addr, size_t size,
+ Svc::MemoryPermission svc_perm);
+ KMemoryInfo QueryInfo(KProcessAddress addr);
+ Result SetMemoryPermission(KProcessAddress addr, size_t size, Svc::MemoryPermission perm);
+ Result SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr);
Result SetMaxHeapSize(size_t size);
- Result SetHeapSize(VAddr* out, size_t size);
- Result LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size,
+ Result SetHeapSize(u64* out, size_t size);
+ Result LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, size_t size,
KMemoryPermission perm, bool is_aligned, bool check_heap);
- Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size, bool check_heap);
+ Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap);
- Result UnlockForDeviceAddressSpace(VAddr addr, size_t size);
+ Result UnlockForDeviceAddressSpace(KProcessAddress addr, size_t size);
- Result LockForIpcUserBuffer(PAddr* out, VAddr address, size_t size);
- Result UnlockForIpcUserBuffer(VAddr address, size_t size);
+ Result LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, size_t size);
+ Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size);
- Result SetupForIpc(VAddr* out_dst_addr, size_t size, VAddr src_addr, KPageTable& src_page_table,
- KMemoryPermission test_perm, KMemoryState dst_state, bool send);
- Result CleanupForIpcServer(VAddr address, size_t size, KMemoryState dst_state);
- Result CleanupForIpcClient(VAddr address, size_t size, KMemoryState dst_state);
+ Result SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr,
+ KPageTable& src_page_table, KMemoryPermission test_perm,
+ KMemoryState dst_state, bool send);
+ Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state);
+ Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state);
- Result LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size);
- Result UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg);
- Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
+ Result LockForCodeMemory(KPageGroup* out, KProcessAddress addr, size_t size);
+ Result UnlockForCodeMemory(KProcessAddress addr, size_t size, const KPageGroup& pg);
+ Result MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr);
@@ -120,7 +123,7 @@ public:
return m_block_info_manager;
}
- bool CanContain(VAddr addr, size_t size, KMemoryState state) const;
+ bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const;
Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
KPhysicalAddress phys_addr, KProcessAddress region_start,
@@ -173,8 +176,8 @@ protected:
m_root = n;
}
- void Push(Core::Memory::Memory& memory, VAddr addr) {
- this->Push(memory.GetPointer<Node>(addr));
+ void Push(Core::Memory::Memory& memory, KVirtualAddress addr) {
+ this->Push(memory.GetPointer<Node>(GetInteger(addr)));
}
Node* Peek() const {
@@ -212,27 +215,28 @@ private:
Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start,
size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
- bool IsRegionContiguous(VAddr addr, u64 size) const;
- void AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list);
- KMemoryInfo QueryInfoImpl(VAddr addr);
- VAddr AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages,
- size_t align);
- Result Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group,
+ bool IsRegionContiguous(KProcessAddress addr, u64 size) const;
+ void AddRegionToPages(KProcessAddress start, size_t num_pages, KPageGroup& page_linked_list);
+ KMemoryInfo QueryInfoImpl(KProcessAddress addr);
+ KProcessAddress AllocateVirtualMemory(KProcessAddress start, size_t region_num_pages,
+ u64 needed_num_pages, size_t align);
+ Result Operate(KProcessAddress addr, size_t num_pages, const KPageGroup& page_group,
OperationType operation);
- Result Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation,
- PAddr map_addr = 0);
+ Result Operate(KProcessAddress addr, size_t num_pages, KMemoryPermission perm,
+ OperationType operation, KPhysicalAddress map_addr = 0);
void FinalizeUpdate(PageLinkedList* page_list);
- VAddr GetRegionAddress(KMemoryState state) const;
+ KProcessAddress GetRegionAddress(KMemoryState state) const;
size_t GetRegionSize(KMemoryState state) const;
- VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
- size_t alignment, size_t offset, size_t guard_pages);
+ KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
+ size_t num_pages, size_t alignment, size_t offset,
+ size_t guard_pages);
- Result CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size,
+ Result CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr, size_t size,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
- Result CheckMemoryStateContiguous(VAddr addr, size_t size, KMemoryState state_mask,
+ Result CheckMemoryStateContiguous(KProcessAddress addr, size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr) const {
@@ -244,12 +248,12 @@ private:
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
- KMemoryAttribute* out_attr, size_t* out_blocks_needed, VAddr addr,
- size_t size, KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute* out_attr, size_t* out_blocks_needed,
+ KProcessAddress addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
- Result CheckMemoryState(size_t* out_blocks_needed, VAddr addr, size_t size,
+ Result CheckMemoryState(size_t* out_blocks_needed, KProcessAddress addr, size_t size,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
@@ -258,39 +262,40 @@ private:
state_mask, state, perm_mask, perm, attr_mask, attr,
ignore_attr));
}
- Result CheckMemoryState(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
+ Result CheckMemoryState(KProcessAddress addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
attr_mask, attr, ignore_attr));
}
- Result LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
- KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr,
- KMemoryPermission new_perm, KMemoryAttribute lock_attr);
- Result UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
+ Result LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_KPhysicalAddress,
+ KProcessAddress addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr, KMemoryPermission new_perm,
+ KMemoryAttribute lock_attr);
+ Result UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
KMemoryPermission new_perm, KMemoryAttribute lock_attr,
const KPageGroup* pg);
- Result MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages);
- bool IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages);
+ Result MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages);
+ bool IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, size_t num_pages);
bool IsLockedByCurrentThread() const {
return m_general_lock.IsLockedByCurrentThread();
}
- bool IsHeapPhysicalAddress(const KMemoryLayout& layout, PAddr phys_addr) {
+ bool IsHeapPhysicalAddress(const KMemoryLayout& layout, KPhysicalAddress phys_addr) {
ASSERT(this->IsLockedByCurrentThread());
return layout.IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr);
}
- bool GetPhysicalAddressLocked(PAddr* out, VAddr virt_addr) const {
+ bool GetPhysicalAddressLocked(KPhysicalAddress* out, KProcessAddress virt_addr) const {
ASSERT(this->IsLockedByCurrentThread());
*out = GetPhysicalAddr(virt_addr);
@@ -298,12 +303,13 @@ private:
return *out != 0;
}
- Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed, VAddr address,
- size_t size, KMemoryPermission test_perm, KMemoryState dst_state);
- Result SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_addr,
+ Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed,
+ KProcessAddress address, size_t size, KMemoryPermission test_perm,
+ KMemoryState dst_state);
+ Result SetupForIpcServer(KProcessAddress* out_addr, size_t size, KProcessAddress src_addr,
KMemoryPermission test_perm, KMemoryState dst_state,
KPageTable& src_page_table, bool send);
- void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, VAddr address,
+ void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, KProcessAddress address,
size_t size, KMemoryPermission prot_perm);
Result AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
@@ -315,61 +321,61 @@ private:
mutable KLightLock m_map_physical_memory_lock;
public:
- constexpr VAddr GetAddressSpaceStart() const {
+ constexpr KProcessAddress GetAddressSpaceStart() const {
return m_address_space_start;
}
- constexpr VAddr GetAddressSpaceEnd() const {
+ constexpr KProcessAddress GetAddressSpaceEnd() const {
return m_address_space_end;
}
constexpr size_t GetAddressSpaceSize() const {
return m_address_space_end - m_address_space_start;
}
- constexpr VAddr GetHeapRegionStart() const {
+ constexpr KProcessAddress GetHeapRegionStart() const {
return m_heap_region_start;
}
- constexpr VAddr GetHeapRegionEnd() const {
+ constexpr KProcessAddress GetHeapRegionEnd() const {
return m_heap_region_end;
}
constexpr size_t GetHeapRegionSize() const {
return m_heap_region_end - m_heap_region_start;
}
- constexpr VAddr GetAliasRegionStart() const {
+ constexpr KProcessAddress GetAliasRegionStart() const {
return m_alias_region_start;
}
- constexpr VAddr GetAliasRegionEnd() const {
+ constexpr KProcessAddress GetAliasRegionEnd() const {
return m_alias_region_end;
}
constexpr size_t GetAliasRegionSize() const {
return m_alias_region_end - m_alias_region_start;
}
- constexpr VAddr GetStackRegionStart() const {
+ constexpr KProcessAddress GetStackRegionStart() const {
return m_stack_region_start;
}
- constexpr VAddr GetStackRegionEnd() const {
+ constexpr KProcessAddress GetStackRegionEnd() const {
return m_stack_region_end;
}
constexpr size_t GetStackRegionSize() const {
return m_stack_region_end - m_stack_region_start;
}
- constexpr VAddr GetKernelMapRegionStart() const {
+ constexpr KProcessAddress GetKernelMapRegionStart() const {
return m_kernel_map_region_start;
}
- constexpr VAddr GetKernelMapRegionEnd() const {
+ constexpr KProcessAddress GetKernelMapRegionEnd() const {
return m_kernel_map_region_end;
}
- constexpr VAddr GetCodeRegionStart() const {
+ constexpr KProcessAddress GetCodeRegionStart() const {
return m_code_region_start;
}
- constexpr VAddr GetCodeRegionEnd() const {
+ constexpr KProcessAddress GetCodeRegionEnd() const {
return m_code_region_end;
}
- constexpr VAddr GetAliasCodeRegionStart() const {
+ constexpr KProcessAddress GetAliasCodeRegionStart() const {
return m_alias_code_region_start;
}
- constexpr VAddr GetAliasCodeRegionEnd() const {
+ constexpr KProcessAddress GetAliasCodeRegionEnd() const {
return m_alias_code_region_end;
}
- constexpr VAddr GetAliasCodeRegionSize() const {
+ constexpr size_t GetAliasCodeRegionSize() const {
return m_alias_code_region_end - m_alias_code_region_start;
}
size_t GetNormalMemorySize() {
@@ -382,25 +388,25 @@ public:
constexpr size_t GetHeapSize() const {
return m_current_heap_end - m_heap_region_start;
}
- constexpr bool IsInsideAddressSpace(VAddr address, size_t size) const {
+ constexpr bool IsInsideAddressSpace(KProcessAddress address, size_t size) const {
return m_address_space_start <= address && address + size - 1 <= m_address_space_end - 1;
}
- constexpr bool IsOutsideAliasRegion(VAddr address, size_t size) const {
+ constexpr bool IsOutsideAliasRegion(KProcessAddress address, size_t size) const {
return m_alias_region_start > address || address + size - 1 > m_alias_region_end - 1;
}
- constexpr bool IsOutsideStackRegion(VAddr address, size_t size) const {
+ constexpr bool IsOutsideStackRegion(KProcessAddress address, size_t size) const {
return m_stack_region_start > address || address + size - 1 > m_stack_region_end - 1;
}
- constexpr bool IsInvalidRegion(VAddr address, size_t size) const {
+ constexpr bool IsInvalidRegion(KProcessAddress address, size_t size) const {
return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1;
}
- constexpr bool IsInsideHeapRegion(VAddr address, size_t size) const {
+ constexpr bool IsInsideHeapRegion(KProcessAddress address, size_t size) const {
return address + size > m_heap_region_start && m_heap_region_end > address;
}
- constexpr bool IsInsideAliasRegion(VAddr address, size_t size) const {
+ constexpr bool IsInsideAliasRegion(KProcessAddress address, size_t size) const {
return address + size > m_alias_region_start && m_alias_region_end > address;
}
- constexpr bool IsOutsideASLRRegion(VAddr address, size_t size) const {
+ constexpr bool IsOutsideASLRRegion(KProcessAddress address, size_t size) const {
if (IsInvalidRegion(address, size)) {
return true;
}
@@ -412,47 +418,53 @@ public:
}
return {};
}
- constexpr bool IsInsideASLRRegion(VAddr address, size_t size) const {
+ constexpr bool IsInsideASLRRegion(KProcessAddress address, size_t size) const {
return !IsOutsideASLRRegion(address, size);
}
constexpr size_t GetNumGuardPages() const {
return IsKernel() ? 1 : 4;
}
- PAddr GetPhysicalAddr(VAddr addr) const {
+ KPhysicalAddress GetPhysicalAddr(KProcessAddress addr) const {
const auto backing_addr = m_page_table_impl->backing_addr[addr >> PageBits];
ASSERT(backing_addr);
- return backing_addr + addr;
+ return backing_addr + GetInteger(addr);
}
- constexpr bool Contains(VAddr addr) const {
+ constexpr bool Contains(KProcessAddress addr) const {
return m_address_space_start <= addr && addr <= m_address_space_end - 1;
}
- constexpr bool Contains(VAddr addr, size_t size) const {
+ constexpr bool Contains(KProcessAddress addr, size_t size) const {
return m_address_space_start <= addr && addr < addr + size &&
addr + size - 1 <= m_address_space_end - 1;
}
public:
- static VAddr GetLinearMappedVirtualAddress(const KMemoryLayout& layout, PAddr addr) {
+ static KVirtualAddress GetLinearMappedVirtualAddress(const KMemoryLayout& layout,
+ KPhysicalAddress addr) {
return layout.GetLinearVirtualAddress(addr);
}
- static PAddr GetLinearMappedPhysicalAddress(const KMemoryLayout& layout, VAddr addr) {
+ static KPhysicalAddress GetLinearMappedPhysicalAddress(const KMemoryLayout& layout,
+ KVirtualAddress addr) {
return layout.GetLinearPhysicalAddress(addr);
}
- static VAddr GetHeapVirtualAddress(const KMemoryLayout& layout, PAddr addr) {
+ static KVirtualAddress GetHeapVirtualAddress(const KMemoryLayout& layout,
+ KPhysicalAddress addr) {
return GetLinearMappedVirtualAddress(layout, addr);
}
- static PAddr GetHeapPhysicalAddress(const KMemoryLayout& layout, VAddr addr) {
+ static KPhysicalAddress GetHeapPhysicalAddress(const KMemoryLayout& layout,
+ KVirtualAddress addr) {
return GetLinearMappedPhysicalAddress(layout, addr);
}
- static VAddr GetPageTableVirtualAddress(const KMemoryLayout& layout, PAddr addr) {
+ static KVirtualAddress GetPageTableVirtualAddress(const KMemoryLayout& layout,
+ KPhysicalAddress addr) {
return GetLinearMappedVirtualAddress(layout, addr);
}
- static PAddr GetPageTablePhysicalAddress(const KMemoryLayout& layout, VAddr addr) {
+ static KPhysicalAddress GetPageTablePhysicalAddress(const KMemoryLayout& layout,
+ KVirtualAddress addr) {
return GetLinearMappedPhysicalAddress(layout, addr);
}
@@ -464,7 +476,7 @@ private:
return m_enable_aslr;
}
- constexpr bool ContainsPages(VAddr addr, size_t num_pages) const {
+ constexpr bool ContainsPages(KProcessAddress addr, size_t num_pages) const {
return (m_address_space_start <= addr) &&
(num_pages <= (m_address_space_end - m_address_space_start) / PageSize) &&
(addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
@@ -489,21 +501,21 @@ private:
};
private:
- VAddr m_address_space_start{};
- VAddr m_address_space_end{};
- VAddr m_heap_region_start{};
- VAddr m_heap_region_end{};
- VAddr m_current_heap_end{};
- VAddr m_alias_region_start{};
- VAddr m_alias_region_end{};
- VAddr m_stack_region_start{};
- VAddr m_stack_region_end{};
- VAddr m_kernel_map_region_start{};
- VAddr m_kernel_map_region_end{};
- VAddr m_code_region_start{};
- VAddr m_code_region_end{};
- VAddr m_alias_code_region_start{};
- VAddr m_alias_code_region_end{};
+ KProcessAddress m_address_space_start{};
+ KProcessAddress m_address_space_end{};
+ KProcessAddress m_heap_region_start{};
+ KProcessAddress m_heap_region_end{};
+ KProcessAddress m_current_heap_end{};
+ KProcessAddress m_alias_region_start{};
+ KProcessAddress m_alias_region_end{};
+ KProcessAddress m_stack_region_start{};
+ KProcessAddress m_stack_region_end{};
+ KProcessAddress m_kernel_map_region_start{};
+ KProcessAddress m_kernel_map_region_end{};
+ KProcessAddress m_code_region_start{};
+ KProcessAddress m_code_region_end{};
+ KProcessAddress m_alias_code_region_start{};
+ KProcessAddress m_alias_code_region_end{};
size_t m_max_heap_size{};
size_t m_mapped_physical_memory_size{};
@@ -535,6 +547,7 @@ private:
Core::System& m_system;
KernelCore& m_kernel;
+ Core::Memory::Memory* m_memory{};
};
} // namespace Kernel
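
Beyond the address types, the header now takes a Core::Memory::Memory& in InitializeForProcess and caches it as m_memory, so Operate() calls m_memory->MapMemoryRegion/UnmapRegion instead of reaching back through m_system.Memory(). A reduced sketch of that injected-reference shape, using hypothetical stand-ins (MemoryLike, PageTableLike) rather than the real classes:

#include <cstddef>
#include <cstdint>
#include <iostream>

class MemoryLike {
public:
    void MapMemoryRegion(std::uint64_t dst, std::size_t size, std::uint64_t src) {
        std::cout << "map 0x" << std::hex << dst << " -> 0x" << src
                  << " (+0x" << size << ")\n";
    }
};

class PageTableLike {
public:
    void InitializeForProcess(MemoryLike& memory) {
        m_memory = &memory; // cached once at initialization
    }

    void Operate(std::uint64_t addr, std::size_t num_pages, std::uint64_t phys_addr) {
        constexpr std::size_t PageSize = 0x1000;
        // Every later operation goes through the cached reference.
        m_memory->MapMemoryRegion(addr, num_pages * PageSize, phys_addr);
    }

private:
    MemoryLike* m_memory{};
};

int main() {
    MemoryLike memory;
    PageTableLike page_table;
    page_table.InitializeForProcess(memory);
    page_table.Operate(0x8000000, 4, 0x100000);
    return 0;
}
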
diff --git a/src/core/hle/kernel/k_page_table_manager.h b/src/core/hle/kernel/k_page_table_manager.h
index 91a45cde3..4b0e034d0 100644
--- a/src/core/hle/kernel/k_page_table_manager.h
+++ b/src/core/hle/kernel/k_page_table_manager.h
@@ -5,9 +5,9 @@
#include <atomic>
-#include "common/common_types.h"
#include "core/hle/kernel/k_dynamic_resource_manager.h"
#include "core/hle/kernel/k_page_table_slab_heap.h"
+#include "core/hle/kernel/k_typed_address.h"
namespace Kernel {
@@ -26,23 +26,23 @@ public:
BaseHeap::Initialize(page_allocator, pt_heap);
}
- VAddr Allocate() {
- return VAddr(BaseHeap::Allocate());
+ KVirtualAddress Allocate() {
+ return KVirtualAddress(BaseHeap::Allocate());
}
- RefCount GetRefCount(VAddr addr) const {
+ RefCount GetRefCount(KVirtualAddress addr) const {
return m_pt_heap->GetRefCount(addr);
}
- void Open(VAddr addr, int count) {
+ void Open(KVirtualAddress addr, int count) {
return m_pt_heap->Open(addr, count);
}
- bool Close(VAddr addr, int count) {
+ bool Close(KVirtualAddress addr, int count) {
return m_pt_heap->Close(addr, count);
}
- bool IsInPageTableHeap(VAddr addr) const {
+ bool IsInPageTableHeap(KVirtualAddress addr) const {
return m_pt_heap->IsInRange(addr);
}
diff --git a/src/core/hle/kernel/k_page_table_slab_heap.h b/src/core/hle/kernel/k_page_table_slab_heap.h
index 9a8d77316..7da0ea669 100644
--- a/src/core/hle/kernel/k_page_table_slab_heap.h
+++ b/src/core/hle/kernel/k_page_table_slab_heap.h
@@ -6,8 +6,8 @@
#include <array>
#include <vector>
-#include "common/common_types.h"
#include "core/hle/kernel/k_dynamic_slab_heap.h"
+#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/slab_helpers.h"
namespace Kernel {
@@ -45,12 +45,12 @@ public:
this->Initialize(rc);
}
- RefCount GetRefCount(VAddr addr) {
+ RefCount GetRefCount(KVirtualAddress addr) {
ASSERT(this->IsInRange(addr));
return *this->GetRefCountPointer(addr);
}
- void Open(VAddr addr, int count) {
+ void Open(KVirtualAddress addr, int count) {
ASSERT(this->IsInRange(addr));
*this->GetRefCountPointer(addr) += static_cast<RefCount>(count);
@@ -58,7 +58,7 @@ public:
ASSERT(this->GetRefCount(addr) > 0);
}
- bool Close(VAddr addr, int count) {
+ bool Close(KVirtualAddress addr, int count) {
ASSERT(this->IsInRange(addr));
ASSERT(this->GetRefCount(addr) >= count);
@@ -66,7 +66,7 @@ public:
return this->GetRefCount(addr) == 0;
}
- bool IsInPageTableHeap(VAddr addr) const {
+ bool IsInPageTableHeap(KVirtualAddress addr) const {
return this->IsInRange(addr);
}
@@ -81,7 +81,7 @@ private:
}
}
- RefCount* GetRefCountPointer(VAddr addr) {
+ RefCount* GetRefCountPointer(KVirtualAddress addr) {
return m_ref_counts.data() + ((addr - this->GetAddress()) / PageSize);
}
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 9d18f4049..efe86ad27 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -36,8 +36,9 @@ namespace {
* @param owner_process The parent process for the main thread
* @param priority The priority to give the main thread
*/
-void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority, VAddr stack_top) {
- const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart();
+void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority,
+ KProcessAddress stack_top) {
+ const KProcessAddress entry_point = owner_process.PageTable().GetCodeRegionStart();
ASSERT(owner_process.GetResourceLimit()->Reserve(LimitableResource::ThreadCountMax, 1));
KThread* thread = KThread::Create(system.Kernel());
@@ -219,7 +220,7 @@ void KProcess::UnpinThread(KThread* thread) {
KScheduler::SetSchedulerUpdateNeeded(m_kernel);
}
-Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
+Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] KProcessAddress address,
[[maybe_unused]] size_t size) {
// Lock ourselves, to prevent concurrent access.
KScopedLightLock lk(m_state_lock);
@@ -248,7 +249,7 @@ Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr ad
R_SUCCEED();
}
-void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
+void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] KProcessAddress address,
[[maybe_unused]] size_t size) {
// Lock ourselves, to prevent concurrent access.
KScopedLightLock lk(m_state_lock);
@@ -366,8 +367,8 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
// Initialize process address space
if (const Result result{m_page_table.InitializeForProcess(
metadata.GetAddressSpaceType(), false, false, false, KMemoryManager::Pool::Application,
- 0x8000000, code_size, std::addressof(m_kernel.GetAppSystemResource()),
- m_resource_limit)};
+ 0x8000000, code_size, std::addressof(m_kernel.GetAppSystemResource()), m_resource_limit,
+ m_kernel.System().ApplicationMemory())};
result.IsError()) {
R_RETURN(result);
}
@@ -399,8 +400,8 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
m_memory_usage_capacity =
- m_page_table.GetHeapRegionEnd() - m_page_table.GetHeapRegionStart() +
- m_page_table.GetAliasRegionEnd() - m_page_table.GetAliasRegionStart();
+ (m_page_table.GetHeapRegionEnd() - m_page_table.GetHeapRegionStart()) +
+ (m_page_table.GetAliasRegionEnd() - m_page_table.GetAliasRegionStart());
break;
default:
@@ -492,9 +493,9 @@ void KProcess::Finalize() {
KSynchronizationObject::Finalize();
}
-Result KProcess::CreateThreadLocalRegion(VAddr* out) {
+Result KProcess::CreateThreadLocalRegion(KProcessAddress* out) {
KThreadLocalPage* tlp = nullptr;
- VAddr tlr = 0;
+ KProcessAddress tlr = 0;
// See if we can get a region from a partially used TLP.
{
@@ -543,7 +544,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
R_SUCCEED();
}
-Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
+Result KProcess::DeleteThreadLocalRegion(KProcessAddress addr) {
KThreadLocalPage* page_to_free = nullptr;
// Release the region.
@@ -551,10 +552,10 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
KScopedSchedulerLock sl{m_kernel};
// Try to find the page in the partially used list.
- auto it = m_partially_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize));
+ auto it = m_partially_used_tlp_tree.find_key(Common::AlignDown(GetInteger(addr), PageSize));
if (it == m_partially_used_tlp_tree.end()) {
// If we don't find it, it has to be in the fully used list.
- it = m_fully_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize));
+ it = m_fully_used_tlp_tree.find_key(Common::AlignDown(GetInteger(addr), PageSize));
R_UNLESS(it != m_fully_used_tlp_tree.end(), ResultInvalidAddress);
// Release the region.
@@ -591,8 +592,7 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
R_SUCCEED();
}
-bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,
- DebugWatchpointType type) {
+bool KProcess::InsertWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type) {
const auto watch{std::find_if(m_watchpoints.begin(), m_watchpoints.end(), [&](const auto& wp) {
return wp.type == DebugWatchpointType::None;
})};
@@ -605,16 +605,16 @@ bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,
watch->end_address = addr + size;
watch->type = type;
- for (VAddr page = Common::AlignDown(addr, PageSize); page < addr + size; page += PageSize) {
+ for (KProcessAddress page = Common::AlignDown(GetInteger(addr), PageSize); page < addr + size;
+ page += PageSize) {
m_debug_page_refcounts[page]++;
- system.Memory().MarkRegionDebug(page, PageSize, true);
+ this->GetMemory().MarkRegionDebug(page, PageSize, true);
}
return true;
}
-bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size,
- DebugWatchpointType type) {
+bool KProcess::RemoveWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type) {
const auto watch{std::find_if(m_watchpoints.begin(), m_watchpoints.end(), [&](const auto& wp) {
return wp.start_address == addr && wp.end_address == addr + size && wp.type == type;
})};
@@ -627,24 +627,24 @@ bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size,
watch->end_address = 0;
watch->type = DebugWatchpointType::None;
- for (VAddr page = Common::AlignDown(addr, PageSize); page < addr + size; page += PageSize) {
+ for (KProcessAddress page = Common::AlignDown(GetInteger(addr), PageSize); page < addr + size;
+ page += PageSize) {
m_debug_page_refcounts[page]--;
if (!m_debug_page_refcounts[page]) {
- system.Memory().MarkRegionDebug(page, PageSize, false);
+ this->GetMemory().MarkRegionDebug(page, PageSize, false);
}
}
return true;
}
-void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
+void KProcess::LoadModule(CodeSet code_set, KProcessAddress base_addr) {
const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
Svc::MemoryPermission permission) {
m_page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
};
- m_kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(),
- code_set.memory.size());
+ this->GetMemory().WriteBlock(base_addr, code_set.memory.data(), code_set.memory.size());
ReprotectSegment(code_set.CodeSegment(), Svc::MemoryPermission::ReadExecute);
ReprotectSegment(code_set.RODataSegment(), Svc::MemoryPermission::Read);
@@ -703,4 +703,9 @@ Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
R_SUCCEED();
}
+Core::Memory::Memory& KProcess::GetMemory() const {
+ // TODO: per-process memory
+ return m_kernel.System().ApplicationMemory();
+}
+
} // namespace Kernel
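
The watchpoint hunks above keep a per-page counter in m_debug_page_refcounts so that overlapping watchpoints do not clear a page's debug marking while another watchpoint still covers it; the region is only unmarked once the count drops to zero. A compact sketch of that counting scheme, with a hypothetical MarkRegionDebug stub standing in for the call through KProcess::GetMemory():

#include <cstdint>
#include <map>

using u64 = std::uint64_t;

namespace {
constexpr u64 PageSize = 0x1000;
std::map<u64, u64> debug_page_refcounts;

// Stand-in for Core::Memory::Memory::MarkRegionDebug.
void MarkRegionDebug(u64 page, u64 size, bool debug) {
    (void)page;
    (void)size;
    (void)debug;
}

constexpr u64 AlignDownToPage(u64 addr) {
    return addr & ~(PageSize - 1);
}

void InsertWatchpointPages(u64 addr, u64 size) {
    for (u64 page = AlignDownToPage(addr); page < addr + size; page += PageSize) {
        debug_page_refcounts[page]++;
        MarkRegionDebug(page, PageSize, true);
    }
}

void RemoveWatchpointPages(u64 addr, u64 size) {
    for (u64 page = AlignDownToPage(addr); page < addr + size; page += PageSize) {
        debug_page_refcounts[page]--;
        if (debug_page_refcounts[page] == 0) {
            MarkRegionDebug(page, PageSize, false);
        }
    }
}
} // namespace

int main() {
    InsertWatchpointPages(0x10000, 0x20); // first watchpoint on this page
    InsertWatchpointPages(0x10008, 0x10); // second watchpoint, same page
    RemoveWatchpointPages(0x10000, 0x20); // count 2 -> 1, page stays marked
    RemoveWatchpointPages(0x10008, 0x10); // count 1 -> 0, page unmarked
    return 0;
}
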
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index 7b7a971b8..925981d06 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -8,7 +8,6 @@
#include <list>
#include <map>
#include <string>
-#include "common/common_types.h"
#include "core/hle/kernel/k_address_arbiter.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_condition_variable.h"
@@ -16,14 +15,19 @@
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread_local_page.h"
+#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/k_worker_task.h"
#include "core/hle/kernel/process_capability.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
namespace Core {
+namespace Memory {
+class Memory;
+};
+
class System;
-}
+} // namespace Core
namespace FileSys {
class ProgramMetadata;
@@ -59,8 +63,8 @@ enum class DebugWatchpointType : u8 {
DECLARE_ENUM_FLAG_OPERATORS(DebugWatchpointType);
struct DebugWatchpoint {
- VAddr start_address;
- VAddr end_address;
+ KProcessAddress start_address;
+ KProcessAddress end_address;
DebugWatchpointType type;
};
@@ -135,11 +139,14 @@ public:
return m_handle_table;
}
- Result SignalToAddress(VAddr address) {
+ /// Gets a reference to this process's memory.
+ Core::Memory::Memory& GetMemory() const;
+
+ Result SignalToAddress(KProcessAddress address) {
return m_condition_var.SignalToAddress(address);
}
- Result WaitForAddress(Handle handle, VAddr address, u32 tag) {
+ Result WaitForAddress(Handle handle, KProcessAddress address, u32 tag) {
return m_condition_var.WaitForAddress(handle, address, tag);
}
@@ -147,20 +154,21 @@ public:
return m_condition_var.Signal(cv_key, count);
}
- Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
+ Result WaitConditionVariable(KProcessAddress address, u64 cv_key, u32 tag, s64 ns) {
R_RETURN(m_condition_var.Wait(address, cv_key, tag, ns));
}
- Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) {
+ Result SignalAddressArbiter(uint64_t address, Svc::SignalType signal_type, s32 value,
+ s32 count) {
R_RETURN(m_address_arbiter.SignalToAddress(address, signal_type, value, count));
}
- Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
+ Result WaitAddressArbiter(uint64_t address, Svc::ArbitrationType arb_type, s32 value,
s64 timeout) {
R_RETURN(m_address_arbiter.WaitForAddress(address, arb_type, value, timeout));
}
- VAddr GetProcessLocalRegionAddress() const {
+ KProcessAddress GetProcessLocalRegionAddress() const {
return m_plr_address;
}
@@ -352,7 +360,7 @@ public:
*/
void PrepareForTermination();
- void LoadModule(CodeSet code_set, VAddr base_addr);
+ void LoadModule(CodeSet code_set, KProcessAddress base_addr);
bool IsInitialized() const override {
return m_is_initialized;
@@ -380,26 +388,26 @@ public:
return m_state_lock;
}
- Result AddSharedMemory(KSharedMemory* shmem, VAddr address, size_t size);
- void RemoveSharedMemory(KSharedMemory* shmem, VAddr address, size_t size);
+ Result AddSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size);
+ void RemoveSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size);
///////////////////////////////////////////////////////////////////////////////////////////////
// Thread-local storage management
// Marks the next available region as used and returns the address of the slot.
- [[nodiscard]] Result CreateThreadLocalRegion(VAddr* out);
+ [[nodiscard]] Result CreateThreadLocalRegion(KProcessAddress* out);
// Frees a used TLS slot identified by the given address
- Result DeleteThreadLocalRegion(VAddr addr);
+ Result DeleteThreadLocalRegion(KProcessAddress addr);
///////////////////////////////////////////////////////////////////////////////////////////////
// Debug watchpoint management
// Attempts to insert a watchpoint into a free slot. Returns false if none are available.
- bool InsertWatchpoint(Core::System& system, VAddr addr, u64 size, DebugWatchpointType type);
+ bool InsertWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type);
// Attempts to remove the watchpoint specified by the given parameters.
- bool RemoveWatchpoint(Core::System& system, VAddr addr, u64 size, DebugWatchpointType type);
+ bool RemoveWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type);
const std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS>& GetWatchpoints() const {
return m_watchpoints;
@@ -457,7 +465,7 @@ private:
/// Resource limit descriptor for this process
KResourceLimit* m_resource_limit{};
- VAddr m_system_resource_address{};
+ KVirtualAddress m_system_resource_address{};
/// The ideal CPU core for this process, threads are scheduled on this core by default.
u8 m_ideal_core = 0;
@@ -485,7 +493,7 @@ private:
KConditionVariable m_condition_var;
/// Address indicating the location of the process' dedicated TLS region.
- VAddr m_plr_address = 0;
+ KProcessAddress m_plr_address = 0;
/// Random values for svcGetInfo RandomEntropy
std::array<u64, RANDOM_ENTROPY_SIZE> m_random_entropy{};
@@ -497,7 +505,7 @@ private:
std::list<KSharedMemoryInfo*> m_shared_memory_list;
/// Address of the top of the main thread's stack
- VAddr m_main_thread_stack_top{};
+ KProcessAddress m_main_thread_stack_top{};
/// Size of the main thread's stack
std::size_t m_main_thread_stack_size{};
@@ -527,7 +535,7 @@ private:
std::array<u64, Core::Hardware::NUM_CPU_CORES> m_running_thread_idle_counts{};
std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_pinned_threads{};
std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS> m_watchpoints{};
- std::map<VAddr, u64> m_debug_page_refcounts;
+ std::map<KProcessAddress, u64> m_debug_page_refcounts;
KThread* m_exception_thread{};
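
k_process.h now forward-declares Core::Memory::Memory and exposes GetMemory(), so the header stays light while the definition in k_process.cpp (currently the shared application memory, per its TODO) sees the full type. A single-file sketch of that forward-declaration pattern, with the hypothetical ProcessLike standing in for KProcess:

#include <cstdint>
#include <iostream>

// "Header" part: forward declaration only. Returning a reference to an
// incomplete type is fine, so the heavy memory header is not needed here.
namespace Core::Memory {
class Memory;
}

namespace Kernel {
class ProcessLike {
public:
    explicit ProcessLike(Core::Memory::Memory& memory) : m_memory{&memory} {}
    Core::Memory::Memory& GetMemory() const;

private:
    Core::Memory::Memory* m_memory;
};
} // namespace Kernel

// "Source" part: the full definition is visible where GetMemory() is defined.
namespace Core::Memory {
class Memory {
public:
    void Write16(std::uint64_t addr, std::uint16_t value) {
        std::cout << "write16 @ 0x" << std::hex << addr << " = " << value << '\n';
    }
};
} // namespace Core::Memory

namespace Kernel {
Core::Memory::Memory& ProcessLike::GetMemory() const {
    return *m_memory;
}
} // namespace Kernel

int main() {
    Core::Memory::Memory memory;
    Kernel::ProcessLike process{memory};
    process.GetMemory().Write16(0x1000, 1);
    return 0;
}
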
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index ecadf2916..faa12b4f0 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -511,7 +511,7 @@ void KScheduler::Reload(KThread* thread) {
auto& cpu_core = m_kernel.System().ArmInterface(m_core_id);
cpu_core.LoadContext(thread->GetContext32());
cpu_core.LoadContext(thread->GetContext64());
- cpu_core.SetTlsAddress(thread->GetTlsAddress());
+ cpu_core.SetTlsAddress(GetInteger(thread->GetTlsAddress()));
cpu_core.SetTPIDR_EL0(thread->GetTpidrEl0());
cpu_core.LoadWatchpointArray(thread->GetOwnerProcess()->GetWatchpoints());
cpu_core.ClearExclusiveState();
diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp
index 2288ee435..c66aff501 100644
--- a/src/core/hle/kernel/k_server_session.cpp
+++ b/src/core/hle/kernel/k_server_session.cpp
@@ -222,7 +222,7 @@ Result KServerSession::SendReply(bool is_hle) {
// HLE servers write directly to a pointer to the thread command buffer. Therefore
// the reply has already been written in this case.
} else {
- Core::Memory::Memory& memory{m_kernel.System().Memory()};
+ Core::Memory::Memory& memory{client_thread->GetOwnerProcess()->GetMemory()};
KThread* server_thread{GetCurrentThreadPointer(m_kernel)};
UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess());
@@ -319,7 +319,7 @@ Result KServerSession::ReceiveRequest(std::shared_ptr<Service::HLERequestContext
// bool recv_list_broken = false;
// Receive the message.
- Core::Memory::Memory& memory{m_kernel.System().Memory()};
+ Core::Memory::Memory& memory{client_thread->GetOwnerProcess()->GetMemory()};
if (out_context != nullptr) {
// HLE request.
u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(client_message))};
diff --git a/src/core/hle/kernel/k_session_request.cpp b/src/core/hle/kernel/k_session_request.cpp
index a329e5690..9a69b4ffc 100644
--- a/src/core/hle/kernel/k_session_request.cpp
+++ b/src/core/hle/kernel/k_session_request.cpp
@@ -6,8 +6,8 @@
namespace Kernel {
-Result KSessionRequest::SessionMappings::PushMap(VAddr client, VAddr server, size_t size,
- KMemoryState state, size_t index) {
+Result KSessionRequest::SessionMappings::PushMap(KProcessAddress client, KProcessAddress server,
+ size_t size, KMemoryState state, size_t index) {
// At most 15 buffers of each type (4-bit descriptor counts).
ASSERT(index < ((1ul << 4) - 1) * 3);
@@ -33,20 +33,21 @@ Result KSessionRequest::SessionMappings::PushMap(VAddr client, VAddr server, siz
R_SUCCEED();
}
-Result KSessionRequest::SessionMappings::PushSend(VAddr client, VAddr server, size_t size,
- KMemoryState state) {
+Result KSessionRequest::SessionMappings::PushSend(KProcessAddress client, KProcessAddress server,
+ size_t size, KMemoryState state) {
ASSERT(m_num_recv == 0);
ASSERT(m_num_exch == 0);
R_RETURN(this->PushMap(client, server, size, state, m_num_send++));
}
-Result KSessionRequest::SessionMappings::PushReceive(VAddr client, VAddr server, size_t size,
- KMemoryState state) {
+Result KSessionRequest::SessionMappings::PushReceive(KProcessAddress client, KProcessAddress server,
+ size_t size, KMemoryState state) {
ASSERT(m_num_exch == 0);
R_RETURN(this->PushMap(client, server, size, state, m_num_send + m_num_recv++));
}
-Result KSessionRequest::SessionMappings::PushExchange(VAddr client, VAddr server, size_t size,
+Result KSessionRequest::SessionMappings::PushExchange(KProcessAddress client,
+ KProcessAddress server, size_t size,
KMemoryState state) {
R_RETURN(this->PushMap(client, server, size, state, m_num_send + m_num_recv + m_num_exch++));
}
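
The PushMap assertion above encodes the IPC descriptor limits: each buffer count is a 4-bit field, so send, receive, and exchange buffers are capped at 15 apiece, 45 mappings in total, and entries are stored send-first, then receive, then exchange (hence the m_num_send + m_num_recv++ index in PushReceive). A small check of that arithmetic:

#include <cassert>
#include <cstdint>

int main() {
    // 4-bit descriptor counts: at most 15 buffers of each type.
    constexpr std::uint64_t max_per_type = (1ul << 4) - 1; // 15
    constexpr std::uint64_t max_total = max_per_type * 3;  // 45, the PushMap bound
    static_assert(max_per_type == 15 && max_total == 45);

    // Layout is send-first, then receive, then exchange.
    const std::uint64_t num_send = 2;
    const std::uint64_t num_recv = 1;
    const std::uint64_t next_receive_index = num_send + num_recv; // slot 3
    assert(next_receive_index < max_total);
    return 0;
}
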
diff --git a/src/core/hle/kernel/k_session_request.h b/src/core/hle/kernel/k_session_request.h
index 5685048ba..b5f04907b 100644
--- a/src/core/hle/kernel/k_session_request.h
+++ b/src/core/hle/kernel/k_session_request.h
@@ -26,17 +26,17 @@ public:
class Mapping {
public:
- constexpr void Set(VAddr c, VAddr s, size_t sz, KMemoryState st) {
+ constexpr void Set(KProcessAddress c, KProcessAddress s, size_t sz, KMemoryState st) {
m_client_address = c;
m_server_address = s;
m_size = sz;
m_state = st;
}
- constexpr VAddr GetClientAddress() const {
+ constexpr KProcessAddress GetClientAddress() const {
return m_client_address;
}
- constexpr VAddr GetServerAddress() const {
+ constexpr KProcessAddress GetServerAddress() const {
return m_server_address;
}
constexpr size_t GetSize() const {
@@ -47,8 +47,8 @@ public:
}
private:
- VAddr m_client_address{};
- VAddr m_server_address{};
+ KProcessAddress m_client_address{};
+ KProcessAddress m_server_address{};
size_t m_size{};
KMemoryState m_state{};
};
@@ -69,14 +69,17 @@ public:
return m_num_exch;
}
- Result PushSend(VAddr client, VAddr server, size_t size, KMemoryState state);
- Result PushReceive(VAddr client, VAddr server, size_t size, KMemoryState state);
- Result PushExchange(VAddr client, VAddr server, size_t size, KMemoryState state);
+ Result PushSend(KProcessAddress client, KProcessAddress server, size_t size,
+ KMemoryState state);
+ Result PushReceive(KProcessAddress client, KProcessAddress server, size_t size,
+ KMemoryState state);
+ Result PushExchange(KProcessAddress client, KProcessAddress server, size_t size,
+ KMemoryState state);
- VAddr GetSendClientAddress(size_t i) const {
+ KProcessAddress GetSendClientAddress(size_t i) const {
return GetSendMapping(i).GetClientAddress();
}
- VAddr GetSendServerAddress(size_t i) const {
+ KProcessAddress GetSendServerAddress(size_t i) const {
return GetSendMapping(i).GetServerAddress();
}
size_t GetSendSize(size_t i) const {
@@ -86,10 +89,10 @@ public:
return GetSendMapping(i).GetMemoryState();
}
- VAddr GetReceiveClientAddress(size_t i) const {
+ KProcessAddress GetReceiveClientAddress(size_t i) const {
return GetReceiveMapping(i).GetClientAddress();
}
- VAddr GetReceiveServerAddress(size_t i) const {
+ KProcessAddress GetReceiveServerAddress(size_t i) const {
return GetReceiveMapping(i).GetServerAddress();
}
size_t GetReceiveSize(size_t i) const {
@@ -99,10 +102,10 @@ public:
return GetReceiveMapping(i).GetMemoryState();
}
- VAddr GetExchangeClientAddress(size_t i) const {
+ KProcessAddress GetExchangeClientAddress(size_t i) const {
return GetExchangeMapping(i).GetClientAddress();
}
- VAddr GetExchangeServerAddress(size_t i) const {
+ KProcessAddress GetExchangeServerAddress(size_t i) const {
return GetExchangeMapping(i).GetServerAddress();
}
size_t GetExchangeSize(size_t i) const {
@@ -113,7 +116,8 @@ public:
}
private:
- Result PushMap(VAddr client, VAddr server, size_t size, KMemoryState state, size_t index);
+ Result PushMap(KProcessAddress client, KProcessAddress server, size_t size,
+ KMemoryState state, size_t index);
const Mapping& GetSendMapping(size_t i) const {
ASSERT(i < m_num_send);
@@ -227,22 +231,25 @@ public:
return m_mappings.GetExchangeCount();
}
- Result PushSend(VAddr client, VAddr server, size_t size, KMemoryState state) {
+ Result PushSend(KProcessAddress client, KProcessAddress server, size_t size,
+ KMemoryState state) {
return m_mappings.PushSend(client, server, size, state);
}
- Result PushReceive(VAddr client, VAddr server, size_t size, KMemoryState state) {
+ Result PushReceive(KProcessAddress client, KProcessAddress server, size_t size,
+ KMemoryState state) {
return m_mappings.PushReceive(client, server, size, state);
}
- Result PushExchange(VAddr client, VAddr server, size_t size, KMemoryState state) {
+ Result PushExchange(KProcessAddress client, KProcessAddress server, size_t size,
+ KMemoryState state) {
return m_mappings.PushExchange(client, server, size, state);
}
- VAddr GetSendClientAddress(size_t i) const {
+ KProcessAddress GetSendClientAddress(size_t i) const {
return m_mappings.GetSendClientAddress(i);
}
- VAddr GetSendServerAddress(size_t i) const {
+ KProcessAddress GetSendServerAddress(size_t i) const {
return m_mappings.GetSendServerAddress(i);
}
size_t GetSendSize(size_t i) const {
@@ -252,10 +259,10 @@ public:
return m_mappings.GetSendMemoryState(i);
}
- VAddr GetReceiveClientAddress(size_t i) const {
+ KProcessAddress GetReceiveClientAddress(size_t i) const {
return m_mappings.GetReceiveClientAddress(i);
}
- VAddr GetReceiveServerAddress(size_t i) const {
+ KProcessAddress GetReceiveServerAddress(size_t i) const {
return m_mappings.GetReceiveServerAddress(i);
}
size_t GetReceiveSize(size_t i) const {
@@ -265,10 +272,10 @@ public:
return m_mappings.GetReceiveMemoryState(i);
}
- VAddr GetExchangeClientAddress(size_t i) const {
+ KProcessAddress GetExchangeClientAddress(size_t i) const {
return m_mappings.GetExchangeClientAddress(i);
}
- VAddr GetExchangeServerAddress(size_t i) const {
+ KProcessAddress GetExchangeServerAddress(size_t i) const {
return m_mappings.GetExchangeServerAddress(i);
}
size_t GetExchangeSize(size_t i) const {
diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp
index 954e5befe..efb5699de 100644
--- a/src/core/hle/kernel/k_shared_memory.cpp
+++ b/src/core/hle/kernel/k_shared_memory.cpp
@@ -76,7 +76,7 @@ void KSharedMemory::Finalize() {
m_resource_limit->Close();
}
-Result KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t map_size,
+Result KSharedMemory::Map(KProcess& target_process, KProcessAddress address, std::size_t map_size,
Svc::MemoryPermission map_perm) {
// Validate the size.
R_UNLESS(m_size == map_size, ResultInvalidSize);
@@ -94,7 +94,8 @@ Result KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t m
ConvertToKMemoryPermission(map_perm)));
}
-Result KSharedMemory::Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size) {
+Result KSharedMemory::Unmap(KProcess& target_process, KProcessAddress address,
+ std::size_t unmap_size) {
// Validate the size.
R_UNLESS(m_size == unmap_size, ResultInvalidSize);
diff --git a/src/core/hle/kernel/k_shared_memory.h b/src/core/hle/kernel/k_shared_memory.h
index b4c4125bb..54b23d7ac 100644
--- a/src/core/hle/kernel/k_shared_memory.h
+++ b/src/core/hle/kernel/k_shared_memory.h
@@ -6,11 +6,11 @@
#include <optional>
#include <string>
-#include "common/common_types.h"
#include "core/device_memory.h"
#include "core/hle/kernel/k_memory_block.h"
#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
@@ -37,7 +37,7 @@ public:
* @param map_size Size of the shared memory block to map
* @param permissions Memory block map permissions (specified by SVC field)
*/
- Result Map(KProcess& target_process, VAddr address, std::size_t map_size,
+ Result Map(KProcess& target_process, KProcessAddress address, std::size_t map_size,
Svc::MemoryPermission permissions);
/**
@@ -46,7 +46,7 @@ public:
* @param address Address in system memory to unmap shared memory block
* @param unmap_size Size of the shared memory block to unmap
*/
- Result Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size);
+ Result Unmap(KProcess& target_process, KProcessAddress address, std::size_t unmap_size);
/**
* Gets a pointer to the shared memory block
@@ -79,7 +79,7 @@ private:
std::optional<KPageGroup> m_page_group{};
Svc::MemoryPermission m_owner_permission{};
Svc::MemoryPermission m_user_permission{};
- PAddr m_physical_address{};
+ KPhysicalAddress m_physical_address{};
std::size_t m_size{};
KResourceLimit* m_resource_limit{};
bool m_is_initialized{};
diff --git a/src/core/hle/kernel/k_system_resource.h b/src/core/hle/kernel/k_system_resource.h
index d36aaa9bd..6ea482185 100644
--- a/src/core/hle/kernel/k_system_resource.h
+++ b/src/core/hle/kernel/k_system_resource.h
@@ -130,7 +130,7 @@ private:
KBlockInfoSlabHeap m_block_info_heap;
KPageTableSlabHeap m_page_table_heap;
KResourceLimit* m_resource_limit{};
- VAddr m_resource_address{};
+ KVirtualAddress m_resource_address{};
size_t m_resource_size{};
};
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index c0e3ecb45..70480b725 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -48,8 +48,8 @@ static void ResetThreadContext32(Kernel::KThread::ThreadContext32& context, u32
context.fpscr = 0;
}
-static void ResetThreadContext64(Kernel::KThread::ThreadContext64& context, VAddr stack_top,
- VAddr entry_point, u64 arg) {
+static void ResetThreadContext64(Kernel::KThread::ThreadContext64& context, u64 stack_top,
+ u64 entry_point, u64 arg) {
context = {};
context.cpu_registers[0] = arg;
context.cpu_registers[18] = Kernel::KSystemControl::GenerateRandomU64() | 1;
@@ -100,8 +100,8 @@ KThread::KThread(KernelCore& kernel)
: KAutoObjectWithSlabHeapAndContainer{kernel}, m_activity_pause_lock{kernel} {}
KThread::~KThread() = default;
-Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio,
- s32 virt_core, KProcess* owner, ThreadType type) {
+Result KThread::Initialize(KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top,
+ s32 prio, s32 virt_core, KProcess* owner, ThreadType type) {
// Assert parameters are valid.
ASSERT((type == ThreadType::Main) || (type == ThreadType::Dummy) ||
(Svc::HighestThreadPriority <= prio && prio <= Svc::LowestThreadPriority));
@@ -221,9 +221,9 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
}
// Initialize thread context.
- ResetThreadContext64(m_thread_context_64, user_stack_top, func, arg);
- ResetThreadContext32(m_thread_context_32, static_cast<u32>(user_stack_top),
- static_cast<u32>(func), static_cast<u32>(arg));
+ ResetThreadContext64(m_thread_context_64, GetInteger(user_stack_top), GetInteger(func), arg);
+ ResetThreadContext32(m_thread_context_32, static_cast<u32>(GetInteger(user_stack_top)),
+ static_cast<u32>(GetInteger(func)), static_cast<u32>(arg));
// Setup the stack parameters.
StackParameters& sp = this->GetStackParameters();
@@ -249,8 +249,9 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
}
Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
- VAddr user_stack_top, s32 prio, s32 core, KProcess* owner,
- ThreadType type, std::function<void()>&& init_func) {
+ KProcessAddress user_stack_top, s32 prio, s32 core,
+ KProcess* owner, ThreadType type,
+ std::function<void()>&& init_func) {
// Initialize the thread.
R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));
@@ -288,8 +289,8 @@ Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thre
}
Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func,
- uintptr_t arg, VAddr user_stack_top, s32 prio, s32 virt_core,
- KProcess* owner) {
+ uintptr_t arg, KProcessAddress user_stack_top, s32 prio,
+ s32 virt_core, KProcess* owner) {
system.Kernel().GlobalSchedulerContext().AddThread(thread);
R_RETURN(InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
ThreadType::User, system.GetCpuManager().GetGuestThreadFunc()));
@@ -545,7 +546,7 @@ u16 KThread::GetUserDisableCount() const {
return {};
}
- auto& memory = m_kernel.System().Memory();
+ auto& memory = this->GetOwnerProcess()->GetMemory();
return memory.Read16(m_tls_address + offsetof(ThreadLocalRegion, disable_count));
}
@@ -555,7 +556,7 @@ void KThread::SetInterruptFlag() {
return;
}
- auto& memory = m_kernel.System().Memory();
+ auto& memory = this->GetOwnerProcess()->GetMemory();
memory.Write16(m_tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 1);
}
@@ -565,7 +566,7 @@ void KThread::ClearInterruptFlag() {
return;
}
- auto& memory = m_kernel.System().Memory();
+ auto& memory = this->GetOwnerProcess()->GetMemory();
memory.Write16(m_tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 0);
}
@@ -951,7 +952,7 @@ void KThread::AddHeldLock(LockWithPriorityInheritanceInfo* lock_info) {
m_held_lock_info_list.push_front(*lock_info);
}
-KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(VAddr address_key,
+KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(KProcessAddress address_key,
bool is_kernel_address_key) {
ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
@@ -1087,7 +1088,8 @@ void KThread::RemoveWaiter(KThread* thread) {
}
}
-KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_kernel_address_key_) {
+KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, KProcessAddress key,
+ bool is_kernel_address_key_) {
ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
// Get the relevant lock info.
@@ -1420,6 +1422,11 @@ s32 GetCurrentCoreId(KernelCore& kernel) {
return GetCurrentThread(kernel).GetCurrentCore();
}
+Core::Memory::Memory& GetCurrentMemory(KernelCore& kernel) {
+ // TODO: per-process memory
+ return kernel.System().ApplicationMemory();
+}
+
KScopedDisableDispatch::~KScopedDisableDispatch() {
// If we are shutting down the kernel, none of this is relevant anymore.
if (m_kernel.IsShuttingDown()) {
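For orientation, the new GetCurrentMemory(KernelCore&) helper added above is what the SVC handlers later in this diff switch to in place of system.Memory(); per its TODO it still resolves to ApplicationMemory() for now. A hedged sketch of the intended call pattern follows; the handler below is hypothetical and only illustrates the access pattern, it is not part of this change:

// Hypothetical SVC-style handler, shown only to illustrate the new
// GetCurrentMemory() access pattern adopted by the svc_* hunks below.
Result ExampleReadGuestBuffer(Core::System& system, u64 address, u64 len) {
    R_SUCCEED_IF(len == 0);

    std::vector<u8> buffer(len);
    // Guest memory is reached through the kernel (eventually the owning
    // process) instead of a single global system.Memory() instance.
    GetCurrentMemory(system.Kernel()).ReadBlock(address, buffer.data(), buffer.size());

    LOG_DEBUG(Kernel_SVC, "read {} bytes from 0x{:X}", buffer.size(), address);
    R_SUCCEED();
}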
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index 53fa64369..9c1a41128 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -14,7 +14,6 @@
#include <boost/intrusive/list.hpp>
-#include "common/common_types.h"
#include "common/intrusive_red_black_tree.h"
#include "common/spin_lock.h"
#include "core/arm/arm_interface.h"
@@ -23,6 +22,7 @@
#include "core/hle/kernel/k_spin_lock.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_timer_task.h"
+#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/k_worker_task.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/kernel/svc_common.h"
@@ -34,6 +34,9 @@ class Fiber;
}
namespace Core {
+namespace Memory {
+class Memory;
+}
class ARM_Interface;
class System;
} // namespace Core
@@ -46,7 +49,7 @@ class KProcess;
class KScheduler;
class KThreadQueue;
-using KThreadFunction = VAddr;
+using KThreadFunction = KProcessAddress;
enum class ThreadType : u32 {
Main = 0,
@@ -113,6 +116,7 @@ KThread& GetCurrentThread(KernelCore& kernel);
KProcess* GetCurrentProcessPointer(KernelCore& kernel);
KProcess& GetCurrentProcess(KernelCore& kernel);
s32 GetCurrentCoreId(KernelCore& kernel);
+Core::Memory::Memory& GetCurrentMemory(KernelCore& kernel);
class KThread final : public KAutoObjectWithSlabHeapAndContainer<KThread, KWorkerTask>,
public boost::intrusive::list_base_hook<>,
@@ -230,9 +234,9 @@ public:
/*
* Returns the Thread Local Storage address of the current thread
- * @returns VAddr of the thread's TLS
+ * @returns Address of the thread's TLS
*/
- VAddr GetTlsAddress() const {
+ KProcessAddress GetTlsAddress() const {
return m_tls_address;
}
@@ -419,8 +423,8 @@ public:
KThreadFunction func, uintptr_t arg, s32 virt_core);
static Result InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func,
- uintptr_t arg, VAddr user_stack_top, s32 prio, s32 virt_core,
- KProcess* owner);
+ uintptr_t arg, KProcessAddress user_stack_top, s32 prio,
+ s32 virt_core, KProcess* owner);
static Result InitializeServiceThread(Core::System& system, KThread* thread,
std::function<void()>&& thread_func, s32 prio,
@@ -565,15 +569,15 @@ public:
Result GetThreadContext3(std::vector<u8>& out);
- KThread* RemoveUserWaiterByKey(bool* out_has_waiters, VAddr key) {
+ KThread* RemoveUserWaiterByKey(bool* out_has_waiters, KProcessAddress key) {
return this->RemoveWaiterByKey(out_has_waiters, key, false);
}
- KThread* RemoveKernelWaiterByKey(bool* out_has_waiters, VAddr key) {
+ KThread* RemoveKernelWaiterByKey(bool* out_has_waiters, KProcessAddress key) {
return this->RemoveWaiterByKey(out_has_waiters, key, true);
}
- VAddr GetAddressKey() const {
+ KProcessAddress GetAddressKey() const {
return m_address_key;
}
@@ -591,14 +595,14 @@ public:
// to cope with arbitrary host pointers making their way
// into things.
- void SetUserAddressKey(VAddr key, u32 val) {
+ void SetUserAddressKey(KProcessAddress key, u32 val) {
ASSERT(m_waiting_lock_info == nullptr);
m_address_key = key;
m_address_key_value = val;
m_is_kernel_address_key = false;
}
- void SetKernelAddressKey(VAddr key) {
+ void SetKernelAddressKey(KProcessAddress key) {
ASSERT(m_waiting_lock_info == nullptr);
m_address_key = key;
m_is_kernel_address_key = true;
@@ -637,12 +641,13 @@ public:
return m_argument;
}
- VAddr GetUserStackTop() const {
+ KProcessAddress GetUserStackTop() const {
return m_stack_top;
}
private:
- KThread* RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_kernel_address_key);
+ KThread* RemoveWaiterByKey(bool* out_has_waiters, KProcessAddress key,
+ bool is_kernel_address_key);
static constexpr size_t PriorityInheritanceCountMax = 10;
union SyncObjectBuffer {
@@ -695,12 +700,13 @@ private:
void IncreaseBasePriority(s32 priority);
- Result Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio,
+ Result Initialize(KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top, s32 prio,
s32 virt_core, KProcess* owner, ThreadType type);
static Result InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
- VAddr user_stack_top, s32 prio, s32 core, KProcess* owner,
- ThreadType type, std::function<void()>&& init_func);
+ KProcessAddress user_stack_top, s32 prio, s32 core,
+ KProcess* owner, ThreadType type,
+ std::function<void()>&& init_func);
// For core KThread implementation
ThreadContext32 m_thread_context_32{};
@@ -749,7 +755,8 @@ public:
public:
explicit LockWithPriorityInheritanceInfo(KernelCore&) {}
- static LockWithPriorityInheritanceInfo* Create(KernelCore& kernel, VAddr address_key,
+ static LockWithPriorityInheritanceInfo* Create(KernelCore& kernel,
+ KProcessAddress address_key,
bool is_kernel_address_key) {
// Create a new lock info.
auto* new_lock = LockWithPriorityInheritanceInfo::Allocate(kernel);
@@ -797,7 +804,7 @@ public:
return m_tree;
}
- VAddr GetAddressKey() const {
+ KProcessAddress GetAddressKey() const {
return m_address_key;
}
bool GetIsKernelAddressKey() const {
@@ -812,7 +819,7 @@ public:
private:
LockWithPriorityInheritanceThreadTree m_tree{};
- VAddr m_address_key{};
+ KProcessAddress m_address_key{};
KThread* m_owner{};
u32 m_waiter_count{};
bool m_is_kernel_address_key{};
@@ -827,7 +834,8 @@ public:
}
void AddHeldLock(LockWithPriorityInheritanceInfo* lock_info);
- LockWithPriorityInheritanceInfo* FindHeldLock(VAddr address_key, bool is_kernel_address_key);
+ LockWithPriorityInheritanceInfo* FindHeldLock(KProcessAddress address_key,
+ bool is_kernel_address_key);
private:
using LockWithPriorityInheritanceInfoList =
@@ -839,11 +847,11 @@ private:
KAffinityMask m_physical_affinity_mask{};
u64 m_thread_id{};
std::atomic<s64> m_cpu_time{};
- VAddr m_address_key{};
+ KProcessAddress m_address_key{};
KProcess* m_parent{};
- VAddr m_kernel_stack_top{};
+ KVirtualAddress m_kernel_stack_top{};
u32* m_light_ipc_data{};
- VAddr m_tls_address{};
+ KProcessAddress m_tls_address{};
KLightLock m_activity_pause_lock;
s64 m_schedule_count{};
s64 m_last_scheduled_tick{};
@@ -887,16 +895,16 @@ private:
// For debugging
std::vector<KSynchronizationObject*> m_wait_objects_for_debugging{};
- VAddr m_mutex_wait_address_for_debugging{};
+ KProcessAddress m_mutex_wait_address_for_debugging{};
ThreadWaitReasonForDebugging m_wait_reason_for_debugging{};
uintptr_t m_argument{};
- VAddr m_stack_top{};
+ KProcessAddress m_stack_top{};
public:
using ConditionVariableThreadTreeType = ConditionVariableThreadTree;
- void SetConditionVariable(ConditionVariableThreadTree* tree, VAddr address, u64 cv_key,
- u32 value) {
+ void SetConditionVariable(ConditionVariableThreadTree* tree, KProcessAddress address,
+ u64 cv_key, u32 value) {
ASSERT(m_waiting_lock_info == nullptr);
m_condvar_tree = tree;
m_condvar_key = cv_key;
diff --git a/src/core/hle/kernel/k_thread_local_page.cpp b/src/core/hle/kernel/k_thread_local_page.cpp
index c2af6898a..b4a1e3cdb 100644
--- a/src/core/hle/kernel/k_thread_local_page.cpp
+++ b/src/core/hle/kernel/k_thread_local_page.cpp
@@ -37,7 +37,7 @@ Result KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) {
Result KThreadLocalPage::Finalize() {
// Get the physical address of the page.
- const PAddr phys_addr = m_owner->PageTable().GetPhysicalAddr(m_virt_addr);
+ const KPhysicalAddress phys_addr = m_owner->PageTable().GetPhysicalAddr(m_virt_addr);
ASSERT(phys_addr);
// Unmap the page.
@@ -49,7 +49,7 @@ Result KThreadLocalPage::Finalize() {
return ResultSuccess;
}
-VAddr KThreadLocalPage::Reserve() {
+KProcessAddress KThreadLocalPage::Reserve() {
for (size_t i = 0; i < m_is_region_free.size(); i++) {
if (m_is_region_free[i]) {
m_is_region_free[i] = false;
@@ -60,7 +60,7 @@ VAddr KThreadLocalPage::Reserve() {
return 0;
}
-void KThreadLocalPage::Release(VAddr addr) {
+void KThreadLocalPage::Release(KProcessAddress addr) {
m_is_region_free[this->GetRegionIndex(addr)] = true;
}
diff --git a/src/core/hle/kernel/k_thread_local_page.h b/src/core/hle/kernel/k_thread_local_page.h
index 71254eb55..813f32a7e 100644
--- a/src/core/hle/kernel/k_thread_local_page.h
+++ b/src/core/hle/kernel/k_thread_local_page.h
@@ -27,19 +27,20 @@ public:
static_assert(RegionsPerPage > 0);
public:
- constexpr explicit KThreadLocalPage(KernelCore&, VAddr addr = {}) : m_virt_addr(addr) {
+ constexpr explicit KThreadLocalPage(KernelCore&, KProcessAddress addr = {})
+ : m_virt_addr(addr) {
m_is_region_free.fill(true);
}
- constexpr VAddr GetAddress() const {
+ constexpr KProcessAddress GetAddress() const {
return m_virt_addr;
}
Result Initialize(KernelCore& kernel, KProcess* process);
Result Finalize();
- VAddr Reserve();
- void Release(VAddr addr);
+ KProcessAddress Reserve();
+ void Release(KProcessAddress addr);
bool IsAllUsed() const {
return std::ranges::all_of(m_is_region_free.begin(), m_is_region_free.end(),
@@ -60,7 +61,7 @@ public:
}
public:
- using RedBlackKeyType = VAddr;
+ using RedBlackKeyType = KProcessAddress;
static constexpr RedBlackKeyType GetRedBlackKey(const RedBlackKeyType& v) {
return v;
@@ -72,8 +73,8 @@ public:
template <typename T>
requires(std::same_as<T, KThreadLocalPage> || std::same_as<T, RedBlackKeyType>)
static constexpr int Compare(const T& lhs, const KThreadLocalPage& rhs) {
- const VAddr lval = GetRedBlackKey(lhs);
- const VAddr rval = GetRedBlackKey(rhs);
+ const KProcessAddress lval = GetRedBlackKey(lhs);
+ const KProcessAddress rval = GetRedBlackKey(rhs);
if (lval < rval) {
return -1;
@@ -85,22 +86,22 @@ public:
}
private:
- constexpr VAddr GetRegionAddress(size_t i) const {
+ constexpr KProcessAddress GetRegionAddress(size_t i) const {
return this->GetAddress() + i * Svc::ThreadLocalRegionSize;
}
- constexpr bool Contains(VAddr addr) const {
+ constexpr bool Contains(KProcessAddress addr) const {
return this->GetAddress() <= addr && addr < this->GetAddress() + PageSize;
}
- constexpr size_t GetRegionIndex(VAddr addr) const {
- ASSERT(Common::IsAligned(addr, Svc::ThreadLocalRegionSize));
+ constexpr size_t GetRegionIndex(KProcessAddress addr) const {
+ ASSERT(Common::IsAligned(GetInteger(addr), Svc::ThreadLocalRegionSize));
ASSERT(this->Contains(addr));
return (addr - this->GetAddress()) / Svc::ThreadLocalRegionSize;
}
private:
- VAddr m_virt_addr{};
+ KProcessAddress m_virt_addr{};
KProcess* m_owner{};
KernelCore* m_kernel{};
std::array<bool, RegionsPerPage> m_is_region_free{};
diff --git a/src/core/hle/kernel/k_transfer_memory.cpp b/src/core/hle/kernel/k_transfer_memory.cpp
index 471349282..13d34125c 100644
--- a/src/core/hle/kernel/k_transfer_memory.cpp
+++ b/src/core/hle/kernel/k_transfer_memory.cpp
@@ -13,7 +13,7 @@ KTransferMemory::KTransferMemory(KernelCore& kernel)
KTransferMemory::~KTransferMemory() = default;
-Result KTransferMemory::Initialize(VAddr address, std::size_t size,
+Result KTransferMemory::Initialize(KProcessAddress address, std::size_t size,
Svc::MemoryPermission owner_perm) {
// Set members.
m_owner = GetCurrentProcessPointer(m_kernel);
diff --git a/src/core/hle/kernel/k_transfer_memory.h b/src/core/hle/kernel/k_transfer_memory.h
index 3d4d795a5..54f97ccb4 100644
--- a/src/core/hle/kernel/k_transfer_memory.h
+++ b/src/core/hle/kernel/k_transfer_memory.h
@@ -26,7 +26,7 @@ public:
explicit KTransferMemory(KernelCore& kernel);
~KTransferMemory() override;
- Result Initialize(VAddr address, std::size_t size, Svc::MemoryPermission owner_perm);
+ Result Initialize(KProcessAddress address, std::size_t size, Svc::MemoryPermission owner_perm);
void Finalize() override;
@@ -44,7 +44,7 @@ public:
return m_owner;
}
- VAddr GetSourceAddress() const {
+ KProcessAddress GetSourceAddress() const {
return m_address;
}
@@ -54,7 +54,7 @@ public:
private:
KProcess* m_owner{};
- VAddr m_address{};
+ KProcessAddress m_address{};
Svc::MemoryPermission m_owner_perm{};
size_t m_size{};
bool m_is_initialized{};
diff --git a/src/core/hle/kernel/k_typed_address.h b/src/core/hle/kernel/k_typed_address.h
new file mode 100644
index 000000000..d57535ba0
--- /dev/null
+++ b/src/core/hle/kernel/k_typed_address.h
@@ -0,0 +1,12 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/typed_address.h"
+
+namespace Kernel {
+
+using KPhysicalAddress = Common::PhysicalAddress;
+using KVirtualAddress = Common::VirtualAddress;
+using KProcessAddress = Common::ProcessAddress;
+
+} // namespace Kernel
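For context on what these aliases pull in: common/typed_address.h wraps a raw integer in a distinct type per address space, so physical, kernel-virtual, and process addresses can no longer be mixed silently, and GetInteger() is the explicit escape hatch used throughout the hunks in this diff. The snippet below is a reduced, illustrative model of that idea only; the class name is made up and the real Common::TypedAddress has a richer operator set:

// Illustrative only: a minimal model of a typed address wrapper.
#include <compare>
#include <cstdint>

using u64 = std::uint64_t;

class ExampleProcessAddress {
public:
    constexpr ExampleProcessAddress(u64 value = 0) : m_value{value} {}

    // Offset arithmetic stays in the typed domain.
    constexpr ExampleProcessAddress operator+(u64 offset) const {
        return ExampleProcessAddress{m_value + offset};
    }
    // Subtracting two addresses yields a plain integer distance.
    constexpr u64 operator-(ExampleProcessAddress rhs) const {
        return m_value - rhs.m_value;
    }
    constexpr auto operator<=>(const ExampleProcessAddress&) const = default;

    constexpr u64 Value() const {
        return m_value;
    }

private:
    u64 m_value;
};

// Mirrors the GetInteger(addr) escape hatch seen throughout this diff.
constexpr u64 GetInteger(ExampleProcessAddress addr) {
    return addr.Value();
}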
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 98ecaf12f..4f3366c9d 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -102,7 +102,7 @@ struct KernelCore::Impl {
void InitializeCores() {
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
cores[core_id]->Initialize((*application_process).Is64BitProcess());
- system.Memory().SetCurrentPageTable(*application_process, core_id);
+ system.ApplicationMemory().SetCurrentPageTable(*application_process, core_id);
}
}
@@ -206,7 +206,7 @@ struct KernelCore::Impl {
void InitializePhysicalCores() {
exclusive_monitor =
- Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES);
+ Core::MakeExclusiveMonitor(system.ApplicationMemory(), Core::Hardware::NUM_CPU_CORES);
for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
const s32 core{static_cast<s32>(i)};
@@ -271,9 +271,9 @@ struct KernelCore::Impl {
system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event);
}
- void InitializeResourceManagers(KernelCore& kernel, VAddr address, size_t size) {
+ void InitializeResourceManagers(KernelCore& kernel, KVirtualAddress address, size_t size) {
// Ensure that the buffer is suitable for our use.
- ASSERT(Common::IsAligned(address, PageSize));
+ ASSERT(Common::IsAligned(GetInteger(address), PageSize));
ASSERT(Common::IsAligned(size, PageSize));
// Ensure that we have space for our reference counts.
@@ -462,29 +462,30 @@ struct KernelCore::Impl {
KernelPhysicalAddressSpaceBase + KernelPhysicalAddressSpaceSize - 1);
// Save start and end for ease of use.
- const VAddr code_start_virt_addr = KernelVirtualAddressCodeBase;
- const VAddr code_end_virt_addr = KernelVirtualAddressCodeEnd;
+ constexpr KVirtualAddress code_start_virt_addr = KernelVirtualAddressCodeBase;
+ constexpr KVirtualAddress code_end_virt_addr = KernelVirtualAddressCodeEnd;
// Setup the containing kernel region.
constexpr size_t KernelRegionSize = 1_GiB;
constexpr size_t KernelRegionAlign = 1_GiB;
- constexpr VAddr kernel_region_start =
- Common::AlignDown(code_start_virt_addr, KernelRegionAlign);
+ constexpr KVirtualAddress kernel_region_start =
+ Common::AlignDown(GetInteger(code_start_virt_addr), KernelRegionAlign);
size_t kernel_region_size = KernelRegionSize;
if (!(kernel_region_start + KernelRegionSize - 1 <= KernelVirtualAddressSpaceLast)) {
- kernel_region_size = KernelVirtualAddressSpaceEnd - kernel_region_start;
+ kernel_region_size = KernelVirtualAddressSpaceEnd - GetInteger(kernel_region_start);
}
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
- kernel_region_start, kernel_region_size, KMemoryRegionType_Kernel));
+ GetInteger(kernel_region_start), kernel_region_size, KMemoryRegionType_Kernel));
// Setup the code region.
constexpr size_t CodeRegionAlign = PageSize;
- constexpr VAddr code_region_start =
- Common::AlignDown(code_start_virt_addr, CodeRegionAlign);
- constexpr VAddr code_region_end = Common::AlignUp(code_end_virt_addr, CodeRegionAlign);
+ constexpr KVirtualAddress code_region_start =
+ Common::AlignDown(GetInteger(code_start_virt_addr), CodeRegionAlign);
+ constexpr KVirtualAddress code_region_end =
+ Common::AlignUp(GetInteger(code_end_virt_addr), CodeRegionAlign);
constexpr size_t code_region_size = code_region_end - code_region_start;
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
- code_region_start, code_region_size, KMemoryRegionType_KernelCode));
+ GetInteger(code_region_start), code_region_size, KMemoryRegionType_KernelCode));
// Setup board-specific device physical regions.
Init::SetupDevicePhysicalMemoryRegions(*memory_layout);
@@ -520,11 +521,11 @@ struct KernelCore::Impl {
ASSERT(misc_region_size > 0);
// Setup the misc region.
- const VAddr misc_region_start =
+ const KVirtualAddress misc_region_start =
memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
misc_region_size, MiscRegionAlign, KMemoryRegionType_Kernel);
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
- misc_region_start, misc_region_size, KMemoryRegionType_KernelMisc));
+ GetInteger(misc_region_start), misc_region_size, KMemoryRegionType_KernelMisc));
// Determine if we'll use extra thread resources.
const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit();
@@ -532,11 +533,11 @@ struct KernelCore::Impl {
// Setup the stack region.
constexpr size_t StackRegionSize = 14_MiB;
constexpr size_t StackRegionAlign = KernelAslrAlignment;
- const VAddr stack_region_start =
+ const KVirtualAddress stack_region_start =
memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
StackRegionSize, StackRegionAlign, KMemoryRegionType_Kernel);
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
- stack_region_start, StackRegionSize, KMemoryRegionType_KernelStack));
+ GetInteger(stack_region_start), StackRegionSize, KMemoryRegionType_KernelStack));
// Determine the size of the resource region.
const size_t resource_region_size =
@@ -548,29 +549,29 @@ struct KernelCore::Impl {
ASSERT(slab_region_size <= resource_region_size);
// Setup the slab region.
- const PAddr code_start_phys_addr = KernelPhysicalAddressCodeBase;
- const PAddr code_end_phys_addr = code_start_phys_addr + code_region_size;
- const PAddr slab_start_phys_addr = code_end_phys_addr;
- const PAddr slab_end_phys_addr = slab_start_phys_addr + slab_region_size;
+ const KPhysicalAddress code_start_phys_addr = KernelPhysicalAddressCodeBase;
+ const KPhysicalAddress code_end_phys_addr = code_start_phys_addr + code_region_size;
+ const KPhysicalAddress slab_start_phys_addr = code_end_phys_addr;
+ const KPhysicalAddress slab_end_phys_addr = slab_start_phys_addr + slab_region_size;
constexpr size_t SlabRegionAlign = KernelAslrAlignment;
const size_t slab_region_needed_size =
- Common::AlignUp(code_end_phys_addr + slab_region_size, SlabRegionAlign) -
- Common::AlignDown(code_end_phys_addr, SlabRegionAlign);
- const VAddr slab_region_start =
+ Common::AlignUp(GetInteger(code_end_phys_addr) + slab_region_size, SlabRegionAlign) -
+ Common::AlignDown(GetInteger(code_end_phys_addr), SlabRegionAlign);
+ const KVirtualAddress slab_region_start =
memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
slab_region_needed_size, SlabRegionAlign, KMemoryRegionType_Kernel) +
- (code_end_phys_addr % SlabRegionAlign);
+ (GetInteger(code_end_phys_addr) % SlabRegionAlign);
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
- slab_region_start, slab_region_size, KMemoryRegionType_KernelSlab));
+ GetInteger(slab_region_start), slab_region_size, KMemoryRegionType_KernelSlab));
// Setup the temp region.
constexpr size_t TempRegionSize = 128_MiB;
constexpr size_t TempRegionAlign = KernelAslrAlignment;
- const VAddr temp_region_start =
+ const KVirtualAddress temp_region_start =
memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
TempRegionSize, TempRegionAlign, KMemoryRegionType_Kernel);
- ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(temp_region_start, TempRegionSize,
- KMemoryRegionType_KernelTemp));
+ ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
+ GetInteger(temp_region_start), TempRegionSize, KMemoryRegionType_KernelTemp));
// Automatically map in devices that have auto-map attributes.
for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
@@ -596,35 +597,37 @@ struct KernelCore::Impl {
region.SetTypeAttribute(KMemoryRegionAttr_DidKernelMap);
// Create a virtual pair region and insert it into the tree.
- const PAddr map_phys_addr = Common::AlignDown(region.GetAddress(), PageSize);
+ const KPhysicalAddress map_phys_addr = Common::AlignDown(region.GetAddress(), PageSize);
const size_t map_size =
- Common::AlignUp(region.GetEndAddress(), PageSize) - map_phys_addr;
- const VAddr map_virt_addr =
+ Common::AlignUp(region.GetEndAddress(), PageSize) - GetInteger(map_phys_addr);
+ const KVirtualAddress map_virt_addr =
memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
map_size, PageSize, KMemoryRegionType_KernelMisc, PageSize);
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
- map_virt_addr, map_size, KMemoryRegionType_KernelMiscMappedDevice));
- region.SetPairAddress(map_virt_addr + region.GetAddress() - map_phys_addr);
+ GetInteger(map_virt_addr), map_size, KMemoryRegionType_KernelMiscMappedDevice));
+ region.SetPairAddress(GetInteger(map_virt_addr) + region.GetAddress() -
+ GetInteger(map_phys_addr));
}
Init::SetupDramPhysicalMemoryRegions(*memory_layout);
// Insert a physical region for the kernel code region.
ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
- code_start_phys_addr, code_region_size, KMemoryRegionType_DramKernelCode));
+ GetInteger(code_start_phys_addr), code_region_size, KMemoryRegionType_DramKernelCode));
// Insert a physical region for the kernel slab region.
ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
- slab_start_phys_addr, slab_region_size, KMemoryRegionType_DramKernelSlab));
+ GetInteger(slab_start_phys_addr), slab_region_size, KMemoryRegionType_DramKernelSlab));
// Determine size available for kernel page table heaps, requiring > 8 MB.
- const PAddr resource_end_phys_addr = slab_start_phys_addr + resource_region_size;
+ const KPhysicalAddress resource_end_phys_addr = slab_start_phys_addr + resource_region_size;
const size_t page_table_heap_size = resource_end_phys_addr - slab_end_phys_addr;
ASSERT(page_table_heap_size / 4_MiB > 2);
// Insert a physical region for the kernel page table heap region
ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
- slab_end_phys_addr, page_table_heap_size, KMemoryRegionType_DramKernelPtHeap));
+ GetInteger(slab_end_phys_addr), page_table_heap_size,
+ KMemoryRegionType_DramKernelPtHeap));
// All DRAM regions that we haven't tagged by this point will be mapped under the linear
// mapping. Tag them.
@@ -646,20 +649,21 @@ struct KernelCore::Impl {
// Setup the linear mapping region.
constexpr size_t LinearRegionAlign = 1_GiB;
- const PAddr aligned_linear_phys_start =
+ const KPhysicalAddress aligned_linear_phys_start =
Common::AlignDown(linear_extents.GetAddress(), LinearRegionAlign);
const size_t linear_region_size =
Common::AlignUp(linear_extents.GetEndAddress(), LinearRegionAlign) -
- aligned_linear_phys_start;
- const VAddr linear_region_start =
+ GetInteger(aligned_linear_phys_start);
+ const KVirtualAddress linear_region_start =
memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
linear_region_size, LinearRegionAlign, KMemoryRegionType_None, LinearRegionAlign);
- const u64 linear_region_phys_to_virt_diff = linear_region_start - aligned_linear_phys_start;
+ const u64 linear_region_phys_to_virt_diff =
+ GetInteger(linear_region_start) - GetInteger(aligned_linear_phys_start);
// Map and create regions for all the linearly-mapped data.
{
- PAddr cur_phys_addr = 0;
+ KPhysicalAddress cur_phys_addr = 0;
u64 cur_size = 0;
for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
if (!region.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) {
@@ -678,15 +682,16 @@ struct KernelCore::Impl {
cur_size = region.GetSize();
}
- const VAddr region_virt_addr =
+ const KVirtualAddress region_virt_addr =
region.GetAddress() + linear_region_phys_to_virt_diff;
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
- region_virt_addr, region.GetSize(),
+ GetInteger(region_virt_addr), region.GetSize(),
GetTypeForVirtualLinearMapping(region.GetType())));
- region.SetPairAddress(region_virt_addr);
+ region.SetPairAddress(GetInteger(region_virt_addr));
KMemoryRegion* virt_region =
- memory_layout->GetVirtualMemoryRegionTree().FindModifiable(region_virt_addr);
+ memory_layout->GetVirtualMemoryRegionTree().FindModifiable(
+ GetInteger(region_virt_addr));
ASSERT(virt_region != nullptr);
virt_region->SetPairAddress(region.GetAddress());
}
@@ -694,10 +699,11 @@ struct KernelCore::Impl {
// Insert regions for the initial page table region.
ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
- resource_end_phys_addr, KernelPageTableHeapSize, KMemoryRegionType_DramKernelInitPt));
+ GetInteger(resource_end_phys_addr), KernelPageTableHeapSize,
+ KMemoryRegionType_DramKernelInitPt));
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
- resource_end_phys_addr + linear_region_phys_to_virt_diff, KernelPageTableHeapSize,
- KMemoryRegionType_VirtualDramKernelInitPt));
+ GetInteger(resource_end_phys_addr) + linear_region_phys_to_virt_diff,
+ KernelPageTableHeapSize, KMemoryRegionType_VirtualDramKernelInitPt));
// All linear-mapped DRAM regions that we haven't tagged by this point will be allocated to
// some pool partition. Tag them.
@@ -969,12 +975,12 @@ void KernelCore::InvalidateAllInstructionCaches() {
}
}
-void KernelCore::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) {
+void KernelCore::InvalidateCpuInstructionCacheRange(KProcessAddress addr, std::size_t size) {
for (auto& physical_core : impl->cores) {
if (!physical_core->IsInitialized()) {
continue;
}
- physical_core->ArmInterface().InvalidateCacheRange(addr, size);
+ physical_core->ArmInterface().InvalidateCacheRange(GetInteger(addr), size);
}
}
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 183a4d227..d5b08eeb5 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -14,6 +14,7 @@
#include "core/hardware_properties.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_slab_heap.h"
+#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/svc_common.h"
namespace Core {
@@ -185,7 +186,7 @@ public:
void InvalidateAllInstructionCaches();
- void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size);
+ void InvalidateCpuInstructionCacheRange(KProcessAddress addr, std::size_t size);
/// Registers all kernel objects with the global emulation state; this is purely for tracking
/// leaks after emulation has been shut down.
diff --git a/src/core/hle/kernel/memory_types.h b/src/core/hle/kernel/memory_types.h
index 92b8b37ac..18de675cc 100644
--- a/src/core/hle/kernel/memory_types.h
+++ b/src/core/hle/kernel/memory_types.h
@@ -6,6 +6,7 @@
#include <array>
#include "common/common_types.h"
+#include "core/hle/kernel/k_typed_address.h"
namespace Kernel {
@@ -14,7 +15,4 @@ constexpr std::size_t PageSize{1 << PageBits};
using Page = std::array<u8, PageSize>;
-using KPhysicalAddress = PAddr;
-using KProcessAddress = VAddr;
-
} // namespace Kernel
diff --git a/src/core/hle/kernel/svc/svc_address_arbiter.cpp b/src/core/hle/kernel/svc/svc_address_arbiter.cpp
index 22071731b..04cc5ea64 100644
--- a/src/core/hle/kernel/svc/svc_address_arbiter.cpp
+++ b/src/core/hle/kernel/svc/svc_address_arbiter.cpp
@@ -37,7 +37,7 @@ constexpr bool IsValidArbitrationType(Svc::ArbitrationType type) {
} // namespace
// Wait for an address (via Address Arbiter)
-Result WaitForAddress(Core::System& system, VAddr address, ArbitrationType arb_type, s32 value,
+Result WaitForAddress(Core::System& system, u64 address, ArbitrationType arb_type, s32 value,
s64 timeout_ns) {
LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, arb_type=0x{:X}, value=0x{:X}, timeout_ns={}",
address, arb_type, value, timeout_ns);
@@ -68,7 +68,7 @@ Result WaitForAddress(Core::System& system, VAddr address, ArbitrationType arb_t
}
// Signals to an address (via Address Arbiter)
-Result SignalToAddress(Core::System& system, VAddr address, SignalType signal_type, s32 value,
+Result SignalToAddress(Core::System& system, u64 address, SignalType signal_type, s32 value,
s32 count) {
LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, signal_type=0x{:X}, value=0x{:X}, count=0x{:X}",
address, signal_type, value, count);
@@ -82,12 +82,12 @@ Result SignalToAddress(Core::System& system, VAddr address, SignalType signal_ty
.SignalAddressArbiter(address, signal_type, value, count));
}
-Result WaitForAddress64(Core::System& system, VAddr address, ArbitrationType arb_type, s32 value,
+Result WaitForAddress64(Core::System& system, u64 address, ArbitrationType arb_type, s32 value,
s64 timeout_ns) {
R_RETURN(WaitForAddress(system, address, arb_type, value, timeout_ns));
}
-Result SignalToAddress64(Core::System& system, VAddr address, SignalType signal_type, s32 value,
+Result SignalToAddress64(Core::System& system, u64 address, SignalType signal_type, s32 value,
s32 count) {
R_RETURN(SignalToAddress(system, address, signal_type, value, count));
}
diff --git a/src/core/hle/kernel/svc/svc_cache.cpp b/src/core/hle/kernel/svc/svc_cache.cpp
index 1779832d3..082942dab 100644
--- a/src/core/hle/kernel/svc/svc_cache.cpp
+++ b/src/core/hle/kernel/svc/svc_cache.cpp
@@ -46,7 +46,7 @@ Result FlushProcessDataCache(Core::System& system, Handle process_handle, u64 ad
R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
// Perform the operation.
- R_RETURN(system.Memory().FlushDataCache(*process, address, size));
+ R_RETURN(GetCurrentMemory(system.Kernel()).FlushDataCache(address, size));
}
void FlushEntireDataCache64(Core::System& system) {
diff --git a/src/core/hle/kernel/svc/svc_code_memory.cpp b/src/core/hle/kernel/svc/svc_code_memory.cpp
index 43feab986..687baff82 100644
--- a/src/core/hle/kernel/svc/svc_code_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_code_memory.cpp
@@ -29,7 +29,7 @@ constexpr bool IsValidUnmapFromOwnerCodeMemoryPermission(MemoryPermission perm)
} // namespace
-Result CreateCodeMemory(Core::System& system, Handle* out, VAddr address, uint64_t size) {
+Result CreateCodeMemory(Core::System& system, Handle* out, u64 address, uint64_t size) {
LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, size=0x{:X}", address, size);
// Get kernel instance.
@@ -64,7 +64,7 @@ Result CreateCodeMemory(Core::System& system, Handle* out, VAddr address, uint64
}
Result ControlCodeMemory(Core::System& system, Handle code_memory_handle,
- CodeMemoryOperation operation, VAddr address, uint64_t size,
+ CodeMemoryOperation operation, u64 address, uint64_t size,
MemoryPermission perm) {
LOG_TRACE(Kernel_SVC,
diff --git a/src/core/hle/kernel/svc/svc_condition_variable.cpp b/src/core/hle/kernel/svc/svc_condition_variable.cpp
index 648ed23d0..ca120d67e 100644
--- a/src/core/hle/kernel/svc/svc_condition_variable.cpp
+++ b/src/core/hle/kernel/svc/svc_condition_variable.cpp
@@ -11,7 +11,7 @@
namespace Kernel::Svc {
/// Wait process wide key atomic
-Result WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_key, u32 tag,
+Result WaitProcessWideKeyAtomic(Core::System& system, u64 address, u64 cv_key, u32 tag,
s64 timeout_ns) {
LOG_TRACE(Kernel_SVC, "called address={:X}, cv_key={:X}, tag=0x{:08X}, timeout_ns={}", address,
cv_key, tag, timeout_ns);
@@ -43,7 +43,7 @@ Result WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_ke
}
/// Signal process wide key
-void SignalProcessWideKey(Core::System& system, VAddr cv_key, s32 count) {
+void SignalProcessWideKey(Core::System& system, u64 cv_key, s32 count) {
LOG_TRACE(Kernel_SVC, "called, cv_key=0x{:X}, count=0x{:08X}", cv_key, count);
// Signal the condition variable.
diff --git a/src/core/hle/kernel/svc/svc_debug_string.cpp b/src/core/hle/kernel/svc/svc_debug_string.cpp
index d4bf062d1..4c14ce668 100644
--- a/src/core/hle/kernel/svc/svc_debug_string.cpp
+++ b/src/core/hle/kernel/svc/svc_debug_string.cpp
@@ -2,17 +2,18 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/core.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/svc.h"
#include "core/memory.h"
namespace Kernel::Svc {
/// Used to output a message on a debug hardware unit - does nothing on a retail unit
-Result OutputDebugString(Core::System& system, VAddr address, u64 len) {
+Result OutputDebugString(Core::System& system, u64 address, u64 len) {
R_SUCCEED_IF(len == 0);
std::string str(len, '\0');
- system.Memory().ReadBlock(address, str.data(), str.size());
+ GetCurrentMemory(system.Kernel()).ReadBlock(address, str.data(), str.size());
LOG_DEBUG(Debug_Emulated, "{}", str);
R_SUCCEED();
diff --git a/src/core/hle/kernel/svc/svc_exception.cpp b/src/core/hle/kernel/svc/svc_exception.cpp
index c2782908d..580cf2f75 100644
--- a/src/core/hle/kernel/svc/svc_exception.cpp
+++ b/src/core/hle/kernel/svc/svc_exception.cpp
@@ -20,12 +20,12 @@ void Break(Core::System& system, BreakReason reason, u64 info1, u64 info2) {
bool has_dumped_buffer{};
std::vector<u8> debug_buffer;
- const auto handle_debug_buffer = [&](VAddr addr, u64 sz) {
+ const auto handle_debug_buffer = [&](u64 addr, u64 sz) {
if (sz == 0 || addr == 0 || has_dumped_buffer) {
return;
}
- auto& memory = system.Memory();
+ auto& memory = GetCurrentMemory(system.Kernel());
// This typically is an error code so we're going to assume this is the case
if (sz == sizeof(u32)) {
diff --git a/src/core/hle/kernel/svc/svc_info.cpp b/src/core/hle/kernel/svc/svc_info.cpp
index 04b6d6964..2b2c878b5 100644
--- a/src/core/hle/kernel/svc/svc_info.cpp
+++ b/src/core/hle/kernel/svc/svc_info.cpp
@@ -54,7 +54,7 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
R_SUCCEED();
case InfoType::AliasRegionAddress:
- *result = process->PageTable().GetAliasRegionStart();
+ *result = GetInteger(process->PageTable().GetAliasRegionStart());
R_SUCCEED();
case InfoType::AliasRegionSize:
@@ -62,7 +62,7 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
R_SUCCEED();
case InfoType::HeapRegionAddress:
- *result = process->PageTable().GetHeapRegionStart();
+ *result = GetInteger(process->PageTable().GetHeapRegionStart());
R_SUCCEED();
case InfoType::HeapRegionSize:
@@ -70,7 +70,7 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
R_SUCCEED();
case InfoType::AslrRegionAddress:
- *result = process->PageTable().GetAliasCodeRegionStart();
+ *result = GetInteger(process->PageTable().GetAliasCodeRegionStart());
R_SUCCEED();
case InfoType::AslrRegionSize:
@@ -78,7 +78,7 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
R_SUCCEED();
case InfoType::StackRegionAddress:
- *result = process->PageTable().GetStackRegionStart();
+ *result = GetInteger(process->PageTable().GetStackRegionStart());
R_SUCCEED();
case InfoType::StackRegionSize:
@@ -107,7 +107,7 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
R_SUCCEED();
case InfoType::UserExceptionContextAddress:
- *result = process->GetProcessLocalRegionAddress();
+ *result = GetInteger(process->GetProcessLocalRegionAddress());
R_SUCCEED();
case InfoType::TotalNonSystemMemorySize:
diff --git a/src/core/hle/kernel/svc/svc_ipc.cpp b/src/core/hle/kernel/svc/svc_ipc.cpp
index 46fd0f2ea..ea03068aa 100644
--- a/src/core/hle/kernel/svc/svc_ipc.cpp
+++ b/src/core/hle/kernel/svc/svc_ipc.cpp
@@ -17,7 +17,7 @@ Result SendSyncRequest(Core::System& system, Handle handle) {
GetCurrentProcess(system.Kernel()).GetHandleTable().GetObject<KClientSession>(handle);
R_UNLESS(session.IsNotNull(), ResultInvalidHandle);
- LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
+ LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}", handle);
R_RETURN(session->SendSyncRequest());
}
@@ -41,12 +41,12 @@ Result ReplyAndReceive(Core::System& system, s32* out_index, uint64_t handles_ad
auto& handle_table = GetCurrentProcess(kernel).GetHandleTable();
R_UNLESS(0 <= num_handles && num_handles <= ArgumentHandleCountMax, ResultOutOfRange);
- R_UNLESS(system.Memory().IsValidVirtualAddressRange(
+ R_UNLESS(GetCurrentMemory(kernel).IsValidVirtualAddressRange(
handles_addr, static_cast<u64>(sizeof(Handle) * num_handles)),
ResultInvalidPointer);
std::vector<Handle> handles(num_handles);
- system.Memory().ReadBlock(handles_addr, handles.data(), sizeof(Handle) * num_handles);
+ GetCurrentMemory(kernel).ReadBlock(handles_addr, handles.data(), sizeof(Handle) * num_handles);
// Convert handle list to object table.
std::vector<KSynchronizationObject*> objs(num_handles);
diff --git a/src/core/hle/kernel/svc/svc_lock.cpp b/src/core/hle/kernel/svc/svc_lock.cpp
index 3681279d6..1d7bc4246 100644
--- a/src/core/hle/kernel/svc/svc_lock.cpp
+++ b/src/core/hle/kernel/svc/svc_lock.cpp
@@ -9,7 +9,7 @@
namespace Kernel::Svc {
/// Attempts to lock a mutex
-Result ArbitrateLock(Core::System& system, Handle thread_handle, VAddr address, u32 tag) {
+Result ArbitrateLock(Core::System& system, Handle thread_handle, u64 address, u32 tag) {
LOG_TRACE(Kernel_SVC, "called thread_handle=0x{:08X}, address=0x{:X}, tag=0x{:08X}",
thread_handle, address, tag);
@@ -21,7 +21,7 @@ Result ArbitrateLock(Core::System& system, Handle thread_handle, VAddr address,
}
/// Unlock a mutex
-Result ArbitrateUnlock(Core::System& system, VAddr address) {
+Result ArbitrateUnlock(Core::System& system, u64 address) {
LOG_TRACE(Kernel_SVC, "called address=0x{:X}", address);
// Validate the input address.
diff --git a/src/core/hle/kernel/svc/svc_memory.cpp b/src/core/hle/kernel/svc/svc_memory.cpp
index 4db25a3b7..5dcb7f045 100644
--- a/src/core/hle/kernel/svc/svc_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_memory.cpp
@@ -22,15 +22,14 @@ constexpr bool IsValidSetMemoryPermission(MemoryPermission perm) {
// Checks if address + size is greater than the given address
// This can return false if the size causes an overflow of a 64-bit type
// or if the given size is zero.
-constexpr bool IsValidAddressRange(VAddr address, u64 size) {
+constexpr bool IsValidAddressRange(u64 address, u64 size) {
return address + size > address;
}
// Helper function that performs the common sanity checks for svcMapMemory
// and svcUnmapMemory. This is doable, as both functions perform their sanitizing
// in the same order.
-Result MapUnmapMemorySanityChecks(const KPageTable& manager, VAddr dst_addr, VAddr src_addr,
- u64 size) {
+Result MapUnmapMemorySanityChecks(const KPageTable& manager, u64 dst_addr, u64 src_addr, u64 size) {
if (!Common::Is4KBAligned(dst_addr)) {
LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr);
R_THROW(ResultInvalidAddress);
@@ -99,7 +98,7 @@ Result MapUnmapMemorySanityChecks(const KPageTable& manager, VAddr dst_addr, VAd
} // namespace
-Result SetMemoryPermission(Core::System& system, VAddr address, u64 size, MemoryPermission perm) {
+Result SetMemoryPermission(Core::System& system, u64 address, u64 size, MemoryPermission perm) {
LOG_DEBUG(Kernel_SVC, "called, address=0x{:016X}, size=0x{:X}, perm=0x{:08X", address, size,
perm);
@@ -120,7 +119,7 @@ Result SetMemoryPermission(Core::System& system, VAddr address, u64 size, Memory
R_RETURN(page_table.SetMemoryPermission(address, size, perm));
}
-Result SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask, u32 attr) {
+Result SetMemoryAttribute(Core::System& system, u64 address, u64 size, u32 mask, u32 attr) {
LOG_DEBUG(Kernel_SVC,
"called, address=0x{:016X}, size=0x{:X}, mask=0x{:08X}, attribute=0x{:08X}", address,
size, mask, attr);
@@ -145,7 +144,7 @@ Result SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mas
}
/// Maps a memory range into a different range.
-Result MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
+Result MapMemory(Core::System& system, u64 dst_addr, u64 src_addr, u64 size) {
LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
src_addr, size);
@@ -160,7 +159,7 @@ Result MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size)
}
/// Unmaps a region that was previously mapped with svcMapMemory
-Result UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
+Result UnmapMemory(Core::System& system, u64 dst_addr, u64 src_addr, u64 size) {
LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
src_addr, size);
diff --git a/src/core/hle/kernel/svc/svc_physical_memory.cpp b/src/core/hle/kernel/svc/svc_physical_memory.cpp
index 63196e1ed..c2fbfb59a 100644
--- a/src/core/hle/kernel/svc/svc_physical_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_physical_memory.cpp
@@ -8,7 +8,7 @@
namespace Kernel::Svc {
/// Set the process heap to a given size. It can both extend and shrink the heap.
-Result SetHeapSize(Core::System& system, VAddr* out_address, u64 size) {
+Result SetHeapSize(Core::System& system, u64* out_address, u64 size) {
LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", size);
// Validate size.
@@ -20,7 +20,7 @@ Result SetHeapSize(Core::System& system, VAddr* out_address, u64 size) {
}
/// Maps memory at a desired address
-Result MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
+Result MapPhysicalMemory(Core::System& system, u64 addr, u64 size) {
LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
if (!Common::Is4KBAligned(addr)) {
@@ -69,7 +69,7 @@ Result MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
}
/// Unmaps memory previously mapped via MapPhysicalMemory
-Result UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
+Result UnmapPhysicalMemory(Core::System& system, u64 addr, u64 size) {
LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
if (!Common::Is4KBAligned(addr)) {
diff --git a/src/core/hle/kernel/svc/svc_port.cpp b/src/core/hle/kernel/svc/svc_port.cpp
index 0b5556bc4..abba757c7 100644
--- a/src/core/hle/kernel/svc/svc_port.cpp
+++ b/src/core/hle/kernel/svc/svc_port.cpp
@@ -12,9 +12,10 @@
namespace Kernel::Svc {
-Result ConnectToNamedPort(Core::System& system, Handle* out, VAddr user_name) {
+Result ConnectToNamedPort(Core::System& system, Handle* out, u64 user_name) {
// Copy the provided name from user memory to kernel memory.
- auto string_name = system.Memory().ReadCString(user_name, KObjectName::NameLengthMax);
+ auto string_name =
+ GetCurrentMemory(system.Kernel()).ReadCString(user_name, KObjectName::NameLengthMax);
std::array<char, KObjectName::NameLengthMax> name{};
std::strncpy(name.data(), string_name.c_str(), KObjectName::NameLengthMax - 1);
@@ -62,7 +63,8 @@ Result ConnectToPort(Core::System& system, Handle* out_handle, Handle port) {
Result ManageNamedPort(Core::System& system, Handle* out_server_handle, uint64_t user_name,
int32_t max_sessions) {
// Copy the provided name from user memory to kernel memory.
- auto string_name = system.Memory().ReadCString(user_name, KObjectName::NameLengthMax);
+ auto string_name =
+ GetCurrentMemory(system.Kernel()).ReadCString(user_name, KObjectName::NameLengthMax);
// Copy the provided name from user memory to kernel memory.
std::array<char, KObjectName::NameLengthMax> name{};
diff --git a/src/core/hle/kernel/svc/svc_process.cpp b/src/core/hle/kernel/svc/svc_process.cpp
index b538c37e7..619ed16a3 100644
--- a/src/core/hle/kernel/svc/svc_process.cpp
+++ b/src/core/hle/kernel/svc/svc_process.cpp
@@ -50,7 +50,7 @@ Result GetProcessId(Core::System& system, u64* out_process_id, Handle handle) {
R_SUCCEED();
}
-Result GetProcessList(Core::System& system, s32* out_num_processes, VAddr out_process_ids,
+Result GetProcessList(Core::System& system, s32* out_num_processes, u64 out_process_ids,
int32_t out_process_ids_size) {
LOG_DEBUG(Kernel_SVC, "called. out_process_ids=0x{:016X}, out_process_ids_size={}",
out_process_ids, out_process_ids_size);
@@ -73,7 +73,7 @@ Result GetProcessList(Core::System& system, s32* out_num_processes, VAddr out_pr
R_THROW(ResultInvalidCurrentMemory);
}
- auto& memory = system.Memory();
+ auto& memory = GetCurrentMemory(kernel);
const auto& process_list = kernel.GetProcessList();
const auto num_processes = process_list.size();
const auto copy_amount =
diff --git a/src/core/hle/kernel/svc/svc_process_memory.cpp b/src/core/hle/kernel/svc/svc_process_memory.cpp
index f9210ca1e..aee0f2f36 100644
--- a/src/core/hle/kernel/svc/svc_process_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_process_memory.cpp
@@ -8,7 +8,7 @@
namespace Kernel::Svc {
namespace {
-constexpr bool IsValidAddressRange(VAddr address, u64 size) {
+constexpr bool IsValidAddressRange(u64 address, u64 size) {
return address + size > address;
}
@@ -26,7 +26,7 @@ constexpr bool IsValidProcessMemoryPermission(Svc::MemoryPermission perm) {
} // namespace
-Result SetProcessMemoryPermission(Core::System& system, Handle process_handle, VAddr address,
+Result SetProcessMemoryPermission(Core::System& system, Handle process_handle, u64 address,
u64 size, Svc::MemoryPermission perm) {
LOG_TRACE(Kernel_SVC,
"called, process_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}",
@@ -56,8 +56,8 @@ Result SetProcessMemoryPermission(Core::System& system, Handle process_handle, V
R_RETURN(page_table.SetProcessMemoryPermission(address, size, perm));
}
-Result MapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
- VAddr src_address, u64 size) {
+Result MapProcessMemory(Core::System& system, u64 dst_address, Handle process_handle,
+ u64 src_address, u64 size) {
LOG_TRACE(Kernel_SVC,
"called, dst_address=0x{:X}, process_handle=0x{:X}, src_address=0x{:X}, size=0x{:X}",
dst_address, process_handle, src_address, size);
@@ -97,8 +97,8 @@ Result MapProcessMemory(Core::System& system, VAddr dst_address, Handle process_
KMemoryPermission::UserReadWrite));
}
-Result UnmapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
- VAddr src_address, u64 size) {
+Result UnmapProcessMemory(Core::System& system, u64 dst_address, Handle process_handle,
+ u64 src_address, u64 size) {
LOG_TRACE(Kernel_SVC,
"called, dst_address=0x{:X}, process_handle=0x{:X}, src_address=0x{:X}, size=0x{:X}",
dst_address, process_handle, src_address, size);
diff --git a/src/core/hle/kernel/svc/svc_query_memory.cpp b/src/core/hle/kernel/svc/svc_query_memory.cpp
index 457ebf950..4d9fcd25f 100644
--- a/src/core/hle/kernel/svc/svc_query_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_query_memory.cpp
@@ -8,7 +8,7 @@
namespace Kernel::Svc {
Result QueryMemory(Core::System& system, uint64_t out_memory_info, PageInfo* out_page_info,
- VAddr query_address) {
+ u64 query_address) {
LOG_TRACE(Kernel_SVC,
"called, out_memory_info=0x{:016X}, "
"query_address=0x{:016X}",
@@ -30,10 +30,10 @@ Result QueryProcessMemory(Core::System& system, uint64_t out_memory_info, PageIn
R_THROW(ResultInvalidHandle);
}
- auto& memory{system.Memory()};
+ auto& current_memory{GetCurrentMemory(system.Kernel())};
const auto memory_info{process->PageTable().QueryInfo(address).GetSvcMemoryInfo()};
- memory.WriteBlock(out_memory_info, std::addressof(memory_info), sizeof(memory_info));
+ current_memory.WriteBlock(out_memory_info, std::addressof(memory_info), sizeof(memory_info));
//! This is supposed to be part of the QueryInfo call.
*out_page_info = {};
diff --git a/src/core/hle/kernel/svc/svc_shared_memory.cpp b/src/core/hle/kernel/svc/svc_shared_memory.cpp
index 40d878f17..a698596aa 100644
--- a/src/core/hle/kernel/svc/svc_shared_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_shared_memory.cpp
@@ -26,7 +26,7 @@ constexpr bool IsValidSharedMemoryPermission(MemoryPermission perm) {
} // namespace
-Result MapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address, u64 size,
+Result MapSharedMemory(Core::System& system, Handle shmem_handle, u64 address, u64 size,
Svc::MemoryPermission map_perm) {
LOG_TRACE(Kernel_SVC,
"called, shared_memory_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}",
@@ -64,7 +64,7 @@ Result MapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address,
R_RETURN(shmem->Map(process, address, size, map_perm));
}
-Result UnmapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address, u64 size) {
+Result UnmapSharedMemory(Core::System& system, Handle shmem_handle, u64 address, u64 size) {
// Validate the address/size.
R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
diff --git a/src/core/hle/kernel/svc/svc_synchronization.cpp b/src/core/hle/kernel/svc/svc_synchronization.cpp
index 660b45c23..04d65f0bd 100644
--- a/src/core/hle/kernel/svc/svc_synchronization.cpp
+++ b/src/core/hle/kernel/svc/svc_synchronization.cpp
@@ -80,7 +80,7 @@ static Result WaitSynchronization(Core::System& system, int32_t* out_index, cons
}
/// Wait for the given handles to synchronize, timeout after the specified nanoseconds
-Result WaitSynchronization(Core::System& system, int32_t* out_index, VAddr user_handles,
+Result WaitSynchronization(Core::System& system, int32_t* out_index, u64 user_handles,
int32_t num_handles, int64_t timeout_ns) {
LOG_TRACE(Kernel_SVC, "called user_handles={:#x}, num_handles={}, timeout_ns={}", user_handles,
num_handles, timeout_ns);
@@ -90,7 +90,8 @@ Result WaitSynchronization(Core::System& system, int32_t* out_index, VAddr user_
std::vector<Handle> handles(num_handles);
if (num_handles > 0) {
- system.Memory().ReadBlock(user_handles, handles.data(), num_handles * sizeof(Handle));
+ GetCurrentMemory(system.Kernel())
+ .ReadBlock(user_handles, handles.data(), num_handles * sizeof(Handle));
}
R_RETURN(WaitSynchronization(system, out_index, handles.data(), num_handles, timeout_ns));
diff --git a/src/core/hle/kernel/svc/svc_thread.cpp b/src/core/hle/kernel/svc/svc_thread.cpp
index 50991fb62..37b54079c 100644
--- a/src/core/hle/kernel/svc/svc_thread.cpp
+++ b/src/core/hle/kernel/svc/svc_thread.cpp
@@ -19,8 +19,8 @@ constexpr bool IsValidVirtualCoreId(int32_t core_id) {
} // Anonymous namespace
/// Creates a new thread
-Result CreateThread(Core::System& system, Handle* out_handle, VAddr entry_point, u64 arg,
- VAddr stack_bottom, s32 priority, s32 core_id) {
+Result CreateThread(Core::System& system, Handle* out_handle, u64 entry_point, u64 arg,
+ u64 stack_bottom, s32 priority, s32 core_id) {
LOG_DEBUG(Kernel_SVC,
"called entry_point=0x{:08X}, arg=0x{:08X}, stack_bottom=0x{:08X}, "
"priority=0x{:08X}, core_id=0x{:08X}",
@@ -129,7 +129,7 @@ void SleepThread(Core::System& system, s64 nanoseconds) {
}
/// Gets the thread context
-Result GetThreadContext3(Core::System& system, VAddr out_context, Handle thread_handle) {
+Result GetThreadContext3(Core::System& system, u64 out_context, Handle thread_handle) {
LOG_DEBUG(Kernel_SVC, "called, out_context=0x{:08X}, thread_handle=0x{:X}", out_context,
thread_handle);
@@ -178,7 +178,7 @@ Result GetThreadContext3(Core::System& system, VAddr out_context, Handle thread_
R_TRY(thread->GetThreadContext3(context));
// Copy the thread context to user space.
- system.Memory().WriteBlock(out_context, context.data(), context.size());
+ GetCurrentMemory(kernel).WriteBlock(out_context, context.data(), context.size());
R_SUCCEED();
}
@@ -217,7 +217,7 @@ Result SetThreadPriority(Core::System& system, Handle thread_handle, s32 priorit
R_SUCCEED();
}
-Result GetThreadList(Core::System& system, s32* out_num_threads, VAddr out_thread_ids,
+Result GetThreadList(Core::System& system, s32* out_num_threads, u64 out_thread_ids,
s32 out_thread_ids_size, Handle debug_handle) {
// TODO: Handle this case when debug events are supported.
UNIMPLEMENTED_IF(debug_handle != InvalidHandle);
@@ -242,7 +242,7 @@ Result GetThreadList(Core::System& system, s32* out_num_threads, VAddr out_threa
R_THROW(ResultInvalidCurrentMemory);
}
- auto& memory = system.Memory();
+ auto& memory = GetCurrentMemory(system.Kernel());
const auto& thread_list = current_process->GetThreadList();
const auto num_threads = thread_list.size();
const auto copy_amount = std::min(static_cast<std::size_t>(out_thread_ids_size), num_threads);
diff --git a/src/core/hle/kernel/svc/svc_transfer_memory.cpp b/src/core/hle/kernel/svc/svc_transfer_memory.cpp
index 394f06728..82d469a37 100644
--- a/src/core/hle/kernel/svc/svc_transfer_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_transfer_memory.cpp
@@ -25,7 +25,7 @@ constexpr bool IsValidTransferMemoryPermission(MemoryPermission perm) {
} // Anonymous namespace
/// Creates a TransferMemory object
-Result CreateTransferMemory(Core::System& system, Handle* out, VAddr address, u64 size,
+Result CreateTransferMemory(Core::System& system, Handle* out, u64 address, u64 size,
MemoryPermission map_perm) {
auto& kernel = system.Kernel();
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h
index 39355d9c4..7f380ca4f 100644
--- a/src/core/hle/kernel/svc_types.h
+++ b/src/core/hle/kernel/svc_types.h
@@ -253,7 +253,7 @@ struct LastThreadContext {
};
struct PhysicalMemoryInfo {
- PAddr physical_address;
+ u64 physical_address;
u64 virtual_address;
u64 size;
};
@@ -359,7 +359,7 @@ struct LastThreadContext {
};
struct PhysicalMemoryInfo {
- PAddr physical_address;
+ u64 physical_address;
u32 virtual_address;
u32 size;
};
diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp
index deeca925d..a17c46121 100644
--- a/src/core/hle/service/am/am.cpp
+++ b/src/core/hle/service/am/am.cpp
@@ -945,7 +945,7 @@ public:
{0, &ILibraryAppletAccessor::GetAppletStateChangedEvent, "GetAppletStateChangedEvent"},
{1, &ILibraryAppletAccessor::IsCompleted, "IsCompleted"},
{10, &ILibraryAppletAccessor::Start, "Start"},
- {20, nullptr, "RequestExit"},
+ {20, &ILibraryAppletAccessor::RequestExit, "RequestExit"},
{25, nullptr, "Terminate"},
{30, &ILibraryAppletAccessor::GetResult, "GetResult"},
{50, nullptr, "SetOutOfFocusApplicationSuspendingEnabled"},
@@ -1010,6 +1010,15 @@ private:
rb.Push(ResultSuccess);
}
+ void RequestExit(HLERequestContext& ctx) {
+ LOG_DEBUG(Service_AM, "called");
+
+ ASSERT(applet != nullptr);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(applet->RequestExit());
+ }
+
void PushInData(HLERequestContext& ctx) {
LOG_DEBUG(Service_AM, "called");
@@ -1265,7 +1274,8 @@ void ILibraryAppletCreator::CreateTransferMemoryStorage(HLERequestContext& ctx)
}
std::vector<u8> memory(transfer_mem->GetSize());
- system.Memory().ReadBlock(transfer_mem->GetSourceAddress(), memory.data(), memory.size());
+ system.ApplicationMemory().ReadBlock(transfer_mem->GetSourceAddress(), memory.data(),
+ memory.size());
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(ResultSuccess);
@@ -1298,7 +1308,8 @@ void ILibraryAppletCreator::CreateHandleStorage(HLERequestContext& ctx) {
}
std::vector<u8> memory(transfer_mem->GetSize());
- system.Memory().ReadBlock(transfer_mem->GetSourceAddress(), memory.data(), memory.size());
+ system.ApplicationMemory().ReadBlock(transfer_mem->GetSourceAddress(), memory.data(),
+ memory.size());
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(ResultSuccess);
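The CreateTransferMemoryStorage and CreateHandleStorage hunks above share the same shape; a small sketch of that shape with a hypothetical TransferMemoryView stand-in (the real code reads through system.ApplicationMemory() from a KTransferMemory): snapshot the transfer memory into a byte buffer that is then wrapped in an IStorage.

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Hypothetical view over a transfer-memory region shared by the guest.
struct TransferMemoryView {
    const std::uint8_t* source{};
    std::size_t size{};
};

std::vector<std::uint8_t> SnapshotTransferMemory(const TransferMemoryView& tmem) {
    std::vector<std::uint8_t> memory(tmem.size);
    if (!memory.empty()) {
        std::memcpy(memory.data(), tmem.source, memory.size());
    }
    return memory;
}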
diff --git a/src/core/hle/service/am/applets/applet_cabinet.cpp b/src/core/hle/service/am/applets/applet_cabinet.cpp
index 162687b29..93c9f2a55 100644
--- a/src/core/hle/service/am/applets/applet_cabinet.cpp
+++ b/src/core/hle/service/am/applets/applet_cabinet.cpp
@@ -174,4 +174,9 @@ void Cabinet::Cancel() {
broker.SignalStateChanged();
}
+Result Cabinet::RequestExit() {
+ frontend.Close();
+ R_SUCCEED();
+}
+
} // namespace Service::AM::Applets
diff --git a/src/core/hle/service/am/applets/applet_cabinet.h b/src/core/hle/service/am/applets/applet_cabinet.h
index 84197a807..edd295a27 100644
--- a/src/core/hle/service/am/applets/applet_cabinet.h
+++ b/src/core/hle/service/am/applets/applet_cabinet.h
@@ -89,6 +89,7 @@ public:
void Execute() override;
void DisplayCompleted(bool apply_changes, std::string_view amiibo_name);
void Cancel();
+ Result RequestExit() override;
private:
const Core::Frontend::CabinetApplet& frontend;
diff --git a/src/core/hle/service/am/applets/applet_controller.cpp b/src/core/hle/service/am/applets/applet_controller.cpp
index 58484519b..9840d2547 100644
--- a/src/core/hle/service/am/applets/applet_controller.cpp
+++ b/src/core/hle/service/am/applets/applet_controller.cpp
@@ -224,7 +224,8 @@ void Controller::Execute() {
parameters.allow_dual_joycons, parameters.allow_left_joycon,
parameters.allow_right_joycon);
- frontend.ReconfigureControllers([this] { ConfigurationComplete(); }, parameters);
+ frontend.ReconfigureControllers(
+ [this](bool is_success) { ConfigurationComplete(is_success); }, parameters);
break;
}
case ControllerSupportMode::ShowControllerStrapGuide:
@@ -232,16 +233,16 @@ void Controller::Execute() {
case ControllerSupportMode::ShowControllerKeyRemappingForSystem:
UNIMPLEMENTED_MSG("ControllerSupportMode={} is not implemented",
controller_private_arg.mode);
- ConfigurationComplete();
+ ConfigurationComplete(true);
break;
default: {
- ConfigurationComplete();
+ ConfigurationComplete(true);
break;
}
}
}
-void Controller::ConfigurationComplete() {
+void Controller::ConfigurationComplete(bool is_success) {
ControllerSupportResultInfo result_info{};
// If enable_single_mode is enabled, player_count is 1 regardless of any other parameters.
@@ -250,7 +251,8 @@ void Controller::ConfigurationComplete() {
result_info.selected_id = static_cast<u32>(system.HIDCore().GetFirstNpadId());
- result_info.result = 0;
+ result_info.result =
+ is_success ? ControllerSupportResult::Success : ControllerSupportResult::Cancel;
LOG_DEBUG(Service_HID, "Result Info: player_count={}, selected_id={}, result={}",
result_info.player_count, result_info.selected_id, result_info.result);
@@ -262,4 +264,9 @@ void Controller::ConfigurationComplete() {
broker.SignalStateChanged();
}
+Result Controller::RequestExit() {
+ frontend.Close();
+ R_SUCCEED();
+}
+
} // namespace Service::AM::Applets
diff --git a/src/core/hle/service/am/applets/applet_controller.h b/src/core/hle/service/am/applets/applet_controller.h
index 1f9adec65..f6c64f633 100644
--- a/src/core/hle/service/am/applets/applet_controller.h
+++ b/src/core/hle/service/am/applets/applet_controller.h
@@ -48,6 +48,11 @@ enum class ControllerSupportCaller : u8 {
MaxControllerSupportCaller,
};
+enum class ControllerSupportResult : u32 {
+ Success = 0,
+ Cancel = 2,
+};
+
struct ControllerSupportArgPrivate {
u32 arg_private_size{};
u32 arg_size{};
@@ -112,7 +117,7 @@ struct ControllerSupportResultInfo {
s8 player_count{};
INSERT_PADDING_BYTES(3);
u32 selected_id{};
- u32 result{};
+ ControllerSupportResult result{};
};
static_assert(sizeof(ControllerSupportResultInfo) == 0xC,
"ControllerSupportResultInfo has incorrect size.");
@@ -129,8 +134,9 @@ public:
Result GetStatus() const override;
void ExecuteInteractive() override;
void Execute() override;
+ Result RequestExit() override;
- void ConfigurationComplete();
+ void ConfigurationComplete(bool is_success);
private:
const Core::Frontend::ControllerApplet& frontend;
diff --git a/src/core/hle/service/am/applets/applet_error.cpp b/src/core/hle/service/am/applets/applet_error.cpp
index b013896b4..b46ea840c 100644
--- a/src/core/hle/service/am/applets/applet_error.cpp
+++ b/src/core/hle/service/am/applets/applet_error.cpp
@@ -209,4 +209,9 @@ void Error::DisplayCompleted() {
broker.SignalStateChanged();
}
+Result Error::RequestExit() {
+ frontend.Close();
+ R_SUCCEED();
+}
+
} // namespace Service::AM::Applets
diff --git a/src/core/hle/service/am/applets/applet_error.h b/src/core/hle/service/am/applets/applet_error.h
index d78d6f1d1..d822a32bb 100644
--- a/src/core/hle/service/am/applets/applet_error.h
+++ b/src/core/hle/service/am/applets/applet_error.h
@@ -34,6 +34,7 @@ public:
Result GetStatus() const override;
void ExecuteInteractive() override;
void Execute() override;
+ Result RequestExit() override;
void DisplayCompleted();
diff --git a/src/core/hle/service/am/applets/applet_general_backend.cpp b/src/core/hle/service/am/applets/applet_general_backend.cpp
index 1eefa85e3..8b352020e 100644
--- a/src/core/hle/service/am/applets/applet_general_backend.cpp
+++ b/src/core/hle/service/am/applets/applet_general_backend.cpp
@@ -150,6 +150,11 @@ void Auth::AuthFinished(bool is_successful) {
broker.SignalStateChanged();
}
+Result Auth::RequestExit() {
+ frontend.Close();
+ R_SUCCEED();
+}
+
PhotoViewer::PhotoViewer(Core::System& system_, LibraryAppletMode applet_mode_,
const Core::Frontend::PhotoViewerApplet& frontend_)
: Applet{system_, applet_mode_}, frontend{frontend_}, system{system_} {}
@@ -202,6 +207,11 @@ void PhotoViewer::ViewFinished() {
broker.SignalStateChanged();
}
+Result PhotoViewer::RequestExit() {
+ frontend.Close();
+ R_SUCCEED();
+}
+
StubApplet::StubApplet(Core::System& system_, AppletId id_, LibraryAppletMode applet_mode_)
: Applet{system_, applet_mode_}, id{id_}, system{system_} {}
@@ -250,4 +260,9 @@ void StubApplet::Execute() {
broker.SignalStateChanged();
}
+Result StubApplet::RequestExit() {
+ // Nothing to do.
+ R_SUCCEED();
+}
+
} // namespace Service::AM::Applets
diff --git a/src/core/hle/service/am/applets/applet_general_backend.h b/src/core/hle/service/am/applets/applet_general_backend.h
index a9f2535a2..34ecaebb9 100644
--- a/src/core/hle/service/am/applets/applet_general_backend.h
+++ b/src/core/hle/service/am/applets/applet_general_backend.h
@@ -28,6 +28,7 @@ public:
Result GetStatus() const override;
void ExecuteInteractive() override;
void Execute() override;
+ Result RequestExit() override;
void AuthFinished(bool is_successful = true);
@@ -59,6 +60,7 @@ public:
Result GetStatus() const override;
void ExecuteInteractive() override;
void Execute() override;
+ Result RequestExit() override;
void ViewFinished();
@@ -80,6 +82,7 @@ public:
Result GetStatus() const override;
void ExecuteInteractive() override;
void Execute() override;
+ Result RequestExit() override;
private:
AppletId id;
diff --git a/src/core/hle/service/am/applets/applet_mii_edit.cpp b/src/core/hle/service/am/applets/applet_mii_edit.cpp
index ae80ef506..d1f652c09 100644
--- a/src/core/hle/service/am/applets/applet_mii_edit.cpp
+++ b/src/core/hle/service/am/applets/applet_mii_edit.cpp
@@ -135,4 +135,9 @@ void MiiEdit::MiiEditOutputForCharInfoEditing(MiiEditResult result,
broker.SignalStateChanged();
}
+Result MiiEdit::RequestExit() {
+ frontend.Close();
+ R_SUCCEED();
+}
+
} // namespace Service::AM::Applets
diff --git a/src/core/hle/service/am/applets/applet_mii_edit.h b/src/core/hle/service/am/applets/applet_mii_edit.h
index d18dd3cf5..3f46fae1b 100644
--- a/src/core/hle/service/am/applets/applet_mii_edit.h
+++ b/src/core/hle/service/am/applets/applet_mii_edit.h
@@ -25,6 +25,7 @@ public:
Result GetStatus() const override;
void ExecuteInteractive() override;
void Execute() override;
+ Result RequestExit() override;
void MiiEditOutput(MiiEditResult result, s32 index);
diff --git a/src/core/hle/service/am/applets/applet_profile_select.cpp b/src/core/hle/service/am/applets/applet_profile_select.cpp
index 1d69f5447..89cb323e9 100644
--- a/src/core/hle/service/am/applets/applet_profile_select.cpp
+++ b/src/core/hle/service/am/applets/applet_profile_select.cpp
@@ -25,13 +25,29 @@ void ProfileSelect::Initialize() {
final_data.clear();
Applet::Initialize();
+ profile_select_version = ProfileSelectAppletVersion{common_args.library_version};
const auto user_config_storage = broker.PopNormalDataToApplet();
ASSERT(user_config_storage != nullptr);
const auto& user_config = user_config_storage->GetData();
- ASSERT(user_config.size() >= sizeof(UserSelectionConfig));
- std::memcpy(&config, user_config.data(), sizeof(UserSelectionConfig));
+ LOG_INFO(Service_AM, "Initializing Profile Select Applet with version={}",
+ profile_select_version);
+
+ switch (profile_select_version) {
+ case ProfileSelectAppletVersion::Version1:
+ ASSERT(user_config.size() == sizeof(UiSettingsV1));
+ std::memcpy(&config_old, user_config.data(), sizeof(UiSettingsV1));
+ break;
+ case ProfileSelectAppletVersion::Version2:
+ case ProfileSelectAppletVersion::Version3:
+ ASSERT(user_config.size() == sizeof(UiSettings));
+ std::memcpy(&config, user_config.data(), sizeof(UiSettings));
+ break;
+ default:
+ UNIMPLEMENTED_MSG("Unknown profile_select_version = {}", profile_select_version);
+ break;
+ }
}
bool ProfileSelect::TransactionComplete() const {
@@ -52,11 +68,37 @@ void ProfileSelect::Execute() {
return;
}
- frontend.SelectProfile([this](std::optional<Common::UUID> uuid) { SelectionComplete(uuid); });
+ Core::Frontend::ProfileSelectParameters parameters{};
+
+ switch (profile_select_version) {
+ case ProfileSelectAppletVersion::Version1:
+ parameters = {
+ .mode = config_old.mode,
+ .invalid_uid_list = config_old.invalid_uid_list,
+ .display_options = config_old.display_options,
+ .purpose = UserSelectionPurpose::General,
+ };
+ break;
+ case ProfileSelectAppletVersion::Version2:
+ case ProfileSelectAppletVersion::Version3:
+ parameters = {
+ .mode = config.mode,
+ .invalid_uid_list = config.invalid_uid_list,
+ .display_options = config.display_options,
+ .purpose = config.purpose,
+ };
+ break;
+ default:
+ UNIMPLEMENTED_MSG("Unknown profile_select_version = {}", profile_select_version);
+ break;
+ }
+
+ frontend.SelectProfile([this](std::optional<Common::UUID> uuid) { SelectionComplete(uuid); },
+ parameters);
}
void ProfileSelect::SelectionComplete(std::optional<Common::UUID> uuid) {
- UserSelectionOutput output{};
+ UiReturnArg output{};
if (uuid.has_value() && uuid->IsValid()) {
output.result = 0;
@@ -67,10 +109,15 @@ void ProfileSelect::SelectionComplete(std::optional<Common::UUID> uuid) {
output.uuid_selected = Common::InvalidUUID;
}
- final_data = std::vector<u8>(sizeof(UserSelectionOutput));
+ final_data = std::vector<u8>(sizeof(UiReturnArg));
std::memcpy(final_data.data(), &output, final_data.size());
broker.PushNormalDataFromApplet(std::make_shared<IStorage>(system, std::move(final_data)));
broker.SignalStateChanged();
}
+Result ProfileSelect::RequestExit() {
+ frontend.Close();
+ R_SUCCEED();
+}
+
} // namespace Service::AM::Applets
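A condensed sketch of the version dispatch introduced above, assuming opaque settings blobs of the sizes the diff asserts (0x98 for the 1.0.0 layout, 0xA0 for the later one); the library version from the common arguments decides which layout the incoming storage is decoded as.

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

enum class ProfileSelectAppletVersion : std::uint32_t {
    Version1 = 0x1,     // 1.0.0+
    Version2 = 0x10000, // 2.0.0+
    Version3 = 0x20000, // 6.0.0+
};

struct UiSettingsV1 { std::uint8_t raw[0x98]; }; // placeholder for the real 1.0.0+ struct
struct UiSettings   { std::uint8_t raw[0xA0]; }; // placeholder for the real 2.0.0+/6.0.0+ struct

struct DecodedConfig {
    UiSettingsV1 config_old{};
    UiSettings config{};
};

void DecodeUserConfig(ProfileSelectAppletVersion version,
                      const std::vector<std::uint8_t>& user_config, DecodedConfig& out) {
    switch (version) {
    case ProfileSelectAppletVersion::Version1:
        assert(user_config.size() == sizeof(UiSettingsV1));
        std::memcpy(&out.config_old, user_config.data(), sizeof(UiSettingsV1));
        break;
    case ProfileSelectAppletVersion::Version2:
    case ProfileSelectAppletVersion::Version3:
        assert(user_config.size() == sizeof(UiSettings));
        std::memcpy(&out.config, user_config.data(), sizeof(UiSettings));
        break;
    default:
        break; // unknown versions are logged as unimplemented in the real applet
    }
}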
diff --git a/src/core/hle/service/am/applets/applet_profile_select.h b/src/core/hle/service/am/applets/applet_profile_select.h
index b77f1d205..369f9250f 100644
--- a/src/core/hle/service/am/applets/applet_profile_select.h
+++ b/src/core/hle/service/am/applets/applet_profile_select.h
@@ -16,19 +16,100 @@ class System;
namespace Service::AM::Applets {
-struct UserSelectionConfig {
- // TODO(DarkLordZach): RE this structure
- // It seems to be flags and the like that determine the UI of the applet on the switch... from
- // my research this is safe to ignore for now.
- INSERT_PADDING_BYTES(0xA0);
+enum class ProfileSelectAppletVersion : u32 {
+ Version1 = 0x1, // 1.0.0+
+ Version2 = 0x10000, // 2.0.0+
+ Version3 = 0x20000, // 6.0.0+
};
-static_assert(sizeof(UserSelectionConfig) == 0xA0, "UserSelectionConfig has incorrect size.");
-struct UserSelectionOutput {
+// This is nn::account::UiMode
+enum class UiMode {
+ UserSelector,
+ UserCreator,
+ EnsureNetworkServiceAccountAvailable,
+ UserIconEditor,
+ UserNicknameEditor,
+ UserCreatorForStarter,
+ NintendoAccountAuthorizationRequestContext,
+ IntroduceExternalNetworkServiceAccount,
+ IntroduceExternalNetworkServiceAccountForRegistration,
+ NintendoAccountNnidLinker,
+ LicenseRequirementsForNetworkService,
+ LicenseRequirementsForNetworkServiceWithUserContextImpl,
+ UserCreatorForImmediateNaLoginTest,
+ UserQualificationPromoter,
+};
+
+// This is nn::account::UserSelectionPurpose
+enum class UserSelectionPurpose {
+ General,
+ GameCardRegistration,
+ EShopLaunch,
+ EShopItemShow,
+ PicturePost,
+ NintendoAccountLinkage,
+ SettingsUpdate,
+ SaveDataDeletion,
+ UserMigration,
+ SaveDataTransfer,
+};
+
+// This is nn::account::NintendoAccountStartupDialogType
+enum class NintendoAccountStartupDialogType {
+ LoginAndCreate,
+ Login,
+ Create,
+};
+
+// This is nn::account::UserSelectionSettingsForSystemService
+struct UserSelectionSettingsForSystemService {
+ UserSelectionPurpose purpose;
+ bool enable_user_creation;
+ INSERT_PADDING_BYTES(0x3);
+};
+static_assert(sizeof(UserSelectionSettingsForSystemService) == 0x8,
+ "UserSelectionSettingsForSystemService has incorrect size.");
+
+struct UiSettingsDisplayOptions {
+ bool is_network_service_account_required;
+ bool is_skip_enabled;
+ bool is_system_or_launcher;
+ bool is_registration_permitted;
+ bool show_skip_button;
+ bool aditional_select;
+ bool show_user_selector;
+ bool is_unqualified_user_selectable;
+};
+static_assert(sizeof(UiSettingsDisplayOptions) == 0x8,
+ "UiSettingsDisplayOptions has incorrect size.");
+
+struct UiSettingsV1 {
+ UiMode mode;
+ INSERT_PADDING_BYTES(0x4);
+ std::array<Common::UUID, 8> invalid_uid_list;
+ u64 application_id;
+ UiSettingsDisplayOptions display_options;
+};
+static_assert(sizeof(UiSettingsV1) == 0x98, "UiSettingsV1 has incorrect size.");
+
+// This is nn::account::UiSettings
+struct UiSettings {
+ UiMode mode;
+ INSERT_PADDING_BYTES(0x4);
+ std::array<Common::UUID, 8> invalid_uid_list;
+ u64 application_id;
+ UiSettingsDisplayOptions display_options;
+ UserSelectionPurpose purpose;
+ INSERT_PADDING_BYTES(0x4);
+};
+static_assert(sizeof(UiSettings) == 0xA0, "UiSettings has incorrect size.");
+
+// This is nn::account::UiReturnArg
+struct UiReturnArg {
u64 result;
Common::UUID uuid_selected;
};
-static_assert(sizeof(UserSelectionOutput) == 0x18, "UserSelectionOutput has incorrect size.");
+static_assert(sizeof(UiReturnArg) == 0x18, "UiReturnArg has incorrect size.");
class ProfileSelect final : public Applet {
public:
@@ -42,13 +123,17 @@ public:
Result GetStatus() const override;
void ExecuteInteractive() override;
void Execute() override;
+ Result RequestExit() override;
void SelectionComplete(std::optional<Common::UUID> uuid);
private:
const Core::Frontend::ProfileSelectApplet& frontend;
- UserSelectionConfig config;
+ UiSettings config;
+ UiSettingsV1 config_old;
+ ProfileSelectAppletVersion profile_select_version;
+
bool complete = false;
Result status = ResultSuccess;
std::vector<u8> final_data;
diff --git a/src/core/hle/service/am/applets/applet_software_keyboard.cpp b/src/core/hle/service/am/applets/applet_software_keyboard.cpp
index c18236045..4145bb84f 100644
--- a/src/core/hle/service/am/applets/applet_software_keyboard.cpp
+++ b/src/core/hle/service/am/applets/applet_software_keyboard.cpp
@@ -770,6 +770,11 @@ void SoftwareKeyboard::ExitKeyboard() {
broker.SignalStateChanged();
}
+Result SoftwareKeyboard::RequestExit() {
+ frontend.Close();
+ R_SUCCEED();
+}
+
// Inline Software Keyboard Requests
void SoftwareKeyboard::RequestFinalize(const std::vector<u8>& request_data) {
diff --git a/src/core/hle/service/am/applets/applet_software_keyboard.h b/src/core/hle/service/am/applets/applet_software_keyboard.h
index b01b31c98..2e919811b 100644
--- a/src/core/hle/service/am/applets/applet_software_keyboard.h
+++ b/src/core/hle/service/am/applets/applet_software_keyboard.h
@@ -31,6 +31,7 @@ public:
Result GetStatus() const override;
void ExecuteInteractive() override;
void Execute() override;
+ Result RequestExit() override;
/**
* Submits the input text to the application.
diff --git a/src/core/hle/service/am/applets/applet_web_browser.cpp b/src/core/hle/service/am/applets/applet_web_browser.cpp
index f061bae80..2accf7898 100644
--- a/src/core/hle/service/am/applets/applet_web_browser.cpp
+++ b/src/core/hle/service/am/applets/applet_web_browser.cpp
@@ -363,6 +363,11 @@ void WebBrowser::WebBrowserExit(WebExitReason exit_reason, std::string last_url)
broker.SignalStateChanged();
}
+Result WebBrowser::RequestExit() {
+ frontend.Close();
+ R_SUCCEED();
+}
+
bool WebBrowser::InputTLVExistsInMap(WebArgInputTLVType input_tlv_type) const {
return web_arg_input_tlv_map.find(input_tlv_type) != web_arg_input_tlv_map.end();
}
diff --git a/src/core/hle/service/am/applets/applet_web_browser.h b/src/core/hle/service/am/applets/applet_web_browser.h
index fd727fac8..99fe18659 100644
--- a/src/core/hle/service/am/applets/applet_web_browser.h
+++ b/src/core/hle/service/am/applets/applet_web_browser.h
@@ -35,6 +35,7 @@ public:
Result GetStatus() const override;
void ExecuteInteractive() override;
void Execute() override;
+ Result RequestExit() override;
void ExtractOfflineRomFS();
diff --git a/src/core/hle/service/am/applets/applets.h b/src/core/hle/service/am/applets/applets.h
index a22eb62a8..12f374199 100644
--- a/src/core/hle/service/am/applets/applets.h
+++ b/src/core/hle/service/am/applets/applets.h
@@ -142,6 +142,7 @@ public:
virtual Result GetStatus() const = 0;
virtual void ExecuteInteractive() = 0;
virtual void Execute() = 0;
+ virtual Result RequestExit() = 0;
AppletDataBroker& GetBroker() {
return broker;
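A condensed sketch of the RequestExit hook this change threads through every applet, with hypothetical stand-ins (Result, Frontend) instead of the real service types: the base class gains a pure-virtual hook, and each concrete applet forwards the exit request to its frontend and reports success.

#include <cstdint>

using Result = std::uint32_t;
constexpr Result ResultSuccess = 0;

struct Frontend {
    void Close() const { /* ask the UI layer to dismiss its dialog */ }
};

class Applet {
public:
    virtual ~Applet() = default;
    virtual Result RequestExit() = 0; // the new hook
};

class CabinetLikeApplet final : public Applet {
public:
    explicit CabinetLikeApplet(const Frontend& frontend_) : frontend{frontend_} {}
    Result RequestExit() override {
        frontend.Close();     // close the frontend dialog
        return ResultSuccess; // the real overrides use R_SUCCEED()
    }

private:
    const Frontend& frontend;
};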
diff --git a/src/core/hle/service/hid/controllers/console_sixaxis.cpp b/src/core/hle/service/hid/controllers/console_sixaxis.cpp
index 478d38590..bcb272eaf 100644
--- a/src/core/hle/service/hid/controllers/console_sixaxis.cpp
+++ b/src/core/hle/service/hid/controllers/console_sixaxis.cpp
@@ -60,10 +60,11 @@ void Controller_ConsoleSixAxis::OnUpdate(const Core::Timing::CoreTiming& core_ti
// Update seven six axis transfer memory
seven_sixaxis_lifo.WriteNextEntry(next_seven_sixaxis_state);
- system.Memory().WriteBlock(transfer_memory, &seven_sixaxis_lifo, sizeof(seven_sixaxis_lifo));
+ system.ApplicationMemory().WriteBlock(transfer_memory, &seven_sixaxis_lifo,
+ sizeof(seven_sixaxis_lifo));
}
-void Controller_ConsoleSixAxis::SetTransferMemoryAddress(VAddr t_mem) {
+void Controller_ConsoleSixAxis::SetTransferMemoryAddress(Common::ProcessAddress t_mem) {
transfer_memory = t_mem;
}
diff --git a/src/core/hle/service/hid/controllers/console_sixaxis.h b/src/core/hle/service/hid/controllers/console_sixaxis.h
index 8d3e4081b..7015d924c 100644
--- a/src/core/hle/service/hid/controllers/console_sixaxis.h
+++ b/src/core/hle/service/hid/controllers/console_sixaxis.h
@@ -5,8 +5,8 @@
#include <array>
-#include "common/common_types.h"
#include "common/quaternion.h"
+#include "common/typed_address.h"
#include "core/hle/service/hid/controllers/controller_base.h"
#include "core/hle/service/hid/ring_lifo.h"
@@ -34,7 +34,7 @@ public:
void OnUpdate(const Core::Timing::CoreTiming& core_timing) override;
// Called on InitializeSevenSixAxisSensor
- void SetTransferMemoryAddress(VAddr t_mem);
+ void SetTransferMemoryAddress(Common::ProcessAddress t_mem);
// Called on ResetSevenSixAxisSensorTimestamp
void ResetTimestamp();
@@ -66,7 +66,7 @@ private:
static_assert(sizeof(seven_sixaxis_lifo) == 0xA70, "SevenSixAxisState is an invalid size");
SevenSixAxisState next_seven_sixaxis_state{};
- VAddr transfer_memory{};
+ Common::ProcessAddress transfer_memory{};
ConsoleSharedMemory* shared_memory = nullptr;
Core::HID::EmulatedConsole* console = nullptr;
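The VAddr to Common::ProcessAddress migration here and in the following files replaces a raw u64 alias with a strong address type; a rough sketch of the idea (not the actual common/typed_address.h implementation) is a thin wrapper whose integer value has to be extracted explicitly through GetInteger, as the jit and ldr hunks later in this diff do.

#include <cstdint>

class ProcessAddress {
public:
    constexpr ProcessAddress() = default;
    constexpr ProcessAddress(std::uint64_t value) : address{value} {}
    constexpr std::uint64_t Value() const { return address; }

private:
    std::uint64_t address{};
};

// Explicit escape hatch back to an integer, mirroring GetInteger() in the diff.
constexpr std::uint64_t GetInteger(ProcessAddress addr) {
    return addr.Value();
}

// Arithmetic on the raw value has to be spelled out, which is the point of the strong type.
constexpr std::uint64_t region_end = GetInteger(ProcessAddress{0x8000000}) + 0x1000;
static_assert(region_end == 0x8001000);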
diff --git a/src/core/hle/service/hid/controllers/npad.cpp b/src/core/hle/service/hid/controllers/npad.cpp
index ba6f04d8d..8abf71608 100644
--- a/src/core/hle/service/hid/controllers/npad.cpp
+++ b/src/core/hle/service/hid/controllers/npad.cpp
@@ -70,7 +70,6 @@ Result Controller_NPad::VerifyValidSixAxisSensorHandle(
const Core::HID::SixAxisSensorHandle& device_handle) {
const auto npad_id = IsNpadIdValid(static_cast<Core::HID::NpadIdType>(device_handle.npad_id));
const bool device_index = device_handle.device_index < Core::HID::DeviceIndex::MaxDeviceIndex;
- const bool npad_type = device_handle.npad_type < Core::HID::NpadStyleIndex::MaxNpadType;
if (!npad_id) {
return InvalidNpadId;
@@ -78,10 +77,6 @@ Result Controller_NPad::VerifyValidSixAxisSensorHandle(
if (!device_index) {
return NpadDeviceIndexOutOfRange;
}
- // This doesn't get validated on nnsdk
- if (!npad_type) {
- return NpadInvalidHandle;
- }
return ResultSuccess;
}
@@ -819,12 +814,12 @@ Controller_NPad::NpadCommunicationMode Controller_NPad::GetNpadCommunicationMode
return communication_mode;
}
-Result Controller_NPad::SetNpadMode(Core::HID::NpadIdType npad_id,
- NpadJoyDeviceType npad_device_type,
- NpadJoyAssignmentMode assignment_mode) {
+bool Controller_NPad::SetNpadMode(Core::HID::NpadIdType& new_npad_id, Core::HID::NpadIdType npad_id,
+ NpadJoyDeviceType npad_device_type,
+ NpadJoyAssignmentMode assignment_mode) {
if (!IsNpadIdValid(npad_id)) {
LOG_ERROR(Service_HID, "Invalid NpadIdType npad_id:{}", npad_id);
- return InvalidNpadId;
+ return false;
}
auto& controller = GetControllerFromNpadIdType(npad_id);
@@ -833,7 +828,7 @@ Result Controller_NPad::SetNpadMode(Core::HID::NpadIdType npad_id,
}
if (!controller.device->IsConnected()) {
- return ResultSuccess;
+ return false;
}
if (assignment_mode == NpadJoyAssignmentMode::Dual) {
@@ -842,52 +837,52 @@ Result Controller_NPad::SetNpadMode(Core::HID::NpadIdType npad_id,
controller.is_dual_left_connected = true;
controller.is_dual_right_connected = false;
UpdateControllerAt(Core::HID::NpadStyleIndex::JoyconDual, npad_id, true);
- return ResultSuccess;
+ return false;
}
if (controller.device->GetNpadStyleIndex() == Core::HID::NpadStyleIndex::JoyconRight) {
DisconnectNpad(npad_id);
controller.is_dual_left_connected = false;
controller.is_dual_right_connected = true;
UpdateControllerAt(Core::HID::NpadStyleIndex::JoyconDual, npad_id, true);
- return ResultSuccess;
+ return false;
}
- return ResultSuccess;
+ return false;
}
// This is for NpadJoyAssignmentMode::Single
// Only JoyconDual gets affected by this function
if (controller.device->GetNpadStyleIndex() != Core::HID::NpadStyleIndex::JoyconDual) {
- return ResultSuccess;
+ return false;
}
if (controller.is_dual_left_connected && !controller.is_dual_right_connected) {
DisconnectNpad(npad_id);
UpdateControllerAt(Core::HID::NpadStyleIndex::JoyconLeft, npad_id, true);
- return ResultSuccess;
+ return false;
}
if (!controller.is_dual_left_connected && controller.is_dual_right_connected) {
DisconnectNpad(npad_id);
UpdateControllerAt(Core::HID::NpadStyleIndex::JoyconRight, npad_id, true);
- return ResultSuccess;
+ return false;
}
// We have two controllers connected to the same npad_id, so we need to split them
- const auto npad_id_2 = hid_core.GetFirstDisconnectedNpadId();
- auto& controller_2 = GetControllerFromNpadIdType(npad_id_2);
+ new_npad_id = hid_core.GetFirstDisconnectedNpadId();
+ auto& controller_2 = GetControllerFromNpadIdType(new_npad_id);
DisconnectNpad(npad_id);
if (npad_device_type == NpadJoyDeviceType::Left) {
UpdateControllerAt(Core::HID::NpadStyleIndex::JoyconLeft, npad_id, true);
controller_2.is_dual_left_connected = false;
controller_2.is_dual_right_connected = true;
- UpdateControllerAt(Core::HID::NpadStyleIndex::JoyconDual, npad_id_2, true);
+ UpdateControllerAt(Core::HID::NpadStyleIndex::JoyconDual, new_npad_id, true);
} else {
UpdateControllerAt(Core::HID::NpadStyleIndex::JoyconRight, npad_id, true);
controller_2.is_dual_left_connected = true;
controller_2.is_dual_right_connected = false;
- UpdateControllerAt(Core::HID::NpadStyleIndex::JoyconDual, npad_id_2, true);
+ UpdateControllerAt(Core::HID::NpadStyleIndex::JoyconDual, new_npad_id, true);
}
- return ResultSuccess;
+ return true;
}
bool Controller_NPad::VibrateControllerAtIndex(Core::HID::NpadIdType npad_id,
@@ -1131,6 +1126,7 @@ Result Controller_NPad::DisconnectNpad(Core::HID::NpadIdType npad_id) {
WriteEmptyEntry(shared_memory);
return ResultSuccess;
}
+
Result Controller_NPad::SetGyroscopeZeroDriftMode(
const Core::HID::SixAxisSensorHandle& sixaxis_handle,
Core::HID::GyroscopeZeroDriftMode drift_mode) {
@@ -1388,7 +1384,8 @@ Result Controller_NPad::MergeSingleJoyAsDualJoy(Core::HID::NpadIdType npad_id_1,
return NpadIsDualJoycon;
}
- // Disconnect the joycon at the second id and connect the dual joycon at the first index.
+ // Disconnect the joycons and connect them as a dual joycon at the first index.
+ DisconnectNpad(npad_id_1);
DisconnectNpad(npad_id_2);
controller_1.is_dual_left_connected = true;
controller_1.is_dual_right_connected = true;
diff --git a/src/core/hle/service/hid/controllers/npad.h b/src/core/hle/service/hid/controllers/npad.h
index a5998c453..9cfe298f1 100644
--- a/src/core/hle/service/hid/controllers/npad.h
+++ b/src/core/hle/service/hid/controllers/npad.h
@@ -102,8 +102,8 @@ public:
void SetNpadCommunicationMode(NpadCommunicationMode communication_mode_);
NpadCommunicationMode GetNpadCommunicationMode() const;
- Result SetNpadMode(Core::HID::NpadIdType npad_id, NpadJoyDeviceType npad_device_type,
- NpadJoyAssignmentMode assignment_mode);
+ bool SetNpadMode(Core::HID::NpadIdType& new_npad_id, Core::HID::NpadIdType npad_id,
+ NpadJoyDeviceType npad_device_type, NpadJoyAssignmentMode assignment_mode);
bool VibrateControllerAtIndex(Core::HID::NpadIdType npad_id, std::size_t device_index,
const Core::HID::VibrationValue& vibration_value);
diff --git a/src/core/hle/service/hid/controllers/palma.cpp b/src/core/hle/service/hid/controllers/palma.cpp
index bce51285c..14c67e454 100644
--- a/src/core/hle/service/hid/controllers/palma.cpp
+++ b/src/core/hle/service/hid/controllers/palma.cpp
@@ -152,7 +152,7 @@ Result Controller_Palma::WritePalmaRgbLedPatternEntry(const PalmaConnectionHandl
}
Result Controller_Palma::WritePalmaWaveEntry(const PalmaConnectionHandle& handle, PalmaWaveSet wave,
- VAddr t_mem, u64 size) {
+ Common::ProcessAddress t_mem, u64 size) {
if (handle.npad_id != active_handle.npad_id) {
return InvalidPalmaHandle;
}
diff --git a/src/core/hle/service/hid/controllers/palma.h b/src/core/hle/service/hid/controllers/palma.h
index cf62f0dbc..a0491a819 100644
--- a/src/core/hle/service/hid/controllers/palma.h
+++ b/src/core/hle/service/hid/controllers/palma.h
@@ -5,7 +5,7 @@
#include <array>
#include "common/common_funcs.h"
-#include "common/common_types.h"
+#include "common/typed_address.h"
#include "core/hle/service/hid/controllers/controller_base.h"
#include "core/hle/service/hid/errors.h"
@@ -125,8 +125,8 @@ public:
Result ReadPalmaUniqueCode(const PalmaConnectionHandle& handle);
Result SetPalmaUniqueCodeInvalid(const PalmaConnectionHandle& handle);
Result WritePalmaRgbLedPatternEntry(const PalmaConnectionHandle& handle, u64 unknown);
- Result WritePalmaWaveEntry(const PalmaConnectionHandle& handle, PalmaWaveSet wave, VAddr t_mem,
- u64 size);
+ Result WritePalmaWaveEntry(const PalmaConnectionHandle& handle, PalmaWaveSet wave,
+ Common::ProcessAddress t_mem, u64 size);
Result SetPalmaDataBaseIdentificationVersion(const PalmaConnectionHandle& handle,
s32 database_id_version_);
Result GetPalmaDataBaseIdentificationVersion(const PalmaConnectionHandle& handle);
diff --git a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp
index 4529ad643..87e7b864a 100644
--- a/src/core/hle/service/hid/hid.cpp
+++ b/src/core/hle/service/hid/hid.cpp
@@ -302,7 +302,7 @@ Hid::Hid(Core::System& system_)
{130, &Hid::SwapNpadAssignment, "SwapNpadAssignment"},
{131, &Hid::IsUnintendedHomeButtonInputProtectionEnabled, "IsUnintendedHomeButtonInputProtectionEnabled"},
{132, &Hid::EnableUnintendedHomeButtonInputProtection, "EnableUnintendedHomeButtonInputProtection"},
- {133, nullptr, "SetNpadJoyAssignmentModeSingleWithDestination"},
+ {133, &Hid::SetNpadJoyAssignmentModeSingleWithDestination, "SetNpadJoyAssignmentModeSingleWithDestination"},
{134, &Hid::SetNpadAnalogStickUseCenterClamp, "SetNpadAnalogStickUseCenterClamp"},
{135, &Hid::SetNpadCaptureButtonAssignment, "SetNpadCaptureButtonAssignment"},
{136, &Hid::ClearNpadCaptureButtonAssignment, "ClearNpadCaptureButtonAssignment"},
@@ -1180,8 +1180,10 @@ void Hid::SetNpadJoyAssignmentModeSingleByDefault(HLERequestContext& ctx) {
const auto parameters{rp.PopRaw<Parameters>()};
+ Core::HID::NpadIdType new_npad_id{};
auto& controller = GetAppletResource()->GetController<Controller_NPad>(HidController::NPad);
- controller.SetNpadMode(parameters.npad_id, Controller_NPad::NpadJoyDeviceType::Left,
+ controller.SetNpadMode(new_npad_id, parameters.npad_id,
+ Controller_NPad::NpadJoyDeviceType::Left,
Controller_NPad::NpadJoyAssignmentMode::Single);
LOG_INFO(Service_HID, "called, npad_id={}, applet_resource_user_id={}", parameters.npad_id,
@@ -1203,8 +1205,9 @@ void Hid::SetNpadJoyAssignmentModeSingle(HLERequestContext& ctx) {
const auto parameters{rp.PopRaw<Parameters>()};
+ Core::HID::NpadIdType new_npad_id{};
auto& controller = GetAppletResource()->GetController<Controller_NPad>(HidController::NPad);
- controller.SetNpadMode(parameters.npad_id, parameters.npad_joy_device_type,
+ controller.SetNpadMode(new_npad_id, parameters.npad_id, parameters.npad_joy_device_type,
Controller_NPad::NpadJoyAssignmentMode::Single);
LOG_INFO(Service_HID, "called, npad_id={}, applet_resource_user_id={}, npad_joy_device_type={}",
@@ -1226,8 +1229,10 @@ void Hid::SetNpadJoyAssignmentModeDual(HLERequestContext& ctx) {
const auto parameters{rp.PopRaw<Parameters>()};
+ Core::HID::NpadIdType new_npad_id{};
auto& controller = GetAppletResource()->GetController<Controller_NPad>(HidController::NPad);
- controller.SetNpadMode(parameters.npad_id, {}, Controller_NPad::NpadJoyAssignmentMode::Dual);
+ controller.SetNpadMode(new_npad_id, parameters.npad_id, {},
+ Controller_NPad::NpadJoyAssignmentMode::Dual);
LOG_INFO(Service_HID, "called, npad_id={}, applet_resource_user_id={}", parameters.npad_id,
parameters.applet_resource_user_id);
@@ -1369,6 +1374,34 @@ void Hid::EnableUnintendedHomeButtonInputProtection(HLERequestContext& ctx) {
rb.Push(result);
}
+void Hid::SetNpadJoyAssignmentModeSingleWithDestination(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ Core::HID::NpadIdType npad_id;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ Controller_NPad::NpadJoyDeviceType npad_joy_device_type;
+ };
+ static_assert(sizeof(Parameters) == 0x18, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ Core::HID::NpadIdType new_npad_id{};
+ auto& controller = GetAppletResource()->GetController<Controller_NPad>(HidController::NPad);
+ const auto is_reassigned =
+ controller.SetNpadMode(new_npad_id, parameters.npad_id, parameters.npad_joy_device_type,
+ Controller_NPad::NpadJoyAssignmentMode::Single);
+
+ LOG_INFO(Service_HID, "called, npad_id={}, applet_resource_user_id={}, npad_joy_device_type={}",
+ parameters.npad_id, parameters.applet_resource_user_id,
+ parameters.npad_joy_device_type);
+
+ IPC::ResponseBuilder rb{ctx, 4};
+ rb.Push(ResultSuccess);
+ rb.Push(is_reassigned);
+ rb.PushEnum(new_npad_id);
+}
+
void Hid::SetNpadAnalogStickUseCenterClamp(HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
struct Parameters {
diff --git a/src/core/hle/service/hid/hid.h b/src/core/hle/service/hid/hid.h
index c69e5f3fb..f247b83c2 100644
--- a/src/core/hle/service/hid/hid.h
+++ b/src/core/hle/service/hid/hid.h
@@ -151,6 +151,7 @@ private:
void SwapNpadAssignment(HLERequestContext& ctx);
void IsUnintendedHomeButtonInputProtectionEnabled(HLERequestContext& ctx);
void EnableUnintendedHomeButtonInputProtection(HLERequestContext& ctx);
+ void SetNpadJoyAssignmentModeSingleWithDestination(HLERequestContext& ctx);
void SetNpadAnalogStickUseCenterClamp(HLERequestContext& ctx);
void SetNpadCaptureButtonAssignment(HLERequestContext& ctx);
void ClearNpadCaptureButtonAssignment(HLERequestContext& ctx);
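The reworked SetNpadMode above no longer returns a Result; instead it reports through a bool whether a dual joycon was actually split and, via the new out parameter, which npad id received the detached half. A small sketch of how a handler like SetNpadJoyAssignmentModeSingleWithDestination consumes that pair (types are simplified stand-ins, not the real HID classes):

#include <cstdint>

enum class NpadIdType : std::uint32_t { Player1 = 0, Player2 = 1 };

// What the controller call now reports back to the service.
struct SetNpadModeOutcome {
    bool is_reassigned{};      // true only when a controller was actually moved
    NpadIdType new_npad_id{};  // the id the detached joycon was assigned to, if any
};

// Hypothetical consumer mirroring the new IPC handler: the outcome is pushed to the guest as
// rb.Push(ResultSuccess); rb.Push(is_reassigned); rb.PushEnum(new_npad_id);
template <typename Controller>
SetNpadModeOutcome AssignSingleWithDestination(Controller& npad, NpadIdType npad_id) {
    SetNpadModeOutcome outcome{};
    outcome.is_reassigned = npad.SetNpadModeSingle(outcome.new_npad_id, npad_id);
    return outcome;
}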
diff --git a/src/core/hle/service/hid/hidbus/hidbus_base.cpp b/src/core/hle/service/hid/hidbus/hidbus_base.cpp
index dfd23ec04..ee522c36e 100644
--- a/src/core/hle/service/hid/hidbus/hidbus_base.cpp
+++ b/src/core/hle/service/hid/hidbus/hidbus_base.cpp
@@ -59,7 +59,7 @@ void HidbusBase::DisablePollingMode() {
polling_mode_enabled = false;
}
-void HidbusBase::SetTransferMemoryAddress(VAddr t_mem) {
+void HidbusBase::SetTransferMemoryAddress(Common::ProcessAddress t_mem) {
transfer_memory = t_mem;
}
diff --git a/src/core/hle/service/hid/hidbus/hidbus_base.h b/src/core/hle/service/hid/hidbus/hidbus_base.h
index 26313264d..ec41684e1 100644
--- a/src/core/hle/service/hid/hidbus/hidbus_base.h
+++ b/src/core/hle/service/hid/hidbus/hidbus_base.h
@@ -5,7 +5,7 @@
#include <array>
#include <span>
-#include "common/common_types.h"
+#include "common/typed_address.h"
#include "core/hle/result.h"
namespace Core {
@@ -138,7 +138,7 @@ public:
void DisablePollingMode();
// Called on EnableJoyPollingReceiveMode
- void SetTransferMemoryAddress(VAddr t_mem);
+ void SetTransferMemoryAddress(Common::ProcessAddress t_mem);
Kernel::KReadableEvent& GetSendCommandAsycEvent() const;
@@ -174,7 +174,7 @@ protected:
JoyEnableSixAxisDataAccessor enable_sixaxis_data{};
ButtonOnlyPollingDataAccessor button_only_data{};
- VAddr transfer_memory{};
+ Common::ProcessAddress transfer_memory{};
Core::System& system;
Kernel::KEvent* send_command_async_event;
diff --git a/src/core/hle/service/hid/hidbus/ringcon.cpp b/src/core/hle/service/hid/hidbus/ringcon.cpp
index 65a2dd521..378108012 100644
--- a/src/core/hle/service/hid/hidbus/ringcon.cpp
+++ b/src/core/hle/service/hid/hidbus/ringcon.cpp
@@ -64,8 +64,8 @@ void RingController::OnUpdate() {
curr_entry.polling_data.out_size = sizeof(ringcon_value);
std::memcpy(curr_entry.polling_data.data.data(), &ringcon_value, sizeof(ringcon_value));
- system.Memory().WriteBlock(transfer_memory, &enable_sixaxis_data,
- sizeof(enable_sixaxis_data));
+ system.ApplicationMemory().WriteBlock(transfer_memory, &enable_sixaxis_data,
+ sizeof(enable_sixaxis_data));
break;
}
default:
diff --git a/src/core/hle/service/hid/irsensor/image_transfer_processor.cpp b/src/core/hle/service/hid/irsensor/image_transfer_processor.cpp
index a268750cd..803a6277c 100644
--- a/src/core/hle/service/hid/irsensor/image_transfer_processor.cpp
+++ b/src/core/hle/service/hid/irsensor/image_transfer_processor.cpp
@@ -58,16 +58,16 @@ void ImageTransferProcessor::OnControllerUpdate(Core::HID::ControllerTriggerType
if (camera_data.format != current_config.origin_format) {
LOG_WARNING(Service_IRS, "Wrong Input format {} expected {}", camera_data.format,
current_config.origin_format);
- system.Memory().ZeroBlock(*system.ApplicationProcess(), transfer_memory,
- GetDataSize(current_config.trimming_format));
+ system.ApplicationMemory().ZeroBlock(transfer_memory,
+ GetDataSize(current_config.trimming_format));
return;
}
if (current_config.origin_format > current_config.trimming_format) {
LOG_WARNING(Service_IRS, "Origin format {} is smaller than trimming format {}",
current_config.origin_format, current_config.trimming_format);
- system.Memory().ZeroBlock(*system.ApplicationProcess(), transfer_memory,
- GetDataSize(current_config.trimming_format));
+ system.ApplicationMemory().ZeroBlock(transfer_memory,
+ GetDataSize(current_config.trimming_format));
return;
}
@@ -84,8 +84,8 @@ void ImageTransferProcessor::OnControllerUpdate(Core::HID::ControllerTriggerType
"Trimming area ({}, {}, {}, {}) is outside of origin area ({}, {})",
current_config.trimming_start_x, current_config.trimming_start_y,
trimming_width, trimming_height, origin_width, origin_height);
- system.Memory().ZeroBlock(*system.ApplicationProcess(), transfer_memory,
- GetDataSize(current_config.trimming_format));
+ system.ApplicationMemory().ZeroBlock(transfer_memory,
+ GetDataSize(current_config.trimming_format));
return;
}
@@ -99,8 +99,8 @@ void ImageTransferProcessor::OnControllerUpdate(Core::HID::ControllerTriggerType
}
}
- system.Memory().WriteBlock(transfer_memory, window_data.data(),
- GetDataSize(current_config.trimming_format));
+ system.ApplicationMemory().WriteBlock(transfer_memory, window_data.data(),
+ GetDataSize(current_config.trimming_format));
if (!IsProcessorActive()) {
StartProcessor();
@@ -140,7 +140,7 @@ void ImageTransferProcessor::SetConfig(
npad_device->SetCameraFormat(current_config.origin_format);
}
-void ImageTransferProcessor::SetTransferMemoryAddress(VAddr t_mem) {
+void ImageTransferProcessor::SetTransferMemoryAddress(Common::ProcessAddress t_mem) {
transfer_memory = t_mem;
}
@@ -148,7 +148,7 @@ Core::IrSensor::ImageTransferProcessorState ImageTransferProcessor::GetState(
std::vector<u8>& data) const {
const auto size = GetDataSize(current_config.trimming_format);
data.resize(size);
- system.Memory().ReadBlock(transfer_memory, data.data(), size);
+ system.ApplicationMemory().ReadBlock(transfer_memory, data.data(), size);
return processor_state;
}
diff --git a/src/core/hle/service/hid/irsensor/image_transfer_processor.h b/src/core/hle/service/hid/irsensor/image_transfer_processor.h
index 7cfe04c8c..7f42d8453 100644
--- a/src/core/hle/service/hid/irsensor/image_transfer_processor.h
+++ b/src/core/hle/service/hid/irsensor/image_transfer_processor.h
@@ -3,7 +3,7 @@
#pragma once
-#include "common/common_types.h"
+#include "common/typed_address.h"
#include "core/hid/irs_types.h"
#include "core/hle/service/hid/irsensor/processor_base.h"
@@ -37,7 +37,7 @@ public:
void SetConfig(Core::IrSensor::PackedImageTransferProcessorExConfig config);
// Transfer memory where the image data will be stored
- void SetTransferMemoryAddress(VAddr t_mem);
+ void SetTransferMemoryAddress(Common::ProcessAddress t_mem);
Core::IrSensor::ImageTransferProcessorState GetState(std::vector<u8>& data) const;
@@ -72,6 +72,6 @@ private:
int callback_key{};
Core::System& system;
- VAddr transfer_memory{};
+ Common::ProcessAddress transfer_memory{};
};
} // namespace Service::IRS
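The image transfer processor above rejects trimming windows that fall outside the origin image and zeroes the transfer memory in that case; a standalone sketch of the crop itself, assuming single-byte pixels and row-major buffers (the real formats and sizes come from GetDataSize and the IRS configs):

#include <cstddef>
#include <cstdint>
#include <vector>

std::vector<std::uint8_t> CropWindow(const std::vector<std::uint8_t>& origin,
                                     std::uint32_t origin_width, std::uint32_t origin_height,
                                     std::uint32_t start_x, std::uint32_t start_y,
                                     std::uint32_t trim_width, std::uint32_t trim_height) {
    std::vector<std::uint8_t> window(static_cast<std::size_t>(trim_width) * trim_height);
    if (start_x + trim_width > origin_width || start_y + trim_height > origin_height) {
        return window; // out of bounds: hand back zeroed data, like the ZeroBlock() fallback
    }
    for (std::uint32_t y = 0; y < trim_height; ++y) {
        const std::size_t src_row =
            static_cast<std::size_t>(start_y + y) * origin_width + start_x;
        const std::size_t dst_row = static_cast<std::size_t>(y) * trim_width;
        for (std::uint32_t x = 0; x < trim_width; ++x) {
            window[dst_row + x] = origin[src_row + x];
        }
    }
    return window;
}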
diff --git a/src/core/hle/service/hle_ipc.cpp b/src/core/hle/service/hle_ipc.cpp
index cca697c64..2290df705 100644
--- a/src/core/hle/service/hle_ipc.cpp
+++ b/src/core/hle/service/hle_ipc.cpp
@@ -303,8 +303,7 @@ Result HLERequestContext::WriteToOutgoingCommandBuffer(Kernel::KThread& requesti
}
// Copy the translated command buffer back into the thread's command buffer area.
- memory.WriteBlock(owner_process, requesting_thread.GetTlsAddress(), cmd_buf.data(),
- write_size * sizeof(u32));
+ memory.WriteBlock(requesting_thread.GetTlsAddress(), cmd_buf.data(), write_size * sizeof(u32));
return ResultSuccess;
}
diff --git a/src/core/hle/service/jit/jit.cpp b/src/core/hle/service/jit/jit.cpp
index 46bcfd695..be996870f 100644
--- a/src/core/hle/service/jit/jit.cpp
+++ b/src/core/hle/service/jit/jit.cpp
@@ -24,8 +24,8 @@ class IJitEnvironment final : public ServiceFramework<IJitEnvironment> {
public:
explicit IJitEnvironment(Core::System& system_, Kernel::KProcess& process_, CodeRange user_rx,
CodeRange user_ro)
- : ServiceFramework{system_, "IJitEnvironment"}, process{&process_}, context{
- system_.Memory()} {
+ : ServiceFramework{system_, "IJitEnvironment"}, process{&process_},
+ context{system_.ApplicationMemory()} {
// clang-format off
static const FunctionInfo functions[] = {
{0, &IJitEnvironment::GenerateCode, "GenerateCode"},
@@ -195,7 +195,7 @@ public:
}
// Set up the configuration with the required TransferMemory address
- configuration.transfer_memory.offset = tmem->GetSourceAddress();
+ configuration.transfer_memory.offset = GetInteger(tmem->GetSourceAddress());
configuration.transfer_memory.size = tmem_size;
// Gather up all the callbacks from the loaded plugin
@@ -383,12 +383,12 @@ public:
}
const CodeRange user_rx{
- .offset = rx_mem->GetSourceAddress(),
+ .offset = GetInteger(rx_mem->GetSourceAddress()),
.size = parameters.rx_size,
};
const CodeRange user_ro{
- .offset = ro_mem->GetSourceAddress(),
+ .offset = GetInteger(ro_mem->GetSourceAddress()),
.size = parameters.ro_size,
};
diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp
index 6de96ed5b..c42489ff9 100644
--- a/src/core/hle/service/ldr/ldr.cpp
+++ b/src/core/hle/service/ldr/ldr.cpp
@@ -225,7 +225,7 @@ public:
// Read NRR data from memory
std::vector<u8> nrr_data(nrr_size);
- system.Memory().ReadBlock(nrr_address, nrr_data.data(), nrr_size);
+ system.ApplicationMemory().ReadBlock(nrr_address, nrr_data.data(), nrr_size);
NRRHeader header;
std::memcpy(&header, nrr_data.data(), sizeof(NRRHeader));
@@ -314,7 +314,7 @@ public:
const auto is_region_available = [&](VAddr addr) {
const auto end_addr = addr + size;
while (addr < end_addr) {
- if (system.Memory().IsValidVirtualAddress(addr)) {
+ if (system.ApplicationMemory().IsValidVirtualAddress(addr)) {
return false;
}
@@ -337,7 +337,7 @@ public:
bool succeeded = false;
const auto map_region_end =
- page_table.GetAliasCodeRegionStart() + page_table.GetAliasCodeRegionSize();
+ GetInteger(page_table.GetAliasCodeRegionStart()) + page_table.GetAliasCodeRegionSize();
while (current_map_addr < map_region_end) {
if (is_region_available(current_map_addr)) {
succeeded = true;
@@ -427,8 +427,8 @@ public:
const VAddr bss_end_addr{
Common::AlignUp(bss_start + nro_header.bss_size, Kernel::PageSize)};
- const auto CopyCode = [this, process](VAddr src_addr, VAddr dst_addr, u64 size) {
- system.Memory().CopyBlock(*process, dst_addr, src_addr, size);
+ const auto CopyCode = [this](VAddr src_addr, VAddr dst_addr, u64 size) {
+ system.ApplicationMemory().CopyBlock(dst_addr, src_addr, size);
};
CopyCode(nro_addr + nro_header.segment_headers[TEXT_INDEX].memory_offset, text_start,
nro_header.segment_headers[TEXT_INDEX].memory_size);
@@ -506,7 +506,7 @@ public:
// Read NRO data from memory
std::vector<u8> nro_data(nro_size);
- system.Memory().ReadBlock(nro_address, nro_data.data(), nro_size);
+ system.ApplicationMemory().ReadBlock(nro_address, nro_data.data(), nro_size);
SHA256Hash hash{};
mbedtls_sha256_ret(nro_data.data(), nro_data.size(), hash.data(), 0);
@@ -642,7 +642,8 @@ public:
LOG_WARNING(Service_LDR, "(STUBBED) called");
initialized = true;
- current_map_addr = system.ApplicationProcess()->PageTable().GetAliasCodeRegionStart();
+ current_map_addr =
+ GetInteger(system.ApplicationProcess()->PageTable().GetAliasCodeRegionStart());
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
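The NRO mapping path above walks the alias code region looking for an address range that is not yet mapped; a simplified sketch of that scan with a caller-supplied "is this page mapped" predicate standing in for the page-table/IsValidVirtualAddress query:

#include <cstdint>
#include <functional>
#include <optional>

constexpr std::uint64_t kPageSize = 0x1000;

std::optional<std::uint64_t> FindFreeRegion(std::uint64_t region_start, std::uint64_t region_size,
                                            std::uint64_t size,
                                            const std::function<bool(std::uint64_t)>& is_mapped) {
    const std::uint64_t region_end = region_start + region_size;
    for (std::uint64_t addr = region_start; addr + size <= region_end; addr += kPageSize) {
        bool available = true;
        for (std::uint64_t probe = addr; probe < addr + size; probe += kPageSize) {
            if (is_mapped(probe)) { // a mapped page means this candidate range is unusable
                available = false;
                break;
            }
        }
        if (available) {
            return addr;
        }
    }
    return std::nullopt;
}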
diff --git a/src/core/hle/service/mii/mii_manager.cpp b/src/core/hle/service/mii/mii_manager.cpp
index 3a2fe938f..c920650f5 100644
--- a/src/core/hle/service/mii/mii_manager.cpp
+++ b/src/core/hle/service/mii/mii_manager.cpp
@@ -510,7 +510,7 @@ CharInfo MiiManager::ConvertV3ToCharInfo(const Ver3StoreData& mii_v3) const {
return mii;
}
-Ver3StoreData MiiManager::ConvertCharInfoToV3(const CharInfo& mii) const {
+Ver3StoreData MiiManager::BuildFromStoreData(const CharInfo& mii) const {
Service::Mii::MiiManager manager;
Ver3StoreData mii_v3{};
@@ -534,16 +534,13 @@ Ver3StoreData MiiManager::ConvertCharInfoToV3(const CharInfo& mii) const {
mii_v3.region_information.character_set.Assign(mii.font_region);
mii_v3.appearance_bits1.face_shape.Assign(mii.faceline_type);
- mii_v3.appearance_bits1.skin_color.Assign(mii.faceline_color);
mii_v3.appearance_bits2.wrinkles.Assign(mii.faceline_wrinkle);
mii_v3.appearance_bits2.makeup.Assign(mii.faceline_make);
mii_v3.hair_style = mii.hair_type;
- mii_v3.appearance_bits3.hair_color.Assign(mii.hair_color);
mii_v3.appearance_bits3.flip_hair.Assign(mii.hair_flip);
mii_v3.appearance_bits4.eye_type.Assign(mii.eye_type);
- mii_v3.appearance_bits4.eye_color.Assign(mii.eye_color);
mii_v3.appearance_bits4.eye_scale.Assign(mii.eye_scale);
mii_v3.appearance_bits4.eye_vertical_stretch.Assign(mii.eye_aspect);
mii_v3.appearance_bits4.eye_rotation.Assign(mii.eye_rotate);
@@ -551,7 +548,6 @@ Ver3StoreData MiiManager::ConvertCharInfoToV3(const CharInfo& mii) const {
mii_v3.appearance_bits4.eye_y_position.Assign(mii.eye_y);
mii_v3.appearance_bits5.eyebrow_style.Assign(mii.eyebrow_type);
- mii_v3.appearance_bits5.eyebrow_color.Assign(mii.eyebrow_color);
mii_v3.appearance_bits5.eyebrow_scale.Assign(mii.eyebrow_scale);
mii_v3.appearance_bits5.eyebrow_yscale.Assign(mii.eyebrow_aspect);
mii_v3.appearance_bits5.eyebrow_rotation.Assign(mii.eyebrow_rotate);
@@ -563,7 +559,6 @@ Ver3StoreData MiiManager::ConvertCharInfoToV3(const CharInfo& mii) const {
mii_v3.appearance_bits6.nose_y_position.Assign(mii.nose_y);
mii_v3.appearance_bits7.mouth_type.Assign(mii.mouth_type);
- mii_v3.appearance_bits7.mouth_color.Assign(mii.mouth_color);
mii_v3.appearance_bits7.mouth_scale.Assign(mii.mouth_scale);
mii_v3.appearance_bits7.mouth_horizontal_stretch.Assign(mii.mouth_aspect);
mii_v3.appearance_bits8.mouth_y_position.Assign(mii.mouth_y);
@@ -573,10 +568,7 @@ Ver3StoreData MiiManager::ConvertCharInfoToV3(const CharInfo& mii) const {
mii_v3.appearance_bits9.mustache_y_position.Assign(mii.mustache_y);
mii_v3.appearance_bits9.bear_type.Assign(mii.beard_type);
- mii_v3.appearance_bits9.facial_hair_color.Assign(mii.beard_color);
- mii_v3.appearance_bits10.glasses_type.Assign(mii.glasses_type);
- mii_v3.appearance_bits10.glasses_color.Assign(mii.glasses_color);
mii_v3.appearance_bits10.glasses_scale.Assign(mii.glasses_scale);
mii_v3.appearance_bits10.glasses_y_position.Assign(mii.glasses_y);
@@ -585,11 +577,36 @@ Ver3StoreData MiiManager::ConvertCharInfoToV3(const CharInfo& mii) const {
mii_v3.appearance_bits11.mole_x_position.Assign(mii.mole_x);
mii_v3.appearance_bits11.mole_y_position.Assign(mii.mole_y);
+ // These values are converted to their Ver3 equivalents via lookup tables
+ mii_v3.appearance_bits1.skin_color.Assign(Ver3FacelineColorTable[mii.faceline_color]);
+ mii_v3.appearance_bits3.hair_color.Assign(Ver3HairColorTable[mii.hair_color]);
+ mii_v3.appearance_bits4.eye_color.Assign(Ver3EyeColorTable[mii.eye_color]);
+ mii_v3.appearance_bits5.eyebrow_color.Assign(Ver3HairColorTable[mii.eyebrow_color]);
+ mii_v3.appearance_bits7.mouth_color.Assign(Ver3MouthlineColorTable[mii.mouth_color]);
+ mii_v3.appearance_bits9.facial_hair_color.Assign(Ver3HairColorTable[mii.beard_color]);
+ mii_v3.appearance_bits10.glasses_color.Assign(Ver3GlassColorTable[mii.glasses_color]);
+ mii_v3.appearance_bits10.glasses_type.Assign(Ver3GlassTypeTable[mii.glasses_type]);
+
+ mii_v3.crc = GenerateCrc16(&mii_v3, sizeof(Ver3StoreData) - sizeof(u16));
+
// TODO: Validate mii_v3 data
return mii_v3;
}
+NfpStoreDataExtension MiiManager::SetFromStoreData(const CharInfo& mii) const {
+ return {
+ .faceline_color = static_cast<u8>(mii.faceline_color & 0xf),
+ .hair_color = static_cast<u8>(mii.hair_color & 0x7f),
+ .eye_color = static_cast<u8>(mii.eye_color & 0x7f),
+ .eyebrow_color = static_cast<u8>(mii.eyebrow_color & 0x7f),
+ .mouth_color = static_cast<u8>(mii.mouth_color & 0x7f),
+ .beard_color = static_cast<u8>(mii.beard_color & 0x7f),
+ .glass_color = static_cast<u8>(mii.glasses_color & 0x7f),
+ .glass_type = static_cast<u8>(mii.glasses_type & 0x1f),
+ };
+}
+
bool MiiManager::ValidateV3Info(const Ver3StoreData& mii_v3) const {
bool is_valid = mii_v3.version == 0 || mii_v3.version == 3;
diff --git a/src/core/hle/service/mii/mii_manager.h b/src/core/hle/service/mii/mii_manager.h
index 83ad3d343..5525fcd1c 100644
--- a/src/core/hle/service/mii/mii_manager.h
+++ b/src/core/hle/service/mii/mii_manager.h
@@ -23,11 +23,16 @@ public:
CharInfo BuildRandom(Age age, Gender gender, Race race);
CharInfo BuildDefault(std::size_t index);
CharInfo ConvertV3ToCharInfo(const Ver3StoreData& mii_v3) const;
- Ver3StoreData ConvertCharInfoToV3(const CharInfo& mii) const;
bool ValidateV3Info(const Ver3StoreData& mii_v3) const;
ResultVal<std::vector<MiiInfoElement>> GetDefault(SourceFlag source_flag);
Result GetIndex(const CharInfo& info, u32& index);
+ // This is nn::mii::detail::Ver::StoreDataRaw::BuildFromStoreData
+ Ver3StoreData BuildFromStoreData(const CharInfo& mii) const;
+
+ // This is nn::mii::detail::NfpStoreDataExtentionRaw::SetFromStoreData
+ NfpStoreDataExtension SetFromStoreData(const CharInfo& mii) const;
+
private:
const Common::UUID user_id{};
u64 update_counter{};
diff --git a/src/core/hle/service/mii/types.h b/src/core/hle/service/mii/types.h
index 9e3247397..c48d08d79 100644
--- a/src/core/hle/service/mii/types.h
+++ b/src/core/hle/service/mii/types.h
@@ -365,10 +365,68 @@ struct Ver3StoreData {
} appearance_bits11;
std::array<u16_le, 0xA> author_name;
- INSERT_PADDING_BYTES(0x4);
+ INSERT_PADDING_BYTES(0x2);
+ u16_be crc;
};
static_assert(sizeof(Ver3StoreData) == 0x60, "Ver3StoreData is an invalid size");
+struct NfpStoreDataExtension {
+ u8 faceline_color;
+ u8 hair_color;
+ u8 eye_color;
+ u8 eyebrow_color;
+ u8 mouth_color;
+ u8 beard_color;
+ u8 glass_color;
+ u8 glass_type;
+};
+static_assert(sizeof(NfpStoreDataExtension) == 0x8, "NfpStoreDataExtension is an invalid size");
+
+constexpr std::array<u8, 0x10> Ver3FacelineColorTable{
+ 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x0, 0x1, 0x5, 0x5,
+};
+
+constexpr std::array<u8, 100> Ver3HairColorTable{
+ 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x0, 0x4, 0x3, 0x5, 0x4, 0x4, 0x6, 0x2, 0x0,
+ 0x6, 0x4, 0x3, 0x2, 0x2, 0x7, 0x3, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2,
+ 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x0, 0x0, 0x4,
+ 0x4, 0x4, 0x4, 0x4, 0x4, 0x0, 0x0, 0x0, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x5, 0x5, 0x5,
+ 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x5, 0x7, 0x5, 0x7, 0x7, 0x7, 0x7, 0x7, 0x6, 0x7,
+ 0x7, 0x7, 0x7, 0x7, 0x3, 0x7, 0x7, 0x7, 0x7, 0x7, 0x0, 0x4, 0x4, 0x4, 0x4,
+};
+
+constexpr std::array<u8, 100> Ver3EyeColorTable{
+ 0x0, 0x2, 0x2, 0x2, 0x1, 0x3, 0x2, 0x3, 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x2, 0x2, 0x4,
+ 0x2, 0x1, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2,
+ 0x2, 0x2, 0x2, 0x2, 0x0, 0x0, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x1, 0x0, 0x4, 0x4,
+ 0x4, 0x4, 0x4, 0x4, 0x4, 0x0, 0x5, 0x5, 0x5, 0x5, 0x5, 0x5, 0x5, 0x5, 0x5, 0x5, 0x5,
+ 0x5, 0x5, 0x5, 0x5, 0x5, 0x5, 0x5, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x2, 0x2,
+ 0x3, 0x3, 0x3, 0x3, 0x2, 0x2, 0x2, 0x2, 0x2, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1,
+};
+
+constexpr std::array<u8, 100> Ver3MouthlineColorTable{
+ 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x3, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x1, 0x4,
+ 0x4, 0x4, 0x0, 0x1, 0x2, 0x3, 0x4, 0x4, 0x2, 0x3, 0x3, 0x4, 0x4, 0x4, 0x4, 0x1, 0x4,
+ 0x4, 0x2, 0x3, 0x3, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x3, 0x3, 0x3, 0x4, 0x4, 0x4,
+ 0x3, 0x3, 0x3, 0x3, 0x3, 0x4, 0x4, 0x4, 0x4, 0x4, 0x3, 0x3, 0x3, 0x3, 0x4, 0x4, 0x4,
+ 0x4, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x4, 0x4, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x4, 0x3,
+ 0x3, 0x3, 0x3, 0x3, 0x4, 0x0, 0x3, 0x3, 0x3, 0x3, 0x4, 0x3, 0x3, 0x3, 0x3,
+};
+
+constexpr std::array<u8, 100> Ver3GlassColorTable{
+ 0x0, 0x1, 0x1, 0x1, 0x5, 0x1, 0x1, 0x4, 0x0, 0x5, 0x1, 0x1, 0x3, 0x5, 0x1, 0x2, 0x3,
+ 0x4, 0x5, 0x4, 0x2, 0x2, 0x4, 0x4, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2,
+ 0x2, 0x2, 0x2, 0x2, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3,
+ 0x3, 0x3, 0x3, 0x3, 0x3, 0x0, 0x0, 0x0, 0x5, 0x5, 0x5, 0x5, 0x5, 0x5, 0x0, 0x5, 0x5,
+ 0x5, 0x5, 0x5, 0x5, 0x5, 0x5, 0x5, 0x5, 0x5, 0x5, 0x5, 0x5, 0x5, 0x5, 0x5, 0x1, 0x4,
+ 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x5, 0x5, 0x5, 0x5, 0x5, 0x5,
+};
+
+constexpr std::array<u8, 20> Ver3GlassTypeTable{
+ 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x1,
+ 0x2, 0x1, 0x3, 0x7, 0x7, 0x6, 0x7, 0x8, 0x7, 0x7,
+};
+
struct MiiStoreData {
using Name = std::array<char16_t, 10>;
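The tables above map the extended CharInfo color indices down to the legacy Ver3 palette; a tiny illustration with the faceline table from the diff (the trailing crc field is then recomputed over every byte of Ver3StoreData except the crc itself, which is what GenerateCrc16 does in BuildFromStoreData):

#include <array>
#include <cstdint>

constexpr std::array<std::uint8_t, 0x10> Ver3FacelineColorTable{
    0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x0, 0x1, 0x5, 0x5,
};

constexpr std::uint8_t ToVer3FacelineColor(std::uint8_t faceline_color) {
    // The real conversion indexes the table directly; this sketch clamps out-of-range input to 0.
    return faceline_color < Ver3FacelineColorTable.size() ? Ver3FacelineColorTable[faceline_color]
                                                          : 0;
}

static_assert(ToVer3FacelineColor(8) == 5);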
diff --git a/src/core/hle/service/nfc/nfc_device.cpp b/src/core/hle/service/nfc/nfc_device.cpp
index 3f17d0c7a..c7db74d14 100644
--- a/src/core/hle/service/nfc/nfc_device.cpp
+++ b/src/core/hle/service/nfc/nfc_device.cpp
@@ -42,8 +42,18 @@ NfcDevice::~NfcDevice() {
};
void NfcDevice::NpadUpdate(Core::HID::ControllerTriggerType type) {
- if (type == Core::HID::ControllerTriggerType::Connected ||
- type == Core::HID::ControllerTriggerType::Disconnected) {
+ if (!is_initalized) {
+ return;
+ }
+
+ if (type == Core::HID::ControllerTriggerType::Connected) {
+ Initialize();
+ availability_change_event->Signal();
+ return;
+ }
+
+ if (type == Core::HID::ControllerTriggerType::Disconnected) {
+ device_state = NFP::DeviceState::Unavailable;
availability_change_event->Signal();
return;
}
@@ -113,6 +123,7 @@ void NfcDevice::Initialize() {
device_state =
npad_device->HasNfc() ? NFP::DeviceState::Initialized : NFP::DeviceState::Unavailable;
encrypted_tag_data = {};
+ is_initalized = true;
}
void NfcDevice::Finalize() {
@@ -121,6 +132,7 @@ void NfcDevice::Finalize() {
StopDetection();
}
device_state = NFP::DeviceState::Unavailable;
+ is_initalized = false;
}
Result NfcDevice::StartDetection(NFP::TagProtocol allowed_protocol) {
diff --git a/src/core/hle/service/nfc/nfc_device.h b/src/core/hle/service/nfc/nfc_device.h
index a6e114d36..ea63f0537 100644
--- a/src/core/hle/service/nfc/nfc_device.h
+++ b/src/core/hle/service/nfc/nfc_device.h
@@ -67,6 +67,7 @@ private:
Kernel::KEvent* deactivate_event = nullptr;
Kernel::KEvent* availability_change_event = nullptr;
+ bool is_initalized{};
NFP::TagProtocol allowed_protocols{};
NFP::DeviceState device_state{NFP::DeviceState::Unavailable};
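A compact sketch of the connection handling the diff adds to NfcDevice (and, below, NfpDevice), with simplified stand-ins for the controller callback and event types: updates are ignored until the service has initialized the device once, a reconnect re-runs initialization, and a disconnect parks the device in Unavailable before signaling the availability event.

#include <functional>

enum class TriggerType { Connected, Disconnected, Other };
enum class DeviceState { Unavailable, Initialized };

struct NfcLikeDevice {
    bool is_initialized{};
    DeviceState state{DeviceState::Unavailable};
    std::function<void()> signal_availability_change = [] {};

    void Initialize() {
        state = DeviceState::Initialized;
        is_initialized = true;
    }

    void OnNpadUpdate(TriggerType type) {
        if (!is_initialized) {
            return; // nothing to do before the service has initialized the device
        }
        if (type == TriggerType::Connected) {
            Initialize();
            signal_availability_change();
        } else if (type == TriggerType::Disconnected) {
            state = DeviceState::Unavailable;
            signal_availability_change();
        }
    }
};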
diff --git a/src/core/hle/service/nfp/amiibo_crypto.cpp b/src/core/hle/service/nfp/amiibo_crypto.cpp
index ad73edbda..a3622e792 100644
--- a/src/core/hle/service/nfp/amiibo_crypto.cpp
+++ b/src/core/hle/service/nfp/amiibo_crypto.cpp
@@ -70,6 +70,10 @@ bool IsAmiiboValid(const EncryptedNTAG215File& ntag_file) {
return true;
}
+bool IsAmiiboValid(const NTAG215File& ntag_file) {
+ return IsAmiiboValid(EncodedDataToNfcData(ntag_file));
+}
+
NTAG215File NfcDataToEncodedData(const EncryptedNTAG215File& nfc_data) {
NTAG215File encoded_data{};
@@ -88,8 +92,9 @@ NTAG215File NfcDataToEncodedData(const EncryptedNTAG215File& nfc_data) {
encoded_data.application_area_id = nfc_data.user_memory.application_area_id;
encoded_data.application_id_byte = nfc_data.user_memory.application_id_byte;
encoded_data.unknown = nfc_data.user_memory.unknown;
+ encoded_data.mii_extension = nfc_data.user_memory.mii_extension;
encoded_data.unknown2 = nfc_data.user_memory.unknown2;
- encoded_data.application_area_crc = nfc_data.user_memory.application_area_crc;
+ encoded_data.register_info_crc = nfc_data.user_memory.register_info_crc;
encoded_data.application_area = nfc_data.user_memory.application_area;
encoded_data.hmac_tag = nfc_data.user_memory.hmac_tag;
encoded_data.lock_bytes = nfc_data.uuid.lock_bytes;
@@ -122,8 +127,9 @@ EncryptedNTAG215File EncodedDataToNfcData(const NTAG215File& encoded_data) {
nfc_data.user_memory.application_area_id = encoded_data.application_area_id;
nfc_data.user_memory.application_id_byte = encoded_data.application_id_byte;
nfc_data.user_memory.unknown = encoded_data.unknown;
+ nfc_data.user_memory.mii_extension = encoded_data.mii_extension;
nfc_data.user_memory.unknown2 = encoded_data.unknown2;
- nfc_data.user_memory.application_area_crc = encoded_data.application_area_crc;
+ nfc_data.user_memory.register_info_crc = encoded_data.register_info_crc;
nfc_data.user_memory.application_area = encoded_data.application_area;
nfc_data.user_memory.hmac_tag = encoded_data.hmac_tag;
nfc_data.user_memory.model_info = encoded_data.model_info;
diff --git a/src/core/hle/service/nfp/amiibo_crypto.h b/src/core/hle/service/nfp/amiibo_crypto.h
index c9fd67a39..f6208ee6b 100644
--- a/src/core/hle/service/nfp/amiibo_crypto.h
+++ b/src/core/hle/service/nfp/amiibo_crypto.h
@@ -60,6 +60,9 @@ static_assert(sizeof(DerivedKeys) == 0x30, "DerivedKeys is an invalid size");
/// Validates that the amiibo file is not corrupted
bool IsAmiiboValid(const EncryptedNTAG215File& ntag_file);
+/// Validates that the amiibo file is not corrupted
+bool IsAmiiboValid(const NTAG215File& ntag_file);
+
/// Converts from encrypted file format to encoded file format
NTAG215File NfcDataToEncodedData(const EncryptedNTAG215File& nfc_data);
diff --git a/src/core/hle/service/nfp/nfp_device.cpp b/src/core/hle/service/nfp/nfp_device.cpp
index ddff90d6a..607e70968 100644
--- a/src/core/hle/service/nfp/nfp_device.cpp
+++ b/src/core/hle/service/nfp/nfp_device.cpp
@@ -3,6 +3,17 @@
#include <array>
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable : 4701) // Potentially uninitialized local variable 'result' used
+#endif
+
+#include <boost/crc.hpp>
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
#include "common/input.h"
#include "common/logging/log.h"
#include "common/string_util.h"
@@ -55,8 +66,18 @@ NfpDevice::~NfpDevice() {
};
void NfpDevice::NpadUpdate(Core::HID::ControllerTriggerType type) {
- if (type == Core::HID::ControllerTriggerType::Connected ||
- type == Core::HID::ControllerTriggerType::Disconnected) {
+ if (!is_initalized) {
+ return;
+ }
+
+ if (type == Core::HID::ControllerTriggerType::Connected) {
+ Initialize();
+ availability_change_event->Signal();
+ return;
+ }
+
+ if (type == Core::HID::ControllerTriggerType::Disconnected) {
+ device_state = DeviceState::Unavailable;
availability_change_event->Signal();
return;
}
@@ -100,7 +121,16 @@ bool NfpDevice::LoadAmiibo(std::span<const u8> data) {
// TODO: Filter by allowed_protocols here
- memcpy(&encrypted_tag_data, data.data(), sizeof(EncryptedNTAG215File));
+ memcpy(&tag_data, data.data(), sizeof(EncryptedNTAG215File));
+ is_plain_amiibo = AmiiboCrypto::IsAmiiboValid(tag_data);
+
+ if (is_plain_amiibo) {
+ encrypted_tag_data = AmiiboCrypto::EncodedDataToNfcData(tag_data);
+ LOG_INFO(Service_NFP, "Using plain amiibo");
+ } else {
+ tag_data = {};
+ memcpy(&encrypted_tag_data, data.data(), sizeof(EncryptedNTAG215File));
+ }
device_state = DeviceState::TagFound;
deactivate_event->GetReadableEvent().Clear();
@@ -134,6 +164,7 @@ void NfpDevice::Initialize() {
device_state = npad_device->HasNfc() ? DeviceState::Initialized : DeviceState::Unavailable;
encrypted_tag_data = {};
tag_data = {};
+ is_initalized = true;
}
void NfpDevice::Finalize() {
@@ -144,6 +175,7 @@ void NfpDevice::Finalize() {
StopDetection();
}
device_state = DeviceState::Unavailable;
+ is_initalized = false;
}
Result NfpDevice::StartDetection(TagProtocol allowed_protocol) {
@@ -209,13 +241,17 @@ Result NfpDevice::Flush() {
tag_data.write_counter++;
- if (!AmiiboCrypto::EncodeAmiibo(tag_data, encrypted_tag_data)) {
- LOG_ERROR(Service_NFP, "Failed to encode data");
- return WriteAmiiboFailed;
- }
+ std::vector<u8> data(sizeof(EncryptedNTAG215File));
+ if (is_plain_amiibo) {
+ memcpy(data.data(), &tag_data, sizeof(tag_data));
+ } else {
+ if (!AmiiboCrypto::EncodeAmiibo(tag_data, encrypted_tag_data)) {
+ LOG_ERROR(Service_NFP, "Failed to encode data");
+ return WriteAmiiboFailed;
+ }
- std::vector<u8> data(sizeof(encrypted_tag_data));
- memcpy(data.data(), &encrypted_tag_data, sizeof(encrypted_tag_data));
+ memcpy(data.data(), &encrypted_tag_data, sizeof(encrypted_tag_data));
+ }
if (!npad_device->WriteNfc(data)) {
LOG_ERROR(Service_NFP, "Error writing to file");
@@ -233,6 +269,13 @@ Result NfpDevice::Mount(MountTarget mount_target_) {
return WrongDeviceState;
}
+ // The loaded amiibo is not encrypted
+ if (is_plain_amiibo) {
+ device_state = DeviceState::TagMounted;
+ mount_target = mount_target_;
+ return ResultSuccess;
+ }
+
if (!AmiiboCrypto::IsAmiiboValid(encrypted_tag_data)) {
LOG_ERROR(Service_NFP, "Not an amiibo");
return NotAnAmiibo;
@@ -448,7 +491,7 @@ Result NfpDevice::DeleteRegisterInfo() {
rng.GenerateRandomBytes(&tag_data.unknown, sizeof(u8));
rng.GenerateRandomBytes(&tag_data.unknown2[0], sizeof(u32));
rng.GenerateRandomBytes(&tag_data.unknown2[1], sizeof(u32));
- rng.GenerateRandomBytes(&tag_data.application_area_crc, sizeof(u32));
+ rng.GenerateRandomBytes(&tag_data.register_info_crc, sizeof(u32));
rng.GenerateRandomBytes(&tag_data.settings.init_date, sizeof(u32));
tag_data.settings.settings.font_region.Assign(0);
tag_data.settings.settings.amiibo_initialized.Assign(0);
@@ -471,6 +514,7 @@ Result NfpDevice::SetRegisterInfoPrivate(const AmiiboName& amiibo_name) {
}
Service::Mii::MiiManager manager;
+ const auto mii = manager.BuildDefault(0);
auto& settings = tag_data.settings;
if (tag_data.settings.settings.amiibo_initialized == 0) {
@@ -479,16 +523,15 @@ Result NfpDevice::SetRegisterInfoPrivate(const AmiiboName& amiibo_name) {
}
SetAmiiboName(settings, amiibo_name);
- tag_data.owner_mii = manager.ConvertCharInfoToV3(manager.BuildDefault(0));
+ tag_data.owner_mii = manager.BuildFromStoreData(mii);
+ tag_data.mii_extension = manager.SetFromStoreData(mii);
tag_data.unknown = 0;
- tag_data.unknown2[6] = 0;
+ tag_data.unknown2 = {};
settings.country_code_id = 0;
settings.settings.font_region.Assign(0);
settings.settings.amiibo_initialized.Assign(1);
- // TODO: this is a mix of tag.file input
- std::array<u8, 0x7e> unknown_input{};
- tag_data.application_area_crc = CalculateCrc(unknown_input);
+ UpdateRegisterInfoCrc();
return Flush();
}
@@ -685,6 +728,11 @@ Result NfpDevice::RecreateApplicationArea(u32 access_id, std::span<const u8> dat
return WrongDeviceState;
}
+ if (is_app_area_open) {
+ LOG_ERROR(Service_NFP, "Application area is open");
+ return WrongDeviceState;
+ }
+
if (mount_target == MountTarget::None || mount_target == MountTarget::Rom) {
LOG_ERROR(Service_NFP, "Amiibo is read only", device_state);
return WrongDeviceState;
@@ -715,10 +763,9 @@ Result NfpDevice::RecreateApplicationArea(u32 access_id, std::span<const u8> dat
tag_data.settings.settings.appdata_initialized.Assign(1);
tag_data.application_area_id = access_id;
tag_data.unknown = {};
+ tag_data.unknown2 = {};
- // TODO: this is a mix of tag_data input
- std::array<u8, 0x7e> unknown_input{};
- tag_data.application_area_crc = CalculateCrc(unknown_input);
+ UpdateRegisterInfoCrc();
return Flush();
}
@@ -752,6 +799,10 @@ Result NfpDevice::DeleteApplicationArea() {
rng.GenerateRandomBytes(&tag_data.application_id_byte, sizeof(u8));
tag_data.settings.settings.appdata_initialized.Assign(0);
tag_data.unknown = {};
+ tag_data.unknown2 = {};
+ is_app_area_open = false;
+
+ UpdateRegisterInfoCrc();
return Flush();
}
@@ -835,32 +886,34 @@ void NfpDevice::UpdateSettingsCrc() {
// TODO: this reads data from a global, find what it is
std::array<u8, 8> unknown_input{};
- settings.crc = CalculateCrc(unknown_input);
-}
-
-u32 NfpDevice::CalculateCrc(std::span<const u8> data) {
- constexpr u32 magic = 0xedb88320;
- u32 crc = 0xffffffff;
-
- if (data.size() == 0) {
- return 0;
- }
-
- for (u8 input : data) {
- u32 temp = (crc ^ input) >> 1;
- if (((crc ^ input) & 1) != 0) {
- temp = temp ^ magic;
- }
-
- for (std::size_t step = 0; step < 7; ++step) {
- crc = temp >> 1;
- if ((temp & 1) != 0) {
- crc = temp >> 1 ^ magic;
- }
- }
- }
+ boost::crc_32_type crc;
+ crc.process_bytes(&unknown_input, sizeof(unknown_input));
+ settings.crc = crc.checksum();
+}
+
+void NfpDevice::UpdateRegisterInfoCrc() {
+#pragma pack(push, 1)
+ struct CrcData {
+ Mii::Ver3StoreData mii;
+ u8 application_id_byte;
+ u8 unknown;
+ Mii::NfpStoreDataExtension mii_extension;
+ std::array<u32, 0x5> unknown2;
+ };
+ static_assert(sizeof(CrcData) == 0x7e, "CrcData is an invalid size");
+#pragma pack(pop)
+
+ const CrcData crc_data{
+ .mii = tag_data.owner_mii,
+ .application_id_byte = tag_data.application_id_byte,
+ .unknown = tag_data.unknown,
+ .mii_extension = tag_data.mii_extension,
+ .unknown2 = tag_data.unknown2,
+ };
- return ~crc;
+ boost::crc_32_type crc;
+ crc.process_bytes(&crc_data, sizeof(CrcData));
+ tag_data.register_info_crc = crc.checksum();
}
} // namespace Service::NFP
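The hand-rolled CRC loop (polynomial 0xEDB88320, initial value 0xFFFFFFFF, final complement) is replaced by boost::crc_32_type, which computes the same standard CRC-32; the boost/crc.hpp include is wrapped in MSVC pragma guards to silence warning C4701 from the library header. A minimal standalone usage sketch of the same API, independent of the amiibo structures:

#include <array>
#include <cstdint>
#include <cstdio>

#include <boost/crc.hpp>

std::uint32_t Crc32(const void* data, std::size_t size) {
    boost::crc_32_type crc;         // Standard CRC-32: poly 0x04C11DB7 reflected (0xEDB88320), init/xor 0xFFFFFFFF.
    crc.process_bytes(data, size);  // Feed the raw bytes, exactly as UpdateRegisterInfoCrc does with CrcData.
    return crc.checksum();
}

int main() {
    const std::array<std::uint8_t, 4> bytes{0xDE, 0xAD, 0xBE, 0xEF};
    std::printf("%08X\n", Crc32(bytes.data(), bytes.size()));
}

Defining the packed CrcData struct and asserting its 0x7e size also documents exactly which tag fields feed the register-info checksum, replacing the earlier zero-filled placeholder buffer.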
diff --git a/src/core/hle/service/nfp/nfp_device.h b/src/core/hle/service/nfp/nfp_device.h
index 06386401d..7f963730d 100644
--- a/src/core/hle/service/nfp/nfp_device.h
+++ b/src/core/hle/service/nfp/nfp_device.h
@@ -80,7 +80,7 @@ private:
AmiiboDate GetAmiiboDate(s64 posix_time) const;
u64 RemoveVersionByte(u64 application_id) const;
void UpdateSettingsCrc();
- u32 CalculateCrc(std::span<const u8>);
+ void UpdateRegisterInfoCrc();
bool is_controller_set{};
int callback_key;
@@ -92,8 +92,10 @@ private:
Kernel::KEvent* deactivate_event = nullptr;
Kernel::KEvent* availability_change_event = nullptr;
+ bool is_initalized{};
bool is_data_moddified{};
bool is_app_area_open{};
+ bool is_plain_amiibo{};
TagProtocol allowed_protocols{};
s64 current_posix_time{};
MountTarget mount_target{MountTarget::None};
diff --git a/src/core/hle/service/nfp/nfp_types.h b/src/core/hle/service/nfp/nfp_types.h
index 142343d6e..70c878552 100644
--- a/src/core/hle/service/nfp/nfp_types.h
+++ b/src/core/hle/service/nfp/nfp_types.h
@@ -259,8 +259,9 @@ struct EncryptedAmiiboFile {
u32_be application_area_id; // Encrypted Game id
u8 application_id_byte;
u8 unknown;
- std::array<u32, 0x7> unknown2;
- u32_be application_area_crc;
+ Service::Mii::NfpStoreDataExtension mii_extension;
+ std::array<u32, 0x5> unknown2;
+ u32_be register_info_crc;
ApplicationArea application_area; // Encrypted Game data
};
static_assert(sizeof(EncryptedAmiiboFile) == 0x1F8, "AmiiboFile is an invalid size");
@@ -280,8 +281,9 @@ struct NTAG215File {
u32_be application_area_id;
u8 application_id_byte;
u8 unknown;
- std::array<u32, 0x7> unknown2;
- u32_be application_area_crc;
+ Service::Mii::NfpStoreDataExtension mii_extension;
+ std::array<u32, 0x5> unknown2;
+ u32_be register_info_crc;
ApplicationArea application_area; // Encrypted Game data
HashData hmac_tag; // Hash
UniqueSerialNumber uid; // Unique serial number
@@ -307,7 +309,8 @@ struct EncryptedNTAG215File {
u32 CFG1; // Defines number of verification attempts
NTAG215Password password; // Password data
};
-static_assert(sizeof(EncryptedNTAG215File) == 0x21C, "EncryptedNTAG215File is an invalid size");
+static_assert(sizeof(EncryptedNTAG215File) == sizeof(NTAG215File),
+ "EncryptedNTAG215File is an invalid size");
static_assert(std::is_trivially_copyable_v<EncryptedNTAG215File>,
"EncryptedNTAG215File must be trivially copyable.");
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
index d2308fffc..453a965dc 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
@@ -304,8 +304,8 @@ NvResult nvhost_gpu::SubmitGPFIFOBase(std::span<const u8> input, std::vector<u8>
Tegra::CommandList entries(params.num_entries);
if (kickoff) {
- system.Memory().ReadBlock(params.address, entries.command_lists.data(),
- params.num_entries * sizeof(Tegra::CommandListHeader));
+ system.ApplicationMemory().ReadBlock(params.address, entries.command_lists.data(),
+ params.num_entries * sizeof(Tegra::CommandListHeader));
} else {
std::memcpy(entries.command_lists.data(), &input[sizeof(IoctlSubmitGpfifo)],
params.num_entries * sizeof(Tegra::CommandListHeader));
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
index 7bcef105b..1ab51f10b 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
@@ -105,8 +105,8 @@ NvResult nvhost_nvdec_common::Submit(DeviceFD fd, std::span<const u8> input,
const auto object = nvmap.GetHandle(cmd_buffer.memory_id);
ASSERT_OR_EXECUTE(object, return NvResult::InvalidState;);
Tegra::ChCommandHeaderList cmdlist(cmd_buffer.word_count);
- system.Memory().ReadBlock(object->address + cmd_buffer.offset, cmdlist.data(),
- cmdlist.size() * sizeof(u32));
+ system.ApplicationMemory().ReadBlock(object->address + cmd_buffer.offset, cmdlist.data(),
+ cmdlist.size() * sizeof(u32));
gpu.PushCommandBuffer(core.Host1xDeviceFile().fd_to_id[fd], cmdlist);
}
std::memcpy(output.data(), &params, sizeof(IoctlSubmit));
diff --git a/src/core/internal_network/socket_proxy.h b/src/core/internal_network/socket_proxy.h
index 9421492bc..6e991fa38 100644
--- a/src/core/internal_network/socket_proxy.h
+++ b/src/core/internal_network/socket_proxy.h
@@ -16,9 +16,6 @@ namespace Network {
class ProxySocket : public SocketBase {
public:
- YUZU_NON_COPYABLE(ProxySocket);
- YUZU_NON_MOVEABLE(ProxySocket);
-
explicit ProxySocket(RoomNetwork& room_network_) noexcept;
~ProxySocket() override;
diff --git a/src/core/internal_network/sockets.h b/src/core/internal_network/sockets.h
index 4c7489258..11e479e50 100644
--- a/src/core/internal_network/sockets.h
+++ b/src/core/internal_network/sockets.h
@@ -36,13 +36,10 @@ public:
SocketBase() = default;
explicit SocketBase(SOCKET fd_) : fd{fd_} {}
-
virtual ~SocketBase() = default;
- virtual SocketBase& operator=(const SocketBase&) = delete;
-
- // Avoid closing sockets implicitly
- virtual SocketBase& operator=(SocketBase&&) noexcept = delete;
+ YUZU_NON_COPYABLE(SocketBase);
+ YUZU_NON_MOVEABLE(SocketBase);
virtual Errno Initialize(Domain domain, Type type, Protocol protocol) = 0;
@@ -109,14 +106,8 @@ public:
~Socket() override;
- Socket(const Socket&) = delete;
- Socket& operator=(const Socket&) = delete;
-
Socket(Socket&& rhs) noexcept;
- // Avoid closing sockets implicitly
- Socket& operator=(Socket&&) noexcept = delete;
-
Errno Initialize(Domain domain, Type type, Protocol protocol) override;
Errno Close() override;
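The per-class deleted copy and move operators are consolidated on SocketBase through the YUZU_NON_COPYABLE and YUZU_NON_MOVEABLE helpers, so every derived socket inherits the same restrictions. A sketch of the conventional expansion of such macros; treat the exact form as an assumption, since the real definitions live in src/common:

// Assumed expansion, shown only to make the declarations above readable.
#define YUZU_NON_COPYABLE(cls)                                                                     \
    cls(const cls&) = delete;                                                                      \
    cls& operator=(const cls&) = delete

#define YUZU_NON_MOVEABLE(cls)                                                                     \
    cls(cls&&) = delete;                                                                           \
    cls& operator=(cls&&) = delete

Deleting the move-assignment operator in particular preserves the intent of the removed "Avoid closing sockets implicitly" comment: a live socket can no longer be silently closed by assigning another socket over it.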
diff --git a/src/core/loader/deconstructed_rom_directory.cpp b/src/core/loader/deconstructed_rom_directory.cpp
index 192571d35..3be9b71cf 100644
--- a/src/core/loader/deconstructed_rom_directory.cpp
+++ b/src/core/loader/deconstructed_rom_directory.cpp
@@ -153,7 +153,7 @@ AppLoader_DeconstructedRomDirectory::LoadResult AppLoader_DeconstructedRomDirect
// Load NSO modules
modules.clear();
- const VAddr base_address{process.PageTable().GetCodeRegionStart()};
+ const VAddr base_address{GetInteger(process.PageTable().GetCodeRegionStart())};
VAddr next_load_addr{base_address};
const FileSys::PatchManager pm{metadata.GetTitleID(), system.GetFileSystemController(),
system.GetContentProvider()};
diff --git a/src/core/loader/kip.cpp b/src/core/loader/kip.cpp
index d8a1bf82a..709e2564f 100644
--- a/src/core/loader/kip.cpp
+++ b/src/core/loader/kip.cpp
@@ -96,7 +96,7 @@ AppLoader::LoadResult AppLoader_KIP::Load(Kernel::KProcess& process,
}
codeset.memory = std::move(program_image);
- const VAddr base_address = process.PageTable().GetCodeRegionStart();
+ const VAddr base_address = GetInteger(process.PageTable().GetCodeRegionStart());
process.LoadModule(std::move(codeset), base_address);
LOG_DEBUG(Loader, "loaded module {} @ 0x{:X}", kip->GetName(), base_address);
diff --git a/src/core/loader/nso.cpp b/src/core/loader/nso.cpp
index a5c384fb5..79639f5e4 100644
--- a/src/core/loader/nso.cpp
+++ b/src/core/loader/nso.cpp
@@ -167,7 +167,7 @@ AppLoader_NSO::LoadResult AppLoader_NSO::Load(Kernel::KProcess& process, Core::S
modules.clear();
// Load module
- const VAddr base_address = process.PageTable().GetCodeRegionStart();
+ const VAddr base_address = GetInteger(process.PageTable().GetCodeRegionStart());
if (!LoadModule(process, system, *file, base_address, true, true)) {
return {ResultStatus::ErrorLoadingNSO, {}};
}
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 4397fcfb1..432310632 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -35,31 +35,35 @@ struct Memory::Impl {
system.ArmInterface(core_id).PageTableChanged(*current_page_table, address_space_width);
}
- void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) {
+ void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
+ Common::PhysicalAddress target) {
ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
- ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", base);
- ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}", target);
+ ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
+ ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}",
+ GetInteger(target));
MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, target,
Common::PageType::Memory);
if (Settings::IsFastmemEnabled()) {
- system.DeviceMemory().buffer.Map(base, target - DramMemoryMap::Base, size);
+ system.DeviceMemory().buffer.Map(GetInteger(base),
+ GetInteger(target) - DramMemoryMap::Base, size);
}
}
- void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
+ void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size) {
ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
- ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", base);
+ ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0,
Common::PageType::Unmapped);
if (Settings::IsFastmemEnabled()) {
- system.DeviceMemory().buffer.Unmap(base, size);
+ system.DeviceMemory().buffer.Unmap(GetInteger(base), size);
}
}
- [[nodiscard]] u8* GetPointerFromRasterizerCachedMemory(VAddr vaddr) const {
- const PAddr paddr{current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};
+ [[nodiscard]] u8* GetPointerFromRasterizerCachedMemory(u64 vaddr) const {
+ const Common::PhysicalAddress paddr{
+ current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};
if (!paddr) {
return {};
@@ -68,8 +72,9 @@ struct Memory::Impl {
return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr;
}
- [[nodiscard]] u8* GetPointerFromDebugMemory(VAddr vaddr) const {
- const PAddr paddr{current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};
+ [[nodiscard]] u8* GetPointerFromDebugMemory(u64 vaddr) const {
+ const Common::PhysicalAddress paddr{
+ current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};
if (paddr == 0) {
return {};
@@ -78,11 +83,11 @@ struct Memory::Impl {
return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr;
}
- u8 Read8(const VAddr addr) {
+ u8 Read8(const Common::ProcessAddress addr) {
return Read<u8>(addr);
}
- u16 Read16(const VAddr addr) {
+ u16 Read16(const Common::ProcessAddress addr) {
if ((addr & 1) == 0) {
return Read<u16_le>(addr);
} else {
@@ -92,7 +97,7 @@ struct Memory::Impl {
}
}
- u32 Read32(const VAddr addr) {
+ u32 Read32(const Common::ProcessAddress addr) {
if ((addr & 3) == 0) {
return Read<u32_le>(addr);
} else {
@@ -102,7 +107,7 @@ struct Memory::Impl {
}
}
- u64 Read64(const VAddr addr) {
+ u64 Read64(const Common::ProcessAddress addr) {
if ((addr & 7) == 0) {
return Read<u64_le>(addr);
} else {
@@ -112,11 +117,11 @@ struct Memory::Impl {
}
}
- void Write8(const VAddr addr, const u8 data) {
+ void Write8(const Common::ProcessAddress addr, const u8 data) {
Write<u8>(addr, data);
}
- void Write16(const VAddr addr, const u16 data) {
+ void Write16(const Common::ProcessAddress addr, const u16 data) {
if ((addr & 1) == 0) {
Write<u16_le>(addr, data);
} else {
@@ -125,7 +130,7 @@ struct Memory::Impl {
}
}
- void Write32(const VAddr addr, const u32 data) {
+ void Write32(const Common::ProcessAddress addr, const u32 data) {
if ((addr & 3) == 0) {
Write<u32_le>(addr, data);
} else {
@@ -134,7 +139,7 @@ struct Memory::Impl {
}
}
- void Write64(const VAddr addr, const u64 data) {
+ void Write64(const Common::ProcessAddress addr, const u64 data) {
if ((addr & 7) == 0) {
Write<u64_le>(addr, data);
} else {
@@ -143,23 +148,23 @@ struct Memory::Impl {
}
}
- bool WriteExclusive8(const VAddr addr, const u8 data, const u8 expected) {
+ bool WriteExclusive8(const Common::ProcessAddress addr, const u8 data, const u8 expected) {
return WriteExclusive<u8>(addr, data, expected);
}
- bool WriteExclusive16(const VAddr addr, const u16 data, const u16 expected) {
+ bool WriteExclusive16(const Common::ProcessAddress addr, const u16 data, const u16 expected) {
return WriteExclusive<u16_le>(addr, data, expected);
}
- bool WriteExclusive32(const VAddr addr, const u32 data, const u32 expected) {
+ bool WriteExclusive32(const Common::ProcessAddress addr, const u32 data, const u32 expected) {
return WriteExclusive<u32_le>(addr, data, expected);
}
- bool WriteExclusive64(const VAddr addr, const u64 data, const u64 expected) {
+ bool WriteExclusive64(const Common::ProcessAddress addr, const u64 data, const u64 expected) {
return WriteExclusive<u64_le>(addr, data, expected);
}
- std::string ReadCString(VAddr vaddr, std::size_t max_length) {
+ std::string ReadCString(Common::ProcessAddress vaddr, std::size_t max_length) {
std::string string;
string.reserve(max_length);
for (std::size_t i = 0; i < max_length; ++i) {
@@ -174,8 +179,9 @@ struct Memory::Impl {
return string;
}
- void WalkBlock(const Kernel::KProcess& process, const VAddr addr, const std::size_t size,
- auto on_unmapped, auto on_memory, auto on_rasterizer, auto increment) {
+ void WalkBlock(const Kernel::KProcess& process, const Common::ProcessAddress addr,
+ const std::size_t size, auto on_unmapped, auto on_memory, auto on_rasterizer,
+ auto increment) {
const auto& page_table = process.PageTable().PageTableImpl();
std::size_t remaining_size = size;
std::size_t page_index = addr >> YUZU_PAGEBITS;
@@ -185,7 +191,7 @@ struct Memory::Impl {
const std::size_t copy_amount =
std::min(static_cast<std::size_t>(YUZU_PAGESIZE) - page_offset, remaining_size);
const auto current_vaddr =
- static_cast<VAddr>((page_index << YUZU_PAGEBITS) + page_offset);
+ static_cast<u64>((page_index << YUZU_PAGEBITS) + page_offset);
const auto [pointer, type] = page_table.pointers[page_index].PointerType();
switch (type) {
@@ -220,24 +226,24 @@ struct Memory::Impl {
}
template <bool UNSAFE>
- void ReadBlockImpl(const Kernel::KProcess& process, const VAddr src_addr, void* dest_buffer,
- const std::size_t size) {
+ void ReadBlockImpl(const Kernel::KProcess& process, const Common::ProcessAddress src_addr,
+ void* dest_buffer, const std::size_t size) {
WalkBlock(
process, src_addr, size,
[src_addr, size, &dest_buffer](const std::size_t copy_amount,
- const VAddr current_vaddr) {
+ const Common::ProcessAddress current_vaddr) {
LOG_ERROR(HW_Memory,
"Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
- current_vaddr, src_addr, size);
+ GetInteger(current_vaddr), GetInteger(src_addr), size);
std::memset(dest_buffer, 0, copy_amount);
},
[&](const std::size_t copy_amount, const u8* const src_ptr) {
std::memcpy(dest_buffer, src_ptr, copy_amount);
},
- [&](const VAddr current_vaddr, const std::size_t copy_amount,
+ [&](const Common::ProcessAddress current_vaddr, const std::size_t copy_amount,
const u8* const host_ptr) {
if constexpr (!UNSAFE) {
- system.GPU().FlushRegion(current_vaddr, copy_amount);
+ system.GPU().FlushRegion(GetInteger(current_vaddr), copy_amount);
}
std::memcpy(dest_buffer, host_ptr, copy_amount);
},
@@ -246,30 +252,34 @@ struct Memory::Impl {
});
}
- void ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
+ void ReadBlock(const Common::ProcessAddress src_addr, void* dest_buffer,
+ const std::size_t size) {
ReadBlockImpl<false>(*system.ApplicationProcess(), src_addr, dest_buffer, size);
}
- void ReadBlockUnsafe(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
+ void ReadBlockUnsafe(const Common::ProcessAddress src_addr, void* dest_buffer,
+ const std::size_t size) {
ReadBlockImpl<true>(*system.ApplicationProcess(), src_addr, dest_buffer, size);
}
template <bool UNSAFE>
- void WriteBlockImpl(const Kernel::KProcess& process, const VAddr dest_addr,
+ void WriteBlockImpl(const Kernel::KProcess& process, const Common::ProcessAddress dest_addr,
const void* src_buffer, const std::size_t size) {
WalkBlock(
process, dest_addr, size,
- [dest_addr, size](const std::size_t copy_amount, const VAddr current_vaddr) {
+ [dest_addr, size](const std::size_t copy_amount,
+ const Common::ProcessAddress current_vaddr) {
LOG_ERROR(HW_Memory,
"Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
- current_vaddr, dest_addr, size);
+ GetInteger(current_vaddr), GetInteger(dest_addr), size);
},
[&](const std::size_t copy_amount, u8* const dest_ptr) {
std::memcpy(dest_ptr, src_buffer, copy_amount);
},
- [&](const VAddr current_vaddr, const std::size_t copy_amount, u8* const host_ptr) {
+ [&](const Common::ProcessAddress current_vaddr, const std::size_t copy_amount,
+ u8* const host_ptr) {
if constexpr (!UNSAFE) {
- system.GPU().InvalidateRegion(current_vaddr, copy_amount);
+ system.GPU().InvalidateRegion(GetInteger(current_vaddr), copy_amount);
}
std::memcpy(host_ptr, src_buffer, copy_amount);
},
@@ -278,71 +288,77 @@ struct Memory::Impl {
});
}
- void WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) {
+ void WriteBlock(const Common::ProcessAddress dest_addr, const void* src_buffer,
+ const std::size_t size) {
WriteBlockImpl<false>(*system.ApplicationProcess(), dest_addr, src_buffer, size);
}
- void WriteBlockUnsafe(const VAddr dest_addr, const void* src_buffer, const std::size_t size) {
+ void WriteBlockUnsafe(const Common::ProcessAddress dest_addr, const void* src_buffer,
+ const std::size_t size) {
WriteBlockImpl<true>(*system.ApplicationProcess(), dest_addr, src_buffer, size);
}
- void ZeroBlock(const Kernel::KProcess& process, const VAddr dest_addr, const std::size_t size) {
+ void ZeroBlock(const Kernel::KProcess& process, const Common::ProcessAddress dest_addr,
+ const std::size_t size) {
WalkBlock(
process, dest_addr, size,
- [dest_addr, size](const std::size_t copy_amount, const VAddr current_vaddr) {
+ [dest_addr, size](const std::size_t copy_amount,
+ const Common::ProcessAddress current_vaddr) {
LOG_ERROR(HW_Memory,
"Unmapped ZeroBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
- current_vaddr, dest_addr, size);
+ GetInteger(current_vaddr), GetInteger(dest_addr), size);
},
[](const std::size_t copy_amount, u8* const dest_ptr) {
std::memset(dest_ptr, 0, copy_amount);
},
- [&](const VAddr current_vaddr, const std::size_t copy_amount, u8* const host_ptr) {
- system.GPU().InvalidateRegion(current_vaddr, copy_amount);
+ [&](const Common::ProcessAddress current_vaddr, const std::size_t copy_amount,
+ u8* const host_ptr) {
+ system.GPU().InvalidateRegion(GetInteger(current_vaddr), copy_amount);
std::memset(host_ptr, 0, copy_amount);
},
[](const std::size_t copy_amount) {});
}
- void CopyBlock(const Kernel::KProcess& process, VAddr dest_addr, VAddr src_addr,
- const std::size_t size) {
+ void CopyBlock(const Kernel::KProcess& process, Common::ProcessAddress dest_addr,
+ Common::ProcessAddress src_addr, const std::size_t size) {
WalkBlock(
process, dest_addr, size,
- [&](const std::size_t copy_amount, const VAddr current_vaddr) {
+ [&](const std::size_t copy_amount, const Common::ProcessAddress current_vaddr) {
LOG_ERROR(HW_Memory,
"Unmapped CopyBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
- current_vaddr, src_addr, size);
+ GetInteger(current_vaddr), GetInteger(src_addr), size);
ZeroBlock(process, dest_addr, copy_amount);
},
[&](const std::size_t copy_amount, const u8* const src_ptr) {
WriteBlockImpl<false>(process, dest_addr, src_ptr, copy_amount);
},
- [&](const VAddr current_vaddr, const std::size_t copy_amount, u8* const host_ptr) {
- system.GPU().FlushRegion(current_vaddr, copy_amount);
+ [&](const Common::ProcessAddress current_vaddr, const std::size_t copy_amount,
+ u8* const host_ptr) {
+ system.GPU().FlushRegion(GetInteger(current_vaddr), copy_amount);
WriteBlockImpl<false>(process, dest_addr, host_ptr, copy_amount);
},
[&](const std::size_t copy_amount) {
- dest_addr += static_cast<VAddr>(copy_amount);
- src_addr += static_cast<VAddr>(copy_amount);
+ dest_addr += copy_amount;
+ src_addr += copy_amount;
});
}
template <typename Callback>
- Result PerformCacheOperation(const Kernel::KProcess& process, VAddr dest_addr, std::size_t size,
- Callback&& cb) {
+ Result PerformCacheOperation(const Kernel::KProcess& process, Common::ProcessAddress dest_addr,
+ std::size_t size, Callback&& cb) {
class InvalidMemoryException : public std::exception {};
try {
WalkBlock(
process, dest_addr, size,
- [&](const std::size_t block_size, const VAddr current_vaddr) {
- LOG_ERROR(HW_Memory, "Unmapped cache maintenance @ {:#018X}", current_vaddr);
+ [&](const std::size_t block_size, const Common::ProcessAddress current_vaddr) {
+ LOG_ERROR(HW_Memory, "Unmapped cache maintenance @ {:#018X}",
+ GetInteger(current_vaddr));
throw InvalidMemoryException();
},
[&](const std::size_t block_size, u8* const host_ptr) {},
- [&](const VAddr current_vaddr, const std::size_t block_size, u8* const host_ptr) {
- cb(current_vaddr, block_size);
- },
+ [&](const Common::ProcessAddress current_vaddr, const std::size_t block_size,
+ u8* const host_ptr) { cb(current_vaddr, block_size); },
[](const std::size_t block_size) {});
} catch (InvalidMemoryException&) {
return Kernel::ResultInvalidCurrentMemory;
@@ -351,34 +367,40 @@ struct Memory::Impl {
return ResultSuccess;
}
- Result InvalidateDataCache(const Kernel::KProcess& process, VAddr dest_addr, std::size_t size) {
- auto on_rasterizer = [&](const VAddr current_vaddr, const std::size_t block_size) {
+ Result InvalidateDataCache(const Kernel::KProcess& process, Common::ProcessAddress dest_addr,
+ std::size_t size) {
+ auto on_rasterizer = [&](const Common::ProcessAddress current_vaddr,
+ const std::size_t block_size) {
// dc ivac: Invalidate to point of coherency
// GPU flush -> CPU invalidate
- system.GPU().FlushRegion(current_vaddr, block_size);
+ system.GPU().FlushRegion(GetInteger(current_vaddr), block_size);
};
return PerformCacheOperation(process, dest_addr, size, on_rasterizer);
}
- Result StoreDataCache(const Kernel::KProcess& process, VAddr dest_addr, std::size_t size) {
- auto on_rasterizer = [&](const VAddr current_vaddr, const std::size_t block_size) {
+ Result StoreDataCache(const Kernel::KProcess& process, Common::ProcessAddress dest_addr,
+ std::size_t size) {
+ auto on_rasterizer = [&](const Common::ProcessAddress current_vaddr,
+ const std::size_t block_size) {
// dc cvac: Store to point of coherency
// CPU flush -> GPU invalidate
- system.GPU().InvalidateRegion(current_vaddr, block_size);
+ system.GPU().InvalidateRegion(GetInteger(current_vaddr), block_size);
};
return PerformCacheOperation(process, dest_addr, size, on_rasterizer);
}
- Result FlushDataCache(const Kernel::KProcess& process, VAddr dest_addr, std::size_t size) {
- auto on_rasterizer = [&](const VAddr current_vaddr, const std::size_t block_size) {
+ Result FlushDataCache(const Kernel::KProcess& process, Common::ProcessAddress dest_addr,
+ std::size_t size) {
+ auto on_rasterizer = [&](const Common::ProcessAddress current_vaddr,
+ const std::size_t block_size) {
// dc civac: Store to point of coherency, and invalidate from cache
// CPU flush -> GPU invalidate
- system.GPU().InvalidateRegion(current_vaddr, block_size);
+ system.GPU().InvalidateRegion(GetInteger(current_vaddr), block_size);
};
return PerformCacheOperation(process, dest_addr, size, on_rasterizer);
}
- void MarkRegionDebug(VAddr vaddr, u64 size, bool debug) {
+ void MarkRegionDebug(u64 vaddr, u64 size, bool debug) {
if (vaddr == 0) {
return;
}
@@ -434,7 +456,7 @@ struct Memory::Impl {
}
}
- void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
+ void RasterizerMarkRegionCached(u64 vaddr, u64 size, bool cached) {
if (vaddr == 0) {
return;
}
@@ -514,10 +536,12 @@ struct Memory::Impl {
* @param target The target address to begin mapping from.
* @param type The page type to map the memory as.
*/
- void MapPages(Common::PageTable& page_table, VAddr base, u64 size, PAddr target,
- Common::PageType type) {
- LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", target, base * YUZU_PAGESIZE,
- (base + size) * YUZU_PAGESIZE);
+ void MapPages(Common::PageTable& page_table, Common::ProcessAddress base_address, u64 size,
+ Common::PhysicalAddress target, Common::PageType type) {
+ auto base = GetInteger(base_address);
+
+ LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", GetInteger(target),
+ base * YUZU_PAGESIZE, (base + size) * YUZU_PAGESIZE);
// During boot, current_page_table might not be set yet, in which case we need not flush
if (system.IsPoweredOn()) {
@@ -530,7 +554,7 @@ struct Memory::Impl {
}
}
- const VAddr end = base + size;
+ const Common::ProcessAddress end = base + size;
ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}",
base + page_table.pointers.size());
@@ -548,7 +572,7 @@ struct Memory::Impl {
while (base != end) {
page_table.pointers[base].Store(
system.DeviceMemory().GetPointer<u8>(target) - (base << YUZU_PAGEBITS), type);
- page_table.backing_addr[base] = target - (base << YUZU_PAGEBITS);
+ page_table.backing_addr[base] = GetInteger(target) - (base << YUZU_PAGEBITS);
ASSERT_MSG(page_table.pointers[base].Pointer(),
"memory mapping base yield a nullptr within the table");
@@ -559,9 +583,9 @@ struct Memory::Impl {
}
}
- [[nodiscard]] u8* GetPointerImpl(VAddr vaddr, auto on_unmapped, auto on_rasterizer) const {
+ [[nodiscard]] u8* GetPointerImpl(u64 vaddr, auto on_unmapped, auto on_rasterizer) const {
// AARCH64 masks the upper 16 bit of all memory accesses
- vaddr &= 0xffffffffffffULL;
+ vaddr = vaddr & 0xffffffffffffULL;
if (vaddr >= 1uLL << current_page_table->GetAddressSpaceBits()) {
on_unmapped();
@@ -593,15 +617,18 @@ struct Memory::Impl {
return nullptr;
}
- [[nodiscard]] u8* GetPointer(const VAddr vaddr) const {
+ [[nodiscard]] u8* GetPointer(const Common::ProcessAddress vaddr) const {
return GetPointerImpl(
- vaddr, [vaddr]() { LOG_ERROR(HW_Memory, "Unmapped GetPointer @ 0x{:016X}", vaddr); },
+ GetInteger(vaddr),
+ [vaddr]() {
+ LOG_ERROR(HW_Memory, "Unmapped GetPointer @ 0x{:016X}", GetInteger(vaddr));
+ },
[]() {});
}
- [[nodiscard]] u8* GetPointerSilent(const VAddr vaddr) const {
+ [[nodiscard]] u8* GetPointerSilent(const Common::ProcessAddress vaddr) const {
return GetPointerImpl(
- vaddr, []() {}, []() {});
+ GetInteger(vaddr), []() {}, []() {});
}
/**
@@ -616,14 +643,15 @@ struct Memory::Impl {
* @returns The instance of T read from the specified virtual address.
*/
template <typename T>
- T Read(VAddr vaddr) {
+ T Read(Common::ProcessAddress vaddr) {
T result = 0;
const u8* const ptr = GetPointerImpl(
- vaddr,
+ GetInteger(vaddr),
[vaddr]() {
- LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:016X}", sizeof(T) * 8, vaddr);
+ LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:016X}", sizeof(T) * 8,
+ GetInteger(vaddr));
},
- [&]() { system.GPU().FlushRegion(vaddr, sizeof(T)); });
+ [&]() { system.GPU().FlushRegion(GetInteger(vaddr), sizeof(T)); });
if (ptr) {
std::memcpy(&result, ptr, sizeof(T));
}
@@ -640,28 +668,28 @@ struct Memory::Impl {
* is undefined.
*/
template <typename T>
- void Write(VAddr vaddr, const T data) {
+ void Write(Common::ProcessAddress vaddr, const T data) {
u8* const ptr = GetPointerImpl(
- vaddr,
+ GetInteger(vaddr),
[vaddr, data]() {
LOG_ERROR(HW_Memory, "Unmapped Write{} @ 0x{:016X} = 0x{:016X}", sizeof(T) * 8,
- vaddr, static_cast<u64>(data));
+ GetInteger(vaddr), static_cast<u64>(data));
},
- [&]() { system.GPU().InvalidateRegion(vaddr, sizeof(T)); });
+ [&]() { system.GPU().InvalidateRegion(GetInteger(vaddr), sizeof(T)); });
if (ptr) {
std::memcpy(ptr, &data, sizeof(T));
}
}
template <typename T>
- bool WriteExclusive(VAddr vaddr, const T data, const T expected) {
+ bool WriteExclusive(Common::ProcessAddress vaddr, const T data, const T expected) {
u8* const ptr = GetPointerImpl(
- vaddr,
+ GetInteger(vaddr),
[vaddr, data]() {
LOG_ERROR(HW_Memory, "Unmapped WriteExclusive{} @ 0x{:016X} = 0x{:016X}",
- sizeof(T) * 8, vaddr, static_cast<u64>(data));
+ sizeof(T) * 8, GetInteger(vaddr), static_cast<u64>(data));
},
- [&]() { system.GPU().InvalidateRegion(vaddr, sizeof(T)); });
+ [&]() { system.GPU().InvalidateRegion(GetInteger(vaddr), sizeof(T)); });
if (ptr) {
const auto volatile_pointer = reinterpret_cast<volatile T*>(ptr);
return Common::AtomicCompareAndSwap(volatile_pointer, data, expected);
@@ -669,14 +697,14 @@ struct Memory::Impl {
return true;
}
- bool WriteExclusive128(VAddr vaddr, const u128 data, const u128 expected) {
+ bool WriteExclusive128(Common::ProcessAddress vaddr, const u128 data, const u128 expected) {
u8* const ptr = GetPointerImpl(
- vaddr,
+ GetInteger(vaddr),
[vaddr, data]() {
LOG_ERROR(HW_Memory, "Unmapped WriteExclusive128 @ 0x{:016X} = 0x{:016X}{:016X}",
- vaddr, static_cast<u64>(data[1]), static_cast<u64>(data[0]));
+ GetInteger(vaddr), static_cast<u64>(data[1]), static_cast<u64>(data[0]));
},
- [&]() { system.GPU().InvalidateRegion(vaddr, sizeof(u128)); });
+ [&]() { system.GPU().InvalidateRegion(GetInteger(vaddr), sizeof(u128)); });
if (ptr) {
const auto volatile_pointer = reinterpret_cast<volatile u64*>(ptr);
return Common::AtomicCompareAndSwap(volatile_pointer, data, expected);
@@ -702,15 +730,16 @@ void Memory::SetCurrentPageTable(Kernel::KProcess& process, u32 core_id) {
impl->SetCurrentPageTable(process, core_id);
}
-void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) {
+void Memory::MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
+ Common::PhysicalAddress target) {
impl->MapMemoryRegion(page_table, base, size, target);
}
-void Memory::UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
+void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size) {
impl->UnmapRegion(page_table, base, size);
}
-bool Memory::IsValidVirtualAddress(const VAddr vaddr) const {
+bool Memory::IsValidVirtualAddress(const Common::ProcessAddress vaddr) const {
const Kernel::KProcess& process = *system.ApplicationProcess();
const auto& page_table = process.PageTable().PageTableImpl();
const size_t page = vaddr >> YUZU_PAGEBITS;
@@ -722,9 +751,9 @@ bool Memory::IsValidVirtualAddress(const VAddr vaddr) const {
type == Common::PageType::DebugMemory;
}
-bool Memory::IsValidVirtualAddressRange(VAddr base, u64 size) const {
- VAddr end = base + size;
- VAddr page = Common::AlignDown(base, YUZU_PAGESIZE);
+bool Memory::IsValidVirtualAddressRange(Common::ProcessAddress base, u64 size) const {
+ Common::ProcessAddress end = base + size;
+ Common::ProcessAddress page = Common::AlignDown(GetInteger(base), YUZU_PAGESIZE);
for (; page < end; page += YUZU_PAGESIZE) {
if (!IsValidVirtualAddress(page)) {
@@ -735,131 +764,121 @@ bool Memory::IsValidVirtualAddressRange(VAddr base, u64 size) const {
return true;
}
-u8* Memory::GetPointer(VAddr vaddr) {
+u8* Memory::GetPointer(Common::ProcessAddress vaddr) {
return impl->GetPointer(vaddr);
}
-u8* Memory::GetPointerSilent(VAddr vaddr) {
+u8* Memory::GetPointerSilent(Common::ProcessAddress vaddr) {
return impl->GetPointerSilent(vaddr);
}
-const u8* Memory::GetPointer(VAddr vaddr) const {
+const u8* Memory::GetPointer(Common::ProcessAddress vaddr) const {
return impl->GetPointer(vaddr);
}
-u8 Memory::Read8(const VAddr addr) {
+u8 Memory::Read8(const Common::ProcessAddress addr) {
return impl->Read8(addr);
}
-u16 Memory::Read16(const VAddr addr) {
+u16 Memory::Read16(const Common::ProcessAddress addr) {
return impl->Read16(addr);
}
-u32 Memory::Read32(const VAddr addr) {
+u32 Memory::Read32(const Common::ProcessAddress addr) {
return impl->Read32(addr);
}
-u64 Memory::Read64(const VAddr addr) {
+u64 Memory::Read64(const Common::ProcessAddress addr) {
return impl->Read64(addr);
}
-void Memory::Write8(VAddr addr, u8 data) {
+void Memory::Write8(Common::ProcessAddress addr, u8 data) {
impl->Write8(addr, data);
}
-void Memory::Write16(VAddr addr, u16 data) {
+void Memory::Write16(Common::ProcessAddress addr, u16 data) {
impl->Write16(addr, data);
}
-void Memory::Write32(VAddr addr, u32 data) {
+void Memory::Write32(Common::ProcessAddress addr, u32 data) {
impl->Write32(addr, data);
}
-void Memory::Write64(VAddr addr, u64 data) {
+void Memory::Write64(Common::ProcessAddress addr, u64 data) {
impl->Write64(addr, data);
}
-bool Memory::WriteExclusive8(VAddr addr, u8 data, u8 expected) {
+bool Memory::WriteExclusive8(Common::ProcessAddress addr, u8 data, u8 expected) {
return impl->WriteExclusive8(addr, data, expected);
}
-bool Memory::WriteExclusive16(VAddr addr, u16 data, u16 expected) {
+bool Memory::WriteExclusive16(Common::ProcessAddress addr, u16 data, u16 expected) {
return impl->WriteExclusive16(addr, data, expected);
}
-bool Memory::WriteExclusive32(VAddr addr, u32 data, u32 expected) {
+bool Memory::WriteExclusive32(Common::ProcessAddress addr, u32 data, u32 expected) {
return impl->WriteExclusive32(addr, data, expected);
}
-bool Memory::WriteExclusive64(VAddr addr, u64 data, u64 expected) {
+bool Memory::WriteExclusive64(Common::ProcessAddress addr, u64 data, u64 expected) {
return impl->WriteExclusive64(addr, data, expected);
}
-bool Memory::WriteExclusive128(VAddr addr, u128 data, u128 expected) {
+bool Memory::WriteExclusive128(Common::ProcessAddress addr, u128 data, u128 expected) {
return impl->WriteExclusive128(addr, data, expected);
}
-std::string Memory::ReadCString(VAddr vaddr, std::size_t max_length) {
+std::string Memory::ReadCString(Common::ProcessAddress vaddr, std::size_t max_length) {
return impl->ReadCString(vaddr, max_length);
}
-void Memory::ReadBlock(const Kernel::KProcess& process, const VAddr src_addr, void* dest_buffer,
+void Memory::ReadBlock(const Common::ProcessAddress src_addr, void* dest_buffer,
const std::size_t size) {
- impl->ReadBlockImpl<false>(process, src_addr, dest_buffer, size);
-}
-
-void Memory::ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
impl->ReadBlock(src_addr, dest_buffer, size);
}
-void Memory::ReadBlockUnsafe(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
+void Memory::ReadBlockUnsafe(const Common::ProcessAddress src_addr, void* dest_buffer,
+ const std::size_t size) {
impl->ReadBlockUnsafe(src_addr, dest_buffer, size);
}
-void Memory::WriteBlock(const Kernel::KProcess& process, VAddr dest_addr, const void* src_buffer,
- std::size_t size) {
- impl->WriteBlockImpl<false>(process, dest_addr, src_buffer, size);
-}
-
-void Memory::WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) {
+void Memory::WriteBlock(const Common::ProcessAddress dest_addr, const void* src_buffer,
+ const std::size_t size) {
impl->WriteBlock(dest_addr, src_buffer, size);
}
-void Memory::WriteBlockUnsafe(const VAddr dest_addr, const void* src_buffer,
+void Memory::WriteBlockUnsafe(const Common::ProcessAddress dest_addr, const void* src_buffer,
const std::size_t size) {
impl->WriteBlockUnsafe(dest_addr, src_buffer, size);
}
-void Memory::CopyBlock(const Kernel::KProcess& process, VAddr dest_addr, VAddr src_addr,
+void Memory::CopyBlock(Common::ProcessAddress dest_addr, Common::ProcessAddress src_addr,
const std::size_t size) {
- impl->CopyBlock(process, dest_addr, src_addr, size);
+ impl->CopyBlock(*system.ApplicationProcess(), dest_addr, src_addr, size);
}
-void Memory::ZeroBlock(const Kernel::KProcess& process, VAddr dest_addr, const std::size_t size) {
- impl->ZeroBlock(process, dest_addr, size);
+void Memory::ZeroBlock(Common::ProcessAddress dest_addr, const std::size_t size) {
+ impl->ZeroBlock(*system.ApplicationProcess(), dest_addr, size);
}
-Result Memory::InvalidateDataCache(const Kernel::KProcess& process, VAddr dest_addr,
- const std::size_t size) {
- return impl->InvalidateDataCache(process, dest_addr, size);
+Result Memory::InvalidateDataCache(Common::ProcessAddress dest_addr, const std::size_t size) {
+ return impl->InvalidateDataCache(*system.ApplicationProcess(), dest_addr, size);
}
-Result Memory::StoreDataCache(const Kernel::KProcess& process, VAddr dest_addr,
- const std::size_t size) {
- return impl->StoreDataCache(process, dest_addr, size);
+Result Memory::StoreDataCache(Common::ProcessAddress dest_addr, const std::size_t size) {
+ return impl->StoreDataCache(*system.ApplicationProcess(), dest_addr, size);
}
-Result Memory::FlushDataCache(const Kernel::KProcess& process, VAddr dest_addr,
- const std::size_t size) {
- return impl->FlushDataCache(process, dest_addr, size);
+Result Memory::FlushDataCache(Common::ProcessAddress dest_addr, const std::size_t size) {
+ return impl->FlushDataCache(*system.ApplicationProcess(), dest_addr, size);
}
-void Memory::RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
- impl->RasterizerMarkRegionCached(vaddr, size, cached);
+void Memory::RasterizerMarkRegionCached(Common::ProcessAddress vaddr, u64 size, bool cached) {
+ impl->RasterizerMarkRegionCached(GetInteger(vaddr), size, cached);
}
-void Memory::MarkRegionDebug(VAddr vaddr, u64 size, bool debug) {
- impl->MarkRegionDebug(vaddr, size, debug);
+void Memory::MarkRegionDebug(Common::ProcessAddress vaddr, u64 size, bool debug) {
+ impl->MarkRegionDebug(GetInteger(vaddr), size, debug);
}
} // namespace Core::Memory
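The memory interface now takes Common::ProcessAddress and Common::PhysicalAddress instead of raw VAddr/PAddr, and call sites unwrap them with GetInteger() wherever a plain integer is still required (log formatting, GPU flush ranges, page-table index math). The strong typing comes from the new common/typed_address.h; the following is a heavily reduced sketch of the idea, not the actual header, which also provides arithmetic, comparison, alignment, and formatting support:

#include <cstdint>

// Reduced illustration of a typed address wrapper: construction from u64
// keeps call sites terse, but getting the integer back out is explicit, so
// mixing address kinds (process vs. physical) is caught at compile time.
template <typename Tag>
class TypedAddress {
public:
    constexpr TypedAddress(std::uint64_t address = 0) : value{address} {}
    constexpr std::uint64_t GetValue() const { return value; }

private:
    std::uint64_t value;
};

struct ProcessTag {};
struct PhysicalTag {};
using ProcessAddress = TypedAddress<ProcessTag>;
using PhysicalAddress = TypedAddress<PhysicalTag>;

template <typename Tag>
constexpr std::uint64_t GetInteger(TypedAddress<Tag> address) {
    return address.GetValue();
}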
diff --git a/src/core/memory.h b/src/core/memory.h
index 31fe699d8..72a0be813 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -6,7 +6,7 @@
#include <cstddef>
#include <memory>
#include <string>
-#include "common/common_types.h"
+#include "common/typed_address.h"
#include "core/hle/result.h"
namespace Common {
@@ -33,7 +33,7 @@ constexpr u64 YUZU_PAGESIZE = 1ULL << YUZU_PAGEBITS;
constexpr u64 YUZU_PAGEMASK = YUZU_PAGESIZE - 1;
/// Virtual user-space memory regions
-enum : VAddr {
+enum : u64 {
/// TLS (Thread-Local Storage) related.
TLS_ENTRY_SIZE = 0x200,
@@ -74,7 +74,8 @@ public:
* @param target Buffer with the memory backing the mapping. Must be of length at least
* `size`.
*/
- void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target);
+ void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
+ Common::PhysicalAddress target);
/**
* Unmaps a region of the emulated process address space.
@@ -83,7 +84,7 @@ public:
* @param base The address to begin unmapping at.
* @param size The amount of bytes to unmap.
*/
- void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size);
+ void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size);
/**
* Checks whether or not the supplied address is a valid virtual
@@ -93,7 +94,7 @@ public:
*
* @returns True if the given virtual address is valid, false otherwise.
*/
- [[nodiscard]] bool IsValidVirtualAddress(VAddr vaddr) const;
+ [[nodiscard]] bool IsValidVirtualAddress(Common::ProcessAddress vaddr) const;
/**
* Checks whether or not the supplied range of addresses are all valid
@@ -104,7 +105,7 @@ public:
*
* @returns True if all bytes in the given range are valid, false otherwise.
*/
- [[nodiscard]] bool IsValidVirtualAddressRange(VAddr base, u64 size) const;
+ [[nodiscard]] bool IsValidVirtualAddressRange(Common::ProcessAddress base, u64 size) const;
/**
* Gets a pointer to the given address.
@@ -114,11 +115,11 @@ public:
* @returns The pointer to the given address, if the address is valid.
* If the address is not valid, nullptr will be returned.
*/
- u8* GetPointer(VAddr vaddr);
- u8* GetPointerSilent(VAddr vaddr);
+ u8* GetPointer(Common::ProcessAddress vaddr);
+ u8* GetPointerSilent(Common::ProcessAddress vaddr);
template <typename T>
- T* GetPointer(VAddr vaddr) {
+ T* GetPointer(Common::ProcessAddress vaddr) {
return reinterpret_cast<T*>(GetPointer(vaddr));
}
@@ -130,10 +131,10 @@ public:
* @returns The pointer to the given address, if the address is valid.
* If the address is not valid, nullptr will be returned.
*/
- [[nodiscard]] const u8* GetPointer(VAddr vaddr) const;
+ [[nodiscard]] const u8* GetPointer(Common::ProcessAddress vaddr) const;
template <typename T>
- const T* GetPointer(VAddr vaddr) const {
+ const T* GetPointer(Common::ProcessAddress vaddr) const {
return reinterpret_cast<T*>(GetPointer(vaddr));
}
@@ -145,7 +146,7 @@ public:
*
* @returns the read 8-bit unsigned value.
*/
- u8 Read8(VAddr addr);
+ u8 Read8(Common::ProcessAddress addr);
/**
* Reads a 16-bit unsigned value from the current process' address space
@@ -155,7 +156,7 @@ public:
*
* @returns the read 16-bit unsigned value.
*/
- u16 Read16(VAddr addr);
+ u16 Read16(Common::ProcessAddress addr);
/**
* Reads a 32-bit unsigned value from the current process' address space
@@ -165,7 +166,7 @@ public:
*
* @returns the read 32-bit unsigned value.
*/
- u32 Read32(VAddr addr);
+ u32 Read32(Common::ProcessAddress addr);
/**
* Reads a 64-bit unsigned value from the current process' address space
@@ -175,7 +176,7 @@ public:
*
* @returns the read 64-bit value.
*/
- u64 Read64(VAddr addr);
+ u64 Read64(Common::ProcessAddress addr);
/**
* Writes an 8-bit unsigned integer to the given virtual address in
@@ -186,7 +187,7 @@ public:
*
* @post The memory at the given virtual address contains the specified data value.
*/
- void Write8(VAddr addr, u8 data);
+ void Write8(Common::ProcessAddress addr, u8 data);
/**
* Writes a 16-bit unsigned integer to the given virtual address in
@@ -197,7 +198,7 @@ public:
*
* @post The memory range [addr, sizeof(data)) contains the given data value.
*/
- void Write16(VAddr addr, u16 data);
+ void Write16(Common::ProcessAddress addr, u16 data);
/**
* Writes a 32-bit unsigned integer to the given virtual address in
@@ -208,7 +209,7 @@ public:
*
* @post The memory range [addr, sizeof(data)) contains the given data value.
*/
- void Write32(VAddr addr, u32 data);
+ void Write32(Common::ProcessAddress addr, u32 data);
/**
* Writes a 64-bit unsigned integer to the given virtual address in
@@ -219,7 +220,7 @@ public:
*
* @post The memory range [addr, sizeof(data)) contains the given data value.
*/
- void Write64(VAddr addr, u64 data);
+ void Write64(Common::ProcessAddress addr, u64 data);
/**
* Writes a 8-bit unsigned integer to the given virtual address in
@@ -232,7 +233,7 @@ public:
*
* @post The memory range [addr, sizeof(data)) contains the given data value.
*/
- bool WriteExclusive8(VAddr addr, u8 data, u8 expected);
+ bool WriteExclusive8(Common::ProcessAddress addr, u8 data, u8 expected);
/**
* Writes a 16-bit unsigned integer to the given virtual address in
@@ -245,7 +246,7 @@ public:
*
* @post The memory range [addr, sizeof(data)) contains the given data value.
*/
- bool WriteExclusive16(VAddr addr, u16 data, u16 expected);
+ bool WriteExclusive16(Common::ProcessAddress addr, u16 data, u16 expected);
/**
* Writes a 32-bit unsigned integer to the given virtual address in
@@ -258,7 +259,7 @@ public:
*
* @post The memory range [addr, sizeof(data)) contains the given data value.
*/
- bool WriteExclusive32(VAddr addr, u32 data, u32 expected);
+ bool WriteExclusive32(Common::ProcessAddress addr, u32 data, u32 expected);
/**
* Writes a 64-bit unsigned integer to the given virtual address in
@@ -271,7 +272,7 @@ public:
*
* @post The memory range [addr, sizeof(data)) contains the given data value.
*/
- bool WriteExclusive64(VAddr addr, u64 data, u64 expected);
+ bool WriteExclusive64(Common::ProcessAddress addr, u64 data, u64 expected);
/**
* Writes a 128-bit unsigned integer to the given virtual address in
@@ -284,7 +285,7 @@ public:
*
* @post The memory range [addr, sizeof(data)) contains the given data value.
*/
- bool WriteExclusive128(VAddr addr, u128 data, u128 expected);
+ bool WriteExclusive128(Common::ProcessAddress addr, u128 data, u128 expected);
/**
* Reads a null-terminated string from the given virtual address.
@@ -301,27 +302,7 @@ public:
*
* @returns The read string.
*/
- std::string ReadCString(VAddr vaddr, std::size_t max_length);
-
- /**
- * Reads a contiguous block of bytes from a specified process' address space.
- *
- * @param process The process to read the data from.
- * @param src_addr The virtual address to begin reading from.
- * @param dest_buffer The buffer to place the read bytes into.
- * @param size The amount of data to read, in bytes.
- *
- * @note If a size of 0 is specified, then this function reads nothing and
- * no attempts to access memory are made at all.
- *
- * @pre dest_buffer must be at least size bytes in length, otherwise a
- * buffer overrun will occur.
- *
- * @post The range [dest_buffer, size) contains the read bytes from the
- * process' address space.
- */
- void ReadBlock(const Kernel::KProcess& process, VAddr src_addr, void* dest_buffer,
- std::size_t size);
+ std::string ReadCString(Common::ProcessAddress vaddr, std::size_t max_length);
/**
* Reads a contiguous block of bytes from the current process' address space.
@@ -339,7 +320,7 @@ public:
* @post The range [dest_buffer, size) contains the read bytes from the
* current process' address space.
*/
- void ReadBlock(VAddr src_addr, void* dest_buffer, std::size_t size);
+ void ReadBlock(Common::ProcessAddress src_addr, void* dest_buffer, std::size_t size);
/**
* Reads a contiguous block of bytes from the current process' address space.
@@ -358,30 +339,7 @@ public:
* @post The range [dest_buffer, size) contains the read bytes from the
* current process' address space.
*/
- void ReadBlockUnsafe(VAddr src_addr, void* dest_buffer, std::size_t size);
-
- /**
- * Writes a range of bytes into a given process' address space at the specified
- * virtual address.
- *
- * @param process The process to write data into the address space of.
- * @param dest_addr The destination virtual address to begin writing the data at.
- * @param src_buffer The data to write into the process' address space.
- * @param size The size of the data to write, in bytes.
- *
- * @post The address range [dest_addr, size) in the process' address space
- * contains the data that was within src_buffer.
- *
- * @post If an attempt is made to write into an unmapped region of memory, the writes
- * will be ignored and an error will be logged.
- *
- * @post If a write is performed into a region of memory that is considered cached
- * rasterizer memory, will cause the currently active rasterizer to be notified
- * and will mark that region as invalidated to caches that the active
- * graphics backend may be maintaining over the course of execution.
- */
- void WriteBlock(const Kernel::KProcess& process, VAddr dest_addr, const void* src_buffer,
- std::size_t size);
+ void ReadBlockUnsafe(Common::ProcessAddress src_addr, void* dest_buffer, std::size_t size);
/**
* Writes a range of bytes into the current process' address space at the specified
@@ -402,7 +360,7 @@ public:
* and will mark that region as invalidated to caches that the active
* graphics backend may be maintaining over the course of execution.
*/
- void WriteBlock(VAddr dest_addr, const void* src_buffer, std::size_t size);
+ void WriteBlock(Common::ProcessAddress dest_addr, const void* src_buffer, std::size_t size);
/**
* Writes a range of bytes into the current process' address space at the specified
@@ -420,13 +378,13 @@ public:
* will be ignored and an error will be logged.
*
*/
- void WriteBlockUnsafe(VAddr dest_addr, const void* src_buffer, std::size_t size);
+ void WriteBlockUnsafe(Common::ProcessAddress dest_addr, const void* src_buffer,
+ std::size_t size);
/**
* Copies data within a process' address space to another location within the
* same address space.
*
- * @param process The process that will have data copied within its address space.
* @param dest_addr The destination virtual address to begin copying the data into.
* @param src_addr The source virtual address to begin copying the data from.
* @param size The size of the data to copy, in bytes.
@@ -434,54 +392,50 @@ public:
* @post The range [dest_addr, size) within the process' address space contains the
* same data within the range [src_addr, size).
*/
- void CopyBlock(const Kernel::KProcess& process, VAddr dest_addr, VAddr src_addr,
+ void CopyBlock(Common::ProcessAddress dest_addr, Common::ProcessAddress src_addr,
std::size_t size);
/**
* Zeros a range of bytes within the current process' address space at the specified
* virtual address.
*
- * @param process The process that will have data zeroed within its address space.
* @param dest_addr The destination virtual address to zero the data from.
* @param size The size of the range to zero out, in bytes.
*
* @post The range [dest_addr, size) within the process' address space contains the
* value 0.
*/
- void ZeroBlock(const Kernel::KProcess& process, VAddr dest_addr, std::size_t size);
+ void ZeroBlock(Common::ProcessAddress dest_addr, std::size_t size);
/**
* Invalidates a range of bytes within the current process' address space at the specified
* virtual address.
*
- * @param process The process that will have data invalidated within its address space.
* @param dest_addr The destination virtual address to invalidate the data from.
* @param size The size of the range to invalidate, in bytes.
*
*/
- Result InvalidateDataCache(const Kernel::KProcess& process, VAddr dest_addr, std::size_t size);
+ Result InvalidateDataCache(Common::ProcessAddress dest_addr, std::size_t size);
/**
* Stores a range of bytes within the current process' address space at the specified
* virtual address.
*
- * @param process The process that will have data stored within its address space.
* @param dest_addr The destination virtual address to store the data from.
* @param size The size of the range to store, in bytes.
*
*/
- Result StoreDataCache(const Kernel::KProcess& process, VAddr dest_addr, std::size_t size);
+ Result StoreDataCache(Common::ProcessAddress dest_addr, std::size_t size);
/**
* Flushes a range of bytes within the current process' address space at the specified
* virtual address.
*
- * @param process The process that will have data flushed within its address space.
* @param dest_addr The destination virtual address to flush the data from.
* @param size The size of the range to flush, in bytes.
*
*/
- Result FlushDataCache(const Kernel::KProcess& process, VAddr dest_addr, std::size_t size);
+ Result FlushDataCache(Common::ProcessAddress dest_addr, std::size_t size);
/**
* Marks each page within the specified address range as cached or uncached.
@@ -491,7 +445,7 @@ public:
* @param cached Whether or not any pages within the address range should be
* marked as cached or uncached.
*/
- void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached);
+ void RasterizerMarkRegionCached(Common::ProcessAddress vaddr, u64 size, bool cached);
/**
* Marks each page within the specified address range as debug or non-debug.
@@ -502,7 +456,7 @@ public:
* @param debug Whether or not any pages within the address range should be
* marked as debug or non-debug.
*/
- void MarkRegionDebug(VAddr vaddr, u64 size, bool debug);
+ void MarkRegionDebug(Common::ProcessAddress vaddr, u64 size, bool debug);
private:
Core::System& system;
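
For orientation, a minimal sketch of how a caller uses the retyped memory API above; it assumes Common::ProcessAddress is constructible from a raw u64 and that GetInteger() (as used in the cheat_engine and reporter hunks that follow) recovers the underlying integer:

u32 ReadWord(Core::Memory::Memory& memory, u64 raw_addr) {
    // Hypothetical helper, not part of this change: wraps the raw address in the
    // typed Common::ProcessAddress before calling the new ReadBlock overload.
    const Common::ProcessAddress addr{raw_addr};
    u32 value{};
    memory.ReadBlock(addr, &value, sizeof(value));
    return value;
}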
diff --git a/src/core/memory/cheat_engine.cpp b/src/core/memory/cheat_engine.cpp
index de729955f..8742dd164 100644
--- a/src/core/memory/cheat_engine.cpp
+++ b/src/core/memory/cheat_engine.cpp
@@ -39,11 +39,11 @@ StandardVmCallbacks::StandardVmCallbacks(System& system_, const CheatProcessMeta
StandardVmCallbacks::~StandardVmCallbacks() = default;
void StandardVmCallbacks::MemoryRead(VAddr address, void* data, u64 size) {
- system.Memory().ReadBlock(SanitizeAddress(address), data, size);
+ system.ApplicationMemory().ReadBlock(SanitizeAddress(address), data, size);
}
void StandardVmCallbacks::MemoryWrite(VAddr address, const void* data, u64 size) {
- system.Memory().WriteBlock(SanitizeAddress(address), data, size);
+ system.ApplicationMemory().WriteBlock(SanitizeAddress(address), data, size);
}
u64 StandardVmCallbacks::HidKeysDown() {
@@ -201,17 +201,17 @@ void CheatEngine::Initialize() {
const auto& page_table = system.ApplicationProcess()->PageTable();
metadata.heap_extents = {
- .base = page_table.GetHeapRegionStart(),
+ .base = GetInteger(page_table.GetHeapRegionStart()),
.size = page_table.GetHeapRegionSize(),
};
metadata.address_space_extents = {
- .base = page_table.GetAddressSpaceStart(),
+ .base = GetInteger(page_table.GetAddressSpaceStart()),
.size = page_table.GetAddressSpaceSize(),
};
metadata.alias_extents = {
- .base = page_table.GetAliasCodeRegionStart(),
+ .base = GetInteger(page_table.GetAliasCodeRegionStart()),
.size = page_table.GetAliasCodeRegionSize(),
};
diff --git a/src/core/reporter.cpp b/src/core/reporter.cpp
index 004f2e57a..6c3dc7369 100644
--- a/src/core/reporter.cpp
+++ b/src/core/reporter.cpp
@@ -117,8 +117,8 @@ json GetProcessorStateDataAuto(Core::System& system) {
arm.SaveContext(context);
return GetProcessorStateData(process->Is64BitProcess() ? "AArch64" : "AArch32",
- process->PageTable().GetCodeRegionStart(), context.sp, context.pc,
- context.pstate, context.cpu_registers);
+ GetInteger(process->PageTable().GetCodeRegionStart()), context.sp,
+ context.pc, context.pstate, context.cpu_registers);
}
json GetBacktraceData(Core::System& system) {
@@ -264,7 +264,7 @@ void Reporter::SaveUnimplementedFunctionReport(Service::HLERequestContext& ctx,
const auto title_id = system.GetApplicationProcessProgramID();
auto out = GetFullDataAuto(timestamp, title_id, system);
- auto function_out = GetHLERequestContextData(ctx, system.Memory());
+ auto function_out = GetHLERequestContextData(ctx, system.ApplicationMemory());
function_out["command_id"] = command_id;
function_out["function_name"] = name;
function_out["service_name"] = service_name;
diff --git a/src/tests/CMakeLists.txt b/src/tests/CMakeLists.txt
index ae84408bc..39b774c98 100644
--- a/src/tests/CMakeLists.txt
+++ b/src/tests/CMakeLists.txt
@@ -4,6 +4,7 @@
add_executable(tests
common/bit_field.cpp
common/cityhash.cpp
+ common/container_hash.cpp
common/fibers.cpp
common/host_memory.cpp
common/param_package.cpp
diff --git a/src/tests/common/container_hash.cpp b/src/tests/common/container_hash.cpp
new file mode 100644
index 000000000..dc45565ef
--- /dev/null
+++ b/src/tests/common/container_hash.cpp
@@ -0,0 +1,44 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "common/common_types.h"
+#include "common/container_hash.h"
+
+TEST_CASE("ContainerHash", "[common]") {
+ constexpr std::array<u8, 32> U8Values{
+ 114, 10, 238, 189, 199, 242, 86, 96, 53, 193, 195, 247, 249, 56, 253, 61,
+ 205, 3, 172, 4, 210, 197, 43, 72, 103, 8, 99, 89, 5, 97, 68, 196,
+ };
+ constexpr std::array<u16, 32> U16Values{
+ 61586, 49151, 3313, 11641, 31695, 54795, 46764, 20965, 23287, 14039, 19265,
+ 49093, 58932, 22518, 27139, 42825, 57417, 54237, 48057, 14586, 42813, 32994,
+ 33970, 45501, 5619, 15895, 33227, 27509, 25391, 37275, 60218, 17599,
+ };
+ constexpr std::array<u32, 32> U32Values{
+ 3838402410U, 2029146863U, 1730869921U, 985528872U, 186773874U, 2094639868U, 3324775932U,
+ 1795512424U, 2571165571U, 3256934519U, 2358691590U, 2752682538U, 1484336451U, 378124520U,
+ 3463015699U, 3395942161U, 1263211979U, 3473632889U, 3039822212U, 2068707357U, 2223837919U,
+ 1823232191U, 1583884041U, 1264393380U, 4087566993U, 3188607101U, 3933680362U, 1464520765U,
+ 1786838406U, 1311734848U, 2773642241U, 3993641692U,
+ };
+ constexpr std::array<u64, 32> U64Values{
+ 5908025796157537817ULL, 10947547850358315100ULL, 844798943576724669ULL,
+ 7999662937458523703ULL, 4006550374705895164ULL, 1832550525423503632ULL,
+ 9323088254855830976ULL, 12028890075598379412ULL, 6021511300787826236ULL,
+ 7864675007938747948ULL, 18099387408859708806ULL, 6438638299316820708ULL,
+ 9029399285648501543ULL, 18195459433089960253ULL, 17214335092761966083ULL,
+ 5549347964591337833ULL, 14899526073304962015ULL, 5058883181561464475ULL,
+ 7436311795731206973ULL, 7535129567768649864ULL, 1287169596809258072ULL,
+ 8237671246353565927ULL, 1715230541978016153ULL, 8443157615068813300ULL,
+ 6098675262328527839ULL, 704652094100376853ULL, 1303411723202926503ULL,
+ 7808312933946424854ULL, 6863726670433556594ULL, 9870361541383217495ULL,
+ 9273671094091079488ULL, 17541434976160119010ULL,
+ };
+
+ REQUIRE(Common::HashValue(U8Values) == 5867183267093890552ULL);
+ REQUIRE(Common::HashValue(U16Values) == 9594135570564347135ULL);
+ REQUIRE(Common::HashValue(U32Values) == 13123757214696618460ULL);
+ REQUIRE(Common::HashValue(U64Values) == 7296500016546938380ULL);
+}
diff --git a/src/tests/common/range_map.cpp b/src/tests/common/range_map.cpp
index d301ac5f6..faaefd49f 100644
--- a/src/tests/common/range_map.cpp
+++ b/src/tests/common/range_map.cpp
@@ -21,9 +21,9 @@ TEST_CASE("Range Map: Setup", "[video_core]") {
my_map.Map(4000, 4500, MappedEnum::Valid_2);
my_map.Map(4200, 4400, MappedEnum::Valid_2);
my_map.Map(4200, 4400, MappedEnum::Valid_1);
- REQUIRE(my_map.GetContinousSizeFrom(4200) == 200);
- REQUIRE(my_map.GetContinousSizeFrom(3000) == 200);
- REQUIRE(my_map.GetContinousSizeFrom(2900) == 0);
+ REQUIRE(my_map.GetContinuousSizeFrom(4200) == 200);
+ REQUIRE(my_map.GetContinuousSizeFrom(3000) == 200);
+ REQUIRE(my_map.GetContinuousSizeFrom(2900) == 0);
REQUIRE(my_map.GetValueAt(2900) == MappedEnum::Invalid);
REQUIRE(my_map.GetValueAt(3100) == MappedEnum::Valid_1);
@@ -38,20 +38,20 @@ TEST_CASE("Range Map: Setup", "[video_core]") {
my_map.Unmap(0, 6000);
for (u64 address = 0; address < 10000; address += 1000) {
- REQUIRE(my_map.GetContinousSizeFrom(address) == 0);
+ REQUIRE(my_map.GetContinuousSizeFrom(address) == 0);
}
my_map.Map(1000, 3000, MappedEnum::Valid_1);
my_map.Map(4000, 5000, MappedEnum::Valid_1);
my_map.Map(2500, 4100, MappedEnum::Valid_1);
- REQUIRE(my_map.GetContinousSizeFrom(1000) == 4000);
+ REQUIRE(my_map.GetContinuousSizeFrom(1000) == 4000);
my_map.Map(1000, 3000, MappedEnum::Valid_1);
my_map.Map(4000, 5000, MappedEnum::Valid_2);
my_map.Map(2500, 4100, MappedEnum::Valid_3);
- REQUIRE(my_map.GetContinousSizeFrom(1000) == 1500);
- REQUIRE(my_map.GetContinousSizeFrom(2500) == 1600);
- REQUIRE(my_map.GetContinousSizeFrom(4100) == 900);
+ REQUIRE(my_map.GetContinuousSizeFrom(1000) == 1500);
+ REQUIRE(my_map.GetContinuousSizeFrom(2500) == 1600);
+ REQUIRE(my_map.GetContinuousSizeFrom(4100) == 900);
REQUIRE(my_map.GetValueAt(900) == MappedEnum::Invalid);
REQUIRE(my_map.GetValueAt(1000) == MappedEnum::Valid_1);
REQUIRE(my_map.GetValueAt(2500) == MappedEnum::Valid_3);
@@ -59,8 +59,8 @@ TEST_CASE("Range Map: Setup", "[video_core]") {
REQUIRE(my_map.GetValueAt(5000) == MappedEnum::Invalid);
my_map.Map(2000, 6000, MappedEnum::Valid_3);
- REQUIRE(my_map.GetContinousSizeFrom(1000) == 1000);
- REQUIRE(my_map.GetContinousSizeFrom(3000) == 3000);
+ REQUIRE(my_map.GetContinuousSizeFrom(1000) == 1000);
+ REQUIRE(my_map.GetContinuousSizeFrom(3000) == 3000);
REQUIRE(my_map.GetValueAt(1000) == MappedEnum::Valid_1);
REQUIRE(my_map.GetValueAt(1999) == MappedEnum::Valid_1);
REQUIRE(my_map.GetValueAt(1500) == MappedEnum::Valid_1);
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 1f656ffa8..abdc593df 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -1442,7 +1442,7 @@ void BufferCache<P>::UpdateVertexBuffer(u32 index) {
}
if (!gpu_memory->IsWithinGPUAddressRange(gpu_addr_end)) {
address_size =
- static_cast<u32>(gpu_memory->MaxContinousRange(gpu_addr_begin, address_size));
+ static_cast<u32>(gpu_memory->MaxContinuousRange(gpu_addr_begin, address_size));
}
const u32 size = address_size; // TODO: Analyze stride and number of vertices
vertex_buffers[index] = Binding{
diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp
index 050b11874..3c5317777 100644
--- a/src/video_core/gpu_thread.cpp
+++ b/src/video_core/gpu_thread.cpp
@@ -31,8 +31,10 @@ static void RunThread(std::stop_token stop_token, Core::System& system,
auto current_context = context.Acquire();
VideoCore::RasterizerInterface* const rasterizer = renderer.ReadRasterizer();
+ CommandDataContainer next;
+
while (!stop_token.stop_requested()) {
- CommandDataContainer next = state.queue.PopWait(stop_token);
+ state.queue.PopWait(next, stop_token);
if (stop_token.stop_requested()) {
break;
}
@@ -116,7 +118,7 @@ u64 ThreadManager::PushCommand(CommandData&& command_data, bool block) {
std::unique_lock lk(state.write_lock);
const u64 fence{++state.last_fence};
- state.queue.Push(CommandDataContainer(std::move(command_data), fence, block));
+ state.queue.EmplaceWait(std::move(command_data), fence, block);
if (block) {
Common::CondvarWait(state.cv, lk, thread.get_stop_token(), [this, fence] {
diff --git a/src/video_core/gpu_thread.h b/src/video_core/gpu_thread.h
index 90bcb5958..43940bd6d 100644
--- a/src/video_core/gpu_thread.h
+++ b/src/video_core/gpu_thread.h
@@ -10,8 +10,8 @@
#include <thread>
#include <variant>
+#include "common/bounded_threadsafe_queue.h"
#include "common/polyfill_thread.h"
-#include "common/threadsafe_queue.h"
#include "video_core/framebuffer_config.h"
namespace Tegra {
@@ -97,7 +97,7 @@ struct CommandDataContainer {
/// Struct used to synchronize the GPU thread
struct SynchState final {
- using CommandQueue = Common::MPSCQueue<CommandDataContainer, true>;
+ using CommandQueue = Common::MPSCQueue<CommandDataContainer>;
std::mutex write_lock;
CommandQueue queue;
u64 last_fence{};
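
The gpu_thread changes above swap the old Push/PopWait pair for the bounded queue's EmplaceWait and out-parameter PopWait. A condensed sketch of the producer/consumer shape, with command handling elided and under the assumption that EmplaceWait blocks while the queue is full:

Common::MPSCQueue<CommandDataContainer> queue;

// Producer side (ThreadManager::PushCommand above).
void Push(CommandData&& data, u64 fence, bool block) {
    queue.EmplaceWait(std::move(data), fence, block); // constructs the element in place
}

// Consumer side (RunThread above).
void Consume(std::stop_token stop_token) {
    CommandDataContainer next;
    while (!stop_token.stop_requested()) {
        queue.PopWait(next, stop_token); // fills 'next'; wakes early on stop request
        if (stop_token.stop_requested()) {
            break;
        }
        // ... process next ...
    }
}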
diff --git a/src/video_core/macro/macro.cpp b/src/video_core/macro/macro.cpp
index 82ad0477d..905505ca1 100644
--- a/src/video_core/macro/macro.cpp
+++ b/src/video_core/macro/macro.cpp
@@ -6,7 +6,7 @@
#include <optional>
#include <span>
-#include <boost/container_hash/hash.hpp>
+#include "common/container_hash.h"
#include <fstream>
#include "common/assert.h"
@@ -89,7 +89,7 @@ void MacroEngine::Execute(u32 method, const std::vector<u32>& parameters) {
if (!mid_method.has_value()) {
cache_info.lle_program = Compile(macro_code->second);
- cache_info.hash = boost::hash_value(macro_code->second);
+ cache_info.hash = Common::HashValue(macro_code->second);
if (Settings::values.dump_macros) {
Dump(cache_info.hash, macro_code->second);
}
@@ -100,7 +100,7 @@ void MacroEngine::Execute(u32 method, const std::vector<u32>& parameters) {
code.resize(macro_cached.size() - rebased_method);
std::memcpy(code.data(), macro_cached.data() + rebased_method,
code.size() * sizeof(u32));
- cache_info.hash = boost::hash_value(code);
+ cache_info.hash = Common::HashValue(code);
cache_info.lle_program = Compile(code);
if (Settings::values.dump_macros) {
Dump(cache_info.hash, code);
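
Since the macro cache now hashes with the in-tree helper instead of Boost, a small sketch of the intended drop-in usage; the 64-bit result type is an assumption, consistent with the new container_hash unit test earlier in this diff:

#include <vector>
#include "common/common_types.h"
#include "common/container_hash.h"

u64 HashMacroCode(const std::vector<u32>& code) {
    // Same call shape as the boost::hash_value uses it replaces above.
    return Common::HashValue(code);
}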
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index 83924475b..01fb5b546 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -22,7 +22,7 @@ std::atomic<size_t> MemoryManager::unique_identifier_generator{};
MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64 big_page_bits_,
u64 page_bits_)
- : system{system_}, memory{system.Memory()}, device_memory{system.DeviceMemory()},
+ : system{system_}, memory{system.ApplicationMemory()}, device_memory{system.DeviceMemory()},
address_space_bits{address_space_bits_}, page_bits{page_bits_}, big_page_bits{big_page_bits_},
entries{}, big_entries{}, page_table{address_space_bits, address_space_bits + page_bits - 38,
page_bits != big_page_bits ? page_bits : 0},
@@ -43,7 +43,7 @@ MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64
big_entries.resize(big_page_table_size / 32, 0);
big_page_table_cpu.resize(big_page_table_size);
- big_page_continous.resize(big_page_table_size / continous_bits, 0);
+ big_page_continuous.resize(big_page_table_size / continuous_bits, 0);
entries.resize(page_table_size / 32, 0);
}
@@ -85,17 +85,17 @@ PTEKind MemoryManager::GetPageKind(GPUVAddr gpu_addr) const {
return kind_map.GetValueAt(gpu_addr);
}
-inline bool MemoryManager::IsBigPageContinous(size_t big_page_index) const {
- const u64 entry_mask = big_page_continous[big_page_index / continous_bits];
- const size_t sub_index = big_page_index % continous_bits;
+inline bool MemoryManager::IsBigPageContinuous(size_t big_page_index) const {
+ const u64 entry_mask = big_page_continuous[big_page_index / continuous_bits];
+ const size_t sub_index = big_page_index % continuous_bits;
return ((entry_mask >> sub_index) & 0x1ULL) != 0;
}
-inline void MemoryManager::SetBigPageContinous(size_t big_page_index, bool value) {
- const u64 continous_mask = big_page_continous[big_page_index / continous_bits];
- const size_t sub_index = big_page_index % continous_bits;
- big_page_continous[big_page_index / continous_bits] =
- (~(1ULL << sub_index) & continous_mask) | (value ? 1ULL << sub_index : 0);
+inline void MemoryManager::SetBigPageContinuous(size_t big_page_index, bool value) {
+ const u64 continuous_mask = big_page_continuous[big_page_index / continuous_bits];
+ const size_t sub_index = big_page_index % continuous_bits;
+ big_page_continuous[big_page_index / continuous_bits] =
+ (~(1ULL << sub_index) & continuous_mask) | (value ? 1ULL << sub_index : 0);
}
template <MemoryManager::EntryType entry_type>
@@ -140,7 +140,7 @@ GPUVAddr MemoryManager::BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr
const auto index = PageEntryIndex<true>(current_gpu_addr);
const u32 sub_value = static_cast<u32>(current_cpu_addr >> cpu_page_bits);
big_page_table_cpu[index] = sub_value;
- const bool is_continous = ([&] {
+ const bool is_continuous = ([&] {
uintptr_t base_ptr{
reinterpret_cast<uintptr_t>(memory.GetPointerSilent(current_cpu_addr))};
if (base_ptr == 0) {
@@ -156,7 +156,7 @@ GPUVAddr MemoryManager::BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr
}
return true;
})();
- SetBigPageContinous(index, is_continous);
+ SetBigPageContinuous(index, is_continuous);
}
remaining_size -= big_page_size;
}
@@ -378,7 +378,7 @@ void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std:
if constexpr (is_safe) {
rasterizer->FlushRegion(cpu_addr_base, copy_amount, which);
}
- if (!IsBigPageContinous(page_index)) [[unlikely]] {
+ if (!IsBigPageContinuous(page_index)) [[unlikely]] {
memory.ReadBlockUnsafe(cpu_addr_base, dest_buffer, copy_amount);
} else {
u8* physical = memory.GetPointer(cpu_addr_base);
@@ -427,7 +427,7 @@ void MemoryManager::WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffe
if constexpr (is_safe) {
rasterizer->InvalidateRegion(cpu_addr_base, copy_amount, which);
}
- if (!IsBigPageContinous(page_index)) [[unlikely]] {
+ if (!IsBigPageContinuous(page_index)) [[unlikely]] {
memory.WriteBlockUnsafe(cpu_addr_base, src_buffer, copy_amount);
} else {
u8* physical = memory.GetPointer(cpu_addr_base);
@@ -512,7 +512,7 @@ bool MemoryManager::IsMemoryDirty(GPUVAddr gpu_addr, size_t size,
return result;
}
-size_t MemoryManager::MaxContinousRange(GPUVAddr gpu_addr, size_t size) const {
+size_t MemoryManager::MaxContinuousRange(GPUVAddr gpu_addr, size_t size) const {
std::optional<VAddr> old_page_addr{};
size_t range_so_far = 0;
bool result{false};
@@ -553,7 +553,7 @@ size_t MemoryManager::MaxContinousRange(GPUVAddr gpu_addr, size_t size) const {
}
size_t MemoryManager::GetMemoryLayoutSize(GPUVAddr gpu_addr, size_t max_size) const {
- return kind_map.GetContinousSizeFrom(gpu_addr);
+ return kind_map.GetContinuousSizeFrom(gpu_addr);
}
void MemoryManager::InvalidateRegion(GPUVAddr gpu_addr, size_t size,
@@ -594,7 +594,7 @@ void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std
bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) const {
if (GetEntry<true>(gpu_addr) == EntryType::Mapped) [[likely]] {
size_t page_index = gpu_addr >> big_page_bits;
- if (IsBigPageContinous(page_index)) [[likely]] {
+ if (IsBigPageContinuous(page_index)) [[likely]] {
const std::size_t page{(page_index & big_page_mask) + size};
return page <= big_page_size;
}
@@ -608,7 +608,7 @@ bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) const {
return page <= Core::Memory::YUZU_PAGESIZE;
}
-bool MemoryManager::IsContinousRange(GPUVAddr gpu_addr, std::size_t size) const {
+bool MemoryManager::IsContinuousRange(GPUVAddr gpu_addr, std::size_t size) const {
std::optional<VAddr> old_page_addr{};
bool result{true};
auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset,
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h
index 51ae2de68..fbbe856c4 100644
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ -94,7 +94,7 @@ public:
/**
* Checks if a gpu region is mapped by a single range of cpu addresses.
*/
- [[nodiscard]] bool IsContinousRange(GPUVAddr gpu_addr, std::size_t size) const;
+ [[nodiscard]] bool IsContinuousRange(GPUVAddr gpu_addr, std::size_t size) const;
/**
* Checks if a gpu region is mapped entirely.
@@ -123,7 +123,7 @@ public:
bool IsMemoryDirty(GPUVAddr gpu_addr, size_t size,
VideoCommon::CacheType which = VideoCommon::CacheType::All) const;
- size_t MaxContinousRange(GPUVAddr gpu_addr, size_t size) const;
+ size_t MaxContinuousRange(GPUVAddr gpu_addr, size_t size) const;
bool IsWithinGPUAddressRange(GPUVAddr gpu_addr) const {
return gpu_addr < address_space_size;
@@ -158,8 +158,8 @@ private:
}
}
- inline bool IsBigPageContinous(size_t big_page_index) const;
- inline void SetBigPageContinous(size_t big_page_index, bool value);
+ inline bool IsBigPageContinuous(size_t big_page_index) const;
+ inline void SetBigPageContinuous(size_t big_page_index, bool value);
template <bool is_gpu_address>
void GetSubmappedRangeImpl(
@@ -213,10 +213,10 @@ private:
Common::RangeMap<GPUVAddr, PTEKind> kind_map;
Common::VirtualBuffer<u32> big_page_table_cpu;
- std::vector<u64> big_page_continous;
+ std::vector<u64> big_page_continuous;
std::vector<std::pair<VAddr, std::size_t>> page_stash{};
- static constexpr size_t continous_bits = 64;
+ static constexpr size_t continuous_bits = 64;
const size_t unique_identifier;
std::unique_ptr<VideoCommon::InvalidationAccumulator> accumulator;
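
The continuity bookkeeping renamed above is one bit per big page, packed 64 to a word. As a self-contained sketch of the same arithmetic:

#include <cstddef>
#include <cstdint>
#include <vector>

constexpr std::size_t continuous_bits = 64;
std::vector<std::uint64_t> big_page_continuous; // one bit per big page

bool IsBigPageContinuous(std::size_t big_page_index) {
    const std::uint64_t word = big_page_continuous[big_page_index / continuous_bits];
    const std::size_t bit = big_page_index % continuous_bits;
    return ((word >> bit) & 1ULL) != 0;
}

void SetBigPageContinuous(std::size_t big_page_index, bool value) {
    const std::size_t bit = big_page_index % continuous_bits;
    std::uint64_t& word = big_page_continuous[big_page_index / continuous_bits];
    word = (word & ~(1ULL << bit)) | (value ? 1ULL << bit : 0);
}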
diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
index ca52e2389..5dce51be8 100644
--- a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
+++ b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
@@ -166,7 +166,7 @@ struct FormatTuple {
{VK_FORMAT_R16G16_UINT, Attachable | Storage}, // R16G16_UINT
{VK_FORMAT_R16G16_SINT, Attachable | Storage}, // R16G16_SINT
{VK_FORMAT_R16G16_SNORM, Attachable | Storage}, // R16G16_SNORM
- {VK_FORMAT_UNDEFINED}, // R32G32B32_FLOAT
+ {VK_FORMAT_R32G32B32_SFLOAT}, // R32G32B32_FLOAT
{VK_FORMAT_A8B8G8R8_SRGB_PACK32, Attachable}, // A8B8G8R8_SRGB
{VK_FORMAT_R8G8_UNORM, Attachable | Storage}, // R8G8_UNORM
{VK_FORMAT_R8G8_SNORM, Attachable | Storage}, // R8G8_SNORM
@@ -234,11 +234,6 @@ FormatInfo SurfaceFormat(const Device& device, FormatType format_type, bool with
PixelFormat pixel_format) {
ASSERT(static_cast<size_t>(pixel_format) < std::size(tex_format_tuples));
FormatTuple tuple = tex_format_tuples[static_cast<size_t>(pixel_format)];
- if (tuple.format == VK_FORMAT_UNDEFINED) {
- UNIMPLEMENTED_MSG("Unimplemented texture format with pixel format={}", pixel_format);
- return FormatInfo{VK_FORMAT_A8B8G8R8_UNORM_PACK32, true, true};
- }
-
// Use A8B8G8R8_UNORM on hardware that doesn't support ASTC natively
if (!device.IsOptimalAstcSupported() && VideoCore::Surface::IsPixelFormatASTC(pixel_format)) {
const bool is_srgb = with_srgb && VideoCore::Surface::IsPixelFormatSRGB(pixel_format);
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.cpp b/src/video_core/renderer_vulkan/vk_scheduler.cpp
index e03685af1..b264e6ada 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.cpp
+++ b/src/video_core/renderer_vulkan/vk_scheduler.cpp
@@ -47,14 +47,15 @@ Scheduler::Scheduler(const Device& device_, StateTracker& state_tracker_)
Scheduler::~Scheduler() = default;
void Scheduler::Flush(VkSemaphore signal_semaphore, VkSemaphore wait_semaphore) {
+ // When flushing, we only send data to the worker thread; no waiting is necessary.
SubmitExecution(signal_semaphore, wait_semaphore);
AllocateNewContext();
}
void Scheduler::Finish(VkSemaphore signal_semaphore, VkSemaphore wait_semaphore) {
+ // When finishing, we need to wait for the submission to have executed on the device.
const u64 presubmit_tick = CurrentTick();
SubmitExecution(signal_semaphore, wait_semaphore);
- WaitWorker();
Wait(presubmit_tick);
AllocateNewContext();
}
@@ -63,8 +64,14 @@ void Scheduler::WaitWorker() {
MICROPROFILE_SCOPE(Vulkan_WaitForWorker);
DispatchWork();
- std::unique_lock lock{work_mutex};
- wait_cv.wait(lock, [this] { return work_queue.empty(); });
+ // Ensure the queue is drained.
+ {
+ std::unique_lock ql{queue_mutex};
+ event_cv.wait(ql, [this] { return work_queue.empty(); });
+ }
+
+ // Now wait for execution to finish.
+ std::scoped_lock el{execution_mutex};
}
void Scheduler::DispatchWork() {
@@ -72,10 +79,10 @@ void Scheduler::DispatchWork() {
return;
}
{
- std::scoped_lock lock{work_mutex};
+ std::scoped_lock ql{queue_mutex};
work_queue.push(std::move(chunk));
}
- work_cv.notify_one();
+ event_cv.notify_all();
AcquireNewChunk();
}
@@ -137,30 +144,55 @@ bool Scheduler::UpdateRescaling(bool is_rescaling) {
void Scheduler::WorkerThread(std::stop_token stop_token) {
Common::SetCurrentThreadName("VulkanWorker");
- do {
+
+ const auto TryPopQueue{[this](auto& work) -> bool {
+ if (work_queue.empty()) {
+ return false;
+ }
+
+ work = std::move(work_queue.front());
+ work_queue.pop();
+ event_cv.notify_all();
+ return true;
+ }};
+
+ while (!stop_token.stop_requested()) {
std::unique_ptr<CommandChunk> work;
- bool has_submit{false};
+
{
- std::unique_lock lock{work_mutex};
- if (work_queue.empty()) {
- wait_cv.notify_all();
- }
- Common::CondvarWait(work_cv, lock, stop_token, [&] { return !work_queue.empty(); });
+ std::unique_lock lk{queue_mutex};
+
+ // Wait for work.
+ Common::CondvarWait(event_cv, lk, stop_token, [&] { return TryPopQueue(work); });
+
+ // If we've been asked to stop, we're done.
if (stop_token.stop_requested()) {
- continue;
+ return;
}
- work = std::move(work_queue.front());
- work_queue.pop();
- has_submit = work->HasSubmit();
+ // Exchange lock ownership so that we take the execution lock before
+ // the queue lock goes out of scope. This allows us to force execution
+ // to complete in the next step.
+ std::exchange(lk, std::unique_lock{execution_mutex});
+
+ // Perform the work, tracking whether the chunk was a submission
+ // before executing.
+ const bool has_submit = work->HasSubmit();
work->ExecuteAll(current_cmdbuf);
+
+ // If the chunk was a submission, reallocate the command buffer.
+ if (has_submit) {
+ AllocateWorkerCommandBuffer();
+ }
}
- if (has_submit) {
- AllocateWorkerCommandBuffer();
+
+ {
+ std::scoped_lock rl{reserve_mutex};
+
+ // Recycle the chunk back to the reserve.
+ chunk_reserve.emplace_back(std::move(work));
}
- std::scoped_lock reserve_lock{reserve_mutex};
- chunk_reserve.push_back(std::move(work));
- } while (!stop_token.stop_requested());
+ }
}
void Scheduler::AllocateWorkerCommandBuffer() {
@@ -289,13 +321,16 @@ void Scheduler::EndRenderPass() {
}
void Scheduler::AcquireNewChunk() {
- std::scoped_lock lock{reserve_mutex};
+ std::scoped_lock rl{reserve_mutex};
+
if (chunk_reserve.empty()) {
+ // If we don't have anything reserved, we need to make a new chunk.
chunk = std::make_unique<CommandChunk>();
- return;
+ } else {
+ // Otherwise, we can just take from the reserve.
+ chunk = std::move(chunk_reserve.back());
+ chunk_reserve.pop_back();
}
- chunk = std::move(chunk_reserve.back());
- chunk_reserve.pop_back();
}
} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.h b/src/video_core/renderer_vulkan/vk_scheduler.h
index bd4cb0f7e..8d75ce987 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.h
+++ b/src/video_core/renderer_vulkan/vk_scheduler.h
@@ -232,10 +232,10 @@ private:
std::queue<std::unique_ptr<CommandChunk>> work_queue;
std::vector<std::unique_ptr<CommandChunk>> chunk_reserve;
+ std::mutex execution_mutex;
std::mutex reserve_mutex;
- std::mutex work_mutex;
- std::condition_variable_any work_cv;
- std::condition_variable wait_cv;
+ std::mutex queue_mutex;
+ std::condition_variable_any event_cv;
std::jthread worker_thread;
};
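
To summarize the locking introduced above: the worker pops under queue_mutex, then swaps to execution_mutex before running the chunk, so WaitWorker can first drain the queue and then block on execution_mutex until the in-flight chunk finishes. A reduced sketch with the chunk type simplified to std::function (Common::CondvarWait comes from common/polyfill_thread.h):

#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <stop_token>
#include <utility>
#include "common/polyfill_thread.h"

std::mutex queue_mutex;
std::mutex execution_mutex;
std::condition_variable_any event_cv;
std::queue<std::function<void()>> work_queue;

void WaitWorker() {
    {
        std::unique_lock ql{queue_mutex};
        event_cv.wait(ql, [&] { return work_queue.empty(); });
    }
    std::scoped_lock el{execution_mutex}; // only waits for the in-flight item to finish
}

void WorkerThread(std::stop_token stop_token) {
    while (!stop_token.stop_requested()) {
        std::function<void()> work;
        std::unique_lock lk{queue_mutex};
        Common::CondvarWait(event_cv, lk, stop_token, [&] {
            if (work_queue.empty()) {
                return false;
            }
            work = std::move(work_queue.front());
            work_queue.pop();
            event_cv.notify_all(); // wake any WaitWorker draining the queue
            return true;
        });
        if (stop_token.stop_requested()) {
            return;
        }
        // Acquire the execution lock before the queue lock is released.
        std::exchange(lk, std::unique_lock{execution_mutex});
        work(); // WaitWorker cannot pass execution_mutex until this returns
    }
}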
diff --git a/src/video_core/texture_cache/format_lookup_table.cpp b/src/video_core/texture_cache/format_lookup_table.cpp
index 08aa8ca33..5fc2b2fec 100644
--- a/src/video_core/texture_cache/format_lookup_table.cpp
+++ b/src/video_core/texture_cache/format_lookup_table.cpp
@@ -42,15 +42,15 @@ PixelFormat PixelFormatFromTextureInfo(TextureFormat format, ComponentType red,
ComponentType blue, ComponentType alpha,
bool is_srgb) noexcept {
switch (Hash(format, red, green, blue, alpha, is_srgb)) {
- case Hash(TextureFormat::A8R8G8B8, UNORM):
+ case Hash(TextureFormat::A8B8G8R8, UNORM):
return PixelFormat::A8B8G8R8_UNORM;
- case Hash(TextureFormat::A8R8G8B8, SNORM):
+ case Hash(TextureFormat::A8B8G8R8, SNORM):
return PixelFormat::A8B8G8R8_SNORM;
- case Hash(TextureFormat::A8R8G8B8, UINT):
+ case Hash(TextureFormat::A8B8G8R8, UINT):
return PixelFormat::A8B8G8R8_UINT;
- case Hash(TextureFormat::A8R8G8B8, SINT):
+ case Hash(TextureFormat::A8B8G8R8, SINT):
return PixelFormat::A8B8G8R8_SINT;
- case Hash(TextureFormat::A8R8G8B8, UNORM, SRGB):
+ case Hash(TextureFormat::A8B8G8R8, UNORM, SRGB):
return PixelFormat::A8B8G8R8_SRGB;
case Hash(TextureFormat::B5G6R5, UNORM):
return PixelFormat::B5G6R5_UNORM;
@@ -74,13 +74,13 @@ PixelFormat PixelFormatFromTextureInfo(TextureFormat format, ComponentType red,
return PixelFormat::R8_UINT;
case Hash(TextureFormat::R8, SINT):
return PixelFormat::R8_SINT;
- case Hash(TextureFormat::R8G8, UNORM):
+ case Hash(TextureFormat::G8R8, UNORM):
return PixelFormat::R8G8_UNORM;
- case Hash(TextureFormat::R8G8, SNORM):
+ case Hash(TextureFormat::G8R8, SNORM):
return PixelFormat::R8G8_SNORM;
- case Hash(TextureFormat::R8G8, UINT):
+ case Hash(TextureFormat::G8R8, UINT):
return PixelFormat::R8G8_UINT;
- case Hash(TextureFormat::R8G8, SINT):
+ case Hash(TextureFormat::G8R8, SINT):
return PixelFormat::R8G8_SINT;
case Hash(TextureFormat::R16G16B16A16, FLOAT):
return PixelFormat::R16G16B16A16_FLOAT;
@@ -136,49 +136,49 @@ PixelFormat PixelFormatFromTextureInfo(TextureFormat format, ComponentType red,
return PixelFormat::R32_SINT;
case Hash(TextureFormat::E5B9G9R9, FLOAT):
return PixelFormat::E5B9G9R9_FLOAT;
- case Hash(TextureFormat::D32, FLOAT):
+ case Hash(TextureFormat::Z32, FLOAT):
return PixelFormat::D32_FLOAT;
- case Hash(TextureFormat::D16, UNORM):
+ case Hash(TextureFormat::Z16, UNORM):
return PixelFormat::D16_UNORM;
- case Hash(TextureFormat::S8D24, UINT, UNORM, UNORM, UNORM, LINEAR):
+ case Hash(TextureFormat::Z24S8, UINT, UNORM, UNORM, UNORM, LINEAR):
return PixelFormat::S8_UINT_D24_UNORM;
- case Hash(TextureFormat::S8D24, UINT, UNORM, UINT, UINT, LINEAR):
+ case Hash(TextureFormat::Z24S8, UINT, UNORM, UINT, UINT, LINEAR):
return PixelFormat::S8_UINT_D24_UNORM;
- case Hash(TextureFormat::R8G24, UINT, UNORM, UNORM, UNORM, LINEAR):
+ case Hash(TextureFormat::G24R8, UINT, UNORM, UNORM, UNORM, LINEAR):
return PixelFormat::S8_UINT_D24_UNORM;
- case Hash(TextureFormat::D24S8, UNORM, UINT, UINT, UINT, LINEAR):
+ case Hash(TextureFormat::S8Z24, UNORM, UINT, UINT, UINT, LINEAR):
return PixelFormat::D24_UNORM_S8_UINT;
- case Hash(TextureFormat::D32S8, FLOAT, UINT, UNORM, UNORM, LINEAR):
+ case Hash(TextureFormat::Z32_X24S8, FLOAT, UINT, UNORM, UNORM, LINEAR):
return PixelFormat::D32_FLOAT_S8_UINT;
- case Hash(TextureFormat::R32_B24G8, FLOAT, UINT, UNORM, UNORM, LINEAR):
+ case Hash(TextureFormat::R32B24G8, FLOAT, UINT, UNORM, UNORM, LINEAR):
return PixelFormat::D32_FLOAT_S8_UINT;
- case Hash(TextureFormat::BC1_RGBA, UNORM, LINEAR):
+ case Hash(TextureFormat::DXT1, UNORM, LINEAR):
return PixelFormat::BC1_RGBA_UNORM;
- case Hash(TextureFormat::BC1_RGBA, UNORM, SRGB):
+ case Hash(TextureFormat::DXT1, UNORM, SRGB):
return PixelFormat::BC1_RGBA_SRGB;
- case Hash(TextureFormat::BC2, UNORM, LINEAR):
+ case Hash(TextureFormat::DXT23, UNORM, LINEAR):
return PixelFormat::BC2_UNORM;
- case Hash(TextureFormat::BC2, UNORM, SRGB):
+ case Hash(TextureFormat::DXT23, UNORM, SRGB):
return PixelFormat::BC2_SRGB;
- case Hash(TextureFormat::BC3, UNORM, LINEAR):
+ case Hash(TextureFormat::DXT45, UNORM, LINEAR):
return PixelFormat::BC3_UNORM;
- case Hash(TextureFormat::BC3, UNORM, SRGB):
+ case Hash(TextureFormat::DXT45, UNORM, SRGB):
return PixelFormat::BC3_SRGB;
- case Hash(TextureFormat::BC4, UNORM):
+ case Hash(TextureFormat::DXN1, UNORM):
return PixelFormat::BC4_UNORM;
- case Hash(TextureFormat::BC4, SNORM):
+ case Hash(TextureFormat::DXN1, SNORM):
return PixelFormat::BC4_SNORM;
- case Hash(TextureFormat::BC5, UNORM):
+ case Hash(TextureFormat::DXN2, UNORM):
return PixelFormat::BC5_UNORM;
- case Hash(TextureFormat::BC5, SNORM):
+ case Hash(TextureFormat::DXN2, SNORM):
return PixelFormat::BC5_SNORM;
- case Hash(TextureFormat::BC7, UNORM, LINEAR):
+ case Hash(TextureFormat::BC7U, UNORM, LINEAR):
return PixelFormat::BC7_UNORM;
- case Hash(TextureFormat::BC7, UNORM, SRGB):
+ case Hash(TextureFormat::BC7U, UNORM, SRGB):
return PixelFormat::BC7_SRGB;
- case Hash(TextureFormat::BC6H_SFLOAT, FLOAT):
+ case Hash(TextureFormat::BC6H_S16, FLOAT):
return PixelFormat::BC6H_SFLOAT;
- case Hash(TextureFormat::BC6H_UFLOAT, FLOAT):
+ case Hash(TextureFormat::BC6H_U16, FLOAT):
return PixelFormat::BC6H_UFLOAT;
case Hash(TextureFormat::ASTC_2D_4X4, UNORM, LINEAR):
return PixelFormat::ASTC_2D_4X4_UNORM;
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index c09eecd1a..ed5c768d8 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -1176,13 +1176,13 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
const size_t size_bytes = CalculateGuestSizeInBytes(new_info);
const bool broken_views = runtime.HasBrokenTextureViewFormats();
const bool native_bgr = runtime.HasNativeBgr();
- std::vector<ImageId> overlap_ids;
+ boost::container::small_vector<ImageId, 4> overlap_ids;
std::unordered_set<ImageId> overlaps_found;
- std::vector<ImageId> left_aliased_ids;
- std::vector<ImageId> right_aliased_ids;
+ boost::container::small_vector<ImageId, 4> left_aliased_ids;
+ boost::container::small_vector<ImageId, 4> right_aliased_ids;
std::unordered_set<ImageId> ignore_textures;
- std::vector<ImageId> bad_overlap_ids;
- std::vector<ImageId> all_siblings;
+ boost::container::small_vector<ImageId, 4> bad_overlap_ids;
+ boost::container::small_vector<ImageId, 4> all_siblings;
const bool this_is_linear = info.type == ImageType::Linear;
const auto region_check = [&](ImageId overlap_id, ImageBase& overlap) {
if (True(overlap.flags & ImageFlagBits::Remapped)) {
@@ -1269,7 +1269,7 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
const ImageId new_image_id = slot_images.insert(runtime, new_info, gpu_addr, cpu_addr);
Image& new_image = slot_images[new_image_id];
- if (!gpu_memory->IsContinousRange(new_image.gpu_addr, new_image.guest_size_bytes)) {
+ if (!gpu_memory->IsContinuousRange(new_image.gpu_addr, new_image.guest_size_bytes)) {
new_image.flags |= ImageFlagBits::Sparse;
}
@@ -1298,16 +1298,16 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
Image& overlap = slot_images[overlap_id];
if (True(overlap.flags & ImageFlagBits::GpuModified)) {
new_image.flags |= ImageFlagBits::GpuModified;
- }
- const auto& resolution = Settings::values.resolution_info;
- const SubresourceBase base = new_image.TryFindBase(overlap.gpu_addr).value();
- const u32 up_scale = can_rescale ? resolution.up_scale : 1;
- const u32 down_shift = can_rescale ? resolution.down_shift : 0;
- auto copies = MakeShrinkImageCopies(new_info, overlap.info, base, up_scale, down_shift);
- if (overlap.info.num_samples != new_image.info.num_samples) {
- runtime.CopyImageMSAA(new_image, overlap, std::move(copies));
- } else {
- runtime.CopyImage(new_image, overlap, std::move(copies));
+ const auto& resolution = Settings::values.resolution_info;
+ const SubresourceBase base = new_image.TryFindBase(overlap.gpu_addr).value();
+ const u32 up_scale = can_rescale ? resolution.up_scale : 1;
+ const u32 down_shift = can_rescale ? resolution.down_shift : 0;
+ auto copies = MakeShrinkImageCopies(new_info, overlap.info, base, up_scale, down_shift);
+ if (overlap.info.num_samples != new_image.info.num_samples) {
+ runtime.CopyImageMSAA(new_image, overlap, std::move(copies));
+ } else {
+ runtime.CopyImage(new_image, overlap, std::move(copies));
+ }
}
if (True(overlap.flags & ImageFlagBits::Tracked)) {
UntrackImage(overlap, overlap_id);
@@ -1616,37 +1616,38 @@ void TextureCache<P>::ForEachImageInRegionGPU(size_t as_id, GPUVAddr gpu_addr, s
return;
}
auto& gpu_page_table = gpu_page_table_storage[*storage_id];
- ForEachGPUPage(gpu_addr, size, [this, gpu_page_table, &images, gpu_addr, size, func](u64 page) {
- const auto it = gpu_page_table.find(page);
- if (it == gpu_page_table.end()) {
- if constexpr (BOOL_BREAK) {
- return false;
- } else {
- return;
- }
- }
- for (const ImageId image_id : it->second) {
- Image& image = slot_images[image_id];
- if (True(image.flags & ImageFlagBits::Picked)) {
- continue;
- }
- if (!image.OverlapsGPU(gpu_addr, size)) {
- continue;
- }
- image.flags |= ImageFlagBits::Picked;
- images.push_back(image_id);
- if constexpr (BOOL_BREAK) {
- if (func(image_id, image)) {
- return true;
- }
- } else {
- func(image_id, image);
- }
- }
- if constexpr (BOOL_BREAK) {
- return false;
- }
- });
+ ForEachGPUPage(gpu_addr, size,
+ [this, &gpu_page_table, &images, gpu_addr, size, func](u64 page) {
+ const auto it = gpu_page_table.find(page);
+ if (it == gpu_page_table.end()) {
+ if constexpr (BOOL_BREAK) {
+ return false;
+ } else {
+ return;
+ }
+ }
+ for (const ImageId image_id : it->second) {
+ Image& image = slot_images[image_id];
+ if (True(image.flags & ImageFlagBits::Picked)) {
+ continue;
+ }
+ if (!image.OverlapsGPU(gpu_addr, size)) {
+ continue;
+ }
+ image.flags |= ImageFlagBits::Picked;
+ images.push_back(image_id);
+ if constexpr (BOOL_BREAK) {
+ if (func(image_id, image)) {
+ return true;
+ }
+ } else {
+ func(image_id, image);
+ }
+ }
+ if constexpr (BOOL_BREAK) {
+ return false;
+ }
+ });
for (const ImageId image_id : images) {
slot_images[image_id].flags &= ~ImageFlagBits::Picked;
}
diff --git a/src/video_core/textures/texture.cpp b/src/video_core/textures/texture.cpp
index 26649aebf..4a80a59f9 100644
--- a/src/video_core/textures/texture.cpp
+++ b/src/video_core/textures/texture.cpp
@@ -14,7 +14,7 @@ namespace Tegra::Texture {
namespace {
-constexpr std::array<float, 256> SRGB_CONVERSION_LUT = {
+[[maybe_unused]] constexpr std::array<float, 256> SRGB_CONVERSION_LUT = {
0.000000f, 0.000000f, 0.000000f, 0.000012f, 0.000021f, 0.000033f, 0.000046f, 0.000062f,
0.000081f, 0.000102f, 0.000125f, 0.000151f, 0.000181f, 0.000214f, 0.000251f, 0.000293f,
0.000338f, 0.000388f, 0.000443f, 0.000503f, 0.000568f, 0.000639f, 0.000715f, 0.000798f,
@@ -52,11 +52,13 @@ constexpr std::array<float, 256> SRGB_CONVERSION_LUT = {
} // Anonymous namespace
std::array<float, 4> TSCEntry::BorderColor() const noexcept {
- if (!srgb_conversion) {
- return border_color;
- }
- return {SRGB_CONVERSION_LUT[srgb_border_color_r], SRGB_CONVERSION_LUT[srgb_border_color_g],
- SRGB_CONVERSION_LUT[srgb_border_color_b], border_color[3]};
+ // TODO: Handle SRGB correctly. Using this breaks shadows in some games (Xenoblade).
+ // if (!srgb_conversion) {
+ // return border_color;
+ //}
+ // return {SRGB_CONVERSION_LUT[srgb_border_color_r], SRGB_CONVERSION_LUT[srgb_border_color_g],
+ // SRGB_CONVERSION_LUT[srgb_border_color_b], border_color[3]};
+ return border_color;
}
float TSCEntry::MaxAnisotropy() const noexcept {
diff --git a/src/video_core/textures/texture.h b/src/video_core/textures/texture.h
index 7c4553a53..7e5837b20 100644
--- a/src/video_core/textures/texture.h
+++ b/src/video_core/textures/texture.h
@@ -15,26 +15,26 @@ enum class TextureFormat : u32 {
R32G32B32 = 0x02,
R16G16B16A16 = 0x03,
R32G32 = 0x04,
- R32_B24G8 = 0x05,
+ R32B24G8 = 0x05,
ETC2_RGB = 0x06,
X8B8G8R8 = 0x07,
- A8R8G8B8 = 0x08,
+ A8B8G8R8 = 0x08,
A2B10G10R10 = 0x09,
ETC2_RGB_PTA = 0x0a,
ETC2_RGBA = 0x0b,
R16G16 = 0x0c,
- R24G8 = 0x0d,
- R8G24 = 0x0e,
+ G8R24 = 0x0d,
+ G24R8 = 0x0e,
R32 = 0x0f,
- BC6H_SFLOAT = 0x10,
- BC6H_UFLOAT = 0x11,
+ BC6H_S16 = 0x10,
+ BC6H_U16 = 0x11,
A4B4G4R4 = 0x12,
A5B5G5R1 = 0x13,
A1B5G5R5 = 0x14,
B5G6R5 = 0x15,
B6G5R5 = 0x16,
- BC7 = 0x17,
- R8G8 = 0x18,
+ BC7U = 0x17,
+ G8R8 = 0x18,
EAC = 0x19,
EACX2 = 0x1a,
R16 = 0x1b,
@@ -46,33 +46,33 @@ enum class TextureFormat : u32 {
B10G11R11 = 0x21,
G8B8G8R8 = 0x22,
B8G8R8G8 = 0x23,
- BC1_RGBA = 0x24,
- BC2 = 0x25,
- BC3 = 0x26,
- BC4 = 0x27,
- BC5 = 0x28,
- S8D24 = 0x29,
- X8D24 = 0x2a,
- D24S8 = 0x2b,
- X4V4D24__COV4R4V = 0x2c,
- X4V4D24__COV8R8V = 0x2d,
- V8D24__COV4R12V = 0x2e,
- D32 = 0x2f,
- D32S8 = 0x30,
- X8D24_X20V4S8__COV4R4V = 0x31,
- X8D24_X20V4S8__COV8R8V = 0x32,
- D32_X20V4X8__COV4R4V = 0x33,
- D32_X20V4X8__COV8R8V = 0x34,
- D32_X20V4S8__COV4R4V = 0x35,
- D32_X20V4S8__COV8R8V = 0x36,
- X8D24_X16V8S8__COV4R12V = 0x37,
- D32_X16V8X8__COV4R12V = 0x38,
- D32_X16V8S8__COV4R12V = 0x39,
- D16 = 0x3a,
- V8D24__COV8R24V = 0x3b,
- X8D24_X16V8S8__COV8R24V = 0x3c,
- D32_X16V8X8__COV8R24V = 0x3d,
- D32_X16V8S8__COV8R24V = 0x3e,
+ DXT1 = 0x24,
+ DXT23 = 0x25,
+ DXT45 = 0x26,
+ DXN1 = 0x27,
+ DXN2 = 0x28,
+ Z24S8 = 0x29,
+ X8Z24 = 0x2a,
+ S8Z24 = 0x2b,
+ X4V4Z24__COV4R4V = 0x2c,
+ X4V4Z24__COV8R8V = 0x2d,
+ V8Z24__COV4R12V = 0x2e,
+ Z32 = 0x2f,
+ Z32_X24S8 = 0x30,
+ X8Z24_X20V4S8__COV4R4V = 0x31,
+ X8Z24_X20V4S8__COV8R8V = 0x32,
+ Z32_X20V4X8__COV4R4V = 0x33,
+ Z32_X20V4X8__COV8R8V = 0x34,
+ Z32_X20V4S8__COV4R4V = 0x35,
+ Z32_X20V4S8__COV8R8V = 0x36,
+ X8Z24_X16V8S8__COV4R12V = 0x37,
+ Z32_X16V8X8__COV4R12V = 0x38,
+ Z32_X16V8S8__COV4R12V = 0x39,
+ Z16 = 0x3a,
+ V8Z24__COV8R24V = 0x3b,
+ X8Z24_X16V8S8__COV8R24V = 0x3c,
+ Z32_X16V8X8__COV8R24V = 0x3d,
+ Z32_X16V8S8__COV8R24V = 0x3e,
ASTC_2D_4X4 = 0x40,
ASTC_2D_5X5 = 0x41,
ASTC_2D_6X6 = 0x42,
diff --git a/src/video_core/video_core.cpp b/src/video_core/video_core.cpp
index fedb4a7bb..b42d48416 100644
--- a/src/video_core/video_core.cpp
+++ b/src/video_core/video_core.cpp
@@ -18,7 +18,7 @@ std::unique_ptr<VideoCore::RendererBase> CreateRenderer(
Core::System& system, Core::Frontend::EmuWindow& emu_window, Tegra::GPU& gpu,
std::unique_ptr<Core::Frontend::GraphicsContext> context) {
auto& telemetry_session = system.TelemetrySession();
- auto& cpu_memory = system.Memory();
+ auto& cpu_memory = system.ApplicationMemory();
switch (Settings::values.renderer_backend.GetValue()) {
case Settings::RendererBackend::OpenGL:
diff --git a/src/video_core/vulkan_common/vulkan_device.cpp b/src/video_core/vulkan_common/vulkan_device.cpp
index df348af55..6f288b3f8 100644
--- a/src/video_core/vulkan_common/vulkan_device.cpp
+++ b/src/video_core/vulkan_common/vulkan_device.cpp
@@ -401,6 +401,12 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
loaded_extensions.erase(VK_EXT_EXTENDED_DYNAMIC_STATE_2_EXTENSION_NAME);
}
}
+ if (extensions.extended_dynamic_state3 && is_radv) {
+ LOG_WARNING(Render_Vulkan, "RADV has broken extendedDynamicState3ColorBlendEquation");
+ features.extended_dynamic_state3.extendedDynamicState3ColorBlendEnable = false;
+ features.extended_dynamic_state3.extendedDynamicState3ColorBlendEquation = false;
+ dynamic_state3_blending = false;
+ }
if (extensions.vertex_input_dynamic_state && is_radv) {
// TODO(ameerj): Blacklist only offending driver versions
// TODO(ameerj): Confirm if RDNA1 is affected
diff --git a/src/video_core/vulkan_common/vulkan_wrapper.cpp b/src/video_core/vulkan_common/vulkan_wrapper.cpp
index 486d4dfaf..336f53700 100644
--- a/src/video_core/vulkan_common/vulkan_wrapper.cpp
+++ b/src/video_core/vulkan_common/vulkan_wrapper.cpp
@@ -375,6 +375,8 @@ const char* ToString(VkResult result) noexcept {
return "VK_RESULT_MAX_ENUM";
case VkResult::VK_ERROR_COMPRESSION_EXHAUSTED_EXT:
return "VK_ERROR_COMPRESSION_EXHAUSTED_EXT";
+ case VkResult::VK_ERROR_INCOMPATIBLE_SHADER_BINARY_EXT:
+ return "VK_ERROR_INCOMPATIBLE_SHADER_BINARY_EXT";
}
return "Unknown";
}
diff --git a/src/web_service/verify_login.cpp b/src/web_service/verify_login.cpp
index 050080278..d5b7161cb 100644
--- a/src/web_service/verify_login.cpp
+++ b/src/web_service/verify_login.cpp
@@ -21,7 +21,7 @@ bool VerifyLogin(const std::string& host, const std::string& username, const std
return username.empty();
}
- return username == *iter;
+ return *iter == username;
}
} // namespace WebService
diff --git a/src/yuzu/applets/qt_amiibo_settings.cpp b/src/yuzu/applets/qt_amiibo_settings.cpp
index 93ad4b4f9..4559df5b1 100644
--- a/src/yuzu/applets/qt_amiibo_settings.cpp
+++ b/src/yuzu/applets/qt_amiibo_settings.cpp
@@ -245,12 +245,19 @@ void QtAmiiboSettingsDialog::SetSettingsDescription() {
QtAmiiboSettings::QtAmiiboSettings(GMainWindow& parent) {
connect(this, &QtAmiiboSettings::MainWindowShowAmiiboSettings, &parent,
&GMainWindow::AmiiboSettingsShowDialog, Qt::QueuedConnection);
+ connect(this, &QtAmiiboSettings::MainWindowRequestExit, &parent,
+ &GMainWindow::AmiiboSettingsRequestExit, Qt::QueuedConnection);
connect(&parent, &GMainWindow::AmiiboSettingsFinished, this,
&QtAmiiboSettings::MainWindowFinished, Qt::QueuedConnection);
}
QtAmiiboSettings::~QtAmiiboSettings() = default;
+void QtAmiiboSettings::Close() const {
+ callback = {};
+ emit MainWindowRequestExit();
+}
+
void QtAmiiboSettings::ShowCabinetApplet(
const Core::Frontend::CabinetCallback& callback_,
const Core::Frontend::CabinetParameters& parameters,
@@ -260,5 +267,7 @@ void QtAmiiboSettings::ShowCabinetApplet(
}
void QtAmiiboSettings::MainWindowFinished(bool is_success, const std::string& name) {
- callback(is_success, name);
+ if (callback) {
+ callback(is_success, name);
+ }
}
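
The amiibo-settings applet above is the first of several frontends gaining the same exit plumbing; the controller selector, error display, profile selector, software keyboard, and web browser below follow suit. Reduced to its essentials, and with a hypothetical class name standing in for the concrete applets, the pattern is:

class QtSomeApplet final : public QObject { // hypothetical applet frontend
    Q_OBJECT
public:
    void Close() const override {
        callback = {};                // a late MainWindowFinished() becomes a no-op
        emit MainWindowRequestExit(); // queued connection into GMainWindow
    }
signals:
    void MainWindowRequestExit() const;
private:
    void MainWindowFinished(bool is_success) {
        if (callback) {
            callback(is_success); // only fire if the applet was not force-closed
        }
    }
    mutable std::function<void(bool)> callback;
};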
diff --git a/src/yuzu/applets/qt_amiibo_settings.h b/src/yuzu/applets/qt_amiibo_settings.h
index 930c96739..bc389a33f 100644
--- a/src/yuzu/applets/qt_amiibo_settings.h
+++ b/src/yuzu/applets/qt_amiibo_settings.h
@@ -68,6 +68,7 @@ public:
explicit QtAmiiboSettings(GMainWindow& parent);
~QtAmiiboSettings() override;
+ void Close() const override;
void ShowCabinetApplet(const Core::Frontend::CabinetCallback& callback_,
const Core::Frontend::CabinetParameters& parameters,
std::shared_ptr<Service::NFP::NfpDevice> nfp_device) const override;
@@ -75,6 +76,7 @@ public:
signals:
void MainWindowShowAmiiboSettings(const Core::Frontend::CabinetParameters& parameters,
std::shared_ptr<Service::NFP::NfpDevice> nfp_device) const;
+ void MainWindowRequestExit() const;
private:
void MainWindowFinished(bool is_success, const std::string& name);
diff --git a/src/yuzu/applets/qt_controller.cpp b/src/yuzu/applets/qt_controller.cpp
index c30b54499..00aafb8f8 100644
--- a/src/yuzu/applets/qt_controller.cpp
+++ b/src/yuzu/applets/qt_controller.cpp
@@ -300,7 +300,7 @@ bool QtControllerSelectorDialog::CheckIfParametersMet() {
if (num_connected_players < min_supported_players ||
num_connected_players > max_supported_players) {
parameters_met = false;
- ui->buttonBox->setEnabled(parameters_met);
+ ui->buttonBox->button(QDialogButtonBox::Ok)->setEnabled(parameters_met);
return parameters_met;
}
@@ -327,7 +327,7 @@ bool QtControllerSelectorDialog::CheckIfParametersMet() {
}();
parameters_met = all_controllers_compatible;
- ui->buttonBox->setEnabled(parameters_met);
+ ui->buttonBox->button(QDialogButtonBox::Ok)->setEnabled(parameters_met);
return parameters_met;
}
@@ -678,18 +678,27 @@ void QtControllerSelectorDialog::DisableUnsupportedPlayers() {
QtControllerSelector::QtControllerSelector(GMainWindow& parent) {
connect(this, &QtControllerSelector::MainWindowReconfigureControllers, &parent,
&GMainWindow::ControllerSelectorReconfigureControllers, Qt::QueuedConnection);
+ connect(this, &QtControllerSelector::MainWindowRequestExit, &parent,
+ &GMainWindow::ControllerSelectorRequestExit, Qt::QueuedConnection);
connect(&parent, &GMainWindow::ControllerSelectorReconfigureFinished, this,
&QtControllerSelector::MainWindowReconfigureFinished, Qt::QueuedConnection);
}
QtControllerSelector::~QtControllerSelector() = default;
+void QtControllerSelector::Close() const {
+ callback = {};
+ emit MainWindowRequestExit();
+}
+
void QtControllerSelector::ReconfigureControllers(
ReconfigureCallback callback_, const Core::Frontend::ControllerParameters& parameters) const {
callback = std::move(callback_);
emit MainWindowReconfigureControllers(parameters);
}
-void QtControllerSelector::MainWindowReconfigureFinished() {
- callback();
+void QtControllerSelector::MainWindowReconfigureFinished(bool is_success) {
+ if (callback) {
+ callback(is_success);
+ }
}
diff --git a/src/yuzu/applets/qt_controller.h b/src/yuzu/applets/qt_controller.h
index 16e99f507..2fdc35857 100644
--- a/src/yuzu/applets/qt_controller.h
+++ b/src/yuzu/applets/qt_controller.h
@@ -156,6 +156,7 @@ public:
explicit QtControllerSelector(GMainWindow& parent);
~QtControllerSelector() override;
+ void Close() const override;
void ReconfigureControllers(
ReconfigureCallback callback_,
const Core::Frontend::ControllerParameters& parameters) const override;
@@ -163,9 +164,10 @@ public:
signals:
void MainWindowReconfigureControllers(
const Core::Frontend::ControllerParameters& parameters) const;
+ void MainWindowRequestExit() const;
private:
- void MainWindowReconfigureFinished();
+ void MainWindowReconfigureFinished(bool is_success);
mutable ReconfigureCallback callback;
};
diff --git a/src/yuzu/applets/qt_controller.ui b/src/yuzu/applets/qt_controller.ui
index f5eccba70..729e921ee 100644
--- a/src/yuzu/applets/qt_controller.ui
+++ b/src/yuzu/applets/qt_controller.ui
@@ -2629,7 +2629,7 @@
<bool>true</bool>
</property>
<property name="standardButtons">
- <set>QDialogButtonBox::Ok</set>
+ <set>QDialogButtonBox::Cancel|QDialogButtonBox::Ok</set>
</property>
</widget>
</item>
@@ -2649,5 +2649,11 @@
<receiver>QtControllerSelectorDialog</receiver>
<slot>accept()</slot>
</connection>
+ <connection>
+ <sender>buttonBox</sender>
+ <signal>rejected()</signal>
+ <receiver>QtControllerSelectorDialog</receiver>
+ <slot>reject()</slot>
+ </connection>
</connections>
</ui>
diff --git a/src/yuzu/applets/qt_error.cpp b/src/yuzu/applets/qt_error.cpp
index e0190a979..1dc4f0383 100644
--- a/src/yuzu/applets/qt_error.cpp
+++ b/src/yuzu/applets/qt_error.cpp
@@ -8,12 +8,19 @@
QtErrorDisplay::QtErrorDisplay(GMainWindow& parent) {
connect(this, &QtErrorDisplay::MainWindowDisplayError, &parent,
&GMainWindow::ErrorDisplayDisplayError, Qt::QueuedConnection);
+ connect(this, &QtErrorDisplay::MainWindowRequestExit, &parent,
+ &GMainWindow::ErrorDisplayRequestExit, Qt::QueuedConnection);
connect(&parent, &GMainWindow::ErrorDisplayFinished, this,
&QtErrorDisplay::MainWindowFinishedError, Qt::DirectConnection);
}
QtErrorDisplay::~QtErrorDisplay() = default;
+void QtErrorDisplay::Close() const {
+ callback = {};
+ emit MainWindowRequestExit();
+}
+
void QtErrorDisplay::ShowError(Result error, FinishedCallback finished) const {
callback = std::move(finished);
emit MainWindowDisplayError(
@@ -55,5 +62,7 @@ void QtErrorDisplay::ShowCustomErrorText(Result error, std::string dialog_text,
}
void QtErrorDisplay::MainWindowFinishedError() {
- callback();
+ if (callback) {
+ callback();
+ }
}
diff --git a/src/yuzu/applets/qt_error.h b/src/yuzu/applets/qt_error.h
index e4e174721..957f170ad 100644
--- a/src/yuzu/applets/qt_error.h
+++ b/src/yuzu/applets/qt_error.h
@@ -16,6 +16,7 @@ public:
explicit QtErrorDisplay(GMainWindow& parent);
~QtErrorDisplay() override;
+ void Close() const override;
void ShowError(Result error, FinishedCallback finished) const override;
void ShowErrorWithTimestamp(Result error, std::chrono::seconds time,
FinishedCallback finished) const override;
@@ -24,6 +25,7 @@ public:
signals:
void MainWindowDisplayError(QString error_code, QString error_text) const;
+ void MainWindowRequestExit() const;
private:
void MainWindowFinishedError();
diff --git a/src/yuzu/applets/qt_profile_select.cpp b/src/yuzu/applets/qt_profile_select.cpp
index 4145c5299..2448e46b6 100644
--- a/src/yuzu/applets/qt_profile_select.cpp
+++ b/src/yuzu/applets/qt_profile_select.cpp
@@ -46,11 +46,13 @@ QPixmap GetIcon(Common::UUID uuid) {
}
} // Anonymous namespace
-QtProfileSelectionDialog::QtProfileSelectionDialog(Core::HID::HIDCore& hid_core, QWidget* parent)
+QtProfileSelectionDialog::QtProfileSelectionDialog(
+ Core::HID::HIDCore& hid_core, QWidget* parent,
+ const Core::Frontend::ProfileSelectParameters& parameters)
: QDialog(parent), profile_manager(std::make_unique<Service::Account::ProfileManager>()) {
outer_layout = new QVBoxLayout;
- instruction_label = new QLabel(tr("Select a user:"));
+ instruction_label = new QLabel();
scroll_area = new QScrollArea;
@@ -120,7 +122,8 @@ QtProfileSelectionDialog::QtProfileSelectionDialog(Core::HID::HIDCore& hid_core,
item_model->appendRow(item);
setLayout(outer_layout);
- setWindowTitle(tr("Profile Selector"));
+ SetWindowTitle(parameters);
+ SetDialogPurpose(parameters);
resize(550, 400);
}
@@ -154,20 +157,101 @@ void QtProfileSelectionDialog::SelectUser(const QModelIndex& index) {
user_index = index.row();
}
+void QtProfileSelectionDialog::SetWindowTitle(
+ const Core::Frontend::ProfileSelectParameters& parameters) {
+ using Service::AM::Applets::UiMode;
+ switch (parameters.mode) {
+ case UiMode::UserCreator:
+ case UiMode::UserCreatorForStarter:
+ setWindowTitle(tr("Profile Creator"));
+ return;
+ case UiMode::EnsureNetworkServiceAccountAvailable:
+ setWindowTitle(tr("Profile Selector"));
+ return;
+ case UiMode::UserIconEditor:
+ setWindowTitle(tr("Profile Icon Editor"));
+ return;
+ case UiMode::UserNicknameEditor:
+ setWindowTitle(tr("Profile Nickname Editor"));
+ return;
+ case UiMode::NintendoAccountAuthorizationRequestContext:
+ case UiMode::IntroduceExternalNetworkServiceAccount:
+ case UiMode::IntroduceExternalNetworkServiceAccountForRegistration:
+ case UiMode::NintendoAccountNnidLinker:
+ case UiMode::LicenseRequirementsForNetworkService:
+ case UiMode::LicenseRequirementsForNetworkServiceWithUserContextImpl:
+ case UiMode::UserCreatorForImmediateNaLoginTest:
+ case UiMode::UserQualificationPromoter:
+ case UiMode::UserSelector:
+ default:
+ setWindowTitle(tr("Profile Selector"));
+ }
+}
+
+void QtProfileSelectionDialog::SetDialogPurpose(
+ const Core::Frontend::ProfileSelectParameters& parameters) {
+ using Service::AM::Applets::UserSelectionPurpose;
+
+ switch (parameters.purpose) {
+ case UserSelectionPurpose::GameCardRegistration:
+ instruction_label->setText(tr("Who will receive the points?"));
+ return;
+ case UserSelectionPurpose::EShopLaunch:
+ instruction_label->setText(tr("Who is using Nintendo eShop?"));
+ return;
+ case UserSelectionPurpose::EShopItemShow:
+ instruction_label->setText(tr("Who is making this purchase?"));
+ return;
+ case UserSelectionPurpose::PicturePost:
+ instruction_label->setText(tr("Who is posting?"));
+ return;
+ case UserSelectionPurpose::NintendoAccountLinkage:
+ instruction_label->setText(tr("Select a user to link to a Nintendo Account."));
+ return;
+ case UserSelectionPurpose::SettingsUpdate:
+ instruction_label->setText(tr("Change settings for which user?"));
+ return;
+ case UserSelectionPurpose::SaveDataDeletion:
+ instruction_label->setText(tr("Format data for which user?"));
+ return;
+ case UserSelectionPurpose::UserMigration:
+ instruction_label->setText(tr("Which user will be transferred to another console?"));
+ return;
+ case UserSelectionPurpose::SaveDataTransfer:
+ instruction_label->setText(tr("Send save data for which user?"));
+ return;
+ case UserSelectionPurpose::General:
+ default:
+ instruction_label->setText(tr("Select a user:"));
+ return;
+ }
+}
+
QtProfileSelector::QtProfileSelector(GMainWindow& parent) {
connect(this, &QtProfileSelector::MainWindowSelectProfile, &parent,
&GMainWindow::ProfileSelectorSelectProfile, Qt::QueuedConnection);
+ connect(this, &QtProfileSelector::MainWindowRequestExit, &parent,
+ &GMainWindow::ProfileSelectorRequestExit, Qt::QueuedConnection);
connect(&parent, &GMainWindow::ProfileSelectorFinishedSelection, this,
&QtProfileSelector::MainWindowFinishedSelection, Qt::DirectConnection);
}
QtProfileSelector::~QtProfileSelector() = default;
-void QtProfileSelector::SelectProfile(SelectProfileCallback callback_) const {
+void QtProfileSelector::Close() const {
+ callback = {};
+ emit MainWindowRequestExit();
+}
+
+void QtProfileSelector::SelectProfile(
+ SelectProfileCallback callback_,
+ const Core::Frontend::ProfileSelectParameters& parameters) const {
callback = std::move(callback_);
- emit MainWindowSelectProfile();
+ emit MainWindowSelectProfile(parameters);
}
void QtProfileSelector::MainWindowFinishedSelection(std::optional<Common::UUID> uuid) {
- callback(uuid);
+ if (callback) {
+ callback(uuid);
+ }
}
diff --git a/src/yuzu/applets/qt_profile_select.h b/src/yuzu/applets/qt_profile_select.h
index 637a3bda2..99056e274 100644
--- a/src/yuzu/applets/qt_profile_select.h
+++ b/src/yuzu/applets/qt_profile_select.h
@@ -28,7 +28,8 @@ class QtProfileSelectionDialog final : public QDialog {
Q_OBJECT
public:
- explicit QtProfileSelectionDialog(Core::HID::HIDCore& hid_core, QWidget* parent);
+ explicit QtProfileSelectionDialog(Core::HID::HIDCore& hid_core, QWidget* parent,
+ const Core::Frontend::ProfileSelectParameters& parameters);
~QtProfileSelectionDialog() override;
int exec() override;
@@ -40,6 +41,9 @@ public:
private:
void SelectUser(const QModelIndex& index);
+ void SetWindowTitle(const Core::Frontend::ProfileSelectParameters& parameters);
+ void SetDialogPurpose(const Core::Frontend::ProfileSelectParameters& parameters);
+
int user_index = 0;
QVBoxLayout* layout;
@@ -65,10 +69,13 @@ public:
explicit QtProfileSelector(GMainWindow& parent);
~QtProfileSelector() override;
- void SelectProfile(SelectProfileCallback callback_) const override;
+ void Close() const override;
+ void SelectProfile(SelectProfileCallback callback_,
+ const Core::Frontend::ProfileSelectParameters& parameters) const override;
signals:
- void MainWindowSelectProfile() const;
+ void MainWindowSelectProfile(const Core::Frontend::ProfileSelectParameters& parameters) const;
+ void MainWindowRequestExit() const;
private:
void MainWindowFinishedSelection(std::optional<Common::UUID> uuid);
diff --git a/src/yuzu/applets/qt_software_keyboard.h b/src/yuzu/applets/qt_software_keyboard.h
index 30ac8ecf6..ac23ce047 100644
--- a/src/yuzu/applets/qt_software_keyboard.h
+++ b/src/yuzu/applets/qt_software_keyboard.h
@@ -233,6 +233,10 @@ public:
explicit QtSoftwareKeyboard(GMainWindow& parent);
~QtSoftwareKeyboard() override;
+ void Close() const override {
+ ExitKeyboard();
+ }
+
void InitializeKeyboard(bool is_inline,
Core::Frontend::KeyboardInitializeParameters initialize_parameters,
SubmitNormalCallback submit_normal_callback_,
diff --git a/src/yuzu/applets/qt_web_browser.cpp b/src/yuzu/applets/qt_web_browser.cpp
index 0a5912326..28acc0ff8 100644
--- a/src/yuzu/applets/qt_web_browser.cpp
+++ b/src/yuzu/applets/qt_web_browser.cpp
@@ -393,6 +393,8 @@ void QtNXWebEngineView::FocusFirstLinkElement() {
QtWebBrowser::QtWebBrowser(GMainWindow& main_window) {
connect(this, &QtWebBrowser::MainWindowOpenWebPage, &main_window,
&GMainWindow::WebBrowserOpenWebPage, Qt::QueuedConnection);
+ connect(this, &QtWebBrowser::MainWindowRequestExit, &main_window,
+ &GMainWindow::WebBrowserRequestExit, Qt::QueuedConnection);
connect(&main_window, &GMainWindow::WebBrowserExtractOfflineRomFS, this,
&QtWebBrowser::MainWindowExtractOfflineRomFS, Qt::QueuedConnection);
connect(&main_window, &GMainWindow::WebBrowserClosed, this,
@@ -401,6 +403,11 @@ QtWebBrowser::QtWebBrowser(GMainWindow& main_window) {
QtWebBrowser::~QtWebBrowser() = default;
+void QtWebBrowser::Close() const {
+ callback = {};
+ emit MainWindowRequestExit();
+}
+
void QtWebBrowser::OpenLocalWebPage(const std::string& local_url,
ExtractROMFSCallback extract_romfs_callback_,
OpenWebPageCallback callback_) const {
@@ -436,5 +443,7 @@ void QtWebBrowser::MainWindowExtractOfflineRomFS() {
void QtWebBrowser::MainWindowWebBrowserClosed(Service::AM::Applets::WebExitReason exit_reason,
std::string last_url) {
- callback(exit_reason, last_url);
+ if (callback) {
+ callback(exit_reason, last_url);
+ }
}
diff --git a/src/yuzu/applets/qt_web_browser.h b/src/yuzu/applets/qt_web_browser.h
index ceae7926e..1234108ae 100644
--- a/src/yuzu/applets/qt_web_browser.h
+++ b/src/yuzu/applets/qt_web_browser.h
@@ -196,6 +196,7 @@ public:
explicit QtWebBrowser(GMainWindow& parent);
~QtWebBrowser() override;
+ void Close() const override;
void OpenLocalWebPage(const std::string& local_url,
ExtractROMFSCallback extract_romfs_callback_,
OpenWebPageCallback callback_) const override;
@@ -206,6 +207,7 @@ public:
signals:
void MainWindowOpenWebPage(const std::string& main_url, const std::string& additional_args,
bool is_local) const;
+ void MainWindowRequestExit() const;
private:
void MainWindowExtractOfflineRomFS();
diff --git a/src/yuzu/configuration/configure_general.ui b/src/yuzu/configuration/configure_general.ui
index 6cd79673c..add110bb0 100644
--- a/src/yuzu/configuration/configure_general.ui
+++ b/src/yuzu/configuration/configure_general.ui
@@ -64,7 +64,7 @@
<item>
<widget class="QCheckBox" name="use_extended_memory_layout">
<property name="text">
- <string>Extended memory layout (6GB DRAM)</string>
+ <string>Extended memory layout (8GB DRAM)</string>
</property>
</widget>
</item>
diff --git a/src/yuzu/configuration/configure_input.cpp b/src/yuzu/configuration/configure_input.cpp
index 1db374d4a..7fce85bca 100644
--- a/src/yuzu/configuration/configure_input.cpp
+++ b/src/yuzu/configuration/configure_input.cpp
@@ -189,6 +189,8 @@ QList<QWidget*> ConfigureInput::GetSubTabs() const {
}
void ConfigureInput::ApplyConfiguration() {
+ const bool was_global = Settings::values.players.UsingGlobal();
+ Settings::values.players.SetGlobal(true);
for (auto* controller : player_controllers) {
controller->ApplyConfiguration();
}
@@ -201,6 +203,7 @@ void ConfigureInput::ApplyConfiguration() {
Settings::values.vibration_enabled.SetValue(ui->vibrationGroup->isChecked());
Settings::values.motion_enabled.SetValue(ui->motionGroup->isChecked());
+ Settings::values.players.SetGlobal(was_global);
}
void ConfigureInput::changeEvent(QEvent* event) {
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp
index ae14884b5..b79409a68 100644
--- a/src/yuzu/main.cpp
+++ b/src/yuzu/main.cpp
@@ -307,6 +307,8 @@ GMainWindow::GMainWindow(std::unique_ptr<Config> config_, bool has_broken_vulkan
system->Initialize();
Common::Log::Initialize();
+ Common::Log::Start();
+
LoadTranslation();
setAcceptDrops(true);
@@ -449,8 +451,6 @@ GMainWindow::GMainWindow(std::unique_ptr<Config> config_, bool has_broken_vulkan
SetupPrepareForSleep();
- Common::Log::Start();
-
QStringList args = QApplication::arguments();
if (args.size() < 2) {
@@ -576,6 +576,10 @@ void GMainWindow::RegisterMetaTypes() {
// Controller Applet
qRegisterMetaType<Core::Frontend::ControllerParameters>("Core::Frontend::ControllerParameters");
+ // Profile Select Applet
+ qRegisterMetaType<Core::Frontend::ProfileSelectParameters>(
+ "Core::Frontend::ProfileSelectParameters");
+
// Software Keyboard Applet
qRegisterMetaType<Core::Frontend::KeyboardInitializeParameters>(
"Core::Frontend::KeyboardInitializeParameters");
@@ -596,50 +600,81 @@ void GMainWindow::RegisterMetaTypes() {
void GMainWindow::AmiiboSettingsShowDialog(const Core::Frontend::CabinetParameters& parameters,
std::shared_ptr<Service::NFP::NfpDevice> nfp_device) {
- QtAmiiboSettingsDialog dialog(this, parameters, input_subsystem.get(), nfp_device);
+ cabinet_applet =
+ new QtAmiiboSettingsDialog(this, parameters, input_subsystem.get(), nfp_device);
+ SCOPE_EXIT({
+ cabinet_applet->deleteLater();
+ cabinet_applet = nullptr;
+ });
- dialog.setWindowFlags(Qt::Dialog | Qt::CustomizeWindowHint | Qt::WindowStaysOnTopHint |
- Qt::WindowTitleHint | Qt::WindowSystemMenuHint);
- dialog.setWindowModality(Qt::WindowModal);
- if (dialog.exec() == QDialog::Rejected) {
+ cabinet_applet->setWindowFlags(Qt::Dialog | Qt::CustomizeWindowHint | Qt::WindowStaysOnTopHint |
+ Qt::WindowTitleHint | Qt::WindowSystemMenuHint);
+ cabinet_applet->setWindowModality(Qt::WindowModal);
+
+ if (cabinet_applet->exec() == QDialog::Rejected) {
emit AmiiboSettingsFinished(false, {});
return;
}
- emit AmiiboSettingsFinished(true, dialog.GetName());
+ emit AmiiboSettingsFinished(true, cabinet_applet->GetName());
+}
+
+void GMainWindow::AmiiboSettingsRequestExit() {
+ if (cabinet_applet) {
+ cabinet_applet->reject();
+ }
}
void GMainWindow::ControllerSelectorReconfigureControllers(
const Core::Frontend::ControllerParameters& parameters) {
- QtControllerSelectorDialog dialog(this, parameters, input_subsystem.get(), *system);
-
- dialog.setWindowFlags(Qt::Dialog | Qt::CustomizeWindowHint | Qt::WindowStaysOnTopHint |
- Qt::WindowTitleHint | Qt::WindowSystemMenuHint);
- dialog.setWindowModality(Qt::WindowModal);
- dialog.exec();
+ controller_applet =
+ new QtControllerSelectorDialog(this, parameters, input_subsystem.get(), *system);
+ SCOPE_EXIT({
+ controller_applet->deleteLater();
+ controller_applet = nullptr;
+ });
- emit ControllerSelectorReconfigureFinished();
+ controller_applet->setWindowFlags(Qt::Dialog | Qt::CustomizeWindowHint |
+ Qt::WindowStaysOnTopHint | Qt::WindowTitleHint |
+ Qt::WindowSystemMenuHint);
+ controller_applet->setWindowModality(Qt::WindowModal);
+ bool is_success = controller_applet->exec() != QDialog::Rejected;
// Don't forget to apply settings.
+ system->HIDCore().DisableAllControllerConfiguration();
system->ApplySettings();
config->Save();
UpdateStatusButtons();
+
+ emit ControllerSelectorReconfigureFinished(is_success);
}
-void GMainWindow::ProfileSelectorSelectProfile() {
- QtProfileSelectionDialog dialog(system->HIDCore(), this);
- dialog.setWindowFlags(Qt::Dialog | Qt::CustomizeWindowHint | Qt::WindowStaysOnTopHint |
- Qt::WindowTitleHint | Qt::WindowSystemMenuHint |
- Qt::WindowCloseButtonHint);
- dialog.setWindowModality(Qt::WindowModal);
- if (dialog.exec() == QDialog::Rejected) {
+void GMainWindow::ControllerSelectorRequestExit() {
+ if (controller_applet) {
+ controller_applet->reject();
+ }
+}
+
+void GMainWindow::ProfileSelectorSelectProfile(
+ const Core::Frontend::ProfileSelectParameters& parameters) {
+ profile_select_applet = new QtProfileSelectionDialog(system->HIDCore(), this, parameters);
+ SCOPE_EXIT({
+ profile_select_applet->deleteLater();
+ profile_select_applet = nullptr;
+ });
+
+ profile_select_applet->setWindowFlags(Qt::Dialog | Qt::CustomizeWindowHint |
+ Qt::WindowStaysOnTopHint | Qt::WindowTitleHint |
+ Qt::WindowSystemMenuHint | Qt::WindowCloseButtonHint);
+ profile_select_applet->setWindowModality(Qt::WindowModal);
+ if (profile_select_applet->exec() == QDialog::Rejected) {
emit ProfileSelectorFinishedSelection(std::nullopt);
return;
}
const Service::Account::ProfileManager manager;
- const auto uuid = manager.GetUser(static_cast<std::size_t>(dialog.GetIndex()));
+ const auto uuid = manager.GetUser(static_cast<std::size_t>(profile_select_applet->GetIndex()));
if (!uuid.has_value()) {
emit ProfileSelectorFinishedSelection(std::nullopt);
return;
@@ -648,6 +683,12 @@ void GMainWindow::ProfileSelectorSelectProfile() {
emit ProfileSelectorFinishedSelection(uuid);
}
+void GMainWindow::ProfileSelectorRequestExit() {
+ if (profile_select_applet) {
+ profile_select_applet->reject();
+ }
+}
+
void GMainWindow::SoftwareKeyboardInitialize(
bool is_inline, Core::Frontend::KeyboardInitializeParameters initialize_parameters) {
if (software_keyboard) {
@@ -772,7 +813,7 @@ void GMainWindow::WebBrowserOpenWebPage(const std::string& main_url,
return;
}
- QtNXWebEngineView web_browser_view(this, *system, input_subsystem.get());
+ web_applet = new QtNXWebEngineView(this, *system, input_subsystem.get());
ui->action_Pause->setEnabled(false);
ui->action_Restart->setEnabled(false);
@@ -799,9 +840,9 @@ void GMainWindow::WebBrowserOpenWebPage(const std::string& main_url,
loading_progress.setValue(1);
if (is_local) {
- web_browser_view.LoadLocalWebPage(main_url, additional_args);
+ web_applet->LoadLocalWebPage(main_url, additional_args);
} else {
- web_browser_view.LoadExternalWebPage(main_url, additional_args);
+ web_applet->LoadExternalWebPage(main_url, additional_args);
}
if (render_window->IsLoadingComplete()) {
@@ -810,15 +851,15 @@ void GMainWindow::WebBrowserOpenWebPage(const std::string& main_url,
const auto& layout = render_window->GetFramebufferLayout();
const auto scale_ratio = devicePixelRatioF();
- web_browser_view.resize(layout.screen.GetWidth() / scale_ratio,
- layout.screen.GetHeight() / scale_ratio);
- web_browser_view.move(layout.screen.left / scale_ratio,
- (layout.screen.top / scale_ratio) + menuBar()->height());
- web_browser_view.setZoomFactor(static_cast<qreal>(layout.screen.GetWidth() / scale_ratio) /
- static_cast<qreal>(Layout::ScreenUndocked::Width));
+ web_applet->resize(layout.screen.GetWidth() / scale_ratio,
+ layout.screen.GetHeight() / scale_ratio);
+ web_applet->move(layout.screen.left / scale_ratio,
+ (layout.screen.top / scale_ratio) + menuBar()->height());
+ web_applet->setZoomFactor(static_cast<qreal>(layout.screen.GetWidth() / scale_ratio) /
+ static_cast<qreal>(Layout::ScreenUndocked::Width));
- web_browser_view.setFocus();
- web_browser_view.show();
+ web_applet->setFocus();
+ web_applet->show();
loading_progress.setValue(2);
@@ -831,7 +872,7 @@ void GMainWindow::WebBrowserOpenWebPage(const std::string& main_url,
// TODO (Morph): Remove this
QAction* exit_action = new QAction(tr("Disable Web Applet"), this);
- connect(exit_action, &QAction::triggered, this, [this, &web_browser_view] {
+ connect(exit_action, &QAction::triggered, this, [this] {
const auto result = QMessageBox::warning(
this, tr("Disable Web Applet"),
tr("Disabling the web applet can lead to undefined behavior and should only be used "
@@ -840,21 +881,21 @@ void GMainWindow::WebBrowserOpenWebPage(const std::string& main_url,
QMessageBox::Yes | QMessageBox::No);
if (result == QMessageBox::Yes) {
UISettings::values.disable_web_applet = true;
- web_browser_view.SetFinished(true);
+ web_applet->SetFinished(true);
}
});
ui->menubar->addAction(exit_action);
- while (!web_browser_view.IsFinished()) {
+ while (!web_applet->IsFinished()) {
QCoreApplication::processEvents();
if (!exit_check) {
- web_browser_view.page()->runJavaScript(
+ web_applet->page()->runJavaScript(
QStringLiteral("end_applet;"), [&](const QVariant& variant) {
exit_check = false;
if (variant.toBool()) {
- web_browser_view.SetFinished(true);
- web_browser_view.SetExitReason(
+ web_applet->SetFinished(true);
+ web_applet->SetExitReason(
Service::AM::Applets::WebExitReason::EndButtonPressed);
}
});
@@ -862,22 +903,22 @@ void GMainWindow::WebBrowserOpenWebPage(const std::string& main_url,
exit_check = true;
}
- if (web_browser_view.GetCurrentURL().contains(QStringLiteral("localhost"))) {
- if (!web_browser_view.IsFinished()) {
- web_browser_view.SetFinished(true);
- web_browser_view.SetExitReason(Service::AM::Applets::WebExitReason::CallbackURL);
+ if (web_applet->GetCurrentURL().contains(QStringLiteral("localhost"))) {
+ if (!web_applet->IsFinished()) {
+ web_applet->SetFinished(true);
+ web_applet->SetExitReason(Service::AM::Applets::WebExitReason::CallbackURL);
}
- web_browser_view.SetLastURL(web_browser_view.GetCurrentURL().toStdString());
+ web_applet->SetLastURL(web_applet->GetCurrentURL().toStdString());
}
std::this_thread::sleep_for(std::chrono::milliseconds(1));
}
- const auto exit_reason = web_browser_view.GetExitReason();
- const auto last_url = web_browser_view.GetLastURL();
+ const auto exit_reason = web_applet->GetExitReason();
+ const auto last_url = web_applet->GetLastURL();
- web_browser_view.hide();
+ web_applet->hide();
render_window->setFocus();
@@ -903,6 +944,15 @@ void GMainWindow::WebBrowserOpenWebPage(const std::string& main_url,
#endif
}
+void GMainWindow::WebBrowserRequestExit() {
+#ifdef YUZU_USE_QT_WEB_ENGINE
+ if (web_applet) {
+ web_applet->SetExitReason(Service::AM::Applets::WebExitReason::ExitRequested);
+ web_applet->SetFinished(true);
+ }
+#endif
+}
+
void GMainWindow::InitializeWidgets() {
#ifdef YUZU_ENABLE_COMPATIBILITY_REPORTING
ui->action_Report_Compatibility->setVisible(true);
@@ -1675,8 +1725,9 @@ bool GMainWindow::LoadROM(const QString& filename, u64 program_id, std::size_t p
return true;
}
-bool GMainWindow::SelectAndSetCurrentUser() {
- QtProfileSelectionDialog dialog(system->HIDCore(), this);
+bool GMainWindow::SelectAndSetCurrentUser(
+ const Core::Frontend::ProfileSelectParameters& parameters) {
+ QtProfileSelectionDialog dialog(system->HIDCore(), this, parameters);
dialog.setWindowFlags(Qt::Dialog | Qt::CustomizeWindowHint | Qt::WindowTitleHint |
Qt::WindowSystemMenuHint | Qt::WindowCloseButtonHint);
dialog.setWindowModality(Qt::WindowModal);
@@ -1722,7 +1773,13 @@ void GMainWindow::BootGame(const QString& filename, u64 program_id, std::size_t
Settings::LogSettings();
if (UISettings::values.select_user_on_boot) {
- if (SelectAndSetCurrentUser() == false) {
+ const Core::Frontend::ProfileSelectParameters parameters{
+ .mode = Service::AM::Applets::UiMode::UserSelector,
+ .invalid_uid_list = {},
+ .display_options = {},
+ .purpose = Service::AM::Applets::UserSelectionPurpose::General,
+ };
+ if (!SelectAndSetCurrentUser(parameters)) {
return;
}
}
@@ -2014,7 +2071,13 @@ void GMainWindow::OnGameListOpenFolder(u64 program_id, GameListOpenTarget target
if (has_user_save) {
// User save data
const auto select_profile = [this] {
- QtProfileSelectionDialog dialog(system->HIDCore(), this);
+ const Core::Frontend::ProfileSelectParameters parameters{
+ .mode = Service::AM::Applets::UiMode::UserSelector,
+ .invalid_uid_list = {},
+ .display_options = {},
+ .purpose = Service::AM::Applets::UserSelectionPurpose::General,
+ };
+ QtProfileSelectionDialog dialog(system->HIDCore(), this, parameters);
dialog.setWindowFlags(Qt::Dialog | Qt::CustomizeWindowHint | Qt::WindowTitleHint |
Qt::WindowSystemMenuHint | Qt::WindowCloseButtonHint);
dialog.setWindowModality(Qt::WindowModal);
@@ -3089,13 +3152,23 @@ void GMainWindow::OnSaveConfig() {
}
void GMainWindow::ErrorDisplayDisplayError(QString error_code, QString error_text) {
- OverlayDialog dialog(render_window, *system, error_code, error_text, QString{}, tr("OK"),
- Qt::AlignLeft | Qt::AlignVCenter);
- dialog.exec();
+ error_applet = new OverlayDialog(render_window, *system, error_code, error_text, QString{},
+ tr("OK"), Qt::AlignLeft | Qt::AlignVCenter);
+ SCOPE_EXIT({
+ error_applet->deleteLater();
+ error_applet = nullptr;
+ });
+ error_applet->exec();
emit ErrorDisplayFinished();
}
+void GMainWindow::ErrorDisplayRequestExit() {
+ if (error_applet) {
+ error_applet->reject();
+ }
+}
+
void GMainWindow::OnMenuReportCompatibility() {
#if defined(ARCHITECTURE_x86_64) && !defined(__APPLE__)
const auto& caps = Common::GetCPUCaps();
diff --git a/src/yuzu/main.h b/src/yuzu/main.h
index a23b373a5..8b5c1d747 100644
--- a/src/yuzu/main.h
+++ b/src/yuzu/main.h
@@ -47,7 +47,11 @@ enum class DumpRomFSTarget;
enum class InstalledEntryType;
class GameListPlaceholder;
+class QtAmiiboSettingsDialog;
+class QtControllerSelectorDialog;
+class QtProfileSelectionDialog;
class QtSoftwareKeyboardDialog;
+class QtNXWebEngineView;
enum class StartGameType {
Normal, // Can use custom configuration
@@ -65,6 +69,7 @@ struct ControllerParameters;
struct InlineAppearParameters;
struct InlineTextParameters;
struct KeyboardInitializeParameters;
+struct ProfileSelectParameters;
} // namespace Core::Frontend
namespace DiscordRPC {
@@ -161,7 +166,7 @@ signals:
void AmiiboSettingsFinished(bool is_success, const std::string& name);
- void ControllerSelectorReconfigureFinished();
+ void ControllerSelectorReconfigureFinished(bool is_success);
void ErrorDisplayFinished();
@@ -184,8 +189,10 @@ public slots:
void OnSaveConfig();
void AmiiboSettingsShowDialog(const Core::Frontend::CabinetParameters& parameters,
std::shared_ptr<Service::NFP::NfpDevice> nfp_device);
+ void AmiiboSettingsRequestExit();
void ControllerSelectorReconfigureControllers(
const Core::Frontend::ControllerParameters& parameters);
+ void ControllerSelectorRequestExit();
void SoftwareKeyboardInitialize(
bool is_inline, Core::Frontend::KeyboardInitializeParameters initialize_parameters);
void SoftwareKeyboardShowNormal();
@@ -196,9 +203,12 @@ public slots:
void SoftwareKeyboardInlineTextChanged(Core::Frontend::InlineTextParameters text_parameters);
void SoftwareKeyboardExit();
void ErrorDisplayDisplayError(QString error_code, QString error_text);
- void ProfileSelectorSelectProfile();
+ void ErrorDisplayRequestExit();
+ void ProfileSelectorSelectProfile(const Core::Frontend::ProfileSelectParameters& parameters);
+ void ProfileSelectorRequestExit();
void WebBrowserOpenWebPage(const std::string& main_url, const std::string& additional_args,
bool is_local);
+ void WebBrowserRequestExit();
void OnAppFocusStateChanged(Qt::ApplicationState state);
void OnTasStateChanged();
@@ -233,7 +243,7 @@ private:
void SetDiscordEnabled(bool state);
void LoadAmiibo(const QString& filename);
- bool SelectAndSetCurrentUser();
+ bool SelectAndSetCurrentUser(const Core::Frontend::ProfileSelectParameters& parameters);
/**
* Stores the filename in the recently loaded files list.
@@ -466,7 +476,12 @@ private:
QString last_filename_booted;
// Applets
+ QtAmiiboSettingsDialog* cabinet_applet = nullptr;
+ QtControllerSelectorDialog* controller_applet = nullptr;
+ QtProfileSelectionDialog* profile_select_applet = nullptr;
+ QDialog* error_applet = nullptr;
QtSoftwareKeyboardDialog* software_keyboard = nullptr;
+ QtNXWebEngineView* web_applet = nullptr;
// True if amiibo file select is visible
bool is_amiibo_file_select_active{};
diff --git a/src/yuzu_cmd/default_ini.h b/src/yuzu_cmd/default_ini.h
index 20e403400..209cfc28a 100644
--- a/src/yuzu_cmd/default_ini.h
+++ b/src/yuzu_cmd/default_ini.h
@@ -163,7 +163,7 @@ keyboard_enabled =
# 0: Disabled, 1 (default): Enabled
use_multi_core =
-# Enable extended guest system memory layout (6GB DRAM)
+# Enable extended guest system memory layout (8GB DRAM)
# 0 (default): Disabled, 1: Enabled
use_extended_memory_layout =