-rw-r--r-- .git-blame-ignore-revs | 5
-rw-r--r-- .github/workflows/android-build.yml | 2
-rw-r--r-- src/CMakeLists.txt | 6
-rw-r--r-- src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/model/Settings.kt | 1
-rw-r--r-- src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/model/view/RunnableSetting.kt | 3
-rw-r--r-- src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/model/view/SubmenuSetting.kt | 7
-rw-r--r-- src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/SettingsFragment.kt | 30
-rw-r--r-- src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/SettingsFragmentPresenter.kt | 61
-rw-r--r-- src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/viewholder/RunnableViewHolder.kt | 14
-rw-r--r-- src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/viewholder/SubmenuViewHolder.kt | 14
-rw-r--r-- src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/AboutFragment.kt | 8
-rw-r--r-- src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/EmulationFragment.kt | 29
-rw-r--r-- src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/SettingsSearchFragment.kt | 7
-rw-r--r-- src/android/app/src/main/java/org/yuzu/yuzu_emu/model/SettingsViewModel.kt | 7
-rw-r--r-- src/android/app/src/main/jni/emu_window/emu_window.cpp | 8
-rw-r--r-- src/android/app/src/main/jni/emu_window/emu_window.h | 4
-rw-r--r-- src/android/app/src/main/jni/native.cpp | 11
-rw-r--r-- src/android/app/src/main/jni/native.h | 6
-rw-r--r-- src/android/app/src/main/res/drawable/ic_audio.xml | 9
-rw-r--r-- src/android/app/src/main/res/drawable/ic_code.xml | 9
-rw-r--r-- src/android/app/src/main/res/drawable/ic_graphics.xml | 9
-rw-r--r-- src/android/app/src/main/res/drawable/ic_system_settings.xml | 9
-rw-r--r-- src/android/app/src/main/res/layout-w600dp/fragment_about.xml | 233
-rw-r--r-- src/android/app/src/main/res/layout/card_home_option.xml | 4
-rw-r--r-- src/android/app/src/main/res/layout/fragment_about.xml | 10
-rw-r--r-- src/android/app/src/main/res/layout/fragment_emulation.xml | 8
-rw-r--r-- src/android/app/src/main/res/layout/fragment_home_settings.xml | 9
-rw-r--r-- src/android/app/src/main/res/layout/list_item_setting.xml | 72
-rw-r--r-- src/android/app/src/main/res/layout/list_item_setting_switch.xml | 8
-rw-r--r-- src/android/app/src/main/res/layout/list_item_settings_header.xml | 3
-rw-r--r-- src/android/app/src/main/res/values/arrays.xml | 2
-rw-r--r-- src/android/app/src/main/res/values/strings.xml | 5
-rw-r--r-- src/audio_core/adsp/apps/opus/opus_decode_object.cpp | 214
-rw-r--r-- src/audio_core/adsp/apps/opus/opus_multistream_decode_object.cpp | 222
-rw-r--r-- src/audio_core/opus/decoder.cpp | 358
-rw-r--r-- src/audio_core/opus/decoder.h | 106
-rw-r--r-- src/audio_core/opus/decoder_manager.cpp | 204
-rw-r--r-- src/audio_core/opus/decoder_manager.h | 76
-rw-r--r-- src/audio_core/opus/hardware_opus.cpp | 482
-rw-r--r-- src/audio_core/opus/hardware_opus.h | 90
-rw-r--r-- src/common/page_table.cpp | 30
-rw-r--r-- src/common/page_table.h | 17
-rw-r--r-- src/core/CMakeLists.txt | 6
-rw-r--r-- src/core/debugger/gdbstub.cpp | 233
-rw-r--r-- src/core/hid/emulated_controller.cpp | 37
-rw-r--r-- src/core/hid/emulated_controller.h | 3
-rw-r--r-- src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp | 13
-rw-r--r-- src/core/hle/kernel/board/nintendo/nx/k_system_control.h | 7
-rw-r--r-- src/core/hle/kernel/k_capabilities.cpp | 39
-rw-r--r-- src/core/hle/kernel/k_capabilities.h | 17
-rw-r--r-- src/core/hle/kernel/k_device_address_space.cpp | 4
-rw-r--r-- src/core/hle/kernel/k_device_address_space.h | 10
-rw-r--r-- src/core/hle/kernel/k_memory_layout.h | 8
-rw-r--r-- src/core/hle/kernel/k_memory_manager.cpp | 12
-rw-r--r-- src/core/hle/kernel/k_page_table.cpp | 3519
-rw-r--r-- src/core/hle/kernel/k_page_table.h | 542
-rw-r--r-- src/core/hle/kernel/k_page_table_base.cpp | 5716
-rw-r--r-- src/core/hle/kernel/k_page_table_base.h | 759
-rw-r--r-- src/core/hle/kernel/k_process.cpp | 18
-rw-r--r-- src/core/hle/kernel/k_process.h | 14
-rw-r--r-- src/core/hle/kernel/k_process_page_table.h | 480
-rw-r--r-- src/core/hle/kernel/k_server_session.cpp | 2
-rw-r--r-- src/core/hle/kernel/k_system_resource.cpp | 2
-rw-r--r-- src/core/hle/kernel/k_thread_local_page.cpp | 4
-rw-r--r-- src/core/hle/kernel/process_capability.cpp | 389
-rw-r--r-- src/core/hle/kernel/process_capability.h | 266
-rw-r--r-- src/core/hle/kernel/svc/svc_memory.cpp | 6
-rw-r--r-- src/core/hle/kernel/svc/svc_physical_memory.cpp | 9
-rw-r--r-- src/core/hle/kernel/svc/svc_process_memory.cpp | 3
-rw-r--r-- src/core/hle/kernel/svc/svc_query_memory.cpp | 8
-rw-r--r-- src/core/hle/result.h | 31
-rw-r--r-- src/core/hle/service/acc/acc.cpp | 56
-rw-r--r-- src/core/hle/service/am/applets/applets.h | 24
-rw-r--r-- src/core/hle/service/hid/irs.cpp | 2
-rw-r--r-- src/core/hle/service/hid/irsensor/clustering_processor.cpp | 16
-rw-r--r-- src/core/hle/service/hid/irsensor/clustering_processor.h | 9
-rw-r--r-- src/core/hle/service/hid/irsensor/image_transfer_processor.cpp | 2
-rw-r--r-- src/core/hle/service/hid/irsensor/moment_processor.cpp | 123
-rw-r--r-- src/core/hle/service/hid/irsensor/moment_processor.h | 34
-rw-r--r-- src/core/hle/service/ldr/ldr.cpp | 45
-rw-r--r-- src/core/hle/service/nvdrv/devices/ioctl_serialization.h | 159
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp | 82
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h | 20
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp | 42
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvhost_ctrl.h | 12
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp | 115
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h | 29
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp | 117
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvhost_gpu.h | 35
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp | 15
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp | 87
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h | 14
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp | 7
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h | 2
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvhost_vic.cpp | 13
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvmap.cpp | 47
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvmap.h | 12
-rw-r--r-- src/core/hle/service/nvnflinger/buffer_transform_flags.h | 2
-rw-r--r-- src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp | 27
-rw-r--r-- src/core/memory.cpp | 6
-rw-r--r-- src/video_core/renderer_null/null_rasterizer.cpp | 13
-rw-r--r-- src/video_core/renderer_vulkan/vk_blit_screen.cpp | 135
-rw-r--r-- src/video_core/renderer_vulkan/vk_fsr.cpp | 24
-rw-r--r-- src/video_core/renderer_vulkan/vk_fsr.h | 2
-rw-r--r-- src/video_core/renderer_vulkan/vk_rasterizer.cpp | 2
-rw-r--r-- src/yuzu/configuration/config.h | 2
-rw-r--r-- src/yuzu/configuration/configure_camera.h | 2
-rw-r--r-- src/yuzu/configuration/configure_input.cpp | 2
-rw-r--r-- src/yuzu/configuration/configure_input.h | 2
-rw-r--r-- src/yuzu/configuration/configure_input_advanced.cpp | 8
-rw-r--r-- src/yuzu/configuration/configure_input_advanced.h | 8
-rw-r--r-- src/yuzu/configuration/configure_input_player.h | 2
-rw-r--r-- src/yuzu/configuration/configure_per_game.h | 2
-rw-r--r-- src/yuzu/configuration/configure_profile_manager.cpp | 6
-rw-r--r-- src/yuzu/configuration/configure_ringcon.h | 2
-rw-r--r-- src/yuzu/configuration/configure_tas.h | 2
-rw-r--r-- src/yuzu/configuration/configure_touchscreen_advanced.h | 2
-rw-r--r-- src/yuzu/main.cpp | 18
-rw-r--r-- src/yuzu/vk_device_info.cpp | 1
119 files changed, 9456 insertions(+), 6502 deletions(-)
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
new file mode 100644
index 000000000..109f2f45e
--- /dev/null
+++ b/.git-blame-ignore-revs
@@ -0,0 +1,5 @@
+# SPDX-FileCopyrightText: 2023 yuzu Emulator Project
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+# CRLF -> LF
+90aa937593e53a5d5e070fb623b228578b0b225f
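[Note: the ignore list above is opt-in on the client side. Assuming a stock git (2.23 or newer), either of the following makes blame skip the listed revision; the file path in the second command is just one of the files touched by this commit, used for illustration:

    # one-time repository setting
    git config blame.ignoreRevsFile .git-blame-ignore-revs
    # or per invocation
    git blame --ignore-revs-file=.git-blame-ignore-revs src/core/memory.cpp
]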
diff --git a/.github/workflows/android-build.yml b/.github/workflows/android-build.yml
index 5893f860e..80aea4aa7 100644
--- a/.github/workflows/android-build.yml
+++ b/.github/workflows/android-build.yml
@@ -40,11 +40,11 @@ jobs:
sudo apt-get install -y ccache apksigner glslang-dev glslang-tools
- name: Build
run: ./.ci/scripts/android/build.sh
- - name: Copy and sign artifacts
env:
ANDROID_KEYSTORE_B64: ${{ secrets.ANDROID_KEYSTORE_B64 }}
ANDROID_KEY_ALIAS: ${{ secrets.ANDROID_KEY_ALIAS }}
ANDROID_KEYSTORE_PASS: ${{ secrets.ANDROID_KEYSTORE_PASS }}
+ - name: Copy artifacts
run: ./.ci/scripts/android/upload.sh
- name: Upload
uses: actions/upload-artifact@v3
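[Note: because only the "- name:" line moves, the env block now attaches to the preceding Build step. The resulting step layout, reconstructed from this hunk alone (keys outside the hunk assumed unchanged):

    - name: Build
      run: ./.ci/scripts/android/build.sh
      env:
        ANDROID_KEYSTORE_B64: ${{ secrets.ANDROID_KEYSTORE_B64 }}
        ANDROID_KEY_ALIAS: ${{ secrets.ANDROID_KEY_ALIAS }}
        ANDROID_KEYSTORE_PASS: ${{ secrets.ANDROID_KEYSTORE_PASS }}
    - name: Copy artifacts
      run: ./.ci/scripts/android/upload.sh

Keys within a step mapping are order-insensitive in YAML, so env binding after run is valid; presumably signing now happens during build.sh, which matches the renamed copy-only step.]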
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index d7f68618c..d2ca4904a 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -21,7 +21,7 @@ if (MSVC)
# Avoid windows.h from including some usually unused libs like winsocks.h, since this might cause some redefinition errors.
add_definitions(-DWIN32_LEAN_AND_MEAN)
- # Ensure that projects build with Unicode support.
+ # Ensure that projects are built with Unicode support.
add_definitions(-DUNICODE -D_UNICODE)
# /W4 - Level 4 warnings
@@ -54,11 +54,11 @@ if (MSVC)
/GT
# Modules
- /experimental:module- # Disable module support explicitly due to conflicts with precompiled headers
+ /experimental:module- # Explicitly disable module support due to conflicts with precompiled headers.
# External headers diagnostics
/external:anglebrackets # Treats all headers included by #include <header>, where the header file is enclosed in angle brackets (< >), as external headers
- /external:W0 # Sets the default warning level to 0 for external headers, effectively turning off warnings for external headers
+ /external:W0 # Sets the default warning level to 0 for external headers, effectively disabling warnings for them.
# Warnings
/W4
diff --git a/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/model/Settings.kt b/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/model/Settings.kt
index 08e2a973d..2bf0e1b0d 100644
--- a/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/model/Settings.kt
+++ b/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/model/Settings.kt
@@ -82,7 +82,6 @@ object Settings {
enum class MenuTag(val titleId: Int) {
SECTION_ROOT(R.string.advanced_settings),
- SECTION_GENERAL(R.string.preferences_general),
SECTION_SYSTEM(R.string.preferences_system),
SECTION_RENDERER(R.string.preferences_graphics),
SECTION_AUDIO(R.string.preferences_audio),
diff --git a/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/model/view/RunnableSetting.kt b/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/model/view/RunnableSetting.kt
index 522cc49df..425160024 100644
--- a/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/model/view/RunnableSetting.kt
+++ b/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/model/view/RunnableSetting.kt
@@ -3,10 +3,13 @@
package org.yuzu.yuzu_emu.features.settings.model.view
+import androidx.annotation.DrawableRes
+
class RunnableSetting(
titleId: Int,
descriptionId: Int,
val isRuntimeRunnable: Boolean,
+ @DrawableRes val iconId: Int = 0,
val runnable: () -> Unit
) : SettingsItem(emptySetting, titleId, descriptionId) {
override val type = TYPE_RUNNABLE
diff --git a/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/model/view/SubmenuSetting.kt b/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/model/view/SubmenuSetting.kt
index b343e527e..94953b18a 100644
--- a/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/model/view/SubmenuSetting.kt
+++ b/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/model/view/SubmenuSetting.kt
@@ -3,11 +3,14 @@
package org.yuzu.yuzu_emu.features.settings.model.view
+import androidx.annotation.DrawableRes
+import androidx.annotation.StringRes
import org.yuzu.yuzu_emu.features.settings.model.Settings
class SubmenuSetting(
- titleId: Int,
- descriptionId: Int,
+ @StringRes titleId: Int,
+ @StringRes descriptionId: Int,
+ @DrawableRes val iconId: Int,
val menuKey: Settings.MenuTag
) : SettingsItem(emptySetting, titleId, descriptionId) {
override val type = TYPE_SUBMENU
diff --git a/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/SettingsFragment.kt b/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/SettingsFragment.kt
index 70d8ec14b..769baf744 100644
--- a/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/SettingsFragment.kt
+++ b/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/SettingsFragment.kt
@@ -20,7 +20,6 @@ import androidx.lifecycle.repeatOnLifecycle
import androidx.navigation.findNavController
import androidx.navigation.fragment.navArgs
import androidx.recyclerview.widget.LinearLayoutManager
-import com.google.android.material.divider.MaterialDividerItemDecoration
import com.google.android.material.transition.MaterialSharedAxis
import kotlinx.coroutines.flow.collectLatest
import kotlinx.coroutines.launch
@@ -68,15 +67,9 @@ class SettingsFragment : Fragment() {
)
binding.toolbarSettingsLayout.title = getString(args.menuTag.titleId)
- val dividerDecoration = MaterialDividerItemDecoration(
- requireContext(),
- LinearLayoutManager.VERTICAL
- )
- dividerDecoration.isLastItemDecorated = false
binding.listSettings.apply {
adapter = settingsAdapter
layoutManager = LinearLayoutManager(requireContext())
- addItemDecoration(dividerDecoration)
}
binding.toolbarSettings.setNavigationOnClickListener {
@@ -94,17 +87,6 @@ class SettingsFragment : Fragment() {
}
}
}
- launch {
- settingsViewModel.isUsingSearch.collectLatest {
- if (it) {
- reenterTransition = MaterialSharedAxis(MaterialSharedAxis.Z, true)
- exitTransition = MaterialSharedAxis(MaterialSharedAxis.Z, false)
- } else {
- reenterTransition = MaterialSharedAxis(MaterialSharedAxis.X, false)
- exitTransition = MaterialSharedAxis(MaterialSharedAxis.X, true)
- }
- }
- }
}
if (args.menuTag == Settings.MenuTag.SECTION_ROOT) {
@@ -112,8 +94,6 @@ class SettingsFragment : Fragment() {
binding.toolbarSettings.setOnMenuItemClickListener {
when (it.itemId) {
R.id.action_search -> {
- reenterTransition = MaterialSharedAxis(MaterialSharedAxis.Z, true)
- exitTransition = MaterialSharedAxis(MaterialSharedAxis.Z, false)
view.findNavController()
.navigate(R.id.action_settingsFragment_to_settingsSearchFragment)
true
@@ -129,11 +109,6 @@ class SettingsFragment : Fragment() {
setInsets()
}
- override fun onResume() {
- super.onResume()
- settingsViewModel.setIsUsingSearch(false)
- }
-
private fun setInsets() {
ViewCompat.setOnApplyWindowInsetsListener(
binding.root
@@ -144,10 +119,9 @@ class SettingsFragment : Fragment() {
val leftInsets = barInsets.left + cutoutInsets.left
val rightInsets = barInsets.right + cutoutInsets.right
- val sideMargin = resources.getDimensionPixelSize(R.dimen.spacing_medlarge)
val mlpSettingsList = binding.listSettings.layoutParams as MarginLayoutParams
- mlpSettingsList.leftMargin = sideMargin + leftInsets
- mlpSettingsList.rightMargin = sideMargin + rightInsets
+ mlpSettingsList.leftMargin = leftInsets
+ mlpSettingsList.rightMargin = rightInsets
binding.listSettings.layoutParams = mlpSettingsList
binding.listSettings.updatePadding(
bottom = barInsets.bottom
diff --git a/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/SettingsFragmentPresenter.kt b/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/SettingsFragmentPresenter.kt
index 766414a6c..8b71e32f3 100644
--- a/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/SettingsFragmentPresenter.kt
+++ b/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/SettingsFragmentPresenter.kt
@@ -3,7 +3,6 @@
package org.yuzu.yuzu_emu.features.settings.ui
-import android.content.Context
import android.content.SharedPreferences
import android.os.Build
import android.widget.Toast
@@ -32,8 +31,6 @@ class SettingsFragmentPresenter(
private val preferences: SharedPreferences
get() = PreferenceManager.getDefaultSharedPreferences(YuzuApplication.appContext)
- private val context: Context get() = YuzuApplication.appContext
-
// Extension for populating settings list based on paired settings
fun ArrayList<SettingsItem>.add(key: String) {
val item = SettingsItem.settingsItems[key]!!
@@ -53,7 +50,6 @@ class SettingsFragmentPresenter(
val sl = ArrayList<SettingsItem>()
when (menuTag) {
Settings.MenuTag.SECTION_ROOT -> addConfigSettings(sl)
- Settings.MenuTag.SECTION_GENERAL -> addGeneralSettings(sl)
Settings.MenuTag.SECTION_SYSTEM -> addSystemSettings(sl)
Settings.MenuTag.SECTION_RENDERER -> addGraphicsSettings(sl)
Settings.MenuTag.SECTION_AUDIO -> addAudioSettings(sl)
@@ -75,30 +71,53 @@ class SettingsFragmentPresenter(
private fun addConfigSettings(sl: ArrayList<SettingsItem>) {
sl.apply {
- add(SubmenuSetting(R.string.preferences_general, 0, Settings.MenuTag.SECTION_GENERAL))
- add(SubmenuSetting(R.string.preferences_system, 0, Settings.MenuTag.SECTION_SYSTEM))
- add(SubmenuSetting(R.string.preferences_graphics, 0, Settings.MenuTag.SECTION_RENDERER))
- add(SubmenuSetting(R.string.preferences_audio, 0, Settings.MenuTag.SECTION_AUDIO))
- add(SubmenuSetting(R.string.preferences_debug, 0, Settings.MenuTag.SECTION_DEBUG))
add(
- RunnableSetting(R.string.reset_to_default, 0, false) {
- settingsViewModel.setShouldShowResetSettingsDialog(true)
- }
+ SubmenuSetting(
+ R.string.preferences_system,
+ R.string.preferences_system_description,
+ R.drawable.ic_system_settings,
+ Settings.MenuTag.SECTION_SYSTEM
+ )
+ )
+ add(
+ SubmenuSetting(
+ R.string.preferences_graphics,
+ R.string.preferences_graphics_description,
+ R.drawable.ic_graphics,
+ Settings.MenuTag.SECTION_RENDERER
+ )
+ )
+ add(
+ SubmenuSetting(
+ R.string.preferences_audio,
+ R.string.preferences_audio_description,
+ R.drawable.ic_audio,
+ Settings.MenuTag.SECTION_AUDIO
+ )
+ )
+ add(
+ SubmenuSetting(
+ R.string.preferences_debug,
+ R.string.preferences_debug_description,
+ R.drawable.ic_code,
+ Settings.MenuTag.SECTION_DEBUG
+ )
+ )
+ add(
+ RunnableSetting(
+ R.string.reset_to_default,
+ R.string.reset_to_default_description,
+ false,
+ R.drawable.ic_restore
+ ) { settingsViewModel.setShouldShowResetSettingsDialog(true) }
)
}
}
- private fun addGeneralSettings(sl: ArrayList<SettingsItem>) {
+ private fun addSystemSettings(sl: ArrayList<SettingsItem>) {
sl.apply {
add(BooleanSetting.RENDERER_USE_SPEED_LIMIT.key)
add(ShortSetting.RENDERER_SPEED_LIMIT.key)
- add(IntSetting.CPU_ACCURACY.key)
- add(BooleanSetting.PICTURE_IN_PICTURE.key)
- }
- }
-
- private fun addSystemSettings(sl: ArrayList<SettingsItem>) {
- sl.apply {
add(BooleanSetting.USE_DOCKED_MODE.key)
add(IntSetting.REGION_INDEX.key)
add(IntSetting.LANGUAGE_INDEX.key)
@@ -116,6 +135,7 @@ class SettingsFragmentPresenter(
add(IntSetting.RENDERER_ANTI_ALIASING.key)
add(IntSetting.RENDERER_SCREEN_LAYOUT.key)
add(IntSetting.RENDERER_ASPECT_RATIO.key)
+ add(BooleanSetting.PICTURE_IN_PICTURE.key)
add(BooleanSetting.RENDERER_USE_DISK_SHADER_CACHE.key)
add(BooleanSetting.RENDERER_FORCE_MAX_CLOCK.key)
add(BooleanSetting.RENDERER_ASYNCHRONOUS_SHADERS.key)
@@ -249,6 +269,7 @@ class SettingsFragmentPresenter(
add(BooleanSetting.RENDERER_DEBUG.key)
add(HeaderSetting(R.string.cpu))
+ add(IntSetting.CPU_ACCURACY.key)
add(BooleanSetting.CPU_DEBUG_MODE.key)
add(SettingsItem.FASTMEM_COMBINED)
}
diff --git a/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/viewholder/RunnableViewHolder.kt b/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/viewholder/RunnableViewHolder.kt
index 83a2e94f1..036195624 100644
--- a/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/viewholder/RunnableViewHolder.kt
+++ b/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/viewholder/RunnableViewHolder.kt
@@ -4,6 +4,7 @@
package org.yuzu.yuzu_emu.features.settings.ui.viewholder
import android.view.View
+import androidx.core.content.res.ResourcesCompat
import org.yuzu.yuzu_emu.NativeLibrary
import org.yuzu.yuzu_emu.databinding.ListItemSettingBinding
import org.yuzu.yuzu_emu.features.settings.model.view.RunnableSetting
@@ -16,6 +17,19 @@ class RunnableViewHolder(val binding: ListItemSettingBinding, adapter: SettingsA
override fun bind(item: SettingsItem) {
setting = item as RunnableSetting
+ if (item.iconId != 0) {
+ binding.icon.visibility = View.VISIBLE
+ binding.icon.setImageDrawable(
+ ResourcesCompat.getDrawable(
+ binding.icon.resources,
+ item.iconId,
+ binding.icon.context.theme
+ )
+ )
+ } else {
+ binding.icon.visibility = View.GONE
+ }
+
binding.textSettingName.setText(item.nameId)
if (item.descriptionId != 0) {
binding.textSettingDescription.setText(item.descriptionId)
diff --git a/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/viewholder/SubmenuViewHolder.kt b/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/viewholder/SubmenuViewHolder.kt
index 1cf581a9d..8100c65dd 100644
--- a/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/viewholder/SubmenuViewHolder.kt
+++ b/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/viewholder/SubmenuViewHolder.kt
@@ -4,6 +4,7 @@
package org.yuzu.yuzu_emu.features.settings.ui.viewholder
import android.view.View
+import androidx.core.content.res.ResourcesCompat
import org.yuzu.yuzu_emu.databinding.ListItemSettingBinding
import org.yuzu.yuzu_emu.features.settings.model.view.SettingsItem
import org.yuzu.yuzu_emu.features.settings.model.view.SubmenuSetting
@@ -15,6 +16,19 @@ class SubmenuViewHolder(val binding: ListItemSettingBinding, adapter: SettingsAd
override fun bind(item: SettingsItem) {
this.item = item as SubmenuSetting
+ if (item.iconId != 0) {
+ binding.icon.visibility = View.VISIBLE
+ binding.icon.setImageDrawable(
+ ResourcesCompat.getDrawable(
+ binding.icon.resources,
+ item.iconId,
+ binding.icon.context.theme
+ )
+ )
+ } else {
+ binding.icon.visibility = View.GONE
+ }
+
binding.textSettingName.setText(item.nameId)
if (item.descriptionId != 0) {
binding.textSettingDescription.setText(item.descriptionId)
diff --git a/src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/AboutFragment.kt b/src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/AboutFragment.kt
index 2ff827c6b..a1620fbb7 100644
--- a/src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/AboutFragment.kt
+++ b/src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/AboutFragment.kt
@@ -114,10 +114,10 @@ class AboutFragment : Fragment() {
val leftInsets = barInsets.left + cutoutInsets.left
val rightInsets = barInsets.right + cutoutInsets.right
- val mlpAppBar = binding.appbarAbout.layoutParams as MarginLayoutParams
- mlpAppBar.leftMargin = leftInsets
- mlpAppBar.rightMargin = rightInsets
- binding.appbarAbout.layoutParams = mlpAppBar
+ val mlpToolbar = binding.toolbarAbout.layoutParams as MarginLayoutParams
+ mlpToolbar.leftMargin = leftInsets
+ mlpToolbar.rightMargin = rightInsets
+ binding.toolbarAbout.layoutParams = mlpToolbar
val mlpScrollAbout = binding.scrollAbout.layoutParams as MarginLayoutParams
mlpScrollAbout.leftMargin = leftInsets
diff --git a/src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/EmulationFragment.kt b/src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/EmulationFragment.kt
index c456c0592..c32fa0d7e 100644
--- a/src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/EmulationFragment.kt
+++ b/src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/EmulationFragment.kt
@@ -10,7 +10,6 @@ import android.content.DialogInterface
import android.content.SharedPreferences
import android.content.pm.ActivityInfo
import android.content.res.Configuration
-import android.graphics.Color
import android.net.Uri
import android.os.Bundle
import android.os.Handler
@@ -155,7 +154,6 @@ class EmulationFragment : Fragment(), SurfaceHolder.Callback {
}
binding.surfaceEmulation.holder.addCallback(this)
- binding.showFpsText.setTextColor(Color.YELLOW)
binding.doneControlConfig.setOnClickListener { stopConfiguringControls() }
binding.drawerLayout.addDrawerListener(object : DrawerListener {
@@ -414,12 +412,12 @@ class EmulationFragment : Fragment(), SurfaceHolder.Callback {
val FRAMETIME = 2
val SPEED = 3
perfStatsUpdater = {
- if (emulationViewModel.emulationStarted.value == true) {
+ if (emulationViewModel.emulationStarted.value) {
val perfStats = NativeLibrary.getPerfStats()
- if (perfStats[FPS] > 0 && _binding != null) {
+ if (_binding != null) {
binding.showFpsText.text = String.format("FPS: %.1f", perfStats[FPS])
}
- perfStatsUpdateHandler.postDelayed(perfStatsUpdater!!, 100)
+ perfStatsUpdateHandler.postDelayed(perfStatsUpdater!!, 800)
}
}
perfStatsUpdateHandler.post(perfStatsUpdater!!)
@@ -464,7 +462,6 @@ class EmulationFragment : Fragment(), SurfaceHolder.Callback {
if (it.orientation == FoldingFeature.Orientation.HORIZONTAL) {
// Restrict emulation and overlays to the top of the screen
binding.emulationContainer.layoutParams.height = it.bounds.top
- binding.overlayContainer.layoutParams.height = it.bounds.top
// Restrict input and menu drawer to the bottom of the screen
binding.inputContainer.layoutParams.height = it.bounds.bottom
binding.inGameMenu.layoutParams.height = it.bounds.bottom
@@ -478,7 +475,6 @@ class EmulationFragment : Fragment(), SurfaceHolder.Callback {
if (!isFolding) {
binding.emulationContainer.layoutParams.height = ViewGroup.LayoutParams.MATCH_PARENT
binding.inputContainer.layoutParams.height = ViewGroup.LayoutParams.MATCH_PARENT
- binding.overlayContainer.layoutParams.height = ViewGroup.LayoutParams.MATCH_PARENT
binding.inGameMenu.layoutParams.height = ViewGroup.LayoutParams.MATCH_PARENT
isInFoldableLayout = false
updateOrientation()
@@ -486,7 +482,6 @@ class EmulationFragment : Fragment(), SurfaceHolder.Callback {
}
binding.emulationContainer.requestLayout()
binding.inputContainer.requestLayout()
- binding.overlayContainer.requestLayout()
binding.inGameMenu.requestLayout()
}
@@ -712,24 +707,6 @@ class EmulationFragment : Fragment(), SurfaceHolder.Callback {
}
v.setPadding(left, cutInsets.top, right, 0)
-
- // Ensure FPS text doesn't get cut off by rounded display corners
- val sidePadding = resources.getDimensionPixelSize(R.dimen.spacing_xtralarge)
- if (cutInsets.left == 0) {
- binding.showFpsText.setPadding(
- sidePadding,
- cutInsets.top,
- cutInsets.right,
- cutInsets.bottom
- )
- } else {
- binding.showFpsText.setPadding(
- cutInsets.left,
- cutInsets.top,
- cutInsets.right,
- cutInsets.bottom
- )
- }
windowInsets
}
}
diff --git a/src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/SettingsSearchFragment.kt b/src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/SettingsSearchFragment.kt
index 9d0594c6e..f95d545bf 100644
--- a/src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/SettingsSearchFragment.kt
+++ b/src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/SettingsSearchFragment.kt
@@ -40,8 +40,10 @@ class SettingsSearchFragment : Fragment() {
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
- enterTransition = MaterialSharedAxis(MaterialSharedAxis.Z, false)
- returnTransition = MaterialSharedAxis(MaterialSharedAxis.Z, true)
+ enterTransition = MaterialSharedAxis(MaterialSharedAxis.X, true)
+ returnTransition = MaterialSharedAxis(MaterialSharedAxis.X, false)
+ reenterTransition = MaterialSharedAxis(MaterialSharedAxis.X, false)
+ exitTransition = MaterialSharedAxis(MaterialSharedAxis.X, true)
}
override fun onCreateView(
@@ -55,7 +57,6 @@ class SettingsSearchFragment : Fragment() {
override fun onViewCreated(view: View, savedInstanceState: Bundle?) {
super.onViewCreated(view, savedInstanceState)
- settingsViewModel.setIsUsingSearch(true)
if (savedInstanceState != null) {
binding.searchText.setText(savedInstanceState.getString(SEARCH_TEXT))
diff --git a/src/android/app/src/main/java/org/yuzu/yuzu_emu/model/SettingsViewModel.kt b/src/android/app/src/main/java/org/yuzu/yuzu_emu/model/SettingsViewModel.kt
index 53fa7a8de..6f947674e 100644
--- a/src/android/app/src/main/java/org/yuzu/yuzu_emu/model/SettingsViewModel.kt
+++ b/src/android/app/src/main/java/org/yuzu/yuzu_emu/model/SettingsViewModel.kt
@@ -29,9 +29,6 @@ class SettingsViewModel : ViewModel() {
val shouldReloadSettingsList: StateFlow<Boolean> get() = _shouldReloadSettingsList
private val _shouldReloadSettingsList = MutableStateFlow(false)
- val isUsingSearch: StateFlow<Boolean> get() = _isUsingSearch
- private val _isUsingSearch = MutableStateFlow(false)
-
val sliderProgress: StateFlow<Int> get() = _sliderProgress
private val _sliderProgress = MutableStateFlow(-1)
@@ -57,10 +54,6 @@ class SettingsViewModel : ViewModel() {
_shouldReloadSettingsList.value = value
}
- fun setIsUsingSearch(value: Boolean) {
- _isUsingSearch.value = value
- }
-
fun setSliderTextValue(value: Float, units: String) {
_sliderProgress.value = value.toInt()
_sliderTextValue.value = String.format(
diff --git a/src/android/app/src/main/jni/emu_window/emu_window.cpp b/src/android/app/src/main/jni/emu_window/emu_window.cpp
index a7e414b81..c4f631924 100644
--- a/src/android/app/src/main/jni/emu_window/emu_window.cpp
+++ b/src/android/app/src/main/jni/emu_window/emu_window.cpp
@@ -9,6 +9,7 @@
#include "input_common/drivers/virtual_gamepad.h"
#include "input_common/main.h"
#include "jni/emu_window/emu_window.h"
+#include "jni/native.h"
void EmuWindow_Android::OnSurfaceChanged(ANativeWindow* surface) {
m_window_width = ANativeWindow_getWidth(surface);
@@ -57,6 +58,13 @@ void EmuWindow_Android::OnRemoveNfcTag() {
m_input_subsystem->GetVirtualAmiibo()->CloseAmiibo();
}
+void EmuWindow_Android::OnFrameDisplayed() {
+ if (!m_first_frame) {
+ EmulationSession::GetInstance().OnEmulationStarted();
+ m_first_frame = true;
+ }
+}
+
EmuWindow_Android::EmuWindow_Android(InputCommon::InputSubsystem* input_subsystem,
ANativeWindow* surface,
std::shared_ptr<Common::DynamicLibrary> driver_library)
diff --git a/src/android/app/src/main/jni/emu_window/emu_window.h b/src/android/app/src/main/jni/emu_window/emu_window.h
index b38087f73..a34a0e479 100644
--- a/src/android/app/src/main/jni/emu_window/emu_window.h
+++ b/src/android/app/src/main/jni/emu_window/emu_window.h
@@ -45,7 +45,7 @@ public:
float gyro_z, float accel_x, float accel_y, float accel_z);
void OnReadNfcTag(std::span<u8> data);
void OnRemoveNfcTag();
- void OnFrameDisplayed() override {}
+ void OnFrameDisplayed() override;
std::unique_ptr<Core::Frontend::GraphicsContext> CreateSharedContext() const override {
return {std::make_unique<GraphicsContext_Android>(m_driver_library)};
@@ -61,4 +61,6 @@ private:
float m_window_height{};
std::shared_ptr<Common::DynamicLibrary> m_driver_library;
+
+ bool m_first_frame = false;
};
diff --git a/src/android/app/src/main/jni/native.cpp b/src/android/app/src/main/jni/native.cpp
index 46438906e..64663b084 100644
--- a/src/android/app/src/main/jni/native.cpp
+++ b/src/android/app/src/main/jni/native.cpp
@@ -199,8 +199,8 @@ bool EmulationSession::IsPaused() const {
return m_is_running && m_is_paused;
}
-const Core::PerfStatsResults& EmulationSession::PerfStats() const {
- std::scoped_lock m_perf_stats_lock(m_perf_stats_mutex);
+const Core::PerfStatsResults& EmulationSession::PerfStats() {
+ m_perf_stats = m_system.GetAndResetPerfStats();
return m_perf_stats;
}
@@ -372,8 +372,6 @@ void EmulationSession::RunEmulation() {
m_system.InitializeDebugger();
}
- OnEmulationStarted();
-
while (true) {
{
[[maybe_unused]] std::unique_lock lock(m_mutex);
@@ -383,11 +381,6 @@ void EmulationSession::RunEmulation() {
break;
}
}
- {
- // Refresh performance stats.
- std::scoped_lock m_perf_stats_lock(m_perf_stats_mutex);
- m_perf_stats = m_system.GetAndResetPerfStats();
- }
}
}
diff --git a/src/android/app/src/main/jni/native.h b/src/android/app/src/main/jni/native.h
index 3b9596459..78ef96802 100644
--- a/src/android/app/src/main/jni/native.h
+++ b/src/android/app/src/main/jni/native.h
@@ -41,7 +41,7 @@ public:
void RunEmulation();
void ShutdownEmulation();
- const Core::PerfStatsResults& PerfStats() const;
+ const Core::PerfStatsResults& PerfStats();
void ConfigureFilesystemProvider(const std::string& filepath);
void InitializeSystem(bool reload);
Core::SystemResultStatus InitializeEmulation(const std::string& filepath);
@@ -52,9 +52,10 @@ public:
void OnGamepadDisconnectEvent([[maybe_unused]] int index);
SoftwareKeyboard::AndroidKeyboard* SoftwareKeyboard();
+ static void OnEmulationStarted();
+
private:
static void LoadDiskCacheProgress(VideoCore::LoadCallbackStage stage, int progress, int max);
- static void OnEmulationStarted();
static void OnEmulationStopped(Core::SystemResultStatus result);
private:
@@ -80,6 +81,5 @@ private:
// Synchronization
std::condition_variable_any m_cv;
- mutable std::mutex m_perf_stats_mutex;
mutable std::mutex m_mutex;
};
diff --git a/src/android/app/src/main/res/drawable/ic_audio.xml b/src/android/app/src/main/res/drawable/ic_audio.xml
new file mode 100644
index 000000000..e306c3b0c
--- /dev/null
+++ b/src/android/app/src/main/res/drawable/ic_audio.xml
@@ -0,0 +1,9 @@
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+ android:width="24dp"
+ android:height="24dp"
+ android:viewportHeight="24"
+ android:viewportWidth="24">
+ <path
+ android:fillColor="?attr/colorControlNormal"
+ android:pathData="M3,9v6h4l5,5L12,4L7,9L3,9zM16.5,12c0,-1.77 -1.02,-3.29 -2.5,-4.03v8.05c1.48,-0.73 2.5,-2.25 2.5,-4.02zM14,3.23v2.06c2.89,0.86 5,3.54 5,6.71s-2.11,5.85 -5,6.71v2.06c4.01,-0.91 7,-4.49 7,-8.77s-2.99,-7.86 -7,-8.77z" />
+</vector>
diff --git a/src/android/app/src/main/res/drawable/ic_code.xml b/src/android/app/src/main/res/drawable/ic_code.xml
new file mode 100644
index 000000000..26f83b39b
--- /dev/null
+++ b/src/android/app/src/main/res/drawable/ic_code.xml
@@ -0,0 +1,9 @@
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+ android:width="24dp"
+ android:height="24dp"
+ android:viewportWidth="960"
+ android:viewportHeight="960">
+ <path
+ android:fillColor="?attr/colorControlNormal"
+ android:pathData="M320,720 L80,480l240,-240 57,57 -184,184 183,183 -56,56ZM640,720 L583,663 767,479 584,296 640,240 880,480 640,720Z"/>
+</vector>
diff --git a/src/android/app/src/main/res/drawable/ic_graphics.xml b/src/android/app/src/main/res/drawable/ic_graphics.xml
new file mode 100644
index 000000000..2fdb5a4d6
--- /dev/null
+++ b/src/android/app/src/main/res/drawable/ic_graphics.xml
@@ -0,0 +1,9 @@
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+ android:width="24dp"
+ android:height="24dp"
+ android:viewportWidth="960"
+ android:viewportHeight="960">
+ <path
+ android:fillColor="?attr/colorControlNormal"
+ android:pathData="M160,840q-33,0 -56.5,-23.5T80,760v-560q0,-33 23.5,-56.5T160,120h560q33,0 56.5,23.5T800,200v80h80v80h-80v80h80v80h-80v80h80v80h-80v80q0,33 -23.5,56.5T720,840L160,840ZM160,760h560v-560L160,200v560ZM240,680h200v-160L240,520v160ZM480,400h160v-120L480,280v120ZM240,480h200v-200L240,280v200ZM480,680h160v-240L480,440v240ZM160,200v560,-560Z"/>
+</vector>
diff --git a/src/android/app/src/main/res/drawable/ic_system_settings.xml b/src/android/app/src/main/res/drawable/ic_system_settings.xml
new file mode 100644
index 000000000..7701a2bab
--- /dev/null
+++ b/src/android/app/src/main/res/drawable/ic_system_settings.xml
@@ -0,0 +1,9 @@
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+ android:width="24dp"
+ android:height="24dp"
+ android:viewportWidth="960"
+ android:viewportHeight="960">
+ <path
+ android:fillColor="?attr/colorControlNormal"
+ android:pathData="M320,960q-17,0 -28.5,-11.5T280,920q0,-17 11.5,-28.5T320,880q17,0 28.5,11.5T360,920q0,17 -11.5,28.5T320,960ZM480,960q-17,0 -28.5,-11.5T440,920q0,-17 11.5,-28.5T480,880q17,0 28.5,11.5T520,920q0,17 -11.5,28.5T480,960ZM640,960q-17,0 -28.5,-11.5T600,920q0,-17 11.5,-28.5T640,880q17,0 28.5,11.5T680,920q0,17 -11.5,28.5T640,960ZM320,800q-33,0 -56.5,-23.5T240,720v-640q0,-33 23.5,-56.5T320,0h320q33,0 56.5,23.5T720,80v640q0,33 -23.5,56.5T640,800L320,800ZM320,720h320v-40L320,680v40ZM320,600h320v-400L320,200v400ZM320,120h320v-40L320,80v40ZM320,120v-40,40ZM320,720v-40,40Z"/>
+</vector>
diff --git a/src/android/app/src/main/res/layout-w600dp/fragment_about.xml b/src/android/app/src/main/res/layout-w600dp/fragment_about.xml
new file mode 100644
index 000000000..a26ffbc73
--- /dev/null
+++ b/src/android/app/src/main/res/layout-w600dp/fragment_about.xml
@@ -0,0 +1,233 @@
+<?xml version="1.0" encoding="utf-8"?>
+<androidx.coordinatorlayout.widget.CoordinatorLayout xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:app="http://schemas.android.com/apk/res-auto"
+ xmlns:tools="http://schemas.android.com/tools"
+ android:id="@+id/coordinator_about"
+ android:layout_width="match_parent"
+ android:layout_height="match_parent"
+ android:background="?attr/colorSurface">
+
+ <com.google.android.material.appbar.AppBarLayout
+ android:id="@+id/appbar_about"
+ android:layout_width="match_parent"
+ android:layout_height="wrap_content"
+ android:fitsSystemWindows="true">
+
+ <com.google.android.material.appbar.MaterialToolbar
+ android:id="@+id/toolbar_about"
+ android:layout_width="match_parent"
+ android:layout_height="?attr/actionBarSize"
+ app:navigationIcon="@drawable/ic_back"
+ app:title="@string/about" />
+
+ </com.google.android.material.appbar.AppBarLayout>
+
+ <androidx.core.widget.NestedScrollView
+ android:id="@+id/scroll_about"
+ android:layout_width="match_parent"
+ android:layout_height="match_parent"
+ android:fadeScrollbars="false"
+ android:scrollbars="vertical"
+ app:layout_behavior="@string/appbar_scrolling_view_behavior">
+
+ <LinearLayout
+ android:id="@+id/content_about"
+ android:layout_width="match_parent"
+ android:layout_height="match_parent"
+ android:orientation="horizontal">
+
+ <ImageView
+ android:id="@+id/image_logo"
+ android:layout_width="200dp"
+ android:layout_height="200dp"
+ android:layout_gravity="center_horizontal"
+ android:padding="20dp"
+ android:src="@drawable/ic_yuzu_title" />
+
+ <LinearLayout
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content"
+ android:orientation="vertical">
+
+ <LinearLayout
+ android:layout_width="match_parent"
+ android:layout_height="wrap_content"
+ android:orientation="vertical"
+ android:paddingHorizontal="16dp"
+ android:paddingVertical="16dp">
+
+ <com.google.android.material.textview.MaterialTextView
+ style="@style/TextAppearance.Material3.TitleMedium"
+ android:layout_width="match_parent"
+ android:layout_height="wrap_content"
+ android:layout_marginHorizontal="24dp"
+ android:text="@string/about"
+ android:textAlignment="viewStart" />
+
+ <com.google.android.material.textview.MaterialTextView
+ style="@style/TextAppearance.Material3.BodyMedium"
+ android:layout_width="match_parent"
+ android:layout_height="wrap_content"
+ android:layout_marginHorizontal="24dp"
+ android:layout_marginTop="6dp"
+ android:text="@string/about_app_description"
+ android:textAlignment="viewStart" />
+
+ </LinearLayout>
+
+ <com.google.android.material.divider.MaterialDivider
+ android:layout_width="match_parent"
+ android:layout_height="wrap_content"
+ android:layout_marginHorizontal="20dp" />
+
+ <LinearLayout
+ android:id="@+id/button_contributors"
+ android:layout_width="match_parent"
+ android:layout_height="wrap_content"
+ android:background="?attr/selectableItemBackground"
+ android:orientation="vertical"
+ android:paddingHorizontal="16dp"
+ android:paddingVertical="16dp">
+
+ <com.google.android.material.textview.MaterialTextView
+ style="@style/TextAppearance.Material3.TitleMedium"
+ android:layout_width="match_parent"
+ android:layout_height="wrap_content"
+ android:layout_marginHorizontal="24dp"
+ android:text="@string/contributors"
+ android:textAlignment="viewStart" />
+
+ <com.google.android.material.textview.MaterialTextView
+ style="@style/TextAppearance.Material3.BodyMedium"
+ android:layout_width="match_parent"
+ android:layout_height="wrap_content"
+ android:layout_marginHorizontal="24dp"
+ android:layout_marginTop="6dp"
+ android:text="@string/contributors_description"
+ android:textAlignment="viewStart" />
+
+ </LinearLayout>
+
+ <com.google.android.material.divider.MaterialDivider
+ android:layout_width="match_parent"
+ android:layout_height="wrap_content"
+ android:layout_marginHorizontal="20dp" />
+
+ <LinearLayout
+ android:id="@+id/button_licenses"
+ android:layout_width="match_parent"
+ android:layout_height="wrap_content"
+ android:background="?attr/selectableItemBackground"
+ android:orientation="vertical"
+ android:paddingHorizontal="16dp"
+ android:paddingVertical="16dp">
+
+ <com.google.android.material.textview.MaterialTextView
+ style="@style/TextAppearance.Material3.TitleMedium"
+ android:layout_width="match_parent"
+ android:layout_height="wrap_content"
+ android:layout_marginHorizontal="24dp"
+ android:text="@string/licenses"
+ android:textAlignment="viewStart" />
+
+ <com.google.android.material.textview.MaterialTextView
+ style="@style/TextAppearance.Material3.BodyMedium"
+ android:layout_width="match_parent"
+ android:layout_height="wrap_content"
+ android:layout_marginHorizontal="24dp"
+ android:layout_marginTop="6dp"
+ android:text="@string/licenses_description"
+ android:textAlignment="viewStart" />
+
+ </LinearLayout>
+
+ <com.google.android.material.divider.MaterialDivider
+ android:layout_width="match_parent"
+ android:layout_height="wrap_content"
+ android:layout_marginHorizontal="20dp" />
+
+ <LinearLayout
+ android:id="@+id/button_build_hash"
+ android:layout_width="match_parent"
+ android:layout_height="wrap_content"
+ android:background="?attr/selectableItemBackground"
+ android:orientation="vertical"
+ android:paddingHorizontal="16dp"
+ android:paddingVertical="16dp">
+
+ <com.google.android.material.textview.MaterialTextView
+ style="@style/TextAppearance.Material3.TitleMedium"
+ android:layout_width="match_parent"
+ android:layout_height="wrap_content"
+ android:layout_marginHorizontal="24dp"
+ android:text="@string/build"
+ android:textAlignment="viewStart" />
+
+ <com.google.android.material.textview.MaterialTextView
+ android:id="@+id/text_build_hash"
+ style="@style/TextAppearance.Material3.BodyMedium"
+ android:layout_width="match_parent"
+ android:layout_height="wrap_content"
+ android:layout_marginHorizontal="24dp"
+ android:layout_marginTop="6dp"
+ android:textAlignment="viewStart"
+ tools:text="abc123" />
+
+ </LinearLayout>
+
+ <com.google.android.material.divider.MaterialDivider
+ android:layout_width="match_parent"
+ android:layout_height="wrap_content"
+ android:layout_marginHorizontal="20dp" />
+
+ <LinearLayout
+ android:layout_width="match_parent"
+ android:layout_height="wrap_content"
+ android:layout_marginHorizontal="40dp"
+ android:layout_marginTop="12dp"
+ android:layout_marginBottom="16dp"
+ android:gravity="center_horizontal"
+ android:orientation="horizontal">
+
+ <Button
+ android:id="@+id/button_discord"
+ style="?attr/materialIconButtonStyle"
+ android:layout_width="0dp"
+ android:layout_height="wrap_content"
+ android:layout_weight="1"
+ app:icon="@drawable/ic_discord"
+ app:iconGravity="textEnd"
+ app:iconSize="24dp"
+ app:iconTint="?attr/colorOnSurface" />
+
+ <Button
+ android:id="@+id/button_website"
+ style="?attr/materialIconButtonStyle"
+ android:layout_width="0dp"
+ android:layout_height="wrap_content"
+ android:layout_weight="1"
+ app:icon="@drawable/ic_website"
+ app:iconGravity="textEnd"
+ app:iconSize="24dp"
+ app:iconTint="?attr/colorOnSurface" />
+
+ <Button
+ android:id="@+id/button_github"
+ style="?attr/materialIconButtonStyle"
+ android:layout_width="0dp"
+ android:layout_height="wrap_content"
+ android:layout_weight="1"
+ app:icon="@drawable/ic_github"
+ app:iconGravity="textEnd"
+ app:iconSize="24dp"
+ app:iconTint="?attr/colorOnSurface" />
+
+ </LinearLayout>
+
+ </LinearLayout>
+
+ </LinearLayout>
+
+ </androidx.core.widget.NestedScrollView>
+
+</androidx.coordinatorlayout.widget.CoordinatorLayout>
diff --git a/src/android/app/src/main/res/layout/card_home_option.xml b/src/android/app/src/main/res/layout/card_home_option.xml
index 6e8a232f9..cb667c928 100644
--- a/src/android/app/src/main/res/layout/card_home_option.xml
+++ b/src/android/app/src/main/res/layout/card_home_option.xml
@@ -6,8 +6,8 @@
android:id="@+id/option_card"
android:layout_width="match_parent"
android:layout_height="wrap_content"
- android:layout_marginVertical="12dp"
- android:layout_marginHorizontal="16dp"
+ android:layout_marginBottom="24dp"
+ android:layout_marginHorizontal="12dp"
android:background="?attr/selectableItemBackground"
android:backgroundTint="?attr/colorSurfaceVariant"
android:clickable="true"
diff --git a/src/android/app/src/main/res/layout/fragment_about.xml b/src/android/app/src/main/res/layout/fragment_about.xml
index 3e1d98451..a24f5230e 100644
--- a/src/android/app/src/main/res/layout/fragment_about.xml
+++ b/src/android/app/src/main/res/layout/fragment_about.xml
@@ -38,17 +38,17 @@
<ImageView
android:id="@+id/image_logo"
- android:layout_width="250dp"
- android:layout_height="250dp"
- android:layout_marginTop="20dp"
+ android:layout_width="150dp"
+ android:layout_height="150dp"
+ android:layout_marginTop="24dp"
+ android:layout_marginBottom="28dp"
android:layout_gravity="center_horizontal"
android:src="@drawable/ic_yuzu_title" />
<com.google.android.material.divider.MaterialDivider
android:layout_width="match_parent"
android:layout_height="wrap_content"
- android:layout_marginHorizontal="20dp"
- android:layout_marginTop="28dp" />
+ android:layout_marginHorizontal="20dp" />
<LinearLayout
android:layout_width="match_parent"
diff --git a/src/android/app/src/main/res/layout/fragment_emulation.xml b/src/android/app/src/main/res/layout/fragment_emulation.xml
index 750ce094a..cd6360b45 100644
--- a/src/android/app/src/main/res/layout/fragment_emulation.xml
+++ b/src/android/app/src/main/res/layout/fragment_emulation.xml
@@ -134,16 +134,18 @@
<FrameLayout
android:id="@+id/overlay_container"
android:layout_width="match_parent"
- android:layout_height="match_parent">
+ android:layout_height="match_parent"
+ android:fitsSystemWindows="true">
- <TextView
+ <com.google.android.material.textview.MaterialTextView
android:id="@+id/show_fps_text"
+ style="@style/TextAppearance.Material3.BodyMedium"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:layout_gravity="left"
android:clickable="false"
android:focusable="false"
- android:shadowColor="@android:color/black"
+ android:paddingHorizontal="20dp"
android:textColor="@android:color/white"
android:textSize="12sp"
tools:ignore="RtlHardcoded" />
diff --git a/src/android/app/src/main/res/layout/fragment_home_settings.xml b/src/android/app/src/main/res/layout/fragment_home_settings.xml
index 1cb421dcb..d84093ba3 100644
--- a/src/android/app/src/main/res/layout/fragment_home_settings.xml
+++ b/src/android/app/src/main/res/layout/fragment_home_settings.xml
@@ -14,13 +14,14 @@
android:layout_width="match_parent"
android:layout_height="match_parent"
android:orientation="vertical"
- android:background="?attr/colorSurface">
+ android:background="?attr/colorSurface"
+ android:paddingHorizontal="8dp">
<ImageView
android:id="@+id/logo_image"
- android:layout_width="128dp"
- android:layout_height="128dp"
- android:layout_margin="64dp"
+ android:layout_width="96dp"
+ android:layout_height="96dp"
+ android:layout_marginVertical="32dp"
android:layout_gravity="center_horizontal"
android:src="@drawable/ic_yuzu_full" />
diff --git a/src/android/app/src/main/res/layout/list_item_setting.xml b/src/android/app/src/main/res/layout/list_item_setting.xml
index f1037a740..544280e75 100644
--- a/src/android/app/src/main/res/layout/list_item_setting.xml
+++ b/src/android/app/src/main/res/layout/list_item_setting.xml
@@ -10,41 +10,59 @@
android:focusable="true"
android:gravity="center_vertical"
android:minHeight="72dp"
- android:padding="@dimen/spacing_large">
+ android:padding="16dp">
<LinearLayout
android:layout_width="match_parent"
android:layout_height="wrap_content"
- android:orientation="vertical">
+ android:orientation="horizontal">
- <com.google.android.material.textview.MaterialTextView
- android:id="@+id/text_setting_name"
- style="@style/TextAppearance.Material3.HeadlineMedium"
- android:layout_width="match_parent"
- android:layout_height="wrap_content"
- android:textAlignment="viewStart"
- android:textSize="16sp"
- app:lineHeight="22dp"
- tools:text="Setting Name" />
-
- <com.google.android.material.textview.MaterialTextView
- android:id="@+id/text_setting_description"
- style="@style/TextAppearance.Material3.BodySmall"
- android:layout_width="match_parent"
- android:layout_height="wrap_content"
- android:layout_marginTop="@dimen/spacing_small"
- android:textAlignment="viewStart"
- tools:text="@string/app_disclaimer" />
+ <ImageView
+ android:id="@+id/icon"
+ android:layout_width="24dp"
+ android:layout_height="24dp"
+ android:layout_marginStart="8dp"
+ android:layout_marginEnd="24dp"
+ android:layout_gravity="center_vertical"
+ android:visibility="gone"
+ app:tint="?attr/colorOnSurface" />
- <com.google.android.material.textview.MaterialTextView
- android:id="@+id/text_setting_value"
- style="@style/TextAppearance.Material3.LabelMedium"
+ <LinearLayout
android:layout_width="match_parent"
android:layout_height="wrap_content"
- android:layout_marginTop="@dimen/spacing_small"
- android:textAlignment="viewStart"
- android:textStyle="bold"
- tools:text="1x" />
+ android:orientation="vertical">
+
+ <com.google.android.material.textview.MaterialTextView
+ android:id="@+id/text_setting_name"
+ style="@style/TextAppearance.Material3.HeadlineMedium"
+ android:layout_width="match_parent"
+ android:layout_height="wrap_content"
+ android:textAlignment="viewStart"
+ android:textSize="17sp"
+ app:lineHeight="22dp"
+ tools:text="Setting Name" />
+
+ <com.google.android.material.textview.MaterialTextView
+ android:id="@+id/text_setting_description"
+ style="@style/TextAppearance.Material3.BodySmall"
+ android:layout_width="match_parent"
+ android:layout_height="wrap_content"
+ android:layout_marginTop="@dimen/spacing_small"
+ android:textAlignment="viewStart"
+ tools:text="@string/app_disclaimer" />
+
+ <com.google.android.material.textview.MaterialTextView
+ android:id="@+id/text_setting_value"
+ style="@style/TextAppearance.Material3.LabelMedium"
+ android:layout_width="match_parent"
+ android:layout_height="wrap_content"
+ android:layout_marginTop="@dimen/spacing_small"
+ android:textAlignment="viewStart"
+ android:textStyle="bold"
+ android:textSize="13sp"
+ tools:text="1x" />
+
+ </LinearLayout>
</LinearLayout>
diff --git a/src/android/app/src/main/res/layout/list_item_setting_switch.xml b/src/android/app/src/main/res/layout/list_item_setting_switch.xml
index a5767adee..a8f5aff78 100644
--- a/src/android/app/src/main/res/layout/list_item_setting_switch.xml
+++ b/src/android/app/src/main/res/layout/list_item_setting_switch.xml
@@ -8,9 +8,7 @@
android:clickable="true"
android:focusable="true"
android:minHeight="72dp"
- android:paddingVertical="@dimen/spacing_large"
- android:paddingStart="@dimen/spacing_large"
- android:paddingEnd="24dp">
+ android:padding="16dp">
<com.google.android.material.materialswitch.MaterialSwitch
android:id="@+id/switch_widget"
@@ -24,7 +22,7 @@
android:layout_height="wrap_content"
android:layout_alignParentTop="true"
android:layout_centerVertical="true"
- android:layout_marginEnd="@dimen/spacing_large"
+ android:layout_marginEnd="24dp"
android:layout_toStartOf="@+id/switch_widget"
android:gravity="center_vertical"
android:orientation="vertical">
@@ -35,7 +33,7 @@
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:textAlignment="viewStart"
- android:textSize="16sp"
+ android:textSize="17sp"
app:lineHeight="28dp"
tools:text="@string/frame_limit_enable" />
diff --git a/src/android/app/src/main/res/layout/list_item_settings_header.xml b/src/android/app/src/main/res/layout/list_item_settings_header.xml
index cf85bc0da..21276b19e 100644
--- a/src/android/app/src/main/res/layout/list_item_settings_header.xml
+++ b/src/android/app/src/main/res/layout/list_item_settings_header.xml
@@ -7,7 +7,8 @@
android:layout_height="wrap_content"
android:layout_gravity="start|center_vertical"
android:paddingHorizontal="@dimen/spacing_large"
- android:paddingVertical="16dp"
+ android:paddingTop="16dp"
+ android:paddingBottom="8dp"
android:textAlignment="viewStart"
android:textColor="?attr/colorPrimary"
android:textStyle="bold"
diff --git a/src/android/app/src/main/res/values/arrays.xml b/src/android/app/src/main/res/values/arrays.xml
index dc10159c9..51bcc49a3 100644
--- a/src/android/app/src/main/res/values/arrays.xml
+++ b/src/android/app/src/main/res/values/arrays.xml
@@ -2,7 +2,6 @@
<resources>
<string-array name="regionNames">
- <item>@string/auto</item>
<item>@string/region_australia</item>
<item>@string/region_china</item>
<item>@string/region_europe</item>
@@ -13,7 +12,6 @@
</string-array>
<integer-array name="regionValues">
- <item>-1</item>
<item>3</item>
<item>4</item>
<item>2</item>
diff --git a/src/android/app/src/main/res/values/strings.xml b/src/android/app/src/main/res/values/strings.xml
index c551a6106..98c3f20f8 100644
--- a/src/android/app/src/main/res/values/strings.xml
+++ b/src/android/app/src/main/res/values/strings.xml
@@ -240,6 +240,7 @@
<string name="shutting_down">Shutting down…</string>
<string name="reset_setting_confirmation">Do you want to reset this setting back to its default value?</string>
<string name="reset_to_default">Reset to default</string>
+ <string name="reset_to_default_description">Resets all advanced settings</string>
<string name="reset_all_settings">Reset all settings?</string>
<string name="reset_all_settings_description">All advanced settings will be reset to their default configuration. This can not be undone.</string>
<string name="settings_reset">Settings reset</string>
@@ -271,10 +272,14 @@
<string name="preferences_settings">Settings</string>
<string name="preferences_general">General</string>
<string name="preferences_system">System</string>
+ <string name="preferences_system_description">Docked mode, region, language</string>
<string name="preferences_graphics">Graphics</string>
+ <string name="preferences_graphics_description">Accuracy level, resolution, shader cache</string>
<string name="preferences_audio">Audio</string>
+ <string name="preferences_audio_description">Output engine, volume</string>
<string name="preferences_theme">Theme and color</string>
<string name="preferences_debug">Debug</string>
+ <string name="preferences_debug_description">CPU/GPU debugging, graphics API, fastmem</string>
<!-- ROM loading errors -->
<string name="loader_error_encrypted">Your ROM is encrypted</string>
diff --git a/src/audio_core/adsp/apps/opus/opus_decode_object.cpp b/src/audio_core/adsp/apps/opus/opus_decode_object.cpp
index 2c16d3769..e2b9eb566 100644
--- a/src/audio_core/adsp/apps/opus/opus_decode_object.cpp
+++ b/src/audio_core/adsp/apps/opus/opus_decode_object.cpp
@@ -1,107 +1,107 @@
-// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include "audio_core/adsp/apps/opus/opus_decode_object.h"
-#include "common/assert.h"
-
-namespace AudioCore::ADSP::OpusDecoder {
-namespace {
-bool IsValidChannelCount(u32 channel_count) {
- return channel_count == 1 || channel_count == 2;
-}
-} // namespace
-
-u32 OpusDecodeObject::GetWorkBufferSize(u32 channel_count) {
- if (!IsValidChannelCount(channel_count)) {
- return 0;
- }
- return static_cast<u32>(sizeof(OpusDecodeObject)) + opus_decoder_get_size(channel_count);
-}
-
-OpusDecodeObject& OpusDecodeObject::Initialize(u64 buffer, u64 buffer2) {
- auto* new_decoder = reinterpret_cast<OpusDecodeObject*>(buffer);
- auto* comparison = reinterpret_cast<OpusDecodeObject*>(buffer2);
-
- if (new_decoder->magic == DecodeObjectMagic) {
- if (!new_decoder->initialized ||
- (new_decoder->initialized && new_decoder->self == comparison)) {
- new_decoder->state_valid = true;
- }
- } else {
- new_decoder->initialized = false;
- new_decoder->state_valid = true;
- }
- return *new_decoder;
-}
-
-s32 OpusDecodeObject::InitializeDecoder(u32 sample_rate, u32 channel_count) {
- if (!state_valid) {
- return OPUS_INVALID_STATE;
- }
-
- if (initialized) {
- return OPUS_OK;
- }
-
- // Unfortunately libopus does not expose the OpusDecoder struct publicly, so we can't include
- // it in this class. Nintendo does not allocate memory, which is why we have a workbuffer
- // provided.
- // We could use _create and have libopus allocate it for us, but then we have to separately
- // track which decoder is being used between this and multistream in order to call the correct
- // destroy from the host side.
- // This is a bit cringe, but is safe as these objects are only ever initialized inside the given
- // workbuffer, and GetWorkBufferSize will guarantee there's enough space to follow.
- decoder = (LibOpusDecoder*)(this + 1);
- s32 ret = opus_decoder_init(decoder, sample_rate, channel_count);
- if (ret == OPUS_OK) {
- magic = DecodeObjectMagic;
- initialized = true;
- state_valid = true;
- self = this;
- final_range = 0;
- }
- return ret;
-}
-
-s32 OpusDecodeObject::Shutdown() {
- if (!state_valid) {
- return OPUS_INVALID_STATE;
- }
-
- if (initialized) {
- magic = 0x0;
- initialized = false;
- state_valid = false;
- self = nullptr;
- final_range = 0;
- decoder = nullptr;
- }
- return OPUS_OK;
-}
-
-s32 OpusDecodeObject::ResetDecoder() {
- return opus_decoder_ctl(decoder, OPUS_RESET_STATE);
-}
-
-s32 OpusDecodeObject::Decode(u32& out_sample_count, u64 output_data, u64 output_data_size,
- u64 input_data, u64 input_data_size) {
- ASSERT(initialized);
- out_sample_count = 0;
-
- if (!state_valid) {
- return OPUS_INVALID_STATE;
- }
-
- auto ret_code_or_samples = opus_decode(
- decoder, reinterpret_cast<const u8*>(input_data), static_cast<opus_int32>(input_data_size),
- reinterpret_cast<opus_int16*>(output_data), static_cast<opus_int32>(output_data_size), 0);
-
- if (ret_code_or_samples < OPUS_OK) {
- return ret_code_or_samples;
- }
-
- out_sample_count = ret_code_or_samples;
- return opus_decoder_ctl(decoder, OPUS_GET_FINAL_RANGE_REQUEST, &final_range);
-}
-
-} // namespace AudioCore::ADSP::OpusDecoder
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "audio_core/adsp/apps/opus/opus_decode_object.h"
+#include "common/assert.h"
+
+namespace AudioCore::ADSP::OpusDecoder {
+namespace {
+bool IsValidChannelCount(u32 channel_count) {
+ return channel_count == 1 || channel_count == 2;
+}
+} // namespace
+
+u32 OpusDecodeObject::GetWorkBufferSize(u32 channel_count) {
+ if (!IsValidChannelCount(channel_count)) {
+ return 0;
+ }
+ return static_cast<u32>(sizeof(OpusDecodeObject)) + opus_decoder_get_size(channel_count);
+}
+
+OpusDecodeObject& OpusDecodeObject::Initialize(u64 buffer, u64 buffer2) {
+ auto* new_decoder = reinterpret_cast<OpusDecodeObject*>(buffer);
+ auto* comparison = reinterpret_cast<OpusDecodeObject*>(buffer2);
+
+ if (new_decoder->magic == DecodeObjectMagic) {
+ if (!new_decoder->initialized ||
+ (new_decoder->initialized && new_decoder->self == comparison)) {
+ new_decoder->state_valid = true;
+ }
+ } else {
+ new_decoder->initialized = false;
+ new_decoder->state_valid = true;
+ }
+ return *new_decoder;
+}
+
+s32 OpusDecodeObject::InitializeDecoder(u32 sample_rate, u32 channel_count) {
+ if (!state_valid) {
+ return OPUS_INVALID_STATE;
+ }
+
+ if (initialized) {
+ return OPUS_OK;
+ }
+
+ // Unfortunately libopus does not expose the OpusDecoder struct publicly, so we can't include
+ // it in this class. Nintendo does not allocate memory, which is why we have a workbuffer
+ // provided.
+ // We could use _create and have libopus allocate it for us, but then we have to separately
+ // track which decoder is being used between this and multistream in order to call the correct
+ // destroy from the host side.
+ // This is a bit of a hack, but it is safe, as these objects are only ever initialized inside
+ // the given workbuffer, and GetWorkBufferSize guarantees there is enough space to follow.
+ decoder = (LibOpusDecoder*)(this + 1);
+ s32 ret = opus_decoder_init(decoder, sample_rate, channel_count);
+ if (ret == OPUS_OK) {
+ magic = DecodeObjectMagic;
+ initialized = true;
+ state_valid = true;
+ self = this;
+ final_range = 0;
+ }
+ return ret;
+}
+
+s32 OpusDecodeObject::Shutdown() {
+ if (!state_valid) {
+ return OPUS_INVALID_STATE;
+ }
+
+ if (initialized) {
+ magic = 0x0;
+ initialized = false;
+ state_valid = false;
+ self = nullptr;
+ final_range = 0;
+ decoder = nullptr;
+ }
+ return OPUS_OK;
+}
+
+s32 OpusDecodeObject::ResetDecoder() {
+ return opus_decoder_ctl(decoder, OPUS_RESET_STATE);
+}
+
+s32 OpusDecodeObject::Decode(u32& out_sample_count, u64 output_data, u64 output_data_size,
+ u64 input_data, u64 input_data_size) {
+ ASSERT(initialized);
+ out_sample_count = 0;
+
+ if (!state_valid) {
+ return OPUS_INVALID_STATE;
+ }
+
+ auto ret_code_or_samples = opus_decode(
+ decoder, reinterpret_cast<const u8*>(input_data), static_cast<opus_int32>(input_data_size),
+ reinterpret_cast<opus_int16*>(output_data), static_cast<opus_int32>(output_data_size), 0);
+
+ if (ret_code_or_samples < OPUS_OK) {
+ return ret_code_or_samples;
+ }
+
+ out_sample_count = ret_code_or_samples;
+ return opus_decoder_ctl(decoder, OPUS_GET_FINAL_RANGE_REQUEST, &final_range);
+}
+
+} // namespace AudioCore::ADSP::OpusDecoder
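
The workbuffer trick above is worth spelling out. Below is a minimal standalone sketch of the layout, using hypothetical Header and State types in place of OpusDecodeObject and the opaque libopus state; nothing here is yuzu or libopus API.

    #include <cstddef>
    #include <cstdint>

    struct State { unsigned char opaque[256]; }; // stand-in for opus_decoder_get_size(...)

    struct Header {
        uint32_t magic{};
        bool initialized{};
        State* state{};

        // Mirrors GetWorkBufferSize: room for the header plus the trailing decoder state.
        static size_t GetWorkBufferSize() {
            return sizeof(Header) + sizeof(State);
        }

        // Mirrors `decoder = (LibOpusDecoder*)(this + 1)`: the state lives directly
        // after this object, inside the same caller-provided buffer.
        void AttachState() {
            state = reinterpret_cast<State*>(this + 1);
        }
    };

Because the caller sizes the buffer with GetWorkBufferSize, the `this + 1` pointer is always backed by valid memory.
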
diff --git a/src/audio_core/adsp/apps/opus/opus_multistream_decode_object.cpp b/src/audio_core/adsp/apps/opus/opus_multistream_decode_object.cpp
index f6d362e68..7f1ed0450 100644
--- a/src/audio_core/adsp/apps/opus/opus_multistream_decode_object.cpp
+++ b/src/audio_core/adsp/apps/opus/opus_multistream_decode_object.cpp
@@ -1,111 +1,111 @@
-// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include "audio_core/adsp/apps/opus/opus_multistream_decode_object.h"
-#include "common/assert.h"
-
-namespace AudioCore::ADSP::OpusDecoder {
-
-namespace {
-bool IsValidChannelCount(u32 channel_count) {
- return channel_count == 1 || channel_count == 2;
-}
-
-bool IsValidStreamCounts(u32 total_stream_count, u32 stereo_stream_count) {
- return total_stream_count > 0 && stereo_stream_count > 0 &&
- stereo_stream_count <= total_stream_count && IsValidChannelCount(total_stream_count);
-}
-} // namespace
-
-u32 OpusMultiStreamDecodeObject::GetWorkBufferSize(u32 total_stream_count,
- u32 stereo_stream_count) {
- if (IsValidStreamCounts(total_stream_count, stereo_stream_count)) {
- return static_cast<u32>(sizeof(OpusMultiStreamDecodeObject)) +
- opus_multistream_decoder_get_size(total_stream_count, stereo_stream_count);
- }
- return 0;
-}
-
-OpusMultiStreamDecodeObject& OpusMultiStreamDecodeObject::Initialize(u64 buffer, u64 buffer2) {
- auto* new_decoder = reinterpret_cast<OpusMultiStreamDecodeObject*>(buffer);
- auto* comparison = reinterpret_cast<OpusMultiStreamDecodeObject*>(buffer2);
-
- if (new_decoder->magic == DecodeMultiStreamObjectMagic) {
- if (!new_decoder->initialized ||
- (new_decoder->initialized && new_decoder->self == comparison)) {
- new_decoder->state_valid = true;
- }
- } else {
- new_decoder->initialized = false;
- new_decoder->state_valid = true;
- }
- return *new_decoder;
-}
-
-s32 OpusMultiStreamDecodeObject::InitializeDecoder(u32 sample_rate, u32 total_stream_count,
- u32 channel_count, u32 stereo_stream_count,
- u8* mappings) {
- if (!state_valid) {
- return OPUS_INVALID_STATE;
- }
-
- if (initialized) {
- return OPUS_OK;
- }
-
- // See OpusDecodeObject::InitializeDecoder for an explanation of this
- decoder = (LibOpusMSDecoder*)(this + 1);
- s32 ret = opus_multistream_decoder_init(decoder, sample_rate, channel_count, total_stream_count,
- stereo_stream_count, mappings);
- if (ret == OPUS_OK) {
- magic = DecodeMultiStreamObjectMagic;
- initialized = true;
- state_valid = true;
- self = this;
- final_range = 0;
- }
- return ret;
-}
-
-s32 OpusMultiStreamDecodeObject::Shutdown() {
- if (!state_valid) {
- return OPUS_INVALID_STATE;
- }
-
- if (initialized) {
- magic = 0x0;
- initialized = false;
- state_valid = false;
- self = nullptr;
- final_range = 0;
- decoder = nullptr;
- }
- return OPUS_OK;
-}
-
-s32 OpusMultiStreamDecodeObject::ResetDecoder() {
- return opus_multistream_decoder_ctl(decoder, OPUS_RESET_STATE);
-}
-
-s32 OpusMultiStreamDecodeObject::Decode(u32& out_sample_count, u64 output_data,
- u64 output_data_size, u64 input_data, u64 input_data_size) {
- ASSERT(initialized);
- out_sample_count = 0;
-
- if (!state_valid) {
- return OPUS_INVALID_STATE;
- }
-
- auto ret_code_or_samples = opus_multistream_decode(
- decoder, reinterpret_cast<const u8*>(input_data), static_cast<opus_int32>(input_data_size),
- reinterpret_cast<opus_int16*>(output_data), static_cast<opus_int32>(output_data_size), 0);
-
- if (ret_code_or_samples < OPUS_OK) {
- return ret_code_or_samples;
- }
-
- out_sample_count = ret_code_or_samples;
- return opus_multistream_decoder_ctl(decoder, OPUS_GET_FINAL_RANGE_REQUEST, &final_range);
-}
-
-} // namespace AudioCore::ADSP::OpusDecoder
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "audio_core/adsp/apps/opus/opus_multistream_decode_object.h"
+#include "common/assert.h"
+
+namespace AudioCore::ADSP::OpusDecoder {
+
+namespace {
+bool IsValidChannelCount(u32 channel_count) {
+ return channel_count == 1 || channel_count == 2;
+}
+
+bool IsValidStreamCounts(u32 total_stream_count, u32 stereo_stream_count) {
+ return total_stream_count > 0 && stereo_stream_count > 0 &&
+ stereo_stream_count <= total_stream_count && IsValidChannelCount(total_stream_count);
+}
+} // namespace
+
+u32 OpusMultiStreamDecodeObject::GetWorkBufferSize(u32 total_stream_count,
+ u32 stereo_stream_count) {
+ if (IsValidStreamCounts(total_stream_count, stereo_stream_count)) {
+ return static_cast<u32>(sizeof(OpusMultiStreamDecodeObject)) +
+ opus_multistream_decoder_get_size(total_stream_count, stereo_stream_count);
+ }
+ return 0;
+}
+
+OpusMultiStreamDecodeObject& OpusMultiStreamDecodeObject::Initialize(u64 buffer, u64 buffer2) {
+ auto* new_decoder = reinterpret_cast<OpusMultiStreamDecodeObject*>(buffer);
+ auto* comparison = reinterpret_cast<OpusMultiStreamDecodeObject*>(buffer2);
+
+ if (new_decoder->magic == DecodeMultiStreamObjectMagic) {
+ if (!new_decoder->initialized ||
+ (new_decoder->initialized && new_decoder->self == comparison)) {
+ new_decoder->state_valid = true;
+ }
+ } else {
+ new_decoder->initialized = false;
+ new_decoder->state_valid = true;
+ }
+ return *new_decoder;
+}
+
+s32 OpusMultiStreamDecodeObject::InitializeDecoder(u32 sample_rate, u32 total_stream_count,
+ u32 channel_count, u32 stereo_stream_count,
+ u8* mappings) {
+ if (!state_valid) {
+ return OPUS_INVALID_STATE;
+ }
+
+ if (initialized) {
+ return OPUS_OK;
+ }
+
+ // See OpusDecodeObject::InitializeDecoder for an explanation of this
+ decoder = (LibOpusMSDecoder*)(this + 1);
+ s32 ret = opus_multistream_decoder_init(decoder, sample_rate, channel_count, total_stream_count,
+ stereo_stream_count, mappings);
+ if (ret == OPUS_OK) {
+ magic = DecodeMultiStreamObjectMagic;
+ initialized = true;
+ state_valid = true;
+ self = this;
+ final_range = 0;
+ }
+ return ret;
+}
+
+s32 OpusMultiStreamDecodeObject::Shutdown() {
+ if (!state_valid) {
+ return OPUS_INVALID_STATE;
+ }
+
+ if (initialized) {
+ magic = 0x0;
+ initialized = false;
+ state_valid = false;
+ self = nullptr;
+ final_range = 0;
+ decoder = nullptr;
+ }
+ return OPUS_OK;
+}
+
+s32 OpusMultiStreamDecodeObject::ResetDecoder() {
+ return opus_multistream_decoder_ctl(decoder, OPUS_RESET_STATE);
+}
+
+s32 OpusMultiStreamDecodeObject::Decode(u32& out_sample_count, u64 output_data,
+ u64 output_data_size, u64 input_data, u64 input_data_size) {
+ ASSERT(initialized);
+ out_sample_count = 0;
+
+ if (!state_valid) {
+ return OPUS_INVALID_STATE;
+ }
+
+ auto ret_code_or_samples = opus_multistream_decode(
+ decoder, reinterpret_cast<const u8*>(input_data), static_cast<opus_int32>(input_data_size),
+ reinterpret_cast<opus_int16*>(output_data), static_cast<opus_int32>(output_data_size), 0);
+
+ if (ret_code_or_samples < OPUS_OK) {
+ return ret_code_or_samples;
+ }
+
+ out_sample_count = ret_code_or_samples;
+ return opus_multistream_decoder_ctl(decoder, OPUS_GET_FINAL_RANGE_REQUEST, &final_range);
+}
+
+} // namespace AudioCore::ADSP::OpusDecoder
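
Both decode objects validate themselves with the same magic/self-pointer handshake in Initialize. A hedged sketch of that pattern follows; DecodeObjectMagic's actual value is defined elsewhere, so the 0x4F505553 below is purely illustrative.

    #include <cstdint>

    struct DecodeObject {
        static constexpr uint32_t Magic = 0x4F505553; // hypothetical magic value
        uint32_t magic{};
        bool initialized{};
        bool state_valid{};
        DecodeObject* self{};

        static DecodeObject& Revalidate(void* buffer, void* expected) {
            auto* obj = static_cast<DecodeObject*>(buffer);
            if (obj->magic == Magic) {
                // Known object: only trust it if it is fresh, or still lives where
                // the caller expects it to (self matches the comparison address).
                if (!obj->initialized || obj->self == static_cast<DecodeObject*>(expected)) {
                    obj->state_valid = true;
                }
            } else {
                // Unrecognized memory: treat it as a brand-new, uninitialized object.
                obj->initialized = false;
                obj->state_valid = true;
            }
            return *obj;
        }
    };
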
diff --git a/src/audio_core/opus/decoder.cpp b/src/audio_core/opus/decoder.cpp
index 5b23fce14..c6fd45f47 100644
--- a/src/audio_core/opus/decoder.cpp
+++ b/src/audio_core/opus/decoder.cpp
@@ -1,179 +1,179 @@
-// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include "audio_core/opus/decoder.h"
-#include "audio_core/opus/hardware_opus.h"
-#include "audio_core/opus/parameters.h"
-#include "common/alignment.h"
-#include "common/swap.h"
-#include "core/core.h"
-
-namespace AudioCore::OpusDecoder {
-using namespace Service::Audio;
-namespace {
-OpusPacketHeader ReverseHeader(OpusPacketHeader header) {
- OpusPacketHeader out;
- out.size = Common::swap32(header.size);
- out.final_range = Common::swap32(header.final_range);
- return out;
-}
-} // namespace
-
-OpusDecoder::OpusDecoder(Core::System& system_, HardwareOpus& hardware_opus_)
- : system{system_}, hardware_opus{hardware_opus_} {}
-
-OpusDecoder::~OpusDecoder() {
- if (decode_object_initialized) {
- hardware_opus.ShutdownDecodeObject(shared_buffer.get(), shared_buffer_size);
- }
-}
-
-Result OpusDecoder::Initialize(OpusParametersEx& params, Kernel::KTransferMemory* transfer_memory,
- u64 transfer_memory_size) {
- auto frame_size{params.use_large_frame_size ? 5760 : 1920};
- shared_buffer_size = transfer_memory_size;
- shared_buffer = std::make_unique<u8[]>(shared_buffer_size);
- shared_memory_mapped = true;
-
- buffer_size =
- Common::AlignUp((frame_size * params.channel_count) / (48'000 / params.sample_rate), 16);
-
- out_data = {shared_buffer.get() + shared_buffer_size - buffer_size, buffer_size};
- size_t in_data_size{0x600u};
- in_data = {out_data.data() - in_data_size, in_data_size};
-
- ON_RESULT_FAILURE {
- if (shared_memory_mapped) {
- shared_memory_mapped = false;
- ASSERT(R_SUCCEEDED(hardware_opus.UnmapMemory(shared_buffer.get(), shared_buffer_size)));
- }
- };
-
- R_TRY(hardware_opus.InitializeDecodeObject(params.sample_rate, params.channel_count,
- shared_buffer.get(), shared_buffer_size));
-
- sample_rate = params.sample_rate;
- channel_count = params.channel_count;
- use_large_frame_size = params.use_large_frame_size;
- decode_object_initialized = true;
- R_SUCCEED();
-}
-
-Result OpusDecoder::Initialize(OpusMultiStreamParametersEx& params,
- Kernel::KTransferMemory* transfer_memory, u64 transfer_memory_size) {
- auto frame_size{params.use_large_frame_size ? 5760 : 1920};
- shared_buffer_size = transfer_memory_size;
- shared_buffer = std::make_unique<u8[]>(shared_buffer_size);
- shared_memory_mapped = true;
-
- buffer_size =
- Common::AlignUp((frame_size * params.channel_count) / (48'000 / params.sample_rate), 16);
-
- out_data = {shared_buffer.get() + shared_buffer_size - buffer_size, buffer_size};
- size_t in_data_size{Common::AlignUp(1500ull * params.total_stream_count, 64u)};
- in_data = {out_data.data() - in_data_size, in_data_size};
-
- ON_RESULT_FAILURE {
- if (shared_memory_mapped) {
- shared_memory_mapped = false;
- ASSERT(R_SUCCEEDED(hardware_opus.UnmapMemory(shared_buffer.get(), shared_buffer_size)));
- }
- };
-
- R_TRY(hardware_opus.InitializeMultiStreamDecodeObject(
- params.sample_rate, params.channel_count, params.total_stream_count,
- params.stereo_stream_count, params.mappings.data(), shared_buffer.get(),
- shared_buffer_size));
-
- sample_rate = params.sample_rate;
- channel_count = params.channel_count;
- total_stream_count = params.total_stream_count;
- stereo_stream_count = params.stereo_stream_count;
- use_large_frame_size = params.use_large_frame_size;
- decode_object_initialized = true;
- R_SUCCEED();
-}
-
-Result OpusDecoder::DecodeInterleaved(u32* out_data_size, u64* out_time_taken,
- u32* out_sample_count, std::span<const u8> input_data,
- std::span<u8> output_data, bool reset) {
- u32 out_samples;
- u64 time_taken{};
-
- R_UNLESS(input_data.size_bytes() > sizeof(OpusPacketHeader), ResultInputDataTooSmall);
-
- auto* header_p{reinterpret_cast<const OpusPacketHeader*>(input_data.data())};
- OpusPacketHeader header{ReverseHeader(*header_p)};
-
- R_UNLESS(in_data.size_bytes() >= header.size &&
- header.size + sizeof(OpusPacketHeader) <= input_data.size_bytes(),
- ResultBufferTooSmall);
-
- if (!shared_memory_mapped) {
- R_TRY(hardware_opus.MapMemory(shared_buffer.get(), shared_buffer_size));
- shared_memory_mapped = true;
- }
-
- std::memcpy(in_data.data(), input_data.data() + sizeof(OpusPacketHeader), header.size);
-
- R_TRY(hardware_opus.DecodeInterleaved(out_samples, out_data.data(), out_data.size_bytes(),
- channel_count, in_data.data(), header.size,
- shared_buffer.get(), time_taken, reset));
-
- std::memcpy(output_data.data(), out_data.data(), out_samples * channel_count * sizeof(s16));
-
- *out_data_size = header.size + sizeof(OpusPacketHeader);
- *out_sample_count = out_samples;
- if (out_time_taken) {
- *out_time_taken = time_taken / 1000;
- }
- R_SUCCEED();
-}
-
-Result OpusDecoder::SetContext([[maybe_unused]] std::span<const u8> context) {
- R_SUCCEED_IF(shared_memory_mapped);
- shared_memory_mapped = true;
- R_RETURN(hardware_opus.MapMemory(shared_buffer.get(), shared_buffer_size));
-}
-
-Result OpusDecoder::DecodeInterleavedForMultiStream(u32* out_data_size, u64* out_time_taken,
- u32* out_sample_count,
- std::span<const u8> input_data,
- std::span<u8> output_data, bool reset) {
- u32 out_samples;
- u64 time_taken{};
-
- R_UNLESS(input_data.size_bytes() > sizeof(OpusPacketHeader), ResultInputDataTooSmall);
-
- auto* header_p{reinterpret_cast<const OpusPacketHeader*>(input_data.data())};
- OpusPacketHeader header{ReverseHeader(*header_p)};
-
- LOG_ERROR(Service_Audio, "header size 0x{:X} input data size 0x{:X} in_data size 0x{:X}",
- header.size, input_data.size_bytes(), in_data.size_bytes());
-
- R_UNLESS(in_data.size_bytes() >= header.size &&
- header.size + sizeof(OpusPacketHeader) <= input_data.size_bytes(),
- ResultBufferTooSmall);
-
- if (!shared_memory_mapped) {
- R_TRY(hardware_opus.MapMemory(shared_buffer.get(), shared_buffer_size));
- shared_memory_mapped = true;
- }
-
- std::memcpy(in_data.data(), input_data.data() + sizeof(OpusPacketHeader), header.size);
-
- R_TRY(hardware_opus.DecodeInterleavedForMultiStream(
- out_samples, out_data.data(), out_data.size_bytes(), channel_count, in_data.data(),
- header.size, shared_buffer.get(), time_taken, reset));
-
- std::memcpy(output_data.data(), out_data.data(), out_samples * channel_count * sizeof(s16));
-
- *out_data_size = header.size + sizeof(OpusPacketHeader);
- *out_sample_count = out_samples;
- if (out_time_taken) {
- *out_time_taken = time_taken / 1000;
- }
- R_SUCCEED();
-}
-
-} // namespace AudioCore::OpusDecoder
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "audio_core/opus/decoder.h"
+#include "audio_core/opus/hardware_opus.h"
+#include "audio_core/opus/parameters.h"
+#include "common/alignment.h"
+#include "common/swap.h"
+#include "core/core.h"
+
+namespace AudioCore::OpusDecoder {
+using namespace Service::Audio;
+namespace {
+OpusPacketHeader ReverseHeader(OpusPacketHeader header) {
+ OpusPacketHeader out;
+ out.size = Common::swap32(header.size);
+ out.final_range = Common::swap32(header.final_range);
+ return out;
+}
+} // namespace
+
+OpusDecoder::OpusDecoder(Core::System& system_, HardwareOpus& hardware_opus_)
+ : system{system_}, hardware_opus{hardware_opus_} {}
+
+OpusDecoder::~OpusDecoder() {
+ if (decode_object_initialized) {
+ hardware_opus.ShutdownDecodeObject(shared_buffer.get(), shared_buffer_size);
+ }
+}
+
+Result OpusDecoder::Initialize(OpusParametersEx& params, Kernel::KTransferMemory* transfer_memory,
+ u64 transfer_memory_size) {
+ auto frame_size{params.use_large_frame_size ? 5760 : 1920};
+ shared_buffer_size = transfer_memory_size;
+ shared_buffer = std::make_unique<u8[]>(shared_buffer_size);
+ shared_memory_mapped = true;
+
+ buffer_size =
+ Common::AlignUp((frame_size * params.channel_count) / (48'000 / params.sample_rate), 16);
+
+ out_data = {shared_buffer.get() + shared_buffer_size - buffer_size, buffer_size};
+ size_t in_data_size{0x600u};
+ in_data = {out_data.data() - in_data_size, in_data_size};
+
+ ON_RESULT_FAILURE {
+ if (shared_memory_mapped) {
+ shared_memory_mapped = false;
+ ASSERT(R_SUCCEEDED(hardware_opus.UnmapMemory(shared_buffer.get(), shared_buffer_size)));
+ }
+ };
+
+ R_TRY(hardware_opus.InitializeDecodeObject(params.sample_rate, params.channel_count,
+ shared_buffer.get(), shared_buffer_size));
+
+ sample_rate = params.sample_rate;
+ channel_count = params.channel_count;
+ use_large_frame_size = params.use_large_frame_size;
+ decode_object_initialized = true;
+ R_SUCCEED();
+}
+
+Result OpusDecoder::Initialize(OpusMultiStreamParametersEx& params,
+ Kernel::KTransferMemory* transfer_memory, u64 transfer_memory_size) {
+ auto frame_size{params.use_large_frame_size ? 5760 : 1920};
+ shared_buffer_size = transfer_memory_size;
+ shared_buffer = std::make_unique<u8[]>(shared_buffer_size);
+ shared_memory_mapped = true;
+
+ buffer_size =
+ Common::AlignUp((frame_size * params.channel_count) / (48'000 / params.sample_rate), 16);
+
+ out_data = {shared_buffer.get() + shared_buffer_size - buffer_size, buffer_size};
+ size_t in_data_size{Common::AlignUp(1500ull * params.total_stream_count, 64u)};
+ in_data = {out_data.data() - in_data_size, in_data_size};
+
+ ON_RESULT_FAILURE {
+ if (shared_memory_mapped) {
+ shared_memory_mapped = false;
+ ASSERT(R_SUCCEEDED(hardware_opus.UnmapMemory(shared_buffer.get(), shared_buffer_size)));
+ }
+ };
+
+ R_TRY(hardware_opus.InitializeMultiStreamDecodeObject(
+ params.sample_rate, params.channel_count, params.total_stream_count,
+ params.stereo_stream_count, params.mappings.data(), shared_buffer.get(),
+ shared_buffer_size));
+
+ sample_rate = params.sample_rate;
+ channel_count = params.channel_count;
+ total_stream_count = params.total_stream_count;
+ stereo_stream_count = params.stereo_stream_count;
+ use_large_frame_size = params.use_large_frame_size;
+ decode_object_initialized = true;
+ R_SUCCEED();
+}
+
+Result OpusDecoder::DecodeInterleaved(u32* out_data_size, u64* out_time_taken,
+ u32* out_sample_count, std::span<const u8> input_data,
+ std::span<u8> output_data, bool reset) {
+ u32 out_samples;
+ u64 time_taken{};
+
+ R_UNLESS(input_data.size_bytes() > sizeof(OpusPacketHeader), ResultInputDataTooSmall);
+
+ auto* header_p{reinterpret_cast<const OpusPacketHeader*>(input_data.data())};
+ OpusPacketHeader header{ReverseHeader(*header_p)};
+
+ R_UNLESS(in_data.size_bytes() >= header.size &&
+ header.size + sizeof(OpusPacketHeader) <= input_data.size_bytes(),
+ ResultBufferTooSmall);
+
+ if (!shared_memory_mapped) {
+ R_TRY(hardware_opus.MapMemory(shared_buffer.get(), shared_buffer_size));
+ shared_memory_mapped = true;
+ }
+
+ std::memcpy(in_data.data(), input_data.data() + sizeof(OpusPacketHeader), header.size);
+
+ R_TRY(hardware_opus.DecodeInterleaved(out_samples, out_data.data(), out_data.size_bytes(),
+ channel_count, in_data.data(), header.size,
+ shared_buffer.get(), time_taken, reset));
+
+ std::memcpy(output_data.data(), out_data.data(), out_samples * channel_count * sizeof(s16));
+
+ *out_data_size = header.size + sizeof(OpusPacketHeader);
+ *out_sample_count = out_samples;
+ if (out_time_taken) {
+ *out_time_taken = time_taken / 1000;
+ }
+ R_SUCCEED();
+}
+
+Result OpusDecoder::SetContext([[maybe_unused]] std::span<const u8> context) {
+ R_SUCCEED_IF(shared_memory_mapped);
+ shared_memory_mapped = true;
+ R_RETURN(hardware_opus.MapMemory(shared_buffer.get(), shared_buffer_size));
+}
+
+Result OpusDecoder::DecodeInterleavedForMultiStream(u32* out_data_size, u64* out_time_taken,
+ u32* out_sample_count,
+ std::span<const u8> input_data,
+ std::span<u8> output_data, bool reset) {
+ u32 out_samples;
+ u64 time_taken{};
+
+ R_UNLESS(input_data.size_bytes() > sizeof(OpusPacketHeader), ResultInputDataTooSmall);
+
+ auto* header_p{reinterpret_cast<const OpusPacketHeader*>(input_data.data())};
+ OpusPacketHeader header{ReverseHeader(*header_p)};
+
+ LOG_ERROR(Service_Audio, "header size 0x{:X} input data size 0x{:X} in_data size 0x{:X}",
+ header.size, input_data.size_bytes(), in_data.size_bytes());
+
+ R_UNLESS(in_data.size_bytes() >= header.size &&
+ header.size + sizeof(OpusPacketHeader) <= input_data.size_bytes(),
+ ResultBufferTooSmall);
+
+ if (!shared_memory_mapped) {
+ R_TRY(hardware_opus.MapMemory(shared_buffer.get(), shared_buffer_size));
+ shared_memory_mapped = true;
+ }
+
+ std::memcpy(in_data.data(), input_data.data() + sizeof(OpusPacketHeader), header.size);
+
+ R_TRY(hardware_opus.DecodeInterleavedForMultiStream(
+ out_samples, out_data.data(), out_data.size_bytes(), channel_count, in_data.data(),
+ header.size, shared_buffer.get(), time_taken, reset));
+
+ std::memcpy(output_data.data(), out_data.data(), out_samples * channel_count * sizeof(s16));
+
+ *out_data_size = header.size + sizeof(OpusPacketHeader);
+ *out_sample_count = out_samples;
+ if (out_time_taken) {
+ *out_time_taken = time_taken / 1000;
+ }
+ R_SUCCEED();
+}
+
+} // namespace AudioCore::OpusDecoder
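
Initialize carves the guest-visible transfer memory into fixed regions: the PCM output span sits at the very end of the buffer, and the packet input span sits immediately before it. Here is a small sketch of that carve-up under the same constants, with plain integers standing in for the OpusParametersEx fields and AlignUp16 as a local helper.

    #include <cstddef>
    #include <cstdint>
    #include <span>

    constexpr size_t AlignUp16(size_t v) { return (v + 15) & ~size_t{15}; }

    void CarveBuffers(uint8_t* shared, size_t shared_size, uint32_t sample_rate,
                      uint32_t channels, bool large_frames,
                      std::span<uint8_t>& in_data, std::span<uint8_t>& out_data) {
        const size_t frame_size = large_frames ? 5760 : 1920;
        const size_t out_size = AlignUp16((frame_size * channels) / (48'000 / sample_rate));
        out_data = {shared + shared_size - out_size, out_size}; // PCM output at the tail
        const size_t in_size = 0x600;                           // single-stream input scratch
        in_data = {out_data.data() - in_size, in_size};         // input directly before it
    }
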
diff --git a/src/audio_core/opus/decoder.h b/src/audio_core/opus/decoder.h
index d08d8a4a4..fd728958a 100644
--- a/src/audio_core/opus/decoder.h
+++ b/src/audio_core/opus/decoder.h
@@ -1,53 +1,53 @@
-// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#pragma once
-
-#include <span>
-
-#include "audio_core/opus/parameters.h"
-#include "common/common_types.h"
-#include "core/hle/kernel/k_transfer_memory.h"
-#include "core/hle/service/audio/errors.h"
-
-namespace Core {
-class System;
-}
-
-namespace AudioCore::OpusDecoder {
-class HardwareOpus;
-
-class OpusDecoder {
-public:
- explicit OpusDecoder(Core::System& system, HardwareOpus& hardware_opus_);
- ~OpusDecoder();
-
- Result Initialize(OpusParametersEx& params, Kernel::KTransferMemory* transfer_memory,
- u64 transfer_memory_size);
- Result Initialize(OpusMultiStreamParametersEx& params, Kernel::KTransferMemory* transfer_memory,
- u64 transfer_memory_size);
- Result DecodeInterleaved(u32* out_data_size, u64* out_time_taken, u32* out_sample_count,
- std::span<const u8> input_data, std::span<u8> output_data, bool reset);
- Result SetContext([[maybe_unused]] std::span<const u8> context);
- Result DecodeInterleavedForMultiStream(u32* out_data_size, u64* out_time_taken,
- u32* out_sample_count, std::span<const u8> input_data,
- std::span<u8> output_data, bool reset);
-
-private:
- Core::System& system;
- HardwareOpus& hardware_opus;
- std::unique_ptr<u8[]> shared_buffer{};
- u64 shared_buffer_size;
- std::span<u8> in_data{};
- std::span<u8> out_data{};
- u64 buffer_size{};
- s32 sample_rate{};
- s32 channel_count{};
- bool use_large_frame_size{false};
- s32 total_stream_count{};
- s32 stereo_stream_count{};
- bool shared_memory_mapped{false};
- bool decode_object_initialized{false};
-};
-
-} // namespace AudioCore::OpusDecoder
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <span>
+
+#include "audio_core/opus/parameters.h"
+#include "common/common_types.h"
+#include "core/hle/kernel/k_transfer_memory.h"
+#include "core/hle/service/audio/errors.h"
+
+namespace Core {
+class System;
+}
+
+namespace AudioCore::OpusDecoder {
+class HardwareOpus;
+
+class OpusDecoder {
+public:
+ explicit OpusDecoder(Core::System& system, HardwareOpus& hardware_opus_);
+ ~OpusDecoder();
+
+ Result Initialize(OpusParametersEx& params, Kernel::KTransferMemory* transfer_memory,
+ u64 transfer_memory_size);
+ Result Initialize(OpusMultiStreamParametersEx& params, Kernel::KTransferMemory* transfer_memory,
+ u64 transfer_memory_size);
+ Result DecodeInterleaved(u32* out_data_size, u64* out_time_taken, u32* out_sample_count,
+ std::span<const u8> input_data, std::span<u8> output_data, bool reset);
+ Result SetContext([[maybe_unused]] std::span<const u8> context);
+ Result DecodeInterleavedForMultiStream(u32* out_data_size, u64* out_time_taken,
+ u32* out_sample_count, std::span<const u8> input_data,
+ std::span<u8> output_data, bool reset);
+
+private:
+ Core::System& system;
+ HardwareOpus& hardware_opus;
+ std::unique_ptr<u8[]> shared_buffer{};
+ u64 shared_buffer_size;
+ std::span<u8> in_data{};
+ std::span<u8> out_data{};
+ u64 buffer_size{};
+ s32 sample_rate{};
+ s32 channel_count{};
+ bool use_large_frame_size{false};
+ s32 total_stream_count{};
+ s32 stereo_stream_count{};
+ bool shared_memory_mapped{false};
+ bool decode_object_initialized{false};
+};
+
+} // namespace AudioCore::OpusDecoder
diff --git a/src/audio_core/opus/decoder_manager.cpp b/src/audio_core/opus/decoder_manager.cpp
index fdeccdf50..1464880a1 100644
--- a/src/audio_core/opus/decoder_manager.cpp
+++ b/src/audio_core/opus/decoder_manager.cpp
@@ -1,102 +1,102 @@
-// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include "audio_core/adsp/apps/opus/opus_decoder.h"
-#include "audio_core/opus/decoder_manager.h"
-#include "common/alignment.h"
-#include "core/core.h"
-
-namespace AudioCore::OpusDecoder {
-using namespace Service::Audio;
-
-namespace {
-bool IsValidChannelCount(u32 channel_count) {
- return channel_count == 1 || channel_count == 2;
-}
-
-bool IsValidMultiStreamChannelCount(u32 channel_count) {
- return channel_count > 0 && channel_count <= OpusStreamCountMax;
-}
-
-bool IsValidSampleRate(u32 sample_rate) {
- return sample_rate == 8'000 || sample_rate == 12'000 || sample_rate == 16'000 ||
- sample_rate == 24'000 || sample_rate == 48'000;
-}
-
-bool IsValidStreamCount(u32 channel_count, u32 total_stream_count, u32 stereo_stream_count) {
- return total_stream_count > 0 && static_cast<s32>(stereo_stream_count) >= 0 &&
- stereo_stream_count <= total_stream_count &&
- total_stream_count + stereo_stream_count <= channel_count;
-}
-
-} // namespace
-
-OpusDecoderManager::OpusDecoderManager(Core::System& system_)
- : system{system_}, hardware_opus{system} {
- for (u32 i = 0; i < MaxChannels; i++) {
- required_workbuffer_sizes[i] = hardware_opus.GetWorkBufferSize(1 + i);
- }
-}
-
-Result OpusDecoderManager::GetWorkBufferSize(OpusParameters& params, u64& out_size) {
- OpusParametersEx ex{
- .sample_rate = params.sample_rate,
- .channel_count = params.channel_count,
- .use_large_frame_size = false,
- };
- R_RETURN(GetWorkBufferSizeExEx(ex, out_size));
-}
-
-Result OpusDecoderManager::GetWorkBufferSizeEx(OpusParametersEx& params, u64& out_size) {
- R_RETURN(GetWorkBufferSizeExEx(params, out_size));
-}
-
-Result OpusDecoderManager::GetWorkBufferSizeExEx(OpusParametersEx& params, u64& out_size) {
- R_UNLESS(IsValidChannelCount(params.channel_count), ResultInvalidOpusChannelCount);
- R_UNLESS(IsValidSampleRate(params.sample_rate), ResultInvalidOpusSampleRate);
-
- auto work_buffer_size{required_workbuffer_sizes[params.channel_count - 1]};
- auto frame_size{params.use_large_frame_size ? 5760 : 1920};
- work_buffer_size +=
- Common::AlignUp((frame_size * params.channel_count) / (48'000 / params.sample_rate), 64);
- out_size = work_buffer_size + 0x600;
- R_SUCCEED();
-}
-
-Result OpusDecoderManager::GetWorkBufferSizeForMultiStream(OpusMultiStreamParameters& params,
- u64& out_size) {
- OpusMultiStreamParametersEx ex{
- .sample_rate = params.sample_rate,
- .channel_count = params.channel_count,
- .total_stream_count = params.total_stream_count,
- .stereo_stream_count = params.stereo_stream_count,
- .use_large_frame_size = false,
- .mappings = {},
- };
- R_RETURN(GetWorkBufferSizeForMultiStreamExEx(ex, out_size));
-}
-
-Result OpusDecoderManager::GetWorkBufferSizeForMultiStreamEx(OpusMultiStreamParametersEx& params,
- u64& out_size) {
- R_RETURN(GetWorkBufferSizeForMultiStreamExEx(params, out_size));
-}
-
-Result OpusDecoderManager::GetWorkBufferSizeForMultiStreamExEx(OpusMultiStreamParametersEx& params,
- u64& out_size) {
- R_UNLESS(IsValidMultiStreamChannelCount(params.channel_count), ResultInvalidOpusChannelCount);
- R_UNLESS(IsValidSampleRate(params.sample_rate), ResultInvalidOpusSampleRate);
- R_UNLESS(IsValidStreamCount(params.channel_count, params.total_stream_count,
- params.stereo_stream_count),
- ResultInvalidOpusSampleRate);
-
- auto work_buffer_size{hardware_opus.GetWorkBufferSizeForMultiStream(
- params.total_stream_count, params.stereo_stream_count)};
- auto frame_size{params.use_large_frame_size ? 5760 : 1920};
- work_buffer_size += Common::AlignUp(1500 * params.total_stream_count, 64);
- work_buffer_size +=
- Common::AlignUp((frame_size * params.channel_count) / (48'000 / params.sample_rate), 64);
- out_size = work_buffer_size;
- R_SUCCEED();
-}
-
-} // namespace AudioCore::OpusDecoder
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "audio_core/adsp/apps/opus/opus_decoder.h"
+#include "audio_core/opus/decoder_manager.h"
+#include "common/alignment.h"
+#include "core/core.h"
+
+namespace AudioCore::OpusDecoder {
+using namespace Service::Audio;
+
+namespace {
+bool IsValidChannelCount(u32 channel_count) {
+ return channel_count == 1 || channel_count == 2;
+}
+
+bool IsValidMultiStreamChannelCount(u32 channel_count) {
+ return channel_count > 0 && channel_count <= OpusStreamCountMax;
+}
+
+bool IsValidSampleRate(u32 sample_rate) {
+ return sample_rate == 8'000 || sample_rate == 12'000 || sample_rate == 16'000 ||
+ sample_rate == 24'000 || sample_rate == 48'000;
+}
+
+bool IsValidStreamCount(u32 channel_count, u32 total_stream_count, u32 stereo_stream_count) {
+ return total_stream_count > 0 && static_cast<s32>(stereo_stream_count) >= 0 &&
+ stereo_stream_count <= total_stream_count &&
+ total_stream_count + stereo_stream_count <= channel_count;
+}
+
+} // namespace
+
+OpusDecoderManager::OpusDecoderManager(Core::System& system_)
+ : system{system_}, hardware_opus{system} {
+ for (u32 i = 0; i < MaxChannels; i++) {
+ required_workbuffer_sizes[i] = hardware_opus.GetWorkBufferSize(1 + i);
+ }
+}
+
+Result OpusDecoderManager::GetWorkBufferSize(OpusParameters& params, u64& out_size) {
+ OpusParametersEx ex{
+ .sample_rate = params.sample_rate,
+ .channel_count = params.channel_count,
+ .use_large_frame_size = false,
+ };
+ R_RETURN(GetWorkBufferSizeExEx(ex, out_size));
+}
+
+Result OpusDecoderManager::GetWorkBufferSizeEx(OpusParametersEx& params, u64& out_size) {
+ R_RETURN(GetWorkBufferSizeExEx(params, out_size));
+}
+
+Result OpusDecoderManager::GetWorkBufferSizeExEx(OpusParametersEx& params, u64& out_size) {
+ R_UNLESS(IsValidChannelCount(params.channel_count), ResultInvalidOpusChannelCount);
+ R_UNLESS(IsValidSampleRate(params.sample_rate), ResultInvalidOpusSampleRate);
+
+ auto work_buffer_size{required_workbuffer_sizes[params.channel_count - 1]};
+ auto frame_size{params.use_large_frame_size ? 5760 : 1920};
+ work_buffer_size +=
+ Common::AlignUp((frame_size * params.channel_count) / (48'000 / params.sample_rate), 64);
+ out_size = work_buffer_size + 0x600;
+ R_SUCCEED();
+}
+
+Result OpusDecoderManager::GetWorkBufferSizeForMultiStream(OpusMultiStreamParameters& params,
+ u64& out_size) {
+ OpusMultiStreamParametersEx ex{
+ .sample_rate = params.sample_rate,
+ .channel_count = params.channel_count,
+ .total_stream_count = params.total_stream_count,
+ .stereo_stream_count = params.stereo_stream_count,
+ .use_large_frame_size = false,
+ .mappings = {},
+ };
+ R_RETURN(GetWorkBufferSizeForMultiStreamExEx(ex, out_size));
+}
+
+Result OpusDecoderManager::GetWorkBufferSizeForMultiStreamEx(OpusMultiStreamParametersEx& params,
+ u64& out_size) {
+ R_RETURN(GetWorkBufferSizeForMultiStreamExEx(params, out_size));
+}
+
+Result OpusDecoderManager::GetWorkBufferSizeForMultiStreamExEx(OpusMultiStreamParametersEx& params,
+ u64& out_size) {
+ R_UNLESS(IsValidMultiStreamChannelCount(params.channel_count), ResultInvalidOpusChannelCount);
+ R_UNLESS(IsValidSampleRate(params.sample_rate), ResultInvalidOpusSampleRate);
+ R_UNLESS(IsValidStreamCount(params.channel_count, params.total_stream_count,
+ params.stereo_stream_count),
+ ResultInvalidOpusSampleRate);
+
+ auto work_buffer_size{hardware_opus.GetWorkBufferSizeForMultiStream(
+ params.total_stream_count, params.stereo_stream_count)};
+ auto frame_size{params.use_large_frame_size ? 5760 : 1920};
+ work_buffer_size += Common::AlignUp(1500 * params.total_stream_count, 64);
+ work_buffer_size +=
+ Common::AlignUp((frame_size * params.channel_count) / (48'000 / params.sample_rate), 64);
+ out_size = work_buffer_size;
+ R_SUCCEED();
+}
+
+} // namespace AudioCore::OpusDecoder
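
To make the size arithmetic concrete, here is the single-stream GetWorkBufferSizeExEx computation instantiated for 48 kHz stereo with large frames; `base` stands in for required_workbuffer_sizes[1], whose real value comes from the DSP.

    #include <cstdint>

    constexpr uint64_t AlignUp64(uint64_t v) { return (v + 63) & ~uint64_t{63}; }

    constexpr uint64_t WorkBufferSize(uint64_t base) {
        constexpr uint64_t frame_size = 5760; // use_large_frame_size == true
        constexpr uint64_t channels = 2;
        constexpr uint64_t sample_rate = 48'000;
        // (5760 * 2) / (48'000 / 48'000) = 11'520, already 64-byte aligned.
        return base + AlignUp64((frame_size * channels) / (48'000 / sample_rate)) + 0x600;
    }

    static_assert(WorkBufferSize(0) == 11'520 + 0x600); // 13'056 bytes plus the base size
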
diff --git a/src/audio_core/opus/decoder_manager.h b/src/audio_core/opus/decoder_manager.h
index 466e1967b..70ebc4bab 100644
--- a/src/audio_core/opus/decoder_manager.h
+++ b/src/audio_core/opus/decoder_manager.h
@@ -1,38 +1,38 @@
-// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#pragma once
-
-#include "audio_core/opus/hardware_opus.h"
-#include "audio_core/opus/parameters.h"
-#include "common/common_types.h"
-#include "core/hle/service/audio/errors.h"
-
-namespace Core {
-class System;
-}
-
-namespace AudioCore::OpusDecoder {
-
-class OpusDecoderManager {
-public:
- OpusDecoderManager(Core::System& system);
-
- HardwareOpus& GetHardwareOpus() {
- return hardware_opus;
- }
-
- Result GetWorkBufferSize(OpusParameters& params, u64& out_size);
- Result GetWorkBufferSizeEx(OpusParametersEx& params, u64& out_size);
- Result GetWorkBufferSizeExEx(OpusParametersEx& params, u64& out_size);
- Result GetWorkBufferSizeForMultiStream(OpusMultiStreamParameters& params, u64& out_size);
- Result GetWorkBufferSizeForMultiStreamEx(OpusMultiStreamParametersEx& params, u64& out_size);
- Result GetWorkBufferSizeForMultiStreamExEx(OpusMultiStreamParametersEx& params, u64& out_size);
-
-private:
- Core::System& system;
- HardwareOpus hardware_opus;
- std::array<u64, MaxChannels> required_workbuffer_sizes{};
-};
-
-} // namespace AudioCore::OpusDecoder
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "audio_core/opus/hardware_opus.h"
+#include "audio_core/opus/parameters.h"
+#include "common/common_types.h"
+#include "core/hle/service/audio/errors.h"
+
+namespace Core {
+class System;
+}
+
+namespace AudioCore::OpusDecoder {
+
+class OpusDecoderManager {
+public:
+ OpusDecoderManager(Core::System& system);
+
+ HardwareOpus& GetHardwareOpus() {
+ return hardware_opus;
+ }
+
+ Result GetWorkBufferSize(OpusParameters& params, u64& out_size);
+ Result GetWorkBufferSizeEx(OpusParametersEx& params, u64& out_size);
+ Result GetWorkBufferSizeExEx(OpusParametersEx& params, u64& out_size);
+ Result GetWorkBufferSizeForMultiStream(OpusMultiStreamParameters& params, u64& out_size);
+ Result GetWorkBufferSizeForMultiStreamEx(OpusMultiStreamParametersEx& params, u64& out_size);
+ Result GetWorkBufferSizeForMultiStreamExEx(OpusMultiStreamParametersEx& params, u64& out_size);
+
+private:
+ Core::System& system;
+ HardwareOpus hardware_opus;
+ std::array<u64, MaxChannels> required_workbuffer_sizes{};
+};
+
+} // namespace AudioCore::OpusDecoder
diff --git a/src/audio_core/opus/hardware_opus.cpp b/src/audio_core/opus/hardware_opus.cpp
index d6544dcb0..5ff71ab2d 100644
--- a/src/audio_core/opus/hardware_opus.cpp
+++ b/src/audio_core/opus/hardware_opus.cpp
@@ -1,241 +1,241 @@
-// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include <array>
-
-#include "audio_core/audio_core.h"
-#include "audio_core/opus/hardware_opus.h"
-#include "core/core.h"
-
-namespace AudioCore::OpusDecoder {
-namespace {
-using namespace Service::Audio;
-
-static constexpr Result ResultCodeFromLibOpusErrorCode(u64 error_code) {
- s32 error{static_cast<s32>(error_code)};
- ASSERT(error <= OPUS_OK);
- switch (error) {
- case OPUS_ALLOC_FAIL:
- R_THROW(ResultLibOpusAllocFail);
- case OPUS_INVALID_STATE:
- R_THROW(ResultLibOpusInvalidState);
- case OPUS_UNIMPLEMENTED:
- R_THROW(ResultLibOpusUnimplemented);
- case OPUS_INVALID_PACKET:
- R_THROW(ResultLibOpusInvalidPacket);
- case OPUS_INTERNAL_ERROR:
- R_THROW(ResultLibOpusInternalError);
- case OPUS_BUFFER_TOO_SMALL:
- R_THROW(ResultBufferTooSmall);
- case OPUS_BAD_ARG:
- R_THROW(ResultLibOpusBadArg);
- case OPUS_OK:
- R_RETURN(ResultSuccess);
- }
- UNREACHABLE();
-}
-
-} // namespace
-
-HardwareOpus::HardwareOpus(Core::System& system_)
- : system{system_}, opus_decoder{system.AudioCore().ADSP().OpusDecoder()} {
- opus_decoder.SetSharedMemory(shared_memory);
-}
-
-u64 HardwareOpus::GetWorkBufferSize(u32 channel) {
- if (!opus_decoder.IsRunning()) {
- return 0;
- }
- std::scoped_lock l{mutex};
- shared_memory.host_send_data[0] = channel;
- opus_decoder.Send(ADSP::Direction::DSP, ADSP::OpusDecoder::Message::GetWorkBufferSize);
- auto msg = opus_decoder.Receive(ADSP::Direction::Host);
- if (msg != ADSP::OpusDecoder::Message::GetWorkBufferSizeOK) {
- LOG_ERROR(Service_Audio, "OpusDecoder returned invalid message. Expected {} got {}",
- ADSP::OpusDecoder::Message::GetWorkBufferSizeOK, msg);
- return 0;
- }
- return shared_memory.dsp_return_data[0];
-}
-
-u64 HardwareOpus::GetWorkBufferSizeForMultiStream(u32 total_stream_count, u32 stereo_stream_count) {
- std::scoped_lock l{mutex};
- shared_memory.host_send_data[0] = total_stream_count;
- shared_memory.host_send_data[1] = stereo_stream_count;
- opus_decoder.Send(ADSP::Direction::DSP,
- ADSP::OpusDecoder::Message::GetWorkBufferSizeForMultiStream);
- auto msg = opus_decoder.Receive(ADSP::Direction::Host);
- if (msg != ADSP::OpusDecoder::Message::GetWorkBufferSizeForMultiStreamOK) {
- LOG_ERROR(Service_Audio, "OpusDecoder returned invalid message. Expected {} got {}",
- ADSP::OpusDecoder::Message::GetWorkBufferSizeForMultiStreamOK, msg);
- return 0;
- }
- return shared_memory.dsp_return_data[0];
-}
-
-Result HardwareOpus::InitializeDecodeObject(u32 sample_rate, u32 channel_count, void* buffer,
- u64 buffer_size) {
- std::scoped_lock l{mutex};
- shared_memory.host_send_data[0] = (u64)buffer;
- shared_memory.host_send_data[1] = buffer_size;
- shared_memory.host_send_data[2] = sample_rate;
- shared_memory.host_send_data[3] = channel_count;
-
- opus_decoder.Send(ADSP::Direction::DSP, ADSP::OpusDecoder::Message::InitializeDecodeObject);
- auto msg = opus_decoder.Receive(ADSP::Direction::Host);
- if (msg != ADSP::OpusDecoder::Message::InitializeDecodeObjectOK) {
- LOG_ERROR(Service_Audio, "OpusDecoder returned invalid message. Expected {} got {}",
- ADSP::OpusDecoder::Message::InitializeDecodeObjectOK, msg);
- R_THROW(ResultInvalidOpusDSPReturnCode);
- }
-
- R_RETURN(ResultCodeFromLibOpusErrorCode(shared_memory.dsp_return_data[0]));
-}
-
-Result HardwareOpus::InitializeMultiStreamDecodeObject(u32 sample_rate, u32 channel_count,
- u32 total_stream_count,
- u32 stereo_stream_count, void* mappings,
- void* buffer, u64 buffer_size) {
- std::scoped_lock l{mutex};
- shared_memory.host_send_data[0] = (u64)buffer;
- shared_memory.host_send_data[1] = buffer_size;
- shared_memory.host_send_data[2] = sample_rate;
- shared_memory.host_send_data[3] = channel_count;
- shared_memory.host_send_data[4] = total_stream_count;
- shared_memory.host_send_data[5] = stereo_stream_count;
-
- ASSERT(channel_count <= MaxChannels);
- std::memcpy(shared_memory.channel_mapping.data(), mappings, channel_count * sizeof(u8));
-
- opus_decoder.Send(ADSP::Direction::DSP,
- ADSP::OpusDecoder::Message::InitializeMultiStreamDecodeObject);
- auto msg = opus_decoder.Receive(ADSP::Direction::Host);
- if (msg != ADSP::OpusDecoder::Message::InitializeMultiStreamDecodeObjectOK) {
- LOG_ERROR(Service_Audio, "OpusDecoder returned invalid message. Expected {} got {}",
- ADSP::OpusDecoder::Message::InitializeMultiStreamDecodeObjectOK, msg);
- R_THROW(ResultInvalidOpusDSPReturnCode);
- }
-
- R_RETURN(ResultCodeFromLibOpusErrorCode(shared_memory.dsp_return_data[0]));
-}
-
-Result HardwareOpus::ShutdownDecodeObject(void* buffer, u64 buffer_size) {
- std::scoped_lock l{mutex};
- shared_memory.host_send_data[0] = (u64)buffer;
- shared_memory.host_send_data[1] = buffer_size;
-
- opus_decoder.Send(ADSP::Direction::DSP, ADSP::OpusDecoder::Message::ShutdownDecodeObject);
- auto msg = opus_decoder.Receive(ADSP::Direction::Host);
- ASSERT_MSG(msg == ADSP::OpusDecoder::Message::ShutdownDecodeObjectOK,
- "Expected Opus shutdown code {}, got {}",
- ADSP::OpusDecoder::Message::ShutdownDecodeObjectOK, msg);
-
- R_RETURN(ResultCodeFromLibOpusErrorCode(shared_memory.dsp_return_data[0]));
-}
-
-Result HardwareOpus::ShutdownMultiStreamDecodeObject(void* buffer, u64 buffer_size) {
- std::scoped_lock l{mutex};
- shared_memory.host_send_data[0] = (u64)buffer;
- shared_memory.host_send_data[1] = buffer_size;
-
- opus_decoder.Send(ADSP::Direction::DSP,
- ADSP::OpusDecoder::Message::ShutdownMultiStreamDecodeObject);
- auto msg = opus_decoder.Receive(ADSP::Direction::Host);
- ASSERT_MSG(msg == ADSP::OpusDecoder::Message::ShutdownMultiStreamDecodeObjectOK,
- "Expected Opus shutdown code {}, got {}",
- ADSP::OpusDecoder::Message::ShutdownMultiStreamDecodeObjectOK, msg);
-
- R_RETURN(ResultCodeFromLibOpusErrorCode(shared_memory.dsp_return_data[0]));
-}
-
-Result HardwareOpus::DecodeInterleaved(u32& out_sample_count, void* output_data,
- u64 output_data_size, u32 channel_count, void* input_data,
- u64 input_data_size, void* buffer, u64& out_time_taken,
- bool reset) {
- std::scoped_lock l{mutex};
- shared_memory.host_send_data[0] = (u64)buffer;
- shared_memory.host_send_data[1] = (u64)input_data;
- shared_memory.host_send_data[2] = input_data_size;
- shared_memory.host_send_data[3] = (u64)output_data;
- shared_memory.host_send_data[4] = output_data_size;
- shared_memory.host_send_data[5] = 0;
- shared_memory.host_send_data[6] = reset;
-
- opus_decoder.Send(ADSP::Direction::DSP, ADSP::OpusDecoder::Message::DecodeInterleaved);
- auto msg = opus_decoder.Receive(ADSP::Direction::Host);
- if (msg != ADSP::OpusDecoder::Message::DecodeInterleavedOK) {
- LOG_ERROR(Service_Audio, "OpusDecoder returned invalid message. Expected {} got {}",
- ADSP::OpusDecoder::Message::DecodeInterleavedOK, msg);
- R_THROW(ResultInvalidOpusDSPReturnCode);
- }
-
- auto error_code{static_cast<s32>(shared_memory.dsp_return_data[0])};
- if (error_code == OPUS_OK) {
- out_sample_count = static_cast<u32>(shared_memory.dsp_return_data[1]);
- out_time_taken = 1000 * shared_memory.dsp_return_data[2];
- }
- R_RETURN(ResultCodeFromLibOpusErrorCode(error_code));
-}
-
-Result HardwareOpus::DecodeInterleavedForMultiStream(u32& out_sample_count, void* output_data,
- u64 output_data_size, u32 channel_count,
- void* input_data, u64 input_data_size,
- void* buffer, u64& out_time_taken,
- bool reset) {
- std::scoped_lock l{mutex};
- shared_memory.host_send_data[0] = (u64)buffer;
- shared_memory.host_send_data[1] = (u64)input_data;
- shared_memory.host_send_data[2] = input_data_size;
- shared_memory.host_send_data[3] = (u64)output_data;
- shared_memory.host_send_data[4] = output_data_size;
- shared_memory.host_send_data[5] = 0;
- shared_memory.host_send_data[6] = reset;
-
- opus_decoder.Send(ADSP::Direction::DSP,
- ADSP::OpusDecoder::Message::DecodeInterleavedForMultiStream);
- auto msg = opus_decoder.Receive(ADSP::Direction::Host);
- if (msg != ADSP::OpusDecoder::Message::DecodeInterleavedForMultiStreamOK) {
- LOG_ERROR(Service_Audio, "OpusDecoder returned invalid message. Expected {} got {}",
- ADSP::OpusDecoder::Message::DecodeInterleavedForMultiStreamOK, msg);
- R_THROW(ResultInvalidOpusDSPReturnCode);
- }
-
- auto error_code{static_cast<s32>(shared_memory.dsp_return_data[0])};
- if (error_code == OPUS_OK) {
- out_sample_count = static_cast<u32>(shared_memory.dsp_return_data[1]);
- out_time_taken = 1000 * shared_memory.dsp_return_data[2];
- }
- R_RETURN(ResultCodeFromLibOpusErrorCode(error_code));
-}
-
-Result HardwareOpus::MapMemory(void* buffer, u64 buffer_size) {
- std::scoped_lock l{mutex};
- shared_memory.host_send_data[0] = (u64)buffer;
- shared_memory.host_send_data[1] = buffer_size;
-
- opus_decoder.Send(ADSP::Direction::DSP, ADSP::OpusDecoder::Message::MapMemory);
- auto msg = opus_decoder.Receive(ADSP::Direction::Host);
- if (msg != ADSP::OpusDecoder::Message::MapMemoryOK) {
- LOG_ERROR(Service_Audio, "OpusDecoder returned invalid message. Expected {} got {}",
- ADSP::OpusDecoder::Message::MapMemoryOK, msg);
- R_THROW(ResultInvalidOpusDSPReturnCode);
- }
- R_SUCCEED();
-}
-
-Result HardwareOpus::UnmapMemory(void* buffer, u64 buffer_size) {
- std::scoped_lock l{mutex};
- shared_memory.host_send_data[0] = (u64)buffer;
- shared_memory.host_send_data[1] = buffer_size;
-
- opus_decoder.Send(ADSP::Direction::DSP, ADSP::OpusDecoder::Message::UnmapMemory);
- auto msg = opus_decoder.Receive(ADSP::Direction::Host);
- if (msg != ADSP::OpusDecoder::Message::UnmapMemoryOK) {
- LOG_ERROR(Service_Audio, "OpusDecoder returned invalid message. Expected {} got {}",
- ADSP::OpusDecoder::Message::UnmapMemoryOK, msg);
- R_THROW(ResultInvalidOpusDSPReturnCode);
- }
- R_SUCCEED();
-}
-
-} // namespace AudioCore::OpusDecoder
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <array>
+
+#include "audio_core/audio_core.h"
+#include "audio_core/opus/hardware_opus.h"
+#include "core/core.h"
+
+namespace AudioCore::OpusDecoder {
+namespace {
+using namespace Service::Audio;
+
+static constexpr Result ResultCodeFromLibOpusErrorCode(u64 error_code) {
+ s32 error{static_cast<s32>(error_code)};
+ ASSERT(error <= OPUS_OK);
+ switch (error) {
+ case OPUS_ALLOC_FAIL:
+ R_THROW(ResultLibOpusAllocFail);
+ case OPUS_INVALID_STATE:
+ R_THROW(ResultLibOpusInvalidState);
+ case OPUS_UNIMPLEMENTED:
+ R_THROW(ResultLibOpusUnimplemented);
+ case OPUS_INVALID_PACKET:
+ R_THROW(ResultLibOpusInvalidPacket);
+ case OPUS_INTERNAL_ERROR:
+ R_THROW(ResultLibOpusInternalError);
+ case OPUS_BUFFER_TOO_SMALL:
+ R_THROW(ResultBufferTooSmall);
+ case OPUS_BAD_ARG:
+ R_THROW(ResultLibOpusBadArg);
+ case OPUS_OK:
+ R_RETURN(ResultSuccess);
+ }
+ UNREACHABLE();
+}
+
+} // namespace
+
+HardwareOpus::HardwareOpus(Core::System& system_)
+ : system{system_}, opus_decoder{system.AudioCore().ADSP().OpusDecoder()} {
+ opus_decoder.SetSharedMemory(shared_memory);
+}
+
+u64 HardwareOpus::GetWorkBufferSize(u32 channel) {
+ if (!opus_decoder.IsRunning()) {
+ return 0;
+ }
+ std::scoped_lock l{mutex};
+ shared_memory.host_send_data[0] = channel;
+ opus_decoder.Send(ADSP::Direction::DSP, ADSP::OpusDecoder::Message::GetWorkBufferSize);
+ auto msg = opus_decoder.Receive(ADSP::Direction::Host);
+ if (msg != ADSP::OpusDecoder::Message::GetWorkBufferSizeOK) {
+ LOG_ERROR(Service_Audio, "OpusDecoder returned invalid message. Expected {} got {}",
+ ADSP::OpusDecoder::Message::GetWorkBufferSizeOK, msg);
+ return 0;
+ }
+ return shared_memory.dsp_return_data[0];
+}
+
+u64 HardwareOpus::GetWorkBufferSizeForMultiStream(u32 total_stream_count, u32 stereo_stream_count) {
+ std::scoped_lock l{mutex};
+ shared_memory.host_send_data[0] = total_stream_count;
+ shared_memory.host_send_data[1] = stereo_stream_count;
+ opus_decoder.Send(ADSP::Direction::DSP,
+ ADSP::OpusDecoder::Message::GetWorkBufferSizeForMultiStream);
+ auto msg = opus_decoder.Receive(ADSP::Direction::Host);
+ if (msg != ADSP::OpusDecoder::Message::GetWorkBufferSizeForMultiStreamOK) {
+ LOG_ERROR(Service_Audio, "OpusDecoder returned invalid message. Expected {} got {}",
+ ADSP::OpusDecoder::Message::GetWorkBufferSizeForMultiStreamOK, msg);
+ return 0;
+ }
+ return shared_memory.dsp_return_data[0];
+}
+
+Result HardwareOpus::InitializeDecodeObject(u32 sample_rate, u32 channel_count, void* buffer,
+ u64 buffer_size) {
+ std::scoped_lock l{mutex};
+ shared_memory.host_send_data[0] = (u64)buffer;
+ shared_memory.host_send_data[1] = buffer_size;
+ shared_memory.host_send_data[2] = sample_rate;
+ shared_memory.host_send_data[3] = channel_count;
+
+ opus_decoder.Send(ADSP::Direction::DSP, ADSP::OpusDecoder::Message::InitializeDecodeObject);
+ auto msg = opus_decoder.Receive(ADSP::Direction::Host);
+ if (msg != ADSP::OpusDecoder::Message::InitializeDecodeObjectOK) {
+ LOG_ERROR(Service_Audio, "OpusDecoder returned invalid message. Expected {} got {}",
+ ADSP::OpusDecoder::Message::InitializeDecodeObjectOK, msg);
+ R_THROW(ResultInvalidOpusDSPReturnCode);
+ }
+
+ R_RETURN(ResultCodeFromLibOpusErrorCode(shared_memory.dsp_return_data[0]));
+}
+
+Result HardwareOpus::InitializeMultiStreamDecodeObject(u32 sample_rate, u32 channel_count,
+ u32 total_stream_count,
+ u32 stereo_stream_count, void* mappings,
+ void* buffer, u64 buffer_size) {
+ std::scoped_lock l{mutex};
+ shared_memory.host_send_data[0] = (u64)buffer;
+ shared_memory.host_send_data[1] = buffer_size;
+ shared_memory.host_send_data[2] = sample_rate;
+ shared_memory.host_send_data[3] = channel_count;
+ shared_memory.host_send_data[4] = total_stream_count;
+ shared_memory.host_send_data[5] = stereo_stream_count;
+
+ ASSERT(channel_count <= MaxChannels);
+ std::memcpy(shared_memory.channel_mapping.data(), mappings, channel_count * sizeof(u8));
+
+ opus_decoder.Send(ADSP::Direction::DSP,
+ ADSP::OpusDecoder::Message::InitializeMultiStreamDecodeObject);
+ auto msg = opus_decoder.Receive(ADSP::Direction::Host);
+ if (msg != ADSP::OpusDecoder::Message::InitializeMultiStreamDecodeObjectOK) {
+ LOG_ERROR(Service_Audio, "OpusDecoder returned invalid message. Expected {} got {}",
+ ADSP::OpusDecoder::Message::InitializeMultiStreamDecodeObjectOK, msg);
+ R_THROW(ResultInvalidOpusDSPReturnCode);
+ }
+
+ R_RETURN(ResultCodeFromLibOpusErrorCode(shared_memory.dsp_return_data[0]));
+}
+
+Result HardwareOpus::ShutdownDecodeObject(void* buffer, u64 buffer_size) {
+ std::scoped_lock l{mutex};
+ shared_memory.host_send_data[0] = (u64)buffer;
+ shared_memory.host_send_data[1] = buffer_size;
+
+ opus_decoder.Send(ADSP::Direction::DSP, ADSP::OpusDecoder::Message::ShutdownDecodeObject);
+ auto msg = opus_decoder.Receive(ADSP::Direction::Host);
+ ASSERT_MSG(msg == ADSP::OpusDecoder::Message::ShutdownDecodeObjectOK,
+ "Expected Opus shutdown code {}, got {}",
+ ADSP::OpusDecoder::Message::ShutdownDecodeObjectOK, msg);
+
+ R_RETURN(ResultCodeFromLibOpusErrorCode(shared_memory.dsp_return_data[0]));
+}
+
+Result HardwareOpus::ShutdownMultiStreamDecodeObject(void* buffer, u64 buffer_size) {
+ std::scoped_lock l{mutex};
+ shared_memory.host_send_data[0] = (u64)buffer;
+ shared_memory.host_send_data[1] = buffer_size;
+
+ opus_decoder.Send(ADSP::Direction::DSP,
+ ADSP::OpusDecoder::Message::ShutdownMultiStreamDecodeObject);
+ auto msg = opus_decoder.Receive(ADSP::Direction::Host);
+ ASSERT_MSG(msg == ADSP::OpusDecoder::Message::ShutdownMultiStreamDecodeObjectOK,
+ "Expected Opus shutdown code {}, got {}",
+ ADSP::OpusDecoder::Message::ShutdownMultiStreamDecodeObjectOK, msg);
+
+ R_RETURN(ResultCodeFromLibOpusErrorCode(shared_memory.dsp_return_data[0]));
+}
+
+Result HardwareOpus::DecodeInterleaved(u32& out_sample_count, void* output_data,
+ u64 output_data_size, u32 channel_count, void* input_data,
+ u64 input_data_size, void* buffer, u64& out_time_taken,
+ bool reset) {
+ std::scoped_lock l{mutex};
+ shared_memory.host_send_data[0] = (u64)buffer;
+ shared_memory.host_send_data[1] = (u64)input_data;
+ shared_memory.host_send_data[2] = input_data_size;
+ shared_memory.host_send_data[3] = (u64)output_data;
+ shared_memory.host_send_data[4] = output_data_size;
+ shared_memory.host_send_data[5] = 0;
+ shared_memory.host_send_data[6] = reset;
+
+ opus_decoder.Send(ADSP::Direction::DSP, ADSP::OpusDecoder::Message::DecodeInterleaved);
+ auto msg = opus_decoder.Receive(ADSP::Direction::Host);
+ if (msg != ADSP::OpusDecoder::Message::DecodeInterleavedOK) {
+ LOG_ERROR(Service_Audio, "OpusDecoder returned invalid message. Expected {} got {}",
+ ADSP::OpusDecoder::Message::DecodeInterleavedOK, msg);
+ R_THROW(ResultInvalidOpusDSPReturnCode);
+ }
+
+ auto error_code{static_cast<s32>(shared_memory.dsp_return_data[0])};
+ if (error_code == OPUS_OK) {
+ out_sample_count = static_cast<u32>(shared_memory.dsp_return_data[1]);
+ out_time_taken = 1000 * shared_memory.dsp_return_data[2];
+ }
+ R_RETURN(ResultCodeFromLibOpusErrorCode(error_code));
+}
+
+Result HardwareOpus::DecodeInterleavedForMultiStream(u32& out_sample_count, void* output_data,
+ u64 output_data_size, u32 channel_count,
+ void* input_data, u64 input_data_size,
+ void* buffer, u64& out_time_taken,
+ bool reset) {
+ std::scoped_lock l{mutex};
+ shared_memory.host_send_data[0] = (u64)buffer;
+ shared_memory.host_send_data[1] = (u64)input_data;
+ shared_memory.host_send_data[2] = input_data_size;
+ shared_memory.host_send_data[3] = (u64)output_data;
+ shared_memory.host_send_data[4] = output_data_size;
+ shared_memory.host_send_data[5] = 0;
+ shared_memory.host_send_data[6] = reset;
+
+ opus_decoder.Send(ADSP::Direction::DSP,
+ ADSP::OpusDecoder::Message::DecodeInterleavedForMultiStream);
+ auto msg = opus_decoder.Receive(ADSP::Direction::Host);
+ if (msg != ADSP::OpusDecoder::Message::DecodeInterleavedForMultiStreamOK) {
+ LOG_ERROR(Service_Audio, "OpusDecoder returned invalid message. Expected {} got {}",
+ ADSP::OpusDecoder::Message::DecodeInterleavedForMultiStreamOK, msg);
+ R_THROW(ResultInvalidOpusDSPReturnCode);
+ }
+
+ auto error_code{static_cast<s32>(shared_memory.dsp_return_data[0])};
+ if (error_code == OPUS_OK) {
+ out_sample_count = static_cast<u32>(shared_memory.dsp_return_data[1]);
+ out_time_taken = 1000 * shared_memory.dsp_return_data[2];
+ }
+ R_RETURN(ResultCodeFromLibOpusErrorCode(error_code));
+}
+
+Result HardwareOpus::MapMemory(void* buffer, u64 buffer_size) {
+ std::scoped_lock l{mutex};
+ shared_memory.host_send_data[0] = (u64)buffer;
+ shared_memory.host_send_data[1] = buffer_size;
+
+ opus_decoder.Send(ADSP::Direction::DSP, ADSP::OpusDecoder::Message::MapMemory);
+ auto msg = opus_decoder.Receive(ADSP::Direction::Host);
+ if (msg != ADSP::OpusDecoder::Message::MapMemoryOK) {
+        LOG_ERROR(Service_Audio, "OpusDecoder returned an invalid message. Expected {}, got {}",
+ ADSP::OpusDecoder::Message::MapMemoryOK, msg);
+ R_THROW(ResultInvalidOpusDSPReturnCode);
+ }
+ R_SUCCEED();
+}
+
+Result HardwareOpus::UnmapMemory(void* buffer, u64 buffer_size) {
+ std::scoped_lock l{mutex};
+ shared_memory.host_send_data[0] = (u64)buffer;
+ shared_memory.host_send_data[1] = buffer_size;
+
+ opus_decoder.Send(ADSP::Direction::DSP, ADSP::OpusDecoder::Message::UnmapMemory);
+ auto msg = opus_decoder.Receive(ADSP::Direction::Host);
+ if (msg != ADSP::OpusDecoder::Message::UnmapMemoryOK) {
+        LOG_ERROR(Service_Audio, "OpusDecoder returned an invalid message. Expected {}, got {}",
+ ADSP::OpusDecoder::Message::UnmapMemoryOK, msg);
+ R_THROW(ResultInvalidOpusDSPReturnCode);
+ }
+ R_SUCCEED();
+}
+
+} // namespace AudioCore::OpusDecoder
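Every host-side entry point in this file follows the same transaction shape: take the mutex, stage arguments in shared memory, post one message to the DSP-bound mailbox, block on the host-bound mailbox for the matching acknowledgement, then translate the libopus status the DSP left behind. The following is a minimal standalone sketch of that shape; Msg, Mailbox, and SharedMemory are simplified stand-ins for the ADSP types, and the loopback Send/Receive merely simulate an always-successful DSP.

#include <cstdint>
#include <mutex>

enum class Msg : uint32_t { Request, ReplyOK };

struct Mailbox {
    // Stand-in loopback; the real mailbox crosses to the DSP thread.
    void Send(Msg) {}
    Msg Receive() { return Msg::ReplyOK; }
};

struct SharedMemory {
    uint64_t host_send_data[7]{};
    uint64_t dsp_return_data[3]{};
};

// Returns the raw libopus status written back by the DSP side, or a
// sentinel when the handshake itself fails (the code above raises
// ResultInvalidOpusDSPReturnCode in that case).
int32_t RoundTrip(Mailbox& box, SharedMemory& shmem, std::mutex& mutex,
                  uint64_t arg0, uint64_t arg1) {
    std::scoped_lock l{mutex};      // one transaction at a time
    shmem.host_send_data[0] = arg0; // arguments travel via shared memory;
    shmem.host_send_data[1] = arg1; // only message ids cross the mailbox
    box.Send(Msg::Request);
    if (box.Receive() != Msg::ReplyOK) {
        return -1;
    }
    return static_cast<int32_t>(shmem.dsp_return_data[0]);
}

On the real path the DSP fills dsp_return_data before acknowledging, so an OPUS_OK (0) status maps to ResultSuccess through ResultCodeFromLibOpusErrorCode.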
diff --git a/src/audio_core/opus/hardware_opus.h b/src/audio_core/opus/hardware_opus.h
index 7013a6b40..b10184baa 100644
--- a/src/audio_core/opus/hardware_opus.h
+++ b/src/audio_core/opus/hardware_opus.h
@@ -1,45 +1,45 @@
-// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#pragma once
-
-#include <mutex>
-#include <opus.h>
-
-#include "audio_core/adsp/apps/opus/opus_decoder.h"
-#include "audio_core/adsp/apps/opus/shared_memory.h"
-#include "audio_core/adsp/mailbox.h"
-#include "core/hle/service/audio/errors.h"
-
-namespace AudioCore::OpusDecoder {
-class HardwareOpus {
-public:
- HardwareOpus(Core::System& system);
-
- u64 GetWorkBufferSize(u32 channel);
- u64 GetWorkBufferSizeForMultiStream(u32 total_stream_count, u32 stereo_stream_count);
-
- Result InitializeDecodeObject(u32 sample_rate, u32 channel_count, void* buffer,
- u64 buffer_size);
- Result InitializeMultiStreamDecodeObject(u32 sample_rate, u32 channel_count,
- u32 totaL_stream_count, u32 stereo_stream_count,
- void* mappings, void* buffer, u64 buffer_size);
- Result ShutdownDecodeObject(void* buffer, u64 buffer_size);
- Result ShutdownMultiStreamDecodeObject(void* buffer, u64 buffer_size);
- Result DecodeInterleaved(u32& out_sample_count, void* output_data, u64 output_data_size,
- u32 channel_count, void* input_data, u64 input_data_size, void* buffer,
- u64& out_time_taken, bool reset);
- Result DecodeInterleavedForMultiStream(u32& out_sample_count, void* output_data,
- u64 output_data_size, u32 channel_count,
- void* input_data, u64 input_data_size, void* buffer,
- u64& out_time_taken, bool reset);
- Result MapMemory(void* buffer, u64 buffer_size);
- Result UnmapMemory(void* buffer, u64 buffer_size);
-
-private:
- Core::System& system;
- std::mutex mutex;
- ADSP::OpusDecoder::OpusDecoder& opus_decoder;
- ADSP::OpusDecoder::SharedMemory shared_memory;
-};
-} // namespace AudioCore::OpusDecoder
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <mutex>
+#include <opus.h>
+
+#include "audio_core/adsp/apps/opus/opus_decoder.h"
+#include "audio_core/adsp/apps/opus/shared_memory.h"
+#include "audio_core/adsp/mailbox.h"
+#include "core/hle/service/audio/errors.h"
+
+namespace AudioCore::OpusDecoder {
+class HardwareOpus {
+public:
+ HardwareOpus(Core::System& system);
+
+ u64 GetWorkBufferSize(u32 channel);
+ u64 GetWorkBufferSizeForMultiStream(u32 total_stream_count, u32 stereo_stream_count);
+
+ Result InitializeDecodeObject(u32 sample_rate, u32 channel_count, void* buffer,
+ u64 buffer_size);
+ Result InitializeMultiStreamDecodeObject(u32 sample_rate, u32 channel_count,
+                                             u32 total_stream_count, u32 stereo_stream_count,
+ void* mappings, void* buffer, u64 buffer_size);
+ Result ShutdownDecodeObject(void* buffer, u64 buffer_size);
+ Result ShutdownMultiStreamDecodeObject(void* buffer, u64 buffer_size);
+ Result DecodeInterleaved(u32& out_sample_count, void* output_data, u64 output_data_size,
+ u32 channel_count, void* input_data, u64 input_data_size, void* buffer,
+ u64& out_time_taken, bool reset);
+ Result DecodeInterleavedForMultiStream(u32& out_sample_count, void* output_data,
+ u64 output_data_size, u32 channel_count,
+ void* input_data, u64 input_data_size, void* buffer,
+ u64& out_time_taken, bool reset);
+ Result MapMemory(void* buffer, u64 buffer_size);
+ Result UnmapMemory(void* buffer, u64 buffer_size);
+
+private:
+ Core::System& system;
+ std::mutex mutex;
+ ADSP::OpusDecoder::OpusDecoder& opus_decoder;
+ ADSP::OpusDecoder::SharedMemory shared_memory;
+};
+} // namespace AudioCore::OpusDecoder
diff --git a/src/common/page_table.cpp b/src/common/page_table.cpp
index 4b1690269..166dc3dce 100644
--- a/src/common/page_table.cpp
+++ b/src/common/page_table.cpp
@@ -9,12 +9,12 @@ PageTable::PageTable() = default;
PageTable::~PageTable() noexcept = default;
-bool PageTable::BeginTraversal(TraversalEntry& out_entry, TraversalContext& out_context,
- u64 address) const {
+bool PageTable::BeginTraversal(TraversalEntry* out_entry, TraversalContext* out_context,
+ Common::ProcessAddress address) const {
// Setup invalid defaults.
- out_entry.phys_addr = 0;
- out_entry.block_size = page_size;
- out_context.next_page = 0;
+ out_entry->phys_addr = 0;
+ out_entry->block_size = page_size;
+ out_context->next_page = 0;
// Validate that we can read the actual entry.
const auto page = address / page_size;
@@ -29,20 +29,20 @@ bool PageTable::BeginTraversal(TraversalEntry& out_entry, TraversalContext& out_
}
// Populate the results.
- out_entry.phys_addr = phys_addr + address;
- out_context.next_page = page + 1;
- out_context.next_offset = address + page_size;
+ out_entry->phys_addr = phys_addr + GetInteger(address);
+ out_context->next_page = page + 1;
+ out_context->next_offset = GetInteger(address) + page_size;
return true;
}
-bool PageTable::ContinueTraversal(TraversalEntry& out_entry, TraversalContext& context) const {
+bool PageTable::ContinueTraversal(TraversalEntry* out_entry, TraversalContext* context) const {
// Setup invalid defaults.
- out_entry.phys_addr = 0;
- out_entry.block_size = page_size;
+ out_entry->phys_addr = 0;
+ out_entry->block_size = page_size;
// Validate that we can read the actual entry.
- const auto page = context.next_page;
+ const auto page = context->next_page;
if (page >= backing_addr.size()) {
return false;
}
@@ -54,9 +54,9 @@ bool PageTable::ContinueTraversal(TraversalEntry& out_entry, TraversalContext& c
}
// Populate the results.
- out_entry.phys_addr = phys_addr + context.next_offset;
- context.next_page = page + 1;
- context.next_offset += page_size;
+ out_entry->phys_addr = phys_addr + context->next_offset;
+ context->next_page = page + 1;
+ context->next_offset += page_size;
return true;
}
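The switch from reference to pointer out-parameters makes mutation explicit at every call site. As a usage illustration, here is a minimal sketch (assuming a table that was resized and populated elsewhere) that walks a virtually contiguous range and accumulates the physical block sizes the traversal reports:

#include <memory>

#include "common/page_table.h"

u64 SumContiguousBlocks(const Common::PageTable& table, Common::ProcessAddress start,
                        u64 max_bytes) {
    Common::PageTable::TraversalEntry entry{};
    Common::PageTable::TraversalContext context{};

    // BeginTraversal now takes pointers and a typed process address.
    if (!table.BeginTraversal(std::addressof(entry), std::addressof(context), start)) {
        return 0; // the start address is unmapped or out of range
    }

    u64 total = entry.block_size;
    while (total < max_bytes &&
           table.ContinueTraversal(std::addressof(entry), std::addressof(context))) {
        total += entry.block_size;
    }
    return total;
}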
diff --git a/src/common/page_table.h b/src/common/page_table.h
index e653d52ad..5340f7d86 100644
--- a/src/common/page_table.h
+++ b/src/common/page_table.h
@@ -6,6 +6,7 @@
#include <atomic>
#include "common/common_types.h"
+#include "common/typed_address.h"
#include "common/virtual_buffer.h"
namespace Common {
@@ -100,9 +101,9 @@ struct PageTable {
PageTable(PageTable&&) noexcept = default;
PageTable& operator=(PageTable&&) noexcept = default;
- bool BeginTraversal(TraversalEntry& out_entry, TraversalContext& out_context,
- u64 address) const;
- bool ContinueTraversal(TraversalEntry& out_entry, TraversalContext& context) const;
+ bool BeginTraversal(TraversalEntry* out_entry, TraversalContext* out_context,
+ Common::ProcessAddress address) const;
+ bool ContinueTraversal(TraversalEntry* out_entry, TraversalContext* context) const;
/**
* Resizes the page table to be able to accommodate enough pages within
@@ -117,6 +118,16 @@ struct PageTable {
return current_address_space_width_in_bits;
}
+ bool GetPhysicalAddress(Common::PhysicalAddress* out_phys_addr,
+ Common::ProcessAddress virt_addr) const {
+ if (virt_addr > (1ULL << this->GetAddressSpaceBits())) {
+ return false;
+ }
+
+ *out_phys_addr = backing_addr[virt_addr / page_size] + GetInteger(virt_addr);
+ return true;
+ }
+
/**
* Vector of memory pointers backing each page. An entry can only be non-null if the
* corresponding attribute element is of type `Memory`.
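GetPhysicalAddress is the cheap point-query counterpart to the traversal API: one bounds check against the configured address-space width, then a single lookup in backing_addr. A small call-site sketch, again assuming a populated table (note the helper does not itself verify that the page is mapped):

#include <memory>
#include <optional>

#include "common/page_table.h"

std::optional<Common::PhysicalAddress> Translate(const Common::PageTable& table,
                                                 Common::ProcessAddress va) {
    Common::PhysicalAddress pa{};
    if (!table.GetPhysicalAddress(std::addressof(pa), va)) {
        return std::nullopt; // va exceeds the address-space width
    }
    return pa;
}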
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index e4f499135..8be3bdd08 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -271,8 +271,9 @@ add_library(core STATIC
hle/kernel/k_page_heap.h
hle/kernel/k_page_group.cpp
hle/kernel/k_page_group.h
- hle/kernel/k_page_table.cpp
hle/kernel/k_page_table.h
+ hle/kernel/k_page_table_base.cpp
+ hle/kernel/k_page_table_base.h
hle/kernel/k_page_table_manager.h
hle/kernel/k_page_table_slab_heap.h
hle/kernel/k_port.cpp
@@ -280,6 +281,7 @@ add_library(core STATIC
hle/kernel/k_priority_queue.h
hle/kernel/k_process.cpp
hle/kernel/k_process.h
+ hle/kernel/k_process_page_table.h
hle/kernel/k_readable_event.cpp
hle/kernel/k_readable_event.h
hle/kernel/k_resource_limit.cpp
@@ -330,8 +332,6 @@ add_library(core STATIC
hle/kernel/physical_core.cpp
hle/kernel/physical_core.h
hle/kernel/physical_memory.h
- hle/kernel/process_capability.cpp
- hle/kernel/process_capability.h
hle/kernel/slab_helpers.h
hle/kernel/svc.cpp
hle/kernel/svc.h
diff --git a/src/core/debugger/gdbstub.cpp b/src/core/debugger/gdbstub.cpp
index 6f5f5156b..148dd3e39 100644
--- a/src/core/debugger/gdbstub.cpp
+++ b/src/core/debugger/gdbstub.cpp
@@ -562,6 +562,120 @@ static std::string PaginateBuffer(std::string_view buffer, std::string_view requ
}
}
+static VAddr GetModuleEnd(Kernel::KProcessPageTable& page_table, VAddr base) {
+ Kernel::KMemoryInfo mem_info;
+ Kernel::Svc::MemoryInfo svc_mem_info;
+ Kernel::Svc::PageInfo page_info;
+ VAddr cur_addr{base};
+
+ // Expect: r-x Code (.text)
+ R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), cur_addr));
+ svc_mem_info = mem_info.GetSvcMemoryInfo();
+ cur_addr = svc_mem_info.base_address + svc_mem_info.size;
+ if (svc_mem_info.state != Kernel::Svc::MemoryState::Code ||
+ svc_mem_info.permission != Kernel::Svc::MemoryPermission::ReadExecute) {
+ return cur_addr - 1;
+ }
+
+ // Expect: r-- Code (.rodata)
+ R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), cur_addr));
+ svc_mem_info = mem_info.GetSvcMemoryInfo();
+ cur_addr = svc_mem_info.base_address + svc_mem_info.size;
+ if (svc_mem_info.state != Kernel::Svc::MemoryState::Code ||
+ svc_mem_info.permission != Kernel::Svc::MemoryPermission::Read) {
+ return cur_addr - 1;
+ }
+
+ // Expect: rw- CodeData (.data)
+ R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), cur_addr));
+ svc_mem_info = mem_info.GetSvcMemoryInfo();
+ cur_addr = svc_mem_info.base_address + svc_mem_info.size;
+ return cur_addr - 1;
+}
+
+static Loader::AppLoader::Modules FindModules(Core::System& system) {
+ Loader::AppLoader::Modules modules;
+
+ auto& page_table = system.ApplicationProcess()->GetPageTable();
+ auto& memory = system.ApplicationMemory();
+ VAddr cur_addr = 0;
+
+ // Look for executable sections in Code or AliasCode regions.
+ while (true) {
+ Kernel::KMemoryInfo mem_info{};
+ Kernel::Svc::PageInfo page_info{};
+ R_ASSERT(
+ page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), cur_addr));
+ auto svc_mem_info = mem_info.GetSvcMemoryInfo();
+
+ if (svc_mem_info.permission == Kernel::Svc::MemoryPermission::ReadExecute &&
+ (svc_mem_info.state == Kernel::Svc::MemoryState::Code ||
+ svc_mem_info.state == Kernel::Svc::MemoryState::AliasCode)) {
+ // Try to read the module name from its path.
+ constexpr s32 PathLengthMax = 0x200;
+ struct {
+ u32 zero;
+ s32 path_length;
+ std::array<char, PathLengthMax> path;
+ } module_path;
+
+ if (memory.ReadBlock(svc_mem_info.base_address + svc_mem_info.size, &module_path,
+ sizeof(module_path))) {
+ if (module_path.zero == 0 && module_path.path_length > 0) {
+ // Truncate module name.
+ module_path.path[PathLengthMax - 1] = '\0';
+
+ // Ignore leading directories.
+ char* path_pointer = module_path.path.data();
+
+ for (s32 i = 0; i < std::min(PathLengthMax, module_path.path_length) &&
+ module_path.path[i] != '\0';
+ i++) {
+ if (module_path.path[i] == '/' || module_path.path[i] == '\\') {
+ path_pointer = module_path.path.data() + i + 1;
+ }
+ }
+
+ // Insert output.
+ modules.emplace(svc_mem_info.base_address, path_pointer);
+ }
+ }
+ }
+
+ // Check if we're done.
+ const uintptr_t next_address = svc_mem_info.base_address + svc_mem_info.size;
+ if (next_address <= cur_addr) {
+ break;
+ }
+
+ cur_addr = next_address;
+ }
+
+ return modules;
+}
+
+static VAddr FindMainModuleEntrypoint(Core::System& system) {
+ Loader::AppLoader::Modules modules;
+ system.GetAppLoader().ReadNSOModules(modules);
+
+ // Do we have a module named main?
+ const auto main = std::find_if(modules.begin(), modules.end(),
+ [](const auto& key) { return key.second == "main"; });
+
+ if (main != modules.end()) {
+ return main->first;
+ }
+
+ // Do we have any loaded executable sections?
+ modules = FindModules(system);
+ if (!modules.empty()) {
+ return modules.begin()->first;
+ }
+
+ // As a last resort, use the start of the code region.
+ return GetInteger(system.ApplicationProcess()->GetPageTable().GetCodeRegionStart());
+}
+
void GDBStub::HandleQuery(std::string_view command) {
if (command.starts_with("TStatus")) {
// no tracepoint support
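FindModules relies on a termination idiom that is easy to miss: QueryInfo is expected to tile the whole address space with back-to-back regions, so the final region's base_address + size wraps to zero and the next_address <= cur_addr test fires. A freestanding sketch of that loop shape, with QueryRegion as a hypothetical stand-in for the page-table query:

#include <cstdint>

struct Region {
    uint64_t base;
    uint64_t size;
};

// Hypothetical query that tiles the 64-bit space with two regions.
Region QueryRegion(uint64_t addr) {
    if (addr < 0x8000'0000) {
        return {0, 0x8000'0000};
    }
    return {0x8000'0000, ~uint64_t{0} - 0x8000'0000 + 1};
}

int CountRegions() {
    int count = 0;
    uint64_t cur = 0;
    while (true) {
        const Region r = QueryRegion(cur);
        ++count;
        const uint64_t next = r.base + r.size; // wraps to 0 on the last region
        if (next <= cur) {
            break; // wrapped or failed to advance: the walk is complete
        }
        cur = next;
    }
    return count; // 2 for the tiling above
}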
@@ -573,21 +687,10 @@ void GDBStub::HandleQuery(std::string_view command) {
const auto target_xml{arch->GetTargetXML()};
SendReply(PaginateBuffer(target_xml, command.substr(30)));
} else if (command.starts_with("Offsets")) {
- Loader::AppLoader::Modules modules;
- system.GetAppLoader().ReadNSOModules(modules);
-
- const auto main = std::find_if(modules.begin(), modules.end(),
- [](const auto& key) { return key.second == "main"; });
- if (main != modules.end()) {
- SendReply(fmt::format("TextSeg={:x}", main->first));
- } else {
- SendReply(fmt::format(
- "TextSeg={:x}",
- GetInteger(system.ApplicationProcess()->GetPageTable().GetCodeRegionStart())));
- }
+ const auto main_offset = FindMainModuleEntrypoint(system);
+ SendReply(fmt::format("TextSeg={:x}", main_offset));
} else if (command.starts_with("Xfer:libraries:read::")) {
- Loader::AppLoader::Modules modules;
- system.GetAppLoader().ReadNSOModules(modules);
+ auto modules = FindModules(system);
std::string buffer;
buffer += R"(<?xml version="1.0"?>)";
@@ -727,32 +830,6 @@ static constexpr const char* GetMemoryPermissionString(const Kernel::Svc::Memory
}
}
-static VAddr GetModuleEnd(Kernel::KPageTable& page_table, VAddr base) {
- Kernel::Svc::MemoryInfo mem_info;
- VAddr cur_addr{base};
-
- // Expect: r-x Code (.text)
- mem_info = page_table.QueryInfo(cur_addr).GetSvcMemoryInfo();
- cur_addr = mem_info.base_address + mem_info.size;
- if (mem_info.state != Kernel::Svc::MemoryState::Code ||
- mem_info.permission != Kernel::Svc::MemoryPermission::ReadExecute) {
- return cur_addr - 1;
- }
-
- // Expect: r-- Code (.rodata)
- mem_info = page_table.QueryInfo(cur_addr).GetSvcMemoryInfo();
- cur_addr = mem_info.base_address + mem_info.size;
- if (mem_info.state != Kernel::Svc::MemoryState::Code ||
- mem_info.permission != Kernel::Svc::MemoryPermission::Read) {
- return cur_addr - 1;
- }
-
- // Expect: rw- CodeData (.data)
- mem_info = page_table.QueryInfo(cur_addr).GetSvcMemoryInfo();
- cur_addr = mem_info.base_address + mem_info.size;
- return cur_addr - 1;
-}
-
void GDBStub::HandleRcmd(const std::vector<u8>& command) {
std::string_view command_str{reinterpret_cast<const char*>(&command[0]), command.size()};
std::string reply;
@@ -767,7 +844,7 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
if (command_str == "get fastmem") {
if (Settings::IsFastmemEnabled()) {
- const auto& impl = page_table.PageTableImpl();
+ const auto& impl = page_table.GetImpl();
const auto region = reinterpret_cast<uintptr_t>(impl.fastmem_arena);
const auto region_bits = impl.current_address_space_width_in_bits;
const auto region_size = 1ULL << region_bits;
@@ -779,26 +856,27 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
reply = "Fastmem is not enabled.\n";
}
} else if (command_str == "get info") {
- Loader::AppLoader::Modules modules;
- system.GetAppLoader().ReadNSOModules(modules);
+ auto modules = FindModules(system);
reply = fmt::format("Process: {:#x} ({})\n"
"Program Id: {:#018x}\n",
process->GetProcessId(), process->GetName(), process->GetProgramId());
- reply += fmt::format("Layout:\n"
- " Alias: {:#012x} - {:#012x}\n"
- " Heap: {:#012x} - {:#012x}\n"
- " Aslr: {:#012x} - {:#012x}\n"
- " Stack: {:#012x} - {:#012x}\n"
- "Modules:\n",
- GetInteger(page_table.GetAliasRegionStart()),
- GetInteger(page_table.GetAliasRegionEnd()),
- GetInteger(page_table.GetHeapRegionStart()),
- GetInteger(page_table.GetHeapRegionEnd()),
- GetInteger(page_table.GetAliasCodeRegionStart()),
- GetInteger(page_table.GetAliasCodeRegionEnd()),
- GetInteger(page_table.GetStackRegionStart()),
- GetInteger(page_table.GetStackRegionEnd()));
+ reply += fmt::format(
+ "Layout:\n"
+ " Alias: {:#012x} - {:#012x}\n"
+ " Heap: {:#012x} - {:#012x}\n"
+ " Aslr: {:#012x} - {:#012x}\n"
+ " Stack: {:#012x} - {:#012x}\n"
+ "Modules:\n",
+ GetInteger(page_table.GetAliasRegionStart()),
+ GetInteger(page_table.GetAliasRegionStart()) + page_table.GetAliasRegionSize() - 1,
+ GetInteger(page_table.GetHeapRegionStart()),
+ GetInteger(page_table.GetHeapRegionStart()) + page_table.GetHeapRegionSize() - 1,
+ GetInteger(page_table.GetAliasCodeRegionStart()),
+ GetInteger(page_table.GetAliasCodeRegionStart()) + page_table.GetAliasCodeRegionSize() -
+ 1,
+ GetInteger(page_table.GetStackRegionStart()),
+ GetInteger(page_table.GetStackRegionStart()) + page_table.GetStackRegionSize() - 1);
for (const auto& [vaddr, name] : modules) {
reply += fmt::format(" {:#012x} - {:#012x} {}\n", vaddr,
@@ -811,27 +889,34 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
while (true) {
using MemoryAttribute = Kernel::Svc::MemoryAttribute;
- auto mem_info = page_table.QueryInfo(cur_addr).GetSvcMemoryInfo();
-
- if (mem_info.state != Kernel::Svc::MemoryState::Inaccessible ||
- mem_info.base_address + mem_info.size - 1 != std::numeric_limits<u64>::max()) {
- const char* state = GetMemoryStateName(mem_info.state);
- const char* perm = GetMemoryPermissionString(mem_info);
-
- const char l = True(mem_info.attribute & MemoryAttribute::Locked) ? 'L' : '-';
- const char i = True(mem_info.attribute & MemoryAttribute::IpcLocked) ? 'I' : '-';
- const char d = True(mem_info.attribute & MemoryAttribute::DeviceShared) ? 'D' : '-';
- const char u = True(mem_info.attribute & MemoryAttribute::Uncached) ? 'U' : '-';
+ Kernel::KMemoryInfo mem_info{};
+ Kernel::Svc::PageInfo page_info{};
+ R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info),
+ cur_addr));
+ auto svc_mem_info = mem_info.GetSvcMemoryInfo();
+
+ if (svc_mem_info.state != Kernel::Svc::MemoryState::Inaccessible ||
+ svc_mem_info.base_address + svc_mem_info.size - 1 !=
+ std::numeric_limits<u64>::max()) {
+ const char* state = GetMemoryStateName(svc_mem_info.state);
+ const char* perm = GetMemoryPermissionString(svc_mem_info);
+
+ const char l = True(svc_mem_info.attribute & MemoryAttribute::Locked) ? 'L' : '-';
+ const char i =
+ True(svc_mem_info.attribute & MemoryAttribute::IpcLocked) ? 'I' : '-';
+ const char d =
+ True(svc_mem_info.attribute & MemoryAttribute::DeviceShared) ? 'D' : '-';
+ const char u = True(svc_mem_info.attribute & MemoryAttribute::Uncached) ? 'U' : '-';
const char p =
- True(mem_info.attribute & MemoryAttribute::PermissionLocked) ? 'P' : '-';
+ True(svc_mem_info.attribute & MemoryAttribute::PermissionLocked) ? 'P' : '-';
- reply += fmt::format(" {:#012x} - {:#012x} {} {} {}{}{}{}{} [{}, {}]\n",
- mem_info.base_address,
- mem_info.base_address + mem_info.size - 1, perm, state, l, i,
- d, u, p, mem_info.ipc_count, mem_info.device_count);
+ reply += fmt::format(
+ " {:#012x} - {:#012x} {} {} {}{}{}{}{} [{}, {}]\n", svc_mem_info.base_address,
+ svc_mem_info.base_address + svc_mem_info.size - 1, perm, state, l, i, d, u, p,
+ svc_mem_info.ipc_count, svc_mem_info.device_count);
}
- const uintptr_t next_address = mem_info.base_address + mem_info.size;
+ const uintptr_t next_address = svc_mem_info.base_address + svc_mem_info.size;
if (next_address <= cur_addr) {
break;
}
diff --git a/src/core/hid/emulated_controller.cpp b/src/core/hid/emulated_controller.cpp
index 8e2894449..b08a71446 100644
--- a/src/core/hid/emulated_controller.cpp
+++ b/src/core/hid/emulated_controller.cpp
@@ -96,18 +96,7 @@ void EmulatedController::ReloadFromSettings() {
}
controller.color_values = {};
- controller.colors_state.fullkey = {
- .body = GetNpadColor(player.body_color_left),
- .button = GetNpadColor(player.button_color_left),
- };
- controller.colors_state.left = {
- .body = GetNpadColor(player.body_color_left),
- .button = GetNpadColor(player.button_color_left),
- };
- controller.colors_state.right = {
- .body = GetNpadColor(player.body_color_right),
- .button = GetNpadColor(player.button_color_right),
- };
+ ReloadColorsFromSettings();
ring_params[0] = Common::ParamPackage(Settings::values.ringcon_analogs);
@@ -128,6 +117,30 @@ void EmulatedController::ReloadFromSettings() {
ReloadInput();
}
+void EmulatedController::ReloadColorsFromSettings() {
+ const auto player_index = NpadIdTypeToIndex(npad_id_type);
+ const auto& player = Settings::values.players.GetValue()[player_index];
+
+ // Avoid updating colors if overridden by physical controller
+ if (controller.color_values[LeftIndex].body != 0 &&
+ controller.color_values[RightIndex].body != 0) {
+ return;
+ }
+
+ controller.colors_state.fullkey = {
+ .body = GetNpadColor(player.body_color_left),
+ .button = GetNpadColor(player.button_color_left),
+ };
+ controller.colors_state.left = {
+ .body = GetNpadColor(player.body_color_left),
+ .button = GetNpadColor(player.button_color_left),
+ };
+ controller.colors_state.right = {
+ .body = GetNpadColor(player.body_color_right),
+ .button = GetNpadColor(player.button_color_right),
+ };
+}
+
void EmulatedController::LoadDevices() {
// TODO(german77): Use more buttons to detect the correct device
const auto left_joycon = button_params[Settings::NativeButton::DRight];
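The early return added in ReloadColorsFromSettings encodes a precedence rule: colors reported by a physical controller (non-zero color_values) win over configuration defaults. Reduced to its essentials, with stand-in types:

#include <cstdint>
#include <optional>

struct NpadColor {
    uint32_t body{};
    uint32_t button{};
};

// Settings defaults apply only while no physical controller has reported
// its own colors; hardware-reported values always take precedence.
NpadColor EffectiveColor(const std::optional<NpadColor>& from_device,
                         const NpadColor& from_settings) {
    return from_device.has_value() ? *from_device : from_settings;
}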
diff --git a/src/core/hid/emulated_controller.h b/src/core/hid/emulated_controller.h
index d4500583e..ea18c2343 100644
--- a/src/core/hid/emulated_controller.h
+++ b/src/core/hid/emulated_controller.h
@@ -253,6 +253,9 @@ public:
/// Overrides current mapped devices with the stored configuration and reloads all input devices
void ReloadFromSettings();
+ /// Updates current colors with the ones stored in the configuration
+ void ReloadColorsFromSettings();
+
/// Saves the current mapped configuration
void SaveCurrentConfig();
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
index 59364efa1..37fa39a73 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
@@ -222,7 +222,7 @@ Result KSystemControl::AllocateSecureMemory(KernelCore& kernel, KVirtualAddress*
};
// We succeeded.
- *out = KPageTable::GetHeapVirtualAddress(kernel.MemoryLayout(), paddr);
+ *out = KPageTable::GetHeapVirtualAddress(kernel, paddr);
R_SUCCEED();
}
@@ -238,8 +238,17 @@ void KSystemControl::FreeSecureMemory(KernelCore& kernel, KVirtualAddress addres
ASSERT(Common::IsAligned(size, alignment));
// Close the secure region's pages.
- kernel.MemoryManager().Close(KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), address),
+ kernel.MemoryManager().Close(KPageTable::GetHeapPhysicalAddress(kernel, address),
size / PageSize);
}
+// Insecure Memory.
+KResourceLimit* KSystemControl::GetInsecureMemoryResourceLimit(KernelCore& kernel) {
+ return kernel.GetSystemResourceLimit();
+}
+
+u32 KSystemControl::GetInsecureMemoryPool() {
+ return static_cast<u32>(KMemoryManager::Pool::SystemNonSecure);
+}
+
} // namespace Kernel::Board::Nintendo::Nx
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
index ff1feec70..60c5e58b7 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
@@ -8,7 +8,8 @@
namespace Kernel {
class KernelCore;
-}
+class KResourceLimit;
+} // namespace Kernel
namespace Kernel::Board::Nintendo::Nx {
@@ -40,6 +41,10 @@ public:
u32 pool);
static void FreeSecureMemory(KernelCore& kernel, KVirtualAddress address, size_t size,
u32 pool);
+
+ // Insecure Memory.
+ static KResourceLimit* GetInsecureMemoryResourceLimit(KernelCore& kernel);
+ static u32 GetInsecureMemoryPool();
};
} // namespace Kernel::Board::Nintendo::Nx
diff --git a/src/core/hle/kernel/k_capabilities.cpp b/src/core/hle/kernel/k_capabilities.cpp
index e7da7a21d..274fee493 100644
--- a/src/core/hle/kernel/k_capabilities.cpp
+++ b/src/core/hle/kernel/k_capabilities.cpp
@@ -4,14 +4,16 @@
#include "core/hardware_properties.h"
#include "core/hle/kernel/k_capabilities.h"
#include "core/hle/kernel/k_memory_layout.h"
-#include "core/hle/kernel/k_page_table.h"
+#include "core/hle/kernel/k_process_page_table.h"
+#include "core/hle/kernel/k_trace.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/svc_version.h"
namespace Kernel {
-Result KCapabilities::InitializeForKip(std::span<const u32> kern_caps, KPageTable* page_table) {
+Result KCapabilities::InitializeForKip(std::span<const u32> kern_caps,
+ KProcessPageTable* page_table) {
// We're initializing an initial process.
m_svc_access_flags.reset();
m_irq_access_flags.reset();
@@ -41,7 +43,8 @@ Result KCapabilities::InitializeForKip(std::span<const u32> kern_caps, KPageTabl
R_RETURN(this->SetCapabilities(kern_caps, page_table));
}
-Result KCapabilities::InitializeForUser(std::span<const u32> user_caps, KPageTable* page_table) {
+Result KCapabilities::InitializeForUser(std::span<const u32> user_caps,
+ KProcessPageTable* page_table) {
// We're initializing a user process.
m_svc_access_flags.reset();
m_irq_access_flags.reset();
@@ -121,7 +124,7 @@ Result KCapabilities::SetSyscallMaskCapability(const u32 cap, u32& set_svc) {
R_SUCCEED();
}
-Result KCapabilities::MapRange_(const u32 cap, const u32 size_cap, KPageTable* page_table) {
+Result KCapabilities::MapRange_(const u32 cap, const u32 size_cap, KProcessPageTable* page_table) {
const auto range_pack = MapRange{cap};
const auto size_pack = MapRangeSize{size_cap};
@@ -142,16 +145,13 @@ Result KCapabilities::MapRange_(const u32 cap, const u32 size_cap, KPageTable* p
? KMemoryPermission::UserRead
: KMemoryPermission::UserReadWrite;
if (MapRangeSize{size_cap}.normal) {
- // R_RETURN(page_table->MapStatic(phys_addr, size, perm));
+ R_RETURN(page_table->MapStatic(phys_addr, size, perm));
} else {
- // R_RETURN(page_table->MapIo(phys_addr, size, perm));
+ R_RETURN(page_table->MapIo(phys_addr, size, perm));
}
-
- UNIMPLEMENTED();
- R_SUCCEED();
}
-Result KCapabilities::MapIoPage_(const u32 cap, KPageTable* page_table) {
+Result KCapabilities::MapIoPage_(const u32 cap, KProcessPageTable* page_table) {
// Get/validate address/size
const u64 phys_addr = MapIoPage{cap}.address.Value() * PageSize;
const size_t num_pages = 1;
@@ -160,10 +160,7 @@ Result KCapabilities::MapIoPage_(const u32 cap, KPageTable* page_table) {
R_UNLESS(((phys_addr + size - 1) & ~PhysicalMapAllowedMask) == 0, ResultInvalidAddress);
// Do the mapping.
- // R_RETURN(page_table->MapIo(phys_addr, size, KMemoryPermission_UserReadWrite));
-
- UNIMPLEMENTED();
- R_SUCCEED();
+ R_RETURN(page_table->MapIo(phys_addr, size, KMemoryPermission::UserReadWrite));
}
template <typename F>
@@ -200,13 +197,11 @@ Result KCapabilities::ProcessMapRegionCapability(const u32 cap, F f) {
R_SUCCEED();
}
-Result KCapabilities::MapRegion_(const u32 cap, KPageTable* page_table) {
+Result KCapabilities::MapRegion_(const u32 cap, KProcessPageTable* page_table) {
// Map each region into the process's page table.
return ProcessMapRegionCapability(
- cap, [](KMemoryRegionType region_type, KMemoryPermission perm) -> Result {
- // R_RETURN(page_table->MapRegion(region_type, perm));
- UNIMPLEMENTED();
- R_SUCCEED();
+ cap, [page_table](KMemoryRegionType region_type, KMemoryPermission perm) -> Result {
+ R_RETURN(page_table->MapRegion(region_type, perm));
});
}
@@ -280,7 +275,7 @@ Result KCapabilities::SetDebugFlagsCapability(const u32 cap) {
}
Result KCapabilities::SetCapability(const u32 cap, u32& set_flags, u32& set_svc,
- KPageTable* page_table) {
+ KProcessPageTable* page_table) {
// Validate this is a capability we can act on.
const auto type = GetCapabilityType(cap);
R_UNLESS(type != CapabilityType::Invalid, ResultInvalidArgument);
@@ -318,7 +313,7 @@ Result KCapabilities::SetCapability(const u32 cap, u32& set_flags, u32& set_svc,
}
}
-Result KCapabilities::SetCapabilities(std::span<const u32> caps, KPageTable* page_table) {
+Result KCapabilities::SetCapabilities(std::span<const u32> caps, KProcessPageTable* page_table) {
u32 set_flags = 0, set_svc = 0;
for (size_t i = 0; i < caps.size(); i++) {
@@ -335,6 +330,8 @@ Result KCapabilities::SetCapabilities(std::span<const u32> caps, KPageTable* pag
// Map the range.
R_TRY(this->MapRange_(cap, size_cap, page_table));
+ } else if (GetCapabilityType(cap) == CapabilityType::MapRegion && !IsKTraceEnabled) {
+ continue;
} else {
R_TRY(this->SetCapability(cap, set_flags, set_svc, page_table));
}
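MapIoPage_ above shows the general shape of capability decoding: a bit-packed u32 descriptor stores an address in page units, so multiplying by PageSize recovers the byte address while keeping the descriptor compact. A minimal sketch of that packing with a made-up field layout (the real bitfields live in k_capabilities.h):

#include <cstdint>

constexpr uint64_t PageSize = 0x1000;

// Hypothetical descriptor: low 8 bits are a tag, the rest a page number.
constexpr uint32_t EncodePage(uint64_t phys_addr, uint32_t tag) {
    return static_cast<uint32_t>((phys_addr / PageSize) << 8) | (tag & 0xFF);
}

constexpr uint64_t DecodePage(uint32_t cap) {
    return static_cast<uint64_t>(cap >> 8) * PageSize; // page units -> bytes
}

static_assert(DecodePage(EncodePage(0x123000, 0x7F)) == 0x123000);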
diff --git a/src/core/hle/kernel/k_capabilities.h b/src/core/hle/kernel/k_capabilities.h
index ebd4eedb1..013d952ad 100644
--- a/src/core/hle/kernel/k_capabilities.h
+++ b/src/core/hle/kernel/k_capabilities.h
@@ -15,15 +15,15 @@
namespace Kernel {
-class KPageTable;
+class KProcessPageTable;
class KernelCore;
class KCapabilities {
public:
constexpr explicit KCapabilities() = default;
- Result InitializeForKip(std::span<const u32> kern_caps, KPageTable* page_table);
- Result InitializeForUser(std::span<const u32> user_caps, KPageTable* page_table);
+ Result InitializeForKip(std::span<const u32> kern_caps, KProcessPageTable* page_table);
+ Result InitializeForUser(std::span<const u32> user_caps, KProcessPageTable* page_table);
static Result CheckCapabilities(KernelCore& kernel, std::span<const u32> user_caps);
@@ -264,9 +264,9 @@ private:
Result SetCorePriorityCapability(const u32 cap);
Result SetSyscallMaskCapability(const u32 cap, u32& set_svc);
- Result MapRange_(const u32 cap, const u32 size_cap, KPageTable* page_table);
- Result MapIoPage_(const u32 cap, KPageTable* page_table);
- Result MapRegion_(const u32 cap, KPageTable* page_table);
+ Result MapRange_(const u32 cap, const u32 size_cap, KProcessPageTable* page_table);
+ Result MapIoPage_(const u32 cap, KProcessPageTable* page_table);
+ Result MapRegion_(const u32 cap, KProcessPageTable* page_table);
Result SetInterruptPairCapability(const u32 cap);
Result SetProgramTypeCapability(const u32 cap);
Result SetKernelVersionCapability(const u32 cap);
@@ -277,8 +277,9 @@ private:
static Result ProcessMapRegionCapability(const u32 cap, F f);
static Result CheckMapRegion(KernelCore& kernel, const u32 cap);
- Result SetCapability(const u32 cap, u32& set_flags, u32& set_svc, KPageTable* page_table);
- Result SetCapabilities(std::span<const u32> caps, KPageTable* page_table);
+ Result SetCapability(const u32 cap, u32& set_flags, u32& set_svc,
+ KProcessPageTable* page_table);
+ Result SetCapabilities(std::span<const u32> caps, KProcessPageTable* page_table);
private:
Svc::SvcAccessFlagSet m_svc_access_flags{};
diff --git a/src/core/hle/kernel/k_device_address_space.cpp b/src/core/hle/kernel/k_device_address_space.cpp
index f48896715..f0703f795 100644
--- a/src/core/hle/kernel/k_device_address_space.cpp
+++ b/src/core/hle/kernel/k_device_address_space.cpp
@@ -54,7 +54,7 @@ Result KDeviceAddressSpace::Detach(Svc::DeviceName device_name) {
R_SUCCEED();
}
-Result KDeviceAddressSpace::Map(KPageTable* page_table, KProcessAddress process_address,
+Result KDeviceAddressSpace::Map(KProcessPageTable* page_table, KProcessAddress process_address,
size_t size, u64 device_address, u32 option, bool is_aligned) {
// Check that the address falls within the space.
R_UNLESS((m_space_address <= device_address &&
@@ -113,7 +113,7 @@ Result KDeviceAddressSpace::Map(KPageTable* page_table, KProcessAddress process_
R_SUCCEED();
}
-Result KDeviceAddressSpace::Unmap(KPageTable* page_table, KProcessAddress process_address,
+Result KDeviceAddressSpace::Unmap(KProcessPageTable* page_table, KProcessAddress process_address,
size_t size, u64 device_address) {
// Check that the address falls within the space.
R_UNLESS((m_space_address <= device_address &&
diff --git a/src/core/hle/kernel/k_device_address_space.h b/src/core/hle/kernel/k_device_address_space.h
index 18556e3cc..ff0ec8152 100644
--- a/src/core/hle/kernel/k_device_address_space.h
+++ b/src/core/hle/kernel/k_device_address_space.h
@@ -5,7 +5,7 @@
#include <string>
-#include "core/hle/kernel/k_page_table.h"
+#include "core/hle/kernel/k_process_page_table.h"
#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
@@ -31,23 +31,23 @@ public:
Result Attach(Svc::DeviceName device_name);
Result Detach(Svc::DeviceName device_name);
- Result MapByForce(KPageTable* page_table, KProcessAddress process_address, size_t size,
+ Result MapByForce(KProcessPageTable* page_table, KProcessAddress process_address, size_t size,
u64 device_address, u32 option) {
R_RETURN(this->Map(page_table, process_address, size, device_address, option, false));
}
- Result MapAligned(KPageTable* page_table, KProcessAddress process_address, size_t size,
+ Result MapAligned(KProcessPageTable* page_table, KProcessAddress process_address, size_t size,
u64 device_address, u32 option) {
R_RETURN(this->Map(page_table, process_address, size, device_address, option, true));
}
- Result Unmap(KPageTable* page_table, KProcessAddress process_address, size_t size,
+ Result Unmap(KProcessPageTable* page_table, KProcessAddress process_address, size_t size,
u64 device_address);
static void Initialize();
private:
- Result Map(KPageTable* page_table, KProcessAddress process_address, size_t size,
+ Result Map(KProcessPageTable* page_table, KProcessAddress process_address, size_t size,
u64 device_address, u32 option, bool is_aligned);
private:
diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h
index c8122644f..d7adb3169 100644
--- a/src/core/hle/kernel/k_memory_layout.h
+++ b/src/core/hle/kernel/k_memory_layout.h
@@ -394,6 +394,14 @@ private:
return region.GetEndAddress();
}
+public:
+ static const KMemoryRegion* Find(const KMemoryLayout& layout, KVirtualAddress address) {
+ return Find(address, layout.GetVirtualMemoryRegionTree());
+ }
+ static const KMemoryRegion* Find(const KMemoryLayout& layout, KPhysicalAddress address) {
+ return Find(address, layout.GetPhysicalMemoryRegionTree());
+ }
+
private:
u64 m_linear_phys_to_virt_diff{};
u64 m_linear_virt_to_phys_diff{};
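These Find overloads work only because KVirtualAddress and KPhysicalAddress are distinct types rather than bare u64 aliases; overload resolution then routes each query to the correct region tree. A toy illustration of the mechanism with stand-in types:

#include <cstdint>
#include <iostream>

// Distinct wrapper types with identical representation.
struct VirtualAddr { uint64_t value; };
struct PhysicalAddr { uint64_t value; };

const char* Find(VirtualAddr) { return "searched the virtual region tree"; }
const char* Find(PhysicalAddr) { return "searched the physical region tree"; }

int main() {
    std::cout << Find(VirtualAddr{0x8000'0000}) << '\n';
    std::cout << Find(PhysicalAddr{0x4000'0000}) << '\n';
    // Find(0x1234) would not compile: a raw integer is neither address type.
}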
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index cdc5572d8..0a973ec8c 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -456,8 +456,7 @@ size_t KMemoryManager::Impl::Initialize(KPhysicalAddress address, size_t size,
}
void KMemoryManager::Impl::InitializeOptimizedMemory(KernelCore& kernel) {
- auto optimize_pa =
- KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
+ auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region);
auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa);
std::memset(optimize_map, 0, CalculateOptimizedProcessOverheadSize(m_heap.GetSize()));
@@ -465,8 +464,7 @@ void KMemoryManager::Impl::InitializeOptimizedMemory(KernelCore& kernel) {
void KMemoryManager::Impl::TrackUnoptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
size_t num_pages) {
- auto optimize_pa =
- KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
+ auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region);
auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa);
// Get the range we're tracking.
@@ -485,8 +483,7 @@ void KMemoryManager::Impl::TrackUnoptimizedAllocation(KernelCore& kernel, KPhysi
void KMemoryManager::Impl::TrackOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
size_t num_pages) {
- auto optimize_pa =
- KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
+ auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region);
auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa);
// Get the range we're tracking.
@@ -506,8 +503,7 @@ void KMemoryManager::Impl::TrackOptimizedAllocation(KernelCore& kernel, KPhysica
bool KMemoryManager::Impl::ProcessOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
size_t num_pages, u8 fill_pattern) {
auto& device_memory = kernel.System().DeviceMemory();
- auto optimize_pa =
- KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
+ auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region);
auto* optimize_map = device_memory.GetPointer<u64>(optimize_pa);
// We want to return whether any pages were newly allocated.
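All four helpers above resolve m_management_region to a host pointer and then treat it as a per-page bitmap for the optimized-memory bookkeeping. A freestanding sketch of that style of tracking, under the assumption of one bit per page offset from the managed region's base:

#include <cstddef>
#include <cstdint>
#include <vector>

constexpr size_t PageSize = 0x1000;

// Mark [block, block + num_pages * PageSize) as allocated.
// The bitmap must span at least first_page + num_pages bits,
// e.g. std::vector<uint64_t> bitmap((total_pages + 63) / 64).
void TrackAllocation(std::vector<uint64_t>& bitmap, uint64_t region_base,
                     uint64_t block, size_t num_pages) {
    const uint64_t first_page = (block - region_base) / PageSize;
    for (size_t i = 0; i < num_pages; ++i) {
        const uint64_t page = first_page + i;
        bitmap[page / 64] |= uint64_t{1} << (page % 64);
    }
}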
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
deleted file mode 100644
index 1d47bdf6b..000000000
--- a/src/core/hle/kernel/k_page_table.cpp
+++ /dev/null
@@ -1,3519 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include "common/alignment.h"
-#include "common/assert.h"
-#include "common/literals.h"
-#include "common/scope_exit.h"
-#include "common/settings.h"
-#include "core/core.h"
-#include "core/hle/kernel/k_address_space_info.h"
-#include "core/hle/kernel/k_memory_block.h"
-#include "core/hle/kernel/k_memory_block_manager.h"
-#include "core/hle/kernel/k_page_group.h"
-#include "core/hle/kernel/k_page_table.h"
-#include "core/hle/kernel/k_process.h"
-#include "core/hle/kernel/k_resource_limit.h"
-#include "core/hle/kernel/k_scoped_resource_reservation.h"
-#include "core/hle/kernel/k_system_control.h"
-#include "core/hle/kernel/k_system_resource.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/svc_results.h"
-#include "core/memory.h"
-
-namespace Kernel {
-
-namespace {
-
-class KScopedLightLockPair {
- YUZU_NON_COPYABLE(KScopedLightLockPair);
- YUZU_NON_MOVEABLE(KScopedLightLockPair);
-
-private:
- KLightLock* m_lower;
- KLightLock* m_upper;
-
-public:
- KScopedLightLockPair(KLightLock& lhs, KLightLock& rhs) {
- // Ensure our locks are in a consistent order.
- if (std::addressof(lhs) <= std::addressof(rhs)) {
- m_lower = std::addressof(lhs);
- m_upper = std::addressof(rhs);
- } else {
- m_lower = std::addressof(rhs);
- m_upper = std::addressof(lhs);
- }
-
- // Acquire both locks.
- m_lower->Lock();
- if (m_lower != m_upper) {
- m_upper->Lock();
- }
- }
-
- ~KScopedLightLockPair() {
- // Unlock the upper lock.
- if (m_upper != nullptr && m_upper != m_lower) {
- m_upper->Unlock();
- }
-
- // Unlock the lower lock.
- if (m_lower != nullptr) {
- m_lower->Unlock();
- }
- }
-
-public:
- // Utility.
- void TryUnlockHalf(KLightLock& lock) {
- // Only allow unlocking if the lock is half the pair.
- if (m_lower != m_upper) {
- // We want to be sure the lock is one we own.
- if (m_lower == std::addressof(lock)) {
- lock.Unlock();
- m_lower = nullptr;
- } else if (m_upper == std::addressof(lock)) {
- lock.Unlock();
- m_upper = nullptr;
- }
- }
- }
-};
-
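KScopedLightLockPair, deleted here along with the rest of the old page table, encodes the classic deadlock-avoidance rule: when two locks must be held together, acquire them in a globally consistent order, in this case the order of their addresses. Standard C++ offers the same guarantee via std::scoped_lock; a sketch for comparison, with the hand-rolled ordering shown alongside:

#include <mutex>
#include <utility>

std::mutex table_a;
std::mutex table_b;

void CrossTableOperation() {
    // std::scoped_lock applies a deadlock-avoidance algorithm internally,
    // so concurrent callers may name the mutexes in either order.
    std::scoped_lock lk{table_a, table_b};
    // ... operate on both tables ...
}

// Hand-rolled equivalent of the address-ordering trick above; callers
// must unlock in the reverse order when done.
void LockOrdered(std::mutex& lhs, std::mutex& rhs) {
    std::mutex* lower = &lhs;
    std::mutex* upper = &rhs;
    if (upper < lower) {
        std::swap(lower, upper);
    }
    lower->lock();
    if (lower != upper) { // tolerate being handed the same lock twice
        upper->lock();
    }
}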
-using namespace Common::Literals;
-
-constexpr size_t GetAddressSpaceWidthFromType(Svc::CreateProcessFlag as_type) {
- switch (as_type) {
- case Svc::CreateProcessFlag::AddressSpace32Bit:
- case Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias:
- return 32;
- case Svc::CreateProcessFlag::AddressSpace64BitDeprecated:
- return 36;
- case Svc::CreateProcessFlag::AddressSpace64Bit:
- return 39;
- default:
- ASSERT(false);
- return {};
- }
-}
-
-} // namespace
-
-KPageTable::KPageTable(Core::System& system_)
- : m_general_lock{system_.Kernel()},
- m_map_physical_memory_lock{system_.Kernel()}, m_system{system_}, m_kernel{system_.Kernel()} {}
-
-KPageTable::~KPageTable() = default;
-
-Result KPageTable::InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr,
- bool enable_das_merge, bool from_back,
- KMemoryManager::Pool pool, KProcessAddress code_addr,
- size_t code_size, KSystemResource* system_resource,
- KResourceLimit* resource_limit,
- Core::Memory::Memory& memory) {
-
- const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) {
- return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type);
- };
- const auto GetSpaceSize = [this](KAddressSpaceInfo::Type type) {
- return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type);
- };
-
- // Set the tracking memory
- m_memory = std::addressof(memory);
-
- // Set our width and heap/alias sizes
- m_address_space_width = GetAddressSpaceWidthFromType(as_type);
- const KProcessAddress start = 0;
- const KProcessAddress end{1ULL << m_address_space_width};
- size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)};
- size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)};
-
- ASSERT(code_addr < code_addr + code_size);
- ASSERT(code_addr + code_size - 1 <= end - 1);
-
- // Adjust heap/alias size if we don't have an alias region
- if (as_type == Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias) {
- heap_region_size += alias_region_size;
- alias_region_size = 0;
- }
-
- // Set code regions and determine remaining
- constexpr size_t RegionAlignment{2_MiB};
- KProcessAddress process_code_start{};
- KProcessAddress process_code_end{};
- size_t stack_region_size{};
- size_t kernel_map_region_size{};
-
- if (m_address_space_width == 39) {
- alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias);
- heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap);
- stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Stack);
- kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
- m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit);
- m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit);
- m_alias_code_region_start = m_code_region_start;
- m_alias_code_region_end = m_code_region_end;
- process_code_start = Common::AlignDown(GetInteger(code_addr), RegionAlignment);
- process_code_end = Common::AlignUp(GetInteger(code_addr) + code_size, RegionAlignment);
- } else {
- stack_region_size = 0;
- kernel_map_region_size = 0;
- m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall);
- m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
- m_stack_region_start = m_code_region_start;
- m_alias_code_region_start = m_code_region_start;
- m_alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) +
- GetSpaceSize(KAddressSpaceInfo::Type::MapLarge);
- m_stack_region_end = m_code_region_end;
- m_kernel_map_region_start = m_code_region_start;
- m_kernel_map_region_end = m_code_region_end;
- process_code_start = m_code_region_start;
- process_code_end = m_code_region_end;
- }
-
- // Set other basic fields
- m_enable_aslr = enable_aslr;
- m_enable_device_address_space_merge = enable_das_merge;
- m_address_space_start = start;
- m_address_space_end = end;
- m_is_kernel = false;
- m_memory_block_slab_manager = system_resource->GetMemoryBlockSlabManagerPointer();
- m_block_info_manager = system_resource->GetBlockInfoManagerPointer();
- m_resource_limit = resource_limit;
-
-    // Determine the region in which we can place the not-yet-determined regions
- KProcessAddress alloc_start{};
- size_t alloc_size{};
- if ((process_code_start - m_code_region_start) >= (end - process_code_end)) {
- alloc_start = m_code_region_start;
- alloc_size = process_code_start - m_code_region_start;
- } else {
- alloc_start = process_code_end;
- alloc_size = end - process_code_end;
- }
- const size_t needed_size =
- (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size);
- R_UNLESS(alloc_size >= needed_size, ResultOutOfMemory);
-
- const size_t remaining_size{alloc_size - needed_size};
-
- // Determine random placements for each region
- size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{};
- if (enable_aslr) {
- alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
- RegionAlignment;
- heap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
- RegionAlignment;
- stack_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
- RegionAlignment;
- kmap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
- RegionAlignment;
- }
-
- // Setup heap and alias regions
- m_alias_region_start = alloc_start + alias_rnd;
- m_alias_region_end = m_alias_region_start + alias_region_size;
- m_heap_region_start = alloc_start + heap_rnd;
- m_heap_region_end = m_heap_region_start + heap_region_size;
-
- if (alias_rnd <= heap_rnd) {
- m_heap_region_start += alias_region_size;
- m_heap_region_end += alias_region_size;
- } else {
- m_alias_region_start += heap_region_size;
- m_alias_region_end += heap_region_size;
- }
-
- // Setup stack region
- if (stack_region_size) {
- m_stack_region_start = alloc_start + stack_rnd;
- m_stack_region_end = m_stack_region_start + stack_region_size;
-
- if (alias_rnd < stack_rnd) {
- m_stack_region_start += alias_region_size;
- m_stack_region_end += alias_region_size;
- } else {
- m_alias_region_start += stack_region_size;
- m_alias_region_end += stack_region_size;
- }
-
- if (heap_rnd < stack_rnd) {
- m_stack_region_start += heap_region_size;
- m_stack_region_end += heap_region_size;
- } else {
- m_heap_region_start += stack_region_size;
- m_heap_region_end += stack_region_size;
- }
- }
-
- // Setup kernel map region
- if (kernel_map_region_size) {
- m_kernel_map_region_start = alloc_start + kmap_rnd;
- m_kernel_map_region_end = m_kernel_map_region_start + kernel_map_region_size;
-
- if (alias_rnd < kmap_rnd) {
- m_kernel_map_region_start += alias_region_size;
- m_kernel_map_region_end += alias_region_size;
- } else {
- m_alias_region_start += kernel_map_region_size;
- m_alias_region_end += kernel_map_region_size;
- }
-
- if (heap_rnd < kmap_rnd) {
- m_kernel_map_region_start += heap_region_size;
- m_kernel_map_region_end += heap_region_size;
- } else {
- m_heap_region_start += kernel_map_region_size;
- m_heap_region_end += kernel_map_region_size;
- }
-
- if (stack_region_size) {
- if (stack_rnd < kmap_rnd) {
- m_kernel_map_region_start += stack_region_size;
- m_kernel_map_region_end += stack_region_size;
- } else {
- m_stack_region_start += kernel_map_region_size;
- m_stack_region_end += kernel_map_region_size;
- }
- }
- }
-
- // Set heap and fill members.
- m_current_heap_end = m_heap_region_start;
- m_max_heap_size = 0;
- m_mapped_physical_memory_size = 0;
- m_mapped_unsafe_physical_memory = 0;
- m_mapped_insecure_memory = 0;
- m_mapped_ipc_server_memory = 0;
-
- m_heap_fill_value = 0;
- m_ipc_fill_value = 0;
- m_stack_fill_value = 0;
-
- // Set allocation option.
- m_allocate_option =
- KMemoryManager::EncodeOption(pool, from_back ? KMemoryManager::Direction::FromBack
- : KMemoryManager::Direction::FromFront);
-
-    // Ensure that our regions are inside our address space
- auto IsInAddressSpace = [&](KProcessAddress addr) {
- return m_address_space_start <= addr && addr <= m_address_space_end;
- };
- ASSERT(IsInAddressSpace(m_alias_region_start));
- ASSERT(IsInAddressSpace(m_alias_region_end));
- ASSERT(IsInAddressSpace(m_heap_region_start));
- ASSERT(IsInAddressSpace(m_heap_region_end));
- ASSERT(IsInAddressSpace(m_stack_region_start));
- ASSERT(IsInAddressSpace(m_stack_region_end));
- ASSERT(IsInAddressSpace(m_kernel_map_region_start));
- ASSERT(IsInAddressSpace(m_kernel_map_region_end));
-
- // Ensure that we selected regions that don't overlap
- const KProcessAddress alias_start{m_alias_region_start};
- const KProcessAddress alias_last{m_alias_region_end - 1};
- const KProcessAddress heap_start{m_heap_region_start};
- const KProcessAddress heap_last{m_heap_region_end - 1};
- const KProcessAddress stack_start{m_stack_region_start};
- const KProcessAddress stack_last{m_stack_region_end - 1};
- const KProcessAddress kmap_start{m_kernel_map_region_start};
- const KProcessAddress kmap_last{m_kernel_map_region_end - 1};
- ASSERT(alias_last < heap_start || heap_last < alias_start);
- ASSERT(alias_last < stack_start || stack_last < alias_start);
- ASSERT(alias_last < kmap_start || kmap_last < alias_start);
- ASSERT(heap_last < stack_start || stack_last < heap_start);
- ASSERT(heap_last < kmap_start || kmap_last < heap_start);
-
- m_current_heap_end = m_heap_region_start;
- m_max_heap_size = 0;
- m_mapped_physical_memory_size = 0;
- m_memory_pool = pool;
-
- m_page_table_impl = std::make_unique<Common::PageTable>();
- m_page_table_impl->Resize(m_address_space_width, PageBits);
-
- // Initialize our memory block manager.
- R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end,
- m_memory_block_slab_manager));
-}
-
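The ASLR placement in the deleted initializer draws each region's slide as an aligned random offset into the leftover space: GenerateRandomRange(0, remaining / alignment) * alignment, so every multiple of the alignment inside the slack is equally likely. A small sketch of that pattern with a stand-in RNG in place of KSystemControl (inclusive bounds assumed, matching the usage above):

#include <cstdint>
#include <random>

constexpr uint64_t RegionAlignment = 2ULL << 20; // 2 MiB, as above

// Stand-in for KSystemControl::GenerateRandomRange.
uint64_t GenerateRandomRange(uint64_t min, uint64_t max) {
    static std::mt19937_64 rng{std::random_device{}()};
    return std::uniform_int_distribution<uint64_t>{min, max}(rng);
}

// Pick an aligned slide in [0, remaining_size], multiples of the alignment only.
uint64_t PickAslrSlide(uint64_t remaining_size) {
    return GenerateRandomRange(0, remaining_size / RegionAlignment) * RegionAlignment;
}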
-void KPageTable::Finalize() {
- auto HostUnmapCallback = [&](KProcessAddress addr, u64 size) {
- if (Settings::IsFastmemEnabled()) {
- m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size);
- }
- };
-
- // Finalize memory blocks.
- m_memory_block_manager.Finalize(m_memory_block_slab_manager, std::move(HostUnmapCallback));
-
- // Release any insecure mapped memory.
- if (m_mapped_insecure_memory) {
- UNIMPLEMENTED();
- }
-
- // Release any ipc server memory.
- if (m_mapped_ipc_server_memory) {
- UNIMPLEMENTED();
- }
-
- // Close the backing page table, as the destructor is not called for guest objects.
- m_page_table_impl.reset();
-}
-
-Result KPageTable::MapProcessCode(KProcessAddress addr, size_t num_pages, KMemoryState state,
- KMemoryPermission perm) {
- const u64 size{num_pages * PageSize};
-
- // Validate the mapping request.
- R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Verify that the destination memory is unmapped.
- R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free,
- KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::None, KMemoryAttribute::None));
-
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager);
-
- // Allocate and open.
- KPageGroup pg{m_kernel, m_block_info_manager};
- R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
- &pg, num_pages,
- KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, m_allocation_option)));
-
- R_TRY(Operate(addr, num_pages, pg, OperationType::MapGroup));
-
- // Update the blocks.
- m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
- KMemoryBlockDisableMergeAttribute::None);
-
- R_SUCCEED();
-}
-
-Result KPageTable::MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address,
- size_t size) {
- // Validate the mapping request.
- R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
- ResultInvalidMemoryRegion);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Verify that the source memory is normal heap.
- KMemoryState src_state{};
- KMemoryPermission src_perm{};
- size_t num_src_allocator_blocks{};
- R_TRY(this->CheckMemoryState(&src_state, &src_perm, nullptr, &num_src_allocator_blocks,
- src_address, size, KMemoryState::All, KMemoryState::Normal,
- KMemoryPermission::All, KMemoryPermission::UserReadWrite,
- KMemoryAttribute::All, KMemoryAttribute::None));
-
- // Verify that the destination memory is unmapped.
- size_t num_dst_allocator_blocks{};
- R_TRY(this->CheckMemoryState(&num_dst_allocator_blocks, dst_address, size, KMemoryState::All,
- KMemoryState::Free, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::None,
- KMemoryAttribute::None));
-
- // Create an update allocator for the source.
- Result src_allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
- m_memory_block_slab_manager,
- num_src_allocator_blocks);
- R_TRY(src_allocator_result);
-
- // Create an update allocator for the destination.
- Result dst_allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
- m_memory_block_slab_manager,
- num_dst_allocator_blocks);
- R_TRY(dst_allocator_result);
-
- // Map the code memory.
- {
- // Determine the number of pages being operated on.
- const size_t num_pages = size / PageSize;
-
- // Create page groups for the memory being mapped.
- KPageGroup pg{m_kernel, m_block_info_manager};
- AddRegionToPages(src_address, num_pages, pg);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Reprotect the source as kernel-read/not mapped.
- const auto new_perm = static_cast<KMemoryPermission>(KMemoryPermission::KernelRead |
- KMemoryPermission::NotMapped);
- R_TRY(Operate(src_address, num_pages, new_perm, OperationType::ChangePermissions));
-
- // Ensure that we unprotect the source pages on failure.
- auto unprot_guard = SCOPE_GUARD({
- ASSERT(this->Operate(src_address, num_pages, src_perm, OperationType::ChangePermissions)
- .IsSuccess());
- });
-
- // Map the alias pages.
- const KPageProperties dst_properties = {new_perm, false, false,
- DisableMergeAttribute::DisableHead};
- R_TRY(
- this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_properties, false));
-
- // We successfully mapped the alias pages, so we don't need to unprotect the src pages on
- // failure.
- unprot_guard.Cancel();
-
- // Apply the memory block updates.
- m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages,
- src_state, new_perm, KMemoryAttribute::Locked,
- KMemoryBlockDisableMergeAttribute::Locked,
- KMemoryBlockDisableMergeAttribute::None);
- m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
- KMemoryState::AliasCode, new_perm, KMemoryAttribute::None,
- KMemoryBlockDisableMergeAttribute::Normal,
- KMemoryBlockDisableMergeAttribute::None);
- }
-
- R_SUCCEED();
-}
-
-Result KPageTable::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address,
- size_t size,
- ICacheInvalidationStrategy icache_invalidation_strategy) {
- // Validate the mapping request.
- R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
- ResultInvalidMemoryRegion);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Verify that the source memory is locked normal heap.
- size_t num_src_allocator_blocks{};
- R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size,
- KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::All,
- KMemoryAttribute::Locked));
-
- // Verify that the destination memory is aliasable code.
- size_t num_dst_allocator_blocks{};
- R_TRY(this->CheckMemoryStateContiguous(
- std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias,
- KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::All & ~KMemoryAttribute::PermissionLocked, KMemoryAttribute::None));
-
- // Determine whether any pages being unmapped are code.
- bool any_code_pages = false;
- {
- KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(dst_address);
- while (true) {
- // Get the memory info.
- const KMemoryInfo info = it->GetMemoryInfo();
-
- // Check if the memory has code flag.
- if ((info.GetState() & KMemoryState::FlagCode) != KMemoryState::None) {
- any_code_pages = true;
- break;
- }
-
- // Check if we're done.
- if (dst_address + size - 1 <= info.GetLastAddress()) {
- break;
- }
-
- // Advance.
- ++it;
- }
- }
-
- // Ensure that we maintain the instruction cache.
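-    // (If any of the reprotected pages contained code, stale entries may remain in the
-    // instruction cache, so we invalidate it on scope exit once pages have been reprotected.)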
- bool reprotected_pages = false;
- SCOPE_EXIT({
- if (reprotected_pages && any_code_pages) {
- if (icache_invalidation_strategy == ICacheInvalidationStrategy::InvalidateRange) {
- m_system.InvalidateCpuInstructionCacheRange(GetInteger(dst_address), size);
- } else {
- m_system.InvalidateCpuInstructionCaches();
- }
- }
- });
-
- // Unmap.
- {
- // Determine the number of pages being operated on.
- const size_t num_pages = size / PageSize;
-
- // Create an update allocator for the source.
- Result src_allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
- m_memory_block_slab_manager,
- num_src_allocator_blocks);
- R_TRY(src_allocator_result);
-
- // Create an update allocator for the destination.
- Result dst_allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
- m_memory_block_slab_manager,
- num_dst_allocator_blocks);
- R_TRY(dst_allocator_result);
-
- // Unmap the aliased copy of the pages.
- R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap));
-
- // Try to set the permissions for the source pages back to what they should be.
- R_TRY(Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
- OperationType::ChangePermissions));
-
- // Apply the memory block updates.
- m_memory_block_manager.Update(
- std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None,
- KMemoryPermission::None, KMemoryAttribute::None,
- KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal);
- m_memory_block_manager.Update(
- std::addressof(src_allocator), src_address, num_pages, KMemoryState::Normal,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
- KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked);
-
- // Note that we reprotected pages.
- reprotected_pages = true;
- }
-
- R_SUCCEED();
-}
-
-KProcessAddress KPageTable::FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
- size_t num_pages, size_t alignment, size_t offset,
- size_t guard_pages) {
- KProcessAddress address = 0;
-
- if (num_pages <= region_num_pages) {
- if (this->IsAslrEnabled()) {
- UNIMPLEMENTED();
- }
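-        // (ASLR randomization is unimplemented, so address remains 0 and the first-fit search
-        // below always runs.)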
- // Find the first free area.
- if (address == 0) {
- address = m_memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages,
- alignment, offset, guard_pages);
- }
- }
-
- return address;
-}
-
-Result KPageTable::MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages) {
- ASSERT(this->IsLockedByCurrentThread());
-
- const size_t size = num_pages * PageSize;
-
- // We're making a new group, not adding to an existing one.
- R_UNLESS(pg.empty(), ResultInvalidCurrentMemory);
-
- // Begin traversal.
- Common::PageTable::TraversalContext context;
- Common::PageTable::TraversalEntry next_entry;
- R_UNLESS(m_page_table_impl->BeginTraversal(next_entry, context, GetInteger(addr)),
- ResultInvalidCurrentMemory);
-
- // Prepare tracking variables.
- KPhysicalAddress cur_addr = next_entry.phys_addr;
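-    // The traversal may begin mid-block; masking cur_addr by (block_size - 1) yields its offset
-    // within the block, so cur_size is the number of bytes from cur_addr to the block's end.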
- size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1));
- size_t tot_size = cur_size;
-
- // Iterate, adding to group as we go.
- const auto& memory_layout = m_system.Kernel().MemoryLayout();
- while (tot_size < size) {
- R_UNLESS(m_page_table_impl->ContinueTraversal(next_entry, context),
- ResultInvalidCurrentMemory);
-
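-        // If the next entry is not physically contiguous with the run accumulated so far, flush
-        // the run into the group and begin a new one; otherwise, extend the current run.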
- if (next_entry.phys_addr != (cur_addr + cur_size)) {
- const size_t cur_pages = cur_size / PageSize;
-
- R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory);
- R_TRY(pg.AddBlock(cur_addr, cur_pages));
-
- cur_addr = next_entry.phys_addr;
- cur_size = next_entry.block_size;
- } else {
- cur_size += next_entry.block_size;
- }
-
- tot_size += next_entry.block_size;
- }
-
- // Ensure we add the right amount for the last block.
- if (tot_size > size) {
- cur_size -= (tot_size - size);
- }
-
- // Add the last block.
- const size_t cur_pages = cur_size / PageSize;
- R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory);
- R_TRY(pg.AddBlock(cur_addr, cur_pages));
-
- R_SUCCEED();
-}
-
-bool KPageTable::IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, size_t num_pages) {
- ASSERT(this->IsLockedByCurrentThread());
-
- const size_t size = num_pages * PageSize;
- const auto& memory_layout = m_system.Kernel().MemoryLayout();
-
- // Empty groups are necessarily invalid.
- if (pg.empty()) {
- return false;
- }
-
- // We're going to validate that the group we'd expect is the group we see.
- auto cur_it = pg.begin();
- KPhysicalAddress cur_block_address = cur_it->GetAddress();
- size_t cur_block_pages = cur_it->GetNumPages();
-
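-    // Advances to the next expected block once the current one has been fully consumed; returns
-    // false if the group has no further blocks.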
- auto UpdateCurrentIterator = [&]() {
- if (cur_block_pages == 0) {
- if ((++cur_it) == pg.end()) {
- return false;
- }
-
- cur_block_address = cur_it->GetAddress();
- cur_block_pages = cur_it->GetNumPages();
- }
- return true;
- };
-
- // Begin traversal.
- Common::PageTable::TraversalContext context;
- Common::PageTable::TraversalEntry next_entry;
- if (!m_page_table_impl->BeginTraversal(next_entry, context, GetInteger(addr))) {
- return false;
- }
-
- // Prepare tracking variables.
- KPhysicalAddress cur_addr = next_entry.phys_addr;
- size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1));
- size_t tot_size = cur_size;
-
- // Iterate, comparing expected to actual.
- while (tot_size < size) {
- if (!m_page_table_impl->ContinueTraversal(next_entry, context)) {
- return false;
- }
-
- if (next_entry.phys_addr != (cur_addr + cur_size)) {
- const size_t cur_pages = cur_size / PageSize;
-
- if (!IsHeapPhysicalAddress(memory_layout, cur_addr)) {
- return false;
- }
-
- if (!UpdateCurrentIterator()) {
- return false;
- }
-
- if (cur_block_address != cur_addr || cur_block_pages < cur_pages) {
- return false;
- }
-
- cur_block_address += cur_size;
- cur_block_pages -= cur_pages;
- cur_addr = next_entry.phys_addr;
- cur_size = next_entry.block_size;
- } else {
- cur_size += next_entry.block_size;
- }
-
- tot_size += next_entry.block_size;
- }
-
- // Ensure we compare the right amount for the last block.
- if (tot_size > size) {
- cur_size -= (tot_size - size);
- }
-
- if (!IsHeapPhysicalAddress(memory_layout, cur_addr)) {
- return false;
- }
-
- if (!UpdateCurrentIterator()) {
- return false;
- }
-
- return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize);
-}
-
-Result KPageTable::UnmapProcessMemory(KProcessAddress dst_addr, size_t size,
- KPageTable& src_page_table, KProcessAddress src_addr) {
- // Acquire the table locks.
- KScopedLightLockPair lk(src_page_table.m_general_lock, m_general_lock);
-
- const size_t num_pages{size / PageSize};
-
- // Check that the memory is mapped in the destination process.
- size_t num_allocator_blocks;
- R_TRY(CheckMemoryState(&num_allocator_blocks, dst_addr, size, KMemoryState::All,
- KMemoryState::SharedCode, KMemoryPermission::UserReadWrite,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
- KMemoryAttribute::None));
-
- // Check that the memory is mapped in the source process.
- R_TRY(src_page_table.CheckMemoryState(src_addr, size, KMemoryState::FlagCanMapProcess,
- KMemoryState::FlagCanMapProcess, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::All,
- KMemoryAttribute::None));
-
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- R_TRY(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap));
-
- // Apply the memory block update.
- m_memory_block_manager.Update(std::addressof(allocator), dst_addr, num_pages,
- KMemoryState::Free, KMemoryPermission::None,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
- KMemoryBlockDisableMergeAttribute::Normal);
-
- m_system.InvalidateCpuInstructionCaches();
-
- R_SUCCEED();
-}
-
-Result KPageTable::SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed,
- KProcessAddress address, size_t size,
- KMemoryPermission test_perm, KMemoryState dst_state) {
- // Validate pre-conditions.
- ASSERT(this->IsLockedByCurrentThread());
- ASSERT(test_perm == KMemoryPermission::UserReadWrite ||
- test_perm == KMemoryPermission::UserRead);
-
- // Check that the address is in range.
- R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
-
- // Get the source permission.
- const auto src_perm = (test_perm == KMemoryPermission::UserReadWrite)
- ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped
- : KMemoryPermission::UserRead;
-
- // Get aligned extents.
- const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(address), PageSize);
- const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(address) + size, PageSize);
- const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(address), PageSize);
- const KProcessAddress mapping_src_end = Common::AlignDown(GetInteger(address) + size, PageSize);
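-    // Note that the "aligned" extents round outward to the page boundaries containing the range,
-    // while the "mapping" extents round inward to the pages wholly contained within it.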
-
-    const auto aligned_src_last = aligned_src_end - 1;
-    const auto mapping_src_last = mapping_src_end - 1;
-
- // Get the test state and attribute mask.
- KMemoryState test_state;
- KMemoryAttribute test_attr_mask;
- switch (dst_state) {
- case KMemoryState::Ipc:
- test_state = KMemoryState::FlagCanUseIpc;
- test_attr_mask =
- KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked;
- break;
- case KMemoryState::NonSecureIpc:
- test_state = KMemoryState::FlagCanUseNonSecureIpc;
- test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
- break;
- case KMemoryState::NonDeviceIpc:
- test_state = KMemoryState::FlagCanUseNonDeviceIpc;
- test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
- break;
- default:
- R_THROW(ResultInvalidCombination);
- }
-
- // Ensure that on failure, we roll back appropriately.
- size_t mapped_size = 0;
- ON_RESULT_FAILURE {
- if (mapped_size > 0) {
- this->CleanupForIpcClientOnServerSetupFailure(page_list, mapping_src_start, mapped_size,
- src_perm);
- }
- };
-
- size_t blocks_needed = 0;
-
- // Iterate, mapping as needed.
- KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(aligned_src_start);
- while (true) {
- const KMemoryInfo info = it->GetMemoryInfo();
-
- // Validate the current block.
- R_TRY(this->CheckMemoryState(info, test_state, test_state, test_perm, test_perm,
- test_attr_mask, KMemoryAttribute::None));
-
-        if (mapping_src_start < mapping_src_end && mapping_src_start < info.GetEndAddress() &&
-            info.GetAddress() < GetInteger(mapping_src_end)) {
-            const auto cur_start = info.GetAddress() >= GetInteger(mapping_src_start)
-                                       ? info.GetAddress()
-                                       : mapping_src_start;
-            const auto cur_end = mapping_src_last >= info.GetLastAddress() ? info.GetEndAddress()
-                                                                           : mapping_src_end;
- const size_t cur_size = cur_end - cur_start;
-
- if (info.GetAddress() < GetInteger(mapping_src_start)) {
- ++blocks_needed;
- }
- if (mapping_src_last < info.GetLastAddress()) {
- ++blocks_needed;
- }
-
- // Set the permissions on the block, if we need to.
- if ((info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != src_perm) {
- R_TRY(Operate(cur_start, cur_size / PageSize, src_perm,
- OperationType::ChangePermissions));
- }
-
- // Note that we mapped this part.
- mapped_size += cur_size;
- }
-
- // If the block is at the end, we're done.
- if (aligned_src_last <= info.GetLastAddress()) {
- break;
- }
-
- // Advance.
- ++it;
- ASSERT(it != m_memory_block_manager.end());
- }
-
- if (out_blocks_needed != nullptr) {
- ASSERT(blocks_needed <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
- *out_blocks_needed = blocks_needed;
- }
-
- R_SUCCEED();
-}
-
-Result KPageTable::SetupForIpcServer(KProcessAddress* out_addr, size_t size,
- KProcessAddress src_addr, KMemoryPermission test_perm,
- KMemoryState dst_state, KPageTable& src_page_table,
- bool send) {
- ASSERT(this->IsLockedByCurrentThread());
- ASSERT(src_page_table.IsLockedByCurrentThread());
-
- // Check that we can theoretically map.
- const KProcessAddress region_start = m_alias_region_start;
- const size_t region_size = m_alias_region_end - m_alias_region_start;
- R_UNLESS(size < region_size, ResultOutOfAddressSpace);
-
- // Get aligned source extents.
- const KProcessAddress src_start = src_addr;
- const KProcessAddress src_end = src_addr + size;
- const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(src_start), PageSize);
- const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(src_start) + size, PageSize);
- const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(src_start), PageSize);
- const KProcessAddress mapping_src_end =
- Common::AlignDown(GetInteger(src_start) + size, PageSize);
- const size_t aligned_src_size = aligned_src_end - aligned_src_start;
- const size_t mapping_src_size =
- (mapping_src_start < mapping_src_end) ? (mapping_src_end - mapping_src_start) : 0;
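-    // Any difference between the aligned size and the mapping size consists of partial pages at
-    // the ends of the range, which are backed by freshly allocated pages rather than aliased.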
-
- // Select a random address to map at.
- KProcessAddress dst_addr =
- this->FindFreeArea(region_start, region_size / PageSize, aligned_src_size / PageSize,
- PageSize, 0, this->GetNumGuardPages());
-
- R_UNLESS(dst_addr != 0, ResultOutOfAddressSpace);
-
- // Check that we can perform the operation we're about to perform.
- ASSERT(this->CanContain(dst_addr, aligned_src_size, dst_state));
-
- // Create an update allocator.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager);
- R_TRY(allocator_result);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Reserve space for any partial pages we allocate.
- const size_t unmapped_size = aligned_src_size - mapping_src_size;
- KScopedResourceReservation memory_reservation(
- m_resource_limit, LimitableResource::PhysicalMemoryMax, unmapped_size);
- R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
-
- // Ensure that we manage page references correctly.
- KPhysicalAddress start_partial_page = 0;
- KPhysicalAddress end_partial_page = 0;
- KProcessAddress cur_mapped_addr = dst_addr;
-
-    // If the partial pages are mapped, an extra reference will have been opened; otherwise,
-    // they'll be freed on scope exit.
- SCOPE_EXIT({
- if (start_partial_page != 0) {
- m_system.Kernel().MemoryManager().Close(start_partial_page, 1);
- }
- if (end_partial_page != 0) {
- m_system.Kernel().MemoryManager().Close(end_partial_page, 1);
- }
- });
-
- ON_RESULT_FAILURE {
- if (cur_mapped_addr != dst_addr) {
- ASSERT(Operate(dst_addr, (cur_mapped_addr - dst_addr) / PageSize,
- KMemoryPermission::None, OperationType::Unmap)
- .IsSuccess());
- }
- };
-
- // Allocate the start page as needed.
- if (aligned_src_start < mapping_src_start) {
- start_partial_page =
- m_system.Kernel().MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
- R_UNLESS(start_partial_page != 0, ResultOutOfMemory);
- }
-
- // Allocate the end page as needed.
- if (mapping_src_end < aligned_src_end &&
- (aligned_src_start < mapping_src_end || aligned_src_start == mapping_src_start)) {
- end_partial_page =
- m_system.Kernel().MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
- R_UNLESS(end_partial_page != 0, ResultOutOfMemory);
- }
-
- // Get the implementation.
- auto& src_impl = src_page_table.PageTableImpl();
-
- // Get the fill value for partial pages.
- const auto fill_val = m_ipc_fill_value;
-
- // Begin traversal.
- Common::PageTable::TraversalContext context;
- Common::PageTable::TraversalEntry next_entry;
- bool traverse_valid =
- src_impl.BeginTraversal(next_entry, context, GetInteger(aligned_src_start));
- ASSERT(traverse_valid);
-
- // Prepare tracking variables.
- KPhysicalAddress cur_block_addr = next_entry.phys_addr;
-    size_t cur_block_size =
-        next_entry.block_size - (cur_block_addr & (next_entry.block_size - 1));
- size_t tot_block_size = cur_block_size;
-
- // Map the start page, if we have one.
- if (start_partial_page != 0) {
- // Ensure the page holds correct data.
- const KVirtualAddress start_partial_virt =
- GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), start_partial_page);
- if (send) {
- const size_t partial_offset = src_start - aligned_src_start;
- size_t copy_size, clear_size;
- if (src_end < mapping_src_start) {
- copy_size = size;
- clear_size = mapping_src_start - src_end;
- } else {
- copy_size = mapping_src_start - src_start;
- clear_size = 0;
- }
-
- std::memset(m_memory->GetPointer<void>(GetInteger(start_partial_virt)), fill_val,
- partial_offset);
- std::memcpy(
- m_memory->GetPointer<void>(GetInteger(start_partial_virt) + partial_offset),
- m_memory->GetPointer<void>(GetInteger(GetHeapVirtualAddress(
- m_system.Kernel().MemoryLayout(), cur_block_addr)) +
- partial_offset),
- copy_size);
- if (clear_size > 0) {
- std::memset(m_memory->GetPointer<void>(GetInteger(start_partial_virt) +
- partial_offset + copy_size),
- fill_val, clear_size);
- }
- } else {
- std::memset(m_memory->GetPointer<void>(GetInteger(start_partial_virt)), fill_val,
- PageSize);
- }
-
- // Map the page.
- R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, start_partial_page));
-
- // Update tracking extents.
- cur_mapped_addr += PageSize;
- cur_block_addr += PageSize;
- cur_block_size -= PageSize;
-
- // If the block's size was one page, we may need to continue traversal.
- if (cur_block_size == 0 && aligned_src_size > PageSize) {
- traverse_valid = src_impl.ContinueTraversal(next_entry, context);
- ASSERT(traverse_valid);
-
- cur_block_addr = next_entry.phys_addr;
- cur_block_size = next_entry.block_size;
- tot_block_size += next_entry.block_size;
- }
- }
-
- // Map the remaining pages.
- while (aligned_src_start + tot_block_size < mapping_src_end) {
- // Continue the traversal.
- traverse_valid = src_impl.ContinueTraversal(next_entry, context);
- ASSERT(traverse_valid);
-
- // Process the block.
- if (next_entry.phys_addr != cur_block_addr + cur_block_size) {
- // Map the block we've been processing so far.
- R_TRY(Operate(cur_mapped_addr, cur_block_size / PageSize, test_perm, OperationType::Map,
- cur_block_addr));
-
- // Update tracking extents.
- cur_mapped_addr += cur_block_size;
- cur_block_addr = next_entry.phys_addr;
- cur_block_size = next_entry.block_size;
- } else {
- cur_block_size += next_entry.block_size;
- }
- tot_block_size += next_entry.block_size;
- }
-
- // Handle the last direct-mapped page.
- if (const KProcessAddress mapped_block_end =
- aligned_src_start + tot_block_size - cur_block_size;
- mapped_block_end < mapping_src_end) {
- const size_t last_block_size = mapping_src_end - mapped_block_end;
-
- // Map the last block.
- R_TRY(Operate(cur_mapped_addr, last_block_size / PageSize, test_perm, OperationType::Map,
- cur_block_addr));
-
- // Update tracking extents.
- cur_mapped_addr += last_block_size;
- cur_block_addr += last_block_size;
- if (mapped_block_end + cur_block_size < aligned_src_end &&
- cur_block_size == last_block_size) {
- traverse_valid = src_impl.ContinueTraversal(next_entry, context);
- ASSERT(traverse_valid);
-
- cur_block_addr = next_entry.phys_addr;
- }
- }
-
- // Map the end page, if we have one.
- if (end_partial_page != 0) {
- // Ensure the page holds correct data.
- const KVirtualAddress end_partial_virt =
- GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), end_partial_page);
- if (send) {
- const size_t copy_size = src_end - mapping_src_end;
- std::memcpy(m_memory->GetPointer<void>(GetInteger(end_partial_virt)),
- m_memory->GetPointer<void>(GetInteger(GetHeapVirtualAddress(
- m_system.Kernel().MemoryLayout(), cur_block_addr))),
- copy_size);
- std::memset(m_memory->GetPointer<void>(GetInteger(end_partial_virt) + copy_size),
- fill_val, PageSize - copy_size);
- } else {
- std::memset(m_memory->GetPointer<void>(GetInteger(end_partial_virt)), fill_val,
- PageSize);
- }
-
- // Map the page.
- R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, end_partial_page));
- }
-
-    // Update memory blocks to reflect our changes.
- m_memory_block_manager.Update(std::addressof(allocator), dst_addr, aligned_src_size / PageSize,
- dst_state, test_perm, KMemoryAttribute::None,
- KMemoryBlockDisableMergeAttribute::Normal,
- KMemoryBlockDisableMergeAttribute::None);
-
- // Set the output address.
- *out_addr = dst_addr + (src_start - aligned_src_start);
-
- // We succeeded.
- memory_reservation.Commit();
- R_SUCCEED();
-}
-
-Result KPageTable::SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr,
- KPageTable& src_page_table, KMemoryPermission test_perm,
- KMemoryState dst_state, bool send) {
- // For convenience, alias this.
- KPageTable& dst_page_table = *this;
-
- // Acquire the table locks.
- KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(std::addressof(src_page_table));
-
- // Perform client setup.
- size_t num_allocator_blocks;
- R_TRY(src_page_table.SetupForIpcClient(updater.GetPageList(),
- std::addressof(num_allocator_blocks), src_addr, size,
- test_perm, dst_state));
-
- // Create an update allocator.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- src_page_table.m_memory_block_slab_manager,
- num_allocator_blocks);
- R_TRY(allocator_result);
-
- // Get the mapped extents.
- const KProcessAddress src_map_start = Common::AlignUp(GetInteger(src_addr), PageSize);
- const KProcessAddress src_map_end = Common::AlignDown(GetInteger(src_addr) + size, PageSize);
- const size_t src_map_size = src_map_end - src_map_start;
-
- // Ensure that we clean up appropriately if we fail after this.
- const auto src_perm = (test_perm == KMemoryPermission::UserReadWrite)
- ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped
- : KMemoryPermission::UserRead;
- ON_RESULT_FAILURE {
- if (src_map_end > src_map_start) {
- src_page_table.CleanupForIpcClientOnServerSetupFailure(
- updater.GetPageList(), src_map_start, src_map_size, src_perm);
- }
- };
-
- // Perform server setup.
- R_TRY(dst_page_table.SetupForIpcServer(out_dst_addr, size, src_addr, test_perm, dst_state,
- src_page_table, send));
-
- // If anything was mapped, ipc-lock the pages.
- if (src_map_start < src_map_end) {
- src_page_table.m_memory_block_manager.UpdateLock(std::addressof(allocator), src_map_start,
- (src_map_end - src_map_start) / PageSize,
- &KMemoryBlock::LockForIpc, src_perm);
- }
-
- R_SUCCEED();
-}
-
-Result KPageTable::CleanupForIpcServer(KProcessAddress address, size_t size,
- KMemoryState dst_state) {
- // Validate the address.
- R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Validate the memory state.
- size_t num_allocator_blocks;
- R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
- KMemoryState::All, dst_state, KMemoryPermission::UserRead,
- KMemoryPermission::UserRead, KMemoryAttribute::All,
- KMemoryAttribute::None));
-
- // Create an update allocator.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Get aligned extents.
- const KProcessAddress aligned_start = Common::AlignDown(GetInteger(address), PageSize);
- const KProcessAddress aligned_end = Common::AlignUp(GetInteger(address) + size, PageSize);
- const size_t aligned_size = aligned_end - aligned_start;
- const size_t aligned_num_pages = aligned_size / PageSize;
-
- // Unmap the pages.
- R_TRY(Operate(aligned_start, aligned_num_pages, KMemoryPermission::None, OperationType::Unmap));
-
- // Update memory blocks.
- m_memory_block_manager.Update(std::addressof(allocator), aligned_start, aligned_num_pages,
- KMemoryState::None, KMemoryPermission::None,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
- KMemoryBlockDisableMergeAttribute::Normal);
-
- // Release from the resource limit as relevant.
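-    // (aligned_size - mapping_size is exactly the partial-page overhead that was reserved
-    // against the limit during server setup.)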
- const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize);
- const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize);
- const size_t mapping_size = (mapping_start < mapping_end) ? mapping_end - mapping_start : 0;
- m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, aligned_size - mapping_size);
-
- R_SUCCEED();
-}
-
-Result KPageTable::CleanupForIpcClient(KProcessAddress address, size_t size,
- KMemoryState dst_state) {
- // Validate the address.
- R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
-
- // Get aligned source extents.
- const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize);
- const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize);
- const KProcessAddress mapping_last = mapping_end - 1;
- const size_t mapping_size = (mapping_start < mapping_end) ? (mapping_end - mapping_start) : 0;
-
- // If nothing was mapped, we're actually done immediately.
- R_SUCCEED_IF(mapping_size == 0);
-
- // Get the test state and attribute mask.
- KMemoryState test_state;
- KMemoryAttribute test_attr_mask;
- switch (dst_state) {
- case KMemoryState::Ipc:
- test_state = KMemoryState::FlagCanUseIpc;
- test_attr_mask =
- KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked;
- break;
- case KMemoryState::NonSecureIpc:
- test_state = KMemoryState::FlagCanUseNonSecureIpc;
- test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
- break;
- case KMemoryState::NonDeviceIpc:
- test_state = KMemoryState::FlagCanUseNonDeviceIpc;
- test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
- break;
- default:
- R_THROW(ResultInvalidCombination);
- }
-
- // Lock the table.
- // NOTE: Nintendo does this *after* creating the updater below, but this does not follow
- // convention elsewhere in KPageTable.
- KScopedLightLock lk(m_general_lock);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Ensure that on failure, we roll back appropriately.
- size_t mapped_size = 0;
- ON_RESULT_FAILURE {
- if (mapped_size > 0) {
- // Determine where the mapping ends.
-            const auto mapped_end = mapping_start + mapped_size;
- const auto mapped_last = mapped_end - 1;
-
- // Get current and next iterators.
- KMemoryBlockManager::const_iterator start_it =
- m_memory_block_manager.FindIterator(mapping_start);
- KMemoryBlockManager::const_iterator next_it = start_it;
- ++next_it;
-
- // Get the current block info.
- KMemoryInfo cur_info = start_it->GetMemoryInfo();
-
- // Create tracking variables.
- KProcessAddress cur_address = cur_info.GetAddress();
- size_t cur_size = cur_info.GetSize();
- bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
- bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
- bool first =
- cur_info.GetIpcDisableMergeCount() == 1 &&
- (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked) ==
- KMemoryBlockDisableMergeAttribute::None;
-
-            while ((cur_address + cur_size - 1) < mapped_last) {
- // Check that we have a next block.
- ASSERT(next_it != m_memory_block_manager.end());
-
- // Get the next info.
- const KMemoryInfo next_info = next_it->GetMemoryInfo();
-
-                // Check if we can consolidate the next block's permission set with the current one.
- const bool next_perm_eq =
- next_info.GetPermission() == next_info.GetOriginalPermission();
- const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
- if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm &&
- cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
- // We can consolidate the reprotection for the current and next block into a
- // single call.
- cur_size += next_info.GetSize();
- } else {
- // We have to operate on the current block.
- if ((cur_needs_set_perm || first) && !cur_perm_eq) {
- ASSERT(Operate(cur_address, cur_size / PageSize, cur_info.GetPermission(),
- OperationType::ChangePermissions)
- .IsSuccess());
- }
-
- // Advance.
- cur_address = next_info.GetAddress();
- cur_size = next_info.GetSize();
- first = false;
- }
-
- // Advance.
- cur_info = next_info;
- cur_perm_eq = next_perm_eq;
- cur_needs_set_perm = next_needs_set_perm;
- ++next_it;
- }
-
- // Process the last block.
- if ((first || cur_needs_set_perm) && !cur_perm_eq) {
- ASSERT(Operate(cur_address, cur_size / PageSize, cur_info.GetPermission(),
- OperationType::ChangePermissions)
- .IsSuccess());
- }
- }
- };
-
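-    // (The rollback above mirrors the loop below, undoing whatever permission restoration was
-    // applied before the failure.)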
- // Iterate, reprotecting as needed.
- {
- // Get current and next iterators.
- KMemoryBlockManager::const_iterator start_it =
- m_memory_block_manager.FindIterator(mapping_start);
- KMemoryBlockManager::const_iterator next_it = start_it;
- ++next_it;
-
- // Validate the current block.
- KMemoryInfo cur_info = start_it->GetMemoryInfo();
- ASSERT(this->CheckMemoryState(cur_info, test_state, test_state, KMemoryPermission::None,
- KMemoryPermission::None,
- test_attr_mask | KMemoryAttribute::IpcLocked,
- KMemoryAttribute::IpcLocked)
- .IsSuccess());
-
- // Create tracking variables.
- KProcessAddress cur_address = cur_info.GetAddress();
- size_t cur_size = cur_info.GetSize();
- bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
- bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
- bool first =
- cur_info.GetIpcDisableMergeCount() == 1 &&
- (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked) ==
- KMemoryBlockDisableMergeAttribute::None;
-
- while ((cur_address + cur_size - 1) < mapping_last) {
- // Check that we have a next block.
- ASSERT(next_it != m_memory_block_manager.end());
-
- // Get the next info.
- const KMemoryInfo next_info = next_it->GetMemoryInfo();
-
- // Validate the next block.
- ASSERT(this->CheckMemoryState(next_info, test_state, test_state,
- KMemoryPermission::None, KMemoryPermission::None,
- test_attr_mask | KMemoryAttribute::IpcLocked,
- KMemoryAttribute::IpcLocked)
- .IsSuccess());
-
- // Check if we can consolidate the next block's permission set with the current one.
- const bool next_perm_eq =
- next_info.GetPermission() == next_info.GetOriginalPermission();
- const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
- if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm &&
- cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
- // We can consolidate the reprotection for the current and next block into a single
- // call.
- cur_size += next_info.GetSize();
- } else {
- // We have to operate on the current block.
- if ((cur_needs_set_perm || first) && !cur_perm_eq) {
- R_TRY(Operate(cur_address, cur_size / PageSize,
- cur_needs_set_perm ? cur_info.GetOriginalPermission()
- : cur_info.GetPermission(),
- OperationType::ChangePermissions));
- }
-
- // Mark that we mapped the block.
- mapped_size += cur_size;
-
- // Advance.
- cur_address = next_info.GetAddress();
- cur_size = next_info.GetSize();
- first = false;
- }
-
- // Advance.
- cur_info = next_info;
- cur_perm_eq = next_perm_eq;
- cur_needs_set_perm = next_needs_set_perm;
- ++next_it;
- }
-
- // Process the last block.
- const auto lock_count =
- cur_info.GetIpcLockCount() +
- (next_it != m_memory_block_manager.end()
- ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount())
- : 0);
- if ((first || cur_needs_set_perm || (lock_count == 1)) && !cur_perm_eq) {
- R_TRY(Operate(cur_address, cur_size / PageSize,
- cur_needs_set_perm ? cur_info.GetOriginalPermission()
- : cur_info.GetPermission(),
- OperationType::ChangePermissions));
- }
- }
-
- // Create an update allocator.
- // NOTE: Guaranteed zero blocks needed here.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, 0);
- R_TRY(allocator_result);
-
- // Unlock the pages.
- m_memory_block_manager.UpdateLock(std::addressof(allocator), mapping_start,
- mapping_size / PageSize, &KMemoryBlock::UnlockForIpc,
- KMemoryPermission::None);
-
- R_SUCCEED();
-}
-
-void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLinkedList* page_list,
- KProcessAddress address, size_t size,
- KMemoryPermission prot_perm) {
- ASSERT(this->IsLockedByCurrentThread());
- ASSERT(Common::IsAligned(GetInteger(address), PageSize));
- ASSERT(Common::IsAligned(size, PageSize));
-
- // Get the mapped extents.
- const KProcessAddress src_map_start = address;
- const KProcessAddress src_map_end = address + size;
- const KProcessAddress src_map_last = src_map_end - 1;
-
- // This function is only invoked when there's something to do.
- ASSERT(src_map_end > src_map_start);
-
- // Iterate over blocks, fixing permissions.
- KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(address);
- while (true) {
- const KMemoryInfo info = it->GetMemoryInfo();
-
- const auto cur_start = info.GetAddress() >= GetInteger(src_map_start)
- ? info.GetAddress()
- : GetInteger(src_map_start);
- const auto cur_end =
- src_map_last <= info.GetLastAddress() ? src_map_end : info.GetEndAddress();
-
- // If we can, fix the protections on the block.
- if ((info.GetIpcLockCount() == 0 &&
- (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) ||
- (info.GetIpcLockCount() != 0 &&
- (info.GetOriginalPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm)) {
- // Check if we actually need to fix the protections on the block.
- if (cur_end == src_map_end || info.GetAddress() <= GetInteger(src_map_start) ||
- (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) {
- ASSERT(Operate(cur_start, (cur_end - cur_start) / PageSize, info.GetPermission(),
- OperationType::ChangePermissions)
- .IsSuccess());
- }
- }
-
- // If we're past the end of the region, we're done.
- if (src_map_last <= info.GetLastAddress()) {
- break;
- }
-
- // Advance.
- ++it;
- ASSERT(it != m_memory_block_manager.end());
- }
-}
-
-Result KPageTable::MapPhysicalMemory(KProcessAddress address, size_t size) {
- // Lock the physical memory lock.
- KScopedLightLock phys_lk(m_map_physical_memory_lock);
-
- // Calculate the last address for convenience.
- const KProcessAddress last_address = address + size - 1;
-
- // Define iteration variables.
- KProcessAddress cur_address;
- size_t mapped_size;
-
- // The entire mapping process can be retried.
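-    // (The table lock is dropped while allocating below, so the observed mapped size can go
-    // stale; the re-check under the lock detects this and retries from the top.)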
- while (true) {
- // Check if the memory is already mapped.
- {
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Iterate over the memory.
- cur_address = address;
- mapped_size = 0;
-
- auto it = m_memory_block_manager.FindIterator(cur_address);
- while (true) {
- // Check that the iterator is valid.
- ASSERT(it != m_memory_block_manager.end());
-
- // Get the memory info.
- const KMemoryInfo info = it->GetMemoryInfo();
-
- // Check if we're done.
- if (last_address <= info.GetLastAddress()) {
- if (info.GetState() != KMemoryState::Free) {
- mapped_size += (last_address + 1 - cur_address);
- }
- break;
- }
-
- // Track the memory if it's mapped.
- if (info.GetState() != KMemoryState::Free) {
- mapped_size += KProcessAddress(info.GetEndAddress()) - cur_address;
- }
-
- // Advance.
- cur_address = info.GetEndAddress();
- ++it;
- }
-
- // If the size mapped is the size requested, we've nothing to do.
- R_SUCCEED_IF(size == mapped_size);
- }
-
- // Allocate and map the memory.
- {
- // Reserve the memory from the process resource limit.
- KScopedResourceReservation memory_reservation(
- m_resource_limit, LimitableResource::PhysicalMemoryMax, size - mapped_size);
- R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
-
- // Allocate pages for the new memory.
- KPageGroup pg{m_kernel, m_block_info_manager};
- R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(
- &pg, (size - mapped_size) / PageSize, m_allocate_option, 0, 0));
-
-            // If we fail in the next bit (or retry), we need to clean up the pages.
- // auto pg_guard = SCOPE_GUARD {
- // pg.OpenFirst();
- // pg.Close();
- //};
-
- // Map the memory.
- {
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- size_t num_allocator_blocks = 0;
-
- // Verify that nobody has mapped memory since we first checked.
- {
- // Iterate over the memory.
- size_t checked_mapped_size = 0;
- cur_address = address;
-
- auto it = m_memory_block_manager.FindIterator(cur_address);
- while (true) {
- // Check that the iterator is valid.
- ASSERT(it != m_memory_block_manager.end());
-
- // Get the memory info.
- const KMemoryInfo info = it->GetMemoryInfo();
-
- const bool is_free = info.GetState() == KMemoryState::Free;
- if (is_free) {
- if (info.GetAddress() < GetInteger(address)) {
- ++num_allocator_blocks;
- }
- if (last_address < info.GetLastAddress()) {
- ++num_allocator_blocks;
- }
- }
-
- // Check if we're done.
- if (last_address <= info.GetLastAddress()) {
- if (!is_free) {
- checked_mapped_size += (last_address + 1 - cur_address);
- }
- break;
- }
-
- // Track the memory if it's mapped.
- if (!is_free) {
- checked_mapped_size +=
- KProcessAddress(info.GetEndAddress()) - cur_address;
- }
-
- // Advance.
- cur_address = info.GetEndAddress();
- ++it;
- }
-
- // If the size now isn't what it was before, somebody mapped or unmapped
- // concurrently. If this happened, retry.
- if (mapped_size != checked_mapped_size) {
- continue;
- }
- }
-
- // Create an update allocator.
- ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager,
- num_allocator_blocks);
- R_TRY(allocator_result);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Prepare to iterate over the memory.
- auto pg_it = pg.begin();
- KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
- size_t pg_pages = pg_it->GetNumPages();
-
- // Reset the current tracking address, and make sure we clean up on failure.
- // pg_guard.Cancel();
- cur_address = address;
- ON_RESULT_FAILURE {
- if (cur_address > address) {
- const KProcessAddress last_unmap_address = cur_address - 1;
-
- // Iterate, unmapping the pages.
- cur_address = address;
-
- auto it = m_memory_block_manager.FindIterator(cur_address);
- while (true) {
- // Check that the iterator is valid.
- ASSERT(it != m_memory_block_manager.end());
-
- // Get the memory info.
- const KMemoryInfo info = it->GetMemoryInfo();
-
- // If the memory state is free, we mapped it and need to unmap it.
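-                        // (The block manager is only updated after all mappings succeed, so
-                        // regions still marked Free here are exactly the ones we just mapped.)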
- if (info.GetState() == KMemoryState::Free) {
- // Determine the range to unmap.
- const size_t cur_pages =
- std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
- last_unmap_address + 1 - cur_address) /
- PageSize;
-
- // Unmap.
- ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None,
- OperationType::Unmap)
- .IsSuccess());
- }
-
- // Check if we're done.
- if (last_unmap_address <= info.GetLastAddress()) {
- break;
- }
-
- // Advance.
- cur_address = info.GetEndAddress();
- ++it;
- }
- }
-
- // Release any remaining unmapped memory.
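-            // (OpenFirst takes the initial reference on each still-unmapped block so that the
-            // paired Close drops it and returns those pages to the memory manager.)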
- m_system.Kernel().MemoryManager().OpenFirst(pg_phys_addr, pg_pages);
- m_system.Kernel().MemoryManager().Close(pg_phys_addr, pg_pages);
- for (++pg_it; pg_it != pg.end(); ++pg_it) {
- m_system.Kernel().MemoryManager().OpenFirst(pg_it->GetAddress(),
- pg_it->GetNumPages());
- m_system.Kernel().MemoryManager().Close(pg_it->GetAddress(),
- pg_it->GetNumPages());
- }
- };
-
- auto it = m_memory_block_manager.FindIterator(cur_address);
- while (true) {
- // Check that the iterator is valid.
- ASSERT(it != m_memory_block_manager.end());
-
- // Get the memory info.
- const KMemoryInfo info = it->GetMemoryInfo();
-
- // If it's unmapped, we need to map it.
- if (info.GetState() == KMemoryState::Free) {
- // Determine the range to map.
- size_t map_pages =
- std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
- last_address + 1 - cur_address) /
- PageSize;
-
- // While we have pages to map, map them.
- {
- // Create a page group for the current mapping range.
- KPageGroup cur_pg(m_kernel, m_block_info_manager);
- {
- ON_RESULT_FAILURE_2 {
- cur_pg.OpenFirst();
- cur_pg.Close();
- };
-
- size_t remain_pages = map_pages;
- while (remain_pages > 0) {
- // Check if we're at the end of the physical block.
- if (pg_pages == 0) {
- // Ensure there are more pages to map.
- ASSERT(pg_it != pg.end());
-
- // Advance our physical block.
- ++pg_it;
- pg_phys_addr = pg_it->GetAddress();
- pg_pages = pg_it->GetNumPages();
- }
-
- // Add whatever we can to the current block.
- const size_t cur_pages = std::min(pg_pages, remain_pages);
- R_TRY(cur_pg.AddBlock(pg_phys_addr +
- ((pg_pages - cur_pages) * PageSize),
- cur_pages));
-
- // Advance.
- remain_pages -= cur_pages;
- pg_pages -= cur_pages;
- }
- }
-
- // Map the pages.
- R_TRY(this->Operate(cur_address, map_pages, cur_pg,
- OperationType::MapFirstGroup));
- }
- }
-
- // Check if we're done.
- if (last_address <= info.GetLastAddress()) {
- break;
- }
-
- // Advance.
- cur_address = info.GetEndAddress();
- ++it;
- }
-
- // We succeeded, so commit the memory reservation.
- memory_reservation.Commit();
-
- // Increase our tracked mapped size.
- m_mapped_physical_memory_size += (size - mapped_size);
-
- // Update the relevant memory blocks.
- m_memory_block_manager.UpdateIfMatch(
- std::addressof(allocator), address, size / PageSize, KMemoryState::Free,
- KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
- address == this->GetAliasRegionStart()
- ? KMemoryBlockDisableMergeAttribute::Normal
- : KMemoryBlockDisableMergeAttribute::None,
- KMemoryBlockDisableMergeAttribute::None);
-
- R_SUCCEED();
- }
- }
- }
-}
-
-Result KPageTable::UnmapPhysicalMemory(KProcessAddress address, size_t size) {
- // Lock the physical memory lock.
- KScopedLightLock phys_lk(m_map_physical_memory_lock);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Calculate the last address for convenience.
- const KProcessAddress last_address = address + size - 1;
-
- // Define iteration variables.
- KProcessAddress map_start_address = 0;
- KProcessAddress map_last_address = 0;
-
- KProcessAddress cur_address;
- size_t mapped_size;
- size_t num_allocator_blocks = 0;
-
- // Check if the memory is mapped.
- {
- // Iterate over the memory.
- cur_address = address;
- mapped_size = 0;
-
- auto it = m_memory_block_manager.FindIterator(cur_address);
- while (true) {
- // Check that the iterator is valid.
- ASSERT(it != m_memory_block_manager.end());
-
- // Get the memory info.
- const KMemoryInfo info = it->GetMemoryInfo();
-
- // Verify the memory's state.
- const bool is_normal = info.GetState() == KMemoryState::Normal &&
- info.GetAttribute() == KMemoryAttribute::None;
- const bool is_free = info.GetState() == KMemoryState::Free;
- R_UNLESS(is_normal || is_free, ResultInvalidCurrentMemory);
-
- if (is_normal) {
- R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory);
-
- if (map_start_address == 0) {
- map_start_address = cur_address;
- }
- map_last_address =
- (last_address >= info.GetLastAddress()) ? info.GetLastAddress() : last_address;
-
- if (info.GetAddress() < GetInteger(address)) {
- ++num_allocator_blocks;
- }
- if (last_address < info.GetLastAddress()) {
- ++num_allocator_blocks;
- }
-
- mapped_size += (map_last_address + 1 - cur_address);
- }
-
- // Check if we're done.
- if (last_address <= info.GetLastAddress()) {
- break;
- }
-
- // Advance.
- cur_address = info.GetEndAddress();
- ++it;
- }
-
- // If there's nothing mapped, we've nothing to do.
- R_SUCCEED_IF(mapped_size == 0);
- }
-
- // Create an update allocator.
- ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Separate the mapping.
- R_TRY(Operate(map_start_address, (map_last_address + 1 - map_start_address) / PageSize,
- KMemoryPermission::None, OperationType::Separate));
-
-    // Reset the current tracking address.
- cur_address = address;
-
- // Iterate over the memory, unmapping as we go.
- auto it = m_memory_block_manager.FindIterator(cur_address);
-
- const auto clear_merge_attr =
- (it->GetState() == KMemoryState::Normal &&
- it->GetAddress() == this->GetAliasRegionStart() && it->GetAddress() == address)
- ? KMemoryBlockDisableMergeAttribute::Normal
- : KMemoryBlockDisableMergeAttribute::None;
-
- while (true) {
- // Check that the iterator is valid.
- ASSERT(it != m_memory_block_manager.end());
-
- // Get the memory info.
- const KMemoryInfo info = it->GetMemoryInfo();
-
- // If the memory state is normal, we need to unmap it.
- if (info.GetState() == KMemoryState::Normal) {
- // Determine the range to unmap.
- const size_t cur_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
- last_address + 1 - cur_address) /
- PageSize;
-
- // Unmap.
- ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap)
- .IsSuccess());
- }
-
- // Check if we're done.
- if (last_address <= info.GetLastAddress()) {
- break;
- }
-
- // Advance.
- cur_address = info.GetEndAddress();
- ++it;
- }
-
- // Release the memory resource.
- m_mapped_physical_memory_size -= mapped_size;
- m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, mapped_size);
-
- // Update memory blocks.
- m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize,
- KMemoryState::Free, KMemoryPermission::None,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
- clear_merge_attr);
-
- // We succeeded.
- R_SUCCEED();
-}
-
-Result KPageTable::MapMemory(KProcessAddress dst_address, KProcessAddress src_address,
- size_t size) {
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Validate that the source address's state is valid.
- KMemoryState src_state;
- size_t num_src_allocator_blocks;
- R_TRY(this->CheckMemoryState(std::addressof(src_state), nullptr, nullptr,
- std::addressof(num_src_allocator_blocks), src_address, size,
- KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias,
- KMemoryPermission::All, KMemoryPermission::UserReadWrite,
- KMemoryAttribute::All, KMemoryAttribute::None));
-
- // Validate that the dst address's state is valid.
- size_t num_dst_allocator_blocks;
- R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size,
- KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::None,
- KMemoryAttribute::None));
-
- // Create an update allocator for the source.
- Result src_allocator_result;
- KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
- m_memory_block_slab_manager,
- num_src_allocator_blocks);
- R_TRY(src_allocator_result);
-
- // Create an update allocator for the destination.
- Result dst_allocator_result;
- KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
- m_memory_block_slab_manager,
- num_dst_allocator_blocks);
- R_TRY(dst_allocator_result);
-
- // Map the memory.
- {
- // Determine the number of pages being operated on.
- const size_t num_pages = size / PageSize;
-
-        // Create a page group for the memory being mapped.
- KPageGroup pg{m_kernel, m_block_info_manager};
-
- // Create the page group representing the source.
- R_TRY(this->MakePageGroup(pg, src_address, num_pages));
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Reprotect the source as kernel-read/not mapped.
- const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
- KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
- const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked;
- const KPageProperties src_properties = {new_src_perm, false, false,
- DisableMergeAttribute::DisableHeadBodyTail};
- R_TRY(this->Operate(src_address, num_pages, src_properties.perm,
- OperationType::ChangePermissions));
-
- // Ensure that we unprotect the source pages on failure.
- ON_RESULT_FAILURE {
- const KPageProperties unprotect_properties = {
- KMemoryPermission::UserReadWrite, false, false,
- DisableMergeAttribute::EnableHeadBodyTail};
- ASSERT(this->Operate(src_address, num_pages, unprotect_properties.perm,
- OperationType::ChangePermissions) == ResultSuccess);
- };
-
- // Map the alias pages.
- const KPageProperties dst_map_properties = {KMemoryPermission::UserReadWrite, false, false,
- DisableMergeAttribute::DisableHead};
- R_TRY(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_map_properties,
- false));
-
- // Apply the memory block updates.
- m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages,
- src_state, new_src_perm, new_src_attr,
- KMemoryBlockDisableMergeAttribute::Locked,
- KMemoryBlockDisableMergeAttribute::None);
- m_memory_block_manager.Update(
- std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::Stack,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
- KMemoryBlockDisableMergeAttribute::Normal, KMemoryBlockDisableMergeAttribute::None);
- }
-
- R_SUCCEED();
-}
-
-Result KPageTable::UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address,
- size_t size) {
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Validate that the source address's state is valid.
- KMemoryState src_state;
- size_t num_src_allocator_blocks;
- R_TRY(this->CheckMemoryState(
- std::addressof(src_state), nullptr, nullptr, std::addressof(num_src_allocator_blocks),
- src_address, size, KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias,
- KMemoryPermission::All, KMemoryPermission::NotMapped | KMemoryPermission::KernelRead,
- KMemoryAttribute::All, KMemoryAttribute::Locked));
-
- // Validate that the dst address's state is valid.
- KMemoryPermission dst_perm;
- size_t num_dst_allocator_blocks;
- R_TRY(this->CheckMemoryState(
- nullptr, std::addressof(dst_perm), nullptr, std::addressof(num_dst_allocator_blocks),
- dst_address, size, KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None));
-
- // Create an update allocator for the source.
- Result src_allocator_result;
- KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
- m_memory_block_slab_manager,
- num_src_allocator_blocks);
- R_TRY(src_allocator_result);
-
- // Create an update allocator for the destination.
- Result dst_allocator_result;
- KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
- m_memory_block_slab_manager,
- num_dst_allocator_blocks);
- R_TRY(dst_allocator_result);
-
- // Unmap the memory.
- {
- // Determine the number of pages being operated on.
- const size_t num_pages = size / PageSize;
-
-        // Create a page group for the memory being unmapped.
- KPageGroup pg{m_kernel, m_block_info_manager};
-
- // Create the page group representing the destination.
- R_TRY(this->MakePageGroup(pg, dst_address, num_pages));
-
-        // Ensure the page group is valid for the source.
- R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), ResultInvalidMemoryRegion);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Unmap the aliased copy of the pages.
- const KPageProperties dst_unmap_properties = {KMemoryPermission::None, false, false,
- DisableMergeAttribute::None};
- R_TRY(
- this->Operate(dst_address, num_pages, dst_unmap_properties.perm, OperationType::Unmap));
-
- // Ensure that we re-map the aliased pages on failure.
- ON_RESULT_FAILURE {
- this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg);
- };
-
- // Try to set the permissions for the source pages back to what they should be.
- const KPageProperties src_properties = {KMemoryPermission::UserReadWrite, false, false,
- DisableMergeAttribute::EnableAndMergeHeadBodyTail};
- R_TRY(this->Operate(src_address, num_pages, src_properties.perm,
- OperationType::ChangePermissions));
-
- // Apply the memory block updates.
- m_memory_block_manager.Update(
- std::addressof(src_allocator), src_address, num_pages, src_state,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
- KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked);
- m_memory_block_manager.Update(
- std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None,
- KMemoryPermission::None, KMemoryAttribute::None,
- KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal);
- }
-
- R_SUCCEED();
-}
-
-Result KPageTable::AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
- size_t num_pages, KMemoryPermission perm) {
- ASSERT(this->IsLockedByCurrentThread());
-
- // Create a page group to hold the pages we allocate.
- KPageGroup pg{m_kernel, m_block_info_manager};
-
- // Allocate the pages.
- R_TRY(
- m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, m_allocate_option));
-
- // Ensure that the page group is closed when we're done working with it.
- SCOPE_EXIT({ pg.Close(); });
-
- // Clear all pages.
- for (const auto& it : pg) {
- std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value,
- it.GetSize());
- }
-
- // Map the pages.
- R_RETURN(this->Operate(address, num_pages, pg, OperationType::MapGroup));
-}
-
-Result KPageTable::MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
- const KPageGroup& pg, const KPageProperties properties,
- bool reuse_ll) {
- ASSERT(this->IsLockedByCurrentThread());
-
- // Note the current address, so that we can iterate.
- const KProcessAddress start_address = address;
- KProcessAddress cur_address = address;
-
- // Ensure that we clean up on failure.
- ON_RESULT_FAILURE {
- ASSERT(!reuse_ll);
- if (cur_address != start_address) {
- const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
- DisableMergeAttribute::None};
- ASSERT(this->Operate(start_address, (cur_address - start_address) / PageSize,
- unmap_properties.perm, OperationType::Unmap) == ResultSuccess);
- }
- };
-
- // Iterate, mapping all pages in the group.
- for (const auto& block : pg) {
- // Map and advance.
- const KPageProperties cur_properties =
- (cur_address == start_address)
- ? properties
- : KPageProperties{properties.perm, properties.io, properties.uncached,
- DisableMergeAttribute::None};
-        R_TRY(this->Operate(cur_address, block.GetNumPages(), cur_properties.perm,
-                            OperationType::Map, block.GetAddress()));
- cur_address += block.GetSize();
- }
-
- // We succeeded!
- R_SUCCEED();
-}
-
-void KPageTable::RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
- const KPageGroup& pg) {
- ASSERT(this->IsLockedByCurrentThread());
-
- // Note the current address, so that we can iterate.
- const KProcessAddress start_address = address;
- const KProcessAddress last_address = start_address + size - 1;
- const KProcessAddress end_address = last_address + 1;
-
- // Iterate over the memory.
- auto pg_it = pg.begin();
- ASSERT(pg_it != pg.end());
-
- KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
- size_t pg_pages = pg_it->GetNumPages();
-
- auto it = m_memory_block_manager.FindIterator(start_address);
- while (true) {
- // Check that the iterator is valid.
- ASSERT(it != m_memory_block_manager.end());
-
- // Get the memory info.
- const KMemoryInfo info = it->GetMemoryInfo();
-
- // Determine the range to map.
- KProcessAddress map_address = std::max<KProcessAddress>(info.GetAddress(), start_address);
- const KProcessAddress map_end_address =
- std::min<KProcessAddress>(info.GetEndAddress(), end_address);
- ASSERT(map_end_address != map_address);
-
- // Determine if we should disable head merge.
- const bool disable_head_merge =
- info.GetAddress() >= GetInteger(start_address) &&
- True(info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Normal);
- const KPageProperties map_properties = {
- info.GetPermission(), false, false,
- disable_head_merge ? DisableMergeAttribute::DisableHead : DisableMergeAttribute::None};
-
- // While we have pages to map, map them.
- size_t map_pages = (map_end_address - map_address) / PageSize;
- while (map_pages > 0) {
- // Check if we're at the end of the physical block.
- if (pg_pages == 0) {
- // Ensure there are more pages to map.
- ASSERT(pg_it != pg.end());
-
- // Advance our physical block.
- ++pg_it;
- pg_phys_addr = pg_it->GetAddress();
- pg_pages = pg_it->GetNumPages();
- }
-
- // Map whatever we can.
- const size_t cur_pages = std::min(pg_pages, map_pages);
- ASSERT(this->Operate(map_address, cur_pages, map_properties.perm, OperationType::Map,
- pg_phys_addr) == ResultSuccess);
-
- // Advance.
- map_address += cur_pages * PageSize;
- map_pages -= cur_pages;
-
- pg_phys_addr += cur_pages * PageSize;
- pg_pages -= cur_pages;
- }
-
- // Check if we're done.
- if (last_address <= info.GetLastAddress()) {
- break;
- }
-
- // Advance.
- ++it;
- }
-
- // Check that we re-mapped precisely the page group.
- ASSERT((++pg_it) == pg.end());
-}
-
-Result KPageTable::MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
- KPhysicalAddress phys_addr, bool is_pa_valid,
- KProcessAddress region_start, size_t region_num_pages,
- KMemoryState state, KMemoryPermission perm) {
- ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize);
-
- // Ensure this is a valid map request.
- R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state),
- ResultInvalidCurrentMemory);
- R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Find a random address to map at.
- KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment,
- 0, this->GetNumGuardPages());
- R_UNLESS(addr != 0, ResultOutOfMemory);
- ASSERT(Common::IsAligned(GetInteger(addr), alignment));
- ASSERT(this->CanContain(addr, num_pages * PageSize, state));
- ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free,
- KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::None, KMemoryAttribute::None) == ResultSuccess);
-
- // Create an update allocator.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager);
- R_TRY(allocator_result);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Perform mapping operation.
- if (is_pa_valid) {
- const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
- R_TRY(this->Operate(addr, num_pages, properties.perm, OperationType::Map, phys_addr));
- } else {
- R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), addr, num_pages, perm));
- }
-
- // Update the blocks.
- m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
- KMemoryBlockDisableMergeAttribute::None);
-
- // We successfully mapped the pages.
- *out_addr = addr;
- R_SUCCEED();
-}
-
-Result KPageTable::MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
- KMemoryPermission perm) {
- // Check that the map is in range.
- const size_t size = num_pages * PageSize;
- R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Check the memory state.
- size_t num_allocator_blocks;
- R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
- KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::None,
- KMemoryAttribute::None));
-
- // Create an update allocator.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Map the pages.
- R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), address, num_pages, perm));
-
- // Update the blocks.
- m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
- KMemoryBlockDisableMergeAttribute::None);
-
- R_SUCCEED();
-}
-
-Result KPageTable::UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) {
- // Check that the unmap is in range.
- const size_t size = num_pages * PageSize;
- R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Check the memory state.
- size_t num_allocator_blocks;
- R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
- KMemoryState::All, state, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::All,
- KMemoryAttribute::None));
-
- // Create an update allocator.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Perform the unmap.
- const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
- DisableMergeAttribute::None};
- R_TRY(this->Operate(address, num_pages, unmap_properties.perm, OperationType::Unmap));
-
- // Update the blocks.
- m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
- KMemoryPermission::None, KMemoryAttribute::None,
- KMemoryBlockDisableMergeAttribute::None,
- KMemoryBlockDisableMergeAttribute::Normal);
-
- R_SUCCEED();
-}
-
-Result KPageTable::MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
- KProcessAddress region_start, size_t region_num_pages,
- KMemoryState state, KMemoryPermission perm) {
- ASSERT(!this->IsLockedByCurrentThread());
-
- // Ensure this is a valid map request.
- const size_t num_pages = pg.GetNumPages();
- R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state),
- ResultInvalidCurrentMemory);
- R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Find a random address to map at.
- KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, PageSize,
- 0, this->GetNumGuardPages());
- R_UNLESS(addr != 0, ResultOutOfMemory);
- ASSERT(this->CanContain(addr, num_pages * PageSize, state));
- ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free,
- KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::None, KMemoryAttribute::None) == ResultSuccess);
-
- // Create an update allocator.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager);
- R_TRY(allocator_result);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Perform mapping operation.
- const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
- R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
-
- // Update the blocks.
- m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
- KMemoryBlockDisableMergeAttribute::None);
-
- // We successfully mapped the pages.
- *out_addr = addr;
- R_SUCCEED();
-}
-
-Result KPageTable::MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMemoryState state,
- KMemoryPermission perm) {
- ASSERT(!this->IsLockedByCurrentThread());
-
- // Ensure this is a valid map request.
- const size_t num_pages = pg.GetNumPages();
- const size_t size = num_pages * PageSize;
- R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Check if state allows us to map.
- size_t num_allocator_blocks;
- R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), addr, size,
- KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::None,
- KMemoryAttribute::None));
-
- // Create an update allocator.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Perform mapping operation.
- const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
- R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
-
- // Update the blocks.
- m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
- KMemoryBlockDisableMergeAttribute::None);
-
- // We successfully mapped the pages.
- R_SUCCEED();
-}
-
-Result KPageTable::UnmapPageGroup(KProcessAddress address, const KPageGroup& pg,
- KMemoryState state) {
- ASSERT(!this->IsLockedByCurrentThread());
-
- // Ensure this is a valid unmap request.
- const size_t num_pages = pg.GetNumPages();
- const size_t size = num_pages * PageSize;
- R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Check if state allows us to unmap.
- size_t num_allocator_blocks;
- R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
- KMemoryState::All, state, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::All,
- KMemoryAttribute::None));
-
- // Check that the page group is valid.
- R_UNLESS(this->IsValidPageGroup(pg, address, num_pages), ResultInvalidCurrentMemory);
-
- // Create an update allocator.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Perform unmapping operation.
- const KPageProperties properties = {KMemoryPermission::None, false, false,
- DisableMergeAttribute::None};
- R_TRY(this->Operate(address, num_pages, properties.perm, OperationType::Unmap));
-
- // Update the blocks.
- m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
- KMemoryPermission::None, KMemoryAttribute::None,
- KMemoryBlockDisableMergeAttribute::None,
- KMemoryBlockDisableMergeAttribute::Normal);
-
- R_SUCCEED();
-}
-
-Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages,
- KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr) {
- // Ensure that the page group isn't null.
- ASSERT(out != nullptr);
-
- // Make sure that the region we're mapping is valid for the table.
- const size_t size = num_pages * PageSize;
- R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Check if state allows us to create the group.
- R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted,
- state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
- attr_mask, attr));
-
- // Create a new page group for the region.
- R_TRY(this->MakePageGroup(*out, address, num_pages));
-
- R_SUCCEED();
-}
-
-Result KPageTable::SetProcessMemoryPermission(KProcessAddress addr, size_t size,
- Svc::MemoryPermission svc_perm) {
- const size_t num_pages = size / PageSize;
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Verify we can change the memory permission.
- KMemoryState old_state;
- KMemoryPermission old_perm;
- size_t num_allocator_blocks;
- R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr,
- std::addressof(num_allocator_blocks), addr, size,
- KMemoryState::FlagCode, KMemoryState::FlagCode,
- KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::All, KMemoryAttribute::None));
-
- // Determine new perm/state.
- const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
- KMemoryState new_state = old_state;
- const bool is_w = (new_perm & KMemoryPermission::UserWrite) == KMemoryPermission::UserWrite;
- const bool is_x = (new_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute;
- const bool was_x =
- (old_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute;
- ASSERT(!(is_w && is_x));
-
- if (is_w) {
- switch (old_state) {
- case KMemoryState::Code:
- new_state = KMemoryState::CodeData;
- break;
- case KMemoryState::AliasCode:
- new_state = KMemoryState::AliasCodeData;
- break;
- default:
- ASSERT(false);
- break;
- }
- }
-
- // Succeed if there's nothing to do.
- R_SUCCEED_IF(old_perm == new_perm && old_state == new_state);
-
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // Perform mapping operation.
- const auto operation =
- was_x ? OperationType::ChangePermissionsAndRefresh : OperationType::ChangePermissions;
- R_TRY(Operate(addr, num_pages, new_perm, operation));
-
- // Update the blocks.
- m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, new_state, new_perm,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
- KMemoryBlockDisableMergeAttribute::None);
-
- // Ensure cache coherency, if we're setting pages as executable.
- if (is_x) {
- m_system.InvalidateCpuInstructionCacheRange(GetInteger(addr), size);
- }
-
- R_SUCCEED();
-}
-
-KMemoryInfo KPageTable::QueryInfoImpl(KProcessAddress addr) {
- KScopedLightLock lk(m_general_lock);
-
- return m_memory_block_manager.FindBlock(addr)->GetMemoryInfo();
-}
-
-KMemoryInfo KPageTable::QueryInfo(KProcessAddress addr) {
- if (!Contains(addr, 1)) {
- return {
- .m_address = GetInteger(m_address_space_end),
- .m_size = 0 - GetInteger(m_address_space_end),
- .m_state = static_cast<KMemoryState>(Svc::MemoryState::Inaccessible),
- .m_device_disable_merge_left_count = 0,
- .m_device_disable_merge_right_count = 0,
- .m_ipc_lock_count = 0,
- .m_device_use_count = 0,
- .m_ipc_disable_merge_count = 0,
- .m_permission = KMemoryPermission::None,
- .m_attribute = KMemoryAttribute::None,
- .m_original_permission = KMemoryPermission::None,
- .m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None,
- };
- }
-
- return QueryInfoImpl(addr);
-}
-
-Result KPageTable::SetMemoryPermission(KProcessAddress addr, size_t size,
- Svc::MemoryPermission svc_perm) {
- const size_t num_pages = size / PageSize;
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Verify we can change the memory permission.
- KMemoryState old_state;
- KMemoryPermission old_perm;
- size_t num_allocator_blocks;
- R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr,
- std::addressof(num_allocator_blocks), addr, size,
- KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect,
- KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::All, KMemoryAttribute::None));
-
- // Determine new perm.
- const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
- R_SUCCEED_IF(old_perm == new_perm);
-
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // Perform mapping operation.
- R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
-
- // Update the blocks.
- m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
- KMemoryBlockDisableMergeAttribute::None);
-
- R_SUCCEED();
-}
-
-Result KPageTable::SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr) {
- const size_t num_pages = size / PageSize;
- ASSERT((static_cast<KMemoryAttribute>(mask) | KMemoryAttribute::SetMask) ==
- KMemoryAttribute::SetMask);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Verify we can change the memory attribute.
- KMemoryState old_state;
- KMemoryPermission old_perm;
- KMemoryAttribute old_attr;
- size_t num_allocator_blocks;
- constexpr auto AttributeTestMask =
- ~(KMemoryAttribute::SetMask | KMemoryAttribute::DeviceShared);
- const KMemoryState state_test_mask =
- static_cast<KMemoryState>(((mask & static_cast<u32>(KMemoryAttribute::Uncached))
- ? static_cast<u32>(KMemoryState::FlagCanChangeAttribute)
- : 0) |
- ((mask & static_cast<u32>(KMemoryAttribute::PermissionLocked))
- ? static_cast<u32>(KMemoryState::FlagCanPermissionLock)
- : 0));
- R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
- std::addressof(old_attr), std::addressof(num_allocator_blocks),
- addr, size, state_test_mask, state_test_mask,
- KMemoryPermission::None, KMemoryPermission::None,
- AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask));
-
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // If we need to, perform a change attribute operation.
- if (True(KMemoryAttribute::Uncached & static_cast<KMemoryAttribute>(mask))) {
- // Perform operation.
- R_TRY(this->Operate(addr, num_pages, old_perm,
- OperationType::ChangePermissionsAndRefreshAndFlush, 0));
- }
-
- // Update the blocks.
- m_memory_block_manager.UpdateAttribute(std::addressof(allocator), addr, num_pages,
- static_cast<KMemoryAttribute>(mask),
- static_cast<KMemoryAttribute>(attr));
-
- R_SUCCEED();
-}
-
-Result KPageTable::SetMaxHeapSize(size_t size) {
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Only process page tables are allowed to set heap size.
- ASSERT(!this->IsKernel());
-
- m_max_heap_size = size;
-
- R_SUCCEED();
-}
-
-Result KPageTable::SetHeapSize(u64* out, size_t size) {
- // Lock the physical memory mutex.
- KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
-
- // Try to perform a reduction in heap, instead of an extension.
- KProcessAddress cur_address{};
- size_t allocation_size{};
- {
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Validate that setting heap size is possible at all.
- R_UNLESS(!m_is_kernel, ResultOutOfMemory);
- R_UNLESS(size <= static_cast<size_t>(m_heap_region_end - m_heap_region_start),
- ResultOutOfMemory);
- R_UNLESS(size <= m_max_heap_size, ResultOutOfMemory);
-
- if (size < GetHeapSize()) {
- // The size being requested is less than the current size, so we need to free the end of
- // the heap.
-
- // Validate memory state.
- size_t num_allocator_blocks;
- R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks),
- m_heap_region_start + size, GetHeapSize() - size,
- KMemoryState::All, KMemoryState::Normal,
- KMemoryPermission::All, KMemoryPermission::UserReadWrite,
- KMemoryAttribute::All, KMemoryAttribute::None));
-
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager,
- num_allocator_blocks);
- R_TRY(allocator_result);
-
- // Unmap the end of the heap.
- const auto num_pages = (GetHeapSize() - size) / PageSize;
- R_TRY(Operate(m_heap_region_start + size, num_pages, KMemoryPermission::None,
- OperationType::Unmap));
-
- // Release the memory from the resource limit.
- m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, num_pages * PageSize);
-
- // Apply the memory block update.
- m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size,
- num_pages, KMemoryState::Free, KMemoryPermission::None,
- KMemoryAttribute::None,
- KMemoryBlockDisableMergeAttribute::None,
- size == 0 ? KMemoryBlockDisableMergeAttribute::Normal
- : KMemoryBlockDisableMergeAttribute::None);
-
- // Update the current heap end.
- m_current_heap_end = m_heap_region_start + size;
-
- // Set the output.
- *out = GetInteger(m_heap_region_start);
- R_SUCCEED();
- } else if (size == GetHeapSize()) {
- // The size requested is exactly the current size.
- *out = GetInteger(m_heap_region_start);
- R_SUCCEED();
- } else {
- // We have to allocate memory. Determine how much to allocate and where while the table
- // is locked.
- cur_address = m_current_heap_end;
- allocation_size = size - GetHeapSize();
- }
- }
-
- // Reserve memory for the heap extension.
- KScopedResourceReservation memory_reservation(
- m_resource_limit, LimitableResource::PhysicalMemoryMax, allocation_size);
- R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
-
- // Allocate pages for the heap extension.
- KPageGroup pg{m_kernel, m_block_info_manager};
- R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
- &pg, allocation_size / PageSize,
- KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));
-
- // Clear all the newly allocated pages.
- for (const auto& it : pg) {
- std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value,
- it.GetSize());
- }
-
- // Map the pages.
- {
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Ensure that the heap hasn't changed since we began executing.
- ASSERT(cur_address == m_current_heap_end);
-
- // Check the memory state.
- size_t num_allocator_blocks{};
- R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), m_current_heap_end,
- allocation_size, KMemoryState::All, KMemoryState::Free,
- KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::None, KMemoryAttribute::None));
-
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(
- std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // Map the pages.
- const auto num_pages = allocation_size / PageSize;
- R_TRY(Operate(m_current_heap_end, num_pages, pg, OperationType::MapGroup));
-
- // Clear all the newly allocated pages.
- for (size_t cur_page = 0; cur_page < num_pages; ++cur_page) {
- std::memset(m_memory->GetPointer(m_current_heap_end + (cur_page * PageSize)), 0,
- PageSize);
- }
-
- // We succeeded, so commit our memory reservation.
- memory_reservation.Commit();
-
- // Apply the memory block update.
- m_memory_block_manager.Update(
- std::addressof(allocator), m_current_heap_end, num_pages, KMemoryState::Normal,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
- m_heap_region_start == m_current_heap_end ? KMemoryBlockDisableMergeAttribute::Normal
- : KMemoryBlockDisableMergeAttribute::None,
- KMemoryBlockDisableMergeAttribute::None);
-
- // Update the current heap end.
- m_current_heap_end = m_heap_region_start + size;
-
- // Set the output.
- *out = GetInteger(m_heap_region_start);
- R_SUCCEED();
- }
-}
-
-Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address,
- size_t size, KMemoryPermission perm,
- bool is_aligned, bool check_heap) {
- // Lightly validate the range before doing anything else.
- const size_t num_pages = size / PageSize;
- R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Check the memory state.
- const auto test_state =
- (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap) |
- (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None);
- size_t num_allocator_blocks;
- KMemoryState old_state;
- R_TRY(this->CheckMemoryState(std::addressof(old_state), nullptr, nullptr,
- std::addressof(num_allocator_blocks), address, size, test_state,
- test_state, perm, perm,
- KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked,
- KMemoryAttribute::None, KMemoryAttribute::DeviceShared));
-
- // Create an update allocator.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // Update the memory blocks.
- m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
- &KMemoryBlock::ShareToDevice, KMemoryPermission::None);
-
- // Set whether the locked memory was io.
- *out_is_io =
- static_cast<Svc::MemoryState>(old_state & KMemoryState::Mask) == Svc::MemoryState::Io;
-
- R_SUCCEED();
-}
-
-Result KPageTable::LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size,
- bool check_heap) {
- // Lightly validate the range before doing anything else.
- const size_t num_pages = size / PageSize;
- R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Check the memory state.
- const auto test_state = KMemoryState::FlagCanDeviceMap |
- (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None);
- size_t num_allocator_blocks;
- R_TRY(this->CheckMemoryStateContiguous(
- std::addressof(num_allocator_blocks), address, size, test_state, test_state,
- KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
-
- // Create an update allocator.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // Update the memory blocks.
- const KMemoryBlockManager::MemoryBlockLockFunction lock_func =
- m_enable_device_address_space_merge
- ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare
- : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight;
- m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func,
- KMemoryPermission::None);
-
- R_SUCCEED();
-}
-
-Result KPageTable::UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) {
- // Lightly validate the range before doing anything else.
- const size_t num_pages = size / PageSize;
- R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Check the memory state.
- size_t num_allocator_blocks;
- R_TRY(this->CheckMemoryStateContiguous(
- std::addressof(num_allocator_blocks), address, size, KMemoryState::FlagCanDeviceMap,
- KMemoryState::FlagCanDeviceMap, KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
-
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // Update the memory blocks.
- m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
- &KMemoryBlock::UnshareToDevice, KMemoryPermission::None);
-
- R_SUCCEED();
-}
-
-Result KPageTable::LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address,
- size_t size) {
- R_RETURN(this->LockMemoryAndOpen(
- nullptr, out, address, size, KMemoryState::FlagCanIpcUserBuffer,
- KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::All,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None,
- KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite,
- KMemoryAttribute::Locked));
-}
-
-Result KPageTable::UnlockForIpcUserBuffer(KProcessAddress address, size_t size) {
- R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanIpcUserBuffer,
- KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::All,
- KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
- KMemoryAttribute::Locked, nullptr));
-}
-
-Result KPageTable::LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size,
- KMemoryPermission perm) {
- R_RETURN(this->LockMemoryAndOpen(out, nullptr, address, size, KMemoryState::FlagCanTransfer,
- KMemoryState::FlagCanTransfer, KMemoryPermission::All,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
- KMemoryAttribute::None, perm, KMemoryAttribute::Locked));
-}
-
-Result KPageTable::UnlockForTransferMemory(KProcessAddress address, size_t size,
- const KPageGroup& pg) {
- R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanTransfer,
- KMemoryState::FlagCanTransfer, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::All,
- KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
- KMemoryAttribute::Locked, std::addressof(pg)));
-}
-
-Result KPageTable::LockForCodeMemory(KPageGroup* out, KProcessAddress addr, size_t size) {
- R_RETURN(this->LockMemoryAndOpen(
- out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
- KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
- KMemoryAttribute::None, KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite,
- KMemoryAttribute::Locked));
-}
-
-Result KPageTable::UnlockForCodeMemory(KProcessAddress addr, size_t size, const KPageGroup& pg) {
- R_RETURN(this->UnlockMemory(
- addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
- KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All,
- KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg));
-}
-
-bool KPageTable::IsRegionContiguous(KProcessAddress addr, u64 size) const {
- auto start_ptr = m_system.DeviceMemory().GetPointer<u8>(GetInteger(addr));
- for (u64 offset{}; offset < size; offset += PageSize) {
- if (start_ptr != m_system.DeviceMemory().GetPointer<u8>(GetInteger(addr) + offset)) {
- return false;
- }
- start_ptr += PageSize;
- }
- return true;
-}
-
-void KPageTable::AddRegionToPages(KProcessAddress start, size_t num_pages,
- KPageGroup& page_linked_list) {
- KProcessAddress addr{start};
- while (addr < start + (num_pages * PageSize)) {
- const KPhysicalAddress paddr{GetPhysicalAddr(addr)};
- ASSERT(paddr != 0);
- page_linked_list.AddBlock(paddr, 1);
- addr += PageSize;
- }
-}
-
-KProcessAddress KPageTable::AllocateVirtualMemory(KProcessAddress start, size_t region_num_pages,
- u64 needed_num_pages, size_t align) {
- if (m_enable_aslr) {
- UNIMPLEMENTED();
- }
- return m_memory_block_manager.FindFreeArea(start, region_num_pages, needed_num_pages, align, 0,
- IsKernel() ? 1 : 4);
-}
-
-Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, const KPageGroup& page_group,
- OperationType operation) {
- ASSERT(this->IsLockedByCurrentThread());
-
- ASSERT(Common::IsAligned(GetInteger(addr), PageSize));
- ASSERT(num_pages > 0);
- ASSERT(num_pages == page_group.GetNumPages());
-
- switch (operation) {
- case OperationType::MapGroup:
- case OperationType::MapFirstGroup: {
- // We want to maintain a new reference to every page in the group.
- KScopedPageGroup spg(page_group, operation != OperationType::MapFirstGroup);
-
- for (const auto& node : page_group) {
- const size_t size{node.GetNumPages() * PageSize};
-
- // Map the pages.
- m_memory->MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress());
-
- addr += size;
- }
-
- // We succeeded! We want to persist the reference to the pages.
- spg.CancelClose();
-
- break;
- }
- default:
- ASSERT(false);
- break;
- }
-
- R_SUCCEED();
-}
-
-Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, KMemoryPermission perm,
- OperationType operation, KPhysicalAddress map_addr) {
- ASSERT(this->IsLockedByCurrentThread());
-
- ASSERT(num_pages > 0);
- ASSERT(Common::IsAligned(GetInteger(addr), PageSize));
- ASSERT(ContainsPages(addr, num_pages));
-
- switch (operation) {
- case OperationType::Unmap: {
- // Ensure that any pages we track close on exit.
- KPageGroup pages_to_close{m_kernel, this->GetBlockInfoManager()};
- SCOPE_EXIT({ pages_to_close.CloseAndReset(); });
-
- this->AddRegionToPages(addr, num_pages, pages_to_close);
- m_memory->UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize);
- break;
- }
- case OperationType::Map: {
- ASSERT(map_addr);
- ASSERT(Common::IsAligned(GetInteger(map_addr), PageSize));
- m_memory->MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr);
-
- // Open references to pages, if we should.
- if (IsHeapPhysicalAddress(m_kernel.MemoryLayout(), map_addr)) {
- m_kernel.MemoryManager().Open(map_addr, num_pages);
- }
- break;
- }
- case OperationType::Separate: {
- // HACK: Unimplemented.
- break;
- }
- case OperationType::ChangePermissions:
- case OperationType::ChangePermissionsAndRefresh:
- case OperationType::ChangePermissionsAndRefreshAndFlush:
- break;
- default:
- ASSERT(false);
- break;
- }
- R_SUCCEED();
-}
-
-void KPageTable::FinalizeUpdate(PageLinkedList* page_list) {
- while (page_list->Peek()) {
- [[maybe_unused]] auto page = page_list->Pop();
-
- // TODO(bunnei): Free pages once they are allocated in guest memory
- // ASSERT(this->GetPageTableManager().IsInPageTableHeap(page));
- // ASSERT(this->GetPageTableManager().GetRefCount(page) == 0);
- // this->GetPageTableManager().Free(page);
- }
-}
-
-KProcessAddress KPageTable::GetRegionAddress(Svc::MemoryState state) const {
- switch (state) {
- case Svc::MemoryState::Free:
- case Svc::MemoryState::Kernel:
- return m_address_space_start;
- case Svc::MemoryState::Normal:
- return m_heap_region_start;
- case Svc::MemoryState::Ipc:
- case Svc::MemoryState::NonSecureIpc:
- case Svc::MemoryState::NonDeviceIpc:
- return m_alias_region_start;
- case Svc::MemoryState::Stack:
- return m_stack_region_start;
- case Svc::MemoryState::Static:
- case Svc::MemoryState::ThreadLocal:
- return m_kernel_map_region_start;
- case Svc::MemoryState::Io:
- case Svc::MemoryState::Shared:
- case Svc::MemoryState::AliasCode:
- case Svc::MemoryState::AliasCodeData:
- case Svc::MemoryState::Transfered:
- case Svc::MemoryState::SharedTransfered:
- case Svc::MemoryState::SharedCode:
- case Svc::MemoryState::GeneratedCode:
- case Svc::MemoryState::CodeOut:
- case Svc::MemoryState::Coverage:
- case Svc::MemoryState::Insecure:
- return m_alias_code_region_start;
- case Svc::MemoryState::Code:
- case Svc::MemoryState::CodeData:
- return m_code_region_start;
- default:
- UNREACHABLE();
- }
-}
-
-size_t KPageTable::GetRegionSize(Svc::MemoryState state) const {
- switch (state) {
- case Svc::MemoryState::Free:
- case Svc::MemoryState::Kernel:
- return m_address_space_end - m_address_space_start;
- case Svc::MemoryState::Normal:
- return m_heap_region_end - m_heap_region_start;
- case Svc::MemoryState::Ipc:
- case Svc::MemoryState::NonSecureIpc:
- case Svc::MemoryState::NonDeviceIpc:
- return m_alias_region_end - m_alias_region_start;
- case Svc::MemoryState::Stack:
- return m_stack_region_end - m_stack_region_start;
- case Svc::MemoryState::Static:
- case Svc::MemoryState::ThreadLocal:
- return m_kernel_map_region_end - m_kernel_map_region_start;
- case Svc::MemoryState::Io:
- case Svc::MemoryState::Shared:
- case Svc::MemoryState::AliasCode:
- case Svc::MemoryState::AliasCodeData:
- case Svc::MemoryState::Transfered:
- case Svc::MemoryState::SharedTransfered:
- case Svc::MemoryState::SharedCode:
- case Svc::MemoryState::GeneratedCode:
- case Svc::MemoryState::CodeOut:
- case Svc::MemoryState::Coverage:
- case Svc::MemoryState::Insecure:
- return m_alias_code_region_end - m_alias_code_region_start;
- case Svc::MemoryState::Code:
- case Svc::MemoryState::CodeData:
- return m_code_region_end - m_code_region_start;
- default:
- UNREACHABLE();
- }
-}
-
-bool KPageTable::CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const {
- const KProcessAddress end = addr + size;
- const KProcessAddress last = end - 1;
-
- const KProcessAddress region_start = this->GetRegionAddress(state);
- const size_t region_size = this->GetRegionSize(state);
-
- const bool is_in_region =
- region_start <= addr && addr < end && last <= region_start + region_size - 1;
- const bool is_in_heap = !(end <= m_heap_region_start || m_heap_region_end <= addr ||
- m_heap_region_start == m_heap_region_end);
- const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr ||
- m_alias_region_start == m_alias_region_end);
- switch (state) {
- case Svc::MemoryState::Free:
- case Svc::MemoryState::Kernel:
- return is_in_region;
- case Svc::MemoryState::Io:
- case Svc::MemoryState::Static:
- case Svc::MemoryState::Code:
- case Svc::MemoryState::CodeData:
- case Svc::MemoryState::Shared:
- case Svc::MemoryState::AliasCode:
- case Svc::MemoryState::AliasCodeData:
- case Svc::MemoryState::Stack:
- case Svc::MemoryState::ThreadLocal:
- case Svc::MemoryState::Transfered:
- case Svc::MemoryState::SharedTransfered:
- case Svc::MemoryState::SharedCode:
- case Svc::MemoryState::GeneratedCode:
- case Svc::MemoryState::CodeOut:
- case Svc::MemoryState::Coverage:
- case Svc::MemoryState::Insecure:
- return is_in_region && !is_in_heap && !is_in_alias;
- case Svc::MemoryState::Normal:
- ASSERT(is_in_heap);
- return is_in_region && !is_in_alias;
- case Svc::MemoryState::Ipc:
- case Svc::MemoryState::NonSecureIpc:
- case Svc::MemoryState::NonDeviceIpc:
- ASSERT(is_in_alias);
- return is_in_region && !is_in_heap;
- default:
- return false;
- }
-}
-
-Result KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr) const {
- // Validate the states match expectation.
- R_UNLESS((info.m_state & state_mask) == state, ResultInvalidCurrentMemory);
- R_UNLESS((info.m_permission & perm_mask) == perm, ResultInvalidCurrentMemory);
- R_UNLESS((info.m_attribute & attr_mask) == attr, ResultInvalidCurrentMemory);
-
- R_SUCCEED();
-}
-
-Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr,
- size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr) const {
- ASSERT(this->IsLockedByCurrentThread());
-
- // Get information about the first block.
- const KProcessAddress last_addr = addr + size - 1;
- KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
- KMemoryInfo info = it->GetMemoryInfo();
-
- // If the start address isn't aligned, we need a block.
- const size_t blocks_for_start_align =
- (Common::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0;
-
- while (true) {
- // Validate against the provided masks.
- R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));
-
- // Break once we're done.
- if (last_addr <= info.GetLastAddress()) {
- break;
- }
-
- // Advance our iterator.
- it++;
- ASSERT(it != m_memory_block_manager.cend());
- info = it->GetMemoryInfo();
- }
-
- // If the end address isn't aligned, we need a block.
- const size_t blocks_for_end_align =
- (Common::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0;
-
- if (out_blocks_needed != nullptr) {
- *out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
- }
-
- R_SUCCEED();
-}
-
-Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
- KMemoryAttribute* out_attr, size_t* out_blocks_needed,
- KMemoryBlockManager::const_iterator it,
- KProcessAddress last_addr, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
- ASSERT(this->IsLockedByCurrentThread());
-
- // Get information about the first block.
- KMemoryInfo info = it->GetMemoryInfo();
-
- // Validate all blocks in the range have correct state.
- const KMemoryState first_state = info.m_state;
- const KMemoryPermission first_perm = info.m_permission;
- const KMemoryAttribute first_attr = info.m_attribute;
- while (true) {
- // Validate the current block.
- R_UNLESS(info.m_state == first_state, ResultInvalidCurrentMemory);
- R_UNLESS(info.m_permission == first_perm, ResultInvalidCurrentMemory);
- R_UNLESS((info.m_attribute | ignore_attr) == (first_attr | ignore_attr),
- ResultInvalidCurrentMemory);
-
- // Validate against the provided masks.
- R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));
-
- // Break once we're done.
- if (last_addr <= info.GetLastAddress()) {
- break;
- }
-
- // Advance our iterator.
- it++;
- ASSERT(it != m_memory_block_manager.cend());
- info = it->GetMemoryInfo();
- }
-
- // Write output state.
- if (out_state != nullptr) {
- *out_state = first_state;
- }
- if (out_perm != nullptr) {
- *out_perm = first_perm;
- }
- if (out_attr != nullptr) {
- *out_attr = static_cast<KMemoryAttribute>(first_attr & ~ignore_attr);
- }
-
- // If the end address isn't aligned, we need a block.
- if (out_blocks_needed != nullptr) {
- const size_t blocks_for_end_align =
- (Common::AlignDown(GetInteger(last_addr), PageSize) + PageSize != info.GetEndAddress())
- ? 1
- : 0;
- *out_blocks_needed = blocks_for_end_align;
- }
-
- R_SUCCEED();
-}
-
-Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
- KMemoryAttribute* out_attr, size_t* out_blocks_needed,
- KProcessAddress addr, size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
- ASSERT(this->IsLockedByCurrentThread());
-
- // Check memory state.
- const KProcessAddress last_addr = addr + size - 1;
- KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
- R_TRY(this->CheckMemoryState(out_state, out_perm, out_attr, out_blocks_needed, it, last_addr,
- state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr));
-
- // If the start address isn't aligned, we need a block.
- if (out_blocks_needed != nullptr &&
- Common::AlignDown(GetInteger(addr), PageSize) != it->GetAddress()) {
- ++(*out_blocks_needed);
- }
-
- R_SUCCEED();
-}
-
-Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_KPhysicalAddress,
- KProcessAddress addr, size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr, KMemoryPermission new_perm,
- KMemoryAttribute lock_attr) {
- // Validate basic preconditions.
- ASSERT((lock_attr & attr) == KMemoryAttribute::None);
- ASSERT((lock_attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
- KMemoryAttribute::None);
-
- // Validate the lock request.
- const size_t num_pages = size / PageSize;
- R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Check that the output page group is empty, if it exists.
- if (out_pg) {
- ASSERT(out_pg->GetNumPages() == 0);
- }
-
- // Check the state.
- KMemoryState old_state{};
- KMemoryPermission old_perm{};
- KMemoryAttribute old_attr{};
- size_t num_allocator_blocks{};
- R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
- std::addressof(old_attr), std::addressof(num_allocator_blocks),
- addr, size, state_mask | KMemoryState::FlagReferenceCounted,
- state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
- attr_mask, attr));
-
- // Get the physical address, if we're supposed to.
- if (out_KPhysicalAddress != nullptr) {
- ASSERT(this->GetPhysicalAddressLocked(out_KPhysicalAddress, addr));
- }
-
- // Make the page group, if we're supposed to.
- if (out_pg != nullptr) {
- R_TRY(this->MakePageGroup(*out_pg, addr, num_pages));
- }
-
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // Decide on new perm and attr.
- new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
- KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr | lock_attr);
-
- // Update permission, if we need to.
- if (new_perm != old_perm) {
- R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
- }
-
- // Apply the memory block updates.
- m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
- new_attr, KMemoryBlockDisableMergeAttribute::Locked,
- KMemoryBlockDisableMergeAttribute::None);
-
- // If we have an output page group, open.
- if (out_pg) {
- out_pg->Open();
- }
-
- R_SUCCEED();
-}
-
-Result KPageTable::UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr, KMemoryPermission new_perm,
- KMemoryAttribute lock_attr, const KPageGroup* pg) {
- // Validate basic preconditions.
- ASSERT((attr_mask & lock_attr) == lock_attr);
- ASSERT((attr & lock_attr) == lock_attr);
-
- // Validate the unlock request.
- const size_t num_pages = size / PageSize;
- R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Check the state.
- KMemoryState old_state{};
- KMemoryPermission old_perm{};
- KMemoryAttribute old_attr{};
- size_t num_allocator_blocks{};
- R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
- std::addressof(old_attr), std::addressof(num_allocator_blocks),
- addr, size, state_mask | KMemoryState::FlagReferenceCounted,
- state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
- attr_mask, attr));
-
- // Check the page group.
- if (pg != nullptr) {
- R_UNLESS(this->IsValidPageGroup(*pg, addr, num_pages), ResultInvalidMemoryRegion);
- }
-
- // Decide on new perm and attr.
- new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
- KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr & ~lock_attr);
-
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // Update permission, if we need to.
- if (new_perm != old_perm) {
- R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
- }
-
- // Apply the memory block updates.
- m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
- new_attr, KMemoryBlockDisableMergeAttribute::None,
- KMemoryBlockDisableMergeAttribute::Locked);
-
- R_SUCCEED();
-}
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 66f16faaf..5541bc13f 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -3,548 +3,14 @@
#pragma once
-#include <memory>
-
-#include "common/common_funcs.h"
-#include "common/page_table.h"
-#include "core/file_sys/program_metadata.h"
-#include "core/hle/kernel/k_dynamic_resource_manager.h"
-#include "core/hle/kernel/k_light_lock.h"
-#include "core/hle/kernel/k_memory_block.h"
-#include "core/hle/kernel/k_memory_block_manager.h"
-#include "core/hle/kernel/k_memory_layout.h"
-#include "core/hle/kernel/k_memory_manager.h"
-#include "core/hle/kernel/k_typed_address.h"
-#include "core/hle/result.h"
-#include "core/memory.h"
-
-namespace Core {
-class System;
-}
+#include "core/hle/kernel/k_page_table_base.h"
namespace Kernel {
-enum class DisableMergeAttribute : u8 {
- None = (0U << 0),
- DisableHead = (1U << 0),
- DisableHeadAndBody = (1U << 1),
- EnableHeadAndBody = (1U << 2),
- DisableTail = (1U << 3),
- EnableTail = (1U << 4),
- EnableAndMergeHeadBodyTail = (1U << 5),
- EnableHeadBodyTail = EnableHeadAndBody | EnableTail,
- DisableHeadBodyTail = DisableHeadAndBody | DisableTail,
-};
-
-struct KPageProperties {
- KMemoryPermission perm;
- bool io;
- bool uncached;
- DisableMergeAttribute disable_merge_attributes;
-};
-static_assert(std::is_trivial_v<KPageProperties>);
-static_assert(sizeof(KPageProperties) == sizeof(u32));
-
-class KBlockInfoManager;
-class KMemoryBlockManager;
-class KResourceLimit;
-class KSystemResource;
-
-class KPageTable final {
-protected:
- struct PageLinkedList;
-
-public:
- enum class ICacheInvalidationStrategy : u32 { InvalidateRange, InvalidateAll };
-
- YUZU_NON_COPYABLE(KPageTable);
- YUZU_NON_MOVEABLE(KPageTable);
-
- explicit KPageTable(Core::System& system_);
- ~KPageTable();
-
- Result InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr,
- bool enable_das_merge, bool from_back, KMemoryManager::Pool pool,
- KProcessAddress code_addr, size_t code_size,
- KSystemResource* system_resource, KResourceLimit* resource_limit,
- Core::Memory::Memory& memory);
-
- void Finalize();
-
- Result MapProcessCode(KProcessAddress addr, size_t pages_count, KMemoryState state,
- KMemoryPermission perm);
- Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
- Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
- ICacheInvalidationStrategy icache_invalidation_strategy);
- Result UnmapProcessMemory(KProcessAddress dst_addr, size_t size, KPageTable& src_page_table,
- KProcessAddress src_addr);
- Result MapPhysicalMemory(KProcessAddress addr, size_t size);
- Result UnmapPhysicalMemory(KProcessAddress addr, size_t size);
- Result MapMemory(KProcessAddress dst_addr, KProcessAddress src_addr, size_t size);
- Result UnmapMemory(KProcessAddress dst_addr, KProcessAddress src_addr, size_t size);
- Result SetProcessMemoryPermission(KProcessAddress addr, size_t size,
- Svc::MemoryPermission svc_perm);
- KMemoryInfo QueryInfo(KProcessAddress addr);
- Result SetMemoryPermission(KProcessAddress addr, size_t size, Svc::MemoryPermission perm);
- Result SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr);
- Result SetMaxHeapSize(size_t size);
- Result SetHeapSize(u64* out, size_t size);
- Result LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, size_t size,
- KMemoryPermission perm, bool is_aligned, bool check_heap);
- Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap);
-
- Result UnlockForDeviceAddressSpace(KProcessAddress addr, size_t size);
-
- Result LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, size_t size);
- Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size);
-
- Result SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr,
- KPageTable& src_page_table, KMemoryPermission test_perm,
- KMemoryState dst_state, bool send);
- Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state);
- Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state);
-
- Result LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size,
- KMemoryPermission perm);
- Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup& pg);
- Result LockForCodeMemory(KPageGroup* out, KProcessAddress addr, size_t size);
- Result UnlockForCodeMemory(KProcessAddress addr, size_t size, const KPageGroup& pg);
- Result MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages,
- KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr);
-
- Common::PageTable& PageTableImpl() {
- return *m_page_table_impl;
- }
-
- const Common::PageTable& PageTableImpl() const {
- return *m_page_table_impl;
- }
-
- KBlockInfoManager* GetBlockInfoManager() {
- return m_block_info_manager;
- }
-
- Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
- KPhysicalAddress phys_addr, KProcessAddress region_start,
- size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
- R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start,
- region_num_pages, state, perm));
- }
-
- Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
- KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
- R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
- this->GetRegionAddress(state),
- this->GetRegionSize(state) / PageSize, state, perm));
- }
-
- Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state,
- KMemoryPermission perm) {
- R_RETURN(this->MapPages(out_addr, num_pages, PageSize, 0, false,
- this->GetRegionAddress(state),
- this->GetRegionSize(state) / PageSize, state, perm));
- }
-
- Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
- KMemoryPermission perm);
- Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state);
-
- Result MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
- KProcessAddress region_start, size_t region_num_pages, KMemoryState state,
- KMemoryPermission perm);
- Result MapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state,
- KMemoryPermission perm);
- Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state);
- void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
- const KPageGroup& pg);
-
- KProcessAddress GetRegionAddress(Svc::MemoryState state) const;
- size_t GetRegionSize(Svc::MemoryState state) const;
- bool CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const;
-
- KProcessAddress GetRegionAddress(KMemoryState state) const {
- return this->GetRegionAddress(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
- }
- size_t GetRegionSize(KMemoryState state) const {
- return this->GetRegionSize(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
- }
- bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
- return this->CanContain(addr, size,
- static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
- }
-
-protected:
- struct PageLinkedList {
- private:
- struct Node {
- Node* m_next;
- std::array<u8, PageSize - sizeof(Node*)> m_buffer;
- };
-
- public:
- constexpr PageLinkedList() = default;
-
- void Push(Node* n) {
- ASSERT(Common::IsAligned(reinterpret_cast<uintptr_t>(n), PageSize));
- n->m_next = m_root;
- m_root = n;
- }
-
- void Push(Core::Memory::Memory& memory, KVirtualAddress addr) {
- this->Push(memory.GetPointer<Node>(GetInteger(addr)));
- }
-
- Node* Peek() const {
- return m_root;
- }
-
- Node* Pop() {
- Node* const r = m_root;
-
- m_root = r->m_next;
- r->m_next = nullptr;
-
- return r;
- }
-
- private:
- Node* m_root{};
- };
- static_assert(std::is_trivially_destructible<PageLinkedList>::value);
-
-private:
- enum class OperationType : u32 {
- Map = 0,
- MapGroup = 1,
- MapFirstGroup = 2,
- Unmap = 3,
- ChangePermissions = 4,
- ChangePermissionsAndRefresh = 5,
- ChangePermissionsAndRefreshAndFlush = 6,
- Separate = 7,
- };
-
- static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr =
- KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
-
- Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
- KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start,
- size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
- bool IsRegionContiguous(KProcessAddress addr, u64 size) const;
- void AddRegionToPages(KProcessAddress start, size_t num_pages, KPageGroup& page_linked_list);
- KMemoryInfo QueryInfoImpl(KProcessAddress addr);
- KProcessAddress AllocateVirtualMemory(KProcessAddress start, size_t region_num_pages,
- u64 needed_num_pages, size_t align);
- Result Operate(KProcessAddress addr, size_t num_pages, const KPageGroup& page_group,
- OperationType operation);
- Result Operate(KProcessAddress addr, size_t num_pages, KMemoryPermission perm,
- OperationType operation, KPhysicalAddress map_addr = 0);
- void FinalizeUpdate(PageLinkedList* page_list);
-
- KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
- size_t num_pages, size_t alignment, size_t offset,
- size_t guard_pages);
-
- Result CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr, size_t size,
- KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
- Result CheckMemoryStateContiguous(KProcessAddress addr, size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr) const {
- R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
- perm, attr_mask, attr));
- }
-
- Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
- Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
- KMemoryAttribute* out_attr, size_t* out_blocks_needed,
- KMemoryBlockManager::const_iterator it, KProcessAddress last_addr,
- KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr,
- KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
- Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
- KMemoryAttribute* out_attr, size_t* out_blocks_needed,
- KProcessAddress addr, size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr,
- KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
- Result CheckMemoryState(size_t* out_blocks_needed, KProcessAddress addr, size_t size,
- KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr,
- KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
- R_RETURN(CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
- state_mask, state, perm_mask, perm, attr_mask, attr,
- ignore_attr));
- }
- Result CheckMemoryState(KProcessAddress addr, size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr,
- KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
- R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
- attr_mask, attr, ignore_attr));
- }
-
- Result LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_KPhysicalAddress,
- KProcessAddress addr, size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr, KMemoryPermission new_perm,
- KMemoryAttribute lock_attr);
- Result UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr,
- KMemoryPermission new_perm, KMemoryAttribute lock_attr,
- const KPageGroup* pg);
-
- Result MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages);
- bool IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, size_t num_pages);
-
- bool IsLockedByCurrentThread() const {
- return m_general_lock.IsLockedByCurrentThread();
- }
-
- bool IsHeapPhysicalAddress(const KMemoryLayout& layout, KPhysicalAddress phys_addr) {
- ASSERT(this->IsLockedByCurrentThread());
-
- return layout.IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr);
- }
-
- bool GetPhysicalAddressLocked(KPhysicalAddress* out, KProcessAddress virt_addr) const {
- ASSERT(this->IsLockedByCurrentThread());
-
- *out = GetPhysicalAddr(virt_addr);
-
- return *out != 0;
- }
-
- Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed,
- KProcessAddress address, size_t size, KMemoryPermission test_perm,
- KMemoryState dst_state);
- Result SetupForIpcServer(KProcessAddress* out_addr, size_t size, KProcessAddress src_addr,
- KMemoryPermission test_perm, KMemoryState dst_state,
- KPageTable& src_page_table, bool send);
- void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, KProcessAddress address,
- size_t size, KMemoryPermission prot_perm);
-
- Result AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
- size_t num_pages, KMemoryPermission perm);
- Result MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
- const KPageGroup& pg, const KPageProperties properties, bool reuse_ll);
-
- mutable KLightLock m_general_lock;
- mutable KLightLock m_map_physical_memory_lock;
-
-public:
- constexpr KProcessAddress GetAddressSpaceStart() const {
- return m_address_space_start;
- }
- constexpr KProcessAddress GetAddressSpaceEnd() const {
- return m_address_space_end;
- }
- constexpr size_t GetAddressSpaceSize() const {
- return m_address_space_end - m_address_space_start;
- }
- constexpr KProcessAddress GetHeapRegionStart() const {
- return m_heap_region_start;
- }
- constexpr KProcessAddress GetHeapRegionEnd() const {
- return m_heap_region_end;
- }
- constexpr size_t GetHeapRegionSize() const {
- return m_heap_region_end - m_heap_region_start;
- }
- constexpr KProcessAddress GetAliasRegionStart() const {
- return m_alias_region_start;
- }
- constexpr KProcessAddress GetAliasRegionEnd() const {
- return m_alias_region_end;
- }
- constexpr size_t GetAliasRegionSize() const {
- return m_alias_region_end - m_alias_region_start;
- }
- constexpr KProcessAddress GetStackRegionStart() const {
- return m_stack_region_start;
- }
- constexpr KProcessAddress GetStackRegionEnd() const {
- return m_stack_region_end;
- }
- constexpr size_t GetStackRegionSize() const {
- return m_stack_region_end - m_stack_region_start;
- }
- constexpr KProcessAddress GetKernelMapRegionStart() const {
- return m_kernel_map_region_start;
- }
- constexpr KProcessAddress GetKernelMapRegionEnd() const {
- return m_kernel_map_region_end;
- }
- constexpr KProcessAddress GetCodeRegionStart() const {
- return m_code_region_start;
- }
- constexpr KProcessAddress GetCodeRegionEnd() const {
- return m_code_region_end;
- }
- constexpr KProcessAddress GetAliasCodeRegionStart() const {
- return m_alias_code_region_start;
- }
- constexpr KProcessAddress GetAliasCodeRegionEnd() const {
- return m_alias_code_region_end;
- }
- constexpr size_t GetAliasCodeRegionSize() const {
- return m_alias_code_region_end - m_alias_code_region_start;
- }
- size_t GetNormalMemorySize() const {
- KScopedLightLock lk(m_general_lock);
- return GetHeapSize() + m_mapped_physical_memory_size;
- }
- constexpr size_t GetAddressSpaceWidth() const {
- return m_address_space_width;
- }
- constexpr size_t GetHeapSize() const {
- return m_current_heap_end - m_heap_region_start;
- }
- constexpr size_t GetNumGuardPages() const {
- return IsKernel() ? 1 : 4;
- }
- KPhysicalAddress GetPhysicalAddr(KProcessAddress addr) const {
- const auto backing_addr = m_page_table_impl->backing_addr[addr >> PageBits];
- ASSERT(backing_addr);
- return backing_addr + GetInteger(addr);
- }
- constexpr bool Contains(KProcessAddress addr) const {
- return m_address_space_start <= addr && addr <= m_address_space_end - 1;
- }
- constexpr bool Contains(KProcessAddress addr, size_t size) const {
- return m_address_space_start <= addr && addr < addr + size &&
- addr + size - 1 <= m_address_space_end - 1;
- }
- constexpr bool IsInAliasRegion(KProcessAddress addr, size_t size) const {
- return this->Contains(addr, size) && m_alias_region_start <= addr &&
- addr + size - 1 <= m_alias_region_end - 1;
- }
- constexpr bool IsInHeapRegion(KProcessAddress addr, size_t size) const {
- return this->Contains(addr, size) && m_heap_region_start <= addr &&
- addr + size - 1 <= m_heap_region_end - 1;
- }
-
+class KPageTable final : public KPageTableBase {
public:
- static KVirtualAddress GetLinearMappedVirtualAddress(const KMemoryLayout& layout,
- KPhysicalAddress addr) {
- return layout.GetLinearVirtualAddress(addr);
- }
-
- static KPhysicalAddress GetLinearMappedPhysicalAddress(const KMemoryLayout& layout,
- KVirtualAddress addr) {
- return layout.GetLinearPhysicalAddress(addr);
- }
-
- static KVirtualAddress GetHeapVirtualAddress(const KMemoryLayout& layout,
- KPhysicalAddress addr) {
- return GetLinearMappedVirtualAddress(layout, addr);
- }
-
- static KPhysicalAddress GetHeapPhysicalAddress(const KMemoryLayout& layout,
- KVirtualAddress addr) {
- return GetLinearMappedPhysicalAddress(layout, addr);
- }
-
- static KVirtualAddress GetPageTableVirtualAddress(const KMemoryLayout& layout,
- KPhysicalAddress addr) {
- return GetLinearMappedVirtualAddress(layout, addr);
- }
-
- static KPhysicalAddress GetPageTablePhysicalAddress(const KMemoryLayout& layout,
- KVirtualAddress addr) {
- return GetLinearMappedPhysicalAddress(layout, addr);
- }
-
-private:
- constexpr bool IsKernel() const {
- return m_is_kernel;
- }
- constexpr bool IsAslrEnabled() const {
- return m_enable_aslr;
- }
-
- constexpr bool ContainsPages(KProcessAddress addr, size_t num_pages) const {
- return (m_address_space_start <= addr) &&
- (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) &&
- (addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
- }
-
-private:
- class KScopedPageTableUpdater {
- private:
- KPageTable* m_pt{};
- PageLinkedList m_ll;
-
- public:
- explicit KScopedPageTableUpdater(KPageTable* pt) : m_pt(pt) {}
- explicit KScopedPageTableUpdater(KPageTable& pt) : KScopedPageTableUpdater(&pt) {}
- ~KScopedPageTableUpdater() {
- m_pt->FinalizeUpdate(this->GetPageList());
- }
-
- PageLinkedList* GetPageList() {
- return std::addressof(m_ll);
- }
- };
-
-private:
- KProcessAddress m_address_space_start{};
- KProcessAddress m_address_space_end{};
- KProcessAddress m_heap_region_start{};
- KProcessAddress m_heap_region_end{};
- KProcessAddress m_current_heap_end{};
- KProcessAddress m_alias_region_start{};
- KProcessAddress m_alias_region_end{};
- KProcessAddress m_stack_region_start{};
- KProcessAddress m_stack_region_end{};
- KProcessAddress m_kernel_map_region_start{};
- KProcessAddress m_kernel_map_region_end{};
- KProcessAddress m_code_region_start{};
- KProcessAddress m_code_region_end{};
- KProcessAddress m_alias_code_region_start{};
- KProcessAddress m_alias_code_region_end{};
-
- size_t m_max_heap_size{};
- size_t m_mapped_physical_memory_size{};
- size_t m_mapped_unsafe_physical_memory{};
- size_t m_mapped_insecure_memory{};
- size_t m_mapped_ipc_server_memory{};
- size_t m_address_space_width{};
-
- KMemoryBlockManager m_memory_block_manager;
- u32 m_allocate_option{};
-
- bool m_is_kernel{};
- bool m_enable_aslr{};
- bool m_enable_device_address_space_merge{};
-
- KMemoryBlockSlabManager* m_memory_block_slab_manager{};
- KBlockInfoManager* m_block_info_manager{};
- KResourceLimit* m_resource_limit{};
-
- u32 m_heap_fill_value{};
- u32 m_ipc_fill_value{};
- u32 m_stack_fill_value{};
- const KMemoryRegion* m_cached_physical_heap_region{};
-
- KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application};
- KMemoryManager::Direction m_allocation_option{KMemoryManager::Direction::FromFront};
-
- std::unique_ptr<Common::PageTable> m_page_table_impl;
-
- Core::System& m_system;
- KernelCore& m_kernel;
- Core::Memory::Memory* m_memory{};
+ explicit KPageTable(KernelCore& kernel) : KPageTableBase(kernel) {}
+ ~KPageTable() = default;
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_page_table_base.cpp b/src/core/hle/kernel/k_page_table_base.cpp
new file mode 100644
index 000000000..6a57ad55c
--- /dev/null
+++ b/src/core/hle/kernel/k_page_table_base.cpp
@@ -0,0 +1,5716 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/scope_exit.h"
+#include "common/settings.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_address_space_info.h"
+#include "core/hle/kernel/k_page_table_base.h"
+#include "core/hle/kernel/k_scoped_resource_reservation.h"
+#include "core/hle/kernel/k_system_resource.h"
+
+namespace Kernel {
+
+namespace {
+
+class KScopedLightLockPair {
+ YUZU_NON_COPYABLE(KScopedLightLockPair);
+ YUZU_NON_MOVEABLE(KScopedLightLockPair);
+
+private:
+ KLightLock* m_lower;
+ KLightLock* m_upper;
+
+public:
+ KScopedLightLockPair(KLightLock& lhs, KLightLock& rhs) {
+ // Ensure our locks are in a consistent order.
+ if (std::addressof(lhs) <= std::addressof(rhs)) {
+ m_lower = std::addressof(lhs);
+ m_upper = std::addressof(rhs);
+ } else {
+ m_lower = std::addressof(rhs);
+ m_upper = std::addressof(lhs);
+ }
+
+ // Acquire both locks.
+ m_lower->Lock();
+ if (m_lower != m_upper) {
+ m_upper->Lock();
+ }
+ }
+
+ ~KScopedLightLockPair() {
+ // Unlock the upper lock.
+ if (m_upper != nullptr && m_upper != m_lower) {
+ m_upper->Unlock();
+ }
+
+ // Unlock the lower lock.
+ if (m_lower != nullptr) {
+ m_lower->Unlock();
+ }
+ }
+
+public:
+ // Utility.
+ void TryUnlockHalf(KLightLock& lock) {
+        // Only allow unlocking if the lock is one half of the pair.
+ if (m_lower != m_upper) {
+ // We want to be sure the lock is one we own.
+ if (m_lower == std::addressof(lock)) {
+ lock.Unlock();
+ m_lower = nullptr;
+ } else if (m_upper == std::addressof(lock)) {
+ lock.Unlock();
+ m_upper = nullptr;
+ }
+ }
+ }
+};
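+// KScopedLightLockPair always acquires the two locks in increasing address
+// order (and tolerates lhs == rhs), so two threads locking the same pair of
+// tables in opposite argument order cannot deadlock. A minimal usage sketch
+// (hypothetical page tables "src" and "dst"; the member name matches the lock
+// used throughout this file):
+//
+//   KScopedLightLockPair lk(src.m_general_lock, dst.m_general_lock);
+//   // ... operate on both tables; both locks release on scope exit ...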
+
+template <typename AddressType>
+void InvalidateInstructionCache(Core::System& system, AddressType addr, u64 size) {
+ system.InvalidateCpuInstructionCacheRange(GetInteger(addr), size);
+}
+
+template <typename AddressType>
+Result InvalidateDataCache(AddressType addr, u64 size) {
+ R_SUCCEED();
+}
+
+template <typename AddressType>
+Result StoreDataCache(AddressType addr, u64 size) {
+ R_SUCCEED();
+}
+
+template <typename AddressType>
+Result FlushDataCache(AddressType addr, u64 size) {
+ R_SUCCEED();
+}
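+
+// Note: on real hardware these would perform data cache maintenance over the
+// given virtual address range; under emulation, guest memory is backed by host
+// memory that needs no explicit maintenance, so the data-cache variants simply
+// report success.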
+
+} // namespace
+
+void KPageTableBase::MemoryRange::Open() {
+ // If the range contains heap pages, open them.
+ if (this->IsHeap()) {
+ m_kernel.MemoryManager().Open(this->GetAddress(), this->GetSize() / PageSize);
+ }
+}
+
+void KPageTableBase::MemoryRange::Close() {
+ // If the range contains heap pages, close them.
+ if (this->IsHeap()) {
+ m_kernel.MemoryManager().Close(this->GetAddress(), this->GetSize() / PageSize);
+ }
+}
+
+KPageTableBase::KPageTableBase(KernelCore& kernel)
+ : m_kernel(kernel), m_system(kernel.System()), m_general_lock(kernel),
+ m_map_physical_memory_lock(kernel), m_device_map_lock(kernel) {}
+KPageTableBase::~KPageTableBase() = default;
+
+Result KPageTableBase::InitializeForKernel(bool is_64_bit, KVirtualAddress start,
+ KVirtualAddress end, Core::Memory::Memory& memory) {
+ // Initialize our members.
+ m_address_space_width =
+ static_cast<u32>(is_64_bit ? Common::BitSize<u64>() : Common::BitSize<u32>());
+ m_address_space_start = KProcessAddress(GetInteger(start));
+ m_address_space_end = KProcessAddress(GetInteger(end));
+ m_is_kernel = true;
+ m_enable_aslr = true;
+ m_enable_device_address_space_merge = false;
+
+ m_heap_region_start = 0;
+ m_heap_region_end = 0;
+ m_current_heap_end = 0;
+ m_alias_region_start = 0;
+ m_alias_region_end = 0;
+ m_stack_region_start = 0;
+ m_stack_region_end = 0;
+ m_kernel_map_region_start = 0;
+ m_kernel_map_region_end = 0;
+ m_alias_code_region_start = 0;
+ m_alias_code_region_end = 0;
+ m_code_region_start = 0;
+ m_code_region_end = 0;
+ m_max_heap_size = 0;
+ m_mapped_physical_memory_size = 0;
+ m_mapped_unsafe_physical_memory = 0;
+ m_mapped_insecure_memory = 0;
+ m_mapped_ipc_server_memory = 0;
+
+ m_memory_block_slab_manager =
+ m_kernel.GetSystemSystemResource().GetMemoryBlockSlabManagerPointer();
+ m_block_info_manager = m_kernel.GetSystemSystemResource().GetBlockInfoManagerPointer();
+ m_resource_limit = m_kernel.GetSystemResourceLimit();
+
+ m_allocate_option = KMemoryManager::EncodeOption(KMemoryManager::Pool::System,
+ KMemoryManager::Direction::FromFront);
+ m_heap_fill_value = MemoryFillValue_Zero;
+ m_ipc_fill_value = MemoryFillValue_Zero;
+ m_stack_fill_value = MemoryFillValue_Zero;
+
+ m_cached_physical_linear_region = nullptr;
+ m_cached_physical_heap_region = nullptr;
+
+ // Initialize our implementation.
+ m_impl = std::make_unique<Common::PageTable>();
+ m_impl->Resize(m_address_space_width, PageBits);
+
+ // Set the tracking memory.
+ m_memory = std::addressof(memory);
+
+ // Initialize our memory block manager.
+ R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end,
+ m_memory_block_slab_manager));
+}
+
+Result KPageTableBase::InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr,
+ bool enable_das_merge, bool from_back,
+ KMemoryManager::Pool pool, KProcessAddress code_address,
+ size_t code_size, KSystemResource* system_resource,
+ KResourceLimit* resource_limit,
+ Core::Memory::Memory& memory) {
+ // Calculate region extents.
+ const size_t as_width = GetAddressSpaceWidth(as_type);
+ const KProcessAddress start = 0;
+ const KProcessAddress end = (1ULL << as_width);
+
+ // Validate the region.
+ ASSERT(start <= code_address);
+ ASSERT(code_address < code_address + code_size);
+ ASSERT(code_address + code_size - 1 <= end - 1);
+
+ // Define helpers.
+ auto GetSpaceStart = [&](KAddressSpaceInfo::Type type) {
+ return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type);
+ };
+ auto GetSpaceSize = [&](KAddressSpaceInfo::Type type) {
+ return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type);
+ };
+
+ // Set our bit width and heap/alias sizes.
+ m_address_space_width = static_cast<u32>(GetAddressSpaceWidth(as_type));
+ size_t alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias);
+ size_t heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap);
+
+ // Adjust heap/alias size if we don't have an alias region.
+ if ((as_type & Svc::CreateProcessFlag::AddressSpaceMask) ==
+ Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias) {
+ heap_region_size += alias_region_size;
+ alias_region_size = 0;
+ }
+
+ // Set code regions and determine remaining sizes.
+ KProcessAddress process_code_start;
+ KProcessAddress process_code_end;
+ size_t stack_region_size;
+ size_t kernel_map_region_size;
+ if (m_address_space_width == 39) {
+ alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias);
+ heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap);
+ stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Stack);
+ kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
+ m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit);
+ m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit);
+ m_alias_code_region_start = m_code_region_start;
+ m_alias_code_region_end = m_code_region_end;
+ process_code_start = Common::AlignDown(GetInteger(code_address), RegionAlignment);
+ process_code_end = Common::AlignUp(GetInteger(code_address) + code_size, RegionAlignment);
+ } else {
+ stack_region_size = 0;
+ kernel_map_region_size = 0;
+ m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall);
+ m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
+ m_stack_region_start = m_code_region_start;
+ m_alias_code_region_start = m_code_region_start;
+ m_alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) +
+ GetSpaceSize(KAddressSpaceInfo::Type::MapLarge);
+ m_stack_region_end = m_code_region_end;
+ m_kernel_map_region_start = m_code_region_start;
+ m_kernel_map_region_end = m_code_region_end;
+ process_code_start = m_code_region_start;
+ process_code_end = m_code_region_end;
+ }
+
+ // Set other basic fields.
+ m_enable_aslr = enable_aslr;
+ m_enable_device_address_space_merge = enable_das_merge;
+ m_address_space_start = start;
+ m_address_space_end = end;
+ m_is_kernel = false;
+ m_memory_block_slab_manager = system_resource->GetMemoryBlockSlabManagerPointer();
+ m_block_info_manager = system_resource->GetBlockInfoManagerPointer();
+ m_resource_limit = resource_limit;
+
+    // Determine the area in which we can place the remaining
+    // (heap/alias/stack/kernel-map) regions.
+ KProcessAddress alloc_start;
+ size_t alloc_size;
+ if ((GetInteger(process_code_start) - GetInteger(m_code_region_start)) >=
+ (GetInteger(end) - GetInteger(process_code_end))) {
+ alloc_start = m_code_region_start;
+ alloc_size = GetInteger(process_code_start) - GetInteger(m_code_region_start);
+ } else {
+ alloc_start = process_code_end;
+ alloc_size = GetInteger(end) - GetInteger(process_code_end);
+ }
+ const size_t needed_size =
+ (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size);
+ R_UNLESS(alloc_size >= needed_size, ResultOutOfMemory);
+
+ const size_t remaining_size = alloc_size - needed_size;
+
+ // Determine random placements for each region.
+ size_t alias_rnd = 0, heap_rnd = 0, stack_rnd = 0, kmap_rnd = 0;
+ if (enable_aslr) {
+ alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
+ RegionAlignment;
+ heap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
+ RegionAlignment;
+ stack_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
+ RegionAlignment;
+ kmap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
+ RegionAlignment;
+ }
+
+ // Setup heap and alias regions.
+ m_alias_region_start = alloc_start + alias_rnd;
+ m_alias_region_end = m_alias_region_start + alias_region_size;
+ m_heap_region_start = alloc_start + heap_rnd;
+ m_heap_region_end = m_heap_region_start + heap_region_size;
+
+ if (alias_rnd <= heap_rnd) {
+ m_heap_region_start += alias_region_size;
+ m_heap_region_end += alias_region_size;
+ } else {
+ m_alias_region_start += heap_region_size;
+ m_alias_region_end += heap_region_size;
+ }
+
+ // Setup stack region.
+ if (stack_region_size) {
+ m_stack_region_start = alloc_start + stack_rnd;
+ m_stack_region_end = m_stack_region_start + stack_region_size;
+
+ if (alias_rnd < stack_rnd) {
+ m_stack_region_start += alias_region_size;
+ m_stack_region_end += alias_region_size;
+ } else {
+ m_alias_region_start += stack_region_size;
+ m_alias_region_end += stack_region_size;
+ }
+
+ if (heap_rnd < stack_rnd) {
+ m_stack_region_start += heap_region_size;
+ m_stack_region_end += heap_region_size;
+ } else {
+ m_heap_region_start += stack_region_size;
+ m_heap_region_end += stack_region_size;
+ }
+ }
+
+ // Setup kernel map region.
+ if (kernel_map_region_size) {
+ m_kernel_map_region_start = alloc_start + kmap_rnd;
+ m_kernel_map_region_end = m_kernel_map_region_start + kernel_map_region_size;
+
+ if (alias_rnd < kmap_rnd) {
+ m_kernel_map_region_start += alias_region_size;
+ m_kernel_map_region_end += alias_region_size;
+ } else {
+ m_alias_region_start += kernel_map_region_size;
+ m_alias_region_end += kernel_map_region_size;
+ }
+
+ if (heap_rnd < kmap_rnd) {
+ m_kernel_map_region_start += heap_region_size;
+ m_kernel_map_region_end += heap_region_size;
+ } else {
+ m_heap_region_start += kernel_map_region_size;
+ m_heap_region_end += kernel_map_region_size;
+ }
+
+ if (stack_region_size) {
+ if (stack_rnd < kmap_rnd) {
+ m_kernel_map_region_start += stack_region_size;
+ m_kernel_map_region_end += stack_region_size;
+ } else {
+ m_stack_region_start += kernel_map_region_size;
+ m_stack_region_end += kernel_map_region_size;
+ }
+ }
+ }
+
+ // Set heap and fill members.
+ m_current_heap_end = m_heap_region_start;
+ m_max_heap_size = 0;
+ m_mapped_physical_memory_size = 0;
+ m_mapped_unsafe_physical_memory = 0;
+ m_mapped_insecure_memory = 0;
+ m_mapped_ipc_server_memory = 0;
+
+    // Debug memory fill is not currently wired up; on hardware this would be
+    // KTargetSystem::IsDebugMemoryFillEnabled().
+    const bool fill_memory = false;
+ m_heap_fill_value = fill_memory ? MemoryFillValue_Heap : MemoryFillValue_Zero;
+ m_ipc_fill_value = fill_memory ? MemoryFillValue_Ipc : MemoryFillValue_Zero;
+ m_stack_fill_value = fill_memory ? MemoryFillValue_Stack : MemoryFillValue_Zero;
+
+ // Set allocation option.
+ m_allocate_option =
+ KMemoryManager::EncodeOption(pool, from_back ? KMemoryManager::Direction::FromBack
+ : KMemoryManager::Direction::FromFront);
+
+    // Ensure that the regions we chose are inside our address space.
+ auto IsInAddressSpace = [&](KProcessAddress addr) {
+ return m_address_space_start <= addr && addr <= m_address_space_end;
+ };
+ ASSERT(IsInAddressSpace(m_alias_region_start));
+ ASSERT(IsInAddressSpace(m_alias_region_end));
+ ASSERT(IsInAddressSpace(m_heap_region_start));
+ ASSERT(IsInAddressSpace(m_heap_region_end));
+ ASSERT(IsInAddressSpace(m_stack_region_start));
+ ASSERT(IsInAddressSpace(m_stack_region_end));
+ ASSERT(IsInAddressSpace(m_kernel_map_region_start));
+ ASSERT(IsInAddressSpace(m_kernel_map_region_end));
+
+ // Ensure that we selected regions that don't overlap.
+ const KProcessAddress alias_start = m_alias_region_start;
+ const KProcessAddress alias_last = m_alias_region_end - 1;
+ const KProcessAddress heap_start = m_heap_region_start;
+ const KProcessAddress heap_last = m_heap_region_end - 1;
+ const KProcessAddress stack_start = m_stack_region_start;
+ const KProcessAddress stack_last = m_stack_region_end - 1;
+ const KProcessAddress kmap_start = m_kernel_map_region_start;
+ const KProcessAddress kmap_last = m_kernel_map_region_end - 1;
+ ASSERT(alias_last < heap_start || heap_last < alias_start);
+ ASSERT(alias_last < stack_start || stack_last < alias_start);
+ ASSERT(alias_last < kmap_start || kmap_last < alias_start);
+ ASSERT(heap_last < stack_start || stack_last < heap_start);
+ ASSERT(heap_last < kmap_start || kmap_last < heap_start);
+
+ // Initialize our implementation.
+ m_impl = std::make_unique<Common::PageTable>();
+ m_impl->Resize(m_address_space_width, PageBits);
+
+ // Set the tracking memory.
+ m_memory = std::addressof(memory);
+
+ // Initialize our memory block manager.
+ R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end,
+ m_memory_block_slab_manager));
+}
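+
+// Region-placement note for InitializeForProcess: each region draws an
+// independent random slide in [0, remaining_size] (RegionAlignment-aligned),
+// and the pairwise adjustments above push whichever region drew the larger
+// slide past the other region's full size. For example, with heap_rnd = 0x2000
+// and alias_rnd = 0x5000, the alias region is additionally shifted up by
+// heap_region_size; applied to every pair, this keeps all regions disjoint
+// while their relative order remains as random as the slides.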
+
+void KPageTableBase::Finalize() {
+ auto HostUnmapCallback = [&](KProcessAddress addr, u64 size) {
+ if (Settings::IsFastmemEnabled()) {
+ m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size);
+ }
+ };
+
+ // Finalize memory blocks.
+ m_memory_block_manager.Finalize(m_memory_block_slab_manager, std::move(HostUnmapCallback));
+
+ // Free any unsafe mapped memory.
+ if (m_mapped_unsafe_physical_memory) {
+ UNIMPLEMENTED();
+ }
+
+ // Release any insecure mapped memory.
+ if (m_mapped_insecure_memory) {
+ if (auto* const insecure_resource_limit =
+ KSystemControl::GetInsecureMemoryResourceLimit(m_kernel);
+ insecure_resource_limit != nullptr) {
+ insecure_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax,
+ m_mapped_insecure_memory);
+ }
+ }
+
+ // Release any ipc server memory.
+ if (m_mapped_ipc_server_memory) {
+ m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax,
+ m_mapped_ipc_server_memory);
+ }
+
+ // Close the backing page table, as the destructor is not called for guest objects.
+ m_impl.reset();
+}
+
+KProcessAddress KPageTableBase::GetRegionAddress(Svc::MemoryState state) const {
+ switch (state) {
+ case Svc::MemoryState::Free:
+ case Svc::MemoryState::Kernel:
+ return m_address_space_start;
+ case Svc::MemoryState::Normal:
+ return m_heap_region_start;
+ case Svc::MemoryState::Ipc:
+ case Svc::MemoryState::NonSecureIpc:
+ case Svc::MemoryState::NonDeviceIpc:
+ return m_alias_region_start;
+ case Svc::MemoryState::Stack:
+ return m_stack_region_start;
+ case Svc::MemoryState::Static:
+ case Svc::MemoryState::ThreadLocal:
+ return m_kernel_map_region_start;
+ case Svc::MemoryState::Io:
+ case Svc::MemoryState::Shared:
+ case Svc::MemoryState::AliasCode:
+ case Svc::MemoryState::AliasCodeData:
+ case Svc::MemoryState::Transfered:
+ case Svc::MemoryState::SharedTransfered:
+ case Svc::MemoryState::SharedCode:
+ case Svc::MemoryState::GeneratedCode:
+ case Svc::MemoryState::CodeOut:
+ case Svc::MemoryState::Coverage:
+ case Svc::MemoryState::Insecure:
+ return m_alias_code_region_start;
+ case Svc::MemoryState::Code:
+ case Svc::MemoryState::CodeData:
+ return m_code_region_start;
+ default:
+ UNREACHABLE();
+ }
+}
+
+size_t KPageTableBase::GetRegionSize(Svc::MemoryState state) const {
+ switch (state) {
+ case Svc::MemoryState::Free:
+ case Svc::MemoryState::Kernel:
+ return m_address_space_end - m_address_space_start;
+ case Svc::MemoryState::Normal:
+ return m_heap_region_end - m_heap_region_start;
+ case Svc::MemoryState::Ipc:
+ case Svc::MemoryState::NonSecureIpc:
+ case Svc::MemoryState::NonDeviceIpc:
+ return m_alias_region_end - m_alias_region_start;
+ case Svc::MemoryState::Stack:
+ return m_stack_region_end - m_stack_region_start;
+ case Svc::MemoryState::Static:
+ case Svc::MemoryState::ThreadLocal:
+ return m_kernel_map_region_end - m_kernel_map_region_start;
+ case Svc::MemoryState::Io:
+ case Svc::MemoryState::Shared:
+ case Svc::MemoryState::AliasCode:
+ case Svc::MemoryState::AliasCodeData:
+ case Svc::MemoryState::Transfered:
+ case Svc::MemoryState::SharedTransfered:
+ case Svc::MemoryState::SharedCode:
+ case Svc::MemoryState::GeneratedCode:
+ case Svc::MemoryState::CodeOut:
+ case Svc::MemoryState::Coverage:
+ case Svc::MemoryState::Insecure:
+ return m_alias_code_region_end - m_alias_code_region_start;
+ case Svc::MemoryState::Code:
+ case Svc::MemoryState::CodeData:
+ return m_code_region_end - m_code_region_start;
+ default:
+ UNREACHABLE();
+ }
+}
+
+bool KPageTableBase::CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const {
+ const KProcessAddress end = addr + size;
+ const KProcessAddress last = end - 1;
+
+ const KProcessAddress region_start = this->GetRegionAddress(state);
+ const size_t region_size = this->GetRegionSize(state);
+
+ const bool is_in_region =
+ region_start <= addr && addr < end && last <= region_start + region_size - 1;
+ const bool is_in_heap = !(end <= m_heap_region_start || m_heap_region_end <= addr ||
+ m_heap_region_start == m_heap_region_end);
+ const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr ||
+ m_alias_region_start == m_alias_region_end);
+ switch (state) {
+ case Svc::MemoryState::Free:
+ case Svc::MemoryState::Kernel:
+ return is_in_region;
+ case Svc::MemoryState::Io:
+ case Svc::MemoryState::Static:
+ case Svc::MemoryState::Code:
+ case Svc::MemoryState::CodeData:
+ case Svc::MemoryState::Shared:
+ case Svc::MemoryState::AliasCode:
+ case Svc::MemoryState::AliasCodeData:
+ case Svc::MemoryState::Stack:
+ case Svc::MemoryState::ThreadLocal:
+ case Svc::MemoryState::Transfered:
+ case Svc::MemoryState::SharedTransfered:
+ case Svc::MemoryState::SharedCode:
+ case Svc::MemoryState::GeneratedCode:
+ case Svc::MemoryState::CodeOut:
+ case Svc::MemoryState::Coverage:
+ case Svc::MemoryState::Insecure:
+ return is_in_region && !is_in_heap && !is_in_alias;
+ case Svc::MemoryState::Normal:
+ ASSERT(is_in_heap);
+ return is_in_region && !is_in_alias;
+ case Svc::MemoryState::Ipc:
+ case Svc::MemoryState::NonSecureIpc:
+ case Svc::MemoryState::NonDeviceIpc:
+ ASSERT(is_in_alias);
+ return is_in_region && !is_in_heap;
+ default:
+ return false;
+ }
+}
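+
+// The per-state rules above encode the region invariants: Normal memory must
+// come from the heap region (and so can never overlap alias), the Ipc variants
+// must come from the alias region (and so can never overlap heap), and all
+// other mappable states must avoid both dedicated regions.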
+
+Result KPageTableBase::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr) const {
+ // Validate the states match expectation.
+ R_UNLESS((info.m_state & state_mask) == state, ResultInvalidCurrentMemory);
+ R_UNLESS((info.m_permission & perm_mask) == perm, ResultInvalidCurrentMemory);
+ R_UNLESS((info.m_attribute & attr_mask) == attr, ResultInvalidCurrentMemory);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr,
+ size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm,
+ KMemoryAttribute attr_mask,
+ KMemoryAttribute attr) const {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ // Get information about the first block.
+ const KProcessAddress last_addr = addr + size - 1;
+ KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
+ KMemoryInfo info = it->GetMemoryInfo();
+
+ // If the start address isn't aligned, we need a block.
+ const size_t blocks_for_start_align =
+ (Common::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0;
+
+ while (true) {
+ // Validate against the provided masks.
+ R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));
+
+ // Break once we're done.
+ if (last_addr <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance our iterator.
+ it++;
+ ASSERT(it != m_memory_block_manager.cend());
+ info = it->GetMemoryInfo();
+ }
+
+ // If the end address isn't aligned, we need a block.
+ const size_t blocks_for_end_align =
+ (Common::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0;
+
+ if (out_blocks_needed != nullptr) {
+ *out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
+ }
+
+ R_SUCCEED();
+}
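+
+// Allocator-block accounting: an update over [addr, addr + size) can split at
+// most one existing memory block at each end, and only when that end is not
+// already a block boundary, so *out_blocks_needed is 0, 1, or 2 spare blocks.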
+
+Result KPageTableBase::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
+ KMemoryAttribute* out_attr, size_t* out_blocks_needed,
+ KMemoryBlockManager::const_iterator it,
+ KProcessAddress last_addr, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ // Get information about the first block.
+ KMemoryInfo info = it->GetMemoryInfo();
+
+ // Validate all blocks in the range have correct state.
+ const KMemoryState first_state = info.m_state;
+ const KMemoryPermission first_perm = info.m_permission;
+ const KMemoryAttribute first_attr = info.m_attribute;
+ while (true) {
+ // Validate the current block.
+ R_UNLESS(info.m_state == first_state, ResultInvalidCurrentMemory);
+ R_UNLESS(info.m_permission == first_perm, ResultInvalidCurrentMemory);
+ R_UNLESS((info.m_attribute | ignore_attr) == (first_attr | ignore_attr),
+ ResultInvalidCurrentMemory);
+
+ // Validate against the provided masks.
+ R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));
+
+ // Break once we're done.
+ if (last_addr <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance our iterator.
+ it++;
+ ASSERT(it != m_memory_block_manager.cend());
+ info = it->GetMemoryInfo();
+ }
+
+ // Write output state.
+ if (out_state != nullptr) {
+ *out_state = first_state;
+ }
+ if (out_perm != nullptr) {
+ *out_perm = first_perm;
+ }
+ if (out_attr != nullptr) {
+ *out_attr = first_attr & ~ignore_attr;
+ }
+
+ // If the end address isn't aligned, we need a block.
+ if (out_blocks_needed != nullptr) {
+ const size_t blocks_for_end_align =
+ (Common::AlignDown(GetInteger(last_addr), PageSize) + PageSize != info.GetEndAddress())
+ ? 1
+ : 0;
+ *out_blocks_needed = blocks_for_end_align;
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
+ KMemoryAttribute* out_attr, size_t* out_blocks_needed,
+ KProcessAddress addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ // Check memory state.
+ const KProcessAddress last_addr = addr + size - 1;
+ KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
+ R_TRY(this->CheckMemoryState(out_state, out_perm, out_attr, out_blocks_needed, it, last_addr,
+ state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr));
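+    // (The iterator was passed to the overload above by value, so it still
+    // points at the block containing addr.)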
+
+ // If the start address isn't aligned, we need a block.
+ if (out_blocks_needed != nullptr &&
+ Common::AlignDown(GetInteger(addr), PageSize) != it->GetAddress()) {
+ ++(*out_blocks_needed);
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_paddr,
+ KProcessAddress addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr, KMemoryPermission new_perm,
+ KMemoryAttribute lock_attr) {
+ // Validate basic preconditions.
+ ASSERT(False(lock_attr & attr));
+ ASSERT(False(lock_attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)));
+
+ // Validate the lock request.
+ const size_t num_pages = size / PageSize;
+ R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check that the output page group is empty, if it exists.
+ if (out_pg) {
+ ASSERT(out_pg->GetNumPages() == 0);
+ }
+
+ // Check the state.
+ KMemoryState old_state;
+ KMemoryPermission old_perm;
+ KMemoryAttribute old_attr;
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
+ std::addressof(old_attr), std::addressof(num_allocator_blocks),
+ addr, size, state_mask | KMemoryState::FlagReferenceCounted,
+ state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
+ attr_mask, attr));
+
+ // Get the physical address, if we're supposed to.
+ if (out_paddr != nullptr) {
+ ASSERT(this->GetPhysicalAddressLocked(out_paddr, addr));
+ }
+
+ // Make the page group, if we're supposed to.
+ if (out_pg != nullptr) {
+ R_TRY(this->MakePageGroup(*out_pg, addr, num_pages));
+ }
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // Decide on new perm and attr.
+ new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
+ KMemoryAttribute new_attr = old_attr | static_cast<KMemoryAttribute>(lock_attr);
+
+ // Update permission, if we need to.
+ if (new_perm != old_perm) {
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ const KPageProperties properties = {new_perm, false,
+ True(old_attr & KMemoryAttribute::Uncached),
+ DisableMergeAttribute::DisableHeadBodyTail};
+ R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties,
+ OperationType::ChangePermissions, false));
+ }
+
+ // Apply the memory block updates.
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
+ new_attr, KMemoryBlockDisableMergeAttribute::Locked,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // If we have an output group, open.
+ if (out_pg) {
+ out_pg->Open();
+ }
+
+ R_SUCCEED();
+}
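+
+// Usage sketch for LockMemoryAndOpen (illustrative only; the state/permission
+// choices below are assumptions about a typical caller, not a specific SVC):
+//
+//   KPageGroup pg(m_kernel, m_block_info_manager);
+//   R_TRY(this->LockMemoryAndOpen(
+//       std::addressof(pg), nullptr, addr, size,
+//       KMemoryState::FlagCanIpcUserBuffer, KMemoryState::FlagCanIpcUserBuffer,
+//       KMemoryPermission::All, KMemoryPermission::UserReadWrite,
+//       KMemoryAttribute::All, KMemoryAttribute::None,
+//       KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite,
+//       KMemoryAttribute::Locked));
+//
+// This would leave the pages attribute-Locked and inaccessible to userspace
+// until a matching UnlockMemory call, with pg holding open references.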
+
+Result KPageTableBase::UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr, KMemoryPermission new_perm,
+ KMemoryAttribute lock_attr, const KPageGroup* pg) {
+ // Validate basic preconditions.
+ ASSERT((attr_mask & lock_attr) == lock_attr);
+ ASSERT((attr & lock_attr) == lock_attr);
+
+ // Validate the unlock request.
+ const size_t num_pages = size / PageSize;
+ R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check the state.
+ KMemoryState old_state;
+ KMemoryPermission old_perm;
+ KMemoryAttribute old_attr;
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
+ std::addressof(old_attr), std::addressof(num_allocator_blocks),
+ addr, size, state_mask | KMemoryState::FlagReferenceCounted,
+ state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
+ attr_mask, attr));
+
+ // Check the page group.
+ if (pg != nullptr) {
+ R_UNLESS(this->IsValidPageGroup(*pg, addr, num_pages), ResultInvalidMemoryRegion);
+ }
+
+ // Decide on new perm and attr.
+ new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
+ KMemoryAttribute new_attr = old_attr & ~static_cast<KMemoryAttribute>(lock_attr);
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // Update permission, if we need to.
+ if (new_perm != old_perm) {
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ const KPageProperties properties = {new_perm, false,
+ True(old_attr & KMemoryAttribute::Uncached),
+ DisableMergeAttribute::EnableAndMergeHeadBodyTail};
+ R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties,
+ OperationType::ChangePermissions, false));
+ }
+
+ // Apply the memory block updates.
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
+ new_attr, KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Locked);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::QueryInfoImpl(KMemoryInfo* out_info, Svc::PageInfo* out_page,
+ KProcessAddress address) const {
+ ASSERT(this->IsLockedByCurrentThread());
+ ASSERT(out_info != nullptr);
+ ASSERT(out_page != nullptr);
+
+ const KMemoryBlock* block = m_memory_block_manager.FindBlock(address);
+ R_UNLESS(block != nullptr, ResultInvalidCurrentMemory);
+
+ *out_info = block->GetMemoryInfo();
+ out_page->flags = 0;
+ R_SUCCEED();
+}
+
+Result KPageTableBase::QueryMappingImpl(KProcessAddress* out, KPhysicalAddress address, size_t size,
+ Svc::MemoryState state) const {
+ ASSERT(!this->IsLockedByCurrentThread());
+ ASSERT(out != nullptr);
+
+ const KProcessAddress region_start = this->GetRegionAddress(state);
+ const size_t region_size = this->GetRegionSize(state);
+
+ // Check that the address/size are potentially valid.
+ R_UNLESS((address < address + size), ResultNotFound);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ auto& impl = this->GetImpl();
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0};
+ bool cur_valid = false;
+ TraversalEntry next_entry;
+ bool next_valid;
+ size_t tot_size = 0;
+
+ next_valid =
+ impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), region_start);
+ next_entry.block_size =
+ (next_entry.block_size - (GetInteger(region_start) & (next_entry.block_size - 1)));
+
+ // Iterate, looking for entry.
+ while (true) {
+ if ((!next_valid && !cur_valid) ||
+ (next_valid && cur_valid &&
+ next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) {
+ cur_entry.block_size += next_entry.block_size;
+ } else {
+ if (cur_valid && cur_entry.phys_addr <= address &&
+ address + size <= cur_entry.phys_addr + cur_entry.block_size) {
+ // Check if this region is valid.
+ const KProcessAddress mapped_address =
+ (region_start + tot_size) + GetInteger(address - cur_entry.phys_addr);
+ if (R_SUCCEEDED(this->CheckMemoryState(
+ mapped_address, size, KMemoryState::Mask, static_cast<KMemoryState>(state),
+ KMemoryPermission::UserRead, KMemoryPermission::UserRead,
+ KMemoryAttribute::None, KMemoryAttribute::None))) {
+ // It is!
+ *out = mapped_address;
+ R_SUCCEED();
+ }
+ }
+
+ // Update tracking variables.
+ tot_size += cur_entry.block_size;
+ cur_entry = next_entry;
+ cur_valid = next_valid;
+ }
+
+ if (cur_entry.block_size + tot_size >= region_size) {
+ break;
+ }
+
+ next_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ }
+
+ // Check the last entry.
+ R_UNLESS(cur_valid, ResultNotFound);
+ R_UNLESS(cur_entry.phys_addr <= address, ResultNotFound);
+ R_UNLESS(address + size <= cur_entry.phys_addr + cur_entry.block_size, ResultNotFound);
+
+ // Check if the last region is valid.
+ const KProcessAddress mapped_address =
+ (region_start + tot_size) + GetInteger(address - cur_entry.phys_addr);
+ R_TRY_CATCH(this->CheckMemoryState(mapped_address, size, KMemoryState::All,
+ static_cast<KMemoryState>(state),
+ KMemoryPermission::UserRead, KMemoryPermission::UserRead,
+ KMemoryAttribute::None, KMemoryAttribute::None)) {
+ R_CONVERT_ALL(ResultNotFound);
+ }
+ R_END_TRY_CATCH;
+
+ // We found the region.
+ *out = mapped_address;
+ R_SUCCEED();
+}
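+
+// Reverse-lookup note: no physical-to-virtual map is maintained, so
+// QueryMappingImpl linearly walks the page table for the state's region,
+// coalescing physically contiguous traversal entries; once a merged run covers
+// [address, address + size), the guest address is recovered as
+// region_start + tot_size + (address - cur_entry.phys_addr) and then validated
+// against the expected memory state.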
+
+Result KPageTableBase::MapMemory(KProcessAddress dst_address, KProcessAddress src_address,
+ size_t size) {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Validate that the source address's state is valid.
+ KMemoryState src_state;
+ size_t num_src_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(src_state), nullptr, nullptr,
+ std::addressof(num_src_allocator_blocks), src_address, size,
+ KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias,
+ KMemoryPermission::All, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::All, KMemoryAttribute::None));
+
+ // Validate that the dst address's state is valid.
+ size_t num_dst_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size,
+ KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryAttribute::None));
+
+ // Create an update allocator for the source.
+ Result src_allocator_result;
+ KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+ m_memory_block_slab_manager,
+ num_src_allocator_blocks);
+ R_TRY(src_allocator_result);
+
+ // Create an update allocator for the destination.
+ Result dst_allocator_result;
+ KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+ m_memory_block_slab_manager,
+ num_dst_allocator_blocks);
+ R_TRY(dst_allocator_result);
+
+ // Map the memory.
+ {
+ // Determine the number of pages being operated on.
+ const size_t num_pages = size / PageSize;
+
+        // Create page groups for the memory being mapped.
+ KPageGroup pg(m_kernel, m_block_info_manager);
+
+ // Create the page group representing the source.
+ R_TRY(this->MakePageGroup(pg, src_address, num_pages));
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Reprotect the source as kernel-read/not mapped.
+ const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
+ KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
+ const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked;
+ const KPageProperties src_properties = {new_src_perm, false, false,
+ DisableMergeAttribute::DisableHeadBodyTail};
+ R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, src_properties,
+ OperationType::ChangePermissions, false));
+
+ // Ensure that we unprotect the source pages on failure.
+ ON_RESULT_FAILURE {
+ const KPageProperties unprotect_properties = {
+ KMemoryPermission::UserReadWrite, false, false,
+ DisableMergeAttribute::EnableHeadBodyTail};
+ R_ASSERT(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false,
+ unprotect_properties, OperationType::ChangePermissions, true));
+ };
+
+ // Map the alias pages.
+ const KPageProperties dst_map_properties = {KMemoryPermission::UserReadWrite, false, false,
+ DisableMergeAttribute::DisableHead};
+ R_TRY(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_map_properties,
+ false));
+
+ // Apply the memory block updates.
+ m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages,
+ src_state, new_src_perm, new_src_attr,
+ KMemoryBlockDisableMergeAttribute::Locked,
+ KMemoryBlockDisableMergeAttribute::None);
+ m_memory_block_manager.Update(
+ std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::Stack,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal, KMemoryBlockDisableMergeAttribute::None);
+ }
+
+ R_SUCCEED();
+}
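+
+// MapMemory implements svcMapMemory-style stack aliasing: the source pages
+// remain owned by the process but become kernel-read/not-mapped and Locked,
+// while the same physical pages are mapped again at dst_address as
+// UserReadWrite Stack memory; the ON_RESULT_FAILURE above restores the source
+// protections if the destination mapping fails.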
+
+Result KPageTableBase::UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address,
+ size_t size) {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Validate that the source address's state is valid.
+ KMemoryState src_state;
+ size_t num_src_allocator_blocks;
+ R_TRY(this->CheckMemoryState(
+ std::addressof(src_state), nullptr, nullptr, std::addressof(num_src_allocator_blocks),
+ src_address, size, KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias,
+ KMemoryPermission::All, KMemoryPermission::NotMapped | KMemoryPermission::KernelRead,
+ KMemoryAttribute::All, KMemoryAttribute::Locked));
+
+ // Validate that the dst address's state is valid.
+ KMemoryPermission dst_perm;
+ size_t num_dst_allocator_blocks;
+ R_TRY(this->CheckMemoryState(
+ nullptr, std::addressof(dst_perm), nullptr, std::addressof(num_dst_allocator_blocks),
+ dst_address, size, KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None));
+
+ // Create an update allocator for the source.
+ Result src_allocator_result;
+ KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+ m_memory_block_slab_manager,
+ num_src_allocator_blocks);
+ R_TRY(src_allocator_result);
+
+ // Create an update allocator for the destination.
+ Result dst_allocator_result;
+ KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+ m_memory_block_slab_manager,
+ num_dst_allocator_blocks);
+ R_TRY(dst_allocator_result);
+
+ // Unmap the memory.
+ {
+ // Determine the number of pages being operated on.
+ const size_t num_pages = size / PageSize;
+
+ // Create page groups for the memory being unmapped.
+ KPageGroup pg(m_kernel, m_block_info_manager);
+
+ // Create the page group representing the destination.
+ R_TRY(this->MakePageGroup(pg, dst_address, num_pages));
+
+        // Ensure the page group is valid for the source.
+ R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), ResultInvalidMemoryRegion);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Unmap the aliased copy of the pages.
+ const KPageProperties dst_unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false,
+ dst_unmap_properties, OperationType::Unmap, false));
+
+ // Ensure that we re-map the aliased pages on failure.
+ ON_RESULT_FAILURE {
+ this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg);
+ };
+
+ // Try to set the permissions for the source pages back to what they should be.
+ const KPageProperties src_properties = {KMemoryPermission::UserReadWrite, false, false,
+ DisableMergeAttribute::EnableAndMergeHeadBodyTail};
+ R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, src_properties,
+ OperationType::ChangePermissions, false));
+
+ // Apply the memory block updates.
+ m_memory_block_manager.Update(
+ std::addressof(src_allocator), src_address, num_pages, src_state,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked);
+ m_memory_block_manager.Update(
+ std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal);
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address,
+ size_t size) {
+ // Validate the mapping request.
+ R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
+ ResultInvalidMemoryRegion);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Verify that the source memory is normal heap.
+ KMemoryState src_state;
+ KMemoryPermission src_perm;
+ size_t num_src_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(src_state), std::addressof(src_perm), nullptr,
+ std::addressof(num_src_allocator_blocks), src_address, size,
+ KMemoryState::All, KMemoryState::Normal, KMemoryPermission::All,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
+ KMemoryAttribute::None));
+
+ // Verify that the destination memory is unmapped.
+ size_t num_dst_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size,
+ KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryAttribute::None));
+
+ // Create an update allocator for the source.
+ Result src_allocator_result;
+ KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+ m_memory_block_slab_manager,
+ num_src_allocator_blocks);
+ R_TRY(src_allocator_result);
+
+ // Create an update allocator for the destination.
+ Result dst_allocator_result;
+ KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+ m_memory_block_slab_manager,
+ num_dst_allocator_blocks);
+ R_TRY(dst_allocator_result);
+
+ // Map the code memory.
+ {
+ // Determine the number of pages being operated on.
+ const size_t num_pages = size / PageSize;
+
+        // Create page groups for the memory being mapped.
+ KPageGroup pg(m_kernel, m_block_info_manager);
+
+ // Create the page group representing the source.
+ R_TRY(this->MakePageGroup(pg, src_address, num_pages));
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Reprotect the source as kernel-read/not mapped.
+ const KMemoryPermission new_perm = static_cast<KMemoryPermission>(
+ KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
+ const KPageProperties src_properties = {new_perm, false, false,
+ DisableMergeAttribute::DisableHeadBodyTail};
+ R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, src_properties,
+ OperationType::ChangePermissions, false));
+
+ // Ensure that we unprotect the source pages on failure.
+ ON_RESULT_FAILURE {
+ const KPageProperties unprotect_properties = {
+ src_perm, false, false, DisableMergeAttribute::EnableHeadBodyTail};
+ R_ASSERT(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false,
+ unprotect_properties, OperationType::ChangePermissions, true));
+ };
+
+ // Map the alias pages.
+ const KPageProperties dst_properties = {new_perm, false, false,
+ DisableMergeAttribute::DisableHead};
+ R_TRY(
+ this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_properties, false));
+
+ // Apply the memory block updates.
+ m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages,
+ src_state, new_perm, KMemoryAttribute::Locked,
+ KMemoryBlockDisableMergeAttribute::Locked,
+ KMemoryBlockDisableMergeAttribute::None);
+ m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
+ KMemoryState::AliasCode, new_perm, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address,
+ size_t size) {
+ // Validate the mapping request.
+ R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
+ ResultInvalidMemoryRegion);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Verify that the source memory is locked normal heap.
+ size_t num_src_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size,
+ KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::All,
+ KMemoryAttribute::Locked));
+
+ // Verify that the destination memory is aliasable code.
+ size_t num_dst_allocator_blocks;
+ R_TRY(this->CheckMemoryStateContiguous(
+ std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias,
+ KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::All & ~KMemoryAttribute::PermissionLocked, KMemoryAttribute::None));
+
+ // Determine whether any pages being unmapped are code.
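+    // (Only executable pages require instruction cache maintenance when unmapped.)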
+ bool any_code_pages = false;
+ {
+ KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(dst_address);
+ while (true) {
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // Check if the memory has code flag.
+ if (True(info.GetState() & KMemoryState::FlagCode)) {
+ any_code_pages = true;
+ break;
+ }
+
+ // Check if we're done.
+ if (dst_address + size - 1 <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ ++it;
+ }
+ }
+
+ // Ensure that we maintain the instruction cache.
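+    // The invalidate runs at scope exit, and only if executable pages were actually
+    // reprotected, so failed unmaps avoid the cost.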
+ bool reprotected_pages = false;
+ SCOPE_EXIT({
+ if (reprotected_pages && any_code_pages) {
+ InvalidateInstructionCache(m_system, dst_address, size);
+ }
+ });
+
+ // Unmap.
+ {
+ // Determine the number of pages being operated on.
+ const size_t num_pages = size / PageSize;
+
+ // Create page groups for the memory being unmapped.
+ KPageGroup pg(m_kernel, m_block_info_manager);
+
+ // Create the page group representing the destination.
+ R_TRY(this->MakePageGroup(pg, dst_address, num_pages));
+
+ // Verify that the page group contains the same pages as the source.
+ R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), ResultInvalidMemoryRegion);
+
+ // Create an update allocator for the source.
+ Result src_allocator_result;
+ KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+ m_memory_block_slab_manager,
+ num_src_allocator_blocks);
+ R_TRY(src_allocator_result);
+
+ // Create an update allocator for the destination.
+ Result dst_allocator_result;
+ KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+ m_memory_block_slab_manager,
+ num_dst_allocator_blocks);
+ R_TRY(dst_allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Unmap the aliased copy of the pages.
+ const KPageProperties dst_unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false,
+ dst_unmap_properties, OperationType::Unmap, false));
+
+ // Ensure that we re-map the aliased pages on failure.
+ ON_RESULT_FAILURE {
+ this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg);
+ };
+
+ // Try to set the permissions for the source pages back to what they should be.
+ const KPageProperties src_properties = {KMemoryPermission::UserReadWrite, false, false,
+ DisableMergeAttribute::EnableAndMergeHeadBodyTail};
+ R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, src_properties,
+ OperationType::ChangePermissions, false));
+
+ // Apply the memory block updates.
+ m_memory_block_manager.Update(
+ std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal);
+ m_memory_block_manager.Update(
+ std::addressof(src_allocator), src_address, num_pages, KMemoryState::Normal,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked);
+
+ // Note that we reprotected pages.
+ reprotected_pages = true;
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapInsecureMemory(KProcessAddress address, size_t size) {
+ // Get the insecure memory resource limit and pool.
+ auto* const insecure_resource_limit = KSystemControl::GetInsecureMemoryResourceLimit(m_kernel);
+ const auto insecure_pool =
+ static_cast<KMemoryManager::Pool>(KSystemControl::GetInsecureMemoryPool());
+
+ // Reserve the insecure memory.
+ // NOTE: ResultOutOfMemory is returned here instead of the usual LimitReached.
+ KScopedResourceReservation memory_reservation(insecure_resource_limit,
+ Svc::LimitableResource::PhysicalMemoryMax, size);
+ R_UNLESS(memory_reservation.Succeeded(), ResultOutOfMemory);
+
+ // Allocate pages for the insecure memory.
+ KPageGroup pg(m_kernel, m_block_info_manager);
+ R_TRY(m_kernel.MemoryManager().AllocateAndOpen(
+ std::addressof(pg), size / PageSize,
+ KMemoryManager::EncodeOption(insecure_pool, KMemoryManager::Direction::FromFront)));
+
+ // Close the opened pages when we're done with them.
+ // If the mapping succeeds, each page will gain an extra reference, otherwise they will be freed
+ // automatically.
+ SCOPE_EXIT({ pg.Close(); });
+
+ // Clear all the newly allocated pages.
+ for (const auto& it : pg) {
+ std::memset(GetHeapVirtualPointer(m_kernel, it.GetAddress()),
+ static_cast<u32>(m_heap_fill_value), it.GetSize());
+ }
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Validate that the address's state is valid.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+ KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Map the pages.
+ const size_t num_pages = size / PageSize;
+ const KPageProperties map_properties = {KMemoryPermission::UserReadWrite, false, false,
+ DisableMergeAttribute::DisableHead};
+ R_TRY(this->Operate(updater.GetPageList(), address, num_pages, pg, map_properties,
+ OperationType::MapGroup, false));
+
+ // Apply the memory block update.
+ m_memory_block_manager.Update(std::addressof(allocator), address, num_pages,
+ KMemoryState::Insecure, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // Update our mapped insecure size.
+ m_mapped_insecure_memory += size;
+
+ // Commit the memory reservation.
+ memory_reservation.Commit();
+
+ // We succeeded.
+ R_SUCCEED();
+}
+
+Result KPageTableBase::UnmapInsecureMemory(KProcessAddress address, size_t size) {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check the memory state.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+ KMemoryState::All, KMemoryState::Insecure, KMemoryPermission::All,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
+ KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Unmap the memory.
+ const size_t num_pages = size / PageSize;
+ const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), address, num_pages, 0, false, unmap_properties,
+ OperationType::Unmap, false));
+
+ // Apply the memory block update.
+ m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal);
+
+ // Update our mapped insecure size.
+ m_mapped_insecure_memory -= size;
+
+ // Release the insecure memory from the insecure limit.
+ if (auto* const insecure_resource_limit =
+ KSystemControl::GetInsecureMemoryResourceLimit(m_kernel);
+ insecure_resource_limit != nullptr) {
+ insecure_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, size);
+ }
+
+ R_SUCCEED();
+}
+
+KProcessAddress KPageTableBase::FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
+ size_t num_pages, size_t alignment, size_t offset,
+ size_t guard_pages) const {
+ KProcessAddress address = 0;
+
+ if (num_pages <= region_num_pages) {
+ if (this->IsAslrEnabled()) {
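+            // With ASLR, selection proceeds in stages: up to 8 random probes, then a
+            // linear scan from a random offset; if both fail, the plain scan below
+            // still runs.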
+ // Try to directly find a free area up to 8 times.
+ for (size_t i = 0; i < 8; i++) {
+ const size_t random_offset =
+ KSystemControl::GenerateRandomRange(
+ 0, (region_num_pages - num_pages - guard_pages) * PageSize / alignment) *
+ alignment;
+ const KProcessAddress candidate =
+ Common::AlignDown(GetInteger(region_start + random_offset), alignment) + offset;
+
+ KMemoryInfo info;
+ Svc::PageInfo page_info;
+ R_ASSERT(this->QueryInfoImpl(std::addressof(info), std::addressof(page_info),
+ candidate));
+
+ if (info.m_state != KMemoryState::Free) {
+ continue;
+ }
+ if (!(region_start <= candidate)) {
+ continue;
+ }
+ if (!(info.GetAddress() + guard_pages * PageSize <= GetInteger(candidate))) {
+ continue;
+ }
+ if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <=
+ info.GetLastAddress())) {
+ continue;
+ }
+ if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <=
+ region_start + region_num_pages * PageSize - 1)) {
+ continue;
+ }
+
+ address = candidate;
+ break;
+ }
+ // Fall back to finding the first free area with a random offset.
+ if (address == 0) {
+ // NOTE: Nintendo does not account for guard pages here.
+ // This may theoretically cause an offset to be chosen that cannot be mapped.
+ // We will account for guard pages.
+ const size_t offset_pages = KSystemControl::GenerateRandomRange(
+ 0, region_num_pages - num_pages - guard_pages);
+ address = m_memory_block_manager.FindFreeArea(
+ region_start + offset_pages * PageSize, region_num_pages - offset_pages,
+ num_pages, alignment, offset, guard_pages);
+ }
+ }
+ // Find the first free area.
+ if (address == 0) {
+ address = m_memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages,
+ alignment, offset, guard_pages);
+ }
+ }
+
+ return address;
+}
+
+size_t KPageTableBase::GetSize(KMemoryState state) const {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Iterate, counting blocks with the desired state.
+ size_t total_size = 0;
+ for (KMemoryBlockManager::const_iterator it =
+ m_memory_block_manager.FindIterator(m_address_space_start);
+ it != m_memory_block_manager.end(); ++it) {
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+ if (info.GetState() == state) {
+ total_size += info.GetSize();
+ }
+ }
+
+ return total_size;
+}
+
+size_t KPageTableBase::GetCodeSize() const {
+ return this->GetSize(KMemoryState::Code);
+}
+
+size_t KPageTableBase::GetCodeDataSize() const {
+ return this->GetSize(KMemoryState::CodeData);
+}
+
+size_t KPageTableBase::GetAliasCodeSize() const {
+ return this->GetSize(KMemoryState::AliasCode);
+}
+
+size_t KPageTableBase::GetAliasCodeDataSize() const {
+ return this->GetSize(KMemoryState::AliasCodeData);
+}
+
+Result KPageTableBase::AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
+ size_t num_pages, KMemoryPermission perm) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ // Create a page group to hold the pages we allocate.
+ KPageGroup pg(m_kernel, m_block_info_manager);
+
+ // Allocate the pages.
+ R_TRY(
+ m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, m_allocate_option));
+
+ // Ensure that the page group is closed when we're done working with it.
+ SCOPE_EXIT({ pg.Close(); });
+
+ // Clear all pages.
+ for (const auto& it : pg) {
+ std::memset(GetHeapVirtualPointer(m_kernel, it.GetAddress()),
+ static_cast<u32>(m_heap_fill_value), it.GetSize());
+ }
+
+ // Map the pages.
+ const KPageProperties properties = {perm, false, false, DisableMergeAttribute::None};
+ R_RETURN(this->Operate(page_list, address, num_pages, pg, properties, OperationType::MapGroup,
+ false));
+}
+
+Result KPageTableBase::MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
+ const KPageGroup& pg, const KPageProperties properties,
+ bool reuse_ll) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ // Note the current address, so that we can iterate.
+ const KProcessAddress start_address = address;
+ KProcessAddress cur_address = address;
+
+ // Ensure that we clean up on failure.
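+    // The cleanup path assumes a fresh page list (reuse_ll == false); callers that
+    // reuse a page list must guarantee the mapping cannot fail.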
+ ON_RESULT_FAILURE {
+ ASSERT(!reuse_ll);
+ if (cur_address != start_address) {
+ const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_ASSERT(this->Operate(page_list, start_address,
+ (cur_address - start_address) / PageSize, 0, false,
+ unmap_properties, OperationType::Unmap, true));
+ }
+ };
+
+ // Iterate, mapping all pages in the group.
+ for (const auto& block : pg) {
+ // Map and advance.
+ const KPageProperties cur_properties =
+ (cur_address == start_address)
+ ? properties
+ : KPageProperties{properties.perm, properties.io, properties.uncached,
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(page_list, cur_address, block.GetNumPages(), block.GetAddress(), true,
+ cur_properties, OperationType::Map, reuse_ll));
+ cur_address += block.GetSize();
+ }
+
+ // We succeeded!
+ R_SUCCEED();
+}
+
+void KPageTableBase::RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
+ const KPageGroup& pg) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ // Note the current address, so that we can iterate.
+ const KProcessAddress start_address = address;
+ const KProcessAddress last_address = start_address + size - 1;
+ const KProcessAddress end_address = last_address + 1;
+
+ // Iterate over the memory.
+ auto pg_it = pg.begin();
+ ASSERT(pg_it != pg.end());
+
+ KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
+ size_t pg_pages = pg_it->GetNumPages();
+
+ auto it = m_memory_block_manager.FindIterator(start_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != m_memory_block_manager.end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // Determine the range to map.
+ KProcessAddress map_address = std::max(info.GetAddress(), GetInteger(start_address));
+ const KProcessAddress map_end_address =
+ std::min(info.GetEndAddress(), GetInteger(end_address));
+ ASSERT(map_end_address != map_address);
+
+ // Determine if we should disable head merge.
+ const bool disable_head_merge =
+ info.GetAddress() >= GetInteger(start_address) &&
+ True(info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Normal);
+ const KPageProperties map_properties = {
+ info.GetPermission(), false, false,
+ disable_head_merge ? DisableMergeAttribute::DisableHead : DisableMergeAttribute::None};
+
+ // While we have pages to map, map them.
+ size_t map_pages = (map_end_address - map_address) / PageSize;
+ while (map_pages > 0) {
+ // Check if we're at the end of the physical block.
+ if (pg_pages == 0) {
+ // Ensure there are more pages to map.
+ ASSERT(pg_it != pg.end());
+
+ // Advance our physical block.
+ ++pg_it;
+ pg_phys_addr = pg_it->GetAddress();
+ pg_pages = pg_it->GetNumPages();
+ }
+
+ // Map whatever we can.
+ const size_t cur_pages = std::min(pg_pages, map_pages);
+            R_ASSERT(this->Operate(page_list, map_address, cur_pages, pg_phys_addr, true,
+                                   map_properties, OperationType::Map, true));
+
+ // Advance.
+ map_address += cur_pages * PageSize;
+ map_pages -= cur_pages;
+
+ pg_phys_addr += cur_pages * PageSize;
+ pg_pages -= cur_pages;
+ }
+
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ ++it;
+ }
+
+ // Check that we re-mapped precisely the page group.
+ ASSERT((++pg_it) == pg.end());
+}
+
+Result KPageTableBase::MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ const size_t size = num_pages * PageSize;
+
+ // We're making a new group, not adding to an existing one.
+ R_UNLESS(pg.empty(), ResultInvalidCurrentMemory);
+
+ auto& impl = this->GetImpl();
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ R_UNLESS(impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), addr),
+ ResultInvalidCurrentMemory);
+
+ // Prepare tracking variables.
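+    // The traversal may begin mid-block, so the first entry contributes only the
+    // bytes from the physical address to the end of its block.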
+ KPhysicalAddress cur_addr = next_entry.phys_addr;
+ size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+ size_t tot_size = cur_size;
+
+ // Iterate, adding to group as we go.
+ while (tot_size < size) {
+ R_UNLESS(impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)),
+ ResultInvalidCurrentMemory);
+
+ if (next_entry.phys_addr != (cur_addr + cur_size)) {
+ const size_t cur_pages = cur_size / PageSize;
+
+ R_UNLESS(IsHeapPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+ R_TRY(pg.AddBlock(cur_addr, cur_pages));
+
+ cur_addr = next_entry.phys_addr;
+ cur_size = next_entry.block_size;
+ } else {
+ cur_size += next_entry.block_size;
+ }
+
+ tot_size += next_entry.block_size;
+ }
+
+ // Ensure we add the right amount for the last block.
+ if (tot_size > size) {
+ cur_size -= (tot_size - size);
+ }
+
+    // Add the last block.
+ const size_t cur_pages = cur_size / PageSize;
+ R_UNLESS(IsHeapPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+ R_TRY(pg.AddBlock(cur_addr, cur_pages));
+
+ R_SUCCEED();
+}
+
+bool KPageTableBase::IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr,
+ size_t num_pages) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ const size_t size = num_pages * PageSize;
+
+ // Empty groups are necessarily invalid.
+ if (pg.empty()) {
+ return false;
+ }
+
+ auto& impl = this->GetImpl();
+
+ // We're going to validate that the group we'd expect is the group we see.
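+    // UpdateCurrentIterator (below) advances through the expected group in lockstep
+    // with the page table traversal, failing if the group is exhausted early.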
+ auto cur_it = pg.begin();
+ KPhysicalAddress cur_block_address = cur_it->GetAddress();
+ size_t cur_block_pages = cur_it->GetNumPages();
+
+ auto UpdateCurrentIterator = [&]() {
+ if (cur_block_pages == 0) {
+ if ((++cur_it) == pg.end()) {
+ return false;
+ }
+
+ cur_block_address = cur_it->GetAddress();
+ cur_block_pages = cur_it->GetNumPages();
+ }
+ return true;
+ };
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ if (!impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), addr)) {
+ return false;
+ }
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_addr = next_entry.phys_addr;
+ size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+ size_t tot_size = cur_size;
+
+ // Iterate, comparing expected to actual.
+ while (tot_size < size) {
+ if (!impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context))) {
+ return false;
+ }
+
+ if (next_entry.phys_addr != (cur_addr + cur_size)) {
+ const size_t cur_pages = cur_size / PageSize;
+
+ if (!IsHeapPhysicalAddress(cur_addr)) {
+ return false;
+ }
+
+ if (!UpdateCurrentIterator()) {
+ return false;
+ }
+
+ if (cur_block_address != cur_addr || cur_block_pages < cur_pages) {
+ return false;
+ }
+
+ cur_block_address += cur_size;
+ cur_block_pages -= cur_pages;
+ cur_addr = next_entry.phys_addr;
+ cur_size = next_entry.block_size;
+ } else {
+ cur_size += next_entry.block_size;
+ }
+
+ tot_size += next_entry.block_size;
+ }
+
+ // Ensure we compare the right amount for the last block.
+ if (tot_size > size) {
+ cur_size -= (tot_size - size);
+ }
+
+ if (!IsHeapPhysicalAddress(cur_addr)) {
+ return false;
+ }
+
+ if (!UpdateCurrentIterator()) {
+ return false;
+ }
+
+ return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize);
+}
+
+Result KPageTableBase::GetContiguousMemoryRangeWithState(
+ MemoryRange* out, KProcessAddress address, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ auto& impl = this->GetImpl();
+
+ // Begin a traversal.
+ TraversalContext context;
+ TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0};
+ R_UNLESS(impl.BeginTraversal(std::addressof(cur_entry), std::addressof(context), address),
+ ResultInvalidCurrentMemory);
+
+ // Traverse until we have enough size or we aren't contiguous any more.
+ const KPhysicalAddress phys_address = cur_entry.phys_addr;
+ size_t contig_size;
+ for (contig_size =
+ cur_entry.block_size - (GetInteger(phys_address) & (cur_entry.block_size - 1));
+ contig_size < size; contig_size += cur_entry.block_size) {
+ if (!impl.ContinueTraversal(std::addressof(cur_entry), std::addressof(context))) {
+ break;
+ }
+ if (cur_entry.phys_addr != phys_address + contig_size) {
+ break;
+ }
+ }
+
+ // Take the minimum size for our region.
+ size = std::min(size, contig_size);
+
+ // Check that the memory is contiguous (modulo the reference count bit).
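+    // The first check passes only for reference-counted (heap) memory; the fallback
+    // accepts other states, and is_heap records which check succeeded for the caller.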
+ const KMemoryState test_state_mask = state_mask | KMemoryState::FlagReferenceCounted;
+ const bool is_heap = R_SUCCEEDED(this->CheckMemoryStateContiguous(
+ address, size, test_state_mask, state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
+ attr_mask, attr));
+ if (!is_heap) {
+ R_TRY(this->CheckMemoryStateContiguous(address, size, test_state_mask, state, perm_mask,
+ perm, attr_mask, attr));
+ }
+
+ // The memory is contiguous, so set the output range.
+ out->Set(phys_address, size, is_heap);
+ R_SUCCEED();
+}
+
+Result KPageTableBase::SetMemoryPermission(KProcessAddress addr, size_t size,
+ Svc::MemoryPermission svc_perm) {
+ const size_t num_pages = size / PageSize;
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Verify we can change the memory permission.
+ KMemoryState old_state;
+ KMemoryPermission old_perm;
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr,
+ std::addressof(num_allocator_blocks), addr, size,
+ KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect,
+ KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::All, KMemoryAttribute::None));
+
+ // Determine new perm.
+ const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
+ R_SUCCEED_IF(old_perm == new_perm);
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Perform mapping operation.
+ const KPageProperties properties = {new_perm, false, false, DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties,
+ OperationType::ChangePermissions, false));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::SetProcessMemoryPermission(KProcessAddress addr, size_t size,
+ Svc::MemoryPermission svc_perm) {
+ const size_t num_pages = size / PageSize;
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Verify we can change the memory permission.
+ KMemoryState old_state;
+ KMemoryPermission old_perm;
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr,
+ std::addressof(num_allocator_blocks), addr, size,
+ KMemoryState::FlagCode, KMemoryState::FlagCode,
+ KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::All, KMemoryAttribute::None));
+
+ // Make a new page group for the region.
+ KPageGroup pg(m_kernel, m_block_info_manager);
+
+ // Determine new perm/state.
+ const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
+ KMemoryState new_state = old_state;
+ const bool is_w = (new_perm & KMemoryPermission::UserWrite) == KMemoryPermission::UserWrite;
+ const bool is_x = (new_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute;
+ const bool was_x =
+ (old_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute;
+ ASSERT(!(is_w && is_x));
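+    // Write + execute is never a valid user mapping, so the combination is asserted
+    // impossible rather than handled as an error.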
+
+ if (is_w) {
+ switch (old_state) {
+ case KMemoryState::Code:
+ new_state = KMemoryState::CodeData;
+ break;
+ case KMemoryState::AliasCode:
+ new_state = KMemoryState::AliasCodeData;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ // Create a page group, if we're setting execute permissions.
+ if (is_x) {
+ R_TRY(this->MakePageGroup(pg, GetInteger(addr), num_pages));
+ }
+
+ // Succeed if there's nothing to do.
+ R_SUCCEED_IF(old_perm == new_perm && old_state == new_state);
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Perform mapping operation.
+ const KPageProperties properties = {new_perm, false, false, DisableMergeAttribute::None};
+ const auto operation = was_x ? OperationType::ChangePermissionsAndRefreshAndFlush
+ : OperationType::ChangePermissions;
+ R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties, operation,
+ false));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, new_state, new_perm,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // Ensure cache coherency, if we're setting pages as executable.
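+    // Storing the data cache publishes the new code to memory before the instruction
+    // cache invalidate forces a re-fetch.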
+ if (is_x) {
+ for (const auto& block : pg) {
+ StoreDataCache(GetHeapVirtualPointer(m_kernel, block.GetAddress()), block.GetSize());
+ }
+ InvalidateInstructionCache(m_system, addr, size);
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::SetMemoryAttribute(KProcessAddress addr, size_t size, KMemoryAttribute mask,
+ KMemoryAttribute attr) {
+ const size_t num_pages = size / PageSize;
+ ASSERT((mask | KMemoryAttribute::SetMask) == KMemoryAttribute::SetMask);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Verify we can change the memory attribute.
+ KMemoryState old_state;
+ KMemoryPermission old_perm;
+ KMemoryAttribute old_attr;
+ size_t num_allocator_blocks;
+ constexpr KMemoryAttribute AttributeTestMask =
+ ~(KMemoryAttribute::SetMask | KMemoryAttribute::DeviceShared);
+ const KMemoryState state_test_mask =
+ (True(mask & KMemoryAttribute::Uncached) ? KMemoryState::FlagCanChangeAttribute
+ : KMemoryState::None) |
+ (True(mask & KMemoryAttribute::PermissionLocked) ? KMemoryState::FlagCanPermissionLock
+ : KMemoryState::None);
+ R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
+ std::addressof(old_attr), std::addressof(num_allocator_blocks),
+ addr, size, state_test_mask, state_test_mask,
+ KMemoryPermission::None, KMemoryPermission::None,
+ AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // If we need to, perform a change attribute operation.
+ if (True(mask & KMemoryAttribute::Uncached)) {
+ // Determine the new attribute.
+ const KMemoryAttribute new_attr =
+ static_cast<KMemoryAttribute>(((old_attr & ~mask) | (attr & mask)));
+
+ // Perform operation.
+ const KPageProperties properties = {old_perm, false,
+ True(new_attr & KMemoryAttribute::Uncached),
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties,
+ OperationType::ChangePermissionsAndRefreshAndFlush, false));
+ }
+
+ // Update the blocks.
+ m_memory_block_manager.UpdateAttribute(std::addressof(allocator), addr, num_pages, mask, attr);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::SetHeapSize(KProcessAddress* out, size_t size) {
+ // Lock the physical memory mutex.
+ KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
+
+ // Try to perform a reduction in heap, instead of an extension.
+ KProcessAddress cur_address;
+ size_t allocation_size;
+ {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Validate that setting heap size is possible at all.
+ R_UNLESS(!m_is_kernel, ResultOutOfMemory);
+ R_UNLESS(size <= static_cast<size_t>(m_heap_region_end - m_heap_region_start),
+ ResultOutOfMemory);
+ R_UNLESS(size <= m_max_heap_size, ResultOutOfMemory);
+
+ if (size < static_cast<size_t>(m_current_heap_end - m_heap_region_start)) {
+ // The size being requested is less than the current size, so we need to free the end of
+ // the heap.
+
+ // Validate memory state.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(
+ std::addressof(num_allocator_blocks), m_heap_region_start + size,
+ (m_current_heap_end - m_heap_region_start) - size, KMemoryState::All,
+ KMemoryState::Normal, KMemoryPermission::All, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::All, KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager,
+ num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Unmap the end of the heap.
+ const size_t num_pages = ((m_current_heap_end - m_heap_region_start) - size) / PageSize;
+ const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), m_heap_region_start + size, num_pages, 0,
+ false, unmap_properties, OperationType::Unmap, false));
+
+ // Release the memory from the resource limit.
+ m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax,
+ num_pages * PageSize);
+
+ // Apply the memory block update.
+ m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size,
+ num_pages, KMemoryState::Free, KMemoryPermission::None,
+ KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None,
+ size == 0 ? KMemoryBlockDisableMergeAttribute::Normal
+ : KMemoryBlockDisableMergeAttribute::None);
+
+ // Update the current heap end.
+ m_current_heap_end = m_heap_region_start + size;
+
+ // Set the output.
+ *out = m_heap_region_start;
+ R_SUCCEED();
+ } else if (size == static_cast<size_t>(m_current_heap_end - m_heap_region_start)) {
+ // The size requested is exactly the current size.
+ *out = m_heap_region_start;
+ R_SUCCEED();
+ } else {
+ // We have to allocate memory. Determine how much to allocate and where while the table
+ // is locked.
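+            // Allocation itself happens after the table lock is released below;
+            // m_map_physical_memory_lock (held for the whole call) keeps the heap
+            // end stable in the meantime.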
+ cur_address = m_current_heap_end;
+ allocation_size = size - (m_current_heap_end - m_heap_region_start);
+ }
+ }
+
+ // Reserve memory for the heap extension.
+ KScopedResourceReservation memory_reservation(
+ m_resource_limit, Svc::LimitableResource::PhysicalMemoryMax, allocation_size);
+ R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
+
+ // Allocate pages for the heap extension.
+ KPageGroup pg(m_kernel, m_block_info_manager);
+ R_TRY(m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), allocation_size / PageSize,
+ m_allocate_option));
+
+ // Close the opened pages when we're done with them.
+ // If the mapping succeeds, each page will gain an extra reference, otherwise they will be freed
+ // automatically.
+ SCOPE_EXIT({ pg.Close(); });
+
+ // Clear all the newly allocated pages.
+ for (const auto& it : pg) {
+ std::memset(GetHeapVirtualPointer(m_kernel, it.GetAddress()), m_heap_fill_value,
+ it.GetSize());
+ }
+
+ // Map the pages.
+ {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Ensure that the heap hasn't changed since we began executing.
+ ASSERT(cur_address == m_current_heap_end);
+
+ // Check the memory state.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), m_current_heap_end,
+ allocation_size, KMemoryState::All, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::None, KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(
+ std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Map the pages.
+ const size_t num_pages = allocation_size / PageSize;
+ const KPageProperties map_properties = {KMemoryPermission::UserReadWrite, false, false,
+ (m_current_heap_end == m_heap_region_start)
+ ? DisableMergeAttribute::DisableHead
+ : DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), m_current_heap_end, num_pages, pg,
+ map_properties, OperationType::MapGroup, false));
+
+ // We succeeded, so commit our memory reservation.
+ memory_reservation.Commit();
+
+ // Apply the memory block update.
+ m_memory_block_manager.Update(
+ std::addressof(allocator), m_current_heap_end, num_pages, KMemoryState::Normal,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+ m_heap_region_start == m_current_heap_end ? KMemoryBlockDisableMergeAttribute::Normal
+ : KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // Update the current heap end.
+ m_current_heap_end = m_heap_region_start + size;
+
+ // Set the output.
+ *out = m_heap_region_start;
+ R_SUCCEED();
+ }
+}
+
+Result KPageTableBase::SetMaxHeapSize(size_t size) {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Only process page tables are allowed to set heap size.
+ ASSERT(!this->IsKernel());
+
+ m_max_heap_size = size;
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::QueryInfo(KMemoryInfo* out_info, Svc::PageInfo* out_page_info,
+ KProcessAddress addr) const {
+ // If the address is invalid, create a fake block.
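+    // The fake block reports everything from the end of the address space to the top
+    // of the 64-bit range as a single inaccessible region (m_size wraps accordingly).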
+ if (!this->Contains(addr, 1)) {
+ *out_info = {
+ .m_address = GetInteger(m_address_space_end),
+ .m_size = 0 - GetInteger(m_address_space_end),
+ .m_state = static_cast<KMemoryState>(Svc::MemoryState::Inaccessible),
+ .m_device_disable_merge_left_count = 0,
+ .m_device_disable_merge_right_count = 0,
+ .m_ipc_lock_count = 0,
+ .m_device_use_count = 0,
+ .m_ipc_disable_merge_count = 0,
+ .m_permission = KMemoryPermission::None,
+ .m_attribute = KMemoryAttribute::None,
+ .m_original_permission = KMemoryPermission::None,
+ .m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None,
+ };
+ out_page_info->flags = 0;
+
+ R_SUCCEED();
+ }
+
+ // Otherwise, lock the table and query.
+ KScopedLightLock lk(m_general_lock);
+ R_RETURN(this->QueryInfoImpl(out_info, out_page_info, addr));
+}
+
+Result KPageTableBase::QueryPhysicalAddress(Svc::lp64::PhysicalMemoryInfo* out,
+ KProcessAddress address) const {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Align the address down to page size.
+ address = Common::AlignDown(GetInteger(address), PageSize);
+
+ // Verify that we can query the address.
+ KMemoryInfo info;
+ Svc::PageInfo page_info;
+ R_TRY(this->QueryInfoImpl(std::addressof(info), std::addressof(page_info), address));
+
+ // Check the memory state.
+ R_TRY(this->CheckMemoryState(info, KMemoryState::FlagCanQueryPhysical,
+ KMemoryState::FlagCanQueryPhysical,
+ KMemoryPermission::UserReadExecute, KMemoryPermission::UserRead,
+ KMemoryAttribute::None, KMemoryAttribute::None));
+
+ // Prepare to traverse.
+ KPhysicalAddress phys_addr;
+ size_t phys_size;
+
+ KProcessAddress virt_addr = info.GetAddress();
+ KProcessAddress end_addr = info.GetEndAddress();
+
+ // Perform traversal.
+ {
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ bool traverse_valid =
+ m_impl->BeginTraversal(std::addressof(next_entry), std::addressof(context), virt_addr);
+ R_UNLESS(traverse_valid, ResultInvalidCurrentMemory);
+
+ // Set tracking variables.
+ phys_addr = next_entry.phys_addr;
+ phys_size = next_entry.block_size - (GetInteger(phys_addr) & (next_entry.block_size - 1));
+
+ // Iterate.
+ while (true) {
+ // Continue the traversal.
+ traverse_valid =
+ m_impl->ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ if (!traverse_valid) {
+ break;
+ }
+
+ if (next_entry.phys_addr != (phys_addr + phys_size)) {
+ // Check if we're done.
+ if (virt_addr <= address && address <= virt_addr + phys_size - 1) {
+ break;
+ }
+
+ // Advance.
+ phys_addr = next_entry.phys_addr;
+ virt_addr += next_entry.block_size;
+ phys_size =
+ next_entry.block_size - (GetInteger(phys_addr) & (next_entry.block_size - 1));
+ } else {
+ phys_size += next_entry.block_size;
+ }
+
+ // Check if we're done.
+ if (end_addr < virt_addr + phys_size) {
+ break;
+ }
+ }
+ ASSERT(virt_addr <= address && address <= virt_addr + phys_size - 1);
+
+ // Ensure we use the right size.
+ if (end_addr < virt_addr + phys_size) {
+ phys_size = end_addr - virt_addr;
+ }
+ }
+
+ // Set the output.
+ out->physical_address = GetInteger(phys_addr);
+ out->virtual_address = GetInteger(virt_addr);
+ out->size = phys_size;
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapIoImpl(KProcessAddress* out, PageLinkedList* page_list,
+ KPhysicalAddress phys_addr, size_t size, KMemoryState state,
+ KMemoryPermission perm) {
+ // Check pre-conditions.
+ ASSERT(this->IsLockedByCurrentThread());
+ ASSERT(Common::IsAligned(GetInteger(phys_addr), PageSize));
+ ASSERT(Common::IsAligned(size, PageSize));
+ ASSERT(size > 0);
+
+ R_UNLESS(phys_addr < phys_addr + size, ResultInvalidAddress);
+ const size_t num_pages = size / PageSize;
+ const KPhysicalAddress last = phys_addr + size - 1;
+
+ // Get region extents.
+ const KProcessAddress region_start = m_kernel_map_region_start;
+ const size_t region_size = m_kernel_map_region_end - m_kernel_map_region_start;
+ const size_t region_num_pages = region_size / PageSize;
+
+ ASSERT(this->CanContain(region_start, region_size, state));
+
+ // Locate the memory region.
+ const KMemoryRegion* region = KMemoryLayout::Find(m_kernel.MemoryLayout(), phys_addr);
+ R_UNLESS(region != nullptr, ResultInvalidAddress);
+
+ ASSERT(region->Contains(GetInteger(phys_addr)));
+
+ // Ensure that the region is mappable.
+ const bool is_rw = perm == KMemoryPermission::UserReadWrite;
+ while (true) {
+ // Check that the region exists.
+ R_UNLESS(region != nullptr, ResultInvalidAddress);
+
+ // Check the region attributes.
+ R_UNLESS(!region->IsDerivedFrom(KMemoryRegionType_Dram), ResultInvalidAddress);
+ R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_UserReadOnly) || !is_rw,
+ ResultInvalidAddress);
+ R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_NoUserMap), ResultInvalidAddress);
+
+ // Check if we're done.
+ if (GetInteger(last) <= region->GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ region = region->GetNext();
+    }
+
+ // Select an address to map at.
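+    // The physical range is first checked for overflow and for containing at least
+    // one alignment-sized window; then the kernel map region is searched for a free
+    // area to place the mapping.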
+ KProcessAddress addr = 0;
+ {
+ const size_t alignment = 4_KiB;
+ const KPhysicalAddress aligned_phys =
+ Common::AlignUp(GetInteger(phys_addr), alignment) + alignment - 1;
+ R_UNLESS(aligned_phys > phys_addr, ResultInvalidAddress);
+
+ const KPhysicalAddress last_aligned_paddr =
+ Common::AlignDown(GetInteger(last) + 1, alignment) - 1;
+ R_UNLESS((last_aligned_paddr <= last && aligned_phys <= last_aligned_paddr),
+ ResultInvalidAddress);
+
+ addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0,
+ this->GetNumGuardPages());
+ R_UNLESS(addr != 0, ResultOutOfMemory);
+ }
+
+ // Check that we can map IO here.
+ ASSERT(this->CanContain(addr, size, state));
+ R_ASSERT(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::None, KMemoryAttribute::None));
+
+ // Perform mapping operation.
+ const KPageProperties properties = {perm, state == KMemoryState::IoRegister, false,
+ DisableMergeAttribute::DisableHead};
+ R_TRY(this->Operate(page_list, addr, num_pages, phys_addr, true, properties, OperationType::Map,
+ false));
+
+ // Set the output address.
+ *out = addr;
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Map the io memory.
+ KProcessAddress addr;
+ R_TRY(this->MapIoImpl(std::addressof(addr), updater.GetPageList(), phys_addr, size,
+ KMemoryState::IoRegister, perm));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), addr, size / PageSize,
+ KMemoryState::IoRegister, perm, KMemoryAttribute::Locked,
+ KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // We successfully mapped the pages.
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr,
+ size_t size, Svc::MemoryMapping mapping,
+ Svc::MemoryPermission svc_perm) {
+ const size_t num_pages = size / PageSize;
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Validate the memory state.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), dst_address, size,
+ KMemoryState::All, KMemoryState::None, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Perform mapping operation.
+ const KMemoryPermission perm = ConvertToKMemoryPermission(svc_perm);
+ const KPageProperties properties = {perm, mapping == Svc::MemoryMapping::IoRegister,
+ mapping == Svc::MemoryMapping::Uncached,
+ DisableMergeAttribute::DisableHead};
+ R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, phys_addr, true, properties,
+ OperationType::Map, false));
+
+ // Update the blocks.
+ const auto state =
+ mapping == Svc::MemoryMapping::Memory ? KMemoryState::IoMemory : KMemoryState::IoRegister;
+ m_memory_block_manager.Update(
+ std::addressof(allocator), dst_address, num_pages, state, perm, KMemoryAttribute::Locked,
+ KMemoryBlockDisableMergeAttribute::Normal, KMemoryBlockDisableMergeAttribute::None);
+
+ // We successfully mapped the pages.
+ R_SUCCEED();
+}
+
+Result KPageTableBase::UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr,
+ size_t size, Svc::MemoryMapping mapping) {
+ const size_t num_pages = size / PageSize;
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Validate the memory state.
+ KMemoryState old_state;
+ KMemoryPermission old_perm;
+ KMemoryAttribute old_attr;
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(
+ std::addressof(old_state), std::addressof(old_perm), std::addressof(old_attr),
+ std::addressof(num_allocator_blocks), dst_address, size, KMemoryState::All,
+ mapping == Svc::MemoryMapping::Memory ? KMemoryState::IoMemory : KMemoryState::IoRegister,
+ KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All,
+ KMemoryAttribute::Locked));
+
+ // Validate that the region being unmapped corresponds to the physical range described.
+ {
+ // Get the impl.
+ auto& impl = this->GetImpl();
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ ASSERT(
+ impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_address));
+
+ // Check that the physical region matches.
+ R_UNLESS(next_entry.phys_addr == phys_addr, ResultInvalidMemoryRegion);
+
+ // Iterate.
+ for (size_t checked_size =
+ next_entry.block_size - (GetInteger(phys_addr) & (next_entry.block_size - 1));
+ checked_size < size; checked_size += next_entry.block_size) {
+ // Continue the traversal.
+ ASSERT(impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)));
+
+ // Check that the physical region matches.
+ R_UNLESS(next_entry.phys_addr == phys_addr + checked_size, ResultInvalidMemoryRegion);
+ }
+ }
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // If the region being unmapped is Memory, synchronize.
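+    // Synchronizing reprotects the range as uncached, so that no new dirty lines can
+    // appear while the lock is dropped, and then flushes the data cache.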
+ if (mapping == Svc::MemoryMapping::Memory) {
+ // Change the region to be uncached.
+ const KPageProperties properties = {old_perm, false, true, DisableMergeAttribute::None};
+ R_ASSERT(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false, properties,
+ OperationType::ChangePermissionsAndRefresh, false));
+
+ // Temporarily unlock ourselves, so that other operations can occur while we flush the
+ // region.
+ m_general_lock.Unlock();
+ SCOPE_EXIT({ m_general_lock.Lock(); });
+
+ // Flush the region.
+ R_ASSERT(FlushDataCache(dst_address, size));
+ }
+
+ // Perform the unmap.
+ const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_ASSERT(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false,
+ unmap_properties, OperationType::Unmap, false));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), dst_address, num_pages,
+ KMemoryState::Free, KMemoryPermission::None,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
+ ASSERT(Common::IsAligned(GetInteger(phys_addr), PageSize));
+ ASSERT(Common::IsAligned(size, PageSize));
+ ASSERT(size > 0);
+ R_UNLESS(phys_addr < phys_addr + size, ResultInvalidAddress);
+ const size_t num_pages = size / PageSize;
+ const KPhysicalAddress last = phys_addr + size - 1;
+
+ // Get region extents.
+ const KProcessAddress region_start = this->GetRegionAddress(KMemoryState::Static);
+ const size_t region_size = this->GetRegionSize(KMemoryState::Static);
+ const size_t region_num_pages = region_size / PageSize;
+
+ // Locate the memory region.
+ const KMemoryRegion* region = KMemoryLayout::Find(m_kernel.MemoryLayout(), phys_addr);
+ R_UNLESS(region != nullptr, ResultInvalidAddress);
+
+ ASSERT(region->Contains(GetInteger(phys_addr)));
+ R_UNLESS(GetInteger(last) <= region->GetLastAddress(), ResultInvalidAddress);
+
+ // Check the region attributes.
+ const bool is_rw = perm == KMemoryPermission::UserReadWrite;
+ R_UNLESS(region->IsDerivedFrom(KMemoryRegionType_Dram), ResultInvalidAddress);
+ R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_NoUserMap), ResultInvalidAddress);
+ R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_UserReadOnly) || !is_rw,
+ ResultInvalidAddress);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Select an address to map at.
+ KProcessAddress addr = 0;
+ {
+ const size_t alignment = 4_KiB;
+ const KPhysicalAddress aligned_phys =
+ Common::AlignUp(GetInteger(phys_addr), alignment) + alignment - 1;
+ R_UNLESS(aligned_phys > phys_addr, ResultInvalidAddress);
+
+ const KPhysicalAddress last_aligned_paddr =
+ Common::AlignDown(GetInteger(last) + 1, alignment) - 1;
+ R_UNLESS((last_aligned_paddr <= last && aligned_phys <= last_aligned_paddr),
+ ResultInvalidAddress);
+
+ addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0,
+ this->GetNumGuardPages());
+ R_UNLESS(addr != 0, ResultOutOfMemory);
+ }
+
+ // Check that we can map static here.
+ ASSERT(this->CanContain(addr, size, KMemoryState::Static));
+ R_ASSERT(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::None, KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Perform mapping operation.
+ const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
+ R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, phys_addr, true, properties,
+ OperationType::Map, false));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, KMemoryState::Static,
+ perm, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // We successfully mapped the pages.
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapRegion(KMemoryRegionType region_type, KMemoryPermission perm) {
+ // Get the memory region.
+ const KMemoryRegion* region =
+ m_kernel.MemoryLayout().GetPhysicalMemoryRegionTree().FindFirstDerived(region_type);
+ R_UNLESS(region != nullptr, ResultOutOfRange);
+
+ // Check that the region is valid.
+ ASSERT(region->GetEndAddress() != 0);
+
+ // Map the region.
+    R_TRY_CATCH(this->MapStatic(region->GetAddress(), region->GetSize(), perm)) {
+        R_CONVERT(ResultInvalidAddress, ResultOutOfRange)
+    } R_END_TRY_CATCH;
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+ KPhysicalAddress phys_addr, bool is_pa_valid,
+ KProcessAddress region_start, size_t region_num_pages,
+ KMemoryState state, KMemoryPermission perm) {
+ ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize);
+
+ // Ensure this is a valid map request.
+ R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state),
+ ResultInvalidCurrentMemory);
+ R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Find a random address to map at.
+ KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment,
+ 0, this->GetNumGuardPages());
+ R_UNLESS(addr != 0, ResultOutOfMemory);
+ ASSERT(Common::IsAligned(GetInteger(addr), alignment));
+ ASSERT(this->CanContain(addr, num_pages * PageSize, state));
+ R_ASSERT(this->CheckMemoryState(
+ addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::None, KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Perform mapping operation.
+ if (is_pa_valid) {
+ const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
+ R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, phys_addr, true, properties,
+ OperationType::Map, false));
+ } else {
+ R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), addr, num_pages, perm));
+ }
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // We successfully mapped the pages.
+ *out_addr = addr;
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
+ KMemoryPermission perm) {
+ // Check that the map is in range.
+ const size_t size = num_pages * PageSize;
+ R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check the memory state.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+ KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Map the pages.
+ R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), address, num_pages, perm));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) {
+ // Check that the unmap is in range.
+ const size_t size = num_pages * PageSize;
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check the memory state.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+ KMemoryState::All, state, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::All,
+ KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Perform the unmap.
+ const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), address, num_pages, 0, false, unmap_properties,
+ OperationType::Unmap, false));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
+ KProcessAddress region_start, size_t region_num_pages,
+ KMemoryState state, KMemoryPermission perm) {
+ ASSERT(!this->IsLockedByCurrentThread());
+
+ // Ensure this is a valid map request.
+ const size_t num_pages = pg.GetNumPages();
+ R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state),
+ ResultInvalidCurrentMemory);
+ R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Find a random address to map at.
+ KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, PageSize,
+ 0, this->GetNumGuardPages());
+ R_UNLESS(addr != 0, ResultOutOfMemory);
+ ASSERT(this->CanContain(addr, num_pages * PageSize, state));
+ R_ASSERT(this->CheckMemoryState(
+ addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::None, KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Perform mapping operation.
+ const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
+ R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // We successfully mapped the pages.
+ *out_addr = addr;
+ R_SUCCEED();
+}
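+
+// NOTE (illustrative): FindFreeArea returns a null address when no gap of
+// num_pages pages (plus GetNumGuardPages() of padding) exists in the region,
+// which is why the R_UNLESS above maps a zero result to ResultOutOfMemory.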
+
+Result KPageTableBase::MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMemoryState state,
+ KMemoryPermission perm) {
+ ASSERT(!this->IsLockedByCurrentThread());
+
+ // Ensure this is a valid map request.
+ const size_t num_pages = pg.GetNumPages();
+ const size_t size = num_pages * PageSize;
+ R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check if state allows us to map.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), addr, size,
+ KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Perform mapping operation.
+ const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
+ R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // We successfully mapped the pages.
+ R_SUCCEED();
+}
+
+Result KPageTableBase::UnmapPageGroup(KProcessAddress address, const KPageGroup& pg,
+ KMemoryState state) {
+ ASSERT(!this->IsLockedByCurrentThread());
+
+ // Ensure this is a valid unmap request.
+ const size_t num_pages = pg.GetNumPages();
+ const size_t size = num_pages * PageSize;
+ R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check if state allows us to unmap.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+ KMemoryState::All, state, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::All,
+ KMemoryAttribute::None));
+
+ // Check that the page group is valid.
+ R_UNLESS(this->IsValidPageGroup(pg, address, num_pages), ResultInvalidCurrentMemory);
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Perform unmapping operation.
+ const KPageProperties properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), address, num_pages, 0, false, properties,
+ OperationType::Unmap, false));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address,
+ size_t num_pages, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr) {
+ // Ensure that the page group isn't null.
+ ASSERT(out != nullptr);
+
+ // Make sure that the region we're mapping is valid for the table.
+ const size_t size = num_pages * PageSize;
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check if state allows us to create the group.
+ R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted,
+ state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
+ attr_mask, attr));
+
+ // Create a new page group for the region.
+ R_TRY(this->MakePageGroup(*out, address, num_pages));
+
+ // Open a new reference to the pages in the group.
+ out->Open();
+
+ R_SUCCEED();
+}
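+
+// NOTE (illustrative): a typical caller pairs the Open above with a Close once
+// it is done with the group; a minimal usage sketch (hypothetical call site,
+// argument names assumed):
+//   KPageGroup pg(m_kernel, block_info_manager);
+//   R_TRY(page_table.MakeAndOpenPageGroup(
+//       std::addressof(pg), address, num_pages, KMemoryState::All, state,
+//       KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All,
+//       KMemoryAttribute::None));
+//   SCOPE_EXIT({ pg.Close(); });
+// The extra reference keeps the physical pages alive while they are in use.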
+
+Result KPageTableBase::InvalidateProcessDataCache(KProcessAddress address, size_t size) {
+ // Check that the region is in range.
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check the memory state.
+ R_TRY(this->CheckMemoryStateContiguous(
+ address, size, KMemoryState::FlagReferenceCounted, KMemoryState::FlagReferenceCounted,
+ KMemoryPermission::UserReadWrite, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::Uncached, KMemoryAttribute::None));
+
+ // Get the impl.
+ auto& impl = this->GetImpl();
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ bool traverse_valid =
+ impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), address);
+ R_UNLESS(traverse_valid, ResultInvalidCurrentMemory);
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_addr = next_entry.phys_addr;
+ size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+ size_t tot_size = cur_size;
+
+ // Iterate.
+ while (tot_size < size) {
+ // Continue the traversal.
+ traverse_valid =
+ impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ R_UNLESS(traverse_valid, ResultInvalidCurrentMemory);
+
+ if (next_entry.phys_addr != (cur_addr + cur_size)) {
+ // Check that the pages are linearly mapped.
+ R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+ // Invalidate the block.
+ if (cur_size > 0) {
+ // NOTE: Nintendo does not check the result of invalidation.
+ InvalidateDataCache(GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size);
+ }
+
+ // Advance.
+ cur_addr = next_entry.phys_addr;
+ cur_size = next_entry.block_size;
+ } else {
+ cur_size += next_entry.block_size;
+ }
+
+ tot_size += next_entry.block_size;
+ }
+
+ // Ensure we use the right size for the last block.
+ if (tot_size > size) {
+ cur_size -= (tot_size - size);
+ }
+
+ // Check that the last block is linearly mapped.
+ R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+ // Invalidate the last block.
+ if (cur_size > 0) {
+ // NOTE: Nintendo does not check the result of invalidation.
+ InvalidateDataCache(GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size);
+ }
+
+ R_SUCCEED();
+}
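+
+// NOTE (illustrative): the first-block arithmetic clamps cur_size to the end
+// of the containing block. A minimal worked example, assuming a 0x4000-byte
+// block holding phys_addr 0x80001234:
+//   cur_size = 0x4000 - (0x80001234 & 0x3FFF) = 0x4000 - 0x1234 = 0x2dcc
+// so the first invalidation covers only the tail of that block, and tot_size
+// then grows by a full block_size per traversal step.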
+
+Result KPageTableBase::InvalidateCurrentProcessDataCache(KProcessAddress address, size_t size) {
+ // Check pre-condition: this is being called on the current process.
+ ASSERT(this == std::addressof(GetCurrentProcess(m_kernel).GetPageTable().GetBasePageTable()));
+
+ // Check that the region is in range.
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check the memory state.
+ R_TRY(this->CheckMemoryStateContiguous(
+ address, size, KMemoryState::FlagReferenceCounted, KMemoryState::FlagReferenceCounted,
+ KMemoryPermission::UserReadWrite, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::Uncached, KMemoryAttribute::None));
+
+ // Invalidate the data cache.
+ R_RETURN(InvalidateDataCache(address, size));
+}
+
+Result KPageTableBase::ReadDebugMemory(KProcessAddress dst_address, KProcessAddress src_address,
+ size_t size) {
+ // Lightly validate the region is in range.
+ R_UNLESS(this->Contains(src_address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Require that the memory either be user readable or debuggable.
+ const bool can_read = R_SUCCEEDED(this->CheckMemoryStateContiguous(
+ src_address, size, KMemoryState::None, KMemoryState::None, KMemoryPermission::UserRead,
+ KMemoryPermission::UserRead, KMemoryAttribute::None, KMemoryAttribute::None));
+ if (!can_read) {
+ const bool can_debug = R_SUCCEEDED(this->CheckMemoryStateContiguous(
+ src_address, size, KMemoryState::FlagCanDebug, KMemoryState::FlagCanDebug,
+ KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryAttribute::None));
+ R_UNLESS(can_debug, ResultInvalidCurrentMemory);
+ }
+
+ // Get the impl.
+ auto& impl = this->GetImpl();
+    auto& dst_memory = GetCurrentMemory(m_kernel);
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ bool traverse_valid =
+ impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), src_address);
+ R_UNLESS(traverse_valid, ResultInvalidCurrentMemory);
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_addr = next_entry.phys_addr;
+ size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+ size_t tot_size = cur_size;
+
+ auto PerformCopy = [&]() -> Result {
+ // Ensure the address is linear mapped.
+ R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+ // Copy as much aligned data as we can.
+ if (cur_size >= sizeof(u32)) {
+ const size_t copy_size = Common::AlignDown(cur_size, sizeof(u32));
+ const void* copy_src = GetLinearMappedVirtualPointer(m_kernel, cur_addr);
+ FlushDataCache(copy_src, copy_size);
+ R_UNLESS(dst_memory.WriteBlock(dst_address, copy_src, copy_size), ResultInvalidPointer);
+
+ dst_address += copy_size;
+ cur_addr += copy_size;
+ cur_size -= copy_size;
+ }
+
+ // Copy remaining data.
+ if (cur_size > 0) {
+ const void* copy_src = GetLinearMappedVirtualPointer(m_kernel, cur_addr);
+ FlushDataCache(copy_src, cur_size);
+ R_UNLESS(dst_memory.WriteBlock(dst_address, copy_src, cur_size), ResultInvalidPointer);
+ }
+
+ R_SUCCEED();
+ };
+
+ // Iterate.
+ while (tot_size < size) {
+ // Continue the traversal.
+ traverse_valid =
+ impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ ASSERT(traverse_valid);
+
+ if (next_entry.phys_addr != (cur_addr + cur_size)) {
+ // Perform copy.
+ R_TRY(PerformCopy());
+
+ // Advance.
+ dst_address += cur_size;
+
+ cur_addr = next_entry.phys_addr;
+ cur_size = next_entry.block_size;
+ } else {
+ cur_size += next_entry.block_size;
+ }
+
+ tot_size += next_entry.block_size;
+ }
+
+ // Ensure we use the right size for the last block.
+ if (tot_size > size) {
+ cur_size -= (tot_size - size);
+ }
+
+ // Perform copy for the last block.
+ R_TRY(PerformCopy());
+
+ R_SUCCEED();
+}
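+
+// NOTE (illustrative): PerformCopy splits each physical run into a u32-aligned
+// bulk copy plus a byte tail. For a 0x13-byte run, AlignDown(0x13, 4) = 0x10
+// bytes go through the first WriteBlock and 3 bytes through the second;
+// dst_address advances by copy_size inside the lambda and by the leftover
+// cur_size in the loop, 0x13 bytes in total.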
+
+Result KPageTableBase::WriteDebugMemory(KProcessAddress dst_address, KProcessAddress src_address,
+ size_t size) {
+ // Lightly validate the region is in range.
+ R_UNLESS(this->Contains(dst_address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Require that the memory either be user writable or debuggable.
+    const bool can_write = R_SUCCEEDED(this->CheckMemoryStateContiguous(
+        dst_address, size, KMemoryState::None, KMemoryState::None, KMemoryPermission::UserReadWrite,
+        KMemoryPermission::UserReadWrite, KMemoryAttribute::None, KMemoryAttribute::None));
+    if (!can_write) {
+ const bool can_debug = R_SUCCEEDED(this->CheckMemoryStateContiguous(
+ dst_address, size, KMemoryState::FlagCanDebug, KMemoryState::FlagCanDebug,
+ KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryAttribute::None));
+ R_UNLESS(can_debug, ResultInvalidCurrentMemory);
+ }
+
+ // Get the impl.
+ auto& impl = this->GetImpl();
+    auto& src_memory = GetCurrentMemory(m_kernel);
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ bool traverse_valid =
+ impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_address);
+ R_UNLESS(traverse_valid, ResultInvalidCurrentMemory);
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_addr = next_entry.phys_addr;
+ size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+ size_t tot_size = cur_size;
+
+ auto PerformCopy = [&]() -> Result {
+ // Ensure the address is linear mapped.
+ R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+ // Copy as much aligned data as we can.
+ if (cur_size >= sizeof(u32)) {
+ const size_t copy_size = Common::AlignDown(cur_size, sizeof(u32));
+ void* copy_dst = GetLinearMappedVirtualPointer(m_kernel, cur_addr);
+ R_UNLESS(src_memory.ReadBlock(src_address, copy_dst, copy_size),
+ ResultInvalidCurrentMemory);
+
+ StoreDataCache(GetLinearMappedVirtualPointer(m_kernel, cur_addr), copy_size);
+
+ src_address += copy_size;
+ cur_addr += copy_size;
+ cur_size -= copy_size;
+ }
+
+ // Copy remaining data.
+ if (cur_size > 0) {
+ void* copy_dst = GetLinearMappedVirtualPointer(m_kernel, cur_addr);
+ R_UNLESS(src_memory.ReadBlock(src_address, copy_dst, cur_size),
+ ResultInvalidCurrentMemory);
+
+ StoreDataCache(GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size);
+ }
+
+ R_SUCCEED();
+ };
+
+ // Iterate.
+ while (tot_size < size) {
+ // Continue the traversal.
+ traverse_valid =
+ impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ ASSERT(traverse_valid);
+
+ if (next_entry.phys_addr != (cur_addr + cur_size)) {
+ // Perform copy.
+ R_TRY(PerformCopy());
+
+ // Advance.
+ src_address += cur_size;
+
+ cur_addr = next_entry.phys_addr;
+ cur_size = next_entry.block_size;
+ } else {
+ cur_size += next_entry.block_size;
+ }
+
+ tot_size += next_entry.block_size;
+ }
+
+ // Ensure we use the right size for the last block.
+ if (tot_size > size) {
+ cur_size -= (tot_size - size);
+ }
+
+ // Perform copy for the last block.
+ R_TRY(PerformCopy());
+
+ // Invalidate the instruction cache, as this svc allows modifying executable pages.
+ InvalidateInstructionCache(m_system, dst_address, size);
+
+ R_SUCCEED();
+}
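+
+// NOTE (illustrative): the write path mirrors ReadDebugMemory with the cache
+// maintenance inverted: StoreDataCache runs after each ReadBlock fills a
+// block, and the instruction cache for the whole range is invalidated once at
+// the end, since a debugger may have patched executable pages.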
+
+Result KPageTableBase::ReadIoMemoryImpl(KProcessAddress dst_addr, KPhysicalAddress phys_addr,
+ size_t size, KMemoryState state) {
+ // Check pre-conditions.
+ ASSERT(this->IsLockedByCurrentThread());
+
+ // Determine the mapping extents.
+ const KPhysicalAddress map_start = Common::AlignDown(GetInteger(phys_addr), PageSize);
+ const KPhysicalAddress map_end = Common::AlignUp(GetInteger(phys_addr) + size, PageSize);
+ const size_t map_size = map_end - map_start;
+
+ // Get the memory reference to write into.
+ auto& dst_memory = GetCurrentMemory(m_kernel);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Temporarily map the io memory.
+ KProcessAddress io_addr;
+ R_TRY(this->MapIoImpl(std::addressof(io_addr), updater.GetPageList(), map_start, map_size,
+ state, KMemoryPermission::UserRead));
+
+ // Ensure we unmap the io memory when we're done with it.
+ const KPageProperties unmap_properties =
+ KPageProperties{KMemoryPermission::None, false, false, DisableMergeAttribute::None};
+ SCOPE_EXIT({
+ R_ASSERT(this->Operate(updater.GetPageList(), io_addr, map_size / PageSize, 0, false,
+ unmap_properties, OperationType::Unmap, true));
+ });
+
+ // Read the memory.
+ const KProcessAddress read_addr = io_addr + (GetInteger(phys_addr) & (PageSize - 1));
+    R_UNLESS(dst_memory.CopyBlock(dst_addr, read_addr, size), ResultInvalidPointer);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::WriteIoMemoryImpl(KPhysicalAddress phys_addr, KProcessAddress src_addr,
+ size_t size, KMemoryState state) {
+ // Check pre-conditions.
+ ASSERT(this->IsLockedByCurrentThread());
+
+ // Determine the mapping extents.
+ const KPhysicalAddress map_start = Common::AlignDown(GetInteger(phys_addr), PageSize);
+ const KPhysicalAddress map_end = Common::AlignUp(GetInteger(phys_addr) + size, PageSize);
+ const size_t map_size = map_end - map_start;
+
+ // Get the memory reference to read from.
+ auto& src_memory = GetCurrentMemory(m_kernel);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Temporarily map the io memory.
+ KProcessAddress io_addr;
+ R_TRY(this->MapIoImpl(std::addressof(io_addr), updater.GetPageList(), map_start, map_size,
+ state, KMemoryPermission::UserReadWrite));
+
+ // Ensure we unmap the io memory when we're done with it.
+ const KPageProperties unmap_properties =
+ KPageProperties{KMemoryPermission::None, false, false, DisableMergeAttribute::None};
+ SCOPE_EXIT({
+ R_ASSERT(this->Operate(updater.GetPageList(), io_addr, map_size / PageSize, 0, false,
+ unmap_properties, OperationType::Unmap, true));
+ });
+
+ // Write the memory.
+ const KProcessAddress write_addr = io_addr + (GetInteger(phys_addr) & (PageSize - 1));
+ R_UNLESS(src_memory.CopyBlock(write_addr, src_addr, size), ResultInvalidPointer);
+
+ R_SUCCEED();
+}
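+
+// NOTE (illustrative): both io helpers temporarily map the page-aligned extent
+// and address the payload at its sub-page offset. A minimal worked example,
+// assuming phys_addr = 0x50001230, size = 0x20, and 4 KiB pages:
+//   map_start = 0x50001000, map_end = 0x50002000, map_size = 0x1000
+//   write_addr = io_addr + (0x50001230 & 0xfff) = io_addr + 0x230
+// and the SCOPE_EXIT unmap runs whether or not the copy succeeds.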
+
+Result KPageTableBase::ReadDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address,
+ size_t size, KMemoryState state) {
+ // Lightly validate the range before doing anything else.
+ R_UNLESS(this->Contains(src_address, size), ResultInvalidCurrentMemory);
+
+ // We need to lock both this table, and the current process's table, so set up some aliases.
+ KPageTableBase& src_page_table = *this;
+ KPageTableBase& dst_page_table = GetCurrentProcess(m_kernel).GetPageTable().GetBasePageTable();
+
+ // Acquire the table locks.
+ KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
+
+ // Check that the desired range is readable io memory.
+ R_TRY(this->CheckMemoryStateContiguous(src_address, size, KMemoryState::All, state,
+ KMemoryPermission::UserRead, KMemoryPermission::UserRead,
+ KMemoryAttribute::None, KMemoryAttribute::None));
+
+ // Read the memory.
+ KProcessAddress dst = dst_address;
+ const KProcessAddress last_address = src_address + size - 1;
+ while (src_address <= last_address) {
+ // Get the current physical address.
+ KPhysicalAddress phys_addr;
+ ASSERT(src_page_table.GetPhysicalAddressLocked(std::addressof(phys_addr), src_address));
+
+ // Determine the current read size.
+ const size_t cur_size =
+ std::min<size_t>(last_address - src_address + 1,
+ Common::AlignDown(GetInteger(src_address) + PageSize, PageSize) -
+ GetInteger(src_address));
+
+ // Read.
+ R_TRY(dst_page_table.ReadIoMemoryImpl(dst, phys_addr, cur_size, state));
+
+ // Advance.
+ src_address += cur_size;
+ dst += cur_size;
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::WriteDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address,
+ size_t size, KMemoryState state) {
+ // Lightly validate the range before doing anything else.
+ R_UNLESS(this->Contains(dst_address, size), ResultInvalidCurrentMemory);
+
+ // We need to lock both this table, and the current process's table, so set up some aliases.
+ KPageTableBase& src_page_table = *this;
+ KPageTableBase& dst_page_table = GetCurrentProcess(m_kernel).GetPageTable().GetBasePageTable();
+
+ // Acquire the table locks.
+ KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
+
+ // Check that the desired range is writable io memory.
+ R_TRY(this->CheckMemoryStateContiguous(
+ dst_address, size, KMemoryState::All, state, KMemoryPermission::UserReadWrite,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::None, KMemoryAttribute::None));
+
+    // Write the memory.
+ KProcessAddress src = src_address;
+ const KProcessAddress last_address = dst_address + size - 1;
+ while (dst_address <= last_address) {
+ // Get the current physical address.
+ KPhysicalAddress phys_addr;
+ ASSERT(src_page_table.GetPhysicalAddressLocked(std::addressof(phys_addr), dst_address));
+
+        // Determine the current write size.
+ const size_t cur_size =
+ std::min<size_t>(last_address - dst_address + 1,
+ Common::AlignDown(GetInteger(dst_address) + PageSize, PageSize) -
+ GetInteger(dst_address));
+
+        // Write.
+ R_TRY(dst_page_table.WriteIoMemoryImpl(phys_addr, src, cur_size, state));
+
+ // Advance.
+ dst_address += cur_size;
+ src += cur_size;
+ }
+
+ R_SUCCEED();
+}
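+
+// NOTE (illustrative): the debug io loops advance at most one page per
+// iteration; the cur_size expression clamps each chunk to the next page
+// boundary. A minimal worked example, assuming dst_address = 0x1234 with
+// 0x3000 bytes remaining and 4 KiB pages:
+//   AlignDown(0x1234 + 0x1000, 0x1000) - 0x1234 = 0x2000 - 0x1234 = 0xdcc
+// so the first chunk ends exactly at 0x2000 and later chunks are page-sized.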
+
+Result KPageTableBase::LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address,
+ size_t size, KMemoryPermission perm,
+ bool is_aligned, bool check_heap) {
+ // Lightly validate the range before doing anything else.
+ const size_t num_pages = size / PageSize;
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check the memory state.
+ const KMemoryState test_state =
+ (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap) |
+ (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None);
+ size_t num_allocator_blocks;
+ KMemoryState old_state;
+ R_TRY(this->CheckMemoryState(std::addressof(old_state), nullptr, nullptr,
+ std::addressof(num_allocator_blocks), address, size, test_state,
+ test_state, perm, perm,
+ KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked,
+ KMemoryAttribute::None, KMemoryAttribute::DeviceShared));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // Update the memory blocks.
+ m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
+ &KMemoryBlock::ShareToDevice, KMemoryPermission::None);
+
+ // Set whether the locked memory was io.
+ *out_is_io =
+ static_cast<Svc::MemoryState>(old_state & KMemoryState::Mask) == Svc::MemoryState::Io;
+
+ R_SUCCEED();
+}
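+
+// NOTE (illustrative): UpdateLock applies the given member-function pointer
+// (&KMemoryBlock::ShareToDevice here) to every block in the range; the
+// DeviceShared attribute it raises is what the matching
+// UnlockForDeviceAddressSpace later drops via &KMemoryBlock::UnshareToDevice.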
+
+Result KPageTableBase::LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size,
+ bool check_heap) {
+ // Lightly validate the range before doing anything else.
+ const size_t num_pages = size / PageSize;
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check the memory state.
+ const KMemoryState test_state =
+ KMemoryState::FlagCanDeviceMap |
+ (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None);
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryStateContiguous(
+ std::addressof(num_allocator_blocks), address, size, test_state, test_state,
+ KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // Update the memory blocks.
+ const KMemoryBlockManager::MemoryBlockLockFunction lock_func =
+ m_enable_device_address_space_merge
+ ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare
+ : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight;
+ m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func,
+ KMemoryPermission::None);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) {
+ // Lightly validate the range before doing anything else.
+ const size_t num_pages = size / PageSize;
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check the memory state.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryStateContiguous(
+ std::addressof(num_allocator_blocks), address, size, KMemoryState::FlagCanDeviceMap,
+ KMemoryState::FlagCanDeviceMap, KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // Update the memory blocks.
+ m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
+ &KMemoryBlock::UnshareToDevice, KMemoryPermission::None);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size) {
+ // Lightly validate the range before doing anything else.
+ const size_t num_pages = size / PageSize;
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check memory state.
+ size_t allocator_num_blocks = 0;
+ R_TRY(this->CheckMemoryStateContiguous(
+ std::addressof(allocator_num_blocks), address, size, KMemoryState::FlagCanDeviceMap,
+ KMemoryState::FlagCanDeviceMap, KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
+
+ // Create an update allocator for the region.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, allocator_num_blocks);
+ R_TRY(allocator_result);
+
+ // Update the memory blocks.
+ m_memory_block_manager.UpdateLock(
+ std::addressof(allocator), address, num_pages,
+ m_enable_device_address_space_merge
+ ? &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshare
+ : &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshareRight,
+ KMemoryPermission::None);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange* out,
+ KProcessAddress address, size_t size,
+ KMemoryPermission perm,
+ bool is_aligned) {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Get the range.
+ const KMemoryState test_state =
+ (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap);
+ R_TRY(this->GetContiguousMemoryRangeWithState(
+ out, address, size, test_state, test_state, perm, perm,
+ KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked, KMemoryAttribute::None));
+
+ // We got the range, so open it.
+ out->Open();
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::OpenMemoryRangeForUnmapDeviceAddressSpace(MemoryRange* out,
+ KProcessAddress address,
+ size_t size) {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Get the range.
+ R_TRY(this->GetContiguousMemoryRangeWithState(
+ out, address, size, KMemoryState::FlagCanDeviceMap, KMemoryState::FlagCanDeviceMap,
+ KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
+
+ // We got the range, so open it.
+ out->Open();
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address,
+ size_t size) {
+ R_RETURN(this->LockMemoryAndOpen(
+ nullptr, out, address, size, KMemoryState::FlagCanIpcUserBuffer,
+ KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::All,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None,
+ static_cast<KMemoryPermission>(KMemoryPermission::NotMapped |
+ KMemoryPermission::KernelReadWrite),
+ KMemoryAttribute::Locked));
+}
+
+Result KPageTableBase::UnlockForIpcUserBuffer(KProcessAddress address, size_t size) {
+ R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanIpcUserBuffer,
+ KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::All,
+ KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::Locked, nullptr));
+}
+
+Result KPageTableBase::LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size,
+ KMemoryPermission perm) {
+ R_RETURN(this->LockMemoryAndOpen(out, nullptr, address, size, KMemoryState::FlagCanTransfer,
+ KMemoryState::FlagCanTransfer, KMemoryPermission::All,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
+ KMemoryAttribute::None, perm, KMemoryAttribute::Locked));
+}
+
+Result KPageTableBase::UnlockForTransferMemory(KProcessAddress address, size_t size,
+ const KPageGroup& pg) {
+ R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanTransfer,
+ KMemoryState::FlagCanTransfer, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::All,
+ KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::Locked, std::addressof(pg)));
+}
+
+Result KPageTableBase::LockForCodeMemory(KPageGroup* out, KProcessAddress address, size_t size) {
+ R_RETURN(this->LockMemoryAndOpen(
+ out, nullptr, address, size, KMemoryState::FlagCanCodeMemory,
+ KMemoryState::FlagCanCodeMemory, KMemoryPermission::All, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::All, KMemoryAttribute::None,
+ static_cast<KMemoryPermission>(KMemoryPermission::NotMapped |
+ KMemoryPermission::KernelReadWrite),
+ KMemoryAttribute::Locked));
+}
+
+Result KPageTableBase::UnlockForCodeMemory(KProcessAddress address, size_t size,
+ const KPageGroup& pg) {
+ R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanCodeMemory,
+ KMemoryState::FlagCanCodeMemory, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::All,
+ KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::Locked, std::addressof(pg)));
+}
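+
+// NOTE (illustrative): the Lock*/Unlock* wrappers above all funnel into
+// LockMemoryAndOpen/UnlockMemory with different state and permission flags;
+// the Locked attribute set on lock is the attribute required and cleared on
+// unlock, and the page group captured at lock time is passed back on unlock,
+// presumably so the range can be validated against it before unlocking.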
+
+Result KPageTableBase::OpenMemoryRangeForProcessCacheOperation(MemoryRange* out,
+ KProcessAddress address,
+ size_t size) {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Get the range.
+ R_TRY(this->GetContiguousMemoryRangeWithState(
+ out, address, size, KMemoryState::FlagReferenceCounted, KMemoryState::FlagReferenceCounted,
+ KMemoryPermission::UserRead, KMemoryPermission::UserRead, KMemoryAttribute::Uncached,
+ KMemoryAttribute::None));
+
+ // We got the range, so open it.
+ out->Open();
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::CopyMemoryFromLinearToUser(
+ KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, KMemoryState src_state_mask,
+ KMemoryState src_state, KMemoryPermission src_test_perm, KMemoryAttribute src_attr_mask,
+ KMemoryAttribute src_attr) {
+ // Lightly validate the range before doing anything else.
+ R_UNLESS(this->Contains(src_addr, size), ResultInvalidCurrentMemory);
+
+ // Get the destination memory reference.
+ auto& dst_memory = GetCurrentMemory(m_kernel);
+
+ // Copy the memory.
+ {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check memory state.
+ R_TRY(this->CheckMemoryStateContiguous(
+ src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm,
+ src_attr_mask | KMemoryAttribute::Uncached, src_attr));
+
+ auto& impl = this->GetImpl();
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ bool traverse_valid =
+ impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), src_addr);
+ ASSERT(traverse_valid);
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_addr = next_entry.phys_addr;
+ size_t cur_size =
+ next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+ size_t tot_size = cur_size;
+
+ auto PerformCopy = [&]() -> Result {
+ // Ensure the address is linear mapped.
+ R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+ // Copy as much aligned data as we can.
+ if (cur_size >= sizeof(u32)) {
+ const size_t copy_size = Common::AlignDown(cur_size, sizeof(u32));
+ R_UNLESS(dst_memory.WriteBlock(dst_addr,
+ GetLinearMappedVirtualPointer(m_kernel, cur_addr),
+ copy_size),
+ ResultInvalidCurrentMemory);
+
+ dst_addr += copy_size;
+ cur_addr += copy_size;
+ cur_size -= copy_size;
+ }
+
+ // Copy remaining data.
+ if (cur_size > 0) {
+ R_UNLESS(dst_memory.WriteBlock(
+ dst_addr, GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size),
+ ResultInvalidCurrentMemory);
+ }
+
+ R_SUCCEED();
+ };
+
+ // Iterate.
+ while (tot_size < size) {
+ // Continue the traversal.
+ traverse_valid =
+ impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ ASSERT(traverse_valid);
+
+ if (next_entry.phys_addr != (cur_addr + cur_size)) {
+ // Perform copy.
+ R_TRY(PerformCopy());
+
+ // Advance.
+ dst_addr += cur_size;
+
+ cur_addr = next_entry.phys_addr;
+ cur_size = next_entry.block_size;
+ } else {
+ cur_size += next_entry.block_size;
+ }
+
+ tot_size += next_entry.block_size;
+ }
+
+ // Ensure we use the right size for the last block.
+ if (tot_size > size) {
+ cur_size -= (tot_size - size);
+ }
+
+ // Perform copy for the last block.
+ R_TRY(PerformCopy());
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::CopyMemoryFromLinearToKernel(
+ void* buffer, size_t size, KProcessAddress src_addr, KMemoryState src_state_mask,
+ KMemoryState src_state, KMemoryPermission src_test_perm, KMemoryAttribute src_attr_mask,
+ KMemoryAttribute src_attr) {
+ // Lightly validate the range before doing anything else.
+ R_UNLESS(this->Contains(src_addr, size), ResultInvalidCurrentMemory);
+
+ // Copy the memory.
+ {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check memory state.
+ R_TRY(this->CheckMemoryStateContiguous(
+ src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm,
+ src_attr_mask | KMemoryAttribute::Uncached, src_attr));
+
+ auto& impl = this->GetImpl();
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ bool traverse_valid =
+ impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), src_addr);
+ ASSERT(traverse_valid);
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_addr = next_entry.phys_addr;
+ size_t cur_size =
+ next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+ size_t tot_size = cur_size;
+
+ auto PerformCopy = [&]() -> Result {
+ // Ensure the address is linear mapped.
+ R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+ // Copy the data.
+ std::memcpy(buffer, GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size);
+
+ R_SUCCEED();
+ };
+
+ // Iterate.
+ while (tot_size < size) {
+ // Continue the traversal.
+ traverse_valid =
+ impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ ASSERT(traverse_valid);
+
+ if (next_entry.phys_addr != (cur_addr + cur_size)) {
+ // Perform copy.
+ R_TRY(PerformCopy());
+
+ // Advance.
+ buffer = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(buffer) + cur_size);
+
+ cur_addr = next_entry.phys_addr;
+ cur_size = next_entry.block_size;
+ } else {
+ cur_size += next_entry.block_size;
+ }
+
+ tot_size += next_entry.block_size;
+ }
+
+ // Ensure we use the right size for the last block.
+ if (tot_size > size) {
+ cur_size -= (tot_size - size);
+ }
+
+ // Perform copy for the last block.
+ R_TRY(PerformCopy());
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::CopyMemoryFromUserToLinear(
+ KProcessAddress dst_addr, size_t size, KMemoryState dst_state_mask, KMemoryState dst_state,
+ KMemoryPermission dst_test_perm, KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
+ KProcessAddress src_addr) {
+ // Lightly validate the range before doing anything else.
+ R_UNLESS(this->Contains(dst_addr, size), ResultInvalidCurrentMemory);
+
+ // Get the source memory reference.
+ auto& src_memory = GetCurrentMemory(m_kernel);
+
+ // Copy the memory.
+ {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check memory state.
+ R_TRY(this->CheckMemoryStateContiguous(
+ dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm,
+ dst_attr_mask | KMemoryAttribute::Uncached, dst_attr));
+
+ auto& impl = this->GetImpl();
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ bool traverse_valid =
+ impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_addr);
+ ASSERT(traverse_valid);
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_addr = next_entry.phys_addr;
+ size_t cur_size =
+ next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+ size_t tot_size = cur_size;
+
+ auto PerformCopy = [&]() -> Result {
+ // Ensure the address is linear mapped.
+ R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+ // Copy as much aligned data as we can.
+ if (cur_size >= sizeof(u32)) {
+ const size_t copy_size = Common::AlignDown(cur_size, sizeof(u32));
+ R_UNLESS(src_memory.ReadBlock(src_addr,
+ GetLinearMappedVirtualPointer(m_kernel, cur_addr),
+ copy_size),
+ ResultInvalidCurrentMemory);
+ src_addr += copy_size;
+ cur_addr += copy_size;
+ cur_size -= copy_size;
+ }
+
+ // Copy remaining data.
+ if (cur_size > 0) {
+ R_UNLESS(src_memory.ReadBlock(
+ src_addr, GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size),
+ ResultInvalidCurrentMemory);
+ }
+
+ R_SUCCEED();
+ };
+
+ // Iterate.
+ while (tot_size < size) {
+ // Continue the traversal.
+ traverse_valid =
+ impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ ASSERT(traverse_valid);
+
+ if (next_entry.phys_addr != (cur_addr + cur_size)) {
+ // Perform copy.
+ R_TRY(PerformCopy());
+
+ // Advance.
+ src_addr += cur_size;
+
+ cur_addr = next_entry.phys_addr;
+ cur_size = next_entry.block_size;
+ } else {
+ cur_size += next_entry.block_size;
+ }
+
+ tot_size += next_entry.block_size;
+ }
+
+ // Ensure we use the right size for the last block.
+ if (tot_size > size) {
+ cur_size -= (tot_size - size);
+ }
+
+ // Perform copy for the last block.
+ R_TRY(PerformCopy());
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size,
+ KMemoryState dst_state_mask,
+ KMemoryState dst_state,
+ KMemoryPermission dst_test_perm,
+ KMemoryAttribute dst_attr_mask,
+ KMemoryAttribute dst_attr, void* buffer) {
+ // Lightly validate the range before doing anything else.
+ R_UNLESS(this->Contains(dst_addr, size), ResultInvalidCurrentMemory);
+
+ // Copy the memory.
+ {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check memory state.
+ R_TRY(this->CheckMemoryStateContiguous(
+ dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm,
+ dst_attr_mask | KMemoryAttribute::Uncached, dst_attr));
+
+ auto& impl = this->GetImpl();
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ bool traverse_valid =
+ impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_addr);
+ ASSERT(traverse_valid);
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_addr = next_entry.phys_addr;
+ size_t cur_size =
+ next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+ size_t tot_size = cur_size;
+
+ auto PerformCopy = [&]() -> Result {
+ // Ensure the address is linear mapped.
+ R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+ // Copy the data.
+ std::memcpy(GetLinearMappedVirtualPointer(m_kernel, cur_addr), buffer, cur_size);
+
+ R_SUCCEED();
+ };
+
+ // Iterate.
+ while (tot_size < size) {
+ // Continue the traversal.
+ traverse_valid =
+ impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ ASSERT(traverse_valid);
+
+ if (next_entry.phys_addr != (cur_addr + cur_size)) {
+ // Perform copy.
+ R_TRY(PerformCopy());
+
+ // Advance.
+ buffer = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(buffer) + cur_size);
+
+ cur_addr = next_entry.phys_addr;
+ cur_size = next_entry.block_size;
+ } else {
+ cur_size += next_entry.block_size;
+ }
+
+ tot_size += next_entry.block_size;
+ }
+
+ // Ensure we use the right size for the last block.
+ if (tot_size > size) {
+ cur_size -= (tot_size - size);
+ }
+
+ // Perform copy for the last block.
+ R_TRY(PerformCopy());
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::CopyMemoryFromHeapToHeap(
+ KPageTableBase& dst_page_table, KProcessAddress dst_addr, size_t size,
+ KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm,
+ KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr,
+ KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm,
+ KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
+ // For convenience, alias this.
+ KPageTableBase& src_page_table = *this;
+
+ // Lightly validate the ranges before doing anything else.
+ R_UNLESS(src_page_table.Contains(src_addr, size), ResultInvalidCurrentMemory);
+ R_UNLESS(dst_page_table.Contains(dst_addr, size), ResultInvalidCurrentMemory);
+
+ // Copy the memory.
+ {
+ // Acquire the table locks.
+ KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
+
+ // Check memory state.
+ R_TRY(src_page_table.CheckMemoryStateContiguous(
+ src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm,
+ src_attr_mask | KMemoryAttribute::Uncached, src_attr));
+ R_TRY(dst_page_table.CheckMemoryStateContiguous(
+ dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm,
+ dst_attr_mask | KMemoryAttribute::Uncached, dst_attr));
+
+ // Get implementations.
+ auto& src_impl = src_page_table.GetImpl();
+ auto& dst_impl = dst_page_table.GetImpl();
+
+ // Prepare for traversal.
+ TraversalContext src_context;
+ TraversalContext dst_context;
+ TraversalEntry src_next_entry;
+ TraversalEntry dst_next_entry;
+ bool traverse_valid;
+
+ // Begin traversal.
+ traverse_valid = src_impl.BeginTraversal(std::addressof(src_next_entry),
+ std::addressof(src_context), src_addr);
+ ASSERT(traverse_valid);
+ traverse_valid = dst_impl.BeginTraversal(std::addressof(dst_next_entry),
+ std::addressof(dst_context), dst_addr);
+ ASSERT(traverse_valid);
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_src_block_addr = src_next_entry.phys_addr;
+ KPhysicalAddress cur_dst_block_addr = dst_next_entry.phys_addr;
+ size_t cur_src_size = src_next_entry.block_size -
+ (GetInteger(cur_src_block_addr) & (src_next_entry.block_size - 1));
+ size_t cur_dst_size = dst_next_entry.block_size -
+ (GetInteger(cur_dst_block_addr) & (dst_next_entry.block_size - 1));
+
+ // Adjust the initial block sizes.
+ src_next_entry.block_size = cur_src_size;
+ dst_next_entry.block_size = cur_dst_size;
+
+ // Before we get any crazier, succeed if there's nothing to do.
+ R_SUCCEED_IF(size == 0);
+
+ // We're going to manage dual traversal via an offset against the total size.
+ KPhysicalAddress cur_src_addr = cur_src_block_addr;
+ KPhysicalAddress cur_dst_addr = cur_dst_block_addr;
+ size_t cur_min_size = std::min<size_t>(cur_src_size, cur_dst_size);
+
+ // Iterate.
+ size_t ofs = 0;
+ while (ofs < size) {
+ // Determine how much we can copy this iteration.
+ const size_t cur_copy_size = std::min<size_t>(cur_min_size, size - ofs);
+
+ // If we need to advance the traversals, do so.
+ bool updated_src = false, updated_dst = false, skip_copy = false;
+ if (ofs + cur_copy_size != size) {
+ if (cur_src_addr + cur_min_size == cur_src_block_addr + cur_src_size) {
+ // Continue the src traversal.
+ traverse_valid = src_impl.ContinueTraversal(std::addressof(src_next_entry),
+ std::addressof(src_context));
+ ASSERT(traverse_valid);
+
+ // Update source.
+ updated_src = cur_src_addr + cur_min_size != src_next_entry.phys_addr;
+ }
+
+ if (cur_dst_addr + cur_min_size ==
+ dst_next_entry.phys_addr + dst_next_entry.block_size) {
+ // Continue the dst traversal.
+ traverse_valid = dst_impl.ContinueTraversal(std::addressof(dst_next_entry),
+ std::addressof(dst_context));
+ ASSERT(traverse_valid);
+
+ // Update destination.
+ updated_dst = cur_dst_addr + cur_min_size != dst_next_entry.phys_addr;
+ }
+
+ // If we didn't update either of source/destination, skip the copy this iteration.
+ if (!updated_src && !updated_dst) {
+ skip_copy = true;
+
+ // Update the source block address.
+ cur_src_block_addr = src_next_entry.phys_addr;
+ }
+ }
+
+ // Do the copy, unless we're skipping it.
+ if (!skip_copy) {
+ // We need both ends of the copy to be heap blocks.
+ R_UNLESS(IsHeapPhysicalAddress(cur_src_addr), ResultInvalidCurrentMemory);
+ R_UNLESS(IsHeapPhysicalAddress(cur_dst_addr), ResultInvalidCurrentMemory);
+
+ // Copy the data.
+ std::memcpy(GetHeapVirtualPointer(m_kernel, cur_dst_addr),
+ GetHeapVirtualPointer(m_kernel, cur_src_addr), cur_copy_size);
+
+ // Update.
+ cur_src_block_addr = src_next_entry.phys_addr;
+ cur_src_addr = updated_src ? cur_src_block_addr : cur_src_addr + cur_copy_size;
+ cur_dst_block_addr = dst_next_entry.phys_addr;
+ cur_dst_addr = updated_dst ? cur_dst_block_addr : cur_dst_addr + cur_copy_size;
+
+ // Advance offset.
+ ofs += cur_copy_size;
+ }
+
+ // Update min size.
+ cur_src_size = src_next_entry.block_size;
+ cur_dst_size = dst_next_entry.block_size;
+ cur_min_size = std::min<size_t>(cur_src_block_addr - cur_src_addr + cur_src_size,
+ cur_dst_block_addr - cur_dst_addr + cur_dst_size);
+ }
+ }
+
+ R_SUCCEED();
+}
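+
+// NOTE (illustrative): the dual traversal advances whichever side exhausts its
+// current physical block first. When neither traversal yields a physically
+// discontiguous next entry, the iteration sets skip_copy and only widens
+// cur_min_size, so contiguous runs are copied with one memcpy rather than
+// block by block.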
+
+Result KPageTableBase::CopyMemoryFromHeapToHeapWithoutCheckDestination(
+ KPageTableBase& dst_page_table, KProcessAddress dst_addr, size_t size,
+ KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm,
+ KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr,
+ KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm,
+ KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
+ // For convenience, alias this.
+ KPageTableBase& src_page_table = *this;
+
+ // Lightly validate the ranges before doing anything else.
+ R_UNLESS(src_page_table.Contains(src_addr, size), ResultInvalidCurrentMemory);
+ R_UNLESS(dst_page_table.Contains(dst_addr, size), ResultInvalidCurrentMemory);
+
+ // Copy the memory.
+ {
+ // Acquire the table locks.
+ KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
+
+ // Check memory state for source.
+ R_TRY(src_page_table.CheckMemoryStateContiguous(
+ src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm,
+ src_attr_mask | KMemoryAttribute::Uncached, src_attr));
+
+ // Destination state is intentionally unchecked.
+
+ // Get implementations.
+ auto& src_impl = src_page_table.GetImpl();
+ auto& dst_impl = dst_page_table.GetImpl();
+
+ // Prepare for traversal.
+ TraversalContext src_context;
+ TraversalContext dst_context;
+ TraversalEntry src_next_entry;
+ TraversalEntry dst_next_entry;
+ bool traverse_valid;
+
+ // Begin traversal.
+ traverse_valid = src_impl.BeginTraversal(std::addressof(src_next_entry),
+ std::addressof(src_context), src_addr);
+ ASSERT(traverse_valid);
+ traverse_valid = dst_impl.BeginTraversal(std::addressof(dst_next_entry),
+ std::addressof(dst_context), dst_addr);
+ ASSERT(traverse_valid);
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_src_block_addr = src_next_entry.phys_addr;
+ KPhysicalAddress cur_dst_block_addr = dst_next_entry.phys_addr;
+ size_t cur_src_size = src_next_entry.block_size -
+ (GetInteger(cur_src_block_addr) & (src_next_entry.block_size - 1));
+ size_t cur_dst_size = dst_next_entry.block_size -
+ (GetInteger(cur_dst_block_addr) & (dst_next_entry.block_size - 1));
+
+ // Adjust the initial block sizes.
+ src_next_entry.block_size = cur_src_size;
+ dst_next_entry.block_size = cur_dst_size;
+
+ // Before we get any crazier, succeed if there's nothing to do.
+ R_SUCCEED_IF(size == 0);
+
+ // We're going to manage dual traversal via an offset against the total size.
+ KPhysicalAddress cur_src_addr = cur_src_block_addr;
+ KPhysicalAddress cur_dst_addr = cur_dst_block_addr;
+ size_t cur_min_size = std::min<size_t>(cur_src_size, cur_dst_size);
+
+ // Iterate.
+ size_t ofs = 0;
+ while (ofs < size) {
+ // Determine how much we can copy this iteration.
+ const size_t cur_copy_size = std::min<size_t>(cur_min_size, size - ofs);
+
+ // If we need to advance the traversals, do so.
+ bool updated_src = false, updated_dst = false, skip_copy = false;
+ if (ofs + cur_copy_size != size) {
+ if (cur_src_addr + cur_min_size == cur_src_block_addr + cur_src_size) {
+ // Continue the src traversal.
+ traverse_valid = src_impl.ContinueTraversal(std::addressof(src_next_entry),
+ std::addressof(src_context));
+ ASSERT(traverse_valid);
+
+ // Update source.
+ updated_src = cur_src_addr + cur_min_size != src_next_entry.phys_addr;
+ }
+
+ if (cur_dst_addr + cur_min_size ==
+ dst_next_entry.phys_addr + dst_next_entry.block_size) {
+ // Continue the dst traversal.
+ traverse_valid = dst_impl.ContinueTraversal(std::addressof(dst_next_entry),
+ std::addressof(dst_context));
+ ASSERT(traverse_valid);
+
+ // Update destination.
+ updated_dst = cur_dst_addr + cur_min_size != dst_next_entry.phys_addr;
+ }
+
+ // If we didn't update either of source/destination, skip the copy this iteration.
+ if (!updated_src && !updated_dst) {
+ skip_copy = true;
+
+ // Update the source block address.
+ cur_src_block_addr = src_next_entry.phys_addr;
+ }
+ }
+
+ // Do the copy, unless we're skipping it.
+ if (!skip_copy) {
+ // We need both ends of the copy to be heap blocks.
+ R_UNLESS(IsHeapPhysicalAddress(cur_src_addr), ResultInvalidCurrentMemory);
+ R_UNLESS(IsHeapPhysicalAddress(cur_dst_addr), ResultInvalidCurrentMemory);
+
+ // Copy the data.
+ std::memcpy(GetHeapVirtualPointer(m_kernel, cur_dst_addr),
+ GetHeapVirtualPointer(m_kernel, cur_src_addr), cur_copy_size);
+
+ // Update.
+ cur_src_block_addr = src_next_entry.phys_addr;
+ cur_src_addr = updated_src ? cur_src_block_addr : cur_src_addr + cur_copy_size;
+ cur_dst_block_addr = dst_next_entry.phys_addr;
+ cur_dst_addr = updated_dst ? cur_dst_block_addr : cur_dst_addr + cur_copy_size;
+
+ // Advance offset.
+ ofs += cur_copy_size;
+ }
+
+ // Update min size.
+ cur_src_size = src_next_entry.block_size;
+ cur_dst_size = dst_next_entry.block_size;
+ cur_min_size = std::min<size_t>(cur_src_block_addr - cur_src_addr + cur_src_size,
+ cur_dst_block_addr - cur_dst_addr + cur_dst_size);
+ }
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed,
+ KProcessAddress address, size_t size,
+ KMemoryPermission test_perm, KMemoryState dst_state) {
+ // Validate pre-conditions.
+ ASSERT(this->IsLockedByCurrentThread());
+ ASSERT(test_perm == KMemoryPermission::UserReadWrite ||
+ test_perm == KMemoryPermission::UserRead);
+
+ // Check that the address is in range.
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Get the source permission.
+ const auto src_perm = static_cast<KMemoryPermission>(
+ (test_perm == KMemoryPermission::UserReadWrite)
+ ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped
+ : KMemoryPermission::UserRead);
+
+ // Get aligned extents.
+ const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(address), PageSize);
+ const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(address) + size, PageSize);
+ const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(address), PageSize);
+ const KProcessAddress mapping_src_end = Common::AlignDown(GetInteger(address) + size, PageSize);
+
+ const auto aligned_src_last = GetInteger(aligned_src_end) - 1;
+ const auto mapping_src_last = GetInteger(mapping_src_end) - 1;
+
+ // Get the test state and attribute mask.
+ KMemoryState test_state;
+ KMemoryAttribute test_attr_mask;
+ switch (dst_state) {
+ case KMemoryState::Ipc:
+ test_state = KMemoryState::FlagCanUseIpc;
+ test_attr_mask =
+ KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked;
+ break;
+ case KMemoryState::NonSecureIpc:
+ test_state = KMemoryState::FlagCanUseNonSecureIpc;
+ test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
+ break;
+ case KMemoryState::NonDeviceIpc:
+ test_state = KMemoryState::FlagCanUseNonDeviceIpc;
+ test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
+ break;
+ default:
+ R_THROW(ResultInvalidCombination);
+ }
+
+ // Ensure that on failure, we roll back appropriately.
+ size_t mapped_size = 0;
+ ON_RESULT_FAILURE {
+ if (mapped_size > 0) {
+ this->CleanupForIpcClientOnServerSetupFailure(page_list, mapping_src_start, mapped_size,
+ src_perm);
+ }
+ };
+
+ size_t blocks_needed = 0;
+
+ // Iterate, mapping as needed.
+ KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(aligned_src_start);
+ while (true) {
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // Validate the current block.
+ R_TRY(this->CheckMemoryState(info, test_state, test_state, test_perm, test_perm,
+ test_attr_mask, KMemoryAttribute::None));
+
+ if (mapping_src_start < mapping_src_end &&
+ GetInteger(mapping_src_start) < info.GetEndAddress() &&
+ info.GetAddress() < GetInteger(mapping_src_end)) {
+ const auto cur_start = info.GetAddress() >= GetInteger(mapping_src_start)
+ ? info.GetAddress()
+ : GetInteger(mapping_src_start);
+ const auto cur_end = mapping_src_last >= info.GetLastAddress()
+ ? info.GetEndAddress()
+ : GetInteger(mapping_src_end);
+ const size_t cur_size = cur_end - cur_start;
+
+ if (info.GetAddress() < GetInteger(mapping_src_start)) {
+ ++blocks_needed;
+ }
+ if (mapping_src_last < info.GetLastAddress()) {
+ ++blocks_needed;
+ }
+
+ // Set the permissions on the block, if we need to.
+ if ((info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != src_perm) {
+ const DisableMergeAttribute head_body_attr =
+ (GetInteger(mapping_src_start) >= info.GetAddress())
+ ? DisableMergeAttribute::DisableHeadAndBody
+ : DisableMergeAttribute::None;
+ const DisableMergeAttribute tail_attr = (cur_end == GetInteger(mapping_src_end))
+ ? DisableMergeAttribute::DisableTail
+ : DisableMergeAttribute::None;
+ const KPageProperties properties = {
+ src_perm, false, false,
+ static_cast<DisableMergeAttribute>(head_body_attr | tail_attr)};
+ R_TRY(this->Operate(page_list, cur_start, cur_size / PageSize, 0, false, properties,
+ OperationType::ChangePermissions, false));
+ }
+
+ // Note that we mapped this part.
+ mapped_size += cur_size;
+ }
+
+ // If the block is at the end, we're done.
+ if (aligned_src_last <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ ++it;
+ ASSERT(it != m_memory_block_manager.end());
+ }
+
+ if (out_blocks_needed != nullptr) {
+ ASSERT(blocks_needed <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
+ *out_blocks_needed = blocks_needed;
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::SetupForIpcServer(KProcessAddress* out_addr, size_t size,
+ KProcessAddress src_addr, KMemoryPermission test_perm,
+ KMemoryState dst_state, KPageTableBase& src_page_table,
+ bool send) {
+ ASSERT(this->IsLockedByCurrentThread());
+ ASSERT(src_page_table.IsLockedByCurrentThread());
+
+ // Check that we can theoretically map.
+ const KProcessAddress region_start = m_alias_region_start;
+ const size_t region_size = m_alias_region_end - m_alias_region_start;
+ R_UNLESS(size < region_size, ResultOutOfAddressSpace);
+
+ // Get aligned source extents.
+ const KProcessAddress src_start = src_addr;
+ const KProcessAddress src_end = src_addr + size;
+ const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(src_start), PageSize);
+ const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(src_start) + size, PageSize);
+ const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(src_start), PageSize);
+ const KProcessAddress mapping_src_end =
+ Common::AlignDown(GetInteger(src_start) + size, PageSize);
+ const size_t aligned_src_size = aligned_src_end - aligned_src_start;
+ const size_t mapping_src_size =
+ (mapping_src_start < mapping_src_end) ? (mapping_src_end - mapping_src_start) : 0;
+
+ // Select a random address to map at.
+ KProcessAddress dst_addr = 0;
+ {
+ const size_t alignment = 4_KiB;
+ const size_t offset = GetInteger(aligned_src_start) & (alignment - 1);
+
+ dst_addr =
+ this->FindFreeArea(region_start, region_size / PageSize, aligned_src_size / PageSize,
+ alignment, offset, this->GetNumGuardPages());
+ R_UNLESS(dst_addr != 0, ResultOutOfAddressSpace);
+ }
+
+ // Check that we can perform the operation we're about to perform.
+ ASSERT(this->CanContain(dst_addr, aligned_src_size, dst_state));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Reserve space for any partial pages we allocate.
+ const size_t unmapped_size = aligned_src_size - mapping_src_size;
+ KScopedResourceReservation memory_reservation(
+ m_resource_limit, Svc::LimitableResource::PhysicalMemoryMax, unmapped_size);
+ R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
+
+ // Ensure that we manage page references correctly.
+ KPhysicalAddress start_partial_page = 0;
+ KPhysicalAddress end_partial_page = 0;
+ KProcessAddress cur_mapped_addr = dst_addr;
+
+ // If the partial pages are mapped, an extra reference will have been opened. Otherwise, they'll
+ // free on scope exit.
+ SCOPE_EXIT({
+ if (start_partial_page != 0) {
+ m_kernel.MemoryManager().Close(start_partial_page, 1);
+ }
+ if (end_partial_page != 0) {
+ m_kernel.MemoryManager().Close(end_partial_page, 1);
+ }
+ });
+
+ ON_RESULT_FAILURE {
+ if (cur_mapped_addr != dst_addr) {
+ const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_ASSERT(this->Operate(updater.GetPageList(), dst_addr,
+ (cur_mapped_addr - dst_addr) / PageSize, 0, false,
+ unmap_properties, OperationType::Unmap, true));
+ }
+ };
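+    // (On failure, the handler above unmaps whatever was mapped into
+    // [dst_addr, cur_mapped_addr) before the error propagates.)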
+
+ // Allocate the start page as needed.
+ if (aligned_src_start < mapping_src_start) {
+ start_partial_page =
+ m_kernel.MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
+ R_UNLESS(start_partial_page != 0, ResultOutOfMemory);
+ }
+
+ // Allocate the end page as needed.
+ if (mapping_src_end < aligned_src_end &&
+ (aligned_src_start < mapping_src_end || aligned_src_start == mapping_src_start)) {
+ end_partial_page =
+ m_kernel.MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
+ R_UNLESS(end_partial_page != 0, ResultOutOfMemory);
+ }
+
+ // Get the implementation.
+ auto& src_impl = src_page_table.GetImpl();
+
+ // Get the fill value for partial pages.
+ const auto fill_val = m_ipc_fill_value;
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ bool traverse_valid = src_impl.BeginTraversal(std::addressof(next_entry),
+ std::addressof(context), aligned_src_start);
+ ASSERT(traverse_valid);
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_block_addr = next_entry.phys_addr;
+ size_t cur_block_size =
+ next_entry.block_size - (GetInteger(cur_block_addr) & (next_entry.block_size - 1));
+ size_t tot_block_size = cur_block_size;
+
+ // Map the start page, if we have one.
+ if (start_partial_page != 0) {
+ // Ensure the page holds correct data.
+ u8* const start_partial_virt = GetHeapVirtualPointer(m_kernel, start_partial_page);
+ if (send) {
+ const size_t partial_offset = src_start - aligned_src_start;
+ size_t copy_size, clear_size;
+ if (src_end < mapping_src_start) {
+ copy_size = size;
+ clear_size = mapping_src_start - src_end;
+ } else {
+ copy_size = mapping_src_start - src_start;
+ clear_size = 0;
+ }
+
+ std::memset(start_partial_virt, fill_val, partial_offset);
+ std::memcpy(start_partial_virt + partial_offset,
+ GetHeapVirtualPointer(m_kernel, cur_block_addr) + partial_offset,
+ copy_size);
+ if (clear_size > 0) {
+ std::memset(start_partial_virt + partial_offset + copy_size, fill_val, clear_size);
+ }
+ } else {
+ std::memset(start_partial_virt, fill_val, PageSize);
+ }
+
+ // Map the page.
+ const KPageProperties start_map_properties = {test_perm, false, false,
+ DisableMergeAttribute::DisableHead};
+ R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, 1, start_partial_page, true,
+ start_map_properties, OperationType::Map, false));
+
+ // Update tracking extents.
+ cur_mapped_addr += PageSize;
+ cur_block_addr += PageSize;
+ cur_block_size -= PageSize;
+
+ // If the block's size was one page, we may need to continue traversal.
+ if (cur_block_size == 0 && aligned_src_size > PageSize) {
+ traverse_valid =
+ src_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ ASSERT(traverse_valid);
+
+ cur_block_addr = next_entry.phys_addr;
+ cur_block_size = next_entry.block_size;
+ tot_block_size += next_entry.block_size;
+ }
+ }
+
+ // Map the remaining pages.
+ while (aligned_src_start + tot_block_size < mapping_src_end) {
+ // Continue the traversal.
+ traverse_valid =
+ src_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ ASSERT(traverse_valid);
+
+ // Process the block.
+ if (next_entry.phys_addr != cur_block_addr + cur_block_size) {
+ // Map the block we've been processing so far.
+ const KPageProperties map_properties = {test_perm, false, false,
+ (cur_mapped_addr == dst_addr)
+ ? DisableMergeAttribute::DisableHead
+ : DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, cur_block_size / PageSize,
+ cur_block_addr, true, map_properties, OperationType::Map, false));
+
+ // Update tracking extents.
+ cur_mapped_addr += cur_block_size;
+ cur_block_addr = next_entry.phys_addr;
+ cur_block_size = next_entry.block_size;
+ } else {
+ cur_block_size += next_entry.block_size;
+ }
+ tot_block_size += next_entry.block_size;
+ }
+
+ // Handle the last direct-mapped page.
+ if (const KProcessAddress mapped_block_end =
+ aligned_src_start + tot_block_size - cur_block_size;
+ mapped_block_end < mapping_src_end) {
+ const size_t last_block_size = mapping_src_end - mapped_block_end;
+
+ // Map the last block.
+ const KPageProperties map_properties = {test_perm, false, false,
+ (cur_mapped_addr == dst_addr)
+ ? DisableMergeAttribute::DisableHead
+ : DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, last_block_size / PageSize,
+ cur_block_addr, true, map_properties, OperationType::Map, false));
+
+ // Update tracking extents.
+ cur_mapped_addr += last_block_size;
+ cur_block_addr += last_block_size;
+ if (mapped_block_end + cur_block_size < aligned_src_end &&
+ cur_block_size == last_block_size) {
+ traverse_valid =
+ src_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ ASSERT(traverse_valid);
+
+ cur_block_addr = next_entry.phys_addr;
+ }
+ }
+
+ // Map the end page, if we have one.
+ if (end_partial_page != 0) {
+ // Ensure the page holds correct data.
+ u8* const end_partial_virt = GetHeapVirtualPointer(m_kernel, end_partial_page);
+ if (send) {
+ const size_t copy_size = src_end - mapping_src_end;
+ std::memcpy(end_partial_virt, GetHeapVirtualPointer(m_kernel, cur_block_addr),
+ copy_size);
+ std::memset(end_partial_virt + copy_size, fill_val, PageSize - copy_size);
+ } else {
+ std::memset(end_partial_virt, fill_val, PageSize);
+ }
+
+ // Map the page.
+ const KPageProperties map_properties = {test_perm, false, false,
+ (cur_mapped_addr == dst_addr)
+ ? DisableMergeAttribute::DisableHead
+ : DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, 1, end_partial_page, true,
+ map_properties, OperationType::Map, false));
+ }
+
+    // Update memory blocks to reflect our changes.
+ m_memory_block_manager.Update(std::addressof(allocator), dst_addr, aligned_src_size / PageSize,
+ dst_state, test_perm, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // Set the output address.
+ *out_addr = dst_addr + (src_start - aligned_src_start);
+
+ // We succeeded.
+ memory_reservation.Commit();
+ R_SUCCEED();
+}
+
+Result KPageTableBase::SetupForIpc(KProcessAddress* out_dst_addr, size_t size,
+ KProcessAddress src_addr, KPageTableBase& src_page_table,
+ KMemoryPermission test_perm, KMemoryState dst_state, bool send) {
+ // For convenience, alias this.
+ KPageTableBase& dst_page_table = *this;
+
+ // Acquire the table locks.
+ KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(std::addressof(src_page_table));
+
+ // Perform client setup.
+ size_t num_allocator_blocks;
+ R_TRY(src_page_table.SetupForIpcClient(updater.GetPageList(),
+ std::addressof(num_allocator_blocks), src_addr, size,
+ test_perm, dst_state));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ src_page_table.m_memory_block_slab_manager,
+ num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // Get the mapped extents.
+ const KProcessAddress src_map_start = Common::AlignUp(GetInteger(src_addr), PageSize);
+ const KProcessAddress src_map_end = Common::AlignDown(GetInteger(src_addr) + size, PageSize);
+ const size_t src_map_size = src_map_end - src_map_start;
+
+ // Ensure that we clean up appropriately if we fail after this.
+ const auto src_perm = static_cast<KMemoryPermission>(
+ (test_perm == KMemoryPermission::UserReadWrite)
+ ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped
+ : KMemoryPermission::UserRead);
+ ON_RESULT_FAILURE {
+ if (src_map_end > src_map_start) {
+ src_page_table.CleanupForIpcClientOnServerSetupFailure(
+ updater.GetPageList(), src_map_start, src_map_size, src_perm);
+ }
+ };
+
+ // Perform server setup.
+ R_TRY(dst_page_table.SetupForIpcServer(out_dst_addr, size, src_addr, test_perm, dst_state,
+ src_page_table, send));
+
+ // If anything was mapped, ipc-lock the pages.
+ if (src_map_start < src_map_end) {
+        // Apply the source permission computed above.
+ src_page_table.m_memory_block_manager.UpdateLock(std::addressof(allocator), src_map_start,
+ (src_map_end - src_map_start) / PageSize,
+ &KMemoryBlock::LockForIpc, src_perm);
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::CleanupForIpcServer(KProcessAddress address, size_t size,
+ KMemoryState dst_state) {
+ // Validate the address.
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Validate the memory state.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+ KMemoryState::All, dst_state, KMemoryPermission::UserRead,
+ KMemoryPermission::UserRead, KMemoryAttribute::All,
+ KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Get aligned extents.
+ const KProcessAddress aligned_start = Common::AlignDown(GetInteger(address), PageSize);
+ const KProcessAddress aligned_end = Common::AlignUp(GetInteger(address) + size, PageSize);
+ const size_t aligned_size = aligned_end - aligned_start;
+ const size_t aligned_num_pages = aligned_size / PageSize;
+
+ // Unmap the pages.
+ const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), aligned_start, aligned_num_pages, 0, false,
+ unmap_properties, OperationType::Unmap, false));
+
+ // Update memory blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), aligned_start, aligned_num_pages,
+ KMemoryState::None, KMemoryPermission::None,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal);
+
+ // Release from the resource limit as relevant.
+ const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize);
+ const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize);
+ const size_t mapping_size = (mapping_start < mapping_end) ? mapping_end - mapping_start : 0;
+ m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax,
+ aligned_size - mapping_size);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::CleanupForIpcClient(KProcessAddress address, size_t size,
+ KMemoryState dst_state) {
+ // Validate the address.
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Get aligned source extents.
+ const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize);
+ const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize);
+ const KProcessAddress mapping_last = mapping_end - 1;
+ const size_t mapping_size = (mapping_start < mapping_end) ? (mapping_end - mapping_start) : 0;
+
+ // If nothing was mapped, we're actually done immediately.
+ R_SUCCEED_IF(mapping_size == 0);
+
+ // Get the test state and attribute mask.
+ KMemoryState test_state;
+ KMemoryAttribute test_attr_mask;
+ switch (dst_state) {
+ case KMemoryState::Ipc:
+ test_state = KMemoryState::FlagCanUseIpc;
+ test_attr_mask =
+ KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked;
+ break;
+ case KMemoryState::NonSecureIpc:
+ test_state = KMemoryState::FlagCanUseNonSecureIpc;
+ test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
+ break;
+ case KMemoryState::NonDeviceIpc:
+ test_state = KMemoryState::FlagCanUseNonDeviceIpc;
+ test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
+ break;
+ default:
+ R_THROW(ResultInvalidCombination);
+ }
+
+ // Lock the table.
+ // NOTE: Nintendo does this *after* creating the updater below, but this does not follow
+ // convention elsewhere in KPageTableBase.
+ KScopedLightLock lk(m_general_lock);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Ensure that on failure, we roll back appropriately.
+ size_t mapped_size = 0;
+ ON_RESULT_FAILURE {
+ if (mapped_size > 0) {
+ // Determine where the mapping ends.
+ const auto mapped_end = GetInteger(mapping_start) + mapped_size;
+ const auto mapped_last = mapped_end - 1;
+
+ // Get current and next iterators.
+ KMemoryBlockManager::const_iterator start_it =
+ m_memory_block_manager.FindIterator(mapping_start);
+ KMemoryBlockManager::const_iterator next_it = start_it;
+ ++next_it;
+
+ // Get the current block info.
+ KMemoryInfo cur_info = start_it->GetMemoryInfo();
+
+ // Create tracking variables.
+ KProcessAddress cur_address = cur_info.GetAddress();
+ size_t cur_size = cur_info.GetSize();
+ bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
+ bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
+ bool first = cur_info.GetIpcDisableMergeCount() == 1 &&
+ False(cur_info.GetDisableMergeAttribute() &
+ KMemoryBlockDisableMergeAttribute::Locked);
+
+ while ((GetInteger(cur_address) + cur_size - 1) < mapped_last) {
+ // Check that we have a next block.
+ ASSERT(next_it != m_memory_block_manager.end());
+
+ // Get the next info.
+ const KMemoryInfo next_info = next_it->GetMemoryInfo();
+
+ // Check if we can consolidate the next block's permission set with the current one.
+ const bool next_perm_eq =
+ next_info.GetPermission() == next_info.GetOriginalPermission();
+ const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
+ if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm &&
+ cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
+ // We can consolidate the reprotection for the current and next block into a
+ // single call.
+ cur_size += next_info.GetSize();
+ } else {
+ // We have to operate on the current block.
+ if ((cur_needs_set_perm || first) && !cur_perm_eq) {
+ const KPageProperties properties = {
+ cur_info.GetPermission(), false, false,
+ first ? DisableMergeAttribute::EnableAndMergeHeadBodyTail
+ : DisableMergeAttribute::None};
+ R_ASSERT(this->Operate(updater.GetPageList(), cur_address,
+ cur_size / PageSize, 0, false, properties,
+ OperationType::ChangePermissions, true));
+ }
+
+ // Advance.
+ cur_address = next_info.GetAddress();
+ cur_size = next_info.GetSize();
+ first = false;
+ }
+
+ // Advance.
+ cur_info = next_info;
+ cur_perm_eq = next_perm_eq;
+ cur_needs_set_perm = next_needs_set_perm;
+ ++next_it;
+ }
+
+ // Process the last block.
+ if ((first || cur_needs_set_perm) && !cur_perm_eq) {
+ const KPageProperties properties = {
+ cur_info.GetPermission(), false, false,
+ first ? DisableMergeAttribute::EnableAndMergeHeadBodyTail
+ : DisableMergeAttribute::None};
+ R_ASSERT(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, 0,
+ false, properties, OperationType::ChangePermissions, true));
+ }
+ }
+ };
+
+ // Iterate, reprotecting as needed.
+ {
+ // Get current and next iterators.
+ KMemoryBlockManager::const_iterator start_it =
+ m_memory_block_manager.FindIterator(mapping_start);
+ KMemoryBlockManager::const_iterator next_it = start_it;
+ ++next_it;
+
+ // Validate the current block.
+ KMemoryInfo cur_info = start_it->GetMemoryInfo();
+ R_ASSERT(this->CheckMemoryState(
+ cur_info, test_state, test_state, KMemoryPermission::None, KMemoryPermission::None,
+ test_attr_mask | KMemoryAttribute::IpcLocked, KMemoryAttribute::IpcLocked));
+
+ // Create tracking variables.
+ KProcessAddress cur_address = cur_info.GetAddress();
+ size_t cur_size = cur_info.GetSize();
+ bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
+ bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
+ bool first =
+ cur_info.GetIpcDisableMergeCount() == 1 &&
+ False(cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked);
+
+ while ((cur_address + cur_size - 1) < mapping_last) {
+ // Check that we have a next block.
+ ASSERT(next_it != m_memory_block_manager.end());
+
+ // Get the next info.
+ const KMemoryInfo next_info = next_it->GetMemoryInfo();
+
+ // Validate the next block.
+ R_ASSERT(this->CheckMemoryState(
+ next_info, test_state, test_state, KMemoryPermission::None, KMemoryPermission::None,
+ test_attr_mask | KMemoryAttribute::IpcLocked, KMemoryAttribute::IpcLocked));
+
+ // Check if we can consolidate the next block's permission set with the current one.
+ const bool next_perm_eq =
+ next_info.GetPermission() == next_info.GetOriginalPermission();
+ const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
+ if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm &&
+ cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
+ // We can consolidate the reprotection for the current and next block into a single
+ // call.
+ cur_size += next_info.GetSize();
+ } else {
+ // We have to operate on the current block.
+ if ((cur_needs_set_perm || first) && !cur_perm_eq) {
+ const KPageProperties properties = {
+ cur_needs_set_perm ? cur_info.GetOriginalPermission()
+ : cur_info.GetPermission(),
+ false, false,
+ first ? DisableMergeAttribute::EnableHeadAndBody
+ : DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, 0,
+ false, properties, OperationType::ChangePermissions,
+ false));
+ }
+
+ // Mark that we mapped the block.
+ mapped_size += cur_size;
+
+ // Advance.
+ cur_address = next_info.GetAddress();
+ cur_size = next_info.GetSize();
+ first = false;
+ }
+
+ // Advance.
+ cur_info = next_info;
+ cur_perm_eq = next_perm_eq;
+ cur_needs_set_perm = next_needs_set_perm;
+ ++next_it;
+ }
+
+ // Process the last block.
+ const auto lock_count =
+ cur_info.GetIpcLockCount() +
+ (next_it != m_memory_block_manager.end()
+ ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount())
+ : 0);
+ if ((first || cur_needs_set_perm || (lock_count == 1)) && !cur_perm_eq) {
+ const DisableMergeAttribute head_body_attr =
+ first ? DisableMergeAttribute::EnableHeadAndBody : DisableMergeAttribute::None;
+ const DisableMergeAttribute tail_attr =
+ lock_count == 1 ? DisableMergeAttribute::EnableTail : DisableMergeAttribute::None;
+ const KPageProperties properties = {
+ cur_needs_set_perm ? cur_info.GetOriginalPermission() : cur_info.GetPermission(),
+ false, false, static_cast<DisableMergeAttribute>(head_body_attr | tail_attr)};
+ R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, 0, false,
+ properties, OperationType::ChangePermissions, false));
+ }
+ }
+
+ // Create an update allocator.
+ // NOTE: Guaranteed zero blocks needed here.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, 0);
+ R_TRY(allocator_result);
+
+ // Unlock the pages.
+ m_memory_block_manager.UpdateLock(std::addressof(allocator), mapping_start,
+ mapping_size / PageSize, &KMemoryBlock::UnlockForIpc,
+ KMemoryPermission::None);
+
+ R_SUCCEED();
+}
+
+void KPageTableBase::CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list,
+ KProcessAddress address, size_t size,
+ KMemoryPermission prot_perm) {
+ ASSERT(this->IsLockedByCurrentThread());
+ ASSERT(Common::IsAligned(GetInteger(address), PageSize));
+ ASSERT(Common::IsAligned(size, PageSize));
+
+ // Get the mapped extents.
+ const KProcessAddress src_map_start = address;
+ const KProcessAddress src_map_end = address + size;
+ const KProcessAddress src_map_last = src_map_end - 1;
+
+ // This function is only invoked when there's something to do.
+ ASSERT(src_map_end > src_map_start);
+
+ // Iterate over blocks, fixing permissions.
+ KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(address);
+ while (true) {
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ const auto cur_start = info.GetAddress() >= GetInteger(src_map_start)
+ ? info.GetAddress()
+ : GetInteger(src_map_start);
+ const auto cur_end =
+ src_map_last <= info.GetLastAddress() ? src_map_end : info.GetEndAddress();
+
+ // If we can, fix the protections on the block.
+ if ((info.GetIpcLockCount() == 0 &&
+ (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) ||
+ (info.GetIpcLockCount() != 0 &&
+ (info.GetOriginalPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm)) {
+ // Check if we actually need to fix the protections on the block.
+ if (cur_end == src_map_end || info.GetAddress() <= GetInteger(src_map_start) ||
+ (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) {
+ const bool start_nc = (info.GetAddress() == GetInteger(src_map_start))
+ ? (False(info.GetDisableMergeAttribute() &
+ (KMemoryBlockDisableMergeAttribute::Locked |
+ KMemoryBlockDisableMergeAttribute::IpcLeft)))
+ : info.GetAddress() <= GetInteger(src_map_start);
+
+ const DisableMergeAttribute head_body_attr =
+ start_nc ? DisableMergeAttribute::EnableHeadAndBody
+ : DisableMergeAttribute::None;
+ DisableMergeAttribute tail_attr;
+ if (cur_end == src_map_end && info.GetEndAddress() == src_map_end) {
+ auto next_it = it;
+ ++next_it;
+
+ const auto lock_count =
+ info.GetIpcLockCount() +
+ (next_it != m_memory_block_manager.end()
+ ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount())
+ : 0);
+ tail_attr = lock_count == 0 ? DisableMergeAttribute::EnableTail
+ : DisableMergeAttribute::None;
+ } else {
+ tail_attr = DisableMergeAttribute::None;
+ }
+
+ const KPageProperties properties = {
+ info.GetPermission(), false, false,
+ static_cast<DisableMergeAttribute>(head_body_attr | tail_attr)};
+ R_ASSERT(this->Operate(page_list, cur_start, (cur_end - cur_start) / PageSize, 0,
+ false, properties, OperationType::ChangePermissions, true));
+ }
+ }
+
+ // If we're past the end of the region, we're done.
+ if (src_map_last <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ ++it;
+ ASSERT(it != m_memory_block_manager.end());
+ }
+}
+
+Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) {
+ // Lock the physical memory lock.
+ KScopedLightLock phys_lk(m_map_physical_memory_lock);
+
+ // Calculate the last address for convenience.
+ const KProcessAddress last_address = address + size - 1;
+
+ // Define iteration variables.
+ KProcessAddress cur_address;
+ size_t mapped_size;
+
+ // The entire mapping process can be retried.
+ while (true) {
+ // Check if the memory is already mapped.
+ {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Iterate over the memory.
+ cur_address = address;
+ mapped_size = 0;
+
+ auto it = m_memory_block_manager.FindIterator(cur_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != m_memory_block_manager.end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ if (info.GetState() != KMemoryState::Free) {
+ mapped_size += (last_address + 1 - cur_address);
+ }
+ break;
+ }
+
+ // Track the memory if it's mapped.
+ if (info.GetState() != KMemoryState::Free) {
+ mapped_size += KProcessAddress(info.GetEndAddress()) - cur_address;
+ }
+
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
+ }
+
+ // If the size mapped is the size requested, we've nothing to do.
+ R_SUCCEED_IF(size == mapped_size);
+ }
+
+ // Allocate and map the memory.
+ {
+ // Reserve the memory from the process resource limit.
+ KScopedResourceReservation memory_reservation(
+ m_resource_limit, Svc::LimitableResource::PhysicalMemoryMax, size - mapped_size);
+ R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
+
+ // Allocate pages for the new memory.
+ KPageGroup pg(m_kernel, m_block_info_manager);
+ R_TRY(m_kernel.MemoryManager().AllocateForProcess(
+ std::addressof(pg), (size - mapped_size) / PageSize, m_allocate_option,
+ GetCurrentProcess(m_kernel).GetId(), m_heap_fill_value));
+
+            // If we fail in the next bit (or retry), we need to clean up the pages.
+ auto pg_guard = SCOPE_GUARD({
+ pg.OpenFirst();
+ pg.Close();
+ });
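+            // (Opening the first reference and immediately closing it returns the
+            // still-unmapped pages to the memory manager.)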
+
+ // Map the memory.
+ {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ size_t num_allocator_blocks = 0;
+
+ // Verify that nobody has mapped memory since we first checked.
+ {
+ // Iterate over the memory.
+ size_t checked_mapped_size = 0;
+ cur_address = address;
+
+ auto it = m_memory_block_manager.FindIterator(cur_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != m_memory_block_manager.end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ const bool is_free = info.GetState() == KMemoryState::Free;
+ if (is_free) {
+ if (info.GetAddress() < GetInteger(address)) {
+ ++num_allocator_blocks;
+ }
+ if (last_address < info.GetLastAddress()) {
+ ++num_allocator_blocks;
+ }
+ }
+
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ if (!is_free) {
+ checked_mapped_size += (last_address + 1 - cur_address);
+ }
+ break;
+ }
+
+ // Track the memory if it's mapped.
+ if (!is_free) {
+ checked_mapped_size +=
+ KProcessAddress(info.GetEndAddress()) - cur_address;
+ }
+
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
+ }
+
+ // If the size now isn't what it was before, somebody mapped or unmapped
+ // concurrently. If this happened, retry.
+ if (mapped_size != checked_mapped_size) {
+ continue;
+ }
+ }
+
+ // Create an update allocator.
+ ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager,
+ num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Prepare to iterate over the memory.
+ auto pg_it = pg.begin();
+ KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
+ size_t pg_pages = pg_it->GetNumPages();
+
+ // Reset the current tracking address, and make sure we clean up on failure.
+ pg_guard.Cancel();
+ cur_address = address;
+ ON_RESULT_FAILURE {
+ if (cur_address > address) {
+ const KProcessAddress last_unmap_address = cur_address - 1;
+
+ // Iterate, unmapping the pages.
+ cur_address = address;
+
+ auto it = m_memory_block_manager.FindIterator(cur_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != m_memory_block_manager.end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // If the memory state is free, we mapped it and need to unmap it.
+ if (info.GetState() == KMemoryState::Free) {
+ // Determine the range to unmap.
+ const KPageProperties unmap_properties = {
+ KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ const size_t cur_pages =
+ std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
+ last_unmap_address + 1 - cur_address) /
+ PageSize;
+
+ // Unmap.
+ R_ASSERT(this->Operate(updater.GetPageList(), cur_address,
+ cur_pages, 0, false, unmap_properties,
+ OperationType::Unmap, true));
+ }
+
+ // Check if we're done.
+ if (last_unmap_address <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
+ }
+ }
+
+ // Release any remaining unmapped memory.
+ m_kernel.MemoryManager().OpenFirst(pg_phys_addr, pg_pages);
+ m_kernel.MemoryManager().Close(pg_phys_addr, pg_pages);
+ for (++pg_it; pg_it != pg.end(); ++pg_it) {
+ m_kernel.MemoryManager().OpenFirst(pg_it->GetAddress(),
+ pg_it->GetNumPages());
+ m_kernel.MemoryManager().Close(pg_it->GetAddress(), pg_it->GetNumPages());
+ }
+ };
+
+ auto it = m_memory_block_manager.FindIterator(cur_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != m_memory_block_manager.end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // If it's unmapped, we need to map it.
+ if (info.GetState() == KMemoryState::Free) {
+ // Determine the range to map.
+ const KPageProperties map_properties = {
+ KMemoryPermission::UserReadWrite, false, false,
+ cur_address == this->GetAliasRegionStart()
+ ? DisableMergeAttribute::DisableHead
+ : DisableMergeAttribute::None};
+ size_t map_pages =
+ std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
+ last_address + 1 - cur_address) /
+ PageSize;
+
+ // While we have pages to map, map them.
+ {
+ // Create a page group for the current mapping range.
+ KPageGroup cur_pg(m_kernel, m_block_info_manager);
+ {
+ ON_RESULT_FAILURE_2 {
+ cur_pg.OpenFirst();
+ cur_pg.Close();
+ };
+
+ size_t remain_pages = map_pages;
+ while (remain_pages > 0) {
+ // Check if we're at the end of the physical block.
+ if (pg_pages == 0) {
+ // Ensure there are more pages to map.
+ ASSERT(pg_it != pg.end());
+
+ // Advance our physical block.
+ ++pg_it;
+ pg_phys_addr = pg_it->GetAddress();
+ pg_pages = pg_it->GetNumPages();
+ }
+
+ // Add whatever we can to the current block.
+ const size_t cur_pages = std::min(pg_pages, remain_pages);
+ R_TRY(cur_pg.AddBlock(pg_phys_addr +
+ ((pg_pages - cur_pages) * PageSize),
+ cur_pages));
+
+ // Advance.
+ remain_pages -= cur_pages;
+ pg_pages -= cur_pages;
+ }
+ }
+
+                    // Map the pages.
+ R_TRY(this->Operate(updater.GetPageList(), cur_address, map_pages,
+ cur_pg, map_properties,
+ OperationType::MapFirstGroup, false));
+ }
+ }
+
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
+ }
+
+ // We succeeded, so commit the memory reservation.
+ memory_reservation.Commit();
+
+ // Increase our tracked mapped size.
+ m_mapped_physical_memory_size += (size - mapped_size);
+
+ // Update the relevant memory blocks.
+ m_memory_block_manager.UpdateIfMatch(
+ std::addressof(allocator), address, size / PageSize, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+ address == this->GetAliasRegionStart()
+ ? KMemoryBlockDisableMergeAttribute::Normal
+ : KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ R_SUCCEED();
+ }
+ }
+ }
+}
+
+Result KPageTableBase::UnmapPhysicalMemory(KProcessAddress address, size_t size) {
+ // Lock the physical memory lock.
+ KScopedLightLock phys_lk(m_map_physical_memory_lock);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Calculate the last address for convenience.
+ const KProcessAddress last_address = address + size - 1;
+
+ // Define iteration variables.
+ KProcessAddress map_start_address = 0;
+ KProcessAddress map_last_address = 0;
+
+ KProcessAddress cur_address;
+ size_t mapped_size;
+ size_t num_allocator_blocks = 0;
+
+ // Check if the memory is mapped.
+ {
+ // Iterate over the memory.
+ cur_address = address;
+ mapped_size = 0;
+
+ auto it = m_memory_block_manager.FindIterator(cur_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != m_memory_block_manager.end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // Verify the memory's state.
+ const bool is_normal = info.GetState() == KMemoryState::Normal &&
+ info.GetAttribute() == KMemoryAttribute::None;
+ const bool is_free = info.GetState() == KMemoryState::Free;
+ R_UNLESS(is_normal || is_free, ResultInvalidCurrentMemory);
+
+ if (is_normal) {
+ R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory);
+
+ if (map_start_address == 0) {
+ map_start_address = cur_address;
+ }
+ map_last_address =
+ (last_address >= info.GetLastAddress()) ? info.GetLastAddress() : last_address;
+
+ if (info.GetAddress() < GetInteger(address)) {
+ ++num_allocator_blocks;
+ }
+ if (last_address < info.GetLastAddress()) {
+ ++num_allocator_blocks;
+ }
+
+ mapped_size += (map_last_address + 1 - cur_address);
+ }
+
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
+ }
+
+ // If there's nothing mapped, we've nothing to do.
+ R_SUCCEED_IF(mapped_size == 0);
+ }
+
+ // Create an update allocator.
+ ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Separate the mapping.
+ const KPageProperties sep_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), map_start_address,
+ (map_last_address + 1 - map_start_address) / PageSize, 0, false,
+ sep_properties, OperationType::Separate, false));
+
+    // Reset the current tracking address.
+ cur_address = address;
+
+ // Iterate over the memory, unmapping as we go.
+ auto it = m_memory_block_manager.FindIterator(cur_address);
+
+ const auto clear_merge_attr =
+ (it->GetState() == KMemoryState::Normal &&
+ it->GetAddress() == this->GetAliasRegionStart() && it->GetAddress() == address)
+ ? KMemoryBlockDisableMergeAttribute::Normal
+ : KMemoryBlockDisableMergeAttribute::None;
+
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != m_memory_block_manager.end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // If the memory state is normal, we need to unmap it.
+ if (info.GetState() == KMemoryState::Normal) {
+ // Determine the range to unmap.
+ const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ const size_t cur_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
+ last_address + 1 - cur_address) /
+ PageSize;
+
+ // Unmap.
+ R_ASSERT(this->Operate(updater.GetPageList(), cur_address, cur_pages, 0, false,
+ unmap_properties, OperationType::Unmap, false));
+ }
+
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
+ }
+
+ // Release the memory resource.
+ m_mapped_physical_memory_size -= mapped_size;
+ m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, mapped_size);
+
+ // Update memory blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize,
+ KMemoryState::Free, KMemoryPermission::None,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+ clear_merge_attr);
+
+ // We succeeded.
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result KPageTableBase::UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result KPageTableBase::UnmapProcessMemory(KProcessAddress dst_address, size_t size,
+ KPageTableBase& src_page_table,
+ KProcessAddress src_address) {
+ // We need to lock both this table, and the current process's table, so set up an alias.
+ KPageTableBase& dst_page_table = *this;
+
+ // Acquire the table locks.
+ KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
+
+ // Check that the memory is mapped in the destination process.
+ size_t num_allocator_blocks;
+ R_TRY(dst_page_table.CheckMemoryState(
+ std::addressof(num_allocator_blocks), dst_address, size, KMemoryState::All,
+ KMemoryState::SharedCode, KMemoryPermission::UserReadWrite,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None));
+
+ // Check that the memory is mapped in the source process.
+ R_TRY(src_page_table.CheckMemoryState(src_address, size, KMemoryState::FlagCanMapProcess,
+ KMemoryState::FlagCanMapProcess, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::All,
+ KMemoryAttribute::None));
+
+ // Validate that the memory ranges are compatible.
+ {
+ // Define a helper type.
+ struct ContiguousRangeInfo {
+ public:
+ KPageTableBase& m_pt;
+ TraversalContext m_context;
+ TraversalEntry m_entry;
+ KPhysicalAddress m_phys_addr;
+ size_t m_cur_size;
+ size_t m_remaining_size;
+
+ public:
+ ContiguousRangeInfo(KPageTableBase& pt, KProcessAddress address, size_t size)
+ : m_pt(pt), m_remaining_size(size) {
+ // Begin a traversal.
+ ASSERT(m_pt.GetImpl().BeginTraversal(std::addressof(m_entry),
+ std::addressof(m_context), address));
+
+ // Setup tracking fields.
+ m_phys_addr = m_entry.phys_addr;
+ m_cur_size = std::min<size_t>(
+ m_remaining_size,
+ m_entry.block_size - (GetInteger(m_phys_addr) & (m_entry.block_size - 1)));
+
+ // Consume the whole contiguous block.
+ this->DetermineContiguousBlockExtents();
+ }
+
+ void ContinueTraversal() {
+ // Update our remaining size.
+ m_remaining_size = m_remaining_size - m_cur_size;
+
+ // Update our tracking fields.
+ if (m_remaining_size > 0) {
+ m_phys_addr = m_entry.phys_addr;
+ m_cur_size = std::min<size_t>(m_remaining_size, m_entry.block_size);
+
+ // Consume the whole contiguous block.
+ this->DetermineContiguousBlockExtents();
+ }
+ }
+
+ private:
+ void DetermineContiguousBlockExtents() {
+ // Continue traversing until we're not contiguous, or we have enough.
+ while (m_cur_size < m_remaining_size) {
+ ASSERT(m_pt.GetImpl().ContinueTraversal(std::addressof(m_entry),
+ std::addressof(m_context)));
+
+ // If we're not contiguous, we're done.
+ if (m_entry.phys_addr != m_phys_addr + m_cur_size) {
+ break;
+ }
+
+ // Update our current size.
+ m_cur_size = std::min(m_remaining_size, m_cur_size + m_entry.block_size);
+ }
+ }
+ };
+
+ // Create ranges for both tables.
+ ContiguousRangeInfo src_range(src_page_table, src_address, size);
+ ContiguousRangeInfo dst_range(dst_page_table, dst_address, size);
+
+ // Validate the ranges.
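+        // (The ranges are walked in lockstep; the mapping is compatible only if every
+        // contiguous physical run has the same base address and length in both tables.)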
+ while (src_range.m_remaining_size > 0 && dst_range.m_remaining_size > 0) {
+ R_UNLESS(src_range.m_phys_addr == dst_range.m_phys_addr, ResultInvalidMemoryRegion);
+ R_UNLESS(src_range.m_cur_size == dst_range.m_cur_size, ResultInvalidMemoryRegion);
+
+ src_range.ContinueTraversal();
+ dst_range.ContinueTraversal();
+ }
+ }
+
+ // We no longer need to hold our lock on the source page table.
+ lk.TryUnlockHalf(src_page_table.m_general_lock);
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Unmap the memory.
+ const size_t num_pages = size / PageSize;
+ const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false, unmap_properties,
+ OperationType::Unmap, false));
+
+ // Apply the memory block update.
+ m_memory_block_manager.Update(std::addressof(allocator), dst_address, num_pages,
+ KMemoryState::Free, KMemoryPermission::None,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_addr,
+ size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid,
+ const KPageProperties properties, OperationType operation,
+ bool reuse_ll) {
+ ASSERT(this->IsLockedByCurrentThread());
+ ASSERT(num_pages > 0);
+ ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize));
+ ASSERT(this->ContainsPages(virt_addr, num_pages));
+
+ // As we don't allocate page entries in guest memory, we don't need to allocate them from
+ // or free them to the page list, and so it goes unused (along with page properties).
+
+ switch (operation) {
+ case OperationType::Unmap: {
+ // Ensure that any pages we track are closed on exit.
+ KPageGroup pages_to_close(m_kernel, this->GetBlockInfoManager());
+ SCOPE_EXIT({ pages_to_close.CloseAndReset(); });
+
+ // Make a page group representing the region to unmap.
+ this->MakePageGroup(pages_to_close, virt_addr, num_pages);
+
+ // Unmap.
+ m_memory->UnmapRegion(*m_impl, virt_addr, num_pages * PageSize);
+
+ R_SUCCEED();
+ }
+ case OperationType::Map: {
+ ASSERT(virt_addr != 0);
+ ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize));
+ m_memory->MapMemoryRegion(*m_impl, virt_addr, num_pages * PageSize, phys_addr);
+
+ // Open references to pages, if we should.
+ if (this->IsHeapPhysicalAddress(phys_addr)) {
+ m_kernel.MemoryManager().Open(phys_addr, num_pages);
+ }
+
+ R_SUCCEED();
+ }
+ case OperationType::Separate: {
+ // TODO: Unimplemented.
+ R_SUCCEED();
+ }
+ case OperationType::ChangePermissions:
+ case OperationType::ChangePermissionsAndRefresh:
+ case OperationType::ChangePermissionsAndRefreshAndFlush:
+ R_SUCCEED();
+ default:
+ UNREACHABLE();
+ }
+}
+
+Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_addr,
+ size_t num_pages, const KPageGroup& page_group,
+ const KPageProperties properties, OperationType operation,
+ bool reuse_ll) {
+ ASSERT(this->IsLockedByCurrentThread());
+ ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize));
+ ASSERT(num_pages > 0);
+ ASSERT(num_pages == page_group.GetNumPages());
+
+ // As we don't allocate page entries in guest memory, we don't need to allocate them from
+ // the page list, and so it goes unused (along with page properties).
+
+ switch (operation) {
+ case OperationType::MapGroup:
+ case OperationType::MapFirstGroup: {
+ // We want to maintain a new reference to every page in the group.
+ KScopedPageGroup spg(page_group, operation != OperationType::MapFirstGroup);
+
+ for (const auto& node : page_group) {
+ const size_t size{node.GetNumPages() * PageSize};
+
+ // Map the pages.
+ m_memory->MapMemoryRegion(*m_impl, virt_addr, size, node.GetAddress());
+
+ virt_addr += size;
+ }
+
+ // We succeeded! We want to persist the reference to the pages.
+ spg.CancelClose();
+
+ R_SUCCEED();
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void KPageTableBase::FinalizeUpdate(PageLinkedList* page_list) {
+ while (page_list->Peek()) {
+ [[maybe_unused]] auto page = page_list->Pop();
+
+ // TODO: Free page entries once they are allocated in guest memory.
+ // ASSERT(this->GetPageTableManager().IsInPageTableHeap(page));
+ // ASSERT(this->GetPageTableManager().GetRefCount(page) == 0);
+ // this->GetPageTableManager().Free(page);
+ }
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_page_table_base.h b/src/core/hle/kernel/k_page_table_base.h
new file mode 100644
index 000000000..ee2c41e67
--- /dev/null
+++ b/src/core/hle/kernel/k_page_table_base.h
@@ -0,0 +1,759 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <memory>
+
+#include "common/common_funcs.h"
+#include "common/page_table.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_dynamic_resource_manager.h"
+#include "core/hle/kernel/k_light_lock.h"
+#include "core/hle/kernel/k_memory_block.h"
+#include "core/hle/kernel/k_memory_block_manager.h"
+#include "core/hle/kernel/k_memory_layout.h"
+#include "core/hle/kernel/k_memory_manager.h"
+#include "core/hle/kernel/k_typed_address.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/result.h"
+#include "core/memory.h"
+
+namespace Kernel {
+
+enum class DisableMergeAttribute : u8 {
+ None = (0U << 0),
+
+ DisableHead = (1U << 0),
+ DisableHeadAndBody = (1U << 1),
+ EnableHeadAndBody = (1U << 2),
+ DisableTail = (1U << 3),
+ EnableTail = (1U << 4),
+ EnableAndMergeHeadBodyTail = (1U << 5),
+
+ EnableHeadBodyTail = EnableHeadAndBody | EnableTail,
+ DisableHeadBodyTail = DisableHeadAndBody | DisableTail,
+};
+DECLARE_ENUM_FLAG_OPERATORS(DisableMergeAttribute);
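+// Usage sketch (illustrative, mirroring the .cpp): independently computed head/body and
+// tail attributes are combined into a single value, e.g.
+//   const auto attr = static_cast<DisableMergeAttribute>(head_body_attr | tail_attr);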
+
+struct KPageProperties {
+ KMemoryPermission perm;
+ bool io;
+ bool uncached;
+ DisableMergeAttribute disable_merge_attributes;
+};
+static_assert(std::is_trivial_v<KPageProperties>);
+static_assert(sizeof(KPageProperties) == sizeof(u32));
+
+class KResourceLimit;
+class KSystemResource;
+
+class KPageTableBase {
+ YUZU_NON_COPYABLE(KPageTableBase);
+ YUZU_NON_MOVEABLE(KPageTableBase);
+
+public:
+ using TraversalEntry = Common::PageTable::TraversalEntry;
+ using TraversalContext = Common::PageTable::TraversalContext;
+
+ class MemoryRange {
+ private:
+ KernelCore& m_kernel;
+ KPhysicalAddress m_address;
+ size_t m_size;
+ bool m_heap;
+
+ public:
+ explicit MemoryRange(KernelCore& kernel)
+ : m_kernel(kernel), m_address(0), m_size(0), m_heap(false) {}
+
+ void Set(KPhysicalAddress address, size_t size, bool heap) {
+ m_address = address;
+ m_size = size;
+ m_heap = heap;
+ }
+
+ KPhysicalAddress GetAddress() const {
+ return m_address;
+ }
+ size_t GetSize() const {
+ return m_size;
+ }
+ bool IsHeap() const {
+ return m_heap;
+ }
+
+ void Open();
+ void Close();
+ };
+
+protected:
+ enum MemoryFillValue : u8 {
+ MemoryFillValue_Zero = 0,
+ MemoryFillValue_Stack = 'X',
+ MemoryFillValue_Ipc = 'Y',
+ MemoryFillValue_Heap = 'Z',
+ };
+
+ enum class OperationType {
+ Map = 0,
+ MapGroup = 1,
+ MapFirstGroup = 2,
+ Unmap = 3,
+ ChangePermissions = 4,
+ ChangePermissionsAndRefresh = 5,
+ ChangePermissionsAndRefreshAndFlush = 6,
+ Separate = 7,
+ };
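+    // NOTE: In this implementation, Separate is currently a no-op stub and the
+    // ChangePermissions variants succeed without modifying host mappings (see Operate()).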
+
+ static constexpr size_t MaxPhysicalMapAlignment = 1_GiB;
+ static constexpr size_t RegionAlignment = 2_MiB;
+ static_assert(RegionAlignment == KernelAslrAlignment);
+
+ struct PageLinkedList {
+ private:
+ struct Node {
+ Node* m_next;
+ std::array<u8, PageSize - sizeof(Node*)> m_buffer;
+ };
+ static_assert(std::is_trivial_v<Node>);
+
+ private:
+ Node* m_root{};
+
+ public:
+ constexpr PageLinkedList() : m_root(nullptr) {}
+
+ void Push(Node* n) {
+ ASSERT(Common::IsAligned(reinterpret_cast<uintptr_t>(n), PageSize));
+ n->m_next = m_root;
+ m_root = n;
+ }
+
+ Node* Peek() const {
+ return m_root;
+ }
+
+ Node* Pop() {
+ Node* const r = m_root;
+
+ m_root = r->m_next;
+ r->m_next = nullptr;
+
+ return r;
+ }
+ };
+ static_assert(std::is_trivially_destructible_v<PageLinkedList>);
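+    // Note: each Node occupies exactly one page, so Push() threads tracked pages into an
+    // intrusive singly-linked list without any separate allocation.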
+
+ static constexpr auto DefaultMemoryIgnoreAttr =
+ KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
+
+ static constexpr size_t GetAddressSpaceWidth(Svc::CreateProcessFlag as_type) {
+ switch (static_cast<Svc::CreateProcessFlag>(as_type &
+ Svc::CreateProcessFlag::AddressSpaceMask)) {
+ case Svc::CreateProcessFlag::AddressSpace64Bit:
+ return 39;
+ case Svc::CreateProcessFlag::AddressSpace64BitDeprecated:
+ return 36;
+ case Svc::CreateProcessFlag::AddressSpace32Bit:
+ case Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias:
+ return 32;
+ default:
+ UNREACHABLE();
+ }
+ }
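+    // For example, AddressSpace64Bit yields a 39-bit (512 GiB) process address space.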
+
+private:
+ class KScopedPageTableUpdater {
+ private:
+ KPageTableBase* m_pt;
+ PageLinkedList m_ll;
+
+ public:
+ explicit KScopedPageTableUpdater(KPageTableBase* pt) : m_pt(pt), m_ll() {}
+ explicit KScopedPageTableUpdater(KPageTableBase& pt)
+ : KScopedPageTableUpdater(std::addressof(pt)) {}
+ ~KScopedPageTableUpdater() {
+ m_pt->FinalizeUpdate(this->GetPageList());
+ }
+
+ PageLinkedList* GetPageList() {
+ return std::addressof(m_ll);
+ }
+ };
+
+private:
+ KernelCore& m_kernel;
+ Core::System& m_system;
+ KProcessAddress m_address_space_start{};
+ KProcessAddress m_address_space_end{};
+ KProcessAddress m_heap_region_start{};
+ KProcessAddress m_heap_region_end{};
+ KProcessAddress m_current_heap_end{};
+ KProcessAddress m_alias_region_start{};
+ KProcessAddress m_alias_region_end{};
+ KProcessAddress m_stack_region_start{};
+ KProcessAddress m_stack_region_end{};
+ KProcessAddress m_kernel_map_region_start{};
+ KProcessAddress m_kernel_map_region_end{};
+ KProcessAddress m_alias_code_region_start{};
+ KProcessAddress m_alias_code_region_end{};
+ KProcessAddress m_code_region_start{};
+ KProcessAddress m_code_region_end{};
+ size_t m_max_heap_size{};
+ size_t m_mapped_physical_memory_size{};
+ size_t m_mapped_unsafe_physical_memory{};
+ size_t m_mapped_insecure_memory{};
+ size_t m_mapped_ipc_server_memory{};
+ mutable KLightLock m_general_lock;
+ mutable KLightLock m_map_physical_memory_lock;
+ KLightLock m_device_map_lock;
+ std::unique_ptr<Common::PageTable> m_impl{};
+ Core::Memory::Memory* m_memory{};
+ KMemoryBlockManager m_memory_block_manager{};
+ u32 m_allocate_option{};
+ u32 m_address_space_width{};
+ bool m_is_kernel{};
+ bool m_enable_aslr{};
+ bool m_enable_device_address_space_merge{};
+ KMemoryBlockSlabManager* m_memory_block_slab_manager{};
+ KBlockInfoManager* m_block_info_manager{};
+ KResourceLimit* m_resource_limit{};
+ const KMemoryRegion* m_cached_physical_linear_region{};
+ const KMemoryRegion* m_cached_physical_heap_region{};
+ MemoryFillValue m_heap_fill_value{};
+ MemoryFillValue m_ipc_fill_value{};
+ MemoryFillValue m_stack_fill_value{};
+
+public:
+ explicit KPageTableBase(KernelCore& kernel);
+ ~KPageTableBase();
+
+ Result InitializeForKernel(bool is_64_bit, KVirtualAddress start, KVirtualAddress end,
+ Core::Memory::Memory& memory);
+ Result InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr,
+ bool enable_device_address_space_merge, bool from_back,
+ KMemoryManager::Pool pool, KProcessAddress code_address,
+ size_t code_size, KSystemResource* system_resource,
+ KResourceLimit* resource_limit, Core::Memory::Memory& memory);
+
+ void Finalize();
+
+ bool IsKernel() const {
+ return m_is_kernel;
+ }
+ bool IsAslrEnabled() const {
+ return m_enable_aslr;
+ }
+
+ bool Contains(KProcessAddress addr) const {
+ return m_address_space_start <= addr && addr <= m_address_space_end - 1;
+ }
+
+ bool Contains(KProcessAddress addr, size_t size) const {
+ return m_address_space_start <= addr && addr < addr + size &&
+ addr + size - 1 <= m_address_space_end - 1;
+ }
+
+ bool IsInAliasRegion(KProcessAddress addr, size_t size) const {
+ return this->Contains(addr, size) && m_alias_region_start <= addr &&
+ addr + size - 1 <= m_alias_region_end - 1;
+ }
+
+ bool IsInHeapRegion(KProcessAddress addr, size_t size) const {
+ return this->Contains(addr, size) && m_heap_region_start <= addr &&
+ addr + size - 1 <= m_heap_region_end - 1;
+ }
+
+ bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const {
+        // Even though Unsafe physical memory is KMemoryState::Normal, it must be mapped
+        // inside the alias code region.
+ return this->CanContain(addr, size, Svc::MemoryState::AliasCode);
+ }
+
+ KScopedLightLock AcquireDeviceMapLock() {
+ return KScopedLightLock(m_device_map_lock);
+ }
+
+ KProcessAddress GetRegionAddress(Svc::MemoryState state) const;
+ size_t GetRegionSize(Svc::MemoryState state) const;
+ bool CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const;
+
+ KProcessAddress GetRegionAddress(KMemoryState state) const {
+ return this->GetRegionAddress(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
+ }
+ size_t GetRegionSize(KMemoryState state) const {
+ return this->GetRegionSize(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
+ }
+ bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
+ return this->CanContain(addr, size,
+ static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
+ }
+
+public:
+ Core::Memory::Memory& GetMemory() {
+ return *m_memory;
+ }
+
+ Core::Memory::Memory& GetMemory() const {
+ return *m_memory;
+ }
+
+ Common::PageTable& GetImpl() {
+ return *m_impl;
+ }
+
+ Common::PageTable& GetImpl() const {
+ return *m_impl;
+ }
+
+ size_t GetNumGuardPages() const {
+ return this->IsKernel() ? 1 : 4;
+ }
+
+protected:
+ // NOTE: These three functions (Operate, Operate, FinalizeUpdate) are virtual functions
+ // in Nintendo's kernel. We devirtualize them, since KPageTable is the only derived
+ // class, and this avoids unnecessary virtual function calls.
+ Result Operate(PageLinkedList* page_list, KProcessAddress virt_addr, size_t num_pages,
+ KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties,
+ OperationType operation, bool reuse_ll);
+ Result Operate(PageLinkedList* page_list, KProcessAddress virt_addr, size_t num_pages,
+ const KPageGroup& page_group, const KPageProperties properties,
+ OperationType operation, bool reuse_ll);
+ void FinalizeUpdate(PageLinkedList* page_list);
+
+ bool IsLockedByCurrentThread() const {
+ return m_general_lock.IsLockedByCurrentThread();
+ }
+
+ bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ return m_kernel.MemoryLayout().IsLinearMappedPhysicalAddress(
+ m_cached_physical_linear_region, phys_addr);
+ }
+
+ bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ return m_kernel.MemoryLayout().IsLinearMappedPhysicalAddress(
+ m_cached_physical_linear_region, phys_addr, size);
+ }
+
+ bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ return m_kernel.MemoryLayout().IsHeapPhysicalAddress(m_cached_physical_heap_region,
+ phys_addr);
+ }
+
+ bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ return m_kernel.MemoryLayout().IsHeapPhysicalAddress(m_cached_physical_heap_region,
+ phys_addr, size);
+ }
+
+ bool IsHeapPhysicalAddressForFinalize(KPhysicalAddress phys_addr) {
+ ASSERT(!this->IsLockedByCurrentThread());
+
+ return m_kernel.MemoryLayout().IsHeapPhysicalAddress(m_cached_physical_heap_region,
+ phys_addr);
+ }
+
+ bool ContainsPages(KProcessAddress addr, size_t num_pages) const {
+ return (m_address_space_start <= addr) &&
+ (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) &&
+ (addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
+ }
+
+private:
+ KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
+ size_t num_pages, size_t alignment, size_t offset,
+ size_t guard_pages) const;
+
+ Result CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr, size_t size,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
+ Result CheckMemoryStateContiguous(KProcessAddress addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr) const {
+ R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
+ perm, attr_mask, attr));
+ }
+
+ Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
+ Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
+ KMemoryAttribute* out_attr, size_t* out_blocks_needed,
+ KMemoryBlockManager::const_iterator it, KProcessAddress last_addr,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
+ Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
+ KMemoryAttribute* out_attr, size_t* out_blocks_needed,
+ KProcessAddress addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
+ Result CheckMemoryState(size_t* out_blocks_needed, KProcessAddress addr, size_t size,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
+ R_RETURN(this->CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
+ state_mask, state, perm_mask, perm, attr_mask, attr,
+ ignore_attr));
+ }
+ Result CheckMemoryState(KProcessAddress addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
+ R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
+ attr_mask, attr, ignore_attr));
+ }
+
+ Result LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_paddr, KProcessAddress addr,
+ size_t size, KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryPermission new_perm, KMemoryAttribute lock_attr);
+ Result UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryPermission new_perm, KMemoryAttribute lock_attr,
+ const KPageGroup* pg);
+
+ Result QueryInfoImpl(KMemoryInfo* out_info, Svc::PageInfo* out_page,
+ KProcessAddress address) const;
+
+ Result QueryMappingImpl(KProcessAddress* out, KPhysicalAddress address, size_t size,
+ Svc::MemoryState state) const;
+
+ Result AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
+ size_t num_pages, KMemoryPermission perm);
+ Result MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
+ const KPageGroup& pg, const KPageProperties properties, bool reuse_ll);
+
+ void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
+ const KPageGroup& pg);
+
+ Result MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages);
+ bool IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, size_t num_pages);
+
+ Result GetContiguousMemoryRangeWithState(MemoryRange* out, KProcessAddress address, size_t size,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr);
+
+ Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+ KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start,
+ size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
+
+ Result MapIoImpl(KProcessAddress* out, PageLinkedList* page_list, KPhysicalAddress phys_addr,
+ size_t size, KMemoryState state, KMemoryPermission perm);
+ Result ReadIoMemoryImpl(KProcessAddress dst_addr, KPhysicalAddress phys_addr, size_t size,
+ KMemoryState state);
+ Result WriteIoMemoryImpl(KPhysicalAddress phys_addr, KProcessAddress src_addr, size_t size,
+ KMemoryState state);
+
+ Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed,
+ KProcessAddress address, size_t size, KMemoryPermission test_perm,
+ KMemoryState dst_state);
+ Result SetupForIpcServer(KProcessAddress* out_addr, size_t size, KProcessAddress src_addr,
+ KMemoryPermission test_perm, KMemoryState dst_state,
+ KPageTableBase& src_page_table, bool send);
+ void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, KProcessAddress address,
+ size_t size, KMemoryPermission prot_perm);
+
+ size_t GetSize(KMemoryState state) const;
+
+ bool GetPhysicalAddressLocked(KPhysicalAddress* out, KProcessAddress virt_addr) const {
+ // Validate pre-conditions.
+ ASSERT(this->IsLockedByCurrentThread());
+
+ return this->GetImpl().GetPhysicalAddress(out, virt_addr);
+ }
+
+public:
+ bool GetPhysicalAddress(KPhysicalAddress* out, KProcessAddress virt_addr) const {
+ // Validate pre-conditions.
+ ASSERT(!this->IsLockedByCurrentThread());
+
+ // Acquire exclusive access to the table while doing address translation.
+ KScopedLightLock lk(m_general_lock);
+
+ return this->GetPhysicalAddressLocked(out, virt_addr);
+ }
+
+ KBlockInfoManager* GetBlockInfoManager() const {
+ return m_block_info_manager;
+ }
+
+ Result SetMemoryPermission(KProcessAddress addr, size_t size, Svc::MemoryPermission perm);
+ Result SetProcessMemoryPermission(KProcessAddress addr, size_t size,
+ Svc::MemoryPermission perm);
+ Result SetMemoryAttribute(KProcessAddress addr, size_t size, KMemoryAttribute mask,
+ KMemoryAttribute attr);
+ Result SetHeapSize(KProcessAddress* out, size_t size);
+ Result SetMaxHeapSize(size_t size);
+ Result QueryInfo(KMemoryInfo* out_info, Svc::PageInfo* out_page_info,
+ KProcessAddress addr) const;
+ Result QueryPhysicalAddress(Svc::lp64::PhysicalMemoryInfo* out, KProcessAddress address) const;
+ Result QueryStaticMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) const {
+ R_RETURN(this->QueryMappingImpl(out, address, size, Svc::MemoryState::Static));
+ }
+ Result QueryIoMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) const {
+ R_RETURN(this->QueryMappingImpl(out, address, size, Svc::MemoryState::Io));
+ }
+ Result MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
+ Result UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
+ Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
+ Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
+ Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm);
+ Result MapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size,
+ Svc::MemoryMapping mapping, Svc::MemoryPermission perm);
+ Result UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size,
+ Svc::MemoryMapping mapping);
+ Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm);
+ Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm);
+ Result MapInsecureMemory(KProcessAddress address, size_t size);
+ Result UnmapInsecureMemory(KProcessAddress address, size_t size);
+
+ Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+ KPhysicalAddress phys_addr, KProcessAddress region_start,
+ size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
+ R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start,
+ region_num_pages, state, perm));
+ }
+
+ Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+ KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
+ R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
+ this->GetRegionAddress(state),
+ this->GetRegionSize(state) / PageSize, state, perm));
+ }
+
+ Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state,
+ KMemoryPermission perm) {
+ R_RETURN(this->MapPages(out_addr, num_pages, PageSize, 0, false,
+ this->GetRegionAddress(state),
+ this->GetRegionSize(state) / PageSize, state, perm));
+ }
+
+ Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
+ KMemoryPermission perm);
+ Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state);
+
+ Result MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
+ KProcessAddress region_start, size_t region_num_pages, KMemoryState state,
+ KMemoryPermission perm);
+ Result MapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state,
+ KMemoryPermission perm);
+ Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state);
+
+ Result MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr);
+
+ Result InvalidateProcessDataCache(KProcessAddress address, size_t size);
+ Result InvalidateCurrentProcessDataCache(KProcessAddress address, size_t size);
+
+ Result ReadDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
+ Result ReadDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
+ KMemoryState state);
+
+ Result WriteDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
+ Result WriteDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
+ KMemoryState state);
+
+ Result LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, size_t size,
+ KMemoryPermission perm, bool is_aligned, bool check_heap);
+ Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap);
+
+ Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size);
+ Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size);
+
+ Result OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange* out,
+ KProcessAddress address, size_t size,
+ KMemoryPermission perm, bool is_aligned);
+ Result OpenMemoryRangeForUnmapDeviceAddressSpace(MemoryRange* out, KProcessAddress address,
+ size_t size);
+
+ Result LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, size_t size);
+ Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size);
+
+ Result LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size,
+ KMemoryPermission perm);
+ Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup& pg);
+ Result LockForCodeMemory(KPageGroup* out, KProcessAddress address, size_t size);
+ Result UnlockForCodeMemory(KProcessAddress address, size_t size, const KPageGroup& pg);
+
+ Result OpenMemoryRangeForProcessCacheOperation(MemoryRange* out, KProcessAddress address,
+ size_t size);
+
+ Result CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size,
+ KProcessAddress src_addr, KMemoryState src_state_mask,
+ KMemoryState src_state, KMemoryPermission src_test_perm,
+ KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr);
+ Result CopyMemoryFromLinearToKernel(void* buffer, size_t size, KProcessAddress src_addr,
+ KMemoryState src_state_mask, KMemoryState src_state,
+ KMemoryPermission src_test_perm,
+ KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr);
+ Result CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size,
+ KMemoryState dst_state_mask, KMemoryState dst_state,
+ KMemoryPermission dst_test_perm,
+ KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
+ KProcessAddress src_addr);
+ Result CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size,
+ KMemoryState dst_state_mask, KMemoryState dst_state,
+ KMemoryPermission dst_test_perm,
+ KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
+ void* buffer);
+ Result CopyMemoryFromHeapToHeap(KPageTableBase& dst_page_table, KProcessAddress dst_addr,
+ size_t size, KMemoryState dst_state_mask,
+ KMemoryState dst_state, KMemoryPermission dst_test_perm,
+ KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
+ KProcessAddress src_addr, KMemoryState src_state_mask,
+ KMemoryState src_state, KMemoryPermission src_test_perm,
+ KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr);
+ Result CopyMemoryFromHeapToHeapWithoutCheckDestination(
+ KPageTableBase& dst_page_table, KProcessAddress dst_addr, size_t size,
+ KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm,
+ KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr,
+ KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm,
+ KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr);
+
+ Result SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr,
+ KPageTableBase& src_page_table, KMemoryPermission test_perm,
+ KMemoryState dst_state, bool send);
+ Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state);
+ Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state);
+
+ Result MapPhysicalMemory(KProcessAddress address, size_t size);
+ Result UnmapPhysicalMemory(KProcessAddress address, size_t size);
+
+ Result MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size);
+ Result UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size);
+
+ Result UnmapProcessMemory(KProcessAddress dst_address, size_t size, KPageTableBase& src_pt,
+ KProcessAddress src_address);
+
+public:
+ KProcessAddress GetAddressSpaceStart() const {
+ return m_address_space_start;
+ }
+ KProcessAddress GetHeapRegionStart() const {
+ return m_heap_region_start;
+ }
+ KProcessAddress GetAliasRegionStart() const {
+ return m_alias_region_start;
+ }
+ KProcessAddress GetStackRegionStart() const {
+ return m_stack_region_start;
+ }
+ KProcessAddress GetKernelMapRegionStart() const {
+ return m_kernel_map_region_start;
+ }
+ KProcessAddress GetCodeRegionStart() const {
+ return m_code_region_start;
+ }
+ KProcessAddress GetAliasCodeRegionStart() const {
+ return m_alias_code_region_start;
+ }
+
+ size_t GetAddressSpaceSize() const {
+ return m_address_space_end - m_address_space_start;
+ }
+ size_t GetHeapRegionSize() const {
+ return m_heap_region_end - m_heap_region_start;
+ }
+ size_t GetAliasRegionSize() const {
+ return m_alias_region_end - m_alias_region_start;
+ }
+ size_t GetStackRegionSize() const {
+ return m_stack_region_end - m_stack_region_start;
+ }
+ size_t GetKernelMapRegionSize() const {
+ return m_kernel_map_region_end - m_kernel_map_region_start;
+ }
+ size_t GetCodeRegionSize() const {
+ return m_code_region_end - m_code_region_start;
+ }
+ size_t GetAliasCodeRegionSize() const {
+ return m_alias_code_region_end - m_alias_code_region_start;
+ }
+
+ size_t GetNormalMemorySize() const {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ return (m_current_heap_end - m_heap_region_start) + m_mapped_physical_memory_size;
+ }
+
+ size_t GetCodeSize() const;
+ size_t GetCodeDataSize() const;
+ size_t GetAliasCodeSize() const;
+ size_t GetAliasCodeDataSize() const;
+
+ u32 GetAllocateOption() const {
+ return m_allocate_option;
+ }
+
+ u32 GetAddressSpaceWidth() const {
+ return m_address_space_width;
+ }
+
+public:
+ // Linear mapped
+ static u8* GetLinearMappedVirtualPointer(KernelCore& kernel, KPhysicalAddress addr) {
+ return kernel.System().DeviceMemory().GetPointer<u8>(addr);
+ }
+
+ static KPhysicalAddress GetLinearMappedPhysicalAddress(KernelCore& kernel,
+ KVirtualAddress addr) {
+ return kernel.MemoryLayout().GetLinearPhysicalAddress(addr);
+ }
+
+ static KVirtualAddress GetLinearMappedVirtualAddress(KernelCore& kernel,
+ KPhysicalAddress addr) {
+ return kernel.MemoryLayout().GetLinearVirtualAddress(addr);
+ }
+
+ // Heap
+ static u8* GetHeapVirtualPointer(KernelCore& kernel, KPhysicalAddress addr) {
+ return kernel.System().DeviceMemory().GetPointer<u8>(addr);
+ }
+
+ static KPhysicalAddress GetHeapPhysicalAddress(KernelCore& kernel, KVirtualAddress addr) {
+ return GetLinearMappedPhysicalAddress(kernel, addr);
+ }
+
+ static KVirtualAddress GetHeapVirtualAddress(KernelCore& kernel, KPhysicalAddress addr) {
+ return GetLinearMappedVirtualAddress(kernel, addr);
+ }
+
+ // Member heap
+ u8* GetHeapVirtualPointer(KPhysicalAddress addr) {
+ return GetHeapVirtualPointer(m_kernel, addr);
+ }
+
+ KPhysicalAddress GetHeapPhysicalAddress(KVirtualAddress addr) {
+ return GetHeapPhysicalAddress(m_kernel, addr);
+ }
+
+ KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress addr) {
+ return GetHeapVirtualAddress(m_kernel, addr);
+ }
+
+ // TODO: GetPageTableVirtualAddress
+ // TODO: GetPageTablePhysicalAddress
+};
+
+} // namespace Kernel
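
The GetPhysicalAddress/GetPhysicalAddressLocked pair above shows the header's recurring locking idiom: the public entry point asserts the lock is not already held, acquires it, and forwards to a private *Locked variant that asserts the opposite precondition. A minimal standalone sketch of that split, using std::mutex in place of KScopedLightLock (illustrative names, not yuzu's API):

#include <cassert>
#include <cstdint>
#include <mutex>

class Table {
public:
    // Public variant: acquires the lock, then delegates.
    bool Lookup(uint64_t* out, uint64_t key) {
        std::scoped_lock lk(m_lock);
        return this->LookupLocked(out, key);
    }

private:
    // Private variant: the caller must already hold the lock. (The kernel
    // version asserts IsLockedByCurrentThread(), which std::mutex cannot
    // express, so this sketch relies on the convention alone.)
    bool LookupLocked(uint64_t* out, uint64_t key) {
        *out = key + 0x1000; // placeholder translation
        return true;
    }

    std::mutex m_lock;
};

int main() {
    Table t;
    uint64_t phys{};
    const bool ok = t.Lookup(&phys, 0x8000);
    assert(ok && phys == 0x9000);
    (void)ok;
    return 0;
}
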
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 1f4b0755d..3cfb414e5 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -298,9 +298,9 @@ Result KProcess::Initialize(const Svc::CreateProcessParameter& params, const KPa
const bool enable_aslr = True(params.flags & Svc::CreateProcessFlag::EnableAslr);
const bool enable_das_merge =
False(params.flags & Svc::CreateProcessFlag::DisableDeviceAddressSpaceMerge);
- R_TRY(m_page_table.InitializeForProcess(
- as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address,
- params.code_num_pages * PageSize, m_system_resource, res_limit, this->GetMemory()));
+ R_TRY(m_page_table.Initialize(as_type, enable_aslr, enable_das_merge, !enable_aslr, pool,
+ params.code_address, params.code_num_pages * PageSize,
+ m_system_resource, res_limit, this->GetMemory()));
}
ON_RESULT_FAILURE_2 {
m_page_table.Finalize();
@@ -391,9 +391,9 @@ Result KProcess::Initialize(const Svc::CreateProcessParameter& params,
const bool enable_aslr = True(params.flags & Svc::CreateProcessFlag::EnableAslr);
const bool enable_das_merge =
False(params.flags & Svc::CreateProcessFlag::DisableDeviceAddressSpaceMerge);
- R_TRY(m_page_table.InitializeForProcess(as_type, enable_aslr, enable_das_merge,
- !enable_aslr, pool, params.code_address, code_size,
- m_system_resource, res_limit, this->GetMemory()));
+ R_TRY(m_page_table.Initialize(as_type, enable_aslr, enable_das_merge, !enable_aslr, pool,
+ params.code_address, code_size, m_system_resource, res_limit,
+ this->GetMemory()));
}
ON_RESULT_FAILURE_2 {
m_page_table.Finalize();
@@ -1122,9 +1122,9 @@ Result KProcess::GetThreadList(s32* out_num_threads, KProcessAddress out_thread_
void KProcess::Switch(KProcess* cur_process, KProcess* next_process) {}
KProcess::KProcess(KernelCore& kernel)
- : KAutoObjectWithSlabHeapAndContainer(kernel), m_page_table{kernel.System()},
- m_state_lock{kernel}, m_list_lock{kernel}, m_cond_var{kernel.System()},
- m_address_arbiter{kernel.System()}, m_handle_table{kernel} {}
+ : KAutoObjectWithSlabHeapAndContainer(kernel), m_page_table{kernel}, m_state_lock{kernel},
+ m_list_lock{kernel}, m_cond_var{kernel.System()}, m_address_arbiter{kernel.System()},
+ m_handle_table{kernel} {}
KProcess::~KProcess() = default;
Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size,
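
The first two hunks above swap the direct InitializeForProcess call for the wrapper's Initialize, which forwards the result via R_RETURN. A toy sketch of this style of Result propagation, assuming deliberately simplified macros rather than yuzu's real ones (which additionally cooperate with ON_RESULT_FAILURE handlers):

#include <cstdio>

struct Result {
    int code;
    bool IsError() const { return code != 0; }
};
constexpr Result ResultSuccess{0};

#define R_RETURN(expr) return (expr)
#define R_TRY(expr)                                                            \
    do {                                                                       \
        const Result r_ = (expr);                                              \
        if (r_.IsError()) {                                                    \
            return r_;                                                         \
        }                                                                      \
    } while (0)

Result InitializeForProcess() { return ResultSuccess; }

Result Initialize() {
    // The wrapper forwards and propagates failure to its own caller.
    R_RETURN(InitializeForProcess());
}

Result Caller() {
    R_TRY(Initialize()); // early-returns on error, falls through on success
    R_RETURN(ResultSuccess);
}

int main() {
    std::printf("code=%d\n", Caller().code);
    return 0;
}
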
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index f9f755afa..8339465fd 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -5,13 +5,14 @@
#include <map>
+#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/code_set.h"
#include "core/hle/kernel/k_address_arbiter.h"
#include "core/hle/kernel/k_capabilities.h"
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/k_handle_table.h"
-#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_page_table_manager.h"
+#include "core/hle/kernel/k_process_page_table.h"
#include "core/hle/kernel/k_system_resource.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_local_page.h"
@@ -65,7 +66,7 @@ private:
using TLPIterator = TLPTree::iterator;
private:
- KPageTable m_page_table;
+ KProcessPageTable m_page_table;
std::atomic<size_t> m_used_kernel_memory_size{};
TLPTree m_fully_used_tlp_tree{};
TLPTree m_partially_used_tlp_tree{};
@@ -254,9 +255,8 @@ public:
return m_is_hbl;
}
- Kernel::KMemoryManager::Direction GetAllocateOption() const {
- // TODO: property of the KPageTableBase
- return KMemoryManager::Direction::FromFront;
+ u32 GetAllocateOption() const {
+ return m_page_table.GetAllocateOption();
}
ThreadList& GetThreadList() {
@@ -295,10 +295,10 @@ public:
return m_list_lock;
}
- KPageTable& GetPageTable() {
+ KProcessPageTable& GetPageTable() {
return m_page_table;
}
- const KPageTable& GetPageTable() const {
+ const KProcessPageTable& GetPageTable() const {
return m_page_table;
}
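
KProcess now stores a KProcessPageTable, and both GetPageTable() overloads return the wrapper type; existing call sites keep compiling because the wrapper (the new file below) re-exposes every member function under its original name. A reduced sketch of why such a type swap is transparent, with illustrative names only:

#include <cstdio>

class Impl {
public:
    int Size() const { return 42; }
};

class Wrapper {
public:
    int Size() const { return m_impl.Size(); } // same name, forwarded
private:
    Impl m_impl;
};

class Process {
public:
    Wrapper& GetPageTable() { return m_table; } // return type was: Impl&
private:
    Wrapper m_table;
};

int main() {
    Process p;
    std::printf("%d\n", p.GetPageTable().Size()); // call site unchanged
    return 0;
}
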
diff --git a/src/core/hle/kernel/k_process_page_table.h b/src/core/hle/kernel/k_process_page_table.h
new file mode 100644
index 000000000..b7ae5abd0
--- /dev/null
+++ b/src/core/hle/kernel/k_process_page_table.h
@@ -0,0 +1,480 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "core/hle/kernel/k_page_table.h"
+#include "core/hle/kernel/k_scoped_lock.h"
+#include "core/hle/kernel/svc_types.h"
+
+namespace Core {
+class ARM_Interface;
+}
+
+namespace Kernel {
+
+class KProcessPageTable {
+private:
+ KPageTable m_page_table;
+
+public:
+ KProcessPageTable(KernelCore& kernel) : m_page_table(kernel) {}
+
+ Result Initialize(Svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge,
+ bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address,
+ size_t code_size, KSystemResource* system_resource,
+ KResourceLimit* resource_limit, Core::Memory::Memory& memory) {
+ R_RETURN(m_page_table.InitializeForProcess(as_type, enable_aslr, enable_das_merge,
+ from_back, pool, code_address, code_size,
+ system_resource, resource_limit, memory));
+ }
+
+ void Finalize() {
+ m_page_table.Finalize();
+ }
+
+ Core::Memory::Memory& GetMemory() {
+ return m_page_table.GetMemory();
+ }
+
+ Core::Memory::Memory& GetMemory() const {
+ return m_page_table.GetMemory();
+ }
+
+ Common::PageTable& GetImpl() {
+ return m_page_table.GetImpl();
+ }
+
+ Common::PageTable& GetImpl() const {
+ return m_page_table.GetImpl();
+ }
+
+ size_t GetNumGuardPages() const {
+ return m_page_table.GetNumGuardPages();
+ }
+
+ KScopedLightLock AcquireDeviceMapLock() {
+ return m_page_table.AcquireDeviceMapLock();
+ }
+
+ Result SetMemoryPermission(KProcessAddress addr, size_t size, Svc::MemoryPermission perm) {
+ R_RETURN(m_page_table.SetMemoryPermission(addr, size, perm));
+ }
+
+ Result SetProcessMemoryPermission(KProcessAddress addr, size_t size,
+ Svc::MemoryPermission perm) {
+ R_RETURN(m_page_table.SetProcessMemoryPermission(addr, size, perm));
+ }
+
+ Result SetMemoryAttribute(KProcessAddress addr, size_t size, KMemoryAttribute mask,
+ KMemoryAttribute attr) {
+ R_RETURN(m_page_table.SetMemoryAttribute(addr, size, mask, attr));
+ }
+
+ Result SetHeapSize(KProcessAddress* out, size_t size) {
+ R_RETURN(m_page_table.SetHeapSize(out, size));
+ }
+
+ Result SetMaxHeapSize(size_t size) {
+ R_RETURN(m_page_table.SetMaxHeapSize(size));
+ }
+
+ Result QueryInfo(KMemoryInfo* out_info, Svc::PageInfo* out_page_info,
+ KProcessAddress addr) const {
+ R_RETURN(m_page_table.QueryInfo(out_info, out_page_info, addr));
+ }
+
+ Result QueryPhysicalAddress(Svc::lp64::PhysicalMemoryInfo* out, KProcessAddress address) {
+ R_RETURN(m_page_table.QueryPhysicalAddress(out, address));
+ }
+
+ Result QueryStaticMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) {
+ R_RETURN(m_page_table.QueryStaticMapping(out, address, size));
+ }
+
+ Result QueryIoMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) {
+ R_RETURN(m_page_table.QueryIoMapping(out, address, size));
+ }
+
+ Result MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
+ R_RETURN(m_page_table.MapMemory(dst_address, src_address, size));
+ }
+
+ Result UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
+ R_RETURN(m_page_table.UnmapMemory(dst_address, src_address, size));
+ }
+
+ Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
+ R_RETURN(m_page_table.MapCodeMemory(dst_address, src_address, size));
+ }
+
+ Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
+ R_RETURN(m_page_table.UnmapCodeMemory(dst_address, src_address, size));
+ }
+
+ Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
+ R_RETURN(m_page_table.MapIo(phys_addr, size, perm));
+ }
+
+ Result MapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size,
+ Svc::MemoryMapping mapping, Svc::MemoryPermission perm) {
+ R_RETURN(m_page_table.MapIoRegion(dst_address, phys_addr, size, mapping, perm));
+ }
+
+ Result UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size,
+ Svc::MemoryMapping mapping) {
+ R_RETURN(m_page_table.UnmapIoRegion(dst_address, phys_addr, size, mapping));
+ }
+
+ Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
+ R_RETURN(m_page_table.MapStatic(phys_addr, size, perm));
+ }
+
+ Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm) {
+ R_RETURN(m_page_table.MapRegion(region_type, perm));
+ }
+
+ Result MapInsecureMemory(KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.MapInsecureMemory(address, size));
+ }
+
+ Result UnmapInsecureMemory(KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.UnmapInsecureMemory(address, size));
+ }
+
+ Result MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMemoryState state,
+ KMemoryPermission perm) {
+ R_RETURN(m_page_table.MapPageGroup(addr, pg, state, perm));
+ }
+
+ Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state) {
+ R_RETURN(m_page_table.UnmapPageGroup(address, pg, state));
+ }
+
+ Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+ KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
+ R_RETURN(m_page_table.MapPages(out_addr, num_pages, alignment, phys_addr, state, perm));
+ }
+
+ Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state,
+ KMemoryPermission perm) {
+ R_RETURN(m_page_table.MapPages(out_addr, num_pages, state, perm));
+ }
+
+ Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
+ KMemoryPermission perm) {
+ R_RETURN(m_page_table.MapPages(address, num_pages, state, perm));
+ }
+
+ Result UnmapPages(KProcessAddress addr, size_t num_pages, KMemoryState state) {
+ R_RETURN(m_page_table.UnmapPages(addr, num_pages, state));
+ }
+
+ Result MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr) {
+ R_RETURN(m_page_table.MakeAndOpenPageGroup(out, address, num_pages, state_mask, state,
+ perm_mask, perm, attr_mask, attr));
+ }
+
+ Result InvalidateProcessDataCache(KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.InvalidateProcessDataCache(address, size));
+ }
+
+ Result ReadDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
+ R_RETURN(m_page_table.ReadDebugMemory(dst_address, src_address, size));
+ }
+
+ Result ReadDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
+ KMemoryState state) {
+ R_RETURN(m_page_table.ReadDebugIoMemory(dst_address, src_address, size, state));
+ }
+
+ Result WriteDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
+ R_RETURN(m_page_table.WriteDebugMemory(dst_address, src_address, size));
+ }
+
+ Result WriteDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
+ KMemoryState state) {
+ R_RETURN(m_page_table.WriteDebugIoMemory(dst_address, src_address, size, state));
+ }
+
+ Result LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, size_t size,
+ KMemoryPermission perm, bool is_aligned, bool check_heap) {
+ R_RETURN(m_page_table.LockForMapDeviceAddressSpace(out_is_io, address, size, perm,
+ is_aligned, check_heap));
+ }
+
+ Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap) {
+ R_RETURN(m_page_table.LockForUnmapDeviceAddressSpace(address, size, check_heap));
+ }
+
+ Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.UnlockForDeviceAddressSpace(address, size));
+ }
+
+ Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.UnlockForDeviceAddressSpacePartialMap(address, size));
+ }
+
+ Result OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange* out,
+ KProcessAddress address, size_t size,
+ KMemoryPermission perm, bool is_aligned) {
+ R_RETURN(m_page_table.OpenMemoryRangeForMapDeviceAddressSpace(out, address, size, perm,
+ is_aligned));
+ }
+
+ Result OpenMemoryRangeForUnmapDeviceAddressSpace(KPageTableBase::MemoryRange* out,
+ KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.OpenMemoryRangeForUnmapDeviceAddressSpace(out, address, size));
+ }
+
+ Result LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.LockForIpcUserBuffer(out, address, size));
+ }
+
+ Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.UnlockForIpcUserBuffer(address, size));
+ }
+
+ Result LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size,
+ KMemoryPermission perm) {
+ R_RETURN(m_page_table.LockForTransferMemory(out, address, size, perm));
+ }
+
+ Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup& pg) {
+ R_RETURN(m_page_table.UnlockForTransferMemory(address, size, pg));
+ }
+
+ Result LockForCodeMemory(KPageGroup* out, KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.LockForCodeMemory(out, address, size));
+ }
+
+ Result UnlockForCodeMemory(KProcessAddress address, size_t size, const KPageGroup& pg) {
+ R_RETURN(m_page_table.UnlockForCodeMemory(address, size, pg));
+ }
+
+ Result OpenMemoryRangeForProcessCacheOperation(KPageTableBase::MemoryRange* out,
+ KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.OpenMemoryRangeForProcessCacheOperation(out, address, size));
+ }
+
+ Result CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size,
+ KProcessAddress src_addr, KMemoryState src_state_mask,
+ KMemoryState src_state, KMemoryPermission src_test_perm,
+ KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
+ R_RETURN(m_page_table.CopyMemoryFromLinearToUser(dst_addr, size, src_addr, src_state_mask,
+ src_state, src_test_perm, src_attr_mask,
+ src_attr));
+ }
+
+ Result CopyMemoryFromLinearToKernel(void* dst_addr, size_t size, KProcessAddress src_addr,
+ KMemoryState src_state_mask, KMemoryState src_state,
+ KMemoryPermission src_test_perm,
+ KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
+ R_RETURN(m_page_table.CopyMemoryFromLinearToKernel(dst_addr, size, src_addr, src_state_mask,
+ src_state, src_test_perm, src_attr_mask,
+ src_attr));
+ }
+
+ Result CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size,
+ KMemoryState dst_state_mask, KMemoryState dst_state,
+ KMemoryPermission dst_test_perm,
+ KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
+ KProcessAddress src_addr) {
+ R_RETURN(m_page_table.CopyMemoryFromUserToLinear(dst_addr, size, dst_state_mask, dst_state,
+ dst_test_perm, dst_attr_mask, dst_attr,
+ src_addr));
+ }
+
+ Result CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size,
+ KMemoryState dst_state_mask, KMemoryState dst_state,
+ KMemoryPermission dst_test_perm,
+ KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
+ void* src_addr) {
+ R_RETURN(m_page_table.CopyMemoryFromKernelToLinear(dst_addr, size, dst_state_mask,
+ dst_state, dst_test_perm, dst_attr_mask,
+ dst_attr, src_addr));
+ }
+
+ Result CopyMemoryFromHeapToHeap(KProcessPageTable& dst_page_table, KProcessAddress dst_addr,
+ size_t size, KMemoryState dst_state_mask,
+ KMemoryState dst_state, KMemoryPermission dst_test_perm,
+ KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
+ KProcessAddress src_addr, KMemoryState src_state_mask,
+ KMemoryState src_state, KMemoryPermission src_test_perm,
+ KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
+ R_RETURN(m_page_table.CopyMemoryFromHeapToHeap(
+ dst_page_table.m_page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm,
+ dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm,
+ src_attr_mask, src_attr));
+ }
+
+ Result CopyMemoryFromHeapToHeapWithoutCheckDestination(
+ KProcessPageTable& dst_page_table, KProcessAddress dst_addr, size_t size,
+ KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm,
+ KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr,
+ KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm,
+ KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
+ R_RETURN(m_page_table.CopyMemoryFromHeapToHeapWithoutCheckDestination(
+ dst_page_table.m_page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm,
+ dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm,
+ src_attr_mask, src_attr));
+ }
+
+ Result SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr,
+ KProcessPageTable& src_page_table, KMemoryPermission test_perm,
+ KMemoryState dst_state, bool send) {
+ R_RETURN(m_page_table.SetupForIpc(out_dst_addr, size, src_addr, src_page_table.m_page_table,
+ test_perm, dst_state, send));
+ }
+
+ Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state) {
+ R_RETURN(m_page_table.CleanupForIpcServer(address, size, dst_state));
+ }
+
+ Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state) {
+ R_RETURN(m_page_table.CleanupForIpcClient(address, size, dst_state));
+ }
+
+ Result MapPhysicalMemory(KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.MapPhysicalMemory(address, size));
+ }
+
+ Result UnmapPhysicalMemory(KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.UnmapPhysicalMemory(address, size));
+ }
+
+ Result MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.MapPhysicalMemoryUnsafe(address, size));
+ }
+
+ Result UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.UnmapPhysicalMemoryUnsafe(address, size));
+ }
+
+ Result UnmapProcessMemory(KProcessAddress dst_address, size_t size,
+ KProcessPageTable& src_page_table, KProcessAddress src_address) {
+ R_RETURN(m_page_table.UnmapProcessMemory(dst_address, size, src_page_table.m_page_table,
+ src_address));
+ }
+
+ bool GetPhysicalAddress(KPhysicalAddress* out, KProcessAddress address) {
+ return m_page_table.GetPhysicalAddress(out, address);
+ }
+
+ bool Contains(KProcessAddress addr, size_t size) const {
+ return m_page_table.Contains(addr, size);
+ }
+
+ bool IsInAliasRegion(KProcessAddress addr, size_t size) const {
+ return m_page_table.IsInAliasRegion(addr, size);
+ }
+ bool IsInHeapRegion(KProcessAddress addr, size_t size) const {
+ return m_page_table.IsInHeapRegion(addr, size);
+ }
+ bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const {
+ return m_page_table.IsInUnsafeAliasRegion(addr, size);
+ }
+
+ bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
+ return m_page_table.CanContain(addr, size, state);
+ }
+
+ KProcessAddress GetAddressSpaceStart() const {
+ return m_page_table.GetAddressSpaceStart();
+ }
+ KProcessAddress GetHeapRegionStart() const {
+ return m_page_table.GetHeapRegionStart();
+ }
+ KProcessAddress GetAliasRegionStart() const {
+ return m_page_table.GetAliasRegionStart();
+ }
+ KProcessAddress GetStackRegionStart() const {
+ return m_page_table.GetStackRegionStart();
+ }
+ KProcessAddress GetKernelMapRegionStart() const {
+ return m_page_table.GetKernelMapRegionStart();
+ }
+ KProcessAddress GetCodeRegionStart() const {
+ return m_page_table.GetCodeRegionStart();
+ }
+ KProcessAddress GetAliasCodeRegionStart() const {
+ return m_page_table.GetAliasCodeRegionStart();
+ }
+
+ size_t GetAddressSpaceSize() const {
+ return m_page_table.GetAddressSpaceSize();
+ }
+ size_t GetHeapRegionSize() const {
+ return m_page_table.GetHeapRegionSize();
+ }
+ size_t GetAliasRegionSize() const {
+ return m_page_table.GetAliasRegionSize();
+ }
+ size_t GetStackRegionSize() const {
+ return m_page_table.GetStackRegionSize();
+ }
+ size_t GetKernelMapRegionSize() const {
+ return m_page_table.GetKernelMapRegionSize();
+ }
+ size_t GetCodeRegionSize() const {
+ return m_page_table.GetCodeRegionSize();
+ }
+ size_t GetAliasCodeRegionSize() const {
+ return m_page_table.GetAliasCodeRegionSize();
+ }
+
+ size_t GetNormalMemorySize() const {
+ return m_page_table.GetNormalMemorySize();
+ }
+
+ size_t GetCodeSize() const {
+ return m_page_table.GetCodeSize();
+ }
+ size_t GetCodeDataSize() const {
+ return m_page_table.GetCodeDataSize();
+ }
+
+ size_t GetAliasCodeSize() const {
+ return m_page_table.GetAliasCodeSize();
+ }
+ size_t GetAliasCodeDataSize() const {
+ return m_page_table.GetAliasCodeDataSize();
+ }
+
+ u32 GetAllocateOption() const {
+ return m_page_table.GetAllocateOption();
+ }
+
+ u32 GetAddressSpaceWidth() const {
+ return m_page_table.GetAddressSpaceWidth();
+ }
+
+ KPhysicalAddress GetHeapPhysicalAddress(KVirtualAddress address) {
+ return m_page_table.GetHeapPhysicalAddress(address);
+ }
+
+ u8* GetHeapVirtualPointer(KPhysicalAddress address) {
+ return m_page_table.GetHeapVirtualPointer(address);
+ }
+
+ KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress address) {
+ return m_page_table.GetHeapVirtualAddress(address);
+ }
+
+ KBlockInfoManager* GetBlockInfoManager() {
+ return m_page_table.GetBlockInfoManager();
+ }
+
+ KPageTable& GetBasePageTable() {
+ return m_page_table;
+ }
+
+ const KPageTable& GetBasePageTable() const {
+ return m_page_table;
+ }
+};
+
+} // namespace Kernel
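
Several forwarders above (CopyMemoryFromHeapToHeap, SetupForIpc, UnmapProcessMemory) receive another KProcessPageTable and hand its private m_page_table straight to the implementation. This is legal because C++ access control is per class, not per object: a member function may touch private members of any instance of its own class, as this standalone sketch with illustrative names demonstrates:

#include <cstdio>

class Holder {
public:
    explicit Holder(int v) : m_value(v) {}

    // A member function may read the private members of *other* Holder
    // instances; access control in C++ is per class, not per object.
    int Sum(const Holder& other) const { return m_value + other.m_value; }

private:
    int m_value;
};

int main() {
    Holder a{1};
    Holder b{2};
    std::printf("%d\n", a.Sum(b)); // prints 3
    return 0;
}
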
diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp
index c64ceb530..3ea653163 100644
--- a/src/core/hle/kernel/k_server_session.cpp
+++ b/src/core/hle/kernel/k_server_session.cpp
@@ -383,7 +383,7 @@ Result KServerSession::SendReply(bool is_hle) {
if (event != nullptr) {
// // Get the client process/page table.
// KProcess *client_process = client_thread->GetOwnerProcess();
- // KPageTable *client_page_table = std::addressof(client_process->PageTable());
+ // KProcessPageTable *client_page_table = std::addressof(client_process->PageTable());
// // If we need to, reply with an async error.
// if (R_FAILED(client_result)) {
diff --git a/src/core/hle/kernel/k_system_resource.cpp b/src/core/hle/kernel/k_system_resource.cpp
index 07e92aa80..b51941faf 100644
--- a/src/core/hle/kernel/k_system_resource.cpp
+++ b/src/core/hle/kernel/k_system_resource.cpp
@@ -40,7 +40,7 @@ Result KSecureSystemResource::Initialize(size_t size, KResourceLimit* resource_l
// Get resource pointer.
KPhysicalAddress resource_paddr =
- KPageTable::GetHeapPhysicalAddress(m_kernel.MemoryLayout(), m_resource_address);
+ KPageTable::GetHeapPhysicalAddress(m_kernel, m_resource_address);
auto* resource =
m_kernel.System().DeviceMemory().GetPointer<KPageTableManager::RefCount>(resource_paddr);
diff --git a/src/core/hle/kernel/k_thread_local_page.cpp b/src/core/hle/kernel/k_thread_local_page.cpp
index 2c45b4232..a632d1634 100644
--- a/src/core/hle/kernel/k_thread_local_page.cpp
+++ b/src/core/hle/kernel/k_thread_local_page.cpp
@@ -37,8 +37,8 @@ Result KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) {
Result KThreadLocalPage::Finalize() {
// Get the physical address of the page.
- const KPhysicalAddress phys_addr = m_owner->GetPageTable().GetPhysicalAddr(m_virt_addr);
- ASSERT(phys_addr);
+ KPhysicalAddress phys_addr{};
+ ASSERT(m_owner->GetPageTable().GetPhysicalAddress(std::addressof(phys_addr), m_virt_addr));
// Unmap the page.
R_TRY(m_owner->GetPageTable().UnmapPages(this->GetAddress(), 1, KMemoryState::ThreadLocal));
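
The hunk above replaces a lookup that returned the address with one that returns a bool and writes through an out parameter, then evaluates the call inside ASSERT. That is safe only when the project's ASSERT macro always evaluates its expression; the standard assert from <cassert> vanishes under NDEBUG, so a portable rendering of the same pattern keeps the call outside the assertion. A sketch, with a hypothetical stand-in for the translation:

#include <cassert>
#include <cstdint>

// Hypothetical stand-in for a virtual-to-physical translation that can fail.
bool GetPhysicalAddress(uint64_t* out, uint64_t virt) {
    *out = virt | (1ULL << 36); // placeholder mapping
    return true;
}

int main() {
    uint64_t phys{};
    const bool ok = GetPhysicalAddress(&phys, 0x8000); // unconditional call
    assert(ok);                                        // checked separately
    (void)ok;
    return 0;
}
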
diff --git a/src/core/hle/kernel/process_capability.cpp b/src/core/hle/kernel/process_capability.cpp
deleted file mode 100644
index 773319ad8..000000000
--- a/src/core/hle/kernel/process_capability.cpp
+++ /dev/null
@@ -1,389 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include <bit>
-
-#include "common/bit_util.h"
-#include "common/logging/log.h"
-#include "core/hle/kernel/k_handle_table.h"
-#include "core/hle/kernel/k_page_table.h"
-#include "core/hle/kernel/process_capability.h"
-#include "core/hle/kernel/svc_results.h"
-
-namespace Kernel {
-namespace {
-
-// clang-format off
-
-// Shift offsets for kernel capability types.
-enum : u32 {
- CapabilityOffset_PriorityAndCoreNum = 3,
- CapabilityOffset_Syscall = 4,
- CapabilityOffset_MapPhysical = 6,
- CapabilityOffset_MapIO = 7,
- CapabilityOffset_MapRegion = 10,
- CapabilityOffset_Interrupt = 11,
- CapabilityOffset_ProgramType = 13,
- CapabilityOffset_KernelVersion = 14,
- CapabilityOffset_HandleTableSize = 15,
- CapabilityOffset_Debug = 16,
-};
-
-// Combined mask of all parameters that may be initialized only once.
-constexpr u32 InitializeOnceMask = (1U << CapabilityOffset_PriorityAndCoreNum) |
- (1U << CapabilityOffset_ProgramType) |
- (1U << CapabilityOffset_KernelVersion) |
- (1U << CapabilityOffset_HandleTableSize) |
- (1U << CapabilityOffset_Debug);
-
-// Packed kernel version indicating 10.4.0
-constexpr u32 PackedKernelVersion = 0x520000;
-
-// Indicates possible types of capabilities that can be specified.
-enum class CapabilityType : u32 {
- Unset = 0U,
- PriorityAndCoreNum = (1U << CapabilityOffset_PriorityAndCoreNum) - 1,
- Syscall = (1U << CapabilityOffset_Syscall) - 1,
- MapPhysical = (1U << CapabilityOffset_MapPhysical) - 1,
- MapIO = (1U << CapabilityOffset_MapIO) - 1,
- MapRegion = (1U << CapabilityOffset_MapRegion) - 1,
- Interrupt = (1U << CapabilityOffset_Interrupt) - 1,
- ProgramType = (1U << CapabilityOffset_ProgramType) - 1,
- KernelVersion = (1U << CapabilityOffset_KernelVersion) - 1,
- HandleTableSize = (1U << CapabilityOffset_HandleTableSize) - 1,
- Debug = (1U << CapabilityOffset_Debug) - 1,
- Ignorable = 0xFFFFFFFFU,
-};
-
-// clang-format on
-
-constexpr CapabilityType GetCapabilityType(u32 value) {
- return static_cast<CapabilityType>((~value & (value + 1)) - 1);
-}
-
-u32 GetFlagBitOffset(CapabilityType type) {
- const auto value = static_cast<u32>(type);
- return static_cast<u32>(Common::BitSize<u32>() - static_cast<u32>(std::countl_zero(value)));
-}
-
-} // Anonymous namespace
-
-Result ProcessCapabilities::InitializeForKernelProcess(const u32* capabilities,
- std::size_t num_capabilities,
- KPageTable& page_table) {
- Clear();
-
- // Allow all cores and priorities.
- core_mask = 0xF;
- priority_mask = 0xFFFFFFFFFFFFFFFF;
- kernel_version = PackedKernelVersion;
-
- return ParseCapabilities(capabilities, num_capabilities, page_table);
-}
-
-Result ProcessCapabilities::InitializeForUserProcess(const u32* capabilities,
- std::size_t num_capabilities,
- KPageTable& page_table) {
- Clear();
-
- return ParseCapabilities(capabilities, num_capabilities, page_table);
-}
-
-void ProcessCapabilities::InitializeForMetadatalessProcess() {
- // Allow all cores and priorities
- core_mask = 0xF;
- priority_mask = 0xFFFFFFFFFFFFFFFF;
- kernel_version = PackedKernelVersion;
-
- // Allow all system calls and interrupts.
- svc_capabilities.set();
- interrupt_capabilities.set();
-
- // Allow using the maximum possible number of handles
- handle_table_size = static_cast<s32>(KHandleTable::MaxTableSize);
-
- // Allow all debugging capabilities.
- is_debuggable = true;
- can_force_debug = true;
-}
-
-Result ProcessCapabilities::ParseCapabilities(const u32* capabilities, std::size_t num_capabilities,
- KPageTable& page_table) {
- u32 set_flags = 0;
- u32 set_svc_bits = 0;
-
- for (std::size_t i = 0; i < num_capabilities; ++i) {
- const u32 descriptor = capabilities[i];
- const auto type = GetCapabilityType(descriptor);
-
- if (type == CapabilityType::MapPhysical) {
- i++;
-
- // The MapPhysical type uses two descriptor flags for its parameters.
- // If there's only one, then there's a problem.
- if (i >= num_capabilities) {
- LOG_ERROR(Kernel, "Invalid combination! i={}", i);
- return ResultInvalidCombination;
- }
-
- const auto size_flags = capabilities[i];
- if (GetCapabilityType(size_flags) != CapabilityType::MapPhysical) {
- LOG_ERROR(Kernel, "Invalid capability type! size_flags={}", size_flags);
- return ResultInvalidCombination;
- }
-
- const auto result = HandleMapPhysicalFlags(descriptor, size_flags, page_table);
- if (result.IsError()) {
- LOG_ERROR(Kernel, "Failed to map physical flags! descriptor={}, size_flags={}",
- descriptor, size_flags);
- return result;
- }
- } else {
- const auto result =
- ParseSingleFlagCapability(set_flags, set_svc_bits, descriptor, page_table);
- if (result.IsError()) {
- LOG_ERROR(
- Kernel,
- "Failed to parse capability flag! set_flags={}, set_svc_bits={}, descriptor={}",
- set_flags, set_svc_bits, descriptor);
- return result;
- }
- }
- }
-
- return ResultSuccess;
-}
-
-Result ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag,
- KPageTable& page_table) {
- const auto type = GetCapabilityType(flag);
-
- if (type == CapabilityType::Unset) {
- return ResultInvalidArgument;
- }
-
- // Bail early on ignorable entries; as one would expect,
- // ignorable descriptors can be ignored.
- if (type == CapabilityType::Ignorable) {
- return ResultSuccess;
- }
-
- // Ensure that the given flag hasn't already been initialized before.
- // If it has been, then bail.
- const u32 flag_length = GetFlagBitOffset(type);
- const u32 set_flag = 1U << flag_length;
- if ((set_flag & set_flags & InitializeOnceMask) != 0) {
- LOG_ERROR(Kernel,
- "Attempted to initialize flags that may only be initialized once. set_flags={}",
- set_flags);
- return ResultInvalidCombination;
- }
- set_flags |= set_flag;
-
- switch (type) {
- case CapabilityType::PriorityAndCoreNum:
- return HandlePriorityCoreNumFlags(flag);
- case CapabilityType::Syscall:
- return HandleSyscallFlags(set_svc_bits, flag);
- case CapabilityType::MapIO:
- return HandleMapIOFlags(flag, page_table);
- case CapabilityType::MapRegion:
- return HandleMapRegionFlags(flag, page_table);
- case CapabilityType::Interrupt:
- return HandleInterruptFlags(flag);
- case CapabilityType::ProgramType:
- return HandleProgramTypeFlags(flag);
- case CapabilityType::KernelVersion:
- return HandleKernelVersionFlags(flag);
- case CapabilityType::HandleTableSize:
- return HandleHandleTableFlags(flag);
- case CapabilityType::Debug:
- return HandleDebugFlags(flag);
- default:
- break;
- }
-
- LOG_ERROR(Kernel, "Invalid capability type! type={}", type);
- return ResultInvalidArgument;
-}
-
-void ProcessCapabilities::Clear() {
- svc_capabilities.reset();
- interrupt_capabilities.reset();
-
- core_mask = 0;
- priority_mask = 0;
-
- handle_table_size = 0;
- kernel_version = 0;
-
- program_type = ProgramType::SysModule;
-
- is_debuggable = false;
- can_force_debug = false;
-}
-
-Result ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
- if (priority_mask != 0 || core_mask != 0) {
- LOG_ERROR(Kernel, "Core or priority mask are not zero! priority_mask={}, core_mask={}",
- priority_mask, core_mask);
- return ResultInvalidArgument;
- }
-
- const u32 core_num_min = (flags >> 16) & 0xFF;
- const u32 core_num_max = (flags >> 24) & 0xFF;
- if (core_num_min > core_num_max) {
- LOG_ERROR(Kernel, "Core min is greater than core max! core_num_min={}, core_num_max={}",
- core_num_min, core_num_max);
- return ResultInvalidCombination;
- }
-
- const u32 priority_min = (flags >> 10) & 0x3F;
- const u32 priority_max = (flags >> 4) & 0x3F;
- if (priority_min > priority_max) {
- LOG_ERROR(Kernel,
- "Priority min is greater than priority max! priority_min={}, priority_max={}",
- priority_min, priority_max);
- return ResultInvalidCombination;
- }
-
- // The Switch only has 4 usable cores.
- if (core_num_max >= 4) {
- LOG_ERROR(Kernel, "Invalid max cores specified! core_num_max={}", core_num_max);
- return ResultInvalidCoreId;
- }
-
- const auto make_mask = [](u64 min, u64 max) {
- const u64 range = max - min + 1;
- const u64 mask = (1ULL << range) - 1;
-
- return mask << min;
- };
-
- core_mask = make_mask(core_num_min, core_num_max);
- priority_mask = make_mask(priority_min, priority_max);
- return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags) {
- const u32 index = flags >> 29;
- const u32 svc_bit = 1U << index;
-
- // If we've already set this svc before, bail.
- if ((set_svc_bits & svc_bit) != 0) {
- return ResultInvalidCombination;
- }
- set_svc_bits |= svc_bit;
-
- const u32 svc_mask = (flags >> 5) & 0xFFFFFF;
- for (u32 i = 0; i < 24; ++i) {
- const u32 svc_number = index * 24 + i;
-
- if ((svc_mask & (1U << i)) == 0) {
- continue;
- }
-
- svc_capabilities[svc_number] = true;
- }
-
- return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleMapPhysicalFlags(u32 flags, u32 size_flags,
- KPageTable& page_table) {
- // TODO(Lioncache): Implement once the memory manager can handle this.
- return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleMapIOFlags(u32 flags, KPageTable& page_table) {
- // TODO(Lioncache): Implement once the memory manager can handle this.
- return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleMapRegionFlags(u32 flags, KPageTable& page_table) {
- // TODO(Lioncache): Implement once the memory manager can handle this.
- return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleInterruptFlags(u32 flags) {
- constexpr u32 interrupt_ignore_value = 0x3FF;
- const u32 interrupt0 = (flags >> 12) & 0x3FF;
- const u32 interrupt1 = (flags >> 22) & 0x3FF;
-
- for (u32 interrupt : {interrupt0, interrupt1}) {
- if (interrupt == interrupt_ignore_value) {
- continue;
- }
-
- // NOTE:
- // This should be checking a generic interrupt controller value
- // as part of the calculation; however, given we don't currently
- // emulate that, it's sufficient to mark every interrupt as defined.
-
- if (interrupt >= interrupt_capabilities.size()) {
- LOG_ERROR(Kernel, "Process interrupt capability is out of range! interrupt={}",
- interrupt);
- return ResultOutOfRange;
- }
-
- interrupt_capabilities[interrupt] = true;
- }
-
- return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleProgramTypeFlags(u32 flags) {
- const u32 reserved = flags >> 17;
- if (reserved != 0) {
- LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
- return ResultReservedUsed;
- }
-
- program_type = static_cast<ProgramType>((flags >> 14) & 0b111);
- return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleKernelVersionFlags(u32 flags) {
- // Yes, the internal member variable is checked in the actual kernel here.
- // This might look odd for options that are only allowed to be initialized
- // just once, however the kernel has a separate initialization function for
- // kernel processes and userland processes. The kernel variant sets this
- // member variable ahead of time.
-
- const u32 major_version = kernel_version >> 19;
-
- if (major_version != 0 || flags < 0x80000) {
- LOG_ERROR(Kernel,
- "Kernel version is non zero or flags are too small! major_version={}, flags={}",
- major_version, flags);
- return ResultInvalidArgument;
- }
-
- kernel_version = flags;
- return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleHandleTableFlags(u32 flags) {
- const u32 reserved = flags >> 26;
- if (reserved != 0) {
- LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
- return ResultReservedUsed;
- }
-
- handle_table_size = static_cast<s32>((flags >> 16) & 0x3FF);
- return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleDebugFlags(u32 flags) {
- const u32 reserved = flags >> 19;
- if (reserved != 0) {
- LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
- return ResultReservedUsed;
- }
-
- is_debuggable = (flags & 0x20000) != 0;
- can_force_debug = (flags & 0x40000) != 0;
- return ResultSuccess;
-}
-
-} // namespace Kernel
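
For reference, the type dispatch the deleted parser relied on, GetCapabilityType's (~value & (value + 1)) - 1, isolates the lowest clear bit of the descriptor and keeps the run of set bits beneath it, which is exactly the encoding documented in the header below. A standalone demonstration (the middle descriptor value is hypothetical):

#include <cstdint>
#include <cstdio>

constexpr uint32_t CapabilityMask(uint32_t v) {
    // Keeps the run of set bits below the first clear bit of v.
    return (~v & (v + 1)) - 1;
}

int main() {
    // 0x7: priority/core descriptor (low bits 0b0111)
    std::printf("%#x\n", static_cast<unsigned>(CapabilityMask(0b0111)));
    // 0xf: syscall descriptor; upper bits are a hypothetical payload
    std::printf("%#x\n", static_cast<unsigned>(CapabilityMask(0xDEADB00F)));
    // 0xffffffff: all bits set marks an ignorable descriptor
    std::printf("%#x\n", static_cast<unsigned>(CapabilityMask(0xFFFFFFFFu)));
    return 0;
}
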
diff --git a/src/core/hle/kernel/process_capability.h b/src/core/hle/kernel/process_capability.h
deleted file mode 100644
index ff05dc5ff..000000000
--- a/src/core/hle/kernel/process_capability.h
+++ /dev/null
@@ -1,266 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#pragma once
-
-#include <bitset>
-
-#include "common/common_types.h"
-
-union Result;
-
-namespace Kernel {
-
-class KPageTable;
-
-/// The possible types of programs that may be indicated
-/// by the program type capability descriptor.
-enum class ProgramType {
- SysModule,
- Application,
- Applet,
-};
-
-/// Handles kernel capability descriptors that are provided by
-/// application metadata. These descriptors provide information
-/// that alters certain parameters for kernel process instance
-/// that will run said application (or applet).
-///
-/// Capabilities are a sequence of flag descriptors that indicate various
-/// configurations and constraints for a particular process.
-///
-/// Flag types are indicated by a sequence of set low bits, as follows
-/// (where x indicates "don't care"):
-///
-/// - Priority and core mask : 0bxxxxxxxxxxxx0111
-/// - Allowed service call mask: 0bxxxxxxxxxxx01111
-/// - Map physical memory : 0bxxxxxxxxx0111111
-/// - Map IO memory : 0bxxxxxxxx01111111
-/// - Map region memory : 0bxxxxx01111111111
-/// - Interrupts : 0bxxxx011111111111
-/// - Application type : 0bxx01111111111111
-/// - Kernel version : 0bx011111111111111
-/// - Handle table size : 0b0111111111111111
-/// - Debugger flags : 0b1111111111111111
-///
-/// Each mask is essentially the value of a single bit, minus 1.
-/// e.g. The first entry in the above list is simply bit 3 (value 8 -> 0b1000)
-/// minus one (7 -> 0b0111).
-///
-/// An example of a bit layout (using the map physical layout):
-/// <example>
-/// The MapPhysical type indicates a sequence entry pair of:
-///
-/// [initial, memory_flags], where:
-///
-/// initial:
-/// bits:
-/// 7-24: Starting page to map memory at.
-/// 25 : Indicates if the memory should be mapped as read only.
-///
-/// memory_flags:
-/// bits:
-/// 7-20 : Number of pages to map
-/// 21-25: Seems to be reserved (still checked against though)
-/// 26 : Whether the memory being mapped is IO memory or physical memory
-/// </example>
-///
-class ProcessCapabilities {
-public:
- using InterruptCapabilities = std::bitset<1024>;
- using SyscallCapabilities = std::bitset<192>;
-
- ProcessCapabilities() = default;
- ProcessCapabilities(const ProcessCapabilities&) = delete;
- ProcessCapabilities(ProcessCapabilities&&) = default;
-
- ProcessCapabilities& operator=(const ProcessCapabilities&) = delete;
- ProcessCapabilities& operator=(ProcessCapabilities&&) = default;
-
- /// Initializes this process capabilities instance for a kernel process.
- ///
- /// @param capabilities The capabilities to parse
- /// @param num_capabilities The number of capabilities to parse.
- /// @param page_table The memory manager to use for handling any mapping-related
- /// operations (such as mapping IO memory, etc).
- ///
- /// @returns ResultSuccess if this capabilities instance was able to be initialized,
- /// otherwise, an error code upon failure.
- ///
- Result InitializeForKernelProcess(const u32* capabilities, std::size_t num_capabilities,
- KPageTable& page_table);
-
- /// Initializes this process capabilities instance for a userland process.
- ///
- /// @param capabilities The capabilities to parse.
- /// @param num_capabilities The total number of capabilities to parse.
- /// @param page_table The memory manager to use for handling any mapping-related
- /// operations (such as mapping IO memory, etc).
- ///
- /// @returns ResultSuccess if this capabilities instance was able to be initialized,
- /// otherwise, an error code upon failure.
- ///
- Result InitializeForUserProcess(const u32* capabilities, std::size_t num_capabilities,
- KPageTable& page_table);
-
- /// Initializes this process capabilities instance for a process that does not
- /// have any metadata to parse.
- ///
- /// This is necessary, as we allow running raw executables, and the internal
- /// kernel process capabilities also determine what CPU cores the process is
- /// allowed to run on, and what priorities are allowed for threads. It also
- /// determines the max handle table size, what the program type is, whether or
- /// not the process can be debugged, or whether it's possible for a process to
- /// forcibly debug another process.
- ///
- /// Given the above, this essentially enables all capabilities across the board
- /// for the process. It allows the process to:
- ///
- /// - Run on any core
- /// - Use any thread priority
- /// - Use the maximum number of handles a process is allowed to.
- /// - Be debuggable
- /// - Forcibly debug other processes.
- ///
- /// Note that this is not a behavior that the kernel allows a process to do via
- /// a single function like this. This is yuzu-specific behavior to handle
- /// executables with no capability descriptors whatsoever to derive behavior from.
- /// It being yuzu-specific is why this is also not the default behavior and not
- /// done by default in the constructor.
- ///
- void InitializeForMetadatalessProcess();
-
- /// Gets the allowable core mask
- u64 GetCoreMask() const {
- return core_mask;
- }
-
- /// Gets the allowable priority mask
- u64 GetPriorityMask() const {
- return priority_mask;
- }
-
- /// Gets the SVC access permission bits
- const SyscallCapabilities& GetServiceCapabilities() const {
- return svc_capabilities;
- }
-
- /// Gets the valid interrupt bits.
- const InterruptCapabilities& GetInterruptCapabilities() const {
- return interrupt_capabilities;
- }
-
- /// Gets the program type for this process.
- ProgramType GetProgramType() const {
- return program_type;
- }
-
- /// Gets the number of total allowable handles for the process' handle table.
- s32 GetHandleTableSize() const {
- return handle_table_size;
- }
-
- /// Gets the kernel version value.
- u32 GetKernelVersion() const {
- return kernel_version;
- }
-
- /// Whether or not this process can be debugged.
- bool IsDebuggable() const {
- return is_debuggable;
- }
-
- /// Whether or not this process can forcibly debug another
- /// process, even if that process is not considered debuggable.
- bool CanForceDebug() const {
- return can_force_debug;
- }
-
-private:
- /// Attempts to parse a given sequence of capability descriptors.
- ///
- /// @param capabilities The sequence of capability descriptors to parse.
- /// @param num_capabilities The number of descriptors within the given sequence.
- /// @param page_table The memory manager that will perform any memory
- /// mapping if necessary.
- ///
- /// @return ResultSuccess if no errors occur, otherwise an error code.
- ///
- Result ParseCapabilities(const u32* capabilities, std::size_t num_capabilities,
- KPageTable& page_table);
-
- /// Attempts to parse a capability descriptor that is only represented by a
- /// single flag set.
- ///
- /// @param set_flags Running set of flags that are used to catch
- /// flags being initialized more than once when they shouldn't be.
- /// @param set_svc_bits Running set of bits representing the allowed supervisor calls mask.
- /// @param flag The flag to attempt to parse.
- /// @param page_table The memory manager that will perform any memory
- /// mapping if necessary.
- ///
- /// @return ResultSuccess if no errors occurred, otherwise an error code.
- ///
- Result ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag,
- KPageTable& page_table);
-
- /// Clears the internal state of this process capability instance. Necessary,
- /// to have a sane starting point due to us allowing running executables without
- /// configuration metadata. We assume a process is not going to have metadata,
- /// and if it turns out that the process does, in fact, have metadata, then
- /// we attempt to parse it. Thus, we need this to reset data members back to
- /// a good state.
- ///
- /// DO NOT ever make this a public member function. This isn't an invariant
- /// anything external should depend upon (and if anything comes to rely on it,
- /// you should immediately be questioning the design of that thing, not this
- /// class. If the kernel itself can run without depending on behavior like that,
- /// then so can yuzu).
- ///
- void Clear();
-
- /// Handles flags related to the priority and core number capability flags.
- Result HandlePriorityCoreNumFlags(u32 flags);
-
- /// Handles flags related to determining the allowable SVC mask.
- Result HandleSyscallFlags(u32& set_svc_bits, u32 flags);
-
- /// Handles flags related to mapping physical memory pages.
- Result HandleMapPhysicalFlags(u32 flags, u32 size_flags, KPageTable& page_table);
-
- /// Handles flags related to mapping IO pages.
- Result HandleMapIOFlags(u32 flags, KPageTable& page_table);
-
- /// Handles flags related to mapping physical memory regions.
- Result HandleMapRegionFlags(u32 flags, KPageTable& page_table);
-
- /// Handles flags related to the interrupt capability flags.
- Result HandleInterruptFlags(u32 flags);
-
- /// Handles flags related to the program type.
- Result HandleProgramTypeFlags(u32 flags);
-
- /// Handles flags related to the handle table size.
- Result HandleHandleTableFlags(u32 flags);
-
- /// Handles flags related to the kernel version capability flags.
- Result HandleKernelVersionFlags(u32 flags);
-
- /// Handles flags related to debug-specific capabilities.
- Result HandleDebugFlags(u32 flags);
-
- SyscallCapabilities svc_capabilities;
- InterruptCapabilities interrupt_capabilities;
-
- u64 core_mask = 0;
- u64 priority_mask = 0;
-
- s32 handle_table_size = 0;
- u32 kernel_version = 0;
-
- ProgramType program_type = ProgramType::SysModule;
-
- bool is_debuggable = false;
- bool can_force_debug = false;
-};
-
-} // namespace Kernel
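
For reference, the bit layout documented in the removed header can be decoded mechanically; a minimal sketch, assuming only the field boundaries described in the comment above (the struct and function names are illustrative, not from the codebase):

#include <cstdint>

// Decodes the [initial, memory_flags] MapPhysical pair described above.
struct MapPhysicalCapability {
    std::uint32_t start_page; // initial, bits 7-24
    bool read_only;           // initial, bit 25
    std::uint32_t num_pages;  // memory_flags, bits 7-20
    bool is_io_memory;        // memory_flags, bit 26
};

constexpr MapPhysicalCapability DecodeMapPhysical(std::uint32_t initial,
                                                  std::uint32_t memory_flags) {
    return {
        .start_page = (initial >> 7) & 0x3FFFF,    // 18 bits (7-24)
        .read_only = ((initial >> 25) & 1) != 0,
        .num_pages = (memory_flags >> 7) & 0x3FFF, // 14 bits (7-20)
        .is_io_memory = ((memory_flags >> 26) & 1) != 0,
    };
}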
diff --git a/src/core/hle/kernel/svc/svc_memory.cpp b/src/core/hle/kernel/svc/svc_memory.cpp
index 97f1210de..4ca62860d 100644
--- a/src/core/hle/kernel/svc/svc_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_memory.cpp
@@ -29,7 +29,8 @@ constexpr bool IsValidAddressRange(u64 address, u64 size) {
// Helper function that performs the common sanity checks for svcMapMemory
// and svcUnmapMemory. This is doable, as both functions perform their sanitizing
// in the same order.
-Result MapUnmapMemorySanityChecks(const KPageTable& manager, u64 dst_addr, u64 src_addr, u64 size) {
+Result MapUnmapMemorySanityChecks(const KProcessPageTable& manager, u64 dst_addr, u64 src_addr,
+ u64 size) {
if (!Common::Is4KBAligned(dst_addr)) {
LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr);
R_THROW(ResultInvalidAddress);
@@ -123,7 +124,8 @@ Result SetMemoryAttribute(Core::System& system, u64 address, u64 size, u32 mask,
R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
// Set the memory attribute.
- R_RETURN(page_table.SetMemoryAttribute(address, size, mask, attr));
+ R_RETURN(page_table.SetMemoryAttribute(address, size, static_cast<KMemoryAttribute>(mask),
+ static_cast<KMemoryAttribute>(attr)));
}
/// Maps a memory range into a different range.
diff --git a/src/core/hle/kernel/svc/svc_physical_memory.cpp b/src/core/hle/kernel/svc/svc_physical_memory.cpp
index 99330d02a..793e9f8d0 100644
--- a/src/core/hle/kernel/svc/svc_physical_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_physical_memory.cpp
@@ -16,7 +16,14 @@ Result SetHeapSize(Core::System& system, u64* out_address, u64 size) {
R_UNLESS(size < MainMemorySizeMax, ResultInvalidSize);
// Set the heap size.
- R_RETURN(GetCurrentProcess(system.Kernel()).GetPageTable().SetHeapSize(out_address, size));
+ KProcessAddress address{};
+ R_TRY(GetCurrentProcess(system.Kernel())
+ .GetPageTable()
+ .SetHeapSize(std::addressof(address), size));
+
+ // We succeeded.
+ *out_address = GetInteger(address);
+ R_SUCCEED();
}
/// Maps memory at a desired address
diff --git a/src/core/hle/kernel/svc/svc_process_memory.cpp b/src/core/hle/kernel/svc/svc_process_memory.cpp
index 07cd48175..e1427947b 100644
--- a/src/core/hle/kernel/svc/svc_process_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_process_memory.cpp
@@ -247,8 +247,7 @@ Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 d
R_THROW(ResultInvalidCurrentMemory);
}
- R_RETURN(page_table.UnmapCodeMemory(dst_address, src_address, size,
- KPageTable::ICacheInvalidationStrategy::InvalidateAll));
+ R_RETURN(page_table.UnmapCodeMemory(dst_address, src_address, size));
}
Result SetProcessMemoryPermission64(Core::System& system, Handle process_handle, uint64_t address,
diff --git a/src/core/hle/kernel/svc/svc_query_memory.cpp b/src/core/hle/kernel/svc/svc_query_memory.cpp
index 51af06e97..816dcb8d0 100644
--- a/src/core/hle/kernel/svc/svc_query_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_query_memory.cpp
@@ -31,12 +31,12 @@ Result QueryProcessMemory(Core::System& system, uint64_t out_memory_info, PageIn
}
auto& current_memory{GetCurrentMemory(system.Kernel())};
- const auto memory_info{process->GetPageTable().QueryInfo(address).GetSvcMemoryInfo()};
- current_memory.WriteBlock(out_memory_info, std::addressof(memory_info), sizeof(memory_info));
+ KMemoryInfo mem_info;
+ R_TRY(process->GetPageTable().QueryInfo(std::addressof(mem_info), out_page_info, address));
- //! This is supposed to be part of the QueryInfo call.
- *out_page_info = {};
+ const auto svc_mem_info = mem_info.GetSvcMemoryInfo();
+ current_memory.WriteBlock(out_memory_info, std::addressof(svc_mem_info), sizeof(svc_mem_info));
R_SUCCEED();
}
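
The call now follows the kernel's out-parameter convention: both infos come back through pointers, and failure is propagated with R_TRY before the Svc-facing view is written to guest memory. The shape of the new call in isolation (process here stands for any valid KProcess pointer):

Kernel::KMemoryInfo mem_info;
Kernel::Svc::PageInfo page_info;

// Query both infos in one call; bail out early on failure.
R_TRY(process->GetPageTable().QueryInfo(std::addressof(mem_info),
                                        std::addressof(page_info), address));

// Only the Svc-facing view is copied back to the guest.
const auto svc_mem_info = mem_info.GetSvcMemoryInfo();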
diff --git a/src/core/hle/result.h b/src/core/hle/result.h
index dd0b27f47..749f51f69 100644
--- a/src/core/hle/result.h
+++ b/src/core/hle/result.h
@@ -407,3 +407,34 @@ constexpr inline Result __TmpCurrentResultReference = ResultSuccess;
/// Evaluates a boolean expression, and succeeds if that expression is true.
#define R_SUCCEED_IF(expr) R_UNLESS(!(expr), ResultSuccess)
+
+#define R_TRY_CATCH(res_expr) \
+ { \
+ const auto R_CURRENT_RESULT = (res_expr); \
+ if (R_FAILED(R_CURRENT_RESULT)) { \
+ if (false)
+
+#define R_END_TRY_CATCH \
+ else if (R_FAILED(R_CURRENT_RESULT)) { \
+ R_THROW(R_CURRENT_RESULT); \
+ } \
+ } \
+ }
+
+#define R_CATCH_ALL() \
+ } \
+ else if (R_FAILED(R_CURRENT_RESULT)) { \
+ if (true)
+
+#define R_CATCH(res_expr) \
+ } \
+ else if ((res_expr) == (R_CURRENT_RESULT)) { \
+ if (true)
+
+#define R_CONVERT(catch_type, convert_type) \
+ R_CATCH(catch_type) { R_THROW(static_cast<Result>(convert_type)); }
+
+#define R_CONVERT_ALL(convert_type) \
+ R_CATCH_ALL() { R_THROW(static_cast<Result>(convert_type)); }
+
+#define R_ASSERT(res_expr) ASSERT(R_SUCCEEDED(res_expr))
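
These macros port the try/catch-style result handling used by the kernel code; a usage sketch under assumed names (Allocate, AllocateFromReserve, and the three Result codes are hypothetical):

Result AllocateWithFallback(std::size_t size) {
    R_TRY_CATCH(Allocate(size)) {
        R_CATCH(ResultOutOfMemory) {
            // Handle one specific failure code.
            R_TRY(AllocateFromReserve(size));
        }
        R_CONVERT(ResultBusy, ResultTryAgain) // Re-map one result code to another.
        R_CATCH_ALL() {
            // Anything else propagates unchanged.
            R_THROW(R_CURRENT_RESULT);
        }
    } R_END_TRY_CATCH;

    R_SUCCEED();
}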
diff --git a/src/core/hle/service/acc/acc.cpp b/src/core/hle/service/acc/acc.cpp
index 1b1c8190e..f21553644 100644
--- a/src/core/hle/service/acc/acc.cpp
+++ b/src/core/hle/service/acc/acc.cpp
@@ -3,11 +3,13 @@
#include <algorithm>
#include <array>
+
#include "common/common_types.h"
#include "common/fs/file.h"
#include "common/fs/path_util.h"
#include "common/logging/log.h"
#include "common/polyfill_ranges.h"
+#include "common/stb.h"
#include "common/string_util.h"
#include "common/swap.h"
#include "core/constants.h"
@@ -38,9 +40,36 @@ static std::filesystem::path GetImagePath(const Common::UUID& uuid) {
fmt::format("system/save/8000000000000010/su/avators/{}.jpg", uuid.FormattedString());
}
-static constexpr u32 SanitizeJPEGSize(std::size_t size) {
+static void JPGToMemory(void* context, void* data, int len) {
+ std::vector<u8>* jpg_image = static_cast<std::vector<u8>*>(context);
+ unsigned char* jpg = static_cast<unsigned char*>(data);
+ jpg_image->insert(jpg_image->end(), jpg, jpg + len);
+}
+
+static void SanitizeJPEGImageSize(std::vector<u8>& image) {
constexpr std::size_t max_jpeg_image_size = 0x20000;
- return static_cast<u32>(std::min(size, max_jpeg_image_size));
+ constexpr int profile_dimensions = 256;
+ int original_width, original_height, color_channels;
+
+ const auto plain_image =
+ stbi_load_from_memory(image.data(), static_cast<int>(image.size()), &original_width,
+ &original_height, &color_channels, STBI_rgb);
+
+ // Resize the image to 256x256 to match the expected profile dimensions
+ if (original_width != profile_dimensions || original_height != profile_dimensions) {
+ // Use vector instead of array to avoid overflowing the stack
+ std::vector<u8> out_image(profile_dimensions * profile_dimensions * STBI_rgb);
+ stbir_resize_uint8_srgb(plain_image, original_width, original_height, 0, out_image.data(),
+ profile_dimensions, profile_dimensions, 0, STBI_rgb, 0,
+ STBIR_FILTER_BOX);
+ image.clear();
+ if (!stbi_write_jpg_to_func(JPGToMemory, &image, profile_dimensions, profile_dimensions,
+ STBI_rgb, out_image.data(), 0)) {
+ LOG_ERROR(Service_ACC, "Failed to resize the user provided image.");
+ }
+ }
+
+ image.resize(std::min(image.size(), max_jpeg_image_size));
}
class IManagerForSystemService final : public ServiceFramework<IManagerForSystemService> {
@@ -339,19 +368,20 @@ protected:
LOG_WARNING(Service_ACC,
"Failed to load user provided image! Falling back to built-in backup...");
ctx.WriteBuffer(Core::Constants::ACCOUNT_BACKUP_JPEG);
- rb.Push(SanitizeJPEGSize(Core::Constants::ACCOUNT_BACKUP_JPEG.size()));
+ rb.Push(static_cast<u32>(Core::Constants::ACCOUNT_BACKUP_JPEG.size()));
return;
}
- const u32 size = SanitizeJPEGSize(image.GetSize());
- std::vector<u8> buffer(size);
+ std::vector<u8> buffer(image.GetSize());
if (image.Read(buffer) != buffer.size()) {
LOG_ERROR(Service_ACC, "Failed to read all the bytes in the user provided image.");
}
+ SanitizeJPEGImageSize(buffer);
+
ctx.WriteBuffer(buffer);
- rb.Push<u32>(size);
+ rb.Push(static_cast<u32>(buffer.size()));
}
void GetImageSize(HLERequestContext& ctx) {
@@ -365,10 +395,18 @@ protected:
if (!image.IsOpen()) {
LOG_WARNING(Service_ACC,
"Failed to load user provided image! Falling back to built-in backup...");
- rb.Push(SanitizeJPEGSize(Core::Constants::ACCOUNT_BACKUP_JPEG.size()));
- } else {
- rb.Push(SanitizeJPEGSize(image.GetSize()));
+ rb.Push(static_cast<u32>(Core::Constants::ACCOUNT_BACKUP_JPEG.size()));
+ return;
}
+
+ std::vector<u8> buffer(image.GetSize());
+
+ if (image.Read(buffer) != buffer.size()) {
+ LOG_ERROR(Service_ACC, "Failed to read all the bytes in the user provided image.");
+ }
+
+ SanitizeJPEGImageSize(buffer);
+ rb.Push(static_cast<u32>(buffer.size()));
}
void Store(HLERequestContext& ctx) {
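
SanitizeJPEGImageSize normalizes any user-supplied avatar before it reaches the guest: decode, resize to 256x256 when the dimensions differ, re-encode as JPEG, then clamp to the 0x20000-byte ceiling. A self-contained sketch of the same stb round-trip, mirroring the call shapes above (ReencodeAvatar and AppendJpeg are illustrative names, and the decoded buffer is released explicitly):

#include <algorithm>
#include <vector>
#include "common/stb.h"

// stbi_write_* callback: append each encoded chunk to a byte vector.
static void AppendJpeg(void* context, void* data, int len) {
    auto* out = static_cast<std::vector<unsigned char>*>(context);
    const auto* bytes = static_cast<const unsigned char*>(data);
    out->insert(out->end(), bytes, bytes + len);
}

static std::vector<unsigned char> ReencodeAvatar(const std::vector<unsigned char>& jpeg_in) {
    int w{}, h{}, channels{};
    unsigned char* rgb = stbi_load_from_memory(jpeg_in.data(), static_cast<int>(jpeg_in.size()),
                                               &w, &h, &channels, STBI_rgb);
    std::vector<unsigned char> resized(256 * 256 * STBI_rgb);
    stbir_resize_uint8_srgb(rgb, w, h, 0, resized.data(), 256, 256, 0, STBI_rgb, 0,
                            STBIR_FILTER_BOX);
    stbi_image_free(rgb); // release the decoded buffer

    std::vector<unsigned char> out;
    stbi_write_jpg_to_func(AppendJpeg, &out, 256, 256, STBI_rgb, resized.data(), 0);
    out.resize(std::min<std::size_t>(out.size(), 0x20000)); // enforce the size ceiling
    return out;
}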
diff --git a/src/core/hle/service/am/applets/applets.h b/src/core/hle/service/am/applets/applets.h
index f02bbc450..0bf2598b7 100644
--- a/src/core/hle/service/am/applets/applets.h
+++ b/src/core/hle/service/am/applets/applets.h
@@ -69,6 +69,30 @@ enum class AppletId : u32 {
MyPage = 0x1A,
};
+enum class AppletProgramId : u64 {
+ QLaunch = 0x0100000000001000ull,
+ Auth = 0x0100000000001001ull,
+ Cabinet = 0x0100000000001002ull,
+ Controller = 0x0100000000001003ull,
+ DataErase = 0x0100000000001004ull,
+ Error = 0x0100000000001005ull,
+ NetConnect = 0x0100000000001006ull,
+ ProfileSelect = 0x0100000000001007ull,
+ SoftwareKeyboard = 0x0100000000001008ull,
+ MiiEdit = 0x0100000000001009ull,
+ Web = 0x010000000000100Aull,
+ Shop = 0x010000000000100Bull,
+ OverlayDisplay = 0x010000000000100Cull,
+ PhotoViewer = 0x010000000000100Dull,
+ Settings = 0x010000000000100Eull,
+ OfflineWeb = 0x010000000000100Full,
+ LoginShare = 0x0100000000001010ull,
+ WebAuth = 0x0100000000001011ull,
+ Starter = 0x0100000000001012ull,
+ MyPage = 0x0100000000001013ull,
+ MaxProgramId = 0x0100000000001FFFull,
+};
+
enum class LibraryAppletMode : u32 {
AllForeground = 0,
Background = 1,
diff --git a/src/core/hle/service/hid/irs.cpp b/src/core/hle/service/hid/irs.cpp
index 221c33b86..d383a266d 100644
--- a/src/core/hle/service/hid/irs.cpp
+++ b/src/core/hle/service/hid/irs.cpp
@@ -138,7 +138,7 @@ void IRS::RunMomentProcessor(HLERequestContext& ctx) {
if (result.IsSuccess()) {
auto& device = GetIrCameraSharedMemoryDeviceEntry(parameters.camera_handle);
- MakeProcessor<MomentProcessor>(parameters.camera_handle, device);
+ MakeProcessorWithCoreContext<MomentProcessor>(parameters.camera_handle, device);
auto& image_transfer_processor = GetProcessor<MomentProcessor>(parameters.camera_handle);
image_transfer_processor.SetConfig(parameters.processor_config);
npad_device->SetPollingMode(Core::HID::EmulatedDeviceIndex::RightIndex,
diff --git a/src/core/hle/service/hid/irsensor/clustering_processor.cpp b/src/core/hle/service/hid/irsensor/clustering_processor.cpp
index e2f4ae876..c559eb0d5 100644
--- a/src/core/hle/service/hid/irsensor/clustering_processor.cpp
+++ b/src/core/hle/service/hid/irsensor/clustering_processor.cpp
@@ -3,16 +3,18 @@
#include <queue>
+#include "core/core.h"
+#include "core/core_timing.h"
#include "core/hid/emulated_controller.h"
#include "core/hid/hid_core.h"
#include "core/hle/service/hid/irsensor/clustering_processor.h"
namespace Service::IRS {
-ClusteringProcessor::ClusteringProcessor(Core::HID::HIDCore& hid_core_,
+ClusteringProcessor::ClusteringProcessor(Core::System& system_,
Core::IrSensor::DeviceFormat& device_format,
std::size_t npad_index)
- : device{device_format} {
- npad_device = hid_core_.GetEmulatedControllerByIndex(npad_index);
+ : device{device_format}, system{system_} {
+ npad_device = system.HIDCore().GetEmulatedControllerByIndex(npad_index);
device.mode = Core::IrSensor::IrSensorMode::ClusteringProcessor;
device.camera_status = Core::IrSensor::IrCameraStatus::Unconnected;
@@ -48,7 +50,7 @@ void ClusteringProcessor::OnControllerUpdate(Core::HID::ControllerTriggerType ty
}
next_state = {};
- const auto camera_data = npad_device->GetCamera();
+ const auto& camera_data = npad_device->GetCamera();
auto filtered_image = camera_data.data;
RemoveLowIntensityData(filtered_image);
@@ -83,7 +85,7 @@ void ClusteringProcessor::OnControllerUpdate(Core::HID::ControllerTriggerType ty
}
next_state.sampling_number = camera_data.sample;
- next_state.timestamp = next_state.timestamp + 131;
+ next_state.timestamp = system.CoreTiming().GetGlobalTimeNs().count();
next_state.ambient_noise_level = Core::IrSensor::CameraAmbientNoiseLevel::Low;
shared_memory->clustering_lifo.WriteNextEntry(next_state);
@@ -202,14 +204,14 @@ ClusteringProcessor::ClusteringData ClusteringProcessor::MergeCluster(
}
u8 ClusteringProcessor::GetPixel(const std::vector<u8>& data, std::size_t x, std::size_t y) const {
- if ((y * width) + x > data.size()) {
+ if ((y * width) + x >= data.size()) {
return 0;
}
return data[(y * width) + x];
}
void ClusteringProcessor::SetPixel(std::vector<u8>& data, std::size_t x, std::size_t y, u8 value) {
- if ((y * width) + x > data.size()) {
+ if ((y * width) + x >= data.size()) {
return;
}
data[(y * width) + x] = value;
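
The switch from > to >= in GetPixel/SetPixel closes an off-by-one: in a row-major buffer, index (y * width) + x equal to data.size() is already one element past the end. Concretely, for the 320x240 camera image:

#include <cstddef>
#include <cstdint>
#include <vector>

std::vector<std::uint8_t> image(320 * 240); // valid indices: 0 .. 76799
const std::size_t idx = (240 * 320) + 0;    // == 76800 == image.size()
// old check: idx >  image.size() -> false, out-of-bounds access slipped through
// new check: idx >= image.size() -> true,  access is rejected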
diff --git a/src/core/hle/service/hid/irsensor/clustering_processor.h b/src/core/hle/service/hid/irsensor/clustering_processor.h
index dc01a8ea7..83f34734a 100644
--- a/src/core/hle/service/hid/irsensor/clustering_processor.h
+++ b/src/core/hle/service/hid/irsensor/clustering_processor.h
@@ -8,6 +8,10 @@
#include "core/hle/service/hid/irs_ring_lifo.h"
#include "core/hle/service/hid/irsensor/processor_base.h"
+namespace Core {
+class System;
+}
+
namespace Core::HID {
class EmulatedController;
} // namespace Core::HID
@@ -15,8 +19,7 @@ class EmulatedController;
namespace Service::IRS {
class ClusteringProcessor final : public ProcessorBase {
public:
- explicit ClusteringProcessor(Core::HID::HIDCore& hid_core_,
- Core::IrSensor::DeviceFormat& device_format,
+ explicit ClusteringProcessor(Core::System& system_, Core::IrSensor::DeviceFormat& device_format,
std::size_t npad_index);
~ClusteringProcessor() override;
@@ -106,5 +109,7 @@ private:
Core::IrSensor::DeviceFormat& device;
Core::HID::EmulatedController* npad_device;
int callback_key{};
+
+ Core::System& system;
};
} // namespace Service::IRS
diff --git a/src/core/hle/service/hid/irsensor/image_transfer_processor.cpp b/src/core/hle/service/hid/irsensor/image_transfer_processor.cpp
index 803a6277c..22067a591 100644
--- a/src/core/hle/service/hid/irsensor/image_transfer_processor.cpp
+++ b/src/core/hle/service/hid/irsensor/image_transfer_processor.cpp
@@ -49,7 +49,7 @@ void ImageTransferProcessor::OnControllerUpdate(Core::HID::ControllerTriggerType
return;
}
- const auto camera_data = npad_device->GetCamera();
+ const auto& camera_data = npad_device->GetCamera();
// This indicates how much ambient light is present
processor_state.ambient_noise_level = Core::IrSensor::CameraAmbientNoiseLevel::Low;
diff --git a/src/core/hle/service/hid/irsensor/moment_processor.cpp b/src/core/hle/service/hid/irsensor/moment_processor.cpp
index dbaca420a..cf045bda7 100644
--- a/src/core/hle/service/hid/irsensor/moment_processor.cpp
+++ b/src/core/hle/service/hid/irsensor/moment_processor.cpp
@@ -1,24 +1,137 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
+#include "core/core.h"
+#include "core/core_timing.h"
+#include "core/hid/emulated_controller.h"
+#include "core/hid/hid_core.h"
#include "core/hle/service/hid/irsensor/moment_processor.h"
namespace Service::IRS {
-MomentProcessor::MomentProcessor(Core::IrSensor::DeviceFormat& device_format)
- : device(device_format) {
+static constexpr auto format = Core::IrSensor::ImageTransferProcessorFormat::Size40x30;
+static constexpr std::size_t ImageWidth = 40;
+static constexpr std::size_t ImageHeight = 30;
+
+MomentProcessor::MomentProcessor(Core::System& system_, Core::IrSensor::DeviceFormat& device_format,
+ std::size_t npad_index)
+ : device(device_format), system{system_} {
+ npad_device = system.HIDCore().GetEmulatedControllerByIndex(npad_index);
+
device.mode = Core::IrSensor::IrSensorMode::MomentProcessor;
device.camera_status = Core::IrSensor::IrCameraStatus::Unconnected;
device.camera_internal_status = Core::IrSensor::IrCameraInternalStatus::Stopped;
+
+ shared_memory = std::construct_at(
+ reinterpret_cast<MomentSharedMemory*>(&device_format.state.processor_raw_data));
+
+ Core::HID::ControllerUpdateCallback engine_callback{
+ .on_change = [this](Core::HID::ControllerTriggerType type) { OnControllerUpdate(type); },
+ .is_npad_service = true,
+ };
+ callback_key = npad_device->SetCallback(engine_callback);
}
-MomentProcessor::~MomentProcessor() = default;
+MomentProcessor::~MomentProcessor() {
+ npad_device->DeleteCallback(callback_key);
+}
-void MomentProcessor::StartProcessor() {}
+void MomentProcessor::StartProcessor() {
+ device.camera_status = Core::IrSensor::IrCameraStatus::Available;
+ device.camera_internal_status = Core::IrSensor::IrCameraInternalStatus::Ready;
+}
void MomentProcessor::SuspendProcessor() {}
void MomentProcessor::StopProcessor() {}
+void MomentProcessor::OnControllerUpdate(Core::HID::ControllerTriggerType type) {
+ if (type != Core::HID::ControllerTriggerType::IrSensor) {
+ return;
+ }
+
+ next_state = {};
+ const auto& camera_data = npad_device->GetCamera();
+
+ const auto window_width = static_cast<std::size_t>(current_config.window_of_interest.width);
+ const auto window_height = static_cast<std::size_t>(current_config.window_of_interest.height);
+ const auto window_start_x = static_cast<std::size_t>(current_config.window_of_interest.x);
+ const auto window_start_y = static_cast<std::size_t>(current_config.window_of_interest.y);
+
+ const std::size_t block_width = window_width / Columns;
+ const std::size_t block_height = window_height / Rows;
+
+ for (std::size_t row = 0; row < Rows; row++) {
+ for (std::size_t column = 0; column < Columns; column++) {
+ const size_t x_pos = (column * block_width) + window_start_x;
+ const size_t y_pos = (row * block_height) + window_start_y;
+ auto& statistic = next_state.statistic[column + (row * Columns)];
+ statistic = GetStatistic(camera_data.data, x_pos, y_pos, block_width, block_height);
+ }
+ }
+
+ next_state.sampling_number = camera_data.sample;
+ next_state.timestamp = system.CoreTiming().GetGlobalTimeNs().count();
+ next_state.ambient_noise_level = Core::IrSensor::CameraAmbientNoiseLevel::Low;
+ shared_memory->moment_lifo.WriteNextEntry(next_state);
+
+ if (!IsProcessorActive()) {
+ StartProcessor();
+ }
+}
+
+u8 MomentProcessor::GetPixel(const std::vector<u8>& data, std::size_t x, std::size_t y) const {
+ if ((y * ImageWidth) + x >= data.size()) {
+ return 0;
+ }
+ return data[(y * ImageWidth) + x];
+}
+
+MomentProcessor::MomentStatistic MomentProcessor::GetStatistic(const std::vector<u8>& data,
+ std::size_t start_x,
+ std::size_t start_y,
+ std::size_t width,
+ std::size_t height) const {
+ // The actual implementation is always 320x240
+ static constexpr std::size_t RealWidth = 320;
+ static constexpr std::size_t RealHeight = 240;
+ static constexpr std::size_t Threshold = 30;
+ MomentStatistic statistic{};
+ std::size_t active_points{};
+
+ // Sum all data points in the block that meet the threshold
+ for (std::size_t y = 0; y < height; y++) {
+ for (std::size_t x = 0; x < width; x++) {
+ const size_t x_pos = x + start_x;
+ const size_t y_pos = y + start_y;
+ const auto pixel =
+ GetPixel(data, x_pos * ImageWidth / RealWidth, y_pos * ImageHeight / RealHeight);
+
+ if (pixel < Threshold) {
+ continue;
+ }
+
+ statistic.average_intensity += pixel;
+
+ statistic.centroid.x += static_cast<float>(x_pos);
+ statistic.centroid.y += static_cast<float>(y_pos);
+
+ active_points++;
+ }
+ }
+
+ // Return an empty statistic if no points were above the threshold
+ if (active_points == 0) {
+ return {};
+ }
+
+ // Finally calculate the actual centroid and average intensity
+ statistic.centroid.x /= static_cast<float>(active_points);
+ statistic.centroid.y /= static_cast<float>(active_points);
+ statistic.average_intensity /= static_cast<f32>(width * height);
+
+ return statistic;
+}
+
void MomentProcessor::SetConfig(Core::IrSensor::PackedMomentProcessorConfig config) {
current_config.camera_config.exposure_time = config.camera_config.exposure_time;
current_config.camera_config.gain = config.camera_config.gain;
@@ -29,6 +142,8 @@ void MomentProcessor::SetConfig(Core::IrSensor::PackedMomentProcessorConfig conf
current_config.preprocess =
static_cast<Core::IrSensor::MomentProcessorPreprocess>(config.preprocess);
current_config.preprocess_intensity_threshold = config.preprocess_intensity_threshold;
+
+ npad_device->SetCameraFormat(format);
}
} // namespace Service::IRS
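
Each window block therefore reduces to two quantities: the centroid is the mean coordinate of the pixels at or above the threshold of 30, and the average intensity is the thresholded intensity sum divided by the full block area. A toy check with made-up values:

// 2x2 block; only (x=1, y=0) = 100 and (x=1, y=1) = 50 reach the threshold of 30.
const float centroid_x = (1.0f + 1.0f) / 2.0f;        // 1.0: mean x of active pixels
const float centroid_y = (0.0f + 1.0f) / 2.0f;        // 0.5: mean y of active pixels
const float avg_intensity = (100.0f + 50.0f) / 4.0f;  // 37.5: sum over the 2x2 block area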
diff --git a/src/core/hle/service/hid/irsensor/moment_processor.h b/src/core/hle/service/hid/irsensor/moment_processor.h
index d4bd22e0f..398cfbdc1 100644
--- a/src/core/hle/service/hid/irsensor/moment_processor.h
+++ b/src/core/hle/service/hid/irsensor/moment_processor.h
@@ -6,12 +6,22 @@
#include "common/bit_field.h"
#include "common/common_types.h"
#include "core/hid/irs_types.h"
+#include "core/hle/service/hid/irs_ring_lifo.h"
#include "core/hle/service/hid/irsensor/processor_base.h"
+namespace Core {
+class System;
+}
+
+namespace Core::HID {
+class EmulatedController;
+} // namespace Core::HID
+
namespace Service::IRS {
class MomentProcessor final : public ProcessorBase {
public:
- explicit MomentProcessor(Core::IrSensor::DeviceFormat& device_format);
+ explicit MomentProcessor(Core::System& system_, Core::IrSensor::DeviceFormat& device_format,
+ std::size_t npad_index);
~MomentProcessor() override;
// Called when the processor is initialized
@@ -27,6 +37,9 @@ public:
void SetConfig(Core::IrSensor::PackedMomentProcessorConfig config);
private:
+ static constexpr std::size_t Columns = 8;
+ static constexpr std::size_t Rows = 6;
+
// This is nn::irsensor::MomentProcessorConfig
struct MomentProcessorConfig {
Core::IrSensor::CameraConfig camera_config;
@@ -50,12 +63,29 @@ private:
u64 timestamp;
Core::IrSensor::CameraAmbientNoiseLevel ambient_noise_level;
INSERT_PADDING_BYTES(4);
- std::array<MomentStatistic, 0x30> stadistic;
+ std::array<MomentStatistic, Columns * Rows> statistic;
};
static_assert(sizeof(MomentProcessorState) == 0x258, "MomentProcessorState is an invalid size");
+ struct MomentSharedMemory {
+ Service::IRS::Lifo<MomentProcessorState, 6> moment_lifo;
+ };
+ static_assert(sizeof(MomentSharedMemory) == 0xE20, "MomentSharedMemory is an invalid size");
+
+ void OnControllerUpdate(Core::HID::ControllerTriggerType type);
+ u8 GetPixel(const std::vector<u8>& data, std::size_t x, std::size_t y) const;
+ MomentStatistic GetStatistic(const std::vector<u8>& data, std::size_t start_x,
+ std::size_t start_y, std::size_t width, std::size_t height) const;
+
+ MomentSharedMemory* shared_memory = nullptr;
+ MomentProcessorState next_state{};
+
MomentProcessorConfig current_config{};
Core::IrSensor::DeviceFormat& device;
+ Core::HID::EmulatedController* npad_device;
+ int callback_key{};
+
+ Core::System& system;
};
} // namespace Service::IRS
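
The two static_asserts are mutually consistent: six 0x258-byte states plus what the numbers imply is a 0x10-byte lifo header give 6 * 0x258 + 0x10 = 0xE10 + 0x10 = 0xE20. As a standalone check (the 0x10-byte header size is inferred from the asserted totals, not stated in this file):

static_assert(6 * 0x258 == 0xE10);    // six ring entries
static_assert(0xE10 + 0x10 == 0xE20); // plus the implied lifo header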
diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp
index c73035c77..97b6a9385 100644
--- a/src/core/hle/service/ldr/ldr.cpp
+++ b/src/core/hle/service/ldr/ldr.cpp
@@ -286,9 +286,14 @@ public:
rb.Push(ResultSuccess);
}
- bool ValidateRegionForMap(Kernel::KPageTable& page_table, VAddr start, std::size_t size) const {
+ bool ValidateRegionForMap(Kernel::KProcessPageTable& page_table, VAddr start,
+ std::size_t size) const {
const std::size_t padding_size{page_table.GetNumGuardPages() * Kernel::PageSize};
- const auto start_info{page_table.QueryInfo(start - 1)};
+
+ Kernel::KMemoryInfo start_info;
+ Kernel::Svc::PageInfo page_info;
+ R_ASSERT(
+ page_table.QueryInfo(std::addressof(start_info), std::addressof(page_info), start - 1));
if (start_info.GetState() != Kernel::KMemoryState::Free) {
return {};
@@ -298,7 +303,9 @@ public:
return {};
}
- const auto end_info{page_table.QueryInfo(start + size)};
+ Kernel::KMemoryInfo end_info;
+ R_ASSERT(page_table.QueryInfo(std::addressof(end_info), std::addressof(page_info),
+ start + size));
if (end_info.GetState() != Kernel::KMemoryState::Free) {
return {};
@@ -307,7 +314,7 @@ public:
return (start + size + padding_size) <= (end_info.GetAddress() + end_info.GetSize());
}
- Result GetAvailableMapRegion(Kernel::KPageTable& page_table, u64 size, VAddr& out_addr) {
+ Result GetAvailableMapRegion(Kernel::KProcessPageTable& page_table, u64 size, VAddr& out_addr) {
size = Common::AlignUp(size, Kernel::PageSize);
size += page_table.GetNumGuardPages() * Kernel::PageSize * 4;
@@ -391,12 +398,8 @@ public:
if (bss_size) {
auto block_guard = detail::ScopeExit([&] {
- page_table.UnmapCodeMemory(
- addr + nro_size, bss_addr, bss_size,
- Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange);
- page_table.UnmapCodeMemory(
- addr, nro_addr, nro_size,
- Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange);
+ page_table.UnmapCodeMemory(addr + nro_size, bss_addr, bss_size);
+ page_table.UnmapCodeMemory(addr, nro_addr, nro_size);
});
const Result result{page_table.MapCodeMemory(addr + nro_size, bss_addr, bss_size)};
@@ -578,21 +581,17 @@ public:
auto& page_table{system.ApplicationProcess()->GetPageTable()};
if (info.bss_size != 0) {
- R_TRY(page_table.UnmapCodeMemory(
- info.nro_address + info.text_size + info.ro_size + info.data_size, info.bss_address,
- info.bss_size, Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange));
+ R_TRY(page_table.UnmapCodeMemory(info.nro_address + info.text_size + info.ro_size +
+ info.data_size,
+ info.bss_address, info.bss_size));
}
- R_TRY(page_table.UnmapCodeMemory(
- info.nro_address + info.text_size + info.ro_size,
- info.src_addr + info.text_size + info.ro_size, info.data_size,
- Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange));
- R_TRY(page_table.UnmapCodeMemory(
- info.nro_address + info.text_size, info.src_addr + info.text_size, info.ro_size,
- Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange));
- R_TRY(page_table.UnmapCodeMemory(
- info.nro_address, info.src_addr, info.text_size,
- Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange));
+ R_TRY(page_table.UnmapCodeMemory(info.nro_address + info.text_size + info.ro_size,
+ info.src_addr + info.text_size + info.ro_size,
+ info.data_size));
+ R_TRY(page_table.UnmapCodeMemory(info.nro_address + info.text_size,
+ info.src_addr + info.text_size, info.ro_size));
+ R_TRY(page_table.UnmapCodeMemory(info.nro_address, info.src_addr, info.text_size));
return ResultSuccess;
}
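
The address arithmetic in these calls encodes the NRO image layout: text, ro, and data are contiguous from the base, with bss following data, so each unmap targets the base plus the sizes of the preceding segments. Spelled out (field names as used in this file):

// Mapped (destination) addresses implied by the UnmapCodeMemory calls above.
const u64 text_addr = info.nro_address;
const u64 ro_addr   = info.nro_address + info.text_size;
const u64 data_addr = info.nro_address + info.text_size + info.ro_size;
const u64 bss_addr  = data_addr + info.data_size;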
diff --git a/src/core/hle/service/nvdrv/devices/ioctl_serialization.h b/src/core/hle/service/nvdrv/devices/ioctl_serialization.h
new file mode 100644
index 000000000..b12bcd138
--- /dev/null
+++ b/src/core/hle/service/nvdrv/devices/ioctl_serialization.h
@@ -0,0 +1,159 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <span>
+#include <vector>
+
+#include "common/concepts.h"
+#include "core/hle/service/nvdrv/devices/nvdevice.h"
+
+namespace Service::Nvidia::Devices {
+
+struct IoctlOneArgTraits {
+ template <typename T, typename R, typename A, typename... B>
+ static A GetFirstArgImpl(R (T::*)(A, B...));
+};
+
+struct IoctlTwoArgTraits {
+ template <typename T, typename R, typename A, typename B, typename... C>
+ static A GetFirstArgImpl(R (T::*)(A, B, C...));
+
+ template <typename T, typename R, typename A, typename B, typename... C>
+ static B GetSecondArgImpl(R (T::*)(A, B, C...));
+};
+
+struct Null {};
+
+// clang-format off
+
+template <typename FixedArg, typename VarArg, typename InlInVarArg, typename InlOutVarArg, typename F>
+NvResult WrapGeneric(F&& callable, std::span<const u8> input, std::span<const u8> inline_input, std::span<u8> output, std::span<u8> inline_output) {
+ constexpr bool HasFixedArg = !std::is_same_v<FixedArg, Null>;
+ constexpr bool HasVarArg = !std::is_same_v<VarArg, Null>;
+ constexpr bool HasInlInVarArg = !std::is_same_v<InlInVarArg, Null>;
+ constexpr bool HasInlOutVarArg = !std::is_same_v<InlOutVarArg, Null>;
+
+ // Declare the fixed-size input value.
+ FixedArg fixed{};
+ size_t var_offset = 0;
+
+ if constexpr (HasFixedArg) {
+ // Read the fixed-size input value.
+ var_offset = std::min(sizeof(FixedArg), input.size());
+ if (var_offset > 0) {
+ std::memcpy(&fixed, input.data(), var_offset);
+ }
+ }
+
+ // Read the variable-sized inputs.
+ const size_t num_var_args = HasVarArg ? ((input.size() - var_offset) / sizeof(VarArg)) : 0;
+ std::vector<VarArg> var_args(num_var_args);
+ if constexpr (HasVarArg) {
+ if (num_var_args > 0) {
+ std::memcpy(var_args.data(), input.data() + var_offset, num_var_args * sizeof(VarArg));
+ }
+ }
+
+ const size_t num_inl_in_var_args = HasInlInVarArg ? (inline_input.size() / sizeof(InlInVarArg)) : 0;
+ std::vector<InlInVarArg> inl_in_var_args(num_inl_in_var_args);
+ if constexpr (HasInlInVarArg) {
+ if (num_inl_in_var_args > 0) {
+ std::memcpy(inl_in_var_args.data(), inline_input.data(), num_inl_in_var_args * sizeof(InlInVarArg));
+ }
+ }
+
+ // Construct inline output data.
+ const size_t num_inl_out_var_args = HasInlOutVarArg ? (inline_output.size() / sizeof(InlOutVarArg)) : 0;
+ std::vector<InlOutVarArg> inl_out_var_args(num_inl_out_var_args);
+
+ // Perform the call.
+ NvResult result = callable(fixed, var_args, inl_in_var_args, inl_out_var_args);
+
+ // Copy outputs.
+ if constexpr (HasFixedArg) {
+ if (output.size() > 0) {
+ std::memcpy(output.data(), &fixed, std::min(output.size(), sizeof(FixedArg)));
+ }
+ }
+
+ if constexpr (HasVarArg) {
+ if (num_var_args > 0 && output.size() > var_offset) {
+ const size_t max_var_size = output.size() - var_offset;
+ std::memcpy(output.data() + var_offset, var_args.data(), std::min(max_var_size, num_var_args * sizeof(VarArg)));
+ }
+ }
+
+ // Copy inline outputs.
+ if constexpr (HasInlOutVarArg) {
+ if (num_inl_out_var_args > 0) {
+ std::memcpy(inline_output.data(), inl_out_var_args.data(), num_inl_out_var_args * sizeof(InlOutVarArg));
+ }
+ }
+
+ // We're done.
+ return result;
+}
+
+template <typename Self, typename F, typename... Rest>
+NvResult WrapFixed(Self* self, F&& callable, std::span<const u8> input, std::span<u8> output, Rest&&... rest) {
+ using FixedArg = typename std::remove_reference_t<decltype(IoctlOneArgTraits::GetFirstArgImpl(callable))>;
+
+ const auto Callable = [&](auto& fixed, auto& var, auto& inl_in, auto& inl_out) -> NvResult {
+ return (self->*callable)(fixed, std::forward<Rest>(rest)...);
+ };
+
+ return WrapGeneric<FixedArg, Null, Null, Null>(std::move(Callable), input, {}, output, {});
+}
+
+template <typename Self, typename F, typename... Rest>
+NvResult WrapFixedInlOut(Self* self, F&& callable, std::span<const u8> input, std::span<u8> output, std::span<u8> inline_output, Rest&&... rest) {
+ using FixedArg = typename std::remove_reference_t<decltype(IoctlTwoArgTraits::GetFirstArgImpl(callable))>;
+ using InlOutVarArg = typename std::remove_reference_t<decltype(IoctlTwoArgTraits::GetSecondArgImpl(callable))>::value_type;
+
+ const auto Callable = [&](auto& fixed, auto& var, auto& inl_in, auto& inl_out) -> NvResult {
+ return (self->*callable)(fixed, inl_out, std::forward<Rest>(rest)...);
+ };
+
+ return WrapGeneric<FixedArg, Null, Null, InlOutVarArg>(std::move(Callable), input, {}, output, inline_output);
+}
+
+template <typename Self, typename F, typename... Rest>
+NvResult WrapVariable(Self* self, F&& callable, std::span<const u8> input, std::span<u8> output, Rest&&... rest) {
+ using VarArg = typename std::remove_reference_t<decltype(IoctlOneArgTraits::GetFirstArgImpl(callable))>::value_type;
+
+ const auto Callable = [&](auto& fixed, auto& var, auto& inl_in, auto& inl_out) -> NvResult {
+ return (self->*callable)(var, std::forward<Rest>(rest)...);
+ };
+
+ return WrapGeneric<Null, VarArg, Null, Null>(std::move(Callable), input, {}, output, {});
+}
+
+template <typename Self, typename F, typename... Rest>
+NvResult WrapFixedVariable(Self* self, F&& callable, std::span<const u8> input, std::span<u8> output, Rest&&... rest) {
+ using FixedArg = typename std::remove_reference_t<decltype(IoctlTwoArgTraits::GetFirstArgImpl(callable))>;
+ using VarArg = typename std::remove_reference_t<decltype(IoctlTwoArgTraits::GetSecondArgImpl(callable))>::value_type;
+
+ const auto Callable = [&](auto& fixed, auto& var, auto& inl_in, auto& inl_out) -> NvResult {
+ return (self->*callable)(fixed, var, std::forward<Rest>(rest)...);
+ };
+
+ return WrapGeneric<FixedArg, VarArg, Null, Null>(std::move(Callable), input, {}, output, {});
+}
+
+template <typename Self, typename F, typename... Rest>
+NvResult WrapFixedInlIn(Self* self, F&& callable, std::span<const u8> input, std::span<const u8> inline_input, std::span<u8> output, Rest&&... rest) {
+ using FixedArg = typename std::remove_reference_t<decltype(IoctlTwoArgTraits::GetFirstArgImpl(callable))>;
+ using InlInVarArg = typename std::remove_reference_t<decltype(IoctlTwoArgTraits::GetSecondArgImpl(callable))>::value_type;
+
+ const auto Callable = [&](auto& fixed, auto& var, auto& inl_in, auto& inl_out) -> NvResult {
+ return (self->*callable)(fixed, inl_in, std::forward<Rest>(rest)...);
+ };
+
+ return WrapGeneric<FixedArg, Null, InlInVarArg, Null>(std::move(Callable), input, inline_input, output, {});
+}
+
+// clang-format on
+
+} // namespace Service::Nvidia::Devices
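
With these wrappers in place, each device handler takes its typed parameter struct directly and the byte-span marshalling happens in one place: the fixed argument is copied in from the input span before the call and copied back to the output span afterwards. A sketch of wiring a hypothetical handler (example_device, IoctlExample, and the command number are illustrative, not real devices):

// Hypothetical typed ioctl handler following the convention above.
struct IoctlExample {
    u32 value;
};

NvResult example_device::SetValue(IoctlExample& params) {
    stored_value = params.value; // params was deserialized from the input span
    return NvResult::Success;    // params is serialized back to the output span
}

// Dispatch inside Ioctl1, mirroring the cases in the device diffs below:
// case 0x42:
//     return WrapFixed(this, &example_device::SetValue, input, output);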
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
index 7d7bb8687..6b3639008 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
@@ -11,6 +11,7 @@
#include "core/core.h"
#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/core/nvmap.h"
+#include "core/hle/service/nvdrv/devices/ioctl_serialization.h"
#include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h"
#include "core/hle/service/nvdrv/devices/nvhost_gpu.h"
#include "core/hle/service/nvdrv/nvdrv.h"
@@ -33,21 +34,21 @@ NvResult nvhost_as_gpu::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> i
case 'A':
switch (command.cmd) {
case 0x1:
- return BindChannel(input, output);
+ return WrapFixed(this, &nvhost_as_gpu::BindChannel, input, output);
case 0x2:
- return AllocateSpace(input, output);
+ return WrapFixed(this, &nvhost_as_gpu::AllocateSpace, input, output);
case 0x3:
- return FreeSpace(input, output);
+ return WrapFixed(this, &nvhost_as_gpu::FreeSpace, input, output);
case 0x5:
- return UnmapBuffer(input, output);
+ return WrapFixed(this, &nvhost_as_gpu::UnmapBuffer, input, output);
case 0x6:
- return MapBufferEx(input, output);
+ return WrapFixed(this, &nvhost_as_gpu::MapBufferEx, input, output);
case 0x8:
- return GetVARegions(input, output);
+ return WrapFixed(this, &nvhost_as_gpu::GetVARegions1, input, output);
case 0x9:
- return AllocAsEx(input, output);
+ return WrapFixed(this, &nvhost_as_gpu::AllocAsEx, input, output);
case 0x14:
- return Remap(input, output);
+ return WrapVariable(this, &nvhost_as_gpu::Remap, input, output);
default:
break;
}
@@ -72,7 +73,8 @@ NvResult nvhost_as_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> i
case 'A':
switch (command.cmd) {
case 0x8:
- return GetVARegions(input, output, inline_output);
+ return WrapFixedInlOut(this, &nvhost_as_gpu::GetVARegions3, input, output,
+ inline_output);
default:
break;
}
@@ -87,10 +89,7 @@ NvResult nvhost_as_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> i
void nvhost_as_gpu::OnOpen(DeviceFD fd) {}
void nvhost_as_gpu::OnClose(DeviceFD fd) {}
-NvResult nvhost_as_gpu::AllocAsEx(std::span<const u8> input, std::span<u8> output) {
- IoctlAllocAsEx params{};
- std::memcpy(&params, input.data(), input.size());
-
+NvResult nvhost_as_gpu::AllocAsEx(IoctlAllocAsEx& params) {
LOG_DEBUG(Service_NVDRV, "called, big_page_size=0x{:X}", params.big_page_size);
std::scoped_lock lock(mutex);
@@ -141,10 +140,7 @@ NvResult nvhost_as_gpu::AllocAsEx(std::span<const u8> input, std::span<u8> outpu
return NvResult::Success;
}
-NvResult nvhost_as_gpu::AllocateSpace(std::span<const u8> input, std::span<u8> output) {
- IoctlAllocSpace params{};
- std::memcpy(&params, input.data(), input.size());
-
+NvResult nvhost_as_gpu::AllocateSpace(IoctlAllocSpace& params) {
LOG_DEBUG(Service_NVDRV, "called, pages={:X}, page_size={:X}, flags={:X}", params.pages,
params.page_size, params.flags);
@@ -194,7 +190,6 @@ NvResult nvhost_as_gpu::AllocateSpace(std::span<const u8> input, std::span<u8> o
.big_pages = params.page_size != VM::YUZU_PAGESIZE,
};
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
@@ -222,10 +217,7 @@ void nvhost_as_gpu::FreeMappingLocked(u64 offset) {
mapping_map.erase(offset);
}
-NvResult nvhost_as_gpu::FreeSpace(std::span<const u8> input, std::span<u8> output) {
- IoctlFreeSpace params{};
- std::memcpy(&params, input.data(), input.size());
-
+NvResult nvhost_as_gpu::FreeSpace(IoctlFreeSpace& params) {
LOG_DEBUG(Service_NVDRV, "called, offset={:X}, pages={:X}, page_size={:X}", params.offset,
params.pages, params.page_size);
@@ -264,18 +256,11 @@ NvResult nvhost_as_gpu::FreeSpace(std::span<const u8> input, std::span<u8> outpu
return NvResult::BadValue;
}
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
-NvResult nvhost_as_gpu::Remap(std::span<const u8> input, std::span<u8> output) {
- const auto num_entries = input.size() / sizeof(IoctlRemapEntry);
-
- LOG_DEBUG(Service_NVDRV, "called, num_entries=0x{:X}", num_entries);
-
- std::scoped_lock lock(mutex);
- entries.resize_destructive(num_entries);
- std::memcpy(entries.data(), input.data(), input.size());
+NvResult nvhost_as_gpu::Remap(std::span<IoctlRemapEntry> entries) {
+ LOG_DEBUG(Service_NVDRV, "called, num_entries=0x{:X}", entries.size());
if (!vm.initialised) {
return NvResult::BadValue;
@@ -317,14 +302,10 @@ NvResult nvhost_as_gpu::Remap(std::span<const u8> input, std::span<u8> output) {
}
}
- std::memcpy(output.data(), entries.data(), output.size());
return NvResult::Success;
}
-NvResult nvhost_as_gpu::MapBufferEx(std::span<const u8> input, std::span<u8> output) {
- IoctlMapBufferEx params{};
- std::memcpy(&params, input.data(), input.size());
-
+NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) {
LOG_DEBUG(Service_NVDRV,
"called, flags={:X}, nvmap_handle={:X}, buffer_offset={}, mapping_size={}"
", offset={}",
@@ -421,14 +402,10 @@ NvResult nvhost_as_gpu::MapBufferEx(std::span<const u8> input, std::span<u8> out
mapping_map[params.offset] = mapping;
}
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
-NvResult nvhost_as_gpu::UnmapBuffer(std::span<const u8> input, std::span<u8> output) {
- IoctlUnmapBuffer params{};
- std::memcpy(&params, input.data(), input.size());
-
+NvResult nvhost_as_gpu::UnmapBuffer(IoctlUnmapBuffer& params) {
LOG_DEBUG(Service_NVDRV, "called, offset=0x{:X}", params.offset);
std::scoped_lock lock(mutex);
@@ -464,9 +441,7 @@ NvResult nvhost_as_gpu::UnmapBuffer(std::span<const u8> input, std::span<u8> out
return NvResult::Success;
}
-NvResult nvhost_as_gpu::BindChannel(std::span<const u8> input, std::span<u8> output) {
- IoctlBindChannel params{};
- std::memcpy(&params, input.data(), input.size());
+NvResult nvhost_as_gpu::BindChannel(IoctlBindChannel& params) {
LOG_DEBUG(Service_NVDRV, "called, fd={:X}", params.fd);
auto gpu_channel_device = module.GetDevice<nvhost_gpu>(params.fd);
@@ -493,10 +468,7 @@ void nvhost_as_gpu::GetVARegionsImpl(IoctlGetVaRegions& params) {
};
}
-NvResult nvhost_as_gpu::GetVARegions(std::span<const u8> input, std::span<u8> output) {
- IoctlGetVaRegions params{};
- std::memcpy(&params, input.data(), input.size());
-
+NvResult nvhost_as_gpu::GetVARegions1(IoctlGetVaRegions& params) {
LOG_DEBUG(Service_NVDRV, "called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
params.buf_size);
@@ -508,15 +480,10 @@ NvResult nvhost_as_gpu::GetVARegions(std::span<const u8> input, std::span<u8> ou
GetVARegionsImpl(params);
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
-NvResult nvhost_as_gpu::GetVARegions(std::span<const u8> input, std::span<u8> output,
- std::span<u8> inline_output) {
- IoctlGetVaRegions params{};
- std::memcpy(&params, input.data(), input.size());
-
+NvResult nvhost_as_gpu::GetVARegions3(IoctlGetVaRegions& params, std::span<VaRegion> regions) {
LOG_DEBUG(Service_NVDRV, "called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
params.buf_size);
@@ -528,9 +495,10 @@ NvResult nvhost_as_gpu::GetVARegions(std::span<const u8> input, std::span<u8> ou
GetVARegionsImpl(params);
- std::memcpy(output.data(), &params, output.size());
- std::memcpy(inline_output.data(), &params.regions[0], sizeof(VaRegion));
- std::memcpy(inline_output.data() + sizeof(VaRegion), &params.regions[1], sizeof(VaRegion));
+ const size_t num_regions = std::min(params.regions.size(), regions.size());
+ for (size_t i = 0; i < num_regions; i++) {
+ regions[i] = params.regions[i];
+ }
return NvResult::Success;
}
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
index 2af3e1260..932997e75 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
@@ -139,18 +139,17 @@ private:
static_assert(sizeof(IoctlGetVaRegions) == 16 + sizeof(VaRegion) * 2,
"IoctlGetVaRegions is incorrect size");
- NvResult AllocAsEx(std::span<const u8> input, std::span<u8> output);
- NvResult AllocateSpace(std::span<const u8> input, std::span<u8> output);
- NvResult Remap(std::span<const u8> input, std::span<u8> output);
- NvResult MapBufferEx(std::span<const u8> input, std::span<u8> output);
- NvResult UnmapBuffer(std::span<const u8> input, std::span<u8> output);
- NvResult FreeSpace(std::span<const u8> input, std::span<u8> output);
- NvResult BindChannel(std::span<const u8> input, std::span<u8> output);
+ NvResult AllocAsEx(IoctlAllocAsEx& params);
+ NvResult AllocateSpace(IoctlAllocSpace& params);
+ NvResult Remap(std::span<IoctlRemapEntry> params);
+ NvResult MapBufferEx(IoctlMapBufferEx& params);
+ NvResult UnmapBuffer(IoctlUnmapBuffer& params);
+ NvResult FreeSpace(IoctlFreeSpace& params);
+ NvResult BindChannel(IoctlBindChannel& params);
void GetVARegionsImpl(IoctlGetVaRegions& params);
- NvResult GetVARegions(std::span<const u8> input, std::span<u8> output);
- NvResult GetVARegions(std::span<const u8> input, std::span<u8> output,
- std::span<u8> inline_output);
+ NvResult GetVARegions1(IoctlGetVaRegions& params);
+ NvResult GetVARegions3(IoctlGetVaRegions& params, std::span<VaRegion> regions);
void FreeMappingLocked(u64 offset);
@@ -213,7 +212,6 @@ private:
bool initialised{};
} vm;
std::shared_ptr<Tegra::MemoryManager> gmmu;
- Common::ScratchBuffer<IoctlRemapEntry> entries;
// s32 channel{};
// u32 big_page_size{VM::DEFAULT_BIG_PAGE_SIZE};
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
index 4d55554b4..b8dd34e24 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
@@ -14,6 +14,7 @@
#include "core/hle/kernel/k_event.h"
#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
+#include "core/hle/service/nvdrv/devices/ioctl_serialization.h"
#include "core/hle/service/nvdrv/devices/nvhost_ctrl.h"
#include "video_core/gpu.h"
#include "video_core/host1x/host1x.h"
@@ -40,19 +41,19 @@ NvResult nvhost_ctrl::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> inp
case 0x0:
switch (command.cmd) {
case 0x1b:
- return NvOsGetConfigU32(input, output);
+ return WrapFixed(this, &nvhost_ctrl::NvOsGetConfigU32, input, output);
case 0x1c:
- return IocCtrlClearEventWait(input, output);
+ return WrapFixed(this, &nvhost_ctrl::IocCtrlClearEventWait, input, output);
case 0x1d:
- return IocCtrlEventWait(input, output, true);
+ return WrapFixed(this, &nvhost_ctrl::IocCtrlEventWait, input, output, true);
case 0x1e:
- return IocCtrlEventWait(input, output, false);
+ return WrapFixed(this, &nvhost_ctrl::IocCtrlEventWait, input, output, false);
case 0x1f:
- return IocCtrlEventRegister(input, output);
+ return WrapFixed(this, &nvhost_ctrl::IocCtrlEventRegister, input, output);
case 0x20:
- return IocCtrlEventUnregister(input, output);
+ return WrapFixed(this, &nvhost_ctrl::IocCtrlEventUnregister, input, output);
case 0x21:
- return IocCtrlEventUnregisterBatch(input, output);
+ return WrapFixed(this, &nvhost_ctrl::IocCtrlEventUnregisterBatch, input, output);
}
break;
default:
@@ -79,25 +80,19 @@ void nvhost_ctrl::OnOpen(DeviceFD fd) {}
void nvhost_ctrl::OnClose(DeviceFD fd) {}
-NvResult nvhost_ctrl::NvOsGetConfigU32(std::span<const u8> input, std::span<u8> output) {
- IocGetConfigParams params{};
- std::memcpy(&params, input.data(), sizeof(params));
+NvResult nvhost_ctrl::NvOsGetConfigU32(IocGetConfigParams& params) {
LOG_TRACE(Service_NVDRV, "called, setting={}!{}", params.domain_str.data(),
params.param_str.data());
return NvResult::ConfigVarNotFound; // Returns error on production mode
}
-NvResult nvhost_ctrl::IocCtrlEventWait(std::span<const u8> input, std::span<u8> output,
- bool is_allocation) {
- IocCtrlEventWaitParams params{};
- std::memcpy(&params, input.data(), sizeof(params));
+NvResult nvhost_ctrl::IocCtrlEventWait(IocCtrlEventWaitParams& params, bool is_allocation) {
LOG_DEBUG(Service_NVDRV, "syncpt_id={}, threshold={}, timeout={}, is_allocation={}",
params.fence.id, params.fence.value, params.timeout, is_allocation);
bool must_unmark_fail = !is_allocation;
const u32 event_id = params.value.raw;
SCOPE_EXIT({
- std::memcpy(output.data(), &params, sizeof(params));
if (must_unmark_fail) {
events[event_id].fails = 0;
}
@@ -231,9 +226,7 @@ NvResult nvhost_ctrl::FreeEvent(u32 slot) {
return NvResult::Success;
}
-NvResult nvhost_ctrl::IocCtrlEventRegister(std::span<const u8> input, std::span<u8> output) {
- IocCtrlEventRegisterParams params{};
- std::memcpy(&params, input.data(), sizeof(params));
+NvResult nvhost_ctrl::IocCtrlEventRegister(IocCtrlEventRegisterParams& params) {
const u32 event_id = params.user_event_id;
LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id);
if (event_id >= MaxNvEvents) {
@@ -252,9 +245,7 @@ NvResult nvhost_ctrl::IocCtrlEventRegister(std::span<const u8> input, std::span<
return NvResult::Success;
}
-NvResult nvhost_ctrl::IocCtrlEventUnregister(std::span<const u8> input, std::span<u8> output) {
- IocCtrlEventUnregisterParams params{};
- std::memcpy(&params, input.data(), sizeof(params));
+NvResult nvhost_ctrl::IocCtrlEventUnregister(IocCtrlEventUnregisterParams& params) {
const u32 event_id = params.user_event_id & 0x00FF;
LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id);
@@ -262,9 +253,7 @@ NvResult nvhost_ctrl::IocCtrlEventUnregister(std::span<const u8> input, std::spa
return FreeEvent(event_id);
}
-NvResult nvhost_ctrl::IocCtrlEventUnregisterBatch(std::span<const u8> input, std::span<u8> output) {
- IocCtrlEventUnregisterBatchParams params{};
- std::memcpy(&params, input.data(), sizeof(params));
+NvResult nvhost_ctrl::IocCtrlEventUnregisterBatch(IocCtrlEventUnregisterBatchParams& params) {
u64 event_mask = params.user_events;
LOG_DEBUG(Service_NVDRV, " called, event_mask: {:X}", event_mask);
@@ -280,10 +269,7 @@ NvResult nvhost_ctrl::IocCtrlEventUnregisterBatch(std::span<const u8> input, std
return NvResult::Success;
}
-NvResult nvhost_ctrl::IocCtrlClearEventWait(std::span<const u8> input, std::span<u8> output) {
- IocCtrlEventClearParams params{};
- std::memcpy(&params, input.data(), sizeof(params));
-
+NvResult nvhost_ctrl::IocCtrlClearEventWait(IocCtrlEventClearParams& params) {
u32 event_id = params.event_id.slot;
LOG_DEBUG(Service_NVDRV, "called, event_id: {:X}", event_id);
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h
index 2efed4862..992124b60 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h
@@ -186,12 +186,12 @@ private:
static_assert(sizeof(IocCtrlEventUnregisterBatchParams) == 8,
"IocCtrlEventKill is incorrect size");
- NvResult NvOsGetConfigU32(std::span<const u8> input, std::span<u8> output);
- NvResult IocCtrlEventWait(std::span<const u8> input, std::span<u8> output, bool is_allocation);
- NvResult IocCtrlEventRegister(std::span<const u8> input, std::span<u8> output);
- NvResult IocCtrlEventUnregister(std::span<const u8> input, std::span<u8> output);
- NvResult IocCtrlEventUnregisterBatch(std::span<const u8> input, std::span<u8> output);
- NvResult IocCtrlClearEventWait(std::span<const u8> input, std::span<u8> output);
+ NvResult NvOsGetConfigU32(IocGetConfigParams& params);
+ NvResult IocCtrlEventRegister(IocCtrlEventRegisterParams& params);
+ NvResult IocCtrlEventUnregister(IocCtrlEventUnregisterParams& params);
+ NvResult IocCtrlEventUnregisterBatch(IocCtrlEventUnregisterBatchParams& params);
+ NvResult IocCtrlEventWait(IocCtrlEventWaitParams& params, bool is_allocation);
+ NvResult IocCtrlClearEventWait(IocCtrlEventClearParams& params);
NvResult FreeEvent(u32 slot);
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
index 6081d92e9..61a2df121 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
@@ -6,6 +6,7 @@
#include "common/logging/log.h"
#include "core/core.h"
#include "core/core_timing.h"
+#include "core/hle/service/nvdrv/devices/ioctl_serialization.h"
#include "core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h"
#include "core/hle/service/nvdrv/nvdrv.h"
@@ -27,23 +28,23 @@ NvResult nvhost_ctrl_gpu::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8>
case 'G':
switch (command.cmd) {
case 0x1:
- return ZCullGetCtxSize(input, output);
+ return WrapFixed(this, &nvhost_ctrl_gpu::ZCullGetCtxSize, input, output);
case 0x2:
- return ZCullGetInfo(input, output);
+ return WrapFixed(this, &nvhost_ctrl_gpu::ZCullGetInfo, input, output);
case 0x3:
- return ZBCSetTable(input, output);
+ return WrapFixed(this, &nvhost_ctrl_gpu::ZBCSetTable, input, output);
case 0x4:
- return ZBCQueryTable(input, output);
+ return WrapFixed(this, &nvhost_ctrl_gpu::ZBCQueryTable, input, output);
case 0x5:
- return GetCharacteristics(input, output);
+ return WrapFixed(this, &nvhost_ctrl_gpu::GetCharacteristics1, input, output);
case 0x6:
- return GetTPCMasks(input, output);
+ return WrapFixed(this, &nvhost_ctrl_gpu::GetTPCMasks1, input, output);
case 0x7:
- return FlushL2(input, output);
+ return WrapFixed(this, &nvhost_ctrl_gpu::FlushL2, input, output);
case 0x14:
- return GetActiveSlotMask(input, output);
+ return WrapFixed(this, &nvhost_ctrl_gpu::GetActiveSlotMask, input, output);
case 0x1c:
- return GetGpuTime(input, output);
+ return WrapFixed(this, &nvhost_ctrl_gpu::GetGpuTime, input, output);
default:
break;
}
@@ -65,9 +66,11 @@ NvResult nvhost_ctrl_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8>
case 'G':
switch (command.cmd) {
case 0x5:
- return GetCharacteristics(input, output, inline_output);
+ return WrapFixedInlOut(this, &nvhost_ctrl_gpu::GetCharacteristics3, input, output,
+ inline_output);
case 0x6:
- return GetTPCMasks(input, output, inline_output);
+ return WrapFixedInlOut(this, &nvhost_ctrl_gpu::GetTPCMasks3, input, output,
+ inline_output);
default:
break;
}
@@ -82,10 +85,8 @@ NvResult nvhost_ctrl_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8>
void nvhost_ctrl_gpu::OnOpen(DeviceFD fd) {}
void nvhost_ctrl_gpu::OnClose(DeviceFD fd) {}
-NvResult nvhost_ctrl_gpu::GetCharacteristics(std::span<const u8> input, std::span<u8> output) {
+NvResult nvhost_ctrl_gpu::GetCharacteristics1(IoctlCharacteristics& params) {
LOG_DEBUG(Service_NVDRV, "called");
- IoctlCharacteristics params{};
- std::memcpy(&params, input.data(), input.size());
params.gc.arch = 0x120;
params.gc.impl = 0xb;
params.gc.rev = 0xa1;
@@ -123,15 +124,13 @@ NvResult nvhost_ctrl_gpu::GetCharacteristics(std::span<const u8> input, std::spa
params.gc.gr_compbit_store_base_hw = 0x0;
params.gpu_characteristics_buf_size = 0xA0;
params.gpu_characteristics_buf_addr = 0xdeadbeef; // Cannot be 0 (UNUSED)
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
-NvResult nvhost_ctrl_gpu::GetCharacteristics(std::span<const u8> input, std::span<u8> output,
- std::span<u8> inline_output) {
+NvResult nvhost_ctrl_gpu::GetCharacteristics3(
+ IoctlCharacteristics& params, std::span<IoctlGpuCharacteristics> gpu_characteristics) {
LOG_DEBUG(Service_NVDRV, "called");
- IoctlCharacteristics params{};
- std::memcpy(&params, input.data(), input.size());
+
params.gc.arch = 0x120;
params.gc.impl = 0xb;
params.gc.rev = 0xa1;
@@ -169,70 +168,47 @@ NvResult nvhost_ctrl_gpu::GetCharacteristics(std::span<const u8> input, std::spa
params.gc.gr_compbit_store_base_hw = 0x0;
params.gpu_characteristics_buf_size = 0xA0;
params.gpu_characteristics_buf_addr = 0xdeadbeef; // Cannot be 0 (UNUSED)
-
- std::memcpy(output.data(), &params, output.size());
- std::memcpy(inline_output.data(), &params.gc, inline_output.size());
+ if (!gpu_characteristics.empty()) {
+ gpu_characteristics.front() = params.gc;
+ }
return NvResult::Success;
}
-NvResult nvhost_ctrl_gpu::GetTPCMasks(std::span<const u8> input, std::span<u8> output) {
- IoctlGpuGetTpcMasksArgs params{};
- std::memcpy(&params, input.data(), input.size());
+NvResult nvhost_ctrl_gpu::GetTPCMasks1(IoctlGpuGetTpcMasksArgs& params) {
LOG_DEBUG(Service_NVDRV, "called, mask_buffer_size=0x{:X}", params.mask_buffer_size);
if (params.mask_buffer_size != 0) {
params.tcp_mask = 3;
}
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
-NvResult nvhost_ctrl_gpu::GetTPCMasks(std::span<const u8> input, std::span<u8> output,
- std::span<u8> inline_output) {
- IoctlGpuGetTpcMasksArgs params{};
- std::memcpy(&params, input.data(), input.size());
+NvResult nvhost_ctrl_gpu::GetTPCMasks3(IoctlGpuGetTpcMasksArgs& params, std::span<u32> tpc_mask) {
LOG_DEBUG(Service_NVDRV, "called, mask_buffer_size=0x{:X}", params.mask_buffer_size);
if (params.mask_buffer_size != 0) {
params.tcp_mask = 3;
}
- std::memcpy(output.data(), &params, output.size());
- std::memcpy(inline_output.data(), &params.tcp_mask, inline_output.size());
+ if (!tpc_mask.empty()) {
+ tpc_mask.front() = params.tcp_mask;
+ }
return NvResult::Success;
}
-NvResult nvhost_ctrl_gpu::GetActiveSlotMask(std::span<const u8> input, std::span<u8> output) {
+NvResult nvhost_ctrl_gpu::GetActiveSlotMask(IoctlActiveSlotMask& params) {
LOG_DEBUG(Service_NVDRV, "called");
- IoctlActiveSlotMask params{};
- if (input.size() > 0) {
- std::memcpy(&params, input.data(), input.size());
- }
params.slot = 0x07;
params.mask = 0x01;
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
-NvResult nvhost_ctrl_gpu::ZCullGetCtxSize(std::span<const u8> input, std::span<u8> output) {
+NvResult nvhost_ctrl_gpu::ZCullGetCtxSize(IoctlZcullGetCtxSize& params) {
LOG_DEBUG(Service_NVDRV, "called");
-
- IoctlZcullGetCtxSize params{};
- if (input.size() > 0) {
- std::memcpy(&params, input.data(), input.size());
- }
params.size = 0x1;
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
-NvResult nvhost_ctrl_gpu::ZCullGetInfo(std::span<const u8> input, std::span<u8> output) {
+NvResult nvhost_ctrl_gpu::ZCullGetInfo(IoctlNvgpuGpuZcullGetInfoArgs& params) {
LOG_DEBUG(Service_NVDRV, "called");
-
- IoctlNvgpuGpuZcullGetInfoArgs params{};
-
- if (input.size() > 0) {
- std::memcpy(&params, input.data(), input.size());
- }
-
params.width_align_pixels = 0x20;
params.height_align_pixels = 0x20;
params.pixel_squares_by_aliquots = 0x400;
@@ -243,53 +219,28 @@ NvResult nvhost_ctrl_gpu::ZCullGetInfo(std::span<const u8> input, std::span<u8>
params.subregion_width_align_pixels = 0x20;
params.subregion_height_align_pixels = 0x40;
params.subregion_count = 0x10;
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
-NvResult nvhost_ctrl_gpu::ZBCSetTable(std::span<const u8> input, std::span<u8> output) {
+NvResult nvhost_ctrl_gpu::ZBCSetTable(IoctlZbcSetTable& params) {
LOG_WARNING(Service_NVDRV, "(STUBBED) called");
-
- IoctlZbcSetTable params{};
- std::memcpy(&params, input.data(), input.size());
// TODO(ogniK): What does this even actually do?
-
- // Prevent null pointer being passed as arg 1
- if (output.empty()) {
- LOG_WARNING(Service_NVDRV, "Avoiding passing null pointer to memcpy");
- } else {
- std::memcpy(output.data(), &params, output.size());
- }
return NvResult::Success;
}
-NvResult nvhost_ctrl_gpu::ZBCQueryTable(std::span<const u8> input, std::span<u8> output) {
+NvResult nvhost_ctrl_gpu::ZBCQueryTable(IoctlZbcQueryTable& params) {
LOG_WARNING(Service_NVDRV, "(STUBBED) called");
-
- IoctlZbcQueryTable params{};
- std::memcpy(&params, input.data(), input.size());
- // TODO : To implement properly
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
-NvResult nvhost_ctrl_gpu::FlushL2(std::span<const u8> input, std::span<u8> output) {
+NvResult nvhost_ctrl_gpu::FlushL2(IoctlFlushL2& params) {
LOG_WARNING(Service_NVDRV, "(STUBBED) called");
-
- IoctlFlushL2 params{};
- std::memcpy(&params, input.data(), input.size());
- // TODO : To implement properly
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
-NvResult nvhost_ctrl_gpu::GetGpuTime(std::span<const u8> input, std::span<u8> output) {
+NvResult nvhost_ctrl_gpu::GetGpuTime(IoctlGetGpuTime& params) {
LOG_DEBUG(Service_NVDRV, "called");
-
- IoctlGetGpuTime params{};
- std::memcpy(&params, input.data(), input.size());
params.gpu_time = static_cast<u64_le>(system.CoreTiming().GetGlobalTimeNs().count());
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
index 97995551c..d170299bd 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
@@ -151,21 +151,20 @@ private:
};
static_assert(sizeof(IoctlGetGpuTime) == 0x10, "IoctlGetGpuTime is incorrect size");
- NvResult GetCharacteristics(std::span<const u8> input, std::span<u8> output);
- NvResult GetCharacteristics(std::span<const u8> input, std::span<u8> output,
- std::span<u8> inline_output);
-
- NvResult GetTPCMasks(std::span<const u8> input, std::span<u8> output);
- NvResult GetTPCMasks(std::span<const u8> input, std::span<u8> output,
- std::span<u8> inline_output);
-
- NvResult GetActiveSlotMask(std::span<const u8> input, std::span<u8> output);
- NvResult ZCullGetCtxSize(std::span<const u8> input, std::span<u8> output);
- NvResult ZCullGetInfo(std::span<const u8> input, std::span<u8> output);
- NvResult ZBCSetTable(std::span<const u8> input, std::span<u8> output);
- NvResult ZBCQueryTable(std::span<const u8> input, std::span<u8> output);
- NvResult FlushL2(std::span<const u8> input, std::span<u8> output);
- NvResult GetGpuTime(std::span<const u8> input, std::span<u8> output);
+ NvResult GetCharacteristics1(IoctlCharacteristics& params);
+ NvResult GetCharacteristics3(IoctlCharacteristics& params,
+ std::span<IoctlGpuCharacteristics> gpu_characteristics);
+
+ NvResult GetTPCMasks1(IoctlGpuGetTpcMasksArgs& params);
+ NvResult GetTPCMasks3(IoctlGpuGetTpcMasksArgs& params, std::span<u32> tpc_mask);
+
+ NvResult GetActiveSlotMask(IoctlActiveSlotMask& params);
+ NvResult ZCullGetCtxSize(IoctlZcullGetCtxSize& params);
+ NvResult ZCullGetInfo(IoctlNvgpuGpuZcullGetInfoArgs& params);
+ NvResult ZBCSetTable(IoctlZbcSetTable& params);
+ NvResult ZBCQueryTable(IoctlZbcQueryTable& params);
+ NvResult FlushL2(IoctlFlushL2& params);
+ NvResult GetGpuTime(IoctlGetGpuTime& params);
EventInterface& events_interface;
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
index 46a25fcab..b0395c2f0 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
@@ -8,6 +8,7 @@
#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/core/nvmap.h"
#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
+#include "core/hle/service/nvdrv/devices/ioctl_serialization.h"
#include "core/hle/service/nvdrv/devices/nvhost_gpu.h"
#include "core/hle/service/nvdrv/nvdrv.h"
#include "core/memory.h"
@@ -52,7 +53,7 @@ NvResult nvhost_gpu::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> inpu
case 0x0:
switch (command.cmd) {
case 0x3:
- return GetWaitbase(input, output);
+ return WrapFixed(this, &nvhost_gpu::GetWaitbase, input, output);
default:
break;
}
@@ -60,25 +61,25 @@ NvResult nvhost_gpu::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> inpu
case 'H':
switch (command.cmd) {
case 0x1:
- return SetNVMAPfd(input, output);
+ return WrapFixed(this, &nvhost_gpu::SetNVMAPfd, input, output);
case 0x3:
- return ChannelSetTimeout(input, output);
+ return WrapFixed(this, &nvhost_gpu::ChannelSetTimeout, input, output);
case 0x8:
- return SubmitGPFIFOBase(input, output, false);
+ return WrapFixedVariable(this, &nvhost_gpu::SubmitGPFIFOBase1, input, output, false);
case 0x9:
- return AllocateObjectContext(input, output);
+ return WrapFixed(this, &nvhost_gpu::AllocateObjectContext, input, output);
case 0xb:
- return ZCullBind(input, output);
+ return WrapFixed(this, &nvhost_gpu::ZCullBind, input, output);
case 0xc:
- return SetErrorNotifier(input, output);
+ return WrapFixed(this, &nvhost_gpu::SetErrorNotifier, input, output);
case 0xd:
- return SetChannelPriority(input, output);
+ return WrapFixed(this, &nvhost_gpu::SetChannelPriority, input, output);
case 0x1a:
- return AllocGPFIFOEx2(input, output);
+ return WrapFixed(this, &nvhost_gpu::AllocGPFIFOEx2, input, output);
case 0x1b:
- return SubmitGPFIFOBase(input, output, true);
+ return WrapFixedVariable(this, &nvhost_gpu::SubmitGPFIFOBase1, input, output, true);
case 0x1d:
- return ChannelSetTimeslice(input, output);
+ return WrapFixed(this, &nvhost_gpu::ChannelSetTimeslice, input, output);
default:
break;
}
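SubmitGPFIFOBase1 is dispatched through WrapFixedVariable, which hands the handler everything after the fixed struct as a typed tail; extra arguments such as the kickoff flag here (or the fd in the nvdec/vic devices further down) ride along untouched. A sketch under the same caveat that the real helper is not shown in this excerpt, reusing the includes from the WrapFixed sketch:

    #include <vector>

    // Sketch (assumed): fixed struct up front, variable-length array of Elem
    // behind it; both may be modified and are written back to the reply buffer.
    // Alignment and strict-aliasing concerns are glossed over here.
    template <typename Self, typename Params, typename Elem, typename... Extra>
    NvResult WrapFixedVariable(Self* self,
                               NvResult (Self::*handler)(Params&, std::span<Elem>, Extra...),
                               std::span<const u8> input, std::span<u8> output,
                               Extra... extra) {
        std::vector<u8> buffer(input.begin(), input.end());
        buffer.resize(std::max(buffer.size(), sizeof(Params))); // tolerate short payloads
        auto* params = reinterpret_cast<Params*>(buffer.data());
        const std::span<Elem> tail{reinterpret_cast<Elem*>(buffer.data() + sizeof(Params)),
                                   (buffer.size() - sizeof(Params)) / sizeof(Elem)};
        const NvResult result = (self->*handler)(*params, tail, extra...);
        if (!output.empty()) {
            std::memcpy(output.data(), buffer.data(), std::min(output.size(), buffer.size()));
        }
        return result;
    }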
@@ -86,9 +87,9 @@ NvResult nvhost_gpu::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> inpu
case 'G':
switch (command.cmd) {
case 0x14:
- return SetClientData(input, output);
+ return WrapFixed(this, &nvhost_gpu::SetClientData, input, output);
case 0x15:
- return GetClientData(input, output);
+ return WrapFixed(this, &nvhost_gpu::GetClientData, input, output);
default:
break;
}
@@ -104,7 +105,8 @@ NvResult nvhost_gpu::Ioctl2(DeviceFD fd, Ioctl command, std::span<const u8> inpu
case 'H':
switch (command.cmd) {
case 0x1b:
- return SubmitGPFIFOBase(input, inline_input, output);
+ return WrapFixedInlIn(this, &nvhost_gpu::SubmitGPFIFOBase2, input, inline_input,
+ output);
}
break;
}
@@ -121,63 +123,45 @@ NvResult nvhost_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> inpu
void nvhost_gpu::OnOpen(DeviceFD fd) {}
void nvhost_gpu::OnClose(DeviceFD fd) {}
-NvResult nvhost_gpu::SetNVMAPfd(std::span<const u8> input, std::span<u8> output) {
- IoctlSetNvmapFD params{};
- std::memcpy(&params, input.data(), input.size());
+NvResult nvhost_gpu::SetNVMAPfd(IoctlSetNvmapFD& params) {
LOG_DEBUG(Service_NVDRV, "called, fd={}", params.nvmap_fd);
nvmap_fd = params.nvmap_fd;
return NvResult::Success;
}
-NvResult nvhost_gpu::SetClientData(std::span<const u8> input, std::span<u8> output) {
+NvResult nvhost_gpu::SetClientData(IoctlClientData& params) {
LOG_DEBUG(Service_NVDRV, "called");
-
- IoctlClientData params{};
- std::memcpy(&params, input.data(), input.size());
user_data = params.data;
return NvResult::Success;
}
-NvResult nvhost_gpu::GetClientData(std::span<const u8> input, std::span<u8> output) {
+NvResult nvhost_gpu::GetClientData(IoctlClientData& params) {
LOG_DEBUG(Service_NVDRV, "called");
-
- IoctlClientData params{};
- std::memcpy(&params, input.data(), input.size());
params.data = user_data;
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
-NvResult nvhost_gpu::ZCullBind(std::span<const u8> input, std::span<u8> output) {
- std::memcpy(&zcull_params, input.data(), input.size());
+NvResult nvhost_gpu::ZCullBind(IoctlZCullBind& params) {
+ zcull_params = params;
LOG_DEBUG(Service_NVDRV, "called, gpu_va={:X}, mode={:X}", zcull_params.gpu_va,
zcull_params.mode);
-
- std::memcpy(output.data(), &zcull_params, output.size());
return NvResult::Success;
}
-NvResult nvhost_gpu::SetErrorNotifier(std::span<const u8> input, std::span<u8> output) {
- IoctlSetErrorNotifier params{};
- std::memcpy(&params, input.data(), input.size());
+NvResult nvhost_gpu::SetErrorNotifier(IoctlSetErrorNotifier& params) {
LOG_WARNING(Service_NVDRV, "(STUBBED) called, offset={:X}, size={:X}, mem={:X}", params.offset,
params.size, params.mem);
-
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
-NvResult nvhost_gpu::SetChannelPriority(std::span<const u8> input, std::span<u8> output) {
- std::memcpy(&channel_priority, input.data(), input.size());
+NvResult nvhost_gpu::SetChannelPriority(IoctlChannelSetPriority& params) {
+ channel_priority = params.priority;
LOG_DEBUG(Service_NVDRV, "(STUBBED) called, priority={:X}", channel_priority);
-
return NvResult::Success;
}
-NvResult nvhost_gpu::AllocGPFIFOEx2(std::span<const u8> input, std::span<u8> output) {
- IoctlAllocGpfifoEx2 params{};
- std::memcpy(&params, input.data(), input.size());
+NvResult nvhost_gpu::AllocGPFIFOEx2(IoctlAllocGpfifoEx2& params) {
LOG_WARNING(Service_NVDRV,
"(STUBBED) called, num_entries={:X}, flags={:X}, unk0={:X}, "
"unk1={:X}, unk2={:X}, unk3={:X}",
@@ -193,18 +177,14 @@ NvResult nvhost_gpu::AllocGPFIFOEx2(std::span<const u8> input, std::span<u8> out
params.fence_out = syncpoint_manager.GetSyncpointFence(channel_syncpoint);
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
-NvResult nvhost_gpu::AllocateObjectContext(std::span<const u8> input, std::span<u8> output) {
- IoctlAllocObjCtx params{};
- std::memcpy(&params, input.data(), input.size());
+NvResult nvhost_gpu::AllocateObjectContext(IoctlAllocObjCtx& params) {
LOG_WARNING(Service_NVDRV, "(STUBBED) called, class_num={:X}, flags={:X}", params.class_num,
params.flags);
params.obj_id = 0x0;
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
@@ -248,8 +228,7 @@ static boost::container::small_vector<Tegra::CommandHeader, 512> BuildIncrementW
return result;
}
-NvResult nvhost_gpu::SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, std::span<u8> output,
- Tegra::CommandList&& entries) {
+NvResult nvhost_gpu::SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, Tegra::CommandList&& entries) {
LOG_TRACE(Service_NVDRV, "called, gpfifo={:X}, num_entries={:X}, flags={:X}", params.address,
params.num_entries, params.flags.raw);
@@ -290,65 +269,55 @@ NvResult nvhost_gpu::SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, std::span<u8> o
flags.raw = 0;
- std::memcpy(output.data(), &params, sizeof(IoctlSubmitGpfifo));
return NvResult::Success;
}
-NvResult nvhost_gpu::SubmitGPFIFOBase(std::span<const u8> input, std::span<u8> output,
- bool kickoff) {
- if (input.size() < sizeof(IoctlSubmitGpfifo)) {
+NvResult nvhost_gpu::SubmitGPFIFOBase1(IoctlSubmitGpfifo& params,
+ std::span<Tegra::CommandListHeader> commands, bool kickoff) {
+ if (params.num_entries > commands.size()) {
UNIMPLEMENTED();
return NvResult::InvalidSize;
}
- IoctlSubmitGpfifo params{};
- std::memcpy(&params, input.data(), sizeof(IoctlSubmitGpfifo));
- Tegra::CommandList entries(params.num_entries);
+ Tegra::CommandList entries(params.num_entries);
if (kickoff) {
system.ApplicationMemory().ReadBlock(params.address, entries.command_lists.data(),
params.num_entries * sizeof(Tegra::CommandListHeader));
} else {
- std::memcpy(entries.command_lists.data(), &input[sizeof(IoctlSubmitGpfifo)],
+ std::memcpy(entries.command_lists.data(), commands.data(),
params.num_entries * sizeof(Tegra::CommandListHeader));
}
- return SubmitGPFIFOImpl(params, output, std::move(entries));
+ return SubmitGPFIFOImpl(params, std::move(entries));
}
-NvResult nvhost_gpu::SubmitGPFIFOBase(std::span<const u8> input, std::span<const u8> input_inline,
- std::span<u8> output) {
- if (input.size() < sizeof(IoctlSubmitGpfifo)) {
+NvResult nvhost_gpu::SubmitGPFIFOBase2(IoctlSubmitGpfifo& params,
+ std::span<const Tegra::CommandListHeader> commands) {
+ if (params.num_entries > commands.size()) {
UNIMPLEMENTED();
return NvResult::InvalidSize;
}
- IoctlSubmitGpfifo params{};
- std::memcpy(&params, input.data(), sizeof(IoctlSubmitGpfifo));
+
Tegra::CommandList entries(params.num_entries);
- std::memcpy(entries.command_lists.data(), input_inline.data(), input_inline.size());
- return SubmitGPFIFOImpl(params, output, std::move(entries));
+ std::memcpy(entries.command_lists.data(), commands.data(),
+ params.num_entries * sizeof(Tegra::CommandListHeader));
+ return SubmitGPFIFOImpl(params, std::move(entries));
}
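Note that both submit paths now validate the guest-declared entry count against the tail the wrapper actually produced, instead of only checking that the fixed header fits:

    // With an (assumed) 8-byte Tegra::CommandListHeader: a payload holding the
    // IoctlSubmitGpfifo struct plus 3 headers yields commands.size() == 3. A
    // request claiming params.num_entries = 1000 now fails the
    // num_entries > commands.size() check and returns InvalidSize, where the
    // old code validated only sizeof(IoctlSubmitGpfifo) and would have copied
    // 8000 bytes out of a 24-byte tail.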
-NvResult nvhost_gpu::GetWaitbase(std::span<const u8> input, std::span<u8> output) {
- IoctlGetWaitbase params{};
- std::memcpy(&params, input.data(), sizeof(IoctlGetWaitbase));
+NvResult nvhost_gpu::GetWaitbase(IoctlGetWaitbase& params) {
LOG_INFO(Service_NVDRV, "called, unknown=0x{:X}", params.unknown);
params.value = 0; // Seems to be hard coded at 0
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
-NvResult nvhost_gpu::ChannelSetTimeout(std::span<const u8> input, std::span<u8> output) {
- IoctlChannelSetTimeout params{};
- std::memcpy(&params, input.data(), sizeof(IoctlChannelSetTimeout));
+NvResult nvhost_gpu::ChannelSetTimeout(IoctlChannelSetTimeout& params) {
LOG_INFO(Service_NVDRV, "called, timeout=0x{:X}", params.timeout);
return NvResult::Success;
}
-NvResult nvhost_gpu::ChannelSetTimeslice(std::span<const u8> input, std::span<u8> output) {
- IoctlSetTimeslice params{};
- std::memcpy(&params, input.data(), sizeof(IoctlSetTimeslice));
+NvResult nvhost_gpu::ChannelSetTimeslice(IoctlSetTimeslice& params) {
LOG_INFO(Service_NVDRV, "called, timeslice=0x{:X}", params.timeslice);
channel_timeslice = params.timeslice;
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
index 529c20526..88fd228ff 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
@@ -186,23 +186,24 @@ private:
u32_le channel_priority{};
u32_le channel_timeslice{};
- NvResult SetNVMAPfd(std::span<const u8> input, std::span<u8> output);
- NvResult SetClientData(std::span<const u8> input, std::span<u8> output);
- NvResult GetClientData(std::span<const u8> input, std::span<u8> output);
- NvResult ZCullBind(std::span<const u8> input, std::span<u8> output);
- NvResult SetErrorNotifier(std::span<const u8> input, std::span<u8> output);
- NvResult SetChannelPriority(std::span<const u8> input, std::span<u8> output);
- NvResult AllocGPFIFOEx2(std::span<const u8> input, std::span<u8> output);
- NvResult AllocateObjectContext(std::span<const u8> input, std::span<u8> output);
- NvResult SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, std::span<u8> output,
- Tegra::CommandList&& entries);
- NvResult SubmitGPFIFOBase(std::span<const u8> input, std::span<u8> output,
- bool kickoff = false);
- NvResult SubmitGPFIFOBase(std::span<const u8> input, std::span<const u8> input_inline,
- std::span<u8> output);
- NvResult GetWaitbase(std::span<const u8> input, std::span<u8> output);
- NvResult ChannelSetTimeout(std::span<const u8> input, std::span<u8> output);
- NvResult ChannelSetTimeslice(std::span<const u8> input, std::span<u8> output);
+ NvResult SetNVMAPfd(IoctlSetNvmapFD& params);
+ NvResult SetClientData(IoctlClientData& params);
+ NvResult GetClientData(IoctlClientData& params);
+ NvResult ZCullBind(IoctlZCullBind& params);
+ NvResult SetErrorNotifier(IoctlSetErrorNotifier& params);
+ NvResult SetChannelPriority(IoctlChannelSetPriority& params);
+ NvResult AllocGPFIFOEx2(IoctlAllocGpfifoEx2& params);
+ NvResult AllocateObjectContext(IoctlAllocObjCtx& params);
+
+ NvResult SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, Tegra::CommandList&& entries);
+ NvResult SubmitGPFIFOBase1(IoctlSubmitGpfifo& params,
+ std::span<Tegra::CommandListHeader> commands, bool kickoff = false);
+ NvResult SubmitGPFIFOBase2(IoctlSubmitGpfifo& params,
+ std::span<const Tegra::CommandListHeader> commands);
+
+ NvResult GetWaitbase(IoctlGetWaitbase& params);
+ NvResult ChannelSetTimeout(IoctlChannelSetTimeout& params);
+ NvResult ChannelSetTimeslice(IoctlSetTimeslice& params);
EventInterface& events_interface;
NvCore::Container& core;
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
index a174442a6..f43914e1b 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
@@ -6,6 +6,7 @@
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/service/nvdrv/core/container.h"
+#include "core/hle/service/nvdrv/devices/ioctl_serialization.h"
#include "core/hle/service/nvdrv/devices/nvhost_nvdec.h"
#include "video_core/renderer_base.h"
@@ -25,18 +26,18 @@ NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> in
if (!host1x_file.fd_to_id.contains(fd)) {
host1x_file.fd_to_id[fd] = host1x_file.nvdec_next_id++;
}
- return Submit(fd, input, output);
+ return WrapFixedVariable(this, &nvhost_nvdec::Submit, input, output, fd);
}
case 0x2:
- return GetSyncpoint(input, output);
+ return WrapFixed(this, &nvhost_nvdec::GetSyncpoint, input, output);
case 0x3:
- return GetWaitbase(input, output);
+ return WrapFixed(this, &nvhost_nvdec::GetWaitbase, input, output);
case 0x7:
- return SetSubmitTimeout(input, output);
+ return WrapFixed(this, &nvhost_nvdec::SetSubmitTimeout, input, output);
case 0x9:
- return MapBuffer(input, output);
+ return WrapFixedVariable(this, &nvhost_nvdec::MapBuffer, input, output);
case 0xa:
- return UnmapBuffer(input, output);
+ return WrapFixedVariable(this, &nvhost_nvdec::UnmapBuffer, input, output);
default:
break;
}
@@ -44,7 +45,7 @@ NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> in
case 'H':
switch (command.cmd) {
case 0x1:
- return SetNVMAPfd(input);
+ return WrapFixed(this, &nvhost_nvdec::SetNVMAPfd, input, output);
default:
break;
}
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
index 61649aa4a..74c701b95 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
@@ -29,6 +29,9 @@ std::size_t SliceVectors(std::span<const u8> input, std::vector<T>& dst, std::si
return 0;
}
const size_t bytes_copied = count * sizeof(T);
+ if (input.size() < offset + bytes_copied) {
+ return 0;
+ }
std::memcpy(dst.data(), input.data() + offset, bytes_copied);
return bytes_copied;
}
@@ -41,6 +44,9 @@ std::size_t WriteVectors(std::span<u8> dst, const std::vector<T>& src, std::size
return 0;
}
const size_t bytes_copied = src.size() * sizeof(T);
+ if (dst.size() < offset + bytes_copied) {
+ return 0;
+ }
std::memcpy(dst.data() + offset, src.data(), bytes_copied);
return bytes_copied;
}
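SliceVectors and WriteVectors previously trusted count and offset unconditionally; the new early-outs make a short buffer copy nothing rather than read or write out of bounds. For instance, with hypothetical sizes:

    // A 12-byte payload cannot satisfy a request for four u32 elements:
    //   count = 4, sizeof(T) = 4 -> bytes_copied = 16
    //   input.size() = 12 < offset (0) + 16 -> return 0, dst left untouched
    // The old code would have memcpy'd 16 bytes out of the 12-byte buffer.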
@@ -63,18 +69,14 @@ nvhost_nvdec_common::~nvhost_nvdec_common() {
core.Host1xDeviceFile().syncpts_accumulated.push_back(channel_syncpoint);
}
-NvResult nvhost_nvdec_common::SetNVMAPfd(std::span<const u8> input) {
- IoctlSetNvmapFD params{};
- std::memcpy(&params, input.data(), sizeof(IoctlSetNvmapFD));
+NvResult nvhost_nvdec_common::SetNVMAPfd(IoctlSetNvmapFD& params) {
LOG_DEBUG(Service_NVDRV, "called, fd={}", params.nvmap_fd);
nvmap_fd = params.nvmap_fd;
return NvResult::Success;
}
-NvResult nvhost_nvdec_common::Submit(DeviceFD fd, std::span<const u8> input, std::span<u8> output) {
- IoctlSubmit params{};
- std::memcpy(&params, input.data(), sizeof(IoctlSubmit));
+NvResult nvhost_nvdec_common::Submit(IoctlSubmit& params, std::span<u8> data, DeviceFD fd) {
LOG_DEBUG(Service_NVDRV, "called NVDEC Submit, cmd_buffer_count={}", params.cmd_buffer_count);
// Instantiate param buffers
@@ -85,12 +87,12 @@ NvResult nvhost_nvdec_common::Submit(DeviceFD fd, std::span<const u8> input, std
std::vector<u32> fence_thresholds(params.fence_count);
// Slice input into their respective buffers
- std::size_t offset = sizeof(IoctlSubmit);
- offset += SliceVectors(input, command_buffers, params.cmd_buffer_count, offset);
- offset += SliceVectors(input, relocs, params.relocation_count, offset);
- offset += SliceVectors(input, reloc_shifts, params.relocation_count, offset);
- offset += SliceVectors(input, syncpt_increments, params.syncpoint_count, offset);
- offset += SliceVectors(input, fence_thresholds, params.fence_count, offset);
+ std::size_t offset = 0;
+ offset += SliceVectors(data, command_buffers, params.cmd_buffer_count, offset);
+ offset += SliceVectors(data, relocs, params.relocation_count, offset);
+ offset += SliceVectors(data, reloc_shifts, params.relocation_count, offset);
+ offset += SliceVectors(data, syncpt_increments, params.syncpoint_count, offset);
+ offset += SliceVectors(data, fence_thresholds, params.fence_count, offset);
auto& gpu = system.GPU();
if (gpu.UseNvdec()) {
@@ -108,72 +110,51 @@ NvResult nvhost_nvdec_common::Submit(DeviceFD fd, std::span<const u8> input, std
cmdlist.size() * sizeof(u32));
gpu.PushCommandBuffer(core.Host1xDeviceFile().fd_to_id[fd], cmdlist);
}
- std::memcpy(output.data(), &params, sizeof(IoctlSubmit));
// Some games expect command_buffers to be written back
- offset = sizeof(IoctlSubmit);
- offset += WriteVectors(output, command_buffers, offset);
- offset += WriteVectors(output, relocs, offset);
- offset += WriteVectors(output, reloc_shifts, offset);
- offset += WriteVectors(output, syncpt_increments, offset);
- offset += WriteVectors(output, fence_thresholds, offset);
+ offset = 0;
+ offset += WriteVectors(data, command_buffers, offset);
+ offset += WriteVectors(data, relocs, offset);
+ offset += WriteVectors(data, reloc_shifts, offset);
+ offset += WriteVectors(data, syncpt_increments, offset);
+ offset += WriteVectors(data, fence_thresholds, offset);
return NvResult::Success;
}
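Because WrapFixedVariable has already stripped the fixed IoctlSubmit struct, Submit's slicing now starts at offset 0. The layout of data, as implied by the slice order above:

    // [command_buffers x cmd_buffer_count]
    // [relocs x relocation_count][reloc_shifts x relocation_count]
    // [syncpt_increments x syncpoint_count][fence_thresholds x fence_count]
    //
    // The same regions are written back afterwards because, as noted above,
    // some games read the command_buffers region for results.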
-NvResult nvhost_nvdec_common::GetSyncpoint(std::span<const u8> input, std::span<u8> output) {
- IoctlGetSyncpoint params{};
- std::memcpy(&params, input.data(), sizeof(IoctlGetSyncpoint));
+NvResult nvhost_nvdec_common::GetSyncpoint(IoctlGetSyncpoint& params) {
LOG_DEBUG(Service_NVDRV, "called GetSyncpoint, id={}", params.param);
-
- // const u32 id{NvCore::SyncpointManager::channel_syncpoints[static_cast<u32>(channel_type)]};
params.value = channel_syncpoint;
- std::memcpy(output.data(), &params, sizeof(IoctlGetSyncpoint));
-
return NvResult::Success;
}
-NvResult nvhost_nvdec_common::GetWaitbase(std::span<const u8> input, std::span<u8> output) {
- IoctlGetWaitbase params{};
+NvResult nvhost_nvdec_common::GetWaitbase(IoctlGetWaitbase& params) {
LOG_CRITICAL(Service_NVDRV, "called WAITBASE");
- std::memcpy(&params, input.data(), sizeof(IoctlGetWaitbase));
params.value = 0; // Seems to be hard coded at 0
- std::memcpy(output.data(), &params, sizeof(IoctlGetWaitbase));
return NvResult::Success;
}
-NvResult nvhost_nvdec_common::MapBuffer(std::span<const u8> input, std::span<u8> output) {
- IoctlMapBuffer params{};
- std::memcpy(&params, input.data(), sizeof(IoctlMapBuffer));
- std::vector<MapBufferEntry> cmd_buffer_handles(params.num_entries);
-
- SliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer));
-
- for (auto& cmd_buffer : cmd_buffer_handles) {
- cmd_buffer.map_address = nvmap.PinHandle(cmd_buffer.map_handle);
+NvResult nvhost_nvdec_common::MapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries) {
+ const size_t num_entries = std::min(params.num_entries, static_cast<u32>(entries.size()));
+ for (size_t i = 0; i < num_entries; i++) {
+ entries[i].map_address = nvmap.PinHandle(entries[i].map_handle);
}
- std::memcpy(output.data(), &params, sizeof(IoctlMapBuffer));
- std::memcpy(output.data() + sizeof(IoctlMapBuffer), cmd_buffer_handles.data(),
- cmd_buffer_handles.size() * sizeof(MapBufferEntry));
return NvResult::Success;
}
-NvResult nvhost_nvdec_common::UnmapBuffer(std::span<const u8> input, std::span<u8> output) {
- IoctlMapBuffer params{};
- std::memcpy(&params, input.data(), sizeof(IoctlMapBuffer));
- std::vector<MapBufferEntry> cmd_buffer_handles(params.num_entries);
-
- SliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer));
- for (auto& cmd_buffer : cmd_buffer_handles) {
- nvmap.UnpinHandle(cmd_buffer.map_handle);
+NvResult nvhost_nvdec_common::UnmapBuffer(IoctlMapBuffer& params,
+ std::span<MapBufferEntry> entries) {
+ const size_t num_entries = std::min(params.num_entries, static_cast<u32>(entries.size()));
+ for (size_t i = 0; i < num_entries; i++) {
+ nvmap.UnpinHandle(entries[i].map_handle);
+ entries[i] = {};
}
- std::memset(output.data(), 0, output.size());
+ params = {};
return NvResult::Success;
}
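MapBuffer and UnmapBuffer now modify the typed entries span in place rather than allocating a scratch vector and re-serializing it, and the loop bound is clamped to what the payload actually contained:

    // Example: params.num_entries = 100 but the payload carries only two
    // MapBufferEntry records -> entries.size() == 2, so the loop runs twice.
    // Previously the scratch vector was sized from the untrusted count alone.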
-NvResult nvhost_nvdec_common::SetSubmitTimeout(std::span<const u8> input, std::span<u8> output) {
- std::memcpy(&submit_timeout, input.data(), input.size());
+NvResult nvhost_nvdec_common::SetSubmitTimeout(u32 timeout) {
LOG_WARNING(Service_NVDRV, "(STUBBED) called");
return NvResult::Success;
}
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
index 9bb573bfe..7ce748e18 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
@@ -107,13 +107,13 @@ protected:
static_assert(sizeof(IoctlMapBuffer) == 0x0C, "IoctlMapBuffer is incorrect size");
/// Ioctl command implementations
- NvResult SetNVMAPfd(std::span<const u8> input);
- NvResult Submit(DeviceFD fd, std::span<const u8> input, std::span<u8> output);
- NvResult GetSyncpoint(std::span<const u8> input, std::span<u8> output);
- NvResult GetWaitbase(std::span<const u8> input, std::span<u8> output);
- NvResult MapBuffer(std::span<const u8> input, std::span<u8> output);
- NvResult UnmapBuffer(std::span<const u8> input, std::span<u8> output);
- NvResult SetSubmitTimeout(std::span<const u8> input, std::span<u8> output);
+ NvResult SetNVMAPfd(IoctlSetNvmapFD&);
+ NvResult Submit(IoctlSubmit& params, std::span<u8> input, DeviceFD fd);
+ NvResult GetSyncpoint(IoctlGetSyncpoint& params);
+ NvResult GetWaitbase(IoctlGetWaitbase& params);
+ NvResult MapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries);
+ NvResult UnmapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries);
+ NvResult SetSubmitTimeout(u32 timeout);
Kernel::KEvent* QueryEvent(u32 event_id) override;
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp
index a05c8cdae..9e6b86458 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp
@@ -5,6 +5,7 @@
#include "common/assert.h"
#include "common/logging/log.h"
+#include "core/hle/service/nvdrv/devices/ioctl_serialization.h"
#include "core/hle/service/nvdrv/devices/nvhost_nvjpg.h"
namespace Service::Nvidia::Devices {
@@ -18,7 +19,7 @@ NvResult nvhost_nvjpg::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> in
case 'H':
switch (command.cmd) {
case 0x1:
- return SetNVMAPfd(input, output);
+ return WrapFixed(this, &nvhost_nvjpg::SetNVMAPfd, input, output);
default:
break;
}
@@ -46,9 +47,7 @@ NvResult nvhost_nvjpg::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> in
void nvhost_nvjpg::OnOpen(DeviceFD fd) {}
void nvhost_nvjpg::OnClose(DeviceFD fd) {}
-NvResult nvhost_nvjpg::SetNVMAPfd(std::span<const u8> input, std::span<u8> output) {
- IoctlSetNvmapFD params{};
- std::memcpy(&params, input.data(), input.size());
+NvResult nvhost_nvjpg::SetNVMAPfd(IoctlSetNvmapFD& params) {
LOG_DEBUG(Service_NVDRV, "called, fd={}", params.nvmap_fd);
nvmap_fd = params.nvmap_fd;
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h
index 5623e0d47..790c97f6a 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h
@@ -33,7 +33,7 @@ private:
s32_le nvmap_fd{};
- NvResult SetNVMAPfd(std::span<const u8> input, std::span<u8> output);
+ NvResult SetNVMAPfd(IoctlSetNvmapFD& params);
};
} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
index c0b8684c3..87f8d7c22 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
@@ -5,6 +5,7 @@
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/service/nvdrv/core/container.h"
+#include "core/hle/service/nvdrv/devices/ioctl_serialization.h"
#include "core/hle/service/nvdrv/devices/nvhost_vic.h"
#include "video_core/renderer_base.h"
@@ -25,16 +26,16 @@ NvResult nvhost_vic::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> inpu
if (!host1x_file.fd_to_id.contains(fd)) {
host1x_file.fd_to_id[fd] = host1x_file.vic_next_id++;
}
- return Submit(fd, input, output);
+ return WrapFixedVariable(this, &nvhost_vic::Submit, input, output, fd);
}
case 0x2:
- return GetSyncpoint(input, output);
+ return WrapFixed(this, &nvhost_vic::GetSyncpoint, input, output);
case 0x3:
- return GetWaitbase(input, output);
+ return WrapFixed(this, &nvhost_vic::GetWaitbase, input, output);
case 0x9:
- return MapBuffer(input, output);
+ return WrapFixedVariable(this, &nvhost_vic::MapBuffer, input, output);
case 0xa:
- return UnmapBuffer(input, output);
+ return WrapFixedVariable(this, &nvhost_vic::UnmapBuffer, input, output);
default:
break;
}
@@ -42,7 +43,7 @@ NvResult nvhost_vic::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> inpu
case 'H':
switch (command.cmd) {
case 0x1:
- return SetNVMAPfd(input);
+ return WrapFixed(this, &nvhost_vic::SetNVMAPfd, input, output);
default:
break;
}
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp
index 968eaa175..71b2e62ec 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp
@@ -13,6 +13,7 @@
#include "core/hle/kernel/k_process.h"
#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/core/nvmap.h"
+#include "core/hle/service/nvdrv/devices/ioctl_serialization.h"
#include "core/hle/service/nvdrv/devices/nvmap.h"
#include "core/memory.h"
@@ -31,17 +32,17 @@ NvResult nvmap::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input,
case 0x1:
switch (command.cmd) {
case 0x1:
- return IocCreate(input, output);
+ return WrapFixed(this, &nvmap::IocCreate, input, output);
case 0x3:
- return IocFromId(input, output);
+ return WrapFixed(this, &nvmap::IocFromId, input, output);
case 0x4:
- return IocAlloc(input, output);
+ return WrapFixed(this, &nvmap::IocAlloc, input, output);
case 0x5:
- return IocFree(input, output);
+ return WrapFixed(this, &nvmap::IocFree, input, output);
case 0x9:
- return IocParam(input, output);
+ return WrapFixed(this, &nvmap::IocParam, input, output);
case 0xe:
- return IocGetId(input, output);
+ return WrapFixed(this, &nvmap::IocGetId, input, output);
default:
break;
}
@@ -69,9 +70,7 @@ NvResult nvmap::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, st
void nvmap::OnOpen(DeviceFD fd) {}
void nvmap::OnClose(DeviceFD fd) {}
-NvResult nvmap::IocCreate(std::span<const u8> input, std::span<u8> output) {
- IocCreateParams params;
- std::memcpy(&params, input.data(), sizeof(params));
+NvResult nvmap::IocCreate(IocCreateParams& params) {
LOG_DEBUG(Service_NVDRV, "called, size=0x{:08X}", params.size);
std::shared_ptr<NvCore::NvMap::Handle> handle_description{};
@@ -85,13 +84,10 @@ NvResult nvmap::IocCreate(std::span<const u8> input, std::span<u8> output) {
params.handle = handle_description->id;
LOG_DEBUG(Service_NVDRV, "handle: {}, size: 0x{:X}", handle_description->id, params.size);
- std::memcpy(output.data(), &params, sizeof(params));
return NvResult::Success;
}
-NvResult nvmap::IocAlloc(std::span<const u8> input, std::span<u8> output) {
- IocAllocParams params;
- std::memcpy(&params, input.data(), sizeof(params));
+NvResult nvmap::IocAlloc(IocAllocParams& params) {
LOG_DEBUG(Service_NVDRV, "called, addr={:X}", params.address);
if (!params.handle) {
@@ -133,14 +129,10 @@ NvResult nvmap::IocAlloc(std::span<const u8> input, std::span<u8> output) {
handle_description->size,
Kernel::KMemoryPermission::None, true, false)
.IsSuccess());
- std::memcpy(output.data(), &params, sizeof(params));
return result;
}
-NvResult nvmap::IocGetId(std::span<const u8> input, std::span<u8> output) {
- IocGetIdParams params;
- std::memcpy(&params, input.data(), sizeof(params));
-
+NvResult nvmap::IocGetId(IocGetIdParams& params) {
LOG_DEBUG(Service_NVDRV, "called");
// See the comment in FromId for extra info on this function
@@ -157,14 +149,10 @@ NvResult nvmap::IocGetId(std::span<const u8> input, std::span<u8> output) {
}
params.id = handle_description->id;
- std::memcpy(output.data(), &params, sizeof(params));
return NvResult::Success;
}
-NvResult nvmap::IocFromId(std::span<const u8> input, std::span<u8> output) {
- IocFromIdParams params;
- std::memcpy(&params, input.data(), sizeof(params));
-
+NvResult nvmap::IocFromId(IocFromIdParams& params) {
LOG_DEBUG(Service_NVDRV, "called, id:{}", params.id);
// Handles and IDs are always the same value in nvmap however IDs can be used globally given the
@@ -188,16 +176,12 @@ NvResult nvmap::IocFromId(std::span<const u8> input, std::span<u8> output) {
return result;
}
params.handle = handle_description->id;
- std::memcpy(output.data(), &params, sizeof(params));
return NvResult::Success;
}
-NvResult nvmap::IocParam(std::span<const u8> input, std::span<u8> output) {
+NvResult nvmap::IocParam(IocParamParams& params) {
enum class ParamTypes { Size = 1, Alignment = 2, Base = 3, Heap = 4, Kind = 5, Compr = 6 };
- IocParamParams params;
- std::memcpy(&params, input.data(), sizeof(params));
-
LOG_DEBUG(Service_NVDRV, "called type={}", params.param);
if (!params.handle) {
@@ -237,14 +221,10 @@ NvResult nvmap::IocParam(std::span<const u8> input, std::span<u8> output) {
return NvResult::BadValue;
}
- std::memcpy(output.data(), &params, sizeof(params));
return NvResult::Success;
}
-NvResult nvmap::IocFree(std::span<const u8> input, std::span<u8> output) {
- IocFreeParams params;
- std::memcpy(&params, input.data(), sizeof(params));
-
+NvResult nvmap::IocFree(IocFreeParams& params) {
LOG_DEBUG(Service_NVDRV, "called");
if (!params.handle) {
@@ -267,7 +247,6 @@ NvResult nvmap::IocFree(std::span<const u8> input, std::span<u8> output) {
// This is possible when there's internal dups or other duplicates.
}
- std::memcpy(output.data(), &params, sizeof(params));
return NvResult::Success;
}
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.h b/src/core/hle/service/nvdrv/devices/nvmap.h
index 4c0cc71cd..049c11028 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.h
+++ b/src/core/hle/service/nvdrv/devices/nvmap.h
@@ -99,12 +99,12 @@ public:
};
static_assert(sizeof(IocGetIdParams) == 8, "IocGetIdParams has wrong size");
- NvResult IocCreate(std::span<const u8> input, std::span<u8> output);
- NvResult IocAlloc(std::span<const u8> input, std::span<u8> output);
- NvResult IocGetId(std::span<const u8> input, std::span<u8> output);
- NvResult IocFromId(std::span<const u8> input, std::span<u8> output);
- NvResult IocParam(std::span<const u8> input, std::span<u8> output);
- NvResult IocFree(std::span<const u8> input, std::span<u8> output);
+ NvResult IocCreate(IocCreateParams& params);
+ NvResult IocAlloc(IocAllocParams& params);
+ NvResult IocGetId(IocGetIdParams& params);
+ NvResult IocFromId(IocFromIdParams& params);
+ NvResult IocParam(IocParamParams& params);
+ NvResult IocFree(IocFreeParams& params);
private:
/// Id to use for the next handle that is created.
diff --git a/src/core/hle/service/nvnflinger/buffer_transform_flags.h b/src/core/hle/service/nvnflinger/buffer_transform_flags.h
index 67aa5dad6..ffe579718 100644
--- a/src/core/hle/service/nvnflinger/buffer_transform_flags.h
+++ b/src/core/hle/service/nvnflinger/buffer_transform_flags.h
@@ -3,6 +3,7 @@
#pragma once
+#include "common/common_funcs.h"
#include "common/common_types.h"
namespace Service::android {
@@ -21,5 +22,6 @@ enum class BufferTransformFlags : u32 {
/// Rotate source image 270 degrees clockwise
Rotate270 = 0x07,
};
+DECLARE_ENUM_FLAG_OPERATORS(BufferTransformFlags);
} // namespace Service::android
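DECLARE_ENUM_FLAG_OPERATORS comes from common/common_funcs.h, which is outside this excerpt; it presumably generates the usual bitwise operators for the scoped enum, along these lines:

    #include <type_traits>

    // Sketch of the generated operators (assumed):
    constexpr BufferTransformFlags operator|(BufferTransformFlags a, BufferTransformFlags b) {
        using T = std::underlying_type_t<BufferTransformFlags>;
        return static_cast<BufferTransformFlags>(static_cast<T>(a) | static_cast<T>(b));
    }
    constexpr BufferTransformFlags operator&(BufferTransformFlags a, BufferTransformFlags b) {
        using T = std::underlying_type_t<BufferTransformFlags>;
        return static_cast<BufferTransformFlags>(static_cast<T>(a) & static_cast<T>(b));
    }
    constexpr BufferTransformFlags operator~(BufferTransformFlags a) {
        using T = std::underlying_type_t<BufferTransformFlags>;
        return static_cast<BufferTransformFlags>(~static_cast<T>(a));
    }
    // ...plus the compound-assignment forms. This is what lets the new
    // NormalizeCrop code below write flags &= ~FlipH and test bits with
    // True(flags & FlipV).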
diff --git a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp
index 2e29bc848..6dc327b8b 100644
--- a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp
+++ b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp
@@ -71,24 +71,17 @@ Result AllocateIoForProcessAddressSpace(Common::ProcessAddress* out_map_address,
R_SUCCEED();
}
-template <typename T>
-std::span<u8> SerializeIoc(T& params) {
- return std::span(reinterpret_cast<u8*>(std::addressof(params)), sizeof(T));
-}
-
Result CreateNvMapHandle(u32* out_nv_map_handle, Nvidia::Devices::nvmap& nvmap, u32 size) {
// Create a handle.
- Nvidia::Devices::nvmap::IocCreateParams create_in_params{
+ Nvidia::Devices::nvmap::IocCreateParams create_params{
.size = size,
.handle = 0,
};
- Nvidia::Devices::nvmap::IocCreateParams create_out_params{};
- R_UNLESS(nvmap.IocCreate(SerializeIoc(create_in_params), SerializeIoc(create_out_params)) ==
- Nvidia::NvResult::Success,
+ R_UNLESS(nvmap.IocCreate(create_params) == Nvidia::NvResult::Success,
VI::ResultOperationFailed);
// Assign the output handle.
- *out_nv_map_handle = create_out_params.handle;
+ *out_nv_map_handle = create_params.handle;
// We succeeded.
R_SUCCEED();
@@ -96,13 +89,10 @@ Result CreateNvMapHandle(u32* out_nv_map_handle, Nvidia::Devices::nvmap& nvmap,
Result FreeNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle) {
// Free the handle.
- Nvidia::Devices::nvmap::IocFreeParams free_in_params{
+ Nvidia::Devices::nvmap::IocFreeParams free_params{
.handle = handle,
};
- Nvidia::Devices::nvmap::IocFreeParams free_out_params{};
- R_UNLESS(nvmap.IocFree(SerializeIoc(free_in_params), SerializeIoc(free_out_params)) ==
- Nvidia::NvResult::Success,
- VI::ResultOperationFailed);
+ R_UNLESS(nvmap.IocFree(free_params) == Nvidia::NvResult::Success, VI::ResultOperationFailed);
// We succeeded.
R_SUCCEED();
@@ -111,7 +101,7 @@ Result FreeNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle) {
Result AllocNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Common::ProcessAddress buffer,
u32 size) {
// Assign the allocated memory to the handle.
- Nvidia::Devices::nvmap::IocAllocParams alloc_in_params{
+ Nvidia::Devices::nvmap::IocAllocParams alloc_params{
.handle = handle,
.heap_mask = 0,
.flags = {},
@@ -119,10 +109,7 @@ Result AllocNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Common::Proce
.kind = 0,
.address = GetInteger(buffer),
};
- Nvidia::Devices::nvmap::IocAllocParams alloc_out_params{};
- R_UNLESS(nvmap.IocAlloc(SerializeIoc(alloc_in_params), SerializeIoc(alloc_out_params)) ==
- Nvidia::NvResult::Success,
- VI::ResultOperationFailed);
+ R_UNLESS(nvmap.IocAlloc(alloc_params) == Nvidia::NvResult::Success, VI::ResultOperationFailed);
// We succeeded.
R_SUCCEED();
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index fa5273402..84b60a928 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -41,7 +41,7 @@ struct Memory::Impl {
explicit Impl(Core::System& system_) : system{system_} {}
void SetCurrentPageTable(Kernel::KProcess& process, u32 core_id) {
- current_page_table = &process.GetPageTable().PageTableImpl();
+ current_page_table = &process.GetPageTable().GetImpl();
current_page_table->fastmem_arena = system.DeviceMemory().buffer.VirtualBasePointer();
const std::size_t address_space_width = process.GetPageTable().GetAddressSpaceWidth();
@@ -195,7 +195,7 @@ struct Memory::Impl {
bool WalkBlock(const Common::ProcessAddress addr, const std::size_t size, auto on_unmapped,
auto on_memory, auto on_rasterizer, auto increment) {
- const auto& page_table = system.ApplicationProcess()->GetPageTable().PageTableImpl();
+ const auto& page_table = system.ApplicationProcess()->GetPageTable().GetImpl();
std::size_t remaining_size = size;
std::size_t page_index = addr >> YUZU_PAGEBITS;
std::size_t page_offset = addr & YUZU_PAGEMASK;
@@ -826,7 +826,7 @@ void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress b
bool Memory::IsValidVirtualAddress(const Common::ProcessAddress vaddr) const {
const Kernel::KProcess& process = *system.ApplicationProcess();
- const auto& page_table = process.GetPageTable().PageTableImpl();
+ const auto& page_table = process.GetPageTable().GetImpl();
const size_t page = vaddr >> YUZU_PAGEBITS;
if (page >= page_table.pointers.size()) {
return false;
diff --git a/src/video_core/renderer_null/null_rasterizer.cpp b/src/video_core/renderer_null/null_rasterizer.cpp
index 65cd5aa06..4f1d5b548 100644
--- a/src/video_core/renderer_null/null_rasterizer.cpp
+++ b/src/video_core/renderer_null/null_rasterizer.cpp
@@ -3,6 +3,7 @@
#include "common/alignment.h"
#include "core/memory.h"
+#include "video_core/control/channel_state.h"
#include "video_core/host1x/host1x.h"
#include "video_core/memory_manager.h"
#include "video_core/renderer_null/null_rasterizer.h"
@@ -99,8 +100,14 @@ bool RasterizerNull::AccelerateDisplay(const Tegra::FramebufferConfig& config,
}
void RasterizerNull::LoadDiskResources(u64 title_id, std::stop_token stop_loading,
const VideoCore::DiskResourceLoadCallback& callback) {}
-void RasterizerNull::InitializeChannel(Tegra::Control::ChannelState& channel) {}
-void RasterizerNull::BindChannel(Tegra::Control::ChannelState& channel) {}
-void RasterizerNull::ReleaseChannel(s32 channel_id) {}
+void RasterizerNull::InitializeChannel(Tegra::Control::ChannelState& channel) {
+ CreateChannel(channel);
+}
+void RasterizerNull::BindChannel(Tegra::Control::ChannelState& channel) {
+ BindToChannel(channel.bind_id);
+}
+void RasterizerNull::ReleaseChannel(s32 channel_id) {
+ EraseChannel(channel_id);
+}
} // namespace Null
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.cpp b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
index 52fc142d1..66483a900 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.cpp
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
@@ -137,6 +137,56 @@ BlitScreen::BlitScreen(Core::Memory::Memory& cpu_memory_, Core::Frontend::EmuWin
BlitScreen::~BlitScreen() = default;
+static Common::Rectangle<f32> NormalizeCrop(const Tegra::FramebufferConfig& framebuffer,
+ const ScreenInfo& screen_info) {
+ f32 left, top, right, bottom;
+
+ if (!framebuffer.crop_rect.IsEmpty()) {
+ // If crop rectangle is not empty, apply properties from rectangle.
+ left = static_cast<f32>(framebuffer.crop_rect.left);
+ top = static_cast<f32>(framebuffer.crop_rect.top);
+ right = static_cast<f32>(framebuffer.crop_rect.right);
+ bottom = static_cast<f32>(framebuffer.crop_rect.bottom);
+ } else {
+ // Otherwise, fall back to framebuffer dimensions.
+ left = 0;
+ top = 0;
+ right = static_cast<f32>(framebuffer.width);
+ bottom = static_cast<f32>(framebuffer.height);
+ }
+
+ // Apply transformation flags.
+ auto framebuffer_transform_flags = framebuffer.transform_flags;
+
+ if (True(framebuffer_transform_flags & Service::android::BufferTransformFlags::FlipH)) {
+ // Switch left and right.
+ std::swap(left, right);
+ }
+ if (True(framebuffer_transform_flags & Service::android::BufferTransformFlags::FlipV)) {
+ // Switch top and bottom.
+ std::swap(top, bottom);
+ }
+
+ framebuffer_transform_flags &= ~Service::android::BufferTransformFlags::FlipH;
+ framebuffer_transform_flags &= ~Service::android::BufferTransformFlags::FlipV;
+ if (True(framebuffer_transform_flags)) {
+ UNIMPLEMENTED_MSG("Unsupported framebuffer_transform_flags={}",
+ static_cast<u32>(framebuffer_transform_flags));
+ }
+
+ // Get the screen properties.
+ const f32 screen_width = static_cast<f32>(screen_info.width);
+ const f32 screen_height = static_cast<f32>(screen_info.height);
+
+ // Normalize coordinate space.
+ left /= screen_width;
+ top /= screen_height;
+ right /= screen_width;
+ bottom /= screen_height;
+
+ return Common::Rectangle<f32>(left, top, right, bottom);
+}
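A worked example with hypothetical handheld-mode numbers: the game renders 1280x720 into a 1920x1080 buffer, so crop_rect = (0, 0, 1280, 720) while screen_info reports 1920x1080:

    // left   = 0    / 1920 = 0.0     right  = 1280 / 1920 ~ 0.667
    // top    = 0    / 1080 = 0.0     bottom =  720 / 1080 ~ 0.667
    //
    // If FlipV is set, top and bottom are swapped *before* normalizing, so the
    // result becomes (0.0, 0.667, 0.667, 0.0) and sampling runs bottom-to-top.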
+
void BlitScreen::Recreate() {
present_manager.WaitPresent();
scheduler.Finish();
@@ -354,17 +404,10 @@ void BlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer,
source_image_view = smaa->Draw(scheduler, image_index, source_image, source_image_view);
}
if (fsr) {
- auto crop_rect = framebuffer.crop_rect;
- if (crop_rect.GetWidth() == 0) {
- crop_rect.right = framebuffer.width;
- }
- if (crop_rect.GetHeight() == 0) {
- crop_rect.bottom = framebuffer.height;
- }
- crop_rect = crop_rect.Scale(Settings::values.resolution_info.up_factor);
- VkExtent2D fsr_input_size{
- .width = Settings::values.resolution_info.ScaleUp(framebuffer.width),
- .height = Settings::values.resolution_info.ScaleUp(framebuffer.height),
+ const auto crop_rect = NormalizeCrop(framebuffer, screen_info);
+ const VkExtent2D fsr_input_size{
+ .width = Settings::values.resolution_info.ScaleUp(screen_info.width),
+ .height = Settings::values.resolution_info.ScaleUp(screen_info.height),
};
VkImageView fsr_image_view =
fsr->Draw(scheduler, image_index, source_image_view, fsr_input_size, crop_rect);
@@ -1397,61 +1440,37 @@ void BlitScreen::SetUniformData(BufferData& data, const Layout::FramebufferLayou
void BlitScreen::SetVertexData(BufferData& data, const Tegra::FramebufferConfig& framebuffer,
const Layout::FramebufferLayout layout) const {
- const auto& framebuffer_transform_flags = framebuffer.transform_flags;
- const auto& framebuffer_crop_rect = framebuffer.crop_rect;
-
- static constexpr Common::Rectangle<f32> texcoords{0.f, 0.f, 1.f, 1.f};
- auto left = texcoords.left;
- auto right = texcoords.right;
-
- switch (framebuffer_transform_flags) {
- case Service::android::BufferTransformFlags::Unset:
- break;
- case Service::android::BufferTransformFlags::FlipV:
- // Flip the framebuffer vertically
- left = texcoords.right;
- right = texcoords.left;
- break;
- default:
- UNIMPLEMENTED_MSG("Unsupported framebuffer_transform_flags={}",
- static_cast<u32>(framebuffer_transform_flags));
- break;
- }
+ f32 left, top, right, bottom;
- UNIMPLEMENTED_IF(framebuffer_crop_rect.left != 0);
-
- f32 left_start{};
- if (framebuffer_crop_rect.Top() > 0) {
- left_start = static_cast<f32>(framebuffer_crop_rect.Top()) /
- static_cast<f32>(framebuffer_crop_rect.Bottom());
- }
- f32 scale_u = static_cast<f32>(framebuffer.width) / static_cast<f32>(screen_info.width);
- f32 scale_v = static_cast<f32>(framebuffer.height) / static_cast<f32>(screen_info.height);
- // Scale the output by the crop width/height. This is commonly used with 1280x720 rendering
- // (e.g. handheld mode) on a 1920x1080 framebuffer.
- if (!fsr) {
- if (framebuffer_crop_rect.GetWidth() > 0) {
- scale_u = static_cast<f32>(framebuffer_crop_rect.GetWidth()) /
- static_cast<f32>(screen_info.width);
- }
- if (framebuffer_crop_rect.GetHeight() > 0) {
- scale_v = static_cast<f32>(framebuffer_crop_rect.GetHeight()) /
- static_cast<f32>(screen_info.height);
- }
+ if (fsr) {
+ // FSR has already applied the crop, so we just want to render the image
+ // it has produced.
+ left = 0;
+ top = 0;
+ right = 1;
+ bottom = 1;
+ } else {
+ // Get the normalized crop rectangle.
+ const auto crop = NormalizeCrop(framebuffer, screen_info);
+
+ // Apply the crop.
+ left = crop.left;
+ top = crop.top;
+ right = crop.right;
+ bottom = crop.bottom;
}
+ // Map the coordinates to the screen.
const auto& screen = layout.screen;
const auto x = static_cast<f32>(screen.left);
const auto y = static_cast<f32>(screen.top);
const auto w = static_cast<f32>(screen.GetWidth());
const auto h = static_cast<f32>(screen.GetHeight());
- data.vertices[0] = ScreenRectVertex(x, y, texcoords.top * scale_u, left_start + left * scale_v);
- data.vertices[1] =
- ScreenRectVertex(x + w, y, texcoords.bottom * scale_u, left_start + left * scale_v);
- data.vertices[2] =
- ScreenRectVertex(x, y + h, texcoords.top * scale_u, left_start + right * scale_v);
- data.vertices[3] =
- ScreenRectVertex(x + w, y + h, texcoords.bottom * scale_u, left_start + right * scale_v);
+
+ data.vertices[0] = ScreenRectVertex(x, y, left, top);
+ data.vertices[1] = ScreenRectVertex(x + w, y, right, top);
+ data.vertices[2] = ScreenRectVertex(x, y + h, left, bottom);
+ data.vertices[3] = ScreenRectVertex(x + w, y + h, right, bottom);
}
void BlitScreen::CreateSMAA(VkExtent2D smaa_size) {
diff --git a/src/video_core/renderer_vulkan/vk_fsr.cpp b/src/video_core/renderer_vulkan/vk_fsr.cpp
index ce8f3f3c2..f7a05fbc0 100644
--- a/src/video_core/renderer_vulkan/vk_fsr.cpp
+++ b/src/video_core/renderer_vulkan/vk_fsr.cpp
@@ -34,7 +34,7 @@ FSR::FSR(const Device& device_, MemoryAllocator& memory_allocator_, size_t image
}
VkImageView FSR::Draw(Scheduler& scheduler, size_t image_index, VkImageView image_view,
- VkExtent2D input_image_extent, const Common::Rectangle<int>& crop_rect) {
+ VkExtent2D input_image_extent, const Common::Rectangle<f32>& crop_rect) {
UpdateDescriptorSet(image_index, image_view);
@@ -61,15 +61,21 @@ VkImageView FSR::Draw(Scheduler& scheduler, size_t image_index, VkImageView imag
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *easu_pipeline);
+ const f32 input_image_width = static_cast<f32>(input_image_extent.width);
+ const f32 input_image_height = static_cast<f32>(input_image_extent.height);
+ const f32 output_image_width = static_cast<f32>(output_size.width);
+ const f32 output_image_height = static_cast<f32>(output_size.height);
+ const f32 viewport_width = (crop_rect.right - crop_rect.left) * input_image_width;
+ const f32 viewport_x = crop_rect.left * input_image_width;
+ const f32 viewport_height = (crop_rect.bottom - crop_rect.top) * input_image_height;
+ const f32 viewport_y = crop_rect.top * input_image_height;
+
std::array<u32, 4 * 4> push_constants;
- FsrEasuConOffset(
- push_constants.data() + 0, push_constants.data() + 4, push_constants.data() + 8,
- push_constants.data() + 12,
-
- static_cast<f32>(crop_rect.GetWidth()), static_cast<f32>(crop_rect.GetHeight()),
- static_cast<f32>(input_image_extent.width), static_cast<f32>(input_image_extent.height),
- static_cast<f32>(output_size.width), static_cast<f32>(output_size.height),
- static_cast<f32>(crop_rect.left), static_cast<f32>(crop_rect.top));
+ FsrEasuConOffset(push_constants.data() + 0, push_constants.data() + 4,
+ push_constants.data() + 8, push_constants.data() + 12,
+
+ viewport_width, viewport_height, input_image_width, input_image_height,
+ output_image_width, output_image_height, viewport_x, viewport_y);
cmdbuf.PushConstants(*pipeline_layout, VK_SHADER_STAGE_COMPUTE_BIT, push_constants);
{
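Continuing the worked example from NormalizeCrop: with crop = (0, 0, 0.667, 0.667) and a 1920x1080 EASU input image (assuming a native 1x resolution scale), the derived viewport is:

    // viewport_x = 0.0   * 1920 = 0      viewport_width  = 0.667 * 1920 ~ 1280
    // viewport_y = 0.0   * 1080 = 0      viewport_height = 0.667 * 1080 ~  720
    //
    // FsrEasuConOffset therefore upscales only the valid 1280x720 region to
    // output_size instead of stretching the whole, partly unused, 1920x1080 image.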
diff --git a/src/video_core/renderer_vulkan/vk_fsr.h b/src/video_core/renderer_vulkan/vk_fsr.h
index 8bb9fc23a..3505c1416 100644
--- a/src/video_core/renderer_vulkan/vk_fsr.h
+++ b/src/video_core/renderer_vulkan/vk_fsr.h
@@ -17,7 +17,7 @@ public:
explicit FSR(const Device& device, MemoryAllocator& memory_allocator, size_t image_count,
VkExtent2D output_size);
VkImageView Draw(Scheduler& scheduler, size_t image_index, VkImageView image_view,
- VkExtent2D input_image_extent, const Common::Rectangle<int>& crop_rect);
+ VkExtent2D input_image_extent, const Common::Rectangle<f32>& crop_rect);
private:
void CreateDescriptorPool();
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 3983b2eb7..c0e8431e4 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -82,7 +82,7 @@ VkViewport GetViewportState(const Device& device, const Maxwell& regs, size_t in
}
if (y_negate) {
- y += height;
+ y += conv(static_cast<f32>(regs.surface_clip.height));
height = -height;
}
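The y-negation fix changes which extent the viewport is mirrored around. Illustrative (assumed) numbers: surface_clip.height = 720, viewport y = 0, height = 600:

    // Old: y = 0 + 600 = 600, height = -600 -> flipped around the viewport's
    //      own extent, correct only when the viewport covers the full surface.
    // New: y = 0 + 720 = 720, height = -600 -> flipped around the surface clip
    //      height, so a viewport smaller than the render target stays anchored
    //      where the guest GPU expects it.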
diff --git a/src/yuzu/configuration/config.h b/src/yuzu/configuration/config.h
index 74ec4f771..1589ba057 100644
--- a/src/yuzu/configuration/config.h
+++ b/src/yuzu/configuration/config.h
@@ -1,4 +1,4 @@
-// SPDX-FileCopyrightText: 2014 Citra Emulator Project
+// SPDX-FileCopyrightText: 2014 Citra Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/yuzu/configuration/configure_camera.h b/src/yuzu/configuration/configure_camera.h
index 9a90512b3..3d822da7b 100644
--- a/src/yuzu/configuration/configure_camera.h
+++ b/src/yuzu/configuration/configure_camera.h
@@ -1,4 +1,4 @@
-// Text : Copyright 2022 yuzu Emulator Project
+// Text : Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
#pragma once
diff --git a/src/yuzu/configuration/configure_input.cpp b/src/yuzu/configuration/configure_input.cpp
index 3dcad2701..02e23cce6 100644
--- a/src/yuzu/configuration/configure_input.cpp
+++ b/src/yuzu/configuration/configure_input.cpp
@@ -152,7 +152,7 @@ void ConfigureInput::Initialize(InputCommon::InputSubsystem* input_subsystem,
connect(player_controllers[0], &ConfigureInputPlayer::HandheldStateChanged,
[this](bool is_handheld) { UpdateDockedState(is_handheld); });
- advanced = new ConfigureInputAdvanced(this);
+ advanced = new ConfigureInputAdvanced(hid_core, this);
ui->tabAdvanced->setLayout(new QHBoxLayout(ui->tabAdvanced));
ui->tabAdvanced->layout()->addWidget(advanced);
diff --git a/src/yuzu/configuration/configure_input.h b/src/yuzu/configuration/configure_input.h
index 136cd3a0a..beb503dae 100644
--- a/src/yuzu/configuration/configure_input.h
+++ b/src/yuzu/configuration/configure_input.h
@@ -1,4 +1,4 @@
-// SPDX-FileCopyrightText: 2016 Citra Emulator Project
+// SPDX-FileCopyrightText: 2016 Citra Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/yuzu/configuration/configure_input_advanced.cpp b/src/yuzu/configuration/configure_input_advanced.cpp
index 3cfd5d439..441cea3f6 100644
--- a/src/yuzu/configuration/configure_input_advanced.cpp
+++ b/src/yuzu/configuration/configure_input_advanced.cpp
@@ -4,11 +4,13 @@
#include <QColorDialog>
#include "common/settings.h"
#include "core/core.h"
+#include "core/hid/emulated_controller.h"
+#include "core/hid/hid_core.h"
#include "ui_configure_input_advanced.h"
#include "yuzu/configuration/configure_input_advanced.h"
-ConfigureInputAdvanced::ConfigureInputAdvanced(QWidget* parent)
- : QWidget(parent), ui(std::make_unique<Ui::ConfigureInputAdvanced>()) {
+ConfigureInputAdvanced::ConfigureInputAdvanced(Core::HID::HIDCore& hid_core_, QWidget* parent)
+ : QWidget(parent), ui(std::make_unique<Ui::ConfigureInputAdvanced>()), hid_core{hid_core_} {
ui->setupUi(this);
controllers_color_buttons = {{
@@ -123,6 +125,8 @@ void ConfigureInputAdvanced::ApplyConfiguration() {
player.button_color_left = colors[1];
player.body_color_right = colors[2];
player.button_color_right = colors[3];
+
+ hid_core.GetEmulatedControllerByIndex(player_idx)->ReloadColorsFromSettings();
}
Settings::values.debug_pad_enabled = ui->debug_enabled->isChecked();
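
ApplyConfiguration now pushes the edited colors straight into the emulated controllers, so they take effect without restarting emulation. The same pattern applied to every player at once might look like the sketch below; the loop bound of 8 matches the controllers_colors arrays declared in the header diff that follows:

    // Sketch only: propagate freshly saved color settings to all emulated
    // controllers through the injected HIDCore reference.
    #include <cstddef>
    #include "core/hid/emulated_controller.h"
    #include "core/hid/hid_core.h"

    void ReloadAllControllerColors(Core::HID::HIDCore& hid_core) {
        for (std::size_t player_idx = 0; player_idx < 8; ++player_idx) {
            hid_core.GetEmulatedControllerByIndex(player_idx)->ReloadColorsFromSettings();
        }
    }
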
diff --git a/src/yuzu/configuration/configure_input_advanced.h b/src/yuzu/configuration/configure_input_advanced.h
index fc1230284..41f822c4a 100644
--- a/src/yuzu/configuration/configure_input_advanced.h
+++ b/src/yuzu/configuration/configure_input_advanced.h
@@ -14,11 +14,15 @@ namespace Ui {
class ConfigureInputAdvanced;
}
+namespace Core::HID {
+class HIDCore;
+} // namespace Core::HID
+
class ConfigureInputAdvanced : public QWidget {
Q_OBJECT
public:
- explicit ConfigureInputAdvanced(QWidget* parent = nullptr);
+ explicit ConfigureInputAdvanced(Core::HID::HIDCore& hid_core_, QWidget* parent = nullptr);
~ConfigureInputAdvanced() override;
void ApplyConfiguration();
@@ -44,4 +48,6 @@ private:
std::array<std::array<QColor, 4>, 8> controllers_colors;
std::array<std::array<QPushButton*, 4>, 8> controllers_color_buttons;
+
+ Core::HID::HIDCore& hid_core;
};
diff --git a/src/yuzu/configuration/configure_input_player.h b/src/yuzu/configuration/configure_input_player.h
index d3255d2b4..fda09e925 100644
--- a/src/yuzu/configuration/configure_input_player.h
+++ b/src/yuzu/configuration/configure_input_player.h
@@ -1,4 +1,4 @@
-// SPDX-FileCopyrightText: 2016 Citra Emulator Project
+// SPDX-FileCopyrightText: 2016 Citra Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/yuzu/configuration/configure_per_game.h b/src/yuzu/configuration/configure_per_game.h
index 1a727f32c..cc2513001 100644
--- a/src/yuzu/configuration/configure_per_game.h
+++ b/src/yuzu/configuration/configure_per_game.h
@@ -1,4 +1,4 @@
-// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/yuzu/configuration/configure_profile_manager.cpp b/src/yuzu/configuration/configure_profile_manager.cpp
index a47089988..6d2219bf5 100644
--- a/src/yuzu/configuration/configure_profile_manager.cpp
+++ b/src/yuzu/configuration/configure_profile_manager.cpp
@@ -306,10 +306,10 @@ void ConfigureProfileManager::SetUserImage() {
return;
}
- // Some games crash when the profile image is too big. Resize any image bigger than 256x256
+ // Profile images must be 256x256; resize any image that does not match
QImage image(image_path);
- if (image.width() > 256 || image.height() > 256) {
- image = image.scaled(256, 256, Qt::KeepAspectRatio);
+ if (image.width() != 256 || image.height() != 256) {
+ image = image.scaled(256, 256, Qt::KeepAspectRatioByExpanding, Qt::SmoothTransformation);
if (!image.save(image_path)) {
QMessageBox::warning(this, tr("Error resizing user image"),
tr("Unable to resize image"));
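
Note that Qt::KeepAspectRatioByExpanding scales the image so both dimensions are at least 256, which can leave one dimension larger than 256 for non-square inputs. If an exactly square output were required, a center crop after scaling would be one way to guarantee it; the helper below is illustrative only and not part of this change:

    // Illustrative only: scale with KeepAspectRatioByExpanding, then
    // center-crop so the result is exactly 256x256.
    #include <QImage>

    QImage MakeProfileImage(const QImage& source) {
        const QImage scaled =
            source.scaled(256, 256, Qt::KeepAspectRatioByExpanding, Qt::SmoothTransformation);
        const int x = (scaled.width() - 256) / 2;
        const int y = (scaled.height() - 256) / 2;
        return scaled.copy(x, y, 256, 256);
    }
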
diff --git a/src/yuzu/configuration/configure_ringcon.h b/src/yuzu/configuration/configure_ringcon.h
index b23c27906..6fd95e2b8 100644
--- a/src/yuzu/configuration/configure_ringcon.h
+++ b/src/yuzu/configuration/configure_ringcon.h
@@ -1,4 +1,4 @@
-// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/yuzu/configuration/configure_tas.h b/src/yuzu/configuration/configure_tas.h
index 4a6b0ba4e..a91891906 100644
--- a/src/yuzu/configuration/configure_tas.h
+++ b/src/yuzu/configuration/configure_tas.h
@@ -1,4 +1,4 @@
-// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/yuzu/configuration/configure_touchscreen_advanced.h b/src/yuzu/configuration/configure_touchscreen_advanced.h
index 034dc0d46..b6fdffdc8 100644
--- a/src/yuzu/configuration/configure_touchscreen_advanced.h
+++ b/src/yuzu/configuration/configure_touchscreen_advanced.h
@@ -1,4 +1,4 @@
-// SPDX-FileCopyrightText: 2016 Citra Emulator Project
+// SPDX-FileCopyrightText: 2016 Citra Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp
index 1bf173efb..f077d7f9c 100644
--- a/src/yuzu/main.cpp
+++ b/src/yuzu/main.cpp
@@ -1908,7 +1908,11 @@ void GMainWindow::ConfigureFilesystemProvider(const std::string& filepath) {
void GMainWindow::BootGame(const QString& filename, u64 program_id, std::size_t program_index,
StartGameType type, AmLaunchType launch_type) {
LOG_INFO(Frontend, "yuzu starting...");
- StoreRecentFile(filename); // Put the filename on top of the list
+
+ if (program_id == 0 ||
+ program_id > static_cast<u64>(Service::AM::Applets::AppletProgramId::MaxProgramId)) {
+ StoreRecentFile(filename); // Put the filename on top of the list
+ }
// Save configurations
UpdateUISettings();
@@ -4273,7 +4277,7 @@ void GMainWindow::OnToggleStatusBar() {
}
void GMainWindow::OnAlbum() {
- constexpr u64 AlbumId = 0x010000000000100Dull;
+ constexpr u64 AlbumId = static_cast<u64>(Service::AM::Applets::AppletProgramId::PhotoViewer);
auto bis_system = system->GetFileSystemController().GetSystemNANDContents();
if (!bis_system) {
QMessageBox::warning(this, tr("No firmware available"),
@@ -4292,11 +4296,11 @@ void GMainWindow::OnAlbum() {
const auto filename = QString::fromStdString(album_nca->GetFullPath());
UISettings::values.roms_path = QFileInfo(filename).path();
- BootGame(filename);
+ BootGame(filename, AlbumId);
}
void GMainWindow::OnCabinet(Service::NFP::CabinetMode mode) {
- constexpr u64 CabinetId = 0x0100000000001002ull;
+ constexpr u64 CabinetId = static_cast<u64>(Service::AM::Applets::AppletProgramId::Cabinet);
auto bis_system = system->GetFileSystemController().GetSystemNANDContents();
if (!bis_system) {
QMessageBox::warning(this, tr("No firmware available"),
@@ -4316,11 +4320,11 @@ void GMainWindow::OnCabinet(Service::NFP::CabinetMode mode) {
const auto filename = QString::fromStdString(cabinet_nca->GetFullPath());
UISettings::values.roms_path = QFileInfo(filename).path();
- BootGame(filename);
+ BootGame(filename, CabinetId);
}
void GMainWindow::OnMiiEdit() {
- constexpr u64 MiiEditId = 0x0100000000001009ull;
+ constexpr u64 MiiEditId = static_cast<u64>(Service::AM::Applets::AppletProgramId::MiiEdit);
auto bis_system = system->GetFileSystemController().GetSystemNANDContents();
if (!bis_system) {
QMessageBox::warning(this, tr("No firmware available"),
@@ -4339,7 +4343,7 @@ void GMainWindow::OnMiiEdit() {
const auto filename = QString::fromStdString((mii_applet_nca->GetFullPath()));
UISettings::values.roms_path = QFileInfo(filename).path();
- BootGame(filename);
+ BootGame(filename, MiiEditId);
}
void GMainWindow::OnCaptureScreenshot() {
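
Across these main.cpp hunks, the hardcoded title ids are replaced with the AppletProgramId enum, and BootGame now receives the applet's program id so that launching an applet no longer pollutes the recent-files list. The new guard in BootGame stores the file only for regular games; a hypothetical helper (not in the patch) expressing the negated condition:

    // Hypothetical helper: true when the id belongs to a built-in applet
    // title, mirroring the range check added to BootGame above.
    bool IsAppletProgramId(u64 program_id) {
        return program_id != 0 &&
               program_id <= static_cast<u64>(Service::AM::Applets::AppletProgramId::MaxProgramId);
    }

BootGame calls StoreRecentFile only when this returns false, so the Album, Cabinet, and MiiEdit launches above are excluded.
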
diff --git a/src/yuzu/vk_device_info.cpp b/src/yuzu/vk_device_info.cpp
index 92f10d315..ab0d39c25 100644
--- a/src/yuzu/vk_device_info.cpp
+++ b/src/yuzu/vk_device_info.cpp
@@ -31,6 +31,7 @@ void PopulateRecords(std::vector<Record>& records, QWindow* window) try {
// Create a test window with a Vulkan surface type for checking present modes.
QWindow test_window(window);
test_window.setSurfaceType(QWindow::VulkanSurface);
+ test_window.create();
auto wsi = QtCommon::GetWindowSystemInfo(&test_window);
vk::InstanceDispatch dld;
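
The added QWindow::create() call forces Qt to allocate the native platform window; merely constructing the QWindow and setting its surface type does not create a surface, so present-mode queries against it could fail or return nothing. The pattern, using only Qt's own QWindow API:

    // Sketch only: the surface type alone is not enough; create() must be
    // called so the platform allocates the native window behind it.
    QWindow test_window(window); // "window" is the parent passed to PopulateRecords
    test_window.setSurfaceType(QWindow::VulkanSurface);
    test_window.create(); // allocates the native window/surface backing
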