Diffstat:
-rw-r--r--  CMakeLists.txt | 8
-rw-r--r--  src/core/file_sys/mode.h | 9
-rw-r--r--  src/core/file_sys/patch_manager.cpp | 4
-rw-r--r--  src/core/file_sys/registered_cache.cpp | 25
-rw-r--r--  src/core/file_sys/savedata_factory.cpp | 33
-rw-r--r--  src/core/file_sys/savedata_factory.h | 47
-rw-r--r--  src/core/file_sys/vfs_real.cpp | 30
-rw-r--r--  src/core/hle/service/acc/profile_manager.cpp | 43
-rw-r--r--  src/core/hle/service/am/am.cpp | 10
-rw-r--r--  src/core/hle/service/audio/audout_u.cpp | 2
-rw-r--r--  src/core/hle/service/filesystem/filesystem.cpp | 8
-rw-r--r--  src/core/hle/service/filesystem/filesystem.h | 6
-rw-r--r--  src/core/hle/service/filesystem/fsp_srv.cpp | 73
-rw-r--r--  src/core/hle/service/filesystem/fsp_srv.h | 6
-rw-r--r--  src/core/hle/service/ldr/ldr.cpp | 5
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp | 202
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h | 79
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvmap.cpp | 33
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvmap.h | 6
-rw-r--r--  src/core/hle/service/nvdrv/interface.cpp | 36
-rw-r--r--  src/core/hle/service/sm/sm.h | 5
-rw-r--r--  src/core/hle/service/vi/vi.cpp | 24
-rw-r--r--  src/core/memory/dmnt_cheat_vm.cpp | 226
-rw-r--r--  src/core/perf_stats.cpp | 20
-rw-r--r--  src/core/perf_stats.h | 11
-rw-r--r--  src/tests/core/core_timing.cpp | 14
-rw-r--r--  src/video_core/gpu.cpp | 2
-rw-r--r--  src/video_core/memory_manager.cpp | 532
-rw-r--r--  src/video_core/memory_manager.h | 172
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.cpp | 13
-rw-r--r--  src/video_core/renderer_vulkan/vk_blit_screen.cpp | 1
-rw-r--r--  src/video_core/renderer_vulkan/vk_device.cpp | 3
-rw-r--r--  src/video_core/renderer_vulkan/vk_pipeline_cache.cpp | 5
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.cpp | 9
-rw-r--r--  src/video_core/renderer_vulkan/vk_sampler_cache.cpp | 1
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.cpp | 2
-rw-r--r--  src/yuzu/bootmanager.cpp | 2
-rw-r--r--  src/yuzu/configuration/configure_input_player.cpp | 76
-rw-r--r--  src/yuzu/configuration/configure_mouse_advanced.cpp | 39
-rw-r--r--  src/yuzu/configuration/configure_ui.cpp | 4
-rw-r--r--  src/yuzu/game_list_worker.cpp | 4
-rw-r--r--  src/yuzu/main.cpp | 4
42 files changed, 896 insertions, 938 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 7a49318aa..3282ae9d4 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -159,15 +159,15 @@ macro(yuzu_find_packages)
# Capitalization matters here. We need the naming to match the generated paths from Conan
set(REQUIRED_LIBS
# Cmake Pkg Prefix Version Conan Pkg
- "Boost 1.71 boost/1.72.0"
- "Catch2 2.11 catch2/2.11.0"
+ "Boost 1.73 boost/1.73.0"
+ "Catch2 2.13 catch2/2.13.0"
"fmt 7.0 fmt/7.0.1"
# can't use until https://github.com/bincrafters/community/issues/1173
#"libzip 1.5 libzip/1.5.2@bincrafters/stable"
"lz4 1.8 lz4/1.9.2"
- "nlohmann_json 3.7 nlohmann_json/3.7.3"
+ "nlohmann_json 3.8 nlohmann_json/3.8.0"
"ZLIB 1.2 zlib/1.2.11"
- "zstd 1.4 zstd/1.4.4"
+ "zstd 1.4 zstd/1.4.5"
)
foreach(PACKAGE ${REQUIRED_LIBS})
diff --git a/src/core/file_sys/mode.h b/src/core/file_sys/mode.h
index c95205668..2b4f21073 100644
--- a/src/core/file_sys/mode.h
+++ b/src/core/file_sys/mode.h
@@ -4,6 +4,7 @@
#pragma once
+#include "common/common_funcs.h"
#include "common/common_types.h"
namespace FileSys {
@@ -11,13 +12,11 @@ namespace FileSys {
enum class Mode : u32 {
Read = 1,
Write = 2,
- ReadWrite = 3,
+ ReadWrite = Read | Write,
Append = 4,
- WriteAppend = 6,
+ WriteAppend = Write | Append,
};
-inline u32 operator&(Mode lhs, Mode rhs) {
- return static_cast<u32>(lhs) & static_cast<u32>(rhs);
-}
+DECLARE_ENUM_FLAG_OPERATORS(Mode)
} // namespace FileSys
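
Editor's note: DECLARE_ENUM_FLAG_OPERATORS(Mode) replaces the ad-hoc operator& above with type-safe bitwise operators. The real macro lives in common/common_funcs.h; the sketch below is an assumed equivalent, included only to show why the vfs_real.cpp hunks further down can write True(mode & Mode::Read) instead of comparing against 0.

#include <type_traits>

enum class Mode : unsigned { Read = 1, Write = 2, ReadWrite = Read | Write, Append = 4, WriteAppend = Write | Append };

// Bitwise AND/OR that keep the enum type instead of decaying to the underlying integer.
constexpr Mode operator&(Mode lhs, Mode rhs) {
    using T = std::underlying_type_t<Mode>;
    return static_cast<Mode>(static_cast<T>(lhs) & static_cast<T>(rhs));
}
constexpr Mode operator|(Mode lhs, Mode rhs) {
    using T = std::underlying_type_t<Mode>;
    return static_cast<Mode>(static_cast<T>(lhs) | static_cast<T>(rhs));
}

// True() converts a masked flag value back to bool; this mirrors how the
// vfs_real.cpp changes test individual flags after this commit.
constexpr bool True(Mode mode) {
    return static_cast<std::underlying_type_t<Mode>>(mode) != 0;
}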
diff --git a/src/core/file_sys/patch_manager.cpp b/src/core/file_sys/patch_manager.cpp
index c47ff863e..729dbb5f4 100644
--- a/src/core/file_sys/patch_manager.cpp
+++ b/src/core/file_sys/patch_manager.cpp
@@ -288,8 +288,8 @@ std::optional<std::vector<Core::Memory::CheatEntry>> ReadCheatFileFromFolder(
}
Core::Memory::TextCheatParser parser;
- return parser.Parse(
- system, std::string_view(reinterpret_cast<const char* const>(data.data()), data.size()));
+ return parser.Parse(system,
+ std::string_view(reinterpret_cast<const char*>(data.data()), data.size()));
}
} // Anonymous namespace
diff --git a/src/core/file_sys/registered_cache.cpp b/src/core/file_sys/registered_cache.cpp
index e94eed3b6..f831487dd 100644
--- a/src/core/file_sys/registered_cache.cpp
+++ b/src/core/file_sys/registered_cache.cpp
@@ -344,15 +344,18 @@ VirtualFile RegisteredCache::GetFileAtID(NcaID id) const {
static std::optional<NcaID> CheckMapForContentRecord(const std::map<u64, CNMT>& map, u64 title_id,
ContentRecordType type) {
- if (map.find(title_id) == map.end())
- return {};
-
- const auto& cnmt = map.at(title_id);
+ const auto cmnt_iter = map.find(title_id);
+ if (cmnt_iter == map.cend()) {
+ return std::nullopt;
+ }
- const auto iter = std::find_if(cnmt.GetContentRecords().begin(), cnmt.GetContentRecords().end(),
+ const auto& cnmt = cmnt_iter->second;
+ const auto& content_records = cnmt.GetContentRecords();
+ const auto iter = std::find_if(content_records.cbegin(), content_records.cend(),
[type](const ContentRecord& rec) { return rec.type == type; });
- if (iter == cnmt.GetContentRecords().end())
- return {};
+ if (iter == content_records.cend()) {
+ return std::nullopt;
+ }
return std::make_optional(iter->nca_id);
}
@@ -467,14 +470,16 @@ VirtualFile RegisteredCache::GetEntryUnparsed(u64 title_id, ContentRecordType ty
std::optional<u32> RegisteredCache::GetEntryVersion(u64 title_id) const {
const auto meta_iter = meta.find(title_id);
- if (meta_iter != meta.end())
+ if (meta_iter != meta.cend()) {
return meta_iter->second.GetTitleVersion();
+ }
const auto yuzu_meta_iter = yuzu_meta.find(title_id);
- if (yuzu_meta_iter != yuzu_meta.end())
+ if (yuzu_meta_iter != yuzu_meta.cend()) {
return yuzu_meta_iter->second.GetTitleVersion();
+ }
- return {};
+ return std::nullopt;
}
VirtualFile RegisteredCache::GetEntryRaw(u64 title_id, ContentRecordType type) const {
diff --git a/src/core/file_sys/savedata_factory.cpp b/src/core/file_sys/savedata_factory.cpp
index adfd2c1a4..ba4efee3a 100644
--- a/src/core/file_sys/savedata_factory.cpp
+++ b/src/core/file_sys/savedata_factory.cpp
@@ -17,23 +17,23 @@ constexpr char SAVE_DATA_SIZE_FILENAME[] = ".yuzu_save_size";
namespace {
-void PrintSaveDataDescriptorWarnings(SaveDataDescriptor meta) {
+void PrintSaveDataAttributeWarnings(SaveDataAttribute meta) {
if (meta.type == SaveDataType::SystemSaveData || meta.type == SaveDataType::SaveData) {
if (meta.zero_1 != 0) {
LOG_WARNING(Service_FS,
- "Possibly incorrect SaveDataDescriptor, type is "
+ "Possibly incorrect SaveDataAttribute, type is "
"SystemSaveData||SaveData but offset 0x28 is non-zero ({:016X}).",
meta.zero_1);
}
if (meta.zero_2 != 0) {
LOG_WARNING(Service_FS,
- "Possibly incorrect SaveDataDescriptor, type is "
+ "Possibly incorrect SaveDataAttribute, type is "
"SystemSaveData||SaveData but offset 0x30 is non-zero ({:016X}).",
meta.zero_2);
}
if (meta.zero_3 != 0) {
LOG_WARNING(Service_FS,
- "Possibly incorrect SaveDataDescriptor, type is "
+ "Possibly incorrect SaveDataAttribute, type is "
"SystemSaveData||SaveData but offset 0x38 is non-zero ({:016X}).",
meta.zero_3);
}
@@ -41,33 +41,32 @@ void PrintSaveDataDescriptorWarnings(SaveDataDescriptor meta) {
if (meta.type == SaveDataType::SystemSaveData && meta.title_id != 0) {
LOG_WARNING(Service_FS,
- "Possibly incorrect SaveDataDescriptor, type is SystemSaveData but title_id is "
+ "Possibly incorrect SaveDataAttribute, type is SystemSaveData but title_id is "
"non-zero ({:016X}).",
meta.title_id);
}
if (meta.type == SaveDataType::DeviceSaveData && meta.user_id != u128{0, 0}) {
LOG_WARNING(Service_FS,
- "Possibly incorrect SaveDataDescriptor, type is DeviceSaveData but user_id is "
+ "Possibly incorrect SaveDataAttribute, type is DeviceSaveData but user_id is "
"non-zero ({:016X}{:016X})",
meta.user_id[1], meta.user_id[0]);
}
}
-bool ShouldSaveDataBeAutomaticallyCreated(SaveDataSpaceId space, const SaveDataDescriptor& desc) {
- return desc.type == SaveDataType::CacheStorage || desc.type == SaveDataType::TemporaryStorage ||
+bool ShouldSaveDataBeAutomaticallyCreated(SaveDataSpaceId space, const SaveDataAttribute& attr) {
+ return attr.type == SaveDataType::CacheStorage || attr.type == SaveDataType::TemporaryStorage ||
(space == SaveDataSpaceId::NandUser && ///< Normal Save Data -- Current Title & User
- (desc.type == SaveDataType::SaveData || desc.type == SaveDataType::DeviceSaveData) &&
- desc.title_id == 0 && desc.save_id == 0);
+ (attr.type == SaveDataType::SaveData || attr.type == SaveDataType::DeviceSaveData) &&
+ attr.title_id == 0 && attr.save_id == 0);
}
} // Anonymous namespace
-std::string SaveDataDescriptor::DebugInfo() const {
- return fmt::format("[type={:02X}, title_id={:016X}, user_id={:016X}{:016X}, "
- "save_id={:016X}, "
+std::string SaveDataAttribute::DebugInfo() const {
+ return fmt::format("[title_id={:016X}, user_id={:016X}{:016X}, save_id={:016X}, type={:02X}, "
"rank={}, index={}]",
- static_cast<u8>(type), title_id, user_id[1], user_id[0], save_id,
+ title_id, user_id[1], user_id[0], save_id, static_cast<u8>(type),
static_cast<u8>(rank), index);
}
@@ -80,8 +79,8 @@ SaveDataFactory::SaveDataFactory(VirtualDir save_directory) : dir(std::move(save
SaveDataFactory::~SaveDataFactory() = default;
ResultVal<VirtualDir> SaveDataFactory::Create(SaveDataSpaceId space,
- const SaveDataDescriptor& meta) const {
- PrintSaveDataDescriptorWarnings(meta);
+ const SaveDataAttribute& meta) const {
+ PrintSaveDataAttributeWarnings(meta);
const auto save_directory =
GetFullPath(space, meta.type, meta.title_id, meta.user_id, meta.save_id);
@@ -98,7 +97,7 @@ ResultVal<VirtualDir> SaveDataFactory::Create(SaveDataSpaceId space,
}
ResultVal<VirtualDir> SaveDataFactory::Open(SaveDataSpaceId space,
- const SaveDataDescriptor& meta) const {
+ const SaveDataAttribute& meta) const {
const auto save_directory =
GetFullPath(space, meta.type, meta.title_id, meta.user_id, meta.save_id);
diff --git a/src/core/file_sys/savedata_factory.h b/src/core/file_sys/savedata_factory.h
index 991e57aa1..6625bbbd8 100644
--- a/src/core/file_sys/savedata_factory.h
+++ b/src/core/file_sys/savedata_factory.h
@@ -21,6 +21,7 @@ enum class SaveDataSpaceId : u8 {
TemporaryStorage = 3,
SdCardUser = 4,
ProperSystem = 100,
+ SafeMode = 101,
};
enum class SaveDataType : u8 {
@@ -30,28 +31,50 @@ enum class SaveDataType : u8 {
DeviceSaveData = 3,
TemporaryStorage = 4,
CacheStorage = 5,
+ SystemBcat = 6,
};
enum class SaveDataRank : u8 {
- Primary,
- Secondary,
+ Primary = 0,
+ Secondary = 1,
};
-struct SaveDataDescriptor {
- u64_le title_id;
+enum class SaveDataFlags : u32 {
+ None = (0 << 0),
+ KeepAfterResettingSystemSaveData = (1 << 0),
+ KeepAfterRefurbishment = (1 << 1),
+ KeepAfterResettingSystemSaveDataWithoutUserSaveData = (1 << 2),
+ NeedsSecureDelete = (1 << 3),
+};
+
+struct SaveDataAttribute {
+ u64 title_id;
u128 user_id;
- u64_le save_id;
+ u64 save_id;
SaveDataType type;
SaveDataRank rank;
- u16_le index;
+ u16 index;
INSERT_PADDING_BYTES(4);
- u64_le zero_1;
- u64_le zero_2;
- u64_le zero_3;
+ u64 zero_1;
+ u64 zero_2;
+ u64 zero_3;
std::string DebugInfo() const;
};
-static_assert(sizeof(SaveDataDescriptor) == 0x40, "SaveDataDescriptor has incorrect size.");
+static_assert(sizeof(SaveDataAttribute) == 0x40, "SaveDataAttribute has incorrect size.");
+
+struct SaveDataExtraData {
+ SaveDataAttribute attr;
+ u64 owner_id;
+ s64 timestamp;
+ SaveDataFlags flags;
+ INSERT_PADDING_BYTES(4);
+ s64 available_size;
+ s64 journal_size;
+ s64 commit_id;
+ std::array<u8, 0x190> unused;
+};
+static_assert(sizeof(SaveDataExtraData) == 0x200, "SaveDataExtraData has incorrect size.");
struct SaveDataSize {
u64 normal;
@@ -64,8 +87,8 @@ public:
explicit SaveDataFactory(VirtualDir dir);
~SaveDataFactory();
- ResultVal<VirtualDir> Create(SaveDataSpaceId space, const SaveDataDescriptor& meta) const;
- ResultVal<VirtualDir> Open(SaveDataSpaceId space, const SaveDataDescriptor& meta) const;
+ ResultVal<VirtualDir> Create(SaveDataSpaceId space, const SaveDataAttribute& meta) const;
+ ResultVal<VirtualDir> Open(SaveDataSpaceId space, const SaveDataAttribute& meta) const;
VirtualDir GetSaveDataSpaceDirectory(SaveDataSpaceId space) const;
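
Editor's note: as a quick arithmetic check on the renamed structs above (field sizes taken from the hunk, padding from INSERT_PADDING_BYTES(4)), the members add up to the sizes the static_asserts demand:

// SaveDataAttribute: title_id(8) + user_id(16) + save_id(8) + type(1) + rank(1)
// + index(2) + padding(4) + zero_1(8) + zero_2(8) + zero_3(8) = 64 = 0x40
static_assert(8 + 16 + 8 + 1 + 1 + 2 + 4 + 8 + 8 + 8 == 0x40);

// SaveDataExtraData: attr(0x40) + owner_id(8) + timestamp(8) + flags(4)
// + padding(4) + available_size(8) + journal_size(8) + commit_id(8) + unused(0x190) = 0x200
static_assert(0x40 + 8 + 8 + 4 + 4 + 8 + 8 + 8 + 0x190 == 0x200);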
diff --git a/src/core/file_sys/vfs_real.cpp b/src/core/file_sys/vfs_real.cpp
index 96ce5957c..0db0091f6 100644
--- a/src/core/file_sys/vfs_real.cpp
+++ b/src/core/file_sys/vfs_real.cpp
@@ -18,20 +18,22 @@ static std::string ModeFlagsToString(Mode mode) {
std::string mode_str;
// Calculate the correct open mode for the file.
- if (mode & Mode::Read && mode & Mode::Write) {
- if (mode & Mode::Append)
+ if (True(mode & Mode::Read) && True(mode & Mode::Write)) {
+ if (True(mode & Mode::Append)) {
mode_str = "a+";
- else
+ } else {
mode_str = "r+";
+ }
} else {
- if (mode & Mode::Read)
+ if (True(mode & Mode::Read)) {
mode_str = "r";
- else if (mode & Mode::Append)
+ } else if (True(mode & Mode::Append)) {
mode_str = "a";
- else if (mode & Mode::Write)
+ } else if (True(mode & Mode::Write)) {
mode_str = "w";
- else
+ } else {
UNREACHABLE_MSG("Invalid file open mode: {:02X}", static_cast<u8>(mode));
+ }
}
mode_str += "b";
@@ -73,8 +75,9 @@ VirtualFile RealVfsFilesystem::OpenFile(std::string_view path_, Mode perms) {
}
}
- if (!FileUtil::Exists(path) && (perms & Mode::WriteAppend) != 0)
+ if (!FileUtil::Exists(path) && True(perms & Mode::WriteAppend)) {
FileUtil::CreateEmptyFile(path);
+ }
auto backing = std::make_shared<FileUtil::IOFile>(path, ModeFlagsToString(perms).c_str());
cache[path] = backing;
@@ -247,11 +250,11 @@ std::shared_ptr<VfsDirectory> RealVfsFile::GetContainingDirectory() const {
}
bool RealVfsFile::IsWritable() const {
- return (perms & Mode::WriteAppend) != 0;
+ return True(perms & Mode::WriteAppend);
}
bool RealVfsFile::IsReadable() const {
- return (perms & Mode::ReadWrite) != 0;
+ return True(perms & Mode::ReadWrite);
}
std::size_t RealVfsFile::Read(u8* data, std::size_t length, std::size_t offset) const {
@@ -319,8 +322,9 @@ RealVfsDirectory::RealVfsDirectory(RealVfsFilesystem& base_, const std::string&
path_components(FileUtil::SplitPathComponents(path)),
parent_components(FileUtil::SliceVector(path_components, 0, path_components.size() - 1)),
perms(perms_) {
- if (!FileUtil::Exists(path) && perms & Mode::WriteAppend)
+ if (!FileUtil::Exists(path) && True(perms & Mode::WriteAppend)) {
FileUtil::CreateDir(path);
+ }
}
RealVfsDirectory::~RealVfsDirectory() = default;
@@ -371,11 +375,11 @@ std::vector<std::shared_ptr<VfsDirectory>> RealVfsDirectory::GetSubdirectories()
}
bool RealVfsDirectory::IsWritable() const {
- return (perms & Mode::WriteAppend) != 0;
+ return True(perms & Mode::WriteAppend);
}
bool RealVfsDirectory::IsReadable() const {
- return (perms & Mode::ReadWrite) != 0;
+ return True(perms & Mode::ReadWrite);
}
std::string RealVfsDirectory::GetName() const {
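
Editor's note: for reference, the fopen strings ModeFlagsToString produces after the True()-based rewrite above, derived by following the branches ("b" is always appended at the end):

// Mode::Read                              -> "rb"
// Mode::Write                             -> "wb"
// Mode::Append (with or without Write)    -> "ab"
// Mode::Read | Mode::Write                -> "r+b"
// Mode::Read | Mode::Write | Mode::Append -> "a+b"
// no Read/Write/Append flag set           -> UNREACHABLE_MSG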
diff --git a/src/core/hle/service/acc/profile_manager.cpp b/src/core/hle/service/acc/profile_manager.cpp
index eb8c81645..a98d57b5c 100644
--- a/src/core/hle/service/acc/profile_manager.cpp
+++ b/src/core/hle/service/acc/profile_manager.cpp
@@ -58,7 +58,7 @@ ProfileManager::~ProfileManager() {
/// internal management of the users profiles
std::optional<std::size_t> ProfileManager::AddToProfiles(const ProfileInfo& profile) {
if (user_count >= MAX_USERS) {
- return {};
+ return std::nullopt;
}
profiles[user_count] = profile;
return user_count++;
@@ -101,13 +101,14 @@ ResultCode ProfileManager::CreateNewUser(UUID uuid, const ProfileUsername& usern
[&uuid](const ProfileInfo& profile) { return uuid == profile.user_uuid; })) {
return ERROR_USER_ALREADY_EXISTS;
}
- ProfileInfo profile;
- profile.user_uuid = uuid;
- profile.username = username;
- profile.data = {};
- profile.creation_time = 0x0;
- profile.is_open = false;
- return AddUser(profile);
+
+ return AddUser({
+ .user_uuid = uuid,
+ .username = username,
+ .creation_time = 0,
+ .data = {},
+ .is_open = false,
+ });
}
/// Creates a new user on the system. This function allows a much simpler method of registration
@@ -126,7 +127,7 @@ ResultCode ProfileManager::CreateNewUser(UUID uuid, const std::string& username)
std::optional<UUID> ProfileManager::GetUser(std::size_t index) const {
if (index >= MAX_USERS) {
- return {};
+ return std::nullopt;
}
return profiles[index].user_uuid;
@@ -135,13 +136,13 @@ std::optional<UUID> ProfileManager::GetUser(std::size_t index) const {
/// Returns a users profile index based on their user id.
std::optional<std::size_t> ProfileManager::GetUserIndex(const UUID& uuid) const {
if (!uuid) {
- return {};
+ return std::nullopt;
}
const auto iter = std::find_if(profiles.begin(), profiles.end(),
[&uuid](const ProfileInfo& p) { return p.user_uuid == uuid; });
if (iter == profiles.end()) {
- return {};
+ return std::nullopt;
}
return static_cast<std::size_t>(std::distance(profiles.begin(), iter));
@@ -339,7 +340,13 @@ void ProfileManager::ParseUserSaveFile() {
continue;
}
- AddUser({user.uuid, user.username, user.timestamp, user.extra_data, false});
+ AddUser({
+ .user_uuid = user.uuid,
+ .username = user.username,
+ .creation_time = user.timestamp,
+ .data = user.extra_data,
+ .is_open = false,
+ });
}
std::stable_partition(profiles.begin(), profiles.end(),
@@ -350,11 +357,13 @@ void ProfileManager::WriteUserSaveFile() {
ProfileDataRaw raw{};
for (std::size_t i = 0; i < MAX_USERS; ++i) {
- raw.users[i].username = profiles[i].username;
- raw.users[i].uuid2 = profiles[i].user_uuid;
- raw.users[i].uuid = profiles[i].user_uuid;
- raw.users[i].timestamp = profiles[i].creation_time;
- raw.users[i].extra_data = profiles[i].data;
+ raw.users[i] = {
+ .uuid = profiles[i].user_uuid,
+ .uuid2 = profiles[i].user_uuid,
+ .timestamp = profiles[i].creation_time,
+ .username = profiles[i].username,
+ .extra_data = profiles[i].data,
+ };
}
const auto raw_path =
diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp
index ceed20609..55a1edf1a 100644
--- a/src/core/hle/service/am/am.cpp
+++ b/src/core/hle/service/am/am.cpp
@@ -1342,12 +1342,12 @@ void IApplicationFunctions::EnsureSaveData(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_AM, "called, uid={:016X}{:016X}", user_id[1], user_id[0]);
- FileSys::SaveDataDescriptor descriptor{};
- descriptor.title_id = system.CurrentProcess()->GetTitleID();
- descriptor.user_id = user_id;
- descriptor.type = FileSys::SaveDataType::SaveData;
+ FileSys::SaveDataAttribute attribute{};
+ attribute.title_id = system.CurrentProcess()->GetTitleID();
+ attribute.user_id = user_id;
+ attribute.type = FileSys::SaveDataType::SaveData;
const auto res = system.GetFileSystemController().CreateSaveData(
- FileSys::SaveDataSpaceId::NandUser, descriptor);
+ FileSys::SaveDataSpaceId::NandUser, attribute);
IPC::ResponseBuilder rb{ctx, 4};
rb.Push(res.Code());
diff --git a/src/core/hle/service/audio/audout_u.cpp b/src/core/hle/service/audio/audout_u.cpp
index 106e89743..dd80dd1dc 100644
--- a/src/core/hle/service/audio/audout_u.cpp
+++ b/src/core/hle/service/audio/audout_u.cpp
@@ -71,7 +71,7 @@ public:
stream = audio_core.OpenStream(system.CoreTiming(), audio_params.sample_rate,
audio_params.channel_count, std::move(unique_name),
- [=]() { buffer_event.writable->Signal(); });
+ [this] { buffer_event.writable->Signal(); });
}
private:
diff --git a/src/core/hle/service/filesystem/filesystem.cpp b/src/core/hle/service/filesystem/filesystem.cpp
index c66124998..4490f8e4c 100644
--- a/src/core/hle/service/filesystem/filesystem.cpp
+++ b/src/core/hle/service/filesystem/filesystem.cpp
@@ -311,7 +311,7 @@ ResultVal<FileSys::VirtualFile> FileSystemController::OpenRomFS(
}
ResultVal<FileSys::VirtualDir> FileSystemController::CreateSaveData(
- FileSys::SaveDataSpaceId space, const FileSys::SaveDataDescriptor& save_struct) const {
+ FileSys::SaveDataSpaceId space, const FileSys::SaveDataAttribute& save_struct) const {
LOG_TRACE(Service_FS, "Creating Save Data for space_id={:01X}, save_struct={}",
static_cast<u8>(space), save_struct.DebugInfo());
@@ -323,15 +323,15 @@ ResultVal<FileSys::VirtualDir> FileSystemController::CreateSaveData(
}
ResultVal<FileSys::VirtualDir> FileSystemController::OpenSaveData(
- FileSys::SaveDataSpaceId space, const FileSys::SaveDataDescriptor& descriptor) const {
+ FileSys::SaveDataSpaceId space, const FileSys::SaveDataAttribute& attribute) const {
LOG_TRACE(Service_FS, "Opening Save Data for space_id={:01X}, save_struct={}",
- static_cast<u8>(space), descriptor.DebugInfo());
+ static_cast<u8>(space), attribute.DebugInfo());
if (save_data_factory == nullptr) {
return FileSys::ERROR_ENTITY_NOT_FOUND;
}
- return save_data_factory->Open(space, descriptor);
+ return save_data_factory->Open(space, attribute);
}
ResultVal<FileSys::VirtualDir> FileSystemController::OpenSaveDataSpace(
diff --git a/src/core/hle/service/filesystem/filesystem.h b/src/core/hle/service/filesystem/filesystem.h
index 1b0a6a949..6dbbf0b2b 100644
--- a/src/core/hle/service/filesystem/filesystem.h
+++ b/src/core/hle/service/filesystem/filesystem.h
@@ -31,7 +31,7 @@ enum class SaveDataSpaceId : u8;
enum class SaveDataType : u8;
enum class StorageId : u8;
-struct SaveDataDescriptor;
+struct SaveDataAttribute;
struct SaveDataSize;
} // namespace FileSys
@@ -69,9 +69,9 @@ public:
ResultVal<FileSys::VirtualFile> OpenRomFS(u64 title_id, FileSys::StorageId storage_id,
FileSys::ContentRecordType type) const;
ResultVal<FileSys::VirtualDir> CreateSaveData(
- FileSys::SaveDataSpaceId space, const FileSys::SaveDataDescriptor& save_struct) const;
+ FileSys::SaveDataSpaceId space, const FileSys::SaveDataAttribute& save_struct) const;
ResultVal<FileSys::VirtualDir> OpenSaveData(
- FileSys::SaveDataSpaceId space, const FileSys::SaveDataDescriptor& save_struct) const;
+ FileSys::SaveDataSpaceId space, const FileSys::SaveDataAttribute& save_struct) const;
ResultVal<FileSys::VirtualDir> OpenSaveDataSpace(FileSys::SaveDataSpaceId space) const;
ResultVal<FileSys::VirtualDir> OpenSDMC() const;
ResultVal<FileSys::VirtualDir> OpenBISPartition(FileSys::BisPartitionId id) const;
diff --git a/src/core/hle/service/filesystem/fsp_srv.cpp b/src/core/hle/service/filesystem/fsp_srv.cpp
index 20c331b77..26fd87f58 100644
--- a/src/core/hle/service/filesystem/fsp_srv.cpp
+++ b/src/core/hle/service/filesystem/fsp_srv.cpp
@@ -696,8 +696,8 @@ FSP_SRV::FSP_SRV(FileSystemController& fsc, const Core::Reporter& reporter)
{67, nullptr, "FindSaveDataWithFilter"},
{68, nullptr, "OpenSaveDataInfoReaderBySaveDataFilter"},
{69, nullptr, "ReadSaveDataFileSystemExtraDataBySaveDataAttribute"},
- {70, nullptr, "WriteSaveDataFileSystemExtraDataBySaveDataAttribute"},
- {71, nullptr, "ReadSaveDataFileSystemExtraDataWithMaskBySaveDataAttribute"},
+ {70, &FSP_SRV::WriteSaveDataFileSystemExtraDataBySaveDataAttribute, "WriteSaveDataFileSystemExtraDataBySaveDataAttribute"},
+ {71, &FSP_SRV::ReadSaveDataFileSystemExtraDataWithMaskBySaveDataAttribute, "ReadSaveDataFileSystemExtraDataWithMaskBySaveDataAttribute"},
{80, nullptr, "OpenSaveDataMetaFile"},
{81, nullptr, "OpenSaveDataTransferManager"},
{82, nullptr, "OpenSaveDataTransferManagerVersion2"},
@@ -812,7 +812,7 @@ void FSP_SRV::OpenSdCardFileSystem(Kernel::HLERequestContext& ctx) {
void FSP_SRV::CreateSaveDataFileSystem(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
- auto save_struct = rp.PopRaw<FileSys::SaveDataDescriptor>();
+ auto save_struct = rp.PopRaw<FileSys::SaveDataAttribute>();
[[maybe_unused]] auto save_create_struct = rp.PopRaw<std::array<u8, 0x40>>();
u128 uid = rp.PopRaw<u128>();
@@ -826,17 +826,18 @@ void FSP_SRV::CreateSaveDataFileSystem(Kernel::HLERequestContext& ctx) {
}
void FSP_SRV::OpenSaveDataFileSystem(Kernel::HLERequestContext& ctx) {
- LOG_INFO(Service_FS, "called.");
+ IPC::RequestParser rp{ctx};
struct Parameters {
- FileSys::SaveDataSpaceId save_data_space_id;
- FileSys::SaveDataDescriptor descriptor;
+ FileSys::SaveDataSpaceId space_id;
+ FileSys::SaveDataAttribute attribute;
};
- IPC::RequestParser rp{ctx};
const auto parameters = rp.PopRaw<Parameters>();
- auto dir = fsc.OpenSaveData(parameters.save_data_space_id, parameters.descriptor);
+ LOG_INFO(Service_FS, "called.");
+
+ auto dir = fsc.OpenSaveData(parameters.space_id, parameters.attribute);
if (dir.Failed()) {
IPC::ResponseBuilder rb{ctx, 2, 0, 0};
rb.Push(FileSys::ERROR_ENTITY_NOT_FOUND);
@@ -844,13 +845,18 @@ void FSP_SRV::OpenSaveDataFileSystem(Kernel::HLERequestContext& ctx) {
}
FileSys::StorageId id;
- if (parameters.save_data_space_id == FileSys::SaveDataSpaceId::NandUser) {
+
+ switch (parameters.space_id) {
+ case FileSys::SaveDataSpaceId::NandUser:
id = FileSys::StorageId::NandUser;
- } else if (parameters.save_data_space_id == FileSys::SaveDataSpaceId::SdCardSystem ||
- parameters.save_data_space_id == FileSys::SaveDataSpaceId::SdCardUser) {
+ break;
+ case FileSys::SaveDataSpaceId::SdCardSystem:
+ case FileSys::SaveDataSpaceId::SdCardUser:
id = FileSys::StorageId::SdCard;
- } else {
+ break;
+ case FileSys::SaveDataSpaceId::NandSystem:
id = FileSys::StorageId::NandSystem;
+ break;
}
auto filesystem =
@@ -876,22 +882,31 @@ void FSP_SRV::OpenSaveDataInfoReaderBySaveDataSpaceId(Kernel::HLERequestContext&
rb.PushIpcInterface<ISaveDataInfoReader>(std::make_shared<ISaveDataInfoReader>(space, fsc));
}
-void FSP_SRV::SetGlobalAccessLogMode(Kernel::HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- log_mode = rp.PopEnum<LogMode>();
-
- LOG_DEBUG(Service_FS, "called, log_mode={:08X}", static_cast<u32>(log_mode));
+void FSP_SRV::WriteSaveDataFileSystemExtraDataBySaveDataAttribute(Kernel::HLERequestContext& ctx) {
+ LOG_WARNING(Service_FS, "(STUBBED) called.");
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
-void FSP_SRV::GetGlobalAccessLogMode(Kernel::HLERequestContext& ctx) {
- LOG_DEBUG(Service_FS, "called");
+void FSP_SRV::ReadSaveDataFileSystemExtraDataWithMaskBySaveDataAttribute(
+ Kernel::HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+
+ struct Parameters {
+ FileSys::SaveDataSpaceId space_id;
+ FileSys::SaveDataAttribute attribute;
+ };
+
+ const auto parameters = rp.PopRaw<Parameters>();
+ // Stub this to None for now, backend needs an impl to read/write the SaveDataExtraData
+ constexpr auto flags = static_cast<u32>(FileSys::SaveDataFlags::None);
+
+ LOG_WARNING(Service_FS, "(STUBBED) called, flags={}", flags);
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
- rb.PushEnum(log_mode);
+ rb.Push(flags);
}
void FSP_SRV::OpenDataStorageByCurrentProcess(Kernel::HLERequestContext& ctx) {
@@ -966,6 +981,24 @@ void FSP_SRV::OpenPatchDataStorageByCurrentProcess(Kernel::HLERequestContext& ct
rb.Push(FileSys::ERROR_ENTITY_NOT_FOUND);
}
+void FSP_SRV::SetGlobalAccessLogMode(Kernel::HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ log_mode = rp.PopEnum<LogMode>();
+
+ LOG_DEBUG(Service_FS, "called, log_mode={:08X}", static_cast<u32>(log_mode));
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(RESULT_SUCCESS);
+}
+
+void FSP_SRV::GetGlobalAccessLogMode(Kernel::HLERequestContext& ctx) {
+ LOG_DEBUG(Service_FS, "called");
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(RESULT_SUCCESS);
+ rb.PushEnum(log_mode);
+}
+
void FSP_SRV::OutputAccessLogToSdCard(Kernel::HLERequestContext& ctx) {
const auto raw = ctx.ReadBuffer();
auto log = Common::StringFromFixedZeroTerminatedBuffer(
diff --git a/src/core/hle/service/filesystem/fsp_srv.h b/src/core/hle/service/filesystem/fsp_srv.h
index dfb3e395b..4964e874e 100644
--- a/src/core/hle/service/filesystem/fsp_srv.h
+++ b/src/core/hle/service/filesystem/fsp_srv.h
@@ -43,11 +43,13 @@ private:
void OpenSaveDataFileSystem(Kernel::HLERequestContext& ctx);
void OpenReadOnlySaveDataFileSystem(Kernel::HLERequestContext& ctx);
void OpenSaveDataInfoReaderBySaveDataSpaceId(Kernel::HLERequestContext& ctx);
- void SetGlobalAccessLogMode(Kernel::HLERequestContext& ctx);
- void GetGlobalAccessLogMode(Kernel::HLERequestContext& ctx);
+ void WriteSaveDataFileSystemExtraDataBySaveDataAttribute(Kernel::HLERequestContext& ctx);
+ void ReadSaveDataFileSystemExtraDataWithMaskBySaveDataAttribute(Kernel::HLERequestContext& ctx);
void OpenDataStorageByCurrentProcess(Kernel::HLERequestContext& ctx);
void OpenDataStorageByDataId(Kernel::HLERequestContext& ctx);
void OpenPatchDataStorageByCurrentProcess(Kernel::HLERequestContext& ctx);
+ void SetGlobalAccessLogMode(Kernel::HLERequestContext& ctx);
+ void GetGlobalAccessLogMode(Kernel::HLERequestContext& ctx);
void OutputAccessLogToSdCard(Kernel::HLERequestContext& ctx);
void GetAccessLogVersionInfo(Kernel::HLERequestContext& ctx);
void OpenMultiCommitManager(Kernel::HLERequestContext& ctx);
diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp
index 64a526b9e..d8cd10e31 100644
--- a/src/core/hle/service/ldr/ldr.cpp
+++ b/src/core/hle/service/ldr/ldr.cpp
@@ -310,7 +310,7 @@ public:
ResultVal<VAddr> MapProcessCodeMemory(Kernel::Process* process, VAddr baseAddress,
u64 size) const {
- for (int retry{}; retry < MAXIMUM_MAP_RETRIES; retry++) {
+ for (std::size_t retry = 0; retry < MAXIMUM_MAP_RETRIES; retry++) {
auto& page_table{process->PageTable()};
const VAddr addr{GetRandomMapRegion(page_table, size)};
const ResultCode result{page_table.MapProcessCodeMemory(addr, baseAddress, size)};
@@ -331,8 +331,7 @@ public:
ResultVal<VAddr> MapNro(Kernel::Process* process, VAddr nro_addr, std::size_t nro_size,
VAddr bss_addr, std::size_t bss_size, std::size_t size) const {
-
- for (int retry{}; retry < MAXIMUM_MAP_RETRIES; retry++) {
+ for (std::size_t retry = 0; retry < MAXIMUM_MAP_RETRIES; retry++) {
auto& page_table{process->PageTable()};
VAddr addr{};
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
index 195421cc0..d4ba88147 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
@@ -16,11 +16,12 @@
#include "video_core/renderer_base.h"
namespace Service::Nvidia::Devices {
+
namespace NvErrCodes {
-enum {
- InvalidNmapHandle = -22,
-};
-}
+constexpr u32 Success{};
+constexpr u32 OutOfMemory{static_cast<u32>(-12)};
+constexpr u32 InvalidInput{static_cast<u32>(-22)};
+} // namespace NvErrCodes
nvhost_as_gpu::nvhost_as_gpu(Core::System& system, std::shared_ptr<nvmap> nvmap_dev)
: nvdevice(system), nvmap_dev(std::move(nvmap_dev)) {}
@@ -49,8 +50,9 @@ u32 nvhost_as_gpu::ioctl(Ioctl command, const std::vector<u8>& input, const std:
break;
}
- if (static_cast<IoctlCommand>(command.cmd.Value()) == IoctlCommand::IocRemapCommand)
+ if (static_cast<IoctlCommand>(command.cmd.Value()) == IoctlCommand::IocRemapCommand) {
return Remap(input, output);
+ }
UNIMPLEMENTED_MSG("Unimplemented ioctl command");
return 0;
@@ -59,6 +61,7 @@ u32 nvhost_as_gpu::ioctl(Ioctl command, const std::vector<u8>& input, const std:
u32 nvhost_as_gpu::InitalizeEx(const std::vector<u8>& input, std::vector<u8>& output) {
IoctlInitalizeEx params{};
std::memcpy(&params, input.data(), input.size());
+
LOG_WARNING(Service_NVDRV, "(STUBBED) called, big_page_size=0x{:X}", params.big_page_size);
return 0;
@@ -67,53 +70,61 @@ u32 nvhost_as_gpu::InitalizeEx(const std::vector<u8>& input, std::vector<u8>& ou
u32 nvhost_as_gpu::AllocateSpace(const std::vector<u8>& input, std::vector<u8>& output) {
IoctlAllocSpace params{};
std::memcpy(&params, input.data(), input.size());
+
LOG_DEBUG(Service_NVDRV, "called, pages={:X}, page_size={:X}, flags={:X}", params.pages,
params.page_size, params.flags);
- auto& gpu = system.GPU();
- const u64 size{static_cast<u64>(params.pages) * static_cast<u64>(params.page_size)};
- if (params.flags & 1) {
- params.offset = gpu.MemoryManager().AllocateSpace(params.offset, size, 1);
+ const auto size{static_cast<u64>(params.pages) * static_cast<u64>(params.page_size)};
+ if ((params.flags & AddressSpaceFlags::FixedOffset) != AddressSpaceFlags::None) {
+ params.offset = *system.GPU().MemoryManager().AllocateFixed(params.offset, size);
} else {
- params.offset = gpu.MemoryManager().AllocateSpace(size, params.align);
+ params.offset = system.GPU().MemoryManager().Allocate(size, params.align);
+ }
+
+ auto result{NvErrCodes::Success};
+ if (!params.offset) {
+ LOG_CRITICAL(Service_NVDRV, "allocation failed for size {}", size);
+ result = NvErrCodes::OutOfMemory;
}
std::memcpy(output.data(), &params, output.size());
- return 0;
+ return result;
}
u32 nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& output) {
- std::size_t num_entries = input.size() / sizeof(IoctlRemapEntry);
+ const auto num_entries = input.size() / sizeof(IoctlRemapEntry);
- LOG_WARNING(Service_NVDRV, "(STUBBED) called, num_entries=0x{:X}", num_entries);
+ LOG_DEBUG(Service_NVDRV, "called, num_entries=0x{:X}", num_entries);
+ auto result{NvErrCodes::Success};
std::vector<IoctlRemapEntry> entries(num_entries);
std::memcpy(entries.data(), input.data(), input.size());
- auto& gpu = system.GPU();
for (const auto& entry : entries) {
- LOG_WARNING(Service_NVDRV, "remap entry, offset=0x{:X} handle=0x{:X} pages=0x{:X}",
- entry.offset, entry.nvmap_handle, entry.pages);
- GPUVAddr offset = static_cast<GPUVAddr>(entry.offset) << 0x10;
- auto object = nvmap_dev->GetObject(entry.nvmap_handle);
+ LOG_DEBUG(Service_NVDRV, "remap entry, offset=0x{:X} handle=0x{:X} pages=0x{:X}",
+ entry.offset, entry.nvmap_handle, entry.pages);
+
+ const auto object{nvmap_dev->GetObject(entry.nvmap_handle)};
if (!object) {
- LOG_CRITICAL(Service_NVDRV, "nvmap {} is an invalid handle!", entry.nvmap_handle);
- std::memcpy(output.data(), entries.data(), output.size());
- return static_cast<u32>(NvErrCodes::InvalidNmapHandle);
+ LOG_CRITICAL(Service_NVDRV, "invalid nvmap_handle={:X}", entry.nvmap_handle);
+ result = NvErrCodes::InvalidInput;
+ break;
}
- ASSERT(object->status == nvmap::Object::Status::Allocated);
+ const auto offset{static_cast<GPUVAddr>(entry.offset) << 0x10};
+ const auto size{static_cast<u64>(entry.pages) << 0x10};
+ const auto map_offset{static_cast<u64>(entry.map_offset) << 0x10};
+ const auto addr{system.GPU().MemoryManager().Map(object->addr + map_offset, offset, size)};
- const u64 size = static_cast<u64>(entry.pages) << 0x10;
- ASSERT(size <= object->size);
- const u64 map_offset = static_cast<u64>(entry.map_offset) << 0x10;
-
- const GPUVAddr returned =
- gpu.MemoryManager().MapBufferEx(object->addr + map_offset, offset, size);
- ASSERT(returned == offset);
+ if (!addr) {
+ LOG_CRITICAL(Service_NVDRV, "map returned an invalid address!");
+ result = NvErrCodes::InvalidInput;
+ break;
+ }
}
+
std::memcpy(output.data(), entries.data(), output.size());
- return 0;
+ return result;
}
u32 nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& output) {
@@ -126,44 +137,76 @@ u32 nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& ou
params.flags, params.nvmap_handle, params.buffer_offset, params.mapping_size,
params.offset);
- if (!params.nvmap_handle) {
- return 0;
+ const auto object{nvmap_dev->GetObject(params.nvmap_handle)};
+ if (!object) {
+ LOG_CRITICAL(Service_NVDRV, "invalid nvmap_handle={:X}", params.nvmap_handle);
+ std::memcpy(output.data(), &params, output.size());
+ return NvErrCodes::InvalidInput;
}
- auto object = nvmap_dev->GetObject(params.nvmap_handle);
- ASSERT(object);
-
- // We can only map objects that have already been assigned a CPU address.
- ASSERT(object->status == nvmap::Object::Status::Allocated);
-
- ASSERT(params.buffer_offset == 0);
-
// The real nvservices doesn't make a distinction between handles and ids, and
// object can only have one handle and it will be the same as its id. Assert that this is the
// case to prevent unexpected behavior.
ASSERT(object->id == params.nvmap_handle);
-
auto& gpu = system.GPU();
- if (params.flags & 1) {
- params.offset = gpu.MemoryManager().MapBufferEx(object->addr, params.offset, object->size);
- } else {
- params.offset = gpu.MemoryManager().MapBufferEx(object->addr, object->size);
+ u64 page_size{params.page_size};
+ if (!page_size) {
+ page_size = object->align;
+ }
+
+ if ((params.flags & AddressSpaceFlags::Remap) != AddressSpaceFlags::None) {
+ if (const auto buffer_map{FindBufferMap(params.offset)}; buffer_map) {
+ const auto cpu_addr{static_cast<VAddr>(buffer_map->CpuAddr() + params.buffer_offset)};
+ const auto gpu_addr{static_cast<GPUVAddr>(params.offset + params.buffer_offset)};
+
+ if (!gpu.MemoryManager().Map(cpu_addr, gpu_addr, params.mapping_size)) {
+ LOG_CRITICAL(Service_NVDRV,
+ "remap failed, flags={:X}, nvmap_handle={:X}, buffer_offset={}, "
+ "mapping_size = {}, offset={}",
+ params.flags, params.nvmap_handle, params.buffer_offset,
+ params.mapping_size, params.offset);
+
+ std::memcpy(output.data(), &params, output.size());
+ return NvErrCodes::InvalidInput;
+ }
+
+ std::memcpy(output.data(), &params, output.size());
+ return NvErrCodes::Success;
+ } else {
+ LOG_CRITICAL(Service_NVDRV, "address not mapped offset={}", params.offset);
+
+ std::memcpy(output.data(), &params, output.size());
+ return NvErrCodes::InvalidInput;
+ }
}
- // Create a new mapping entry for this operation.
- ASSERT_MSG(buffer_mappings.find(params.offset) == buffer_mappings.end(),
- "Offset is already mapped");
+ // We can only map objects that have already been assigned a CPU address.
+ ASSERT(object->status == nvmap::Object::Status::Allocated);
+
+ const auto physical_address{object->addr + params.buffer_offset};
+ u64 size{params.mapping_size};
+ if (!size) {
+ size = object->size;
+ }
- BufferMapping mapping{};
- mapping.nvmap_handle = params.nvmap_handle;
- mapping.offset = params.offset;
- mapping.size = object->size;
+ const bool is_alloc{(params.flags & AddressSpaceFlags::FixedOffset) == AddressSpaceFlags::None};
+ if (is_alloc) {
+ params.offset = gpu.MemoryManager().MapAllocate(physical_address, size, page_size);
+ } else {
+ params.offset = gpu.MemoryManager().Map(physical_address, params.offset, size);
+ }
- buffer_mappings[params.offset] = mapping;
+ auto result{NvErrCodes::Success};
+ if (!params.offset) {
+ LOG_CRITICAL(Service_NVDRV, "failed to map size={}", size);
+ result = NvErrCodes::InvalidInput;
+ } else {
+ AddBufferMap(params.offset, size, physical_address, is_alloc);
+ }
std::memcpy(output.data(), &params, output.size());
- return 0;
+ return result;
}
u32 nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output) {
@@ -172,24 +215,20 @@ u32 nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& ou
LOG_DEBUG(Service_NVDRV, "called, offset=0x{:X}", params.offset);
- const auto itr = buffer_mappings.find(params.offset);
- if (itr == buffer_mappings.end()) {
- LOG_WARNING(Service_NVDRV, "Tried to unmap an invalid offset 0x{:X}", params.offset);
- // Hardware tests shows that unmapping an already unmapped buffer always returns successful
- // and doesn't fail.
- return 0;
+ if (const auto size{RemoveBufferMap(params.offset)}; size) {
+ system.GPU().MemoryManager().Unmap(params.offset, *size);
+ } else {
+ LOG_ERROR(Service_NVDRV, "invalid offset=0x{:X}", params.offset);
}
- params.offset = system.GPU().MemoryManager().UnmapBuffer(params.offset, itr->second.size);
- buffer_mappings.erase(itr->second.offset);
-
std::memcpy(output.data(), &params, output.size());
- return 0;
+ return NvErrCodes::Success;
}
u32 nvhost_as_gpu::BindChannel(const std::vector<u8>& input, std::vector<u8>& output) {
IoctlBindChannel params{};
std::memcpy(&params, input.data(), input.size());
+
LOG_DEBUG(Service_NVDRV, "called, fd={:X}", params.fd);
channel = params.fd;
@@ -199,6 +238,7 @@ u32 nvhost_as_gpu::BindChannel(const std::vector<u8>& input, std::vector<u8>& ou
u32 nvhost_as_gpu::GetVARegions(const std::vector<u8>& input, std::vector<u8>& output) {
IoctlGetVaRegions params{};
std::memcpy(&params, input.data(), input.size());
+
LOG_WARNING(Service_NVDRV, "(STUBBED) called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
params.buf_size);
@@ -210,9 +250,43 @@ u32 nvhost_as_gpu::GetVARegions(const std::vector<u8>& input, std::vector<u8>& o
params.regions[1].offset = 0x04000000;
params.regions[1].page_size = 0x10000;
params.regions[1].pages = 0x1bffff;
+
// TODO(ogniK): This probably can stay stubbed but should add support way way later
+
std::memcpy(output.data(), &params, output.size());
return 0;
}
+std::optional<nvhost_as_gpu::BufferMap> nvhost_as_gpu::FindBufferMap(GPUVAddr gpu_addr) const {
+ const auto end{buffer_mappings.upper_bound(gpu_addr)};
+ for (auto iter{buffer_mappings.begin()}; iter != end; ++iter) {
+ if (gpu_addr >= iter->second.StartAddr() && gpu_addr < iter->second.EndAddr()) {
+ return iter->second;
+ }
+ }
+
+ return {};
+}
+
+void nvhost_as_gpu::AddBufferMap(GPUVAddr gpu_addr, std::size_t size, VAddr cpu_addr,
+ bool is_allocated) {
+ buffer_mappings[gpu_addr] = {gpu_addr, size, cpu_addr, is_allocated};
+}
+
+std::optional<std::size_t> nvhost_as_gpu::RemoveBufferMap(GPUVAddr gpu_addr) {
+ if (const auto iter{buffer_mappings.find(gpu_addr)}; iter != buffer_mappings.end()) {
+ std::size_t size{};
+
+ if (iter->second.IsAllocated()) {
+ size = iter->second.Size();
+ }
+
+ buffer_mappings.erase(iter);
+
+ return size;
+ }
+
+ return {};
+}
+
} // namespace Service::Nvidia::Devices
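
Editor's note: the new NvErrCodes constants above store negative errno-style results in a u32, matching the usual Linux values (ENOMEM = 12, EINVAL = 22). A standalone check of the two's-complement representation produced by the static_casts (illustrative, not yuzu code):

#include <cstdint>

constexpr std::uint32_t OutOfMemory = static_cast<std::uint32_t>(-12);  // -ENOMEM
constexpr std::uint32_t InvalidInput = static_cast<std::uint32_t>(-22); // -EINVAL

static_assert(OutOfMemory == 0xFFFFFFF4);
static_assert(InvalidInput == 0xFFFFFFEA);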
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
index f79fcc065..9a0cdff0c 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
@@ -4,9 +4,12 @@
#pragma once
+#include <map>
#include <memory>
-#include <unordered_map>
+#include <optional>
#include <vector>
+
+#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
#include "core/hle/service/nvdrv/devices/nvdevice.h"
@@ -15,6 +18,13 @@ namespace Service::Nvidia::Devices {
class nvmap;
+enum class AddressSpaceFlags : u32 {
+ None = 0x0,
+ FixedOffset = 0x1,
+ Remap = 0x100,
+};
+DECLARE_ENUM_FLAG_OPERATORS(AddressSpaceFlags);
+
class nvhost_as_gpu final : public nvdevice {
public:
explicit nvhost_as_gpu(Core::System& system, std::shared_ptr<nvmap> nvmap_dev);
@@ -25,6 +35,45 @@ public:
IoctlVersion version) override;
private:
+ class BufferMap final {
+ public:
+ constexpr BufferMap() = default;
+
+ constexpr BufferMap(GPUVAddr start_addr, std::size_t size)
+ : start_addr{start_addr}, end_addr{start_addr + size} {}
+
+ constexpr BufferMap(GPUVAddr start_addr, std::size_t size, VAddr cpu_addr,
+ bool is_allocated)
+ : start_addr{start_addr}, end_addr{start_addr + size}, cpu_addr{cpu_addr},
+ is_allocated{is_allocated} {}
+
+ constexpr VAddr StartAddr() const {
+ return start_addr;
+ }
+
+ constexpr VAddr EndAddr() const {
+ return end_addr;
+ }
+
+ constexpr std::size_t Size() const {
+ return end_addr - start_addr;
+ }
+
+ constexpr VAddr CpuAddr() const {
+ return cpu_addr;
+ }
+
+ constexpr bool IsAllocated() const {
+ return is_allocated;
+ }
+
+ private:
+ GPUVAddr start_addr{};
+ GPUVAddr end_addr{};
+ VAddr cpu_addr{};
+ bool is_allocated{};
+ };
+
enum class IoctlCommand : u32_le {
IocInitalizeExCommand = 0x40284109,
IocAllocateSpaceCommand = 0xC0184102,
@@ -49,7 +98,7 @@ private:
struct IoctlAllocSpace {
u32_le pages;
u32_le page_size;
- u32_le flags;
+ AddressSpaceFlags flags;
INSERT_PADDING_WORDS(1);
union {
u64_le offset;
@@ -69,18 +118,18 @@ private:
static_assert(sizeof(IoctlRemapEntry) == 20, "IoctlRemapEntry is incorrect size");
struct IoctlMapBufferEx {
- u32_le flags; // bit0: fixed_offset, bit2: cacheable
- u32_le kind; // -1 is default
+ AddressSpaceFlags flags; // bit0: fixed_offset, bit2: cacheable
+ u32_le kind; // -1 is default
u32_le nvmap_handle;
u32_le page_size; // 0 means don't care
- u64_le buffer_offset;
+ s64_le buffer_offset;
u64_le mapping_size;
- u64_le offset;
+ s64_le offset;
};
static_assert(sizeof(IoctlMapBufferEx) == 40, "IoctlMapBufferEx is incorrect size");
struct IoctlUnmapBuffer {
- u64_le offset;
+ s64_le offset;
};
static_assert(sizeof(IoctlUnmapBuffer) == 8, "IoctlUnmapBuffer is incorrect size");
@@ -106,15 +155,6 @@ private:
static_assert(sizeof(IoctlGetVaRegions) == 16 + sizeof(IoctlVaRegion) * 2,
"IoctlGetVaRegions is incorrect size");
- struct BufferMapping {
- u64 offset;
- u64 size;
- u32 nvmap_handle;
- };
-
- /// Map containing the nvmap object mappings in GPU memory.
- std::unordered_map<u64, BufferMapping> buffer_mappings;
-
u32 channel{};
u32 InitalizeEx(const std::vector<u8>& input, std::vector<u8>& output);
@@ -125,7 +165,14 @@ private:
u32 BindChannel(const std::vector<u8>& input, std::vector<u8>& output);
u32 GetVARegions(const std::vector<u8>& input, std::vector<u8>& output);
+ std::optional<BufferMap> FindBufferMap(GPUVAddr gpu_addr) const;
+ void AddBufferMap(GPUVAddr gpu_addr, std::size_t size, VAddr cpu_addr, bool is_allocated);
+ std::optional<std::size_t> RemoveBufferMap(GPUVAddr gpu_addr);
+
std::shared_ptr<nvmap> nvmap_dev;
+
+ // This is expected to be ordered, therefore we must use a map, not unordered_map
+ std::map<GPUVAddr, BufferMap> buffer_mappings;
};
} // namespace Service::Nvidia::Devices
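
Editor's note: the header above switches buffer_mappings from std::unordered_map to std::map because FindBufferMap needs key ordering to locate the mapping that contains a given GPU address. A minimal standalone illustration of that start-address-keyed lookup pattern (not the yuzu implementation itself):

#include <cstdint>
#include <map>
#include <optional>

struct Range {
    std::uint64_t start;
    std::uint64_t end; // one past the last mapped address
};

// Find the range whose [start, end) interval contains addr.
std::optional<Range> Find(const std::map<std::uint64_t, Range>& ranges, std::uint64_t addr) {
    auto it = ranges.upper_bound(addr); // first entry starting strictly after addr
    if (it == ranges.begin()) {
        return std::nullopt;
    }
    --it; // candidate: the last entry starting at or before addr
    if (addr >= it->second.start && addr < it->second.end) {
        return it->second;
    }
    return std::nullopt;
}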
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp
index 8c742316c..9436e16ad 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp
@@ -18,7 +18,12 @@ enum {
};
}
-nvmap::nvmap(Core::System& system) : nvdevice(system) {}
+nvmap::nvmap(Core::System& system) : nvdevice(system) {
+ // Handle 0 appears to be used when remapping, so we create a placeholder empty nvmap object to
+ // represent this.
+ CreateObject(0);
+}
+
nvmap::~nvmap() = default;
VAddr nvmap::GetObjectAddress(u32 handle) const {
@@ -50,6 +55,21 @@ u32 nvmap::ioctl(Ioctl command, const std::vector<u8>& input, const std::vector<
return 0;
}
+u32 nvmap::CreateObject(u32 size) {
+ // Create a new nvmap object and obtain a handle to it.
+ auto object = std::make_shared<Object>();
+ object->id = next_id++;
+ object->size = size;
+ object->status = Object::Status::Created;
+ object->refcount = 1;
+
+ const u32 handle = next_handle++;
+
+ handles.insert_or_assign(handle, std::move(object));
+
+ return handle;
+}
+
u32 nvmap::IocCreate(const std::vector<u8>& input, std::vector<u8>& output) {
IocCreateParams params;
std::memcpy(&params, input.data(), sizeof(params));
@@ -59,17 +79,8 @@ u32 nvmap::IocCreate(const std::vector<u8>& input, std::vector<u8>& output) {
LOG_ERROR(Service_NVDRV, "Size is 0");
return static_cast<u32>(NvErrCodes::InvalidValue);
}
- // Create a new nvmap object and obtain a handle to it.
- auto object = std::make_shared<Object>();
- object->id = next_id++;
- object->size = params.size;
- object->status = Object::Status::Created;
- object->refcount = 1;
-
- u32 handle = next_handle++;
- handles[handle] = std::move(object);
- params.handle = handle;
+ params.handle = CreateObject(params.size);
std::memcpy(output.data(), &params, sizeof(params));
return 0;
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.h b/src/core/hle/service/nvdrv/devices/nvmap.h
index 73c2e8809..84624be00 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.h
+++ b/src/core/hle/service/nvdrv/devices/nvmap.h
@@ -49,10 +49,10 @@ public:
private:
/// Id to use for the next handle that is created.
- u32 next_handle = 1;
+ u32 next_handle = 0;
/// Id to use for the next object that is created.
- u32 next_id = 1;
+ u32 next_id = 0;
/// Mapping of currently allocated handles to the objects they represent.
std::unordered_map<u32, std::shared_ptr<Object>> handles;
@@ -119,6 +119,8 @@ private:
};
static_assert(sizeof(IocGetIdParams) == 8, "IocGetIdParams has wrong size");
+ u32 CreateObject(u32 size);
+
u32 IocCreate(const std::vector<u8>& input, std::vector<u8>& output);
u32 IocAlloc(const std::vector<u8>& input, std::vector<u8>& output);
u32 IocGetId(const std::vector<u8>& input, std::vector<u8>& output);
diff --git a/src/core/hle/service/nvdrv/interface.cpp b/src/core/hle/service/nvdrv/interface.cpp
index deaf0808b..88fbfa9b0 100644
--- a/src/core/hle/service/nvdrv/interface.cpp
+++ b/src/core/hle/service/nvdrv/interface.cpp
@@ -60,24 +60,24 @@ void NVDRV::IoctlBase(Kernel::HLERequestContext& ctx, IoctlVersion version) {
if (ctrl.must_delay) {
ctrl.fresh_call = false;
- ctx.SleepClientThread("NVServices::DelayedResponse", ctrl.timeout,
- [=](std::shared_ptr<Kernel::Thread> thread,
- Kernel::HLERequestContext& ctx,
- Kernel::ThreadWakeupReason reason) {
- IoctlCtrl ctrl2{ctrl};
- std::vector<u8> tmp_output = output;
- std::vector<u8> tmp_output2 = output2;
- u32 result = nvdrv->Ioctl(fd, command, input, input2, tmp_output,
- tmp_output2, ctrl2, version);
- ctx.WriteBuffer(tmp_output, 0);
- if (version == IoctlVersion::Version3) {
- ctx.WriteBuffer(tmp_output2, 1);
- }
- IPC::ResponseBuilder rb{ctx, 3};
- rb.Push(RESULT_SUCCESS);
- rb.Push(result);
- },
- nvdrv->GetEventWriteable(ctrl.event_id));
+ ctx.SleepClientThread(
+ "NVServices::DelayedResponse", ctrl.timeout,
+ [=, this](std::shared_ptr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx_,
+ Kernel::ThreadWakeupReason reason) {
+ IoctlCtrl ctrl2{ctrl};
+ std::vector<u8> tmp_output = output;
+ std::vector<u8> tmp_output2 = output2;
+ const u32 ioctl_result = nvdrv->Ioctl(fd, command, input, input2, tmp_output,
+ tmp_output2, ctrl2, version);
+ ctx_.WriteBuffer(tmp_output, 0);
+ if (version == IoctlVersion::Version3) {
+ ctx_.WriteBuffer(tmp_output2, 1);
+ }
+ IPC::ResponseBuilder rb{ctx_, 3};
+ rb.Push(RESULT_SUCCESS);
+ rb.Push(ioctl_result);
+ },
+ nvdrv->GetEventWriteable(ctrl.event_id));
} else {
ctx.WriteBuffer(output);
if (version == IoctlVersion::Version3) {
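
Editor's note: the lambda in this hunk (like the DequeueBuffer lambda in vi.cpp below) now spells the enclosing-object capture explicitly as [=, this]; implicit capture of this through a plain [=] is deprecated in C++20. A trivial standalone illustration of the spelling:

struct Service {
    int fd = 3;

    auto MakeHandler(int command) {
        // Copies `command` by value and captures the object pointer explicitly,
        // instead of relying on [=] to pull in `this` implicitly.
        return [=, this] { return fd + command; };
    }
};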
diff --git a/src/core/hle/service/sm/sm.h b/src/core/hle/service/sm/sm.h
index b06d2f103..b526a94fe 100644
--- a/src/core/hle/service/sm/sm.h
+++ b/src/core/hle/service/sm/sm.h
@@ -9,6 +9,7 @@
#include <type_traits>
#include <unordered_map>
+#include "common/concepts.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/server_port.h"
@@ -56,10 +57,8 @@ public:
ResultVal<std::shared_ptr<Kernel::ClientPort>> GetServicePort(const std::string& name);
ResultVal<std::shared_ptr<Kernel::ClientSession>> ConnectToService(const std::string& name);
- template <typename T>
+ template <Common::IsBaseOf<Kernel::SessionRequestHandler> T>
std::shared_ptr<T> GetService(const std::string& service_name) const {
- static_assert(std::is_base_of_v<Kernel::SessionRequestHandler, T>,
- "Not a base of ServiceFrameworkBase");
auto service = registered_services.find(service_name);
if (service == registered_services.end()) {
LOG_DEBUG(Service, "Can't find service: {}", service_name);
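
Editor's note: the sm.h hunk replaces the static_assert with a constrained template parameter. The real Common::IsBaseOf concept is defined in common/concepts.h; the sketch below is only an assumed equivalent shape (illustrative names) showing how a type-constraint with one explicit argument reads:

#include <memory>
#include <type_traits>

template <typename Derived, typename Base>
concept IsBaseOf = std::is_base_of_v<Base, Derived>;

struct SessionRequestHandler {};
struct MyService : SessionRequestHandler {};

// Used as a type-constraint, the constrained parameter T becomes the first
// concept argument, so this requires SessionRequestHandler to be a base of T.
template <IsBaseOf<SessionRequestHandler> T>
std::shared_ptr<T> GetService() {
    return std::make_shared<T>();
}

// GetService<MyService>() compiles; GetService<int>() fails the constraint.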
diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp
index 825d11a3f..480d34725 100644
--- a/src/core/hle/service/vi/vi.cpp
+++ b/src/core/hle/service/vi/vi.cpp
@@ -548,8 +548,8 @@ private:
// Wait the current thread until a buffer becomes available
ctx.SleepClientThread(
"IHOSBinderDriver::DequeueBuffer", UINT64_MAX,
- [=](std::shared_ptr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx,
- Kernel::ThreadWakeupReason reason) {
+ [=, this](std::shared_ptr<Kernel::Thread> thread,
+ Kernel::HLERequestContext& ctx, Kernel::ThreadWakeupReason reason) {
// Repeat TransactParcel DequeueBuffer when a buffer is available
const auto guard = nv_flinger->Lock();
auto& buffer_queue = nv_flinger->FindBufferQueue(id);
@@ -1199,6 +1199,23 @@ private:
}
}
+ void GetIndirectLayerImageRequiredMemoryInfo(Kernel::HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto width = rp.Pop<u64>();
+ const auto height = rp.Pop<u64>();
+ LOG_DEBUG(Service_VI, "called width={}, height={}", width, height);
+
+ constexpr std::size_t base_size = 0x20000;
+ constexpr std::size_t alignment = 0x1000;
+ const auto texture_size = width * height * 4;
+ const auto out_size = (texture_size + base_size - 1) / base_size * base_size;
+
+ IPC::ResponseBuilder rb{ctx, 6};
+ rb.Push(RESULT_SUCCESS);
+ rb.Push(out_size);
+ rb.Push(alignment);
+ }
+
static ResultVal<ConvertedScaleMode> ConvertScalingModeImpl(NintendoScaleMode mode) {
switch (mode) {
case NintendoScaleMode::None:
@@ -1243,7 +1260,8 @@ IApplicationDisplayService::IApplicationDisplayService(
{2102, &IApplicationDisplayService::ConvertScalingMode, "ConvertScalingMode"},
{2450, nullptr, "GetIndirectLayerImageMap"},
{2451, nullptr, "GetIndirectLayerImageCropMap"},
- {2460, nullptr, "GetIndirectLayerImageRequiredMemoryInfo"},
+ {2460, &IApplicationDisplayService::GetIndirectLayerImageRequiredMemoryInfo,
+ "GetIndirectLayerImageRequiredMemoryInfo"},
{5202, &IApplicationDisplayService::GetDisplayVsyncEvent, "GetDisplayVsyncEvent"},
{5203, nullptr, "GetDisplayVsyncEventForDebug"},
};
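
Editor's note: the new GetIndirectLayerImageRequiredMemoryInfo above computes out_size by rounding the RGBA texture size up to a multiple of base_size (0x20000) and reports a 0x1000 alignment. A small worked check of that round-up expression (standalone sketch):

#include <cstdint>

constexpr std::uint64_t RoundUp(std::uint64_t value, std::uint64_t multiple) {
    return (value + multiple - 1) / multiple * multiple;
}

// A 1280x720 RGBA8 layer is 3,686,400 bytes; rounded up to the next multiple
// of 0x20000 (131,072 bytes) that becomes 29 * 0x20000 = 3,801,088 bytes.
static_assert(RoundUp(1280ULL * 720ULL * 4ULL, 0x20000) == 3'801'088);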
diff --git a/src/core/memory/dmnt_cheat_vm.cpp b/src/core/memory/dmnt_cheat_vm.cpp
index 2e7da23fe..48be80c12 100644
--- a/src/core/memory/dmnt_cheat_vm.cpp
+++ b/src/core/memory/dmnt_cheat_vm.cpp
@@ -313,30 +313,32 @@ bool DmntCheatVm::DecodeNextOpcode(CheatVmOpcode& out) {
switch (opcode_type) {
case CheatVmOpcodeType::StoreStatic: {
- StoreStaticOpcode store_static{};
// 0TMR00AA AAAAAAAA YYYYYYYY (YYYYYYYY)
// Read additional words.
const u32 second_dword = GetNextDword();
- store_static.bit_width = (first_dword >> 24) & 0xF;
- store_static.mem_type = static_cast<MemoryAccessType>((first_dword >> 20) & 0xF);
- store_static.offset_register = ((first_dword >> 16) & 0xF);
- store_static.rel_address =
- (static_cast<u64>(first_dword & 0xFF) << 32ul) | static_cast<u64>(second_dword);
- store_static.value = GetNextVmInt(store_static.bit_width);
- opcode.opcode = store_static;
+ const u32 bit_width = (first_dword >> 24) & 0xF;
+
+ opcode.opcode = StoreStaticOpcode{
+ .bit_width = bit_width,
+ .mem_type = static_cast<MemoryAccessType>((first_dword >> 20) & 0xF),
+ .offset_register = (first_dword >> 16) & 0xF,
+ .rel_address = (static_cast<u64>(first_dword & 0xFF) << 32) | second_dword,
+ .value = GetNextVmInt(bit_width),
+ };
} break;
case CheatVmOpcodeType::BeginConditionalBlock: {
- BeginConditionalOpcode begin_cond{};
// 1TMC00AA AAAAAAAA YYYYYYYY (YYYYYYYY)
// Read additional words.
const u32 second_dword = GetNextDword();
- begin_cond.bit_width = (first_dword >> 24) & 0xF;
- begin_cond.mem_type = static_cast<MemoryAccessType>((first_dword >> 20) & 0xF);
- begin_cond.cond_type = static_cast<ConditionalComparisonType>((first_dword >> 16) & 0xF);
- begin_cond.rel_address =
- (static_cast<u64>(first_dword & 0xFF) << 32ul) | static_cast<u64>(second_dword);
- begin_cond.value = GetNextVmInt(begin_cond.bit_width);
- opcode.opcode = begin_cond;
+ const u32 bit_width = (first_dword >> 24) & 0xF;
+
+ opcode.opcode = BeginConditionalOpcode{
+ .bit_width = bit_width,
+ .mem_type = static_cast<MemoryAccessType>((first_dword >> 20) & 0xF),
+ .cond_type = static_cast<ConditionalComparisonType>((first_dword >> 16) & 0xF),
+ .rel_address = (static_cast<u64>(first_dword & 0xFF) << 32) | second_dword,
+ .value = GetNextVmInt(bit_width),
+ };
} break;
case CheatVmOpcodeType::EndConditionalBlock: {
// 20000000
@@ -344,12 +346,14 @@ bool DmntCheatVm::DecodeNextOpcode(CheatVmOpcode& out) {
opcode.opcode = EndConditionalOpcode{};
} break;
case CheatVmOpcodeType::ControlLoop: {
- ControlLoopOpcode ctrl_loop{};
// 300R0000 VVVVVVVV
// 310R0000
// Parse register, whether loop start or loop end.
- ctrl_loop.start_loop = ((first_dword >> 24) & 0xF) == 0;
- ctrl_loop.reg_index = ((first_dword >> 20) & 0xF);
+ ControlLoopOpcode ctrl_loop{
+ .start_loop = ((first_dword >> 24) & 0xF) == 0,
+ .reg_index = (first_dword >> 20) & 0xF,
+ .num_iters = 0,
+ };
// Read number of iters if loop start.
if (ctrl_loop.start_loop) {
@@ -358,66 +362,65 @@ bool DmntCheatVm::DecodeNextOpcode(CheatVmOpcode& out) {
opcode.opcode = ctrl_loop;
} break;
case CheatVmOpcodeType::LoadRegisterStatic: {
- LoadRegisterStaticOpcode ldr_static{};
// 400R0000 VVVVVVVV VVVVVVVV
// Read additional words.
- ldr_static.reg_index = ((first_dword >> 16) & 0xF);
- ldr_static.value =
- (static_cast<u64>(GetNextDword()) << 32ul) | static_cast<u64>(GetNextDword());
- opcode.opcode = ldr_static;
+ opcode.opcode = LoadRegisterStaticOpcode{
+ .reg_index = (first_dword >> 16) & 0xF,
+ .value = (static_cast<u64>(GetNextDword()) << 32) | GetNextDword(),
+ };
} break;
case CheatVmOpcodeType::LoadRegisterMemory: {
- LoadRegisterMemoryOpcode ldr_memory{};
// 5TMRI0AA AAAAAAAA
// Read additional words.
const u32 second_dword = GetNextDword();
- ldr_memory.bit_width = (first_dword >> 24) & 0xF;
- ldr_memory.mem_type = static_cast<MemoryAccessType>((first_dword >> 20) & 0xF);
- ldr_memory.reg_index = ((first_dword >> 16) & 0xF);
- ldr_memory.load_from_reg = ((first_dword >> 12) & 0xF) != 0;
- ldr_memory.rel_address =
- (static_cast<u64>(first_dword & 0xFF) << 32ul) | static_cast<u64>(second_dword);
- opcode.opcode = ldr_memory;
+ opcode.opcode = LoadRegisterMemoryOpcode{
+ .bit_width = (first_dword >> 24) & 0xF,
+ .mem_type = static_cast<MemoryAccessType>((first_dword >> 20) & 0xF),
+ .reg_index = ((first_dword >> 16) & 0xF),
+ .load_from_reg = ((first_dword >> 12) & 0xF) != 0,
+ .rel_address = (static_cast<u64>(first_dword & 0xFF) << 32) | second_dword,
+ };
} break;
case CheatVmOpcodeType::StoreStaticToAddress: {
- StoreStaticToAddressOpcode str_static{};
// 6T0RIor0 VVVVVVVV VVVVVVVV
// Read additional words.
- str_static.bit_width = (first_dword >> 24) & 0xF;
- str_static.reg_index = ((first_dword >> 16) & 0xF);
- str_static.increment_reg = ((first_dword >> 12) & 0xF) != 0;
- str_static.add_offset_reg = ((first_dword >> 8) & 0xF) != 0;
- str_static.offset_reg_index = ((first_dword >> 4) & 0xF);
- str_static.value =
- (static_cast<u64>(GetNextDword()) << 32ul) | static_cast<u64>(GetNextDword());
- opcode.opcode = str_static;
+ opcode.opcode = StoreStaticToAddressOpcode{
+ .bit_width = (first_dword >> 24) & 0xF,
+ .reg_index = (first_dword >> 16) & 0xF,
+ .increment_reg = ((first_dword >> 12) & 0xF) != 0,
+ .add_offset_reg = ((first_dword >> 8) & 0xF) != 0,
+ .offset_reg_index = (first_dword >> 4) & 0xF,
+ .value = (static_cast<u64>(GetNextDword()) << 32) | GetNextDword(),
+ };
} break;
case CheatVmOpcodeType::PerformArithmeticStatic: {
- PerformArithmeticStaticOpcode perform_math_static{};
// 7T0RC000 VVVVVVVV
// Read additional words.
- perform_math_static.bit_width = (first_dword >> 24) & 0xF;
- perform_math_static.reg_index = ((first_dword >> 16) & 0xF);
- perform_math_static.math_type =
- static_cast<RegisterArithmeticType>((first_dword >> 12) & 0xF);
- perform_math_static.value = GetNextDword();
- opcode.opcode = perform_math_static;
+ opcode.opcode = PerformArithmeticStaticOpcode{
+ .bit_width = (first_dword >> 24) & 0xF,
+ .reg_index = ((first_dword >> 16) & 0xF),
+ .math_type = static_cast<RegisterArithmeticType>((first_dword >> 12) & 0xF),
+ .value = GetNextDword(),
+ };
} break;
case CheatVmOpcodeType::BeginKeypressConditionalBlock: {
- BeginKeypressConditionalOpcode begin_keypress_cond{};
// 8kkkkkkk
// Just parse the mask.
- begin_keypress_cond.key_mask = first_dword & 0x0FFFFFFF;
- opcode.opcode = begin_keypress_cond;
+ opcode.opcode = BeginKeypressConditionalOpcode{
+ .key_mask = first_dword & 0x0FFFFFFF,
+ };
} break;
case CheatVmOpcodeType::PerformArithmeticRegister: {
- PerformArithmeticRegisterOpcode perform_math_reg{};
// 9TCRSIs0 (VVVVVVVV (VVVVVVVV))
- perform_math_reg.bit_width = (first_dword >> 24) & 0xF;
- perform_math_reg.math_type = static_cast<RegisterArithmeticType>((first_dword >> 20) & 0xF);
- perform_math_reg.dst_reg_index = ((first_dword >> 16) & 0xF);
- perform_math_reg.src_reg_1_index = ((first_dword >> 12) & 0xF);
- perform_math_reg.has_immediate = ((first_dword >> 8) & 0xF) != 0;
+ PerformArithmeticRegisterOpcode perform_math_reg{
+ .bit_width = (first_dword >> 24) & 0xF,
+ .math_type = static_cast<RegisterArithmeticType>((first_dword >> 20) & 0xF),
+ .dst_reg_index = (first_dword >> 16) & 0xF,
+ .src_reg_1_index = (first_dword >> 12) & 0xF,
+ .src_reg_2_index = 0,
+ .has_immediate = ((first_dword >> 8) & 0xF) != 0,
+ .value = {},
+ };
if (perform_math_reg.has_immediate) {
perform_math_reg.src_reg_2_index = 0;
perform_math_reg.value = GetNextVmInt(perform_math_reg.bit_width);
@@ -427,7 +430,6 @@ bool DmntCheatVm::DecodeNextOpcode(CheatVmOpcode& out) {
opcode.opcode = perform_math_reg;
} break;
case CheatVmOpcodeType::StoreRegisterToAddress: {
- StoreRegisterToAddressOpcode str_register{};
// ATSRIOxa (aaaaaaaa)
// A = opcode 10
// T = bit width
@@ -439,20 +441,23 @@ bool DmntCheatVm::DecodeNextOpcode(CheatVmOpcode& out) {
// Relative Address
// x = offset register (for offset type 1), memory type (for offset type 3)
// a = relative address (for offset type 2+3)
- str_register.bit_width = (first_dword >> 24) & 0xF;
- str_register.str_reg_index = ((first_dword >> 20) & 0xF);
- str_register.addr_reg_index = ((first_dword >> 16) & 0xF);
- str_register.increment_reg = ((first_dword >> 12) & 0xF) != 0;
- str_register.ofs_type = static_cast<StoreRegisterOffsetType>(((first_dword >> 8) & 0xF));
- str_register.ofs_reg_index = ((first_dword >> 4) & 0xF);
+ StoreRegisterToAddressOpcode str_register{
+ .bit_width = (first_dword >> 24) & 0xF,
+ .str_reg_index = (first_dword >> 20) & 0xF,
+ .addr_reg_index = (first_dword >> 16) & 0xF,
+ .increment_reg = ((first_dword >> 12) & 0xF) != 0,
+ .ofs_type = static_cast<StoreRegisterOffsetType>(((first_dword >> 8) & 0xF)),
+ .mem_type = MemoryAccessType::MainNso,
+ .ofs_reg_index = (first_dword >> 4) & 0xF,
+ .rel_address = 0,
+ };
switch (str_register.ofs_type) {
case StoreRegisterOffsetType::None:
case StoreRegisterOffsetType::Reg:
// Nothing more to do
break;
case StoreRegisterOffsetType::Imm:
- str_register.rel_address =
- ((static_cast<u64>(first_dword & 0xF) << 32ul) | static_cast<u64>(GetNextDword()));
+ str_register.rel_address = (static_cast<u64>(first_dword & 0xF) << 32) | GetNextDword();
break;
case StoreRegisterOffsetType::MemReg:
str_register.mem_type = static_cast<MemoryAccessType>((first_dword >> 4) & 0xF);
@@ -460,8 +465,7 @@ bool DmntCheatVm::DecodeNextOpcode(CheatVmOpcode& out) {
case StoreRegisterOffsetType::MemImm:
case StoreRegisterOffsetType::MemImmReg:
str_register.mem_type = static_cast<MemoryAccessType>((first_dword >> 4) & 0xF);
- str_register.rel_address =
- ((static_cast<u64>(first_dword & 0xF) << 32ul) | static_cast<u64>(GetNextDword()));
+ str_register.rel_address = (static_cast<u64>(first_dword & 0xF) << 32) | GetNextDword();
break;
default:
str_register.ofs_type = StoreRegisterOffsetType::None;
@@ -470,7 +474,6 @@ bool DmntCheatVm::DecodeNextOpcode(CheatVmOpcode& out) {
opcode.opcode = str_register;
} break;
case CheatVmOpcodeType::BeginRegisterConditionalBlock: {
- BeginRegisterConditionalOpcode begin_reg_cond{};
// C0TcSX##
// C0TcS0Ma aaaaaaaa
// C0TcS1Mr
@@ -492,11 +495,19 @@ bool DmntCheatVm::DecodeNextOpcode(CheatVmOpcode& out) {
// r = offset register.
// X = other register.
// V = value.
- begin_reg_cond.bit_width = (first_dword >> 20) & 0xF;
- begin_reg_cond.cond_type =
- static_cast<ConditionalComparisonType>((first_dword >> 16) & 0xF);
- begin_reg_cond.val_reg_index = ((first_dword >> 12) & 0xF);
- begin_reg_cond.comp_type = static_cast<CompareRegisterValueType>((first_dword >> 8) & 0xF);
+
+ BeginRegisterConditionalOpcode begin_reg_cond{
+ .bit_width = (first_dword >> 20) & 0xF,
+ .cond_type = static_cast<ConditionalComparisonType>((first_dword >> 16) & 0xF),
+ .val_reg_index = (first_dword >> 12) & 0xF,
+ .comp_type = static_cast<CompareRegisterValueType>((first_dword >> 8) & 0xF),
+ .mem_type = MemoryAccessType::MainNso,
+ .addr_reg_index = 0,
+ .other_reg_index = 0,
+ .ofs_reg_index = 0,
+ .rel_address = 0,
+ .value = {},
+ };
switch (begin_reg_cond.comp_type) {
case CompareRegisterValueType::StaticValue:
@@ -508,26 +519,25 @@ bool DmntCheatVm::DecodeNextOpcode(CheatVmOpcode& out) {
case CompareRegisterValueType::MemoryRelAddr:
begin_reg_cond.mem_type = static_cast<MemoryAccessType>((first_dword >> 4) & 0xF);
begin_reg_cond.rel_address =
- ((static_cast<u64>(first_dword & 0xF) << 32ul) | static_cast<u64>(GetNextDword()));
+ (static_cast<u64>(first_dword & 0xF) << 32) | GetNextDword();
break;
case CompareRegisterValueType::MemoryOfsReg:
begin_reg_cond.mem_type = static_cast<MemoryAccessType>((first_dword >> 4) & 0xF);
begin_reg_cond.ofs_reg_index = (first_dword & 0xF);
break;
case CompareRegisterValueType::RegisterRelAddr:
- begin_reg_cond.addr_reg_index = ((first_dword >> 4) & 0xF);
+ begin_reg_cond.addr_reg_index = (first_dword >> 4) & 0xF;
begin_reg_cond.rel_address =
- ((static_cast<u64>(first_dword & 0xF) << 32ul) | static_cast<u64>(GetNextDword()));
+ (static_cast<u64>(first_dword & 0xF) << 32) | GetNextDword();
break;
case CompareRegisterValueType::RegisterOfsReg:
- begin_reg_cond.addr_reg_index = ((first_dword >> 4) & 0xF);
- begin_reg_cond.ofs_reg_index = (first_dword & 0xF);
+ begin_reg_cond.addr_reg_index = (first_dword >> 4) & 0xF;
+ begin_reg_cond.ofs_reg_index = first_dword & 0xF;
break;
}
opcode.opcode = begin_reg_cond;
} break;
case CheatVmOpcodeType::SaveRestoreRegister: {
- SaveRestoreRegisterOpcode save_restore_reg{};
// C10D0Sx0
// C1 = opcode 0xC1
// D = destination index.
@@ -535,36 +545,37 @@ bool DmntCheatVm::DecodeNextOpcode(CheatVmOpcode& out) {
// x = 3 if clearing reg, 2 if clearing saved value, 1 if saving a register, 0 if restoring
// a register.
// NOTE: If we add more save slots later, current encoding is backwards compatible.
- save_restore_reg.dst_index = (first_dword >> 16) & 0xF;
- save_restore_reg.src_index = (first_dword >> 8) & 0xF;
- save_restore_reg.op_type = static_cast<SaveRestoreRegisterOpType>((first_dword >> 4) & 0xF);
- opcode.opcode = save_restore_reg;
+ opcode.opcode = SaveRestoreRegisterOpcode{
+ .dst_index = (first_dword >> 16) & 0xF,
+ .src_index = (first_dword >> 8) & 0xF,
+ .op_type = static_cast<SaveRestoreRegisterOpType>((first_dword >> 4) & 0xF),
+ };
} break;
case CheatVmOpcodeType::SaveRestoreRegisterMask: {
- SaveRestoreRegisterMaskOpcode save_restore_regmask{};
// C2x0XXXX
// C2 = opcode 0xC2
// x = 3 if clearing reg, 2 if clearing saved value, 1 if saving, 0 if restoring.
// X = 16-bit bitmask, bit i --> save or restore register i.
- save_restore_regmask.op_type =
- static_cast<SaveRestoreRegisterOpType>((first_dword >> 20) & 0xF);
+ SaveRestoreRegisterMaskOpcode save_restore_regmask{
+ .op_type = static_cast<SaveRestoreRegisterOpType>((first_dword >> 20) & 0xF),
+ .should_operate = {},
+ };
for (std::size_t i = 0; i < NumRegisters; i++) {
- save_restore_regmask.should_operate[i] = (first_dword & (1u << i)) != 0;
+ save_restore_regmask.should_operate[i] = (first_dword & (1U << i)) != 0;
}
opcode.opcode = save_restore_regmask;
} break;
case CheatVmOpcodeType::ReadWriteStaticRegister: {
- ReadWriteStaticRegisterOpcode rw_static_reg{};
// C3000XXx
// C3 = opcode 0xC3.
// XX = static register index.
// x = register index.
- rw_static_reg.static_idx = ((first_dword >> 4) & 0xFF);
- rw_static_reg.idx = (first_dword & 0xF);
- opcode.opcode = rw_static_reg;
+ opcode.opcode = ReadWriteStaticRegisterOpcode{
+ .static_idx = (first_dword >> 4) & 0xFF,
+ .idx = first_dword & 0xF,
+ };
} break;
case CheatVmOpcodeType::DebugLog: {
- DebugLogOpcode debug_log{};
// FFFTIX##
// FFFTI0Ma aaaaaaaa
// FFFTI1Mr
@@ -583,31 +594,36 @@ bool DmntCheatVm::DecodeNextOpcode(CheatVmOpcode& out) {
// a = relative address.
// r = offset register.
// X = value register.
- debug_log.bit_width = (first_dword >> 16) & 0xF;
- debug_log.log_id = ((first_dword >> 12) & 0xF);
- debug_log.val_type = static_cast<DebugLogValueType>((first_dword >> 8) & 0xF);
+ DebugLogOpcode debug_log{
+ .bit_width = (first_dword >> 16) & 0xF,
+ .log_id = (first_dword >> 12) & 0xF,
+ .val_type = static_cast<DebugLogValueType>((first_dword >> 8) & 0xF),
+ .mem_type = MemoryAccessType::MainNso,
+ .addr_reg_index = 0,
+ .val_reg_index = 0,
+ .ofs_reg_index = 0,
+ .rel_address = 0,
+ };
switch (debug_log.val_type) {
case DebugLogValueType::RegisterValue:
- debug_log.val_reg_index = ((first_dword >> 4) & 0xF);
+ debug_log.val_reg_index = (first_dword >> 4) & 0xF;
break;
case DebugLogValueType::MemoryRelAddr:
debug_log.mem_type = static_cast<MemoryAccessType>((first_dword >> 4) & 0xF);
- debug_log.rel_address =
- ((static_cast<u64>(first_dword & 0xF) << 32ul) | static_cast<u64>(GetNextDword()));
+ debug_log.rel_address = (static_cast<u64>(first_dword & 0xF) << 32) | GetNextDword();
break;
case DebugLogValueType::MemoryOfsReg:
debug_log.mem_type = static_cast<MemoryAccessType>((first_dword >> 4) & 0xF);
- debug_log.ofs_reg_index = (first_dword & 0xF);
+ debug_log.ofs_reg_index = first_dword & 0xF;
break;
case DebugLogValueType::RegisterRelAddr:
- debug_log.addr_reg_index = ((first_dword >> 4) & 0xF);
- debug_log.rel_address =
- ((static_cast<u64>(first_dword & 0xF) << 32ul) | static_cast<u64>(GetNextDword()));
+ debug_log.addr_reg_index = (first_dword >> 4) & 0xF;
+ debug_log.rel_address = (static_cast<u64>(first_dword & 0xF) << 32) | GetNextDword();
break;
case DebugLogValueType::RegisterOfsReg:
- debug_log.addr_reg_index = ((first_dword >> 4) & 0xF);
- debug_log.ofs_reg_index = (first_dword & 0xF);
+ debug_log.addr_reg_index = (first_dword >> 4) & 0xF;
+ debug_log.ofs_reg_index = first_dword & 0xF;
break;
}
opcode.opcode = debug_log;
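
For readers skimming the dmnt_cheat_vm.cpp hunks above: the rewrite keeps the same shift-and-mask decoding and only switches to C++20 designated initializers. As a hedged, self-contained sketch of that pattern (the `Decoded` struct and function name below are made up for illustration and are not yuzu types), the C3000XXx layout can be unpacked like so:

    #include <cstdint>

    // Hypothetical stand-in for the opcode structs used in the diff above.
    struct Decoded {
        std::uint32_t static_idx; // XX: bits 4..11
        std::uint32_t idx;        // x:  bits 0..3
    };

    constexpr Decoded DecodeReadWriteStaticRegister(std::uint32_t first_dword) {
        // Shift the field down to bit 0, then mask to its width.
        return Decoded{
            .static_idx = (first_dword >> 4) & 0xFF,
            .idx = first_dword & 0xF,
        };
    }

    static_assert(DecodeReadWriteStaticRegister(0xC300025A).static_idx == 0x25);
    static_assert(DecodeReadWriteStaticRegister(0xC300025A).idx == 0xA);
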
diff --git a/src/core/perf_stats.cpp b/src/core/perf_stats.cpp
index 29339ead7..b899ac884 100644
--- a/src/core/perf_stats.cpp
+++ b/src/core/perf_stats.cpp
@@ -74,15 +74,16 @@ void PerfStats::EndGameFrame() {
game_frames += 1;
}
-double PerfStats::GetMeanFrametime() {
+double PerfStats::GetMeanFrametime() const {
std::lock_guard lock{object_mutex};
if (current_index <= IgnoreFrames) {
return 0;
}
+
const double sum = std::accumulate(perf_history.begin() + IgnoreFrames,
perf_history.begin() + current_index, 0.0);
- return sum / (current_index - IgnoreFrames);
+ return sum / static_cast<double>(current_index - IgnoreFrames);
}
PerfStatsResults PerfStats::GetAndResetStats(microseconds current_system_time_us) {
@@ -94,12 +95,13 @@ PerfStatsResults PerfStats::GetAndResetStats(microseconds current_system_time_us
const auto system_us_per_second = (current_system_time_us - reset_point_system_us) / interval;
- PerfStatsResults results{};
- results.system_fps = static_cast<double>(system_frames) / interval;
- results.game_fps = static_cast<double>(game_frames) / interval;
- results.frametime = duration_cast<DoubleSecs>(accumulated_frametime).count() /
- static_cast<double>(system_frames);
- results.emulation_speed = system_us_per_second.count() / 1'000'000.0;
+ const PerfStatsResults results{
+ .system_fps = static_cast<double>(system_frames) / interval,
+ .game_fps = static_cast<double>(game_frames) / interval,
+ .frametime = duration_cast<DoubleSecs>(accumulated_frametime).count() /
+ static_cast<double>(system_frames),
+ .emulation_speed = system_us_per_second.count() / 1'000'000.0,
+ };
// Reset counters
reset_point = now;
@@ -111,7 +113,7 @@ PerfStatsResults PerfStats::GetAndResetStats(microseconds current_system_time_us
return results;
}
-double PerfStats::GetLastFrameTimeScale() {
+double PerfStats::GetLastFrameTimeScale() const {
std::lock_guard lock{object_mutex};
constexpr double FRAME_LENGTH = 1.0 / 60;
diff --git a/src/core/perf_stats.h b/src/core/perf_stats.h
index d9a64f072..69256b960 100644
--- a/src/core/perf_stats.h
+++ b/src/core/perf_stats.h
@@ -30,7 +30,6 @@ struct PerfStatsResults {
class PerfStats {
public:
explicit PerfStats(u64 title_id);
-
~PerfStats();
using Clock = std::chrono::high_resolution_clock;
@@ -42,18 +41,18 @@ public:
PerfStatsResults GetAndResetStats(std::chrono::microseconds current_system_time_us);
/**
- * Returns the Arthimetic Mean of all frametime values stored in the performance history.
+ * Returns the arithmetic mean of all frametime values stored in the performance history.
*/
- double GetMeanFrametime();
+ double GetMeanFrametime() const;
/**
* Gets the ratio between walltime and the emulated time of the previous system frame. This is
* useful for scaling inputs or outputs moving between the two time domains.
*/
- double GetLastFrameTimeScale();
+ double GetLastFrameTimeScale() const;
private:
- std::mutex object_mutex{};
+ mutable std::mutex object_mutex;
/// Title ID for the game that is running. 0 if there is no game running yet
u64 title_id{0};
@@ -61,7 +60,7 @@ private:
std::size_t current_index{0};
/// Stores an hour of historical frametime data useful for processing and tracking performance
/// regressions with code changes.
- std::array<double, 216000> perf_history = {};
+ std::array<double, 216000> perf_history{};
/// Point when the cumulative counters were reset
Clock::time_point reset_point = Clock::now();
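
A side note on the perf_stats changes: declaring `object_mutex` as `mutable` is what allows the now-const getters to lock it, since locking mutates the mutex object itself. A minimal sketch of that pattern, with illustrative names rather than yuzu's:

    #include <mutex>

    class Stats {
    public:
        void Add(double v) {
            std::lock_guard lock{mutex};
            total += v;
            ++count;
        }

        // A const accessor can still lock because the mutex is declared mutable.
        double Mean() const {
            std::lock_guard lock{mutex};
            return count ? total / static_cast<double>(count) : 0.0;
        }

    private:
        mutable std::mutex mutex;
        double total{};
        unsigned long long count{};
    };
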
diff --git a/src/tests/core/core_timing.cpp b/src/tests/core/core_timing.cpp
index 022b26e6d..b35459152 100644
--- a/src/tests/core/core_timing.cpp
+++ b/src/tests/core/core_timing.cpp
@@ -46,20 +46,16 @@ struct ScopeInit final {
Core::Timing::CoreTiming core_timing;
};
-#pragma optimize("", off)
-
u64 TestTimerSpeed(Core::Timing::CoreTiming& core_timing) {
- u64 start = core_timing.GetGlobalTimeNs().count();
- u64 placebo = 0;
+ const u64 start = core_timing.GetGlobalTimeNs().count();
+ volatile u64 placebo = 0;
for (std::size_t i = 0; i < 1000; i++) {
- placebo += core_timing.GetGlobalTimeNs().count();
+ placebo = placebo + core_timing.GetGlobalTimeNs().count();
}
- u64 end = core_timing.GetGlobalTimeNs().count();
- return (end - start);
+ const u64 end = core_timing.GetGlobalTimeNs().count();
+ return end - start;
}
-#pragma optimize("", on)
-
} // Anonymous namespace
TEST_CASE("CoreTiming[BasicOrder]", "[core]") {
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index 8e19c3373..512578c8b 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -81,7 +81,7 @@ void GPU::WaitFence(u32 syncpoint_id, u32 value) {
}
MICROPROFILE_SCOPE(GPU_wait);
std::unique_lock lock{sync_mutex};
- sync_cv.wait(lock, [=]() { return syncpoints[syncpoint_id].load() >= value; });
+ sync_cv.wait(lock, [=, this] { return syncpoints[syncpoint_id].load() >= value; });
}
void GPU::IncrementSyncPoint(const u32 syncpoint_id) {
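
The `[=, this]` captures introduced here (and in the Qt code later in this diff) exist because implicitly capturing `this` through `[=]` is deprecated in C++20. A small illustrative sketch, not taken from yuzu:

    #include <functional>

    class Worker {
    public:
        std::function<int()> MakeGetter() {
            // Deprecated in C++20: [=] would capture `this` implicitly.
            // return [=] { return value; };

            // Preferred: state the pointer capture explicitly.
            return [=, this] { return value; };
        }

    private:
        int value{42};
    };
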
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index ff5505d12..844164645 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -4,7 +4,6 @@
#include "common/alignment.h"
#include "common/assert.h"
-#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/process.h"
@@ -16,121 +15,137 @@
namespace Tegra {
MemoryManager::MemoryManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer)
- : rasterizer{rasterizer}, system{system} {
- page_table.Resize(address_space_width, page_bits, false);
-
- // Initialize the map with a single free region covering the entire managed space.
- VirtualMemoryArea initial_vma;
- initial_vma.size = address_space_end;
- vma_map.emplace(initial_vma.base, initial_vma);
-
- UpdatePageTableForVMA(initial_vma);
-}
+ : system{system}, rasterizer{rasterizer}, page_table(page_table_size) {}
MemoryManager::~MemoryManager() = default;
-GPUVAddr MemoryManager::AllocateSpace(u64 size, u64 align) {
- const u64 aligned_size{Common::AlignUp(size, page_size)};
- const GPUVAddr gpu_addr{FindFreeRegion(address_space_base, aligned_size)};
-
- AllocateMemory(gpu_addr, 0, aligned_size);
-
+GPUVAddr MemoryManager::UpdateRange(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size) {
+ u64 remaining_size{size};
+ for (u64 offset{}; offset < size; offset += page_size) {
+ if (remaining_size < page_size) {
+ SetPageEntry(gpu_addr + offset, page_entry + offset, remaining_size);
+ } else {
+ SetPageEntry(gpu_addr + offset, page_entry + offset);
+ }
+ remaining_size -= page_size;
+ }
return gpu_addr;
}
-GPUVAddr MemoryManager::AllocateSpace(GPUVAddr gpu_addr, u64 size, u64 align) {
- const u64 aligned_size{Common::AlignUp(size, page_size)};
-
- AllocateMemory(gpu_addr, 0, aligned_size);
+GPUVAddr MemoryManager::Map(VAddr cpu_addr, GPUVAddr gpu_addr, std::size_t size) {
+ return UpdateRange(gpu_addr, cpu_addr, size);
+}
- return gpu_addr;
+GPUVAddr MemoryManager::MapAllocate(VAddr cpu_addr, std::size_t size, std::size_t align) {
+ return Map(cpu_addr, *FindFreeRange(size, align), size);
}
-GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, u64 size) {
- const u64 aligned_size{Common::AlignUp(size, page_size)};
- const GPUVAddr gpu_addr{FindFreeRegion(address_space_base, aligned_size)};
+void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) {
+ if (!size) {
+ return;
+ }
- MapBackingMemory(gpu_addr, system.Memory().GetPointer(cpu_addr), aligned_size, cpu_addr);
- ASSERT(
- system.CurrentProcess()->PageTable().LockForDeviceAddressSpace(cpu_addr, size).IsSuccess());
+ // Flush and invalidate through the GPU interface, to be asynchronous if possible.
+ system.GPU().FlushAndInvalidateRegion(*GpuToCpuAddress(gpu_addr), size);
- return gpu_addr;
+ UpdateRange(gpu_addr, PageEntry::State::Unmapped, size);
}
-GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, GPUVAddr gpu_addr, u64 size) {
- ASSERT((gpu_addr & page_mask) == 0);
+std::optional<GPUVAddr> MemoryManager::AllocateFixed(GPUVAddr gpu_addr, std::size_t size) {
+ for (u64 offset{}; offset < size; offset += page_size) {
+ if (!GetPageEntry(gpu_addr + offset).IsUnmapped()) {
+ return {};
+ }
+ }
- const u64 aligned_size{Common::AlignUp(size, page_size)};
+ return UpdateRange(gpu_addr, PageEntry::State::Allocated, size);
+}
- MapBackingMemory(gpu_addr, system.Memory().GetPointer(cpu_addr), aligned_size, cpu_addr);
- ASSERT(
- system.CurrentProcess()->PageTable().LockForDeviceAddressSpace(cpu_addr, size).IsSuccess());
- return gpu_addr;
+GPUVAddr MemoryManager::Allocate(std::size_t size, std::size_t align) {
+ return *AllocateFixed(*FindFreeRange(size, align), size);
}
-GPUVAddr MemoryManager::UnmapBuffer(GPUVAddr gpu_addr, u64 size) {
- ASSERT((gpu_addr & page_mask) == 0);
+void MemoryManager::TryLockPage(PageEntry page_entry, std::size_t size) {
+ if (!page_entry.IsValid()) {
+ return;
+ }
- const u64 aligned_size{Common::AlignUp(size, page_size)};
- const auto cpu_addr = GpuToCpuAddress(gpu_addr);
- ASSERT(cpu_addr);
+ ASSERT(system.CurrentProcess()
+ ->PageTable()
+ .LockForDeviceAddressSpace(page_entry.ToAddress(), size)
+ .IsSuccess());
+}
- // Flush and invalidate through the GPU interface, to be asynchronous if possible.
- system.GPU().FlushAndInvalidateRegion(*cpu_addr, aligned_size);
+void MemoryManager::TryUnlockPage(PageEntry page_entry, std::size_t size) {
+ if (!page_entry.IsValid()) {
+ return;
+ }
- UnmapRange(gpu_addr, aligned_size);
ASSERT(system.CurrentProcess()
->PageTable()
- .UnlockForDeviceAddressSpace(cpu_addr.value(), size)
+ .UnlockForDeviceAddressSpace(page_entry.ToAddress(), size)
.IsSuccess());
-
- return gpu_addr;
}
-GPUVAddr MemoryManager::FindFreeRegion(GPUVAddr region_start, u64 size) const {
- // Find the first Free VMA.
- const VMAHandle vma_handle{
- std::find_if(vma_map.begin(), vma_map.end(), [region_start, size](const auto& vma) {
- if (vma.second.type != VirtualMemoryArea::Type::Unmapped) {
- return false;
- }
+PageEntry MemoryManager::GetPageEntry(GPUVAddr gpu_addr) const {
+ return page_table[PageEntryIndex(gpu_addr)];
+}
- const VAddr vma_end{vma.second.base + vma.second.size};
- return vma_end > region_start && vma_end >= region_start + size;
- })};
+void MemoryManager::SetPageEntry(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size) {
+ // TODO(bunnei): We should lock/unlock device regions. This currently causes issues due to
+ // improper tracking, but should be fixed in the future.
- if (vma_handle == vma_map.end()) {
- return {};
- }
+ //// Unlock the old page
+ // TryUnlockPage(page_table[PageEntryIndex(gpu_addr)], size);
- return std::max(region_start, vma_handle->second.base);
-}
+ //// Lock the new page
+ // TryLockPage(page_entry, size);
-bool MemoryManager::IsAddressValid(GPUVAddr addr) const {
- return (addr >> page_bits) < page_table.pointers.size();
+ page_table[PageEntryIndex(gpu_addr)] = page_entry;
}
-std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr addr) const {
- if (!IsAddressValid(addr)) {
- return {};
+std::optional<GPUVAddr> MemoryManager::FindFreeRange(std::size_t size, std::size_t align) const {
+ if (!align) {
+ align = page_size;
+ } else {
+ align = Common::AlignUp(align, page_size);
}
- const VAddr cpu_addr{page_table.backing_addr[addr >> page_bits]};
- if (cpu_addr) {
- return cpu_addr + (addr & page_mask);
+ u64 available_size{};
+ GPUVAddr gpu_addr{address_space_start};
+ while (gpu_addr + available_size < address_space_size) {
+ if (GetPageEntry(gpu_addr + available_size).IsUnmapped()) {
+ available_size += page_size;
+
+ if (available_size >= size) {
+ return gpu_addr;
+ }
+ } else {
+ gpu_addr += available_size + page_size;
+ available_size = 0;
+
+ const auto remainder{gpu_addr % align};
+ if (remainder) {
+ gpu_addr = (gpu_addr - remainder) + align;
+ }
+ }
}
return {};
}
-template <typename T>
-T MemoryManager::Read(GPUVAddr addr) const {
- if (!IsAddressValid(addr)) {
+std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const {
+ const auto page_entry{GetPageEntry(gpu_addr)};
+ if (!page_entry.IsValid()) {
return {};
}
- const u8* page_pointer{GetPointer(addr)};
- if (page_pointer) {
+ return page_entry.ToAddress() + (gpu_addr & page_mask);
+}
+
+template <typename T>
+T MemoryManager::Read(GPUVAddr addr) const {
+ if (auto page_pointer{GetPointer(addr)}; page_pointer) {
// NOTE: Avoid adding any extra logic to this fast-path block
T value;
std::memcpy(&value, page_pointer, sizeof(T));
@@ -144,12 +159,7 @@ T MemoryManager::Read(GPUVAddr addr) const {
template <typename T>
void MemoryManager::Write(GPUVAddr addr, T data) {
- if (!IsAddressValid(addr)) {
- return;
- }
-
- u8* page_pointer{GetPointer(addr)};
- if (page_pointer) {
+ if (auto page_pointer{GetPointer(addr)}; page_pointer) {
// NOTE: Avoid adding any extra logic to this fast-path block
std::memcpy(page_pointer, &data, sizeof(T));
return;
@@ -167,66 +177,49 @@ template void MemoryManager::Write<u16>(GPUVAddr addr, u16 data);
template void MemoryManager::Write<u32>(GPUVAddr addr, u32 data);
template void MemoryManager::Write<u64>(GPUVAddr addr, u64 data);
-u8* MemoryManager::GetPointer(GPUVAddr addr) {
- if (!IsAddressValid(addr)) {
+u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) {
+ if (!GetPageEntry(gpu_addr).IsValid()) {
return {};
}
- auto& memory = system.Memory();
-
- const VAddr page_addr{page_table.backing_addr[addr >> page_bits]};
-
- if (page_addr != 0) {
- return memory.GetPointer(page_addr + (addr & page_mask));
+ const auto address{GpuToCpuAddress(gpu_addr)};
+ if (!address) {
+ return {};
}
- LOG_ERROR(HW_GPU, "Unknown GetPointer @ 0x{:016X}", addr);
- return {};
+ return system.Memory().GetPointer(*address);
}
-const u8* MemoryManager::GetPointer(GPUVAddr addr) const {
- if (!IsAddressValid(addr)) {
+const u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) const {
+ if (!GetPageEntry(gpu_addr).IsValid()) {
return {};
}
- const auto& memory = system.Memory();
-
- const VAddr page_addr{page_table.backing_addr[addr >> page_bits]};
-
- if (page_addr != 0) {
- return memory.GetPointer(page_addr + (addr & page_mask));
+ const auto address{GpuToCpuAddress(gpu_addr)};
+ if (!address) {
+ return {};
}
- LOG_ERROR(HW_GPU, "Unknown GetPointer @ 0x{:016X}", addr);
- return {};
-}
-
-bool MemoryManager::IsBlockContinuous(const GPUVAddr start, const std::size_t size) const {
- const std::size_t inner_size = size - 1;
- const GPUVAddr end = start + inner_size;
- const auto host_ptr_start = reinterpret_cast<std::uintptr_t>(GetPointer(start));
- const auto host_ptr_end = reinterpret_cast<std::uintptr_t>(GetPointer(end));
- const auto range = static_cast<std::size_t>(host_ptr_end - host_ptr_start);
- return range == inner_size;
+ return system.Memory().GetPointer(*address);
}
-void MemoryManager::ReadBlock(GPUVAddr gpu_src_addr, void* dest_buffer,
- const std::size_t size) const {
+void MemoryManager::ReadBlock(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size) const {
std::size_t remaining_size{size};
std::size_t page_index{gpu_src_addr >> page_bits};
std::size_t page_offset{gpu_src_addr & page_mask};
- auto& memory = system.Memory();
-
while (remaining_size > 0) {
const std::size_t copy_amount{
std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
- const VAddr src_addr{page_table.backing_addr[page_index] + page_offset};
- // Flush must happen on the rasterizer interface, such that memory is always synchronous
- // when it is read (even when in asynchronous GPU mode). Fixes Dead Cells title menu.
- rasterizer.FlushRegion(src_addr, copy_amount);
- memory.ReadBlockUnsafe(src_addr, dest_buffer, copy_amount);
+ if (const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; page_addr) {
+ const auto src_addr{*page_addr + page_offset};
+
+ // Flush must happen on the rasterizer interface, such that memory is always synchronous
+ // when it is read (even when in asynchronous GPU mode). Fixes Dead Cells title menu.
+ rasterizer.FlushRegion(src_addr, copy_amount);
+ system.Memory().ReadBlockUnsafe(src_addr, dest_buffer, copy_amount);
+ }
page_index++;
page_offset = 0;
@@ -241,18 +234,17 @@ void MemoryManager::ReadBlockUnsafe(GPUVAddr gpu_src_addr, void* dest_buffer,
std::size_t page_index{gpu_src_addr >> page_bits};
std::size_t page_offset{gpu_src_addr & page_mask};
- auto& memory = system.Memory();
-
while (remaining_size > 0) {
const std::size_t copy_amount{
std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
- const u8* page_pointer = page_table.pointers[page_index];
- if (page_pointer) {
- const VAddr src_addr{page_table.backing_addr[page_index] + page_offset};
- memory.ReadBlockUnsafe(src_addr, dest_buffer, copy_amount);
+
+ if (const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; page_addr) {
+ const auto src_addr{*page_addr + page_offset};
+ system.Memory().ReadBlockUnsafe(src_addr, dest_buffer, copy_amount);
} else {
std::memset(dest_buffer, 0, copy_amount);
}
+
page_index++;
page_offset = 0;
dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
@@ -260,23 +252,23 @@ void MemoryManager::ReadBlockUnsafe(GPUVAddr gpu_src_addr, void* dest_buffer,
}
}
-void MemoryManager::WriteBlock(GPUVAddr gpu_dest_addr, const void* src_buffer,
- const std::size_t size) {
+void MemoryManager::WriteBlock(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size) {
std::size_t remaining_size{size};
std::size_t page_index{gpu_dest_addr >> page_bits};
std::size_t page_offset{gpu_dest_addr & page_mask};
- auto& memory = system.Memory();
-
while (remaining_size > 0) {
const std::size_t copy_amount{
std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
- const VAddr dest_addr{page_table.backing_addr[page_index] + page_offset};
- // Invalidate must happen on the rasterizer interface, such that memory is always
- // synchronous when it is written (even when in asynchronous GPU mode).
- rasterizer.InvalidateRegion(dest_addr, copy_amount);
- memory.WriteBlockUnsafe(dest_addr, src_buffer, copy_amount);
+ if (const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; page_addr) {
+ const auto dest_addr{*page_addr + page_offset};
+
+ // Invalidate must happen on the rasterizer interface, such that memory is always
+ // synchronous when it is written (even when in asynchronous GPU mode).
+ rasterizer.InvalidateRegion(dest_addr, copy_amount);
+ system.Memory().WriteBlockUnsafe(dest_addr, src_buffer, copy_amount);
+ }
page_index++;
page_offset = 0;
@@ -286,21 +278,20 @@ void MemoryManager::WriteBlock(GPUVAddr gpu_dest_addr, const void* src_buffer,
}
void MemoryManager::WriteBlockUnsafe(GPUVAddr gpu_dest_addr, const void* src_buffer,
- const std::size_t size) {
+ std::size_t size) {
std::size_t remaining_size{size};
std::size_t page_index{gpu_dest_addr >> page_bits};
std::size_t page_offset{gpu_dest_addr & page_mask};
- auto& memory = system.Memory();
-
while (remaining_size > 0) {
const std::size_t copy_amount{
std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
- u8* page_pointer = page_table.pointers[page_index];
- if (page_pointer) {
- const VAddr dest_addr{page_table.backing_addr[page_index] + page_offset};
- memory.WriteBlockUnsafe(dest_addr, src_buffer, copy_amount);
+
+ if (const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; page_addr) {
+ const auto dest_addr{*page_addr + page_offset};
+ system.Memory().WriteBlockUnsafe(dest_addr, src_buffer, copy_amount);
}
+
page_index++;
page_offset = 0;
src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
@@ -308,273 +299,26 @@ void MemoryManager::WriteBlockUnsafe(GPUVAddr gpu_dest_addr, const void* src_buf
}
}
-void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr,
- const std::size_t size) {
+void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std::size_t size) {
std::vector<u8> tmp_buffer(size);
ReadBlock(gpu_src_addr, tmp_buffer.data(), size);
WriteBlock(gpu_dest_addr, tmp_buffer.data(), size);
}
void MemoryManager::CopyBlockUnsafe(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr,
- const std::size_t size) {
+ std::size_t size) {
std::vector<u8> tmp_buffer(size);
ReadBlockUnsafe(gpu_src_addr, tmp_buffer.data(), size);
WriteBlockUnsafe(gpu_dest_addr, tmp_buffer.data(), size);
}
bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) {
- const VAddr addr = page_table.backing_addr[gpu_addr >> page_bits];
- const std::size_t page = (addr & Core::Memory::PAGE_MASK) + size;
- return page <= Core::Memory::PAGE_SIZE;
-}
-
-void MemoryManager::MapPages(GPUVAddr base, u64 size, u8* memory, Common::PageType type,
- VAddr backing_addr) {
- LOG_DEBUG(HW_GPU, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * page_size,
- (base + size) * page_size);
-
- const VAddr end{base + size};
- ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}",
- base + page_table.pointers.size());
-
- if (memory == nullptr) {
- while (base != end) {
- page_table.pointers[base] = nullptr;
- page_table.backing_addr[base] = 0;
-
- base += 1;
- }
- } else {
- while (base != end) {
- page_table.pointers[base] = memory;
- page_table.backing_addr[base] = backing_addr;
-
- base += 1;
- memory += page_size;
- backing_addr += page_size;
- }
- }
-}
-
-void MemoryManager::MapMemoryRegion(GPUVAddr base, u64 size, u8* target, VAddr backing_addr) {
- ASSERT_MSG((size & page_mask) == 0, "non-page aligned size: {:016X}", size);
- ASSERT_MSG((base & page_mask) == 0, "non-page aligned base: {:016X}", base);
- MapPages(base / page_size, size / page_size, target, Common::PageType::Memory, backing_addr);
-}
-
-void MemoryManager::UnmapRegion(GPUVAddr base, u64 size) {
- ASSERT_MSG((size & page_mask) == 0, "non-page aligned size: {:016X}", size);
- ASSERT_MSG((base & page_mask) == 0, "non-page aligned base: {:016X}", base);
- MapPages(base / page_size, size / page_size, nullptr, Common::PageType::Unmapped);
-}
-
-bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const {
- ASSERT(base + size == next.base);
- if (type != next.type) {
- return {};
- }
- if (type == VirtualMemoryArea::Type::Allocated && (offset + size != next.offset)) {
- return {};
- }
- if (type == VirtualMemoryArea::Type::Mapped && backing_memory + size != next.backing_memory) {
- return {};
- }
- return true;
-}
-
-MemoryManager::VMAHandle MemoryManager::FindVMA(GPUVAddr target) const {
- if (target >= address_space_end) {
- return vma_map.end();
- } else {
- return std::prev(vma_map.upper_bound(target));
- }
-}
-
-MemoryManager::VMAIter MemoryManager::Allocate(VMAIter vma_handle) {
- VirtualMemoryArea& vma{vma_handle->second};
-
- vma.type = VirtualMemoryArea::Type::Allocated;
- vma.backing_addr = 0;
- vma.backing_memory = {};
- UpdatePageTableForVMA(vma);
-
- return MergeAdjacent(vma_handle);
-}
-
-MemoryManager::VMAHandle MemoryManager::AllocateMemory(GPUVAddr target, std::size_t offset,
- u64 size) {
-
- // This is the appropriately sized VMA that will turn into our allocation.
- VMAIter vma_handle{CarveVMA(target, size)};
- VirtualMemoryArea& vma{vma_handle->second};
-
- ASSERT(vma.size == size);
-
- vma.offset = offset;
-
- return Allocate(vma_handle);
-}
-
-MemoryManager::VMAHandle MemoryManager::MapBackingMemory(GPUVAddr target, u8* memory, u64 size,
- VAddr backing_addr) {
- // This is the appropriately sized VMA that will turn into our allocation.
- VMAIter vma_handle{CarveVMA(target, size)};
- VirtualMemoryArea& vma{vma_handle->second};
-
- ASSERT(vma.size == size);
-
- vma.type = VirtualMemoryArea::Type::Mapped;
- vma.backing_memory = memory;
- vma.backing_addr = backing_addr;
- UpdatePageTableForVMA(vma);
-
- return MergeAdjacent(vma_handle);
-}
-
-void MemoryManager::UnmapRange(GPUVAddr target, u64 size) {
- VMAIter vma{CarveVMARange(target, size)};
- const VAddr target_end{target + size};
- const VMAIter end{vma_map.end()};
-
- // The comparison against the end of the range must be done using addresses since VMAs can be
- // merged during this process, causing invalidation of the iterators.
- while (vma != end && vma->second.base < target_end) {
- // Unmapped ranges return to allocated state and can be reused
- // This behavior is used by Super Mario Odyssey, Sonic Forces, and likely other games
- vma = std::next(Allocate(vma));
- }
-
- ASSERT(FindVMA(target)->second.size >= size);
-}
-
-MemoryManager::VMAIter MemoryManager::StripIterConstness(const VMAHandle& iter) {
- // This uses a neat C++ trick to convert a const_iterator to a regular iterator, given
- // non-const access to its container.
- return vma_map.erase(iter, iter); // Erases an empty range of elements
-}
-
-MemoryManager::VMAIter MemoryManager::CarveVMA(GPUVAddr base, u64 size) {
- ASSERT_MSG((size & page_mask) == 0, "non-page aligned size: 0x{:016X}", size);
- ASSERT_MSG((base & page_mask) == 0, "non-page aligned base: 0x{:016X}", base);
-
- VMAIter vma_handle{StripIterConstness(FindVMA(base))};
- if (vma_handle == vma_map.end()) {
- // Target address is outside the managed range
- return {};
- }
-
- const VirtualMemoryArea& vma{vma_handle->second};
- if (vma.type == VirtualMemoryArea::Type::Mapped) {
- // Region is already allocated
- return vma_handle;
- }
-
- const VAddr start_in_vma{base - vma.base};
- const VAddr end_in_vma{start_in_vma + size};
-
- ASSERT_MSG(end_in_vma <= vma.size, "region size 0x{:016X} is less than required size 0x{:016X}",
- vma.size, end_in_vma);
-
- if (end_in_vma < vma.size) {
- // Split VMA at the end of the allocated region
- SplitVMA(vma_handle, end_in_vma);
- }
- if (start_in_vma != 0) {
- // Split VMA at the start of the allocated region
- vma_handle = SplitVMA(vma_handle, start_in_vma);
- }
-
- return vma_handle;
-}
-
-MemoryManager::VMAIter MemoryManager::CarveVMARange(GPUVAddr target, u64 size) {
- ASSERT_MSG((size & page_mask) == 0, "non-page aligned size: 0x{:016X}", size);
- ASSERT_MSG((target & page_mask) == 0, "non-page aligned base: 0x{:016X}", target);
-
- const VAddr target_end{target + size};
- ASSERT(target_end >= target);
- ASSERT(size > 0);
-
- VMAIter begin_vma{StripIterConstness(FindVMA(target))};
- const VMAIter i_end{vma_map.lower_bound(target_end)};
- if (std::any_of(begin_vma, i_end, [](const auto& entry) {
- return entry.second.type == VirtualMemoryArea::Type::Unmapped;
- })) {
+ const auto cpu_addr{GpuToCpuAddress(gpu_addr)};
+ if (!cpu_addr) {
return {};
}
-
- if (target != begin_vma->second.base) {
- begin_vma = SplitVMA(begin_vma, target - begin_vma->second.base);
- }
-
- VMAIter end_vma{StripIterConstness(FindVMA(target_end))};
- if (end_vma != vma_map.end() && target_end != end_vma->second.base) {
- end_vma = SplitVMA(end_vma, target_end - end_vma->second.base);
- }
-
- return begin_vma;
-}
-
-MemoryManager::VMAIter MemoryManager::SplitVMA(VMAIter vma_handle, u64 offset_in_vma) {
- VirtualMemoryArea& old_vma{vma_handle->second};
- VirtualMemoryArea new_vma{old_vma}; // Make a copy of the VMA
-
- // For now, don't allow no-op VMA splits (trying to split at a boundary) because it's probably
- // a bug. This restriction might be removed later.
- ASSERT(offset_in_vma < old_vma.size);
- ASSERT(offset_in_vma > 0);
-
- old_vma.size = offset_in_vma;
- new_vma.base += offset_in_vma;
- new_vma.size -= offset_in_vma;
-
- switch (new_vma.type) {
- case VirtualMemoryArea::Type::Unmapped:
- break;
- case VirtualMemoryArea::Type::Allocated:
- new_vma.offset += offset_in_vma;
- break;
- case VirtualMemoryArea::Type::Mapped:
- new_vma.backing_memory += offset_in_vma;
- break;
- }
-
- ASSERT(old_vma.CanBeMergedWith(new_vma));
-
- return vma_map.emplace_hint(std::next(vma_handle), new_vma.base, new_vma);
-}
-
-MemoryManager::VMAIter MemoryManager::MergeAdjacent(VMAIter iter) {
- const VMAIter next_vma{std::next(iter)};
- if (next_vma != vma_map.end() && iter->second.CanBeMergedWith(next_vma->second)) {
- iter->second.size += next_vma->second.size;
- vma_map.erase(next_vma);
- }
-
- if (iter != vma_map.begin()) {
- VMAIter prev_vma{std::prev(iter)};
- if (prev_vma->second.CanBeMergedWith(iter->second)) {
- prev_vma->second.size += iter->second.size;
- vma_map.erase(iter);
- iter = prev_vma;
- }
- }
-
- return iter;
-}
-
-void MemoryManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
- switch (vma.type) {
- case VirtualMemoryArea::Type::Unmapped:
- UnmapRegion(vma.base, vma.size);
- break;
- case VirtualMemoryArea::Type::Allocated:
- MapMemoryRegion(vma.base, vma.size, nullptr, vma.backing_addr);
- break;
- case VirtualMemoryArea::Type::Mapped:
- MapMemoryRegion(vma.base, vma.size, vma.backing_memory, vma.backing_addr);
- break;
- }
+ const std::size_t page{(*cpu_addr & Core::Memory::PAGE_MASK) + size};
+ return page <= Core::Memory::PAGE_SIZE;
}
} // namespace Tegra
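
With the constants this rewrite introduces (page_bits = 16 and page_table_bits = 24, declared in memory_manager.h below), a GPU virtual address splits into a page-table index and an in-page offset. A hedged sketch of just that arithmetic, outside the class:

    #include <cstddef>
    #include <cstdint>

    constexpr std::uint64_t page_bits = 16;
    constexpr std::uint64_t page_size = 1ULL << page_bits; // 64 KiB pages
    constexpr std::uint64_t page_mask = page_size - 1;
    constexpr std::uint64_t page_table_bits = 24;
    constexpr std::uint64_t page_table_mask = (1ULL << page_table_bits) - 1;

    constexpr std::size_t PageEntryIndex(std::uint64_t gpu_addr) {
        return (gpu_addr >> page_bits) & page_table_mask; // which 64 KiB page
    }

    constexpr std::uint64_t PageOffset(std::uint64_t gpu_addr) {
        return gpu_addr & page_mask; // offset inside that page
    }

    static_assert(PageEntryIndex(0x1'0002'1234) == 0x10002);
    static_assert(PageOffset(0x1'0002'1234) == 0x1234);
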
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h
index 87658e87a..681bd9588 100644
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ -6,9 +6,9 @@
#include <map>
#include <optional>
+#include <vector>
#include "common/common_types.h"
-#include "common/page_table.h"
namespace VideoCore {
class RasterizerInterface;
@@ -20,45 +20,57 @@ class System;
namespace Tegra {
-/**
- * Represents a VMA in an address space. A VMA is a contiguous region of virtual addressing space
- * with homogeneous attributes across its extents. In this particular implementation each VMA is
- * also backed by a single host memory allocation.
- */
-struct VirtualMemoryArea {
- enum class Type : u8 {
- Unmapped,
- Allocated,
- Mapped,
+class PageEntry final {
+public:
+ enum class State : u32 {
+ Unmapped = static_cast<u32>(-1),
+ Allocated = static_cast<u32>(-2),
};
- /// Virtual base address of the region.
- GPUVAddr base{};
- /// Size of the region.
- u64 size{};
- /// Memory area mapping type.
- Type type{Type::Unmapped};
- /// CPU memory mapped address corresponding to this memory area.
- VAddr backing_addr{};
- /// Offset into the backing_memory the mapping starts from.
- std::size_t offset{};
- /// Pointer backing this VMA.
- u8* backing_memory{};
-
- /// Tests if this area can be merged to the right with `next`.
- bool CanBeMergedWith(const VirtualMemoryArea& next) const;
+ constexpr PageEntry() = default;
+ constexpr PageEntry(State state) : state{state} {}
+ constexpr PageEntry(VAddr addr) : state{static_cast<State>(addr >> ShiftBits)} {}
+
+ constexpr bool IsUnmapped() const {
+ return state == State::Unmapped;
+ }
+
+ constexpr bool IsAllocated() const {
+ return state == State::Allocated;
+ }
+
+ constexpr bool IsValid() const {
+ return !IsUnmapped() && !IsAllocated();
+ }
+
+ constexpr VAddr ToAddress() const {
+ if (!IsValid()) {
+ return {};
+ }
+
+ return static_cast<VAddr>(state) << ShiftBits;
+ }
+
+ constexpr PageEntry operator+(u64 offset) {
+ // If this is a reserved value, offsets do not apply
+ if (!IsValid()) {
+ return *this;
+ }
+ return PageEntry{(static_cast<VAddr>(state) << ShiftBits) + offset};
+ }
+
+private:
+ static constexpr std::size_t ShiftBits{12};
+
+ State state{State::Unmapped};
};
+static_assert(sizeof(PageEntry) == 4, "PageEntry is too large");
class MemoryManager final {
public:
explicit MemoryManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer);
~MemoryManager();
- GPUVAddr AllocateSpace(u64 size, u64 align);
- GPUVAddr AllocateSpace(GPUVAddr addr, u64 size, u64 align);
- GPUVAddr MapBufferEx(VAddr cpu_addr, u64 size);
- GPUVAddr MapBufferEx(VAddr cpu_addr, GPUVAddr addr, u64 size);
- GPUVAddr UnmapBuffer(GPUVAddr addr, u64 size);
std::optional<VAddr> GpuToCpuAddress(GPUVAddr addr) const;
template <typename T>
@@ -70,9 +82,6 @@ public:
u8* GetPointer(GPUVAddr addr);
const u8* GetPointer(GPUVAddr addr) const;
- /// Returns true if the block is continuous in host memory, false otherwise
- bool IsBlockContinuous(GPUVAddr start, std::size_t size) const;
-
/**
* ReadBlock and WriteBlock are full read and write operations over virtual
* GPU Memory. It's important to use these when GPU memory may not be continuous
@@ -98,92 +107,43 @@ public:
void CopyBlockUnsafe(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std::size_t size);
/**
- * IsGranularRange checks if a gpu region can be simply read with a pointer
+ * IsGranularRange checks if a gpu region can be simply read with a pointer.
*/
bool IsGranularRange(GPUVAddr gpu_addr, std::size_t size);
-private:
- using VMAMap = std::map<GPUVAddr, VirtualMemoryArea>;
- using VMAHandle = VMAMap::const_iterator;
- using VMAIter = VMAMap::iterator;
-
- bool IsAddressValid(GPUVAddr addr) const;
- void MapPages(GPUVAddr base, u64 size, u8* memory, Common::PageType type,
- VAddr backing_addr = 0);
- void MapMemoryRegion(GPUVAddr base, u64 size, u8* target, VAddr backing_addr);
- void UnmapRegion(GPUVAddr base, u64 size);
-
- /// Finds the VMA in which the given address is included in, or `vma_map.end()`.
- VMAHandle FindVMA(GPUVAddr target) const;
-
- VMAHandle AllocateMemory(GPUVAddr target, std::size_t offset, u64 size);
-
- /**
- * Maps an unmanaged host memory pointer at a given address.
- *
- * @param target The guest address to start the mapping at.
- * @param memory The memory to be mapped.
- * @param size Size of the mapping in bytes.
- * @param backing_addr The base address of the range to back this mapping.
- */
- VMAHandle MapBackingMemory(GPUVAddr target, u8* memory, u64 size, VAddr backing_addr);
-
- /// Unmaps a range of addresses, splitting VMAs as necessary.
- void UnmapRange(GPUVAddr target, u64 size);
-
- /// Converts a VMAHandle to a mutable VMAIter.
- VMAIter StripIterConstness(const VMAHandle& iter);
-
- /// Marks as the specified VMA as allocated.
- VMAIter Allocate(VMAIter vma);
-
- /**
- * Carves a VMA of a specific size at the specified address by splitting Free VMAs while doing
- * the appropriate error checking.
- */
- VMAIter CarveVMA(GPUVAddr base, u64 size);
-
- /**
- * Splits the edges of the given range of non-Free VMAs so that there is a VMA split at each
- * end of the range.
- */
- VMAIter CarveVMARange(GPUVAddr base, u64 size);
-
- /**
- * Splits a VMA in two, at the specified offset.
- * @returns the right side of the split, with the original iterator becoming the left side.
- */
- VMAIter SplitVMA(VMAIter vma, u64 offset_in_vma);
+ GPUVAddr Map(VAddr cpu_addr, GPUVAddr gpu_addr, std::size_t size);
+ GPUVAddr MapAllocate(VAddr cpu_addr, std::size_t size, std::size_t align);
+ std::optional<GPUVAddr> AllocateFixed(GPUVAddr gpu_addr, std::size_t size);
+ GPUVAddr Allocate(std::size_t size, std::size_t align);
+ void Unmap(GPUVAddr gpu_addr, std::size_t size);
- /**
- * Checks for and merges the specified VMA with adjacent ones if possible.
- * @returns the merged VMA or the original if no merging was possible.
- */
- VMAIter MergeAdjacent(VMAIter vma);
+private:
+ PageEntry GetPageEntry(GPUVAddr gpu_addr) const;
+ void SetPageEntry(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size = page_size);
+ GPUVAddr UpdateRange(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size);
+ std::optional<GPUVAddr> FindFreeRange(std::size_t size, std::size_t align) const;
- /// Updates the pages corresponding to this VMA so they match the VMA's attributes.
- void UpdatePageTableForVMA(const VirtualMemoryArea& vma);
+ void TryLockPage(PageEntry page_entry, std::size_t size);
+ void TryUnlockPage(PageEntry page_entry, std::size_t size);
- /// Finds a free (unmapped region) of the specified size starting at the specified address.
- GPUVAddr FindFreeRegion(GPUVAddr region_start, u64 size) const;
+ static constexpr std::size_t PageEntryIndex(GPUVAddr gpu_addr) {
+ return (gpu_addr >> page_bits) & page_table_mask;
+ }
-private:
+ static constexpr u64 address_space_size = 1ULL << 40;
+ static constexpr u64 address_space_start = 1ULL << 32;
static constexpr u64 page_bits{16};
static constexpr u64 page_size{1 << page_bits};
static constexpr u64 page_mask{page_size - 1};
+ static constexpr u64 page_table_bits{24};
+ static constexpr u64 page_table_size{1 << page_table_bits};
+ static constexpr u64 page_table_mask{page_table_size - 1};
- /// Address space in bits, according to Tegra X1 TRM
- static constexpr u32 address_space_width{40};
- /// Start address for mapping, this is fairly arbitrary but must be non-zero.
- static constexpr GPUVAddr address_space_base{0x100000};
- /// End of address space, based on address space in bits.
- static constexpr GPUVAddr address_space_end{1ULL << address_space_width};
+ Core::System& system;
- Common::PageTable page_table;
- VMAMap vma_map;
VideoCore::RasterizerInterface& rasterizer;
- Core::System& system;
+ std::vector<PageEntry> page_table;
};
} // namespace Tegra
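
PageEntry above fits either a reserved state or a page-aligned CPU address into 32 bits by storing `addr >> 12`, so valid addresses round-trip as long as they are 4 KiB aligned and small enough to survive the shift. A condensed, illustrative restatement (not the real class) showing that round-trip:

    #include <cstdint>

    class PageEntrySketch {
    public:
        static constexpr std::uint32_t Unmapped = 0xFFFFFFFF;  // State::Unmapped
        static constexpr std::uint32_t Allocated = 0xFFFFFFFE; // State::Allocated
        static constexpr unsigned ShiftBits = 12;

        constexpr explicit PageEntrySketch(std::uint64_t cpu_addr)
            : state{static_cast<std::uint32_t>(cpu_addr >> ShiftBits)} {}

        constexpr bool IsValid() const {
            return state != Unmapped && state != Allocated;
        }

        constexpr std::uint64_t ToAddress() const {
            return IsValid() ? static_cast<std::uint64_t>(state) << ShiftBits : 0;
        }

    private:
        std::uint32_t state;
    };

    static_assert(PageEntrySketch{0x1234'5000}.ToAddress() == 0x1234'5000);
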
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 03e82c599..cb284db77 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -178,16 +178,11 @@ RasterizerOpenGL::RasterizerOpenGL(Core::System& system, Core::Frontend::EmuWind
if (device.UseAsynchronousShaders()) {
// Max worker threads we should allow
- constexpr auto MAX_THREADS = 2u;
- // Amount of threads we should reserve for other parts of yuzu
- constexpr auto RESERVED_THREADS = 6u;
- // Get the amount of threads we can use(this can return zero)
- const auto cpu_thread_count =
- std::max(RESERVED_THREADS, std::thread::hardware_concurrency());
- // Deduce how many "extra" threads we have to use.
- const auto max_threads_unused = cpu_thread_count - RESERVED_THREADS;
+ constexpr u32 MAX_THREADS = 4;
+ // Deduce how many threads we can use
+ const u32 threads_used = std::thread::hardware_concurrency() / 4;
// Always allow at least 1 thread regardless of our settings
- const auto max_worker_count = std::max(1u, max_threads_unused);
+ const auto max_worker_count = std::max(1U, threads_used);
// Don't use more than MAX_THREADS
const auto worker_count = std::min(max_worker_count, MAX_THREADS);
async_shaders.AllocateWorkers(worker_count);
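
The reworked heuristic above gives shader compilation a quarter of the reported hardware threads, clamped to the range [1, 4]; std::thread::hardware_concurrency() may legally return 0, and the lower clamp still yields one worker in that case. The same arithmetic as a standalone sketch:

    #include <algorithm>
    #include <cstdint>

    constexpr std::uint32_t WorkerCount(std::uint32_t hardware_threads) {
        constexpr std::uint32_t MAX_THREADS = 4;
        const std::uint32_t threads_used = hardware_threads / 4;
        return std::min(std::max(1U, threads_used), MAX_THREADS);
    }

    static_assert(WorkerCount(0) == 1);  // hardware_concurrency() reporting 0 still gives one worker
    static_assert(WorkerCount(8) == 2);
    static_assert(WorkerCount(32) == 4); // capped at MAX_THREADS
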
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.cpp b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
index ce53e5a6b..a551e3de8 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.cpp
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
@@ -696,6 +696,7 @@ void VKBlitScreen::CreateFramebuffers() {
.flags = 0,
.renderPass = *renderpass,
.attachmentCount = 1,
+ .pAttachments = nullptr,
.width = size.width,
.height = size.height,
.layers = 1,
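
This and the following renderer_vulkan hunks mostly spell out members that were previously omitted from designated initializers. With C++20 aggregate initialization the omitted members would be value-initialized anyway, but naming every field keeps the initializers exhaustive, and designators must follow declaration order. A tiny sketch with a made-up struct:

    #include <cstdint>

    struct Extent {
        std::uint32_t width;
        std::uint32_t height;
        std::uint32_t depth;
    };

    // Designators must appear in declaration order; anything left out is value-initialized.
    constexpr Extent full{.width = 1280, .height = 720, .depth = 1};
    constexpr Extent partial{.width = 1280, .height = 720}; // .depth becomes 0

    static_assert(partial.depth == 0);
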
diff --git a/src/video_core/renderer_vulkan/vk_device.cpp b/src/video_core/renderer_vulkan/vk_device.cpp
index 6245e0d78..0c03e4d83 100644
--- a/src/video_core/renderer_vulkan/vk_device.cpp
+++ b/src/video_core/renderer_vulkan/vk_device.cpp
@@ -771,8 +771,9 @@ std::vector<VkDeviceQueueCreateInfo> VKDevice::GetDeviceQueueCreateInfos() const
.pNext = nullptr,
.flags = 0,
.queueFamilyIndex = queue_family,
+ .queueCount = 1,
+ .pQueuePriorities = nullptr,
});
- ci.queueCount = 1;
ci.pQueuePriorities = &QUEUE_PRIORITY;
}
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index 42b3a744c..418c62bc4 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -261,8 +261,13 @@ VKComputePipeline& VKPipelineCache::GetComputePipeline(const ComputePipelineCach
}
const Specialization specialization{
+ .base_binding = 0,
.workgroup_size = key.workgroup_size,
.shared_memory_size = key.shared_memory_size,
+ .point_size = std::nullopt,
+ .enabled_attributes = {},
+ .attribute_types = {},
+ .ndc_minus_one_to_one = false,
};
const SPIRVShader spirv_shader{Decompile(device, shader->GetIR(), ShaderType::Compute,
shader->GetRegistry(), specialization),
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 2ed2004f0..7500e8244 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -815,8 +815,13 @@ bool RasterizerVulkan::WalkAttachmentOverlaps(const CachedSurfaceView& attachmen
std::tuple<VkFramebuffer, VkExtent2D> RasterizerVulkan::ConfigureFramebuffers(
VkRenderPass renderpass) {
- FramebufferCacheKey key{renderpass, std::numeric_limits<u32>::max(),
- std::numeric_limits<u32>::max(), std::numeric_limits<u32>::max()};
+ FramebufferCacheKey key{
+ .renderpass = renderpass,
+ .width = std::numeric_limits<u32>::max(),
+ .height = std::numeric_limits<u32>::max(),
+ .layers = std::numeric_limits<u32>::max(),
+ .views = {},
+ };
const auto try_push = [&key](const View& view) {
if (!view) {
diff --git a/src/video_core/renderer_vulkan/vk_sampler_cache.cpp b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
index 2d5460776..b068888f9 100644
--- a/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
@@ -47,6 +47,7 @@ vk::Sampler VKSamplerCache::CreateSampler(const Tegra::Texture::TSCEntry& tsc) c
VkSamplerCustomBorderColorCreateInfoEXT border{
.sType = VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT,
.pNext = nullptr,
+ .customBorderColor = {},
.format = VK_FORMAT_UNDEFINED,
};
std::memcpy(&border.customBorderColor, color.data(), sizeof(color));
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index efd4bb13b..2c6f54101 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -473,6 +473,8 @@ VkImageView CachedSurfaceView::GetAttachment() {
.aspectMask = aspect_mask,
.baseMipLevel = base_level,
.levelCount = num_levels,
+ .baseArrayLayer = 0,
+ .layerCount = 0,
},
};
if (image_view_type == VK_IMAGE_VIEW_TYPE_3D) {
diff --git a/src/yuzu/bootmanager.cpp b/src/yuzu/bootmanager.cpp
index 5738787ac..8fc322b30 100644
--- a/src/yuzu/bootmanager.cpp
+++ b/src/yuzu/bootmanager.cpp
@@ -567,7 +567,7 @@ void GRenderWindow::CaptureScreenshot(u32 res_scale, const QString& screenshot_p
screenshot_image = QImage(QSize(layout.width, layout.height), QImage::Format_RGB32);
renderer.RequestScreenshot(
screenshot_image.bits(),
- [=] {
+ [=, this] {
const std::string std_screenshot_path = screenshot_path.toStdString();
if (screenshot_image.mirrored(false, true).save(screenshot_path)) {
LOG_INFO(Frontend, "Screenshot saved to \"{}\"", std_screenshot_path);
diff --git a/src/yuzu/configuration/configure_input_player.cpp b/src/yuzu/configuration/configure_input_player.cpp
index 00433926d..b1850bc95 100644
--- a/src/yuzu/configuration/configure_input_player.cpp
+++ b/src/yuzu/configuration/configure_input_player.cpp
@@ -280,9 +280,9 @@ ConfigureInputPlayer::ConfigureInputPlayer(QWidget* parent, std::size_t player_i
}
button->setContextMenuPolicy(Qt::CustomContextMenu);
- connect(button, &QPushButton::clicked, [=] {
+ connect(button, &QPushButton::clicked, [=, this] {
HandleClick(button_map[button_id],
- [=](Common::ParamPackage params) {
+ [=, this](Common::ParamPackage params) {
                            // Workaround for ZL & ZR for analog triggers like on XBOX controllers.
// Analog triggers (from controllers like the XBOX controller) would not
// work due to a different range of their signals (from 0 to 255 on
@@ -300,19 +300,20 @@ ConfigureInputPlayer::ConfigureInputPlayer(QWidget* parent, std::size_t player_i
},
InputCommon::Polling::DeviceType::Button);
});
- connect(button, &QPushButton::customContextMenuRequested, [=](const QPoint& menu_location) {
- QMenu context_menu;
- context_menu.addAction(tr("Clear"), [&] {
- buttons_param[button_id].Clear();
- button_map[button_id]->setText(tr("[not set]"));
- });
- context_menu.addAction(tr("Restore Default"), [&] {
- buttons_param[button_id] = Common::ParamPackage{
- InputCommon::GenerateKeyboardParam(Config::default_buttons[button_id])};
- button_map[button_id]->setText(ButtonToText(buttons_param[button_id]));
- });
- context_menu.exec(button_map[button_id]->mapToGlobal(menu_location));
- });
+ connect(button, &QPushButton::customContextMenuRequested,
+ [=, this](const QPoint& menu_location) {
+ QMenu context_menu;
+ context_menu.addAction(tr("Clear"), [&] {
+ buttons_param[button_id].Clear();
+ button_map[button_id]->setText(tr("[not set]"));
+ });
+ context_menu.addAction(tr("Restore Default"), [&] {
+ buttons_param[button_id] = Common::ParamPackage{
+ InputCommon::GenerateKeyboardParam(Config::default_buttons[button_id])};
+ button_map[button_id]->setText(ButtonToText(buttons_param[button_id]));
+ });
+ context_menu.exec(button_map[button_id]->mapToGlobal(menu_location));
+ });
}
for (int analog_id = 0; analog_id < Settings::NativeAnalog::NumAnalogs; analog_id++) {
@@ -323,16 +324,16 @@ ConfigureInputPlayer::ConfigureInputPlayer(QWidget* parent, std::size_t player_i
}
analog_button->setContextMenuPolicy(Qt::CustomContextMenu);
- connect(analog_button, &QPushButton::clicked, [=]() {
+ connect(analog_button, &QPushButton::clicked, [=, this] {
HandleClick(analog_map_buttons[analog_id][sub_button_id],
- [=](const Common::ParamPackage& params) {
+ [=, this](const Common::ParamPackage& params) {
SetAnalogButton(params, analogs_param[analog_id],
analog_sub_buttons[sub_button_id]);
},
InputCommon::Polling::DeviceType::Button);
});
connect(analog_button, &QPushButton::customContextMenuRequested,
- [=](const QPoint& menu_location) {
+ [=, this](const QPoint& menu_location) {
QMenu context_menu;
context_menu.addAction(tr("Clear"), [&] {
analogs_param[analog_id].Erase(analog_sub_buttons[sub_button_id]);
@@ -350,32 +351,35 @@ ConfigureInputPlayer::ConfigureInputPlayer(QWidget* parent, std::size_t player_i
menu_location));
});
}
- connect(analog_map_stick[analog_id], &QPushButton::clicked, [=] {
+ connect(analog_map_stick[analog_id], &QPushButton::clicked, [=, this] {
if (QMessageBox::information(
this, tr("Information"),
tr("After pressing OK, first move your joystick horizontally, "
"and then vertically."),
QMessageBox::Ok | QMessageBox::Cancel) == QMessageBox::Ok) {
- HandleClick(
- analog_map_stick[analog_id],
- [=](const Common::ParamPackage& params) { analogs_param[analog_id] = params; },
- InputCommon::Polling::DeviceType::Analog);
+ HandleClick(analog_map_stick[analog_id],
+ [=, this](const Common::ParamPackage& params) {
+ analogs_param[analog_id] = params;
+ },
+ InputCommon::Polling::DeviceType::Analog);
}
});
- connect(analog_map_deadzone_and_modifier_slider[analog_id], &QSlider::valueChanged, [=] {
- const float slider_value = analog_map_deadzone_and_modifier_slider[analog_id]->value();
- if (analogs_param[analog_id].Get("engine", "") == "sdl" ||
- analogs_param[analog_id].Get("engine", "") == "gcpad") {
- analog_map_deadzone_and_modifier_slider_label[analog_id]->setText(
- tr("Deadzone: %1%").arg(slider_value));
- analogs_param[analog_id].Set("deadzone", slider_value / 100.0f);
- } else {
- analog_map_deadzone_and_modifier_slider_label[analog_id]->setText(
- tr("Modifier Scale: %1%").arg(slider_value));
- analogs_param[analog_id].Set("modifier_scale", slider_value / 100.0f);
- }
- });
+ connect(analog_map_deadzone_and_modifier_slider[analog_id], &QSlider::valueChanged,
+ [=, this] {
+ const float slider_value =
+ analog_map_deadzone_and_modifier_slider[analog_id]->value();
+ if (analogs_param[analog_id].Get("engine", "") == "sdl" ||
+ analogs_param[analog_id].Get("engine", "") == "gcpad") {
+ analog_map_deadzone_and_modifier_slider_label[analog_id]->setText(
+ tr("Deadzone: %1%").arg(slider_value));
+ analogs_param[analog_id].Set("deadzone", slider_value / 100.0f);
+ } else {
+ analog_map_deadzone_and_modifier_slider_label[analog_id]->setText(
+ tr("Modifier Scale: %1%").arg(slider_value));
+ analogs_param[analog_id].Set("modifier_scale", slider_value / 100.0f);
+ }
+ });
}
connect(ui->buttonClearAll, &QPushButton::clicked, [this] { ClearAll(); });
diff --git a/src/yuzu/configuration/configure_mouse_advanced.cpp b/src/yuzu/configuration/configure_mouse_advanced.cpp
index e0647ea5b..ea2549363 100644
--- a/src/yuzu/configuration/configure_mouse_advanced.cpp
+++ b/src/yuzu/configuration/configure_mouse_advanced.cpp
@@ -83,25 +83,28 @@ ConfigureMouseAdvanced::ConfigureMouseAdvanced(QWidget* parent)
}
button->setContextMenuPolicy(Qt::CustomContextMenu);
- connect(button, &QPushButton::clicked, [=] {
- HandleClick(
- button_map[button_id],
- [=](const Common::ParamPackage& params) { buttons_param[button_id] = params; },
- InputCommon::Polling::DeviceType::Button);
- });
- connect(button, &QPushButton::customContextMenuRequested, [=](const QPoint& menu_location) {
- QMenu context_menu;
- context_menu.addAction(tr("Clear"), [&] {
- buttons_param[button_id].Clear();
- button_map[button_id]->setText(tr("[not set]"));
- });
- context_menu.addAction(tr("Restore Default"), [&] {
- buttons_param[button_id] = Common::ParamPackage{
- InputCommon::GenerateKeyboardParam(Config::default_mouse_buttons[button_id])};
- button_map[button_id]->setText(ButtonToText(buttons_param[button_id]));
- });
- context_menu.exec(button_map[button_id]->mapToGlobal(menu_location));
+ connect(button, &QPushButton::clicked, [=, this] {
+ HandleClick(button_map[button_id],
+ [=, this](const Common::ParamPackage& params) {
+ buttons_param[button_id] = params;
+ },
+ InputCommon::Polling::DeviceType::Button);
});
+ connect(button, &QPushButton::customContextMenuRequested,
+ [=, this](const QPoint& menu_location) {
+ QMenu context_menu;
+ context_menu.addAction(tr("Clear"), [&] {
+ buttons_param[button_id].Clear();
+ button_map[button_id]->setText(tr("[not set]"));
+ });
+ context_menu.addAction(tr("Restore Default"), [&] {
+ buttons_param[button_id] =
+ Common::ParamPackage{InputCommon::GenerateKeyboardParam(
+ Config::default_mouse_buttons[button_id])};
+ button_map[button_id]->setText(ButtonToText(buttons_param[button_id]));
+ });
+ context_menu.exec(button_map[button_id]->mapToGlobal(menu_location));
+ });
}
connect(ui->buttonClearAll, &QPushButton::clicked, [this] { ClearAll(); });
diff --git a/src/yuzu/configuration/configure_ui.cpp b/src/yuzu/configuration/configure_ui.cpp
index 91c21c572..2c20b68d0 100644
--- a/src/yuzu/configuration/configure_ui.cpp
+++ b/src/yuzu/configuration/configure_ui.cpp
@@ -54,9 +54,9 @@ ConfigureUi::ConfigureUi(QWidget* parent) : QWidget(parent), ui(new Ui::Configur
// Update text ComboBoxes after user interaction.
connect(ui->row_1_text_combobox, QOverload<int>::of(&QComboBox::activated),
- [=]() { ConfigureUi::UpdateSecondRowComboBox(); });
+ [this] { ConfigureUi::UpdateSecondRowComboBox(); });
connect(ui->row_2_text_combobox, QOverload<int>::of(&QComboBox::activated),
- [=]() { ConfigureUi::UpdateFirstRowComboBox(); });
+ [this] { ConfigureUi::UpdateFirstRowComboBox(); });
// Set screenshot path to user specification.
connect(ui->screenshot_path_button, &QToolButton::pressed, this, [this] {
diff --git a/src/yuzu/game_list_worker.cpp b/src/yuzu/game_list_worker.cpp
index 2018150db..239016b94 100644
--- a/src/yuzu/game_list_worker.cpp
+++ b/src/yuzu/game_list_worker.cpp
@@ -369,8 +369,8 @@ void GameListWorker::run() {
auto* const game_list_dir = new GameListDir(game_dir);
emit DirEntryReady(game_list_dir);
provider->ClearAllEntries();
- ScanFileSystem(ScanTarget::FillManualContentProvider, game_dir.path.toStdString(), 2,
- game_list_dir);
+ ScanFileSystem(ScanTarget::FillManualContentProvider, game_dir.path.toStdString(),
+ game_dir.deep_scan ? 256 : 0, game_list_dir);
ScanFileSystem(ScanTarget::PopulateGameList, game_dir.path.toStdString(),
game_dir.deep_scan ? 256 : 0, game_list_dir);
}
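(Aside, not part of the patch: the game_list_worker change makes the FillManualContentProvider pass honour the per-directory "deep scan" setting, using the same depth as the PopulateGameList pass, 256 levels when enabled and 0, top level only, otherwise, instead of a fixed depth of 2. A minimal depth-limited scan sketch; all names here are hypothetical, not yuzu's actual helpers:)

// Sketch only: depth-limited recursive directory scan.
#include <filesystem>
#include <iostream>

namespace fs = std::filesystem;

void ScanDirectory(const fs::path& dir, unsigned max_depth, unsigned depth = 0) {
    for (const auto& entry : fs::directory_iterator(dir)) {
        if (entry.is_directory()) {
            if (depth < max_depth) {
                ScanDirectory(entry.path(), max_depth, depth + 1);  // recurse while depth allows
            }
        } else {
            std::cout << entry.path() << '\n';  // a real scanner would filter by extension
        }
    }
}

int main() {
    const bool deep_scan = true;
    ScanDirectory(".", deep_scan ? 256u : 0u);
}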
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp
index e26cec78c..592993c36 100644
--- a/src/yuzu/main.cpp
+++ b/src/yuzu/main.cpp
@@ -583,7 +583,7 @@ void GMainWindow::InitializeWidgets() {
renderer_status_button->setObjectName(QStringLiteral("RendererStatusBarButton"));
renderer_status_button->setCheckable(true);
renderer_status_button->setFocusPolicy(Qt::NoFocus);
- connect(renderer_status_button, &QPushButton::toggled, [=](bool checked) {
+ connect(renderer_status_button, &QPushButton::toggled, [this](bool checked) {
renderer_status_button->setText(checked ? tr("VULKAN") : tr("OPENGL"));
});
renderer_status_button->toggle();
@@ -595,7 +595,7 @@ void GMainWindow::InitializeWidgets() {
#else
renderer_status_button->setChecked(Settings::values.renderer_backend.GetValue() ==
Settings::RendererBackend::Vulkan);
- connect(renderer_status_button, &QPushButton::clicked, [=] {
+ connect(renderer_status_button, &QPushButton::clicked, [this] {
if (emulation_running) {
return;
}