-rwxr-xr-x [-rw-r--r--]  .ci/scripts/linux/docker.sh | 2
-rwxr-xr-x [-rw-r--r--]  .ci/scripts/windows/docker.sh | 10
-rw-r--r--  CMakeModules/MinGWCross.cmake | 4
m---------  externals/cubeb | 0
m---------  externals/dynarmic | 0
-rw-r--r--  externals/microprofile/microprofile.h | 20
-rw-r--r--  externals/microprofile/microprofileui.h | 108
-rw-r--r--  externals/opus/CMakeLists.txt | 6
m---------  externals/sirit | 0
-rw-r--r--  src/CMakeLists.txt | 2
-rw-r--r--  src/audio_core/CMakeLists.txt | 3
-rw-r--r--  src/audio_core/audio_renderer.cpp | 32
-rw-r--r--  src/audio_core/audio_renderer.h | 5
-rw-r--r--  src/audio_core/behavior_info.cpp | 100
-rw-r--r--  src/audio_core/behavior_info.h | 66
-rw-r--r--  src/audio_core/common.h | 47
-rw-r--r--  src/common/bit_field.h | 2
-rw-r--r--  src/common/uuid.h | 5
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_64.cpp | 17
-rw-r--r--  src/core/arm/unicorn/arm_unicorn.cpp | 16
-rw-r--r--  src/core/arm/unicorn/arm_unicorn.h | 7
-rw-r--r--  src/core/crypto/key_manager.cpp | 3
-rw-r--r--  src/core/crypto/partition_data_manager.cpp | 7
-rw-r--r--  src/core/file_sys/program_metadata.cpp | 11
-rw-r--r--  src/core/file_sys/program_metadata.h | 6
-rw-r--r--  src/core/gdbstub/gdbstub.cpp | 7
-rw-r--r--  src/core/hle/kernel/handle_table.cpp | 2
-rw-r--r--  src/core/hle/kernel/memory/memory_block.h | 23
-rw-r--r--  src/core/hle/kernel/memory/memory_block_manager.cpp | 36
-rw-r--r--  src/core/hle/kernel/memory/memory_block_manager.h | 3
-rw-r--r--  src/core/hle/kernel/memory/page_table.cpp | 44
-rw-r--r--  src/core/hle/kernel/memory/page_table.h | 2
-rw-r--r--  src/core/hle/kernel/mutex.cpp | 5
-rw-r--r--  src/core/hle/kernel/physical_core.cpp | 4
-rw-r--r--  src/core/hle/kernel/process_capability.cpp | 30
-rw-r--r--  src/core/hle/kernel/readable_event.cpp | 13
-rw-r--r--  src/core/hle/kernel/resource_limit.cpp | 2
-rw-r--r--  src/core/hle/kernel/shared_memory.cpp | 8
-rw-r--r--  src/core/hle/kernel/shared_memory.h | 2
-rw-r--r--  src/core/hle/kernel/svc.cpp | 154
-rw-r--r--  src/core/hle/kernel/thread.cpp | 5
-rw-r--r--  src/core/hle/service/acc/acc.cpp | 59
-rw-r--r--  src/core/hle/service/acc/acc.h | 3
-rw-r--r--  src/core/hle/service/acc/acc_su.cpp | 4
-rw-r--r--  src/core/hle/service/acc/acc_u0.cpp | 2
-rw-r--r--  src/core/hle/service/acc/acc_u1.cpp | 3
-rw-r--r--  src/core/hle/service/am/am.cpp | 71
-rw-r--r--  src/core/hle/service/am/am.h | 3
-rw-r--r--  src/core/hle/service/audio/audctl.cpp | 2
-rw-r--r--  src/core/hle/service/audio/audin_u.cpp | 70
-rw-r--r--  src/core/hle/service/audio/audin_u.h | 29
-rw-r--r--  src/core/hle/service/audio/audren_u.cpp | 13
-rw-r--r--  src/core/hle/service/bcat/backend/boxcat.cpp | 7
-rw-r--r--  src/core/hle/service/bcat/module.cpp | 1
-rw-r--r--  src/core/hle/service/caps/caps_su.cpp | 12
-rw-r--r--  src/core/hle/service/caps/caps_su.h | 3
-rw-r--r--  src/core/hle/service/es/es.cpp | 2
-rw-r--r--  src/core/hle/service/filesystem/fsp_srv.cpp | 8
-rw-r--r--  src/core/hle/service/friend/friend.cpp | 1
-rw-r--r--  src/core/hle/service/glue/errors.h | 8
-rw-r--r--  src/core/hle/service/hid/controllers/npad.cpp | 2
-rw-r--r--  src/core/hle/service/hid/hid.cpp | 50
-rw-r--r--  src/core/hle/service/hid/hid.h | 1
-rw-r--r--  src/core/hle/service/ldr/ldr.cpp | 1
-rw-r--r--  src/core/hle/service/ncm/ncm.cpp | 1
-rw-r--r--  src/core/hle/service/nim/nim.cpp | 70
-rw-r--r--  src/core/hle/service/npns/npns.cpp | 2
-rw-r--r--  src/core/hle/service/ns/ns.cpp | 30
-rw-r--r--  src/core/hle/service/ns/pl_u.cpp | 1
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h | 5
-rw-r--r--  src/core/hle/service/pctl/module.cpp | 2
-rw-r--r--  src/core/hle/service/prepo/prepo.cpp | 12
-rw-r--r--  src/core/hle/service/ptm/psm.cpp | 21
-rw-r--r--  src/core/hle/service/set/set.cpp | 1
-rw-r--r--  src/core/hle/service/set/set_cal.cpp | 2
-rw-r--r--  src/core/hle/service/set/set_sys.cpp | 12
-rw-r--r--  src/core/hle/service/sm/sm.cpp | 12
-rw-r--r--  src/core/hle/service/sockets/bsd.cpp | 1
-rw-r--r--  src/core/hle/service/time/time.cpp | 50
-rw-r--r--  src/core/hle/service/time/time_zone_manager.cpp | 4
-rw-r--r--  src/core/hle/service/vi/vi.cpp | 19
-rw-r--r--  src/core/loader/elf.cpp | 5
-rw-r--r--  src/core/loader/nro.cpp | 23
-rw-r--r--  src/core/loader/nro.h | 2
-rw-r--r--  src/core/reporter.h | 1
-rw-r--r--  src/core/settings.cpp | 11
-rw-r--r--  src/core/settings.h | 13
-rw-r--r--  src/core/telemetry_session.cpp | 16
-rw-r--r--  src/input_common/main.cpp | 6
-rw-r--r--  src/tests/core/core_timing.cpp | 18
-rw-r--r--  src/video_core/CMakeLists.txt | 26
-rw-r--r--  src/video_core/buffer_cache/buffer_cache.h | 126
-rw-r--r--  src/video_core/buffer_cache/map_interval.h | 18
-rw-r--r--  src/video_core/dma_pusher.cpp | 31
-rw-r--r--  src/video_core/dma_pusher.h | 1
-rw-r--r--  src/video_core/engines/fermi_2d.cpp | 8
-rw-r--r--  src/video_core/engines/fermi_2d.h | 3
-rw-r--r--  src/video_core/engines/kepler_compute.cpp | 7
-rw-r--r--  src/video_core/engines/kepler_compute.h | 3
-rw-r--r--  src/video_core/engines/kepler_memory.cpp | 7
-rw-r--r--  src/video_core/engines/kepler_memory.h | 3
-rw-r--r--  src/video_core/engines/maxwell_3d.cpp | 90
-rw-r--r--  src/video_core/engines/maxwell_3d.h | 15
-rw-r--r--  src/video_core/engines/maxwell_dma.cpp | 18
-rw-r--r--  src/video_core/engines/maxwell_dma.h | 3
-rw-r--r--  src/video_core/engines/shader_bytecode.h | 7
-rw-r--r--  src/video_core/fence_manager.h | 172
-rw-r--r--  src/video_core/gpu.cpp | 84
-rw-r--r--  src/video_core/gpu.h | 39
-rw-r--r--  src/video_core/gpu_asynch.cpp | 4
-rw-r--r--  src/video_core/gpu_asynch.h | 2
-rw-r--r--  src/video_core/gpu_thread.cpp | 39
-rw-r--r--  src/video_core/gpu_thread.h | 11
-rw-r--r--  src/video_core/memory_manager.cpp | 18
-rw-r--r--  src/video_core/query_cache.h | 46
-rw-r--r--  src/video_core/rasterizer_interface.h | 21
-rw-r--r--  src/video_core/renderer_opengl/gl_buffer_cache.cpp | 5
-rw-r--r--  src/video_core/renderer_opengl/gl_buffer_cache.h | 2
-rw-r--r--  src/video_core/renderer_opengl/gl_fence_manager.cpp | 72
-rw-r--r--  src/video_core/renderer_opengl/gl_fence_manager.h | 53
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.cpp | 164
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.h | 14
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_cache.cpp | 99
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_cache.h | 3
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_decompiler.cpp | 68
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_decompiler.h | 33
-rw-r--r--  src/video_core/renderer_vulkan/fixed_pipeline_state.cpp | 512
-rw-r--r--  src/video_core/renderer_vulkan/fixed_pipeline_state.h | 372
-rw-r--r--  src/video_core/renderer_vulkan/nsight_aftermath_tracker.cpp | 220
-rw-r--r--  src/video_core/renderer_vulkan/nsight_aftermath_tracker.h | 87
-rw-r--r--  src/video_core/renderer_vulkan/renderer_vulkan.cpp | 3
-rw-r--r--  src/video_core/renderer_vulkan/renderer_vulkan.h | 1
-rw-r--r--  src/video_core/renderer_vulkan/vk_blit_screen.h | 1
-rw-r--r--  src/video_core/renderer_vulkan/vk_buffer_cache.cpp | 4
-rw-r--r--  src/video_core/renderer_vulkan/vk_buffer_cache.h | 5
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pass.cpp | 2
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pass.h | 2
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pipeline.cpp | 4
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pipeline.h | 2
-rw-r--r--  src/video_core/renderer_vulkan/vk_descriptor_pool.cpp | 1
-rw-r--r--  src/video_core/renderer_vulkan/vk_descriptor_pool.h | 2
-rw-r--r--  src/video_core/renderer_vulkan/vk_device.cpp | 36
-rw-r--r--  src/video_core/renderer_vulkan/vk_device.h | 19
-rw-r--r--  src/video_core/renderer_vulkan/vk_fence_manager.cpp | 101
-rw-r--r--  src/video_core/renderer_vulkan/vk_fence_manager.h | 74
-rw-r--r--  src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp | 96
-rw-r--r--  src/video_core/renderer_vulkan/vk_graphics_pipeline.h | 3
-rw-r--r--  src/video_core/renderer_vulkan/vk_memory_manager.cpp | 13
-rw-r--r--  src/video_core/renderer_vulkan/vk_memory_manager.h | 13
-rw-r--r--  src/video_core/renderer_vulkan/vk_pipeline_cache.cpp | 118
-rw-r--r--  src/video_core/renderer_vulkan/vk_pipeline_cache.h | 62
-rw-r--r--  src/video_core/renderer_vulkan/vk_query_cache.cpp | 16
-rw-r--r--  src/video_core/renderer_vulkan/vk_query_cache.h | 1
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.cpp | 218
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.h | 17
-rw-r--r--  src/video_core/renderer_vulkan/vk_renderpass_cache.cpp | 53
-rw-r--r--  src/video_core/renderer_vulkan/vk_renderpass_cache.h | 59
-rw-r--r--  src/video_core/renderer_vulkan/vk_sampler_cache.cpp | 3
-rw-r--r--  src/video_core/renderer_vulkan/vk_scheduler.cpp | 11
-rw-r--r--  src/video_core/renderer_vulkan/vk_scheduler.h | 1
-rw-r--r--  src/video_core/renderer_vulkan/vk_shader_decompiler.cpp | 83
-rw-r--r--  src/video_core/renderer_vulkan/vk_shader_decompiler.h | 4
-rw-r--r--  src/video_core/renderer_vulkan/vk_shader_util.cpp | 3
-rw-r--r--  src/video_core/renderer_vulkan/vk_shader_util.h | 1
-rw-r--r--  src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp | 11
-rw-r--r--  src/video_core/renderer_vulkan/vk_staging_buffer_pool.h | 3
-rw-r--r--  src/video_core/renderer_vulkan/vk_stream_buffer.cpp | 76
-rw-r--r--  src/video_core/renderer_vulkan/vk_stream_buffer.h | 5
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.cpp | 3
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.h | 6
-rw-r--r--  src/video_core/renderer_vulkan/vk_update_descriptor.h | 1
-rw-r--r--  src/video_core/renderer_vulkan/wrapper.cpp | 58
-rw-r--r--  src/video_core/renderer_vulkan/wrapper.h | 64
-rw-r--r--  src/video_core/shader/control_flow.cpp | 16
-rw-r--r--  src/video_core/shader/decode.cpp | 30
-rw-r--r--  src/video_core/shader/decode/arithmetic_half.cpp | 51
-rw-r--r--  src/video_core/shader/decode/arithmetic_integer.cpp | 35
-rw-r--r--  src/video_core/shader/decode/image.cpp | 18
-rw-r--r--  src/video_core/shader/decode/memory.cpp | 2
-rw-r--r--  src/video_core/shader/decode/register_set_predicate.cpp | 52
-rw-r--r--  src/video_core/shader/decode/texture.cpp | 203
-rw-r--r--  src/video_core/shader/memory_util.cpp | 77
-rw-r--r--  src/video_core/shader/memory_util.h | 47
-rw-r--r--  src/video_core/shader/node.h | 131
-rw-r--r--  src/video_core/shader/shader_ir.h | 37
-rw-r--r--  src/video_core/shader/track.cpp | 20
-rw-r--r--  src/video_core/texture_cache/surface_base.h | 18
-rw-r--r--  src/video_core/texture_cache/surface_params.cpp | 10
-rw-r--r--  src/video_core/texture_cache/texture_cache.h | 182
-rw-r--r--  src/video_core/textures/decoders.cpp | 3
-rw-r--r--  src/video_core/textures/decoders.h | 5
-rw-r--r--  src/yuzu/applets/profile_select.cpp | 2
-rw-r--r--  src/yuzu/configuration/config.cpp | 21
-rw-r--r--  src/yuzu/configuration/config.h | 2
-rw-r--r--  src/yuzu/configuration/configure_debug.cpp | 2
-rw-r--r--  src/yuzu/configuration/configure_debug.ui | 7
-rw-r--r--  src/yuzu/configuration/configure_filesystem.cpp | 2
-rw-r--r--  src/yuzu/configuration/configure_graphics_advanced.cpp | 7
-rw-r--r--  src/yuzu/configuration/configure_graphics_advanced.ui | 40
-rw-r--r--  src/yuzu/configuration/configure_hotkeys.cpp | 80
-rw-r--r--  src/yuzu/configuration/configure_hotkeys.h | 6
-rw-r--r--  src/yuzu/configuration/configure_hotkeys.ui | 39
-rw-r--r--  src/yuzu/configuration/configure_input_player.cpp | 60
-rw-r--r--  src/yuzu/configuration/configure_input_player.h | 6
-rw-r--r--  src/yuzu/configuration/configure_input_player.ui | 16
-rw-r--r--  src/yuzu/game_list_p.h | 14
-rw-r--r--  src/yuzu/main.cpp | 4
-rw-r--r--  src/yuzu_cmd/config.cpp | 8
-rw-r--r--  src/yuzu_cmd/default_ini.h | 9
-rw-r--r--  src/yuzu_cmd/emu_window/emu_window_sdl2.cpp | 7
-rw-r--r--  src/yuzu_cmd/emu_window/emu_window_sdl2_vk.cpp | 3
-rw-r--r--  src/yuzu_cmd/emu_window/emu_window_sdl2_vk.h | 2
-rw-r--r--  src/yuzu_tester/config.cpp | 6
213 files changed, 4700 insertions, 1859 deletions
diff --git a/.ci/scripts/linux/docker.sh b/.ci/scripts/linux/docker.sh
index f11878128..5559a527c 100644..100755
--- a/.ci/scripts/linux/docker.sh
+++ b/.ci/scripts/linux/docker.sh
@@ -5,7 +5,7 @@ cd /yuzu
ccache -s
mkdir build || true && cd build
-cmake .. -G Ninja -DDISPLAY_VERSION=$1 -DYUZU_USE_BUNDLED_UNICORN=ON -DYUZU_USE_QT_WEB_ENGINE=ON -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_COMPILER=/usr/lib/ccache/gcc -DCMAKE_CXX_COMPILER=/usr/lib/ccache/g++ -DYUZU_ENABLE_COMPATIBILITY_REPORTING=${ENABLE_COMPATIBILITY_REPORTING:-"OFF"} -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DUSE_DISCORD_PRESENCE=ON -DENABLE_VULKAN=No
+cmake .. -G Ninja -DDISPLAY_VERSION=$1 -DYUZU_USE_BUNDLED_UNICORN=ON -DYUZU_USE_QT_WEB_ENGINE=ON -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_COMPILER=/usr/lib/ccache/gcc -DCMAKE_CXX_COMPILER=/usr/lib/ccache/g++ -DYUZU_ENABLE_COMPATIBILITY_REPORTING=${ENABLE_COMPATIBILITY_REPORTING:-"OFF"} -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DUSE_DISCORD_PRESENCE=ON
ninja
diff --git a/.ci/scripts/windows/docker.sh b/.ci/scripts/windows/docker.sh
index beb554b65..5d5650d3b 100644..100755
--- a/.ci/scripts/windows/docker.sh
+++ b/.ci/scripts/windows/docker.sh
@@ -13,7 +13,7 @@ echo '' >> /bin/cmd
chmod +x /bin/cmd
mkdir build || true && cd build
-cmake .. -G Ninja -DDISPLAY_VERSION=$1 -DCMAKE_TOOLCHAIN_FILE="$(pwd)/../CMakeModules/MinGWCross.cmake" -DUSE_CCACHE=ON -DYUZU_USE_BUNDLED_UNICORN=ON -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DCMAKE_BUILD_TYPE=Release -DENABLE_VULKAN=No
+cmake .. -G Ninja -DDISPLAY_VERSION=$1 -DCMAKE_TOOLCHAIN_FILE="$(pwd)/../CMakeModules/MinGWCross.cmake" -DUSE_CCACHE=ON -DYUZU_USE_BUNDLED_UNICORN=ON -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DCMAKE_BUILD_TYPE=Release
ninja
# Clean up the dirty hacks
@@ -29,7 +29,13 @@ echo 'Prepare binaries...'
cd ..
mkdir package
-QT_PLATFORM_DLL_PATH='/usr/x86_64-w64-mingw32/lib/qt5/plugins/platforms/'
+if [ -d "/usr/x86_64-w64-mingw32/lib/qt5/plugins/platforms/" ]; then
+ QT_PLATFORM_DLL_PATH='/usr/x86_64-w64-mingw32/lib/qt5/plugins/platforms/'
+else
+ #fallback to qt
+ QT_PLATFORM_DLL_PATH='/usr/x86_64-w64-mingw32/lib/qt/plugins/platforms/'
+fi
+
find build/ -name "yuzu*.exe" -exec cp {} 'package' \;
# copy Qt plugins
diff --git a/CMakeModules/MinGWCross.cmake b/CMakeModules/MinGWCross.cmake
index 29ecd1ac4..b268e72d8 100644
--- a/CMakeModules/MinGWCross.cmake
+++ b/CMakeModules/MinGWCross.cmake
@@ -10,8 +10,8 @@ set(SDL2_PATH ${MINGW_PREFIX})
set(MINGW_TOOL_PREFIX ${CMAKE_SYSTEM_PROCESSOR}-w64-mingw32-)
# Specify the cross compiler
-set(CMAKE_C_COMPILER ${MINGW_TOOL_PREFIX}gcc-posix)
-set(CMAKE_CXX_COMPILER ${MINGW_TOOL_PREFIX}g++-posix)
+set(CMAKE_C_COMPILER ${MINGW_TOOL_PREFIX}gcc)
+set(CMAKE_CXX_COMPILER ${MINGW_TOOL_PREFIX}g++)
set(CMAKE_RC_COMPILER ${MINGW_TOOL_PREFIX}windres)
# Mingw tools
diff --git a/externals/cubeb b/externals/cubeb
-Subproject 6f2420de8f155b10330cf973900ac7bdbfee589
+Subproject 616d773441b5355800ce64197a699e6cd6b3617
diff --git a/externals/dynarmic b/externals/dynarmic
-Subproject 57b987c185ae6677861cbf781f08ed1649b0543
+Subproject e7166e8ba74d7b9c85e87afc0aaf667e7e84cfe
diff --git a/externals/microprofile/microprofile.h b/externals/microprofile/microprofile.h
index 9d830f7bf..0c0d0a4d3 100644
--- a/externals/microprofile/microprofile.h
+++ b/externals/microprofile/microprofile.h
@@ -910,14 +910,14 @@ typedef void* (*MicroProfileThreadFunc)(void*);
#ifndef _WIN32
typedef pthread_t MicroProfileThread;
-void MicroProfileThreadStart(MicroProfileThread* pThread, MicroProfileThreadFunc Func)
+inline void MicroProfileThreadStart(MicroProfileThread* pThread, MicroProfileThreadFunc Func)
{
pthread_attr_t Attr;
int r = pthread_attr_init(&Attr);
MP_ASSERT(r == 0);
pthread_create(pThread, &Attr, Func, 0);
}
-void MicroProfileThreadJoin(MicroProfileThread* pThread)
+inline void MicroProfileThreadJoin(MicroProfileThread* pThread)
{
int r = pthread_join(*pThread, 0);
MP_ASSERT(r == 0);
@@ -930,11 +930,11 @@ DWORD _stdcall ThreadTrampoline(void* pFunc)
return (uint32_t)F(0);
}
-void MicroProfileThreadStart(MicroProfileThread* pThread, MicroProfileThreadFunc Func)
+inline void MicroProfileThreadStart(MicroProfileThread* pThread, MicroProfileThreadFunc Func)
{
*pThread = CreateThread(0, 0, ThreadTrampoline, Func, 0, 0);
}
-void MicroProfileThreadJoin(MicroProfileThread* pThread)
+inline void MicroProfileThreadJoin(MicroProfileThread* pThread)
{
WaitForSingleObject(*pThread, INFINITE);
CloseHandle(*pThread);
@@ -1131,7 +1131,7 @@ inline void MicroProfileSetThreadLog(MicroProfileThreadLog* pLog)
pthread_setspecific(g_MicroProfileThreadLogKey, pLog);
}
#else
-MicroProfileThreadLog* MicroProfileGetThreadLog()
+inline MicroProfileThreadLog* MicroProfileGetThreadLog()
{
return g_MicroProfileThreadLog;
}
@@ -1247,7 +1247,7 @@ MicroProfileToken MicroProfileFindToken(const char* pGroup, const char* pName)
return MICROPROFILE_INVALID_TOKEN;
}
-uint16_t MicroProfileGetGroup(const char* pGroup, MicroProfileTokenType Type)
+inline uint16_t MicroProfileGetGroup(const char* pGroup, MicroProfileTokenType Type)
{
for(uint32_t i = 0; i < S.nGroupCount; ++i)
{
@@ -1276,7 +1276,7 @@ uint16_t MicroProfileGetGroup(const char* pGroup, MicroProfileTokenType Type)
return nGroupIndex;
}
-void MicroProfileRegisterGroup(const char* pGroup, const char* pCategory, uint32_t nColor)
+inline void MicroProfileRegisterGroup(const char* pGroup, const char* pCategory, uint32_t nColor)
{
int nCategoryIndex = -1;
for(uint32_t i = 0; i < S.nCategoryCount; ++i)
@@ -1442,7 +1442,7 @@ void MicroProfileGpuLeave(MicroProfileToken nToken_, uint64_t nTickStart)
}
}
-void MicroProfileContextSwitchPut(MicroProfileContextSwitch* pContextSwitch)
+inline void MicroProfileContextSwitchPut(MicroProfileContextSwitch* pContextSwitch)
{
if(S.nRunning || pContextSwitch->nTicks <= S.nPauseTicks)
{
@@ -1894,7 +1894,7 @@ void MicroProfileSetEnableAllGroups(bool bEnableAllGroups)
S.nAllGroupsWanted = bEnableAllGroups ? 1 : 0;
}
-void MicroProfileEnableCategory(const char* pCategory, bool bEnabled)
+inline void MicroProfileEnableCategory(const char* pCategory, bool bEnabled)
{
int nCategoryIndex = -1;
for(uint32_t i = 0; i < S.nCategoryCount; ++i)
@@ -2004,7 +2004,7 @@ void MicroProfileForceDisableGroup(const char* pGroup, MicroProfileTokenType Typ
}
-void MicroProfileCalcAllTimers(float* pTimers, float* pAverage, float* pMax, float* pCallAverage, float* pExclusive, float* pAverageExclusive, float* pMaxExclusive, float* pTotal, uint32_t nSize)
+inline void MicroProfileCalcAllTimers(float* pTimers, float* pAverage, float* pMax, float* pCallAverage, float* pExclusive, float* pAverageExclusive, float* pMaxExclusive, float* pTotal, uint32_t nSize)
{
for(uint32_t i = 0; i < S.nTotalTimers && i < nSize; ++i)
{
diff --git a/externals/microprofile/microprofileui.h b/externals/microprofile/microprofileui.h
index ddaebe55b..fe2410cf4 100644
--- a/externals/microprofile/microprofileui.h
+++ b/externals/microprofile/microprofileui.h
@@ -417,19 +417,19 @@ void MicroProfileToggleDisplayMode()
}
-void MicroProfileStringArrayClear(MicroProfileStringArray* pArray)
+inline void MicroProfileStringArrayClear(MicroProfileStringArray* pArray)
{
pArray->nNumStrings = 0;
pArray->pBufferPos = &pArray->Buffer[0];
}
-void MicroProfileStringArrayAddLiteral(MicroProfileStringArray* pArray, const char* pLiteral)
+inline void MicroProfileStringArrayAddLiteral(MicroProfileStringArray* pArray, const char* pLiteral)
{
MP_ASSERT(pArray->nNumStrings < MICROPROFILE_TOOLTIP_MAX_STRINGS);
pArray->ppStrings[pArray->nNumStrings++] = pLiteral;
}
-void MicroProfileStringArrayFormat(MicroProfileStringArray* pArray, const char* fmt, ...)
+inline void MicroProfileStringArrayFormat(MicroProfileStringArray* pArray, const char* fmt, ...)
{
MP_ASSERT(pArray->nNumStrings < MICROPROFILE_TOOLTIP_MAX_STRINGS);
pArray->ppStrings[pArray->nNumStrings++] = pArray->pBufferPos;
@@ -439,7 +439,7 @@ void MicroProfileStringArrayFormat(MicroProfileStringArray* pArray, const char*
va_end(args);
MP_ASSERT(pArray->pBufferPos < pArray->Buffer + MICROPROFILE_TOOLTIP_STRING_BUFFER_SIZE);
}
-void MicroProfileStringArrayCopy(MicroProfileStringArray* pDest, MicroProfileStringArray* pSrc)
+inline void MicroProfileStringArrayCopy(MicroProfileStringArray* pDest, MicroProfileStringArray* pSrc)
{
memcpy(&pDest->ppStrings[0], &pSrc->ppStrings[0], sizeof(pDest->ppStrings));
memcpy(&pDest->Buffer[0], &pSrc->Buffer[0], sizeof(pDest->Buffer));
@@ -456,7 +456,7 @@ void MicroProfileStringArrayCopy(MicroProfileStringArray* pDest, MicroProfileStr
pDest->nNumStrings = pSrc->nNumStrings;
}
-void MicroProfileFloatWindowSize(const char** ppStrings, uint32_t nNumStrings, uint32_t* pColors, uint32_t& nWidth, uint32_t& nHeight, uint32_t* pStringLengths = 0)
+inline void MicroProfileFloatWindowSize(const char** ppStrings, uint32_t nNumStrings, uint32_t* pColors, uint32_t& nWidth, uint32_t& nHeight, uint32_t* pStringLengths = 0)
{
uint32_t* nStringLengths = pStringLengths ? pStringLengths : (uint32_t*)alloca(nNumStrings * sizeof(uint32_t));
uint32_t nTextCount = nNumStrings/2;
@@ -474,7 +474,7 @@ void MicroProfileFloatWindowSize(const char** ppStrings, uint32_t nNumStrings, u
nHeight = (MICROPROFILE_TEXT_HEIGHT+1) * nTextCount + 2 * MICROPROFILE_BORDER_SIZE;
}
-void MicroProfileDrawFloatWindow(uint32_t nX, uint32_t nY, const char** ppStrings, uint32_t nNumStrings, uint32_t nColor, uint32_t* pColors = 0)
+inline void MicroProfileDrawFloatWindow(uint32_t nX, uint32_t nY, const char** ppStrings, uint32_t nNumStrings, uint32_t nColor, uint32_t* pColors = 0)
{
uint32_t nWidth = 0, nHeight = 0;
uint32_t* nStringLengths = (uint32_t*)alloca(nNumStrings * sizeof(uint32_t));
@@ -503,7 +503,7 @@ void MicroProfileDrawFloatWindow(uint32_t nX, uint32_t nY, const char** ppString
nY += (MICROPROFILE_TEXT_HEIGHT+1);
}
}
-void MicroProfileDrawTextBox(uint32_t nX, uint32_t nY, const char** ppStrings, uint32_t nNumStrings, uint32_t nColor, uint32_t* pColors = 0)
+inline void MicroProfileDrawTextBox(uint32_t nX, uint32_t nY, const char** ppStrings, uint32_t nNumStrings, uint32_t nColor, uint32_t* pColors = 0)
{
uint32_t nWidth = 0, nHeight = 0;
uint32_t* nStringLengths = (uint32_t*)alloca(nNumStrings * sizeof(uint32_t));
@@ -529,7 +529,7 @@ void MicroProfileDrawTextBox(uint32_t nX, uint32_t nY, const char** ppStrings, u
-void MicroProfileToolTipMeta(MicroProfileStringArray* pToolTip)
+inline void MicroProfileToolTipMeta(MicroProfileStringArray* pToolTip)
{
MicroProfile& S = *MicroProfileGet();
if(UI.nRangeBeginIndex != UI.nRangeEndIndex && UI.pRangeLog)
@@ -608,7 +608,7 @@ void MicroProfileToolTipMeta(MicroProfileStringArray* pToolTip)
}
}
-void MicroProfileDrawFloatTooltip(uint32_t nX, uint32_t nY, uint32_t nToken, uint64_t nTime)
+inline void MicroProfileDrawFloatTooltip(uint32_t nX, uint32_t nY, uint32_t nToken, uint64_t nTime)
{
MicroProfile& S = *MicroProfileGet();
@@ -718,7 +718,7 @@ void MicroProfileDrawFloatTooltip(uint32_t nX, uint32_t nY, uint32_t nToken, uin
}
-void MicroProfileZoomTo(int64_t nTickStart, int64_t nTickEnd)
+inline void MicroProfileZoomTo(int64_t nTickStart, int64_t nTickEnd)
{
MicroProfile& S = *MicroProfileGet();
@@ -728,7 +728,7 @@ void MicroProfileZoomTo(int64_t nTickStart, int64_t nTickEnd)
UI.fDetailedRangeTarget = MicroProfileLogTickDifference(nTickStart, nTickEnd) * fToMs;
}
-void MicroProfileCenter(int64_t nTickCenter)
+inline void MicroProfileCenter(int64_t nTickCenter)
{
MicroProfile& S = *MicroProfileGet();
int64_t nStart = S.Frames[S.nFrameCurrent].nFrameStartCpu;
@@ -739,7 +739,7 @@ void MicroProfileCenter(int64_t nTickCenter)
#ifdef MICROPROFILE_DEBUG
uint64_t* g_pMicroProfileDumpStart = 0;
uint64_t* g_pMicroProfileDumpEnd = 0;
-void MicroProfileDebugDumpRange()
+inline void MicroProfileDebugDumpRange()
{
MicroProfile& S = *MicroProfileGet();
if(g_pMicroProfileDumpStart != g_pMicroProfileDumpEnd)
@@ -777,7 +777,7 @@ void MicroProfileDebugDumpRange()
#define MICROPROFILE_HOVER_DIST 0.5f
-void MicroProfileDrawDetailedContextSwitchBars(uint32_t nY, uint32_t nThreadId, uint32_t nContextSwitchStart, uint32_t nContextSwitchEnd, int64_t nBaseTicks, uint32_t nBaseY)
+inline void MicroProfileDrawDetailedContextSwitchBars(uint32_t nY, uint32_t nThreadId, uint32_t nContextSwitchStart, uint32_t nContextSwitchEnd, int64_t nBaseTicks, uint32_t nBaseY)
{
MicroProfile& S = *MicroProfileGet();
int64_t nTickIn = -1;
@@ -841,7 +841,7 @@ void MicroProfileDrawDetailedContextSwitchBars(uint32_t nY, uint32_t nThreadId,
}
}
-void MicroProfileDrawDetailedBars(uint32_t nWidth, uint32_t nHeight, int nBaseY, int nSelectedFrame)
+inline void MicroProfileDrawDetailedBars(uint32_t nWidth, uint32_t nHeight, int nBaseY, int nSelectedFrame)
{
MicroProfile& S = *MicroProfileGet();
MP_DEBUG_DUMP_RANGE();
@@ -1325,7 +1325,7 @@ void MicroProfileDrawDetailedBars(uint32_t nWidth, uint32_t nHeight, int nBaseY,
}
-void MicroProfileDrawDetailedFrameHistory(uint32_t nWidth, uint32_t nHeight, uint32_t nBaseY, uint32_t nSelectedFrame)
+inline void MicroProfileDrawDetailedFrameHistory(uint32_t nWidth, uint32_t nHeight, uint32_t nBaseY, uint32_t nSelectedFrame)
{
MicroProfile& S = *MicroProfileGet();
@@ -1379,7 +1379,7 @@ void MicroProfileDrawDetailedFrameHistory(uint32_t nWidth, uint32_t nHeight, uin
}
MicroProfileDrawBox(fSelectionStart, nBaseY, fSelectionEnd, nBaseY+MICROPROFILE_FRAME_HISTORY_HEIGHT, MICROPROFILE_FRAME_HISTORY_COLOR_HIGHTLIGHT, MicroProfileBoxTypeFlat);
}
-void MicroProfileDrawDetailedView(uint32_t nWidth, uint32_t nHeight)
+inline void MicroProfileDrawDetailedView(uint32_t nWidth, uint32_t nHeight)
{
MicroProfile& S = *MicroProfileGet();
@@ -1416,11 +1416,11 @@ void MicroProfileDrawDetailedView(uint32_t nWidth, uint32_t nHeight)
MicroProfileDrawDetailedFrameHistory(nWidth, nHeight, nBaseY, nSelectedFrame);
}
-void MicroProfileDrawTextRight(uint32_t nX, uint32_t nY, uint32_t nColor, const char* pStr, uint32_t nStrLen)
+inline void MicroProfileDrawTextRight(uint32_t nX, uint32_t nY, uint32_t nColor, const char* pStr, uint32_t nStrLen)
{
MicroProfileDrawText(nX - nStrLen * (MICROPROFILE_TEXT_WIDTH+1), nY, nColor, pStr, nStrLen);
}
-void MicroProfileDrawHeader(int32_t nX, uint32_t nWidth, const char* pName)
+inline void MicroProfileDrawHeader(int32_t nX, uint32_t nWidth, const char* pName)
{
if(pName)
{
@@ -1432,7 +1432,7 @@ void MicroProfileDrawHeader(int32_t nX, uint32_t nWidth, const char* pName)
typedef void (*MicroProfileLoopGroupCallback)(uint32_t nTimer, uint32_t nIdx, uint64_t nGroupMask, uint32_t nX, uint32_t nY, void* pData);
-void MicroProfileLoopActiveGroupsDraw(int32_t nX, int32_t nY, const char* pName, MicroProfileLoopGroupCallback CB, void* pData)
+inline void MicroProfileLoopActiveGroupsDraw(int32_t nX, int32_t nY, const char* pName, MicroProfileLoopGroupCallback CB, void* pData)
{
MicroProfile& S = *MicroProfileGet();
nY += MICROPROFILE_TEXT_HEIGHT + 2;
@@ -1465,7 +1465,7 @@ void MicroProfileLoopActiveGroupsDraw(int32_t nX, int32_t nY, const char* pName,
}
-void MicroProfileCalcTimers(float* pTimers, float* pAverage, float* pMax, float* pCallAverage, float* pExclusive, float* pAverageExclusive, float* pMaxExclusive, uint64_t nGroup, uint32_t nSize)
+inline void MicroProfileCalcTimers(float* pTimers, float* pAverage, float* pMax, float* pCallAverage, float* pExclusive, float* pAverageExclusive, float* pMaxExclusive, uint64_t nGroup, uint32_t nSize)
{
MicroProfile& S = *MicroProfileGet();
@@ -1527,7 +1527,7 @@ void MicroProfileCalcTimers(float* pTimers, float* pAverage, float* pMax, float*
#define SBUF_MAX 32
-void MicroProfileDrawBarArrayCallback(uint32_t nTimer, uint32_t nIdx, uint64_t nGroupMask, uint32_t nX, uint32_t nY, void* pExtra)
+inline void MicroProfileDrawBarArrayCallback(uint32_t nTimer, uint32_t nIdx, uint64_t nGroupMask, uint32_t nX, uint32_t nY, void* pExtra)
{
const uint32_t nHeight = MICROPROFILE_TEXT_HEIGHT;
const uint32_t nTextWidth = 6 * (1+MICROPROFILE_TEXT_WIDTH);
@@ -1547,7 +1547,7 @@ void MicroProfileDrawBarArrayCallback(uint32_t nTimer, uint32_t nIdx, uint64_t n
}
-uint32_t MicroProfileDrawBarArray(int32_t nX, int32_t nY, float* pTimers, const char* pName, uint32_t nTotalHeight, float* pTimers2 = NULL)
+inline uint32_t MicroProfileDrawBarArray(int32_t nX, int32_t nY, float* pTimers, const char* pName, uint32_t nTotalHeight, float* pTimers2 = NULL)
{
const uint32_t nTextWidth = 6 * (1+MICROPROFILE_TEXT_WIDTH);
const uint32_t nWidth = MICROPROFILE_BAR_WIDTH;
@@ -1559,7 +1559,7 @@ uint32_t MicroProfileDrawBarArray(int32_t nX, int32_t nY, float* pTimers, const
return nWidth + 5 + nTextWidth;
}
-void MicroProfileDrawBarCallCountCallback(uint32_t nTimer, uint32_t nIdx, uint64_t nGroupMask, uint32_t nX, uint32_t nY, void* pExtra)
+inline void MicroProfileDrawBarCallCountCallback(uint32_t nTimer, uint32_t nIdx, uint64_t nGroupMask, uint32_t nX, uint32_t nY, void* pExtra)
{
MicroProfile& S = *MicroProfileGet();
char sBuffer[SBUF_MAX];
@@ -1567,7 +1567,7 @@ void MicroProfileDrawBarCallCountCallback(uint32_t nTimer, uint32_t nIdx, uint64
MicroProfileDrawText(nX, nY, (uint32_t)-1, sBuffer, nLen);
}
-uint32_t MicroProfileDrawBarCallCount(int32_t nX, int32_t nY, const char* pName)
+inline uint32_t MicroProfileDrawBarCallCount(int32_t nX, int32_t nY, const char* pName)
{
MicroProfileLoopActiveGroupsDraw(nX, nY, pName, MicroProfileDrawBarCallCountCallback, 0);
const uint32_t nTextWidth = 6 * MICROPROFILE_TEXT_WIDTH;
@@ -1581,7 +1581,7 @@ struct MicroProfileMetaAverageArgs
float fRcpFrames;
};
-void MicroProfileDrawBarMetaAverageCallback(uint32_t nTimer, uint32_t nIdx, uint64_t nGroupMask, uint32_t nX, uint32_t nY, void* pExtra)
+inline void MicroProfileDrawBarMetaAverageCallback(uint32_t nTimer, uint32_t nIdx, uint64_t nGroupMask, uint32_t nX, uint32_t nY, void* pExtra)
{
MicroProfileMetaAverageArgs* pArgs = (MicroProfileMetaAverageArgs*)pExtra;
uint64_t* pCounters = pArgs->pCounters;
@@ -1591,7 +1591,7 @@ void MicroProfileDrawBarMetaAverageCallback(uint32_t nTimer, uint32_t nIdx, uint
MicroProfileDrawText(nX - nLen * (MICROPROFILE_TEXT_WIDTH+1), nY, (uint32_t)-1, sBuffer, nLen);
}
-uint32_t MicroProfileDrawBarMetaAverage(int32_t nX, int32_t nY, uint64_t* pCounters, const char* pName, uint32_t nTotalHeight)
+inline uint32_t MicroProfileDrawBarMetaAverage(int32_t nX, int32_t nY, uint64_t* pCounters, const char* pName, uint32_t nTotalHeight)
{
if(!pName)
return 0;
@@ -1605,7 +1605,7 @@ uint32_t MicroProfileDrawBarMetaAverage(int32_t nX, int32_t nY, uint64_t* pCount
}
-void MicroProfileDrawBarMetaCountCallback(uint32_t nTimer, uint32_t nIdx, uint64_t nGroupMask, uint32_t nX, uint32_t nY, void* pExtra)
+inline void MicroProfileDrawBarMetaCountCallback(uint32_t nTimer, uint32_t nIdx, uint64_t nGroupMask, uint32_t nX, uint32_t nY, void* pExtra)
{
uint64_t* pCounters = (uint64_t*)pExtra;
char sBuffer[SBUF_MAX];
@@ -1613,7 +1613,7 @@ void MicroProfileDrawBarMetaCountCallback(uint32_t nTimer, uint32_t nIdx, uint64
MicroProfileDrawText(nX - nLen * (MICROPROFILE_TEXT_WIDTH+1), nY, (uint32_t)-1, sBuffer, nLen);
}
-uint32_t MicroProfileDrawBarMetaCount(int32_t nX, int32_t nY, uint64_t* pCounters, const char* pName, uint32_t nTotalHeight)
+inline uint32_t MicroProfileDrawBarMetaCount(int32_t nX, int32_t nY, uint64_t* pCounters, const char* pName, uint32_t nTotalHeight)
{
if(!pName)
return 0;
@@ -1625,7 +1625,7 @@ uint32_t MicroProfileDrawBarMetaCount(int32_t nX, int32_t nY, uint64_t* pCounter
return 5 + nTextWidth;
}
-void MicroProfileDrawBarLegendCallback(uint32_t nTimer, uint32_t nIdx, uint64_t nGroupMask, uint32_t nX, uint32_t nY, void* pExtra)
+inline void MicroProfileDrawBarLegendCallback(uint32_t nTimer, uint32_t nIdx, uint64_t nGroupMask, uint32_t nX, uint32_t nY, void* pExtra)
{
MicroProfile& S = *MicroProfileGet();
if (S.TimerInfo[nTimer].bGraph)
@@ -1640,7 +1640,7 @@ void MicroProfileDrawBarLegendCallback(uint32_t nTimer, uint32_t nIdx, uint64_t
}
}
-uint32_t MicroProfileDrawBarLegend(int32_t nX, int32_t nY, uint32_t nTotalHeight, uint32_t nMaxWidth)
+inline uint32_t MicroProfileDrawBarLegend(int32_t nX, int32_t nY, uint32_t nTotalHeight, uint32_t nMaxWidth)
{
MicroProfileDrawLineVertical(nX-5, nY, nTotalHeight, UI.nOpacityBackground | g_nMicroProfileBackColors[0]|g_nMicroProfileBackColors[1]);
MicroProfileLoopActiveGroupsDraw(nMaxWidth, nY, 0, MicroProfileDrawBarLegendCallback, 0);
@@ -1807,7 +1807,7 @@ void MicroProfileDumpTimers()
}
}
-void MicroProfileDrawBarView(uint32_t nScreenWidth, uint32_t nScreenHeight)
+inline void MicroProfileDrawBarView(uint32_t nScreenWidth, uint32_t nScreenHeight)
{
MicroProfile& S = *MicroProfileGet();
@@ -1951,7 +1951,7 @@ typedef const char* (*MicroProfileSubmenuCallback)(int, bool* bSelected);
typedef void (*MicroProfileClickCallback)(int);
-const char* MicroProfileUIMenuMode(int nIndex, bool* bSelected)
+inline const char* MicroProfileUIMenuMode(int nIndex, bool* bSelected)
{
MicroProfile& S = *MicroProfileGet();
switch(nIndex)
@@ -1979,7 +1979,7 @@ const char* MicroProfileUIMenuMode(int nIndex, bool* bSelected)
}
}
-const char* MicroProfileUIMenuGroups(int nIndex, bool* bSelected)
+inline const char* MicroProfileUIMenuGroups(int nIndex, bool* bSelected)
{
MicroProfile& S = *MicroProfileGet();
*bSelected = false;
@@ -2012,7 +2012,7 @@ const char* MicroProfileUIMenuGroups(int nIndex, bool* bSelected)
}
}
-const char* MicroProfileUIMenuAggregate(int nIndex, bool* bSelected)
+inline const char* MicroProfileUIMenuAggregate(int nIndex, bool* bSelected)
{
MicroProfile& S = *MicroProfileGet();
if(nIndex < sizeof(g_MicroProfileAggregatePresets)/sizeof(g_MicroProfileAggregatePresets[0]))
@@ -2032,7 +2032,7 @@ const char* MicroProfileUIMenuAggregate(int nIndex, bool* bSelected)
}
-const char* MicroProfileUIMenuTimers(int nIndex, bool* bSelected)
+inline const char* MicroProfileUIMenuTimers(int nIndex, bool* bSelected)
{
MicroProfile& S = *MicroProfileGet();
*bSelected = 0 != (S.nBars & (1 << nIndex));
@@ -2054,7 +2054,7 @@ const char* MicroProfileUIMenuTimers(int nIndex, bool* bSelected)
return 0;
}
-const char* MicroProfileUIMenuOptions(int nIndex, bool* bSelected)
+inline const char* MicroProfileUIMenuOptions(int nIndex, bool* bSelected)
{
MicroProfile& S = *MicroProfileGet();
if(nIndex >= MICROPROFILE_OPTION_SIZE) return 0;
@@ -2094,7 +2094,7 @@ const char* MicroProfileUIMenuOptions(int nIndex, bool* bSelected)
return UI.Options[nIndex].Text;
}
-const char* MicroProfileUIMenuPreset(int nIndex, bool* bSelected)
+inline const char* MicroProfileUIMenuPreset(int nIndex, bool* bSelected)
{
static char buf[128];
*bSelected = false;
@@ -2118,7 +2118,7 @@ const char* MicroProfileUIMenuPreset(int nIndex, bool* bSelected)
}
}
-const char* MicroProfileUIMenuCustom(int nIndex, bool* bSelected)
+inline const char* MicroProfileUIMenuCustom(int nIndex, bool* bSelected)
{
if((uint32_t)-1 == UI.nCustomActive)
{
@@ -2145,13 +2145,13 @@ const char* MicroProfileUIMenuCustom(int nIndex, bool* bSelected)
}
}
-const char* MicroProfileUIMenuEmpty(int nIndex, bool* bSelected)
+inline const char* MicroProfileUIMenuEmpty(int nIndex, bool* bSelected)
{
return 0;
}
-void MicroProfileUIClickMode(int nIndex)
+inline void MicroProfileUIClickMode(int nIndex)
{
MicroProfile& S = *MicroProfileGet();
switch(nIndex)
@@ -2176,7 +2176,7 @@ void MicroProfileUIClickMode(int nIndex)
}
}
-void MicroProfileUIClickGroups(int nIndex)
+inline void MicroProfileUIClickGroups(int nIndex)
{
MicroProfile& S = *MicroProfileGet();
if(nIndex == 0)
@@ -2208,7 +2208,7 @@ void MicroProfileUIClickGroups(int nIndex)
}
}
-void MicroProfileUIClickAggregate(int nIndex)
+inline void MicroProfileUIClickAggregate(int nIndex)
{
MicroProfile& S = *MicroProfileGet();
S.nAggregateFlip = g_MicroProfileAggregatePresets[nIndex];
@@ -2218,13 +2218,13 @@ void MicroProfileUIClickAggregate(int nIndex)
}
}
-void MicroProfileUIClickTimers(int nIndex)
+inline void MicroProfileUIClickTimers(int nIndex)
{
MicroProfile& S = *MicroProfileGet();
S.nBars ^= (1 << nIndex);
}
-void MicroProfileUIClickOptions(int nIndex)
+inline void MicroProfileUIClickOptions(int nIndex)
{
MicroProfile& S = *MicroProfileGet();
switch(UI.Options[nIndex].nSubType)
@@ -2271,7 +2271,7 @@ void MicroProfileUIClickOptions(int nIndex)
}
}
-void MicroProfileUIClickPreset(int nIndex)
+inline void MicroProfileUIClickPreset(int nIndex)
{
int nNumPresets = sizeof(g_MicroProfilePresetNames) / sizeof(g_MicroProfilePresetNames[0]);
int nIndexSave = nIndex - nNumPresets - 1;
@@ -2285,7 +2285,7 @@ void MicroProfileUIClickPreset(int nIndex)
}
}
-void MicroProfileUIClickCustom(int nIndex)
+inline void MicroProfileUIClickCustom(int nIndex)
{
if(nIndex == 0)
{
@@ -2298,13 +2298,13 @@ void MicroProfileUIClickCustom(int nIndex)
}
-void MicroProfileUIClickEmpty(int nIndex)
+inline void MicroProfileUIClickEmpty(int nIndex)
{
}
-void MicroProfileDrawMenu(uint32_t nWidth, uint32_t nHeight)
+inline void MicroProfileDrawMenu(uint32_t nWidth, uint32_t nHeight)
{
MicroProfile& S = *MicroProfileGet();
@@ -2489,7 +2489,7 @@ void MicroProfileDrawMenu(uint32_t nWidth, uint32_t nHeight)
}
-void MicroProfileMoveGraph()
+inline void MicroProfileMoveGraph()
{
int nZoom = UI.nMouseWheelDelta;
@@ -2536,7 +2536,7 @@ void MicroProfileMoveGraph()
UI.nOffsetY = 0;
}
-void MicroProfileDrawCustom(uint32_t nWidth, uint32_t nHeight)
+inline void MicroProfileDrawCustom(uint32_t nWidth, uint32_t nHeight)
{
if((uint32_t)-1 != UI.nCustomActive)
{
@@ -2633,7 +2633,7 @@ void MicroProfileDrawCustom(uint32_t nWidth, uint32_t nHeight)
}
}
}
-void MicroProfileDraw(uint32_t nWidth, uint32_t nHeight)
+inline void MicroProfileDraw(uint32_t nWidth, uint32_t nHeight)
{
MICROPROFILE_SCOPE(g_MicroProfileDraw);
MicroProfile& S = *MicroProfileGet();
@@ -3226,7 +3226,7 @@ void MicroProfileLoadPreset(const char* pSuffix)
}
}
-uint32_t MicroProfileCustomGroupFind(const char* pCustomName)
+inline uint32_t MicroProfileCustomGroupFind(const char* pCustomName)
{
for(uint32_t i = 0; i < UI.nCustomCount; ++i)
{
@@ -3238,7 +3238,7 @@ uint32_t MicroProfileCustomGroupFind(const char* pCustomName)
return (uint32_t)-1;
}
-uint32_t MicroProfileCustomGroup(const char* pCustomName)
+inline uint32_t MicroProfileCustomGroup(const char* pCustomName)
{
for(uint32_t i = 0; i < UI.nCustomCount; ++i)
{
@@ -3271,7 +3271,7 @@ void MicroProfileCustomGroup(const char* pCustomName, uint32_t nMaxTimers, uint3
UI.Custom[nIndex].nAggregateFlip = nAggregateFlip;
}
-void MicroProfileCustomGroupEnable(uint32_t nIndex)
+inline void MicroProfileCustomGroupEnable(uint32_t nIndex)
{
if(nIndex < UI.nCustomCount)
{
diff --git a/externals/opus/CMakeLists.txt b/externals/opus/CMakeLists.txt
index cbb393272..94a86551f 100644
--- a/externals/opus/CMakeLists.txt
+++ b/externals/opus/CMakeLists.txt
@@ -203,7 +203,11 @@ endif()
target_compile_definitions(opus PRIVATE OPUS_BUILD ENABLE_HARDENING)
if(NOT MSVC)
- target_compile_definitions(opus PRIVATE _FORTIFY_SOURCE=2)
+ if(MINGW)
+ target_compile_definitions(opus PRIVATE _FORTIFY_SOURCE=0)
+ else()
+ target_compile_definitions(opus PRIVATE _FORTIFY_SOURCE=2)
+ endif()
endif()
# It is strongly recommended to uncomment one of these VAR_ARRAYS: Use C99
diff --git a/externals/sirit b/externals/sirit
-Subproject a712959f1e373a33b48042b5934e288a243d595
+Subproject 414fc4dbd28d8fe48f735a0c389db8a234f733c
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 0913be72c..3a57356ab 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -54,8 +54,10 @@ else()
add_compile_options(
-Wall
-Werror=implicit-fallthrough
+ -Werror=missing-declarations
-Werror=reorder
-Wextra
+ -Wmissing-declarations
-Wno-attributes
-Wno-unused-parameter
)
diff --git a/src/audio_core/CMakeLists.txt b/src/audio_core/CMakeLists.txt
index c381dbe1d..5ef38a337 100644
--- a/src/audio_core/CMakeLists.txt
+++ b/src/audio_core/CMakeLists.txt
@@ -7,9 +7,12 @@ add_library(audio_core STATIC
audio_out.h
audio_renderer.cpp
audio_renderer.h
+ behavior_info.cpp
+ behavior_info.h
buffer.h
codec.cpp
codec.h
+ common.h
null_sink.h
sink.h
sink_details.cpp
diff --git a/src/audio_core/audio_renderer.cpp b/src/audio_core/audio_renderer.cpp
index 7a9dc61d4..d18ef6940 100644
--- a/src/audio_core/audio_renderer.cpp
+++ b/src/audio_core/audio_renderer.cpp
@@ -6,6 +6,7 @@
#include "audio_core/audio_out.h"
#include "audio_core/audio_renderer.h"
#include "audio_core/codec.h"
+#include "audio_core/common.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
@@ -79,7 +80,7 @@ AudioRenderer::AudioRenderer(Core::Timing::CoreTiming& core_timing, Core::Memory
std::size_t instance_number)
: worker_params{params}, buffer_event{buffer_event}, voices(params.voice_count),
effects(params.effect_count), memory{memory_} {
-
+ behavior_info.SetUserRevision(params.revision);
audio_out = std::make_unique<AudioCore::AudioOut>();
stream = audio_out->OpenStream(core_timing, STREAM_SAMPLE_RATE, STREAM_NUM_CHANNELS,
fmt::format("AudioRenderer-Instance{}", instance_number),
@@ -109,17 +110,17 @@ Stream::State AudioRenderer::GetStreamState() const {
return stream->GetState();
}
-static constexpr u32 VersionFromRevision(u32_le rev) {
- // "REV7" -> 7
- return ((rev >> 24) & 0xff) - 0x30;
-}
-
-std::vector<u8> AudioRenderer::UpdateAudioRenderer(const std::vector<u8>& input_params) {
+ResultVal<std::vector<u8>> AudioRenderer::UpdateAudioRenderer(const std::vector<u8>& input_params) {
// Copy UpdateDataHeader struct
UpdateDataHeader config{};
std::memcpy(&config, input_params.data(), sizeof(UpdateDataHeader));
u32 memory_pool_count = worker_params.effect_count + (worker_params.voice_count * 4);
+ if (!behavior_info.UpdateInput(input_params, sizeof(UpdateDataHeader))) {
+ LOG_ERROR(Audio, "Failed to update behavior info input parameters");
+ return Audren::ERR_INVALID_PARAMETERS;
+ }
+
// Copy MemoryPoolInfo structs
std::vector<MemoryPoolInfo> mem_pool_info(memory_pool_count);
std::memcpy(mem_pool_info.data(),
@@ -173,8 +174,7 @@ std::vector<u8> AudioRenderer::UpdateAudioRenderer(const std::vector<u8>& input_
// Copy output header
UpdateDataHeader response_data{worker_params};
std::vector<u8> output_params(response_data.total_size);
- const auto audren_revision = VersionFromRevision(config.revision);
- if (audren_revision >= 5) {
+ if (behavior_info.IsElapsedFrameCountSupported()) {
response_data.frame_count = 0x10;
response_data.total_size += 0x10;
}
@@ -200,7 +200,19 @@ std::vector<u8> AudioRenderer::UpdateAudioRenderer(const std::vector<u8>& input_
sizeof(EffectOutStatus));
effect_out_status_offset += sizeof(EffectOutStatus);
}
- return output_params;
+
+ // Update behavior info output
+ const std::size_t behavior_out_status_offset{
+ sizeof(UpdateDataHeader) + response_data.memory_pools_size + response_data.voices_size +
+ response_data.effects_size + response_data.sinks_size +
+ response_data.performance_manager_size};
+
+ if (!behavior_info.UpdateOutput(output_params, behavior_out_status_offset)) {
+ LOG_ERROR(Audio, "Failed to update behavior info output parameters");
+ return Audren::ERR_INVALID_PARAMETERS;
+ }
+
+ return MakeResult(output_params);
}
void AudioRenderer::VoiceState::SetWaveIndex(std::size_t index) {
diff --git a/src/audio_core/audio_renderer.h b/src/audio_core/audio_renderer.h
index 62faf9f19..b42770fae 100644
--- a/src/audio_core/audio_renderer.h
+++ b/src/audio_core/audio_renderer.h
@@ -8,11 +8,13 @@
#include <memory>
#include <vector>
+#include "audio_core/behavior_info.h"
#include "audio_core/stream.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
#include "core/hle/kernel/object.h"
+#include "core/hle/result.h"
namespace Core::Timing {
class CoreTiming;
@@ -226,7 +228,7 @@ public:
std::shared_ptr<Kernel::WritableEvent> buffer_event, std::size_t instance_number);
~AudioRenderer();
- std::vector<u8> UpdateAudioRenderer(const std::vector<u8>& input_params);
+ ResultVal<std::vector<u8>> UpdateAudioRenderer(const std::vector<u8>& input_params);
void QueueMixedBuffer(Buffer::Tag tag);
void ReleaseAndQueueBuffers();
u32 GetSampleRate() const;
@@ -237,6 +239,7 @@ public:
private:
class EffectState;
class VoiceState;
+ BehaviorInfo behavior_info{};
AudioRendererParameter worker_params;
std::shared_ptr<Kernel::WritableEvent> buffer_event;
diff --git a/src/audio_core/behavior_info.cpp b/src/audio_core/behavior_info.cpp
new file mode 100644
index 000000000..94b7a3bf1
--- /dev/null
+++ b/src/audio_core/behavior_info.cpp
@@ -0,0 +1,100 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <cstring>
+#include "audio_core/behavior_info.h"
+#include "audio_core/common.h"
+#include "common/logging/log.h"
+
+namespace AudioCore {
+
+BehaviorInfo::BehaviorInfo() : process_revision(CURRENT_PROCESS_REVISION) {}
+BehaviorInfo::~BehaviorInfo() = default;
+
+bool BehaviorInfo::UpdateInput(const std::vector<u8>& buffer, std::size_t offset) {
+ if (!CanConsumeBuffer(buffer.size(), offset, sizeof(InParams))) {
+ LOG_ERROR(Audio, "Buffer is an invalid size!");
+ return false;
+ }
+ InParams params{};
+ std::memcpy(&params, buffer.data() + offset, sizeof(InParams));
+
+ if (!IsValidRevision(params.revision)) {
+ LOG_ERROR(Audio, "Invalid input revision, revision=0x{:08X}", params.revision);
+ return false;
+ }
+
+ if (user_revision != params.revision) {
+ LOG_ERROR(Audio,
+ "User revision differs from input revision, expecting 0x{:08X} but got 0x{:08X}",
+ user_revision, params.revision);
+ return false;
+ }
+
+ ClearError();
+ UpdateFlags(params.flags);
+
+ // TODO(ogniK): Check input params size when InfoUpdater is used
+
+ return true;
+}
+
+bool BehaviorInfo::UpdateOutput(std::vector<u8>& buffer, std::size_t offset) {
+ if (!CanConsumeBuffer(buffer.size(), offset, sizeof(OutParams))) {
+ LOG_ERROR(Audio, "Buffer is an invalid size!");
+ return false;
+ }
+
+ OutParams params{};
+ std::memcpy(params.errors.data(), errors.data(), sizeof(ErrorInfo) * errors.size());
+ params.error_count = static_cast<u32_le>(error_count);
+ std::memcpy(buffer.data() + offset, &params, sizeof(OutParams));
+ return true;
+}
+
+void BehaviorInfo::ClearError() {
+ error_count = 0;
+}
+
+void BehaviorInfo::UpdateFlags(u64_le dest_flags) {
+ flags = dest_flags;
+}
+
+void BehaviorInfo::SetUserRevision(u32_le revision) {
+ user_revision = revision;
+}
+
+bool BehaviorInfo::IsAdpcmLoopContextBugFixed() const {
+ return IsRevisionSupported(2, user_revision);
+}
+
+bool BehaviorInfo::IsSplitterSupported() const {
+ return IsRevisionSupported(2, user_revision);
+}
+
+bool BehaviorInfo::IsLongSizePreDelaySupported() const {
+ return IsRevisionSupported(3, user_revision);
+}
+
+bool BehaviorInfo::IsAudioRenererProcessingTimeLimit80PercentSupported() const {
+ return IsRevisionSupported(5, user_revision);
+}
+
+bool BehaviorInfo::IsAudioRenererProcessingTimeLimit75PercentSupported() const {
+ return IsRevisionSupported(4, user_revision);
+}
+
+bool BehaviorInfo::IsAudioRenererProcessingTimeLimit70PercentSupported() const {
+ return IsRevisionSupported(1, user_revision);
+}
+
+bool BehaviorInfo::IsElapsedFrameCountSupported() const {
+ return IsRevisionSupported(5, user_revision);
+}
+
+bool BehaviorInfo::IsMemoryPoolForceMappingEnabled() const {
+ return (flags & 1) != 0;
+}
+
+} // namespace AudioCore
diff --git a/src/audio_core/behavior_info.h b/src/audio_core/behavior_info.h
new file mode 100644
index 000000000..c5e91ab39
--- /dev/null
+++ b/src/audio_core/behavior_info.h
@@ -0,0 +1,66 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <array>
+
+#include <vector>
+#include "common/common_funcs.h"
+#include "common/common_types.h"
+#include "common/swap.h"
+
+namespace AudioCore {
+class BehaviorInfo {
+public:
+ explicit BehaviorInfo();
+ ~BehaviorInfo();
+
+ bool UpdateInput(const std::vector<u8>& buffer, std::size_t offset);
+ bool UpdateOutput(std::vector<u8>& buffer, std::size_t offset);
+
+ void ClearError();
+ void UpdateFlags(u64_le dest_flags);
+ void SetUserRevision(u32_le revision);
+
+ bool IsAdpcmLoopContextBugFixed() const;
+ bool IsSplitterSupported() const;
+ bool IsLongSizePreDelaySupported() const;
+ bool IsAudioRenererProcessingTimeLimit80PercentSupported() const;
+ bool IsAudioRenererProcessingTimeLimit75PercentSupported() const;
+ bool IsAudioRenererProcessingTimeLimit70PercentSupported() const;
+ bool IsElapsedFrameCountSupported() const;
+ bool IsMemoryPoolForceMappingEnabled() const;
+
+private:
+ u32_le process_revision{};
+ u32_le user_revision{};
+ u64_le flags{};
+
+ struct ErrorInfo {
+ u32_le result{};
+ INSERT_PADDING_WORDS(1);
+ u64_le result_info{};
+ };
+ static_assert(sizeof(ErrorInfo) == 0x10, "ErrorInfo is an invalid size");
+
+ std::array<ErrorInfo, 10> errors{};
+ std::size_t error_count{};
+
+ struct InParams {
+ u32_le revision{};
+ u32_le padding{};
+ u64_le flags{};
+ };
+ static_assert(sizeof(InParams) == 0x10, "InParams is an invalid size");
+
+ struct OutParams {
+ std::array<ErrorInfo, 10> errors{};
+ u32_le error_count{};
+ INSERT_PADDING_BYTES(12);
+ };
+ static_assert(sizeof(OutParams) == 0xb0, "OutParams is an invalid size");
+};
+
+} // namespace AudioCore
diff --git a/src/audio_core/common.h b/src/audio_core/common.h
new file mode 100644
index 000000000..98478b66b
--- /dev/null
+++ b/src/audio_core/common.h
@@ -0,0 +1,47 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+#include "common/common_funcs.h"
+#include "common/common_types.h"
+#include "common/swap.h"
+#include "core/hle/result.h"
+
+namespace AudioCore {
+namespace Audren {
+constexpr ResultCode ERR_INVALID_PARAMETERS{ErrorModule::Audio, 41};
+}
+
+constexpr u32_le CURRENT_PROCESS_REVISION = Common::MakeMagic('R', 'E', 'V', '8');
+
+static constexpr u32 VersionFromRevision(u32_le rev) {
+ // "REV7" -> 7
+ return ((rev >> 24) & 0xff) - 0x30;
+}
+
+static constexpr bool IsRevisionSupported(u32 required, u32_le user_revision) {
+ const auto base = VersionFromRevision(user_revision);
+ return required <= base;
+}
+
+static constexpr bool IsValidRevision(u32_le revision) {
+ const auto base = VersionFromRevision(revision);
+ constexpr auto max_rev = VersionFromRevision(CURRENT_PROCESS_REVISION);
+ return base <= max_rev;
+}
+
+static constexpr bool CanConsumeBuffer(std::size_t size, std::size_t offset, std::size_t required) {
+ if (offset > size) {
+ return false;
+ }
+ if (size < required) {
+ return false;
+ }
+ if ((size - offset) < required) {
+ return false;
+ }
+ return true;
+}
+
+} // namespace AudioCore
diff --git a/src/common/bit_field.h b/src/common/bit_field.h
index fd2bbbd99..26ae6c7fc 100644
--- a/src/common/bit_field.h
+++ b/src/common/bit_field.h
@@ -180,7 +180,7 @@ public:
}
constexpr void Assign(const T& value) {
- storage = (static_cast<StorageType>(storage) & ~mask) | FormatValue(value);
+ storage = static_cast<StorageType>((storage & ~mask) | FormatValue(value));
}
constexpr T Value() const {
diff --git a/src/common/uuid.h b/src/common/uuid.h
index f6ad064fb..4d3af8cec 100644
--- a/src/common/uuid.h
+++ b/src/common/uuid.h
@@ -40,6 +40,11 @@ struct UUID {
uuid = INVALID_UUID;
}
+ // TODO(ogniK): Properly generate a Nintendo ID
+ constexpr u64 GetNintendoID() const {
+ return uuid[0];
+ }
+
std::string Format() const;
std::string FormatSwitch() const;
};
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index 9add5d363..337b97be9 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -20,6 +20,7 @@
#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/svc.h"
#include "core/memory.h"
+#include "core/settings.h"
namespace Core {
@@ -144,6 +145,8 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable&
config.page_table_address_space_bits = address_space_bits;
config.silently_mirror_page_table = false;
config.absolute_offset_page_table = true;
+ config.detect_misaligned_access_via_page_table = 16 | 32 | 64 | 128;
+ config.only_detect_misalignment_via_page_table_on_page_boundary = true;
// Multi-process state
config.processor_id = core_index;
@@ -159,8 +162,11 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable&
// Unpredictable instructions
config.define_unpredictable_behaviour = true;
- config.detect_misaligned_access_via_page_table = 16 | 32 | 64 | 128;
- config.only_detect_misalignment_via_page_table_on_page_boundary = true;
+ // Optimizations
+ if (Settings::values.disable_cpu_opt) {
+ config.enable_optimizations = false;
+ config.enable_fast_dispatch = false;
+ }
return std::make_shared<Dynarmic::A64::Jit>(config);
}
@@ -179,10 +185,9 @@ void ARM_Dynarmic_64::Step() {
ARM_Dynarmic_64::ARM_Dynarmic_64(System& system, ExclusiveMonitor& exclusive_monitor,
std::size_t core_index)
- : ARM_Interface{system},
- cb(std::make_unique<DynarmicCallbacks64>(*this)), inner_unicorn{system},
- core_index{core_index}, exclusive_monitor{
- dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)} {}
+ : ARM_Interface{system}, cb(std::make_unique<DynarmicCallbacks64>(*this)),
+ inner_unicorn{system, ARM_Unicorn::Arch::AArch64}, core_index{core_index},
+ exclusive_monitor{dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)} {}
ARM_Dynarmic_64::~ARM_Dynarmic_64() = default;
diff --git a/src/core/arm/unicorn/arm_unicorn.cpp b/src/core/arm/unicorn/arm_unicorn.cpp
index d189efb63..e40e9626a 100644
--- a/src/core/arm/unicorn/arm_unicorn.cpp
+++ b/src/core/arm/unicorn/arm_unicorn.cpp
@@ -11,6 +11,7 @@
#include "core/core_timing.h"
#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/svc.h"
+#include "core/memory.h"
namespace Core {
@@ -61,8 +62,9 @@ static bool UnmappedMemoryHook(uc_engine* uc, uc_mem_type type, u64 addr, int si
return false;
}
-ARM_Unicorn::ARM_Unicorn(System& system) : ARM_Interface{system} {
- CHECKED(uc_open(UC_ARCH_ARM64, UC_MODE_ARM, &uc));
+ARM_Unicorn::ARM_Unicorn(System& system, Arch architecture) : ARM_Interface{system} {
+ const auto arch = architecture == Arch::AArch32 ? UC_ARCH_ARM : UC_ARCH_ARM64;
+ CHECKED(uc_open(arch, UC_MODE_ARM, &uc));
auto fpv = 3 << 20;
CHECKED(uc_reg_write(uc, UC_ARM64_REG_CPACR_EL1, &fpv));
@@ -171,7 +173,17 @@ MICROPROFILE_DEFINE(ARM_Jit_Unicorn, "ARM JIT", "Unicorn", MP_RGB(255, 64, 64));
void ARM_Unicorn::ExecuteInstructions(std::size_t num_instructions) {
MICROPROFILE_SCOPE(ARM_Jit_Unicorn);
+
+ // Temporarily map the code page for Unicorn
+ u64 map_addr{GetPC() & ~Memory::PAGE_MASK};
+ std::vector<u8> page_buffer(Memory::PAGE_SIZE);
+ system.Memory().ReadBlock(map_addr, page_buffer.data(), page_buffer.size());
+
+ CHECKED(uc_mem_map_ptr(uc, map_addr, page_buffer.size(),
+ UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC, page_buffer.data()));
CHECKED(uc_emu_start(uc, GetPC(), 1ULL << 63, 0, num_instructions));
+ CHECKED(uc_mem_unmap(uc, map_addr, page_buffer.size()));
+
system.CoreTiming().AddTicks(num_instructions);
if (GDBStub::IsServerEnabled()) {
if (last_bkpt_hit && last_bkpt.type == GDBStub::BreakpointType::Execute) {
diff --git a/src/core/arm/unicorn/arm_unicorn.h b/src/core/arm/unicorn/arm_unicorn.h
index f30d13cb6..725c65085 100644
--- a/src/core/arm/unicorn/arm_unicorn.h
+++ b/src/core/arm/unicorn/arm_unicorn.h
@@ -15,7 +15,12 @@ class System;
class ARM_Unicorn final : public ARM_Interface {
public:
- explicit ARM_Unicorn(System& system);
+ enum class Arch {
+ AArch32, // 32-bit ARM
+ AArch64, // 64-bit ARM
+ };
+
+ explicit ARM_Unicorn(System& system, Arch architecture);
~ARM_Unicorn() override;
void SetPC(u64 pc) override;
diff --git a/src/core/crypto/key_manager.cpp b/src/core/crypto/key_manager.cpp
index 87e6a1fd3..8997c7082 100644
--- a/src/core/crypto/key_manager.cpp
+++ b/src/core/crypto/key_manager.cpp
@@ -1202,7 +1202,8 @@ const boost::container::flat_map<std::string, KeyIndex<S128KeyType>> KeyManager:
{S128KeyType::Source, static_cast<u64>(SourceKeyType::KeyAreaKey),
static_cast<u64>(KeyAreaKeyType::System)}},
{"titlekek_source", {S128KeyType::Source, static_cast<u64>(SourceKeyType::Titlekek), 0}},
- {"keyblob_mac_key_source", {S128KeyType::Source, static_cast<u64>(SourceKeyType::KeyblobMAC)}},
+ {"keyblob_mac_key_source",
+ {S128KeyType::Source, static_cast<u64>(SourceKeyType::KeyblobMAC), 0}},
{"tsec_key", {S128KeyType::TSEC, 0, 0}},
{"secure_boot_key", {S128KeyType::SecureBoot, 0, 0}},
{"sd_seed", {S128KeyType::SDSeed, 0, 0}},
diff --git a/src/core/crypto/partition_data_manager.cpp b/src/core/crypto/partition_data_manager.cpp
index d64302f2e..7ed71ac3a 100644
--- a/src/core/crypto/partition_data_manager.cpp
+++ b/src/core/crypto/partition_data_manager.cpp
@@ -202,8 +202,8 @@ static std::array<Key128, 0x20> FindEncryptedMasterKeyFromHex(const std::vector<
return out;
}
-FileSys::VirtualFile FindFileInDirWithNames(const FileSys::VirtualDir& dir,
- const std::string& name) {
+static FileSys::VirtualFile FindFileInDirWithNames(const FileSys::VirtualDir& dir,
+ const std::string& name) {
const auto upper = Common::ToUpper(name);
for (const auto& fname : {name, name + ".bin", upper, upper + ".BIN"}) {
@@ -345,8 +345,7 @@ FileSys::VirtualFile PartitionDataManager::GetPackage2Raw(Package2Type type) con
return package2.at(static_cast<size_t>(type));
}
-bool AttemptDecrypt(const std::array<u8, 16>& key, Package2Header& header) {
-
+static bool AttemptDecrypt(const std::array<u8, 16>& key, Package2Header& header) {
const std::vector<u8> iv(header.header_ctr.begin(), header.header_ctr.end());
Package2Header temp = header;
AESCipher<Key128> cipher(key, Mode::CTR);
diff --git a/src/core/file_sys/program_metadata.cpp b/src/core/file_sys/program_metadata.cpp
index 1d6c30962..43169bf9f 100644
--- a/src/core/file_sys/program_metadata.cpp
+++ b/src/core/file_sys/program_metadata.cpp
@@ -51,6 +51,17 @@ Loader::ResultStatus ProgramMetadata::Load(VirtualFile file) {
return Loader::ResultStatus::Success;
}
+/*static*/ ProgramMetadata ProgramMetadata::GetDefault() {
+ ProgramMetadata result;
+
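+ // Defaults for homebrew: a 64-bit process with a 39-bit address space, a 1 MiB main thread
+ // stack and unrestricted filesystem permissions.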
+ result.LoadManual(
+ true /*is_64_bit*/, FileSys::ProgramAddressSpaceType::Is39Bit /*address_space*/,
+ 0x2c /*main_thread_prio*/, 0 /*main_thread_core*/, 0x00100000 /*main_thread_stack_size*/,
+ {} /*title_id*/, 0xFFFFFFFFFFFFFFFF /*filesystem_permissions*/, {} /*capabilities*/);
+
+ return result;
+}
+
void ProgramMetadata::LoadManual(bool is_64_bit, ProgramAddressSpaceType address_space,
s32 main_thread_prio, u32 main_thread_core,
u32 main_thread_stack_size, u64 title_id,
diff --git a/src/core/file_sys/program_metadata.h b/src/core/file_sys/program_metadata.h
index f8759a396..35069972b 100644
--- a/src/core/file_sys/program_metadata.h
+++ b/src/core/file_sys/program_metadata.h
@@ -44,9 +44,13 @@ public:
ProgramMetadata();
~ProgramMetadata();
+ /// Gets a default ProgramMetadata configuration; this should only be used for homebrew formats
+ /// that do not provide an NPDM file.
+ static ProgramMetadata GetDefault();
+
Loader::ResultStatus Load(VirtualFile file);
- // Load from parameters instead of NPDM file, used for KIP
+ /// Load from parameters instead of NPDM file, used for KIP
void LoadManual(bool is_64_bit, ProgramAddressSpaceType address_space, s32 main_thread_prio,
u32 main_thread_core, u32 main_thread_stack_size, u64 title_id,
u64 filesystem_permissions, KernelCapabilityDescriptors capabilities);
diff --git a/src/core/gdbstub/gdbstub.cpp b/src/core/gdbstub/gdbstub.cpp
index 2f15635c5..70c0f8b80 100644
--- a/src/core/gdbstub/gdbstub.cpp
+++ b/src/core/gdbstub/gdbstub.cpp
@@ -1389,10 +1389,9 @@ void SendTrap(Kernel::Thread* thread, int trap) {
return;
}
- if (!halt_loop || current_thread == thread) {
- current_thread = thread;
- SendSignal(thread, trap);
- }
+ current_thread = thread;
+ SendSignal(thread, trap);
+
halt_loop = true;
send_trap = false;
}
diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp
index e441a27fc..35448b576 100644
--- a/src/core/hle/kernel/handle_table.cpp
+++ b/src/core/hle/kernel/handle_table.cpp
@@ -30,6 +30,7 @@ HandleTable::~HandleTable() = default;
ResultCode HandleTable::SetSize(s32 handle_table_size) {
if (static_cast<u32>(handle_table_size) > MAX_COUNT) {
+ LOG_ERROR(Kernel, "Handle table size {} is greater than {}", handle_table_size, MAX_COUNT);
return ERR_OUT_OF_MEMORY;
}
@@ -80,6 +81,7 @@ ResultVal<Handle> HandleTable::Duplicate(Handle handle) {
ResultCode HandleTable::Close(Handle handle) {
if (!IsValid(handle)) {
+ LOG_ERROR(Kernel, "Handle is not valid! handle={:08X}", handle);
return ERR_INVALID_HANDLE;
}
diff --git a/src/core/hle/kernel/memory/memory_block.h b/src/core/hle/kernel/memory/memory_block.h
index e11043b60..9db1f7b39 100644
--- a/src/core/hle/kernel/memory/memory_block.h
+++ b/src/core/hle/kernel/memory/memory_block.h
@@ -17,7 +17,7 @@ namespace Kernel::Memory {
enum class MemoryState : u32 {
None = 0,
- Mask = 0xFFFFFFFF, // TODO(bunnei): This should probable be 0xFF
+ Mask = 0xFF,
All = ~None,
FlagCanReprotect = (1 << 8),
@@ -253,6 +253,23 @@ public:
};
}
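+ // Tracks mappings of this block into a device address space (e.g. the GPU's) with a reference
+ // count; the DeviceShared attribute is set while at least one device mapping is active.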
+ void ShareToDevice(MemoryPermission /*new_perm*/) {
+ ASSERT((attribute & MemoryAttribute::DeviceShared) == MemoryAttribute::DeviceShared ||
+ device_use_count == 0);
+ attribute |= MemoryAttribute::DeviceShared;
+ const u16 new_use_count{++device_use_count};
+ ASSERT(new_use_count > 0);
+ }
+
+ void UnshareToDevice(MemoryPermission /*new_perm*/) {
+ ASSERT((attribute & MemoryAttribute::DeviceShared) == MemoryAttribute::DeviceShared);
+ const u16 prev_use_count{device_use_count--};
+ ASSERT(prev_use_count > 0);
+ if (prev_use_count == 1) {
+ attribute &= ~MemoryAttribute::DeviceShared;
+ }
+ }
+
private:
constexpr bool HasProperties(MemoryState s, MemoryPermission p, MemoryAttribute a) const {
constexpr MemoryAttribute AttributeIgnoreMask{MemoryAttribute::DontCareMask |
@@ -287,9 +304,9 @@ private:
state = new_state;
perm = new_perm;
- // TODO(bunnei): Is this right?
attribute = static_cast<MemoryAttribute>(
- new_attribute /*| (attribute & (MemoryAttribute::IpcLocked | MemoryAttribute::DeviceShared))*/);
+ new_attribute |
+ (attribute & (MemoryAttribute::IpcLocked | MemoryAttribute::DeviceShared)));
}
constexpr MemoryBlock Split(VAddr split_addr) {
diff --git a/src/core/hle/kernel/memory/memory_block_manager.cpp b/src/core/hle/kernel/memory/memory_block_manager.cpp
index 1ebc126c0..900395c37 100644
--- a/src/core/hle/kernel/memory/memory_block_manager.cpp
+++ b/src/core/hle/kernel/memory/memory_block_manager.cpp
@@ -143,6 +143,42 @@ void MemoryBlockManager::Update(VAddr addr, std::size_t num_pages, MemoryState s
}
}
+void MemoryBlockManager::UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func,
+ MemoryPermission perm) {
+ const std::size_t prev_count{memory_block_tree.size()};
+ const VAddr end_addr{addr + num_pages * PageSize};
+ iterator node{memory_block_tree.begin()};
+
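+ // Visit every block overlapping [addr, addr + num_pages * PageSize), splitting blocks at the
+ // range boundaries so lock_func is applied to exactly the requested range, then re-merge
+ // adjacent blocks where possible.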
+ while (node != memory_block_tree.end()) {
+ MemoryBlock* block{&(*node)};
+ iterator next_node{std::next(node)};
+ const VAddr cur_addr{block->GetAddress()};
+ const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
+
+ if (addr < cur_end_addr && cur_addr < end_addr) {
+ iterator new_node{node};
+
+ if (addr > cur_addr) {
+ memory_block_tree.insert(node, block->Split(addr));
+ }
+
+ if (end_addr < cur_end_addr) {
+ new_node = memory_block_tree.insert(node, block->Split(end_addr));
+ }
+
+ lock_func(new_node, perm);
+
+ MergeAdjacent(new_node, next_node);
+ }
+
+ if (cur_end_addr - 1 >= end_addr - 1) {
+ break;
+ }
+
+ node = next_node;
+ }
+}
+
void MemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& func) {
const_iterator it{FindIterator(start)};
MemoryInfo info{};
diff --git a/src/core/hle/kernel/memory/memory_block_manager.h b/src/core/hle/kernel/memory/memory_block_manager.h
index 0f2270f0f..9451b5df6 100644
--- a/src/core/hle/kernel/memory/memory_block_manager.h
+++ b/src/core/hle/kernel/memory/memory_block_manager.h
@@ -45,6 +45,9 @@ public:
MemoryPermission perm = MemoryPermission::None,
MemoryAttribute attribute = MemoryAttribute::None);
+ using LockFunc = std::function<void(iterator, MemoryPermission)>;
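+ /// Applies lock_func to every block overlapping the given range, splitting blocks at the range
+ /// boundaries as needed.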
+ void UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func, MemoryPermission perm);
+
using IterateFunc = std::function<void(const MemoryInfo&)>;
void IterateForRange(VAddr start, VAddr end, IterateFunc&& func);
diff --git a/src/core/hle/kernel/memory/page_table.cpp b/src/core/hle/kernel/memory/page_table.cpp
index 091e52ca4..3281611f8 100644
--- a/src/core/hle/kernel/memory/page_table.cpp
+++ b/src/core/hle/kernel/memory/page_table.cpp
@@ -840,6 +840,50 @@ ResultVal<VAddr> PageTable::AllocateAndMapMemory(std::size_t needed_num_pages, s
return MakeResult<VAddr>(addr);
}
+ResultCode PageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) {
+ std::lock_guard lock{page_table_lock};
+
+ MemoryPermission perm{};
+ if (const ResultCode result{CheckMemoryState(
+ nullptr, &perm, nullptr, addr, size, MemoryState::FlagCanChangeAttribute,
+ MemoryState::FlagCanChangeAttribute, MemoryPermission::None, MemoryPermission::None,
+ MemoryAttribute::LockedAndIpcLocked, MemoryAttribute::None,
+ MemoryAttribute::DeviceSharedAndUncached)};
+ result.IsError()) {
+ return result;
+ }
+
+ block_manager->UpdateLock(addr, size / PageSize,
+ [](MemoryBlockManager::iterator block, MemoryPermission perm) {
+ block->ShareToDevice(perm);
+ },
+ perm);
+
+ return RESULT_SUCCESS;
+}
+
+ResultCode PageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size) {
+ std::lock_guard lock{page_table_lock};
+
+ MemoryPermission perm{};
+ if (const ResultCode result{CheckMemoryState(
+ nullptr, &perm, nullptr, addr, size, MemoryState::FlagCanChangeAttribute,
+ MemoryState::FlagCanChangeAttribute, MemoryPermission::None, MemoryPermission::None,
+ MemoryAttribute::LockedAndIpcLocked, MemoryAttribute::None,
+ MemoryAttribute::DeviceSharedAndUncached)};
+ result.IsError()) {
+ return result;
+ }
+
+ block_manager->UpdateLock(addr, size / PageSize,
+ [](MemoryBlockManager::iterator block, MemoryPermission perm) {
+ block->UnshareToDevice(perm);
+ },
+ perm);
+
+ return RESULT_SUCCESS;
+}
+
ResultCode PageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
block_manager = std::make_unique<MemoryBlockManager>(start, end);
diff --git a/src/core/hle/kernel/memory/page_table.h b/src/core/hle/kernel/memory/page_table.h
index 80384ab0f..a867aa050 100644
--- a/src/core/hle/kernel/memory/page_table.h
+++ b/src/core/hle/kernel/memory/page_table.h
@@ -53,6 +53,8 @@ public:
bool is_map_only, VAddr region_start,
std::size_t region_num_pages, MemoryState state,
MemoryPermission perm, PAddr map_addr = 0);
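+ /// Marks the given range as shared with a device address space, incrementing its device-share
+ /// reference count and setting the DeviceShared attribute.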
+ ResultCode LockForDeviceAddressSpace(VAddr addr, std::size_t size);
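+ /// Releases a device share previously taken with LockForDeviceAddressSpace, clearing the
+ /// DeviceShared attribute once the last share is released.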
+ ResultCode UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
Common::PageTable& PageTableImpl() {
return page_table_impl;
diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp
index eff4e45b0..7869eb32b 100644
--- a/src/core/hle/kernel/mutex.cpp
+++ b/src/core/hle/kernel/mutex.cpp
@@ -7,6 +7,7 @@
#include <vector>
#include "common/assert.h"
+#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
@@ -67,6 +68,7 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
Handle requesting_thread_handle) {
// The mutex address must be 4-byte aligned
if ((address % sizeof(u32)) != 0) {
+ LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address);
return ERR_INVALID_ADDRESS;
}
@@ -88,6 +90,8 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
}
if (holding_thread == nullptr) {
+ LOG_ERROR(Kernel, "Holding thread does not exist! thread_handle={:08X}",
+ holding_thread_handle);
return ERR_INVALID_HANDLE;
}
@@ -109,6 +113,7 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
ResultCode Mutex::Release(VAddr address) {
// The mutex address must be 4-byte aligned
if ((address % sizeof(u32)) != 0) {
+ LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address);
return ERR_INVALID_ADDRESS;
}
diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp
index aa2787467..a15011076 100644
--- a/src/core/hle/kernel/physical_core.cpp
+++ b/src/core/hle/kernel/physical_core.cpp
@@ -27,7 +27,9 @@ PhysicalCore::PhysicalCore(Core::System& system, std::size_t id,
std::make_unique<Core::ARM_Dynarmic_64>(system, exclusive_monitor, core_index);
#else
- arm_interface = std::make_shared<Core::ARM_Unicorn>(system);
+ using Core::ARM_Unicorn;
+ arm_interface_32 = std::make_unique<ARM_Unicorn>(system, ARM_Unicorn::Arch::AArch32);
+ arm_interface_64 = std::make_unique<ARM_Unicorn>(system, ARM_Unicorn::Arch::AArch64);
LOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available");
#endif
diff --git a/src/core/hle/kernel/process_capability.cpp b/src/core/hle/kernel/process_capability.cpp
index 48e5ae682..63880f13d 100644
--- a/src/core/hle/kernel/process_capability.cpp
+++ b/src/core/hle/kernel/process_capability.cpp
@@ -3,6 +3,7 @@
// Refer to the license.txt file included.
#include "common/bit_util.h"
+#include "common/logging/log.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/memory/page_table.h"
@@ -119,22 +120,30 @@ ResultCode ProcessCapabilities::ParseCapabilities(const u32* capabilities,
// The MapPhysical type uses two descriptor flags for its parameters.
// If there's only one, then there's a problem.
if (i >= num_capabilities) {
+ LOG_ERROR(Kernel, "Invalid combination! i={}", i);
return ERR_INVALID_COMBINATION;
}
const auto size_flags = capabilities[i];
if (GetCapabilityType(size_flags) != CapabilityType::MapPhysical) {
+ LOG_ERROR(Kernel, "Invalid capability type! size_flags={}", size_flags);
return ERR_INVALID_COMBINATION;
}
const auto result = HandleMapPhysicalFlags(descriptor, size_flags, page_table);
if (result.IsError()) {
+ LOG_ERROR(Kernel, "Failed to map physical flags! descriptor={}, size_flags={}",
+ descriptor, size_flags);
return result;
}
} else {
const auto result =
ParseSingleFlagCapability(set_flags, set_svc_bits, descriptor, page_table);
if (result.IsError()) {
+ LOG_ERROR(
+ Kernel,
+ "Failed to parse capability flag! set_flags={}, set_svc_bits={}, descriptor={}",
+ set_flags, set_svc_bits, descriptor);
return result;
}
}
@@ -162,6 +171,9 @@ ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& s
const u32 flag_length = GetFlagBitOffset(type);
const u32 set_flag = 1U << flag_length;
if ((set_flag & set_flags & InitializeOnceMask) != 0) {
+ LOG_ERROR(Kernel,
+ "Attempted to initialize flags that may only be initialized once. set_flags={}",
+ set_flags);
return ERR_INVALID_COMBINATION;
}
set_flags |= set_flag;
@@ -187,6 +199,7 @@ ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& s
break;
}
+ LOG_ERROR(Kernel, "Invalid capability type! type={}", static_cast<u32>(type));
return ERR_INVALID_CAPABILITY_DESCRIPTOR;
}
@@ -208,23 +221,31 @@ void ProcessCapabilities::Clear() {
ResultCode ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
if (priority_mask != 0 || core_mask != 0) {
+ LOG_ERROR(Kernel, "Core or priority mask are not zero! priority_mask={}, core_mask={}",
+ priority_mask, core_mask);
return ERR_INVALID_CAPABILITY_DESCRIPTOR;
}
const u32 core_num_min = (flags >> 16) & 0xFF;
const u32 core_num_max = (flags >> 24) & 0xFF;
if (core_num_min > core_num_max) {
+ LOG_ERROR(Kernel, "Core min is greater than core max! core_num_min={}, core_num_max={}",
+ core_num_min, core_num_max);
return ERR_INVALID_COMBINATION;
}
const u32 priority_min = (flags >> 10) & 0x3F;
const u32 priority_max = (flags >> 4) & 0x3F;
if (priority_min > priority_max) {
+ LOG_ERROR(Kernel,
+ "Priority min is greater than priority max! priority_min={}, priority_max={}",
+ priority_min, priority_max);
return ERR_INVALID_COMBINATION;
}
// The switch only has 4 usable cores.
if (core_num_max >= 4) {
+ LOG_ERROR(Kernel, "Invalid max cores specified! core_num_max={}", core_num_max);
return ERR_INVALID_PROCESSOR_ID;
}
@@ -259,6 +280,7 @@ ResultCode ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags)
}
if (svc_number >= svc_capabilities.size()) {
+ LOG_ERROR(Kernel, "Process svc capability is out of range! svc_number={}", svc_number);
return ERR_OUT_OF_RANGE;
}
@@ -295,6 +317,8 @@ ResultCode ProcessCapabilities::HandleInterruptFlags(u32 flags) {
// emulate that, it's sufficient to mark every interrupt as defined.
if (interrupt >= interrupt_capabilities.size()) {
+ LOG_ERROR(Kernel, "Process interrupt capability is out of range! svc_number={}",
+ interrupt);
return ERR_OUT_OF_RANGE;
}
@@ -307,6 +331,7 @@ ResultCode ProcessCapabilities::HandleInterruptFlags(u32 flags) {
ResultCode ProcessCapabilities::HandleProgramTypeFlags(u32 flags) {
const u32 reserved = flags >> 17;
if (reserved != 0) {
+ LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
return ERR_RESERVED_VALUE;
}
@@ -324,6 +349,9 @@ ResultCode ProcessCapabilities::HandleKernelVersionFlags(u32 flags) {
const u32 major_version = kernel_version >> 19;
if (major_version != 0 || flags < 0x80000) {
+ LOG_ERROR(Kernel,
+ "Kernel version is non zero or flags are too small! major_version={}, flags={}",
+ major_version, flags);
return ERR_INVALID_CAPABILITY_DESCRIPTOR;
}
@@ -334,6 +362,7 @@ ResultCode ProcessCapabilities::HandleKernelVersionFlags(u32 flags) {
ResultCode ProcessCapabilities::HandleHandleTableFlags(u32 flags) {
const u32 reserved = flags >> 26;
if (reserved != 0) {
+ LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
return ERR_RESERVED_VALUE;
}
@@ -344,6 +373,7 @@ ResultCode ProcessCapabilities::HandleHandleTableFlags(u32 flags) {
ResultCode ProcessCapabilities::HandleDebugFlags(u32 flags) {
const u32 reserved = flags >> 19;
if (reserved != 0) {
+ LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
return ERR_RESERVED_VALUE;
}
diff --git a/src/core/hle/kernel/readable_event.cpp b/src/core/hle/kernel/readable_event.cpp
index 9d3d3a81b..00860fcbd 100644
--- a/src/core/hle/kernel/readable_event.cpp
+++ b/src/core/hle/kernel/readable_event.cpp
@@ -4,6 +4,7 @@
#include <algorithm>
#include "common/assert.h"
+#include "common/logging/log.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/readable_event.h"
@@ -23,10 +24,12 @@ void ReadableEvent::Acquire(Thread* thread) {
}
void ReadableEvent::Signal() {
- if (!is_signaled) {
- is_signaled = true;
- SynchronizationObject::Signal();
- };
+ if (is_signaled) {
+ return;
+ }
+
+ is_signaled = true;
+ SynchronizationObject::Signal();
}
void ReadableEvent::Clear() {
@@ -35,6 +38,8 @@ void ReadableEvent::Clear() {
ResultCode ReadableEvent::Reset() {
if (!is_signaled) {
+ LOG_ERROR(Kernel, "Handle is not signaled! object_id={}, object_type={}, object_name={}",
+ GetObjectId(), GetTypeName(), GetName());
return ERR_INVALID_STATE;
}
diff --git a/src/core/hle/kernel/resource_limit.cpp b/src/core/hle/kernel/resource_limit.cpp
index 96e5b9892..d9beaa3a4 100644
--- a/src/core/hle/kernel/resource_limit.cpp
+++ b/src/core/hle/kernel/resource_limit.cpp
@@ -69,6 +69,8 @@ ResultCode ResourceLimit::SetLimitValue(ResourceType resource, s64 value) {
limit[index] = value;
return RESULT_SUCCESS;
} else {
+ LOG_ERROR(Kernel, "Limit value is too large! resource={}, value={}, index={}",
+ static_cast<u32>(resource), value, index);
return ERR_INVALID_STATE;
}
}
diff --git a/src/core/hle/kernel/shared_memory.cpp b/src/core/hle/kernel/shared_memory.cpp
index c67696757..0cd467110 100644
--- a/src/core/hle/kernel/shared_memory.cpp
+++ b/src/core/hle/kernel/shared_memory.cpp
@@ -36,22 +36,22 @@ std::shared_ptr<SharedMemory> SharedMemory::Create(
}
ResultCode SharedMemory::Map(Process& target_process, VAddr address, std::size_t size,
- Memory::MemoryPermission permission) {
+ Memory::MemoryPermission permissions) {
const u64 page_count{(size + Memory::PageSize - 1) / Memory::PageSize};
if (page_list.GetNumPages() != page_count) {
UNIMPLEMENTED_MSG("Page count does not match");
}
- Memory::MemoryPermission expected =
+ const Memory::MemoryPermission expected =
&target_process == owner_process ? owner_permission : user_permission;
- if (permission != expected) {
+ if (permissions != expected) {
UNIMPLEMENTED_MSG("Permission does not match");
}
return target_process.PageTable().MapPages(address, page_list, Memory::MemoryState::Shared,
- permission);
+ permissions);
}
} // namespace Kernel
diff --git a/src/core/hle/kernel/shared_memory.h b/src/core/hle/kernel/shared_memory.h
index cd16d6412..0ef87235c 100644
--- a/src/core/hle/kernel/shared_memory.h
+++ b/src/core/hle/kernel/shared_memory.h
@@ -51,7 +51,7 @@ public:
* @param permissions Memory block map permissions (specified by SVC field)
*/
ResultCode Map(Process& target_process, VAddr address, std::size_t size,
- Memory::MemoryPermission permission);
+ Memory::MemoryPermission permissions);
/**
* Gets a pointer to the shared memory block
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 4134acf65..4ae4529f5 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -55,9 +55,6 @@ constexpr bool IsValidAddressRange(VAddr address, u64 size) {
return address + size > address;
}
-// 8 GiB
-constexpr u64 MAIN_MEMORY_SIZE = 0x200000000;
-
// Helper function that performs the common sanity checks for svcMapMemory
// and svcUnmapMemory. This is doable, as both functions perform their sanitizing
// in the same order.
@@ -688,6 +685,8 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
case GetInfoType::TotalPhysicalMemoryAvailableWithoutSystemResource:
case GetInfoType::TotalPhysicalMemoryUsedWithoutSystemResource: {
if (info_sub_id != 0) {
+ LOG_ERROR(Kernel_SVC, "Info sub id is non zero! info_id={}, info_sub_id={}", info_id,
+ info_sub_id);
return ERR_INVALID_ENUM_VALUE;
}
@@ -695,6 +694,8 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
system.Kernel().CurrentProcess()->GetHandleTable();
const auto process = current_process_handle_table.Get<Process>(static_cast<Handle>(handle));
if (!process) {
+ LOG_ERROR(Kernel_SVC, "Process is not valid! info_id={}, info_sub_id={}, handle={:08X}",
+ info_id, info_sub_id, handle);
return ERR_INVALID_HANDLE;
}
@@ -776,7 +777,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
break;
}
- LOG_WARNING(Kernel_SVC, "(STUBBED) Unimplemented svcGetInfo id=0x{:016X}", info_id);
+ LOG_ERROR(Kernel_SVC, "Unimplemented svcGetInfo id=0x{:016X}", info_id);
return ERR_INVALID_ENUM_VALUE;
}
@@ -786,10 +787,13 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
case GetInfoType::RegisterResourceLimit: {
if (handle != 0) {
+ LOG_ERROR(Kernel, "Handle is non zero! handle={:08X}", handle);
return ERR_INVALID_HANDLE;
}
if (info_sub_id != 0) {
+ LOG_ERROR(Kernel, "Info sub id is non zero! info_id={}, info_sub_id={}", info_id,
+ info_sub_id);
return ERR_INVALID_COMBINATION;
}
@@ -869,7 +873,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
}
default:
- LOG_WARNING(Kernel_SVC, "(STUBBED) Unimplemented svcGetInfo id=0x{:016X}", info_id);
+ LOG_ERROR(Kernel_SVC, "Unimplemented svcGetInfo id=0x{:016X}", info_id);
return ERR_INVALID_ENUM_VALUE;
}
}
@@ -1229,6 +1233,142 @@ static ResultCode QueryMemory32(Core::System& system, u32 memory_info_address,
return QueryMemory(system, memory_info_address, page_info_address, query_address);
}
+static ResultCode MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
+ u64 src_address, u64 size) {
+ LOG_DEBUG(Kernel_SVC,
+ "called. process_handle=0x{:08X}, dst_address=0x{:016X}, "
+ "src_address=0x{:016X}, size=0x{:016X}",
+ process_handle, dst_address, src_address, size);
+
+ if (!Common::Is4KBAligned(src_address)) {
+ LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).",
+ src_address);
+ return ERR_INVALID_ADDRESS;
+ }
+
+ if (!Common::Is4KBAligned(dst_address)) {
+ LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).",
+ dst_address);
+ return ERR_INVALID_ADDRESS;
+ }
+
+ if (size == 0 || !Common::Is4KBAligned(size)) {
+ LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X})", size);
+ return ERR_INVALID_SIZE;
+ }
+
+ if (!IsValidAddressRange(dst_address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Destination address range overflows the address space (dst_address=0x{:016X}, "
+ "size=0x{:016X}).",
+ dst_address, size);
+ return ERR_INVALID_ADDRESS_STATE;
+ }
+
+ if (!IsValidAddressRange(src_address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Source address range overflows the address space (src_address=0x{:016X}, "
+ "size=0x{:016X}).",
+ src_address, size);
+ return ERR_INVALID_ADDRESS_STATE;
+ }
+
+ const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
+ auto process = handle_table.Get<Process>(process_handle);
+ if (!process) {
+ LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).",
+ process_handle);
+ return ERR_INVALID_HANDLE;
+ }
+
+ auto& page_table = process->PageTable();
+ if (!page_table.IsInsideAddressSpace(src_address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Source address range is not within the address space (src_address=0x{:016X}, "
+ "size=0x{:016X}).",
+ src_address, size);
+ return ERR_INVALID_ADDRESS_STATE;
+ }
+
+ if (!page_table.IsInsideASLRRegion(dst_address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Destination address range is not within the ASLR region (dst_address=0x{:016X}, "
+ "size=0x{:016X}).",
+ dst_address, size);
+ return ERR_INVALID_MEMORY_RANGE;
+ }
+
+ return page_table.MapProcessCodeMemory(dst_address, src_address, size);
+}
+
+static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_handle,
+ u64 dst_address, u64 src_address, u64 size) {
+ LOG_DEBUG(Kernel_SVC,
+ "called. process_handle=0x{:08X}, dst_address=0x{:016X}, src_address=0x{:016X}, "
+ "size=0x{:016X}",
+ process_handle, dst_address, src_address, size);
+
+ if (!Common::Is4KBAligned(dst_address)) {
+ LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).",
+ dst_address);
+ return ERR_INVALID_ADDRESS;
+ }
+
+ if (!Common::Is4KBAligned(src_address)) {
+ LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).",
+ src_address);
+ return ERR_INVALID_ADDRESS;
+ }
+
+ if (size == 0 || !Common::Is4KBAligned(size)) {
+ LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X}).", size);
+ return ERR_INVALID_SIZE;
+ }
+
+ if (!IsValidAddressRange(dst_address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Destination address range overflows the address space (dst_address=0x{:016X}, "
+ "size=0x{:016X}).",
+ dst_address, size);
+ return ERR_INVALID_ADDRESS_STATE;
+ }
+
+ if (!IsValidAddressRange(src_address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Source address range overflows the address space (src_address=0x{:016X}, "
+ "size=0x{:016X}).",
+ src_address, size);
+ return ERR_INVALID_ADDRESS_STATE;
+ }
+
+ const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
+ auto process = handle_table.Get<Process>(process_handle);
+ if (!process) {
+ LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).",
+ process_handle);
+ return ERR_INVALID_HANDLE;
+ }
+
+ auto& page_table = process->PageTable();
+ if (!page_table.IsInsideAddressSpace(src_address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Source address range is not within the address space (src_address=0x{:016X}, "
+ "size=0x{:016X}).",
+ src_address, size);
+ return ERR_INVALID_ADDRESS_STATE;
+ }
+
+ if (!page_table.IsInsideASLRRegion(dst_address, size)) {
+ LOG_ERROR(Kernel_SVC,
+ "Destination address range is not within the ASLR region (dst_address=0x{:016X}, "
+ "size=0x{:016X}).",
+ dst_address, size);
+ return ERR_INVALID_MEMORY_RANGE;
+ }
+
+ return page_table.UnmapProcessCodeMemory(dst_address, src_address, size);
+}
+
/// Exits the current process
static void ExitProcess(Core::System& system) {
auto* current_process = system.Kernel().CurrentProcess();
@@ -2256,8 +2396,8 @@ static const FunctionDef SVC_Table_64[] = {
{0x74, nullptr, "MapProcessMemory"},
{0x75, nullptr, "UnmapProcessMemory"},
{0x76, SvcWrap64<QueryProcessMemory>, "QueryProcessMemory"},
- {0x77, nullptr, "MapProcessCodeMemory"},
- {0x78, nullptr, "UnmapProcessCodeMemory"},
+ {0x77, SvcWrap64<MapProcessCodeMemory>, "MapProcessCodeMemory"},
+ {0x78, SvcWrap64<UnmapProcessCodeMemory>, "UnmapProcessCodeMemory"},
{0x79, nullptr, "CreateProcess"},
{0x7A, nullptr, "StartProcess"},
{0x7B, nullptr, "TerminateProcess"},
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 4c0451c01..db7f379ac 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -150,8 +150,7 @@ static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context,
context.pc = entry_point;
context.sp = stack_top;
// TODO(merry): Perform a hardware test to determine the below value.
- // AHP = 0, DN = 1, FTZ = 1, RMode = Round towards zero
- context.fpcr = 0x03C00000;
+ context.fpcr = 0;
}
ResultVal<std::shared_ptr<Thread>> Thread::Create(KernelCore& kernel, std::string name,
@@ -424,6 +423,8 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
if (new_core == THREADPROCESSORID_DONT_UPDATE) {
new_core = use_override ? ideal_core_override : ideal_core;
if ((new_affinity_mask & (1ULL << new_core)) == 0) {
+ LOG_ERROR(Kernel, "New affinity mask is incorrect! new_core={}, new_affinity_mask={}",
+ new_core, new_affinity_mask);
return ERR_INVALID_COMBINATION;
}
}
diff --git a/src/core/hle/service/acc/acc.cpp b/src/core/hle/service/acc/acc.cpp
index cfac8ca9a..630a8b048 100644
--- a/src/core/hle/service/acc/acc.cpp
+++ b/src/core/hle/service/acc/acc.cpp
@@ -228,7 +228,8 @@ public:
class IManagerForApplication final : public ServiceFramework<IManagerForApplication> {
public:
- IManagerForApplication() : ServiceFramework("IManagerForApplication") {
+ explicit IManagerForApplication(Common::UUID user_id)
+ : ServiceFramework("IManagerForApplication"), user_id(user_id) {
// clang-format off
static const FunctionInfo functions[] = {
{0, &IManagerForApplication::CheckAvailability, "CheckAvailability"},
@@ -254,12 +255,14 @@ private:
}
void GetAccountId(Kernel::HLERequestContext& ctx) {
- LOG_WARNING(Service_ACC, "(STUBBED) called");
- // Should return a nintendo account ID
+ LOG_DEBUG(Service_ACC, "called");
+
IPC::ResponseBuilder rb{ctx, 4};
rb.Push(RESULT_SUCCESS);
- rb.PushRaw<u64>(1);
+ rb.PushRaw<u64>(user_id.GetNintendoID());
}
+
+ Common::UUID user_id;
};
void Module::Interface::GetUserCount(Kernel::HLERequestContext& ctx) {
@@ -319,46 +322,37 @@ void Module::Interface::IsUserRegistrationRequestPermitted(Kernel::HLERequestCon
void Module::Interface::InitializeApplicationInfo(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
- auto pid = rp.Pop<u64>();
- LOG_DEBUG(Service_ACC, "called, process_id={}", pid);
+ LOG_DEBUG(Service_ACC, "called");
IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(InitializeApplicationInfoBase(pid));
+ rb.Push(InitializeApplicationInfoBase());
}
void Module::Interface::InitializeApplicationInfoRestricted(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
- auto pid = rp.Pop<u64>();
- LOG_WARNING(Service_ACC, "(Partial implementation) called, process_id={}", pid);
+ LOG_WARNING(Service_ACC, "(Partial implementation) called");
// TODO(ogniK): We require checking if the user actually owns the title and what not. As of
// currently, we assume the user owns the title. InitializeApplicationInfoBase SHOULD be called
// first then we do extra checks if the game is a digital copy.
IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(InitializeApplicationInfoBase(pid));
+ rb.Push(InitializeApplicationInfoBase());
}
-ResultCode Module::Interface::InitializeApplicationInfoBase(u64 process_id) {
+ResultCode Module::Interface::InitializeApplicationInfoBase() {
if (application_info) {
LOG_ERROR(Service_ACC, "Application already initialized");
return ERR_ACCOUNTINFO_ALREADY_INITIALIZED;
}
- const auto& list = system.Kernel().GetProcessList();
- const auto iter = std::find_if(list.begin(), list.end(), [&process_id](const auto& process) {
- return process->GetProcessID() == process_id;
- });
-
- if (iter == list.end()) {
- LOG_ERROR(Service_ACC, "Failed to find process ID");
- application_info.application_type = ApplicationType::Unknown;
-
- return ERR_ACCOUNTINFO_BAD_APPLICATION;
- }
-
- const auto launch_property = system.GetARPManager().GetLaunchProperty((*iter)->GetTitleID());
+ // TODO(ogniK): This should be changed to reflect the target process once multiple processes
+ // are emulated. Since PID support is not implemented, assume the current (only) process is the
+ // target.
+ const auto& current_process = system.Kernel().CurrentProcess();
+ const auto launch_property =
+ system.GetARPManager().GetLaunchProperty(current_process->GetTitleID());
if (launch_property.Failed()) {
LOG_ERROR(Service_ACC, "Failed to get launch property");
@@ -372,10 +366,12 @@ ResultCode Module::Interface::InitializeApplicationInfoBase(u64 process_id) {
case FileSys::StorageId::Host:
case FileSys::StorageId::NandUser:
case FileSys::StorageId::SdCard:
+ case FileSys::StorageId::None: // Yuzu specific, differs from hardware
application_info.application_type = ApplicationType::Digital;
break;
default:
- LOG_ERROR(Service_ACC, "Invalid game storage ID");
+ LOG_ERROR(Service_ACC, "Invalid game storage ID! storage_id={}",
+ launch_property->base_game_storage_id);
return ERR_ACCOUNTINFO_BAD_APPLICATION;
}
@@ -389,7 +385,7 @@ void Module::Interface::GetBaasAccountManagerForApplication(Kernel::HLERequestCo
LOG_DEBUG(Service_ACC, "called");
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
- rb.PushIpcInterface<IManagerForApplication>();
+ rb.PushIpcInterface<IManagerForApplication>(profile_manager->GetLastOpenedUser());
}
void Module::Interface::IsUserAccountSwitchLocked(Kernel::HLERequestContext& ctx) {
@@ -428,6 +424,17 @@ void Module::Interface::GetProfileEditor(Kernel::HLERequestContext& ctx) {
rb.PushIpcInterface<IProfileEditor>(user_id, *profile_manager);
}
+void Module::Interface::ListQualifiedUsers(Kernel::HLERequestContext& ctx) {
+ LOG_DEBUG(Service_ACC, "called");
+
+ // All users are reported as qualified. Parental controls and Nintendo online checks are not
+ // implemented, so we assume the user running the game has access to it regardless of any
+ // parental control settings.
+ ctx.WriteBuffer(profile_manager->GetAllUsers());
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(RESULT_SUCCESS);
+}
+
void Module::Interface::TrySelectUserWithoutInteraction(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_ACC, "called");
// A u8 is passed into this function which we can safely ignore. It's to determine if we have
diff --git a/src/core/hle/service/acc/acc.h b/src/core/hle/service/acc/acc.h
index 7a7dc9ec6..74ca39d6e 100644
--- a/src/core/hle/service/acc/acc.h
+++ b/src/core/hle/service/acc/acc.h
@@ -33,9 +33,10 @@ public:
void TrySelectUserWithoutInteraction(Kernel::HLERequestContext& ctx);
void IsUserAccountSwitchLocked(Kernel::HLERequestContext& ctx);
void GetProfileEditor(Kernel::HLERequestContext& ctx);
+ void ListQualifiedUsers(Kernel::HLERequestContext& ctx);
private:
- ResultCode InitializeApplicationInfoBase(u64 process_id);
+ ResultCode InitializeApplicationInfoBase();
enum class ApplicationType : u32_le {
GameCard = 0,
diff --git a/src/core/hle/service/acc/acc_su.cpp b/src/core/hle/service/acc/acc_su.cpp
index b941c260b..2eefc6df5 100644
--- a/src/core/hle/service/acc/acc_su.cpp
+++ b/src/core/hle/service/acc/acc_su.cpp
@@ -33,8 +33,10 @@ ACC_SU::ACC_SU(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> p
{111, nullptr, "ClearSaveDataThumbnail"},
{112, nullptr, "LoadSaveDataThumbnail"},
{113, nullptr, "GetSaveDataThumbnailExistence"},
+ {120, nullptr, "ListOpenUsersInApplication"},
{130, nullptr, "ActivateOpenContextRetention"},
- {140, nullptr, "ListQualifiedUsers"},
+ {140, &ACC_SU::ListQualifiedUsers, "ListQualifiedUsers"},
+ {150, nullptr, "AuthenticateApplicationAsync"},
{190, nullptr, "GetUserLastOpenedApplication"},
{191, nullptr, "ActivateOpenContextHolder"},
{200, nullptr, "BeginUserRegistration"},
diff --git a/src/core/hle/service/acc/acc_u0.cpp b/src/core/hle/service/acc/acc_u0.cpp
index 0ac19f4ff..fb4e7e772 100644
--- a/src/core/hle/service/acc/acc_u0.cpp
+++ b/src/core/hle/service/acc/acc_u0.cpp
@@ -32,7 +32,7 @@ ACC_U0::ACC_U0(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> p
{130, nullptr, "LoadOpenContext"},
{131, nullptr, "ListOpenContextStoredUsers"},
{140, &ACC_U0::InitializeApplicationInfoRestricted, "InitializeApplicationInfoRestricted"},
- {141, nullptr, "ListQualifiedUsers"},
+ {141, &ACC_U0::ListQualifiedUsers, "ListQualifiedUsers"},
{150, &ACC_U0::IsUserAccountSwitchLocked, "IsUserAccountSwitchLocked"},
};
// clang-format on
diff --git a/src/core/hle/service/acc/acc_u1.cpp b/src/core/hle/service/acc/acc_u1.cpp
index 858e91dde..9f29cdc82 100644
--- a/src/core/hle/service/acc/acc_u1.cpp
+++ b/src/core/hle/service/acc/acc_u1.cpp
@@ -34,7 +34,8 @@ ACC_U1::ACC_U1(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> p
{112, nullptr, "LoadSaveDataThumbnail"},
{113, nullptr, "GetSaveDataThumbnailExistence"},
{130, nullptr, "ActivateOpenContextRetention"},
- {140, nullptr, "ListQualifiedUsers"},
+ {140, &ACC_U1::ListQualifiedUsers, "ListQualifiedUsers"},
+ {150, nullptr, "AuthenticateApplicationAsync"},
{190, nullptr, "GetUserLastOpenedApplication"},
{191, nullptr, "ActivateOpenContextHolder"},
{997, nullptr, "DebugInvalidateTokenCacheForUser"},
diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp
index 557608e76..a967e6ef7 100644
--- a/src/core/hle/service/am/am.cpp
+++ b/src/core/hle/service/am/am.cpp
@@ -43,20 +43,15 @@
namespace Service::AM {
-constexpr ResultCode ERR_NO_DATA_IN_CHANNEL{ErrorModule::AM, 0x2};
-constexpr ResultCode ERR_NO_MESSAGES{ErrorModule::AM, 0x3};
-constexpr ResultCode ERR_SIZE_OUT_OF_BOUNDS{ErrorModule::AM, 0x1F7};
+constexpr ResultCode ERR_NO_DATA_IN_CHANNEL{ErrorModule::AM, 2};
+constexpr ResultCode ERR_NO_MESSAGES{ErrorModule::AM, 3};
+constexpr ResultCode ERR_SIZE_OUT_OF_BOUNDS{ErrorModule::AM, 503};
enum class LaunchParameterKind : u32 {
ApplicationSpecific = 1,
AccountPreselectedUser = 2,
};
-enum class VrMode : u8 {
- Disabled = 0,
- Enabled = 1,
-};
-
constexpr u32 LAUNCH_PARAMETER_ACCOUNT_PRESELECTED_USER_MAGIC = 0xC79497CA;
struct LaunchParameterAccountPreselectedUser {
@@ -235,6 +230,7 @@ IDebugFunctions::IDebugFunctions() : ServiceFramework{"IDebugFunctions"} {
{30, nullptr, "RequestLaunchApplicationWithUserAndArgumentForDebug"},
{40, nullptr, "GetAppletResourceUsageInfo"},
{100, nullptr, "SetCpuBoostModeForApplet"},
+ {101, nullptr, "CancelCpuBoostModeForApplet"},
{110, nullptr, "PushToAppletBoundChannelForDebug"},
{111, nullptr, "TryPopFromAppletBoundChannelForDebug"},
{120, nullptr, "AlarmSettingNotificationEnableAppEventReserve"},
@@ -277,6 +273,8 @@ ISelfController::ISelfController(Core::System& system,
{41, nullptr, "IsSystemBufferSharingEnabled"},
{42, nullptr, "GetSystemSharedLayerHandle"},
{43, nullptr, "GetSystemSharedBufferHandle"},
+ {44, nullptr, "CreateManagedDisplaySeparableLayer"},
+ {45, nullptr, "SetManagedDisplayLayerSeparationMode"},
{50, &ISelfController::SetHandlesRequestToDisplay, "SetHandlesRequestToDisplay"},
{51, nullptr, "ApproveToDisplay"},
{60, nullptr, "OverrideAutoSleepTimeAndDimmingTime"},
@@ -623,11 +621,15 @@ ICommonStateGetter::ICommonStateGetter(Core::System& system,
{64, nullptr, "SetTvPowerStateMatchingMode"},
{65, nullptr, "GetApplicationIdByContentActionName"},
{66, &ICommonStateGetter::SetCpuBoostMode, "SetCpuBoostMode"},
+ {67, nullptr, "CancelCpuBoostMode"},
{80, nullptr, "PerformSystemButtonPressingIfInFocus"},
{90, nullptr, "SetPerformanceConfigurationChangedNotification"},
{91, nullptr, "GetCurrentPerformanceConfiguration"},
+ {100, nullptr, "SetHandlingHomeButtonShortPressedEnabled"},
{200, nullptr, "GetOperationModeSystemInfo"},
{300, nullptr, "GetSettingsPlatformRegion"},
+ {400, nullptr, "ActivateMigrationService"},
+ {401, nullptr, "DeactivateMigrationService"},
};
// clang-format on
@@ -678,27 +680,21 @@ void ICommonStateGetter::GetCurrentFocusState(Kernel::HLERequestContext& ctx) {
}
void ICommonStateGetter::IsVrModeEnabled(Kernel::HLERequestContext& ctx) {
- LOG_WARNING(Service_AM, "(STUBBED) called");
+ LOG_DEBUG(Service_AM, "called");
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
- rb.PushEnum(VrMode::Disabled);
+ rb.Push(vr_mode_state);
}
void ICommonStateGetter::SetVrModeEnabled(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
- const auto is_vr_mode_enabled = rp.Pop<bool>();
+ vr_mode_state = rp.Pop<bool>();
- LOG_WARNING(Service_AM, "(STUBBED) called. is_vr_mode_enabled={}", is_vr_mode_enabled);
+ LOG_WARNING(Service_AM, "VR Mode is {}", vr_mode_state ? "on" : "off");
IPC::ResponseBuilder rb{ctx, 2};
- if (!is_vr_mode_enabled) {
- rb.Push(RESULT_SUCCESS);
- } else {
- // TODO: Find better error code for this
- UNIMPLEMENTED_MSG("is_vr_mode_enabled={}", is_vr_mode_enabled);
- rb.Push(RESULT_UNKNOWN);
- }
+ rb.Push(RESULT_SUCCESS);
}
void ICommonStateGetter::SetLcdBacklighOffEnabled(Kernel::HLERequestContext& ctx) {
@@ -835,6 +831,7 @@ public:
{25, nullptr, "Terminate"},
{30, &ILibraryAppletAccessor::GetResult, "GetResult"},
{50, nullptr, "SetOutOfFocusApplicationSuspendingEnabled"},
+ {60, nullptr, "PresetLibraryAppletGpuTimeSliceZero"},
{100, &ILibraryAppletAccessor::PushInData, "PushInData"},
{101, &ILibraryAppletAccessor::PopOutData, "PopOutData"},
{102, nullptr, "PushExtraStorage"},
@@ -903,7 +900,7 @@ private:
void PopOutData(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_AM, "called");
- const auto storage = applet->GetBroker().PopNormalDataToGame();
+ auto storage = applet->GetBroker().PopNormalDataToGame();
if (storage == nullptr) {
LOG_ERROR(Service_AM,
"storage is a nullptr. There is no data in the current normal channel");
@@ -934,7 +931,7 @@ private:
void PopInteractiveOutData(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_AM, "called");
- const auto storage = applet->GetBroker().PopInteractiveDataToGame();
+ auto storage = applet->GetBroker().PopInteractiveDataToGame();
if (storage == nullptr) {
LOG_ERROR(Service_AM,
"storage is a nullptr. There is no data in the current interactive channel");
@@ -1139,6 +1136,7 @@ IApplicationFunctions::IApplicationFunctions(Core::System& system_)
{31, &IApplicationFunctions::EndBlockingHomeButtonShortAndLongPressed, "EndBlockingHomeButtonShortAndLongPressed"},
{32, &IApplicationFunctions::BeginBlockingHomeButton, "BeginBlockingHomeButton"},
{33, &IApplicationFunctions::EndBlockingHomeButton, "EndBlockingHomeButton"},
+ {34, nullptr, "SelectApplicationLicense"},
{40, &IApplicationFunctions::NotifyRunning, "NotifyRunning"},
{50, &IApplicationFunctions::GetPseudoDeviceId, "GetPseudoDeviceId"},
{60, nullptr, "SetMediaPlaybackStateForApplication"},
@@ -1148,6 +1146,7 @@ IApplicationFunctions::IApplicationFunctions(Core::System& system_)
{68, nullptr, "RequestFlushGamePlayingMovieForDebug"},
{70, nullptr, "RequestToShutdown"},
{71, nullptr, "RequestToReboot"},
+ {72, nullptr, "RequestToSleep"},
{80, nullptr, "ExitAndRequestToShowThanksMessage"},
{90, &IApplicationFunctions::EnableApplicationCrashReport, "EnableApplicationCrashReport"},
{100, &IApplicationFunctions::InitializeApplicationCopyrightFrameBuffer, "InitializeApplicationCopyrightFrameBuffer"},
@@ -1159,7 +1158,7 @@ IApplicationFunctions::IApplicationFunctions(Core::System& system_)
{121, nullptr, "ClearUserChannel"},
{122, nullptr, "UnpopToUserChannel"},
{130, &IApplicationFunctions::GetGpuErrorDetectedSystemEvent, "GetGpuErrorDetectedSystemEvent"},
- {140, nullptr, "GetFriendInvitationStorageChannelEvent"},
+ {140, &IApplicationFunctions::GetFriendInvitationStorageChannelEvent, "GetFriendInvitationStorageChannelEvent"},
{141, nullptr, "TryPopFromFriendInvitationStorageChannel"},
{150, nullptr, "GetNotificationStorageChannelEvent"},
{151, nullptr, "TryPopFromNotificationStorageChannel"},
@@ -1176,6 +1175,9 @@ IApplicationFunctions::IApplicationFunctions(Core::System& system_)
auto& kernel = system.Kernel();
gpu_error_detected_event = Kernel::WritableEvent::CreateEventPair(
kernel, "IApplicationFunctions:GpuErrorDetectedSystemEvent");
+
+ friend_invitation_storage_channel_event = Kernel::WritableEvent::CreateEventPair(
+ kernel, "IApplicationFunctions:FriendInvitationStorageChannelEvent");
}
IApplicationFunctions::~IApplicationFunctions() = default;
@@ -1333,12 +1335,23 @@ void IApplicationFunctions::SetTerminateResult(Kernel::HLERequestContext& ctx) {
}
void IApplicationFunctions::GetDisplayVersion(Kernel::HLERequestContext& ctx) {
- LOG_WARNING(Service_AM, "(STUBBED) called");
+ LOG_DEBUG(Service_AM, "called");
+
+ std::array<u8, 0x10> version_string{};
+
+ FileSys::PatchManager pm{system.CurrentProcess()->GetTitleID()};
+ const auto res = pm.GetControlMetadata();
+ if (res.first != nullptr) {
+ const auto& version = res.first->GetVersionString();
+ std::copy(version.begin(), version.end(), version_string.begin());
+ } else {
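+ // No control metadata is available (e.g. homebrew); fall back to a default raw version value.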
+ constexpr u128 default_version = {1, 0};
+ std::memcpy(version_string.data(), default_version.data(), sizeof(u128));
+ }
IPC::ResponseBuilder rb{ctx, 6};
rb.Push(RESULT_SUCCESS);
- rb.Push<u64>(1);
- rb.Push<u64>(0);
+ rb.PushRaw(version_string);
}
void IApplicationFunctions::GetDesiredLanguage(Kernel::HLERequestContext& ctx) {
@@ -1490,6 +1503,14 @@ void IApplicationFunctions::GetGpuErrorDetectedSystemEvent(Kernel::HLERequestCon
rb.PushCopyObjects(gpu_error_detected_event.readable);
}
+void IApplicationFunctions::GetFriendInvitationStorageChannelEvent(Kernel::HLERequestContext& ctx) {
+ LOG_DEBUG(Service_AM, "called");
+
+ IPC::ResponseBuilder rb{ctx, 2, 1};
+ rb.Push(RESULT_SUCCESS);
+ rb.PushCopyObjects(friend_invitation_storage_channel_event.readable);
+}
+
void InstallInterfaces(SM::ServiceManager& service_manager,
std::shared_ptr<NVFlinger::NVFlinger> nvflinger, Core::System& system) {
auto message_queue = std::make_shared<AppletMessageQueue>(system.Kernel());
diff --git a/src/core/hle/service/am/am.h b/src/core/hle/service/am/am.h
index 53cfce10f..dfa701d73 100644
--- a/src/core/hle/service/am/am.h
+++ b/src/core/hle/service/am/am.h
@@ -191,6 +191,7 @@ private:
Core::System& system;
std::shared_ptr<AppletMessageQueue> msg_queue;
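+ // Last VR mode state requested by the guest via SetVrModeEnabled; only the flag is tracked.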
+ bool vr_mode_state{};
};
class IStorageImpl {
@@ -280,10 +281,12 @@ private:
void QueryApplicationPlayStatistics(Kernel::HLERequestContext& ctx);
void QueryApplicationPlayStatisticsByUid(Kernel::HLERequestContext& ctx);
void GetGpuErrorDetectedSystemEvent(Kernel::HLERequestContext& ctx);
+ void GetFriendInvitationStorageChannelEvent(Kernel::HLERequestContext& ctx);
bool launch_popped_application_specific = false;
bool launch_popped_account_preselect = false;
Kernel::EventPair gpu_error_detected_event;
+ Kernel::EventPair friend_invitation_storage_channel_event;
Core::System& system;
};
diff --git a/src/core/hle/service/audio/audctl.cpp b/src/core/hle/service/audio/audctl.cpp
index 9e08e5346..6ddb547fb 100644
--- a/src/core/hle/service/audio/audctl.cpp
+++ b/src/core/hle/service/audio/audctl.cpp
@@ -39,6 +39,8 @@ AudCtl::AudCtl() : ServiceFramework{"audctl"} {
{25, nullptr, "GetAudioVolumeDataForPlayReport"},
{26, nullptr, "UpdateHeadphoneSettings"},
{27, nullptr, "SetVolumeMappingTableForDev"},
+ {28, nullptr, "GetAudioOutputChannelCountForPlayReport"},
+ {29, nullptr, "BindAudioOutputChannelCountUpdateEventForPlayReport"},
};
// clang-format on
diff --git a/src/core/hle/service/audio/audin_u.cpp b/src/core/hle/service/audio/audin_u.cpp
index d7f1d348d..3e2299426 100644
--- a/src/core/hle/service/audio/audin_u.cpp
+++ b/src/core/hle/service/audio/audin_u.cpp
@@ -2,6 +2,9 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include "common/logging/log.h"
+#include "core/hle/ipc_helpers.h"
+#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/service/audio/audin_u.h"
namespace Service::Audio {
@@ -36,11 +39,12 @@ public:
AudInU::AudInU() : ServiceFramework("audin:u") {
// clang-format off
static const FunctionInfo functions[] = {
- {0, nullptr, "ListAudioIns"},
- {1, nullptr, "OpenAudioIn"},
- {2, nullptr, "Unknown"},
- {3, nullptr, "OpenAudioInAuto"},
- {4, nullptr, "ListAudioInsAuto"},
+ {0, &AudInU::ListAudioIns, "ListAudioIns"},
+ {1, &AudInU::OpenAudioIn, "OpenAudioIn"},
+ {2, &AudInU::ListAudioIns, "ListAudioInsAuto"},
+ {3, &AudInU::OpenAudioIn, "OpenAudioInAuto"},
+ {4, &AudInU::ListAudioInsAutoFiltered, "ListAudioInsAutoFiltered"},
+ {5, &AudInU::OpenAudioInProtocolSpecified, "OpenAudioInProtocolSpecified"},
};
// clang-format on
@@ -49,4 +53,60 @@ AudInU::AudInU() : ServiceFramework("audin:u") {
AudInU::~AudInU() = default;
+void AudInU::ListAudioIns(Kernel::HLERequestContext& ctx) {
+ LOG_DEBUG(Service_Audio, "called");
+ const std::size_t count = ctx.GetWriteBufferSize() / sizeof(AudioInDeviceName);
+
+ const std::size_t device_count = std::min(count, audio_device_names.size());
+ std::vector<AudioInDeviceName> device_names;
+ device_names.reserve(device_count);
+
+ for (std::size_t i = 0; i < device_count; i++) {
+ const auto& device_name = audio_device_names[i];
+ auto& entry = device_names.emplace_back();
+ device_name.copy(entry.data(), device_name.size());
+ }
+
+ ctx.WriteBuffer(device_names);
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(RESULT_SUCCESS);
+ rb.Push(static_cast<u32>(device_names.size()));
+}
+
+void AudInU::ListAudioInsAutoFiltered(Kernel::HLERequestContext& ctx) {
+ LOG_DEBUG(Service_Audio, "called");
+ constexpr u32 device_count = 0;
+
+ // No audio input devices other than the default are emulated, so zero devices are returned.
+ // The filtered listing simply omits the default input device.
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(RESULT_SUCCESS);
+ rb.Push(static_cast<u32>(device_count));
+}
+
+void AudInU::OpenInOutImpl(Kernel::HLERequestContext& ctx) {
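+ // Stubbed parameters: always report a started, stereo, 16-bit PCM stream at 48 kHz, regardless
+ // of what the guest requested.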
+ AudInOutParams params{};
+ params.channel_count = 2;
+ params.sample_format = SampleFormat::PCM16;
+ params.sample_rate = 48000;
+ params.state = State::Started;
+
+ IPC::ResponseBuilder rb{ctx, 6, 0, 1};
+ rb.Push(RESULT_SUCCESS);
+ rb.PushRaw<AudInOutParams>(params);
+ rb.PushIpcInterface<IAudioIn>();
+}
+
+void AudInU::OpenAudioIn(Kernel::HLERequestContext& ctx) {
+ LOG_WARNING(Service_Audio, "(STUBBED) called");
+ OpenInOutImpl(ctx);
+}
+
+void AudInU::OpenAudioInProtocolSpecified(Kernel::HLERequestContext& ctx) {
+ LOG_WARNING(Service_Audio, "(STUBBED) called");
+ OpenInOutImpl(ctx);
+}
+
} // namespace Service::Audio
diff --git a/src/core/hle/service/audio/audin_u.h b/src/core/hle/service/audio/audin_u.h
index 0538b9560..a599f4a64 100644
--- a/src/core/hle/service/audio/audin_u.h
+++ b/src/core/hle/service/audio/audin_u.h
@@ -16,6 +16,35 @@ class AudInU final : public ServiceFramework<AudInU> {
public:
explicit AudInU();
~AudInU() override;
+
+private:
+ enum class SampleFormat : u32_le {
+ PCM16 = 2,
+ };
+
+ enum class State : u32_le {
+ Started = 0,
+ Stopped = 1,
+ };
+
+ struct AudInOutParams {
+ u32_le sample_rate{};
+ u32_le channel_count{};
+ SampleFormat sample_format{};
+ State state{};
+ };
+ static_assert(sizeof(AudInOutParams) == 0x10, "AudInOutParams is an invalid size");
+
+ using AudioInDeviceName = std::array<char, 256>;
+ static constexpr std::array<std::string_view, 1> audio_device_names{{
+ "BuiltInHeadset",
+ }};
+
+ void ListAudioIns(Kernel::HLERequestContext& ctx);
+ void ListAudioInsAutoFiltered(Kernel::HLERequestContext& ctx);
+ void OpenInOutImpl(Kernel::HLERequestContext& ctx);
+ void OpenAudioIn(Kernel::HLERequestContext& ctx);
+ void OpenAudioInProtocolSpecified(Kernel::HLERequestContext& ctx);
};
} // namespace Service::Audio
diff --git a/src/core/hle/service/audio/audren_u.cpp b/src/core/hle/service/audio/audren_u.cpp
index 175cabf45..d8359abaa 100644
--- a/src/core/hle/service/audio/audren_u.cpp
+++ b/src/core/hle/service/audio/audren_u.cpp
@@ -92,11 +92,16 @@ private:
}
void RequestUpdateImpl(Kernel::HLERequestContext& ctx) {
- LOG_WARNING(Service_Audio, "(STUBBED) called");
+ LOG_DEBUG(Service_Audio, "(STUBBED) called");
+
+ auto result = renderer->UpdateAudioRenderer(ctx.ReadBuffer());
+
+ if (result.Succeeded()) {
+ ctx.WriteBuffer(result.Unwrap());
+ }
- ctx.WriteBuffer(renderer->UpdateAudioRenderer(ctx.ReadBuffer()));
IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(RESULT_SUCCESS);
+ rb.Push(result.Code());
}
void Start(Kernel::HLERequestContext& ctx) {
@@ -252,8 +257,6 @@ private:
}
void GetAudioDeviceOutputVolume(Kernel::HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
-
const auto device_name_buffer = ctx.ReadBuffer();
const std::string name = Common::StringFromBuffer(device_name_buffer);
diff --git a/src/core/hle/service/bcat/backend/boxcat.cpp b/src/core/hle/service/bcat/backend/boxcat.cpp
index f589864ee..5febe8fc1 100644
--- a/src/core/hle/service/bcat/backend/boxcat.cpp
+++ b/src/core/hle/service/bcat/backend/boxcat.cpp
@@ -18,6 +18,7 @@
#include "core/hle/service/bcat/backend/boxcat.h"
#include "core/settings.h"
+namespace Service::BCAT {
namespace {
// Prevents conflicts with windows macro called CreateFile
@@ -30,10 +31,6 @@ bool VfsDeleteFileWrap(FileSys::VirtualDir dir, std::string_view name) {
return dir->DeleteFile(name);
}
-} // Anonymous namespace
-
-namespace Service::BCAT {
-
constexpr ResultCode ERROR_GENERAL_BCAT_FAILURE{ErrorModule::BCAT, 1};
constexpr char BOXCAT_HOSTNAME[] = "api.yuzu-emu.org";
@@ -90,8 +87,6 @@ constexpr u32 PORT = 443;
constexpr u32 TIMEOUT_SECONDS = 30;
[[maybe_unused]] constexpr u64 VFS_COPY_BLOCK_SIZE = 1ULL << 24; // 4MB
-namespace {
-
std::string GetBINFilePath(u64 title_id) {
return fmt::format("{}bcat/{:016X}/launchparam.bin",
FileUtil::GetUserPath(FileUtil::UserPath::CacheDir), title_id);
diff --git a/src/core/hle/service/bcat/module.cpp b/src/core/hle/service/bcat/module.cpp
index 7ada67130..34aba7a27 100644
--- a/src/core/hle/service/bcat/module.cpp
+++ b/src/core/hle/service/bcat/module.cpp
@@ -141,6 +141,7 @@ public:
{20301, nullptr, "RequestSuspendDeliveryTask"},
{20400, nullptr, "RegisterSystemApplicationDeliveryTask"},
{20401, nullptr, "UnregisterSystemApplicationDeliveryTask"},
+ {20410, nullptr, "SetSystemApplicationDeliveryTaskTimer"},
{30100, &IBcatService::SetPassphrase, "SetPassphrase"},
{30200, nullptr, "RegisterBackgroundDeliveryTask"},
{30201, nullptr, "UnregisterBackgroundDeliveryTask"},
diff --git a/src/core/hle/service/caps/caps_su.cpp b/src/core/hle/service/caps/caps_su.cpp
index 2b4c2d808..e8b0698e8 100644
--- a/src/core/hle/service/caps/caps_su.cpp
+++ b/src/core/hle/service/caps/caps_su.cpp
@@ -2,6 +2,8 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include "common/logging/log.h"
+#include "core/hle/ipc_helpers.h"
#include "core/hle/service/caps/caps_su.h"
namespace Service::Capture {
@@ -9,8 +11,11 @@ namespace Service::Capture {
CAPS_SU::CAPS_SU() : ServiceFramework("caps:su") {
// clang-format off
static const FunctionInfo functions[] = {
+ {32, &CAPS_SU::SetShimLibraryVersion, "SetShimLibraryVersion"},
{201, nullptr, "SaveScreenShot"},
{203, nullptr, "SaveScreenShotEx0"},
+ {205, nullptr, "SaveScreenShotEx1"},
+ {210, nullptr, "SaveScreenShotEx2"},
};
// clang-format on
@@ -19,4 +24,11 @@ CAPS_SU::CAPS_SU() : ServiceFramework("caps:su") {
CAPS_SU::~CAPS_SU() = default;
+void CAPS_SU::SetShimLibraryVersion(Kernel::HLERequestContext& ctx) {
+ LOG_WARNING(Service_Capture, "(STUBBED) called");
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(RESULT_SUCCESS);
+}
+
} // namespace Service::Capture
diff --git a/src/core/hle/service/caps/caps_su.h b/src/core/hle/service/caps/caps_su.h
index cb11f7c9a..c494d7c84 100644
--- a/src/core/hle/service/caps/caps_su.h
+++ b/src/core/hle/service/caps/caps_su.h
@@ -16,6 +16,9 @@ class CAPS_SU final : public ServiceFramework<CAPS_SU> {
public:
explicit CAPS_SU();
~CAPS_SU() override;
+
+private:
+ void SetShimLibraryVersion(Kernel::HLERequestContext& ctx);
};
} // namespace Service::Capture
diff --git a/src/core/hle/service/es/es.cpp b/src/core/hle/service/es/es.cpp
index df00ae625..f8e9df4b1 100644
--- a/src/core/hle/service/es/es.cpp
+++ b/src/core/hle/service/es/es.cpp
@@ -4,6 +4,7 @@
#include "core/crypto/key_manager.h"
#include "core/hle/ipc_helpers.h"
+#include "core/hle/service/es/es.h"
#include "core/hle/service/service.h"
namespace Service::ES {
@@ -76,7 +77,6 @@ private:
}
void ImportTicket(Kernel::HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
const auto ticket = ctx.ReadBuffer();
const auto cert = ctx.ReadBuffer(1);
diff --git a/src/core/hle/service/filesystem/fsp_srv.cpp b/src/core/hle/service/filesystem/fsp_srv.cpp
index 61045c75c..f6503fe2f 100644
--- a/src/core/hle/service/filesystem/fsp_srv.cpp
+++ b/src/core/hle/service/filesystem/fsp_srv.cpp
@@ -316,8 +316,8 @@ public:
{8, &IFileSystem::OpenFile, "OpenFile"},
{9, &IFileSystem::OpenDirectory, "OpenDirectory"},
{10, &IFileSystem::Commit, "Commit"},
- {11, nullptr, "GetFreeSpaceSize"},
- {12, nullptr, "GetTotalSpaceSize"},
+ {11, &IFileSystem::GetFreeSpaceSize, "GetFreeSpaceSize"},
+ {12, &IFileSystem::GetTotalSpaceSize, "GetTotalSpaceSize"},
{13, &IFileSystem::CleanDirectoryRecursively, "CleanDirectoryRecursively"},
{14, nullptr, "GetFileTimeStampRaw"},
{15, nullptr, "QueryEntry"},
@@ -697,12 +697,14 @@ FSP_SRV::FSP_SRV(FileSystemController& fsc, const Core::Reporter& reporter)
{68, nullptr, "OpenSaveDataInfoReaderBySaveDataFilter"},
{69, nullptr, "ReadSaveDataFileSystemExtraDataBySaveDataAttribute"},
{70, nullptr, "WriteSaveDataFileSystemExtraDataBySaveDataAttribute"},
+ {71, nullptr, "ReadSaveDataFileSystemExtraDataWithMaskBySaveDataAttribute"},
{80, nullptr, "OpenSaveDataMetaFile"},
{81, nullptr, "OpenSaveDataTransferManager"},
{82, nullptr, "OpenSaveDataTransferManagerVersion2"},
{83, nullptr, "OpenSaveDataTransferProhibiterForCloudBackUp"},
{84, nullptr, "ListApplicationAccessibleSaveDataOwnerId"},
{85, nullptr, "OpenSaveDataTransferManagerForSaveDataRepair"},
+ {86, nullptr, "OpenSaveDataMover"},
{100, nullptr, "OpenImageDirectoryFileSystem"},
{110, nullptr, "OpenContentStorageFileSystem"},
{120, nullptr, "OpenCloudBackupWorkStorageFileSystem"},
@@ -762,9 +764,11 @@ FSP_SRV::FSP_SRV(FileSystemController& fsc, const Core::Reporter& reporter)
{1011, &FSP_SRV::GetAccessLogVersionInfo, "GetAccessLogVersionInfo"},
{1012, nullptr, "GetFsStackUsage"},
{1013, nullptr, "UnsetSaveDataRootPath"},
+ {1014, nullptr, "OutputMultiProgramTagAccessLog"},
{1100, nullptr, "OverrideSaveDataTransferTokenSignVerificationKey"},
{1110, nullptr, "CorruptSaveDataFileSystemBySaveDataSpaceId2"},
{1200, nullptr, "OpenMultiCommitManager"},
+ {1300, nullptr, "OpenBisWiper"},
};
// clang-format on
RegisterHandlers(functions);
diff --git a/src/core/hle/service/friend/friend.cpp b/src/core/hle/service/friend/friend.cpp
index 7938b4b80..68f259b70 100644
--- a/src/core/hle/service/friend/friend.cpp
+++ b/src/core/hle/service/friend/friend.cpp
@@ -96,6 +96,7 @@ public:
{30830, nullptr, "ClearPlayLog"},
{30900, nullptr, "SendFriendInvitation"},
{30910, nullptr, "ReadFriendInvitation"},
+ {30911, nullptr, "ReadAllFriendInvitations"},
{49900, nullptr, "DeleteNetworkServiceAccountCache"},
};
// clang-format on
diff --git a/src/core/hle/service/glue/errors.h b/src/core/hle/service/glue/errors.h
index c2874c585..f6647f724 100644
--- a/src/core/hle/service/glue/errors.h
+++ b/src/core/hle/service/glue/errors.h
@@ -8,9 +8,9 @@
namespace Service::Glue {
-constexpr ResultCode ERR_INVALID_RESOURCE{ErrorModule::ARP, 0x1E};
-constexpr ResultCode ERR_INVALID_PROCESS_ID{ErrorModule::ARP, 0x1F};
-constexpr ResultCode ERR_INVALID_ACCESS{ErrorModule::ARP, 0x2A};
-constexpr ResultCode ERR_NOT_REGISTERED{ErrorModule::ARP, 0x66};
+constexpr ResultCode ERR_INVALID_RESOURCE{ErrorModule::ARP, 30};
+constexpr ResultCode ERR_INVALID_PROCESS_ID{ErrorModule::ARP, 31};
+constexpr ResultCode ERR_INVALID_ACCESS{ErrorModule::ARP, 42};
+constexpr ResultCode ERR_NOT_REGISTERED{ErrorModule::ARP, 102};
} // namespace Service::Glue
diff --git a/src/core/hle/service/hid/controllers/npad.cpp b/src/core/hle/service/hid/controllers/npad.cpp
index 2ccfffc19..c55d900e2 100644
--- a/src/core/hle/service/hid/controllers/npad.cpp
+++ b/src/core/hle/service/hid/controllers/npad.cpp
@@ -502,7 +502,7 @@ void Controller_NPad::SetNpadMode(u32 npad_id, NPadAssignments assignment_mode)
void Controller_NPad::VibrateController(const std::vector<u32>& controller_ids,
const std::vector<Vibration>& vibrations) {
- LOG_WARNING(Service_HID, "(STUBBED) called");
+ LOG_DEBUG(Service_HID, "(STUBBED) called");
if (!can_controllers_vibrate) {
return;
diff --git a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp
index d6031a987..5559587e3 100644
--- a/src/core/hle/service/hid/hid.cpp
+++ b/src/core/hle/service/hid/hid.cpp
@@ -233,7 +233,7 @@ Hid::Hid(Core::System& system) : ServiceFramework("hid"), system(system) {
{302, nullptr, "StopConsoleSixAxisSensor"},
{303, nullptr, "ActivateSevenSixAxisSensor"},
{304, nullptr, "StartSevenSixAxisSensor"},
- {305, nullptr, "StopSevenSixAxisSensor"},
+ {305, &Hid::StopSevenSixAxisSensor, "StopSevenSixAxisSensor"},
{306, &Hid::InitializeSevenSixAxisSensor, "InitializeSevenSixAxisSensor"},
{307, nullptr, "FinalizeSevenSixAxisSensor"},
{308, nullptr, "SetSevenSixAxisSensorFusionStrength"},
@@ -282,6 +282,7 @@ Hid::Hid(Core::System& system) : ServiceFramework("hid"), system(system) {
{1001, nullptr, "GetNpadCommunicationMode"},
{1002, nullptr, "SetTouchScreenConfiguration"},
{1003, nullptr, "IsFirmwareUpdateNeededForNotification"},
+ {2000, nullptr, "ActivateDigitizer"},
};
// clang-format on
@@ -852,6 +853,17 @@ void Hid::SetPalmaBoostMode(Kernel::HLERequestContext& ctx) {
rb.Push(RESULT_SUCCESS);
}
+void Hid::StopSevenSixAxisSensor(Kernel::HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+
+ LOG_WARNING(Service_HID, "(STUBBED) called, applet_resource_user_id={}",
+ applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(RESULT_SUCCESS);
+}
+
void Hid::InitializeSevenSixAxisSensor(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_HID, "(STUBBED) called");
@@ -870,6 +882,7 @@ public:
{10, nullptr, "DeactivateTouchScreen"},
{11, nullptr, "SetTouchScreenAutoPilotState"},
{12, nullptr, "UnsetTouchScreenAutoPilotState"},
+ {13, nullptr, "GetTouchScreenConfiguration"},
{20, nullptr, "DeactivateMouse"},
{21, nullptr, "SetMouseAutoPilotState"},
{22, nullptr, "UnsetMouseAutoPilotState"},
@@ -879,7 +892,9 @@ public:
{50, nullptr, "DeactivateXpad"},
{51, nullptr, "SetXpadAutoPilotState"},
{52, nullptr, "UnsetXpadAutoPilotState"},
- {60, nullptr, "DeactivateJoyXpad"},
+ {60, nullptr, "ClearNpadSystemCommonPolicy"},
+ {61, nullptr, "DeactivateNpad"},
+ {62, nullptr, "ForceDisconnectNpad"},
{91, nullptr, "DeactivateGesture"},
{110, nullptr, "DeactivateHomeButton"},
{111, nullptr, "SetHomeButtonAutoPilotState"},
@@ -899,6 +914,15 @@ public:
{141, nullptr, "GetConsoleSixAxisSensorSamplingFrequency"},
{142, nullptr, "DeactivateSevenSixAxisSensor"},
{143, nullptr, "GetConsoleSixAxisSensorCountStates"},
+ {144, nullptr, "GetAccelerometerFsr"},
+ {145, nullptr, "SetAccelerometerFsr"},
+ {146, nullptr, "GetAccelerometerOdr"},
+ {147, nullptr, "SetAccelerometerOdr"},
+ {148, nullptr, "GetGyroscopeFsr"},
+ {149, nullptr, "SetGyroscopeFsr"},
+ {150, nullptr, "GetGyroscopeOdr"},
+ {151, nullptr, "SetGyroscopeOdr"},
+ {152, nullptr, "GetWhoAmI"},
{201, nullptr, "ActivateFirmwareUpdate"},
{202, nullptr, "DeactivateFirmwareUpdate"},
{203, nullptr, "StartFirmwareUpdate"},
@@ -927,6 +951,17 @@ public:
{233, nullptr, "ClearPairingInfo"},
{234, nullptr, "GetUniquePadDeviceTypeSetInternal"},
{235, nullptr, "EnableAnalogStickPower"},
+ {236, nullptr, "RequestKuinaUartClockCal"},
+ {237, nullptr, "GetKuinaUartClockCal"},
+ {238, nullptr, "SetKuinaUartClockTrim"},
+ {239, nullptr, "KuinaLoopbackTest"},
+ {240, nullptr, "RequestBatteryVoltage"},
+ {241, nullptr, "GetBatteryVoltage"},
+ {242, nullptr, "GetUniquePadPowerInfo"},
+ {243, nullptr, "RebootUniquePad"},
+ {244, nullptr, "RequestKuinaFirmwareVersion"},
+ {245, nullptr, "GetKuinaFirmwareVersion"},
+ {246, nullptr, "GetVidPid"},
{301, nullptr, "GetAbstractedPadHandles"},
{302, nullptr, "GetAbstractedPadState"},
{303, nullptr, "GetAbstractedPadsState"},
@@ -945,6 +980,17 @@ public:
{350, nullptr, "AddRegisteredDevice"},
{400, nullptr, "DisableExternalMcuOnNxDevice"},
{401, nullptr, "DisableRailDeviceFiltering"},
+ {402, nullptr, "EnableWiredPairing"},
+ {403, nullptr, "EnableShipmentModeAutoClear"},
+ {500, nullptr, "SetFactoryInt"},
+ {501, nullptr, "IsFactoryBootEnabled"},
+ {550, nullptr, "SetAnalogStickModelDataTemporarily"},
+ {551, nullptr, "GetAnalogStickModelData"},
+ {552, nullptr, "ResetAnalogStickModelData"},
+ {600, nullptr, "ConvertPadState"},
+ {2000, nullptr, "DeactivateDigitizer"},
+ {2001, nullptr, "SetDigitizerAutoPilotState"},
+ {2002, nullptr, "UnsetDigitizerAutoPilotState"},
};
// clang-format on
diff --git a/src/core/hle/service/hid/hid.h b/src/core/hle/service/hid/hid.h
index 039c38b58..23552efb1 100644
--- a/src/core/hle/service/hid/hid.h
+++ b/src/core/hle/service/hid/hid.h
@@ -128,6 +128,7 @@ private:
void StopSixAxisSensor(Kernel::HLERequestContext& ctx);
void SetIsPalmaAllConnectable(Kernel::HLERequestContext& ctx);
void SetPalmaBoostMode(Kernel::HLERequestContext& ctx);
+ void StopSevenSixAxisSensor(Kernel::HLERequestContext& ctx);
void InitializeSevenSixAxisSensor(Kernel::HLERequestContext& ctx);
std::shared_ptr<IAppletResource> applet_resource;
diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp
index 0cde7a557..6ad3be1b3 100644
--- a/src/core/hle/service/ldr/ldr.cpp
+++ b/src/core/hle/service/ldr/ldr.cpp
@@ -116,6 +116,7 @@ public:
{1, nullptr, "GetProgramInfo"},
{2, nullptr, "RegisterTitle"},
{3, nullptr, "UnregisterTitle"},
+ {4, nullptr, "SetEnabledProgramVerification"},
};
// clang-format on
diff --git a/src/core/hle/service/ncm/ncm.cpp b/src/core/hle/service/ncm/ncm.cpp
index 89e283ca5..ec9aae04a 100644
--- a/src/core/hle/service/ncm/ncm.cpp
+++ b/src/core/hle/service/ncm/ncm.cpp
@@ -122,6 +122,7 @@ public:
{11, nullptr, "ActivateContentMetaDatabase"},
{12, nullptr, "InactivateContentMetaDatabase"},
{13, nullptr, "InvalidateRightsIdCache"},
+ {14, nullptr, "GetMemoryReport"},
};
// clang-format on
diff --git a/src/core/hle/service/nim/nim.cpp b/src/core/hle/service/nim/nim.cpp
index e85f123e2..f19affce7 100644
--- a/src/core/hle/service/nim/nim.cpp
+++ b/src/core/hle/service/nim/nim.cpp
@@ -15,6 +15,66 @@
namespace Service::NIM {
+class IShopServiceAsync final : public ServiceFramework<IShopServiceAsync> {
+public:
+ IShopServiceAsync() : ServiceFramework("IShopServiceAsync") {
+ // clang-format off
+ static const FunctionInfo functions[] = {
+ {0, nullptr, "Cancel"},
+ {1, nullptr, "GetSize"},
+ {2, nullptr, "Read"},
+ {3, nullptr, "GetErrorCode"},
+ {4, nullptr, "Request"},
+ {5, nullptr, "Prepare"},
+ };
+ // clang-format on
+
+ RegisterHandlers(functions);
+ }
+};
+
+class IShopServiceAccessor final : public ServiceFramework<IShopServiceAccessor> {
+public:
+ IShopServiceAccessor() : ServiceFramework("IShopServiceAccessor") {
+ // clang-format off
+ static const FunctionInfo functions[] = {
+ {0, &IShopServiceAccessor::CreateAsyncInterface, "CreateAsyncInterface"},
+ };
+ // clang-format on
+
+ RegisterHandlers(functions);
+ }
+
+private:
+ void CreateAsyncInterface(Kernel::HLERequestContext& ctx) {
+ LOG_WARNING(Service_NIM, "(STUBBED) called");
+ IPC::ResponseBuilder rb{ctx, 2, 0, 1};
+ rb.Push(RESULT_SUCCESS);
+ rb.PushIpcInterface<IShopServiceAsync>();
+ }
+};
+
+class IShopServiceAccessServer final : public ServiceFramework<IShopServiceAccessServer> {
+public:
+ IShopServiceAccessServer() : ServiceFramework("IShopServiceAccessServer") {
+ // clang-format off
+ static const FunctionInfo functions[] = {
+ {0, &IShopServiceAccessServer::CreateAccessorInterface, "CreateAccessorInterface"},
+ };
+ // clang-format on
+
+ RegisterHandlers(functions);
+ }
+
+private:
+ void CreateAccessorInterface(Kernel::HLERequestContext& ctx) {
+ LOG_WARNING(Service_NIM, "(STUBBED) called");
+ IPC::ResponseBuilder rb{ctx, 2, 0, 1};
+ rb.Push(RESULT_SUCCESS);
+ rb.PushIpcInterface<IShopServiceAccessor>();
+ }
+};
+
class NIM final : public ServiceFramework<NIM> {
public:
explicit NIM() : ServiceFramework{"nim"} {
@@ -78,7 +138,7 @@ public:
explicit NIM_ECA() : ServiceFramework{"nim:eca"} {
// clang-format off
static const FunctionInfo functions[] = {
- {0, nullptr, "CreateServerInterface"},
+ {0, &NIM_ECA::CreateServerInterface, "CreateServerInterface"},
{1, nullptr, "RefreshDebugAvailability"},
{2, nullptr, "ClearDebugResponse"},
{3, nullptr, "RegisterDebugResponse"},
@@ -87,6 +147,14 @@ public:
RegisterHandlers(functions);
}
+
+private:
+ void CreateServerInterface(Kernel::HLERequestContext& ctx) {
+ LOG_WARNING(Service_NIM, "(STUBBED) called");
+ IPC::ResponseBuilder rb{ctx, 2, 0, 1};
+ rb.Push(RESULT_SUCCESS);
+ rb.PushIpcInterface<IShopServiceAccessServer>();
+ }
};
class NIM_SHP final : public ServiceFramework<NIM_SHP> {
diff --git a/src/core/hle/service/npns/npns.cpp b/src/core/hle/service/npns/npns.cpp
index aa171473b..f38d01084 100644
--- a/src/core/hle/service/npns/npns.cpp
+++ b/src/core/hle/service/npns/npns.cpp
@@ -48,6 +48,8 @@ public:
{151, nullptr, "GetStateWithHandover"},
{152, nullptr, "GetStateChangeEventWithHandover"},
{153, nullptr, "GetDropEventWithHandover"},
+ {161, nullptr, "GetRequestChangeStateCancelEvent"},
+ {162, nullptr, "RequestChangeStateForceTimedWithCancelEvent"},
{201, nullptr, "RequestChangeStateForceTimed"},
{202, nullptr, "RequestChangeStateForceAsync"},
};
diff --git a/src/core/hle/service/ns/ns.cpp b/src/core/hle/service/ns/ns.cpp
index fdab3cf78..7e5ceccdb 100644
--- a/src/core/hle/service/ns/ns.cpp
+++ b/src/core/hle/service/ns/ns.cpp
@@ -110,6 +110,10 @@ IApplicationManagerInterface::IApplicationManagerInterface()
{100, nullptr, "ResetToFactorySettings"},
{101, nullptr, "ResetToFactorySettingsWithoutUserSaveData"},
{102, nullptr, "ResetToFactorySettingsForRefurbishment"},
+ {103, nullptr, "ResetToFactorySettingsWithPlatformRegion"},
+ {104, nullptr, "ResetToFactorySettingsWithPlatformRegionAuthentication"},
+ {105, nullptr, "RequestResetToFactorySettingsSecurely"},
+ {106, nullptr, "RequestResetToFactorySettingsWithPlatformRegionAuthenticationSecurely"},
{200, nullptr, "CalculateUserSaveDataStatistics"},
{201, nullptr, "DeleteUserSaveDataAll"},
{210, nullptr, "DeleteUserSystemSaveData"},
@@ -191,6 +195,9 @@ IApplicationManagerInterface::IApplicationManagerInterface()
{1307, nullptr, "TryDeleteRunningApplicationContentEntities"},
{1308, nullptr, "DeleteApplicationCompletelyForDebug"},
{1309, nullptr, "CleanupUnavailableAddOnContents"},
+ {1310, nullptr, "RequestMoveApplicationEntity"},
+ {1311, nullptr, "EstimateSizeToMove"},
+ {1312, nullptr, "HasMovableEntity"},
{1400, nullptr, "PrepareShutdown"},
{1500, nullptr, "FormatSdCard"},
{1501, nullptr, "NeedsSystemUpdateToFormatSdCard"},
@@ -241,7 +248,7 @@ IApplicationManagerInterface::IApplicationManagerInterface()
{2153, nullptr, "DeactivateRightsEnvironment"},
{2154, nullptr, "ForceActivateRightsContextForExit"},
{2155, nullptr, "UpdateRightsEnvironmentStatus"},
- {2156, nullptr, "CreateRightsEnvironmentForPreomia"},
+ {2156, nullptr, "CreateRightsEnvironmentForMicroApplication"},
{2160, nullptr, "AddTargetApplicationToRightsEnvironment"},
{2161, nullptr, "SetUsersToRightsEnvironment"},
{2170, nullptr, "GetRightsEnvironmentStatus"},
@@ -258,6 +265,7 @@ IApplicationManagerInterface::IApplicationManagerInterface()
{2350, nullptr, "PerformAutoUpdateByApplicationId"},
{2351, nullptr, "RequestNoDownloadRightsErrorResolution"},
{2352, nullptr, "RequestResolveNoDownloadRightsError"},
+ {2353, nullptr, "GetApplicationDownloadTaskInfo"},
{2400, nullptr, "GetPromotionInfo"},
{2401, nullptr, "CountPromotionInfo"},
{2402, nullptr, "ListPromotionInfo"},
@@ -266,9 +274,12 @@ IApplicationManagerInterface::IApplicationManagerInterface()
{2500, nullptr, "ConfirmAvailableTime"},
{2510, nullptr, "CreateApplicationResource"},
{2511, nullptr, "GetApplicationResource"},
- {2513, nullptr, "LaunchPreomia"},
+ {2513, nullptr, "LaunchMicroApplication"},
{2514, nullptr, "ClearTaskOfAsyncTaskManager"},
+ {2515, nullptr, "CleanupAllPlaceHolderAndFragmentsIfNoTask"},
+ {2516, nullptr, "EnsureApplicationCertificate"},
{2800, nullptr, "GetApplicationIdOfPreomia"},
+ {9999, nullptr, "GetApplicationCertificate"},
};
// clang-format on
@@ -360,10 +371,15 @@ ResultVal<u8> IApplicationManagerInterface::GetApplicationDesiredLanguage(
// Convert to application language, get priority list
const auto application_language = ConvertToApplicationLanguage(language_code);
if (application_language == std::nullopt) {
+ LOG_ERROR(Service_NS, "Could not convert application language! language_code={}",
+ language_code);
return ERR_APPLICATION_LANGUAGE_NOT_FOUND;
}
const auto priority_list = GetApplicationLanguagePriorityList(*application_language);
if (!priority_list) {
+ LOG_ERROR(Service_NS,
+ "Could not find application language priorities! application_language={}",
+ *application_language);
return ERR_APPLICATION_LANGUAGE_NOT_FOUND;
}
@@ -375,6 +391,8 @@ ResultVal<u8> IApplicationManagerInterface::GetApplicationDesiredLanguage(
}
}
+ LOG_ERROR(Service_NS, "Could not find a valid language! supported_languages={:08X}",
+ supported_languages);
return ERR_APPLICATION_LANGUAGE_NOT_FOUND;
}
@@ -399,6 +417,7 @@ ResultVal<u64> IApplicationManagerInterface::ConvertApplicationLanguageToLanguag
const auto language_code =
ConvertToLanguageCode(static_cast<ApplicationLanguage>(application_language));
if (language_code == std::nullopt) {
+ LOG_ERROR(Service_NS, "Language not found! application_language={}", application_language);
return ERR_APPLICATION_LANGUAGE_NOT_FOUND;
}
@@ -505,6 +524,10 @@ IFactoryResetInterface::IFactoryResetInterface::IFactoryResetInterface()
{100, nullptr, "ResetToFactorySettings"},
{101, nullptr, "ResetToFactorySettingsWithoutUserSaveData"},
{102, nullptr, "ResetToFactorySettingsForRefurbishment"},
+ {103, nullptr, "ResetToFactorySettingsWithPlatformRegion"},
+ {104, nullptr, "ResetToFactorySettingsWithPlatformRegionAuthentication"},
+ {105, nullptr, "RequestResetToFactorySettingsSecurely"},
+ {106, nullptr, "RequestResetToFactorySettingsWithPlatformRegionAuthenticationSecurely"},
};
// clang-format on
@@ -553,6 +576,9 @@ public:
{10, nullptr, "TerminateApplication2"},
{11, nullptr, "GetRunningApplicationProcessId"},
{12, nullptr, "SetCurrentApplicationRightsEnvironmentCanBeActive"},
+ {13, nullptr, "CreateApplicationResourceForDevelop"},
+ {14, nullptr, "IsPreomiaForDevelop"},
+ {15, nullptr, "GetApplicationProgramIdFromHost"},
};
// clang-format on
diff --git a/src/core/hle/service/ns/pl_u.cpp b/src/core/hle/service/ns/pl_u.cpp
index ab1746d28..6efdf1606 100644
--- a/src/core/hle/service/ns/pl_u.cpp
+++ b/src/core/hle/service/ns/pl_u.cpp
@@ -164,6 +164,7 @@ PL_U::PL_U(Core::System& system)
{6, nullptr, "GetSharedFontInOrderOfPriorityForSystem"},
{100, nullptr, "RequestApplicationFunctionAuthorization"},
{101, nullptr, "RequestApplicationFunctionAuthorizationForSystem"},
+ {102, nullptr, "RequestApplicationFunctionAuthorizationByApplicationId"},
{1000, nullptr, "LoadNgWordDataForPlatformRegionChina"},
{1001, nullptr, "GetNgWordDataSizeForPlatformRegionChina"},
};
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
index 642b0a2cb..07b644ec5 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
@@ -159,9 +159,10 @@ private:
static_assert(sizeof(IoctlFlushL2) == 8, "IoctlFlushL2 is incorrect size");
struct IoctlGetGpuTime {
- u64_le gpu_time;
+ u64_le gpu_time{};
+ INSERT_PADDING_WORDS(2);
};
- static_assert(sizeof(IoctlGetGpuTime) == 8, "IoctlGetGpuTime is incorrect size");
+ static_assert(sizeof(IoctlGetGpuTime) == 0x10, "IoctlGetGpuTime is incorrect size");
u32 GetCharacteristics(const std::vector<u8>& input, std::vector<u8>& output,
std::vector<u8>& output2, IoctlVersion version);
diff --git a/src/core/hle/service/pctl/module.cpp b/src/core/hle/service/pctl/module.cpp
index c75b4ee34..caf14ed61 100644
--- a/src/core/hle/service/pctl/module.cpp
+++ b/src/core/hle/service/pctl/module.cpp
@@ -31,6 +31,8 @@ public:
{1014, nullptr, "ConfirmPlayableApplicationVideoOld"},
{1015, nullptr, "ConfirmPlayableApplicationVideo"},
{1016, nullptr, "ConfirmShowNewsPermission"},
+ {1017, nullptr, "EndFreeCommunication"},
+ {1018, nullptr, "IsFreeCommunicationAvailable"},
{1031, nullptr, "IsRestrictionEnabled"},
{1032, nullptr, "GetSafetyLevel"},
{1033, nullptr, "SetSafetyLevel"},
diff --git a/src/core/hle/service/prepo/prepo.cpp b/src/core/hle/service/prepo/prepo.cpp
index 8f1be0e48..14309c679 100644
--- a/src/core/hle/service/prepo/prepo.cpp
+++ b/src/core/hle/service/prepo/prepo.cpp
@@ -21,8 +21,10 @@ public:
static const FunctionInfo functions[] = {
{10100, &PlayReport::SaveReport<Core::Reporter::PlayReportType::Old>, "SaveReportOld"},
{10101, &PlayReport::SaveReportWithUser<Core::Reporter::PlayReportType::Old>, "SaveReportWithUserOld"},
- {10102, &PlayReport::SaveReport<Core::Reporter::PlayReportType::New>, "SaveReport"},
- {10103, &PlayReport::SaveReportWithUser<Core::Reporter::PlayReportType::New>, "SaveReportWithUser"},
+ {10102, &PlayReport::SaveReport<Core::Reporter::PlayReportType::Old2>, "SaveReportOld2"},
+ {10103, &PlayReport::SaveReportWithUser<Core::Reporter::PlayReportType::Old2>, "SaveReportWithUserOld2"},
+ {10104, nullptr, "SaveReport"},
+ {10105, nullptr, "SaveReportWithUser"},
{10200, nullptr, "RequestImmediateTransmission"},
{10300, nullptr, "GetTransmissionStatus"},
{10400, nullptr, "GetSystemSessionId"},
@@ -35,8 +37,10 @@ public:
{30400, nullptr, "GetStatistics"},
{30401, nullptr, "GetThroughputHistory"},
{30500, nullptr, "GetLastUploadError"},
+ {30600, nullptr, "GetApplicationUploadSummary"},
{40100, nullptr, "IsUserAgreementCheckEnabled"},
{40101, nullptr, "SetUserAgreementCheckEnabled"},
+ {50100, nullptr, "ReadAllApplicationReportFiles"},
{90100, nullptr, "ReadAllReportFiles"},
};
// clang-format on
@@ -51,7 +55,7 @@ private:
const auto process_id = rp.PopRaw<u64>();
std::vector<std::vector<u8>> data{ctx.ReadBuffer(0)};
- if (Type == Core::Reporter::PlayReportType::New) {
+ if constexpr (Type == Core::Reporter::PlayReportType::Old2) {
data.emplace_back(ctx.ReadBuffer(1));
}
@@ -71,7 +75,7 @@ private:
const auto user_id = rp.PopRaw<u128>();
const auto process_id = rp.PopRaw<u64>();
std::vector<std::vector<u8>> data{ctx.ReadBuffer(0)};
- if (Type == Core::Reporter::PlayReportType::New) {
+ if constexpr (Type == Core::Reporter::PlayReportType::Old2) {
data.emplace_back(ctx.ReadBuffer(1));
}
diff --git a/src/core/hle/service/ptm/psm.cpp b/src/core/hle/service/ptm/psm.cpp
index c2d5fda94..12d154ecf 100644
--- a/src/core/hle/service/ptm/psm.cpp
+++ b/src/core/hle/service/ptm/psm.cpp
@@ -12,9 +12,6 @@
namespace Service::PSM {
-constexpr u32 BATTERY_FULLY_CHARGED = 100; // 100% Full
-constexpr u32 BATTERY_CURRENTLY_CHARGING = 1; // Plugged into an official dock
-
class PSM final : public ServiceFramework<PSM> {
public:
explicit PSM() : ServiceFramework{"psm"} {
@@ -48,20 +45,30 @@ public:
private:
void GetBatteryChargePercentage(Kernel::HLERequestContext& ctx) {
- LOG_WARNING(Service_PSM, "(STUBBED) called");
+ LOG_DEBUG(Service_PSM, "called");
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
- rb.Push<u32>(BATTERY_FULLY_CHARGED);
+ rb.Push<u32>(battery_charge_percentage);
}
void GetChargerType(Kernel::HLERequestContext& ctx) {
- LOG_WARNING(Service_PSM, "(STUBBED) called");
+ LOG_DEBUG(Service_PSM, "called");
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
- rb.Push<u32>(BATTERY_CURRENTLY_CHARGING);
+ rb.PushEnum(charger_type);
}
+
+ enum class ChargerType : u32 {
+ Unplugged = 0,
+ RegularCharger = 1,
+ LowPowerCharger = 2,
+ Unknown = 3,
+ };
+
+ u32 battery_charge_percentage{100}; // 100%
+ ChargerType charger_type{ChargerType::RegularCharger};
};
void InstallInterfaces(SM::ServiceManager& sm) {
diff --git a/src/core/hle/service/set/set.cpp b/src/core/hle/service/set/set.cpp
index 9e12c76fc..f3b4b286c 100644
--- a/src/core/hle/service/set/set.cpp
+++ b/src/core/hle/service/set/set.cpp
@@ -67,6 +67,7 @@ void SET::MakeLanguageCode(Kernel::HLERequestContext& ctx) {
const auto index = rp.Pop<u32>();
if (index >= available_language_codes.size()) {
+ LOG_ERROR(Service_SET, "Invalid language code index! index={}", index);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ERR_INVALID_LANGUAGE);
return;
diff --git a/src/core/hle/service/set/set_cal.cpp b/src/core/hle/service/set/set_cal.cpp
index 1398a4a48..3fbfecc9e 100644
--- a/src/core/hle/service/set/set_cal.cpp
+++ b/src/core/hle/service/set/set_cal.cpp
@@ -50,6 +50,8 @@ SET_CAL::SET_CAL() : ServiceFramework("set:cal") {
{39, nullptr, "GetConsoleSixAxisSensorModuleType"},
{40, nullptr, "GetConsoleSixAxisSensorHorizontalOffset"},
{41, nullptr, "GetBatteryVersion"},
+ {42, nullptr, "GetDeviceId"},
+ {43, nullptr, "GetConsoleSixAxisSensorMountType"},
};
// clang-format on
diff --git a/src/core/hle/service/set/set_sys.cpp b/src/core/hle/service/set/set_sys.cpp
index b7c9ea74b..8bd4c7e79 100644
--- a/src/core/hle/service/set/set_sys.cpp
+++ b/src/core/hle/service/set/set_sys.cpp
@@ -288,6 +288,18 @@ SET_SYS::SET_SYS() : ServiceFramework("set:sys") {
{186, nullptr, "GetMemoryUsageRateFlag"},
{187, nullptr, "GetTouchScreenMode"},
{188, nullptr, "SetTouchScreenMode"},
+ {189, nullptr, "GetButtonConfigSettingsFull"},
+ {190, nullptr, "SetButtonConfigSettingsFull"},
+ {191, nullptr, "GetButtonConfigSettingsEmbedded"},
+ {192, nullptr, "SetButtonConfigSettingsEmbedded"},
+ {193, nullptr, "GetButtonConfigSettingsLeft"},
+ {194, nullptr, "SetButtonConfigSettingsLeft"},
+ {195, nullptr, "GetButtonConfigSettingsRight"},
+ {196, nullptr, "SetButtonConfigSettingsRight"},
+ {197, nullptr, "GetButtonConfigRegisteredSettingsEmbedded"},
+ {198, nullptr, "SetButtonConfigRegisteredSettingsEmbedded"},
+ {199, nullptr, "GetButtonConfigRegisteredSettings"},
+ {200, nullptr, "SetButtonConfigRegisteredSettings"},
};
// clang-format on
diff --git a/src/core/hle/service/sm/sm.cpp b/src/core/hle/service/sm/sm.cpp
index 88909504d..6ada13be4 100644
--- a/src/core/hle/service/sm/sm.cpp
+++ b/src/core/hle/service/sm/sm.cpp
@@ -28,9 +28,11 @@ void ServiceManager::InvokeControlRequest(Kernel::HLERequestContext& context) {
static ResultCode ValidateServiceName(const std::string& name) {
if (name.size() <= 0 || name.size() > 8) {
+ LOG_ERROR(Service_SM, "Invalid service name! service={}", name);
return ERR_INVALID_NAME;
}
if (name.find('\0') != std::string::npos) {
+ LOG_ERROR(Service_SM, "A non null terminated service was passed");
return ERR_INVALID_NAME;
}
return RESULT_SUCCESS;
@@ -51,8 +53,10 @@ ResultVal<std::shared_ptr<Kernel::ServerPort>> ServiceManager::RegisterService(
CASCADE_CODE(ValidateServiceName(name));
- if (registered_services.find(name) != registered_services.end())
+ if (registered_services.find(name) != registered_services.end()) {
+ LOG_ERROR(Service_SM, "Service is already registered! service={}", name);
return ERR_ALREADY_REGISTERED;
+ }
auto& kernel = Core::System::GetInstance().Kernel();
auto [server_port, client_port] =
@@ -66,9 +70,10 @@ ResultCode ServiceManager::UnregisterService(const std::string& name) {
CASCADE_CODE(ValidateServiceName(name));
const auto iter = registered_services.find(name);
- if (iter == registered_services.end())
+ if (iter == registered_services.end()) {
+ LOG_ERROR(Service_SM, "Server is not registered! service={}", name);
return ERR_SERVICE_NOT_REGISTERED;
-
+ }
registered_services.erase(iter);
return RESULT_SUCCESS;
}
@@ -79,6 +84,7 @@ ResultVal<std::shared_ptr<Kernel::ClientPort>> ServiceManager::GetServicePort(
CASCADE_CODE(ValidateServiceName(name));
auto it = registered_services.find(name);
if (it == registered_services.end()) {
+ LOG_ERROR(Service_SM, "Server is not registered! service={}", name);
return ERR_SERVICE_NOT_REGISTERED;
}
diff --git a/src/core/hle/service/sockets/bsd.cpp b/src/core/hle/service/sockets/bsd.cpp
index f67fab2f9..8d4952c0e 100644
--- a/src/core/hle/service/sockets/bsd.cpp
+++ b/src/core/hle/service/sockets/bsd.cpp
@@ -148,6 +148,7 @@ BSD::BSD(const char* name) : ServiceFramework(name) {
{30, nullptr, "SendMMsg"},
{31, nullptr, "EventFd"},
{32, nullptr, "RegisterResourceStatisticsName"},
+ {33, nullptr, "Initialize2"},
};
// clang-format on
diff --git a/src/core/hle/service/time/time.cpp b/src/core/hle/service/time/time.cpp
index e722886de..67f1bbcf3 100644
--- a/src/core/hle/service/time/time.cpp
+++ b/src/core/hle/service/time/time.cpp
@@ -20,8 +20,8 @@ namespace Service::Time {
class ISystemClock final : public ServiceFramework<ISystemClock> {
public:
- ISystemClock(Clock::SystemClockCore& clock_core)
- : ServiceFramework("ISystemClock"), clock_core{clock_core} {
+ explicit ISystemClock(Clock::SystemClockCore& clock_core, Core::System& system)
+ : ServiceFramework("ISystemClock"), clock_core{clock_core}, system{system} {
// clang-format off
static const FunctionInfo functions[] = {
{0, &ISystemClock::GetCurrentTime, "GetCurrentTime"},
@@ -46,9 +46,8 @@ private:
}
s64 posix_time{};
- if (const ResultCode result{
- clock_core.GetCurrentTime(Core::System::GetInstance(), posix_time)};
- result != RESULT_SUCCESS) {
+ if (const ResultCode result{clock_core.GetCurrentTime(system, posix_time)};
+ result.IsError()) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(result);
return;
@@ -69,9 +68,8 @@ private:
}
Clock::SystemClockContext system_clock_context{};
- if (const ResultCode result{
- clock_core.GetClockContext(Core::System::GetInstance(), system_clock_context)};
- result != RESULT_SUCCESS) {
+ if (const ResultCode result{clock_core.GetClockContext(system, system_clock_context)};
+ result.IsError()) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(result);
return;
@@ -83,12 +81,13 @@ private:
}
Clock::SystemClockCore& clock_core;
+ Core::System& system;
};
class ISteadyClock final : public ServiceFramework<ISteadyClock> {
public:
- ISteadyClock(Clock::SteadyClockCore& clock_core)
- : ServiceFramework("ISteadyClock"), clock_core{clock_core} {
+ explicit ISteadyClock(Clock::SteadyClockCore& clock_core, Core::System& system)
+ : ServiceFramework("ISteadyClock"), clock_core{clock_core}, system{system} {
static const FunctionInfo functions[] = {
{0, &ISteadyClock::GetCurrentTimePoint, "GetCurrentTimePoint"},
};
@@ -105,14 +104,14 @@ private:
return;
}
- const Clock::SteadyClockTimePoint time_point{
- clock_core.GetCurrentTimePoint(Core::System::GetInstance())};
+ const Clock::SteadyClockTimePoint time_point{clock_core.GetCurrentTimePoint(system)};
IPC::ResponseBuilder rb{ctx, (sizeof(Clock::SteadyClockTimePoint) / 4) + 2};
rb.Push(RESULT_SUCCESS);
rb.PushRaw(time_point);
}
Clock::SteadyClockCore& clock_core;
+ Core::System& system;
};
ResultCode Module::Interface::GetClockSnapshotFromSystemClockContextInternal(
@@ -134,7 +133,7 @@ ResultCode Module::Interface::GetClockSnapshotFromSystemClockContextInternal(
}
const auto current_time_point{
- time_manager.GetStandardSteadyClockCore().GetCurrentTimePoint(Core::System::GetInstance())};
+ time_manager.GetStandardSteadyClockCore().GetCurrentTimePoint(system)};
if (const ResultCode result{Clock::ClockSnapshot::GetCurrentTime(
clock_snapshot.user_time, current_time_point, clock_snapshot.user_context)};
result != RESULT_SUCCESS) {
@@ -176,21 +175,24 @@ void Module::Interface::GetStandardUserSystemClock(Kernel::HLERequestContext& ct
LOG_DEBUG(Service_Time, "called");
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
- rb.PushIpcInterface<ISystemClock>(module->GetTimeManager().GetStandardUserSystemClockCore());
+ rb.PushIpcInterface<ISystemClock>(module->GetTimeManager().GetStandardUserSystemClockCore(),
+ system);
}
void Module::Interface::GetStandardNetworkSystemClock(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_Time, "called");
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
- rb.PushIpcInterface<ISystemClock>(module->GetTimeManager().GetStandardNetworkSystemClockCore());
+ rb.PushIpcInterface<ISystemClock>(module->GetTimeManager().GetStandardNetworkSystemClockCore(),
+ system);
}
void Module::Interface::GetStandardSteadyClock(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_Time, "called");
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
- rb.PushIpcInterface<ISteadyClock>(module->GetTimeManager().GetStandardSteadyClockCore());
+ rb.PushIpcInterface<ISteadyClock>(module->GetTimeManager().GetStandardSteadyClockCore(),
+ system);
}
void Module::Interface::GetTimeZoneService(Kernel::HLERequestContext& ctx) {
@@ -204,7 +206,8 @@ void Module::Interface::GetStandardLocalSystemClock(Kernel::HLERequestContext& c
LOG_DEBUG(Service_Time, "called");
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
- rb.PushIpcInterface<ISystemClock>(module->GetTimeManager().GetStandardLocalSystemClockCore());
+ rb.PushIpcInterface<ISystemClock>(module->GetTimeManager().GetStandardLocalSystemClockCore(),
+ system);
}
void Module::Interface::IsStandardNetworkSystemClockAccuracySufficient(
@@ -228,8 +231,7 @@ void Module::Interface::CalculateMonotonicSystemClockBaseTimePoint(Kernel::HLERe
IPC::RequestParser rp{ctx};
const auto context{rp.PopRaw<Clock::SystemClockContext>()};
- const auto current_time_point{
- steady_clock_core.GetCurrentTimePoint(Core::System::GetInstance())};
+ const auto current_time_point{steady_clock_core.GetCurrentTimePoint(system)};
if (current_time_point.clock_source_id == context.steady_time_point.clock_source_id) {
const auto ticks{Clock::TimeSpanType::FromTicks(
@@ -255,8 +257,8 @@ void Module::Interface::GetClockSnapshot(Kernel::HLERequestContext& ctx) {
Clock::SystemClockContext user_context{};
if (const ResultCode result{
module->GetTimeManager().GetStandardUserSystemClockCore().GetClockContext(
- Core::System::GetInstance(), user_context)};
- result != RESULT_SUCCESS) {
+ system, user_context)};
+ result.IsError()) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(result);
return;
@@ -264,8 +266,8 @@ void Module::Interface::GetClockSnapshot(Kernel::HLERequestContext& ctx) {
Clock::SystemClockContext network_context{};
if (const ResultCode result{
module->GetTimeManager().GetStandardNetworkSystemClockCore().GetClockContext(
- Core::System::GetInstance(), network_context)};
- result != RESULT_SUCCESS) {
+ system, network_context)};
+ result.IsError()) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(result);
return;
@@ -274,7 +276,7 @@ void Module::Interface::GetClockSnapshot(Kernel::HLERequestContext& ctx) {
Clock::ClockSnapshot clock_snapshot{};
if (const ResultCode result{GetClockSnapshotFromSystemClockContextInternal(
&ctx.GetThread(), user_context, network_context, type, clock_snapshot)};
- result != RESULT_SUCCESS) {
+ result.IsError()) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(result);
return;
diff --git a/src/core/hle/service/time/time_zone_manager.cpp b/src/core/hle/service/time/time_zone_manager.cpp
index c8159bcd5..69152d0ac 100644
--- a/src/core/hle/service/time/time_zone_manager.cpp
+++ b/src/core/hle/service/time/time_zone_manager.cpp
@@ -518,8 +518,8 @@ static bool ParseTimeZoneBinary(TimeZoneRule& time_zone_rule, FileSys::VirtualFi
constexpr s32 time_zone_max_leaps{50};
constexpr s32 time_zone_max_chars{50};
if (!(0 <= header.leap_count && header.leap_count < time_zone_max_leaps &&
- 0 < header.type_count && header.type_count < time_zone_rule.ttis.size() &&
- 0 <= header.time_count && header.time_count < time_zone_rule.ats.size() &&
+ 0 < header.type_count && header.type_count < s32(time_zone_rule.ttis.size()) &&
+ 0 <= header.time_count && header.time_count < s32(time_zone_rule.ats.size()) &&
0 <= header.char_count && header.char_count < time_zone_max_chars &&
(header.ttis_std_count == header.type_count || header.ttis_std_count == 0) &&
(header.ttis_gmt_count == header.type_count || header.ttis_gmt_count == 0))) {
diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp
index 7f109f4eb..46e14c2a3 100644
--- a/src/core/hle/service/vi/vi.cpp
+++ b/src/core/hle/service/vi/vi.cpp
@@ -267,7 +267,7 @@ protected:
private:
struct Data {
- u32_le unk_0;
+ u32_le unk_0{};
};
Data data{};
@@ -614,6 +614,14 @@ private:
ctx.WriteBuffer(response.Serialize());
break;
}
+ case TransactionId::SetBufferCount: {
+ LOG_WARNING(Service_VI, "(STUBBED) called, transaction=SetBufferCount");
+ [[maybe_unused]] const auto buffer = ctx.ReadBuffer();
+
+ IGBPEmptyResponseParcel response{};
+ ctx.WriteBuffer(response.Serialize());
+ break;
+ }
default:
ASSERT_MSG(false, "Unimplemented");
}
@@ -859,6 +867,7 @@ private:
const auto layer_id = nv_flinger->CreateLayer(display);
if (!layer_id) {
+ LOG_ERROR(Service_VI, "Layer not found! display=0x{:016X}", display);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ERR_NOT_FOUND);
return;
@@ -975,6 +984,7 @@ private:
const auto display_id = nv_flinger->OpenDisplay(name);
if (!display_id) {
+ LOG_ERROR(Service_VI, "Display not found! display_name={}", name);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ERR_NOT_FOUND);
return;
@@ -1074,6 +1084,7 @@ private:
const auto display_id = nv_flinger->OpenDisplay(display_name);
if (!display_id) {
+ LOG_ERROR(Service_VI, "Layer not found! layer_id={}", layer_id);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ERR_NOT_FOUND);
return;
@@ -1081,6 +1092,7 @@ private:
const auto buffer_queue_id = nv_flinger->FindBufferQueueId(*display_id, layer_id);
if (!buffer_queue_id) {
+ LOG_ERROR(Service_VI, "Buffer queue id not found! display_id={}", *display_id);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ERR_NOT_FOUND);
return;
@@ -1116,6 +1128,7 @@ private:
const auto layer_id = nv_flinger->CreateLayer(display_id);
if (!layer_id) {
+ LOG_ERROR(Service_VI, "Layer not found! layer_id={}", *layer_id);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ERR_NOT_FOUND);
return;
@@ -1123,6 +1136,7 @@ private:
const auto buffer_queue_id = nv_flinger->FindBufferQueueId(display_id, *layer_id);
if (!buffer_queue_id) {
+ LOG_ERROR(Service_VI, "Buffer queue id not found! display_id={}", display_id);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ERR_NOT_FOUND);
return;
@@ -1153,6 +1167,7 @@ private:
const auto vsync_event = nv_flinger->FindVsyncEvent(display_id);
if (!vsync_event) {
+ LOG_ERROR(Service_VI, "Vsync event was not found for display_id={}", display_id);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ERR_NOT_FOUND);
return;
@@ -1193,6 +1208,7 @@ private:
case NintendoScaleMode::PreserveAspectRatio:
return MakeResult(ConvertedScaleMode::PreserveAspectRatio);
default:
+ LOG_ERROR(Service_VI, "Invalid scaling mode specified, mode={}", mode);
return ERR_OPERATION_FAILED;
}
}
@@ -1249,6 +1265,7 @@ void detail::GetDisplayServiceImpl(Kernel::HLERequestContext& ctx,
const auto policy = rp.PopEnum<Policy>();
if (!IsValidServiceAccess(permission, policy)) {
+ LOG_ERROR(Service_VI, "Permission denied for policy {}", static_cast<u32>(policy));
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ERR_PERMISSION_DENIED);
return;
diff --git a/src/core/loader/elf.cpp b/src/core/loader/elf.cpp
index 1e9ed2837..8f7615115 100644
--- a/src/core/loader/elf.cpp
+++ b/src/core/loader/elf.cpp
@@ -398,6 +398,11 @@ AppLoader_ELF::LoadResult AppLoader_ELF::Load(Kernel::Process& process) {
Kernel::CodeSet codeset = elf_reader.LoadInto(base_address);
const VAddr entry_point = codeset.entrypoint;
+ // Set up the process code layout
+ if (process.LoadFromMetadata(FileSys::ProgramMetadata::GetDefault(), buffer.size()).IsError()) {
+ return {ResultStatus::ErrorNotInitialized, {}};
+ }
+
process.LoadModule(std::move(codeset), entry_point);
is_loaded = true;
diff --git a/src/core/loader/nro.cpp b/src/core/loader/nro.cpp
index 5d7e8136e..906544bc9 100644
--- a/src/core/loader/nro.cpp
+++ b/src/core/loader/nro.cpp
@@ -131,7 +131,7 @@ static constexpr u32 PageAlignSize(u32 size) {
}
static bool LoadNroImpl(Kernel::Process& process, const std::vector<u8>& data,
- const std::string& name, VAddr load_base) {
+ const std::string& name) {
if (data.size() < sizeof(NroHeader)) {
return {};
}
@@ -187,19 +187,25 @@ static bool LoadNroImpl(Kernel::Process& process, const std::vector<u8>& data,
codeset.DataSegment().size += bss_size;
program_image.resize(static_cast<u32>(program_image.size()) + bss_size);
+ // Set up the process code layout
+ if (process.LoadFromMetadata(FileSys::ProgramMetadata::GetDefault(), program_image.size())
+ .IsError()) {
+ return false;
+ }
+
// Load codeset for current process
codeset.memory = std::move(program_image);
- process.LoadModule(std::move(codeset), load_base);
+ process.LoadModule(std::move(codeset), process.PageTable().GetCodeRegionStart());
// Register module with GDBStub
- GDBStub::RegisterModule(name, load_base, load_base);
+ GDBStub::RegisterModule(name, process.PageTable().GetCodeRegionStart(),
+ process.PageTable().GetCodeRegionEnd());
return true;
}
-bool AppLoader_NRO::LoadNro(Kernel::Process& process, const FileSys::VfsFile& file,
- VAddr load_base) {
- return LoadNroImpl(process, file.ReadAllBytes(), file.GetName(), load_base);
+bool AppLoader_NRO::LoadNro(Kernel::Process& process, const FileSys::VfsFile& file) {
+ return LoadNroImpl(process, file.ReadAllBytes(), file.GetName());
}
AppLoader_NRO::LoadResult AppLoader_NRO::Load(Kernel::Process& process) {
@@ -207,10 +213,7 @@ AppLoader_NRO::LoadResult AppLoader_NRO::Load(Kernel::Process& process) {
return {ResultStatus::ErrorAlreadyLoaded, {}};
}
- // Load NRO
- const VAddr base_address = process.PageTable().GetCodeRegionStart();
-
- if (!LoadNro(process, *file, base_address)) {
+ if (!LoadNro(process, *file)) {
return {ResultStatus::ErrorLoadingNRO, {}};
}
diff --git a/src/core/loader/nro.h b/src/core/loader/nro.h
index 71811bc29..4593d48fb 100644
--- a/src/core/loader/nro.h
+++ b/src/core/loader/nro.h
@@ -47,7 +47,7 @@ public:
bool IsRomFSUpdatable() const override;
private:
- bool LoadNro(Kernel::Process& process, const FileSys::VfsFile& file, VAddr load_base);
+ bool LoadNro(Kernel::Process& process, const FileSys::VfsFile& file);
std::vector<u8> icon_data;
std::unique_ptr<FileSys::NACP> nacp;
diff --git a/src/core/reporter.h b/src/core/reporter.h
index 380941b1b..86d760cf0 100644
--- a/src/core/reporter.h
+++ b/src/core/reporter.h
@@ -56,6 +56,7 @@ public:
enum class PlayReportType {
Old,
+ Old2,
New,
System,
};
diff --git a/src/core/settings.cpp b/src/core/settings.cpp
index c1282cb80..2b0bdc4d3 100644
--- a/src/core/settings.cpp
+++ b/src/core/settings.cpp
@@ -92,10 +92,11 @@ void LogSettings() {
LogSetting("Renderer_UseFrameLimit", Settings::values.use_frame_limit);
LogSetting("Renderer_FrameLimit", Settings::values.frame_limit);
LogSetting("Renderer_UseDiskShaderCache", Settings::values.use_disk_shader_cache);
- LogSetting("Renderer_UseAccurateGpuEmulation", Settings::values.use_accurate_gpu_emulation);
+ LogSetting("Renderer_GPUAccuracyLevel", Settings::values.gpu_accuracy);
LogSetting("Renderer_UseAsynchronousGpuEmulation",
Settings::values.use_asynchronous_gpu_emulation);
LogSetting("Renderer_UseVsync", Settings::values.use_vsync);
+ LogSetting("Renderer_AnisotropicFilteringLevel", Settings::values.max_anisotropy);
LogSetting("Audio_OutputEngine", Settings::values.sink_id);
LogSetting("Audio_EnableAudioStretching", Settings::values.enable_audio_stretching);
LogSetting("Audio_OutputDevice", Settings::values.audio_device_id);
@@ -109,4 +110,12 @@ void LogSettings() {
LogSetting("Services_BCATBoxcatLocal", Settings::values.bcat_boxcat_local);
}
+bool IsGPULevelExtreme() {
+ return values.gpu_accuracy == GPUAccuracy::Extreme;
+}
+
+bool IsGPULevelHigh() {
+ return values.gpu_accuracy == GPUAccuracy::Extreme || values.gpu_accuracy == GPUAccuracy::High;
+}
+
} // namespace Settings
diff --git a/src/core/settings.h b/src/core/settings.h
index 79ec01731..163900f0b 100644
--- a/src/core/settings.h
+++ b/src/core/settings.h
@@ -376,6 +376,12 @@ enum class RendererBackend {
Vulkan = 1,
};
+enum class GPUAccuracy : u32 {
+ Normal = 0,
+ High = 1,
+ Extreme = 2,
+};
+
struct Values {
// System
bool use_docked_mode;
@@ -436,10 +442,11 @@ struct Values {
bool use_frame_limit;
u16 frame_limit;
bool use_disk_shader_cache;
- bool use_accurate_gpu_emulation;
+ GPUAccuracy gpu_accuracy;
bool use_asynchronous_gpu_emulation;
bool use_vsync;
bool force_30fps_mode;
+ bool use_fast_gpu_time;
float bg_red;
float bg_green;
@@ -464,6 +471,7 @@ struct Values {
bool dump_nso;
bool reporting_services;
bool quest_flag;
+ bool disable_cpu_opt;
// BCAT
std::string bcat_backend;
@@ -479,6 +487,9 @@ struct Values {
std::map<u64, std::vector<std::string>> disabled_addons;
} extern values;
+bool IsGPULevelExtreme();
+bool IsGPULevelHigh();
+
void Apply();
void LogSettings();
} // namespace Settings
diff --git a/src/core/telemetry_session.cpp b/src/core/telemetry_session.cpp
index fd5a3ee9f..1c3b03a1c 100644
--- a/src/core/telemetry_session.cpp
+++ b/src/core/telemetry_session.cpp
@@ -56,6 +56,18 @@ static const char* TranslateRenderer(Settings::RendererBackend backend) {
return "Unknown";
}
+static const char* TranslateGPUAccuracyLevel(Settings::GPUAccuracy level) {
+ switch (level) {
+ case Settings::GPUAccuracy::Normal:
+ return "Normal";
+ case Settings::GPUAccuracy::High:
+ return "High";
+ case Settings::GPUAccuracy::Extreme:
+ return "Extreme";
+ }
+ return "Unknown";
+}
+
u64 GetTelemetryId() {
u64 telemetry_id{};
const std::string filename{FileUtil::GetUserPath(FileUtil::UserPath::ConfigDir) +
@@ -184,8 +196,8 @@ void TelemetrySession::AddInitialInfo(Loader::AppLoader& app_loader) {
AddField(field_type, "Renderer_UseFrameLimit", Settings::values.use_frame_limit);
AddField(field_type, "Renderer_FrameLimit", Settings::values.frame_limit);
AddField(field_type, "Renderer_UseDiskShaderCache", Settings::values.use_disk_shader_cache);
- AddField(field_type, "Renderer_UseAccurateGpuEmulation",
- Settings::values.use_accurate_gpu_emulation);
+ AddField(field_type, "Renderer_GPUAccuracyLevel",
+ TranslateGPUAccuracyLevel(Settings::values.gpu_accuracy));
AddField(field_type, "Renderer_UseAsynchronousGpuEmulation",
Settings::values.use_asynchronous_gpu_emulation);
AddField(field_type, "Renderer_UseVsync", Settings::values.use_vsync);
diff --git a/src/input_common/main.cpp b/src/input_common/main.cpp
index c98c848cf..95e351e24 100644
--- a/src/input_common/main.cpp
+++ b/src/input_common/main.cpp
@@ -18,7 +18,9 @@ namespace InputCommon {
static std::shared_ptr<Keyboard> keyboard;
static std::shared_ptr<MotionEmu> motion_emu;
+#ifdef HAVE_SDL2
static std::unique_ptr<SDL::State> sdl;
+#endif
static std::unique_ptr<CemuhookUDP::State> udp;
void Init() {
@@ -29,7 +31,9 @@ void Init() {
motion_emu = std::make_shared<MotionEmu>();
Input::RegisterFactory<Input::MotionDevice>("motion_emu", motion_emu);
+#ifdef HAVE_SDL2
sdl = SDL::Init();
+#endif
udp = CemuhookUDP::Init();
}
@@ -40,7 +44,9 @@ void Shutdown() {
Input::UnregisterFactory<Input::AnalogDevice>("analog_from_button");
Input::UnregisterFactory<Input::MotionDevice>("motion_emu");
motion_emu.reset();
+#ifdef HAVE_SDL2
sdl.reset();
+#endif
udp.reset();
}
diff --git a/src/tests/core/core_timing.cpp b/src/tests/core/core_timing.cpp
index 1e3940801..ff2d11cc8 100644
--- a/src/tests/core/core_timing.cpp
+++ b/src/tests/core/core_timing.cpp
@@ -14,13 +14,14 @@
#include "core/core.h"
#include "core/core_timing.h"
+namespace {
// Numbers are chosen randomly to make sure the correct one is given.
-static constexpr std::array<u64, 5> CB_IDS{{42, 144, 93, 1026, UINT64_C(0xFFFF7FFFF7FFFF)}};
-static constexpr int MAX_SLICE_LENGTH = 10000; // Copied from CoreTiming internals
+constexpr std::array<u64, 5> CB_IDS{{42, 144, 93, 1026, UINT64_C(0xFFFF7FFFF7FFFF)}};
+constexpr int MAX_SLICE_LENGTH = 10000; // Copied from CoreTiming internals
-static std::bitset<CB_IDS.size()> callbacks_ran_flags;
-static u64 expected_callback = 0;
-static s64 lateness = 0;
+std::bitset<CB_IDS.size()> callbacks_ran_flags;
+u64 expected_callback = 0;
+s64 lateness = 0;
template <unsigned int IDX>
void CallbackTemplate(u64 userdata, s64 cycles_late) {
@@ -31,7 +32,7 @@ void CallbackTemplate(u64 userdata, s64 cycles_late) {
REQUIRE(lateness == cycles_late);
}
-static u64 callbacks_done = 0;
+u64 callbacks_done = 0;
void EmptyCallback(u64 userdata, s64 cycles_late) {
++callbacks_done;
@@ -48,8 +49,8 @@ struct ScopeInit final {
Core::Timing::CoreTiming core_timing;
};
-static void AdvanceAndCheck(Core::Timing::CoreTiming& core_timing, u32 idx, u32 context = 0,
- int expected_lateness = 0, int cpu_downcount = 0) {
+void AdvanceAndCheck(Core::Timing::CoreTiming& core_timing, u32 idx, u32 context = 0,
+ int expected_lateness = 0, int cpu_downcount = 0) {
callbacks_ran_flags = 0;
expected_callback = CB_IDS[idx];
lateness = expected_lateness;
@@ -62,6 +63,7 @@ static void AdvanceAndCheck(Core::Timing::CoreTiming& core_timing, u32 idx, u32
REQUIRE(decltype(callbacks_ran_flags)().set(idx) == callbacks_ran_flags);
}
+} // Anonymous namespace
TEST_CASE("CoreTiming[BasicOrder]", "[core]") {
ScopeInit guard;
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index 258d58eba..ff53282c9 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -23,6 +23,7 @@ add_library(video_core STATIC
engines/shader_bytecode.h
engines/shader_header.h
engines/shader_type.h
+ fence_manager.h
gpu.cpp
gpu.h
gpu_asynch.cpp
@@ -51,6 +52,8 @@ add_library(video_core STATIC
renderer_opengl/gl_buffer_cache.h
renderer_opengl/gl_device.cpp
renderer_opengl/gl_device.h
+ renderer_opengl/gl_fence_manager.cpp
+ renderer_opengl/gl_fence_manager.h
renderer_opengl/gl_framebuffer_cache.cpp
renderer_opengl/gl_framebuffer_cache.h
renderer_opengl/gl_rasterizer.cpp
@@ -121,6 +124,8 @@ add_library(video_core STATIC
shader/decode.cpp
shader/expr.cpp
shader/expr.h
+ shader/memory_util.cpp
+ shader/memory_util.h
shader/node_helper.cpp
shader/node_helper.h
shader/node.h
@@ -160,6 +165,8 @@ if (ENABLE_VULKAN)
renderer_vulkan/fixed_pipeline_state.h
renderer_vulkan/maxwell_to_vk.cpp
renderer_vulkan/maxwell_to_vk.h
+ renderer_vulkan/nsight_aftermath_tracker.cpp
+ renderer_vulkan/nsight_aftermath_tracker.h
renderer_vulkan/renderer_vulkan.h
renderer_vulkan/renderer_vulkan.cpp
renderer_vulkan/vk_blit_screen.cpp
@@ -174,6 +181,8 @@ if (ENABLE_VULKAN)
renderer_vulkan/vk_descriptor_pool.h
renderer_vulkan/vk_device.cpp
renderer_vulkan/vk_device.h
+ renderer_vulkan/vk_fence_manager.cpp
+ renderer_vulkan/vk_fence_manager.h
renderer_vulkan/vk_graphics_pipeline.cpp
renderer_vulkan/vk_graphics_pipeline.h
renderer_vulkan/vk_image.cpp
@@ -213,19 +222,30 @@ if (ENABLE_VULKAN)
renderer_vulkan/wrapper.cpp
renderer_vulkan/wrapper.h
)
-
- target_include_directories(video_core PRIVATE sirit ../../externals/Vulkan-Headers/include)
- target_compile_definitions(video_core PRIVATE HAS_VULKAN)
endif()
create_target_directory_groups(video_core)
target_link_libraries(video_core PUBLIC common core)
target_link_libraries(video_core PRIVATE glad)
+
if (ENABLE_VULKAN)
+ target_include_directories(video_core PRIVATE sirit ../../externals/Vulkan-Headers/include)
+ target_compile_definitions(video_core PRIVATE HAS_VULKAN)
target_link_libraries(video_core PRIVATE sirit)
endif()
+if (ENABLE_NSIGHT_AFTERMATH)
+ if (NOT DEFINED ENV{NSIGHT_AFTERMATH_SDK})
+ message(ERROR "Environment variable NSIGHT_AFTERMATH_SDK has to be provided")
+ endif()
+ if (NOT WIN32)
+ message(ERROR "Nsight Aftermath doesn't support non-Windows platforms")
+ endif()
+ target_compile_definitions(video_core PRIVATE HAS_NSIGHT_AFTERMATH)
+ target_include_directories(video_core PRIVATE "$ENV{NSIGHT_AFTERMATH_SDK}/include")
+endif()
+
if (MSVC)
target_compile_options(video_core PRIVATE /we4267)
else()
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 83e7a1cde..56e570994 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -4,7 +4,7 @@
#pragma once
-#include <array>
+#include <list>
#include <memory>
#include <mutex>
#include <unordered_map>
@@ -18,8 +18,10 @@
#include "common/alignment.h"
#include "common/common_types.h"
+#include "common/logging/log.h"
#include "core/core.h"
#include "core/memory.h"
+#include "core/settings.h"
#include "video_core/buffer_cache/buffer_block.h"
#include "video_core/buffer_cache/map_interval.h"
#include "video_core/memory_manager.h"
@@ -79,14 +81,13 @@ public:
auto map = MapAddress(block, gpu_addr, cpu_addr, size);
if (is_written) {
map->MarkAsModified(true, GetModifiedTicks());
+ if (Settings::IsGPULevelHigh() && Settings::values.use_asynchronous_gpu_emulation) {
+ MarkForAsyncFlush(map);
+ }
if (!map->IsWritten()) {
map->MarkAsWritten(true);
MarkRegionAsWritten(map->GetStart(), map->GetEnd() - 1);
}
- } else {
- if (map->IsWritten()) {
- WriteBarrier();
- }
}
return {ToHandle(block), static_cast<u64>(block->GetOffset(cpu_addr))};
@@ -137,11 +138,22 @@ public:
});
for (auto& object : objects) {
if (object->IsModified() && object->IsRegistered()) {
+ mutex.unlock();
FlushMap(object);
+ mutex.lock();
}
}
}
+ bool MustFlushRegion(VAddr addr, std::size_t size) {
+ std::lock_guard lock{mutex};
+
+ const std::vector<MapInterval> objects = GetMapsInRange(addr, size);
+ return std::any_of(objects.cbegin(), objects.cend(), [](const MapInterval& map) {
+ return map->IsModified() && map->IsRegistered();
+ });
+ }
+
/// Mark the specified region as being invalidated
void InvalidateRegion(VAddr addr, u64 size) {
std::lock_guard lock{mutex};
@@ -154,6 +166,77 @@ public:
}
}
+ void OnCPUWrite(VAddr addr, std::size_t size) {
+ std::lock_guard lock{mutex};
+
+ for (const auto& object : GetMapsInRange(addr, size)) {
+ if (object->IsMemoryMarked() && object->IsRegistered()) {
+ UnmarkMemory(object);
+ object->SetSyncPending(true);
+ marked_for_unregister.emplace_back(object);
+ }
+ }
+ }
+
+ void SyncGuestHost() {
+ std::lock_guard lock{mutex};
+
+ for (const auto& object : marked_for_unregister) {
+ if (object->IsRegistered()) {
+ object->SetSyncPending(false);
+ Unregister(object);
+ }
+ }
+ marked_for_unregister.clear();
+ }
+
+ void CommitAsyncFlushes() {
+ if (uncommitted_flushes) {
+ auto commit_list = std::make_shared<std::list<MapInterval>>();
+ for (auto& map : *uncommitted_flushes) {
+ if (map->IsRegistered() && map->IsModified()) {
+ // TODO(Blinkhawk): Implement backend asynchronous flushing
+ // AsyncFlushMap(map)
+ commit_list->push_back(map);
+ }
+ }
+ if (!commit_list->empty()) {
+ committed_flushes.push_back(commit_list);
+ } else {
+ committed_flushes.emplace_back();
+ }
+ } else {
+ committed_flushes.emplace_back();
+ }
+ uncommitted_flushes.reset();
+ }
+
+ bool ShouldWaitAsyncFlushes() const {
+ return !committed_flushes.empty() && committed_flushes.front() != nullptr;
+ }
+
+ bool HasUncommittedFlushes() const {
+ return uncommitted_flushes != nullptr;
+ }
+
+ void PopAsyncFlushes() {
+ if (committed_flushes.empty()) {
+ return;
+ }
+ auto& flush_list = committed_flushes.front();
+ if (!flush_list) {
+ committed_flushes.pop_front();
+ return;
+ }
+ for (MapInterval& map : *flush_list) {
+ if (map->IsRegistered()) {
+ // TODO(Blinkhawk): Replace this with reading from the asynchronous flush
+ FlushMap(map);
+ }
+ }
+ committed_flushes.pop_front();
+ }
+
virtual BufferType GetEmptyBuffer(std::size_t size) = 0;
protected:
@@ -166,8 +249,6 @@ protected:
virtual BufferType ToHandle(const OwnerBuffer& storage) = 0;
- virtual void WriteBarrier() = 0;
-
virtual OwnerBuffer CreateBlock(VAddr cpu_addr, std::size_t size) = 0;
virtual void UploadBlockData(const OwnerBuffer& buffer, std::size_t offset, std::size_t size,
@@ -196,17 +277,30 @@ protected:
const IntervalType interval{new_map->GetStart(), new_map->GetEnd()};
mapped_addresses.insert({interval, new_map});
rasterizer.UpdatePagesCachedCount(cpu_addr, size, 1);
+ new_map->SetMemoryMarked(true);
if (inherit_written) {
MarkRegionAsWritten(new_map->GetStart(), new_map->GetEnd() - 1);
new_map->MarkAsWritten(true);
}
}
- /// Unregisters an object from the cache
- void Unregister(MapInterval& map) {
+ void UnmarkMemory(const MapInterval& map) {
+ if (!map->IsMemoryMarked()) {
+ return;
+ }
const std::size_t size = map->GetEnd() - map->GetStart();
rasterizer.UpdatePagesCachedCount(map->GetStart(), size, -1);
+ map->SetMemoryMarked(false);
+ }
+
+ /// Unregisters an object from the cache
+ void Unregister(const MapInterval& map) {
+ UnmarkMemory(map);
map->MarkAsRegistered(false);
+ if (map->IsSyncPending()) {
+ marked_for_unregister.remove(map);
+ map->SetSyncPending(false);
+ }
if (map->IsWritten()) {
UnmarkRegionAsWritten(map->GetStart(), map->GetEnd() - 1);
}
@@ -264,6 +358,9 @@ private:
MapInterval new_map = CreateMap(new_start, new_end, new_gpu_addr);
if (modified_inheritance) {
new_map->MarkAsModified(true, GetModifiedTicks());
+ if (Settings::IsGPULevelHigh() && Settings::values.use_asynchronous_gpu_emulation) {
+ MarkForAsyncFlush(new_map);
+ }
}
Register(new_map, write_inheritance);
return new_map;
@@ -450,6 +547,13 @@ private:
return false;
}
+ void MarkForAsyncFlush(MapInterval& map) {
+ if (!uncommitted_flushes) {
+ uncommitted_flushes = std::make_shared<std::unordered_set<MapInterval>>();
+ }
+ uncommitted_flushes->insert(map);
+ }
+
VideoCore::RasterizerInterface& rasterizer;
Core::System& system;
@@ -479,6 +583,10 @@ private:
u64 modified_ticks = 0;
std::vector<u8> staging_buffer;
+ std::list<MapInterval> marked_for_unregister;
+
+ std::shared_ptr<std::unordered_set<MapInterval>> uncommitted_flushes{};
+ std::list<std::shared_ptr<std::list<MapInterval>>> committed_flushes;
std::recursive_mutex mutex;
};
diff --git a/src/video_core/buffer_cache/map_interval.h b/src/video_core/buffer_cache/map_interval.h
index b0956029d..29d8b26f3 100644
--- a/src/video_core/buffer_cache/map_interval.h
+++ b/src/video_core/buffer_cache/map_interval.h
@@ -46,6 +46,22 @@ public:
return is_registered;
}
+ void SetMemoryMarked(bool is_memory_marked_) {
+ is_memory_marked = is_memory_marked_;
+ }
+
+ bool IsMemoryMarked() const {
+ return is_memory_marked;
+ }
+
+ void SetSyncPending(bool is_sync_pending_) {
+ is_sync_pending = is_sync_pending_;
+ }
+
+ bool IsSyncPending() const {
+ return is_sync_pending;
+ }
+
VAddr GetStart() const {
return start;
}
@@ -83,6 +99,8 @@ private:
bool is_written{};
bool is_modified{};
bool is_registered{};
+ bool is_memory_marked{};
+ bool is_sync_pending{};
u64 ticks{};
};
diff --git a/src/video_core/dma_pusher.cpp b/src/video_core/dma_pusher.cpp
index 0b77afc71..16311f05e 100644
--- a/src/video_core/dma_pusher.cpp
+++ b/src/video_core/dma_pusher.cpp
@@ -21,6 +21,7 @@ MICROPROFILE_DEFINE(DispatchCalls, "GPU", "Execute command buffer", MP_RGB(128,
void DmaPusher::DispatchCalls() {
MICROPROFILE_SCOPE(DispatchCalls);
+ gpu.SyncGuestHost();
// On entering GPU code, assume all memory may be touched by the ARM core.
gpu.Maxwell3D().OnMemoryWrite();
@@ -32,6 +33,8 @@ void DmaPusher::DispatchCalls() {
}
}
gpu.FlushCommands();
+ gpu.SyncGuestHost();
+ gpu.OnCommandListEnd();
}
bool DmaPusher::Step() {
@@ -68,16 +71,22 @@ bool DmaPusher::Step() {
gpu.MemoryManager().ReadBlockUnsafe(dma_get, command_headers.data(),
command_list_header.size * sizeof(u32));
- for (const CommandHeader& command_header : command_headers) {
+ for (std::size_t index = 0; index < command_headers.size();) {
+ const CommandHeader& command_header = command_headers[index];
- // now, see if we're in the middle of a command
- if (dma_state.length_pending) {
- // Second word of long non-inc methods command - method count
- dma_state.length_pending = 0;
- dma_state.method_count = command_header.method_count_;
- } else if (dma_state.method_count) {
+ if (dma_state.method_count) {
// Data word of methods command
- CallMethod(command_header.argument);
+ if (dma_state.non_incrementing) {
+ const u32 max_write = static_cast<u32>(
+ std::min<std::size_t>(index + dma_state.method_count, command_headers.size()) -
+ index);
+ CallMultiMethod(&command_header.argument, max_write);
+ dma_state.method_count -= max_write;
+ index += max_write;
+ continue;
+ } else {
+ CallMethod(command_header.argument);
+ }
if (!dma_state.non_incrementing) {
dma_state.method++;
@@ -117,6 +126,7 @@ bool DmaPusher::Step() {
break;
}
}
+ index++;
}
if (!non_main) {
@@ -137,4 +147,9 @@ void DmaPusher::CallMethod(u32 argument) const {
gpu.CallMethod({dma_state.method, argument, dma_state.subchannel, dma_state.method_count});
}
+void DmaPusher::CallMultiMethod(const u32* base_start, u32 num_methods) const {
+ gpu.CallMultiMethod(dma_state.method, dma_state.subchannel, base_start, num_methods,
+ dma_state.method_count);
+}
+
} // namespace Tegra
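
The reworked DmaPusher loop above batches the data words of a non-incrementing method: instead of one CallMethod per argument, it forwards up to method_count consecutive words in a single CallMultiMethod call, clamped to what was actually fetched into command_headers, and advances the index past them. A standalone sketch of just that clamping and advancing arithmetic; the command buffer and the dispatch callback are stand-ins:

    // Sketch of the batching arithmetic used for non-incrementing methods.
    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Forward up to `method_count` words starting at `index` in one call,
    // clamped to what has actually been fetched into `words`.
    std::size_t DispatchNonIncrementing(const std::vector<std::uint32_t>& words, std::size_t index,
                                        std::uint32_t& method_count) {
        const std::uint32_t max_write = static_cast<std::uint32_t>(
            std::min<std::size_t>(index + method_count, words.size()) - index);
        std::printf("CallMultiMethod(base=%zu, count=%u)\n", index, max_write);
        method_count -= max_write;
        return index + max_write; // next index to process
    }

    int main() {
        const std::vector<std::uint32_t> words(8, 0); // 8 fetched argument words
        std::uint32_t method_count = 5;               // method expects 5 data words
        std::size_t index = 6;                        // only 2 remain in this fetch
        index = DispatchNonIncrementing(words, index, method_count);
        std::printf("index=%zu, remaining=%u\n", index, method_count); // index=8, remaining=3
    }
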
diff --git a/src/video_core/dma_pusher.h b/src/video_core/dma_pusher.h
index d6188614a..6cef71306 100644
--- a/src/video_core/dma_pusher.h
+++ b/src/video_core/dma_pusher.h
@@ -75,6 +75,7 @@ private:
void SetState(const CommandHeader& command_header);
void CallMethod(u32 argument) const;
+ void CallMultiMethod(const u32* base_start, u32 num_methods) const;
std::vector<CommandHeader> command_headers; ///< Buffer for list of commands fetched at once
diff --git a/src/video_core/engines/fermi_2d.cpp b/src/video_core/engines/fermi_2d.cpp
index 85d308e26..8a47614d2 100644
--- a/src/video_core/engines/fermi_2d.cpp
+++ b/src/video_core/engines/fermi_2d.cpp
@@ -28,7 +28,13 @@ void Fermi2D::CallMethod(const GPU::MethodCall& method_call) {
}
}
-std::pair<u32, u32> DelimitLine(u32 src_1, u32 src_2, u32 dst_1, u32 dst_2, u32 src_line) {
+void Fermi2D::CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending) {
+ for (std::size_t i = 0; i < amount; i++) {
+ CallMethod({method, base_start[i], 0, methods_pending - static_cast<u32>(i)});
+ }
+}
+
+static std::pair<u32, u32> DelimitLine(u32 src_1, u32 src_2, u32 dst_1, u32 dst_2, u32 src_line) {
const u32 line_a = src_2 - src_1;
const u32 line_b = dst_2 - dst_1;
const u32 excess = std::max<s32>(0, line_a - src_line + src_1);
diff --git a/src/video_core/engines/fermi_2d.h b/src/video_core/engines/fermi_2d.h
index dba342c70..939a5966d 100644
--- a/src/video_core/engines/fermi_2d.h
+++ b/src/video_core/engines/fermi_2d.h
@@ -39,6 +39,9 @@ public:
/// Write the value to the register identified by method.
void CallMethod(const GPU::MethodCall& method_call);
+ /// Write multiple values to the register identified by method.
+ void CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending);
+
enum class Origin : u32 {
Center = 0,
Corner = 1,
diff --git a/src/video_core/engines/kepler_compute.cpp b/src/video_core/engines/kepler_compute.cpp
index 368c75a66..00a12175f 100644
--- a/src/video_core/engines/kepler_compute.cpp
+++ b/src/video_core/engines/kepler_compute.cpp
@@ -51,6 +51,13 @@ void KeplerCompute::CallMethod(const GPU::MethodCall& method_call) {
}
}
+void KeplerCompute::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
+ u32 methods_pending) {
+ for (std::size_t i = 0; i < amount; i++) {
+ CallMethod({method, base_start[i], 0, methods_pending - static_cast<u32>(i)});
+ }
+}
+
Texture::FullTextureInfo KeplerCompute::GetTexture(std::size_t offset) const {
const std::bitset<8> cbuf_mask = launch_description.const_buffer_enable_mask.Value();
ASSERT(cbuf_mask[regs.tex_cb_index]);
diff --git a/src/video_core/engines/kepler_compute.h b/src/video_core/engines/kepler_compute.h
index eeb79c56f..fe55fdfd0 100644
--- a/src/video_core/engines/kepler_compute.h
+++ b/src/video_core/engines/kepler_compute.h
@@ -202,6 +202,9 @@ public:
/// Write the value to the register identified by method.
void CallMethod(const GPU::MethodCall& method_call);
+ /// Write multiple values to the register identified by method.
+ void CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending);
+
Texture::FullTextureInfo GetTexture(std::size_t offset) const;
/// Given a texture handle, returns the TSC and TIC entries.
diff --git a/src/video_core/engines/kepler_memory.cpp b/src/video_core/engines/kepler_memory.cpp
index 597872e43..586ff15dc 100644
--- a/src/video_core/engines/kepler_memory.cpp
+++ b/src/video_core/engines/kepler_memory.cpp
@@ -41,4 +41,11 @@ void KeplerMemory::CallMethod(const GPU::MethodCall& method_call) {
}
}
+void KeplerMemory::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
+ u32 methods_pending) {
+ for (std::size_t i = 0; i < amount; i++) {
+ CallMethod({method, base_start[i], 0, methods_pending - static_cast<u32>(i)});
+ }
+}
+
} // namespace Tegra::Engines
diff --git a/src/video_core/engines/kepler_memory.h b/src/video_core/engines/kepler_memory.h
index 396fb6e86..bb26fb030 100644
--- a/src/video_core/engines/kepler_memory.h
+++ b/src/video_core/engines/kepler_memory.h
@@ -40,6 +40,9 @@ public:
/// Write the value to the register identified by method.
void CallMethod(const GPU::MethodCall& method_call);
+ /// Write multiple values to the register identified by method.
+ void CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending);
+
struct Regs {
static constexpr size_t NUM_REGS = 0x7F;
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp
index ba63b44b4..7db055ea0 100644
--- a/src/video_core/engines/maxwell_3d.cpp
+++ b/src/video_core/engines/maxwell_3d.cpp
@@ -92,6 +92,10 @@ void Maxwell3D::InitializeRegisterDefaults() {
color_mask.A.Assign(1);
}
+ for (auto& format : regs.vertex_attrib_format) {
+ format.constant.Assign(1);
+ }
+
// NVN games expect these values to be enabled at boot
regs.rasterize_enable = 1;
regs.rt_separate_frag_data = 1;
@@ -180,6 +184,10 @@ void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) {
}
switch (method) {
+ case MAXWELL3D_REG_INDEX(wait_for_idle): {
+ rasterizer.WaitForIdle();
+ break;
+ }
case MAXWELL3D_REG_INDEX(shadow_ram_control): {
shadow_state.shadow_ram_control = static_cast<Regs::ShadowRamControl>(method_call.argument);
break;
@@ -276,6 +284,58 @@ void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) {
}
}
+void Maxwell3D::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
+ u32 methods_pending) {
+ // Methods after 0xE00 are special; they're actually triggers for some microcode that was
+ // uploaded to the GPU during initialization.
+ if (method >= MacroRegistersStart) {
+ // We're trying to execute a macro
+ if (executing_macro == 0) {
+ // A macro call must begin by writing the macro method's register, not its argument.
+ ASSERT_MSG((method % 2) == 0,
+ "Can't start macro execution by writing to the ARGS register");
+ executing_macro = method;
+ }
+
+ for (std::size_t i = 0; i < amount; i++) {
+ macro_params.push_back(base_start[i]);
+ }
+
+ // Call the macro when there are no more parameters in the command buffer
+ if (amount == methods_pending) {
+ CallMacroMethod(executing_macro, macro_params.size(), macro_params.data());
+ macro_params.clear();
+ }
+ return;
+ }
+ switch (method) {
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[0]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[1]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[2]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[3]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[4]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[5]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[6]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[7]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[8]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[9]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[10]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[11]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[12]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[13]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[14]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[15]): {
+ ProcessCBMultiData(method, base_start, amount);
+ break;
+ }
+ default: {
+ for (std::size_t i = 0; i < amount; i++) {
+ CallMethod({method, base_start[i], 0, methods_pending - static_cast<u32>(i)});
+ }
+ }
+ }
+}
+
void Maxwell3D::StepInstance(const MMEDrawMode expected_mode, const u32 count) {
if (mme_draw.current_mode == MMEDrawMode::Undefined) {
if (mme_draw.gl_begin_consume) {
@@ -400,7 +460,11 @@ void Maxwell3D::ProcessQueryGet() {
switch (regs.query.query_get.operation) {
case Regs::QueryOperation::Release:
- StampQueryResult(regs.query.query_sequence, regs.query.query_get.short_query == 0);
+ if (regs.query.query_get.fence == 1) {
+ rasterizer.SignalSemaphore(regs.query.QueryAddress(), regs.query.query_sequence);
+ } else {
+ StampQueryResult(regs.query.query_sequence, regs.query.query_get.short_query == 0);
+ }
break;
case Regs::QueryOperation::Acquire:
// TODO(Blinkhawk): Under this operation, the GPU waits for the CPU to write a value that
@@ -479,7 +543,7 @@ void Maxwell3D::ProcessSyncPoint() {
const u32 increment = regs.sync_info.increment.Value();
[[maybe_unused]] const u32 cache_flush = regs.sync_info.unknown.Value();
if (increment) {
- system.GPU().IncrementSyncPoint(sync_point);
+ rasterizer.SignalSyncPoint(sync_point);
}
}
@@ -562,6 +626,28 @@ void Maxwell3D::StartCBData(u32 method) {
ProcessCBData(regs.const_buffer.cb_data[cb_data_state.id]);
}
+void Maxwell3D::ProcessCBMultiData(u32 method, const u32* start_base, u32 amount) {
+ if (cb_data_state.current != method) {
+ if (cb_data_state.current != null_cb_data) {
+ FinishCBData();
+ }
+ constexpr u32 first_cb_data = MAXWELL3D_REG_INDEX(const_buffer.cb_data[0]);
+ cb_data_state.start_pos = regs.const_buffer.cb_pos;
+ cb_data_state.id = method - first_cb_data;
+ cb_data_state.current = method;
+ cb_data_state.counter = 0;
+ }
+ const std::size_t id = cb_data_state.id;
+ const std::size_t size = amount;
+ std::size_t i = 0;
+ for (; i < size; i++) {
+ cb_data_state.buffer[id][cb_data_state.counter] = start_base[i];
+ cb_data_state.counter++;
+ }
+ // Increment the current buffer position.
+ regs.const_buffer.cb_pos = regs.const_buffer.cb_pos + 4 * amount;
+}
+
void Maxwell3D::FinishCBData() {
// Write the input value to the current const buffer at the current position.
const GPUVAddr buffer_address = regs.const_buffer.BufferAddress();
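
In Maxwell3D::CallMultiMethod above, writes at or beyond MacroRegistersStart only accumulate parameters; the macro itself runs once the batch carries the last pending argument, i.e. when amount == methods_pending. A sketch of that accumulate-then-execute pattern, with an illustrative class standing in for the engine:

    // Sketch of the accumulate-until-complete pattern used for macro parameters.
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    class MacroAccumulator {
    public:
        // `amount` values arrive now; `methods_pending` counts how many are still
        // expected, including this batch. When they match, the call is complete.
        void Push(const std::uint32_t* base, std::uint32_t amount, std::uint32_t methods_pending) {
            params.insert(params.end(), base, base + amount);
            if (amount == methods_pending) {
                Execute();
                params.clear();
            }
        }

    private:
        void Execute() {
            std::printf("executing macro with %zu parameters\n", params.size());
        }

        std::vector<std::uint32_t> params;
    };

    int main() {
        MacroAccumulator acc;
        const std::uint32_t first[3] = {1, 2, 3};
        const std::uint32_t last[2] = {4, 5};
        acc.Push(first, 3, 5); // 5 parameters pending in total, not complete yet
        acc.Push(last, 2, 2);  // the final 2 arrive, the macro runs with 5 params
    }
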
diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h
index 5cf6a4cc3..864924ff3 100644
--- a/src/video_core/engines/maxwell_3d.h
+++ b/src/video_core/engines/maxwell_3d.h
@@ -709,7 +709,9 @@ public:
union {
struct {
- INSERT_UNION_PADDING_WORDS(0x45);
+ INSERT_UNION_PADDING_WORDS(0x44);
+
+ u32 wait_for_idle;
struct {
u32 upload_address;
@@ -1149,7 +1151,7 @@ public:
/// Returns whether the vertex array specified by index is supposed to be
/// accessed per instance or not.
- bool IsInstancingEnabled(u32 index) const {
+ bool IsInstancingEnabled(std::size_t index) const {
return is_instanced[index];
}
} instanced_arrays;
@@ -1179,6 +1181,7 @@ public:
BitField<0, 1, u32> depth_range_0_1;
BitField<3, 1, u32> depth_clamp_near;
BitField<4, 1, u32> depth_clamp_far;
+ BitField<11, 1, u32> depth_clamp_disabled;
} view_volume_clip_control;
INSERT_UNION_PADDING_WORDS(0x1F);
@@ -1259,7 +1262,8 @@ public:
GPUVAddr LimitAddress() const {
return static_cast<GPUVAddr>((static_cast<GPUVAddr>(limit_high) << 32) |
- limit_low);
+ limit_low) +
+ 1;
}
} vertex_array_limit[NumVertexArrays];
@@ -1358,6 +1362,9 @@ public:
/// Write the value to the register identified by method.
void CallMethod(const GPU::MethodCall& method_call);
+ /// Write multiple values to the register identified by method.
+ void CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending);
+
/// Write the value to the register identified by method.
void CallMethodFromMME(const GPU::MethodCall& method_call);
@@ -1511,6 +1518,7 @@ private:
/// Handles a write to the CB_DATA[i] register.
void StartCBData(u32 method);
void ProcessCBData(u32 value);
+ void ProcessCBMultiData(u32 method, const u32* start_base, u32 amount);
void FinishCBData();
/// Handles a write to the CB_BIND register.
@@ -1530,6 +1538,7 @@ private:
static_assert(offsetof(Maxwell3D::Regs, field_name) == position * 4, \
"Field " #field_name " has invalid position")
+ASSERT_REG_POSITION(wait_for_idle, 0x44);
ASSERT_REG_POSITION(macros, 0x45);
ASSERT_REG_POSITION(shadow_ram_control, 0x49);
ASSERT_REG_POSITION(upload, 0x60);
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp
index c2610f992..6630005b0 100644
--- a/src/video_core/engines/maxwell_dma.cpp
+++ b/src/video_core/engines/maxwell_dma.cpp
@@ -36,6 +36,13 @@ void MaxwellDMA::CallMethod(const GPU::MethodCall& method_call) {
#undef MAXWELLDMA_REG_INDEX
}
+void MaxwellDMA::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
+ u32 methods_pending) {
+ for (std::size_t i = 0; i < amount; i++) {
+ CallMethod({method, base_start[i], 0, methods_pending - static_cast<u32>(i)});
+ }
+}
+
void MaxwellDMA::HandleCopy() {
LOG_TRACE(HW_GPU, "Requested a DMA copy");
@@ -104,8 +111,13 @@ void MaxwellDMA::HandleCopy() {
write_buffer.resize(dst_size);
}
- memory_manager.ReadBlock(source, read_buffer.data(), src_size);
- memory_manager.ReadBlock(dest, write_buffer.data(), dst_size);
+ if (Settings::IsGPULevelExtreme()) {
+ memory_manager.ReadBlock(source, read_buffer.data(), src_size);
+ memory_manager.ReadBlock(dest, write_buffer.data(), dst_size);
+ } else {
+ memory_manager.ReadBlockUnsafe(source, read_buffer.data(), src_size);
+ memory_manager.ReadBlockUnsafe(dest, write_buffer.data(), dst_size);
+ }
Texture::UnswizzleSubrect(
regs.x_count, regs.y_count, regs.dst_pitch, regs.src_params.size_x, bytes_per_pixel,
@@ -136,7 +148,7 @@ void MaxwellDMA::HandleCopy() {
write_buffer.resize(dst_size);
}
- if (Settings::values.use_accurate_gpu_emulation) {
+ if (Settings::IsGPULevelExtreme()) {
memory_manager.ReadBlock(source, read_buffer.data(), src_size);
memory_manager.ReadBlock(dest, write_buffer.data(), dst_size);
} else {
diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h
index 4f40d1d1f..c43ed8194 100644
--- a/src/video_core/engines/maxwell_dma.h
+++ b/src/video_core/engines/maxwell_dma.h
@@ -35,6 +35,9 @@ public:
/// Write the value to the register identified by method.
void CallMethod(const GPU::MethodCall& method_call);
+ /// Write multiple values to the register identified by method.
+ void CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending);
+
struct Regs {
static constexpr std::size_t NUM_REGS = 0x1D6;
diff --git a/src/video_core/engines/shader_bytecode.h b/src/video_core/engines/shader_bytecode.h
index 7231597d4..8dae754d4 100644
--- a/src/video_core/engines/shader_bytecode.h
+++ b/src/video_core/engines/shader_bytecode.h
@@ -655,6 +655,7 @@ union Instruction {
}
constexpr Instruction(u64 value) : value{value} {}
+ constexpr Instruction(const Instruction& instr) : value(instr.value) {}
BitField<0, 8, Register> gpr0;
BitField<8, 8, Register> gpr8;
@@ -813,15 +814,17 @@ union Instruction {
} alu_integer;
union {
+ BitField<43, 1, u64> x;
+ } iadd;
+
+ union {
BitField<39, 1, u64> ftz;
BitField<32, 1, u64> saturate;
BitField<49, 2, HalfMerge> merge;
- BitField<43, 1, u64> negate_a;
BitField<44, 1, u64> abs_a;
BitField<47, 2, HalfType> type_a;
- BitField<31, 1, u64> negate_b;
BitField<30, 1, u64> abs_b;
BitField<28, 2, HalfType> type_b;
diff --git a/src/video_core/fence_manager.h b/src/video_core/fence_manager.h
new file mode 100644
index 000000000..8b2a6a42c
--- /dev/null
+++ b/src/video_core/fence_manager.h
@@ -0,0 +1,172 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <algorithm>
+#include <array>
+#include <memory>
+#include <queue>
+
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "core/core.h"
+#include "core/memory.h"
+#include "core/settings.h"
+#include "video_core/gpu.h"
+#include "video_core/memory_manager.h"
+#include "video_core/rasterizer_interface.h"
+
+namespace VideoCommon {
+
+class FenceBase {
+public:
+ FenceBase(u32 payload, bool is_stubbed)
+ : address{}, payload{payload}, is_semaphore{false}, is_stubbed{is_stubbed} {}
+
+ FenceBase(GPUVAddr address, u32 payload, bool is_stubbed)
+ : address{address}, payload{payload}, is_semaphore{true}, is_stubbed{is_stubbed} {}
+
+ GPUVAddr GetAddress() const {
+ return address;
+ }
+
+ u32 GetPayload() const {
+ return payload;
+ }
+
+ bool IsSemaphore() const {
+ return is_semaphore;
+ }
+
+private:
+ GPUVAddr address;
+ u32 payload;
+ bool is_semaphore;
+
+protected:
+ bool is_stubbed;
+};
+
+template <typename TFence, typename TTextureCache, typename TTBufferCache, typename TQueryCache>
+class FenceManager {
+public:
+ void SignalSemaphore(GPUVAddr addr, u32 value) {
+ TryReleasePendingFences();
+ const bool should_flush = ShouldFlush();
+ CommitAsyncFlushes();
+ TFence new_fence = CreateFence(addr, value, !should_flush);
+ fences.push(new_fence);
+ QueueFence(new_fence);
+ if (should_flush) {
+ rasterizer.FlushCommands();
+ }
+ rasterizer.SyncGuestHost();
+ }
+
+ void SignalSyncPoint(u32 value) {
+ TryReleasePendingFences();
+ const bool should_flush = ShouldFlush();
+ CommitAsyncFlushes();
+ TFence new_fence = CreateFence(value, !should_flush);
+ fences.push(new_fence);
+ QueueFence(new_fence);
+ if (should_flush) {
+ rasterizer.FlushCommands();
+ }
+ rasterizer.SyncGuestHost();
+ }
+
+ void WaitPendingFences() {
+ auto& gpu{system.GPU()};
+ auto& memory_manager{gpu.MemoryManager()};
+ while (!fences.empty()) {
+ TFence& current_fence = fences.front();
+ if (ShouldWait()) {
+ WaitFence(current_fence);
+ }
+ PopAsyncFlushes();
+ if (current_fence->IsSemaphore()) {
+ memory_manager.template Write<u32>(current_fence->GetAddress(),
+ current_fence->GetPayload());
+ } else {
+ gpu.IncrementSyncPoint(current_fence->GetPayload());
+ }
+ fences.pop();
+ }
+ }
+
+protected:
+ FenceManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
+ TTextureCache& texture_cache, TTBufferCache& buffer_cache,
+ TQueryCache& query_cache)
+ : system{system}, rasterizer{rasterizer}, texture_cache{texture_cache},
+ buffer_cache{buffer_cache}, query_cache{query_cache} {}
+
+ virtual ~FenceManager() {}
+
+ /// Creates a Sync Point Fence Interface; does not create a backend fence if 'is_stubbed' is
+ /// true.
+ virtual TFence CreateFence(u32 value, bool is_stubbed) = 0;
+ /// Creates a Semaphore Fence Interface; does not create a backend fence if 'is_stubbed' is
+ /// true.
+ virtual TFence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) = 0;
+ /// Queues a fence into the backend if the fence isn't stubbed.
+ virtual void QueueFence(TFence& fence) = 0;
+ /// Checks whether the backend fence has been signaled/reached on the host GPU.
+ virtual bool IsFenceSignaled(TFence& fence) const = 0;
+ /// Waits until the fence has been signaled by the host GPU.
+ virtual void WaitFence(TFence& fence) = 0;
+
+ Core::System& system;
+ VideoCore::RasterizerInterface& rasterizer;
+ TTextureCache& texture_cache;
+ TTBufferCache& buffer_cache;
+ TQueryCache& query_cache;
+
+private:
+ void TryReleasePendingFences() {
+ auto& gpu{system.GPU()};
+ auto& memory_manager{gpu.MemoryManager()};
+ while (!fences.empty()) {
+ TFence& current_fence = fences.front();
+ if (ShouldWait() && !IsFenceSignaled(current_fence)) {
+ return;
+ }
+ PopAsyncFlushes();
+ if (current_fence->IsSemaphore()) {
+ memory_manager.template Write<u32>(current_fence->GetAddress(),
+ current_fence->GetPayload());
+ } else {
+ gpu.IncrementSyncPoint(current_fence->GetPayload());
+ }
+ fences.pop();
+ }
+ }
+
+ bool ShouldWait() const {
+ return texture_cache.ShouldWaitAsyncFlushes() || buffer_cache.ShouldWaitAsyncFlushes() ||
+ query_cache.ShouldWaitAsyncFlushes();
+ }
+
+ bool ShouldFlush() const {
+ return texture_cache.HasUncommittedFlushes() || buffer_cache.HasUncommittedFlushes() ||
+ query_cache.HasUncommittedFlushes();
+ }
+
+ void PopAsyncFlushes() {
+ texture_cache.PopAsyncFlushes();
+ buffer_cache.PopAsyncFlushes();
+ query_cache.PopAsyncFlushes();
+ }
+
+ void CommitAsyncFlushes() {
+ texture_cache.CommitAsyncFlushes();
+ buffer_cache.CommitAsyncFlushes();
+ query_cache.CommitAsyncFlushes();
+ }
+
+ std::queue<TFence> fences;
+};
+
+} // namespace VideoCommon
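
The fence manager above releases fences strictly in FIFO order: TryReleasePendingFences pops fences from the front while they are already signaled (or while nothing requires waiting on them), applying each fence's side effect before moving on, and WaitPendingFences does the same but blocks. A reduced sketch of that front-of-queue release loop; the Fence type and its side effect are simplified stand-ins for the semaphore/syncpoint handling:

    // Sketch of the FIFO fence release loop.
    #include <cstdint>
    #include <cstdio>
    #include <memory>
    #include <queue>

    struct Fence {
        std::uint32_t payload;
        bool signaled; // in the real code this is queried from the host GPU
    };

    class FenceQueue {
    public:
        void Signal(std::uint32_t payload, bool already_signaled) {
            fences.push(std::make_shared<Fence>(Fence{payload, already_signaled}));
        }

        // Release fences from the front while they are signaled; stop at the
        // first one that still has to be waited on.
        void TryReleasePending() {
            while (!fences.empty()) {
                const auto& fence = fences.front();
                if (!fence->signaled) {
                    return;
                }
                std::printf("releasing fence, payload=%u\n", fence->payload);
                fences.pop();
            }
        }

    private:
        std::queue<std::shared_ptr<Fence>> fences;
    };

    int main() {
        FenceQueue queue;
        queue.Signal(1, true);
        queue.Signal(2, false);
        queue.TryReleasePending(); // releases payload 1, keeps payload 2 queued
    }
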
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index a606f4abd..b87fd873d 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -9,6 +9,7 @@
#include "core/core_timing_util.h"
#include "core/frontend/emu_window.h"
#include "core/memory.h"
+#include "core/settings.h"
#include "video_core/engines/fermi_2d.h"
#include "video_core/engines/kepler_compute.h"
#include "video_core/engines/kepler_memory.h"
@@ -125,6 +126,28 @@ bool GPU::CancelSyncptInterrupt(const u32 syncpoint_id, const u32 value) {
return true;
}
+u64 GPU::RequestFlush(VAddr addr, std::size_t size) {
+ std::unique_lock lck{flush_request_mutex};
+ const u64 fence = ++last_flush_fence;
+ flush_requests.emplace_back(fence, addr, size);
+ return fence;
+}
+
+void GPU::TickWork() {
+ std::unique_lock lck{flush_request_mutex};
+ while (!flush_requests.empty()) {
+ auto& request = flush_requests.front();
+ const u64 fence = request.fence;
+ const VAddr addr = request.addr;
+ const std::size_t size = request.size;
+ flush_requests.pop_front();
+ flush_request_mutex.unlock();
+ renderer->Rasterizer().FlushRegion(addr, size);
+ current_flush_fence.store(fence);
+ flush_request_mutex.lock();
+ }
+}
+
u64 GPU::GetTicks() const {
// These values were reverse engineered by fincs from NVN
// The gpu clock is reported in units of 385/625 nanoseconds
@@ -132,7 +155,10 @@ u64 GPU::GetTicks() const {
constexpr u64 gpu_ticks_den = 625;
const u64 cpu_ticks = system.CoreTiming().GetTicks();
- const u64 nanoseconds = Core::Timing::CyclesToNs(cpu_ticks).count();
+ u64 nanoseconds = Core::Timing::CyclesToNs(cpu_ticks).count();
+ if (Settings::values.use_fast_gpu_time) {
+ nanoseconds /= 256;
+ }
const u64 nanoseconds_num = nanoseconds / gpu_ticks_den;
const u64 nanoseconds_rem = nanoseconds % gpu_ticks_den;
return nanoseconds_num * gpu_ticks_num + (nanoseconds_rem * gpu_ticks_num) / gpu_ticks_den;
@@ -142,6 +168,13 @@ void GPU::FlushCommands() {
renderer->Rasterizer().FlushCommands();
}
+void GPU::SyncGuestHost() {
+ renderer->Rasterizer().SyncGuestHost();
+}
+
+void GPU::OnCommandListEnd() {
+ renderer->Rasterizer().ReleaseFences();
+}
// Note that, traditionally, methods are treated as 4-byte addressable locations, and hence
// their numbers are written down multiplied by 4 in Docs. Here we do not multiply by 4.
// So the values you see in docs might be multiplied by 4.
@@ -180,16 +213,32 @@ void GPU::CallMethod(const MethodCall& method_call) {
ASSERT(method_call.subchannel < bound_engines.size());
- if (ExecuteMethodOnEngine(method_call)) {
+ if (ExecuteMethodOnEngine(method_call.method)) {
CallEngineMethod(method_call);
} else {
CallPullerMethod(method_call);
}
}
-bool GPU::ExecuteMethodOnEngine(const MethodCall& method_call) {
- const auto method = static_cast<BufferMethods>(method_call.method);
- return method >= BufferMethods::NonPullerMethods;
+void GPU::CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
+ u32 methods_pending) {
+ LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method, subchannel);
+
+ ASSERT(subchannel < bound_engines.size());
+
+ if (ExecuteMethodOnEngine(method)) {
+ CallEngineMultiMethod(method, subchannel, base_start, amount, methods_pending);
+ } else {
+ for (std::size_t i = 0; i < amount; i++) {
+ CallPullerMethod(
+ {method, base_start[i], subchannel, methods_pending - static_cast<u32>(i)});
+ }
+ }
+}
+
+bool GPU::ExecuteMethodOnEngine(u32 method) {
+ const auto buffer_method = static_cast<BufferMethods>(method);
+ return buffer_method >= BufferMethods::NonPullerMethods;
}
void GPU::CallPullerMethod(const MethodCall& method_call) {
@@ -269,6 +318,31 @@ void GPU::CallEngineMethod(const MethodCall& method_call) {
}
}
+void GPU::CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
+ u32 methods_pending) {
+ const EngineID engine = bound_engines[subchannel];
+
+ switch (engine) {
+ case EngineID::FERMI_TWOD_A:
+ fermi_2d->CallMultiMethod(method, base_start, amount, methods_pending);
+ break;
+ case EngineID::MAXWELL_B:
+ maxwell_3d->CallMultiMethod(method, base_start, amount, methods_pending);
+ break;
+ case EngineID::KEPLER_COMPUTE_B:
+ kepler_compute->CallMultiMethod(method, base_start, amount, methods_pending);
+ break;
+ case EngineID::MAXWELL_DMA_COPY_A:
+ maxwell_dma->CallMultiMethod(method, base_start, amount, methods_pending);
+ break;
+ case EngineID::KEPLER_INLINE_TO_MEMORY_B:
+ kepler_memory->CallMultiMethod(method, base_start, amount, methods_pending);
+ break;
+ default:
+ UNIMPLEMENTED_MSG("Unimplemented engine");
+ }
+}
+
void GPU::ProcessBindMethod(const MethodCall& method_call) {
// Bind the current subchannel to the desired engine id.
LOG_DEBUG(HW_GPU, "Binding subchannel {} to engine {}", method_call.subchannel,
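
GetTicks above converts a nanosecond count into GPU ticks with a split multiply/divide, ns / den * num plus the scaled remainder, so the intermediate product cannot overflow 64 bits; with use_fast_gpu_time the nanosecond count is simply divided by 256 first. A sketch of that arithmetic follows; only the 625 denominator is visible in the hunk, so the numerator used below is a placeholder:

    // Sketch of the overflow-safe scaling used in GetTicks: ticks ~= ns * num / den.
    #include <cstdint>
    #include <cstdio>

    std::uint64_t ScaleTicks(std::uint64_t ns, std::uint64_t num, std::uint64_t den) {
        const std::uint64_t ns_num = ns / den; // whole multiples of den
        const std::uint64_t ns_rem = ns % den; // remainder, always < den
        return ns_num * num + (ns_rem * num) / den;
    }

    int main() {
        std::uint64_t ns = 1'000'000'000; // one second, made-up input
        const bool use_fast_gpu_time = true;
        if (use_fast_gpu_time) {
            ns /= 256; // the fast path shrinks reported GPU time
        }
        // 384 is a placeholder numerator; only den = 625 appears in the hunk above.
        std::printf("%llu ticks\n", static_cast<unsigned long long>(ScaleTicks(ns, 384, 625)));
    }
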
diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h
index 1a2d747be..dd51c95b7 100644
--- a/src/video_core/gpu.h
+++ b/src/video_core/gpu.h
@@ -155,7 +155,27 @@ public:
/// Calls a GPU method.
void CallMethod(const MethodCall& method_call);
+ /// Calls a GPU multivalue method.
+ void CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
+ u32 methods_pending);
+
+ /// Flush all currently written commands to the host GPU for execution.
void FlushCommands();
+ /// Synchronizes CPU writes with host GPU memory.
+ void SyncGuestHost();
+ /// Signal the end of a command list.
+ virtual void OnCommandListEnd();
+
+ /// Request a host GPU memory flush from the CPU.
+ u64 RequestFlush(VAddr addr, std::size_t size);
+
+ /// Obtains the current flush request fence id.
+ u64 CurrentFlushRequestFence() const {
+ return current_flush_fence.load(std::memory_order_relaxed);
+ }
+
+ /// Tick pending requests within the GPU.
+ void TickWork();
/// Returns a reference to the Maxwell3D GPU engine.
Engines::Maxwell3D& Maxwell3D();
@@ -293,8 +313,12 @@ private:
/// Calls a GPU engine method.
void CallEngineMethod(const MethodCall& method_call);
+ /// Calls a GPU engine multivalue method.
+ void CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
+ u32 methods_pending);
+
/// Determines where the method should be executed.
- bool ExecuteMethodOnEngine(const MethodCall& method_call);
+ bool ExecuteMethodOnEngine(u32 method);
protected:
std::unique_ptr<Tegra::DmaPusher> dma_pusher;
@@ -325,6 +349,19 @@ private:
std::condition_variable sync_cv;
+ struct FlushRequest {
+ FlushRequest(u64 fence, VAddr addr, std::size_t size)
+ : fence{fence}, addr{addr}, size{size} {}
+ u64 fence;
+ VAddr addr;
+ std::size_t size;
+ };
+
+ std::list<FlushRequest> flush_requests;
+ std::atomic<u64> current_flush_fence{};
+ u64 last_flush_fence{};
+ std::mutex flush_request_mutex;
+
const bool is_async;
};
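
The flush-request plumbing declared above works as a fence handshake: RequestFlush queues a region under a mutex and returns a monotonically increasing fence id, the GPU thread's TickWork services the queue and publishes the last completed id through an atomic, and the CPU side polls CurrentFlushRequestFence until its id is reached (see the busy-wait added to ThreadManager::FlushRegion further down). A simplified two-thread sketch of that handshake; names are illustrative and the "flush" itself is a stand-in:

    // Sketch of the flush-request fence handshake between CPU and GPU worker.
    #include <atomic>
    #include <cstdint>
    #include <cstdio>
    #include <list>
    #include <mutex>
    #include <thread>

    struct FlushRequest {
        std::uint64_t fence;
        std::uintptr_t addr;
        std::size_t size;
    };

    class FlushQueue {
    public:
        // CPU side: queue a request and get back the fence id to wait on.
        std::uint64_t RequestFlush(std::uintptr_t addr, std::size_t size) {
            std::scoped_lock lock{mutex};
            const std::uint64_t fence = ++last_fence;
            requests.push_back({fence, addr, size});
            return fence;
        }

        // GPU side: service every queued request and publish the completed fence.
        void TickWork() {
            std::scoped_lock lock{mutex};
            while (!requests.empty()) {
                const FlushRequest request = requests.front();
                requests.pop_front();
                std::printf("flushing %zu bytes at 0x%llx\n", request.size,
                            static_cast<unsigned long long>(request.addr));
                current_fence.store(request.fence, std::memory_order_release);
            }
        }

        std::uint64_t CurrentFence() const {
            return current_fence.load(std::memory_order_acquire);
        }

    private:
        std::mutex mutex;
        std::list<FlushRequest> requests;
        std::uint64_t last_fence = 0;
        std::atomic<std::uint64_t> current_fence{0};
    };

    int main() {
        FlushQueue queue;
        const std::uint64_t fence = queue.RequestFlush(0x8000, 0x100);
        std::thread gpu{[&] { queue.TickWork(); }};
        while (queue.CurrentFence() < fence) {
            // The real code busy-waits like this after pushing a GPUTickCommand.
        }
        gpu.join();
    }
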
diff --git a/src/video_core/gpu_asynch.cpp b/src/video_core/gpu_asynch.cpp
index 20e73a37e..53305ab43 100644
--- a/src/video_core/gpu_asynch.cpp
+++ b/src/video_core/gpu_asynch.cpp
@@ -52,4 +52,8 @@ void GPUAsynch::WaitIdle() const {
gpu_thread.WaitIdle();
}
+void GPUAsynch::OnCommandListEnd() {
+ gpu_thread.OnCommandListEnd();
+}
+
} // namespace VideoCommon
diff --git a/src/video_core/gpu_asynch.h b/src/video_core/gpu_asynch.h
index 03fd0eef0..517658612 100644
--- a/src/video_core/gpu_asynch.h
+++ b/src/video_core/gpu_asynch.h
@@ -32,6 +32,8 @@ public:
void FlushAndInvalidateRegion(VAddr addr, u64 size) override;
void WaitIdle() const override;
+ void OnCommandListEnd() override;
+
protected:
void TriggerCpuInterrupt(u32 syncpoint_id, u32 value) const override;
diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp
index 10cda686b..c3bb4fe06 100644
--- a/src/video_core/gpu_thread.cpp
+++ b/src/video_core/gpu_thread.cpp
@@ -6,6 +6,7 @@
#include "common/microprofile.h"
#include "core/core.h"
#include "core/frontend/emu_window.h"
+#include "core/settings.h"
#include "video_core/dma_pusher.h"
#include "video_core/gpu.h"
#include "video_core/gpu_thread.h"
@@ -14,8 +15,9 @@
namespace VideoCommon::GPUThread {
/// Runs the GPU thread
-static void RunThread(VideoCore::RendererBase& renderer, Core::Frontend::GraphicsContext& context,
- Tegra::DmaPusher& dma_pusher, SynchState& state) {
+static void RunThread(Core::System& system, VideoCore::RendererBase& renderer,
+ Core::Frontend::GraphicsContext& context, Tegra::DmaPusher& dma_pusher,
+ SynchState& state) {
MicroProfileOnThreadCreate("GpuThread");
// Wait for first GPU command before acquiring the window context
@@ -37,10 +39,14 @@ static void RunThread(VideoCore::RendererBase& renderer, Core::Frontend::Graphic
dma_pusher.DispatchCalls();
} else if (const auto data = std::get_if<SwapBuffersCommand>(&next.data)) {
renderer.SwapBuffers(data->framebuffer ? &*data->framebuffer : nullptr);
+ } else if (const auto data = std::get_if<OnCommandListEndCommand>(&next.data)) {
+ renderer.Rasterizer().ReleaseFences();
+ } else if (const auto data = std::get_if<GPUTickCommand>(&next.data)) {
+ system.GPU().TickWork();
} else if (const auto data = std::get_if<FlushRegionCommand>(&next.data)) {
renderer.Rasterizer().FlushRegion(data->addr, data->size);
} else if (const auto data = std::get_if<InvalidateRegionCommand>(&next.data)) {
- renderer.Rasterizer().InvalidateRegion(data->addr, data->size);
+ renderer.Rasterizer().OnCPUWrite(data->addr, data->size);
} else if (std::holds_alternative<EndProcessingCommand>(next.data)) {
return;
} else {
@@ -65,8 +71,8 @@ ThreadManager::~ThreadManager() {
void ThreadManager::StartThread(VideoCore::RendererBase& renderer,
Core::Frontend::GraphicsContext& context,
Tegra::DmaPusher& dma_pusher) {
- thread = std::thread{RunThread, std::ref(renderer), std::ref(context), std::ref(dma_pusher),
- std::ref(state)};
+ thread = std::thread{RunThread, std::ref(system), std::ref(renderer),
+ std::ref(context), std::ref(dma_pusher), std::ref(state)};
}
void ThreadManager::SubmitList(Tegra::CommandList&& entries) {
@@ -78,16 +84,29 @@ void ThreadManager::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
}
void ThreadManager::FlushRegion(VAddr addr, u64 size) {
- PushCommand(FlushRegionCommand(addr, size));
+ if (!Settings::IsGPULevelHigh()) {
+ PushCommand(FlushRegionCommand(addr, size));
+ return;
+ }
+ if (!Settings::IsGPULevelExtreme()) {
+ return;
+ }
+ if (system.Renderer().Rasterizer().MustFlushRegion(addr, size)) {
+ auto& gpu = system.GPU();
+ u64 fence = gpu.RequestFlush(addr, size);
+ PushCommand(GPUTickCommand());
+ while (fence > gpu.CurrentFlushRequestFence()) {
+ }
+ }
}
void ThreadManager::InvalidateRegion(VAddr addr, u64 size) {
- system.Renderer().Rasterizer().InvalidateRegion(addr, size);
+ system.Renderer().Rasterizer().OnCPUWrite(addr, size);
}
void ThreadManager::FlushAndInvalidateRegion(VAddr addr, u64 size) {
// Skip the flush in asynch mode, as FlushAndInvalidateRegion is not used for anything too important
- InvalidateRegion(addr, size);
+ system.Renderer().Rasterizer().OnCPUWrite(addr, size);
}
void ThreadManager::WaitIdle() const {
@@ -95,6 +114,10 @@ void ThreadManager::WaitIdle() const {
}
}
+void ThreadManager::OnCommandListEnd() {
+ PushCommand(OnCommandListEndCommand());
+}
+
u64 ThreadManager::PushCommand(CommandData&& command_data) {
const u64 fence{++state.last_fence};
state.queue.Push(CommandDataContainer(std::move(command_data), fence));
diff --git a/src/video_core/gpu_thread.h b/src/video_core/gpu_thread.h
index cd74ad330..5a28335d6 100644
--- a/src/video_core/gpu_thread.h
+++ b/src/video_core/gpu_thread.h
@@ -70,9 +70,16 @@ struct FlushAndInvalidateRegionCommand final {
u64 size;
};
+/// Command called within the GPU to schedule actions after a command list ends
+struct OnCommandListEndCommand final {};
+
+/// Command to make the GPU check for pending requests
+struct GPUTickCommand final {};
+
using CommandData =
std::variant<EndProcessingCommand, SubmitListCommand, SwapBuffersCommand, FlushRegionCommand,
- InvalidateRegionCommand, FlushAndInvalidateRegionCommand>;
+ InvalidateRegionCommand, FlushAndInvalidateRegionCommand, OnCommandListEndCommand,
+ GPUTickCommand>;
struct CommandDataContainer {
CommandDataContainer() = default;
@@ -122,6 +129,8 @@ public:
// Wait until the gpu thread is idle.
void WaitIdle() const;
+ void OnCommandListEnd();
+
private:
/// Pushes a command to be executed by the GPU thread
u64 PushCommand(CommandData&& command_data);
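
Commands travel to the GPU thread as alternatives of a std::variant and are dispatched with std::get_if; the two new alternatives, OnCommandListEndCommand and GPUTickCommand, are empty structs whose type alone is the message. A compact sketch of that dispatch style with stand-in command types:

    // Sketch of the std::variant command dispatch used by the GPU thread.
    #include <cstdio>
    #include <variant>

    struct SubmitListCommand {};
    struct OnCommandListEndCommand {}; // empty: the type itself is the message
    struct GPUTickCommand {};

    using CommandData = std::variant<SubmitListCommand, OnCommandListEndCommand, GPUTickCommand>;

    void Dispatch(const CommandData& next) {
        if (std::get_if<SubmitListCommand>(&next)) {
            std::printf("dispatching command list\n");
        } else if (std::get_if<OnCommandListEndCommand>(&next)) {
            std::printf("releasing fences\n");
        } else if (std::get_if<GPUTickCommand>(&next)) {
            std::printf("ticking pending flush requests\n");
        }
    }

    int main() {
        Dispatch(CommandData{GPUTickCommand{}});
        Dispatch(CommandData{OnCommandListEndCommand{}});
    }
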
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index fd49bc2a9..dbee9f634 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -51,11 +51,8 @@ GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, u64 size) {
const GPUVAddr gpu_addr{FindFreeRegion(address_space_base, aligned_size)};
MapBackingMemory(gpu_addr, system.Memory().GetPointer(cpu_addr), aligned_size, cpu_addr);
- ASSERT(system.CurrentProcess()
- ->PageTable()
- .SetMemoryAttribute(cpu_addr, size, Kernel::Memory::MemoryAttribute::DeviceShared,
- Kernel::Memory::MemoryAttribute::DeviceShared)
- .IsSuccess());
+ ASSERT(
+ system.CurrentProcess()->PageTable().LockForDeviceAddressSpace(cpu_addr, size).IsSuccess());
return gpu_addr;
}
@@ -66,11 +63,8 @@ GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, GPUVAddr gpu_addr, u64 size)
const u64 aligned_size{Common::AlignUp(size, page_size)};
MapBackingMemory(gpu_addr, system.Memory().GetPointer(cpu_addr), aligned_size, cpu_addr);
- ASSERT(system.CurrentProcess()
- ->PageTable()
- .SetMemoryAttribute(cpu_addr, size, Kernel::Memory::MemoryAttribute::DeviceShared,
- Kernel::Memory::MemoryAttribute::DeviceShared)
- .IsSuccess());
+ ASSERT(
+ system.CurrentProcess()->PageTable().LockForDeviceAddressSpace(cpu_addr, size).IsSuccess());
return gpu_addr;
}
@@ -87,9 +81,7 @@ GPUVAddr MemoryManager::UnmapBuffer(GPUVAddr gpu_addr, u64 size) {
UnmapRange(gpu_addr, aligned_size);
ASSERT(system.CurrentProcess()
->PageTable()
- .SetMemoryAttribute(cpu_addr.value(), size,
- Kernel::Memory::MemoryAttribute::DeviceShared,
- Kernel::Memory::MemoryAttribute::None)
+ .UnlockForDeviceAddressSpace(cpu_addr.value(), size)
.IsSuccess());
return gpu_addr;
diff --git a/src/video_core/query_cache.h b/src/video_core/query_cache.h
index 5ea2b01f2..2f75f8801 100644
--- a/src/video_core/query_cache.h
+++ b/src/video_core/query_cache.h
@@ -12,10 +12,12 @@
#include <mutex>
#include <optional>
#include <unordered_map>
+#include <unordered_set>
#include <vector>
#include "common/assert.h"
#include "core/core.h"
+#include "core/settings.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/gpu.h"
#include "video_core/memory_manager.h"
@@ -130,6 +132,9 @@ public:
}
query->BindCounter(Stream(type).Current(), timestamp);
+ if (Settings::values.use_asynchronous_gpu_emulation) {
+ AsyncFlushQuery(cpu_addr);
+ }
}
/// Updates counters from GPU state. Expected to be called once per draw, clear or dispatch.
@@ -170,6 +175,37 @@ public:
return streams[static_cast<std::size_t>(type)];
}
+ void CommitAsyncFlushes() {
+ committed_flushes.push_back(uncommitted_flushes);
+ uncommitted_flushes.reset();
+ }
+
+ bool HasUncommittedFlushes() const {
+ return uncommitted_flushes != nullptr;
+ }
+
+ bool ShouldWaitAsyncFlushes() const {
+ if (committed_flushes.empty()) {
+ return false;
+ }
+ return committed_flushes.front() != nullptr;
+ }
+
+ void PopAsyncFlushes() {
+ if (committed_flushes.empty()) {
+ return;
+ }
+ auto& flush_list = committed_flushes.front();
+ if (!flush_list) {
+ committed_flushes.pop_front();
+ return;
+ }
+ for (VAddr query_address : *flush_list) {
+ FlushAndRemoveRegion(query_address, 4);
+ }
+ committed_flushes.pop_front();
+ }
+
protected:
std::array<QueryPool, VideoCore::NumQueryTypes> query_pools;
@@ -224,6 +260,13 @@ private:
return found != std::end(contents) ? &*found : nullptr;
}
+ void AsyncFlushQuery(VAddr addr) {
+ if (!uncommitted_flushes) {
+ uncommitted_flushes = std::make_shared<std::unordered_set<VAddr>>();
+ }
+ uncommitted_flushes->insert(addr);
+ }
+
static constexpr std::uintptr_t PAGE_SIZE = 4096;
static constexpr unsigned PAGE_SHIFT = 12;
@@ -235,6 +278,9 @@ private:
std::unordered_map<u64, std::vector<CachedQuery>> cached_queries;
std::array<CounterStream, VideoCore::NumQueryTypes> streams;
+
+ std::shared_ptr<std::unordered_set<VAddr>> uncommitted_flushes{};
+ std::list<std::shared_ptr<std::unordered_set<VAddr>>> committed_flushes;
};
template <class QueryCache, class HostCounter>
diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h
index 8ae5b9c4e..3cbdac8e7 100644
--- a/src/video_core/rasterizer_interface.h
+++ b/src/video_core/rasterizer_interface.h
@@ -49,19 +49,40 @@ public:
/// Records a GPU query and caches it
virtual void Query(GPUVAddr gpu_addr, QueryType type, std::optional<u64> timestamp) = 0;
+ /// Signal a GPU-based semaphore as a fence
+ virtual void SignalSemaphore(GPUVAddr addr, u32 value) = 0;
+
+ /// Signal a GPU-based syncpoint as a fence
+ virtual void SignalSyncPoint(u32 value) = 0;
+
+ /// Release all pending fences.
+ virtual void ReleaseFences() = 0;
+
/// Notify rasterizer that all caches should be flushed to Switch memory
virtual void FlushAll() = 0;
/// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
virtual void FlushRegion(VAddr addr, u64 size) = 0;
+ /// Check if the specified memory area requires flushing to CPU memory.
+ virtual bool MustFlushRegion(VAddr addr, u64 size) = 0;
+
/// Notify rasterizer that any caches of the specified region should be invalidated
virtual void InvalidateRegion(VAddr addr, u64 size) = 0;
+ /// Notify rasterizer that any caches of the specified region are out of sync with the guest
+ virtual void OnCPUWrite(VAddr addr, u64 size) = 0;
+
+ /// Sync memory between guest and host.
+ virtual void SyncGuestHost() = 0;
+
/// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
/// and invalidated
virtual void FlushAndInvalidateRegion(VAddr addr, u64 size) = 0;
+ /// Notify the host renderer to wait for previous primitive and compute operations to finish.
+ virtual void WaitForIdle() = 0;
+
/// Notify the rasterizer to send all written commands to the host GPU.
virtual void FlushCommands() = 0;
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
index cb5792407..d2cab50bd 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
@@ -51,10 +51,6 @@ Buffer OGLBufferCache::CreateBlock(VAddr cpu_addr, std::size_t size) {
return std::make_shared<CachedBufferBlock>(cpu_addr, size);
}
-void OGLBufferCache::WriteBarrier() {
- glMemoryBarrier(GL_ALL_BARRIER_BITS);
-}
-
GLuint OGLBufferCache::ToHandle(const Buffer& buffer) {
return buffer->GetHandle();
}
@@ -72,6 +68,7 @@ void OGLBufferCache::UploadBlockData(const Buffer& buffer, std::size_t offset, s
void OGLBufferCache::DownloadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
u8* data) {
MICROPROFILE_SCOPE(OpenGL_Buffer_Download);
+ glMemoryBarrier(GL_BUFFER_UPDATE_BARRIER_BIT);
glGetNamedBufferSubData(buffer->GetHandle(), static_cast<GLintptr>(offset),
static_cast<GLsizeiptr>(size), data);
}
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.h b/src/video_core/renderer_opengl/gl_buffer_cache.h
index a74817857..a9e86cfc7 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.h
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.h
@@ -59,8 +59,6 @@ protected:
GLuint ToHandle(const Buffer& buffer) override;
- void WriteBarrier() override;
-
void UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
const u8* data) override;
diff --git a/src/video_core/renderer_opengl/gl_fence_manager.cpp b/src/video_core/renderer_opengl/gl_fence_manager.cpp
new file mode 100644
index 000000000..99ddcb3f8
--- /dev/null
+++ b/src/video_core/renderer_opengl/gl_fence_manager.cpp
@@ -0,0 +1,72 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/assert.h"
+
+#include "video_core/renderer_opengl/gl_fence_manager.h"
+
+namespace OpenGL {
+
+GLInnerFence::GLInnerFence(u32 payload, bool is_stubbed)
+ : VideoCommon::FenceBase(payload, is_stubbed), sync_object{} {}
+
+GLInnerFence::GLInnerFence(GPUVAddr address, u32 payload, bool is_stubbed)
+ : VideoCommon::FenceBase(address, payload, is_stubbed), sync_object{} {}
+
+GLInnerFence::~GLInnerFence() = default;
+
+void GLInnerFence::Queue() {
+ if (is_stubbed) {
+ return;
+ }
+ ASSERT(sync_object.handle == 0);
+ sync_object.Create();
+}
+
+bool GLInnerFence::IsSignaled() const {
+ if (is_stubbed) {
+ return true;
+ }
+ ASSERT(sync_object.handle != 0);
+ GLsizei length;
+ GLint sync_status;
+ glGetSynciv(sync_object.handle, GL_SYNC_STATUS, sizeof(GLint), &length, &sync_status);
+ return sync_status == GL_SIGNALED;
+}
+
+void GLInnerFence::Wait() {
+ if (is_stubbed) {
+ return;
+ }
+ ASSERT(sync_object.handle != 0);
+ glClientWaitSync(sync_object.handle, 0, GL_TIMEOUT_IGNORED);
+}
+
+FenceManagerOpenGL::FenceManagerOpenGL(Core::System& system,
+ VideoCore::RasterizerInterface& rasterizer,
+ TextureCacheOpenGL& texture_cache,
+ OGLBufferCache& buffer_cache, QueryCache& query_cache)
+ : GenericFenceManager(system, rasterizer, texture_cache, buffer_cache, query_cache) {}
+
+Fence FenceManagerOpenGL::CreateFence(u32 value, bool is_stubbed) {
+ return std::make_shared<GLInnerFence>(value, is_stubbed);
+}
+
+Fence FenceManagerOpenGL::CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) {
+ return std::make_shared<GLInnerFence>(addr, value, is_stubbed);
+}
+
+void FenceManagerOpenGL::QueueFence(Fence& fence) {
+ fence->Queue();
+}
+
+bool FenceManagerOpenGL::IsFenceSignaled(Fence& fence) const {
+ return fence->IsSignaled();
+}
+
+void FenceManagerOpenGL::WaitFence(Fence& fence) {
+ fence->Wait();
+}
+
+} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_fence_manager.h b/src/video_core/renderer_opengl/gl_fence_manager.h
new file mode 100644
index 000000000..c917b3343
--- /dev/null
+++ b/src/video_core/renderer_opengl/gl_fence_manager.h
@@ -0,0 +1,53 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <memory>
+#include <glad/glad.h>
+
+#include "common/common_types.h"
+#include "video_core/fence_manager.h"
+#include "video_core/renderer_opengl/gl_buffer_cache.h"
+#include "video_core/renderer_opengl/gl_query_cache.h"
+#include "video_core/renderer_opengl/gl_resource_manager.h"
+#include "video_core/renderer_opengl/gl_texture_cache.h"
+
+namespace OpenGL {
+
+class GLInnerFence : public VideoCommon::FenceBase {
+public:
+ GLInnerFence(u32 payload, bool is_stubbed);
+ GLInnerFence(GPUVAddr address, u32 payload, bool is_stubbed);
+ ~GLInnerFence();
+
+ void Queue();
+
+ bool IsSignaled() const;
+
+ void Wait();
+
+private:
+ OGLSync sync_object;
+};
+
+using Fence = std::shared_ptr<GLInnerFence>;
+using GenericFenceManager =
+ VideoCommon::FenceManager<Fence, TextureCacheOpenGL, OGLBufferCache, QueryCache>;
+
+class FenceManagerOpenGL final : public GenericFenceManager {
+public:
+ FenceManagerOpenGL(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
+ TextureCacheOpenGL& texture_cache, OGLBufferCache& buffer_cache,
+ QueryCache& query_cache);
+
+protected:
+ Fence CreateFence(u32 value, bool is_stubbed) override;
+ Fence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) override;
+ void QueueFence(Fence& fence) override;
+ bool IsFenceSignaled(Fence& fence) const override;
+ void WaitFence(Fence& fence) override;
+};
+
+} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 175374f0d..8b3b3ce92 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -59,14 +59,12 @@ constexpr std::size_t NumSupportedVertexAttributes = 16;
template <typename Engine, typename Entry>
Tegra::Texture::FullTextureInfo GetTextureInfo(const Engine& engine, const Entry& entry,
ShaderType shader_type, std::size_t index = 0) {
- if (entry.IsBindless()) {
- const Tegra::Texture::TextureHandle tex_handle =
- engine.AccessConstBuffer32(shader_type, entry.GetBuffer(), entry.GetOffset());
+ if (entry.is_bindless) {
+ const auto tex_handle = engine.AccessConstBuffer32(shader_type, entry.buffer, entry.offset);
return engine.GetTextureInfo(tex_handle);
}
const auto& gpu_profile = engine.AccessGuestDriverProfile();
- const u32 offset =
- entry.GetOffset() + static_cast<u32>(index * gpu_profile.GetTextureHandlerSize());
+ const u32 offset = entry.offset + static_cast<u32>(index * gpu_profile.GetTextureHandlerSize());
if constexpr (std::is_same_v<Engine, Tegra::Engines::Maxwell3D>) {
return engine.GetStageTexture(shader_type, offset);
} else {
@@ -99,9 +97,10 @@ RasterizerOpenGL::RasterizerOpenGL(Core::System& system, Core::Frontend::EmuWind
ScreenInfo& info, GLShader::ProgramManager& program_manager,
StateTracker& state_tracker)
: RasterizerAccelerated{system.Memory()}, texture_cache{system, *this, device, state_tracker},
- shader_cache{*this, system, emu_window, device}, query_cache{system, *this}, system{system},
- screen_info{info}, program_manager{program_manager}, state_tracker{state_tracker},
- buffer_cache{*this, system, device, STREAM_BUFFER_SIZE} {
+ shader_cache{*this, system, emu_window, device}, query_cache{system, *this},
+ buffer_cache{*this, system, device, STREAM_BUFFER_SIZE},
+ fence_manager{system, *this, texture_cache, buffer_cache, query_cache}, system{system},
+ screen_info{info}, program_manager{program_manager}, state_tracker{state_tracker} {
CheckExtensions();
}
@@ -185,8 +184,12 @@ void RasterizerOpenGL::SetupVertexBuffer() {
const GPUVAddr start = vertex_array.StartAddress();
const GPUVAddr end = regs.vertex_array_limit[index].LimitAddress();
- ASSERT(end > start);
- const u64 size = end - start + 1;
+ ASSERT(end >= start);
+ const u64 size = end - start;
+ if (size == 0) {
+ glBindVertexBuffer(static_cast<GLuint>(index), 0, 0, vertex_array.stride);
+ continue;
+ }
const auto [vertex_buffer, vertex_buffer_offset] = buffer_cache.UploadMemory(start, size);
glBindVertexBuffer(static_cast<GLuint>(index), vertex_buffer, vertex_buffer_offset,
vertex_array.stride);
@@ -310,8 +313,8 @@ std::size_t RasterizerOpenGL::CalculateVertexArraysSize() const {
const GPUVAddr start = regs.vertex_array[index].StartAddress();
const GPUVAddr end = regs.vertex_array_limit[index].LimitAddress();
- ASSERT(end > start);
- size += end - start + 1;
+ size += end - start;
+ ASSERT(end >= start);
}
return size;
@@ -343,7 +346,7 @@ void RasterizerOpenGL::ConfigureFramebuffers() {
texture_cache.GuardRenderTargets(true);
- View depth_surface = texture_cache.GetDepthBufferSurface();
+ View depth_surface = texture_cache.GetDepthBufferSurface(true);
const auto& regs = gpu.regs;
UNIMPLEMENTED_IF(regs.rt_separate_frag_data == 0);
@@ -352,7 +355,7 @@ void RasterizerOpenGL::ConfigureFramebuffers() {
FramebufferCacheKey key;
const auto colors_count = static_cast<std::size_t>(regs.rt_control.count);
for (std::size_t index = 0; index < colors_count; ++index) {
- View color_surface{texture_cache.GetColorBufferSurface(index)};
+ View color_surface{texture_cache.GetColorBufferSurface(index, true)};
if (!color_surface) {
continue;
}
@@ -376,28 +379,52 @@ void RasterizerOpenGL::ConfigureFramebuffers() {
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer_cache.GetFramebuffer(key));
}
-void RasterizerOpenGL::ConfigureClearFramebuffer(bool using_color_fb, bool using_depth_fb,
- bool using_stencil_fb) {
+void RasterizerOpenGL::ConfigureClearFramebuffer(bool using_color, bool using_depth_stencil) {
auto& gpu = system.GPU().Maxwell3D();
const auto& regs = gpu.regs;
texture_cache.GuardRenderTargets(true);
View color_surface;
- if (using_color_fb) {
+
+ if (using_color) {
+ // Determine if we have to preserve the contents.
+ // First we have to make sure all clear masks are enabled.
+ bool preserve_contents = !regs.clear_buffers.R || !regs.clear_buffers.G ||
+ !regs.clear_buffers.B || !regs.clear_buffers.A;
const std::size_t index = regs.clear_buffers.RT;
- color_surface = texture_cache.GetColorBufferSurface(index);
+ if (regs.clear_flags.scissor) {
+ // Then we have to confirm scissor testing clears the whole image.
+ const auto& scissor = regs.scissor_test[0];
+ preserve_contents |= scissor.min_x > 0;
+ preserve_contents |= scissor.min_y > 0;
+ preserve_contents |= scissor.max_x < regs.rt[index].width;
+ preserve_contents |= scissor.max_y < regs.rt[index].height;
+ }
+
+ color_surface = texture_cache.GetColorBufferSurface(index, preserve_contents);
texture_cache.MarkColorBufferInUse(index);
}
+
View depth_surface;
- if (using_depth_fb || using_stencil_fb) {
- depth_surface = texture_cache.GetDepthBufferSurface();
+ if (using_depth_stencil) {
+ bool preserve_contents = false;
+ if (regs.clear_flags.scissor) {
+ // For depth/stencil clears we only have to confirm that the scissor test covers the whole image.
+ const auto& scissor = regs.scissor_test[0];
+ preserve_contents |= scissor.min_x > 0;
+ preserve_contents |= scissor.min_y > 0;
+ preserve_contents |= scissor.max_x < regs.zeta_width;
+ preserve_contents |= scissor.max_y < regs.zeta_height;
+ }
+
+ depth_surface = texture_cache.GetDepthBufferSurface(preserve_contents);
texture_cache.MarkDepthBufferInUse();
}
texture_cache.GuardRenderTargets(false);
FramebufferCacheKey key;
- key.colors[0] = color_surface;
- key.zeta = depth_surface;
+ key.colors[0] = std::move(color_surface);
+ key.zeta = std::move(depth_surface);
state_tracker.NotifyFramebuffer();
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer_cache.GetFramebuffer(key));
@@ -417,8 +444,7 @@ void RasterizerOpenGL::Clear() {
if (regs.clear_buffers.R || regs.clear_buffers.G || regs.clear_buffers.B ||
regs.clear_buffers.A) {
use_color = true;
- }
- if (use_color) {
+
state_tracker.NotifyColorMask0();
glColorMaski(0, regs.clear_buffers.R != 0, regs.clear_buffers.G != 0,
regs.clear_buffers.B != 0, regs.clear_buffers.A != 0);
@@ -456,7 +482,7 @@ void RasterizerOpenGL::Clear() {
UNIMPLEMENTED_IF(regs.clear_flags.viewport);
- ConfigureClearFramebuffer(use_color, use_depth, use_stencil);
+ ConfigureClearFramebuffer(use_color, use_depth || use_stencil);
if (use_color) {
glClearBufferfv(GL_COLOR, 0, regs.clear_color);
@@ -599,6 +625,8 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) {
EndTransformFeedback();
++num_queued_commands;
+
+ system.GPU().TickWork();
}
void RasterizerOpenGL::DispatchCompute(GPUVAddr code_addr) {
@@ -649,6 +677,13 @@ void RasterizerOpenGL::FlushRegion(VAddr addr, u64 size) {
query_cache.FlushRegion(addr, size);
}
+bool RasterizerOpenGL::MustFlushRegion(VAddr addr, u64 size) {
+ if (!Settings::IsGPULevelHigh()) {
+ return buffer_cache.MustFlushRegion(addr, size);
+ }
+ return texture_cache.MustFlushRegion(addr, size) || buffer_cache.MustFlushRegion(addr, size);
+}
+
void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size) {
MICROPROFILE_SCOPE(OpenGL_CacheManagement);
if (addr == 0 || size == 0) {
@@ -660,13 +695,68 @@ void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size) {
query_cache.InvalidateRegion(addr, size);
}
+void RasterizerOpenGL::OnCPUWrite(VAddr addr, u64 size) {
+ MICROPROFILE_SCOPE(OpenGL_CacheManagement);
+ if (addr == 0 || size == 0) {
+ return;
+ }
+ texture_cache.OnCPUWrite(addr, size);
+ shader_cache.InvalidateRegion(addr, size);
+ buffer_cache.OnCPUWrite(addr, size);
+ query_cache.InvalidateRegion(addr, size);
+}
+
+void RasterizerOpenGL::SyncGuestHost() {
+ MICROPROFILE_SCOPE(OpenGL_CacheManagement);
+ texture_cache.SyncGuestHost();
+ buffer_cache.SyncGuestHost();
+}
+
+void RasterizerOpenGL::SignalSemaphore(GPUVAddr addr, u32 value) {
+ auto& gpu{system.GPU()};
+ if (!gpu.IsAsync()) {
+ auto& memory_manager{gpu.MemoryManager()};
+ memory_manager.Write<u32>(addr, value);
+ return;
+ }
+ fence_manager.SignalSemaphore(addr, value);
+}
+
+void RasterizerOpenGL::SignalSyncPoint(u32 value) {
+ auto& gpu{system.GPU()};
+ if (!gpu.IsAsync()) {
+ gpu.IncrementSyncPoint(value);
+ return;
+ }
+ fence_manager.SignalSyncPoint(value);
+}
+
+void RasterizerOpenGL::ReleaseFences() {
+ auto& gpu{system.GPU()};
+ if (!gpu.IsAsync()) {
+ return;
+ }
+ fence_manager.WaitPendingFences();
+}
+
void RasterizerOpenGL::FlushAndInvalidateRegion(VAddr addr, u64 size) {
- if (Settings::values.use_accurate_gpu_emulation) {
+ if (Settings::IsGPULevelExtreme()) {
FlushRegion(addr, size);
}
InvalidateRegion(addr, size);
}
+void RasterizerOpenGL::WaitForIdle() {
+ // Place a barrier on everything that is not framebuffer related.
+ // This is related to another flag that is not currently implemented.
+ glMemoryBarrier(GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT | GL_ELEMENT_ARRAY_BARRIER_BIT |
+ GL_UNIFORM_BARRIER_BIT | GL_TEXTURE_FETCH_BARRIER_BIT |
+ GL_SHADER_IMAGE_ACCESS_BARRIER_BIT | GL_COMMAND_BARRIER_BIT |
+ GL_PIXEL_BUFFER_BARRIER_BIT | GL_TEXTURE_UPDATE_BARRIER_BIT |
+ GL_BUFFER_UPDATE_BARRIER_BIT | GL_TRANSFORM_FEEDBACK_BARRIER_BIT |
+ GL_SHADER_STORAGE_BARRIER_BIT | GL_QUERY_BUFFER_BARRIER_BIT);
+}
+
void RasterizerOpenGL::FlushCommands() {
// Only flush when we have commands queued to OpenGL.
if (num_queued_commands == 0) {
@@ -775,9 +865,9 @@ void RasterizerOpenGL::SetupDrawGlobalMemory(std::size_t stage_index, const Shad
u32 binding = device.GetBaseBindings(stage_index).shader_storage_buffer;
for (const auto& entry : shader->GetEntries().global_memory_entries) {
- const auto addr{cbufs.const_buffers[entry.GetCbufIndex()].address + entry.GetCbufOffset()};
- const auto gpu_addr{memory_manager.Read<u64>(addr)};
- const auto size{memory_manager.Read<u32>(addr + 8)};
+ const GPUVAddr addr{cbufs.const_buffers[entry.cbuf_index].address + entry.cbuf_offset};
+ const GPUVAddr gpu_addr{memory_manager.Read<u64>(addr)};
+ const u32 size{memory_manager.Read<u32>(addr + 8)};
SetupGlobalMemory(binding++, entry, gpu_addr, size);
}
}
@@ -789,7 +879,7 @@ void RasterizerOpenGL::SetupComputeGlobalMemory(const Shader& kernel) {
u32 binding = 0;
for (const auto& entry : kernel->GetEntries().global_memory_entries) {
- const auto addr{cbufs[entry.GetCbufIndex()].Address() + entry.GetCbufOffset()};
+ const auto addr{cbufs[entry.cbuf_index].Address() + entry.cbuf_offset};
const auto gpu_addr{memory_manager.Read<u64>(addr)};
const auto size{memory_manager.Read<u32>(addr + 8)};
SetupGlobalMemory(binding++, entry, gpu_addr, size);
@@ -800,7 +890,7 @@ void RasterizerOpenGL::SetupGlobalMemory(u32 binding, const GlobalMemoryEntry& e
GPUVAddr gpu_addr, std::size_t size) {
const auto alignment{device.GetShaderStorageBufferAlignment()};
const auto [ssbo, buffer_offset] =
- buffer_cache.UploadMemory(gpu_addr, size, alignment, entry.IsWritten());
+ buffer_cache.UploadMemory(gpu_addr, size, alignment, entry.is_written);
glBindBufferRange(GL_SHADER_STORAGE_BUFFER, binding, ssbo, buffer_offset,
static_cast<GLsizeiptr>(size));
}
@@ -811,7 +901,7 @@ void RasterizerOpenGL::SetupDrawTextures(std::size_t stage_index, const Shader&
u32 binding = device.GetBaseBindings(stage_index).sampler;
for (const auto& entry : shader->GetEntries().samplers) {
const auto shader_type = static_cast<ShaderType>(stage_index);
- for (std::size_t i = 0; i < entry.Size(); ++i) {
+ for (std::size_t i = 0; i < entry.size; ++i) {
const auto texture = GetTextureInfo(maxwell3d, entry, shader_type, i);
SetupTexture(binding++, texture, entry);
}
@@ -823,7 +913,7 @@ void RasterizerOpenGL::SetupComputeTextures(const Shader& kernel) {
const auto& compute = system.GPU().KeplerCompute();
u32 binding = 0;
for (const auto& entry : kernel->GetEntries().samplers) {
- for (std::size_t i = 0; i < entry.Size(); ++i) {
+ for (std::size_t i = 0; i < entry.size; ++i) {
const auto texture = GetTextureInfo(compute, entry, ShaderType::Compute, i);
SetupTexture(binding++, texture, entry);
}
@@ -880,7 +970,7 @@ void RasterizerOpenGL::SetupImage(u32 binding, const Tegra::Texture::TICEntry& t
if (!tic.IsBuffer()) {
view->ApplySwizzle(tic.x_source, tic.y_source, tic.z_source, tic.w_source);
}
- if (entry.IsWritten()) {
+ if (entry.is_written) {
view->MarkAsModified(texture_cache.Tick());
}
glBindImageTexture(binding, view->GetTexture(), 0, GL_TRUE, 0, GL_READ_WRITE,
@@ -941,11 +1031,7 @@ void RasterizerOpenGL::SyncDepthClamp() {
}
flags[Dirty::DepthClampEnabled] = false;
- const auto& state = gpu.regs.view_volume_clip_control;
- UNIMPLEMENTED_IF_MSG(state.depth_clamp_far != state.depth_clamp_near,
- "Unimplemented depth clamp separation!");
-
- oglEnable(GL_DEPTH_CLAMP, state.depth_clamp_far || state.depth_clamp_near);
+ oglEnable(GL_DEPTH_CLAMP, gpu.regs.view_volume_clip_control.depth_clamp_disabled == 0);
}
void RasterizerOpenGL::SyncClipEnabled(u32 clip_mask) {
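The rasterizer hooks added above (SignalSemaphore, SignalSyncPoint, ReleaseFences) share one dispatch rule: when the GPU is not running asynchronously the request is serviced on the spot, otherwise it is parked in the fence manager and drained later via WaitPendingFences. Below is an illustrative sketch of that pattern only; the Gpu and FenceManager types are hand-rolled stand-ins, not the real yuzu classes.

// Stand-alone sketch of the "immediate when synchronous, deferred when async" dispatch.
#include <cstdint>
#include <functional>
#include <vector>

struct Gpu {
    bool is_async = false;
    bool IsAsync() const { return is_async; }
    void WriteSemaphore(std::uint64_t addr, std::uint32_t value) { /* write guest memory at addr */ }
    void IncrementSyncPoint(std::uint32_t id) { /* bump the syncpoint counter now */ }
};

class FenceManager {
public:
    explicit FenceManager(Gpu& gpu) : gpu{gpu} {}

    // Queue the signal behind a GPU-side fence instead of servicing it immediately.
    void SignalSemaphore(std::uint64_t addr, std::uint32_t value) {
        pending.push_back([this, addr, value] { gpu.WriteSemaphore(addr, value); });
    }
    void SignalSyncPoint(std::uint32_t id) {
        pending.push_back([this, id] { gpu.IncrementSyncPoint(id); });
    }

    // What ReleaseFences() ends up doing: flush everything that was deferred.
    void WaitPendingFences() {
        for (auto& signal : pending) {
            signal();
        }
        pending.clear();
    }

private:
    Gpu& gpu;
    std::vector<std::function<void()>> pending;
};

void SignalSemaphore(Gpu& gpu, FenceManager& fences, std::uint64_t addr, std::uint32_t value) {
    if (!gpu.IsAsync()) {
        gpu.WriteSemaphore(addr, value); // synchronous GPU: no fence needed
        return;
    }
    fences.SignalSemaphore(addr, value); // async GPU: defer behind a fence
}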
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index caea174d2..b94c65907 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -23,6 +23,7 @@
#include "video_core/rasterizer_interface.h"
#include "video_core/renderer_opengl/gl_buffer_cache.h"
#include "video_core/renderer_opengl/gl_device.h"
+#include "video_core/renderer_opengl/gl_fence_manager.h"
#include "video_core/renderer_opengl/gl_framebuffer_cache.h"
#include "video_core/renderer_opengl/gl_query_cache.h"
#include "video_core/renderer_opengl/gl_resource_manager.h"
@@ -66,8 +67,15 @@ public:
void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) override;
void FlushAll() override;
void FlushRegion(VAddr addr, u64 size) override;
+ bool MustFlushRegion(VAddr addr, u64 size) override;
void InvalidateRegion(VAddr addr, u64 size) override;
+ void OnCPUWrite(VAddr addr, u64 size) override;
+ void SyncGuestHost() override;
+ void SignalSemaphore(GPUVAddr addr, u32 value) override;
+ void SignalSyncPoint(u32 value) override;
+ void ReleaseFences() override;
void FlushAndInvalidateRegion(VAddr addr, u64 size) override;
+ void WaitForIdle() override;
void FlushCommands() override;
void TickFrame() override;
bool AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src,
@@ -88,7 +96,8 @@ private:
/// Configures the color and depth framebuffer states.
void ConfigureFramebuffers();
- void ConfigureClearFramebuffer(bool using_color_fb, bool using_depth_fb, bool using_stencil_fb);
+ /// Configures the color and depth framebuffer for clearing.
+ void ConfigureClearFramebuffer(bool using_color, bool using_depth_stencil);
/// Configures the current constbuffers to use for the draw command.
void SetupDrawConstBuffers(std::size_t stage_index, const Shader& shader);
@@ -222,6 +231,8 @@ private:
SamplerCacheOpenGL sampler_cache;
FramebufferCacheOpenGL framebuffer_cache;
QueryCache query_cache;
+ OGLBufferCache buffer_cache;
+ FenceManagerOpenGL fence_manager;
Core::System& system;
ScreenInfo& screen_info;
@@ -229,7 +240,6 @@ private:
StateTracker& state_tracker;
static constexpr std::size_t STREAM_BUFFER_SIZE = 128 * 1024 * 1024;
- OGLBufferCache buffer_cache;
GLint vertex_binding = 0;
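Note the member reordering above: buffer_cache now sits next to fence_manager instead of below STREAM_BUFFER_SIZE. My assumption is that this is about construction order, since the fence manager is handed references to the caches; a non-static data member can only safely receive a reference to another member that is declared (and therefore constructed) before it. A toy illustration:

#include <iostream>

struct Cache {
    Cache() { std::cout << "cache ready\n"; }
};

struct Manager {
    explicit Manager(Cache& cache) : cache{cache} { std::cout << "manager ready\n"; }
    Cache& cache;
};

struct Rasterizer {
    Cache buffer_cache;                   // constructed first because it is declared first
    Manager fence_manager{buffer_cache};  // safe: buffer_cache already exists at this point
};

int main() {
    Rasterizer rasterizer; // prints "cache ready" then "manager ready"
}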
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp
index 6d2ff20f9..9759a7078 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp
@@ -10,8 +10,6 @@
#include <thread>
#include <unordered_set>
-#include <boost/functional/hash.hpp>
-
#include "common/alignment.h"
#include "common/assert.h"
#include "common/logging/log.h"
@@ -28,76 +26,26 @@
#include "video_core/renderer_opengl/gl_shader_disk_cache.h"
#include "video_core/renderer_opengl/gl_state_tracker.h"
#include "video_core/renderer_opengl/utils.h"
+#include "video_core/shader/memory_util.h"
#include "video_core/shader/registry.h"
#include "video_core/shader/shader_ir.h"
namespace OpenGL {
using Tegra::Engines::ShaderType;
+using VideoCommon::Shader::GetShaderAddress;
+using VideoCommon::Shader::GetShaderCode;
+using VideoCommon::Shader::GetUniqueIdentifier;
+using VideoCommon::Shader::KERNEL_MAIN_OFFSET;
using VideoCommon::Shader::ProgramCode;
using VideoCommon::Shader::Registry;
using VideoCommon::Shader::ShaderIR;
+using VideoCommon::Shader::STAGE_MAIN_OFFSET;
namespace {
-constexpr u32 STAGE_MAIN_OFFSET = 10;
-constexpr u32 KERNEL_MAIN_OFFSET = 0;
-
constexpr VideoCommon::Shader::CompilerSettings COMPILER_SETTINGS{};
-/// Gets the address for the specified shader stage program
-GPUVAddr GetShaderAddress(Core::System& system, Maxwell::ShaderProgram program) {
- const auto& gpu{system.GPU().Maxwell3D()};
- const auto& shader_config{gpu.regs.shader_config[static_cast<std::size_t>(program)]};
- return gpu.regs.code_address.CodeAddress() + shader_config.offset;
-}
-
-/// Gets if the current instruction offset is a scheduler instruction
-constexpr bool IsSchedInstruction(std::size_t offset, std::size_t main_offset) {
- // Sched instructions appear once every 4 instructions.
- constexpr std::size_t SchedPeriod = 4;
- const std::size_t absolute_offset = offset - main_offset;
- return (absolute_offset % SchedPeriod) == 0;
-}
-
-/// Calculates the size of a program stream
-std::size_t CalculateProgramSize(const ProgramCode& program) {
- constexpr std::size_t start_offset = 10;
- // This is the encoded version of BRA that jumps to itself. All Nvidia
- // shaders end with one.
- constexpr u64 self_jumping_branch = 0xE2400FFFFF07000FULL;
- constexpr u64 mask = 0xFFFFFFFFFF7FFFFFULL;
- std::size_t offset = start_offset;
- while (offset < program.size()) {
- const u64 instruction = program[offset];
- if (!IsSchedInstruction(offset, start_offset)) {
- if ((instruction & mask) == self_jumping_branch) {
- // End on Maxwell's "nop" instruction
- break;
- }
- if (instruction == 0) {
- break;
- }
- }
- offset++;
- }
- // The last instruction is included in the program size
- return std::min(offset + 1, program.size());
-}
-
-/// Gets the shader program code from memory for the specified address
-ProgramCode GetShaderCode(Tegra::MemoryManager& memory_manager, const GPUVAddr gpu_addr,
- const u8* host_ptr) {
- ProgramCode code(VideoCommon::Shader::MAX_PROGRAM_LENGTH);
- ASSERT_OR_EXECUTE(host_ptr != nullptr, {
- std::fill(code.begin(), code.end(), 0);
- return code;
- });
- memory_manager.ReadBlockUnsafe(gpu_addr, code.data(), code.size() * sizeof(u64));
- code.resize(CalculateProgramSize(code));
- return code;
-}
-
/// Gets the shader type from a Maxwell program type
constexpr GLenum GetGLShaderType(ShaderType shader_type) {
switch (shader_type) {
@@ -114,17 +62,6 @@ constexpr GLenum GetGLShaderType(ShaderType shader_type) {
}
}
-/// Hashes one (or two) program streams
-u64 GetUniqueIdentifier(ShaderType shader_type, bool is_a, const ProgramCode& code,
- const ProgramCode& code_b = {}) {
- u64 unique_identifier = boost::hash_value(code);
- if (is_a) {
- // VertexA programs include two programs
- boost::hash_combine(unique_identifier, boost::hash_value(code_b));
- }
- return unique_identifier;
-}
-
constexpr const char* GetShaderTypeName(ShaderType shader_type) {
switch (shader_type) {
case ShaderType::Vertex:
@@ -448,7 +385,7 @@ Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) {
// Look up shader in the cache based on address
const auto cpu_addr{memory_manager.GpuToCpuAddress(address)};
- Shader shader{cpu_addr ? TryGet(*cpu_addr) : nullptr};
+ Shader shader{cpu_addr ? TryGet(*cpu_addr) : null_shader};
if (shader) {
return last_shaders[static_cast<std::size_t>(program)] = shader;
}
@@ -456,11 +393,12 @@ Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) {
const auto host_ptr{memory_manager.GetPointer(address)};
// No shader found - create a new one
- ProgramCode code{GetShaderCode(memory_manager, address, host_ptr)};
+ ProgramCode code{GetShaderCode(memory_manager, address, host_ptr, false)};
ProgramCode code_b;
if (program == Maxwell::ShaderProgram::VertexA) {
const GPUVAddr address_b{GetShaderAddress(system, Maxwell::ShaderProgram::VertexB)};
- code_b = GetShaderCode(memory_manager, address_b, memory_manager.GetPointer(address_b));
+ const u8* host_ptr_b = memory_manager.GetPointer(address_b);
+ code_b = GetShaderCode(memory_manager, address_b, host_ptr_b, false);
}
const auto unique_identifier = GetUniqueIdentifier(
@@ -477,7 +415,12 @@ Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) {
const std::size_t size_in_bytes = code.size() * sizeof(u64);
shader = CachedShader::CreateFromCache(params, found->second, size_in_bytes);
}
- Register(shader);
+
+ if (cpu_addr) {
+ Register(shader);
+ } else {
+ null_shader = shader;
+ }
return last_shaders[static_cast<std::size_t>(program)] = shader;
}
@@ -486,14 +429,14 @@ Shader ShaderCacheOpenGL::GetComputeKernel(GPUVAddr code_addr) {
auto& memory_manager{system.GPU().MemoryManager()};
const auto cpu_addr{memory_manager.GpuToCpuAddress(code_addr)};
- auto kernel = cpu_addr ? TryGet(*cpu_addr) : nullptr;
+ auto kernel = cpu_addr ? TryGet(*cpu_addr) : null_kernel;
if (kernel) {
return kernel;
}
const auto host_ptr{memory_manager.GetPointer(code_addr)};
// No kernel found, create a new one
- auto code{GetShaderCode(memory_manager, code_addr, host_ptr)};
+ auto code{GetShaderCode(memory_manager, code_addr, host_ptr, true)};
const auto unique_identifier{GetUniqueIdentifier(ShaderType::Compute, false, code)};
const ShaderParameters params{system, disk_cache, device,
@@ -507,7 +450,11 @@ Shader ShaderCacheOpenGL::GetComputeKernel(GPUVAddr code_addr) {
kernel = CachedShader::CreateFromCache(params, found->second, size_in_bytes);
}
- Register(kernel);
+ if (cpu_addr) {
+ Register(kernel);
+ } else {
+ null_kernel = kernel;
+ }
return kernel;
}
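Both GetStageProgram and GetComputeKernel now special-case programs whose GPU address has no CPU mapping: the cache is keyed by CPU address, so such a shader cannot go through Register() and is instead remembered in the new null_shader / null_kernel members. A self-contained sketch of that lookup pattern (ShaderCacheSketch and its helpers are illustrative stand-ins, not yuzu's ShaderCacheOpenGL):

#include <cstdint>
#include <memory>
#include <optional>
#include <unordered_map>

struct CachedShader {};
using Shader = std::shared_ptr<CachedShader>;

class ShaderCacheSketch {
public:
    Shader Get(std::uint64_t gpu_addr) {
        const std::optional<std::uint64_t> cpu_addr = GpuToCpuAddress(gpu_addr);
        // Addressable shaders come from the map; unmapped ones reuse the fallback slot.
        if (Shader shader = cpu_addr ? TryGet(*cpu_addr) : null_shader) {
            return shader;
        }
        Shader shader = std::make_shared<CachedShader>(); // decompile/build would happen here
        if (cpu_addr) {
            cache.emplace(*cpu_addr, shader); // normal Register() path
        } else {
            null_shader = shader; // no CPU mapping: keep it out of the map
        }
        return shader;
    }

private:
    static std::optional<std::uint64_t> GpuToCpuAddress(std::uint64_t) {
        return std::nullopt; // stand-in for the real GPU memory manager query
    }
    Shader TryGet(std::uint64_t cpu_addr) const {
        const auto it = cache.find(cpu_addr);
        return it != cache.end() ? it->second : nullptr;
    }

    std::unordered_map<std::uint64_t, Shader> cache;
    Shader null_shader;
};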
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.h b/src/video_core/renderer_opengl/gl_shader_cache.h
index c836df5bd..91690b470 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.h
+++ b/src/video_core/renderer_opengl/gl_shader_cache.h
@@ -125,6 +125,9 @@ private:
ShaderDiskCacheOpenGL disk_cache;
std::unordered_map<u64, PrecompiledShader> runtime_cache;
+ Shader null_shader{};
+ Shader null_kernel{};
+
std::array<Shader, Maxwell::MaxShaderProgram> last_shaders;
};
diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
index 22242cce9..99fd4ae2c 100644
--- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
@@ -484,7 +484,7 @@ private:
code.AddLine("switch (jmp_to) {{");
for (const auto& pair : ir.GetBasicBlocks()) {
- const auto [address, bb] = pair;
+ const auto& [address, bb] = pair;
code.AddLine("case 0x{:X}U: {{", address);
++code.scope;
@@ -870,13 +870,13 @@ private:
for (const auto& sampler : ir.GetSamplers()) {
const std::string name = GetSampler(sampler);
const std::string description = fmt::format("layout (binding = {}) uniform", binding);
- binding += sampler.IsIndexed() ? sampler.Size() : 1;
+ binding += sampler.is_indexed ? sampler.size : 1;
std::string sampler_type = [&]() {
- if (sampler.IsBuffer()) {
+ if (sampler.is_buffer) {
return "samplerBuffer";
}
- switch (sampler.GetType()) {
+ switch (sampler.type) {
case Tegra::Shader::TextureType::Texture1D:
return "sampler1D";
case Tegra::Shader::TextureType::Texture2D:
@@ -890,17 +890,17 @@ private:
return "sampler2D";
}
}();
- if (sampler.IsArray()) {
+ if (sampler.is_array) {
sampler_type += "Array";
}
- if (sampler.IsShadow()) {
+ if (sampler.is_shadow) {
sampler_type += "Shadow";
}
- if (!sampler.IsIndexed()) {
+ if (!sampler.is_indexed) {
code.AddLine("{} {} {};", description, sampler_type, name);
} else {
- code.AddLine("{} {} {}[{}];", description, sampler_type, name, sampler.Size());
+ code.AddLine("{} {} {}[{}];", description, sampler_type, name, sampler.size);
}
}
if (!ir.GetSamplers().empty()) {
@@ -946,14 +946,14 @@ private:
u32 binding = device.GetBaseBindings(stage).image;
for (const auto& image : ir.GetImages()) {
std::string qualifier = "coherent volatile";
- if (image.IsRead() && !image.IsWritten()) {
+ if (image.is_read && !image.is_written) {
qualifier += " readonly";
- } else if (image.IsWritten() && !image.IsRead()) {
+ } else if (image.is_written && !image.is_read) {
qualifier += " writeonly";
}
- const char* format = image.IsAtomic() ? "r32ui, " : "";
- const char* type_declaration = GetImageTypeDeclaration(image.GetType());
+ const char* format = image.is_atomic ? "r32ui, " : "";
+ const char* type_declaration = GetImageTypeDeclaration(image.type);
code.AddLine("layout ({}binding = {}) {} uniform uimage{} {};", format, binding++,
qualifier, type_declaration, GetImage(image));
}
@@ -1337,8 +1337,8 @@ private:
ASSERT(meta);
const std::size_t count = operation.GetOperandsCount();
- const bool has_array = meta->sampler.IsArray();
- const bool has_shadow = meta->sampler.IsShadow();
+ const bool has_array = meta->sampler.is_array;
+ const bool has_shadow = meta->sampler.is_shadow;
std::string expr = "texture" + function_suffix;
if (!meta->aoffi.empty()) {
@@ -1346,7 +1346,7 @@ private:
} else if (!meta->ptp.empty()) {
expr += "Offsets";
}
- if (!meta->sampler.IsIndexed()) {
+ if (!meta->sampler.is_indexed) {
expr += '(' + GetSampler(meta->sampler) + ", ";
} else {
expr += '(' + GetSampler(meta->sampler) + '[' + Visit(meta->index).AsUint() + "], ";
@@ -1484,8 +1484,8 @@ private:
dy += '(';
for (std::size_t index = 0; index < components; ++index) {
- const auto operand_x{derivates.at(index * 2)};
- const auto operand_y{derivates.at(index * 2 + 1)};
+ const auto& operand_x{derivates.at(index * 2)};
+ const auto& operand_y{derivates.at(index * 2 + 1)};
dx += Visit(operand_x).AsFloat();
dy += Visit(operand_y).AsFloat();
@@ -1870,6 +1870,14 @@ private:
return GenerateBinaryInfix(operation, ">=", Type::Bool, type, type);
}
+ Expression LogicalAddCarry(Operation operation) {
+ const std::string carry = code.GenerateTemporary();
+ code.AddLine("uint {};", carry);
+ code.AddLine("uaddCarry({}, {}, {});", VisitOperand(operation, 0).AsUint(),
+ VisitOperand(operation, 1).AsUint(), carry);
+ return {fmt::format("({} != 0)", carry), Type::Bool};
+ }
+
Expression LogicalFIsNan(Operation operation) {
return GenerateUnary(operation, "isnan", Type::Bool, Type::Float);
}
@@ -1974,7 +1982,7 @@ private:
std::string expr = GenerateTexture(
operation, "", {TextureOffset{}, TextureArgument{Type::Float, meta->bias}});
- if (meta->sampler.IsShadow()) {
+ if (meta->sampler.is_shadow) {
expr = "vec4(" + expr + ')';
}
return {expr + GetSwizzle(meta->element), Type::Float};
@@ -1986,7 +1994,7 @@ private:
std::string expr = GenerateTexture(
operation, "Lod", {TextureArgument{Type::Float, meta->lod}, TextureOffset{}});
- if (meta->sampler.IsShadow()) {
+ if (meta->sampler.is_shadow) {
expr = "vec4(" + expr + ')';
}
return {expr + GetSwizzle(meta->element), Type::Float};
@@ -1995,11 +2003,11 @@ private:
Expression TextureGather(Operation operation) {
const auto& meta = std::get<MetaTexture>(operation.GetMeta());
- const auto type = meta.sampler.IsShadow() ? Type::Float : Type::Int;
- const bool separate_dc = meta.sampler.IsShadow();
+ const auto type = meta.sampler.is_shadow ? Type::Float : Type::Int;
+ const bool separate_dc = meta.sampler.is_shadow;
std::vector<TextureIR> ir;
- if (meta.sampler.IsShadow()) {
+ if (meta.sampler.is_shadow) {
ir = {TextureOffset{}};
} else {
ir = {TextureOffset{}, TextureArgument{type, meta.component}};
@@ -2044,7 +2052,7 @@ private:
constexpr std::array constructors = {"int", "ivec2", "ivec3", "ivec4"};
const auto meta = std::get_if<MetaTexture>(&operation.GetMeta());
ASSERT(meta);
- UNIMPLEMENTED_IF(meta->sampler.IsArray());
+ UNIMPLEMENTED_IF(meta->sampler.is_array);
const std::size_t count = operation.GetOperandsCount();
std::string expr = "texelFetch(";
@@ -2065,7 +2073,7 @@ private:
}
expr += ')';
- if (meta->lod && !meta->sampler.IsBuffer()) {
+ if (meta->lod && !meta->sampler.is_buffer) {
expr += ", ";
expr += Visit(meta->lod).AsInt();
}
@@ -2076,12 +2084,10 @@ private:
}
Expression TextureGradient(Operation operation) {
- const auto meta = std::get_if<MetaTexture>(&operation.GetMeta());
- ASSERT(meta);
-
+ const auto& meta = std::get<MetaTexture>(operation.GetMeta());
std::string expr =
GenerateTexture(operation, "Grad", {TextureDerivates{}, TextureOffset{}});
- return {std::move(expr) + GetSwizzle(meta->element), Type::Float};
+ return {std::move(expr) + GetSwizzle(meta.element), Type::Float};
}
Expression ImageLoad(Operation operation) {
@@ -2441,6 +2447,8 @@ private:
&GLSLDecompiler::LogicalNotEqual<Type::Uint>,
&GLSLDecompiler::LogicalGreaterEqual<Type::Uint>,
+ &GLSLDecompiler::LogicalAddCarry,
+
&GLSLDecompiler::Logical2HLessThan<false>,
&GLSLDecompiler::Logical2HEqual<false>,
&GLSLDecompiler::Logical2HLessEqual<false>,
@@ -2598,11 +2606,11 @@ private:
}
std::string GetSampler(const Sampler& sampler) const {
- return AppendSuffix(static_cast<u32>(sampler.GetIndex()), "sampler");
+ return AppendSuffix(sampler.index, "sampler");
}
std::string GetImage(const Image& image) const {
- return AppendSuffix(static_cast<u32>(image.GetIndex()), "image");
+ return AppendSuffix(image.index, "image");
}
std::string AppendSuffix(u32 index, std::string_view name) const {
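The new LogicalAddCarry operation above emits a GLSL uaddCarry call and converts the carry output into a boolean. For reference, the same carry bit can be recovered on the host with plain unsigned arithmetic, since unsigned addition wraps modulo 2^32; this is only a worked example of what the generated code computes, not part of the patch:

#include <cstdint>

// True when a + b overflows 32 bits, i.e. when uaddCarry would report carry != 0.
constexpr bool AddCarry32(std::uint32_t a, std::uint32_t b) noexcept {
    const std::uint32_t sum = a + b; // wraps on overflow
    return sum < a;
}

static_assert(!AddCarry32(1u, 2u));
static_assert(AddCarry32(0xFFFFFFFFu, 1u));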
diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.h b/src/video_core/renderer_opengl/gl_shader_decompiler.h
index e7dbd810c..e8a178764 100644
--- a/src/video_core/renderer_opengl/gl_shader_decompiler.h
+++ b/src/video_core/renderer_opengl/gl_shader_decompiler.h
@@ -33,36 +33,19 @@ public:
}
private:
- u32 index{};
+ u32 index = 0;
};
-class GlobalMemoryEntry {
-public:
- explicit GlobalMemoryEntry(u32 cbuf_index, u32 cbuf_offset, bool is_read, bool is_written)
+struct GlobalMemoryEntry {
+ constexpr explicit GlobalMemoryEntry(u32 cbuf_index, u32 cbuf_offset, bool is_read,
+ bool is_written)
: cbuf_index{cbuf_index}, cbuf_offset{cbuf_offset}, is_read{is_read}, is_written{
is_written} {}
- u32 GetCbufIndex() const {
- return cbuf_index;
- }
-
- u32 GetCbufOffset() const {
- return cbuf_offset;
- }
-
- bool IsRead() const {
- return is_read;
- }
-
- bool IsWritten() const {
- return is_written;
- }
-
-private:
- u32 cbuf_index{};
- u32 cbuf_offset{};
- bool is_read{};
- bool is_written{};
+ u32 cbuf_index = 0;
+ u32 cbuf_offset = 0;
+ bool is_read = false;
+ bool is_written = false;
};
struct ShaderEntries {
diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
index 2bb376555..648b1e71b 100644
--- a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
+++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
@@ -2,10 +2,12 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include <cstring>
#include <tuple>
#include <boost/functional/hash.hpp>
+#include "common/cityhash.h"
#include "common/common_types.h"
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
@@ -13,289 +15,349 @@ namespace Vulkan {
namespace {
-constexpr FixedPipelineState::DepthStencil GetDepthStencilState(const Maxwell& regs) {
- const FixedPipelineState::StencilFace front_stencil(
- regs.stencil_front_op_fail, regs.stencil_front_op_zfail, regs.stencil_front_op_zpass,
- regs.stencil_front_func_func);
- const FixedPipelineState::StencilFace back_stencil =
- regs.stencil_two_side_enable
- ? FixedPipelineState::StencilFace(regs.stencil_back_op_fail, regs.stencil_back_op_zfail,
- regs.stencil_back_op_zpass,
- regs.stencil_back_func_func)
- : front_stencil;
- return FixedPipelineState::DepthStencil(
- regs.depth_test_enable == 1, regs.depth_write_enabled == 1, regs.depth_bounds_enable == 1,
- regs.stencil_enable == 1, regs.depth_test_func, front_stencil, back_stencil);
-}
-
-constexpr FixedPipelineState::InputAssembly GetInputAssemblyState(const Maxwell& regs) {
- return FixedPipelineState::InputAssembly(
- regs.draw.topology, regs.primitive_restart.enabled,
- regs.draw.topology == Maxwell::PrimitiveTopology::Points ? regs.point_size : 0.0f);
-}
-
-constexpr FixedPipelineState::BlendingAttachment GetBlendingAttachmentState(
- const Maxwell& regs, std::size_t render_target) {
- const auto& mask = regs.color_mask[regs.color_mask_common ? 0 : render_target];
- const std::array components = {mask.R != 0, mask.G != 0, mask.B != 0, mask.A != 0};
-
- const FixedPipelineState::BlendingAttachment default_blending(
- false, Maxwell::Blend::Equation::Add, Maxwell::Blend::Factor::One,
- Maxwell::Blend::Factor::Zero, Maxwell::Blend::Equation::Add, Maxwell::Blend::Factor::One,
- Maxwell::Blend::Factor::Zero, components);
- if (render_target >= regs.rt_control.count) {
- return default_blending;
- }
+constexpr std::size_t POINT = 0;
+constexpr std::size_t LINE = 1;
+constexpr std::size_t POLYGON = 2;
+constexpr std::array POLYGON_OFFSET_ENABLE_LUT = {
+ POINT, // Points
+ LINE, // Lines
+ LINE, // LineLoop
+ LINE, // LineStrip
+ POLYGON, // Triangles
+ POLYGON, // TriangleStrip
+ POLYGON, // TriangleFan
+ POLYGON, // Quads
+ POLYGON, // QuadStrip
+ POLYGON, // Polygon
+ LINE, // LinesAdjacency
+ LINE, // LineStripAdjacency
+ POLYGON, // TrianglesAdjacency
+ POLYGON, // TriangleStripAdjacency
+ POLYGON, // Patches
+};
- if (!regs.independent_blend_enable) {
- const auto& src = regs.blend;
- if (!src.enable[render_target]) {
- return default_blending;
- }
- return FixedPipelineState::BlendingAttachment(
- true, src.equation_rgb, src.factor_source_rgb, src.factor_dest_rgb, src.equation_a,
- src.factor_source_a, src.factor_dest_a, components);
- }
+} // Anonymous namespace
- if (!regs.blend.enable[render_target]) {
- return default_blending;
+void FixedPipelineState::DepthStencil::Fill(const Maxwell& regs) noexcept {
+ raw = 0;
+ front.action_stencil_fail.Assign(PackStencilOp(regs.stencil_front_op_fail));
+ front.action_depth_fail.Assign(PackStencilOp(regs.stencil_front_op_zfail));
+ front.action_depth_pass.Assign(PackStencilOp(regs.stencil_front_op_zpass));
+ front.test_func.Assign(PackComparisonOp(regs.stencil_front_func_func));
+ if (regs.stencil_two_side_enable) {
+ back.action_stencil_fail.Assign(PackStencilOp(regs.stencil_back_op_fail));
+ back.action_depth_fail.Assign(PackStencilOp(regs.stencil_back_op_zfail));
+ back.action_depth_pass.Assign(PackStencilOp(regs.stencil_back_op_zpass));
+ back.test_func.Assign(PackComparisonOp(regs.stencil_back_func_func));
+ } else {
+ back.action_stencil_fail.Assign(front.action_stencil_fail);
+ back.action_depth_fail.Assign(front.action_depth_fail);
+ back.action_depth_pass.Assign(front.action_depth_pass);
+ back.test_func.Assign(front.test_func);
}
- const auto& src = regs.independent_blend[render_target];
- return FixedPipelineState::BlendingAttachment(
- true, src.equation_rgb, src.factor_source_rgb, src.factor_dest_rgb, src.equation_a,
- src.factor_source_a, src.factor_dest_a, components);
-}
-
-constexpr FixedPipelineState::ColorBlending GetColorBlendingState(const Maxwell& regs) {
- return FixedPipelineState::ColorBlending(
- {regs.blend_color.r, regs.blend_color.g, regs.blend_color.b, regs.blend_color.a},
- regs.rt_control.count,
- {GetBlendingAttachmentState(regs, 0), GetBlendingAttachmentState(regs, 1),
- GetBlendingAttachmentState(regs, 2), GetBlendingAttachmentState(regs, 3),
- GetBlendingAttachmentState(regs, 4), GetBlendingAttachmentState(regs, 5),
- GetBlendingAttachmentState(regs, 6), GetBlendingAttachmentState(regs, 7)});
-}
-
-constexpr FixedPipelineState::Tessellation GetTessellationState(const Maxwell& regs) {
- return FixedPipelineState::Tessellation(regs.patch_vertices, regs.tess_mode.prim,
- regs.tess_mode.spacing, regs.tess_mode.cw != 0);
+ depth_test_enable.Assign(regs.depth_test_enable);
+ depth_write_enable.Assign(regs.depth_write_enabled);
+ depth_bounds_enable.Assign(regs.depth_bounds_enable);
+ stencil_enable.Assign(regs.stencil_enable);
+ depth_test_func.Assign(PackComparisonOp(regs.depth_test_func));
}
-constexpr std::size_t Point = 0;
-constexpr std::size_t Line = 1;
-constexpr std::size_t Polygon = 2;
-constexpr std::array PolygonOffsetEnableLUT = {
- Point, // Points
- Line, // Lines
- Line, // LineLoop
- Line, // LineStrip
- Polygon, // Triangles
- Polygon, // TriangleStrip
- Polygon, // TriangleFan
- Polygon, // Quads
- Polygon, // QuadStrip
- Polygon, // Polygon
- Line, // LinesAdjacency
- Line, // LineStripAdjacency
- Polygon, // TrianglesAdjacency
- Polygon, // TriangleStripAdjacency
- Polygon, // Patches
-};
-
-constexpr FixedPipelineState::Rasterizer GetRasterizerState(const Maxwell& regs) {
+void FixedPipelineState::Rasterizer::Fill(const Maxwell& regs) noexcept {
+ const auto& clip = regs.view_volume_clip_control;
const std::array enabled_lut = {regs.polygon_offset_point_enable,
regs.polygon_offset_line_enable,
regs.polygon_offset_fill_enable};
- const auto topology = static_cast<std::size_t>(regs.draw.topology.Value());
- const bool depth_bias_enabled = enabled_lut[PolygonOffsetEnableLUT[topology]];
-
- const auto& clip = regs.view_volume_clip_control;
- const bool depth_clamp_enabled = clip.depth_clamp_near == 1 || clip.depth_clamp_far == 1;
+ const u32 topology_index = static_cast<u32>(regs.draw.topology.Value());
- Maxwell::FrontFace front_face = regs.front_face;
+ u32 packed_front_face = PackFrontFace(regs.front_face);
if (regs.screen_y_control.triangle_rast_flip != 0 &&
regs.viewport_transform[0].scale_y > 0.0f) {
- if (front_face == Maxwell::FrontFace::CounterClockWise)
- front_face = Maxwell::FrontFace::ClockWise;
- else if (front_face == Maxwell::FrontFace::ClockWise)
- front_face = Maxwell::FrontFace::CounterClockWise;
+ // Flip front face
+ packed_front_face = 1 - packed_front_face;
}
- const bool gl_ndc = regs.depth_mode == Maxwell::DepthMode::MinusOneToOne;
- return FixedPipelineState::Rasterizer(regs.cull_test_enabled, depth_bias_enabled,
- depth_clamp_enabled, gl_ndc, regs.cull_face, front_face);
+ raw = 0;
+ topology.Assign(topology_index);
+ primitive_restart_enable.Assign(regs.primitive_restart.enabled != 0 ? 1 : 0);
+ cull_enable.Assign(regs.cull_test_enabled != 0 ? 1 : 0);
+ depth_bias_enable.Assign(enabled_lut[POLYGON_OFFSET_ENABLE_LUT[topology_index]] != 0 ? 1 : 0);
+ depth_clamp_disabled.Assign(regs.view_volume_clip_control.depth_clamp_disabled.Value());
+ ndc_minus_one_to_one.Assign(regs.depth_mode == Maxwell::DepthMode::MinusOneToOne ? 1 : 0);
+ cull_face.Assign(PackCullFace(regs.cull_face));
+ front_face.Assign(packed_front_face);
+ polygon_mode.Assign(PackPolygonMode(regs.polygon_mode_front));
+ patch_control_points_minus_one.Assign(regs.patch_vertices - 1);
+ tessellation_primitive.Assign(static_cast<u32>(regs.tess_mode.prim.Value()));
+ tessellation_spacing.Assign(static_cast<u32>(regs.tess_mode.spacing.Value()));
+ tessellation_clockwise.Assign(regs.tess_mode.cw.Value());
+ logic_op_enable.Assign(regs.logic_op.enable != 0 ? 1 : 0);
+ logic_op.Assign(PackLogicOp(regs.logic_op.operation));
+ std::memcpy(&point_size, &regs.point_size, sizeof(point_size)); // TODO: C++20 std::bit_cast
}
-} // Anonymous namespace
-
-std::size_t FixedPipelineState::VertexBinding::Hash() const noexcept {
- return (index << stride) ^ divisor;
+void FixedPipelineState::ColorBlending::Fill(const Maxwell& regs) noexcept {
+ for (std::size_t index = 0; index < std::size(attachments); ++index) {
+ attachments[index].Fill(regs, index);
+ }
}
-bool FixedPipelineState::VertexBinding::operator==(const VertexBinding& rhs) const noexcept {
- return std::tie(index, stride, divisor) == std::tie(rhs.index, rhs.stride, rhs.divisor);
-}
+void FixedPipelineState::BlendingAttachment::Fill(const Maxwell& regs, std::size_t index) {
+ const auto& mask = regs.color_mask[regs.color_mask_common ? 0 : index];
+
+ raw = 0;
+ mask_r.Assign(mask.R);
+ mask_g.Assign(mask.G);
+ mask_b.Assign(mask.B);
+ mask_a.Assign(mask.A);
+
+ // TODO: C++20 Use templated lambda to deduplicate code
+
+ if (!regs.independent_blend_enable) {
+ const auto& src = regs.blend;
+ if (!src.enable[index]) {
+ return;
+ }
+ equation_rgb.Assign(PackBlendEquation(src.equation_rgb));
+ equation_a.Assign(PackBlendEquation(src.equation_a));
+ factor_source_rgb.Assign(PackBlendFactor(src.factor_source_rgb));
+ factor_dest_rgb.Assign(PackBlendFactor(src.factor_dest_rgb));
+ factor_source_a.Assign(PackBlendFactor(src.factor_source_a));
+ factor_dest_a.Assign(PackBlendFactor(src.factor_dest_a));
+ enable.Assign(1);
+ return;
+ }
-std::size_t FixedPipelineState::VertexAttribute::Hash() const noexcept {
- return static_cast<std::size_t>(index) ^ (static_cast<std::size_t>(buffer) << 13) ^
- (static_cast<std::size_t>(type) << 22) ^ (static_cast<std::size_t>(size) << 31) ^
- (static_cast<std::size_t>(offset) << 36);
+ if (!regs.blend.enable[index]) {
+ return;
+ }
+ const auto& src = regs.independent_blend[index];
+ equation_rgb.Assign(PackBlendEquation(src.equation_rgb));
+ equation_a.Assign(PackBlendEquation(src.equation_a));
+ factor_source_rgb.Assign(PackBlendFactor(src.factor_source_rgb));
+ factor_dest_rgb.Assign(PackBlendFactor(src.factor_dest_rgb));
+ factor_source_a.Assign(PackBlendFactor(src.factor_source_a));
+ factor_dest_a.Assign(PackBlendFactor(src.factor_dest_a));
+ enable.Assign(1);
}
-bool FixedPipelineState::VertexAttribute::operator==(const VertexAttribute& rhs) const noexcept {
- return std::tie(index, buffer, type, size, offset) ==
- std::tie(rhs.index, rhs.buffer, rhs.type, rhs.size, rhs.offset);
+void FixedPipelineState::Fill(const Maxwell& regs) {
+ rasterizer.Fill(regs);
+ depth_stencil.Fill(regs);
+ color_blending.Fill(regs);
}
-std::size_t FixedPipelineState::StencilFace::Hash() const noexcept {
- return static_cast<std::size_t>(action_stencil_fail) ^
- (static_cast<std::size_t>(action_depth_fail) << 4) ^
- (static_cast<std::size_t>(action_depth_fail) << 20) ^
- (static_cast<std::size_t>(action_depth_pass) << 36);
+std::size_t FixedPipelineState::Hash() const noexcept {
+ const u64 hash = Common::CityHash64(reinterpret_cast<const char*>(this), sizeof *this);
+ return static_cast<std::size_t>(hash);
}
-bool FixedPipelineState::StencilFace::operator==(const StencilFace& rhs) const noexcept {
- return std::tie(action_stencil_fail, action_depth_fail, action_depth_pass, test_func) ==
- std::tie(rhs.action_stencil_fail, rhs.action_depth_fail, rhs.action_depth_pass,
- rhs.test_func);
+bool FixedPipelineState::operator==(const FixedPipelineState& rhs) const noexcept {
+ return std::memcmp(this, &rhs, sizeof *this) == 0;
}
-std::size_t FixedPipelineState::BlendingAttachment::Hash() const noexcept {
- return static_cast<std::size_t>(enable) ^ (static_cast<std::size_t>(rgb_equation) << 5) ^
- (static_cast<std::size_t>(src_rgb_func) << 10) ^
- (static_cast<std::size_t>(dst_rgb_func) << 15) ^
- (static_cast<std::size_t>(a_equation) << 20) ^
- (static_cast<std::size_t>(src_a_func) << 25) ^
- (static_cast<std::size_t>(dst_a_func) << 30) ^
- (static_cast<std::size_t>(components[0]) << 35) ^
- (static_cast<std::size_t>(components[1]) << 36) ^
- (static_cast<std::size_t>(components[2]) << 37) ^
- (static_cast<std::size_t>(components[3]) << 38);
+u32 FixedPipelineState::PackComparisonOp(Maxwell::ComparisonOp op) noexcept {
+ // OpenGL enums go from 0x200 to 0x207 and the others from 1 to 8.
+ // Subtracting 0x200 from the OpenGL enums and 1 from the others maps both onto a 0-7 range.
+ // Perfect for a hash.
+ const u32 value = static_cast<u32>(op);
+ return value - (value >= 0x200 ? 0x200 : 1);
}
-bool FixedPipelineState::BlendingAttachment::operator==(const BlendingAttachment& rhs) const
- noexcept {
- return std::tie(enable, rgb_equation, src_rgb_func, dst_rgb_func, a_equation, src_a_func,
- dst_a_func, components) ==
- std::tie(rhs.enable, rhs.rgb_equation, rhs.src_rgb_func, rhs.dst_rgb_func,
- rhs.a_equation, rhs.src_a_func, rhs.dst_a_func, rhs.components);
+Maxwell::ComparisonOp FixedPipelineState::UnpackComparisonOp(u32 packed) noexcept {
+ // See PackComparisonOp for the logic behind this.
+ return static_cast<Maxwell::ComparisonOp>(packed + 1);
}
-std::size_t FixedPipelineState::VertexInput::Hash() const noexcept {
- std::size_t hash = num_bindings ^ (num_attributes << 32);
- for (std::size_t i = 0; i < num_bindings; ++i) {
- boost::hash_combine(hash, bindings[i].Hash());
- }
- for (std::size_t i = 0; i < num_attributes; ++i) {
- boost::hash_combine(hash, attributes[i].Hash());
+u32 FixedPipelineState::PackStencilOp(Maxwell::StencilOp op) noexcept {
+ switch (op) {
+ case Maxwell::StencilOp::Keep:
+ case Maxwell::StencilOp::KeepOGL:
+ return 0;
+ case Maxwell::StencilOp::Zero:
+ case Maxwell::StencilOp::ZeroOGL:
+ return 1;
+ case Maxwell::StencilOp::Replace:
+ case Maxwell::StencilOp::ReplaceOGL:
+ return 2;
+ case Maxwell::StencilOp::Incr:
+ case Maxwell::StencilOp::IncrOGL:
+ return 3;
+ case Maxwell::StencilOp::Decr:
+ case Maxwell::StencilOp::DecrOGL:
+ return 4;
+ case Maxwell::StencilOp::Invert:
+ case Maxwell::StencilOp::InvertOGL:
+ return 5;
+ case Maxwell::StencilOp::IncrWrap:
+ case Maxwell::StencilOp::IncrWrapOGL:
+ return 6;
+ case Maxwell::StencilOp::DecrWrap:
+ case Maxwell::StencilOp::DecrWrapOGL:
+ return 7;
}
- return hash;
+ return 0;
}
-bool FixedPipelineState::VertexInput::operator==(const VertexInput& rhs) const noexcept {
- return std::equal(bindings.begin(), bindings.begin() + num_bindings, rhs.bindings.begin(),
- rhs.bindings.begin() + rhs.num_bindings) &&
- std::equal(attributes.begin(), attributes.begin() + num_attributes,
- rhs.attributes.begin(), rhs.attributes.begin() + rhs.num_attributes);
+Maxwell::StencilOp FixedPipelineState::UnpackStencilOp(u32 packed) noexcept {
+ static constexpr std::array LUT = {Maxwell::StencilOp::Keep, Maxwell::StencilOp::Zero,
+ Maxwell::StencilOp::Replace, Maxwell::StencilOp::Incr,
+ Maxwell::StencilOp::Decr, Maxwell::StencilOp::Invert,
+ Maxwell::StencilOp::IncrWrap, Maxwell::StencilOp::DecrWrap};
+ return LUT[packed];
}
-std::size_t FixedPipelineState::InputAssembly::Hash() const noexcept {
- std::size_t point_size_int = 0;
- std::memcpy(&point_size_int, &point_size, sizeof(point_size));
- return (static_cast<std::size_t>(topology) << 24) ^ (point_size_int << 32) ^
- static_cast<std::size_t>(primitive_restart_enable);
+u32 FixedPipelineState::PackCullFace(Maxwell::CullFace cull) noexcept {
+ // FrontAndBack is 0x408; subtracting 0x406 from it gives 2.
+ // Individual cull faces are 0x404 and 0x405; subtracting 0x404 gives 0 and 1.
+ const u32 value = static_cast<u32>(cull);
+ return value - (value == 0x408 ? 0x406 : 0x404);
}
-bool FixedPipelineState::InputAssembly::operator==(const InputAssembly& rhs) const noexcept {
- return std::tie(topology, primitive_restart_enable, point_size) ==
- std::tie(rhs.topology, rhs.primitive_restart_enable, rhs.point_size);
+Maxwell::CullFace FixedPipelineState::UnpackCullFace(u32 packed) noexcept {
+ static constexpr std::array LUT = {Maxwell::CullFace::Front, Maxwell::CullFace::Back,
+ Maxwell::CullFace::FrontAndBack};
+ return LUT[packed];
}
-std::size_t FixedPipelineState::Tessellation::Hash() const noexcept {
- return static_cast<std::size_t>(patch_control_points) ^
- (static_cast<std::size_t>(primitive) << 6) ^ (static_cast<std::size_t>(spacing) << 8) ^
- (static_cast<std::size_t>(clockwise) << 10);
+u32 FixedPipelineState::PackFrontFace(Maxwell::FrontFace face) noexcept {
+ return static_cast<u32>(face) - 0x900;
}
-bool FixedPipelineState::Tessellation::operator==(const Tessellation& rhs) const noexcept {
- return std::tie(patch_control_points, primitive, spacing, clockwise) ==
- std::tie(rhs.patch_control_points, rhs.primitive, rhs.spacing, rhs.clockwise);
+Maxwell::FrontFace FixedPipelineState::UnpackFrontFace(u32 packed) noexcept {
+ return static_cast<Maxwell::FrontFace>(packed + 0x900);
}
-std::size_t FixedPipelineState::Rasterizer::Hash() const noexcept {
- return static_cast<std::size_t>(cull_enable) ^
- (static_cast<std::size_t>(depth_bias_enable) << 1) ^
- (static_cast<std::size_t>(depth_clamp_enable) << 2) ^
- (static_cast<std::size_t>(ndc_minus_one_to_one) << 3) ^
- (static_cast<std::size_t>(cull_face) << 24) ^
- (static_cast<std::size_t>(front_face) << 48);
+u32 FixedPipelineState::PackPolygonMode(Maxwell::PolygonMode mode) noexcept {
+ return static_cast<u32>(mode) - 0x1B00;
}
-bool FixedPipelineState::Rasterizer::operator==(const Rasterizer& rhs) const noexcept {
- return std::tie(cull_enable, depth_bias_enable, depth_clamp_enable, ndc_minus_one_to_one,
- cull_face, front_face) ==
- std::tie(rhs.cull_enable, rhs.depth_bias_enable, rhs.depth_clamp_enable,
- rhs.ndc_minus_one_to_one, rhs.cull_face, rhs.front_face);
+Maxwell::PolygonMode FixedPipelineState::UnpackPolygonMode(u32 packed) noexcept {
+ return static_cast<Maxwell::PolygonMode>(packed + 0x1B00);
}
-std::size_t FixedPipelineState::DepthStencil::Hash() const noexcept {
- std::size_t hash = static_cast<std::size_t>(depth_test_enable) ^
- (static_cast<std::size_t>(depth_write_enable) << 1) ^
- (static_cast<std::size_t>(depth_bounds_enable) << 2) ^
- (static_cast<std::size_t>(stencil_enable) << 3) ^
- (static_cast<std::size_t>(depth_test_function) << 4);
- boost::hash_combine(hash, front_stencil.Hash());
- boost::hash_combine(hash, back_stencil.Hash());
- return hash;
+u32 FixedPipelineState::PackLogicOp(Maxwell::LogicOperation op) noexcept {
+ return static_cast<u32>(op) - 0x1500;
}
-bool FixedPipelineState::DepthStencil::operator==(const DepthStencil& rhs) const noexcept {
- return std::tie(depth_test_enable, depth_write_enable, depth_bounds_enable, depth_test_function,
- stencil_enable, front_stencil, back_stencil) ==
- std::tie(rhs.depth_test_enable, rhs.depth_write_enable, rhs.depth_bounds_enable,
- rhs.depth_test_function, rhs.stencil_enable, rhs.front_stencil,
- rhs.back_stencil);
+Maxwell::LogicOperation FixedPipelineState::UnpackLogicOp(u32 packed) noexcept {
+ return static_cast<Maxwell::LogicOperation>(packed + 0x1500);
}
-std::size_t FixedPipelineState::ColorBlending::Hash() const noexcept {
- std::size_t hash = attachments_count << 13;
- for (std::size_t rt = 0; rt < static_cast<std::size_t>(attachments_count); ++rt) {
- boost::hash_combine(hash, attachments[rt].Hash());
+u32 FixedPipelineState::PackBlendEquation(Maxwell::Blend::Equation equation) noexcept {
+ switch (equation) {
+ case Maxwell::Blend::Equation::Add:
+ case Maxwell::Blend::Equation::AddGL:
+ return 0;
+ case Maxwell::Blend::Equation::Subtract:
+ case Maxwell::Blend::Equation::SubtractGL:
+ return 1;
+ case Maxwell::Blend::Equation::ReverseSubtract:
+ case Maxwell::Blend::Equation::ReverseSubtractGL:
+ return 2;
+ case Maxwell::Blend::Equation::Min:
+ case Maxwell::Blend::Equation::MinGL:
+ return 3;
+ case Maxwell::Blend::Equation::Max:
+ case Maxwell::Blend::Equation::MaxGL:
+ return 4;
}
- return hash;
+ return 0;
}
-bool FixedPipelineState::ColorBlending::operator==(const ColorBlending& rhs) const noexcept {
- return std::equal(attachments.begin(), attachments.begin() + attachments_count,
- rhs.attachments.begin(), rhs.attachments.begin() + rhs.attachments_count);
+Maxwell::Blend::Equation FixedPipelineState::UnpackBlendEquation(u32 packed) noexcept {
+ static constexpr std::array LUT = {
+ Maxwell::Blend::Equation::Add, Maxwell::Blend::Equation::Subtract,
+ Maxwell::Blend::Equation::ReverseSubtract, Maxwell::Blend::Equation::Min,
+ Maxwell::Blend::Equation::Max};
+ return LUT[packed];
}
-std::size_t FixedPipelineState::Hash() const noexcept {
- std::size_t hash = 0;
- boost::hash_combine(hash, vertex_input.Hash());
- boost::hash_combine(hash, input_assembly.Hash());
- boost::hash_combine(hash, tessellation.Hash());
- boost::hash_combine(hash, rasterizer.Hash());
- boost::hash_combine(hash, depth_stencil.Hash());
- boost::hash_combine(hash, color_blending.Hash());
- return hash;
-}
-
-bool FixedPipelineState::operator==(const FixedPipelineState& rhs) const noexcept {
- return std::tie(vertex_input, input_assembly, tessellation, rasterizer, depth_stencil,
- color_blending) == std::tie(rhs.vertex_input, rhs.input_assembly,
- rhs.tessellation, rhs.rasterizer, rhs.depth_stencil,
- rhs.color_blending);
+u32 FixedPipelineState::PackBlendFactor(Maxwell::Blend::Factor factor) noexcept {
+ switch (factor) {
+ case Maxwell::Blend::Factor::Zero:
+ case Maxwell::Blend::Factor::ZeroGL:
+ return 0;
+ case Maxwell::Blend::Factor::One:
+ case Maxwell::Blend::Factor::OneGL:
+ return 1;
+ case Maxwell::Blend::Factor::SourceColor:
+ case Maxwell::Blend::Factor::SourceColorGL:
+ return 2;
+ case Maxwell::Blend::Factor::OneMinusSourceColor:
+ case Maxwell::Blend::Factor::OneMinusSourceColorGL:
+ return 3;
+ case Maxwell::Blend::Factor::SourceAlpha:
+ case Maxwell::Blend::Factor::SourceAlphaGL:
+ return 4;
+ case Maxwell::Blend::Factor::OneMinusSourceAlpha:
+ case Maxwell::Blend::Factor::OneMinusSourceAlphaGL:
+ return 5;
+ case Maxwell::Blend::Factor::DestAlpha:
+ case Maxwell::Blend::Factor::DestAlphaGL:
+ return 6;
+ case Maxwell::Blend::Factor::OneMinusDestAlpha:
+ case Maxwell::Blend::Factor::OneMinusDestAlphaGL:
+ return 7;
+ case Maxwell::Blend::Factor::DestColor:
+ case Maxwell::Blend::Factor::DestColorGL:
+ return 8;
+ case Maxwell::Blend::Factor::OneMinusDestColor:
+ case Maxwell::Blend::Factor::OneMinusDestColorGL:
+ return 9;
+ case Maxwell::Blend::Factor::SourceAlphaSaturate:
+ case Maxwell::Blend::Factor::SourceAlphaSaturateGL:
+ return 10;
+ case Maxwell::Blend::Factor::Source1Color:
+ case Maxwell::Blend::Factor::Source1ColorGL:
+ return 11;
+ case Maxwell::Blend::Factor::OneMinusSource1Color:
+ case Maxwell::Blend::Factor::OneMinusSource1ColorGL:
+ return 12;
+ case Maxwell::Blend::Factor::Source1Alpha:
+ case Maxwell::Blend::Factor::Source1AlphaGL:
+ return 13;
+ case Maxwell::Blend::Factor::OneMinusSource1Alpha:
+ case Maxwell::Blend::Factor::OneMinusSource1AlphaGL:
+ return 14;
+ case Maxwell::Blend::Factor::ConstantColor:
+ case Maxwell::Blend::Factor::ConstantColorGL:
+ return 15;
+ case Maxwell::Blend::Factor::OneMinusConstantColor:
+ case Maxwell::Blend::Factor::OneMinusConstantColorGL:
+ return 16;
+ case Maxwell::Blend::Factor::ConstantAlpha:
+ case Maxwell::Blend::Factor::ConstantAlphaGL:
+ return 17;
+ case Maxwell::Blend::Factor::OneMinusConstantAlpha:
+ case Maxwell::Blend::Factor::OneMinusConstantAlphaGL:
+ return 18;
+ }
+ return 0;
}
-FixedPipelineState GetFixedPipelineState(const Maxwell& regs) {
- FixedPipelineState fixed_state;
- fixed_state.input_assembly = GetInputAssemblyState(regs);
- fixed_state.tessellation = GetTessellationState(regs);
- fixed_state.rasterizer = GetRasterizerState(regs);
- fixed_state.depth_stencil = GetDepthStencilState(regs);
- fixed_state.color_blending = GetColorBlendingState(regs);
- return fixed_state;
+Maxwell::Blend::Factor FixedPipelineState::UnpackBlendFactor(u32 packed) noexcept {
+ static constexpr std::array LUT = {
+ Maxwell::Blend::Factor::Zero,
+ Maxwell::Blend::Factor::One,
+ Maxwell::Blend::Factor::SourceColor,
+ Maxwell::Blend::Factor::OneMinusSourceColor,
+ Maxwell::Blend::Factor::SourceAlpha,
+ Maxwell::Blend::Factor::OneMinusSourceAlpha,
+ Maxwell::Blend::Factor::DestAlpha,
+ Maxwell::Blend::Factor::OneMinusDestAlpha,
+ Maxwell::Blend::Factor::DestColor,
+ Maxwell::Blend::Factor::OneMinusDestColor,
+ Maxwell::Blend::Factor::SourceAlphaSaturate,
+ Maxwell::Blend::Factor::Source1Color,
+ Maxwell::Blend::Factor::OneMinusSource1Color,
+ Maxwell::Blend::Factor::Source1Alpha,
+ Maxwell::Blend::Factor::OneMinusSource1Alpha,
+ Maxwell::Blend::Factor::ConstantColor,
+ Maxwell::Blend::Factor::OneMinusConstantColor,
+ Maxwell::Blend::Factor::ConstantAlpha,
+ Maxwell::Blend::Factor::OneMinusConstantAlpha,
+ };
+ return LUT[packed];
}
} // namespace Vulkan
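The Pack*/Unpack* helpers above all exploit the same property: the register encodings come in two flavours (a small native value and a GL-style constant), and an offset subtraction folds both onto a dense zero-based range that fits in a few bits of the packed state. A minimal sketch with a made-up CompareOp enum (the values mirror the 1..8 / 0x200..0x207 ranges mentioned in the comments, but the type itself is hypothetical):

#include <cstdint>

enum class CompareOp : std::uint32_t {
    Never = 1, Less = 2,             // "native" encodings
    NeverGL = 0x200, LessGL = 0x201  // GL-style encodings of the same operations
};

constexpr std::uint32_t PackCompareOp(CompareOp op) noexcept {
    const auto value = static_cast<std::uint32_t>(op);
    return value - (value >= 0x200 ? 0x200 : 1); // both ranges collapse onto 0..7
}

constexpr CompareOp UnpackCompareOp(std::uint32_t packed) noexcept {
    return static_cast<CompareOp>(packed + 1); // one canonical encoding is enough on the way back
}

static_assert(PackCompareOp(CompareOp::Less) == PackCompareOp(CompareOp::LessGL));
static_assert(UnpackCompareOp(PackCompareOp(CompareOp::NeverGL)) == CompareOp::Never);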
diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.h b/src/video_core/renderer_vulkan/fixed_pipeline_state.h
index 4c8ba7f90..8652067a7 100644
--- a/src/video_core/renderer_vulkan/fixed_pipeline_state.h
+++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.h
@@ -7,6 +7,7 @@
#include <array>
#include <type_traits>
+#include "common/bit_field.h"
#include "common/common_types.h"
#include "video_core/engines/maxwell_3d.h"
@@ -16,93 +17,48 @@ namespace Vulkan {
using Maxwell = Tegra::Engines::Maxwell3D::Regs;
-// TODO(Rodrigo): Optimize this structure.
-
struct FixedPipelineState {
- using PixelFormat = VideoCore::Surface::PixelFormat;
-
- struct VertexBinding {
- constexpr VertexBinding(u32 index, u32 stride, u32 divisor)
- : index{index}, stride{stride}, divisor{divisor} {}
- VertexBinding() = default;
-
- u32 index;
- u32 stride;
- u32 divisor;
-
- std::size_t Hash() const noexcept;
+ static u32 PackComparisonOp(Maxwell::ComparisonOp op) noexcept;
+ static Maxwell::ComparisonOp UnpackComparisonOp(u32 packed) noexcept;
- bool operator==(const VertexBinding& rhs) const noexcept;
-
- bool operator!=(const VertexBinding& rhs) const noexcept {
- return !operator==(rhs);
- }
- };
+ static u32 PackStencilOp(Maxwell::StencilOp op) noexcept;
+ static Maxwell::StencilOp UnpackStencilOp(u32 packed) noexcept;
- struct VertexAttribute {
- constexpr VertexAttribute(u32 index, u32 buffer, Maxwell::VertexAttribute::Type type,
- Maxwell::VertexAttribute::Size size, u32 offset)
- : index{index}, buffer{buffer}, type{type}, size{size}, offset{offset} {}
- VertexAttribute() = default;
-
- u32 index;
- u32 buffer;
- Maxwell::VertexAttribute::Type type;
- Maxwell::VertexAttribute::Size size;
- u32 offset;
-
- std::size_t Hash() const noexcept;
-
- bool operator==(const VertexAttribute& rhs) const noexcept;
-
- bool operator!=(const VertexAttribute& rhs) const noexcept {
- return !operator==(rhs);
- }
- };
+ static u32 PackCullFace(Maxwell::CullFace cull) noexcept;
+ static Maxwell::CullFace UnpackCullFace(u32 packed) noexcept;
- struct StencilFace {
- constexpr StencilFace(Maxwell::StencilOp action_stencil_fail,
- Maxwell::StencilOp action_depth_fail,
- Maxwell::StencilOp action_depth_pass, Maxwell::ComparisonOp test_func)
- : action_stencil_fail{action_stencil_fail}, action_depth_fail{action_depth_fail},
- action_depth_pass{action_depth_pass}, test_func{test_func} {}
- StencilFace() = default;
+ static u32 PackFrontFace(Maxwell::FrontFace face) noexcept;
+ static Maxwell::FrontFace UnpackFrontFace(u32 packed) noexcept;
- Maxwell::StencilOp action_stencil_fail;
- Maxwell::StencilOp action_depth_fail;
- Maxwell::StencilOp action_depth_pass;
- Maxwell::ComparisonOp test_func;
+ static u32 PackPolygonMode(Maxwell::PolygonMode mode) noexcept;
+ static Maxwell::PolygonMode UnpackPolygonMode(u32 packed) noexcept;
- std::size_t Hash() const noexcept;
+ static u32 PackLogicOp(Maxwell::LogicOperation op) noexcept;
+ static Maxwell::LogicOperation UnpackLogicOp(u32 packed) noexcept;
- bool operator==(const StencilFace& rhs) const noexcept;
+ static u32 PackBlendEquation(Maxwell::Blend::Equation equation) noexcept;
+ static Maxwell::Blend::Equation UnpackBlendEquation(u32 packed) noexcept;
- bool operator!=(const StencilFace& rhs) const noexcept {
- return !operator==(rhs);
- }
- };
+ static u32 PackBlendFactor(Maxwell::Blend::Factor factor) noexcept;
+ static Maxwell::Blend::Factor UnpackBlendFactor(u32 packed) noexcept;
struct BlendingAttachment {
- constexpr BlendingAttachment(bool enable, Maxwell::Blend::Equation rgb_equation,
- Maxwell::Blend::Factor src_rgb_func,
- Maxwell::Blend::Factor dst_rgb_func,
- Maxwell::Blend::Equation a_equation,
- Maxwell::Blend::Factor src_a_func,
- Maxwell::Blend::Factor dst_a_func,
- std::array<bool, 4> components)
- : enable{enable}, rgb_equation{rgb_equation}, src_rgb_func{src_rgb_func},
- dst_rgb_func{dst_rgb_func}, a_equation{a_equation}, src_a_func{src_a_func},
- dst_a_func{dst_a_func}, components{components} {}
- BlendingAttachment() = default;
-
- bool enable;
- Maxwell::Blend::Equation rgb_equation;
- Maxwell::Blend::Factor src_rgb_func;
- Maxwell::Blend::Factor dst_rgb_func;
- Maxwell::Blend::Equation a_equation;
- Maxwell::Blend::Factor src_a_func;
- Maxwell::Blend::Factor dst_a_func;
- std::array<bool, 4> components;
+ union {
+ u32 raw;
+ BitField<0, 1, u32> mask_r;
+ BitField<1, 1, u32> mask_g;
+ BitField<2, 1, u32> mask_b;
+ BitField<3, 1, u32> mask_a;
+ BitField<4, 3, u32> equation_rgb;
+ BitField<7, 3, u32> equation_a;
+ BitField<10, 5, u32> factor_source_rgb;
+ BitField<15, 5, u32> factor_dest_rgb;
+ BitField<20, 5, u32> factor_source_a;
+ BitField<25, 5, u32> factor_dest_a;
+ BitField<30, 1, u32> enable;
+ };
+
+ void Fill(const Maxwell& regs, std::size_t index);
std::size_t Hash() const noexcept;
@@ -111,135 +67,178 @@ struct FixedPipelineState {
bool operator!=(const BlendingAttachment& rhs) const noexcept {
return !operator==(rhs);
}
- };
-
- struct VertexInput {
- std::size_t num_bindings = 0;
- std::size_t num_attributes = 0;
- std::array<VertexBinding, Maxwell::NumVertexArrays> bindings;
- std::array<VertexAttribute, Maxwell::NumVertexAttributes> attributes;
-
- std::size_t Hash() const noexcept;
- bool operator==(const VertexInput& rhs) const noexcept;
+ constexpr std::array<bool, 4> Mask() const noexcept {
+ return {mask_r != 0, mask_g != 0, mask_b != 0, mask_a != 0};
+ }
- bool operator!=(const VertexInput& rhs) const noexcept {
- return !operator==(rhs);
+ Maxwell::Blend::Equation EquationRGB() const noexcept {
+ return UnpackBlendEquation(equation_rgb.Value());
}
- };
- struct InputAssembly {
- constexpr InputAssembly(Maxwell::PrimitiveTopology topology, bool primitive_restart_enable,
- float point_size)
- : topology{topology}, primitive_restart_enable{primitive_restart_enable},
- point_size{point_size} {}
- InputAssembly() = default;
+ Maxwell::Blend::Equation EquationAlpha() const noexcept {
+ return UnpackBlendEquation(equation_a.Value());
+ }
- Maxwell::PrimitiveTopology topology;
- bool primitive_restart_enable;
- float point_size;
+ Maxwell::Blend::Factor SourceRGBFactor() const noexcept {
+ return UnpackBlendFactor(factor_source_rgb.Value());
+ }
- std::size_t Hash() const noexcept;
+ Maxwell::Blend::Factor DestRGBFactor() const noexcept {
+ return UnpackBlendFactor(factor_dest_rgb.Value());
+ }
- bool operator==(const InputAssembly& rhs) const noexcept;
+ Maxwell::Blend::Factor SourceAlphaFactor() const noexcept {
+ return UnpackBlendFactor(factor_source_a.Value());
+ }
- bool operator!=(const InputAssembly& rhs) const noexcept {
- return !operator==(rhs);
+ Maxwell::Blend::Factor DestAlphaFactor() const noexcept {
+ return UnpackBlendFactor(factor_dest_a.Value());
}
};
- struct Tessellation {
- constexpr Tessellation(u32 patch_control_points, Maxwell::TessellationPrimitive primitive,
- Maxwell::TessellationSpacing spacing, bool clockwise)
- : patch_control_points{patch_control_points}, primitive{primitive}, spacing{spacing},
- clockwise{clockwise} {}
- Tessellation() = default;
-
- u32 patch_control_points;
- Maxwell::TessellationPrimitive primitive;
- Maxwell::TessellationSpacing spacing;
- bool clockwise;
-
- std::size_t Hash() const noexcept;
-
- bool operator==(const Tessellation& rhs) const noexcept;
+ struct VertexInput {
+ union Binding {
+ u16 raw;
+ BitField<0, 1, u16> enabled;
+ BitField<1, 12, u16> stride;
+ };
+
+ union Attribute {
+ u32 raw;
+ BitField<0, 1, u32> enabled;
+ BitField<1, 5, u32> buffer;
+ BitField<6, 14, u32> offset;
+ BitField<20, 3, u32> type;
+ BitField<23, 6, u32> size;
+
+ constexpr Maxwell::VertexAttribute::Type Type() const noexcept {
+ return static_cast<Maxwell::VertexAttribute::Type>(type.Value());
+ }
+
+ constexpr Maxwell::VertexAttribute::Size Size() const noexcept {
+ return static_cast<Maxwell::VertexAttribute::Size>(size.Value());
+ }
+ };
+
+ std::array<Binding, Maxwell::NumVertexArrays> bindings;
+ std::array<u32, Maxwell::NumVertexArrays> binding_divisors;
+ std::array<Attribute, Maxwell::NumVertexAttributes> attributes;
+
+ void SetBinding(std::size_t index, bool enabled, u32 stride, u32 divisor) noexcept {
+ auto& binding = bindings[index];
+ binding.raw = 0;
+ binding.enabled.Assign(enabled ? 1 : 0);
+ binding.stride.Assign(static_cast<u16>(stride));
+ binding_divisors[index] = divisor;
+ }
- bool operator!=(const Tessellation& rhs) const noexcept {
- return !operator==(rhs);
+ void SetAttribute(std::size_t index, bool enabled, u32 buffer, u32 offset,
+ Maxwell::VertexAttribute::Type type,
+ Maxwell::VertexAttribute::Size size) noexcept {
+ auto& attribute = attributes[index];
+ attribute.raw = 0;
+ attribute.enabled.Assign(enabled ? 1 : 0);
+ attribute.buffer.Assign(buffer);
+ attribute.offset.Assign(offset);
+ attribute.type.Assign(static_cast<u32>(type));
+ attribute.size.Assign(static_cast<u32>(size));
}
};
struct Rasterizer {
- constexpr Rasterizer(bool cull_enable, bool depth_bias_enable, bool depth_clamp_enable,
- bool ndc_minus_one_to_one, Maxwell::CullFace cull_face,
- Maxwell::FrontFace front_face)
- : cull_enable{cull_enable}, depth_bias_enable{depth_bias_enable},
- depth_clamp_enable{depth_clamp_enable}, ndc_minus_one_to_one{ndc_minus_one_to_one},
- cull_face{cull_face}, front_face{front_face} {}
- Rasterizer() = default;
-
- bool cull_enable;
- bool depth_bias_enable;
- bool depth_clamp_enable;
- bool ndc_minus_one_to_one;
- Maxwell::CullFace cull_face;
- Maxwell::FrontFace front_face;
-
- std::size_t Hash() const noexcept;
+ union {
+ u32 raw;
+ BitField<0, 4, u32> topology;
+ BitField<4, 1, u32> primitive_restart_enable;
+ BitField<5, 1, u32> cull_enable;
+ BitField<6, 1, u32> depth_bias_enable;
+ BitField<7, 1, u32> depth_clamp_disabled;
+ BitField<8, 1, u32> ndc_minus_one_to_one;
+ BitField<9, 2, u32> cull_face;
+ BitField<11, 1, u32> front_face;
+ BitField<12, 2, u32> polygon_mode;
+ BitField<14, 5, u32> patch_control_points_minus_one;
+ BitField<19, 2, u32> tessellation_primitive;
+ BitField<21, 2, u32> tessellation_spacing;
+ BitField<23, 1, u32> tessellation_clockwise;
+ BitField<24, 1, u32> logic_op_enable;
+ BitField<25, 4, u32> logic_op;
+ };
+
+ // TODO(Rodrigo): Move this to push constants
+ u32 point_size;
+
+ void Fill(const Maxwell& regs) noexcept;
+
+ constexpr Maxwell::PrimitiveTopology Topology() const noexcept {
+ return static_cast<Maxwell::PrimitiveTopology>(topology.Value());
+ }
- bool operator==(const Rasterizer& rhs) const noexcept;
+ Maxwell::CullFace CullFace() const noexcept {
+ return UnpackCullFace(cull_face.Value());
+ }
- bool operator!=(const Rasterizer& rhs) const noexcept {
- return !operator==(rhs);
+ Maxwell::FrontFace FrontFace() const noexcept {
+ return UnpackFrontFace(front_face.Value());
}
};
struct DepthStencil {
- constexpr DepthStencil(bool depth_test_enable, bool depth_write_enable,
- bool depth_bounds_enable, bool stencil_enable,
- Maxwell::ComparisonOp depth_test_function, StencilFace front_stencil,
- StencilFace back_stencil)
- : depth_test_enable{depth_test_enable}, depth_write_enable{depth_write_enable},
- depth_bounds_enable{depth_bounds_enable}, stencil_enable{stencil_enable},
- depth_test_function{depth_test_function}, front_stencil{front_stencil},
- back_stencil{back_stencil} {}
- DepthStencil() = default;
-
- bool depth_test_enable;
- bool depth_write_enable;
- bool depth_bounds_enable;
- bool stencil_enable;
- Maxwell::ComparisonOp depth_test_function;
- StencilFace front_stencil;
- StencilFace back_stencil;
-
- std::size_t Hash() const noexcept;
-
- bool operator==(const DepthStencil& rhs) const noexcept;
-
- bool operator!=(const DepthStencil& rhs) const noexcept {
- return !operator==(rhs);
+ template <std::size_t Position>
+ union StencilFace {
+ BitField<Position + 0, 3, u32> action_stencil_fail;
+ BitField<Position + 3, 3, u32> action_depth_fail;
+ BitField<Position + 6, 3, u32> action_depth_pass;
+ BitField<Position + 9, 3, u32> test_func;
+
+ Maxwell::StencilOp ActionStencilFail() const noexcept {
+ return UnpackStencilOp(action_stencil_fail);
+ }
+
+ Maxwell::StencilOp ActionDepthFail() const noexcept {
+ return UnpackStencilOp(action_depth_fail);
+ }
+
+ Maxwell::StencilOp ActionDepthPass() const noexcept {
+ return UnpackStencilOp(action_depth_pass);
+ }
+
+ Maxwell::ComparisonOp TestFunc() const noexcept {
+ return UnpackComparisonOp(test_func);
+ }
+ };
+
+ union {
+ u32 raw;
+ StencilFace<0> front;
+ StencilFace<12> back;
+ BitField<24, 1, u32> depth_test_enable;
+ BitField<25, 1, u32> depth_write_enable;
+ BitField<26, 1, u32> depth_bounds_enable;
+ BitField<27, 1, u32> stencil_enable;
+ BitField<28, 3, u32> depth_test_func;
+ };
+
+ void Fill(const Maxwell& regs) noexcept;
+
+ Maxwell::ComparisonOp DepthTestFunc() const noexcept {
+ return UnpackComparisonOp(depth_test_func);
}
};
struct ColorBlending {
- constexpr ColorBlending(
- std::array<float, 4> blend_constants, std::size_t attachments_count,
- std::array<BlendingAttachment, Maxwell::NumRenderTargets> attachments)
- : attachments_count{attachments_count}, attachments{attachments} {}
- ColorBlending() = default;
-
- std::size_t attachments_count;
std::array<BlendingAttachment, Maxwell::NumRenderTargets> attachments;
- std::size_t Hash() const noexcept;
+ void Fill(const Maxwell& regs) noexcept;
+ };
- bool operator==(const ColorBlending& rhs) const noexcept;
+ VertexInput vertex_input;
+ Rasterizer rasterizer;
+ DepthStencil depth_stencil;
+ ColorBlending color_blending;
- bool operator!=(const ColorBlending& rhs) const noexcept {
- return !operator==(rhs);
- }
- };
+ void Fill(const Maxwell& regs);
std::size_t Hash() const noexcept;
@@ -248,27 +247,10 @@ struct FixedPipelineState {
bool operator!=(const FixedPipelineState& rhs) const noexcept {
return !operator==(rhs);
}
-
- VertexInput vertex_input;
- InputAssembly input_assembly;
- Tessellation tessellation;
- Rasterizer rasterizer;
- DepthStencil depth_stencil;
- ColorBlending color_blending;
};
-static_assert(std::is_trivially_copyable_v<FixedPipelineState::VertexBinding>);
-static_assert(std::is_trivially_copyable_v<FixedPipelineState::VertexAttribute>);
-static_assert(std::is_trivially_copyable_v<FixedPipelineState::StencilFace>);
-static_assert(std::is_trivially_copyable_v<FixedPipelineState::BlendingAttachment>);
-static_assert(std::is_trivially_copyable_v<FixedPipelineState::VertexInput>);
-static_assert(std::is_trivially_copyable_v<FixedPipelineState::InputAssembly>);
-static_assert(std::is_trivially_copyable_v<FixedPipelineState::Tessellation>);
-static_assert(std::is_trivially_copyable_v<FixedPipelineState::Rasterizer>);
-static_assert(std::is_trivially_copyable_v<FixedPipelineState::DepthStencil>);
-static_assert(std::is_trivially_copyable_v<FixedPipelineState::ColorBlending>);
+static_assert(std::has_unique_object_representations_v<FixedPipelineState>);
static_assert(std::is_trivially_copyable_v<FixedPipelineState>);
-
-FixedPipelineState GetFixedPipelineState(const Maxwell& regs);
+static_assert(std::is_trivially_constructible_v<FixedPipelineState>);
} // namespace Vulkan
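
The two static_asserts just above are what make byte-wise hashing and memcmp-based equality of FixedPipelineState well defined. A minimal standalone sketch of the idea, using a hypothetical PackedState instead of the real BitField union:

    #include <cstdint>
    #include <cstring>
    #include <type_traits>

    struct PackedState {
        std::uint32_t raw; // every field lives in these bits, analogous to the BitField union

        bool operator==(const PackedState& rhs) const noexcept {
            return std::memcmp(this, &rhs, sizeof *this) == 0; // sound: no padding bits exist
        }
    };
    static_assert(std::has_unique_object_representations_v<PackedState>);
    static_assert(std::is_trivially_copyable_v<PackedState>);
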
diff --git a/src/video_core/renderer_vulkan/nsight_aftermath_tracker.cpp b/src/video_core/renderer_vulkan/nsight_aftermath_tracker.cpp
new file mode 100644
index 000000000..435c8c1b8
--- /dev/null
+++ b/src/video_core/renderer_vulkan/nsight_aftermath_tracker.cpp
@@ -0,0 +1,220 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#ifdef HAS_NSIGHT_AFTERMATH
+
+#include <mutex>
+#include <string>
+#include <string_view>
+#include <utility>
+#include <vector>
+
+#include <fmt/format.h>
+
+#define VK_NO_PROTOTYPES
+#include <vulkan/vulkan.h>
+
+#include <GFSDK_Aftermath.h>
+#include <GFSDK_Aftermath_Defines.h>
+#include <GFSDK_Aftermath_GpuCrashDump.h>
+#include <GFSDK_Aftermath_GpuCrashDumpDecoding.h>
+
+#include "common/common_paths.h"
+#include "common/common_types.h"
+#include "common/file_util.h"
+#include "common/logging/log.h"
+#include "common/scope_exit.h"
+
+#include "video_core/renderer_vulkan/nsight_aftermath_tracker.h"
+
+namespace Vulkan {
+
+static constexpr char AFTERMATH_LIB_NAME[] = "GFSDK_Aftermath_Lib.x64.dll";
+
+NsightAftermathTracker::NsightAftermathTracker() = default;
+
+NsightAftermathTracker::~NsightAftermathTracker() {
+ if (initialized) {
+ (void)GFSDK_Aftermath_DisableGpuCrashDumps();
+ }
+}
+
+bool NsightAftermathTracker::Initialize() {
+ if (!dl.Open(AFTERMATH_LIB_NAME)) {
+ LOG_ERROR(Render_Vulkan, "Failed to load Nsight Aftermath DLL");
+ return false;
+ }
+
+ if (!dl.GetSymbol("GFSDK_Aftermath_DisableGpuCrashDumps",
+ &GFSDK_Aftermath_DisableGpuCrashDumps) ||
+ !dl.GetSymbol("GFSDK_Aftermath_EnableGpuCrashDumps",
+ &GFSDK_Aftermath_EnableGpuCrashDumps) ||
+ !dl.GetSymbol("GFSDK_Aftermath_GetShaderDebugInfoIdentifier",
+ &GFSDK_Aftermath_GetShaderDebugInfoIdentifier) ||
+ !dl.GetSymbol("GFSDK_Aftermath_GetShaderHashSpirv", &GFSDK_Aftermath_GetShaderHashSpirv) ||
+ !dl.GetSymbol("GFSDK_Aftermath_GpuCrashDump_CreateDecoder",
+ &GFSDK_Aftermath_GpuCrashDump_CreateDecoder) ||
+ !dl.GetSymbol("GFSDK_Aftermath_GpuCrashDump_DestroyDecoder",
+ &GFSDK_Aftermath_GpuCrashDump_DestroyDecoder) ||
+ !dl.GetSymbol("GFSDK_Aftermath_GpuCrashDump_GenerateJSON",
+ &GFSDK_Aftermath_GpuCrashDump_GenerateJSON) ||
+ !dl.GetSymbol("GFSDK_Aftermath_GpuCrashDump_GetJSON",
+ &GFSDK_Aftermath_GpuCrashDump_GetJSON)) {
+ LOG_ERROR(Render_Vulkan, "Failed to load Nsight Aftermath function pointers");
+ return false;
+ }
+
+ dump_dir = FileUtil::GetUserPath(FileUtil::UserPath::LogDir) + "gpucrash";
+
+ (void)FileUtil::DeleteDirRecursively(dump_dir);
+ if (!FileUtil::CreateDir(dump_dir)) {
+ LOG_ERROR(Render_Vulkan, "Failed to create Nsight Aftermath dump directory");
+ return false;
+ }
+
+ if (!GFSDK_Aftermath_SUCCEED(GFSDK_Aftermath_EnableGpuCrashDumps(
+ GFSDK_Aftermath_Version_API, GFSDK_Aftermath_GpuCrashDumpWatchedApiFlags_Vulkan,
+ GFSDK_Aftermath_GpuCrashDumpFeatureFlags_Default, GpuCrashDumpCallback,
+ ShaderDebugInfoCallback, CrashDumpDescriptionCallback, this))) {
+ LOG_ERROR(Render_Vulkan, "GFSDK_Aftermath_EnableGpuCrashDumps failed");
+ return false;
+ }
+
+ LOG_INFO(Render_Vulkan, "Nsight Aftermath dump directory is \"{}\"", dump_dir);
+
+ initialized = true;
+ return true;
+}
+
+void NsightAftermathTracker::SaveShader(const std::vector<u32>& spirv) const {
+ if (!initialized) {
+ return;
+ }
+
+ std::vector<u32> spirv_copy = spirv;
+ GFSDK_Aftermath_SpirvCode shader;
+ shader.pData = spirv_copy.data();
+ shader.size = static_cast<u32>(spirv_copy.size() * 4);
+
+ std::scoped_lock lock{mutex};
+
+ GFSDK_Aftermath_ShaderHash hash;
+ if (!GFSDK_Aftermath_SUCCEED(
+ GFSDK_Aftermath_GetShaderHashSpirv(GFSDK_Aftermath_Version_API, &shader, &hash))) {
+ LOG_ERROR(Render_Vulkan, "Failed to hash SPIR-V module");
+ return;
+ }
+
+ FileUtil::IOFile file(fmt::format("{}/source_{:016x}.spv", dump_dir, hash.hash), "wb");
+ if (!file.IsOpen()) {
+ LOG_ERROR(Render_Vulkan, "Failed to dump SPIR-V module with hash={:016x}", hash.hash);
+ return;
+ }
+ if (file.WriteArray(spirv.data(), spirv.size()) != spirv.size()) {
+ LOG_ERROR(Render_Vulkan, "Failed to write SPIR-V module with hash={:016x}", hash.hash);
+ return;
+ }
+}
+
+void NsightAftermathTracker::OnGpuCrashDumpCallback(const void* gpu_crash_dump,
+ u32 gpu_crash_dump_size) {
+ std::scoped_lock lock{mutex};
+
+ LOG_CRITICAL(Render_Vulkan, "called");
+
+ GFSDK_Aftermath_GpuCrashDump_Decoder decoder;
+ if (!GFSDK_Aftermath_SUCCEED(GFSDK_Aftermath_GpuCrashDump_CreateDecoder(
+ GFSDK_Aftermath_Version_API, gpu_crash_dump, gpu_crash_dump_size, &decoder))) {
+ LOG_ERROR(Render_Vulkan, "Failed to create decoder");
+ return;
+ }
+ SCOPE_EXIT({ GFSDK_Aftermath_GpuCrashDump_DestroyDecoder(decoder); });
+
+ u32 json_size = 0;
+ if (!GFSDK_Aftermath_SUCCEED(GFSDK_Aftermath_GpuCrashDump_GenerateJSON(
+ decoder, GFSDK_Aftermath_GpuCrashDumpDecoderFlags_ALL_INFO,
+ GFSDK_Aftermath_GpuCrashDumpFormatterFlags_NONE, nullptr, nullptr, nullptr, nullptr,
+ this, &json_size))) {
+ LOG_ERROR(Render_Vulkan, "Failed to generate JSON");
+ return;
+ }
+ std::vector<char> json(json_size);
+ if (!GFSDK_Aftermath_SUCCEED(
+ GFSDK_Aftermath_GpuCrashDump_GetJSON(decoder, json_size, json.data()))) {
+ LOG_ERROR(Render_Vulkan, "Failed to query JSON");
+ return;
+ }
+
+ const std::string base_name = [this] {
+ const int id = dump_id++;
+ if (id == 0) {
+ return fmt::format("{}/crash.nv-gpudmp", dump_dir);
+ } else {
+ return fmt::format("{}/crash_{}.nv-gpudmp", dump_dir, id);
+ }
+ }();
+
+ std::string_view dump_view(static_cast<const char*>(gpu_crash_dump), gpu_crash_dump_size);
+ if (FileUtil::WriteStringToFile(false, base_name, dump_view) != gpu_crash_dump_size) {
+ LOG_ERROR(Render_Vulkan, "Failed to write dump file");
+ return;
+ }
+ const std::string_view json_view(json.data(), json.size());
+ if (FileUtil::WriteStringToFile(true, base_name + ".json", json_view) != json.size()) {
+ LOG_ERROR(Render_Vulkan, "Failed to write JSON");
+ return;
+ }
+}
+
+void NsightAftermathTracker::OnShaderDebugInfoCallback(const void* shader_debug_info,
+ u32 shader_debug_info_size) {
+ std::scoped_lock lock{mutex};
+
+ GFSDK_Aftermath_ShaderDebugInfoIdentifier identifier;
+ if (!GFSDK_Aftermath_SUCCEED(GFSDK_Aftermath_GetShaderDebugInfoIdentifier(
+ GFSDK_Aftermath_Version_API, shader_debug_info, shader_debug_info_size, &identifier))) {
+ LOG_ERROR(Render_Vulkan, "GFSDK_Aftermath_GetShaderDebugInfoIdentifier failed");
+ return;
+ }
+
+ const std::string path =
+ fmt::format("{}/shader_{:016x}{:016x}.nvdbg", dump_dir, identifier.id[0], identifier.id[1]);
+ FileUtil::IOFile file(path, "wb");
+ if (!file.IsOpen()) {
+ LOG_ERROR(Render_Vulkan, "Failed to create file {}", path);
+ return;
+ }
+ if (file.WriteBytes(static_cast<const u8*>(shader_debug_info), shader_debug_info_size) !=
+ shader_debug_info_size) {
+ LOG_ERROR(Render_Vulkan, "Failed to write file {}", path);
+ return;
+ }
+}
+
+void NsightAftermathTracker::OnCrashDumpDescriptionCallback(
+ PFN_GFSDK_Aftermath_AddGpuCrashDumpDescription add_description) {
+ add_description(GFSDK_Aftermath_GpuCrashDumpDescriptionKey_ApplicationName, "yuzu");
+}
+
+void NsightAftermathTracker::GpuCrashDumpCallback(const void* gpu_crash_dump,
+ u32 gpu_crash_dump_size, void* user_data) {
+ static_cast<NsightAftermathTracker*>(user_data)->OnGpuCrashDumpCallback(gpu_crash_dump,
+ gpu_crash_dump_size);
+}
+
+void NsightAftermathTracker::ShaderDebugInfoCallback(const void* shader_debug_info,
+ u32 shader_debug_info_size, void* user_data) {
+ static_cast<NsightAftermathTracker*>(user_data)->OnShaderDebugInfoCallback(
+ shader_debug_info, shader_debug_info_size);
+}
+
+void NsightAftermathTracker::CrashDumpDescriptionCallback(
+ PFN_GFSDK_Aftermath_AddGpuCrashDumpDescription add_description, void* user_data) {
+ static_cast<NsightAftermathTracker*>(user_data)->OnCrashDumpDescriptionCallback(
+ add_description);
+}
+
+} // namespace Vulkan
+
+#endif // HAS_NSIGHT_AFTERMATH
diff --git a/src/video_core/renderer_vulkan/nsight_aftermath_tracker.h b/src/video_core/renderer_vulkan/nsight_aftermath_tracker.h
new file mode 100644
index 000000000..afe7ae99e
--- /dev/null
+++ b/src/video_core/renderer_vulkan/nsight_aftermath_tracker.h
@@ -0,0 +1,87 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <mutex>
+#include <string>
+#include <vector>
+
+#define VK_NO_PROTOTYPES
+#include <vulkan/vulkan.h>
+
+#ifdef HAS_NSIGHT_AFTERMATH
+#include <GFSDK_Aftermath_Defines.h>
+#include <GFSDK_Aftermath_GpuCrashDump.h>
+#include <GFSDK_Aftermath_GpuCrashDumpDecoding.h>
+#endif
+
+#include "common/common_types.h"
+#include "common/dynamic_library.h"
+
+namespace Vulkan {
+
+class NsightAftermathTracker {
+public:
+ NsightAftermathTracker();
+ ~NsightAftermathTracker();
+
+ NsightAftermathTracker(const NsightAftermathTracker&) = delete;
+ NsightAftermathTracker& operator=(const NsightAftermathTracker&) = delete;
+
+ // Delete move semantics because Aftermath initialization uses a pointer to this.
+ NsightAftermathTracker(NsightAftermathTracker&&) = delete;
+ NsightAftermathTracker& operator=(NsightAftermathTracker&&) = delete;
+
+ bool Initialize();
+
+ void SaveShader(const std::vector<u32>& spirv) const;
+
+private:
+#ifdef HAS_NSIGHT_AFTERMATH
+ static void GpuCrashDumpCallback(const void* gpu_crash_dump, u32 gpu_crash_dump_size,
+ void* user_data);
+
+ static void ShaderDebugInfoCallback(const void* shader_debug_info, u32 shader_debug_info_size,
+ void* user_data);
+
+ static void CrashDumpDescriptionCallback(
+ PFN_GFSDK_Aftermath_AddGpuCrashDumpDescription add_description, void* user_data);
+
+ void OnGpuCrashDumpCallback(const void* gpu_crash_dump, u32 gpu_crash_dump_size);
+
+ void OnShaderDebugInfoCallback(const void* shader_debug_info, u32 shader_debug_info_size);
+
+ void OnCrashDumpDescriptionCallback(
+ PFN_GFSDK_Aftermath_AddGpuCrashDumpDescription add_description);
+
+ mutable std::mutex mutex;
+
+ std::string dump_dir;
+ int dump_id = 0;
+
+ bool initialized = false;
+
+ Common::DynamicLibrary dl;
+ PFN_GFSDK_Aftermath_DisableGpuCrashDumps GFSDK_Aftermath_DisableGpuCrashDumps;
+ PFN_GFSDK_Aftermath_EnableGpuCrashDumps GFSDK_Aftermath_EnableGpuCrashDumps;
+ PFN_GFSDK_Aftermath_GetShaderDebugInfoIdentifier GFSDK_Aftermath_GetShaderDebugInfoIdentifier;
+ PFN_GFSDK_Aftermath_GetShaderHashSpirv GFSDK_Aftermath_GetShaderHashSpirv;
+ PFN_GFSDK_Aftermath_GpuCrashDump_CreateDecoder GFSDK_Aftermath_GpuCrashDump_CreateDecoder;
+ PFN_GFSDK_Aftermath_GpuCrashDump_DestroyDecoder GFSDK_Aftermath_GpuCrashDump_DestroyDecoder;
+ PFN_GFSDK_Aftermath_GpuCrashDump_GenerateJSON GFSDK_Aftermath_GpuCrashDump_GenerateJSON;
+ PFN_GFSDK_Aftermath_GpuCrashDump_GetJSON GFSDK_Aftermath_GpuCrashDump_GetJSON;
+#endif
+};
+
+#ifndef HAS_NSIGHT_AFTERMATH
+inline NsightAftermathTracker::NsightAftermathTracker() = default;
+inline NsightAftermathTracker::~NsightAftermathTracker() = default;
+inline bool NsightAftermathTracker::Initialize() {
+ return false;
+}
+inline void NsightAftermathTracker::SaveShader(const std::vector<u32>&) const {}
+#endif
+
+} // namespace Vulkan
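
A hypothetical call site, not taken from the patch, showing how the tracker is meant to be driven; in this change the real integration lives in VKDevice::Create and VKDevice::SaveShader:

    #include <vector>
    #include "common/common_types.h"
    #include "video_core/renderer_vulkan/nsight_aftermath_tracker.h"

    void TrackShaders(const std::vector<std::vector<u32>>& spirv_modules) {
        Vulkan::NsightAftermathTracker tracker;
        if (!tracker.Initialize()) {
            return; // Aftermath DLL missing or HAS_NSIGHT_AFTERMATH not defined; calls become no-ops
        }
        for (const auto& spirv : spirv_modules) {
            tracker.SaveShader(spirv); // dumped as source_<hash>.spv inside the gpucrash directory
        }
    }
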
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
index 04532f8f8..59b441943 100644
--- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp
+++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
@@ -12,15 +12,12 @@
#include <fmt/format.h>
-#include "common/assert.h"
#include "common/dynamic_library.h"
#include "common/logging/log.h"
#include "common/telemetry.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/frontend/emu_window.h"
-#include "core/memory.h"
-#include "core/perf_stats.h"
#include "core/settings.h"
#include "core/telemetry_session.h"
#include "video_core/gpu.h"
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.h b/src/video_core/renderer_vulkan/renderer_vulkan.h
index 18270909b..522b5bff8 100644
--- a/src/video_core/renderer_vulkan/renderer_vulkan.h
+++ b/src/video_core/renderer_vulkan/renderer_vulkan.h
@@ -5,7 +5,6 @@
#pragma once
#include <memory>
-#include <optional>
#include <string>
#include <vector>
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.h b/src/video_core/renderer_vulkan/vk_blit_screen.h
index 5eb544aea..243640fab 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.h
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.h
@@ -4,7 +4,6 @@
#pragma once
-#include <array>
#include <memory>
#include <tuple>
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
index 81e1de2be..5b494da8c 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
@@ -5,11 +5,7 @@
#include <algorithm>
#include <cstring>
#include <memory>
-#include <optional>
-#include <tuple>
-#include "common/assert.h"
-#include "common/bit_util.h"
#include "core/core.h"
#include "video_core/renderer_vulkan/vk_buffer_cache.h"
#include "video_core/renderer_vulkan/vk_device.h"
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h
index 3cd2e2774..a54583e7d 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.h
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h
@@ -5,14 +5,11 @@
#pragma once
#include <memory>
-#include <unordered_map>
-#include <vector>
#include "common/common_types.h"
#include "video_core/buffer_cache/buffer_cache.h"
#include "video_core/rasterizer_cache.h"
#include "video_core/renderer_vulkan/vk_memory_manager.h"
-#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
#include "video_core/renderer_vulkan/vk_stream_buffer.h"
#include "video_core/renderer_vulkan/wrapper.h"
@@ -55,8 +52,6 @@ public:
protected:
VkBuffer ToHandle(const Buffer& buffer) override;
- void WriteBarrier() override {}
-
Buffer CreateBlock(VAddr cpu_addr, std::size_t size) override;
void UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
index 7b0268033..da71e710c 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
@@ -6,7 +6,7 @@
#include <memory>
#include <optional>
#include <utility>
-#include <vector>
+
#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.h b/src/video_core/renderer_vulkan/vk_compute_pass.h
index 26bf834de..230b526bc 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.h
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.h
@@ -6,7 +6,7 @@
#include <optional>
#include <utility>
-#include <vector>
+
#include "common/common_types.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
index 23beafa4f..8e1b46277 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
@@ -2,14 +2,12 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
-#include <memory>
#include <vector>
#include "video_core/renderer_vulkan/vk_compute_pipeline.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
#include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
-#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
@@ -105,6 +103,8 @@ vk::DescriptorUpdateTemplateKHR VKComputePipeline::CreateDescriptorUpdateTemplat
}
vk::ShaderModule VKComputePipeline::CreateShaderModule(const std::vector<u32>& code) const {
+ device.SaveShader(code);
+
VkShaderModuleCreateInfo ci;
ci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
ci.pNext = nullptr;
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.h b/src/video_core/renderer_vulkan/vk_compute_pipeline.h
index 33b9af29e..6e2f22a4a 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.h
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.h
@@ -4,8 +4,6 @@
#pragma once
-#include <memory>
-
#include "common/common_types.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
diff --git a/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp b/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
index e9d528aa6..890fd52cf 100644
--- a/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
@@ -2,7 +2,6 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
-#include <memory>
#include <vector>
#include "common/common_types.h"
diff --git a/src/video_core/renderer_vulkan/vk_descriptor_pool.h b/src/video_core/renderer_vulkan/vk_descriptor_pool.h
index ab40c70f0..9efa66bef 100644
--- a/src/video_core/renderer_vulkan/vk_descriptor_pool.h
+++ b/src/video_core/renderer_vulkan/vk_descriptor_pool.h
@@ -4,10 +4,8 @@
#pragma once
-#include <memory>
#include <vector>
-#include "common/common_types.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/wrapper.h"
diff --git a/src/video_core/renderer_vulkan/vk_device.cpp b/src/video_core/renderer_vulkan/vk_device.cpp
index 52d29e49d..0e4bbca97 100644
--- a/src/video_core/renderer_vulkan/vk_device.cpp
+++ b/src/video_core/renderer_vulkan/vk_device.cpp
@@ -4,11 +4,11 @@
#include <bitset>
#include <chrono>
-#include <cstdlib>
#include <optional>
#include <string_view>
#include <thread>
#include <unordered_set>
+#include <utility>
#include <vector>
#include "common/assert.h"
@@ -167,6 +167,7 @@ bool VKDevice::Create() {
VkPhysicalDeviceFeatures2 features2;
features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
features2.pNext = nullptr;
+ const void* first_next = &features2;
void** next = &features2.pNext;
auto& features = features2.features;
@@ -296,7 +297,19 @@ bool VKDevice::Create() {
LOG_INFO(Render_Vulkan, "Device doesn't support depth range unrestricted");
}
- logical = vk::Device::Create(physical, queue_cis, extensions, features2, dld);
+ VkDeviceDiagnosticsConfigCreateInfoNV diagnostics_nv;
+ if (nv_device_diagnostics_config) {
+ nsight_aftermath_tracker.Initialize();
+
+ diagnostics_nv.sType = VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV;
+ diagnostics_nv.pNext = &features2;
+ diagnostics_nv.flags = VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_SHADER_DEBUG_INFO_BIT_NV |
+ VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_RESOURCE_TRACKING_BIT_NV |
+ VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_AUTOMATIC_CHECKPOINTS_BIT_NV;
+ first_next = &diagnostics_nv;
+ }
+
+ logical = vk::Device::Create(physical, queue_cis, extensions, first_next, dld);
if (!logical) {
LOG_ERROR(Render_Vulkan, "Failed to create logical device");
return false;
@@ -344,17 +357,12 @@ VkFormat VKDevice::GetSupportedFormat(VkFormat wanted_format, VkFormatFeatureFla
void VKDevice::ReportLoss() const {
    LOG_CRITICAL(Render_Vulkan, "Device loss occurred!");
- // Wait some time to let the log flush
- std::this_thread::sleep_for(std::chrono::seconds{1});
-
- if (!nv_device_diagnostic_checkpoints) {
- return;
- }
+ // Wait for the log to flush and for Nsight Aftermath to dump the results
+ std::this_thread::sleep_for(std::chrono::seconds{3});
+}
- [[maybe_unused]] const std::vector data = graphics_queue.GetCheckpointDataNV(dld);
- // Catch here in debug builds (or with optimizations disabled) the last graphics pipeline to be
- // executed. It can be done on a debugger by evaluating the expression:
- // *(VKGraphicsPipeline*)data[0]
+void VKDevice::SaveShader(const std::vector<u32>& spirv) const {
+ nsight_aftermath_tracker.SaveShader(spirv);
}
bool VKDevice::IsOptimalAstcSupported(const VkPhysicalDeviceFeatures& features) const {
@@ -527,8 +535,8 @@ std::vector<const char*> VKDevice::LoadExtensions() {
Test(extension, has_ext_transform_feedback, VK_EXT_TRANSFORM_FEEDBACK_EXTENSION_NAME,
false);
if (Settings::values.renderer_debug) {
- Test(extension, nv_device_diagnostic_checkpoints,
- VK_NV_DEVICE_DIAGNOSTIC_CHECKPOINTS_EXTENSION_NAME, true);
+ Test(extension, nv_device_diagnostics_config,
+ VK_NV_DEVICE_DIAGNOSTICS_CONFIG_EXTENSION_NAME, true);
}
}
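
For reference, the pNext chain assembled above looks like the following standalone sketch (hypothetical BuildDeviceCreatePNext helper; the real code reuses queue_cis, extensions and the vk::Device::Create wrapper). Both structs must outlive the vkCreateDevice call, which is why they are passed in by reference here:

    #define VK_NO_PROTOTYPES
    #include <vulkan/vulkan.h>

    const void* BuildDeviceCreatePNext(VkPhysicalDeviceFeatures2& features2,
                                       VkDeviceDiagnosticsConfigCreateInfoNV& diagnostics_nv,
                                       bool has_diagnostics_config) {
        features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
        features2.pNext = nullptr;

        if (!has_diagnostics_config) {
            return &features2; // plain chain: features only
        }
        diagnostics_nv.sType = VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV;
        diagnostics_nv.pNext = &features2; // diagnostics struct heads the chain
        diagnostics_nv.flags = VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_SHADER_DEBUG_INFO_BIT_NV |
                               VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_RESOURCE_TRACKING_BIT_NV |
                               VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_AUTOMATIC_CHECKPOINTS_BIT_NV;
        return &diagnostics_nv;
    }
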
diff --git a/src/video_core/renderer_vulkan/vk_device.h b/src/video_core/renderer_vulkan/vk_device.h
index 60d64572a..c8640762d 100644
--- a/src/video_core/renderer_vulkan/vk_device.h
+++ b/src/video_core/renderer_vulkan/vk_device.h
@@ -10,6 +10,7 @@
#include <vector>
#include "common/common_types.h"
+#include "video_core/renderer_vulkan/nsight_aftermath_tracker.h"
#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
@@ -43,6 +44,9 @@ public:
/// Reports a device loss.
void ReportLoss() const;
+ /// Reports a shader to Nsight Aftermath.
+ void SaveShader(const std::vector<u32>& spirv) const;
+
/// Returns the dispatch loader with direct function pointers of the device.
const vk::DeviceDispatch& GetDispatchLoader() const {
return dld;
@@ -78,11 +82,6 @@ public:
return present_family;
}
- /// Returns true if the device is integrated with the host CPU.
- bool IsIntegrated() const {
- return properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
- }
-
/// Returns the current Vulkan API version provided in Vulkan-formatted version numbers.
u32 GetApiVersion() const {
return properties.apiVersion;
@@ -173,11 +172,6 @@ public:
return ext_transform_feedback;
}
- /// Returns true if the device supports VK_NV_device_diagnostic_checkpoints.
- bool IsNvDeviceDiagnosticCheckpoints() const {
- return nv_device_diagnostic_checkpoints;
- }
-
/// Returns the vendor name reported from Vulkan.
std::string_view GetVendorName() const {
return vendor_name;
@@ -233,7 +227,7 @@ private:
bool ext_depth_range_unrestricted{}; ///< Support for VK_EXT_depth_range_unrestricted.
bool ext_shader_viewport_index_layer{}; ///< Support for VK_EXT_shader_viewport_index_layer.
bool ext_transform_feedback{}; ///< Support for VK_EXT_transform_feedback.
- bool nv_device_diagnostic_checkpoints{}; ///< Support for VK_NV_device_diagnostic_checkpoints.
+ bool nv_device_diagnostics_config{}; ///< Support for VK_NV_device_diagnostics_config.
// Telemetry parameters
std::string vendor_name; ///< Device's driver name.
@@ -241,6 +235,9 @@ private:
/// Format properties dictionary.
std::unordered_map<VkFormat, VkFormatProperties> format_properties;
+
+ /// Nsight Aftermath GPU crash tracker
+ NsightAftermathTracker nsight_aftermath_tracker;
};
} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.cpp b/src/video_core/renderer_vulkan/vk_fence_manager.cpp
new file mode 100644
index 000000000..a02be5487
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_fence_manager.cpp
@@ -0,0 +1,101 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <memory>
+#include <thread>
+
+#include "video_core/renderer_vulkan/vk_buffer_cache.h"
+#include "video_core/renderer_vulkan/vk_device.h"
+#include "video_core/renderer_vulkan/vk_fence_manager.h"
+#include "video_core/renderer_vulkan/vk_scheduler.h"
+#include "video_core/renderer_vulkan/vk_texture_cache.h"
+#include "video_core/renderer_vulkan/wrapper.h"
+
+namespace Vulkan {
+
+InnerFence::InnerFence(const VKDevice& device, VKScheduler& scheduler, u32 payload, bool is_stubbed)
+ : VideoCommon::FenceBase(payload, is_stubbed), device{device}, scheduler{scheduler} {}
+
+InnerFence::InnerFence(const VKDevice& device, VKScheduler& scheduler, GPUVAddr address,
+ u32 payload, bool is_stubbed)
+ : VideoCommon::FenceBase(address, payload, is_stubbed), device{device}, scheduler{scheduler} {}
+
+InnerFence::~InnerFence() = default;
+
+void InnerFence::Queue() {
+ if (is_stubbed) {
+ return;
+ }
+ ASSERT(!event);
+
+ event = device.GetLogical().CreateEvent();
+ ticks = scheduler.Ticks();
+
+ scheduler.RequestOutsideRenderPassOperationContext();
+ scheduler.Record([event = *event](vk::CommandBuffer cmdbuf) {
+ cmdbuf.SetEvent(event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
+ });
+}
+
+bool InnerFence::IsSignaled() const {
+ if (is_stubbed) {
+ return true;
+ }
+ ASSERT(event);
+ return IsEventSignalled();
+}
+
+void InnerFence::Wait() {
+ if (is_stubbed) {
+ return;
+ }
+ ASSERT(event);
+
+ if (ticks >= scheduler.Ticks()) {
+ scheduler.Flush();
+ }
+ while (!IsEventSignalled()) {
+ std::this_thread::yield();
+ }
+}
+
+bool InnerFence::IsEventSignalled() const {
+ switch (const VkResult result = event.GetStatus()) {
+ case VK_EVENT_SET:
+ return true;
+ case VK_EVENT_RESET:
+ return false;
+ default:
+ throw vk::Exception(result);
+ }
+}
+
+VKFenceManager::VKFenceManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
+ const VKDevice& device, VKScheduler& scheduler,
+ VKTextureCache& texture_cache, VKBufferCache& buffer_cache,
+ VKQueryCache& query_cache)
+ : GenericFenceManager(system, rasterizer, texture_cache, buffer_cache, query_cache),
+ device{device}, scheduler{scheduler} {}
+
+Fence VKFenceManager::CreateFence(u32 value, bool is_stubbed) {
+ return std::make_shared<InnerFence>(device, scheduler, value, is_stubbed);
+}
+
+Fence VKFenceManager::CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) {
+ return std::make_shared<InnerFence>(device, scheduler, addr, value, is_stubbed);
+}
+
+void VKFenceManager::QueueFence(Fence& fence) {
+ fence->Queue();
+}
+
+bool VKFenceManager::IsFenceSignaled(Fence& fence) const {
+ return fence->IsSignaled();
+}
+
+void VKFenceManager::WaitFence(Fence& fence) {
+ fence->Wait();
+}
+
+} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.h b/src/video_core/renderer_vulkan/vk_fence_manager.h
new file mode 100644
index 000000000..04d07fe6a
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_fence_manager.h
@@ -0,0 +1,74 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <memory>
+
+#include "video_core/fence_manager.h"
+#include "video_core/renderer_vulkan/wrapper.h"
+
+namespace Core {
+class System;
+}
+
+namespace VideoCore {
+class RasterizerInterface;
+}
+
+namespace Vulkan {
+
+class VKBufferCache;
+class VKDevice;
+class VKQueryCache;
+class VKScheduler;
+class VKTextureCache;
+
+class InnerFence : public VideoCommon::FenceBase {
+public:
+ explicit InnerFence(const VKDevice& device, VKScheduler& scheduler, u32 payload,
+ bool is_stubbed);
+ explicit InnerFence(const VKDevice& device, VKScheduler& scheduler, GPUVAddr address,
+ u32 payload, bool is_stubbed);
+ ~InnerFence();
+
+ void Queue();
+
+ bool IsSignaled() const;
+
+ void Wait();
+
+private:
+ bool IsEventSignalled() const;
+
+ const VKDevice& device;
+ VKScheduler& scheduler;
+ vk::Event event;
+ u64 ticks = 0;
+};
+using Fence = std::shared_ptr<InnerFence>;
+
+using GenericFenceManager =
+ VideoCommon::FenceManager<Fence, VKTextureCache, VKBufferCache, VKQueryCache>;
+
+class VKFenceManager final : public GenericFenceManager {
+public:
+ explicit VKFenceManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
+ const VKDevice& device, VKScheduler& scheduler,
+ VKTextureCache& texture_cache, VKBufferCache& buffer_cache,
+ VKQueryCache& query_cache);
+
+protected:
+ Fence CreateFence(u32 value, bool is_stubbed) override;
+ Fence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) override;
+ void QueueFence(Fence& fence) override;
+ bool IsFenceSignaled(Fence& fence) const override;
+ void WaitFence(Fence& fence) override;
+
+private:
+ const VKDevice& device;
+ VKScheduler& scheduler;
+};
+
+} // namespace Vulkan
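
A condensed sketch of the event-polling pattern InnerFence builds on (hypothetical helpers, not a drop-in; the patch goes through the vk::Event wrapper and VKScheduler rather than raw handles, and throws vk::Exception on error results instead of ignoring them):

    #include <thread>

    #define VK_NO_PROTOTYPES
    #include <vulkan/vulkan.h>

    // vkGetEventStatus flips to VK_EVENT_SET once the GPU has executed vkCmdSetEvent,
    // which is what InnerFence::Queue records on the command buffer.
    bool IsEventSignalled(PFN_vkGetEventStatus get_status, VkDevice device, VkEvent event) {
        return get_status(device, event) == VK_EVENT_SET;
    }

    void SpinWait(PFN_vkGetEventStatus get_status, VkDevice device, VkEvent event) {
        while (!IsEventSignalled(get_status, device, event)) {
            std::this_thread::yield(); // mirrors InnerFence::Wait after flushing the scheduler
        }
    }
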
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
index b540b838d..1ac981974 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
@@ -6,7 +6,6 @@
#include <cstring>
#include <vector>
-#include "common/assert.h"
#include "common/common_types.h"
#include "common/microprofile.h"
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
@@ -26,12 +25,13 @@ MICROPROFILE_DECLARE(Vulkan_PipelineCache);
namespace {
-VkStencilOpState GetStencilFaceState(const FixedPipelineState::StencilFace& face) {
+template <class StencilFace>
+VkStencilOpState GetStencilFaceState(const StencilFace& face) {
VkStencilOpState state;
- state.failOp = MaxwellToVK::StencilOp(face.action_stencil_fail);
- state.passOp = MaxwellToVK::StencilOp(face.action_depth_pass);
- state.depthFailOp = MaxwellToVK::StencilOp(face.action_depth_fail);
- state.compareOp = MaxwellToVK::ComparisonOp(face.test_func);
+ state.failOp = MaxwellToVK::StencilOp(face.ActionStencilFail());
+ state.passOp = MaxwellToVK::StencilOp(face.ActionDepthPass());
+ state.depthFailOp = MaxwellToVK::StencilOp(face.ActionDepthFail());
+ state.compareOp = MaxwellToVK::ComparisonOp(face.TestFunc());
state.compareMask = 0;
state.writeMask = 0;
state.reference = 0;
@@ -147,6 +147,8 @@ std::vector<vk::ShaderModule> VKGraphicsPipeline::CreateShaderModules(
continue;
}
+ device.SaveShader(stage->code);
+
ci.codeSize = stage->code.size() * sizeof(u32);
ci.pCode = stage->code.data();
modules.push_back(device.GetLogical().CreateShaderModule(ci));
@@ -157,43 +159,47 @@ std::vector<vk::ShaderModule> VKGraphicsPipeline::CreateShaderModules(
vk::Pipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& renderpass_params,
const SPIRVProgram& program) const {
const auto& vi = fixed_state.vertex_input;
- const auto& ia = fixed_state.input_assembly;
const auto& ds = fixed_state.depth_stencil;
const auto& cd = fixed_state.color_blending;
- const auto& ts = fixed_state.tessellation;
const auto& rs = fixed_state.rasterizer;
std::vector<VkVertexInputBindingDescription> vertex_bindings;
std::vector<VkVertexInputBindingDivisorDescriptionEXT> vertex_binding_divisors;
- for (std::size_t i = 0; i < vi.num_bindings; ++i) {
- const auto& binding = vi.bindings[i];
- const bool instanced = binding.divisor != 0;
+ for (std::size_t index = 0; index < std::size(vi.bindings); ++index) {
+ const auto& binding = vi.bindings[index];
+ if (!binding.enabled) {
+ continue;
+ }
+ const bool instanced = vi.binding_divisors[index] != 0;
const auto rate = instanced ? VK_VERTEX_INPUT_RATE_INSTANCE : VK_VERTEX_INPUT_RATE_VERTEX;
auto& vertex_binding = vertex_bindings.emplace_back();
- vertex_binding.binding = binding.index;
+ vertex_binding.binding = static_cast<u32>(index);
vertex_binding.stride = binding.stride;
vertex_binding.inputRate = rate;
if (instanced) {
auto& binding_divisor = vertex_binding_divisors.emplace_back();
- binding_divisor.binding = binding.index;
- binding_divisor.divisor = binding.divisor;
+ binding_divisor.binding = static_cast<u32>(index);
+ binding_divisor.divisor = vi.binding_divisors[index];
}
}
std::vector<VkVertexInputAttributeDescription> vertex_attributes;
const auto& input_attributes = program[0]->entries.attributes;
- for (std::size_t i = 0; i < vi.num_attributes; ++i) {
- const auto& attribute = vi.attributes[i];
- if (input_attributes.find(attribute.index) == input_attributes.end()) {
+ for (std::size_t index = 0; index < std::size(vi.attributes); ++index) {
+ const auto& attribute = vi.attributes[index];
+ if (!attribute.enabled) {
+ continue;
+ }
+ if (input_attributes.find(static_cast<u32>(index)) == input_attributes.end()) {
// Skip attributes not used by the vertex shaders.
continue;
}
auto& vertex_attribute = vertex_attributes.emplace_back();
- vertex_attribute.location = attribute.index;
+ vertex_attribute.location = static_cast<u32>(index);
vertex_attribute.binding = attribute.buffer;
- vertex_attribute.format = MaxwellToVK::VertexFormat(attribute.type, attribute.size);
+ vertex_attribute.format = MaxwellToVK::VertexFormat(attribute.Type(), attribute.Size());
vertex_attribute.offset = attribute.offset;
}
@@ -219,15 +225,15 @@ vk::Pipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& renderpa
input_assembly_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
input_assembly_ci.pNext = nullptr;
input_assembly_ci.flags = 0;
- input_assembly_ci.topology = MaxwellToVK::PrimitiveTopology(device, ia.topology);
+ input_assembly_ci.topology = MaxwellToVK::PrimitiveTopology(device, rs.Topology());
input_assembly_ci.primitiveRestartEnable =
- ia.primitive_restart_enable && SupportsPrimitiveRestart(input_assembly_ci.topology);
+ rs.primitive_restart_enable != 0 && SupportsPrimitiveRestart(input_assembly_ci.topology);
VkPipelineTessellationStateCreateInfo tessellation_ci;
tessellation_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO;
tessellation_ci.pNext = nullptr;
tessellation_ci.flags = 0;
- tessellation_ci.patchControlPoints = ts.patch_control_points;
+ tessellation_ci.patchControlPoints = rs.patch_control_points_minus_one.Value() + 1;
VkPipelineViewportStateCreateInfo viewport_ci;
viewport_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
@@ -242,12 +248,12 @@ vk::Pipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& renderpa
rasterization_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
rasterization_ci.pNext = nullptr;
rasterization_ci.flags = 0;
- rasterization_ci.depthClampEnable = rs.depth_clamp_enable;
+ rasterization_ci.depthClampEnable = rs.depth_clamp_disabled == 0 ? VK_TRUE : VK_FALSE;
rasterization_ci.rasterizerDiscardEnable = VK_FALSE;
rasterization_ci.polygonMode = VK_POLYGON_MODE_FILL;
rasterization_ci.cullMode =
- rs.cull_enable ? MaxwellToVK::CullFace(rs.cull_face) : VK_CULL_MODE_NONE;
- rasterization_ci.frontFace = MaxwellToVK::FrontFace(rs.front_face);
+ rs.cull_enable ? MaxwellToVK::CullFace(rs.CullFace()) : VK_CULL_MODE_NONE;
+ rasterization_ci.frontFace = MaxwellToVK::FrontFace(rs.FrontFace());
rasterization_ci.depthBiasEnable = rs.depth_bias_enable;
rasterization_ci.depthBiasConstantFactor = 0.0f;
rasterization_ci.depthBiasClamp = 0.0f;
@@ -271,40 +277,38 @@ vk::Pipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& renderpa
depth_stencil_ci.flags = 0;
depth_stencil_ci.depthTestEnable = ds.depth_test_enable;
depth_stencil_ci.depthWriteEnable = ds.depth_write_enable;
- depth_stencil_ci.depthCompareOp = ds.depth_test_enable
- ? MaxwellToVK::ComparisonOp(ds.depth_test_function)
- : VK_COMPARE_OP_ALWAYS;
+ depth_stencil_ci.depthCompareOp =
+ ds.depth_test_enable ? MaxwellToVK::ComparisonOp(ds.DepthTestFunc()) : VK_COMPARE_OP_ALWAYS;
depth_stencil_ci.depthBoundsTestEnable = ds.depth_bounds_enable;
depth_stencil_ci.stencilTestEnable = ds.stencil_enable;
- depth_stencil_ci.front = GetStencilFaceState(ds.front_stencil);
- depth_stencil_ci.back = GetStencilFaceState(ds.back_stencil);
+ depth_stencil_ci.front = GetStencilFaceState(ds.front);
+ depth_stencil_ci.back = GetStencilFaceState(ds.back);
depth_stencil_ci.minDepthBounds = 0.0f;
depth_stencil_ci.maxDepthBounds = 0.0f;
std::array<VkPipelineColorBlendAttachmentState, Maxwell::NumRenderTargets> cb_attachments;
- const std::size_t num_attachments =
- std::min(cd.attachments_count, renderpass_params.color_attachments.size());
- for (std::size_t i = 0; i < num_attachments; ++i) {
- static constexpr std::array component_table = {
+ const auto num_attachments = static_cast<std::size_t>(renderpass_params.num_color_attachments);
+ for (std::size_t index = 0; index < num_attachments; ++index) {
+ static constexpr std::array COMPONENT_TABLE = {
VK_COLOR_COMPONENT_R_BIT, VK_COLOR_COMPONENT_G_BIT, VK_COLOR_COMPONENT_B_BIT,
VK_COLOR_COMPONENT_A_BIT};
- const auto& blend = cd.attachments[i];
+ const auto& blend = cd.attachments[index];
VkColorComponentFlags color_components = 0;
- for (std::size_t j = 0; j < component_table.size(); ++j) {
- if (blend.components[j]) {
- color_components |= component_table[j];
+ for (std::size_t i = 0; i < COMPONENT_TABLE.size(); ++i) {
+ if (blend.Mask()[i]) {
+ color_components |= COMPONENT_TABLE[i];
}
}
- VkPipelineColorBlendAttachmentState& attachment = cb_attachments[i];
- attachment.blendEnable = blend.enable;
- attachment.srcColorBlendFactor = MaxwellToVK::BlendFactor(blend.src_rgb_func);
- attachment.dstColorBlendFactor = MaxwellToVK::BlendFactor(blend.dst_rgb_func);
- attachment.colorBlendOp = MaxwellToVK::BlendEquation(blend.rgb_equation);
- attachment.srcAlphaBlendFactor = MaxwellToVK::BlendFactor(blend.src_a_func);
- attachment.dstAlphaBlendFactor = MaxwellToVK::BlendFactor(blend.dst_a_func);
- attachment.alphaBlendOp = MaxwellToVK::BlendEquation(blend.a_equation);
+ VkPipelineColorBlendAttachmentState& attachment = cb_attachments[index];
+ attachment.blendEnable = blend.enable != 0;
+ attachment.srcColorBlendFactor = MaxwellToVK::BlendFactor(blend.SourceRGBFactor());
+ attachment.dstColorBlendFactor = MaxwellToVK::BlendFactor(blend.DestRGBFactor());
+ attachment.colorBlendOp = MaxwellToVK::BlendEquation(blend.EquationRGB());
+ attachment.srcAlphaBlendFactor = MaxwellToVK::BlendFactor(blend.SourceAlphaFactor());
+ attachment.dstAlphaBlendFactor = MaxwellToVK::BlendFactor(blend.DestAlphaFactor());
+ attachment.alphaBlendOp = MaxwellToVK::BlendEquation(blend.EquationAlpha());
attachment.colorWriteMask = color_components;
}
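
Sketch of the new vertex-binding walk in isolation (simplified types and a hypothetical MakeBindings helper): every slot is visited and the array index itself becomes the Vulkan binding number, which is why the packed state no longer stores per-binding indices or a binding count.

    #include <array>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    #define VK_NO_PROTOTYPES
    #include <vulkan/vulkan.h>

    struct Binding {
        bool enabled;
        std::uint32_t stride;
    };

    template <std::size_t N>
    std::vector<VkVertexInputBindingDescription> MakeBindings(const std::array<Binding, N>& bindings) {
        std::vector<VkVertexInputBindingDescription> result;
        for (std::size_t index = 0; index < N; ++index) {
            if (!bindings[index].enabled) {
                continue; // disabled slots are skipped instead of being counted up front
            }
            VkVertexInputBindingDescription desc{};
            desc.binding = static_cast<std::uint32_t>(index);
            desc.stride = bindings[index].stride;
            desc.inputRate = VK_VERTEX_INPUT_RATE_VERTEX; // the real code picks instance rate per divisor
            result.push_back(desc);
        }
        return result;
    }
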
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
index 7aba70960..a1d699a6c 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
@@ -5,16 +5,13 @@
#pragma once
#include <array>
-#include <memory>
#include <optional>
-#include <unordered_map>
#include <vector>
#include "video_core/engines/maxwell_3d.h"
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
#include "video_core/renderer_vulkan/vk_renderpass_cache.h"
-#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
#include "video_core/renderer_vulkan/wrapper.h"
diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.cpp b/src/video_core/renderer_vulkan/vk_memory_manager.cpp
index 6a9e658bf..b4c650a63 100644
--- a/src/video_core/renderer_vulkan/vk_memory_manager.cpp
+++ b/src/video_core/renderer_vulkan/vk_memory_manager.cpp
@@ -118,8 +118,7 @@ private:
};
VKMemoryManager::VKMemoryManager(const VKDevice& device)
- : device{device}, properties{device.GetPhysical().GetMemoryProperties()},
- is_memory_unified{GetMemoryUnified(properties)} {}
+ : device{device}, properties{device.GetPhysical().GetMemoryProperties()} {}
VKMemoryManager::~VKMemoryManager() = default;
@@ -209,16 +208,6 @@ VKMemoryCommit VKMemoryManager::TryAllocCommit(const VkMemoryRequirements& requi
return {};
}
-bool VKMemoryManager::GetMemoryUnified(const VkPhysicalDeviceMemoryProperties& properties) {
- for (u32 heap_index = 0; heap_index < properties.memoryHeapCount; ++heap_index) {
- if (!(properties.memoryHeaps[heap_index].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT)) {
- // Memory is considered unified when heaps are device local only.
- return false;
- }
- }
- return true;
-}
-
VKMemoryCommitImpl::VKMemoryCommitImpl(const VKDevice& device, VKMemoryAllocation* allocation,
const vk::DeviceMemory& memory, u64 begin, u64 end)
: device{device}, memory{memory}, interval{begin, end}, allocation{allocation} {}
diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.h b/src/video_core/renderer_vulkan/vk_memory_manager.h
index 5b6858e9b..1af88e3d4 100644
--- a/src/video_core/renderer_vulkan/vk_memory_manager.h
+++ b/src/video_core/renderer_vulkan/vk_memory_manager.h
@@ -40,11 +40,6 @@ public:
/// Commits memory required by the image and binds it.
VKMemoryCommit Commit(const vk::Image& image, bool host_visible);
- /// Returns true if the memory allocations are done always in host visible and coherent memory.
- bool IsMemoryUnified() const {
- return is_memory_unified;
- }
-
private:
/// Allocates a chunk of memory.
bool AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 type_mask, u64 size);
@@ -53,12 +48,8 @@ private:
VKMemoryCommit TryAllocCommit(const VkMemoryRequirements& requirements,
VkMemoryPropertyFlags wanted_properties);
- /// Returns true if the device uses an unified memory model.
- static bool GetMemoryUnified(const VkPhysicalDeviceMemoryProperties& properties);
-
- const VKDevice& device; ///< Device handler.
- const VkPhysicalDeviceMemoryProperties properties; ///< Physical device properties.
- const bool is_memory_unified; ///< True if memory model is unified.
+ const VKDevice& device; ///< Device handler.
+ const VkPhysicalDeviceMemoryProperties properties; ///< Physical device properties.
std::vector<std::unique_ptr<VKMemoryAllocation>> allocations; ///< Current allocations.
};
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index 90e3a8edd..fe45ed269 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -22,17 +22,22 @@
#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
#include "video_core/renderer_vulkan/vk_rasterizer.h"
#include "video_core/renderer_vulkan/vk_renderpass_cache.h"
-#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
#include "video_core/renderer_vulkan/wrapper.h"
#include "video_core/shader/compiler_settings.h"
+#include "video_core/shader/memory_util.h"
namespace Vulkan {
MICROPROFILE_DECLARE(Vulkan_PipelineCache);
using Tegra::Engines::ShaderType;
+using VideoCommon::Shader::GetShaderAddress;
+using VideoCommon::Shader::GetShaderCode;
+using VideoCommon::Shader::KERNEL_MAIN_OFFSET;
+using VideoCommon::Shader::ProgramCode;
+using VideoCommon::Shader::STAGE_MAIN_OFFSET;
namespace {
@@ -45,60 +50,6 @@ constexpr VkDescriptorType STORAGE_IMAGE = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
constexpr VideoCommon::Shader::CompilerSettings compiler_settings{
VideoCommon::Shader::CompileDepth::FullDecompile};
-/// Gets the address for the specified shader stage program
-GPUVAddr GetShaderAddress(Core::System& system, Maxwell::ShaderProgram program) {
- const auto& gpu{system.GPU().Maxwell3D()};
- const auto& shader_config{gpu.regs.shader_config[static_cast<std::size_t>(program)]};
- return gpu.regs.code_address.CodeAddress() + shader_config.offset;
-}
-
-/// Gets if the current instruction offset is a scheduler instruction
-constexpr bool IsSchedInstruction(std::size_t offset, std::size_t main_offset) {
- // Sched instructions appear once every 4 instructions.
- constexpr std::size_t SchedPeriod = 4;
- const std::size_t absolute_offset = offset - main_offset;
- return (absolute_offset % SchedPeriod) == 0;
-}
-
-/// Calculates the size of a program stream
-std::size_t CalculateProgramSize(const ProgramCode& program, bool is_compute) {
- const std::size_t start_offset = is_compute ? 0 : 10;
- // This is the encoded version of BRA that jumps to itself. All Nvidia
- // shaders end with one.
- constexpr u64 self_jumping_branch = 0xE2400FFFFF07000FULL;
- constexpr u64 mask = 0xFFFFFFFFFF7FFFFFULL;
- std::size_t offset = start_offset;
- while (offset < program.size()) {
- const u64 instruction = program[offset];
- if (!IsSchedInstruction(offset, start_offset)) {
- if ((instruction & mask) == self_jumping_branch) {
- // End on Maxwell's "nop" instruction
- break;
- }
- if (instruction == 0) {
- break;
- }
- }
- ++offset;
- }
- // The last instruction is included in the program size
- return std::min(offset + 1, program.size());
-}
-
-/// Gets the shader program code from memory for the specified address
-ProgramCode GetShaderCode(Tegra::MemoryManager& memory_manager, const GPUVAddr gpu_addr,
- const u8* host_ptr, bool is_compute) {
- ProgramCode program_code(VideoCommon::Shader::MAX_PROGRAM_LENGTH);
- ASSERT_OR_EXECUTE(host_ptr != nullptr, {
- std::fill(program_code.begin(), program_code.end(), 0);
- return program_code;
- });
- memory_manager.ReadBlockUnsafe(gpu_addr, program_code.data(),
- program_code.size() * sizeof(u64));
- program_code.resize(CalculateProgramSize(program_code, is_compute));
- return program_code;
-}
-
constexpr std::size_t GetStageFromProgram(std::size_t program) {
return program == 0 ? 0 : program - 1;
}
@@ -133,7 +84,7 @@ void AddBindings(std::vector<VkDescriptorSetLayoutBinding>& bindings, u32& bindi
u32 count = 1;
if constexpr (descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
// Combined image samplers can be arrayed.
- count = container[i].Size();
+ count = container[i].size;
}
VkDescriptorSetLayoutBinding& entry = bindings.emplace_back();
entry.binding = binding++;
@@ -161,6 +112,24 @@ u32 FillDescriptorLayout(const ShaderEntries& entries,
} // Anonymous namespace
+std::size_t GraphicsPipelineCacheKey::Hash() const noexcept {
+ const u64 hash = Common::CityHash64(reinterpret_cast<const char*>(this), sizeof *this);
+ return static_cast<std::size_t>(hash);
+}
+
+bool GraphicsPipelineCacheKey::operator==(const GraphicsPipelineCacheKey& rhs) const noexcept {
+ return std::memcmp(&rhs, this, sizeof *this) == 0;
+}
+
+std::size_t ComputePipelineCacheKey::Hash() const noexcept {
+ const u64 hash = Common::CityHash64(reinterpret_cast<const char*>(this), sizeof *this);
+ return static_cast<std::size_t>(hash);
+}
+
+bool ComputePipelineCacheKey::operator==(const ComputePipelineCacheKey& rhs) const noexcept {
+ return std::memcmp(&rhs, this, sizeof *this) == 0;
+}
+
CachedShader::CachedShader(Core::System& system, Tegra::Engines::ShaderType stage,
GPUVAddr gpu_addr, VAddr cpu_addr, ProgramCode program_code,
u32 main_offset)
@@ -207,18 +176,22 @@ std::array<Shader, Maxwell::MaxShaderProgram> VKPipelineCache::GetShaders() {
const GPUVAddr program_addr{GetShaderAddress(system, program)};
const std::optional cpu_addr = memory_manager.GpuToCpuAddress(program_addr);
ASSERT(cpu_addr);
- auto shader = cpu_addr ? TryGet(*cpu_addr) : nullptr;
+ auto shader = cpu_addr ? TryGet(*cpu_addr) : null_shader;
if (!shader) {
const auto host_ptr{memory_manager.GetPointer(program_addr)};
// No shader found - create a new one
- constexpr u32 stage_offset = 10;
+ constexpr u32 stage_offset = STAGE_MAIN_OFFSET;
const auto stage = static_cast<Tegra::Engines::ShaderType>(index == 0 ? 0 : index - 1);
- auto code = GetShaderCode(memory_manager, program_addr, host_ptr, false);
+ ProgramCode code = GetShaderCode(memory_manager, program_addr, host_ptr, false);
shader = std::make_shared<CachedShader>(system, stage, program_addr, *cpu_addr,
std::move(code), stage_offset);
- Register(shader);
+ if (cpu_addr) {
+ Register(shader);
+ } else {
+ null_shader = shader;
+ }
}
shaders[index] = std::move(shader);
}
@@ -261,17 +234,20 @@ VKComputePipeline& VKPipelineCache::GetComputePipeline(const ComputePipelineCach
const auto cpu_addr = memory_manager.GpuToCpuAddress(program_addr);
ASSERT(cpu_addr);
- auto shader = cpu_addr ? TryGet(*cpu_addr) : nullptr;
+ auto shader = cpu_addr ? TryGet(*cpu_addr) : null_kernel;
if (!shader) {
// No shader found - create a new one
const auto host_ptr = memory_manager.GetPointer(program_addr);
- auto code = GetShaderCode(memory_manager, program_addr, host_ptr, true);
- constexpr u32 kernel_main_offset = 0;
+ ProgramCode code = GetShaderCode(memory_manager, program_addr, host_ptr, true);
shader = std::make_shared<CachedShader>(system, Tegra::Engines::ShaderType::Compute,
program_addr, *cpu_addr, std::move(code),
- kernel_main_offset);
- Register(shader);
+ KERNEL_MAIN_OFFSET);
+ if (cpu_addr) {
+ Register(shader);
+ } else {
+ null_kernel = shader;
+ }
}
Specialization specialization;
@@ -329,12 +305,14 @@ VKPipelineCache::DecompileShaders(const GraphicsPipelineCacheKey& key) {
const auto& gpu = system.GPU().Maxwell3D();
Specialization specialization;
- if (fixed_state.input_assembly.topology == Maxwell::PrimitiveTopology::Points) {
- ASSERT(fixed_state.input_assembly.point_size != 0.0f);
- specialization.point_size = fixed_state.input_assembly.point_size;
+ if (fixed_state.rasterizer.Topology() == Maxwell::PrimitiveTopology::Points) {
+ float point_size;
+ std::memcpy(&point_size, &fixed_state.rasterizer.point_size, sizeof(float));
+ specialization.point_size = point_size;
+ ASSERT(point_size != 0.0f);
}
for (std::size_t i = 0; i < Maxwell::NumVertexAttributes; ++i) {
- specialization.attribute_types[i] = fixed_state.vertex_input.attributes[i].type;
+ specialization.attribute_types[i] = fixed_state.vertex_input.attributes[i].Type();
}
specialization.ndc_minus_one_to_one = fixed_state.rasterizer.ndc_minus_one_to_one;
@@ -383,7 +361,7 @@ void AddEntry(std::vector<VkDescriptorUpdateTemplateEntry>& template_entries, u3
if constexpr (descriptor_type == COMBINED_IMAGE_SAMPLER) {
for (u32 i = 0; i < count; ++i) {
- const u32 num_samplers = container[i].Size();
+ const u32 num_samplers = container[i].size;
VkDescriptorUpdateTemplateEntry& entry = template_entries.emplace_back();
entry.dstBinding = binding;
entry.dstArrayElement = 0;
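
The point_size handling above relies on a bit-preserving round trip: the packed state stores the float's raw bits in a u32 so the struct stays trivially copyable, and std::memcpy recovers the value without type-punning undefined behaviour. A standalone sketch:

    #include <cstdint>
    #include <cstring>

    std::uint32_t PackFloat(float value) noexcept {
        std::uint32_t raw;
        std::memcpy(&raw, &value, sizeof(raw)); // copy the bit pattern, not the value
        return raw;
    }

    float UnpackFloat(std::uint32_t raw) noexcept {
        float value;
        std::memcpy(&value, &raw, sizeof(value));
        return value;
    }
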
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.h b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
index 7ccdb7083..0b5796fef 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.h
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
@@ -7,7 +7,6 @@
#include <array>
#include <cstddef>
#include <memory>
-#include <tuple>
#include <type_traits>
#include <unordered_map>
#include <utility>
@@ -22,12 +21,11 @@
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
#include "video_core/renderer_vulkan/vk_graphics_pipeline.h"
#include "video_core/renderer_vulkan/vk_renderpass_cache.h"
-#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
#include "video_core/renderer_vulkan/wrapper.h"
+#include "video_core/shader/memory_util.h"
#include "video_core/shader/registry.h"
#include "video_core/shader/shader_ir.h"
-#include "video_core/surface.h"
namespace Core {
class System;
@@ -47,46 +45,40 @@ class CachedShader;
using Shader = std::shared_ptr<CachedShader>;
using Maxwell = Tegra::Engines::Maxwell3D::Regs;
-using ProgramCode = std::vector<u64>;
-
struct GraphicsPipelineCacheKey {
FixedPipelineState fixed_state;
- std::array<GPUVAddr, Maxwell::MaxShaderProgram> shaders;
RenderPassParams renderpass_params;
+ std::array<GPUVAddr, Maxwell::MaxShaderProgram> shaders;
+ u64 padding; // This is necessary for unique object representations
- std::size_t Hash() const noexcept {
- std::size_t hash = fixed_state.Hash();
- for (const auto& shader : shaders) {
- boost::hash_combine(hash, shader);
- }
- boost::hash_combine(hash, renderpass_params.Hash());
- return hash;
- }
+ std::size_t Hash() const noexcept;
+
+ bool operator==(const GraphicsPipelineCacheKey& rhs) const noexcept;
- bool operator==(const GraphicsPipelineCacheKey& rhs) const noexcept {
- return std::tie(fixed_state, shaders, renderpass_params) ==
- std::tie(rhs.fixed_state, rhs.shaders, rhs.renderpass_params);
+ bool operator!=(const GraphicsPipelineCacheKey& rhs) const noexcept {
+ return !operator==(rhs);
}
};
+static_assert(std::has_unique_object_representations_v<GraphicsPipelineCacheKey>);
+static_assert(std::is_trivially_copyable_v<GraphicsPipelineCacheKey>);
+static_assert(std::is_trivially_constructible_v<GraphicsPipelineCacheKey>);
struct ComputePipelineCacheKey {
- GPUVAddr shader{};
- u32 shared_memory_size{};
- std::array<u32, 3> workgroup_size{};
-
- std::size_t Hash() const noexcept {
- return static_cast<std::size_t>(shader) ^
- ((static_cast<std::size_t>(shared_memory_size) >> 7) << 40) ^
- static_cast<std::size_t>(workgroup_size[0]) ^
- (static_cast<std::size_t>(workgroup_size[1]) << 16) ^
- (static_cast<std::size_t>(workgroup_size[2]) << 24);
- }
+ GPUVAddr shader;
+ u32 shared_memory_size;
+ std::array<u32, 3> workgroup_size;
+
+ std::size_t Hash() const noexcept;
- bool operator==(const ComputePipelineCacheKey& rhs) const noexcept {
- return std::tie(shader, shared_memory_size, workgroup_size) ==
- std::tie(rhs.shader, rhs.shared_memory_size, rhs.workgroup_size);
+ bool operator==(const ComputePipelineCacheKey& rhs) const noexcept;
+
+ bool operator!=(const ComputePipelineCacheKey& rhs) const noexcept {
+ return !operator==(rhs);
}
};
+static_assert(std::has_unique_object_representations_v<ComputePipelineCacheKey>);
+static_assert(std::is_trivially_copyable_v<ComputePipelineCacheKey>);
+static_assert(std::is_trivially_constructible_v<ComputePipelineCacheKey>);
} // namespace Vulkan
@@ -113,7 +105,8 @@ namespace Vulkan {
class CachedShader final : public RasterizerCacheObject {
public:
explicit CachedShader(Core::System& system, Tegra::Engines::ShaderType stage, GPUVAddr gpu_addr,
- VAddr cpu_addr, ProgramCode program_code, u32 main_offset);
+ VAddr cpu_addr, VideoCommon::Shader::ProgramCode program_code,
+ u32 main_offset);
~CachedShader();
GPUVAddr GetGpuAddr() const {
@@ -145,7 +138,7 @@ private:
Tegra::Engines::ShaderType stage);
GPUVAddr gpu_addr{};
- ProgramCode program_code;
+ VideoCommon::Shader::ProgramCode program_code;
VideoCommon::Shader::Registry registry;
VideoCommon::Shader::ShaderIR shader_ir;
ShaderEntries entries;
@@ -182,6 +175,9 @@ private:
VKUpdateDescriptorQueue& update_descriptor_queue;
VKRenderPassCache& renderpass_cache;
+ Shader null_shader{};
+ Shader null_kernel{};
+
std::array<Shader, Maxwell::MaxShaderProgram> last_shaders;
GraphicsPipelineCacheKey last_graphics_key;
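
Hashing the cache keys over their raw bytes is only sound because the keys now have unique object representations, which is what the explicit u64 padding member and the static_asserts guarantee. An illustrative stand-in (FNV-1a used here purely for the sketch; the patch uses Common::CityHash64):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <type_traits>

    template <typename Key>
    std::size_t HashKeyBytes(const Key& key) noexcept {
        static_assert(std::has_unique_object_representations_v<Key>);
        const auto* bytes = reinterpret_cast<const unsigned char*>(&key);
        std::uint64_t hash = 0xcbf29ce484222325ULL; // FNV-1a offset basis
        for (std::size_t i = 0; i < sizeof(Key); ++i) {
            hash = (hash ^ bytes[i]) * 0x100000001b3ULL; // FNV-1a prime
        }
        return static_cast<std::size_t>(hash);
    }
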
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp
index 0966c7ff7..bc91c48cc 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp
@@ -4,7 +4,6 @@
#include <algorithm>
#include <cstddef>
-#include <cstdint>
#include <utility>
#include <vector>
@@ -113,8 +112,19 @@ u64 HostCounter::BlockingQuery() const {
if (ticks >= cache.Scheduler().Ticks()) {
cache.Scheduler().Flush();
}
- return cache.Device().GetLogical().GetQueryResult<u64>(
- query.first, query.second, VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
+ u64 data;
+ const VkResult result = cache.Device().GetLogical().GetQueryResults(
+ query.first, query.second, 1, sizeof(data), &data, sizeof(data),
+ VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
+ switch (result) {
+ case VK_SUCCESS:
+ return data;
+ case VK_ERROR_DEVICE_LOST:
+ cache.Device().ReportLoss();
+ [[fallthrough]];
+ default:
+ throw vk::Exception(result);
+ }
}
} // namespace Vulkan
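The rewritten BlockingQuery above checks the VkResult instead of assuming success, reporting device loss before bailing out. Stripped of the wrapper types, the call reduces to something like the following sketch (illustrative names, raw Vulkan, simplified error handling):

#include <cstdint>
#include <stdexcept>
#include <vulkan/vulkan.h>

// Sketch: read back one 64-bit query result, blocking until it is available.
// device, pool and query are assumed to be valid handles owned by the caller.
uint64_t ReadQueryResult(VkDevice device, VkQueryPool pool, uint32_t query) {
    uint64_t data = 0;
    const VkResult result =
        vkGetQueryPoolResults(device, pool, query, 1, sizeof(data), &data, sizeof(data),
                              VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
    if (result == VK_SUCCESS) {
        return data;
    }
    // VK_ERROR_DEVICE_LOST deserves its own report (logging, telemetry) before failing.
    throw std::runtime_error("vkGetQueryPoolResults failed");
}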
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.h b/src/video_core/renderer_vulkan/vk_query_cache.h
index b63784f4b..40119e6d3 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.h
+++ b/src/video_core/renderer_vulkan/vk_query_cache.h
@@ -5,7 +5,6 @@
#pragma once
#include <cstddef>
-#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 857bea19f..8b009fc22 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -9,14 +9,13 @@
#include <vector>
#include <boost/container/static_vector.hpp>
-#include <boost/functional/hash.hpp>
#include "common/alignment.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/microprofile.h"
#include "core/core.h"
-#include "core/memory.h"
+#include "core/settings.h"
#include "video_core/engines/kepler_compute.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
@@ -118,14 +117,13 @@ template <typename Engine, typename Entry>
Tegra::Texture::FullTextureInfo GetTextureInfo(const Engine& engine, const Entry& entry,
std::size_t stage, std::size_t index = 0) {
const auto stage_type = static_cast<Tegra::Engines::ShaderType>(stage);
- if (entry.IsBindless()) {
- const Tegra::Texture::TextureHandle tex_handle =
- engine.AccessConstBuffer32(stage_type, entry.GetBuffer(), entry.GetOffset());
+ if (entry.is_bindless) {
+ const auto tex_handle = engine.AccessConstBuffer32(stage_type, entry.buffer, entry.offset);
return engine.GetTextureInfo(tex_handle);
}
const auto& gpu_profile = engine.AccessGuestDriverProfile();
const u32 entry_offset = static_cast<u32>(index * gpu_profile.GetTextureHandlerSize());
- const u32 offset = entry.GetOffset() + entry_offset;
+ const u32 offset = entry.offset + entry_offset;
if constexpr (std::is_same_v<Engine, Tegra::Engines::Maxwell3D>) {
return engine.GetStageTexture(stage_type, offset);
} else {
@@ -292,14 +290,16 @@ RasterizerVulkan::RasterizerVulkan(Core::System& system, Core::Frontend::EmuWind
staging_pool(device, memory_manager, scheduler), descriptor_pool(device),
update_descriptor_queue(device, scheduler), renderpass_cache(device),
quad_array_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue),
- uint8_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue),
quad_indexed_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue),
+ uint8_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue),
texture_cache(system, *this, device, resource_manager, memory_manager, scheduler,
staging_pool),
pipeline_cache(system, *this, device, scheduler, descriptor_pool, update_descriptor_queue,
renderpass_cache),
buffer_cache(*this, system, device, memory_manager, scheduler, staging_pool),
- sampler_cache(device), query_cache(system, *this, device, scheduler) {
+ sampler_cache(device),
+ fence_manager(system, *this, device, scheduler, texture_cache, buffer_cache, query_cache),
+ query_cache(system, *this, device, scheduler), wfi_event{device.GetLogical().CreateEvent()} {
scheduler.SetQueryCache(query_cache);
}
@@ -313,7 +313,8 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
query_cache.UpdateCounters();
const auto& gpu = system.GPU().Maxwell3D();
- GraphicsPipelineCacheKey key{GetFixedPipelineState(gpu.regs)};
+ GraphicsPipelineCacheKey key;
+ key.fixed_state.Fill(gpu.regs);
buffer_cache.Map(CalculateGraphicsStreamBufferSize(is_indexed));
@@ -331,10 +332,11 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
buffer_cache.Unmap();
- const auto texceptions = UpdateAttachments();
+ const Texceptions texceptions = UpdateAttachments();
SetupImageTransitions(texceptions, color_attachments, zeta_attachment);
key.renderpass_params = GetRenderPassParams(texceptions);
+ key.padding = 0;
auto& pipeline = pipeline_cache.GetGraphicsPipeline(key);
scheduler.BindGraphicsPipeline(pipeline.GetHandle());
@@ -347,11 +349,6 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
buffer_bindings.Bind(scheduler);
- if (device.IsNvDeviceDiagnosticCheckpoints()) {
- scheduler.Record(
- [&pipeline](vk::CommandBuffer cmdbuf) { cmdbuf.SetCheckpointNV(&pipeline); });
- }
-
BeginTransformFeedback();
const auto pipeline_layout = pipeline.GetLayout();
@@ -365,6 +362,8 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
});
EndTransformFeedback();
+
+ system.GPU().TickWork();
}
void RasterizerVulkan::Clear() {
@@ -453,10 +452,12 @@ void RasterizerVulkan::DispatchCompute(GPUVAddr code_addr) {
query_cache.UpdateCounters();
const auto& launch_desc = system.GPU().KeplerCompute().launch_description;
- const ComputePipelineCacheKey key{
- code_addr,
- launch_desc.shared_alloc,
- {launch_desc.block_dim_x, launch_desc.block_dim_y, launch_desc.block_dim_z}};
+ ComputePipelineCacheKey key;
+ key.shader = code_addr;
+ key.shared_memory_size = launch_desc.shared_alloc;
+ key.workgroup_size = {launch_desc.block_dim_x, launch_desc.block_dim_y,
+ launch_desc.block_dim_z};
+
auto& pipeline = pipeline_cache.GetComputePipeline(key);
// Compute dispatches can't be executed inside a renderpass
@@ -478,11 +479,6 @@ void RasterizerVulkan::DispatchCompute(GPUVAddr code_addr) {
TransitionImages(image_views, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT);
- if (device.IsNvDeviceDiagnosticCheckpoints()) {
- scheduler.Record(
- [&pipeline](vk::CommandBuffer cmdbuf) { cmdbuf.SetCheckpointNV(nullptr); });
- }
-
scheduler.Record([grid_x = launch_desc.grid_dim_x, grid_y = launch_desc.grid_dim_y,
grid_z = launch_desc.grid_dim_z, pipeline_handle = pipeline.GetHandle(),
layout = pipeline.GetLayout(),
@@ -514,6 +510,13 @@ void RasterizerVulkan::FlushRegion(VAddr addr, u64 size) {
query_cache.FlushRegion(addr, size);
}
+bool RasterizerVulkan::MustFlushRegion(VAddr addr, u64 size) {
+ if (!Settings::IsGPULevelHigh()) {
+ return buffer_cache.MustFlushRegion(addr, size);
+ }
+ return texture_cache.MustFlushRegion(addr, size) || buffer_cache.MustFlushRegion(addr, size);
+}
+
void RasterizerVulkan::InvalidateRegion(VAddr addr, u64 size) {
if (addr == 0 || size == 0) {
return;
@@ -524,11 +527,72 @@ void RasterizerVulkan::InvalidateRegion(VAddr addr, u64 size) {
query_cache.InvalidateRegion(addr, size);
}
+void RasterizerVulkan::OnCPUWrite(VAddr addr, u64 size) {
+ if (addr == 0 || size == 0) {
+ return;
+ }
+ texture_cache.OnCPUWrite(addr, size);
+ pipeline_cache.InvalidateRegion(addr, size);
+ buffer_cache.OnCPUWrite(addr, size);
+ query_cache.InvalidateRegion(addr, size);
+}
+
+void RasterizerVulkan::SyncGuestHost() {
+ texture_cache.SyncGuestHost();
+ buffer_cache.SyncGuestHost();
+}
+
+void RasterizerVulkan::SignalSemaphore(GPUVAddr addr, u32 value) {
+ auto& gpu{system.GPU()};
+ if (!gpu.IsAsync()) {
+ gpu.MemoryManager().Write<u32>(addr, value);
+ return;
+ }
+ fence_manager.SignalSemaphore(addr, value);
+}
+
+void RasterizerVulkan::SignalSyncPoint(u32 value) {
+ auto& gpu{system.GPU()};
+ if (!gpu.IsAsync()) {
+ gpu.IncrementSyncPoint(value);
+ return;
+ }
+ fence_manager.SignalSyncPoint(value);
+}
+
+void RasterizerVulkan::ReleaseFences() {
+ auto& gpu{system.GPU()};
+ if (!gpu.IsAsync()) {
+ return;
+ }
+ fence_manager.WaitPendingFences();
+}
+
void RasterizerVulkan::FlushAndInvalidateRegion(VAddr addr, u64 size) {
FlushRegion(addr, size);
InvalidateRegion(addr, size);
}
+void RasterizerVulkan::WaitForIdle() {
+    // Wait for everything but pixel operations. This intentionally includes
+    // FRAGMENT_SHADER_BIT because fragment shaders can still write storage buffers.
+ VkPipelineStageFlags flags =
+ VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
+ VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
+ VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
+ VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
+ VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT;
+ if (device.IsExtTransformFeedbackSupported()) {
+ flags |= VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT;
+ }
+
+ scheduler.RequestOutsideRenderPassOperationContext();
+ scheduler.Record([event = *wfi_event, flags](vk::CommandBuffer cmdbuf) {
+ cmdbuf.SetEvent(event, flags);
+ cmdbuf.WaitEvents(event, flags, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, {}, {}, {});
+ });
+}
+
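WaitForIdle() above records a set-event/wait-event pair so later commands cannot start until everything recorded so far has executed. A rough raw-Vulkan equivalent, collapsing the carefully chosen stage mask into VK_PIPELINE_STAGE_ALL_COMMANDS_BIT for brevity:

#include <vulkan/vulkan.h>

// Sketch: block later GPU work on everything recorded so far, using an event.
// cmdbuf and event are assumed valid; the real code narrows the source stage mask.
void RecordWaitForIdle(VkCommandBuffer cmdbuf, VkEvent event) {
    const VkPipelineStageFlags stages = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    vkCmdSetEvent(cmdbuf, event, stages);
    vkCmdWaitEvents(cmdbuf, 1, &event, stages, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
                    0, nullptr, 0, nullptr, 0, nullptr);
}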
void RasterizerVulkan::FlushCommands() {
if (draw_counter > 0) {
draw_counter = 0;
@@ -609,7 +673,7 @@ RasterizerVulkan::Texceptions RasterizerVulkan::UpdateAttachments() {
Texceptions texceptions;
for (std::size_t rt = 0; rt < Maxwell::NumRenderTargets; ++rt) {
if (update_rendertargets) {
- color_attachments[rt] = texture_cache.GetColorBufferSurface(rt);
+ color_attachments[rt] = texture_cache.GetColorBufferSurface(rt, true);
}
if (color_attachments[rt] && WalkAttachmentOverlaps(*color_attachments[rt])) {
texceptions[rt] = true;
@@ -617,7 +681,7 @@ RasterizerVulkan::Texceptions RasterizerVulkan::UpdateAttachments() {
}
if (update_rendertargets) {
- zeta_attachment = texture_cache.GetDepthBufferSurface();
+ zeta_attachment = texture_cache.GetDepthBufferSurface(true);
}
if (zeta_attachment && WalkAttachmentOverlaps(*zeta_attachment)) {
texceptions[ZETA_TEXCEPTION_INDEX] = true;
@@ -645,7 +709,7 @@ std::tuple<VkFramebuffer, VkExtent2D> RasterizerVulkan::ConfigureFramebuffers(
FramebufferCacheKey key{renderpass, std::numeric_limits<u32>::max(),
std::numeric_limits<u32>::max(), std::numeric_limits<u32>::max()};
- const auto try_push = [&](const View& view) {
+ const auto try_push = [&key](const View& view) {
if (!view) {
return false;
}
@@ -656,7 +720,9 @@ std::tuple<VkFramebuffer, VkExtent2D> RasterizerVulkan::ConfigureFramebuffers(
return true;
};
- for (std::size_t index = 0; index < std::size(color_attachments); ++index) {
+ const auto& regs = system.GPU().Maxwell3D().regs;
+ const std::size_t num_attachments = static_cast<std::size_t>(regs.rt_control.count);
+ for (std::size_t index = 0; index < num_attachments; ++index) {
if (try_push(color_attachments[index])) {
texture_cache.MarkColorBufferInUse(index);
}
@@ -807,42 +873,49 @@ void RasterizerVulkan::SetupVertexArrays(FixedPipelineState::VertexInput& vertex
BufferBindings& buffer_bindings) {
const auto& regs = system.GPU().Maxwell3D().regs;
- for (u32 index = 0; index < static_cast<u32>(Maxwell::NumVertexAttributes); ++index) {
+ for (std::size_t index = 0; index < Maxwell::NumVertexAttributes; ++index) {
const auto& attrib = regs.vertex_attrib_format[index];
if (!attrib.IsValid()) {
+ vertex_input.SetAttribute(index, false, 0, 0, {}, {});
continue;
}
- const auto& buffer = regs.vertex_array[attrib.buffer];
+ [[maybe_unused]] const auto& buffer = regs.vertex_array[attrib.buffer];
ASSERT(buffer.IsEnabled());
- vertex_input.attributes[vertex_input.num_attributes++] =
- FixedPipelineState::VertexAttribute(index, attrib.buffer, attrib.type, attrib.size,
- attrib.offset);
+ vertex_input.SetAttribute(index, true, attrib.buffer, attrib.offset, attrib.type.Value(),
+ attrib.size.Value());
}
- for (u32 index = 0; index < static_cast<u32>(Maxwell::NumVertexArrays); ++index) {
+ for (std::size_t index = 0; index < Maxwell::NumVertexArrays; ++index) {
const auto& vertex_array = regs.vertex_array[index];
if (!vertex_array.IsEnabled()) {
+ vertex_input.SetBinding(index, false, 0, 0);
continue;
}
+ vertex_input.SetBinding(
+ index, true, vertex_array.stride,
+ regs.instanced_arrays.IsInstancingEnabled(index) ? vertex_array.divisor : 0);
const GPUVAddr start{vertex_array.StartAddress()};
const GPUVAddr end{regs.vertex_array_limit[index].LimitAddress()};
- ASSERT(end > start);
- const std::size_t size{end - start + 1};
+ ASSERT(end >= start);
+ const std::size_t size{end - start};
+ if (size == 0) {
+ buffer_bindings.AddVertexBinding(DefaultBuffer(), 0);
+ continue;
+ }
const auto [buffer, offset] = buffer_cache.UploadMemory(start, size);
-
- vertex_input.bindings[vertex_input.num_bindings++] = FixedPipelineState::VertexBinding(
- index, vertex_array.stride,
- regs.instanced_arrays.IsInstancingEnabled(index) ? vertex_array.divisor : 0);
buffer_bindings.AddVertexBinding(buffer, offset);
}
}
void RasterizerVulkan::SetupIndexBuffer(BufferBindings& buffer_bindings, DrawParameters& params,
bool is_indexed) {
+ if (params.num_vertices == 0) {
+ return;
+ }
const auto& regs = system.GPU().Maxwell3D().regs;
switch (regs.draw.topology) {
case Maxwell::PrimitiveTopology::Quads: {
@@ -918,7 +991,7 @@ void RasterizerVulkan::SetupGraphicsTextures(const ShaderEntries& entries, std::
MICROPROFILE_SCOPE(Vulkan_Textures);
const auto& gpu = system.GPU().Maxwell3D();
for (const auto& entry : entries.samplers) {
- for (std::size_t i = 0; i < entry.Size(); ++i) {
+ for (std::size_t i = 0; i < entry.size; ++i) {
const auto texture = GetTextureInfo(gpu, entry, stage, i);
SetupTexture(texture, entry);
}
@@ -970,7 +1043,7 @@ void RasterizerVulkan::SetupComputeTextures(const ShaderEntries& entries) {
MICROPROFILE_SCOPE(Vulkan_Textures);
const auto& gpu = system.GPU().KeplerCompute();
for (const auto& entry : entries.samplers) {
- for (std::size_t i = 0; i < entry.Size(); ++i) {
+ for (std::size_t i = 0; i < entry.size; ++i) {
const auto texture = GetTextureInfo(gpu, entry, ComputeShaderIndex, i);
SetupTexture(texture, entry);
}
@@ -990,8 +1063,7 @@ void RasterizerVulkan::SetupConstBuffer(const ConstBufferEntry& entry,
const Tegra::Engines::ConstBufferInfo& buffer) {
if (!buffer.enabled) {
// Set values to zero to unbind buffers
- update_descriptor_queue.AddBuffer(buffer_cache.GetEmptyBuffer(sizeof(float)), 0,
- sizeof(float));
+ update_descriptor_queue.AddBuffer(DefaultBuffer(), 0, DEFAULT_BUFFER_SIZE);
return;
}
@@ -1014,7 +1086,9 @@ void RasterizerVulkan::SetupGlobalBuffer(const GlobalBufferEntry& entry, GPUVAdd
if (size == 0) {
// Sometimes global memory pointers don't have a proper size. Upload a dummy entry
// because Vulkan doesn't like empty buffers.
- constexpr std::size_t dummy_size = 4;
+        // Note: Do *not* use DefaultBuffer() here; storage buffers can be written to,
+        // which would break the default buffer.
+ static constexpr std::size_t dummy_size = 4;
const auto buffer = buffer_cache.GetEmptyBuffer(dummy_size);
update_descriptor_queue.AddBuffer(buffer, 0, dummy_size);
return;
@@ -1051,7 +1125,7 @@ void RasterizerVulkan::SetupTexture(const Tegra::Texture::FullTextureInfo& textu
void RasterizerVulkan::SetupImage(const Tegra::Texture::TICEntry& tic, const ImageEntry& entry) {
auto view = texture_cache.GetImageSurface(tic, entry);
- if (entry.IsWritten()) {
+ if (entry.is_written) {
view->MarkAsModified(texture_cache.Tick());
}
@@ -1179,7 +1253,7 @@ std::size_t RasterizerVulkan::CalculateVertexArraysSize() const {
const GPUVAddr end{regs.vertex_array_limit[index].LimitAddress()};
DEBUG_ASSERT(end >= start);
- size += (end - start + 1) * regs.vertex_array[index].enable;
+ size += (end - start) * regs.vertex_array[index].enable;
}
return size;
}
@@ -1202,28 +1276,54 @@ std::size_t RasterizerVulkan::CalculateConstBufferSize(
}
RenderPassParams RasterizerVulkan::GetRenderPassParams(Texceptions texceptions) const {
- using namespace VideoCore::Surface;
-
const auto& regs = system.GPU().Maxwell3D().regs;
- RenderPassParams renderpass_params;
+ const std::size_t num_attachments = static_cast<std::size_t>(regs.rt_control.count);
- for (std::size_t rt = 0; rt < static_cast<std::size_t>(regs.rt_control.count); ++rt) {
+ RenderPassParams params;
+ params.color_formats = {};
+ std::size_t color_texceptions = 0;
+
+ std::size_t index = 0;
+ for (std::size_t rt = 0; rt < num_attachments; ++rt) {
const auto& rendertarget = regs.rt[rt];
if (rendertarget.Address() == 0 || rendertarget.format == Tegra::RenderTargetFormat::NONE) {
continue;
}
- renderpass_params.color_attachments.push_back(RenderPassParams::ColorAttachment{
- static_cast<u32>(rt), PixelFormatFromRenderTargetFormat(rendertarget.format),
- texceptions[rt]});
+ params.color_formats[index] = static_cast<u8>(rendertarget.format);
+ color_texceptions |= (texceptions[rt] ? 1ULL : 0ULL) << index;
+ ++index;
}
+ params.num_color_attachments = static_cast<u8>(index);
+ params.texceptions = static_cast<u8>(color_texceptions);
+
+ params.zeta_format = regs.zeta_enable ? static_cast<u8>(regs.zeta.format) : 0;
+ params.zeta_texception = texceptions[ZETA_TEXCEPTION_INDEX];
+ return params;
+}
- renderpass_params.has_zeta = regs.zeta_enable;
- if (renderpass_params.has_zeta) {
- renderpass_params.zeta_pixel_format = PixelFormatFromDepthFormat(regs.zeta.format);
- renderpass_params.zeta_texception = texceptions[ZETA_TEXCEPTION_INDEX];
+VkBuffer RasterizerVulkan::DefaultBuffer() {
+ if (default_buffer) {
+ return *default_buffer;
}
- return renderpass_params;
+ VkBufferCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ ci.size = DEFAULT_BUFFER_SIZE;
+ ci.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
+ VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
+ ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ ci.queueFamilyIndexCount = 0;
+ ci.pQueueFamilyIndices = nullptr;
+ default_buffer = device.GetLogical().CreateBuffer(ci);
+ default_buffer_commit = memory_manager.Commit(default_buffer, false);
+
+ scheduler.RequestOutsideRenderPassOperationContext();
+ scheduler.Record([buffer = *default_buffer](vk::CommandBuffer cmdbuf) {
+ cmdbuf.FillBuffer(buffer, 0, DEFAULT_BUFFER_SIZE, 0);
+ });
+ return *default_buffer;
}
} // namespace Vulkan
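DefaultBuffer() above lazily creates a small zero-filled buffer that stands in for unbound vertex and uniform bindings (note the companion comment in SetupGlobalBuffer: writable storage buffers must not alias it). The pattern in isolation, with raw Vulkan calls and illustrative names:

#include <vulkan/vulkan.h>

// Sketch: create a tiny buffer once and clear it on the GPU, so "unbound" slots can
// point at well-defined zeros. Error handling and memory allocation are omitted.
VkBuffer CreateZeroedDefaultBuffer(VkDevice device, VkCommandBuffer cmdbuf, VkDeviceSize size) {
    VkBufferCreateInfo ci{};
    ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    ci.size = size;
    ci.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
               VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
    ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;

    VkBuffer buffer = VK_NULL_HANDLE;
    vkCreateBuffer(device, &ci, nullptr, &buffer);
    // A real implementation binds device memory here before recording the fill.
    vkCmdFillBuffer(cmdbuf, buffer, 0, size, 0u);
    return buffer;
}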
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index d9108f862..0ed0e48c6 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -14,13 +14,13 @@
#include <boost/functional/hash.hpp>
#include "common/common_types.h"
-#include "video_core/memory_manager.h"
#include "video_core/rasterizer_accelerated.h"
#include "video_core/rasterizer_interface.h"
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
#include "video_core/renderer_vulkan/vk_buffer_cache.h"
#include "video_core/renderer_vulkan/vk_compute_pass.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
+#include "video_core/renderer_vulkan/vk_fence_manager.h"
#include "video_core/renderer_vulkan/vk_memory_manager.h"
#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
#include "video_core/renderer_vulkan/vk_query_cache.h"
@@ -118,8 +118,15 @@ public:
void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) override;
void FlushAll() override;
void FlushRegion(VAddr addr, u64 size) override;
+ bool MustFlushRegion(VAddr addr, u64 size) override;
void InvalidateRegion(VAddr addr, u64 size) override;
+ void OnCPUWrite(VAddr addr, u64 size) override;
+ void SyncGuestHost() override;
+ void SignalSemaphore(GPUVAddr addr, u32 value) override;
+ void SignalSyncPoint(u32 value) override;
+ void ReleaseFences() override;
void FlushAndInvalidateRegion(VAddr addr, u64 size) override;
+ void WaitForIdle() override;
void FlushCommands() override;
void TickFrame() override;
bool AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src,
@@ -148,6 +155,7 @@ private:
using Texceptions = std::bitset<Maxwell::NumRenderTargets + 1>;
static constexpr std::size_t ZETA_TEXCEPTION_INDEX = 8;
+ static constexpr VkDeviceSize DEFAULT_BUFFER_SIZE = 4 * sizeof(float);
void FlushWork();
@@ -240,6 +248,8 @@ private:
RenderPassParams GetRenderPassParams(Texceptions texceptions) const;
+ VkBuffer DefaultBuffer();
+
Core::System& system;
Core::Frontend::EmuWindow& render_window;
VKScreenInfo& screen_info;
@@ -261,8 +271,13 @@ private:
VKPipelineCache pipeline_cache;
VKBufferCache buffer_cache;
VKSamplerCache sampler_cache;
+ VKFenceManager fence_manager;
VKQueryCache query_cache;
+ vk::Buffer default_buffer;
+ VKMemoryCommit default_buffer_commit;
+ vk::Event wfi_event;
+
std::array<View, Maxwell::NumRenderTargets> color_attachments;
View zeta_attachment;
diff --git a/src/video_core/renderer_vulkan/vk_renderpass_cache.cpp b/src/video_core/renderer_vulkan/vk_renderpass_cache.cpp
index 4e5286a69..3f71d005e 100644
--- a/src/video_core/renderer_vulkan/vk_renderpass_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_renderpass_cache.cpp
@@ -2,9 +2,11 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include <cstring>
#include <memory>
#include <vector>
+#include "common/cityhash.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/renderer_vulkan/maxwell_to_vk.h"
#include "video_core/renderer_vulkan/vk_device.h"
@@ -13,6 +15,15 @@
namespace Vulkan {
+std::size_t RenderPassParams::Hash() const noexcept {
+ const u64 hash = Common::CityHash64(reinterpret_cast<const char*>(this), sizeof *this);
+ return static_cast<std::size_t>(hash);
+}
+
+bool RenderPassParams::operator==(const RenderPassParams& rhs) const noexcept {
+ return std::memcmp(&rhs, this, sizeof *this) == 0;
+}
+
VKRenderPassCache::VKRenderPassCache(const VKDevice& device) : device{device} {}
VKRenderPassCache::~VKRenderPassCache() = default;
@@ -27,20 +38,22 @@ VkRenderPass VKRenderPassCache::GetRenderPass(const RenderPassParams& params) {
}
vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& params) const {
+ using namespace VideoCore::Surface;
std::vector<VkAttachmentDescription> descriptors;
std::vector<VkAttachmentReference> color_references;
- for (std::size_t rt = 0; rt < params.color_attachments.size(); ++rt) {
- const auto attachment = params.color_attachments[rt];
- const auto format =
- MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, attachment.pixel_format);
+ const std::size_t num_attachments = static_cast<std::size_t>(params.num_color_attachments);
+ for (std::size_t rt = 0; rt < num_attachments; ++rt) {
+ const auto guest_format = static_cast<Tegra::RenderTargetFormat>(params.color_formats[rt]);
+ const PixelFormat pixel_format = PixelFormatFromRenderTargetFormat(guest_format);
+ const auto format = MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, pixel_format);
ASSERT_MSG(format.attachable, "Trying to attach a non-attachable format with format={}",
- static_cast<u32>(attachment.pixel_format));
+ static_cast<int>(pixel_format));
- // TODO(Rodrigo): Add eMayAlias when it's needed.
- const auto color_layout = attachment.is_texception
- ? VK_IMAGE_LAYOUT_GENERAL
- : VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ // TODO(Rodrigo): Add MAY_ALIAS_BIT when it's needed.
+ const VkImageLayout color_layout = ((params.texceptions >> rt) & 1) != 0
+ ? VK_IMAGE_LAYOUT_GENERAL
+ : VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
VkAttachmentDescription& descriptor = descriptors.emplace_back();
descriptor.flags = VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT;
descriptor.format = format.format;
@@ -58,15 +71,17 @@ vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& param
}
VkAttachmentReference zeta_attachment_ref;
- if (params.has_zeta) {
- const auto format =
- MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, params.zeta_pixel_format);
+ const bool has_zeta = params.zeta_format != 0;
+ if (has_zeta) {
+ const auto guest_format = static_cast<Tegra::DepthFormat>(params.zeta_format);
+ const PixelFormat pixel_format = PixelFormatFromDepthFormat(guest_format);
+ const auto format = MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, pixel_format);
ASSERT_MSG(format.attachable, "Trying to attach a non-attachable format with format={}",
- static_cast<u32>(params.zeta_pixel_format));
+ static_cast<int>(pixel_format));
- const auto zeta_layout = params.zeta_texception
- ? VK_IMAGE_LAYOUT_GENERAL
- : VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+ const VkImageLayout zeta_layout = params.zeta_texception != 0
+ ? VK_IMAGE_LAYOUT_GENERAL
+ : VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
VkAttachmentDescription& descriptor = descriptors.emplace_back();
descriptor.flags = 0;
descriptor.format = format.format;
@@ -78,7 +93,7 @@ vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& param
descriptor.initialLayout = zeta_layout;
descriptor.finalLayout = zeta_layout;
- zeta_attachment_ref.attachment = static_cast<u32>(params.color_attachments.size());
+ zeta_attachment_ref.attachment = static_cast<u32>(num_attachments);
zeta_attachment_ref.layout = zeta_layout;
}
@@ -90,7 +105,7 @@ vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& param
subpass_description.colorAttachmentCount = static_cast<u32>(color_references.size());
subpass_description.pColorAttachments = color_references.data();
subpass_description.pResolveAttachments = nullptr;
- subpass_description.pDepthStencilAttachment = params.has_zeta ? &zeta_attachment_ref : nullptr;
+ subpass_description.pDepthStencilAttachment = has_zeta ? &zeta_attachment_ref : nullptr;
subpass_description.preserveAttachmentCount = 0;
subpass_description.pPreserveAttachments = nullptr;
@@ -101,7 +116,7 @@ vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& param
stage |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
}
- if (params.has_zeta) {
+ if (has_zeta) {
access |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
stage |= VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
diff --git a/src/video_core/renderer_vulkan/vk_renderpass_cache.h b/src/video_core/renderer_vulkan/vk_renderpass_cache.h
index 921b6efb5..8b0fec720 100644
--- a/src/video_core/renderer_vulkan/vk_renderpass_cache.h
+++ b/src/video_core/renderer_vulkan/vk_renderpass_cache.h
@@ -4,8 +4,7 @@
#pragma once
-#include <memory>
-#include <tuple>
+#include <type_traits>
#include <unordered_map>
#include <boost/container/static_vector.hpp>
@@ -19,51 +18,25 @@ namespace Vulkan {
class VKDevice;
-// TODO(Rodrigo): Optimize this structure for faster hashing
-
struct RenderPassParams {
- struct ColorAttachment {
- u32 index = 0;
- VideoCore::Surface::PixelFormat pixel_format = VideoCore::Surface::PixelFormat::Invalid;
- bool is_texception = false;
-
- std::size_t Hash() const noexcept {
- return static_cast<std::size_t>(pixel_format) |
- static_cast<std::size_t>(is_texception) << 6 |
- static_cast<std::size_t>(index) << 7;
- }
-
- bool operator==(const ColorAttachment& rhs) const noexcept {
- return std::tie(index, pixel_format, is_texception) ==
- std::tie(rhs.index, rhs.pixel_format, rhs.is_texception);
- }
- };
-
- boost::container::static_vector<ColorAttachment,
- Tegra::Engines::Maxwell3D::Regs::NumRenderTargets>
- color_attachments{};
- // TODO(Rodrigo): Unify has_zeta into zeta_pixel_format and zeta_component_type.
- VideoCore::Surface::PixelFormat zeta_pixel_format = VideoCore::Surface::PixelFormat::Invalid;
- bool has_zeta = false;
- bool zeta_texception = false;
-
- std::size_t Hash() const noexcept {
- std::size_t hash = 0;
- for (const auto& rt : color_attachments) {
- boost::hash_combine(hash, rt.Hash());
- }
- boost::hash_combine(hash, zeta_pixel_format);
- boost::hash_combine(hash, has_zeta);
- boost::hash_combine(hash, zeta_texception);
- return hash;
- }
+ std::array<u8, Tegra::Engines::Maxwell3D::Regs::NumRenderTargets> color_formats;
+ u8 num_color_attachments;
+ u8 texceptions;
+
+ u8 zeta_format;
+ u8 zeta_texception;
+
+ std::size_t Hash() const noexcept;
+
+ bool operator==(const RenderPassParams& rhs) const noexcept;
- bool operator==(const RenderPassParams& rhs) const {
- return std::tie(color_attachments, zeta_pixel_format, has_zeta, zeta_texception) ==
- std::tie(rhs.color_attachments, rhs.zeta_pixel_format, rhs.has_zeta,
- rhs.zeta_texception);
+ bool operator!=(const RenderPassParams& rhs) const noexcept {
+ return !operator==(rhs);
}
};
+static_assert(std::has_unique_object_representations_v<RenderPassParams>);
+static_assert(std::is_trivially_copyable_v<RenderPassParams>);
+static_assert(std::is_trivially_constructible_v<RenderPassParams>);
} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_sampler_cache.cpp b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
index 07bbcf520..2687d8d95 100644
--- a/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
@@ -2,11 +2,8 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
-#include <cstring>
-#include <optional>
#include <unordered_map>
-#include "common/assert.h"
#include "video_core/renderer_vulkan/maxwell_to_vk.h"
#include "video_core/renderer_vulkan/vk_sampler_cache.h"
#include "video_core/renderer_vulkan/wrapper.h"
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.cpp b/src/video_core/renderer_vulkan/vk_scheduler.cpp
index 900f551b3..82ec9180e 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.cpp
+++ b/src/video_core/renderer_vulkan/vk_scheduler.cpp
@@ -8,7 +8,6 @@
#include <thread>
#include <utility>
-#include "common/assert.h"
#include "common/microprofile.h"
#include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_query_cache.h"
@@ -166,7 +165,15 @@ void VKScheduler::SubmitExecution(VkSemaphore semaphore) {
submit_info.pCommandBuffers = current_cmdbuf.address();
submit_info.signalSemaphoreCount = semaphore ? 1 : 0;
submit_info.pSignalSemaphores = &semaphore;
- device.GetGraphicsQueue().Submit(submit_info, *current_fence);
+ switch (const VkResult result = device.GetGraphicsQueue().Submit(submit_info, *current_fence)) {
+ case VK_SUCCESS:
+ break;
+ case VK_ERROR_DEVICE_LOST:
+ device.ReportLoss();
+ [[fallthrough]];
+ default:
+ vk::Check(result);
+ }
}
void VKScheduler::AllocateNewContext() {
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.h b/src/video_core/renderer_vulkan/vk_scheduler.h
index 82a8adc69..970a65566 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.h
+++ b/src/video_core/renderer_vulkan/vk_scheduler.h
@@ -7,7 +7,6 @@
#include <atomic>
#include <condition_variable>
#include <memory>
-#include <optional>
#include <stack>
#include <thread>
#include <utility>
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
index aaa138f52..18678968c 100644
--- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
+++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
@@ -103,8 +103,8 @@ struct GenericVaryingDescription {
};
spv::Dim GetSamplerDim(const Sampler& sampler) {
- ASSERT(!sampler.IsBuffer());
- switch (sampler.GetType()) {
+ ASSERT(!sampler.is_buffer);
+ switch (sampler.type) {
case Tegra::Shader::TextureType::Texture1D:
return spv::Dim::Dim1D;
case Tegra::Shader::TextureType::Texture2D:
@@ -114,13 +114,13 @@ spv::Dim GetSamplerDim(const Sampler& sampler) {
case Tegra::Shader::TextureType::TextureCube:
return spv::Dim::Cube;
default:
- UNIMPLEMENTED_MSG("Unimplemented sampler type={}", static_cast<u32>(sampler.GetType()));
+ UNIMPLEMENTED_MSG("Unimplemented sampler type={}", static_cast<int>(sampler.type));
return spv::Dim::Dim2D;
}
}
std::pair<spv::Dim, bool> GetImageDim(const Image& image) {
- switch (image.GetType()) {
+ switch (image.type) {
case Tegra::Shader::ImageType::Texture1D:
return {spv::Dim::Dim1D, false};
case Tegra::Shader::ImageType::TextureBuffer:
@@ -134,7 +134,7 @@ std::pair<spv::Dim, bool> GetImageDim(const Image& image) {
case Tegra::Shader::ImageType::Texture3D:
return {spv::Dim::Dim3D, false};
default:
- UNIMPLEMENTED_MSG("Unimplemented image type={}", static_cast<u32>(image.GetType()));
+ UNIMPLEMENTED_MSG("Unimplemented image type={}", static_cast<int>(image.type));
return {spv::Dim::Dim2D, false};
}
}
@@ -879,11 +879,11 @@ private:
u32 DeclareTexelBuffers(u32 binding) {
for (const auto& sampler : ir.GetSamplers()) {
- if (!sampler.IsBuffer()) {
+ if (!sampler.is_buffer) {
continue;
}
- ASSERT(!sampler.IsArray());
- ASSERT(!sampler.IsShadow());
+ ASSERT(!sampler.is_array);
+ ASSERT(!sampler.is_shadow);
constexpr auto dim = spv::Dim::Buffer;
constexpr int depth = 0;
@@ -894,23 +894,23 @@ private:
const Id image_type = TypeImage(t_float, dim, depth, arrayed, ms, sampled, format);
const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, image_type);
const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant);
- AddGlobalVariable(Name(id, fmt::format("sampler_{}", sampler.GetIndex())));
+ AddGlobalVariable(Name(id, fmt::format("sampler_{}", sampler.index)));
Decorate(id, spv::Decoration::Binding, binding++);
Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);
- texel_buffers.emplace(sampler.GetIndex(), TexelBuffer{image_type, id});
+ texel_buffers.emplace(sampler.index, TexelBuffer{image_type, id});
}
return binding;
}
u32 DeclareSamplers(u32 binding) {
for (const auto& sampler : ir.GetSamplers()) {
- if (sampler.IsBuffer()) {
+ if (sampler.is_buffer) {
continue;
}
const auto dim = GetSamplerDim(sampler);
- const int depth = sampler.IsShadow() ? 1 : 0;
- const int arrayed = sampler.IsArray() ? 1 : 0;
+ const int depth = sampler.is_shadow ? 1 : 0;
+ const int arrayed = sampler.is_array ? 1 : 0;
constexpr bool ms = false;
constexpr int sampled = 1;
constexpr auto format = spv::ImageFormat::Unknown;
@@ -918,17 +918,17 @@ private:
const Id sampler_type = TypeSampledImage(image_type);
const Id sampler_pointer_type =
TypePointer(spv::StorageClass::UniformConstant, sampler_type);
- const Id type = sampler.IsIndexed()
- ? TypeArray(sampler_type, Constant(t_uint, sampler.Size()))
+ const Id type = sampler.is_indexed
+ ? TypeArray(sampler_type, Constant(t_uint, sampler.size))
: sampler_type;
const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, type);
const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant);
- AddGlobalVariable(Name(id, fmt::format("sampler_{}", sampler.GetIndex())));
+ AddGlobalVariable(Name(id, fmt::format("sampler_{}", sampler.index)));
Decorate(id, spv::Decoration::Binding, binding++);
Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);
- sampled_images.emplace(sampler.GetIndex(), SampledImage{image_type, sampler_type,
- sampler_pointer_type, id});
+ sampled_images.emplace(
+ sampler.index, SampledImage{image_type, sampler_type, sampler_pointer_type, id});
}
return binding;
}
@@ -943,17 +943,17 @@ private:
const Id image_type = TypeImage(t_uint, dim, depth, arrayed, ms, sampled, format, {});
const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, image_type);
const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant);
- AddGlobalVariable(Name(id, fmt::format("image_{}", image.GetIndex())));
+ AddGlobalVariable(Name(id, fmt::format("image_{}", image.index)));
Decorate(id, spv::Decoration::Binding, binding++);
Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);
- if (image.IsRead() && !image.IsWritten()) {
+ if (image.is_read && !image.is_written) {
Decorate(id, spv::Decoration::NonWritable);
- } else if (image.IsWritten() && !image.IsRead()) {
+ } else if (image.is_written && !image.is_read) {
Decorate(id, spv::Decoration::NonReadable);
}
- images.emplace(static_cast<u32>(image.GetIndex()), StorageImage{image_type, id});
+ images.emplace(image.index, StorageImage{image_type, id});
}
return binding;
}
@@ -1584,6 +1584,15 @@ private:
return {OpCompositeConstruct(t_half, low, high), Type::HalfFloat};
}
+ Expression LogicalAddCarry(Operation operation) {
+ const Id op_a = AsUint(Visit(operation[0]));
+ const Id op_b = AsUint(Visit(operation[1]));
+
+ const Id result = OpIAddCarry(TypeStruct({t_uint, t_uint}), op_a, op_b);
+ const Id carry = OpCompositeExtract(t_uint, result, 1);
+ return {OpINotEqual(t_bool, carry, Constant(t_uint, 0)), Type::Bool};
+ }
+
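LogicalAddCarry above maps the guest's add-with-carry predicate onto SPIR-V OpIAddCarry, whose second struct member is 1 exactly when the unsigned 32-bit addition wraps. In plain C++ the emitted expression amounts to:

#include <cstdint>

// Sketch of what the emitted SPIR-V computes: true exactly when a + b overflows 32 bits.
bool AddCarry(std::uint32_t a, std::uint32_t b) {
    const std::uint32_t sum = a + b;      // OpIAddCarry result member 0 (wraps mod 2^32)
    const std::uint32_t carry = sum < a;  // OpCompositeExtract of member 1 (0 or 1)
    return carry != 0;                    // OpINotEqual against zero
}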
Expression LogicalAssign(Operation operation) {
const Node& dest = operation[0];
const Node& src = operation[1];
@@ -1611,11 +1620,11 @@ private:
Id GetTextureSampler(Operation operation) {
const auto& meta = std::get<MetaTexture>(operation.GetMeta());
- ASSERT(!meta.sampler.IsBuffer());
+ ASSERT(!meta.sampler.is_buffer);
- const auto& entry = sampled_images.at(meta.sampler.GetIndex());
+ const auto& entry = sampled_images.at(meta.sampler.index);
Id sampler = entry.variable;
- if (meta.sampler.IsIndexed()) {
+ if (meta.sampler.is_indexed) {
const Id index = AsInt(Visit(meta.index));
sampler = OpAccessChain(entry.sampler_pointer_type, sampler, index);
}
@@ -1624,8 +1633,8 @@ private:
Id GetTextureImage(Operation operation) {
const auto& meta = std::get<MetaTexture>(operation.GetMeta());
- const u32 index = meta.sampler.GetIndex();
- if (meta.sampler.IsBuffer()) {
+ const u32 index = meta.sampler.index;
+ if (meta.sampler.is_buffer) {
const auto& entry = texel_buffers.at(index);
return OpLoad(entry.image_type, entry.image);
} else {
@@ -1636,7 +1645,7 @@ private:
Id GetImage(Operation operation) {
const auto& meta = std::get<MetaImage>(operation.GetMeta());
- const auto entry = images.at(meta.image.GetIndex());
+ const auto entry = images.at(meta.image.index);
return OpLoad(entry.image_type, entry.image);
}
@@ -1652,7 +1661,7 @@ private:
}
if (const auto meta = std::get_if<MetaTexture>(&operation.GetMeta())) {
// Add array coordinate for textures
- if (meta->sampler.IsArray()) {
+ if (meta->sampler.is_array) {
Id array = AsInt(Visit(meta->array));
if (type == Type::Float) {
array = OpConvertSToF(t_float, array);
@@ -1758,7 +1767,7 @@ private:
operands.push_back(GetOffsetCoordinates(operation));
}
- if (meta.sampler.IsShadow()) {
+ if (meta.sampler.is_shadow) {
const Id dref = AsFloat(Visit(meta.depth_compare));
return {OpImageSampleDrefExplicitLod(t_float, sampler, coords, dref, mask, operands),
Type::Float};
@@ -1773,7 +1782,7 @@ private:
const Id coords = GetCoordinates(operation, Type::Float);
Id texture{};
- if (meta.sampler.IsShadow()) {
+ if (meta.sampler.is_shadow) {
texture = OpImageDrefGather(t_float4, GetTextureSampler(operation), coords,
AsFloat(Visit(meta.depth_compare)));
} else {
@@ -1800,8 +1809,8 @@ private:
}
const Id lod = AsUint(Visit(operation[0]));
- const std::size_t coords_count = [&]() {
- switch (const auto type = meta.sampler.GetType(); type) {
+ const std::size_t coords_count = [&meta] {
+ switch (const auto type = meta.sampler.type) {
case Tegra::Shader::TextureType::Texture1D:
return 1;
case Tegra::Shader::TextureType::Texture2D:
@@ -1810,7 +1819,7 @@ private:
case Tegra::Shader::TextureType::Texture3D:
return 3;
default:
- UNREACHABLE_MSG("Invalid texture type={}", static_cast<u32>(type));
+ UNREACHABLE_MSG("Invalid texture type={}", static_cast<int>(type));
return 2;
}
}();
@@ -1853,7 +1862,7 @@ private:
const Id image = GetTextureImage(operation);
const Id coords = GetCoordinates(operation, Type::Int);
Id fetch;
- if (meta.lod && !meta.sampler.IsBuffer()) {
+ if (meta.lod && !meta.sampler.is_buffer) {
fetch = OpImageFetch(t_float4, image, coords, spv::ImageOperandsMask::Lod,
AsInt(Visit(meta.lod)));
} else {
@@ -2518,6 +2527,8 @@ private:
&SPIRVDecompiler::Binary<&Module::OpINotEqual, Type::Bool, Type::Uint>,
&SPIRVDecompiler::Binary<&Module::OpUGreaterThanEqual, Type::Bool, Type::Uint>,
+ &SPIRVDecompiler::LogicalAddCarry,
+
&SPIRVDecompiler::Binary<&Module::OpFOrdLessThan, Type::Bool2, Type::HalfFloat>,
&SPIRVDecompiler::Binary<&Module::OpFOrdEqual, Type::Bool2, Type::HalfFloat>,
&SPIRVDecompiler::Binary<&Module::OpFOrdLessThanEqual, Type::Bool2, Type::HalfFloat>,
@@ -2969,7 +2980,7 @@ ShaderEntries GenerateShaderEntries(const VideoCommon::Shader::ShaderIR& ir) {
entries.global_buffers.emplace_back(base.cbuf_index, base.cbuf_offset, usage.is_written);
}
for (const auto& sampler : ir.GetSamplers()) {
- if (sampler.IsBuffer()) {
+ if (sampler.is_buffer) {
entries.texel_buffers.emplace_back(sampler);
} else {
entries.samplers.emplace_back(sampler);
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.h b/src/video_core/renderer_vulkan/vk_shader_decompiler.h
index ffea4709e..f4c05ac3c 100644
--- a/src/video_core/renderer_vulkan/vk_shader_decompiler.h
+++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.h
@@ -5,11 +5,7 @@
#pragma once
#include <array>
-#include <bitset>
-#include <memory>
#include <set>
-#include <type_traits>
-#include <utility>
#include <vector>
#include "common/common_types.h"
diff --git a/src/video_core/renderer_vulkan/vk_shader_util.cpp b/src/video_core/renderer_vulkan/vk_shader_util.cpp
index 784839327..112df9c71 100644
--- a/src/video_core/renderer_vulkan/vk_shader_util.cpp
+++ b/src/video_core/renderer_vulkan/vk_shader_util.cpp
@@ -4,8 +4,7 @@
#include <cstring>
#include <memory>
-#include <vector>
-#include "common/alignment.h"
+
#include "common/assert.h"
#include "common/common_types.h"
#include "video_core/renderer_vulkan/vk_device.h"
diff --git a/src/video_core/renderer_vulkan/vk_shader_util.h b/src/video_core/renderer_vulkan/vk_shader_util.h
index be38d6697..d1d3f3cae 100644
--- a/src/video_core/renderer_vulkan/vk_shader_util.h
+++ b/src/video_core/renderer_vulkan/vk_shader_util.h
@@ -4,7 +4,6 @@
#pragma once
-#include <vector>
#include "common/common_types.h"
#include "video_core/renderer_vulkan/wrapper.h"
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
index 94d954d7a..45c180221 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
@@ -39,8 +39,7 @@ VKStagingBufferPool::StagingBuffer& VKStagingBufferPool::StagingBuffer::operator
VKStagingBufferPool::VKStagingBufferPool(const VKDevice& device, VKMemoryManager& memory_manager,
VKScheduler& scheduler)
- : device{device}, memory_manager{memory_manager}, scheduler{scheduler},
- is_device_integrated{device.IsIntegrated()} {}
+ : device{device}, memory_manager{memory_manager}, scheduler{scheduler} {}
VKStagingBufferPool::~VKStagingBufferPool() = default;
@@ -56,9 +55,7 @@ void VKStagingBufferPool::TickFrame() {
current_delete_level = (current_delete_level + 1) % NumLevels;
ReleaseCache(true);
- if (!is_device_integrated) {
- ReleaseCache(false);
- }
+ ReleaseCache(false);
}
VKBuffer* VKStagingBufferPool::TryGetReservedBuffer(std::size_t size, bool host_visible) {
@@ -81,7 +78,7 @@ VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_v
ci.size = 1ULL << log2;
ci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
- VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
+ VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
ci.queueFamilyIndexCount = 0;
ci.pQueueFamilyIndices = nullptr;
@@ -95,7 +92,7 @@ VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_v
}
VKStagingBufferPool::StagingBuffersCache& VKStagingBufferPool::GetCache(bool host_visible) {
- return is_device_integrated || host_visible ? host_staging_buffers : device_staging_buffers;
+ return host_visible ? host_staging_buffers : device_staging_buffers;
}
void VKStagingBufferPool::ReleaseCache(bool host_visible) {
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
index a0840ff8c..3c4901437 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
@@ -5,8 +5,6 @@
#pragma once
#include <climits>
-#include <unordered_map>
-#include <utility>
#include <vector>
#include "common/common_types.h"
@@ -71,7 +69,6 @@ private:
const VKDevice& device;
VKMemoryManager& memory_manager;
VKScheduler& scheduler;
- const bool is_device_integrated;
StagingBuffersCache host_staging_buffers;
StagingBuffersCache device_staging_buffers;
diff --git a/src/video_core/renderer_vulkan/vk_stream_buffer.cpp b/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
index 38a93a01a..868447af2 100644
--- a/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
+++ b/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
@@ -3,6 +3,7 @@
// Refer to the license.txt file included.
#include <algorithm>
+#include <limits>
#include <optional>
#include <tuple>
#include <vector>
@@ -22,22 +23,38 @@ namespace {
constexpr u64 WATCHES_INITIAL_RESERVE = 0x4000;
constexpr u64 WATCHES_RESERVE_CHUNK = 0x1000;
-constexpr u64 STREAM_BUFFER_SIZE = 256 * 1024 * 1024;
+constexpr u64 PREFERRED_STREAM_BUFFER_SIZE = 256 * 1024 * 1024;
-std::optional<u32> FindMemoryType(const VKDevice& device, u32 filter,
- VkMemoryPropertyFlags wanted) {
- const auto properties = device.GetPhysical().GetMemoryProperties();
- for (u32 i = 0; i < properties.memoryTypeCount; i++) {
- if (!(filter & (1 << i))) {
- continue;
- }
- if ((properties.memoryTypes[i].propertyFlags & wanted) == wanted) {
+/// Find a memory type with the passed requirements
+std::optional<u32> FindMemoryType(const VkPhysicalDeviceMemoryProperties& properties,
+ VkMemoryPropertyFlags wanted,
+ u32 filter = std::numeric_limits<u32>::max()) {
+ for (u32 i = 0; i < properties.memoryTypeCount; ++i) {
+ const auto flags = properties.memoryTypes[i].propertyFlags;
+ if ((flags & wanted) == wanted && (filter & (1U << i)) != 0) {
return i;
}
}
return std::nullopt;
}
+/// Get the preferred host visible memory type.
+u32 GetMemoryType(const VkPhysicalDeviceMemoryProperties& properties,
+ u32 filter = std::numeric_limits<u32>::max()) {
+ // Prefer device local host visible allocations. Both AMD and Nvidia now provide one.
+ // Otherwise search for a host visible allocation.
+ static constexpr auto HOST_MEMORY =
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
+ static constexpr auto DYNAMIC_MEMORY = HOST_MEMORY | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
+ std::optional preferred_type = FindMemoryType(properties, DYNAMIC_MEMORY);
+ if (!preferred_type) {
+ preferred_type = FindMemoryType(properties, HOST_MEMORY);
+ ASSERT_MSG(preferred_type, "No host visible and coherent memory type found");
+ }
+ return preferred_type.value_or(0);
+}
+
} // Anonymous namespace
VKStreamBuffer::VKStreamBuffer(const VKDevice& device, VKScheduler& scheduler,
@@ -51,7 +68,7 @@ VKStreamBuffer::VKStreamBuffer(const VKDevice& device, VKScheduler& scheduler,
VKStreamBuffer::~VKStreamBuffer() = default;
std::tuple<u8*, u64, bool> VKStreamBuffer::Map(u64 size, u64 alignment) {
- ASSERT(size <= STREAM_BUFFER_SIZE);
+ ASSERT(size <= stream_buffer_size);
mapped_size = size;
if (alignment > 0) {
@@ -61,7 +78,7 @@ std::tuple<u8*, u64, bool> VKStreamBuffer::Map(u64 size, u64 alignment) {
WaitPendingOperations(offset);
bool invalidated = false;
- if (offset + size > STREAM_BUFFER_SIZE) {
+ if (offset + size > stream_buffer_size) {
// The buffer would overflow, save the amount of used watches and reset the state.
invalidation_mark = current_watch_cursor;
current_watch_cursor = 0;
@@ -98,40 +115,37 @@ void VKStreamBuffer::Unmap(u64 size) {
}
void VKStreamBuffer::CreateBuffers(VkBufferUsageFlags usage) {
+ const auto memory_properties = device.GetPhysical().GetMemoryProperties();
+ const u32 preferred_type = GetMemoryType(memory_properties);
+ const u32 preferred_heap = memory_properties.memoryTypes[preferred_type].heapIndex;
+
+    // Subtract a few bytes from the preferred heap size to avoid running out of memory.
+ const VkDeviceSize heap_size = memory_properties.memoryHeaps[preferred_heap].size;
+ const VkDeviceSize allocable_size = heap_size - 4 * 1024 * 1024;
+
VkBufferCreateInfo buffer_ci;
buffer_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffer_ci.pNext = nullptr;
buffer_ci.flags = 0;
- buffer_ci.size = STREAM_BUFFER_SIZE;
+ buffer_ci.size = std::min(PREFERRED_STREAM_BUFFER_SIZE, allocable_size);
buffer_ci.usage = usage;
buffer_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
buffer_ci.queueFamilyIndexCount = 0;
buffer_ci.pQueueFamilyIndices = nullptr;
- const auto& dev = device.GetLogical();
- buffer = dev.CreateBuffer(buffer_ci);
-
- const auto& dld = device.GetDispatchLoader();
- const auto requirements = dev.GetBufferMemoryRequirements(*buffer);
- // Prefer device local host visible allocations (this should hit AMD's pinned memory).
- auto type =
- FindMemoryType(device, requirements.memoryTypeBits,
- VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
- VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
- if (!type) {
- // Otherwise search for a host visible allocation.
- type = FindMemoryType(device, requirements.memoryTypeBits,
- VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
- VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
- ASSERT_MSG(type, "No host visible and coherent memory type found");
- }
+ buffer = device.GetLogical().CreateBuffer(buffer_ci);
+
+ const auto requirements = device.GetLogical().GetBufferMemoryRequirements(*buffer);
+ const u32 required_flags = requirements.memoryTypeBits;
+ stream_buffer_size = static_cast<u64>(requirements.size);
+
VkMemoryAllocateInfo memory_ai;
memory_ai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memory_ai.pNext = nullptr;
memory_ai.allocationSize = requirements.size;
- memory_ai.memoryTypeIndex = *type;
+ memory_ai.memoryTypeIndex = GetMemoryType(memory_properties, required_flags);
- memory = dev.AllocateMemory(memory_ai);
+ memory = device.GetLogical().AllocateMemory(memory_ai);
buffer.BindMemory(*memory, 0);
}
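CreateBuffers() above now sizes the stream buffer against the heap backing the chosen memory type and prefers memory that is both device local and host visible. A standalone sketch of that selection logic, independent of the VKDevice wrapper:

#include <cstdint>
#include <optional>
#include <vulkan/vulkan.h>

// Sketch: pick a host-visible memory type, preferring one that is also device local.
std::optional<uint32_t> PickStreamMemoryType(const VkPhysicalDeviceMemoryProperties& props,
                                             uint32_t type_filter) {
    const VkMemoryPropertyFlags host =
        VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    const VkMemoryPropertyFlags wanted_sets[] = {
        host | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, // ideal: host-visible VRAM
        host,                                       // fallback: plain host memory
    };
    for (const VkMemoryPropertyFlags wanted : wanted_sets) {
        for (uint32_t i = 0; i < props.memoryTypeCount; ++i) {
            const bool allowed = (type_filter & (1u << i)) != 0;
            const bool matches = (props.memoryTypes[i].propertyFlags & wanted) == wanted;
            if (allowed && matches) {
                return i;
            }
        }
    }
    return std::nullopt;
}

The buffer size then becomes the minimum of PREFERRED_STREAM_BUFFER_SIZE and the heap size minus a small safety margin, so the 256 MiB request can no longer outgrow the heap that backs it.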
diff --git a/src/video_core/renderer_vulkan/vk_stream_buffer.h b/src/video_core/renderer_vulkan/vk_stream_buffer.h
index 58ce8b973..dfddf7ad6 100644
--- a/src/video_core/renderer_vulkan/vk_stream_buffer.h
+++ b/src/video_core/renderer_vulkan/vk_stream_buffer.h
@@ -56,8 +56,9 @@ private:
const VKDevice& device; ///< Vulkan device manager.
VKScheduler& scheduler; ///< Command scheduler.
- vk::Buffer buffer; ///< Mapped buffer.
- vk::DeviceMemory memory; ///< Memory allocation.
+ vk::Buffer buffer; ///< Mapped buffer.
+ vk::DeviceMemory memory; ///< Memory allocation.
+ u64 stream_buffer_size{}; ///< Stream buffer size.
u64 offset{}; ///< Buffer iterator.
u64 mapped_size{}; ///< Size reserved for the current copy.
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index de4c23120..55f43e61b 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -10,11 +10,9 @@
#include <variant>
#include <vector>
-#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "core/core.h"
-#include "core/memory.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/morton.h"
#include "video_core/renderer_vulkan/maxwell_to_vk.h"
@@ -26,7 +24,6 @@
#include "video_core/renderer_vulkan/vk_texture_cache.h"
#include "video_core/renderer_vulkan/wrapper.h"
#include "video_core/surface.h"
-#include "video_core/textures/convert.h"
namespace Vulkan {
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h
index 115595f28..f211ccb1e 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.h
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.h
@@ -7,19 +7,13 @@
#include <memory>
#include <unordered_map>
-#include "common/assert.h"
#include "common/common_types.h"
-#include "common/logging/log.h"
-#include "common/math_util.h"
-#include "video_core/gpu.h"
-#include "video_core/rasterizer_cache.h"
#include "video_core/renderer_vulkan/vk_image.h"
#include "video_core/renderer_vulkan/vk_memory_manager.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/wrapper.h"
#include "video_core/texture_cache/surface_base.h"
#include "video_core/texture_cache/texture_cache.h"
-#include "video_core/textures/decoders.h"
namespace Core {
class System;
diff --git a/src/video_core/renderer_vulkan/vk_update_descriptor.h b/src/video_core/renderer_vulkan/vk_update_descriptor.h
index 6ba2c9997..cc7e3dff4 100644
--- a/src/video_core/renderer_vulkan/vk_update_descriptor.h
+++ b/src/video_core/renderer_vulkan/vk_update_descriptor.h
@@ -4,7 +4,6 @@
#pragma once
-#include <type_traits>
#include <variant>
#include <boost/container/static_vector.hpp>
diff --git a/src/video_core/renderer_vulkan/wrapper.cpp b/src/video_core/renderer_vulkan/wrapper.cpp
index 9b94dfff1..2ce9b0626 100644
--- a/src/video_core/renderer_vulkan/wrapper.cpp
+++ b/src/video_core/renderer_vulkan/wrapper.cpp
@@ -2,6 +2,7 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include <algorithm>
#include <exception>
#include <memory>
#include <optional>
@@ -16,6 +17,23 @@ namespace Vulkan::vk {
namespace {
+void SortPhysicalDevices(std::vector<VkPhysicalDevice>& devices, const InstanceDispatch& dld) {
+ std::stable_sort(devices.begin(), devices.end(), [&](auto lhs, auto rhs) {
+ // This will call Vulkan more than needed, but these calls are cheap.
+ const auto lhs_properties = vk::PhysicalDevice(lhs, dld).GetProperties();
+ const auto rhs_properties = vk::PhysicalDevice(rhs, dld).GetProperties();
+
+ // Prefer discrete GPUs, Nvidia over AMD, AMD over Intel, Intel over the rest.
+ const bool preferred =
+ (lhs_properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU &&
+ rhs_properties.deviceType != VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU) ||
+ (lhs_properties.vendorID == 0x10DE && rhs_properties.vendorID != 0x10DE) ||
+ (lhs_properties.vendorID == 0x1002 && rhs_properties.vendorID != 0x1002) ||
+ (lhs_properties.vendorID == 0x8086 && rhs_properties.vendorID != 0x8086);
+ return !preferred;
+ });
+}
+
template <typename T>
bool Proc(T& result, const InstanceDispatch& dld, const char* proc_name,
VkInstance instance = nullptr) noexcept {
@@ -61,14 +79,15 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept {
X(vkCmdPipelineBarrier);
X(vkCmdPushConstants);
X(vkCmdSetBlendConstants);
- X(vkCmdSetCheckpointNV);
X(vkCmdSetDepthBias);
X(vkCmdSetDepthBounds);
+ X(vkCmdSetEvent);
X(vkCmdSetScissor);
X(vkCmdSetStencilCompareMask);
X(vkCmdSetStencilReference);
X(vkCmdSetStencilWriteMask);
X(vkCmdSetViewport);
+ X(vkCmdWaitEvents);
X(vkCreateBuffer);
X(vkCreateBufferView);
X(vkCreateCommandPool);
@@ -76,6 +95,7 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept {
X(vkCreateDescriptorPool);
X(vkCreateDescriptorSetLayout);
X(vkCreateDescriptorUpdateTemplateKHR);
+ X(vkCreateEvent);
X(vkCreateFence);
X(vkCreateFramebuffer);
X(vkCreateGraphicsPipelines);
@@ -94,6 +114,7 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept {
X(vkDestroyDescriptorPool);
X(vkDestroyDescriptorSetLayout);
X(vkDestroyDescriptorUpdateTemplateKHR);
+ X(vkDestroyEvent);
X(vkDestroyFence);
X(vkDestroyFramebuffer);
X(vkDestroyImage);
@@ -113,10 +134,10 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept {
X(vkFreeMemory);
X(vkGetBufferMemoryRequirements);
X(vkGetDeviceQueue);
+ X(vkGetEventStatus);
X(vkGetFenceStatus);
X(vkGetImageMemoryRequirements);
X(vkGetQueryPoolResults);
- X(vkGetQueueCheckpointDataNV);
X(vkMapMemory);
X(vkQueueSubmit);
X(vkResetFences);
@@ -271,6 +292,10 @@ void Destroy(VkDevice device, VkDeviceMemory handle, const DeviceDispatch& dld)
dld.vkFreeMemory(device, handle, nullptr);
}
+void Destroy(VkDevice device, VkEvent handle, const DeviceDispatch& dld) noexcept {
+ dld.vkDestroyEvent(device, handle, nullptr);
+}
+
void Destroy(VkDevice device, VkFence handle, const DeviceDispatch& dld) noexcept {
dld.vkDestroyFence(device, handle, nullptr);
}
@@ -383,7 +408,8 @@ std::optional<std::vector<VkPhysicalDevice>> Instance::EnumeratePhysicalDevices(
if (dld->vkEnumeratePhysicalDevices(handle, &num, physical_devices.data()) != VK_SUCCESS) {
return std::nullopt;
}
- return physical_devices;
+ SortPhysicalDevices(physical_devices, *dld);
+ return std::make_optional(std::move(physical_devices));
}
DebugCallback Instance::TryCreateDebugCallback(
@@ -409,17 +435,6 @@ DebugCallback Instance::TryCreateDebugCallback(
return DebugCallback(messenger, handle, *dld);
}
-std::vector<VkCheckpointDataNV> Queue::GetCheckpointDataNV(const DeviceDispatch& dld) const {
- if (!dld.vkGetQueueCheckpointDataNV) {
- return {};
- }
- u32 num;
- dld.vkGetQueueCheckpointDataNV(queue, &num, nullptr);
- std::vector<VkCheckpointDataNV> checkpoints(num);
- dld.vkGetQueueCheckpointDataNV(queue, &num, checkpoints.data());
- return checkpoints;
-}
-
void Buffer::BindMemory(VkDeviceMemory memory, VkDeviceSize offset) const {
Check(dld->vkBindBufferMemory(owner, handle, memory, offset));
}
@@ -469,12 +484,11 @@ std::vector<VkImage> SwapchainKHR::GetImages() const {
}
Device Device::Create(VkPhysicalDevice physical_device, Span<VkDeviceQueueCreateInfo> queues_ci,
- Span<const char*> enabled_extensions,
- const VkPhysicalDeviceFeatures2& enabled_features,
+ Span<const char*> enabled_extensions, const void* next,
DeviceDispatch& dld) noexcept {
VkDeviceCreateInfo ci;
ci.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
- ci.pNext = &enabled_features;
+ ci.pNext = next;
ci.flags = 0;
ci.queueCreateInfoCount = queues_ci.size();
ci.pQueueCreateInfos = queues_ci.data();
@@ -613,6 +627,16 @@ ShaderModule Device::CreateShaderModule(const VkShaderModuleCreateInfo& ci) cons
return ShaderModule(object, handle, *dld);
}
+Event Device::CreateEvent() const {
+ VkEventCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ VkEvent object;
+ Check(dld->vkCreateEvent(handle, &ci, nullptr, &object));
+ return Event(object, handle, *dld);
+}
+
SwapchainKHR Device::CreateSwapchainKHR(const VkSwapchainCreateInfoKHR& ci) const {
VkSwapchainKHR object;
Check(dld->vkCreateSwapchainKHR(handle, &ci, nullptr, &object));
diff --git a/src/video_core/renderer_vulkan/wrapper.h b/src/video_core/renderer_vulkan/wrapper.h
index fb3657819..98937a77a 100644
--- a/src/video_core/renderer_vulkan/wrapper.h
+++ b/src/video_core/renderer_vulkan/wrapper.h
@@ -197,14 +197,15 @@ struct DeviceDispatch : public InstanceDispatch {
PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier;
PFN_vkCmdPushConstants vkCmdPushConstants;
PFN_vkCmdSetBlendConstants vkCmdSetBlendConstants;
- PFN_vkCmdSetCheckpointNV vkCmdSetCheckpointNV;
PFN_vkCmdSetDepthBias vkCmdSetDepthBias;
PFN_vkCmdSetDepthBounds vkCmdSetDepthBounds;
+ PFN_vkCmdSetEvent vkCmdSetEvent;
PFN_vkCmdSetScissor vkCmdSetScissor;
PFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask;
PFN_vkCmdSetStencilReference vkCmdSetStencilReference;
PFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask;
PFN_vkCmdSetViewport vkCmdSetViewport;
+ PFN_vkCmdWaitEvents vkCmdWaitEvents;
PFN_vkCreateBuffer vkCreateBuffer;
PFN_vkCreateBufferView vkCreateBufferView;
PFN_vkCreateCommandPool vkCreateCommandPool;
@@ -212,6 +213,7 @@ struct DeviceDispatch : public InstanceDispatch {
PFN_vkCreateDescriptorPool vkCreateDescriptorPool;
PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout;
PFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR;
+ PFN_vkCreateEvent vkCreateEvent;
PFN_vkCreateFence vkCreateFence;
PFN_vkCreateFramebuffer vkCreateFramebuffer;
PFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines;
@@ -230,6 +232,7 @@ struct DeviceDispatch : public InstanceDispatch {
PFN_vkDestroyDescriptorPool vkDestroyDescriptorPool;
PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout;
PFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR;
+ PFN_vkDestroyEvent vkDestroyEvent;
PFN_vkDestroyFence vkDestroyFence;
PFN_vkDestroyFramebuffer vkDestroyFramebuffer;
PFN_vkDestroyImage vkDestroyImage;
@@ -249,10 +252,10 @@ struct DeviceDispatch : public InstanceDispatch {
PFN_vkFreeMemory vkFreeMemory;
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
PFN_vkGetDeviceQueue vkGetDeviceQueue;
+ PFN_vkGetEventStatus vkGetEventStatus;
PFN_vkGetFenceStatus vkGetFenceStatus;
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
PFN_vkGetQueryPoolResults vkGetQueryPoolResults;
- PFN_vkGetQueueCheckpointDataNV vkGetQueueCheckpointDataNV;
PFN_vkMapMemory vkMapMemory;
PFN_vkQueueSubmit vkQueueSubmit;
PFN_vkResetFences vkResetFences;
@@ -281,6 +284,7 @@ void Destroy(VkDevice, VkDescriptorPool, const DeviceDispatch&) noexcept;
void Destroy(VkDevice, VkDescriptorSetLayout, const DeviceDispatch&) noexcept;
void Destroy(VkDevice, VkDescriptorUpdateTemplateKHR, const DeviceDispatch&) noexcept;
void Destroy(VkDevice, VkDeviceMemory, const DeviceDispatch&) noexcept;
+void Destroy(VkDevice, VkEvent, const DeviceDispatch&) noexcept;
void Destroy(VkDevice, VkFence, const DeviceDispatch&) noexcept;
void Destroy(VkDevice, VkFramebuffer, const DeviceDispatch&) noexcept;
void Destroy(VkDevice, VkImage, const DeviceDispatch&) noexcept;
@@ -567,12 +571,8 @@ public:
/// Construct a queue handle.
constexpr Queue(VkQueue queue, const DeviceDispatch& dld) noexcept : queue{queue}, dld{&dld} {}
- /// Returns the checkpoint data.
- /// @note Returns an empty vector when the function pointer is not present.
- std::vector<VkCheckpointDataNV> GetCheckpointDataNV(const DeviceDispatch& dld) const;
-
- void Submit(Span<VkSubmitInfo> submit_infos, VkFence fence) const {
- Check(dld->vkQueueSubmit(queue, submit_infos.size(), submit_infos.data(), fence));
+ VkResult Submit(Span<VkSubmitInfo> submit_infos, VkFence fence) const noexcept {
+ return dld->vkQueueSubmit(queue, submit_infos.size(), submit_infos.data(), fence);
}
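Since Submit no longer asserts internally, callers are expected to inspect the returned VkResult themselves. A minimal caller-side sketch, assuming a `queue` wrapper and a `fence` handle are in scope (hypothetical call site):

    switch (const VkResult result = queue.Submit(submit_infos, *fence)) {
    case VK_SUCCESS:
        break;
    case VK_ERROR_DEVICE_LOST:
        // Handle device loss gracefully instead of aborting inside the wrapper.
        break;
    default:
        Check(result); // Fall back to the wrapper's usual error handling.
    }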
VkResult Present(const VkPresentInfoKHR& present_info) const noexcept {
@@ -654,13 +654,21 @@ public:
std::vector<VkImage> GetImages() const;
};
+class Event : public Handle<VkEvent, VkDevice, DeviceDispatch> {
+ using Handle<VkEvent, VkDevice, DeviceDispatch>::Handle;
+
+public:
+ VkResult GetStatus() const noexcept {
+ return dld->vkGetEventStatus(owner, handle);
+ }
+};
+
class Device : public Handle<VkDevice, NoOwner, DeviceDispatch> {
using Handle<VkDevice, NoOwner, DeviceDispatch>::Handle;
public:
static Device Create(VkPhysicalDevice physical_device, Span<VkDeviceQueueCreateInfo> queues_ci,
- Span<const char*> enabled_extensions,
- const VkPhysicalDeviceFeatures2& enabled_features,
+ Span<const char*> enabled_extensions, const void* next,
DeviceDispatch& dld) noexcept;
Queue GetQueue(u32 family_index) const noexcept;
@@ -702,6 +710,8 @@ public:
ShaderModule CreateShaderModule(const VkShaderModuleCreateInfo& ci) const;
+ Event CreateEvent() const;
+
SwapchainKHR CreateSwapchainKHR(const VkSwapchainCreateInfoKHR& ci) const;
DeviceMemory TryAllocateMemory(const VkMemoryAllocateInfo& ai) const noexcept;
@@ -734,18 +744,11 @@ public:
dld->vkResetQueryPoolEXT(handle, query_pool, first, count);
}
- void GetQueryResults(VkQueryPool query_pool, u32 first, u32 count, std::size_t data_size,
- void* data, VkDeviceSize stride, VkQueryResultFlags flags) const {
- Check(dld->vkGetQueryPoolResults(handle, query_pool, first, count, data_size, data, stride,
- flags));
- }
-
- template <typename T>
- T GetQueryResult(VkQueryPool query_pool, u32 first, VkQueryResultFlags flags) const {
- static_assert(std::is_trivially_copyable_v<T>);
- T value;
- GetQueryResults(query_pool, first, 1, sizeof(T), &value, sizeof(T), flags);
- return value;
+ VkResult GetQueryResults(VkQueryPool query_pool, u32 first, u32 count, std::size_t data_size,
+ void* data, VkDeviceSize stride, VkQueryResultFlags flags) const
+ noexcept {
+ return dld->vkGetQueryPoolResults(handle, query_pool, first, count, data_size, data, stride,
+ flags);
}
};
@@ -920,10 +923,6 @@ public:
dld->vkCmdPushConstants(handle, layout, flags, offset, size, values);
}
- void SetCheckpointNV(const void* checkpoint_marker) const noexcept {
- dld->vkCmdSetCheckpointNV(handle, checkpoint_marker);
- }
-
void SetViewport(u32 first, Span<VkViewport> viewports) const noexcept {
dld->vkCmdSetViewport(handle, first, viewports.size(), viewports.data());
}
@@ -956,6 +955,19 @@ public:
dld->vkCmdSetDepthBounds(handle, min_depth_bounds, max_depth_bounds);
}
+ void SetEvent(VkEvent event, VkPipelineStageFlags stage_flags) const noexcept {
+ dld->vkCmdSetEvent(handle, event, stage_flags);
+ }
+
+ void WaitEvents(Span<VkEvent> events, VkPipelineStageFlags src_stage_mask,
+ VkPipelineStageFlags dst_stage_mask, Span<VkMemoryBarrier> memory_barriers,
+ Span<VkBufferMemoryBarrier> buffer_barriers,
+ Span<VkImageMemoryBarrier> image_barriers) const noexcept {
+ dld->vkCmdWaitEvents(handle, events.size(), events.data(), src_stage_mask, dst_stage_mask,
+ memory_barriers.size(), memory_barriers.data(), buffer_barriers.size(),
+ buffer_barriers.data(), image_barriers.size(), image_barriers.data());
+ }
+
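Together with Device::CreateEvent and Event::GetStatus above, these entry points enable split-barrier style synchronization. A minimal usage sketch, assuming a `device` wrapper and a recording command buffer `cmdbuf` (hypothetical names; `*event` assumes the wrapper's usual Handle dereference):

    vk::Event event = device.CreateEvent();
    cmdbuf.SetEvent(*event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
    // ... submit the command buffer, then poll from the host:
    if (event.GetStatus() == VK_EVENT_SET) {
        // The GPU has executed past the SetEvent call.
    }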
void BindTransformFeedbackBuffersEXT(u32 first, u32 count, const VkBuffer* buffers,
const VkDeviceSize* offsets,
const VkDeviceSize* sizes) const noexcept {
diff --git a/src/video_core/shader/control_flow.cpp b/src/video_core/shader/control_flow.cpp
index 6d313963a..8d86020f6 100644
--- a/src/video_core/shader/control_flow.cpp
+++ b/src/video_core/shader/control_flow.cpp
@@ -13,6 +13,7 @@
#include "common/common_types.h"
#include "video_core/shader/ast.h"
#include "video_core/shader/control_flow.h"
+#include "video_core/shader/memory_util.h"
#include "video_core/shader/registry.h"
#include "video_core/shader/shader_ir.h"
@@ -115,17 +116,6 @@ Pred GetPredicate(u32 index, bool negated) {
return static_cast<Pred>(static_cast<u64>(index) + (negated ? 8ULL : 0ULL));
}
-/**
- * Returns whether the instruction at the specified offset is a 'sched' instruction.
- * Sched instructions always appear before a sequence of 3 instructions.
- */
-constexpr bool IsSchedInstruction(u32 offset, u32 main_offset) {
- constexpr u32 SchedPeriod = 4;
- u32 absolute_offset = offset - main_offset;
-
- return (absolute_offset % SchedPeriod) == 0;
-}
-
enum class ParseResult : u32 {
ControlCaught,
BlockEnd,
@@ -587,8 +577,6 @@ bool TryQuery(CFGRebuildState& state) {
return true;
}
-} // Anonymous namespace
-
void InsertBranch(ASTManager& mm, const BlockBranchInfo& branch_info) {
const auto get_expr = ([&](const Condition& cond) -> Expr {
Expr result{};
@@ -655,6 +643,8 @@ void DecompileShader(CFGRebuildState& state) {
state.manager->Decompile();
}
+} // Anonymous namespace
+
std::unique_ptr<ShaderCharacteristics> ScanFlow(const ProgramCode& program_code, u32 start_address,
const CompilerSettings& settings,
Registry& registry) {
diff --git a/src/video_core/shader/decode.cpp b/src/video_core/shader/decode.cpp
index 87ac9ac6c..a75a5cc63 100644
--- a/src/video_core/shader/decode.cpp
+++ b/src/video_core/shader/decode.cpp
@@ -13,6 +13,7 @@
#include "video_core/engines/shader_bytecode.h"
#include "video_core/engines/shader_header.h"
#include "video_core/shader/control_flow.h"
+#include "video_core/shader/memory_util.h"
#include "video_core/shader/node_helper.h"
#include "video_core/shader/shader_ir.h"
@@ -23,17 +24,6 @@ using Tegra::Shader::OpCode;
namespace {
-/**
- * Returns whether the instruction at the specified offset is a 'sched' instruction.
- * Sched instructions always appear before a sequence of 3 instructions.
- */
-constexpr bool IsSchedInstruction(u32 offset, u32 main_offset) {
- constexpr u32 SchedPeriod = 4;
- u32 absolute_offset = offset - main_offset;
-
- return (absolute_offset % SchedPeriod) == 0;
-}
-
void DeduceTextureHandlerSize(VideoCore::GuestDriverProfile& gpu_driver,
const std::list<Sampler>& used_samplers) {
if (gpu_driver.IsTextureHandlerSizeKnown() || used_samplers.size() <= 1) {
@@ -42,11 +32,11 @@ void DeduceTextureHandlerSize(VideoCore::GuestDriverProfile& gpu_driver,
u32 count{};
std::vector<u32> bound_offsets;
for (const auto& sampler : used_samplers) {
- if (sampler.IsBindless()) {
+ if (sampler.is_bindless) {
continue;
}
++count;
- bound_offsets.emplace_back(sampler.GetOffset());
+ bound_offsets.emplace_back(sampler.offset);
}
if (count > 1) {
gpu_driver.DeduceTextureHandlerSize(std::move(bound_offsets));
@@ -56,14 +46,14 @@ void DeduceTextureHandlerSize(VideoCore::GuestDriverProfile& gpu_driver,
std::optional<u32> TryDeduceSamplerSize(const Sampler& sampler_to_deduce,
VideoCore::GuestDriverProfile& gpu_driver,
const std::list<Sampler>& used_samplers) {
- const u32 base_offset = sampler_to_deduce.GetOffset();
+ const u32 base_offset = sampler_to_deduce.offset;
u32 max_offset{std::numeric_limits<u32>::max()};
for (const auto& sampler : used_samplers) {
- if (sampler.IsBindless()) {
+ if (sampler.is_bindless) {
continue;
}
- if (sampler.GetOffset() > base_offset) {
- max_offset = std::min(sampler.GetOffset(), max_offset);
+ if (sampler.offset > base_offset) {
+ max_offset = std::min(sampler.offset, max_offset);
}
}
if (max_offset == std::numeric_limits<u32>::max()) {
@@ -363,14 +353,14 @@ void ShaderIR::PostDecode() {
return;
}
for (auto& sampler : used_samplers) {
- if (!sampler.IsIndexed()) {
+ if (!sampler.is_indexed) {
continue;
}
if (const auto size = TryDeduceSamplerSize(sampler, gpu_driver, used_samplers)) {
- sampler.SetSize(*size);
+ sampler.size = *size;
} else {
LOG_CRITICAL(HW_GPU, "Failed to deduce size of indexed sampler");
- sampler.SetSize(1);
+ sampler.size = 1;
}
}
}
diff --git a/src/video_core/shader/decode/arithmetic_half.cpp b/src/video_core/shader/decode/arithmetic_half.cpp
index ee7d9a29d..a276aee44 100644
--- a/src/video_core/shader/decode/arithmetic_half.cpp
+++ b/src/video_core/shader/decode/arithmetic_half.cpp
@@ -19,22 +19,46 @@ u32 ShaderIR::DecodeArithmeticHalf(NodeBlock& bb, u32 pc) {
const Instruction instr = {program_code[pc]};
const auto opcode = OpCode::Decode(instr);
- if (opcode->get().GetId() == OpCode::Id::HADD2_C ||
- opcode->get().GetId() == OpCode::Id::HADD2_R) {
+ bool negate_a = false;
+ bool negate_b = false;
+ bool absolute_a = false;
+ bool absolute_b = false;
+
+ switch (opcode->get().GetId()) {
+ case OpCode::Id::HADD2_R:
if (instr.alu_half.ftz == 0) {
LOG_DEBUG(HW_GPU, "{} without FTZ is not implemented", opcode->get().GetName());
}
+ negate_a = ((instr.value >> 43) & 1) != 0;
+ negate_b = ((instr.value >> 31) & 1) != 0;
+ absolute_a = ((instr.value >> 44) & 1) != 0;
+ absolute_b = ((instr.value >> 30) & 1) != 0;
+ break;
+ case OpCode::Id::HADD2_C:
+ if (instr.alu_half.ftz == 0) {
+ LOG_DEBUG(HW_GPU, "{} without FTZ is not implemented", opcode->get().GetName());
+ }
+ negate_a = ((instr.value >> 43) & 1) != 0;
+ negate_b = ((instr.value >> 56) & 1) != 0;
+ absolute_a = ((instr.value >> 44) & 1) != 0;
+ absolute_b = ((instr.value >> 54) & 1) != 0;
+ break;
+ case OpCode::Id::HMUL2_R:
+ negate_a = ((instr.value >> 43) & 1) != 0;
+ absolute_a = ((instr.value >> 44) & 1) != 0;
+ absolute_b = ((instr.value >> 30) & 1) != 0;
+ break;
+ case OpCode::Id::HMUL2_C:
+ negate_b = ((instr.value >> 31) & 1) != 0;
+ absolute_a = ((instr.value >> 44) & 1) != 0;
+ absolute_b = ((instr.value >> 54) & 1) != 0;
+ break;
}
- const bool negate_a =
- opcode->get().GetId() != OpCode::Id::HMUL2_R && instr.alu_half.negate_a != 0;
- const bool negate_b =
- opcode->get().GetId() != OpCode::Id::HMUL2_C && instr.alu_half.negate_b != 0;
-
Node op_a = UnpackHalfFloat(GetRegister(instr.gpr8), instr.alu_half.type_a);
- op_a = GetOperandAbsNegHalf(op_a, instr.alu_half.abs_a, negate_a);
+ op_a = GetOperandAbsNegHalf(op_a, absolute_a, negate_a);
- auto [type_b, op_b] = [&]() -> std::tuple<HalfType, Node> {
+ auto [type_b, op_b] = [this, instr, opcode]() -> std::pair<HalfType, Node> {
switch (opcode->get().GetId()) {
case OpCode::Id::HADD2_C:
case OpCode::Id::HMUL2_C:
@@ -48,17 +72,16 @@ u32 ShaderIR::DecodeArithmeticHalf(NodeBlock& bb, u32 pc) {
}
}();
op_b = UnpackHalfFloat(op_b, type_b);
- // redeclaration to avoid a bug in clang with reusing local bindings in lambdas
- Node op_b_alt = GetOperandAbsNegHalf(op_b, instr.alu_half.abs_b, negate_b);
+ op_b = GetOperandAbsNegHalf(op_b, absolute_b, negate_b);
- Node value = [&]() {
+ Node value = [this, opcode, op_a, op_b = op_b] {
switch (opcode->get().GetId()) {
case OpCode::Id::HADD2_C:
case OpCode::Id::HADD2_R:
- return Operation(OperationCode::HAdd, PRECISE, op_a, op_b_alt);
+ return Operation(OperationCode::HAdd, PRECISE, op_a, op_b);
case OpCode::Id::HMUL2_C:
case OpCode::Id::HMUL2_R:
- return Operation(OperationCode::HMul, PRECISE, op_a, op_b_alt);
+ return Operation(OperationCode::HMul, PRECISE, op_a, op_b);
default:
UNIMPLEMENTED_MSG("Unhandled half float instruction: {}", opcode->get().GetName());
return Immediate(0);
diff --git a/src/video_core/shader/decode/arithmetic_integer.cpp b/src/video_core/shader/decode/arithmetic_integer.cpp
index 0f4c3103a..a041519b7 100644
--- a/src/video_core/shader/decode/arithmetic_integer.cpp
+++ b/src/video_core/shader/decode/arithmetic_integer.cpp
@@ -35,15 +35,38 @@ u32 ShaderIR::DecodeArithmeticInteger(NodeBlock& bb, u32 pc) {
case OpCode::Id::IADD_C:
case OpCode::Id::IADD_R:
case OpCode::Id::IADD_IMM: {
- UNIMPLEMENTED_IF_MSG(instr.alu.saturate_d, "IADD saturation not implemented");
+ UNIMPLEMENTED_IF_MSG(instr.alu.saturate_d, "IADD.SAT");
+ UNIMPLEMENTED_IF_MSG(instr.iadd.x && instr.generates_cc, "IADD.X Rd.CC");
op_a = GetOperandAbsNegInteger(op_a, false, instr.alu_integer.negate_a, true);
op_b = GetOperandAbsNegInteger(op_b, false, instr.alu_integer.negate_b, true);
- const Node value = Operation(OperationCode::IAdd, PRECISE, op_a, op_b);
+ Node value = Operation(OperationCode::UAdd, op_a, op_b);
- SetInternalFlagsFromInteger(bb, value, instr.generates_cc);
- SetRegister(bb, instr.gpr0, value);
+ if (instr.iadd.x) {
+ Node carry = GetInternalFlag(InternalFlag::Carry);
+ Node x = Operation(OperationCode::Select, std::move(carry), Immediate(1), Immediate(0));
+ value = Operation(OperationCode::UAdd, std::move(value), std::move(x));
+ }
+
+ if (instr.generates_cc) {
+ const Node i0 = Immediate(0);
+
+ Node zero = Operation(OperationCode::LogicalIEqual, value, i0);
+ Node sign = Operation(OperationCode::LogicalILessThan, value, i0);
+ Node carry = Operation(OperationCode::LogicalAddCarry, op_a, op_b);
+
+ Node pos_a = Operation(OperationCode::LogicalIGreaterThan, op_a, i0);
+ Node pos_b = Operation(OperationCode::LogicalIGreaterThan, op_b, i0);
+ Node pos = Operation(OperationCode::LogicalAnd, std::move(pos_a), std::move(pos_b));
+ Node overflow = Operation(OperationCode::LogicalAnd, pos, sign);
+
+ SetInternalFlag(bb, InternalFlag::Zero, std::move(zero));
+ SetInternalFlag(bb, InternalFlag::Sign, std::move(sign));
+ SetInternalFlag(bb, InternalFlag::Carry, std::move(carry));
+ SetInternalFlag(bb, InternalFlag::Overflow, std::move(overflow));
+ }
+ SetRegister(bb, instr.gpr0, std::move(value));
break;
}
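A scalar sketch of the condition-code semantics implemented above, mirroring the emitted operations (illustrative only; `a` and `b` stand for the two register operands):

    const u32 result = a + b;                       // OperationCode::UAdd
    const bool zero = result == 0;                  // LogicalIEqual(value, 0)
    const bool sign = static_cast<s32>(result) < 0; // LogicalILessThan(value, 0)
    const bool carry = result < a;                  // LogicalAddCarry(op_a, op_b)
    const bool overflow = static_cast<s32>(a) > 0 && static_cast<s32>(b) > 0 && sign;

For example, a = 0x7FFFFFFF and b = 1 yields zero = false, sign = true, carry = false and overflow = true.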
case OpCode::Id::IADD3_C:
@@ -249,8 +272,8 @@ u32 ShaderIR::DecodeArithmeticInteger(NodeBlock& bb, u32 pc) {
}
case OpCode::Id::LEA_IMM: {
const bool neg = instr.lea.imm.neg != 0;
- return {Immediate(static_cast<u32>(instr.lea.imm.entry_a)),
- GetOperandAbsNegInteger(GetRegister(instr.gpr8), false, neg, true),
+ return {GetOperandAbsNegInteger(GetRegister(instr.gpr8), false, neg, true),
+ Immediate(static_cast<u32>(instr.lea.imm.entry_a)),
Immediate(static_cast<u32>(instr.lea.imm.entry_b))};
}
case OpCode::Id::LEA_RZ: {
diff --git a/src/video_core/shader/decode/image.cpp b/src/video_core/shader/decode/image.cpp
index 85ee9aa5e..60b6ad72a 100644
--- a/src/video_core/shader/decode/image.cpp
+++ b/src/video_core/shader/decode/image.cpp
@@ -485,11 +485,10 @@ u32 ShaderIR::DecodeImage(NodeBlock& bb, u32 pc) {
Image& ShaderIR::GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type) {
const auto offset = static_cast<u32>(image.index.Value());
- const auto it =
- std::find_if(std::begin(used_images), std::end(used_images),
- [offset](const Image& entry) { return entry.GetOffset() == offset; });
+ const auto it = std::find_if(std::begin(used_images), std::end(used_images),
+ [offset](const Image& entry) { return entry.offset == offset; });
if (it != std::end(used_images)) {
- ASSERT(!it->IsBindless() && it->GetType() == it->GetType());
+ ASSERT(!it->is_bindless && it->type == type);
return *it;
}
@@ -505,13 +504,12 @@ Image& ShaderIR::GetBindlessImage(Tegra::Shader::Register reg, Tegra::Shader::Im
const auto buffer = std::get<1>(result);
const auto offset = std::get<2>(result);
- const auto it =
- std::find_if(std::begin(used_images), std::end(used_images),
- [buffer = buffer, offset = offset](const Image& entry) {
- return entry.GetBuffer() == buffer && entry.GetOffset() == offset;
- });
+ const auto it = std::find_if(std::begin(used_images), std::end(used_images),
+ [buffer, offset](const Image& entry) {
+ return entry.buffer == buffer && entry.offset == offset;
+ });
if (it != std::end(used_images)) {
- ASSERT(it->IsBindless() && it->GetType() == it->GetType());
+ ASSERT(it->is_bindless && it->type == type);
return *it;
}
diff --git a/src/video_core/shader/decode/memory.cpp b/src/video_core/shader/decode/memory.cpp
index 8112ead3e..9392f065b 100644
--- a/src/video_core/shader/decode/memory.cpp
+++ b/src/video_core/shader/decode/memory.cpp
@@ -479,7 +479,7 @@ std::tuple<Node, Node, GlobalMemoryBase> ShaderIR::TrackGlobalMemory(NodeBlock&
bb.push_back(Comment(fmt::format("Base address is c[0x{:x}][0x{:x}]", index, offset)));
const GlobalMemoryBase descriptor{index, offset};
- const auto& [entry, is_new] = used_global_memory.try_emplace(descriptor);
+ const auto& entry = used_global_memory.try_emplace(descriptor).first;
auto& usage = entry->second;
usage.is_written |= is_write;
usage.is_read |= is_read;
diff --git a/src/video_core/shader/decode/register_set_predicate.cpp b/src/video_core/shader/decode/register_set_predicate.cpp
index 8d54cce34..6116c31aa 100644
--- a/src/video_core/shader/decode/register_set_predicate.cpp
+++ b/src/video_core/shader/decode/register_set_predicate.cpp
@@ -2,6 +2,8 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include <utility>
+
#include "common/assert.h"
#include "common/common_types.h"
#include "video_core/engines/shader_bytecode.h"
@@ -10,20 +12,20 @@
namespace VideoCommon::Shader {
+using std::move;
using Tegra::Shader::Instruction;
using Tegra::Shader::OpCode;
namespace {
-constexpr u64 NUM_PROGRAMMABLE_PREDICATES = 7;
-}
+constexpr u64 NUM_CONDITION_CODES = 4;
+constexpr u64 NUM_PREDICATES = 7;
+} // namespace
u32 ShaderIR::DecodeRegisterSetPredicate(NodeBlock& bb, u32 pc) {
const Instruction instr = {program_code[pc]};
const auto opcode = OpCode::Decode(instr);
- UNIMPLEMENTED_IF(instr.p2r_r2p.mode != Tegra::Shader::R2pMode::Pr);
-
- const Node apply_mask = [&] {
+ Node apply_mask = [this, opcode, instr] {
switch (opcode->get().GetId()) {
case OpCode::Id::R2P_IMM:
case OpCode::Id::P2R_IMM:
@@ -34,39 +36,43 @@ u32 ShaderIR::DecodeRegisterSetPredicate(NodeBlock& bb, u32 pc) {
}
}();
- const auto offset = static_cast<u32>(instr.p2r_r2p.byte) * 8;
+ const u32 offset = static_cast<u32>(instr.p2r_r2p.byte) * 8;
+
+ const bool cc = instr.p2r_r2p.mode == Tegra::Shader::R2pMode::Cc;
+ const u64 num_entries = cc ? NUM_CONDITION_CODES : NUM_PREDICATES;
+ const auto get_entry = [this, cc](u64 entry) {
+ return cc ? GetInternalFlag(static_cast<InternalFlag>(entry)) : GetPredicate(entry);
+ };
switch (opcode->get().GetId()) {
case OpCode::Id::R2P_IMM: {
- const Node mask = GetRegister(instr.gpr8);
+ Node mask = GetRegister(instr.gpr8);
- for (u64 pred = 0; pred < NUM_PROGRAMMABLE_PREDICATES; ++pred) {
- const auto shift = static_cast<u32>(pred);
+ for (u64 entry = 0; entry < num_entries; ++entry) {
+ const u32 shift = static_cast<u32>(entry);
- const Node apply_compare = BitfieldExtract(apply_mask, shift, 1);
- const Node condition =
- Operation(OperationCode::LogicalUNotEqual, apply_compare, Immediate(0));
+ Node apply = BitfieldExtract(apply_mask, shift, 1);
+ Node condition = Operation(OperationCode::LogicalUNotEqual, apply, Immediate(0));
- const Node value_compare = BitfieldExtract(mask, offset + shift, 1);
- const Node value =
- Operation(OperationCode::LogicalUNotEqual, value_compare, Immediate(0));
+ Node compare = BitfieldExtract(mask, offset + shift, 1);
+ Node value = Operation(OperationCode::LogicalUNotEqual, move(compare), Immediate(0));
- const Node code = Operation(OperationCode::LogicalAssign, GetPredicate(pred), value);
- bb.push_back(Conditional(condition, {code}));
+ Node code = Operation(OperationCode::LogicalAssign, get_entry(entry), move(value));
+ bb.push_back(Conditional(condition, {move(code)}));
}
break;
}
case OpCode::Id::P2R_IMM: {
Node value = Immediate(0);
- for (u64 pred = 0; pred < NUM_PROGRAMMABLE_PREDICATES; ++pred) {
- Node bit = Operation(OperationCode::Select, GetPredicate(pred), Immediate(1U << pred),
+ for (u64 entry = 0; entry < num_entries; ++entry) {
+ Node bit = Operation(OperationCode::Select, get_entry(entry), Immediate(1U << entry),
Immediate(0));
- value = Operation(OperationCode::UBitwiseOr, std::move(value), std::move(bit));
+ value = Operation(OperationCode::UBitwiseOr, move(value), move(bit));
}
- value = Operation(OperationCode::UBitwiseAnd, std::move(value), apply_mask);
- value = BitfieldInsert(GetRegister(instr.gpr8), std::move(value), offset, 8);
+ value = Operation(OperationCode::UBitwiseAnd, move(value), apply_mask);
+ value = BitfieldInsert(GetRegister(instr.gpr8), move(value), offset, 8);
- SetRegister(bb, instr.gpr0, std::move(value));
+ SetRegister(bb, instr.gpr0, move(value));
break;
}
default:
diff --git a/src/video_core/shader/decode/texture.cpp b/src/video_core/shader/decode/texture.cpp
index 6c4a1358b..8f0bb996e 100644
--- a/src/video_core/shader/decode/texture.cpp
+++ b/src/video_core/shader/decode/texture.cpp
@@ -139,15 +139,15 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
}
const Node component = Immediate(static_cast<u32>(instr.tld4s.component));
- const SamplerInfo info{TextureType::Texture2D, false, is_depth_compare};
- const Sampler& sampler = *GetSampler(instr.sampler, info);
+ SamplerInfo info;
+ info.is_shadow = is_depth_compare;
+ const std::optional<Sampler> sampler = GetSampler(instr.sampler, info);
Node4 values;
for (u32 element = 0; element < values.size(); ++element) {
- auto coords_copy = coords;
- MetaTexture meta{sampler, {}, depth_compare, aoffi, {}, {},
- {}, {}, component, element, {}};
- values[element] = Operation(OperationCode::TextureGather, meta, std::move(coords_copy));
+ MetaTexture meta{*sampler, {}, depth_compare, aoffi, {}, {},
+ {}, {}, component, element, {}};
+ values[element] = Operation(OperationCode::TextureGather, meta, coords);
}
if (instr.tld4s.fp16_flag) {
@@ -165,19 +165,20 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
"AOFFI is not implemented");
const bool is_array = instr.txd.is_array != 0;
- u64 base_reg = instr.gpr8.Value();
const auto derivate_reg = instr.gpr20.Value();
const auto texture_type = instr.txd.texture_type.Value();
const auto coord_count = GetCoordCount(texture_type);
- Node index_var{};
- const Sampler* sampler =
- is_bindless ? GetBindlessSampler(base_reg, index_var, {{texture_type, is_array, false}})
- : GetSampler(instr.sampler, {{texture_type, is_array, false}});
+ u64 base_reg = instr.gpr8.Value();
+ Node index_var;
+ SamplerInfo info;
+ info.type = texture_type;
+ info.is_array = is_array;
+ const std::optional<Sampler> sampler = is_bindless
+ ? GetBindlessSampler(base_reg, info, index_var)
+ : GetSampler(instr.sampler, info);
Node4 values;
- if (sampler == nullptr) {
- for (u32 element = 0; element < values.size(); ++element) {
- values[element] = Immediate(0);
- }
+ if (!sampler) {
+ std::generate(values.begin(), values.end(), [this] { return Immediate(0); });
WriteTexInstructionFloat(bb, instr, values);
break;
}
@@ -215,14 +216,12 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
is_bindless = true;
[[fallthrough]];
case OpCode::Id::TXQ: {
- // TODO: The new commits on the texture refactor, change the way samplers work.
- // Sadly, not all texture instructions specify the type of texture their sampler
- // uses. This must be fixed at a later instance.
- Node index_var{};
- const Sampler* sampler =
- is_bindless ? GetBindlessSampler(instr.gpr8, index_var) : GetSampler(instr.sampler);
-
- if (sampler == nullptr) {
+ Node index_var;
+ const std::optional<Sampler> sampler = is_bindless
+ ? GetBindlessSampler(instr.gpr8, {}, index_var)
+ : GetSampler(instr.sampler, {});
+
+ if (!sampler) {
u32 indexer = 0;
for (u32 element = 0; element < 4; ++element) {
if (!instr.txq.IsComponentEnabled(element)) {
@@ -268,13 +267,17 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
UNIMPLEMENTED_IF_MSG(instr.tmml.UsesMiscMode(Tegra::Shader::TextureMiscMode::NDV),
"NDV is not implemented");
- auto texture_type = instr.tmml.texture_type.Value();
+ const auto texture_type = instr.tmml.texture_type.Value();
const bool is_array = instr.tmml.array != 0;
- Node index_var{};
- const Sampler* sampler =
- is_bindless ? GetBindlessSampler(instr.gpr20, index_var) : GetSampler(instr.sampler);
-
- if (sampler == nullptr) {
+ SamplerInfo info;
+ info.type = texture_type;
+ info.is_array = is_array;
+ Node index_var;
+ const std::optional<Sampler> sampler =
+ is_bindless ? GetBindlessSampler(instr.gpr20, info, index_var)
+ : GetSampler(instr.sampler, info);
+
+ if (!sampler) {
u32 indexer = 0;
for (u32 element = 0; element < 2; ++element) {
if (!instr.tmml.IsComponentEnabled(element)) {
@@ -301,12 +304,11 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
coords.push_back(GetRegister(instr.gpr8.Value() + 1));
break;
default:
- UNIMPLEMENTED_MSG("Unhandled texture type {}", static_cast<u32>(texture_type));
+ UNIMPLEMENTED_MSG("Unhandled texture type {}", static_cast<int>(texture_type));
// Fallback to interpreting as a 2D texture for now
coords.push_back(GetRegister(instr.gpr8.Value() + 0));
coords.push_back(GetRegister(instr.gpr8.Value() + 1));
- texture_type = TextureType::Texture2D;
}
u32 indexer = 0;
for (u32 element = 0; element < 2; ++element) {
@@ -355,98 +357,103 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
return pc;
}
-ShaderIR::SamplerInfo ShaderIR::GetSamplerInfo(std::optional<SamplerInfo> sampler_info, u32 offset,
+ShaderIR::SamplerInfo ShaderIR::GetSamplerInfo(SamplerInfo info, u32 offset,
std::optional<u32> buffer) {
- if (sampler_info) {
- return *sampler_info;
+ if (info.IsComplete()) {
+ return info;
}
const auto sampler = buffer ? registry.ObtainBindlessSampler(*buffer, offset)
: registry.ObtainBoundSampler(offset);
if (!sampler) {
LOG_WARNING(HW_GPU, "Unknown sampler info");
- return SamplerInfo{TextureType::Texture2D, false, false, false};
- }
- return SamplerInfo{sampler->texture_type, sampler->is_array != 0, sampler->is_shadow != 0,
- sampler->is_buffer != 0};
+ info.type = info.type.value_or(Tegra::Shader::TextureType::Texture2D);
+ info.is_array = info.is_array.value_or(false);
+ info.is_shadow = info.is_shadow.value_or(false);
+ info.is_buffer = info.is_buffer.value_or(false);
+ return info;
+ }
+ info.type = info.type.value_or(sampler->texture_type);
+ info.is_array = info.is_array.value_or(sampler->is_array != 0);
+ info.is_shadow = info.is_shadow.value_or(sampler->is_shadow != 0);
+ info.is_buffer = info.is_buffer.value_or(sampler->is_buffer != 0);
+ return info;
}
-const Sampler* ShaderIR::GetSampler(const Tegra::Shader::Sampler& sampler,
- std::optional<SamplerInfo> sampler_info) {
+std::optional<Sampler> ShaderIR::GetSampler(Tegra::Shader::Sampler sampler,
+ SamplerInfo sampler_info) {
const auto offset = static_cast<u32>(sampler.index.Value());
const auto info = GetSamplerInfo(sampler_info, offset);
// If this sampler has already been used, return the existing mapping.
- const auto it =
- std::find_if(used_samplers.begin(), used_samplers.end(),
- [offset](const Sampler& entry) { return entry.GetOffset() == offset; });
+ const auto it = std::find_if(used_samplers.begin(), used_samplers.end(),
+ [offset](const Sampler& entry) { return entry.offset == offset; });
if (it != used_samplers.end()) {
- ASSERT(!it->IsBindless() && it->GetType() == info.type && it->IsArray() == info.is_array &&
- it->IsShadow() == info.is_shadow && it->IsBuffer() == info.is_buffer);
- return &*it;
+ ASSERT(!it->is_bindless && it->type == info.type && it->is_array == info.is_array &&
+ it->is_shadow == info.is_shadow && it->is_buffer == info.is_buffer);
+ return *it;
}
// Otherwise create a new mapping for this sampler
const auto next_index = static_cast<u32>(used_samplers.size());
- return &used_samplers.emplace_back(next_index, offset, info.type, info.is_array, info.is_shadow,
- info.is_buffer, false);
+ return used_samplers.emplace_back(next_index, offset, *info.type, *info.is_array,
+ *info.is_shadow, *info.is_buffer, false);
}
-const Sampler* ShaderIR::GetBindlessSampler(Tegra::Shader::Register reg, Node& index_var,
- std::optional<SamplerInfo> sampler_info) {
+std::optional<Sampler> ShaderIR::GetBindlessSampler(Tegra::Shader::Register reg, SamplerInfo info,
+ Node& index_var) {
const Node sampler_register = GetRegister(reg);
const auto [base_node, tracked_sampler_info] =
TrackBindlessSampler(sampler_register, global_code, static_cast<s64>(global_code.size()));
ASSERT(base_node != nullptr);
if (base_node == nullptr) {
- return nullptr;
+ return std::nullopt;
}
if (const auto bindless_sampler_info =
std::get_if<BindlessSamplerNode>(&*tracked_sampler_info)) {
const u32 buffer = bindless_sampler_info->GetIndex();
const u32 offset = bindless_sampler_info->GetOffset();
- const auto info = GetSamplerInfo(sampler_info, offset, buffer);
+ info = GetSamplerInfo(info, offset, buffer);
// If this sampler has already been used, return the existing mapping.
- const auto it =
- std::find_if(used_samplers.begin(), used_samplers.end(),
- [buffer = buffer, offset = offset](const Sampler& entry) {
- return entry.GetBuffer() == buffer && entry.GetOffset() == offset;
- });
+ const auto it = std::find_if(used_samplers.begin(), used_samplers.end(),
+ [buffer = buffer, offset = offset](const Sampler& entry) {
+ return entry.buffer == buffer && entry.offset == offset;
+ });
if (it != used_samplers.end()) {
- ASSERT(it->IsBindless() && it->GetType() == info.type &&
- it->IsArray() == info.is_array && it->IsShadow() == info.is_shadow);
- return &*it;
+ ASSERT(it->is_bindless && it->type == info.type && it->is_array == info.is_array &&
+ it->is_shadow == info.is_shadow);
+ return *it;
}
// Otherwise create a new mapping for this sampler
const auto next_index = static_cast<u32>(used_samplers.size());
- return &used_samplers.emplace_back(next_index, offset, buffer, info.type, info.is_array,
- info.is_shadow, info.is_buffer, false);
- } else if (const auto array_sampler_info =
- std::get_if<ArraySamplerNode>(&*tracked_sampler_info)) {
+ return used_samplers.emplace_back(next_index, offset, buffer, *info.type, *info.is_array,
+ *info.is_shadow, *info.is_buffer, false);
+ }
+ if (const auto array_sampler_info = std::get_if<ArraySamplerNode>(&*tracked_sampler_info)) {
const u32 base_offset = array_sampler_info->GetBaseOffset() / 4;
index_var = GetCustomVariable(array_sampler_info->GetIndexVar());
- const auto info = GetSamplerInfo(sampler_info, base_offset);
+ info = GetSamplerInfo(info, base_offset);
// If this sampler has already been used, return the existing mapping.
const auto it = std::find_if(
used_samplers.begin(), used_samplers.end(),
- [base_offset](const Sampler& entry) { return entry.GetOffset() == base_offset; });
+ [base_offset](const Sampler& entry) { return entry.offset == base_offset; });
if (it != used_samplers.end()) {
- ASSERT(!it->IsBindless() && it->GetType() == info.type &&
- it->IsArray() == info.is_array && it->IsShadow() == info.is_shadow &&
- it->IsBuffer() == info.is_buffer && it->IsIndexed());
- return &*it;
+ ASSERT(!it->is_bindless && it->type == info.type && it->is_array == info.is_array &&
+ it->is_shadow == info.is_shadow && it->is_buffer == info.is_buffer &&
+ it->is_indexed);
+ return *it;
}
uses_indexed_samplers = true;
// Otherwise create a new mapping for this sampler
const auto next_index = static_cast<u32>(used_samplers.size());
- return &used_samplers.emplace_back(next_index, base_offset, info.type, info.is_array,
- info.is_shadow, info.is_buffer, true);
+ return used_samplers.emplace_back(next_index, base_offset, *info.type, *info.is_array,
+ *info.is_shadow, *info.is_buffer, true);
}
- return nullptr;
+ return std::nullopt;
}
void ShaderIR::WriteTexInstructionFloat(NodeBlock& bb, Instruction instr, const Node4& components) {
@@ -531,10 +538,16 @@ Node4 ShaderIR::GetTextureCode(Instruction instr, TextureType texture_type,
ASSERT_MSG(texture_type != TextureType::Texture3D || !is_array || !is_shadow,
"Illegal texture type");
- const SamplerInfo info{texture_type, is_array, is_shadow, false};
+ SamplerInfo info;
+ info.type = texture_type;
+ info.is_array = is_array;
+ info.is_shadow = is_shadow;
+ info.is_buffer = false;
+
Node index_var;
- const Sampler* sampler = is_bindless ? GetBindlessSampler(*bindless_reg, index_var, info)
- : GetSampler(instr.sampler, info);
+ const std::optional<Sampler> sampler = is_bindless
+ ? GetBindlessSampler(*bindless_reg, info, index_var)
+ : GetSampler(instr.sampler, info);
if (!sampler) {
return {Immediate(0), Immediate(0), Immediate(0), Immediate(0)};
}
@@ -593,8 +606,9 @@ Node4 ShaderIR::GetTexCode(Instruction instr, TextureType texture_type,
++parameter_register;
}
- const auto [coord_count, total_coord_count] = ValidateAndGetCoordinateElement(
- texture_type, depth_compare, is_array, lod_bias_enabled, 4, 5);
+ const auto coord_counts = ValidateAndGetCoordinateElement(texture_type, depth_compare, is_array,
+ lod_bias_enabled, 4, 5);
+ const auto coord_count = std::get<0>(coord_counts);
// If enabled, the array index is always stored in the gpr8 field
const u64 array_register = instr.gpr8.Value();
// The first coordinate index is gpr8, or gpr8 + 1 when arrays are used
@@ -632,8 +646,10 @@ Node4 ShaderIR::GetTexsCode(Instruction instr, TextureType texture_type,
const bool lod_bias_enabled =
(process_mode != TextureProcessMode::None && process_mode != TextureProcessMode::LZ);
- const auto [coord_count, total_coord_count] = ValidateAndGetCoordinateElement(
- texture_type, depth_compare, is_array, lod_bias_enabled, 4, 4);
+ const auto coord_counts = ValidateAndGetCoordinateElement(texture_type, depth_compare, is_array,
+ lod_bias_enabled, 4, 4);
+ const auto coord_count = std::get<0>(coord_counts);
+
// If enabled, the array index is always stored in the gpr8 field
const u64 array_register = instr.gpr8.Value();
// The first coordinate index is stored in gpr8, or (gpr8 + 1) when arrays are used
@@ -682,12 +698,17 @@ Node4 ShaderIR::GetTld4Code(Instruction instr, TextureType texture_type, bool de
u64 parameter_register = instr.gpr20.Value();
- const SamplerInfo info{texture_type, is_array, depth_compare, false};
- Node index_var{};
- const Sampler* sampler = is_bindless ? GetBindlessSampler(parameter_register++, index_var, info)
- : GetSampler(instr.sampler, info);
+ SamplerInfo info;
+ info.type = texture_type;
+ info.is_array = is_array;
+ info.is_shadow = depth_compare;
+
+ Node index_var;
+ const std::optional<Sampler> sampler =
+ is_bindless ? GetBindlessSampler(parameter_register++, info, index_var)
+ : GetSampler(instr.sampler, info);
Node4 values;
- if (sampler == nullptr) {
+ if (!sampler) {
for (u32 element = 0; element < values.size(); ++element) {
values[element] = Immediate(0);
}
@@ -742,12 +763,12 @@ Node4 ShaderIR::GetTldCode(Tegra::Shader::Instruction instr) {
// const Node aoffi_register{is_aoffi ? GetRegister(gpr20_cursor++) : nullptr};
// const Node multisample{is_multisample ? GetRegister(gpr20_cursor++) : nullptr};
- const auto& sampler = *GetSampler(instr.sampler);
+ const std::optional<Sampler> sampler = GetSampler(instr.sampler, {});
Node4 values;
for (u32 element = 0; element < values.size(); ++element) {
auto coords_copy = coords;
- MetaTexture meta{sampler, array_register, {}, {}, {}, {}, {}, lod, {}, element, {}};
+ MetaTexture meta{*sampler, array_register, {}, {}, {}, {}, {}, lod, {}, element, {}};
values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy));
}
@@ -755,7 +776,11 @@ Node4 ShaderIR::GetTldCode(Tegra::Shader::Instruction instr) {
}
Node4 ShaderIR::GetTldsCode(Instruction instr, TextureType texture_type, bool is_array) {
- const Sampler& sampler = *GetSampler(instr.sampler);
+ SamplerInfo info;
+ info.type = texture_type;
+ info.is_array = is_array;
+ info.is_shadow = false;
+ const std::optional<Sampler> sampler = GetSampler(instr.sampler, info);
const std::size_t type_coord_count = GetCoordCount(texture_type);
const bool lod_enabled = instr.tlds.GetTextureProcessMode() == TextureProcessMode::LL;
@@ -783,7 +808,7 @@ Node4 ShaderIR::GetTldsCode(Instruction instr, TextureType texture_type, bool is
Node4 values;
for (u32 element = 0; element < values.size(); ++element) {
auto coords_copy = coords;
- MetaTexture meta{sampler, array, {}, {}, {}, {}, {}, lod, {}, element, {}};
+ MetaTexture meta{*sampler, array, {}, {}, {}, {}, {}, lod, {}, element, {}};
values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy));
}
return values;
diff --git a/src/video_core/shader/memory_util.cpp b/src/video_core/shader/memory_util.cpp
new file mode 100644
index 000000000..074f21691
--- /dev/null
+++ b/src/video_core/shader/memory_util.cpp
@@ -0,0 +1,77 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <algorithm>
+#include <cstddef>
+
+#include <boost/container_hash/hash.hpp>
+
+#include "common/common_types.h"
+#include "core/core.h"
+#include "video_core/engines/maxwell_3d.h"
+#include "video_core/memory_manager.h"
+#include "video_core/shader/memory_util.h"
+#include "video_core/shader/shader_ir.h"
+
+namespace VideoCommon::Shader {
+
+GPUVAddr GetShaderAddress(Core::System& system,
+ Tegra::Engines::Maxwell3D::Regs::ShaderProgram program) {
+ const auto& gpu{system.GPU().Maxwell3D()};
+ const auto& shader_config{gpu.regs.shader_config[static_cast<std::size_t>(program)]};
+ return gpu.regs.code_address.CodeAddress() + shader_config.offset;
+}
+
+bool IsSchedInstruction(std::size_t offset, std::size_t main_offset) {
+ // Sched instructions appear once every 4 instructions.
+ constexpr std::size_t SchedPeriod = 4;
+ const std::size_t absolute_offset = offset - main_offset;
+ return (absolute_offset % SchedPeriod) == 0;
+}
+
+std::size_t CalculateProgramSize(const ProgramCode& program, bool is_compute) {
+ // This is the encoded version of BRA that jumps to itself. All Nvidia
+ // shaders end with one.
+ static constexpr u64 SELF_JUMPING_BRANCH = 0xE2400FFFFF07000FULL;
+ static constexpr u64 MASK = 0xFFFFFFFFFF7FFFFFULL;
+
+ const std::size_t start_offset = is_compute ? KERNEL_MAIN_OFFSET : STAGE_MAIN_OFFSET;
+ std::size_t offset = start_offset;
+ while (offset < program.size()) {
+ const u64 instruction = program[offset];
+ if (!IsSchedInstruction(offset, start_offset)) {
+ if ((instruction & MASK) == SELF_JUMPING_BRANCH) {
+ // End on Maxwell's "nop" instruction
+ break;
+ }
+ if (instruction == 0) {
+ break;
+ }
+ }
+ ++offset;
+ }
+ // The last instruction is included in the program size
+ return std::min(offset + 1, program.size());
+}
+
+ProgramCode GetShaderCode(Tegra::MemoryManager& memory_manager, GPUVAddr gpu_addr,
+ const u8* host_ptr, bool is_compute) {
+ ProgramCode code(VideoCommon::Shader::MAX_PROGRAM_LENGTH);
+ ASSERT_OR_EXECUTE(host_ptr != nullptr, { return code; });
+ memory_manager.ReadBlockUnsafe(gpu_addr, code.data(), code.size() * sizeof(u64));
+ code.resize(CalculateProgramSize(code, is_compute));
+ return code;
+}
+
+u64 GetUniqueIdentifier(Tegra::Engines::ShaderType shader_type, bool is_a, const ProgramCode& code,
+ const ProgramCode& code_b) {
+ u64 unique_identifier = boost::hash_value(code);
+ if (is_a) {
+ // VertexA programs are paired with a second program (code_b), so hash both streams
+ boost::hash_combine(unique_identifier, boost::hash_value(code_b));
+ }
+ return unique_identifier;
+}
+
+} // namespace VideoCommon::Shader
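A minimal sketch of how these helpers compose at a call site (hypothetical names; `memory_manager`, `gpu_addr` and `host_ptr` come from the caller):

    const ProgramCode code =
        GetShaderCode(memory_manager, gpu_addr, host_ptr, /*is_compute=*/false);
    const u64 uid =
        GetUniqueIdentifier(Tegra::Engines::ShaderType::Fragment, /*is_a=*/false, code);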
diff --git a/src/video_core/shader/memory_util.h b/src/video_core/shader/memory_util.h
new file mode 100644
index 000000000..be90d24fd
--- /dev/null
+++ b/src/video_core/shader/memory_util.h
@@ -0,0 +1,47 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <cstddef>
+#include <vector>
+
+#include "common/common_types.h"
+#include "video_core/engines/maxwell_3d.h"
+#include "video_core/engines/shader_type.h"
+
+namespace Core {
+class System;
+}
+
+namespace Tegra {
+class MemoryManager;
+}
+
+namespace VideoCommon::Shader {
+
+using ProgramCode = std::vector<u64>;
+
+constexpr u32 STAGE_MAIN_OFFSET = 10;
+constexpr u32 KERNEL_MAIN_OFFSET = 0;
+
+/// Gets the address for the specified shader stage program
+GPUVAddr GetShaderAddress(Core::System& system,
+ Tegra::Engines::Maxwell3D::Regs::ShaderProgram program);
+
+/// Returns whether the instruction at the given offset is a scheduler (sched) instruction
+bool IsSchedInstruction(std::size_t offset, std::size_t main_offset);
+
+/// Calculates the size of a program stream
+std::size_t CalculateProgramSize(const ProgramCode& program, bool is_compute);
+
+/// Gets the shader program code from memory for the specified address
+ProgramCode GetShaderCode(Tegra::MemoryManager& memory_manager, GPUVAddr gpu_addr,
+ const u8* host_ptr, bool is_compute);
+
+/// Hashes one (or two) program streams
+u64 GetUniqueIdentifier(Tegra::Engines::ShaderType shader_type, bool is_a, const ProgramCode& code,
+ const ProgramCode& code_b = {});
+
+} // namespace VideoCommon::Shader
diff --git a/src/video_core/shader/node.h b/src/video_core/shader/node.h
index 3eee961f5..601c822d2 100644
--- a/src/video_core/shader/node.h
+++ b/src/video_core/shader/node.h
@@ -132,6 +132,8 @@ enum class OperationCode {
LogicalUNotEqual, /// (uint a, uint b) -> bool
LogicalUGreaterEqual, /// (uint a, uint b) -> bool
+ LogicalAddCarry, /// (uint a, uint b) -> bool
+
Logical2HLessThan, /// (MetaHalfArithmetic, f16vec2 a, f16vec2) -> bool2
Logical2HEqual, /// (MetaHalfArithmetic, f16vec2 a, f16vec2) -> bool2
Logical2HLessEqual, /// (MetaHalfArithmetic, f16vec2 a, f16vec2) -> bool2
@@ -265,76 +267,30 @@ class ArraySamplerNode;
using TrackSamplerData = std::variant<BindlessSamplerNode, ArraySamplerNode>;
using TrackSampler = std::shared_ptr<TrackSamplerData>;
-class Sampler {
-public:
- /// This constructor is for bound samplers
+struct Sampler {
+ /// Bound samplers constructor
constexpr explicit Sampler(u32 index, u32 offset, Tegra::Shader::TextureType type,
bool is_array, bool is_shadow, bool is_buffer, bool is_indexed)
: index{index}, offset{offset}, type{type}, is_array{is_array}, is_shadow{is_shadow},
is_buffer{is_buffer}, is_indexed{is_indexed} {}
- /// This constructor is for bindless samplers
+ /// Bindless samplers constructor
constexpr explicit Sampler(u32 index, u32 offset, u32 buffer, Tegra::Shader::TextureType type,
bool is_array, bool is_shadow, bool is_buffer, bool is_indexed)
: index{index}, offset{offset}, buffer{buffer}, type{type}, is_array{is_array},
is_shadow{is_shadow}, is_buffer{is_buffer}, is_bindless{true}, is_indexed{is_indexed} {}
- constexpr u32 GetIndex() const {
- return index;
- }
-
- constexpr u32 GetOffset() const {
- return offset;
- }
-
- constexpr u32 GetBuffer() const {
- return buffer;
- }
-
- constexpr Tegra::Shader::TextureType GetType() const {
- return type;
- }
-
- constexpr bool IsArray() const {
- return is_array;
- }
-
- constexpr bool IsShadow() const {
- return is_shadow;
- }
-
- constexpr bool IsBuffer() const {
- return is_buffer;
- }
-
- constexpr bool IsBindless() const {
- return is_bindless;
- }
-
- constexpr bool IsIndexed() const {
- return is_indexed;
- }
-
- constexpr u32 Size() const {
- return size;
- }
-
- constexpr void SetSize(u32 new_size) {
- size = new_size;
- }
-
-private:
- u32 index{}; ///< Emulated index given for the this sampler.
- u32 offset{}; ///< Offset in the const buffer from where the sampler is being read.
- u32 buffer{}; ///< Buffer where the bindless sampler is being read (unused on bound samplers).
- u32 size{1}; ///< Size of the sampler.
+ u32 index = 0; ///< Emulated index given for this sampler.
+ u32 offset = 0; ///< Offset in the const buffer from where the sampler is being read.
+ u32 buffer = 0; ///< Buffer where the bindless sampler is being read (unused on bound samplers).
+ u32 size = 1; ///< Size of the sampler.
Tegra::Shader::TextureType type{}; ///< The type used to sample this texture (Texture2D, etc)
- bool is_array{}; ///< Whether the texture is being sampled as an array texture or not.
- bool is_shadow{}; ///< Whether the texture is being sampled as a depth texture or not.
- bool is_buffer{}; ///< Whether the texture is a texture buffer without sampler.
- bool is_bindless{}; ///< Whether this sampler belongs to a bindless texture or not.
- bool is_indexed{}; ///< Whether this sampler is an indexed array of textures.
+ bool is_array = false; ///< Whether the texture is being sampled as an array texture or not.
+ bool is_shadow = false; ///< Whether the texture is being sampled as a depth texture or not.
+ bool is_buffer = false; ///< Whether the texture is a texture buffer without sampler.
+ bool is_bindless = false; ///< Whether this sampler belongs to a bindless texture or not.
+ bool is_indexed = false; ///< Whether this sampler is an indexed array of textures.
};
/// Represents a tracked bindless sampler into a direct const buffer
@@ -379,13 +335,13 @@ private:
u32 offset;
};
-class Image final {
+struct Image {
public:
- /// This constructor is for bound images
+ /// Bound images constructor
constexpr explicit Image(u32 index, u32 offset, Tegra::Shader::ImageType type)
: index{index}, offset{offset}, type{type} {}
- /// This constructor is for bindless samplers
+ /// Bindless images constructor
constexpr explicit Image(u32 index, u32 offset, u32 buffer, Tegra::Shader::ImageType type)
: index{index}, offset{offset}, buffer{buffer}, type{type}, is_bindless{true} {}
@@ -403,53 +359,20 @@ public:
is_atomic = true;
}
- constexpr u32 GetIndex() const {
- return index;
- }
-
- constexpr u32 GetOffset() const {
- return offset;
- }
-
- constexpr u32 GetBuffer() const {
- return buffer;
- }
-
- constexpr Tegra::Shader::ImageType GetType() const {
- return type;
- }
-
- constexpr bool IsBindless() const {
- return is_bindless;
- }
-
- constexpr bool IsWritten() const {
- return is_written;
- }
-
- constexpr bool IsRead() const {
- return is_read;
- }
-
- constexpr bool IsAtomic() const {
- return is_atomic;
- }
-
-private:
- u32 index{};
- u32 offset{};
- u32 buffer{};
+ u32 index = 0;
+ u32 offset = 0;
+ u32 buffer = 0;
Tegra::Shader::ImageType type{};
- bool is_bindless{};
- bool is_written{};
- bool is_read{};
- bool is_atomic{};
+ bool is_bindless = false;
+ bool is_written = false;
+ bool is_read = false;
+ bool is_atomic = false;
};
struct GlobalMemoryBase {
- u32 cbuf_index{};
- u32 cbuf_offset{};
+ u32 cbuf_index = 0;
+ u32 cbuf_offset = 0;
bool operator<(const GlobalMemoryBase& rhs) const {
return std::tie(cbuf_index, cbuf_offset) < std::tie(rhs.cbuf_index, rhs.cbuf_offset);
@@ -463,7 +386,7 @@ struct MetaArithmetic {
/// Parameters describing a texture sampler
struct MetaTexture {
- const Sampler& sampler;
+ Sampler sampler;
Node array;
Node depth_compare;
std::vector<Node> aoffi;
diff --git a/src/video_core/shader/shader_ir.h b/src/video_core/shader/shader_ir.h
index c6e7bdf50..15ae152f2 100644
--- a/src/video_core/shader/shader_ir.h
+++ b/src/video_core/shader/shader_ir.h
@@ -18,6 +18,7 @@
#include "video_core/engines/shader_header.h"
#include "video_core/shader/ast.h"
#include "video_core/shader/compiler_settings.h"
+#include "video_core/shader/memory_util.h"
#include "video_core/shader/node.h"
#include "video_core/shader/registry.h"
@@ -25,16 +26,13 @@ namespace VideoCommon::Shader {
struct ShaderBlock;
-using ProgramCode = std::vector<u64>;
-
constexpr u32 MAX_PROGRAM_LENGTH = 0x1000;
-class ConstBuffer {
-public:
- explicit ConstBuffer(u32 max_offset, bool is_indirect)
+struct ConstBuffer {
+ constexpr explicit ConstBuffer(u32 max_offset, bool is_indirect)
: max_offset{max_offset}, is_indirect{is_indirect} {}
- ConstBuffer() = default;
+ constexpr ConstBuffer() = default;
void MarkAsUsed(u64 offset) {
max_offset = std::max(max_offset, static_cast<u32>(offset));
@@ -57,8 +55,8 @@ public:
}
private:
- u32 max_offset{};
- bool is_indirect{};
+ u32 max_offset = 0;
+ bool is_indirect = false;
};
struct GlobalMemoryUsage {
@@ -192,10 +190,14 @@ private:
friend class ASTDecoder;
struct SamplerInfo {
- Tegra::Shader::TextureType type;
- bool is_array;
- bool is_shadow;
- bool is_buffer;
+ std::optional<Tegra::Shader::TextureType> type;
+ std::optional<bool> is_array;
+ std::optional<bool> is_shadow;
+ std::optional<bool> is_buffer;
+
+ constexpr bool IsComplete() const noexcept {
+ return type && is_array && is_shadow && is_buffer;
+ }
};
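A short sketch of how the optional fields are meant to be used (illustrative): the decoder fills in only what the instruction encodes, and GetSamplerInfo completes the rest from the registry, so IsComplete() gates whether a registry query is needed at all.

    SamplerInfo info;
    info.is_shadow = is_depth_compare; // known from the instruction encoding
    // type, is_array and is_buffer stay unset here; IsComplete() is false,
    // so GetSamplerInfo() will query the registry and value_or() the defaults.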
void Decode();
@@ -328,16 +330,15 @@ private:
OperationCode GetPredicateCombiner(Tegra::Shader::PredOperation operation);
/// Queries the missing sampler info from the execution context.
- SamplerInfo GetSamplerInfo(std::optional<SamplerInfo> sampler_info, u32 offset,
+ SamplerInfo GetSamplerInfo(SamplerInfo info, u32 offset,
std::optional<u32> buffer = std::nullopt);
- /// Accesses a texture sampler
- const Sampler* GetSampler(const Tegra::Shader::Sampler& sampler,
- std::optional<SamplerInfo> sampler_info = std::nullopt);
+ /// Accesses a texture sampler.
+ std::optional<Sampler> GetSampler(Tegra::Shader::Sampler sampler, SamplerInfo info);
/// Accesses a texture sampler for a bindless texture.
- const Sampler* GetBindlessSampler(Tegra::Shader::Register reg, Node& index_var,
- std::optional<SamplerInfo> sampler_info = std::nullopt);
+ std::optional<Sampler> GetBindlessSampler(Tegra::Shader::Register reg, SamplerInfo info,
+ Node& index_var);
/// Accesses an image.
Image& GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type);
diff --git a/src/video_core/shader/track.cpp b/src/video_core/shader/track.cpp
index 513e9bf49..eb97bfd41 100644
--- a/src/video_core/shader/track.cpp
+++ b/src/video_core/shader/track.cpp
@@ -153,21 +153,13 @@ std::tuple<Node, u32, u32> ShaderIR::TrackCbuf(Node tracked, const NodeBlock& co
if (gpr->GetIndex() == Tegra::Shader::Register::ZeroIndex) {
return {};
}
- s64 current_cursor = cursor;
- while (current_cursor > 0) {
- // Reduce the cursor in one to avoid infinite loops when the instruction sets the same
- // register that it uses as operand
- const auto [source, new_cursor] = TrackRegister(gpr, code, current_cursor - 1);
- current_cursor = new_cursor;
- if (!source) {
- continue;
- }
- const auto [base_address, index, offset] = TrackCbuf(source, code, current_cursor);
- if (base_address != nullptr) {
- return {base_address, index, offset};
- }
+ // Reduce the cursor by one to avoid infinite loops when the instruction sets the same
+ // register that it uses as an operand
+ const auto [source, new_cursor] = TrackRegister(gpr, code, cursor - 1);
+ if (!source) {
+ return {};
}
- return {};
+ return TrackCbuf(source, code, new_cursor);
}
if (const auto operation = std::get_if<OperationNode>(&*tracked)) {
for (std::size_t i = operation->GetOperandsCount(); i > 0; --i) {
diff --git a/src/video_core/texture_cache/surface_base.h b/src/video_core/texture_cache/surface_base.h
index c5ab21f56..79e10ffbb 100644
--- a/src/video_core/texture_cache/surface_base.h
+++ b/src/video_core/texture_cache/surface_base.h
@@ -192,6 +192,22 @@ public:
index = index_;
}
+ void SetMemoryMarked(bool is_memory_marked_) {
+ is_memory_marked = is_memory_marked_;
+ }
+
+ bool IsMemoryMarked() const {
+ return is_memory_marked;
+ }
+
+ void SetSyncPending(bool is_sync_pending_) {
+ is_sync_pending = is_sync_pending_;
+ }
+
+ bool IsSyncPending() const {
+ return is_sync_pending;
+ }
+
void MarkAsPicked(bool is_picked_) {
is_picked = is_picked_;
}
@@ -303,6 +319,8 @@ private:
bool is_target{};
bool is_registered{};
bool is_picked{};
+ bool is_memory_marked{};
+ bool is_sync_pending{};
u32 index{NO_RT};
u64 modification_tick{};
};
diff --git a/src/video_core/texture_cache/surface_params.cpp b/src/video_core/texture_cache/surface_params.cpp
index 0de499946..884fabffe 100644
--- a/src/video_core/texture_cache/surface_params.cpp
+++ b/src/video_core/texture_cache/surface_params.cpp
@@ -81,7 +81,7 @@ SurfaceParams SurfaceParams::CreateForTexture(const FormatLookupTable& lookup_ta
params.pixel_format = lookup_table.GetPixelFormat(
tic.format, params.srgb_conversion, tic.r_type, tic.g_type, tic.b_type, tic.a_type);
params.type = GetFormatType(params.pixel_format);
- if (entry.IsShadow() && params.type == SurfaceType::ColorTexture) {
+ if (entry.is_shadow && params.type == SurfaceType::ColorTexture) {
switch (params.pixel_format) {
case PixelFormat::R16U:
case PixelFormat::R16F:
@@ -108,7 +108,7 @@ SurfaceParams SurfaceParams::CreateForTexture(const FormatLookupTable& lookup_ta
params.emulated_levels = 1;
params.is_layered = false;
} else {
- params.target = TextureTypeToSurfaceTarget(entry.GetType(), entry.IsArray());
+ params.target = TextureTypeToSurfaceTarget(entry.type, entry.is_array);
params.width = tic.Width();
params.height = tic.Height();
params.depth = tic.Depth();
@@ -138,7 +138,7 @@ SurfaceParams SurfaceParams::CreateForImage(const FormatLookupTable& lookup_tabl
tic.format, params.srgb_conversion, tic.r_type, tic.g_type, tic.b_type, tic.a_type);
params.type = GetFormatType(params.pixel_format);
params.type = GetFormatType(params.pixel_format);
- params.target = ImageTypeToSurfaceTarget(entry.GetType());
+ params.target = ImageTypeToSurfaceTarget(entry.type);
// TODO: on 1DBuffer we should use the tic info.
if (tic.IsBuffer()) {
params.target = SurfaceTarget::TextureBuffer;
@@ -248,12 +248,12 @@ SurfaceParams SurfaceParams::CreateForFermiCopySurface(
VideoCore::Surface::SurfaceTarget SurfaceParams::ExpectedTarget(
const VideoCommon::Shader::Sampler& entry) {
- return TextureTypeToSurfaceTarget(entry.GetType(), entry.IsArray());
+ return TextureTypeToSurfaceTarget(entry.type, entry.is_array);
}
VideoCore::Surface::SurfaceTarget SurfaceParams::ExpectedTarget(
const VideoCommon::Shader::Image& entry) {
- return ImageTypeToSurfaceTarget(entry.GetType());
+ return ImageTypeToSurfaceTarget(entry.type);
}
bool SurfaceParams::IsLayered() const {
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 69ca08fd1..d6efc34b2 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -6,6 +6,7 @@
#include <algorithm>
#include <array>
+#include <list>
#include <memory>
#include <mutex>
#include <set>
@@ -62,6 +63,30 @@ public:
}
}
+ void OnCPUWrite(VAddr addr, std::size_t size) {
+ std::lock_guard lock{mutex};
+
+ for (const auto& surface : GetSurfacesInRegion(addr, size)) {
+ if (surface->IsMemoryMarked()) {
+ UnmarkMemory(surface);
+ surface->SetSyncPending(true);
+ marked_for_unregister.emplace_back(surface);
+ }
+ }
+ }
+
+ void SyncGuestHost() {
+ std::lock_guard lock{mutex};
+
+ for (const auto& surface : marked_for_unregister) {
+ if (surface->IsRegistered()) {
+ surface->SetSyncPending(false);
+ Unregister(surface);
+ }
+ }
+ marked_for_unregister.clear();
+ }
+
/**
* Guarantees that render targets don't unregister themselves if they
* collide. Protection is currently only done on 3D slices.
@@ -85,10 +110,20 @@ public:
return a->GetModificationTick() < b->GetModificationTick();
});
for (const auto& surface : surfaces) {
+ mutex.unlock();
FlushSurface(surface);
+ mutex.lock();
}
}
+ bool MustFlushRegion(VAddr addr, std::size_t size) {
+ std::lock_guard lock{mutex};
+
+ const auto surfaces = GetSurfacesInRegion(addr, size);
+ return std::any_of(surfaces.cbegin(), surfaces.cend(),
+ [](const TSurface& surface) { return surface->IsModified(); });
+ }
+
TView GetTextureSurface(const Tegra::Texture::TICEntry& tic,
const VideoCommon::Shader::Sampler& entry) {
std::lock_guard lock{mutex};
@@ -108,7 +143,7 @@ public:
}
const auto params{SurfaceParams::CreateForTexture(format_lookup_table, tic, entry)};
- const auto [surface, view] = GetSurface(gpu_addr, *cpu_addr, params, false);
+ const auto [surface, view] = GetSurface(gpu_addr, *cpu_addr, params, true, false);
if (guard_samplers) {
sampled_textures.push_back(surface);
}
@@ -128,7 +163,7 @@ public:
return GetNullSurface(SurfaceParams::ExpectedTarget(entry));
}
const auto params{SurfaceParams::CreateForImage(format_lookup_table, tic, entry)};
- const auto [surface, view] = GetSurface(gpu_addr, *cpu_addr, params, false);
+ const auto [surface, view] = GetSurface(gpu_addr, *cpu_addr, params, true, false);
if (guard_samplers) {
sampled_textures.push_back(surface);
}
@@ -143,7 +178,7 @@ public:
return any_rt;
}
- TView GetDepthBufferSurface() {
+ TView GetDepthBufferSurface(bool preserve_contents) {
std::lock_guard lock{mutex};
auto& maxwell3d = system.GPU().Maxwell3D();
if (!maxwell3d.dirty.flags[VideoCommon::Dirty::ZetaBuffer]) {
@@ -164,7 +199,7 @@ public:
return {};
}
const auto depth_params{SurfaceParams::CreateForDepthBuffer(system)};
- auto surface_view = GetSurface(gpu_addr, *cpu_addr, depth_params, true);
+ auto surface_view = GetSurface(gpu_addr, *cpu_addr, depth_params, preserve_contents, true);
if (depth_buffer.target)
depth_buffer.target->MarkAsRenderTarget(false, NO_RT);
depth_buffer.target = surface_view.first;
@@ -174,7 +209,7 @@ public:
return surface_view.second;
}
- TView GetColorBufferSurface(std::size_t index) {
+ TView GetColorBufferSurface(std::size_t index, bool preserve_contents) {
std::lock_guard lock{mutex};
ASSERT(index < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets);
auto& maxwell3d = system.GPU().Maxwell3D();
@@ -204,10 +239,17 @@ public:
return {};
}
- auto surface_view = GetSurface(gpu_addr, *cpu_addr,
- SurfaceParams::CreateForFramebuffer(system, index), true);
- if (render_targets[index].target)
- render_targets[index].target->MarkAsRenderTarget(false, NO_RT);
+ auto surface_view =
+ GetSurface(gpu_addr, *cpu_addr, SurfaceParams::CreateForFramebuffer(system, index),
+ preserve_contents, true);
+ if (render_targets[index].target) {
+ auto& surface = render_targets[index].target;
+ surface->MarkAsRenderTarget(false, NO_RT);
+ const auto& cr_params = surface->GetSurfaceParams();
+ if (!cr_params.is_tiled && Settings::values.use_asynchronous_gpu_emulation) {
+ AsyncFlushSurface(surface);
+ }
+ }
render_targets[index].target = surface_view.first;
render_targets[index].view = surface_view.second;
if (render_targets[index].target)
@@ -259,9 +301,9 @@ public:
const std::optional<VAddr> src_cpu_addr =
system.GPU().MemoryManager().GpuToCpuAddress(src_gpu_addr);
std::pair<TSurface, TView> dst_surface =
- GetSurface(dst_gpu_addr, *dst_cpu_addr, dst_params, false);
+ GetSurface(dst_gpu_addr, *dst_cpu_addr, dst_params, true, false);
std::pair<TSurface, TView> src_surface =
- GetSurface(src_gpu_addr, *src_cpu_addr, src_params, false);
+ GetSurface(src_gpu_addr, *src_cpu_addr, src_params, true, false);
ImageBlit(src_surface.second, dst_surface.second, copy_config);
dst_surface.first->MarkAsModified(true, Tick());
}
@@ -284,6 +326,34 @@ public:
return ++ticks;
}
+ void CommitAsyncFlushes() {
+ committed_flushes.push_back(uncommitted_flushes);
+ uncommitted_flushes.reset();
+ }
+
+ bool HasUncommittedFlushes() const {
+ return uncommitted_flushes != nullptr;
+ }
+
+ bool ShouldWaitAsyncFlushes() const {
+ return !committed_flushes.empty() && committed_flushes.front() != nullptr;
+ }
+
+ void PopAsyncFlushes() {
+ if (committed_flushes.empty()) {
+ return;
+ }
+ auto& flush_list = committed_flushes.front();
+ if (!flush_list) {
+ committed_flushes.pop_front();
+ return;
+ }
+ for (TSurface& surface : *flush_list) {
+ FlushSurface(surface);
+ }
+ committed_flushes.pop_front();
+ }
+
protected:
explicit TextureCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
bool is_astc_supported)
@@ -345,9 +415,20 @@ protected:
surface->SetCpuAddr(*cpu_addr);
RegisterInnerCache(surface);
surface->MarkAsRegistered(true);
+ surface->SetMemoryMarked(true);
rasterizer.UpdatePagesCachedCount(*cpu_addr, size, 1);
}
+ void UnmarkMemory(TSurface surface) {
+ if (!surface->IsMemoryMarked()) {
+ return;
+ }
+ const std::size_t size = surface->GetSizeInBytes();
+ const VAddr cpu_addr = surface->GetCpuAddr();
+ rasterizer.UpdatePagesCachedCount(cpu_addr, size, -1);
+ surface->SetMemoryMarked(false);
+ }
+
void Unregister(TSurface surface) {
if (guard_render_targets && surface->IsProtected()) {
return;
@@ -355,9 +436,11 @@ protected:
if (!guard_render_targets && surface->IsRenderTarget()) {
ManageRenderTargetUnregister(surface);
}
- const std::size_t size = surface->GetSizeInBytes();
- const VAddr cpu_addr = surface->GetCpuAddr();
- rasterizer.UpdatePagesCachedCount(cpu_addr, size, -1);
+ UnmarkMemory(surface);
+ if (surface->IsSyncPending()) {
+ marked_for_unregister.remove(surface);
+ surface->SetSyncPending(false);
+ }
UnregisterInnerCache(surface);
surface->MarkAsRegistered(false);
ReserveSurface(surface->GetSurfaceParams(), surface);
@@ -417,7 +500,7 @@ private:
**/
RecycleStrategy PickStrategy(std::vector<TSurface>& overlaps, const SurfaceParams& params,
const GPUVAddr gpu_addr, const MatchTopologyResult untopological) {
- if (Settings::values.use_accurate_gpu_emulation) {
+ if (Settings::IsGPULevelExtreme()) {
return RecycleStrategy::Flush;
}
// 3D Textures decision
@@ -450,18 +533,22 @@ private:
* @param overlaps The overlapping surfaces registered in the cache.
* @param params The parameters for the new surface.
* @param gpu_addr The starting address of the new surface.
+ * @param preserve_contents Indicates whether the new surface should be loaded from memory or
+ * left blank.
* @param untopological Indicates to the recycler that the texture has no way to match the
* overlaps due to topological reasons.
**/
std::pair<TSurface, TView> RecycleSurface(std::vector<TSurface>& overlaps,
const SurfaceParams& params, const GPUVAddr gpu_addr,
+ const bool preserve_contents,
const MatchTopologyResult untopological) {
+ const bool do_load = preserve_contents && Settings::IsGPULevelExtreme();
for (auto& surface : overlaps) {
Unregister(surface);
}
switch (PickStrategy(overlaps, params, gpu_addr, untopological)) {
case RecycleStrategy::Ignore: {
- return InitializeSurface(gpu_addr, params, Settings::values.use_accurate_gpu_emulation);
+ return InitializeSurface(gpu_addr, params, do_load);
}
case RecycleStrategy::Flush: {
std::sort(overlaps.begin(), overlaps.end(),
@@ -471,7 +558,7 @@ private:
for (auto& surface : overlaps) {
FlushSurface(surface);
}
- return InitializeSurface(gpu_addr, params);
+ return InitializeSurface(gpu_addr, params, preserve_contents);
}
case RecycleStrategy::BufferCopy: {
auto new_surface = GetUncachedSurface(gpu_addr, params);
@@ -480,7 +567,7 @@ private:
}
default: {
UNIMPLEMENTED_MSG("Unimplemented Texture Cache Recycling Strategy!");
- return InitializeSurface(gpu_addr, params);
+ return InitializeSurface(gpu_addr, params, do_load);
}
}
}
@@ -509,7 +596,7 @@ private:
}
const auto& final_params = new_surface->GetSurfaceParams();
if (cr_params.type != final_params.type) {
- if (Settings::values.use_accurate_gpu_emulation) {
+ if (Settings::IsGPULevelExtreme()) {
BufferCopy(current_surface, new_surface);
}
} else {
@@ -598,7 +685,7 @@ private:
if (passed_tests == 0) {
return {};
// In accurate GPU emulation, all tests should pass; otherwise we recycle
- } else if (Settings::values.use_accurate_gpu_emulation && passed_tests != overlaps.size()) {
+ } else if (Settings::IsGPULevelExtreme() && passed_tests != overlaps.size()) {
return {};
}
for (const auto& surface : overlaps) {
@@ -618,11 +705,14 @@ private:
* @param params The parameters on the new surface.
* @param gpu_addr The starting address of the new surface.
* @param cpu_addr The starting address of the new surface on physical memory.
+ * @param preserve_contents Indicates whether the new surface should be loaded from memory or
+ * left blank.
*/
std::optional<std::pair<TSurface, TView>> Manage3DSurfaces(std::vector<TSurface>& overlaps,
const SurfaceParams& params,
const GPUVAddr gpu_addr,
- const VAddr cpu_addr) {
+ const VAddr cpu_addr,
+ bool preserve_contents) {
if (params.target == SurfaceTarget::Texture3D) {
bool failed = false;
if (params.num_levels > 1) {
@@ -668,11 +758,11 @@ private:
for (const auto& surface : overlaps) {
if (!surface->MatchTarget(params.target)) {
if (overlaps.size() == 1 && surface->GetCpuAddr() == cpu_addr) {
- if (Settings::values.use_accurate_gpu_emulation) {
+ if (Settings::IsGPULevelExtreme()) {
return std::nullopt;
}
Unregister(surface);
- return InitializeSurface(gpu_addr, params);
+ return InitializeSurface(gpu_addr, params, preserve_contents);
}
return std::nullopt;
}
@@ -683,7 +773,7 @@ private:
return {{surface, surface->GetMainView()}};
}
}
- return InitializeSurface(gpu_addr, params);
+ return InitializeSurface(gpu_addr, params, preserve_contents);
}
}
@@ -706,10 +796,13 @@ private:
*
* @param gpu_addr The starting address of the candidate surface.
* @param params The parameters on the candidate surface.
+ * @param preserve_contents Indicates whether the new surface should be loaded from memory or
+ * left blank.
* @param is_render Whether or not the surface is a render target.
**/
std::pair<TSurface, TView> GetSurface(const GPUVAddr gpu_addr, const VAddr cpu_addr,
- const SurfaceParams& params, bool is_render) {
+ const SurfaceParams& params, bool preserve_contents,
+ bool is_render) {
// Step 1
// Check Level 1 Cache for a fast structural match. If candidate surface
// matches at certain level we are pretty much done.
@@ -718,7 +811,8 @@ private:
const auto topological_result = current_surface->MatchesTopology(params);
if (topological_result != MatchTopologyResult::FullMatch) {
std::vector<TSurface> overlaps{current_surface};
- return RecycleSurface(overlaps, params, gpu_addr, topological_result);
+ return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+ topological_result);
}
const auto struct_result = current_surface->MatchesStructure(params);
@@ -743,7 +837,7 @@ private:
// If none are found, we are done. We just load the surface and create it.
if (overlaps.empty()) {
- return InitializeSurface(gpu_addr, params);
+ return InitializeSurface(gpu_addr, params, preserve_contents);
}
// Step 3
@@ -753,13 +847,15 @@ private:
for (const auto& surface : overlaps) {
const auto topological_result = surface->MatchesTopology(params);
if (topological_result != MatchTopologyResult::FullMatch) {
- return RecycleSurface(overlaps, params, gpu_addr, topological_result);
+ return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+ topological_result);
}
}
// Check if it's a 3D texture
if (params.block_depth > 0) {
- auto surface = Manage3DSurfaces(overlaps, params, gpu_addr, cpu_addr);
+ auto surface =
+ Manage3DSurfaces(overlaps, params, gpu_addr, cpu_addr, preserve_contents);
if (surface) {
return *surface;
}
@@ -779,7 +875,8 @@ private:
return *view;
}
}
- return RecycleSurface(overlaps, params, gpu_addr, MatchTopologyResult::FullMatch);
+ return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+ MatchTopologyResult::FullMatch);
}
// Now we check if the candidate is a mipmap/layer of the overlap
std::optional<TView> view =
@@ -803,7 +900,7 @@ private:
pair.first->EmplaceView(params, gpu_addr, candidate_size);
if (mirage_view)
return {pair.first, *mirage_view};
- return RecycleSurface(overlaps, params, gpu_addr,
+ return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
MatchTopologyResult::FullMatch);
}
return {current_surface, *view};
@@ -819,7 +916,8 @@ private:
}
}
// We failed all the tests, recycle the overlaps into a new texture.
- return RecycleSurface(overlaps, params, gpu_addr, MatchTopologyResult::FullMatch);
+ return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+ MatchTopologyResult::FullMatch);
}
/**
@@ -977,10 +1075,10 @@ private:
}
std::pair<TSurface, TView> InitializeSurface(GPUVAddr gpu_addr, const SurfaceParams& params,
- bool do_load = true) {
+ bool preserve_contents) {
auto new_surface{GetUncachedSurface(gpu_addr, params)};
Register(new_surface);
- if (do_load) {
+ if (preserve_contents) {
LoadSurface(new_surface);
}
return {new_surface, new_surface->GetMainView()};
@@ -1074,7 +1172,7 @@ private:
/// Returns true if the shader sampler entry is compatible with the TIC texture type.
static bool IsTypeCompatible(Tegra::Texture::TextureType tic_type,
const VideoCommon::Shader::Sampler& entry) {
- const auto shader_type = entry.GetType();
+ const auto shader_type = entry.type;
switch (tic_type) {
case Tegra::Texture::TextureType::Texture1D:
case Tegra::Texture::TextureType::Texture1DArray:
@@ -1095,7 +1193,7 @@ private:
if (shader_type == Tegra::Shader::TextureType::TextureCube) {
return true;
}
- return shader_type == Tegra::Shader::TextureType::Texture2D && entry.IsArray();
+ return shader_type == Tegra::Shader::TextureType::Texture2D && entry.is_array;
}
UNREACHABLE();
return true;
@@ -1106,6 +1204,13 @@ private:
TView view;
};
+ void AsyncFlushSurface(TSurface& surface) {
+ if (!uncommitted_flushes) {
+ uncommitted_flushes = std::make_shared<std::list<TSurface>>();
+ }
+ uncommitted_flushes->push_back(surface);
+ }
+
VideoCore::RasterizerInterface& rasterizer;
FormatLookupTable format_lookup_table;
@@ -1150,6 +1255,11 @@ private:
std::unordered_map<u32, TSurface> invalid_cache;
std::vector<u8> invalid_memory;
+ std::list<TSurface> marked_for_unregister;
+
+ std::shared_ptr<std::list<TSurface>> uncommitted_flushes{};
+ std::list<std::shared_ptr<std::list<TSurface>>> committed_flushes;
+
StagingCache staging_cache;
std::recursive_mutex mutex;
};
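The texture_cache.h changes above introduce a two-stage flush queue: surfaces are gathered into an uncommitted list, snapshotted into committed batches at commit/fence time, and drained later by PopAsyncFlushes. Below is a minimal sketch of that bookkeeping with TSurface reduced to a plain int so the snippet stands alone (AsyncFlushQueue and its member names are illustrative, not part of the codebase).

#include <list>
#include <memory>

class AsyncFlushQueue {
public:
    // Record a surface whose contents will eventually need flushing back to guest memory.
    void Queue(int surface) {
        if (!uncommitted) {
            uncommitted = std::make_shared<std::list<int>>();
        }
        uncommitted->push_back(surface);
    }

    // Snapshot everything queued so far into a batch; one call per commit point.
    void Commit() {
        committed.push_back(uncommitted);
        uncommitted.reset();
    }

    bool HasUncommitted() const {
        return uncommitted != nullptr;
    }

    bool ShouldWait() const {
        return !committed.empty() && committed.front() != nullptr;
    }

    // Drain the oldest committed batch; 'flush' stands in for FlushSurface.
    template <typename Flush>
    void Pop(Flush&& flush) {
        if (committed.empty()) {
            return;
        }
        const auto batch = committed.front();
        if (batch) {
            for (const int surface : *batch) {
                flush(surface);
            }
        }
        committed.pop_front();
    }

private:
    std::shared_ptr<std::list<int>> uncommitted;
    std::list<std::shared_ptr<std::list<int>>> committed;
};

OnCPUWrite and SyncGuestHost apply the same deferral idea to unregistration, collecting surfaces in marked_for_unregister and resolving them later rather than inside the write notification.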
diff --git a/src/video_core/textures/decoders.cpp b/src/video_core/textures/decoders.cpp
index 7df5f1452..fae8638ec 100644
--- a/src/video_core/textures/decoders.cpp
+++ b/src/video_core/textures/decoders.cpp
@@ -11,6 +11,7 @@
#include "video_core/textures/texture.h"
namespace Tegra::Texture {
+namespace {
/**
* This table represents the internal swizzle of a gob,
@@ -174,6 +175,8 @@ void SwizzledData(u8* const swizzled_data, u8* const unswizzled_data, const bool
}
}
+} // Anonymous namespace
+
void CopySwizzledData(u32 width, u32 height, u32 depth, u32 bytes_per_pixel,
u32 out_bytes_per_pixel, u8* const swizzled_data, u8* const unswizzled_data,
bool unswizzle, u32 block_height, u32 block_depth, u32 width_spacing) {
diff --git a/src/video_core/textures/decoders.h b/src/video_core/textures/decoders.h
index e5eac3f3b..9f2d6d308 100644
--- a/src/video_core/textures/decoders.h
+++ b/src/video_core/textures/decoders.h
@@ -56,8 +56,7 @@ void UnswizzleSubrect(u32 subrect_width, u32 subrect_height, u32 dest_pitch, u32
u32 bytes_per_pixel, u8* swizzled_data, u8* unswizzled_data, u32 block_height,
u32 offset_x, u32 offset_y);
-void SwizzleKepler(const u32 width, const u32 height, const u32 dst_x, const u32 dst_y,
- const u32 block_height, const std::size_t copy_size, const u8* source_data,
- u8* swizzle_data);
+void SwizzleKepler(u32 width, u32 height, u32 dst_x, u32 dst_y, u32 block_height,
+ std::size_t copy_size, const u8* source_data, u8* swizzle_data);
} // namespace Tegra::Texture
diff --git a/src/yuzu/applets/profile_select.cpp b/src/yuzu/applets/profile_select.cpp
index 6aff38735..4bc8ee726 100644
--- a/src/yuzu/applets/profile_select.cpp
+++ b/src/yuzu/applets/profile_select.cpp
@@ -17,6 +17,7 @@
#include "yuzu/applets/profile_select.h"
#include "yuzu/main.h"
+namespace {
QString FormatUserEntryText(const QString& username, Common::UUID uuid) {
return QtProfileSelectionDialog::tr(
"%1\n%2", "%1 is the profile username, %2 is the formatted UUID (e.g. "
@@ -41,6 +42,7 @@ QPixmap GetIcon(Common::UUID uuid) {
return icon.scaled(64, 64, Qt::IgnoreAspectRatio, Qt::SmoothTransformation);
}
+} // Anonymous namespace
QtProfileSelectionDialog::QtProfileSelectionDialog(QWidget* parent)
: QDialog(parent), profile_manager(std::make_unique<Service::Account::ProfileManager>()) {
diff --git a/src/yuzu/configuration/config.cpp b/src/yuzu/configuration/config.cpp
index 946aa287a..75c6cf20b 100644
--- a/src/yuzu/configuration/config.cpp
+++ b/src/yuzu/configuration/config.cpp
@@ -12,7 +12,6 @@
#include "input_common/main.h"
#include "input_common/udp/client.h"
#include "yuzu/configuration/config.h"
-#include "yuzu/uisettings.h"
Config::Config() {
// TODO: Don't hardcode the path; let the frontend decide where to put the config files.
@@ -212,12 +211,13 @@ const std::array<int, Settings::NativeKeyboard::NumKeyboardMods> Config::default
// This must be in alphabetical order according to action name as it must have the same order as
// UISetting::values.shortcuts, which is alphabetically ordered.
// clang-format off
-const std::array<UISettings::Shortcut, 15> default_hotkeys{{
+const std::array<UISettings::Shortcut, 15> Config::default_hotkeys{{
{QStringLiteral("Capture Screenshot"), QStringLiteral("Main Window"), {QStringLiteral("Ctrl+P"), Qt::ApplicationShortcut}},
+ {QStringLiteral("Change Docked Mode"), QStringLiteral("Main Window"), {QStringLiteral("F10"), Qt::ApplicationShortcut}},
{QStringLiteral("Continue/Pause Emulation"), QStringLiteral("Main Window"), {QStringLiteral("F4"), Qt::WindowShortcut}},
{QStringLiteral("Decrease Speed Limit"), QStringLiteral("Main Window"), {QStringLiteral("-"), Qt::ApplicationShortcut}},
- {QStringLiteral("Exit yuzu"), QStringLiteral("Main Window"), {QStringLiteral("Ctrl+Q"), Qt::WindowShortcut}},
{QStringLiteral("Exit Fullscreen"), QStringLiteral("Main Window"), {QStringLiteral("Esc"), Qt::WindowShortcut}},
+ {QStringLiteral("Exit yuzu"), QStringLiteral("Main Window"), {QStringLiteral("Ctrl+Q"), Qt::WindowShortcut}},
{QStringLiteral("Fullscreen"), QStringLiteral("Main Window"), {QStringLiteral("F11"), Qt::WindowShortcut}},
{QStringLiteral("Increase Speed Limit"), QStringLiteral("Main Window"), {QStringLiteral("+"), Qt::ApplicationShortcut}},
{QStringLiteral("Load Amiibo"), QStringLiteral("Main Window"), {QStringLiteral("F2"), Qt::ApplicationShortcut}},
@@ -227,7 +227,6 @@ const std::array<UISettings::Shortcut, 15> default_hotkeys{{
{QStringLiteral("Toggle Filter Bar"), QStringLiteral("Main Window"), {QStringLiteral("Ctrl+F"), Qt::WindowShortcut}},
{QStringLiteral("Toggle Speed Limit"), QStringLiteral("Main Window"), {QStringLiteral("Ctrl+Z"), Qt::ApplicationShortcut}},
{QStringLiteral("Toggle Status Bar"), QStringLiteral("Main Window"), {QStringLiteral("Ctrl+S"), Qt::WindowShortcut}},
- {QStringLiteral("Change Docked Mode"), QStringLiteral("Main Window"), {QStringLiteral("F10"), Qt::ApplicationShortcut}},
}};
// clang-format on
@@ -532,6 +531,8 @@ void Config::ReadDebuggingValues() {
Settings::values.reporting_services =
ReadSetting(QStringLiteral("reporting_services"), false).toBool();
Settings::values.quest_flag = ReadSetting(QStringLiteral("quest_flag"), false).toBool();
+ Settings::values.disable_cpu_opt =
+ ReadSetting(QStringLiteral("disable_cpu_opt"), false).toBool();
qt_config->endGroup();
}
@@ -637,11 +638,13 @@ void Config::ReadRendererValues() {
Settings::values.frame_limit = ReadSetting(QStringLiteral("frame_limit"), 100).toInt();
Settings::values.use_disk_shader_cache =
ReadSetting(QStringLiteral("use_disk_shader_cache"), true).toBool();
- Settings::values.use_accurate_gpu_emulation =
- ReadSetting(QStringLiteral("use_accurate_gpu_emulation"), false).toBool();
+ const int gpu_accuracy_level = ReadSetting(QStringLiteral("gpu_accuracy"), 0).toInt();
+ Settings::values.gpu_accuracy = static_cast<Settings::GPUAccuracy>(gpu_accuracy_level);
Settings::values.use_asynchronous_gpu_emulation =
ReadSetting(QStringLiteral("use_asynchronous_gpu_emulation"), false).toBool();
Settings::values.use_vsync = ReadSetting(QStringLiteral("use_vsync"), true).toBool();
+ Settings::values.use_fast_gpu_time =
+ ReadSetting(QStringLiteral("use_fast_gpu_time"), true).toBool();
Settings::values.force_30fps_mode =
ReadSetting(QStringLiteral("force_30fps_mode"), false).toBool();
@@ -1003,6 +1006,7 @@ void Config::SaveDebuggingValues() {
WriteSetting(QStringLiteral("dump_exefs"), Settings::values.dump_exefs, false);
WriteSetting(QStringLiteral("dump_nso"), Settings::values.dump_nso, false);
WriteSetting(QStringLiteral("quest_flag"), Settings::values.quest_flag, false);
+ WriteSetting(QStringLiteral("disable_cpu_opt"), Settings::values.disable_cpu_opt, false);
qt_config->endGroup();
}
@@ -1079,11 +1083,12 @@ void Config::SaveRendererValues() {
WriteSetting(QStringLiteral("frame_limit"), Settings::values.frame_limit, 100);
WriteSetting(QStringLiteral("use_disk_shader_cache"), Settings::values.use_disk_shader_cache,
true);
- WriteSetting(QStringLiteral("use_accurate_gpu_emulation"),
- Settings::values.use_accurate_gpu_emulation, false);
+ WriteSetting(QStringLiteral("gpu_accuracy"), static_cast<int>(Settings::values.gpu_accuracy),
+ 0);
WriteSetting(QStringLiteral("use_asynchronous_gpu_emulation"),
Settings::values.use_asynchronous_gpu_emulation, false);
WriteSetting(QStringLiteral("use_vsync"), Settings::values.use_vsync, true);
+ WriteSetting(QStringLiteral("use_fast_gpu_time"), Settings::values.use_fast_gpu_time, true);
WriteSetting(QStringLiteral("force_30fps_mode"), Settings::values.force_30fps_mode, false);
// Cast to double because Qt's written float values are not human-readable
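The renderer config above now stores the combobox index directly, so the settings-side enum is expected to mirror the UI order (Normal, High, Extreme) and expose the IsGPULevelExtreme() helper used by the texture cache hunks. A plausible, self-contained sketch of that shape follows; the real definitions live in src/core/settings.h within the same change and may differ in detail.

#include <cstdint>

namespace Settings {

enum class GPUAccuracy : std::uint32_t {
    Normal = 0,  // combobox index 0
    High = 1,    // combobox index 1
    Extreme = 2, // combobox index 2, gates the slowest "accurate" paths
};

struct Values {
    GPUAccuracy gpu_accuracy = GPUAccuracy::Normal;
};
inline Values values;

// Replacement for the old boolean use_accurate_gpu_emulation checks seen in the
// texture cache hunks above.
inline bool IsGPULevelExtreme() {
    return values.gpu_accuracy == GPUAccuracy::Extreme;
}

} // namespace Settings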
diff --git a/src/yuzu/configuration/config.h b/src/yuzu/configuration/config.h
index ba6888004..5cd2a5feb 100644
--- a/src/yuzu/configuration/config.h
+++ b/src/yuzu/configuration/config.h
@@ -9,6 +9,7 @@
#include <string>
#include <QVariant>
#include "core/settings.h"
+#include "yuzu/uisettings.h"
class QSettings;
@@ -26,6 +27,7 @@ public:
default_mouse_buttons;
static const std::array<int, Settings::NativeKeyboard::NumKeyboardKeys> default_keyboard_keys;
static const std::array<int, Settings::NativeKeyboard::NumKeyboardMods> default_keyboard_mods;
+ static const std::array<UISettings::Shortcut, 15> default_hotkeys;
private:
void ReadValues();
diff --git a/src/yuzu/configuration/configure_debug.cpp b/src/yuzu/configuration/configure_debug.cpp
index 9631059c7..c2026763e 100644
--- a/src/yuzu/configuration/configure_debug.cpp
+++ b/src/yuzu/configuration/configure_debug.cpp
@@ -36,6 +36,7 @@ void ConfigureDebug::SetConfiguration() {
ui->homebrew_args_edit->setText(QString::fromStdString(Settings::values.program_args));
ui->reporting_services->setChecked(Settings::values.reporting_services);
ui->quest_flag->setChecked(Settings::values.quest_flag);
+ ui->disable_cpu_opt->setChecked(Settings::values.disable_cpu_opt);
ui->enable_graphics_debugging->setEnabled(!Core::System::GetInstance().IsPoweredOn());
ui->enable_graphics_debugging->setChecked(Settings::values.renderer_debug);
}
@@ -48,6 +49,7 @@ void ConfigureDebug::ApplyConfiguration() {
Settings::values.program_args = ui->homebrew_args_edit->text().toStdString();
Settings::values.reporting_services = ui->reporting_services->isChecked();
Settings::values.quest_flag = ui->quest_flag->isChecked();
+ Settings::values.disable_cpu_opt = ui->disable_cpu_opt->isChecked();
Settings::values.renderer_debug = ui->enable_graphics_debugging->isChecked();
Debugger::ToggleConsole();
Log::Filter filter;
diff --git a/src/yuzu/configuration/configure_debug.ui b/src/yuzu/configuration/configure_debug.ui
index e028c4c80..e0d4c4a44 100644
--- a/src/yuzu/configuration/configure_debug.ui
+++ b/src/yuzu/configuration/configure_debug.ui
@@ -215,6 +215,13 @@
</property>
</widget>
</item>
+ <item>
+ <widget class="QCheckBox" name="disable_cpu_opt">
+ <property name="text">
+ <string>Disable CPU JIT optimizations</string>
+ </property>
+ </widget>
+ </item>
</layout>
</widget>
</item>
diff --git a/src/yuzu/configuration/configure_filesystem.cpp b/src/yuzu/configuration/configure_filesystem.cpp
index 29f540eb7..835ee821c 100644
--- a/src/yuzu/configuration/configure_filesystem.cpp
+++ b/src/yuzu/configuration/configure_filesystem.cpp
@@ -138,7 +138,7 @@ void ConfigureFilesystem::SetDirectory(DirectoryTarget target, QLineEdit* edit)
str = QFileDialog::getOpenFileName(this, caption, QFileInfo(edit->text()).dir().path(),
QStringLiteral("NX Gamecard;*.xci"));
} else {
- str = QFileDialog::getExistingDirectory(this, caption, edit->text());
+ str = QFileDialog::getExistingDirectory(this, caption, edit->text()) + QDir::separator();
}
if (str.isEmpty())
diff --git a/src/yuzu/configuration/configure_graphics_advanced.cpp b/src/yuzu/configuration/configure_graphics_advanced.cpp
index b9f429f84..5bb2ae555 100644
--- a/src/yuzu/configuration/configure_graphics_advanced.cpp
+++ b/src/yuzu/configuration/configure_graphics_advanced.cpp
@@ -19,9 +19,10 @@ ConfigureGraphicsAdvanced::~ConfigureGraphicsAdvanced() = default;
void ConfigureGraphicsAdvanced::SetConfiguration() {
const bool runtime_lock = !Core::System::GetInstance().IsPoweredOn();
- ui->use_accurate_gpu_emulation->setChecked(Settings::values.use_accurate_gpu_emulation);
+ ui->gpu_accuracy->setCurrentIndex(static_cast<int>(Settings::values.gpu_accuracy));
ui->use_vsync->setEnabled(runtime_lock);
ui->use_vsync->setChecked(Settings::values.use_vsync);
+ ui->use_fast_gpu_time->setChecked(Settings::values.use_fast_gpu_time);
ui->force_30fps_mode->setEnabled(runtime_lock);
ui->force_30fps_mode->setChecked(Settings::values.force_30fps_mode);
ui->anisotropic_filtering_combobox->setEnabled(runtime_lock);
@@ -29,8 +30,10 @@ void ConfigureGraphicsAdvanced::SetConfiguration() {
}
void ConfigureGraphicsAdvanced::ApplyConfiguration() {
- Settings::values.use_accurate_gpu_emulation = ui->use_accurate_gpu_emulation->isChecked();
+ auto gpu_accuracy = static_cast<Settings::GPUAccuracy>(ui->gpu_accuracy->currentIndex());
+ Settings::values.gpu_accuracy = gpu_accuracy;
Settings::values.use_vsync = ui->use_vsync->isChecked();
+ Settings::values.use_fast_gpu_time = ui->use_fast_gpu_time->isChecked();
Settings::values.force_30fps_mode = ui->force_30fps_mode->isChecked();
Settings::values.max_anisotropy = ui->anisotropic_filtering_combobox->currentIndex();
}
diff --git a/src/yuzu/configuration/configure_graphics_advanced.ui b/src/yuzu/configuration/configure_graphics_advanced.ui
index 42eec278e..770b80c50 100644
--- a/src/yuzu/configuration/configure_graphics_advanced.ui
+++ b/src/yuzu/configuration/configure_graphics_advanced.ui
@@ -23,11 +23,34 @@
</property>
<layout class="QVBoxLayout" name="verticalLayout_3">
<item>
- <widget class="QCheckBox" name="use_accurate_gpu_emulation">
- <property name="text">
- <string>Use accurate GPU emulation (slow)</string>
- </property>
- </widget>
+ <layout class="QHBoxLayout" name="horizontalLayout_2">
+ <item>
+ <widget class="QLabel" name="label_gpu_accuracy">
+ <property name="text">
+ <string>Accuracy Level:</string>
+ </property>
+ </widget>
+ </item>
+ <item>
+ <widget class="QComboBox" name="gpu_accuracy">
+ <item>
+ <property name="text">
+ <string notr="true">Normal</string>
+ </property>
+ </item>
+ <item>
+ <property name="text">
+ <string notr="true">High</string>
+ </property>
+ </item>
+ <item>
+ <property name="text">
+ <string notr="true">Extreme (very slow)</string>
+ </property>
+ </item>
+ </widget>
+ </item>
+ </layout>
</item>
<item>
<widget class="QCheckBox" name="use_vsync">
@@ -47,6 +70,13 @@
</widget>
</item>
<item>
+ <widget class="QCheckBox" name="use_fast_gpu_time">
+ <property name="text">
+ <string>Use Fast GPU Time</string>
+ </property>
+ </widget>
+ </item>
+ <item>
<layout class="QHBoxLayout" name="horizontalLayout_1">
<item>
<widget class="QLabel" name="af_label">
diff --git a/src/yuzu/configuration/configure_hotkeys.cpp b/src/yuzu/configuration/configure_hotkeys.cpp
index fa9052136..6f7fd4414 100644
--- a/src/yuzu/configuration/configure_hotkeys.cpp
+++ b/src/yuzu/configuration/configure_hotkeys.cpp
@@ -2,10 +2,12 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include <QMenu>
#include <QMessageBox>
#include <QStandardItemModel>
#include "core/settings.h"
#include "ui_configure_hotkeys.h"
+#include "yuzu/configuration/config.h"
#include "yuzu/configuration/configure_hotkeys.h"
#include "yuzu/hotkeys.h"
#include "yuzu/util/sequence_dialog/sequence_dialog.h"
@@ -19,6 +21,9 @@ ConfigureHotkeys::ConfigureHotkeys(QWidget* parent)
model->setColumnCount(3);
connect(ui->hotkey_list, &QTreeView::doubleClicked, this, &ConfigureHotkeys::Configure);
+ connect(ui->hotkey_list, &QTreeView::customContextMenuRequested, this,
+ &ConfigureHotkeys::PopupContextMenu);
+ ui->hotkey_list->setContextMenuPolicy(Qt::CustomContextMenu);
ui->hotkey_list->setModel(model);
// TODO(Kloen): Make context configurable as well (hiding the column for now)
@@ -27,6 +32,10 @@ ConfigureHotkeys::ConfigureHotkeys(QWidget* parent)
ui->hotkey_list->setColumnWidth(0, 200);
ui->hotkey_list->resizeColumnToContents(1);
+ connect(ui->button_restore_defaults, &QPushButton::clicked, this,
+ &ConfigureHotkeys::RestoreDefaults);
+ connect(ui->button_clear_all, &QPushButton::clicked, this, &ConfigureHotkeys::ClearAll);
+
RetranslateUI();
}
@@ -71,7 +80,6 @@ void ConfigureHotkeys::Configure(QModelIndex index) {
}
index = index.sibling(index.row(), 1);
- auto* const model = ui->hotkey_list->model();
const auto previous_key = model->data(index);
SequenceDialog hotkey_dialog{this};
@@ -81,31 +89,33 @@ void ConfigureHotkeys::Configure(QModelIndex index) {
if (return_code == QDialog::Rejected || key_sequence.isEmpty()) {
return;
}
+ const auto [key_sequence_used, used_action] = IsUsedKey(key_sequence);
- if (IsUsedKey(key_sequence) && key_sequence != QKeySequence(previous_key.toString())) {
- QMessageBox::warning(this, tr("Conflicting Key Sequence"),
- tr("The entered key sequence is already assigned to another hotkey."));
+ if (key_sequence_used && key_sequence != QKeySequence(previous_key.toString())) {
+ QMessageBox::warning(
+ this, tr("Conflicting Key Sequence"),
+ tr("The entered key sequence is already assigned to: %1").arg(used_action));
} else {
model->setData(index, key_sequence.toString(QKeySequence::NativeText));
}
}
-bool ConfigureHotkeys::IsUsedKey(QKeySequence key_sequence) const {
- for (int r = 0; r < model->rowCount(); r++) {
+std::pair<bool, QString> ConfigureHotkeys::IsUsedKey(QKeySequence key_sequence) const {
+ for (int r = 0; r < model->rowCount(); ++r) {
const QStandardItem* const parent = model->item(r, 0);
- for (int r2 = 0; r2 < parent->rowCount(); r2++) {
+ for (int r2 = 0; r2 < parent->rowCount(); ++r2) {
const QStandardItem* const key_seq_item = parent->child(r2, 1);
const auto key_seq_str = key_seq_item->text();
const auto key_seq = QKeySequence::fromString(key_seq_str, QKeySequence::NativeText);
if (key_sequence == key_seq) {
- return true;
+ return std::make_pair(true, parent->child(r2, 0)->text());
}
}
}
- return false;
+ return std::make_pair(false, QString());
}
void ConfigureHotkeys::ApplyConfiguration(HotkeyRegistry& registry) {
@@ -128,3 +138,55 @@ void ConfigureHotkeys::ApplyConfiguration(HotkeyRegistry& registry) {
registry.SaveHotkeys();
}
+
+void ConfigureHotkeys::RestoreDefaults() {
+ for (int r = 0; r < model->rowCount(); ++r) {
+ const QStandardItem* parent = model->item(r, 0);
+
+ for (int r2 = 0; r2 < parent->rowCount(); ++r2) {
+ model->item(r, 0)->child(r2, 1)->setText(Config::default_hotkeys[r2].shortcut.first);
+ }
+ }
+}
+
+void ConfigureHotkeys::ClearAll() {
+ for (int r = 0; r < model->rowCount(); ++r) {
+ const QStandardItem* parent = model->item(r, 0);
+
+ for (int r2 = 0; r2 < parent->rowCount(); ++r2) {
+ model->item(r, 0)->child(r2, 1)->setText(tr(""));
+ }
+ }
+}
+
+void ConfigureHotkeys::PopupContextMenu(const QPoint& menu_location) {
+ QModelIndex index = ui->hotkey_list->indexAt(menu_location);
+ if (!index.parent().isValid()) {
+ return;
+ }
+
+ const auto selected = index.sibling(index.row(), 1);
+ QMenu context_menu;
+
+ QAction* restore_default = context_menu.addAction(tr("Restore Default"));
+ QAction* clear = context_menu.addAction(tr("Clear"));
+
+ connect(restore_default, &QAction::triggered, [this, selected] {
+ const QKeySequence& default_key_sequence = QKeySequence::fromString(
+ Config::default_hotkeys[selected.row()].shortcut.first, QKeySequence::NativeText);
+ const auto [key_sequence_used, used_action] = IsUsedKey(default_key_sequence);
+
+ if (key_sequence_used &&
+ default_key_sequence != QKeySequence(model->data(selected).toString())) {
+
+ QMessageBox::warning(
+ this, tr("Conflicting Key Sequence"),
+ tr("The default key sequence is already assigned to: %1").arg(used_action));
+ } else {
+ model->setData(selected, default_key_sequence.toString(QKeySequence::NativeText));
+ }
+ });
+ connect(clear, &QAction::triggered, [this, selected] { model->setData(selected, tr("")); });
+
+ context_menu.exec(ui->hotkey_list->viewport()->mapToGlobal(menu_location));
+}
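Worth noting: RestoreDefaults and the context-menu handler above look up Config::default_hotkeys purely by row position, which is why the array in config.cpp must stay alphabetically sorted by action name (and why "Change Docked Mode" moved in the table). A reduced, self-contained illustration of that ordering invariant, with plain strings standing in for the Qt model; kActions and kDefaults are example entries taken from the table above, not real symbols.

#include <array>
#include <cassert>
#include <cstddef>
#include <string_view>
#include <utility>

// Row order of the hotkey tree (alphabetical by action name), as in the model.
constexpr std::array<std::string_view, 3> kActions{
    "Capture Screenshot", "Change Docked Mode", "Continue/Pause Emulation"};

// Defaults must keep the same order so row index r2 pairs up with the right action.
constexpr std::array<std::pair<std::string_view, std::string_view>, 3> kDefaults{{
    {"Capture Screenshot", "Ctrl+P"},
    {"Change Docked Mode", "F10"},
    {"Continue/Pause Emulation", "F4"},
}};

// What RestoreDefaults effectively does per row: trust the positional match.
std::string_view DefaultFor(std::size_t row) {
    assert(row < kDefaults.size());
    assert(kDefaults[row].first == kActions[row]); // ordering invariant
    return kDefaults[row].second;
}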
diff --git a/src/yuzu/configuration/configure_hotkeys.h b/src/yuzu/configuration/configure_hotkeys.h
index 8f8c6173b..a2ec3323e 100644
--- a/src/yuzu/configuration/configure_hotkeys.h
+++ b/src/yuzu/configuration/configure_hotkeys.h
@@ -35,7 +35,11 @@ private:
void RetranslateUI();
void Configure(QModelIndex index);
- bool IsUsedKey(QKeySequence key_sequence) const;
+ std::pair<bool, QString> IsUsedKey(QKeySequence key_sequence) const;
+
+ void RestoreDefaults();
+ void ClearAll();
+ void PopupContextMenu(const QPoint& menu_location);
std::unique_ptr<Ui::ConfigureHotkeys> ui;
diff --git a/src/yuzu/configuration/configure_hotkeys.ui b/src/yuzu/configuration/configure_hotkeys.ui
index 0d0b70f38..6d9f861e3 100644
--- a/src/yuzu/configuration/configure_hotkeys.ui
+++ b/src/yuzu/configuration/configure_hotkeys.ui
@@ -6,8 +6,8 @@
<rect>
<x>0</x>
<y>0</y>
- <width>363</width>
- <height>388</height>
+ <width>439</width>
+ <height>510</height>
</rect>
</property>
<property name="windowTitle">
@@ -15,7 +15,7 @@
</property>
<layout class="QVBoxLayout" name="verticalLayout">
<item>
- <layout class="QVBoxLayout" name="verticalLayout_2">
+ <layout class="QHBoxLayout" name="horizontalLayout">
<item>
<widget class="QLabel" name="label_2">
<property name="text">
@@ -24,6 +24,37 @@
</widget>
</item>
<item>
+ <spacer name="horizontalSpacer">
+ <property name="orientation">
+ <enum>Qt::Horizontal</enum>
+ </property>
+ <property name="sizeHint" stdset="0">
+ <size>
+ <width>40</width>
+ <height>20</height>
+ </size>
+ </property>
+ </spacer>
+ </item>
+ <item>
+ <widget class="QPushButton" name="button_clear_all">
+ <property name="text">
+ <string>Clear All</string>
+ </property>
+ </widget>
+ </item>
+ <item>
+ <widget class="QPushButton" name="button_restore_defaults">
+ <property name="text">
+ <string>Restore Defaults</string>
+ </property>
+ </widget>
+ </item>
+ </layout>
+ </item>
+ <item>
+ <layout class="QVBoxLayout" name="verticalLayout_2">
+ <item>
<widget class="QTreeView" name="hotkey_list">
<property name="editTriggers">
<set>QAbstractItemView::NoEditTriggers</set>
@@ -39,4 +70,4 @@
</widget>
<resources/>
<connections/>
-</ui>
\ No newline at end of file
+</ui>
diff --git a/src/yuzu/configuration/configure_input_player.cpp b/src/yuzu/configuration/configure_input_player.cpp
index 15ac30f12..e4eb5594b 100644
--- a/src/yuzu/configuration/configure_input_player.cpp
+++ b/src/yuzu/configuration/configure_input_player.cpp
@@ -56,7 +56,6 @@ static void SetAnalogButton(const Common::ParamPackage& input_param,
if (analog_param.Get("engine", "") != "analog_from_button") {
analog_param = {
{"engine", "analog_from_button"},
- {"modifier_scale", "0.5"},
};
}
analog_param.Set(button_name, input_param.Serialize());
@@ -236,8 +235,10 @@ ConfigureInputPlayer::ConfigureInputPlayer(QWidget* parent, std::size_t player_i
widget->setVisible(false);
analog_map_stick = {ui->buttonLStickAnalog, ui->buttonRStickAnalog};
- analog_map_deadzone = {ui->sliderLStickDeadzone, ui->sliderRStickDeadzone};
- analog_map_deadzone_label = {ui->labelLStickDeadzone, ui->labelRStickDeadzone};
+ analog_map_deadzone_and_modifier_slider = {ui->sliderLStickDeadzoneAndModifier,
+ ui->sliderRStickDeadzoneAndModifier};
+ analog_map_deadzone_and_modifier_slider_label = {ui->labelLStickDeadzoneAndModifier,
+ ui->labelRStickDeadzoneAndModifier};
for (int button_id = 0; button_id < Settings::NativeButton::NumButtons; button_id++) {
auto* const button = button_map[button_id];
@@ -328,10 +329,18 @@ ConfigureInputPlayer::ConfigureInputPlayer(QWidget* parent, std::size_t player_i
InputCommon::Polling::DeviceType::Analog);
}
});
- connect(analog_map_deadzone[analog_id], &QSlider::valueChanged, [=] {
- const float deadzone = analog_map_deadzone[analog_id]->value() / 100.0f;
- analog_map_deadzone_label[analog_id]->setText(tr("Deadzone: %1").arg(deadzone));
- analogs_param[analog_id].Set("deadzone", deadzone);
+
+ connect(analog_map_deadzone_and_modifier_slider[analog_id], &QSlider::valueChanged, [=] {
+ const float slider_value = analog_map_deadzone_and_modifier_slider[analog_id]->value();
+ if (analogs_param[analog_id].Get("engine", "") == "sdl") {
+ analog_map_deadzone_and_modifier_slider_label[analog_id]->setText(
+ tr("Deadzone: %1%").arg(slider_value));
+ analogs_param[analog_id].Set("deadzone", slider_value / 100.0f);
+ } else {
+ analog_map_deadzone_and_modifier_slider_label[analog_id]->setText(
+ tr("Modifier Scale: %1%").arg(slider_value));
+ analogs_param[analog_id].Set("modifier_scale", slider_value / 100.0f);
+ }
});
}
@@ -517,20 +526,31 @@ void ConfigureInputPlayer::UpdateButtonLabels() {
analog_map_stick[analog_id]->setText(tr("Set Analog Stick"));
auto& param = analogs_param[analog_id];
- auto* const analog_deadzone_slider = analog_map_deadzone[analog_id];
- auto* const analog_deadzone_label = analog_map_deadzone_label[analog_id];
-
- if (param.Has("engine") && param.Get("engine", "") == "sdl") {
- if (!param.Has("deadzone")) {
- param.Set("deadzone", 0.1f);
+ auto* const analog_stick_slider = analog_map_deadzone_and_modifier_slider[analog_id];
+ auto* const analog_stick_slider_label =
+ analog_map_deadzone_and_modifier_slider_label[analog_id];
+
+ if (param.Has("engine")) {
+ if (param.Get("engine", "") == "sdl") {
+ if (!param.Has("deadzone")) {
+ param.Set("deadzone", 0.1f);
+ }
+
+ analog_stick_slider->setValue(static_cast<int>(param.Get("deadzone", 0.1f) * 100));
+ if (analog_stick_slider->value() == 0) {
+ analog_stick_slider_label->setText(tr("Deadzone: 0%"));
+ }
+ } else {
+ if (!param.Has("modifier_scale")) {
+ param.Set("modifier_scale", 0.5f);
+ }
+
+ analog_stick_slider->setValue(
+ static_cast<int>(param.Get("modifier_scale", 0.5f) * 100));
+ if (analog_stick_slider->value() == 0) {
+ analog_stick_slider_label->setText(tr("Modifier Scale: 0%"));
+ }
}
-
- analog_deadzone_slider->setValue(static_cast<int>(param.Get("deadzone", 0.1f) * 100));
- analog_deadzone_slider->setVisible(true);
- analog_deadzone_label->setVisible(true);
- } else {
- analog_deadzone_slider->setVisible(false);
- analog_deadzone_label->setVisible(false);
}
}
}
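The input-player change above folds the deadzone and modifier-scale sliders into a single control whose meaning depends on the analog engine. A small standalone restatement of that mapping, with ParamMap standing in for Common::ParamPackage (the struct and function names here are illustrative only).

#include <map>
#include <string>

// Stand-in for Common::ParamPackage in this sketch.
using ParamMap = std::map<std::string, std::string>;

struct SliderState {
    std::string label; // text shown next to the slider
    std::string key;   // which analog parameter the slider writes
    float value;       // 0.0 .. 1.0
};

// Same branching as the new valueChanged slot: real sticks ("sdl") get a deadzone,
// button-mapped sticks get a modifier scale.
SliderState MapStickSlider(const ParamMap& analog_param, int slider_value) {
    const auto it = analog_param.find("engine");
    const std::string engine = it != analog_param.end() ? it->second : std::string{};
    const float scaled = static_cast<float>(slider_value) / 100.0f;
    if (engine == "sdl") {
        return {"Deadzone: " + std::to_string(slider_value) + "%", "deadzone", scaled};
    }
    return {"Modifier Scale: " + std::to_string(slider_value) + "%", "modifier_scale", scaled};
}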
diff --git a/src/yuzu/configuration/configure_input_player.h b/src/yuzu/configuration/configure_input_player.h
index 045704e47..95afa5375 100644
--- a/src/yuzu/configuration/configure_input_player.h
+++ b/src/yuzu/configuration/configure_input_player.h
@@ -97,8 +97,10 @@ private:
/// Analog inputs are also represented each with a single button, used to configure with an
/// actual analog stick
std::array<QPushButton*, Settings::NativeAnalog::NumAnalogs> analog_map_stick;
- std::array<QSlider*, Settings::NativeAnalog::NumAnalogs> analog_map_deadzone;
- std::array<QLabel*, Settings::NativeAnalog::NumAnalogs> analog_map_deadzone_label;
+ std::array<QSlider*, Settings::NativeAnalog::NumAnalogs>
+ analog_map_deadzone_and_modifier_slider;
+ std::array<QLabel*, Settings::NativeAnalog::NumAnalogs>
+ analog_map_deadzone_and_modifier_slider_label;
static const std::array<std::string, ANALOG_SUB_BUTTONS_NUM> analog_sub_buttons;
diff --git a/src/yuzu/configuration/configure_input_player.ui b/src/yuzu/configuration/configure_input_player.ui
index 4b37746a1..f27a77180 100644
--- a/src/yuzu/configuration/configure_input_player.ui
+++ b/src/yuzu/configuration/configure_input_player.ui
@@ -171,11 +171,11 @@
</layout>
</item>
<item row="4" column="0" colspan="2">
- <layout class="QVBoxLayout" name="sliderRStickDeadzoneVerticalLayout">
+ <layout class="QVBoxLayout" name="sliderRStickDeadzoneAndModifierVerticalLayout">
<item>
- <layout class="QHBoxLayout" name="sliderRStickDeadzoneHorizontalLayout">
+ <layout class="QHBoxLayout" name="sliderRStickDeadzoneAndModifierHorizontalLayout">
<item>
- <widget class="QLabel" name="labelRStickDeadzone">
+ <widget class="QLabel" name="labelRStickDeadzoneAndModifier">
<property name="text">
<string>Deadzone: 0</string>
</property>
@@ -187,7 +187,7 @@
</layout>
</item>
<item>
- <widget class="QSlider" name="sliderRStickDeadzone">
+ <widget class="QSlider" name="sliderRStickDeadzoneAndModifier">
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
@@ -784,14 +784,14 @@
</layout>
</item>
<item row="5" column="1" colspan="2">
- <layout class="QVBoxLayout" name="sliderLStickDeadzoneVerticalLayout">
+ <layout class="QVBoxLayout" name="sliderLStickDeadzoneAndModifierVerticalLayout">
<property name="sizeConstraint">
<enum>QLayout::SetDefaultConstraint</enum>
</property>
<item>
- <layout class="QHBoxLayout" name="sliderLStickDeadzoneHorizontalLayout">
+ <layout class="QHBoxLayout" name="sliderLStickDeadzoneAndModifierHorizontalLayout">
<item>
- <widget class="QLabel" name="labelLStickDeadzone">
+ <widget class="QLabel" name="labelLStickDeadzoneAndModifier">
<property name="text">
<string>Deadzone: 0</string>
</property>
@@ -803,7 +803,7 @@
</layout>
</item>
<item>
- <widget class="QSlider" name="sliderLStickDeadzone">
+ <widget class="QSlider" name="sliderLStickDeadzoneAndModifier">
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
diff --git a/src/yuzu/game_list_p.h b/src/yuzu/game_list_p.h
index 3e6d5a7cd..0cd0054c8 100644
--- a/src/yuzu/game_list_p.h
+++ b/src/yuzu/game_list_p.h
@@ -126,13 +126,6 @@ public:
return GameListItem::data(role);
}
-
- /**
- * Override to prevent automatic sorting.
- */
- bool operator<(const QStandardItem& other) const override {
- return false;
- }
};
class GameListItemCompat : public GameListItem {
@@ -279,6 +272,13 @@ public:
return static_cast<int>(dir_type);
}
+ /**
+ * Override to prevent automatic sorting between folders and the addDir button.
+ */
+ bool operator<(const QStandardItem& other) const override {
+ return false;
+ }
+
private:
GameListItemType dir_type;
};
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp
index 1e76f789c..0a6839b2d 100644
--- a/src/yuzu/main.cpp
+++ b/src/yuzu/main.cpp
@@ -1325,7 +1325,9 @@ void GMainWindow::OnGameListDumpRomFS(u64 program_id, const std::string& game_pa
FileSys::VirtualFile romfs;
if (*romfs_title_id == program_id) {
- romfs = file;
+ const u64 ivfc_offset = loader->ReadRomFSIVFCOffset();
+ FileSys::PatchManager pm{program_id};
+ romfs = pm.PatchRomFS(file, ivfc_offset, FileSys::ContentRecordType::Program);
} else {
romfs = installed.GetEntry(*romfs_title_id, FileSys::ContentRecordType::Data)->GetRomFS();
}
diff --git a/src/yuzu_cmd/config.cpp b/src/yuzu_cmd/config.cpp
index f4cd905c9..8476a5a16 100644
--- a/src/yuzu_cmd/config.cpp
+++ b/src/yuzu_cmd/config.cpp
@@ -388,12 +388,14 @@ void Config::ReadValues() {
static_cast<u16>(sdl2_config->GetInteger("Renderer", "frame_limit", 100));
Settings::values.use_disk_shader_cache =
sdl2_config->GetBoolean("Renderer", "use_disk_shader_cache", false);
- Settings::values.use_accurate_gpu_emulation =
- sdl2_config->GetBoolean("Renderer", "use_accurate_gpu_emulation", false);
+ const int gpu_accuracy_level = sdl2_config->GetInteger("Renderer", "gpu_accuracy", 0);
+ Settings::values.gpu_accuracy = static_cast<Settings::GPUAccuracy>(gpu_accuracy_level);
Settings::values.use_asynchronous_gpu_emulation =
sdl2_config->GetBoolean("Renderer", "use_asynchronous_gpu_emulation", false);
Settings::values.use_vsync =
static_cast<u16>(sdl2_config->GetInteger("Renderer", "use_vsync", 1));
+ Settings::values.use_fast_gpu_time =
+ sdl2_config->GetBoolean("Renderer", "use_fast_gpu_time", true);
Settings::values.bg_red = static_cast<float>(sdl2_config->GetReal("Renderer", "bg_red", 0.0));
Settings::values.bg_green =
@@ -425,6 +427,8 @@ void Config::ReadValues() {
Settings::values.reporting_services =
sdl2_config->GetBoolean("Debugging", "reporting_services", false);
Settings::values.quest_flag = sdl2_config->GetBoolean("Debugging", "quest_flag", false);
+ Settings::values.disable_cpu_opt =
+ sdl2_config->GetBoolean("Debugging", "disable_cpu_opt", false);
const auto title_list = sdl2_config->Get("AddOns", "title_ids", "");
std::stringstream ss(title_list);
diff --git a/src/yuzu_cmd/default_ini.h b/src/yuzu_cmd/default_ini.h
index d63d7a58e..60b1a62fa 100644
--- a/src/yuzu_cmd/default_ini.h
+++ b/src/yuzu_cmd/default_ini.h
@@ -146,9 +146,9 @@ frame_limit =
# 0 (default): Off, 1 : On
use_disk_shader_cache =
-# Whether to use accurate GPU emulation
-# 0 (default): Off (fast), 1 : On (slow)
-use_accurate_gpu_emulation =
+# Which GPU accuracy level to use
+# 0 (Normal), 1 (High), 2 (Extreme)
+gpu_accuracy =
# Whether to use asynchronous GPU emulation
# 0 : Off (slow), 1 (default): On (fast)
@@ -280,6 +280,9 @@ dump_nso=false
# Determines whether or not yuzu will report to the game that the emulated console is in Kiosk Mode
# false: Retail/Normal Mode (default), true: Kiosk Mode
quest_flag =
+# Determines whether or not JIT CPU optimizations are enabled
+# false: Optimizations Enabled, true: Optimizations Disabled
+disable_cpu_opt =
[WebService]
# Whether or not to enable telemetry
diff --git a/src/yuzu_cmd/emu_window/emu_window_sdl2.cpp b/src/yuzu_cmd/emu_window/emu_window_sdl2.cpp
index 19584360c..e5e684206 100644
--- a/src/yuzu_cmd/emu_window/emu_window_sdl2.cpp
+++ b/src/yuzu_cmd/emu_window/emu_window_sdl2.cpp
@@ -181,9 +181,10 @@ void EmuWindow_SDL2::PollEvents() {
const u32 current_time = SDL_GetTicks();
if (current_time > last_time + 2000) {
const auto results = Core::System::GetInstance().GetAndResetPerfStats();
- const auto title = fmt::format(
- "yuzu {} | {}-{} | FPS: {:.0f} ({:.0%})", Common::g_build_fullname,
- Common::g_scm_branch, Common::g_scm_desc, results.game_fps, results.emulation_speed);
+ const auto title =
+ fmt::format("yuzu {} | {}-{} | FPS: {:.0f} ({:.0f}%)", Common::g_build_fullname,
+ Common::g_scm_branch, Common::g_scm_desc, results.game_fps,
+ results.emulation_speed * 100.0);
SDL_SetWindowTitle(render_window, title.c_str());
last_time = current_time;
}
diff --git a/src/yuzu_cmd/emu_window/emu_window_sdl2_vk.cpp b/src/yuzu_cmd/emu_window/emu_window_sdl2_vk.cpp
index f2990910e..cb8e68a39 100644
--- a/src/yuzu_cmd/emu_window/emu_window_sdl2_vk.cpp
+++ b/src/yuzu_cmd/emu_window/emu_window_sdl2_vk.cpp
@@ -29,6 +29,7 @@ EmuWindow_SDL2_VK::EmuWindow_SDL2_VK(Core::System& system, bool fullscreen)
SDL_WINDOW_RESIZABLE | SDL_WINDOW_ALLOW_HIGHDPI);
SDL_SysWMinfo wm;
+ SDL_VERSION(&wm.version);
if (SDL_GetWindowWMInfo(render_window, &wm) == SDL_FALSE) {
LOG_CRITICAL(Frontend, "Failed to get information from the window manager");
std::exit(EXIT_FAILURE);
@@ -70,7 +71,7 @@ EmuWindow_SDL2_VK::EmuWindow_SDL2_VK(Core::System& system, bool fullscreen)
EmuWindow_SDL2_VK::~EmuWindow_SDL2_VK() = default;
std::unique_ptr<Core::Frontend::GraphicsContext> EmuWindow_SDL2_VK::CreateSharedContext() const {
- return nullptr;
+ return std::make_unique<DummyContext>();
}
void EmuWindow_SDL2_VK::Present() {
diff --git a/src/yuzu_cmd/emu_window/emu_window_sdl2_vk.h b/src/yuzu_cmd/emu_window/emu_window_sdl2_vk.h
index b8021ebea..77a6ca72b 100644
--- a/src/yuzu_cmd/emu_window/emu_window_sdl2_vk.h
+++ b/src/yuzu_cmd/emu_window/emu_window_sdl2_vk.h
@@ -22,3 +22,5 @@ public:
std::unique_ptr<Core::Frontend::GraphicsContext> CreateSharedContext() const override;
};
+
+class DummyContext : public Core::Frontend::GraphicsContext {};
diff --git a/src/yuzu_tester/config.cpp b/src/yuzu_tester/config.cpp
index ee2591c8f..3be58b15d 100644
--- a/src/yuzu_tester/config.cpp
+++ b/src/yuzu_tester/config.cpp
@@ -126,10 +126,12 @@ void Config::ReadValues() {
Settings::values.frame_limit = 100;
Settings::values.use_disk_shader_cache =
sdl2_config->GetBoolean("Renderer", "use_disk_shader_cache", false);
- Settings::values.use_accurate_gpu_emulation =
- sdl2_config->GetBoolean("Renderer", "use_accurate_gpu_emulation", false);
+ const int gpu_accuracy_level = sdl2_config->GetInteger("Renderer", "gpu_accuracy", 0);
+ Settings::values.gpu_accuracy = static_cast<Settings::GPUAccuracy>(gpu_accuracy_level);
Settings::values.use_asynchronous_gpu_emulation =
sdl2_config->GetBoolean("Renderer", "use_asynchronous_gpu_emulation", false);
+ Settings::values.use_fast_gpu_time =
+ sdl2_config->GetBoolean("Renderer", "use_fast_gpu_time", true);
Settings::values.bg_red = static_cast<float>(sdl2_config->GetReal("Renderer", "bg_red", 0.0));
Settings::values.bg_green =