path: root/src
author    Fernando S <fsahmkow27@gmail.com>        2022-10-06 21:29:53 +0200
committer GitHub <noreply@github.com>              2022-10-06 21:29:53 +0200
commit    1effa578f12f79d7816e3543291f302f126cc1d2 (patch)
tree      14803b31b6817294d40d57446f6fa94c5ff3fe9a /src
parent    Merge pull request #9025 from FernandoS27/slava-ukrayini (diff)
parent    vulkan_blitter: Fix pool allocation double free. (diff)
Diffstat (limited to 'src')
-rw-r--r-- src/CMakeLists.txt | 1
-rw-r--r-- src/common/CMakeLists.txt | 4
-rw-r--r-- src/common/address_space.cpp | 10
-rw-r--r-- src/common/address_space.h | 150
-rw-r--r-- src/common/address_space.inc | 366
-rw-r--r-- src/common/algorithm.h | 8
-rw-r--r-- src/common/hash.h | 7
-rw-r--r-- src/common/multi_level_page_table.cpp | 9
-rw-r--r-- src/common/multi_level_page_table.h | 78
-rw-r--r-- src/common/multi_level_page_table.inc | 84
-rw-r--r-- src/core/CMakeLists.txt | 10
-rw-r--r-- src/core/core.cpp | 15
-rw-r--r-- src/core/core.h | 19
-rw-r--r-- src/core/hardware_interrupt_manager.cpp | 32
-rw-r--r-- src/core/hardware_interrupt_manager.h | 32
-rw-r--r-- src/core/hle/service/nvdrv/core/container.cpp | 50
-rw-r--r-- src/core/hle/service/nvdrv/core/container.h | 52
-rw-r--r-- src/core/hle/service/nvdrv/core/nvmap.cpp | 272
-rw-r--r-- src/core/hle/service/nvdrv/core/nvmap.h | 175
-rw-r--r-- src/core/hle/service/nvdrv/core/syncpoint_manager.cpp | 121
-rw-r--r-- src/core/hle/service/nvdrv/core/syncpoint_manager.h | 134
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvdevice.h | 8
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp | 19
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvdisp_disp0.h | 15
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp | 492
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h | 191
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp | 363
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvhost_ctrl.h | 114
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp | 25
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h | 14
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp | 129
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvhost_gpu.h | 54
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp | 16
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvhost_nvdec.h | 6
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp | 81
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h | 23
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvhost_vic.cpp | 20
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvhost_vic.h | 6
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvmap.cpp | 230
-rw-r--r-- src/core/hle/service/nvdrv/devices/nvmap.h | 56
-rw-r--r-- src/core/hle/service/nvdrv/nvdata.h | 17
-rw-r--r-- src/core/hle/service/nvdrv/nvdrv.cpp | 130
-rw-r--r-- src/core/hle/service/nvdrv/nvdrv.h | 125
-rw-r--r-- src/core/hle/service/nvdrv/nvdrv_interface.cpp | 31
-rw-r--r-- src/core/hle/service/nvdrv/nvdrv_interface.h | 2
-rw-r--r-- src/core/hle/service/nvdrv/syncpoint_manager.cpp | 38
-rw-r--r-- src/core/hle/service/nvdrv/syncpoint_manager.h | 84
-rw-r--r-- src/core/hle/service/nvflinger/buffer_queue_consumer.cpp | 9
-rw-r--r-- src/core/hle/service/nvflinger/buffer_queue_consumer.h | 8
-rw-r--r-- src/core/hle/service/nvflinger/buffer_queue_producer.cpp | 10
-rw-r--r-- src/core/hle/service/nvflinger/buffer_queue_producer.h | 9
-rw-r--r-- src/core/hle/service/nvflinger/nvflinger.cpp | 29
-rw-r--r-- src/core/hle/service/nvflinger/nvflinger.h | 1
-rw-r--r-- src/core/hle/service/vi/display/vi_display.cpp | 16
-rw-r--r-- src/core/hle/service/vi/display/vi_display.h | 7
-rw-r--r-- src/core/hle/service/vi/vi.cpp | 1
-rw-r--r-- src/core/memory.cpp | 9
-rw-r--r-- src/core/memory.h | 1
-rw-r--r-- src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp | 6
-rw-r--r-- src/shader_recompiler/ir_opt/texture_pass.cpp | 98
-rw-r--r-- src/shader_recompiler/shader_info.h | 4
-rw-r--r-- src/video_core/CMakeLists.txt | 51
-rw-r--r-- src/video_core/buffer_cache/buffer_cache.h | 166
-rw-r--r-- src/video_core/cdma_pusher.cpp | 29
-rw-r--r-- src/video_core/cdma_pusher.h | 18
-rw-r--r-- src/video_core/command_classes/host1x.cpp | 29
-rw-r--r-- src/video_core/control/channel_state.cpp | 40
-rw-r--r-- src/video_core/control/channel_state.h | 68
-rw-r--r-- src/video_core/control/channel_state_cache.cpp | 14
-rw-r--r-- src/video_core/control/channel_state_cache.h | 101
-rw-r--r-- src/video_core/control/channel_state_cache.inc | 86
-rw-r--r-- src/video_core/control/scheduler.cpp | 32
-rw-r--r-- src/video_core/control/scheduler.h | 37
-rw-r--r-- src/video_core/dma_pusher.cpp | 26
-rw-r--r-- src/video_core/dma_pusher.h | 39
-rw-r--r-- src/video_core/engines/engine_upload.cpp | 46
-rw-r--r-- src/video_core/engines/engine_upload.h | 6
-rw-r--r-- src/video_core/engines/kepler_compute.cpp | 13
-rw-r--r-- src/video_core/engines/kepler_memory.cpp | 13
-rw-r--r-- src/video_core/engines/maxwell_3d.cpp | 43
-rw-r--r-- src/video_core/engines/maxwell_dma.cpp | 111
-rw-r--r-- src/video_core/engines/maxwell_dma.h | 6
-rw-r--r-- src/video_core/engines/puller.cpp | 306
-rw-r--r-- src/video_core/engines/puller.h | 177
-rw-r--r-- src/video_core/fence_manager.h | 104
-rw-r--r-- src/video_core/gpu.cpp | 706
-rw-r--r-- src/video_core/gpu.h | 93
-rw-r--r-- src/video_core/gpu_thread.cpp | 22
-rw-r--r-- src/video_core/gpu_thread.h | 14
-rw-r--r-- src/video_core/host1x/codecs/codec.cpp (renamed from src/video_core/command_classes/codecs/codec.cpp) | 44
-rw-r--r-- src/video_core/host1x/codecs/codec.h (renamed from src/video_core/command_classes/codecs/codec.h) | 21
-rw-r--r-- src/video_core/host1x/codecs/h264.cpp (renamed from src/video_core/command_classes/codecs/h264.cpp) | 17
-rw-r--r-- src/video_core/host1x/codecs/h264.h (renamed from src/video_core/command_classes/codecs/h264.h) | 16
-rw-r--r-- src/video_core/host1x/codecs/vp8.cpp (renamed from src/video_core/command_classes/codecs/vp8.cpp) | 12
-rw-r--r-- src/video_core/host1x/codecs/vp8.h (renamed from src/video_core/command_classes/codecs/vp8.h) | 15
-rw-r--r-- src/video_core/host1x/codecs/vp9.cpp (renamed from src/video_core/command_classes/codecs/vp9.cpp) | 23
-rw-r--r-- src/video_core/host1x/codecs/vp9.h (renamed from src/video_core/command_classes/codecs/vp9.h) | 22
-rw-r--r-- src/video_core/host1x/codecs/vp9_types.h (renamed from src/video_core/command_classes/codecs/vp9_types.h) | 1
-rw-r--r-- src/video_core/host1x/control.cpp | 33
-rw-r--r-- src/video_core/host1x/control.h (renamed from src/video_core/command_classes/host1x.h) | 20
-rw-r--r-- src/video_core/host1x/host1x.cpp | 17
-rw-r--r-- src/video_core/host1x/host1x.h | 57
-rw-r--r-- src/video_core/host1x/nvdec.cpp (renamed from src/video_core/command_classes/nvdec.cpp) | 11
-rw-r--r-- src/video_core/host1x/nvdec.h (renamed from src/video_core/command_classes/nvdec.h) | 14
-rw-r--r-- src/video_core/host1x/nvdec_common.h (renamed from src/video_core/command_classes/nvdec_common.h) | 4
-rw-r--r-- src/video_core/host1x/sync_manager.cpp (renamed from src/video_core/command_classes/sync_manager.cpp) | 13
-rw-r--r-- src/video_core/host1x/sync_manager.h (renamed from src/video_core/command_classes/sync_manager.h) | 12
-rw-r--r-- src/video_core/host1x/syncpoint_manager.cpp | 96
-rw-r--r-- src/video_core/host1x/syncpoint_manager.h | 98
-rw-r--r-- src/video_core/host1x/vic.cpp (renamed from src/video_core/command_classes/vic.cpp) | 36
-rw-r--r-- src/video_core/host1x/vic.h (renamed from src/video_core/command_classes/vic.h) | 13
-rw-r--r-- src/video_core/macro/macro.cpp | 1
-rw-r--r-- src/video_core/memory_manager.cpp | 754
-rw-r--r-- src/video_core/memory_manager.h | 174
-rw-r--r-- src/video_core/query_cache.h | 22
-rw-r--r-- src/video_core/rasterizer_interface.h | 20
-rw-r--r-- src/video_core/renderer_opengl/gl_compute_pipeline.cpp | 20
-rw-r--r-- src/video_core/renderer_opengl/gl_compute_pipeline.h | 16
-rw-r--r-- src/video_core/renderer_opengl/gl_fence_manager.cpp | 13
-rw-r--r-- src/video_core/renderer_opengl/gl_fence_manager.h | 6
-rw-r--r-- src/video_core/renderer_opengl/gl_graphics_pipeline.cpp | 29
-rw-r--r-- src/video_core/renderer_opengl/gl_graphics_pipeline.h | 16
-rw-r--r-- src/video_core/renderer_opengl/gl_query_cache.cpp | 5
-rw-r--r-- src/video_core/renderer_opengl/gl_query_cache.h | 3
-rw-r--r-- src/video_core/renderer_opengl/gl_rasterizer.cpp | 217
-rw-r--r-- src/video_core/renderer_opengl/gl_rasterizer.h | 22
-rw-r--r-- src/video_core/renderer_opengl/gl_shader_cache.cpp | 42
-rw-r--r-- src/video_core/renderer_opengl/gl_shader_cache.h | 9
-rw-r--r-- src/video_core/renderer_opengl/gl_state_tracker.cpp | 17
-rw-r--r-- src/video_core/renderer_opengl/gl_state_tracker.h | 83
-rw-r--r-- src/video_core/renderer_opengl/maxwell_to_gl.h | 2
-rw-r--r-- src/video_core/renderer_opengl/renderer_opengl.cpp | 2
-rw-r--r-- src/video_core/renderer_vulkan/maxwell_to_vk.cpp | 2
-rw-r--r-- src/video_core/renderer_vulkan/renderer_vulkan.cpp | 8
-rw-r--r-- src/video_core/renderer_vulkan/vk_blit_screen.cpp | 13
-rw-r--r-- src/video_core/renderer_vulkan/vk_blit_screen.h | 2
-rw-r--r-- src/video_core/renderer_vulkan/vk_compute_pass.cpp | 2
-rw-r--r-- src/video_core/renderer_vulkan/vk_compute_pipeline.cpp | 4
-rw-r--r-- src/video_core/renderer_vulkan/vk_fence_manager.cpp | 15
-rw-r--r-- src/video_core/renderer_vulkan/vk_fence_manager.h | 6
-rw-r--r-- src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp | 18
-rw-r--r-- src/video_core/renderer_vulkan/vk_graphics_pipeline.h | 28
-rw-r--r-- src/video_core/renderer_vulkan/vk_pipeline_cache.cpp | 34
-rw-r--r-- src/video_core/renderer_vulkan/vk_pipeline_cache.h | 6
-rw-r--r-- src/video_core/renderer_vulkan/vk_query_cache.cpp | 7
-rw-r--r-- src/video_core/renderer_vulkan/vk_query_cache.h | 5
-rw-r--r-- src/video_core/renderer_vulkan/vk_rasterizer.cpp | 123
-rw-r--r-- src/video_core/renderer_vulkan/vk_rasterizer.h | 29
-rw-r--r-- src/video_core/renderer_vulkan/vk_state_tracker.cpp | 18
-rw-r--r-- src/video_core/renderer_vulkan/vk_state_tracker.h | 27
-rw-r--r-- src/video_core/renderer_vulkan/vk_swapchain.cpp | 15
-rw-r--r-- src/video_core/renderer_vulkan/vk_texture_cache.cpp | 32
-rw-r--r-- src/video_core/renderer_vulkan/vk_texture_cache.h | 5
-rw-r--r-- src/video_core/shader_cache.cpp | 33
-rw-r--r-- src/video_core/shader_cache.h | 15
-rw-r--r-- src/video_core/surface.h | 8
-rw-r--r-- src/video_core/texture_cache/format_lookup_table.cpp | 2
-rw-r--r-- src/video_core/texture_cache/formatter.h | 4
-rw-r--r-- src/video_core/texture_cache/image_base.cpp | 13
-rw-r--r-- src/video_core/texture_cache/image_base.h | 3
-rw-r--r-- src/video_core/texture_cache/render_targets.h | 1
-rw-r--r-- src/video_core/texture_cache/texture_cache.cpp | 15
-rw-r--r-- src/video_core/texture_cache/texture_cache.h | 222
-rw-r--r-- src/video_core/texture_cache/texture_cache_base.h | 85
-rw-r--r-- src/video_core/texture_cache/util.cpp | 3
-rw-r--r-- src/video_core/textures/decoders.cpp | 240
-rw-r--r-- src/video_core/textures/decoders.h | 33
-rw-r--r-- src/video_core/vulkan_common/vulkan_wrapper.h | 20
-rw-r--r-- src/yuzu/main.cpp | 6
169 files changed, 6498 insertions, 3194 deletions
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 54de1dc94..3575a3cb3 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -121,6 +121,7 @@ else()
if (ARCHITECTURE_x86_64)
add_compile_options("-mcx16")
+ add_compile_options("-fwrapv")
endif()
if (APPLE AND CMAKE_CXX_COMPILER_ID STREQUAL Clang)
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index 3447fabd8..a02696873 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -17,6 +17,8 @@ endif ()
include(GenerateSCMRev)
add_library(common STATIC
+ address_space.cpp
+ address_space.h
algorithm.h
alignment.h
announce_multiplayer_room.h
@@ -81,6 +83,8 @@ add_library(common STATIC
microprofile.cpp
microprofile.h
microprofileui.h
+ multi_level_page_table.cpp
+ multi_level_page_table.h
nvidia_flags.cpp
nvidia_flags.h
page_table.cpp
diff --git a/src/common/address_space.cpp b/src/common/address_space.cpp
new file mode 100644
index 000000000..866e78dbe
--- /dev/null
+++ b/src/common/address_space.cpp
@@ -0,0 +1,10 @@
+// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "common/address_space.inc"
+
+namespace Common {
+
+template class Common::FlatAllocator<u32, 0, 32>;
+
+}
diff --git a/src/common/address_space.h b/src/common/address_space.h
new file mode 100644
index 000000000..9222b2fdc
--- /dev/null
+++ b/src/common/address_space.h
@@ -0,0 +1,150 @@
+// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#pragma once
+
+#include <concepts>
+#include <functional>
+#include <mutex>
+#include <vector>
+
+#include "common/common_types.h"
+
+namespace Common {
+template <typename VaType, size_t AddressSpaceBits>
+concept AddressSpaceValid = std::is_unsigned_v<VaType> && sizeof(VaType) * 8 >= AddressSpaceBits;
+
+struct EmptyStruct {};
+
+/**
+ * @brief FlatAddressSpaceMap provides a generic VA->PA mapping implementation using a sorted vector
+ */
+template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa,
+ bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo = EmptyStruct>
+requires AddressSpaceValid<VaType, AddressSpaceBits>
+class FlatAddressSpaceMap {
+public:
+ /// The maximum VA that this AS can technically reach
+ static constexpr VaType VaMaximum{(1ULL << (AddressSpaceBits - 1)) +
+ ((1ULL << (AddressSpaceBits - 1)) - 1)};
+
+ explicit FlatAddressSpaceMap(VaType va_limit,
+ std::function<void(VaType, VaType)> unmap_callback = {});
+
+ FlatAddressSpaceMap() = default;
+
+ void Map(VaType virt, PaType phys, VaType size, ExtraBlockInfo extra_info = {}) {
+ std::scoped_lock lock(block_mutex);
+ MapLocked(virt, phys, size, extra_info);
+ }
+
+ void Unmap(VaType virt, VaType size) {
+ std::scoped_lock lock(block_mutex);
+ UnmapLocked(virt, size);
+ }
+
+ VaType GetVALimit() const {
+ return va_limit;
+ }
+
+protected:
+ /**
+ * @brief Represents a block of memory in the AS, the physical mapping is contiguous until
+ * another block with a different phys address is hit
+ */
+ struct Block {
+ /// VA of the block
+ VaType virt{UnmappedVa};
+ /// PA of the block, will increase 1-1 with VA until a new block is encountered
+ PaType phys{UnmappedPa};
+ [[no_unique_address]] ExtraBlockInfo extra_info;
+
+ Block() = default;
+
+ Block(VaType virt_, PaType phys_, ExtraBlockInfo extra_info_)
+ : virt(virt_), phys(phys_), extra_info(extra_info_) {}
+
+ bool Valid() const {
+ return virt != UnmappedVa;
+ }
+
+ bool Mapped() const {
+ return phys != UnmappedPa;
+ }
+
+ bool Unmapped() const {
+ return phys == UnmappedPa;
+ }
+
+ bool operator<(const VaType& p_virt) const {
+ return virt < p_virt;
+ }
+ };
+
+ /**
+ * @brief Maps a PA range into the given AS region
+ * @note block_mutex MUST be locked when calling this
+ */
+ void MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInfo extra_info);
+
+ /**
+ * @brief Unmaps the given range and merges it with other unmapped regions
+ * @note block_mutex MUST be locked when calling this
+ */
+ void UnmapLocked(VaType virt, VaType size);
+
+ std::mutex block_mutex;
+ std::vector<Block> blocks{Block{}};
+
+ /// a soft limit on the maximum VA of the AS
+ VaType va_limit{VaMaximum};
+
+private:
+ /// Callback called when the mappings in a region have changed
+ std::function<void(VaType, VaType)> unmap_callback{};
+};
+
+/**
+ * @brief FlatAllocator specialises FlatAddressSpaceMap to work as an allocator, with an
+ * initial, fast linear pass and a subsequent slower pass that iterates until it finds a free block
+ */
+template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits>
+requires AddressSpaceValid<VaType, AddressSpaceBits>
+class FlatAllocator
+ : public FlatAddressSpaceMap<VaType, UnmappedVa, bool, false, false, AddressSpaceBits> {
+private:
+ using Base = FlatAddressSpaceMap<VaType, UnmappedVa, bool, false, false, AddressSpaceBits>;
+
+public:
+ explicit FlatAllocator(VaType virt_start, VaType va_limit = Base::VaMaximum);
+
+ /**
+ * @brief Allocates a region in the AS of the given size and returns its address
+ */
+ VaType Allocate(VaType size);
+
+ /**
+ * @brief Marks the given region in the AS as allocated
+ */
+ void AllocateFixed(VaType virt, VaType size);
+
+ /**
+ * @brief Frees an AS region so it can be used again
+ */
+ void Free(VaType virt, VaType size);
+
+ VaType GetVAStart() const {
+ return virt_start;
+ }
+
+private:
+ /// The base VA of the allocator, no allocations will be below this
+ VaType virt_start;
+
+ /**
+ * The end address for the initial linear allocation pass
+ * Once this reaches the AS limit the slower allocation path will be used
+ */
+ VaType current_linear_alloc_end;
+};
+} // namespace Common
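
For orientation, here is a minimal usage sketch of the allocator declared above. It is illustrative only and not part of this patch; the base address and sizes are arbitrary, and the u32/32-bit specialisation is the one explicitly instantiated by address_space.cpp.

    #include "common/address_space.h"

    void AllocatorExample() {
        // Allocator over a 32-bit address space, starting at an arbitrary base VA.
        Common::FlatAllocator<u32, 0, 32> allocator{0x1000};

        const u32 va = allocator.Allocate(0x2000); // fast linear pass first
        allocator.AllocateFixed(0x8000, 0x1000);   // mark a fixed region as allocated
        allocator.Free(va, 0x2000);                // make the region reusable again
    }
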
diff --git a/src/common/address_space.inc b/src/common/address_space.inc
new file mode 100644
index 000000000..2195dabd5
--- /dev/null
+++ b/src/common/address_space.inc
@@ -0,0 +1,366 @@
+// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "common/address_space.h"
+#include "common/assert.h"
+
+#define MAP_MEMBER(returnType) \
+ template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa, \
+ bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo> \
+ requires AddressSpaceValid<VaType, AddressSpaceBits> returnType FlatAddressSpaceMap< \
+ VaType, UnmappedVa, PaType, UnmappedPa, PaContigSplit, AddressSpaceBits, ExtraBlockInfo>
+#define MAP_MEMBER_CONST() \
+ template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa, \
+ bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo> \
+ requires AddressSpaceValid<VaType, AddressSpaceBits> FlatAddressSpaceMap< \
+ VaType, UnmappedVa, PaType, UnmappedPa, PaContigSplit, AddressSpaceBits, ExtraBlockInfo>
+
+#define MM_MEMBER(returnType) \
+ template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits> \
+ requires AddressSpaceValid<VaType, AddressSpaceBits> returnType \
+ FlatMemoryManager<VaType, UnmappedVa, AddressSpaceBits>
+
+#define ALLOC_MEMBER(returnType) \
+ template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits> \
+ requires AddressSpaceValid<VaType, AddressSpaceBits> returnType \
+ FlatAllocator<VaType, UnmappedVa, AddressSpaceBits>
+#define ALLOC_MEMBER_CONST() \
+ template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits> \
+ requires AddressSpaceValid<VaType, AddressSpaceBits> \
+ FlatAllocator<VaType, UnmappedVa, AddressSpaceBits>
+
+namespace Common {
+MAP_MEMBER_CONST()::FlatAddressSpaceMap(VaType va_limit_,
+ std::function<void(VaType, VaType)> unmap_callback_)
+ : va_limit{va_limit_}, unmap_callback{std::move(unmap_callback_)} {
+ if (va_limit > VaMaximum) {
+ ASSERT_MSG(false, "Invalid VA limit!");
+ }
+}
+
+MAP_MEMBER(void)::MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInfo extra_info) {
+ VaType virt_end{virt + size};
+
+ if (virt_end > va_limit) {
+ ASSERT_MSG(false,
+ "Trying to map a block past the VA limit: virt_end: 0x{:X}, va_limit: 0x{:X}",
+ virt_end, va_limit);
+ }
+
+ auto block_end_successor{std::lower_bound(blocks.begin(), blocks.end(), virt_end)};
+ if (block_end_successor == blocks.begin()) {
+ ASSERT_MSG(false, "Trying to map a block before the VA start: virt_end: 0x{:X}", virt_end);
+ }
+
+ auto block_end_predecessor{std::prev(block_end_successor)};
+
+ if (block_end_successor != blocks.end()) {
+ // We have blocks in front of us, if one is directly in front then we don't have to add a
+ // tail
+ if (block_end_successor->virt != virt_end) {
+ PaType tailPhys{[&]() -> PaType {
+ if constexpr (!PaContigSplit) {
+ // Always propagate unmapped regions rather than calculating offset
+ return block_end_predecessor->phys;
+ } else {
+ if (block_end_predecessor->Unmapped()) {
+ // Always propagate unmapped regions rather than calculating offset
+ return block_end_predecessor->phys;
+ } else {
+ return block_end_predecessor->phys + virt_end - block_end_predecessor->virt;
+ }
+ }
+ }()};
+
+ if (block_end_predecessor->virt >= virt) {
+ // If this block's start would be overlapped by the map then reuse it as a tail
+ // block
+ block_end_predecessor->virt = virt_end;
+ block_end_predecessor->phys = tailPhys;
+ block_end_predecessor->extra_info = block_end_predecessor->extra_info;
+
+ // No longer predecessor anymore
+ block_end_successor = block_end_predecessor--;
+ } else {
+ // Else insert a new one and we're done
+ blocks.insert(block_end_successor,
+ {Block(virt, phys, extra_info),
+ Block(virt_end, tailPhys, block_end_predecessor->extra_info)});
+ if (unmap_callback) {
+ unmap_callback(virt, size);
+ }
+
+ return;
+ }
+ }
+ } else {
+ // block_end_predecessor will always be unmapped as the blocks list has to be terminated by
+ // an unmapped chunk
+ if (block_end_predecessor != blocks.begin() && block_end_predecessor->virt >= virt) {
+ // Move the unmapped block start backwards
+ block_end_predecessor->virt = virt_end;
+
+ // No longer predecessor anymore
+ block_end_successor = block_end_predecessor--;
+ } else {
+ // Else insert a new one and we're done
+ blocks.insert(block_end_successor,
+ {Block(virt, phys, extra_info), Block(virt_end, UnmappedPa, {})});
+ if (unmap_callback) {
+ unmap_callback(virt, size);
+ }
+
+ return;
+ }
+ }
+
+ auto block_start_successor{block_end_successor};
+
+ // Walk the block vector to find the start successor as this is more efficient than another
+ // binary search in most scenarios
+ while (std::prev(block_start_successor)->virt >= virt) {
+ block_start_successor--;
+ }
+
+ // Check that the start successor is either the end block or something in between
+ if (block_start_successor->virt > virt_end) {
+ ASSERT_MSG(false, "Unsorted block in AS map: virt: 0x{:X}", block_start_successor->virt);
+ } else if (block_start_successor->virt == virt_end) {
+ // We need to create a new block as there are none spare that we would overwrite
+ blocks.insert(block_start_successor, Block(virt, phys, extra_info));
+ } else {
+ // Erase overwritten blocks
+ if (auto eraseStart{std::next(block_start_successor)}; eraseStart != block_end_successor) {
+ blocks.erase(eraseStart, block_end_successor);
+ }
+
+ // Reuse a block that would otherwise be overwritten as a start block
+ block_start_successor->virt = virt;
+ block_start_successor->phys = phys;
+ block_start_successor->extra_info = extra_info;
+ }
+
+ if (unmap_callback) {
+ unmap_callback(virt, size);
+ }
+}
+
+MAP_MEMBER(void)::UnmapLocked(VaType virt, VaType size) {
+ VaType virt_end{virt + size};
+
+ if (virt_end > va_limit) {
+ ASSERT_MSG(false,
+ "Trying to map a block past the VA limit: virt_end: 0x{:X}, va_limit: 0x{:X}",
+ virt_end, va_limit);
+ }
+
+ auto block_end_successor{std::lower_bound(blocks.begin(), blocks.end(), virt_end)};
+ if (block_end_successor == blocks.begin()) {
+ ASSERT_MSG(false, "Trying to unmap a block before the VA start: virt_end: 0x{:X}",
+ virt_end);
+ }
+
+ auto block_end_predecessor{std::prev(block_end_successor)};
+
+ auto walk_back_to_predecessor{[&](auto iter) {
+ while (iter->virt >= virt) {
+ iter--;
+ }
+
+ return iter;
+ }};
+
+ auto erase_blocks_with_end_unmapped{[&](auto unmappedEnd) {
+ auto block_start_predecessor{walk_back_to_predecessor(unmappedEnd)};
+ auto block_start_successor{std::next(block_start_predecessor)};
+
+ auto eraseEnd{[&]() {
+ if (block_start_predecessor->Unmapped()) {
+ // If the start predecessor is unmapped then we can erase everything in our region
+ // and be done
+ return std::next(unmappedEnd);
+ } else {
+ // Else reuse the end predecessor as the start of our unmapped region then erase all
+ // up to it
+ unmappedEnd->virt = virt;
+ return unmappedEnd;
+ }
+ }()};
+
+ // We can't have two unmapped regions after each other
+ if (eraseEnd != blocks.end() &&
+ (eraseEnd == block_start_successor ||
+ (block_start_predecessor->Unmapped() && eraseEnd->Unmapped()))) {
+ ASSERT_MSG(false, "Multiple contiguous unmapped regions are unsupported!");
+ }
+
+ blocks.erase(block_start_successor, eraseEnd);
+ }};
+
+ // We can avoid any splitting logic if these are the case
+ if (block_end_predecessor->Unmapped()) {
+ if (block_end_predecessor->virt > virt) {
+ erase_blocks_with_end_unmapped(block_end_predecessor);
+ }
+
+ if (unmap_callback) {
+ unmap_callback(virt, size);
+ }
+
+ return; // The region is unmapped, bail out early
+ } else if (block_end_successor->virt == virt_end && block_end_successor->Unmapped()) {
+ erase_blocks_with_end_unmapped(block_end_successor);
+
+ if (unmap_callback) {
+ unmap_callback(virt, size);
+ }
+
+ return; // The region is unmapped here and doesn't need splitting, bail out early
+ } else if (block_end_successor == blocks.end()) {
+ // This should never happen as the end should always follow an unmapped block
+ ASSERT_MSG(false, "Unexpected Memory Manager state!");
+ } else if (block_end_successor->virt != virt_end) {
+ // If one block is directly in front then we don't have to add a tail
+
+ // The previous block is mapped so we will need to add a tail with an offset
+ PaType tailPhys{[&]() {
+ if constexpr (PaContigSplit) {
+ return block_end_predecessor->phys + virt_end - block_end_predecessor->virt;
+ } else {
+ return block_end_predecessor->phys;
+ }
+ }()};
+
+ if (block_end_predecessor->virt >= virt) {
+ // If this block's start would be overlapped by the unmap then reuse it as a tail block
+ block_end_predecessor->virt = virt_end;
+ block_end_predecessor->phys = tailPhys;
+
+ // No longer predecessor anymore
+ block_end_successor = block_end_predecessor--;
+ } else {
+ blocks.insert(block_end_successor,
+ {Block(virt, UnmappedPa, {}),
+ Block(virt_end, tailPhys, block_end_predecessor->extra_info)});
+ if (unmap_callback) {
+ unmap_callback(virt, size);
+ }
+
+ // The previous block is mapped and ends before the unmapped region, so we're done here
+ return;
+ }
+ }
+
+ // Walk the block vector to find the start predecessor as this is more efficient than another
+ // binary search in most scenarios
+ auto block_start_predecessor{walk_back_to_predecessor(block_end_successor)};
+ auto block_start_successor{std::next(block_start_predecessor)};
+
+ if (block_start_successor->virt > virt_end) {
+ ASSERT_MSG(false, "Unsorted block in AS map: virt: 0x{:X}", block_start_successor->virt);
+ } else if (block_start_successor->virt == virt_end) {
+ // There are no blocks between the start and the end that would let us skip inserting a new
+ // one for head
+
+ // The previous block may be unmapped; if so we don't need to insert any unmaps after it
+ if (block_start_predecessor->Mapped()) {
+ blocks.insert(block_start_successor, Block(virt, UnmappedPa, {}));
+ }
+ } else if (block_start_predecessor->Unmapped()) {
+ // If the previous block is unmapped
+ blocks.erase(block_start_successor, block_end_predecessor);
+ } else {
+ // Erase overwritten blocks, skipping the first one as we have written the unmapped start
+ // block there
+ if (auto eraseStart{std::next(block_start_successor)}; eraseStart != block_end_successor) {
+ blocks.erase(eraseStart, block_end_successor);
+ }
+
+ // Add in the unmapped block header
+ block_start_successor->virt = virt;
+ block_start_successor->phys = UnmappedPa;
+ }
+
+ if (unmap_callback)
+ unmap_callback(virt, size);
+}
+
+ALLOC_MEMBER_CONST()::FlatAllocator(VaType virt_start_, VaType va_limit_)
+ : Base{va_limit_}, virt_start{virt_start_}, current_linear_alloc_end{virt_start_} {}
+
+ALLOC_MEMBER(VaType)::Allocate(VaType size) {
+ std::scoped_lock lock(this->block_mutex);
+
+ VaType alloc_start{UnmappedVa};
+ VaType alloc_end{current_linear_alloc_end + size};
+
+ // Avoid searching backwards in the address space if possible
+ if (alloc_end >= current_linear_alloc_end && alloc_end <= this->va_limit) {
+ auto alloc_end_successor{
+ std::lower_bound(this->blocks.begin(), this->blocks.end(), alloc_end)};
+ if (alloc_end_successor == this->blocks.begin()) {
+ ASSERT_MSG(false, "First block in AS map is invalid!");
+ }
+
+ auto alloc_end_predecessor{std::prev(alloc_end_successor)};
+ if (alloc_end_predecessor->virt <= current_linear_alloc_end) {
+ alloc_start = current_linear_alloc_end;
+ } else {
+ // Skip over any fixed mappings in front of us
+ while (alloc_end_successor != this->blocks.end()) {
+ if (alloc_end_successor->virt - alloc_end_predecessor->virt < size ||
+ alloc_end_predecessor->Mapped()) {
+ alloc_start = alloc_end_predecessor->virt;
+ break;
+ }
+
+ alloc_end_predecessor = alloc_end_successor++;
+
+ // Use the VA limit to calculate if we can fit in the final block since it has no
+ // successor
+ if (alloc_end_successor == this->blocks.end()) {
+ alloc_end = alloc_end_predecessor->virt + size;
+
+ if (alloc_end >= alloc_end_predecessor->virt && alloc_end <= this->va_limit) {
+ alloc_start = alloc_end_predecessor->virt;
+ }
+ }
+ }
+ }
+ }
+
+ if (alloc_start != UnmappedVa) {
+ current_linear_alloc_end = alloc_start + size;
+ } else { // If linear allocation overflows the AS then find a gap
+ if (this->blocks.size() <= 2) {
+ ASSERT_MSG(false, "Unexpected allocator state!");
+ }
+
+ auto search_predecessor{this->blocks.begin()};
+ auto search_successor{std::next(search_predecessor)};
+
+ while (search_successor != this->blocks.end() &&
+ (search_successor->virt - search_predecessor->virt < size ||
+ search_predecessor->Mapped())) {
+ search_predecessor = search_successor++;
+ }
+
+ if (search_successor != this->blocks.end()) {
+ alloc_start = search_predecessor->virt;
+ } else {
+ return {}; // AS is full
+ }
+ }
+
+ this->MapLocked(alloc_start, true, size, {});
+ return alloc_start;
+}
+
+ALLOC_MEMBER(void)::AllocateFixed(VaType virt, VaType size) {
+ this->Map(virt, true, size);
+}
+
+ALLOC_MEMBER(void)::Free(VaType virt, VaType size) {
+ this->Unmap(virt, size);
+}
+} // namespace Common
diff --git a/src/common/algorithm.h b/src/common/algorithm.h
index 9ddfd637b..c27c9241d 100644
--- a/src/common/algorithm.h
+++ b/src/common/algorithm.h
@@ -24,4 +24,12 @@ template <class ForwardIt, class T, class Compare = std::less<>>
return first != last && !comp(value, *first) ? first : last;
}
+template <typename T, typename Func, typename... Args>
+T FoldRight(T initial_value, Func&& func, Args&&... args) {
+ T value{initial_value};
+ const auto high_func = [&value, &func]<typename U>(U x) { value = func(value, x); };
+ (std::invoke(high_func, std::forward<Args>(args)), ...);
+ return value;
+}
+
} // namespace Common
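
As a quick illustration (not part of the patch), FoldRight folds the trailing arguments through func, seeded with initial_value; here the lambda keeps the running maximum.

    // value starts at 1, then becomes 4, then 16, and stays 16.
    const u32 max_align = Common::FoldRight(
        u32{1}, [](u32 a, u32 b) { return a > b ? a : b; }, u32{4}, u32{16}, u32{8});
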
diff --git a/src/common/hash.h b/src/common/hash.h
index b6f3e6d6f..e8fe78b07 100644
--- a/src/common/hash.h
+++ b/src/common/hash.h
@@ -18,4 +18,11 @@ struct PairHash {
}
};
+template <typename T>
+struct IdentityHash {
+ [[nodiscard]] size_t operator()(T value) const noexcept {
+ return static_cast<size_t>(value);
+ }
+};
+
} // namespace Common
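
Illustrative use of IdentityHash (not part of the patch): integer keys that are already well distributed, such as handle IDs, are passed straight through as the bucket hash.

    std::unordered_map<u32, std::string, Common::IdentityHash<u32>> names;
    names.emplace(42u, "example");
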
diff --git a/src/common/multi_level_page_table.cpp b/src/common/multi_level_page_table.cpp
new file mode 100644
index 000000000..46e362f3b
--- /dev/null
+++ b/src/common/multi_level_page_table.cpp
@@ -0,0 +1,9 @@
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/multi_level_page_table.inc"
+
+namespace Common {
+template class Common::MultiLevelPageTable<u64>;
+template class Common::MultiLevelPageTable<u32>;
+} // namespace Common
diff --git a/src/common/multi_level_page_table.h b/src/common/multi_level_page_table.h
new file mode 100644
index 000000000..31f6676a0
--- /dev/null
+++ b/src/common/multi_level_page_table.h
@@ -0,0 +1,78 @@
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "common/common_types.h"
+
+namespace Common {
+
+template <typename BaseAddr>
+class MultiLevelPageTable final {
+public:
+ constexpr MultiLevelPageTable() = default;
+ explicit MultiLevelPageTable(std::size_t address_space_bits, std::size_t first_level_bits,
+ std::size_t page_bits);
+
+ ~MultiLevelPageTable() noexcept;
+
+ MultiLevelPageTable(const MultiLevelPageTable&) = delete;
+ MultiLevelPageTable& operator=(const MultiLevelPageTable&) = delete;
+
+ MultiLevelPageTable(MultiLevelPageTable&& other) noexcept
+ : address_space_bits{std::exchange(other.address_space_bits, 0)},
+ first_level_bits{std::exchange(other.first_level_bits, 0)}, page_bits{std::exchange(
+ other.page_bits, 0)},
+ first_level_shift{std::exchange(other.first_level_shift, 0)},
+ first_level_chunk_size{std::exchange(other.first_level_chunk_size, 0)},
+ first_level_map{std::move(other.first_level_map)}, base_ptr{std::exchange(other.base_ptr,
+ nullptr)} {}
+
+ MultiLevelPageTable& operator=(MultiLevelPageTable&& other) noexcept {
+ address_space_bits = std::exchange(other.address_space_bits, 0);
+ first_level_bits = std::exchange(other.first_level_bits, 0);
+ page_bits = std::exchange(other.page_bits, 0);
+ first_level_shift = std::exchange(other.first_level_shift, 0);
+ first_level_chunk_size = std::exchange(other.first_level_chunk_size, 0);
+ alloc_size = std::exchange(other.alloc_size, 0);
+ first_level_map = std::move(other.first_level_map);
+ base_ptr = std::exchange(other.base_ptr, nullptr);
+ return *this;
+ }
+
+ void ReserveRange(u64 start, std::size_t size);
+
+ [[nodiscard]] const BaseAddr& operator[](std::size_t index) const {
+ return base_ptr[index];
+ }
+
+ [[nodiscard]] BaseAddr& operator[](std::size_t index) {
+ return base_ptr[index];
+ }
+
+ [[nodiscard]] BaseAddr* data() {
+ return base_ptr;
+ }
+
+ [[nodiscard]] const BaseAddr* data() const {
+ return base_ptr;
+ }
+
+private:
+ void AllocateLevel(u64 level);
+
+ std::size_t address_space_bits{};
+ std::size_t first_level_bits{};
+ std::size_t page_bits{};
+ std::size_t first_level_shift{};
+ std::size_t first_level_chunk_size{};
+ std::size_t alloc_size{};
+ std::vector<void*> first_level_map{};
+ BaseAddr* base_ptr{};
+};
+
+} // namespace Common
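
A small usage sketch for the class above (illustrative only; the bit widths are arbitrary): the table reserves address space for every possible entry up front and only commits the first-level chunks that ReserveRange touches.

    // 40-bit address space, 10-bit first level, 16 KiB (2^14) pages.
    Common::MultiLevelPageTable<u32> page_table{40, 10, 14};
    page_table.ReserveRange(0, 1ULL << 30); // commit backing for the first 1 GiB
    page_table[0] = 0xCAFE;                 // entries are indexed by page number
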
diff --git a/src/common/multi_level_page_table.inc b/src/common/multi_level_page_table.inc
new file mode 100644
index 000000000..8ac506fa0
--- /dev/null
+++ b/src/common/multi_level_page_table.inc
@@ -0,0 +1,84 @@
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <sys/mman.h>
+#endif
+
+#include "common/assert.h"
+#include "common/multi_level_page_table.h"
+
+namespace Common {
+
+template <typename BaseAddr>
+MultiLevelPageTable<BaseAddr>::MultiLevelPageTable(std::size_t address_space_bits_,
+ std::size_t first_level_bits_,
+ std::size_t page_bits_)
+ : address_space_bits{address_space_bits_},
+ first_level_bits{first_level_bits_}, page_bits{page_bits_} {
+ if (page_bits == 0) {
+ return;
+ }
+ first_level_shift = address_space_bits - first_level_bits;
+ first_level_chunk_size = (1ULL << (first_level_shift - page_bits)) * sizeof(BaseAddr);
+ alloc_size = (1ULL << (address_space_bits - page_bits)) * sizeof(BaseAddr);
+ std::size_t first_level_size = 1ULL << first_level_bits;
+ first_level_map.resize(first_level_size, nullptr);
+#ifdef _WIN32
+ void* base{VirtualAlloc(nullptr, alloc_size, MEM_RESERVE, PAGE_READWRITE)};
+#else
+ void* base{mmap(nullptr, alloc_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)};
+
+ if (base == MAP_FAILED) {
+ base = nullptr;
+ }
+#endif
+
+ ASSERT(base);
+ base_ptr = reinterpret_cast<BaseAddr*>(base);
+}
+
+template <typename BaseAddr>
+MultiLevelPageTable<BaseAddr>::~MultiLevelPageTable() noexcept {
+ if (!base_ptr) {
+ return;
+ }
+#ifdef _WIN32
+ ASSERT(VirtualFree(base_ptr, 0, MEM_RELEASE));
+#else
+ ASSERT(munmap(base_ptr, alloc_size) == 0);
+#endif
+}
+
+template <typename BaseAddr>
+void MultiLevelPageTable<BaseAddr>::ReserveRange(u64 start, std::size_t size) {
+ const u64 new_start = start >> first_level_shift;
+ const u64 new_end = (start + size) >> first_level_shift;
+ for (u64 i = new_start; i <= new_end; i++) {
+ if (!first_level_map[i]) {
+ AllocateLevel(i);
+ }
+ }
+}
+
+template <typename BaseAddr>
+void MultiLevelPageTable<BaseAddr>::AllocateLevel(u64 level) {
+ void* ptr = reinterpret_cast<char *>(base_ptr) + level * first_level_chunk_size;
+#ifdef _WIN32
+ void* base{VirtualAlloc(ptr, first_level_chunk_size, MEM_COMMIT, PAGE_READWRITE)};
+#else
+ void* base{mmap(ptr, first_level_chunk_size, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)};
+
+ if (base == MAP_FAILED) {
+ base = nullptr;
+ }
+#endif
+ ASSERT(base);
+
+ first_level_map[level] = base;
+}
+
+} // namespace Common
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 8e3fd4505..95302c419 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -138,8 +138,6 @@ add_library(core STATIC
frontend/emu_window.h
frontend/framebuffer_layout.cpp
frontend/framebuffer_layout.h
- hardware_interrupt_manager.cpp
- hardware_interrupt_manager.h
hid/emulated_console.cpp
hid/emulated_console.h
hid/emulated_controller.cpp
@@ -550,6 +548,12 @@ add_library(core STATIC
hle/service/ns/ns.h
hle/service/ns/pdm_qry.cpp
hle/service/ns/pdm_qry.h
+ hle/service/nvdrv/core/container.cpp
+ hle/service/nvdrv/core/container.h
+ hle/service/nvdrv/core/nvmap.cpp
+ hle/service/nvdrv/core/nvmap.h
+ hle/service/nvdrv/core/syncpoint_manager.cpp
+ hle/service/nvdrv/core/syncpoint_manager.h
hle/service/nvdrv/devices/nvdevice.h
hle/service/nvdrv/devices/nvdisp_disp0.cpp
hle/service/nvdrv/devices/nvdisp_disp0.h
@@ -578,8 +582,6 @@ add_library(core STATIC
hle/service/nvdrv/nvdrv_interface.h
hle/service/nvdrv/nvmemp.cpp
hle/service/nvdrv/nvmemp.h
- hle/service/nvdrv/syncpoint_manager.cpp
- hle/service/nvdrv/syncpoint_manager.h
hle/service/nvflinger/binder.h
hle/service/nvflinger/buffer_item.h
hle/service/nvflinger/buffer_item_consumer.cpp
diff --git a/src/core/core.cpp b/src/core/core.cpp
index 121092868..1deeee154 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -27,7 +27,6 @@
#include "core/file_sys/savedata_factory.h"
#include "core/file_sys/vfs_concat.h"
#include "core/file_sys/vfs_real.h"
-#include "core/hardware_interrupt_manager.h"
#include "core/hid/hid_core.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_process.h"
@@ -51,6 +50,7 @@
#include "core/telemetry_session.h"
#include "core/tools/freezer.h"
#include "network/network.h"
+#include "video_core/host1x/host1x.h"
#include "video_core/renderer_base.h"
#include "video_core/video_core.h"
@@ -215,6 +215,7 @@ struct System::Impl {
telemetry_session = std::make_unique<Core::TelemetrySession>();
+ host1x_core = std::make_unique<Tegra::Host1x::Host1x>(system);
gpu_core = VideoCore::CreateGPU(emu_window, system);
if (!gpu_core) {
return SystemResultStatus::ErrorVideoCore;
@@ -224,7 +225,6 @@ struct System::Impl {
service_manager = std::make_shared<Service::SM::ServiceManager>(kernel);
services = std::make_unique<Service::Services>(service_manager, system);
- interrupt_manager = std::make_unique<Hardware::InterruptManager>(system);
// Initialize time manager, which must happen after kernel is created
time_manager.Initialize();
@@ -373,6 +373,7 @@ struct System::Impl {
app_loader.reset();
audio_core.reset();
gpu_core.reset();
+ host1x_core.reset();
perf_stats.reset();
kernel.Shutdown();
memory.Reset();
@@ -450,7 +451,7 @@ struct System::Impl {
/// AppLoader used to load the current executing application
std::unique_ptr<Loader::AppLoader> app_loader;
std::unique_ptr<Tegra::GPU> gpu_core;
- std::unique_ptr<Hardware::InterruptManager> interrupt_manager;
+ std::unique_ptr<Tegra::Host1x::Host1x> host1x_core;
std::unique_ptr<Core::DeviceMemory> device_memory;
std::unique_ptr<AudioCore::AudioCore> audio_core;
Core::Memory::Memory memory;
@@ -668,12 +669,12 @@ const Tegra::GPU& System::GPU() const {
return *impl->gpu_core;
}
-Core::Hardware::InterruptManager& System::InterruptManager() {
- return *impl->interrupt_manager;
+Tegra::Host1x::Host1x& System::Host1x() {
+ return *impl->host1x_core;
}
-const Core::Hardware::InterruptManager& System::InterruptManager() const {
- return *impl->interrupt_manager;
+const Tegra::Host1x::Host1x& System::Host1x() const {
+ return *impl->host1x_core;
}
VideoCore::RendererBase& System::Renderer() {
diff --git a/src/core/core.h b/src/core/core.h
index 0ce3b1d60..7843cc8ad 100644
--- a/src/core/core.h
+++ b/src/core/core.h
@@ -74,6 +74,9 @@ class TimeManager;
namespace Tegra {
class DebugContext;
class GPU;
+namespace Host1x {
+class Host1x;
+} // namespace Host1x
} // namespace Tegra
namespace VideoCore {
@@ -88,10 +91,6 @@ namespace Core::Timing {
class CoreTiming;
}
-namespace Core::Hardware {
-class InterruptManager;
-}
-
namespace Core::HID {
class HIDCore;
}
@@ -260,6 +259,12 @@ public:
/// Gets an immutable reference to the GPU interface.
[[nodiscard]] const Tegra::GPU& GPU() const;
+ /// Gets a mutable reference to the Host1x interface
+ [[nodiscard]] Tegra::Host1x::Host1x& Host1x();
+
+ /// Gets an immutable reference to the Host1x interface.
+ [[nodiscard]] const Tegra::Host1x::Host1x& Host1x() const;
+
/// Gets a mutable reference to the renderer.
[[nodiscard]] VideoCore::RendererBase& Renderer();
@@ -296,12 +301,6 @@ public:
/// Provides a constant reference to the core timing instance.
[[nodiscard]] const Timing::CoreTiming& CoreTiming() const;
- /// Provides a reference to the interrupt manager instance.
- [[nodiscard]] Core::Hardware::InterruptManager& InterruptManager();
-
- /// Provides a constant reference to the interrupt manager instance.
- [[nodiscard]] const Core::Hardware::InterruptManager& InterruptManager() const;
-
/// Provides a reference to the kernel instance.
[[nodiscard]] Kernel::KernelCore& Kernel();
diff --git a/src/core/hardware_interrupt_manager.cpp b/src/core/hardware_interrupt_manager.cpp
deleted file mode 100644
index d08cc3315..000000000
--- a/src/core/hardware_interrupt_manager.cpp
+++ /dev/null
@@ -1,32 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include "core/core.h"
-#include "core/core_timing.h"
-#include "core/hardware_interrupt_manager.h"
-#include "core/hle/service/nvdrv/nvdrv_interface.h"
-#include "core/hle/service/sm/sm.h"
-
-namespace Core::Hardware {
-
-InterruptManager::InterruptManager(Core::System& system_in) : system(system_in) {
- gpu_interrupt_event = Core::Timing::CreateEvent(
- "GPUInterrupt",
- [this](std::uintptr_t message, u64 time,
- std::chrono::nanoseconds) -> std::optional<std::chrono::nanoseconds> {
- auto nvdrv = system.ServiceManager().GetService<Service::Nvidia::NVDRV>("nvdrv");
- const u32 syncpt = static_cast<u32>(message >> 32);
- const u32 value = static_cast<u32>(message);
- nvdrv->SignalGPUInterruptSyncpt(syncpt, value);
- return std::nullopt;
- });
-}
-
-InterruptManager::~InterruptManager() = default;
-
-void InterruptManager::GPUInterruptSyncpt(const u32 syncpoint_id, const u32 value) {
- const u64 msg = (static_cast<u64>(syncpoint_id) << 32ULL) | value;
- system.CoreTiming().ScheduleEvent(std::chrono::nanoseconds{10}, gpu_interrupt_event, msg);
-}
-
-} // namespace Core::Hardware
diff --git a/src/core/hardware_interrupt_manager.h b/src/core/hardware_interrupt_manager.h
deleted file mode 100644
index 5665c5918..000000000
--- a/src/core/hardware_interrupt_manager.h
+++ /dev/null
@@ -1,32 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#pragma once
-
-#include <memory>
-
-#include "common/common_types.h"
-
-namespace Core {
-class System;
-}
-
-namespace Core::Timing {
-struct EventType;
-}
-
-namespace Core::Hardware {
-
-class InterruptManager {
-public:
- explicit InterruptManager(Core::System& system);
- ~InterruptManager();
-
- void GPUInterruptSyncpt(u32 syncpoint_id, u32 value);
-
-private:
- Core::System& system;
- std::shared_ptr<Core::Timing::EventType> gpu_interrupt_event;
-};
-
-} // namespace Core::Hardware
diff --git a/src/core/hle/service/nvdrv/core/container.cpp b/src/core/hle/service/nvdrv/core/container.cpp
new file mode 100644
index 000000000..37ca24f5d
--- /dev/null
+++ b/src/core/hle/service/nvdrv/core/container.cpp
@@ -0,0 +1,50 @@
+// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
+// SPDX-FileCopyrightText: 2022 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "core/hle/service/nvdrv/core/container.h"
+#include "core/hle/service/nvdrv/core/nvmap.h"
+#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
+#include "video_core/host1x/host1x.h"
+
+namespace Service::Nvidia::NvCore {
+
+struct ContainerImpl {
+ explicit ContainerImpl(Tegra::Host1x::Host1x& host1x_)
+ : file{host1x_}, manager{host1x_}, device_file_data{} {}
+ NvMap file;
+ SyncpointManager manager;
+ Container::Host1xDeviceFileData device_file_data;
+};
+
+Container::Container(Tegra::Host1x::Host1x& host1x_) {
+ impl = std::make_unique<ContainerImpl>(host1x_);
+}
+
+Container::~Container() = default;
+
+NvMap& Container::GetNvMapFile() {
+ return impl->file;
+}
+
+const NvMap& Container::GetNvMapFile() const {
+ return impl->file;
+}
+
+Container::Host1xDeviceFileData& Container::Host1xDeviceFile() {
+ return impl->device_file_data;
+}
+
+const Container::Host1xDeviceFileData& Container::Host1xDeviceFile() const {
+ return impl->device_file_data;
+}
+
+SyncpointManager& Container::GetSyncpointManager() {
+ return impl->manager;
+}
+
+const SyncpointManager& Container::GetSyncpointManager() const {
+ return impl->manager;
+}
+
+} // namespace Service::Nvidia::NvCore
diff --git a/src/core/hle/service/nvdrv/core/container.h b/src/core/hle/service/nvdrv/core/container.h
new file mode 100644
index 000000000..b4b63ac90
--- /dev/null
+++ b/src/core/hle/service/nvdrv/core/container.h
@@ -0,0 +1,52 @@
+// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
+// SPDX-FileCopyrightText: 2022 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#pragma once
+
+#include <deque>
+#include <memory>
+#include <unordered_map>
+
+#include "core/hle/service/nvdrv/nvdata.h"
+
+namespace Tegra::Host1x {
+class Host1x;
+} // namespace Tegra::Host1x
+
+namespace Service::Nvidia::NvCore {
+
+class NvMap;
+class SyncpointManager;
+
+struct ContainerImpl;
+
+class Container {
+public:
+ explicit Container(Tegra::Host1x::Host1x& host1x);
+ ~Container();
+
+ NvMap& GetNvMapFile();
+
+ const NvMap& GetNvMapFile() const;
+
+ SyncpointManager& GetSyncpointManager();
+
+ const SyncpointManager& GetSyncpointManager() const;
+
+ struct Host1xDeviceFileData {
+ std::unordered_map<DeviceFD, u32> fd_to_id{};
+ std::deque<u32> syncpts_accumulated{};
+ u32 nvdec_next_id{};
+ u32 vic_next_id{};
+ };
+
+ Host1xDeviceFileData& Host1xDeviceFile();
+
+ const Host1xDeviceFileData& Host1xDeviceFile() const;
+
+private:
+ std::unique_ptr<ContainerImpl> impl;
+};
+
+} // namespace Service::Nvidia::NvCore
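
A hedged sketch of how the container is wired up (not part of the patch; `system` stands for a Core::System reference): it is built from the Host1x instance this commit adds to Core::System and hands out the shared nvmap and syncpoint state.

    Service::Nvidia::NvCore::Container container{system.Host1x()};
    auto& nvmap = container.GetNvMapFile();
    auto& syncpoints = container.GetSyncpointManager();
    auto& host1x_files = container.Host1xDeviceFile(); // per-fd nvdec/vic bookkeeping
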
diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp
new file mode 100644
index 000000000..fbd8a74a5
--- /dev/null
+++ b/src/core/hle/service/nvdrv/core/nvmap.cpp
@@ -0,0 +1,272 @@
+// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
+// SPDX-FileCopyrightText: 2022 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "common/alignment.h"
+#include "common/assert.h"
+#include "common/logging/log.h"
+#include "core/hle/service/nvdrv/core/nvmap.h"
+#include "core/memory.h"
+#include "video_core/host1x/host1x.h"
+
+using Core::Memory::YUZU_PAGESIZE;
+
+namespace Service::Nvidia::NvCore {
+NvMap::Handle::Handle(u64 size_, Id id_)
+ : size(size_), aligned_size(size), orig_size(size), id(id_) {
+ flags.raw = 0;
+}
+
+NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress) {
+ std::scoped_lock lock(mutex);
+
+ // Handles cannot be allocated twice
+ if (allocated) {
+ return NvResult::AccessDenied;
+ }
+
+ flags = pFlags;
+ kind = pKind;
+ align = pAlign < YUZU_PAGESIZE ? YUZU_PAGESIZE : pAlign;
+
+ // This flag is only applicable for handles with an address passed
+ if (pAddress) {
+ flags.keep_uncached_after_free.Assign(0);
+ } else {
+ LOG_CRITICAL(Service_NVDRV,
+ "Mapping nvmap handles without a CPU side address is unimplemented!");
+ }
+
+ size = Common::AlignUp(size, YUZU_PAGESIZE);
+ aligned_size = Common::AlignUp(size, align);
+ address = pAddress;
+ allocated = true;
+
+ return NvResult::Success;
+}
+
+NvResult NvMap::Handle::Duplicate(bool internal_session) {
+ std::scoped_lock lock(mutex);
+ // Unallocated handles cannot be duplicated as duplication requires memory accounting (in HOS)
+ if (!allocated) [[unlikely]] {
+ return NvResult::BadValue;
+ }
+
+ // If we internally use FromId the duplication tracking of handles won't work accurately due to
+ // us not implementing per-process handle refs.
+ if (internal_session) {
+ internal_dupes++;
+ } else {
+ dupes++;
+ }
+
+ return NvResult::Success;
+}
+
+NvMap::NvMap(Tegra::Host1x::Host1x& host1x_) : host1x{host1x_} {}
+
+void NvMap::AddHandle(std::shared_ptr<Handle> handle_description) {
+ std::scoped_lock lock(handles_lock);
+
+ handles.emplace(handle_description->id, std::move(handle_description));
+}
+
+void NvMap::UnmapHandle(Handle& handle_description) {
+ // Remove pending unmap queue entry if needed
+ if (handle_description.unmap_queue_entry) {
+ unmap_queue.erase(*handle_description.unmap_queue_entry);
+ handle_description.unmap_queue_entry.reset();
+ }
+
+ // Free and unmap the handle from the SMMU
+ host1x.MemoryManager().Unmap(static_cast<GPUVAddr>(handle_description.pin_virt_address),
+ handle_description.aligned_size);
+ host1x.Allocator().Free(handle_description.pin_virt_address,
+ static_cast<u32>(handle_description.aligned_size));
+ handle_description.pin_virt_address = 0;
+}
+
+bool NvMap::TryRemoveHandle(const Handle& handle_description) {
+ // No dupes left, we can remove from handle map
+ if (handle_description.dupes == 0 && handle_description.internal_dupes == 0) {
+ std::scoped_lock lock(handles_lock);
+
+ auto it{handles.find(handle_description.id)};
+ if (it != handles.end()) {
+ handles.erase(it);
+ }
+
+ return true;
+ } else {
+ return false;
+ }
+}
+
+NvResult NvMap::CreateHandle(u64 size, std::shared_ptr<NvMap::Handle>& result_out) {
+ if (!size) [[unlikely]] {
+ return NvResult::BadValue;
+ }
+
+ u32 id{next_handle_id.fetch_add(HandleIdIncrement, std::memory_order_relaxed)};
+ auto handle_description{std::make_shared<Handle>(size, id)};
+ AddHandle(handle_description);
+
+ result_out = handle_description;
+ return NvResult::Success;
+}
+
+std::shared_ptr<NvMap::Handle> NvMap::GetHandle(Handle::Id handle) {
+ std::scoped_lock lock(handles_lock);
+ try {
+ return handles.at(handle);
+ } catch (std::out_of_range&) {
+ return nullptr;
+ }
+}
+
+VAddr NvMap::GetHandleAddress(Handle::Id handle) {
+ std::scoped_lock lock(handles_lock);
+ try {
+ return handles.at(handle)->address;
+ } catch (std::out_of_range&) {
+ return 0;
+ }
+}
+
+u32 NvMap::PinHandle(NvMap::Handle::Id handle) {
+ auto handle_description{GetHandle(handle)};
+ if (!handle_description) [[unlikely]] {
+ return 0;
+ }
+
+ std::scoped_lock lock(handle_description->mutex);
+ if (!handle_description->pins) {
+ // If we're in the unmap queue we can just remove ourselves and return since we're already
+ // mapped
+ {
+ // Lock now to prevent our queue entry from being removed for allocation in-between the
+ // following check and erase
+ std::scoped_lock queueLock(unmap_queue_lock);
+ if (handle_description->unmap_queue_entry) {
+ unmap_queue.erase(*handle_description->unmap_queue_entry);
+ handle_description->unmap_queue_entry.reset();
+
+ handle_description->pins++;
+ return handle_description->pin_virt_address;
+ }
+ }
+
+ // If not then allocate some space and map it
+ u32 address{};
+ auto& smmu_allocator = host1x.Allocator();
+ auto& smmu_memory_manager = host1x.MemoryManager();
+ while (!(address =
+ smmu_allocator.Allocate(static_cast<u32>(handle_description->aligned_size)))) {
+ // Free handles until the allocation succeeds
+ std::scoped_lock queueLock(unmap_queue_lock);
+ if (auto freeHandleDesc{unmap_queue.front()}) {
+ // Handles in the unmap queue are guaranteed not to be pinned so don't bother
+ // checking if they are before unmapping
+ std::scoped_lock freeLock(freeHandleDesc->mutex);
+ if (freeHandleDesc->pin_virt_address)
+ UnmapHandle(*freeHandleDesc);
+ } else {
+ LOG_CRITICAL(Service_NVDRV, "Ran out of SMMU address space!");
+ }
+ }
+
+ smmu_memory_manager.Map(static_cast<GPUVAddr>(address), handle_description->address,
+ handle_description->aligned_size);
+ handle_description->pin_virt_address = address;
+ }
+
+ handle_description->pins++;
+ return handle_description->pin_virt_address;
+}
+
+void NvMap::UnpinHandle(Handle::Id handle) {
+ auto handle_description{GetHandle(handle)};
+ if (!handle_description) {
+ return;
+ }
+
+ std::scoped_lock lock(handle_description->mutex);
+ if (--handle_description->pins < 0) {
+ LOG_WARNING(Service_NVDRV, "Pin count imbalance detected!");
+ } else if (!handle_description->pins) {
+ std::scoped_lock queueLock(unmap_queue_lock);
+
+ // Add to the unmap queue allowing this handle's memory to be freed if needed
+ unmap_queue.push_back(handle_description);
+ handle_description->unmap_queue_entry = std::prev(unmap_queue.end());
+ }
+}
+
+void NvMap::DuplicateHandle(Handle::Id handle, bool internal_session) {
+ auto handle_description{GetHandle(handle)};
+ if (!handle_description) {
+ LOG_CRITICAL(Service_NVDRV, "Unregistered handle!");
+ return;
+ }
+
+ auto result = handle_description->Duplicate(internal_session);
+ if (result != NvResult::Success) {
+ LOG_CRITICAL(Service_NVDRV, "Could not duplicate handle!");
+ }
+}
+
+std::optional<NvMap::FreeInfo> NvMap::FreeHandle(Handle::Id handle, bool internal_session) {
+ std::weak_ptr<Handle> hWeak{GetHandle(handle)};
+ FreeInfo freeInfo;
+
+ // We use a weak ptr here so we can tell when the handle has been freed and report that back
+ // to the guest
+ if (auto handle_description = hWeak.lock()) {
+ std::scoped_lock lock(handle_description->mutex);
+
+ if (internal_session) {
+ if (--handle_description->internal_dupes < 0)
+ LOG_WARNING(Service_NVDRV, "Internal duplicate count imbalance detected!");
+ } else {
+ if (--handle_description->dupes < 0) {
+ LOG_WARNING(Service_NVDRV, "User duplicate count imbalance detected!");
+ } else if (handle_description->dupes == 0) {
+ // Force unmap the handle
+ if (handle_description->pin_virt_address) {
+ std::scoped_lock queueLock(unmap_queue_lock);
+ UnmapHandle(*handle_description);
+ }
+
+ handle_description->pins = 0;
+ }
+ }
+
+ // Try to remove the shared ptr to the handle from the map, if nothing else is using the
+ // handle then it will now be freed when `handle_description` goes out of scope
+ if (TryRemoveHandle(*handle_description)) {
+ LOG_DEBUG(Service_NVDRV, "Removed nvmap handle: {}", handle);
+ } else {
+ LOG_DEBUG(Service_NVDRV,
+ "Tried to free nvmap handle: {} but didn't as it still has duplicates",
+ handle);
+ }
+
+ freeInfo = {
+ .address = handle_description->address,
+ .size = handle_description->size,
+ .was_uncached = handle_description->flags.map_uncached.Value() != 0,
+ };
+ } else {
+ return std::nullopt;
+ }
+
+ // Handle hasn't been freed from memory, set address to 0 to mark that the handle wasn't freed
+ if (!hWeak.expired()) {
+ LOG_DEBUG(Service_NVDRV, "nvmap handle: {} wasn't freed as it is still in use", handle);
+ freeInfo.address = 0;
+ }
+
+ return freeInfo;
+}
+
+} // namespace Service::Nvidia::NvCore
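
A sketch of the handle lifecycle implemented above (illustrative only; `nvmap` stands for an NvCore::NvMap reference, and the size, alignment and guest_address are placeholders):

    std::shared_ptr<Service::Nvidia::NvCore::NvMap::Handle> handle;
    if (nvmap.CreateHandle(0x10000, handle) == Service::Nvidia::NvResult::Success) {
        // Back the handle with guest memory at a placeholder CPU address.
        if (handle->Alloc({}, 0x1000, 0, guest_address) == Service::Nvidia::NvResult::Success) {
            [[maybe_unused]] const u32 smmu_va = nvmap.PinHandle(handle->id); // map into the SMMU
            nvmap.UnpinHandle(handle->id);       // queue for lazy unmap once unused
            nvmap.FreeHandle(handle->id, false); // drop the guest's duplicate
        }
    }
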
diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h
new file mode 100644
index 000000000..b9dd3801f
--- /dev/null
+++ b/src/core/hle/service/nvdrv/core/nvmap.h
@@ -0,0 +1,175 @@
+// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
+// SPDX-FileCopyrightText: 2022 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#pragma once
+
+#include <atomic>
+#include <list>
+#include <memory>
+#include <mutex>
+#include <optional>
+#include <unordered_map>
+#include <assert.h>
+
+#include "common/bit_field.h"
+#include "common/common_types.h"
+#include "core/hle/service/nvdrv/nvdata.h"
+
+namespace Tegra {
+
+namespace Host1x {
+class Host1x;
+} // namespace Host1x
+
+} // namespace Tegra
+
+namespace Service::Nvidia::NvCore {
+/**
+ * @brief The nvmap core class holds the global state for nvmap and provides methods to manage
+ * handles
+ */
+class NvMap {
+public:
+ /**
+ * @brief A handle to a contiguous block of memory in an application's address space
+ */
+ struct Handle {
+ std::mutex mutex;
+
+ u64 align{}; //!< The alignment to use when pinning the handle onto the SMMU
+ u64 size; //!< Page-aligned size of the memory the handle refers to
+ u64 aligned_size; //!< `align`-aligned size of the memory the handle refers to
+ u64 orig_size; //!< Original unaligned size of the memory this handle refers to
+
+ s32 dupes{1}; //!< How many guest references there are to this handle
+ s32 internal_dupes{0}; //!< How many emulator-internal references there are to this handle
+
+ using Id = u32;
+ Id id; //!< A globally unique identifier for this handle
+
+ s32 pins{};
+ u32 pin_virt_address{};
+ std::optional<typename std::list<std::shared_ptr<Handle>>::iterator> unmap_queue_entry{};
+
+ union Flags {
+ u32 raw;
+ BitField<0, 1, u32> map_uncached; //!< If the handle should be mapped as uncached
+ BitField<2, 1, u32> keep_uncached_after_free; //!< Only applicable when the handle was
+ //!< allocated with a fixed address
+ BitField<4, 1, u32> _unk0_; //!< Passed to IOVMM for pins
+ } flags{};
+ static_assert(sizeof(Flags) == sizeof(u32));
+
+        u64 address{}; //!< The memory location in the guest's AS that this handle corresponds to;
+                       //!< this can also be in the nvdrv tmem
+ bool is_shared_mem_mapped{}; //!< If this nvmap has been mapped with the MapSharedMem IPC
+ //!< call
+
+ u8 kind{}; //!< Used for memory compression
+ bool allocated{}; //!< If the handle has been allocated with `Alloc`
+
+ u64 dma_map_addr{}; //! remove me after implementing pinning.
+
+ Handle(u64 size, Id id);
+
+ /**
+         * @brief Sets up the handle with the given memory config; it can allocate memory from the
+         * tmem if a 0 address is passed
+ */
+ [[nodiscard]] NvResult Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress);
+
+ /**
+ * @brief Increases the dupe counter of the handle for the given session
+ */
+ [[nodiscard]] NvResult Duplicate(bool internal_session);
+
+ /**
+         * @brief Obtains a pointer to the handle's memory and marks the handle as having been
+         * mapped
+ */
+ u8* GetPointer() {
+ if (!address) {
+ return nullptr;
+ }
+
+ is_shared_mem_mapped = true;
+ return reinterpret_cast<u8*>(address);
+ }
+ };
+
+ /**
+ * @brief Encapsulates the result of a FreeHandle operation
+ */
+ struct FreeInfo {
+ u64 address; //!< Address the handle referred to before deletion
+ u64 size; //!< Page-aligned handle size
+ bool was_uncached; //!< If the handle was allocated as uncached
+ };
+
+ explicit NvMap(Tegra::Host1x::Host1x& host1x);
+
+ /**
+ * @brief Creates an unallocated handle of the given size
+ */
+ [[nodiscard]] NvResult CreateHandle(u64 size, std::shared_ptr<NvMap::Handle>& result_out);
+
+ std::shared_ptr<Handle> GetHandle(Handle::Id handle);
+
+ VAddr GetHandleAddress(Handle::Id handle);
+
+ /**
+ * @brief Maps a handle into the SMMU address space
+     * @note This operation is refcounted; the number of calls to this must eventually match the
+ * number of calls to `UnpinHandle`
+ * @return The SMMU virtual address that the handle has been mapped to
+ */
+ u32 PinHandle(Handle::Id handle);
+
+ /**
+     * @brief Decreases a handle's pin refcount; once this has been called as many times as
+     * `PinHandle` for the supplied handle, the handle is added to a list of handles to be freed
+     * when necessary
+ */
+ void UnpinHandle(Handle::Id handle);
+
+ /**
+ * @brief Tries to duplicate a handle
+ */
+ void DuplicateHandle(Handle::Id handle, bool internal_session = false);
+
+ /**
+ * @brief Tries to free a handle and remove a single dupe
+     * @note If a handle has no dupes left and has no other users, a FreeInfo struct will be
+     * returned describing the prior state of the handle
+ */
+ std::optional<FreeInfo> FreeHandle(Handle::Id handle, bool internal_session);
+
+private:
+ std::list<std::shared_ptr<Handle>> unmap_queue{};
+ std::mutex unmap_queue_lock{}; //!< Protects access to `unmap_queue`
+
+ std::unordered_map<Handle::Id, std::shared_ptr<Handle>>
+ handles{}; //!< Main owning map of handles
+ std::mutex handles_lock; //!< Protects access to `handles`
+
+ static constexpr u32 HandleIdIncrement{
+ 4}; //!< Each new handle ID is an increment of 4 from the previous
+ std::atomic<u32> next_handle_id{HandleIdIncrement};
+ Tegra::Host1x::Host1x& host1x;
+
+ void AddHandle(std::shared_ptr<Handle> handle);
+
+ /**
+ * @brief Unmaps and frees the SMMU memory region a handle is mapped to
+ * @note Both `unmap_queue_lock` and `handle_description.mutex` MUST be locked when calling this
+ */
+ void UnmapHandle(Handle& handle_description);
+
+ /**
+ * @brief Removes a handle from the map taking its dupes into account
+ * @note handle_description.mutex MUST be locked when calling this
+ * @return If the handle was removed from the map
+ */
+ bool TryRemoveHandle(const Handle& handle_description);
+};
+} // namespace Service::Nvidia::NvCore
diff --git a/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp
new file mode 100644
index 000000000..eda2041a0
--- /dev/null
+++ b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp
@@ -0,0 +1,121 @@
+// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
+// SPDX-FileCopyrightText: 2022 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "common/assert.h"
+#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
+#include "video_core/host1x/host1x.h"
+
+namespace Service::Nvidia::NvCore {
+
+SyncpointManager::SyncpointManager(Tegra::Host1x::Host1x& host1x_) : host1x{host1x_} {
+ constexpr u32 VBlank0SyncpointId{26};
+ constexpr u32 VBlank1SyncpointId{27};
+
+ // Reserve both vblank syncpoints as client managed as they use Continuous Mode
+ // Refer to section 14.3.5.3 of the TRM for more information on Continuous Mode
+ // https://github.com/Jetson-TX1-AndroidTV/android_kernel_jetson_tx1_hdmi_primary/blob/8f74a72394efb871cb3f886a3de2998cd7ff2990/drivers/gpu/host1x/drm/dc.c#L660
+ ReserveSyncpoint(VBlank0SyncpointId, true);
+ ReserveSyncpoint(VBlank1SyncpointId, true);
+
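+    // Reserve the fixed syncpoints that are statically assigned to each channel type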
+ for (u32 syncpoint_id : channel_syncpoints) {
+ if (syncpoint_id) {
+ ReserveSyncpoint(syncpoint_id, false);
+ }
+ }
+}
+
+SyncpointManager::~SyncpointManager() = default;
+
+u32 SyncpointManager::ReserveSyncpoint(u32 id, bool client_managed) {
+ if (syncpoints.at(id).reserved) {
+ ASSERT_MSG(false, "Requested syncpoint is in use");
+ return 0;
+ }
+
+ syncpoints.at(id).reserved = true;
+ syncpoints.at(id).interface_managed = client_managed;
+
+ return id;
+}
+
+u32 SyncpointManager::FindFreeSyncpoint() {
+ for (u32 i{1}; i < syncpoints.size(); i++) {
+ if (!syncpoints[i].reserved) {
+ return i;
+ }
+ }
+ ASSERT_MSG(false, "Failed to find a free syncpoint!");
+ return 0;
+}
+
+u32 SyncpointManager::AllocateSyncpoint(bool client_managed) {
+ std::lock_guard lock(reservation_lock);
+ return ReserveSyncpoint(FindFreeSyncpoint(), client_managed);
+}
+
+void SyncpointManager::FreeSyncpoint(u32 id) {
+ std::lock_guard lock(reservation_lock);
+ ASSERT(syncpoints.at(id).reserved);
+ syncpoints.at(id).reserved = false;
+}
+
+bool SyncpointManager::IsSyncpointAllocated(u32 id) {
+    return (id < SyncpointCount) && syncpoints[id].reserved;
+}
+
+bool SyncpointManager::HasSyncpointExpired(u32 id, u32 threshold) const {
+ const SyncpointInfo& syncpoint{syncpoints.at(id)};
+
+ if (!syncpoint.reserved) {
+ ASSERT(false);
+ return 0;
+ }
+
+    // If the interface manages the counters then we don't keep track of the maximum value, as the
+    // interface handles sanity checking the values itself
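+    // The unsigned subtractions below intentionally rely on wraparound so the comparisons remain
+    // valid when the syncpoint counters overflow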
+ if (syncpoint.interface_managed) {
+ return static_cast<s32>(syncpoint.counter_min - threshold) >= 0;
+ } else {
+ return (syncpoint.counter_max - threshold) >= (syncpoint.counter_min - threshold);
+ }
+}
+
+u32 SyncpointManager::IncrementSyncpointMaxExt(u32 id, u32 amount) {
+ if (!syncpoints.at(id).reserved) {
+ ASSERT(false);
+ return 0;
+ }
+
+ return syncpoints.at(id).counter_max += amount;
+}
+
+u32 SyncpointManager::ReadSyncpointMinValue(u32 id) {
+ if (!syncpoints.at(id).reserved) {
+ ASSERT(false);
+ return 0;
+ }
+
+ return syncpoints.at(id).counter_min;
+}
+
+u32 SyncpointManager::UpdateMin(u32 id) {
+ if (!syncpoints.at(id).reserved) {
+ ASSERT(false);
+ return 0;
+ }
+
+ syncpoints.at(id).counter_min = host1x.GetSyncpointManager().GetHostSyncpointValue(id);
+ return syncpoints.at(id).counter_min;
+}
+
+NvFence SyncpointManager::GetSyncpointFence(u32 id) {
+ if (!syncpoints.at(id).reserved) {
+ ASSERT(false);
+ return NvFence{};
+ }
+
+ return {.id = static_cast<s32>(id), .value = syncpoints.at(id).counter_max};
+}
+
+} // namespace Service::Nvidia::NvCore
diff --git a/src/core/hle/service/nvdrv/core/syncpoint_manager.h b/src/core/hle/service/nvdrv/core/syncpoint_manager.h
new file mode 100644
index 000000000..b76ef9032
--- /dev/null
+++ b/src/core/hle/service/nvdrv/core/syncpoint_manager.h
@@ -0,0 +1,134 @@
+// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
+// SPDX-FileCopyrightText: 2022 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#pragma once
+
+#include <array>
+#include <atomic>
+#include <mutex>
+
+#include "common/common_types.h"
+#include "core/hle/service/nvdrv/nvdata.h"
+
+namespace Tegra::Host1x {
+class Host1x;
+} // namespace Tegra::Host1x
+
+namespace Service::Nvidia::NvCore {
+
+enum class ChannelType : u32 {
+ MsEnc = 0,
+ VIC = 1,
+ GPU = 2,
+ NvDec = 3,
+ Display = 4,
+ NvJpg = 5,
+ TSec = 6,
+ Max = 7
+};
+
+/**
+ * @brief SyncpointManager handles allocating and accessing host1x syncpoints; these are cached
+ * versions of the HW syncpoints which are intermittently synced
+ * @note Refer to Chapter 14 of the Tegra X1 TRM for an exhaustive overview of them
+ * @url https://http.download.nvidia.com/tegra-public-appnotes/host1x.html
+ * @url
+ * https://github.com/Jetson-TX1-AndroidTV/android_kernel_jetson_tx1_hdmi_primary/blob/jetson-tx1/drivers/video/tegra/host/nvhost_syncpt.c
+ */
+class SyncpointManager final {
+public:
+ explicit SyncpointManager(Tegra::Host1x::Host1x& host1x);
+ ~SyncpointManager();
+
+ /**
+ * @brief Checks if the given syncpoint is both allocated and below the number of HW syncpoints
+ */
+ bool IsSyncpointAllocated(u32 id);
+
+ /**
+ * @brief Finds a free syncpoint and reserves it
+ * @return The ID of the reserved syncpoint
+ */
+ u32 AllocateSyncpoint(bool client_managed);
+
+ /**
+ * @url
+ * https://github.com/Jetson-TX1-AndroidTV/android_kernel_jetson_tx1_hdmi_primary/blob/8f74a72394efb871cb3f886a3de2998cd7ff2990/drivers/gpu/host1x/syncpt.c#L259
+ */
+ bool HasSyncpointExpired(u32 id, u32 threshold) const;
+
+ bool IsFenceSignalled(NvFence fence) const {
+ return HasSyncpointExpired(fence.id, fence.value);
+ }
+
+ /**
+ * @brief Atomically increments the maximum value of a syncpoint by the given amount
+ * @return The new max value of the syncpoint
+ */
+ u32 IncrementSyncpointMaxExt(u32 id, u32 amount);
+
+ /**
+ * @return The minimum value of the syncpoint
+ */
+ u32 ReadSyncpointMinValue(u32 id);
+
+ /**
+     * @brief Synchronises the minimum value of the syncpoint with the GPU
+ * @return The new minimum value of the syncpoint
+ */
+ u32 UpdateMin(u32 id);
+
+ /**
+ * @brief Frees the usage of a syncpoint.
+ */
+ void FreeSyncpoint(u32 id);
+
+ /**
+ * @return A fence that will be signalled once this syncpoint hits its maximum value
+ */
+ NvFence GetSyncpointFence(u32 id);
+
+ static constexpr std::array<u32, static_cast<u32>(ChannelType::Max)> channel_syncpoints{
+ 0x0, // `MsEnc` is unimplemented
+ 0xC, // `VIC`
+ 0x0, // `GPU` syncpoints are allocated per-channel instead
+ 0x36, // `NvDec`
+ 0x0, // `Display` is unimplemented
+ 0x37, // `NvJpg`
+ 0x0, // `TSec` is unimplemented
+ }; //!< Maps each channel ID to a constant syncpoint
+
+private:
+ /**
+ * @note reservation_lock should be locked when calling this
+ */
+ u32 ReserveSyncpoint(u32 id, bool client_managed);
+
+ /**
+ * @return The ID of the first free syncpoint
+ */
+ u32 FindFreeSyncpoint();
+
+ struct SyncpointInfo {
+ std::atomic<u32> counter_min; //!< The least value the syncpoint can be (The value it was
+ //!< when it was last synchronized with host1x)
+ std::atomic<u32> counter_max; //!< The maximum value the syncpoint can reach according to
+ //!< the current usage
+        bool interface_managed; //!< If the syncpoint is managed by a host1x client interface. A
+                                //!< client interface is a HW block that can handle host1x
+                                //!< transactions on behalf of a host1x client (which would
+                                //!< otherwise need to be synced manually using PIO, which is
+                                //!< synchronous and requires direct cooperation of the CPU)
+ bool reserved; //!< If the syncpoint is reserved or not, not to be confused with a reserved
+ //!< value
+ };
+
+ constexpr static std::size_t SyncpointCount{192};
+ std::array<SyncpointInfo, SyncpointCount> syncpoints{};
+ std::mutex reservation_lock;
+
+ Tegra::Host1x::Host1x& host1x;
+};
+
+} // namespace Service::Nvidia::NvCore
diff --git a/src/core/hle/service/nvdrv/devices/nvdevice.h b/src/core/hle/service/nvdrv/devices/nvdevice.h
index 696e8121e..204b0e757 100644
--- a/src/core/hle/service/nvdrv/devices/nvdevice.h
+++ b/src/core/hle/service/nvdrv/devices/nvdevice.h
@@ -11,6 +11,10 @@ namespace Core {
class System;
}
+namespace Kernel {
+class KEvent;
+}
+
namespace Service::Nvidia::Devices {
/// Represents an abstract nvidia device node. It is to be subclassed by concrete device nodes to
@@ -64,6 +68,10 @@ public:
*/
virtual void OnClose(DeviceFD fd) = 0;
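+    /// Returns the KEvent for an event id, or nullptr if the device exposes no such event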
+ virtual Kernel::KEvent* QueryEvent(u32 event_id) {
+ return nullptr;
+ }
+
protected:
Core::System& system;
};
diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp
index 604711914..4122fc98d 100644
--- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp
@@ -5,15 +5,16 @@
#include "common/logging/log.h"
#include "core/core.h"
#include "core/core_timing.h"
+#include "core/hle/service/nvdrv/core/container.h"
+#include "core/hle/service/nvdrv/core/nvmap.h"
#include "core/hle/service/nvdrv/devices/nvdisp_disp0.h"
-#include "core/hle/service/nvdrv/devices/nvmap.h"
#include "core/perf_stats.h"
#include "video_core/gpu.h"
namespace Service::Nvidia::Devices {
-nvdisp_disp0::nvdisp_disp0(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_)
- : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)} {}
+nvdisp_disp0::nvdisp_disp0(Core::System& system_, NvCore::Container& core)
+ : nvdevice{system_}, container{core}, nvmap{core.GetNvMapFile()} {}
nvdisp_disp0::~nvdisp_disp0() = default;
NvResult nvdisp_disp0::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -39,8 +40,9 @@ void nvdisp_disp0::OnClose(DeviceFD fd) {}
void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat format, u32 width,
u32 height, u32 stride, android::BufferTransformFlags transform,
- const Common::Rectangle<int>& crop_rect) {
- const VAddr addr = nvmap_dev->GetObjectAddress(buffer_handle);
+ const Common::Rectangle<int>& crop_rect,
+ std::array<Service::Nvidia::NvFence, 4>& fences, u32 num_fences) {
+ const VAddr addr = nvmap.GetHandleAddress(buffer_handle);
LOG_TRACE(Service,
"Drawing from address {:X} offset {:08X} Width {} Height {} Stride {} Format {}",
addr, offset, width, height, stride, format);
@@ -48,10 +50,15 @@ void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat form
const Tegra::FramebufferConfig framebuffer{addr, offset, width, height,
stride, format, transform, crop_rect};
+ system.GPU().RequestSwapBuffers(&framebuffer, fences, num_fences);
system.GetPerfStats().EndSystemFrame();
- system.GPU().SwapBuffers(&framebuffer);
system.SpeedLimiter().DoSpeedLimiting(system.CoreTiming().GetGlobalTimeUs());
system.GetPerfStats().BeginSystemFrame();
}
+Kernel::KEvent* nvdisp_disp0::QueryEvent(u32 event_id) {
+ LOG_CRITICAL(Service_NVDRV, "Unknown DISP Event {}", event_id);
+ return nullptr;
+}
+
} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h
index 67b105e02..04217ab12 100644
--- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h
+++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h
@@ -11,13 +11,18 @@
#include "core/hle/service/nvflinger/buffer_transform_flags.h"
#include "core/hle/service/nvflinger/pixel_format.h"
+namespace Service::Nvidia::NvCore {
+class Container;
+class NvMap;
+} // namespace Service::Nvidia::NvCore
+
namespace Service::Nvidia::Devices {
class nvmap;
class nvdisp_disp0 final : public nvdevice {
public:
- explicit nvdisp_disp0(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_);
+ explicit nvdisp_disp0(Core::System& system_, NvCore::Container& core);
~nvdisp_disp0() override;
NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -33,10 +38,14 @@ public:
/// Performs a screen flip, drawing the buffer pointed to by the handle.
void flip(u32 buffer_handle, u32 offset, android::PixelFormat format, u32 width, u32 height,
u32 stride, android::BufferTransformFlags transform,
- const Common::Rectangle<int>& crop_rect);
+ const Common::Rectangle<int>& crop_rect,
+ std::array<Service::Nvidia::NvFence, 4>& fences, u32 num_fences);
+
+ Kernel::KEvent* QueryEvent(u32 event_id) override;
private:
- std::shared_ptr<nvmap> nvmap_dev;
+ NvCore::Container& container;
+ NvCore::NvMap& nvmap;
};
} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
index 9867a648d..6411dbf43 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
@@ -1,21 +1,30 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
#include <cstring>
#include <utility>
+#include "common/alignment.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
+#include "core/hle/service/nvdrv/core/container.h"
+#include "core/hle/service/nvdrv/core/nvmap.h"
#include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h"
-#include "core/hle/service/nvdrv/devices/nvmap.h"
+#include "core/hle/service/nvdrv/devices/nvhost_gpu.h"
+#include "core/hle/service/nvdrv/nvdrv.h"
+#include "video_core/control/channel_state.h"
+#include "video_core/gpu.h"
#include "video_core/memory_manager.h"
#include "video_core/rasterizer_interface.h"
namespace Service::Nvidia::Devices {
-nvhost_as_gpu::nvhost_as_gpu(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_)
- : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)} {}
+nvhost_as_gpu::nvhost_as_gpu(Core::System& system_, Module& module_, NvCore::Container& core)
+ : nvdevice{system_}, module{module_}, container{core}, nvmap{core.GetNvMapFile()}, vm{},
+ gmmu{} {}
+
nvhost_as_gpu::~nvhost_as_gpu() = default;
NvResult nvhost_as_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -82,12 +91,52 @@ NvResult nvhost_as_gpu::AllocAsEx(const std::vector<u8>& input, std::vector<u8>&
IoctlAllocAsEx params{};
std::memcpy(&params, input.data(), input.size());
- LOG_WARNING(Service_NVDRV, "(STUBBED) called, big_page_size=0x{:X}", params.big_page_size);
- if (params.big_page_size == 0) {
- params.big_page_size = DEFAULT_BIG_PAGE_SIZE;
+ LOG_DEBUG(Service_NVDRV, "called, big_page_size=0x{:X}", params.big_page_size);
+
+ std::scoped_lock lock(mutex);
+
+ if (vm.initialised) {
+ ASSERT_MSG(false, "Cannot initialise an address space twice!");
+ return NvResult::InvalidState;
+ }
+
+ if (params.big_page_size) {
+ if (!std::has_single_bit(params.big_page_size)) {
+ LOG_ERROR(Service_NVDRV, "Non power-of-2 big page size: 0x{:X}!", params.big_page_size);
+ return NvResult::BadValue;
+ }
+
+ if ((params.big_page_size & VM::SUPPORTED_BIG_PAGE_SIZES) == 0) {
+ LOG_ERROR(Service_NVDRV, "Unsupported big page size: 0x{:X}!", params.big_page_size);
+ return NvResult::BadValue;
+ }
+
+ vm.big_page_size = params.big_page_size;
+ vm.big_page_size_bits = static_cast<u32>(std::countr_zero(params.big_page_size));
+
+ vm.va_range_start = params.big_page_size << VM::VA_START_SHIFT;
+ }
+
+ // If this is unspecified then default values should be used
+ if (params.va_range_start) {
+ vm.va_range_start = params.va_range_start;
+ vm.va_range_split = params.va_range_split;
+ vm.va_range_end = params.va_range_end;
}
- big_page_size = params.big_page_size;
+ const auto start_pages{static_cast<u32>(vm.va_range_start >> VM::PAGE_SIZE_BITS)};
+ const auto end_pages{static_cast<u32>(vm.va_range_split >> VM::PAGE_SIZE_BITS)};
+ vm.small_page_allocator = std::make_shared<VM::Allocator>(start_pages, end_pages);
+
+ const auto start_big_pages{static_cast<u32>(vm.va_range_split >> vm.big_page_size_bits)};
+ const auto end_big_pages{
+ static_cast<u32>((vm.va_range_end - vm.va_range_split) >> vm.big_page_size_bits)};
+ vm.big_page_allocator = std::make_unique<VM::Allocator>(start_big_pages, end_big_pages);
+
+ gmmu = std::make_shared<Tegra::MemoryManager>(system, 40, vm.big_page_size_bits,
+ VM::PAGE_SIZE_BITS);
+ system.GPU().InitAddressSpace(*gmmu);
+ vm.initialised = true;
return NvResult::Success;
}
@@ -99,21 +148,76 @@ NvResult nvhost_as_gpu::AllocateSpace(const std::vector<u8>& input, std::vector<
LOG_DEBUG(Service_NVDRV, "called, pages={:X}, page_size={:X}, flags={:X}", params.pages,
params.page_size, params.flags);
- const auto size{static_cast<u64>(params.pages) * static_cast<u64>(params.page_size)};
- if ((params.flags & AddressSpaceFlags::FixedOffset) != AddressSpaceFlags::None) {
- params.offset = *system.GPU().MemoryManager().AllocateFixed(params.offset, size);
+ std::scoped_lock lock(mutex);
+
+ if (!vm.initialised) {
+ return NvResult::BadValue;
+ }
+
+ if (params.page_size != VM::YUZU_PAGESIZE && params.page_size != vm.big_page_size) {
+ return NvResult::BadValue;
+ }
+
+ if (params.page_size != vm.big_page_size &&
+ ((params.flags & MappingFlags::Sparse) != MappingFlags::None)) {
+ UNIMPLEMENTED_MSG("Sparse small pages are not implemented!");
+ return NvResult::NotImplemented;
+ }
+
+ const u32 page_size_bits{params.page_size == VM::YUZU_PAGESIZE ? VM::PAGE_SIZE_BITS
+ : vm.big_page_size_bits};
+
+ auto& allocator{params.page_size == VM::YUZU_PAGESIZE ? *vm.small_page_allocator
+ : *vm.big_page_allocator};
+
+ if ((params.flags & MappingFlags::Fixed) != MappingFlags::None) {
+ allocator.AllocateFixed(static_cast<u32>(params.offset >> page_size_bits), params.pages);
} else {
- params.offset = system.GPU().MemoryManager().Allocate(size, params.align);
+ params.offset = static_cast<u64>(allocator.Allocate(params.pages)) << page_size_bits;
+ if (!params.offset) {
+ ASSERT_MSG(false, "Failed to allocate free space in the GPU AS!");
+ return NvResult::InsufficientMemory;
+ }
}
- auto result = NvResult::Success;
- if (!params.offset) {
- LOG_CRITICAL(Service_NVDRV, "allocation failed for size {}", size);
- result = NvResult::InsufficientMemory;
+ u64 size{static_cast<u64>(params.pages) * params.page_size};
+
+ if ((params.flags & MappingFlags::Sparse) != MappingFlags::None) {
+ gmmu->MapSparse(params.offset, size);
}
+ allocation_map[params.offset] = {
+ .size = size,
+ .mappings{},
+ .page_size = params.page_size,
+ .sparse = (params.flags & MappingFlags::Sparse) != MappingFlags::None,
+ .big_pages = params.page_size != VM::YUZU_PAGESIZE,
+ };
+
std::memcpy(output.data(), &params, output.size());
- return result;
+ return NvResult::Success;
+}
+
+void nvhost_as_gpu::FreeMappingLocked(u64 offset) {
+ auto mapping{mapping_map.at(offset)};
+
+ if (!mapping->fixed) {
+ auto& allocator{mapping->big_page ? *vm.big_page_allocator : *vm.small_page_allocator};
+ u32 page_size_bits{mapping->big_page ? vm.big_page_size_bits : VM::PAGE_SIZE_BITS};
+
+ allocator.Free(static_cast<u32>(mapping->offset >> page_size_bits),
+ static_cast<u32>(mapping->size >> page_size_bits));
+ }
+
+ // Sparse mappings shouldn't be fully unmapped, just returned to their sparse state
+ // Only FreeSpace can unmap them fully
+ if (mapping->sparse_alloc) {
+ gmmu->MapSparse(offset, mapping->size, mapping->big_page);
+ } else {
+ gmmu->Unmap(offset, mapping->size);
+ }
+
+ mapping_map.erase(offset);
}
NvResult nvhost_as_gpu::FreeSpace(const std::vector<u8>& input, std::vector<u8>& output) {
@@ -123,8 +227,40 @@ NvResult nvhost_as_gpu::FreeSpace(const std::vector<u8>& input, std::vector<u8>&
LOG_DEBUG(Service_NVDRV, "called, offset={:X}, pages={:X}, page_size={:X}", params.offset,
params.pages, params.page_size);
- system.GPU().MemoryManager().Unmap(params.offset,
- static_cast<std::size_t>(params.pages) * params.page_size);
+ std::scoped_lock lock(mutex);
+
+ if (!vm.initialised) {
+ return NvResult::BadValue;
+ }
+
+ try {
+        auto allocation{allocation_map.at(params.offset)};
+
+ if (allocation.page_size != params.page_size ||
+ allocation.size != (static_cast<u64>(params.pages) * params.page_size)) {
+ return NvResult::BadValue;
+ }
+
+ for (const auto& mapping : allocation.mappings) {
+ FreeMappingLocked(mapping->offset);
+ }
+
+ // Unset sparse flag if required
+ if (allocation.sparse) {
+ gmmu->Unmap(params.offset, allocation.size);
+ }
+
+ auto& allocator{params.page_size == VM::YUZU_PAGESIZE ? *vm.small_page_allocator
+ : *vm.big_page_allocator};
+ u32 page_size_bits{params.page_size == VM::YUZU_PAGESIZE ? VM::PAGE_SIZE_BITS
+ : vm.big_page_size_bits};
+
+ allocator.Free(static_cast<u32>(params.offset >> page_size_bits),
+ static_cast<u32>(allocation.size >> page_size_bits));
+ allocation_map.erase(params.offset);
+ } catch (const std::out_of_range&) {
+ return NvResult::BadValue;
+ }
std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
@@ -135,35 +271,52 @@ NvResult nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& out
LOG_DEBUG(Service_NVDRV, "called, num_entries=0x{:X}", num_entries);
- auto result = NvResult::Success;
std::vector<IoctlRemapEntry> entries(num_entries);
std::memcpy(entries.data(), input.data(), input.size());
+ std::scoped_lock lock(mutex);
+
+ if (!vm.initialised) {
+ return NvResult::BadValue;
+ }
+
for (const auto& entry : entries) {
- LOG_DEBUG(Service_NVDRV, "remap entry, offset=0x{:X} handle=0x{:X} pages=0x{:X}",
- entry.offset, entry.nvmap_handle, entry.pages);
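+        // Remap entries express their offsets and sizes in units of big pages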
+ GPUVAddr virtual_address{static_cast<u64>(entry.as_offset_big_pages)
+ << vm.big_page_size_bits};
+ u64 size{static_cast<u64>(entry.big_pages) << vm.big_page_size_bits};
- const auto object{nvmap_dev->GetObject(entry.nvmap_handle)};
- if (!object) {
- LOG_CRITICAL(Service_NVDRV, "invalid nvmap_handle={:X}", entry.nvmap_handle);
- result = NvResult::InvalidState;
- break;
+ auto alloc{allocation_map.upper_bound(virtual_address)};
+
+ if (alloc-- == allocation_map.begin() ||
+ (virtual_address - alloc->first) + size > alloc->second.size) {
+ LOG_WARNING(Service_NVDRV, "Cannot remap into an unallocated region!");
+ return NvResult::BadValue;
}
- const auto offset{static_cast<GPUVAddr>(entry.offset) << 0x10};
- const auto size{static_cast<u64>(entry.pages) << 0x10};
- const auto map_offset{static_cast<u64>(entry.map_offset) << 0x10};
- const auto addr{system.GPU().MemoryManager().Map(object->addr + map_offset, offset, size)};
+ if (!alloc->second.sparse) {
+ LOG_WARNING(Service_NVDRV, "Cannot remap a non-sparse mapping!");
+ return NvResult::BadValue;
+ }
- if (!addr) {
- LOG_CRITICAL(Service_NVDRV, "map returned an invalid address!");
- result = NvResult::InvalidState;
- break;
+ const bool use_big_pages = alloc->second.big_pages;
+ if (!entry.handle) {
+ gmmu->MapSparse(virtual_address, size, use_big_pages);
+ } else {
+ auto handle{nvmap.GetHandle(entry.handle)};
+ if (!handle) {
+ return NvResult::BadValue;
+ }
+
+ VAddr cpu_address{static_cast<VAddr>(
+ handle->address +
+ (static_cast<u64>(entry.handle_offset_big_pages) << vm.big_page_size_bits))};
+
+ gmmu->Map(virtual_address, cpu_address, size, use_big_pages);
}
}
std::memcpy(output.data(), entries.data(), output.size());
- return result;
+ return NvResult::Success;
}
NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& output) {
@@ -173,79 +326,98 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
LOG_DEBUG(Service_NVDRV,
"called, flags={:X}, nvmap_handle={:X}, buffer_offset={}, mapping_size={}"
", offset={}",
- params.flags, params.nvmap_handle, params.buffer_offset, params.mapping_size,
+ params.flags, params.handle, params.buffer_offset, params.mapping_size,
params.offset);
- const auto object{nvmap_dev->GetObject(params.nvmap_handle)};
- if (!object) {
- LOG_CRITICAL(Service_NVDRV, "invalid nvmap_handle={:X}", params.nvmap_handle);
- std::memcpy(output.data(), &params, output.size());
- return NvResult::InvalidState;
- }
-
- // The real nvservices doesn't make a distinction between handles and ids, and
- // object can only have one handle and it will be the same as its id. Assert that this is the
- // case to prevent unexpected behavior.
- ASSERT(object->id == params.nvmap_handle);
- auto& gpu = system.GPU();
+ std::scoped_lock lock(mutex);
- u64 page_size{params.page_size};
- if (!page_size) {
- page_size = object->align;
+ if (!vm.initialised) {
+ return NvResult::BadValue;
}
- if ((params.flags & AddressSpaceFlags::Remap) != AddressSpaceFlags::None) {
- if (const auto buffer_map{FindBufferMap(params.offset)}; buffer_map) {
- const auto cpu_addr{static_cast<VAddr>(buffer_map->CpuAddr() + params.buffer_offset)};
- const auto gpu_addr{static_cast<GPUVAddr>(params.offset + params.buffer_offset)};
+ // Remaps a subregion of an existing mapping to a different PA
+ if ((params.flags & MappingFlags::Remap) != MappingFlags::None) {
+ try {
+ auto mapping{mapping_map.at(params.offset)};
- if (!gpu.MemoryManager().Map(cpu_addr, gpu_addr, params.mapping_size)) {
- LOG_CRITICAL(Service_NVDRV,
- "remap failed, flags={:X}, nvmap_handle={:X}, buffer_offset={}, "
- "mapping_size = {}, offset={}",
- params.flags, params.nvmap_handle, params.buffer_offset,
- params.mapping_size, params.offset);
-
- std::memcpy(output.data(), &params, output.size());
- return NvResult::InvalidState;
+ if (mapping->size < params.mapping_size) {
+ LOG_WARNING(Service_NVDRV,
+ "Cannot remap a partially mapped GPU address space region: 0x{:X}",
+ params.offset);
+ return NvResult::BadValue;
}
- std::memcpy(output.data(), &params, output.size());
- return NvResult::Success;
- } else {
- LOG_CRITICAL(Service_NVDRV, "address not mapped offset={}", params.offset);
+ u64 gpu_address{static_cast<u64>(params.offset + params.buffer_offset)};
+ VAddr cpu_address{mapping->ptr + params.buffer_offset};
+
+ gmmu->Map(gpu_address, cpu_address, params.mapping_size, mapping->big_page);
- std::memcpy(output.data(), &params, output.size());
- return NvResult::InvalidState;
+ return NvResult::Success;
+ } catch (const std::out_of_range&) {
+ LOG_WARNING(Service_NVDRV, "Cannot remap an unmapped GPU address space region: 0x{:X}",
+ params.offset);
+ return NvResult::BadValue;
}
}
- // We can only map objects that have already been assigned a CPU address.
- ASSERT(object->status == nvmap::Object::Status::Allocated);
-
- const auto physical_address{object->addr + params.buffer_offset};
- u64 size{params.mapping_size};
- if (!size) {
- size = object->size;
+ auto handle{nvmap.GetHandle(params.handle)};
+ if (!handle) {
+ return NvResult::BadValue;
}
- const bool is_alloc{(params.flags & AddressSpaceFlags::FixedOffset) == AddressSpaceFlags::None};
- if (is_alloc) {
- params.offset = gpu.MemoryManager().MapAllocate(physical_address, size, page_size);
- } else {
- params.offset = gpu.MemoryManager().Map(physical_address, params.offset, size);
- }
+ VAddr cpu_address{static_cast<VAddr>(handle->address + params.buffer_offset)};
+ u64 size{params.mapping_size ? params.mapping_size : handle->orig_size};
+
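+    // Determine the page size from the handle's alignment: big pages are used when the handle is
+    // aligned to the big page size, otherwise fall back to small pages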
+ bool big_page{[&]() {
+ if (Common::IsAligned(handle->align, vm.big_page_size)) {
+ return true;
+ } else if (Common::IsAligned(handle->align, VM::YUZU_PAGESIZE)) {
+ return false;
+ } else {
+ ASSERT(false);
+ return false;
+ }
+ }()};
+
+ if ((params.flags & MappingFlags::Fixed) != MappingFlags::None) {
+ auto alloc{allocation_map.upper_bound(params.offset)};
- auto result = NvResult::Success;
- if (!params.offset) {
- LOG_CRITICAL(Service_NVDRV, "failed to map size={}", size);
- result = NvResult::InvalidState;
+ if (alloc-- == allocation_map.begin() ||
+ (params.offset - alloc->first) + size > alloc->second.size) {
+ ASSERT_MSG(false, "Cannot perform a fixed mapping into an unallocated region!");
+ return NvResult::BadValue;
+ }
+
+ const bool use_big_pages = alloc->second.big_pages && big_page;
+ gmmu->Map(params.offset, cpu_address, size, use_big_pages);
+
+ auto mapping{std::make_shared<Mapping>(cpu_address, params.offset, size, true,
+ use_big_pages, alloc->second.sparse)};
+ alloc->second.mappings.push_back(mapping);
+ mapping_map[params.offset] = mapping;
} else {
- AddBufferMap(params.offset, size, physical_address, is_alloc);
+
+ auto& allocator{big_page ? *vm.big_page_allocator : *vm.small_page_allocator};
+ u32 page_size{big_page ? vm.big_page_size : VM::YUZU_PAGESIZE};
+ u32 page_size_bits{big_page ? vm.big_page_size_bits : VM::PAGE_SIZE_BITS};
+
+ params.offset = static_cast<u64>(allocator.Allocate(
+ static_cast<u32>(Common::AlignUp(size, page_size) >> page_size_bits)))
+ << page_size_bits;
+ if (!params.offset) {
+ ASSERT_MSG(false, "Failed to allocate free space in the GPU AS!");
+ return NvResult::InsufficientMemory;
+ }
+
+ gmmu->Map(params.offset, cpu_address, Common::AlignUp(size, page_size), big_page);
+
+ auto mapping{
+ std::make_shared<Mapping>(cpu_address, params.offset, size, false, big_page, false)};
+ mapping_map[params.offset] = mapping;
}
std::memcpy(output.data(), &params, output.size());
- return result;
+ return NvResult::Success;
}
NvResult nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output) {
@@ -254,47 +426,82 @@ NvResult nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8
LOG_DEBUG(Service_NVDRV, "called, offset=0x{:X}", params.offset);
- if (const auto size{RemoveBufferMap(params.offset)}; size) {
- system.GPU().MemoryManager().Unmap(params.offset, *size);
- } else {
- LOG_ERROR(Service_NVDRV, "invalid offset=0x{:X}", params.offset);
+ std::scoped_lock lock(mutex);
+
+ if (!vm.initialised) {
+ return NvResult::BadValue;
+ }
+
+ try {
+ auto mapping{mapping_map.at(params.offset)};
+
+ if (!mapping->fixed) {
+ auto& allocator{mapping->big_page ? *vm.big_page_allocator : *vm.small_page_allocator};
+ u32 page_size_bits{mapping->big_page ? vm.big_page_size_bits : VM::PAGE_SIZE_BITS};
+
+ allocator.Free(static_cast<u32>(mapping->offset >> page_size_bits),
+ static_cast<u32>(mapping->size >> page_size_bits));
+ }
+
+ // Sparse mappings shouldn't be fully unmapped, just returned to their sparse state
+ // Only FreeSpace can unmap them fully
+ if (mapping->sparse_alloc) {
+ gmmu->MapSparse(params.offset, mapping->size, mapping->big_page);
+ } else {
+ gmmu->Unmap(params.offset, mapping->size);
+ }
+
+ mapping_map.erase(params.offset);
+ } catch (const std::out_of_range&) {
+ LOG_WARNING(Service_NVDRV, "Couldn't find region to unmap at 0x{:X}", params.offset);
}
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
NvResult nvhost_as_gpu::BindChannel(const std::vector<u8>& input, std::vector<u8>& output) {
IoctlBindChannel params{};
std::memcpy(&params, input.data(), input.size());
- LOG_WARNING(Service_NVDRV, "(STUBBED) called, fd={:X}", params.fd);
+ LOG_DEBUG(Service_NVDRV, "called, fd={:X}", params.fd);
- channel = params.fd;
+ auto gpu_channel_device = module.GetDevice<nvhost_gpu>(params.fd);
+ gpu_channel_device->channel_state->memory_manager = gmmu;
return NvResult::Success;
}
+void nvhost_as_gpu::GetVARegionsImpl(IoctlGetVaRegions& params) {
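+    // Report the two allocator-backed VA regions: small pages first, then big pages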
+ params.buf_size = 2 * sizeof(VaRegion);
+
+ params.regions = std::array<VaRegion, 2>{
+ VaRegion{
+ .offset = vm.small_page_allocator->GetVAStart() << VM::PAGE_SIZE_BITS,
+ .page_size = VM::YUZU_PAGESIZE,
+ ._pad0_{},
+ .pages = vm.small_page_allocator->GetVALimit() - vm.small_page_allocator->GetVAStart(),
+ },
+ VaRegion{
+ .offset = vm.big_page_allocator->GetVAStart() << vm.big_page_size_bits,
+ .page_size = vm.big_page_size,
+ ._pad0_{},
+ .pages = vm.big_page_allocator->GetVALimit() - vm.big_page_allocator->GetVAStart(),
+ },
+ };
+}
+
NvResult nvhost_as_gpu::GetVARegions(const std::vector<u8>& input, std::vector<u8>& output) {
IoctlGetVaRegions params{};
std::memcpy(&params, input.data(), input.size());
- LOG_WARNING(Service_NVDRV, "(STUBBED) called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
- params.buf_size);
-
- params.buf_size = 0x30;
+ LOG_DEBUG(Service_NVDRV, "called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
+ params.buf_size);
- params.small = IoctlVaRegion{
- .offset = 0x04000000,
- .page_size = DEFAULT_SMALL_PAGE_SIZE,
- .pages = 0x3fbfff,
- };
+ std::scoped_lock lock(mutex);
- params.big = IoctlVaRegion{
- .offset = 0x04000000,
- .page_size = big_page_size,
- .pages = 0x1bffff,
- };
+ if (!vm.initialised) {
+ return NvResult::BadValue;
+ }
- // TODO(ogniK): This probably can stay stubbed but should add support way way later
+ GetVARegionsImpl(params);
std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
@@ -305,62 +512,27 @@ NvResult nvhost_as_gpu::GetVARegions(const std::vector<u8>& input, std::vector<u
IoctlGetVaRegions params{};
std::memcpy(&params, input.data(), input.size());
- LOG_WARNING(Service_NVDRV, "(STUBBED) called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
- params.buf_size);
-
- params.buf_size = 0x30;
+ LOG_DEBUG(Service_NVDRV, "called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
+ params.buf_size);
- params.small = IoctlVaRegion{
- .offset = 0x04000000,
- .page_size = 0x1000,
- .pages = 0x3fbfff,
- };
+ std::scoped_lock lock(mutex);
- params.big = IoctlVaRegion{
- .offset = 0x04000000,
- .page_size = big_page_size,
- .pages = 0x1bffff,
- };
+ if (!vm.initialised) {
+ return NvResult::BadValue;
+ }
- // TODO(ogniK): This probably can stay stubbed but should add support way way later
+ GetVARegionsImpl(params);
std::memcpy(output.data(), &params, output.size());
- std::memcpy(inline_output.data(), &params.small, sizeof(IoctlVaRegion));
- std::memcpy(inline_output.data() + sizeof(IoctlVaRegion), &params.big, sizeof(IoctlVaRegion));
+ std::memcpy(inline_output.data(), &params.regions[0], sizeof(VaRegion));
+ std::memcpy(inline_output.data() + sizeof(VaRegion), &params.regions[1], sizeof(VaRegion));
return NvResult::Success;
}
-std::optional<nvhost_as_gpu::BufferMap> nvhost_as_gpu::FindBufferMap(GPUVAddr gpu_addr) const {
- const auto end{buffer_mappings.upper_bound(gpu_addr)};
- for (auto iter{buffer_mappings.begin()}; iter != end; ++iter) {
- if (gpu_addr >= iter->second.StartAddr() && gpu_addr < iter->second.EndAddr()) {
- return iter->second;
- }
- }
-
- return std::nullopt;
-}
-
-void nvhost_as_gpu::AddBufferMap(GPUVAddr gpu_addr, std::size_t size, VAddr cpu_addr,
- bool is_allocated) {
- buffer_mappings[gpu_addr] = {gpu_addr, size, cpu_addr, is_allocated};
-}
-
-std::optional<std::size_t> nvhost_as_gpu::RemoveBufferMap(GPUVAddr gpu_addr) {
- if (const auto iter{buffer_mappings.find(gpu_addr)}; iter != buffer_mappings.end()) {
- std::size_t size{};
-
- if (iter->second.IsAllocated()) {
- size = iter->second.Size();
- }
-
- buffer_mappings.erase(iter);
-
- return size;
- }
-
- return std::nullopt;
+Kernel::KEvent* nvhost_as_gpu::QueryEvent(u32 event_id) {
+ LOG_CRITICAL(Service_NVDRV, "Unknown AS GPU Event {}", event_id);
+ return nullptr;
}
} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
index 555843a6f..86fe71c75 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
@@ -1,35 +1,50 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
#pragma once
+#include <bit>
+#include <list>
#include <map>
#include <memory>
+#include <mutex>
#include <optional>
#include <vector>
+#include "common/address_space.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
+#include "core/hle/service/nvdrv/core/nvmap.h"
#include "core/hle/service/nvdrv/devices/nvdevice.h"
-namespace Service::Nvidia::Devices {
+namespace Tegra {
+class MemoryManager;
+} // namespace Tegra
+
+namespace Service::Nvidia {
+class Module;
+}
-constexpr u32 DEFAULT_BIG_PAGE_SIZE = 1 << 16;
-constexpr u32 DEFAULT_SMALL_PAGE_SIZE = 1 << 12;
+namespace Service::Nvidia::NvCore {
+class Container;
+class NvMap;
+} // namespace Service::Nvidia::NvCore
-class nvmap;
+namespace Service::Nvidia::Devices {
-enum class AddressSpaceFlags : u32 {
- None = 0x0,
- FixedOffset = 0x1,
- Remap = 0x100,
+enum class MappingFlags : u32 {
+ None = 0,
+ Fixed = 1 << 0,
+ Sparse = 1 << 1,
+ Remap = 1 << 8,
};
-DECLARE_ENUM_FLAG_OPERATORS(AddressSpaceFlags);
+DECLARE_ENUM_FLAG_OPERATORS(MappingFlags);
class nvhost_as_gpu final : public nvdevice {
public:
- explicit nvhost_as_gpu(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_);
+ explicit nvhost_as_gpu(Core::System& system_, Module& module, NvCore::Container& core);
~nvhost_as_gpu() override;
NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -42,46 +57,17 @@ public:
void OnOpen(DeviceFD fd) override;
void OnClose(DeviceFD fd) override;
-private:
- class BufferMap final {
- public:
- constexpr BufferMap() = default;
-
- constexpr BufferMap(GPUVAddr start_addr_, std::size_t size_)
- : start_addr{start_addr_}, end_addr{start_addr_ + size_} {}
-
- constexpr BufferMap(GPUVAddr start_addr_, std::size_t size_, VAddr cpu_addr_,
- bool is_allocated_)
- : start_addr{start_addr_}, end_addr{start_addr_ + size_}, cpu_addr{cpu_addr_},
- is_allocated{is_allocated_} {}
-
- constexpr VAddr StartAddr() const {
- return start_addr;
- }
-
- constexpr VAddr EndAddr() const {
- return end_addr;
- }
-
- constexpr std::size_t Size() const {
- return end_addr - start_addr;
- }
-
- constexpr VAddr CpuAddr() const {
- return cpu_addr;
- }
-
- constexpr bool IsAllocated() const {
- return is_allocated;
- }
-
- private:
- GPUVAddr start_addr{};
- GPUVAddr end_addr{};
- VAddr cpu_addr{};
- bool is_allocated{};
+ Kernel::KEvent* QueryEvent(u32 event_id) override;
+
+ struct VaRegion {
+ u64 offset;
+ u32 page_size;
+ u32 _pad0_;
+ u64 pages;
};
+ static_assert(sizeof(VaRegion) == 0x18);
+private:
struct IoctlAllocAsEx {
u32_le flags{}; // usually passes 1
s32_le as_fd{}; // ignored; passes 0
@@ -96,7 +82,7 @@ private:
struct IoctlAllocSpace {
u32_le pages{};
u32_le page_size{};
- AddressSpaceFlags flags{};
+ MappingFlags flags{};
INSERT_PADDING_WORDS(1);
union {
u64_le offset;
@@ -113,19 +99,19 @@ private:
static_assert(sizeof(IoctlFreeSpace) == 16, "IoctlFreeSpace is incorrect size");
struct IoctlRemapEntry {
- u16_le flags{};
- u16_le kind{};
- u32_le nvmap_handle{};
- u32_le map_offset{};
- u32_le offset{};
- u32_le pages{};
+ u16 flags;
+ u16 kind;
+ NvCore::NvMap::Handle::Id handle;
+ u32 handle_offset_big_pages;
+ u32 as_offset_big_pages;
+ u32 big_pages;
};
static_assert(sizeof(IoctlRemapEntry) == 20, "IoctlRemapEntry is incorrect size");
struct IoctlMapBufferEx {
- AddressSpaceFlags flags{}; // bit0: fixed_offset, bit2: cacheable
- u32_le kind{}; // -1 is default
- u32_le nvmap_handle{};
+ MappingFlags flags{}; // bit0: fixed_offset, bit2: cacheable
+ u32_le kind{}; // -1 is default
+ NvCore::NvMap::Handle::Id handle;
u32_le page_size{}; // 0 means don't care
s64_le buffer_offset{};
u64_le mapping_size{};
@@ -143,27 +129,15 @@ private:
};
static_assert(sizeof(IoctlBindChannel) == 4, "IoctlBindChannel is incorrect size");
- struct IoctlVaRegion {
- u64_le offset{};
- u32_le page_size{};
- INSERT_PADDING_WORDS(1);
- u64_le pages{};
- };
- static_assert(sizeof(IoctlVaRegion) == 24, "IoctlVaRegion is incorrect size");
-
struct IoctlGetVaRegions {
u64_le buf_addr{}; // (contained output user ptr on linux, ignored)
u32_le buf_size{}; // forced to 2*sizeof(struct va_region)
u32_le reserved{};
- IoctlVaRegion small{};
- IoctlVaRegion big{};
+ std::array<VaRegion, 2> regions{};
};
- static_assert(sizeof(IoctlGetVaRegions) == 16 + sizeof(IoctlVaRegion) * 2,
+ static_assert(sizeof(IoctlGetVaRegions) == 16 + sizeof(VaRegion) * 2,
"IoctlGetVaRegions is incorrect size");
- s32 channel{};
- u32 big_page_size{DEFAULT_BIG_PAGE_SIZE};
-
NvResult AllocAsEx(const std::vector<u8>& input, std::vector<u8>& output);
NvResult AllocateSpace(const std::vector<u8>& input, std::vector<u8>& output);
NvResult Remap(const std::vector<u8>& input, std::vector<u8>& output);
@@ -172,18 +146,75 @@ private:
NvResult FreeSpace(const std::vector<u8>& input, std::vector<u8>& output);
NvResult BindChannel(const std::vector<u8>& input, std::vector<u8>& output);
+ void GetVARegionsImpl(IoctlGetVaRegions& params);
NvResult GetVARegions(const std::vector<u8>& input, std::vector<u8>& output);
NvResult GetVARegions(const std::vector<u8>& input, std::vector<u8>& output,
std::vector<u8>& inline_output);
- std::optional<BufferMap> FindBufferMap(GPUVAddr gpu_addr) const;
- void AddBufferMap(GPUVAddr gpu_addr, std::size_t size, VAddr cpu_addr, bool is_allocated);
- std::optional<std::size_t> RemoveBufferMap(GPUVAddr gpu_addr);
+ void FreeMappingLocked(u64 offset);
+
+ Module& module;
+
+ NvCore::Container& container;
+ NvCore::NvMap& nvmap;
- std::shared_ptr<nvmap> nvmap_dev;
+ struct Mapping {
+ VAddr ptr;
+ u64 offset;
+ u64 size;
+ bool fixed;
+ bool big_page; // Only valid if fixed == false
+ bool sparse_alloc;
+
+ Mapping(VAddr ptr_, u64 offset_, u64 size_, bool fixed_, bool big_page_, bool sparse_alloc_)
+ : ptr(ptr_), offset(offset_), size(size_), fixed(fixed_), big_page(big_page_),
+ sparse_alloc(sparse_alloc_) {}
+ };
+
+ struct Allocation {
+ u64 size;
+ std::list<std::shared_ptr<Mapping>> mappings;
+ u32 page_size;
+ bool sparse;
+ bool big_pages;
+ };
- // This is expected to be ordered, therefore we must use a map, not unordered_map
- std::map<GPUVAddr, BufferMap> buffer_mappings;
+ std::map<u64, std::shared_ptr<Mapping>>
+        mapping_map; //!< Maps the base addresses of mapped buffers to their total sizes and
+                     //!< mapping type; this is needed because what was originally a single buffer
+                     //!< may have been split into multiple GPU-side buffers with the remap flag.
+ std::map<u64, Allocation> allocation_map; //!< Holds allocations created by AllocSpace from
+ //!< which fixed buffers can be mapped into
+ std::mutex mutex; //!< Locks all AS operations
+
+ struct VM {
+ static constexpr u32 YUZU_PAGESIZE{0x1000};
+ static constexpr u32 PAGE_SIZE_BITS{std::countr_zero(YUZU_PAGESIZE)};
+
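+        // Bitmask of the supported big page sizes (0x10000 | 0x20000, i.e. 64KiB and 128KiB)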
+ static constexpr u32 SUPPORTED_BIG_PAGE_SIZES{0x30000};
+ static constexpr u32 DEFAULT_BIG_PAGE_SIZE{0x20000};
+ u32 big_page_size{DEFAULT_BIG_PAGE_SIZE};
+ u32 big_page_size_bits{std::countr_zero(DEFAULT_BIG_PAGE_SIZE)};
+
+ static constexpr u32 VA_START_SHIFT{10};
+ static constexpr u64 DEFAULT_VA_SPLIT{1ULL << 34};
+ static constexpr u64 DEFAULT_VA_RANGE{1ULL << 37};
+ u64 va_range_start{DEFAULT_BIG_PAGE_SIZE << VA_START_SHIFT};
+ u64 va_range_split{DEFAULT_VA_SPLIT};
+ u64 va_range_end{DEFAULT_VA_RANGE};
+
+ using Allocator = Common::FlatAllocator<u32, 0, 32>;
+
+ std::unique_ptr<Allocator> big_page_allocator;
+ std::shared_ptr<Allocator>
+            small_page_allocator; //!< Shared as this is also used by nvhost::GpuChannel
+
+ bool initialised{};
+ } vm;
+ std::shared_ptr<Tegra::MemoryManager> gmmu;
+
+ // s32 channel{};
+ // u32 big_page_size{VM::DEFAULT_BIG_PAGE_SIZE};
};
} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
index 527531f29..5bee4a3d3 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
@@ -1,24 +1,39 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
+#include <bit>
#include <cstdlib>
#include <cstring>
+#include <fmt/format.h>
#include "common/assert.h"
#include "common/logging/log.h"
+#include "common/scope_exit.h"
#include "core/core.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_writable_event.h"
+#include "core/hle/service/nvdrv/core/container.h"
+#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
#include "core/hle/service/nvdrv/devices/nvhost_ctrl.h"
#include "video_core/gpu.h"
+#include "video_core/host1x/host1x.h"
namespace Service::Nvidia::Devices {
nvhost_ctrl::nvhost_ctrl(Core::System& system_, EventInterface& events_interface_,
- SyncpointManager& syncpoint_manager_)
- : nvdevice{system_}, events_interface{events_interface_}, syncpoint_manager{
- syncpoint_manager_} {}
-nvhost_ctrl::~nvhost_ctrl() = default;
+ NvCore::Container& core_)
+ : nvdevice{system_}, events_interface{events_interface_}, core{core_},
+ syncpoint_manager{core_.GetSyncpointManager()} {}
+
+nvhost_ctrl::~nvhost_ctrl() {
+ for (auto& event : events) {
+ if (!event.registered) {
+ continue;
+ }
+ events_interface.FreeEvent(event.kevent);
+ }
+}
NvResult nvhost_ctrl::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
std::vector<u8>& output) {
@@ -30,13 +45,15 @@ NvResult nvhost_ctrl::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>&
case 0x1c:
return IocCtrlClearEventWait(input, output);
case 0x1d:
- return IocCtrlEventWait(input, output, false);
- case 0x1e:
return IocCtrlEventWait(input, output, true);
+ case 0x1e:
+ return IocCtrlEventWait(input, output, false);
case 0x1f:
return IocCtrlEventRegister(input, output);
case 0x20:
return IocCtrlEventUnregister(input, output);
+ case 0x21:
+ return IocCtrlEventUnregisterBatch(input, output);
}
break;
default:
@@ -60,6 +77,7 @@ NvResult nvhost_ctrl::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>&
}
void nvhost_ctrl::OnOpen(DeviceFD fd) {}
+
void nvhost_ctrl::OnClose(DeviceFD fd) {}
NvResult nvhost_ctrl::NvOsGetConfigU32(const std::vector<u8>& input, std::vector<u8>& output) {
@@ -71,116 +89,167 @@ NvResult nvhost_ctrl::NvOsGetConfigU32(const std::vector<u8>& input, std::vector
}
NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& output,
- bool is_async) {
+ bool is_allocation) {
IocCtrlEventWaitParams params{};
std::memcpy(&params, input.data(), sizeof(params));
- LOG_DEBUG(Service_NVDRV, "syncpt_id={}, threshold={}, timeout={}, is_async={}",
- params.syncpt_id, params.threshold, params.timeout, is_async);
+ LOG_DEBUG(Service_NVDRV, "syncpt_id={}, threshold={}, timeout={}, is_allocation={}",
+ params.fence.id, params.fence.value, params.timeout, is_allocation);
- if (params.syncpt_id >= MaxSyncPoints) {
- return NvResult::BadParameter;
- }
+ bool must_unmark_fail = !is_allocation;
+ const u32 event_id = params.value.raw;
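+    // Always write the params struct back to the output buffer when returning from this function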
+ SCOPE_EXIT({
+ std::memcpy(output.data(), &params, sizeof(params));
+ if (must_unmark_fail) {
+ events[event_id].fails = 0;
+ }
+ });
- u32 event_id = params.value & 0x00FF;
+ const u32 fence_id = static_cast<u32>(params.fence.id);
- if (event_id >= MaxNvEvents) {
- std::memcpy(output.data(), &params, sizeof(params));
+ if (fence_id >= MaxSyncPoints) {
return NvResult::BadParameter;
}
- if (syncpoint_manager.IsSyncpointExpired(params.syncpt_id, params.threshold)) {
- params.value = syncpoint_manager.GetSyncpointMin(params.syncpt_id);
- std::memcpy(output.data(), &params, sizeof(params));
- events_interface.failed[event_id] = false;
+ if (params.fence.value == 0) {
+ if (!syncpoint_manager.IsSyncpointAllocated(params.fence.id)) {
+ LOG_WARNING(Service_NVDRV,
+ "Unallocated syncpt_id={}, threshold={}, timeout={}, is_allocation={}",
+ params.fence.id, params.fence.value, params.timeout, is_allocation);
+ } else {
+ params.value.raw = syncpoint_manager.ReadSyncpointMinValue(fence_id);
+ }
return NvResult::Success;
}
- if (const auto new_value = syncpoint_manager.RefreshSyncpoint(params.syncpt_id);
- syncpoint_manager.IsSyncpointExpired(params.syncpt_id, params.threshold)) {
- params.value = new_value;
- std::memcpy(output.data(), &params, sizeof(params));
- events_interface.failed[event_id] = false;
+ if (syncpoint_manager.IsFenceSignalled(params.fence)) {
+ params.value.raw = syncpoint_manager.ReadSyncpointMinValue(fence_id);
return NvResult::Success;
}
- auto& event = events_interface.events[event_id];
- auto& gpu = system.GPU();
-
- // This is mostly to take into account unimplemented features. As synced
- // gpu is always synced.
- if (!gpu.IsAsync()) {
- event.event->GetWritableEvent().Signal();
- return NvResult::Success;
- }
- const u32 current_syncpoint_value = event.fence.value;
- const s32 diff = current_syncpoint_value - params.threshold;
- if (diff >= 0) {
- event.event->GetWritableEvent().Signal();
- params.value = current_syncpoint_value;
- std::memcpy(output.data(), &params, sizeof(params));
- events_interface.failed[event_id] = false;
+ if (const auto new_value = syncpoint_manager.UpdateMin(fence_id);
+ syncpoint_manager.IsFenceSignalled(params.fence)) {
+ params.value.raw = new_value;
return NvResult::Success;
}
- const u32 target_value = current_syncpoint_value - diff;
- if (!is_async) {
- params.value = 0;
+ auto& host1x_syncpoint_manager = system.Host1x().GetSyncpointManager();
+ const u32 target_value = params.fence.value;
+
+ auto lock = NvEventsLock();
+
+ u32 slot = [&]() {
+ if (is_allocation) {
+ params.value.raw = 0;
+ return FindFreeNvEvent(fence_id);
+ } else {
+ return params.value.raw;
+ }
+ }();
+
+ must_unmark_fail = false;
+
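+    // After repeated signalling failures, fall back to a blocking wait on the host syncpoint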
+ const auto check_failing = [&]() {
+ if (events[slot].fails > 2) {
+ {
+ auto lk = system.StallProcesses();
+ host1x_syncpoint_manager.WaitHost(fence_id, target_value);
+ system.UnstallProcesses();
+ }
+ params.value.raw = target_value;
+ return true;
+ }
+ return false;
+ };
+
+ if (slot >= MaxNvEvents) {
+ return NvResult::BadParameter;
}
if (params.timeout == 0) {
- std::memcpy(output.data(), &params, sizeof(params));
+ if (check_failing()) {
+ events[slot].fails = 0;
+ return NvResult::Success;
+ }
return NvResult::Timeout;
}
- EventState status = events_interface.status[event_id];
- const bool bad_parameter = status == EventState::Busy;
- if (bad_parameter) {
- std::memcpy(output.data(), &params, sizeof(params));
+ auto& event = events[slot];
+
+ if (!event.registered) {
return NvResult::BadParameter;
}
- events_interface.SetEventStatus(event_id, EventState::Waiting);
- events_interface.assigned_syncpt[event_id] = params.syncpt_id;
- events_interface.assigned_value[event_id] = target_value;
- if (is_async) {
- params.value = params.syncpt_id << 4;
- } else {
- params.value = ((params.syncpt_id & 0xfff) << 16) | 0x10000000;
- }
- params.value |= event_id;
- event.event->GetWritableEvent().Clear();
- if (events_interface.failed[event_id]) {
- {
- auto lk = system.StallProcesses();
- gpu.WaitFence(params.syncpt_id, target_value);
- system.UnstallProcesses();
- }
- std::memcpy(output.data(), &params, sizeof(params));
- events_interface.failed[event_id] = false;
+
+ if (event.IsBeingUsed()) {
+ return NvResult::BadParameter;
+ }
+
+ if (check_failing()) {
+ event.fails = 0;
return NvResult::Success;
}
- gpu.RegisterSyncptInterrupt(params.syncpt_id, target_value);
- std::memcpy(output.data(), &params, sizeof(params));
+
+ params.value.raw = 0;
+
+ event.status.store(EventState::Waiting, std::memory_order_release);
+ event.assigned_syncpt = fence_id;
+ event.assigned_value = target_value;
+ if (is_allocation) {
+ params.value.syncpoint_id_for_allocation.Assign(static_cast<u16>(fence_id));
+ params.value.event_allocated.Assign(1);
+ } else {
+ params.value.syncpoint_id.Assign(fence_id);
+ }
+ params.value.raw |= slot;
+
+ event.wait_handle =
+ host1x_syncpoint_manager.RegisterHostAction(fence_id, target_value, [this, slot]() {
+ auto& event_ = events[slot];
+ if (event_.status.exchange(EventState::Signalling, std::memory_order_acq_rel) ==
+ EventState::Waiting) {
+ event_.kevent->GetWritableEvent().Signal();
+ }
+ event_.status.store(EventState::Signalled, std::memory_order_release);
+ });
return NvResult::Timeout;
}
+NvResult nvhost_ctrl::FreeEvent(u32 slot) {
+ if (slot >= MaxNvEvents) {
+ return NvResult::BadParameter;
+ }
+
+ auto& event = events[slot];
+
+ if (!event.registered) {
+ return NvResult::Success;
+ }
+
+ if (event.IsBeingUsed()) {
+ return NvResult::Busy;
+ }
+
+ FreeNvEvent(slot);
+ return NvResult::Success;
+}
+
NvResult nvhost_ctrl::IocCtrlEventRegister(const std::vector<u8>& input, std::vector<u8>& output) {
IocCtrlEventRegisterParams params{};
std::memcpy(&params, input.data(), sizeof(params));
- const u32 event_id = params.user_event_id & 0x00FF;
+ const u32 event_id = params.user_event_id;
LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id);
if (event_id >= MaxNvEvents) {
return NvResult::BadParameter;
}
- if (events_interface.registered[event_id]) {
- const auto event_state = events_interface.status[event_id];
- if (event_state != EventState::Free) {
- LOG_WARNING(Service_NVDRV, "Event already registered! Unregistering previous event");
- events_interface.UnregisterEvent(event_id);
- } else {
- return NvResult::BadParameter;
+
+ auto lock = NvEventsLock();
+
+ if (events[event_id].registered) {
+ const auto result = FreeEvent(event_id);
+ if (result != NvResult::Success) {
+ return result;
}
}
- events_interface.RegisterEvent(event_id);
+ CreateNvEvent(event_id);
return NvResult::Success;
}
@@ -190,34 +259,142 @@ NvResult nvhost_ctrl::IocCtrlEventUnregister(const std::vector<u8>& input,
std::memcpy(&params, input.data(), sizeof(params));
const u32 event_id = params.user_event_id & 0x00FF;
LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id);
- if (event_id >= MaxNvEvents) {
- return NvResult::BadParameter;
- }
- if (!events_interface.registered[event_id]) {
- return NvResult::BadParameter;
+
+ auto lock = NvEventsLock();
+ return FreeEvent(event_id);
+}
+
+NvResult nvhost_ctrl::IocCtrlEventUnregisterBatch(const std::vector<u8>& input,
+ std::vector<u8>& output) {
+ IocCtrlEventUnregisterBatchParams params{};
+ std::memcpy(&params, input.data(), sizeof(params));
+ u64 event_mask = params.user_events;
+ LOG_DEBUG(Service_NVDRV, " called, event_mask: {:X}", event_mask);
+
+ auto lock = NvEventsLock();
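+    // Each set bit in the mask selects an event slot to unregister; stop at the first failure.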
+ while (event_mask != 0) {
+ const u64 event_id = std::countr_zero(event_mask);
+ event_mask &= ~(1ULL << event_id);
+ const auto result = FreeEvent(static_cast<u32>(event_id));
+ if (result != NvResult::Success) {
+ return result;
+ }
}
- events_interface.UnregisterEvent(event_id);
return NvResult::Success;
}
NvResult nvhost_ctrl::IocCtrlClearEventWait(const std::vector<u8>& input, std::vector<u8>& output) {
- IocCtrlEventSignalParams params{};
+ IocCtrlEventClearParams params{};
std::memcpy(&params, input.data(), sizeof(params));
- u32 event_id = params.event_id & 0x00FF;
- LOG_WARNING(Service_NVDRV, "cleared event wait on, event_id: {:X}", event_id);
+ u32 event_id = params.event_id.slot;
+ LOG_DEBUG(Service_NVDRV, "called, event_id: {:X}", event_id);
if (event_id >= MaxNvEvents) {
return NvResult::BadParameter;
}
- if (events_interface.status[event_id] == EventState::Waiting) {
- events_interface.LiberateEvent(event_id);
- }
- events_interface.failed[event_id] = true;
- syncpoint_manager.RefreshSyncpoint(events_interface.events[event_id].fence.id);
+ auto lock = NvEventsLock();
+
+ auto& event = events[event_id];
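+    // Move to Cancelling first so a concurrent host1x callback cannot signal the event; if it
+    // was still waiting, drop the pending host action and refresh the syncpoint's cached value.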
+ if (event.status.exchange(EventState::Cancelling, std::memory_order_acq_rel) ==
+ EventState::Waiting) {
+ auto& host1x_syncpoint_manager = system.Host1x().GetSyncpointManager();
+ host1x_syncpoint_manager.DeregisterHostAction(event.assigned_syncpt, event.wait_handle);
+ syncpoint_manager.UpdateMin(event.assigned_syncpt);
+ event.wait_handle = {};
+ }
+ event.fails++;
+ event.status.store(EventState::Cancelled, std::memory_order_release);
+ event.kevent->GetWritableEvent().Clear();
return NvResult::Success;
}
+Kernel::KEvent* nvhost_ctrl::QueryEvent(u32 event_id) {
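+    // event_id is the packed SyncpointEventValue handed back by IocCtrlEventWait; decode it to
+    // recover the slot and the syncpoint the event was bound to.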
+ const auto desired_event = SyncpointEventValue{.raw = event_id};
+
+ const bool allocated = desired_event.event_allocated.Value() != 0;
+ const u32 slot{allocated ? desired_event.partial_slot.Value()
+ : static_cast<u32>(desired_event.slot)};
+ if (slot >= MaxNvEvents) {
+ ASSERT(false);
+ return nullptr;
+ }
+
+ const u32 syncpoint_id{allocated ? desired_event.syncpoint_id_for_allocation.Value()
+ : desired_event.syncpoint_id.Value()};
+
+ auto lock = NvEventsLock();
+
+ auto& event = events[slot];
+ if (event.registered && event.assigned_syncpt == syncpoint_id) {
+ ASSERT(event.kevent);
+ return event.kevent;
+ }
+ // Is this possible in hardware?
+    ASSERT_MSG(false, "Requested event at slot {} with syncpoint id {} is not registered", slot,
+               syncpoint_id);
+ return nullptr;
+}
+
+std::unique_lock<std::mutex> nvhost_ctrl::NvEventsLock() {
+ return std::unique_lock<std::mutex>(events_mutex);
+}
+
+void nvhost_ctrl::CreateNvEvent(u32 event_id) {
+ auto& event = events[event_id];
+ ASSERT(!event.kevent);
+ ASSERT(!event.registered);
+ ASSERT(!event.IsBeingUsed());
+ event.kevent = events_interface.CreateEvent(fmt::format("NVCTRL::NvEvent_{}", event_id));
+ event.status = EventState::Available;
+ event.registered = true;
+ const u64 mask = 1ULL << event_id;
+ event.fails = 0;
+ events_mask |= mask;
+ event.assigned_syncpt = 0;
+}
+
+void nvhost_ctrl::FreeNvEvent(u32 event_id) {
+ auto& event = events[event_id];
+ ASSERT(event.kevent);
+ ASSERT(event.registered);
+ ASSERT(!event.IsBeingUsed());
+ events_interface.FreeEvent(event.kevent);
+ event.kevent = nullptr;
+ event.status = EventState::Available;
+ event.registered = false;
+ const u64 mask = ~(1ULL << event_id);
+ events_mask &= mask;
+}
+
+u32 nvhost_ctrl::FindFreeNvEvent(u32 syncpoint_id) {
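+    // Prefer an idle event already bound to this syncpoint, then an unregistered slot (created
+    // on demand), and finally any idle registered slot as a last resort.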
+ u32 slot{MaxNvEvents};
+ u32 free_slot{MaxNvEvents};
+ for (u32 i = 0; i < MaxNvEvents; i++) {
+ auto& event = events[i];
+ if (event.registered) {
+ if (!event.IsBeingUsed()) {
+ slot = i;
+ if (event.assigned_syncpt == syncpoint_id) {
+ return slot;
+ }
+ }
+ } else if (free_slot == MaxNvEvents) {
+ free_slot = i;
+ }
+ }
+ if (free_slot < MaxNvEvents) {
+ CreateNvEvent(free_slot);
+ return free_slot;
+ }
+
+ if (slot < MaxNvEvents) {
+ return slot;
+ }
+
+ LOG_CRITICAL(Service_NVDRV, "Failed to allocate an event");
+ return 0;
+}
+
} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h
index 4fbb89b15..0b56d7070 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h
@@ -1,20 +1,28 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
#pragma once
#include <array>
#include <vector>
+#include "common/bit_field.h"
#include "common/common_types.h"
#include "core/hle/service/nvdrv/devices/nvdevice.h"
#include "core/hle/service/nvdrv/nvdrv.h"
+#include "video_core/host1x/syncpoint_manager.h"
+
+namespace Service::Nvidia::NvCore {
+class Container;
+class SyncpointManager;
+} // namespace Service::Nvidia::NvCore
namespace Service::Nvidia::Devices {
class nvhost_ctrl final : public nvdevice {
public:
explicit nvhost_ctrl(Core::System& system_, EventInterface& events_interface_,
- SyncpointManager& syncpoint_manager_);
+ NvCore::Container& core);
~nvhost_ctrl() override;
NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -27,7 +35,70 @@ public:
void OnOpen(DeviceFD fd) override;
void OnClose(DeviceFD fd) override;
+ Kernel::KEvent* QueryEvent(u32 event_id) override;
+
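+    // Packed value exchanged with the guest for event waits: the low bits carry the event slot
+    // and the remaining bits the syncpoint id, using one of two layouts depending on whether
+    // the event was allocated by the wait call (event_allocated set).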
+ union SyncpointEventValue {
+ u32 raw;
+
+ union {
+ BitField<0, 4, u32> partial_slot;
+ BitField<4, 28, u32> syncpoint_id;
+ };
+
+ struct {
+ u16 slot;
+ union {
+ BitField<0, 12, u16> syncpoint_id_for_allocation;
+ BitField<12, 1, u16> event_allocated;
+ };
+ };
+ };
+ static_assert(sizeof(SyncpointEventValue) == sizeof(u32));
+
private:
+ struct InternalEvent {
+        // Each kernel event associated with an NV event
+ Kernel::KEvent* kevent{};
+ // The status of the current NVEvent
+ std::atomic<EventState> status{};
+
+        // Number of times the NVEvent has failed; repeated failures make waits fall back to
+        // a blocking host wait.
+        u32 fails{};
+        // When an NVEvent is waiting on a GPU interrupt, this is the syncpoint
+        // associated with it.
+        u32 assigned_syncpt{};
+        // This is the value of the GPU interrupt for which the NVEvent is waiting.
+ u32 assigned_value{};
+
+ // Tells if an NVEvent is registered or not
+ bool registered{};
+
+ // Used for waiting on a syncpoint & canceling it.
+ Tegra::Host1x::SyncpointManager::ActionHandle wait_handle{};
+
+ bool IsBeingUsed() const {
+ const auto current_status = status.load(std::memory_order_acquire);
+ return current_status == EventState::Waiting ||
+ current_status == EventState::Cancelling ||
+ current_status == EventState::Signalling;
+ }
+ };
+
+ std::unique_lock<std::mutex> NvEventsLock();
+
+ void CreateNvEvent(u32 event_id);
+
+ void FreeNvEvent(u32 event_id);
+
+ u32 FindFreeNvEvent(u32 syncpoint_id);
+
+ std::array<InternalEvent, MaxNvEvents> events{};
+ std::mutex events_mutex;
+    // Mask representing which event slots are currently registered
+    u64 events_mask{};
+
struct IocSyncptReadParams {
u32_le id{};
u32_le value{};
@@ -83,27 +154,18 @@ private:
};
static_assert(sizeof(IocGetConfigParams) == 387, "IocGetConfigParams is incorrect size");
- struct IocCtrlEventSignalParams {
- u32_le event_id{};
+ struct IocCtrlEventClearParams {
+ SyncpointEventValue event_id{};
};
- static_assert(sizeof(IocCtrlEventSignalParams) == 4,
- "IocCtrlEventSignalParams is incorrect size");
+ static_assert(sizeof(IocCtrlEventClearParams) == 4,
+ "IocCtrlEventClearParams is incorrect size");
struct IocCtrlEventWaitParams {
- u32_le syncpt_id{};
- u32_le threshold{};
- s32_le timeout{};
- u32_le value{};
- };
- static_assert(sizeof(IocCtrlEventWaitParams) == 16, "IocCtrlEventWaitParams is incorrect size");
-
- struct IocCtrlEventWaitAsyncParams {
- u32_le syncpt_id{};
- u32_le threshold{};
+ NvFence fence{};
u32_le timeout{};
- u32_le value{};
+ SyncpointEventValue value{};
};
- static_assert(sizeof(IocCtrlEventWaitAsyncParams) == 16,
+ static_assert(sizeof(IocCtrlEventWaitParams) == 16,
"IocCtrlEventWaitAsyncParams is incorrect size");
struct IocCtrlEventRegisterParams {
@@ -118,19 +180,25 @@ private:
static_assert(sizeof(IocCtrlEventUnregisterParams) == 4,
"IocCtrlEventUnregisterParams is incorrect size");
- struct IocCtrlEventKill {
+ struct IocCtrlEventUnregisterBatchParams {
u64_le user_events{};
};
- static_assert(sizeof(IocCtrlEventKill) == 8, "IocCtrlEventKill is incorrect size");
+ static_assert(sizeof(IocCtrlEventUnregisterBatchParams) == 8,
+                  "IocCtrlEventUnregisterBatchParams is incorrect size");
NvResult NvOsGetConfigU32(const std::vector<u8>& input, std::vector<u8>& output);
- NvResult IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& output, bool is_async);
+ NvResult IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& output,
+ bool is_allocation);
NvResult IocCtrlEventRegister(const std::vector<u8>& input, std::vector<u8>& output);
NvResult IocCtrlEventUnregister(const std::vector<u8>& input, std::vector<u8>& output);
+ NvResult IocCtrlEventUnregisterBatch(const std::vector<u8>& input, std::vector<u8>& output);
NvResult IocCtrlClearEventWait(const std::vector<u8>& input, std::vector<u8>& output);
+ NvResult FreeEvent(u32 slot);
+
EventInterface& events_interface;
- SyncpointManager& syncpoint_manager;
+ NvCore::Container& core;
+ NvCore::SyncpointManager& syncpoint_manager;
};
} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
index 2b3b7efea..ced57dfe6 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
@@ -7,11 +7,19 @@
#include "core/core.h"
#include "core/core_timing.h"
#include "core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h"
+#include "core/hle/service/nvdrv/nvdrv.h"
namespace Service::Nvidia::Devices {
-nvhost_ctrl_gpu::nvhost_ctrl_gpu(Core::System& system_) : nvdevice{system_} {}
-nvhost_ctrl_gpu::~nvhost_ctrl_gpu() = default;
+nvhost_ctrl_gpu::nvhost_ctrl_gpu(Core::System& system_, EventInterface& events_interface_)
+ : nvdevice{system_}, events_interface{events_interface_} {
+ error_notifier_event = events_interface.CreateEvent("CtrlGpuErrorNotifier");
+    unknown_event = events_interface.CreateEvent("CtrlGpuUnknownEvent");
+}
+nvhost_ctrl_gpu::~nvhost_ctrl_gpu() {
+ events_interface.FreeEvent(error_notifier_event);
+ events_interface.FreeEvent(unknown_event);
+}
NvResult nvhost_ctrl_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
std::vector<u8>& output) {
@@ -286,4 +294,17 @@ NvResult nvhost_ctrl_gpu::GetGpuTime(const std::vector<u8>& input, std::vector<u
return NvResult::Success;
}
+Kernel::KEvent* nvhost_ctrl_gpu::QueryEvent(u32 event_id) {
+ switch (event_id) {
+ case 1:
+ return error_notifier_event;
+ case 2:
+ return unknown_event;
+ default: {
+ LOG_CRITICAL(Service_NVDRV, "Unknown Ctrl GPU Event {}", event_id);
+ }
+ }
+ return nullptr;
+}
+
} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
index 97e9a90cb..1e8f254e2 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
@@ -10,11 +10,15 @@
#include "common/swap.h"
#include "core/hle/service/nvdrv/devices/nvdevice.h"
+namespace Service::Nvidia {
+class EventInterface;
+}
+
namespace Service::Nvidia::Devices {
class nvhost_ctrl_gpu final : public nvdevice {
public:
- explicit nvhost_ctrl_gpu(Core::System& system_);
+ explicit nvhost_ctrl_gpu(Core::System& system_, EventInterface& events_interface_);
~nvhost_ctrl_gpu() override;
NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -27,6 +31,8 @@ public:
void OnOpen(DeviceFD fd) override;
void OnClose(DeviceFD fd) override;
+ Kernel::KEvent* QueryEvent(u32 event_id) override;
+
private:
struct IoctlGpuCharacteristics {
u32_le arch; // 0x120 (NVGPU_GPU_ARCH_GM200)
@@ -160,6 +166,12 @@ private:
NvResult ZBCQueryTable(const std::vector<u8>& input, std::vector<u8>& output);
NvResult FlushL2(const std::vector<u8>& input, std::vector<u8>& output);
NvResult GetGpuTime(const std::vector<u8>& input, std::vector<u8>& output);
+
+ EventInterface& events_interface;
+
+ // Events
+ Kernel::KEvent* error_notifier_event;
+ Kernel::KEvent* unknown_event;
};
} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
index b98e63011..45a759fa8 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
@@ -5,29 +5,46 @@
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
+#include "core/hle/service/nvdrv/core/container.h"
+#include "core/hle/service/nvdrv/core/nvmap.h"
+#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
#include "core/hle/service/nvdrv/devices/nvhost_gpu.h"
-#include "core/hle/service/nvdrv/syncpoint_manager.h"
+#include "core/hle/service/nvdrv/nvdrv.h"
#include "core/memory.h"
+#include "video_core/control/channel_state.h"
+#include "video_core/engines/puller.h"
#include "video_core/gpu.h"
+#include "video_core/host1x/host1x.h"
namespace Service::Nvidia::Devices {
namespace {
-Tegra::CommandHeader BuildFenceAction(Tegra::GPU::FenceOperation op, u32 syncpoint_id) {
- Tegra::GPU::FenceAction result{};
+Tegra::CommandHeader BuildFenceAction(Tegra::Engines::Puller::FenceOperation op, u32 syncpoint_id) {
+ Tegra::Engines::Puller::FenceAction result{};
result.op.Assign(op);
result.syncpoint_id.Assign(syncpoint_id);
return {result.raw};
}
} // namespace
-nvhost_gpu::nvhost_gpu(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
- SyncpointManager& syncpoint_manager_)
- : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)}, syncpoint_manager{syncpoint_manager_} {
- channel_fence.id = syncpoint_manager_.AllocateSyncpoint();
- channel_fence.value = system_.GPU().GetSyncpointValue(channel_fence.id);
+nvhost_gpu::nvhost_gpu(Core::System& system_, EventInterface& events_interface_,
+ NvCore::Container& core_)
+ : nvdevice{system_}, events_interface{events_interface_}, core{core_},
+ syncpoint_manager{core_.GetSyncpointManager()}, nvmap{core.GetNvMapFile()},
+ channel_state{system.GPU().AllocateChannel()} {
+ channel_syncpoint = syncpoint_manager.AllocateSyncpoint(false);
+ sm_exception_breakpoint_int_report_event =
+ events_interface.CreateEvent("GpuChannelSMExceptionBreakpointInt");
+ sm_exception_breakpoint_pause_report_event =
+ events_interface.CreateEvent("GpuChannelSMExceptionBreakpointPause");
+ error_notifier_event = events_interface.CreateEvent("GpuChannelErrorNotifier");
}
-nvhost_gpu::~nvhost_gpu() = default;
+nvhost_gpu::~nvhost_gpu() {
+ events_interface.FreeEvent(sm_exception_breakpoint_int_report_event);
+ events_interface.FreeEvent(sm_exception_breakpoint_pause_report_event);
+ events_interface.FreeEvent(error_notifier_event);
+ syncpoint_manager.FreeSyncpoint(channel_syncpoint);
+}
NvResult nvhost_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
std::vector<u8>& output) {
@@ -167,9 +184,14 @@ NvResult nvhost_gpu::AllocGPFIFOEx2(const std::vector<u8>& input, std::vector<u8
params.num_entries, params.flags, params.unk0, params.unk1, params.unk2,
params.unk3);
- channel_fence.value = system.GPU().GetSyncpointValue(channel_fence.id);
+ if (channel_state->initialized) {
+ LOG_CRITICAL(Service_NVDRV, "Already allocated!");
+ return NvResult::AlreadyAllocated;
+ }
+
+ system.GPU().InitChannel(*channel_state);
- params.fence_out = channel_fence;
+ params.fence_out = syncpoint_manager.GetSyncpointFence(channel_syncpoint);
std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
@@ -188,39 +210,37 @@ NvResult nvhost_gpu::AllocateObjectContext(const std::vector<u8>& input, std::ve
static std::vector<Tegra::CommandHeader> BuildWaitCommandList(NvFence fence) {
return {
- Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceValue, 1,
+ Tegra::BuildCommandHeader(Tegra::BufferMethods::SyncpointPayload, 1,
Tegra::SubmissionMode::Increasing),
{fence.value},
- Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceAction, 1,
+ Tegra::BuildCommandHeader(Tegra::BufferMethods::SyncpointOperation, 1,
Tegra::SubmissionMode::Increasing),
- BuildFenceAction(Tegra::GPU::FenceOperation::Acquire, fence.id),
+ BuildFenceAction(Tegra::Engines::Puller::FenceOperation::Acquire, fence.id),
};
}
-static std::vector<Tegra::CommandHeader> BuildIncrementCommandList(NvFence fence,
- u32 add_increment) {
+static std::vector<Tegra::CommandHeader> BuildIncrementCommandList(NvFence fence) {
std::vector<Tegra::CommandHeader> result{
- Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceValue, 1,
+ Tegra::BuildCommandHeader(Tegra::BufferMethods::SyncpointPayload, 1,
Tegra::SubmissionMode::Increasing),
{}};
- for (u32 count = 0; count < add_increment; ++count) {
- result.emplace_back(Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceAction, 1,
+ for (u32 count = 0; count < 2; ++count) {
+ result.emplace_back(Tegra::BuildCommandHeader(Tegra::BufferMethods::SyncpointOperation, 1,
Tegra::SubmissionMode::Increasing));
- result.emplace_back(BuildFenceAction(Tegra::GPU::FenceOperation::Increment, fence.id));
+ result.emplace_back(
+ BuildFenceAction(Tegra::Engines::Puller::FenceOperation::Increment, fence.id));
}
return result;
}
-static std::vector<Tegra::CommandHeader> BuildIncrementWithWfiCommandList(NvFence fence,
- u32 add_increment) {
+static std::vector<Tegra::CommandHeader> BuildIncrementWithWfiCommandList(NvFence fence) {
std::vector<Tegra::CommandHeader> result{
- Tegra::BuildCommandHeader(Tegra::BufferMethods::WaitForInterrupt, 1,
+ Tegra::BuildCommandHeader(Tegra::BufferMethods::WaitForIdle, 1,
Tegra::SubmissionMode::Increasing),
{}};
- const std::vector<Tegra::CommandHeader> increment{
- BuildIncrementCommandList(fence, add_increment)};
+ const std::vector<Tegra::CommandHeader> increment{BuildIncrementCommandList(fence)};
result.insert(result.end(), increment.begin(), increment.end());
@@ -234,33 +254,41 @@ NvResult nvhost_gpu::SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, std::vector<u8>
auto& gpu = system.GPU();
- params.fence_out.id = channel_fence.id;
+ std::scoped_lock lock(channel_mutex);
- if (params.flags.add_wait.Value() &&
- !syncpoint_manager.IsSyncpointExpired(params.fence_out.id, params.fence_out.value)) {
- gpu.PushGPUEntries(Tegra::CommandList{BuildWaitCommandList(params.fence_out)});
- }
+ const auto bind_id = channel_state->bind_id;
- if (params.flags.add_increment.Value() || params.flags.increment.Value()) {
- const u32 increment_value = params.flags.increment.Value() ? params.fence_out.value : 0;
- params.fence_out.value = syncpoint_manager.IncreaseSyncpoint(
- params.fence_out.id, params.AddIncrementValue() + increment_value);
- } else {
- params.fence_out.value = syncpoint_manager.GetSyncpointMax(params.fence_out.id);
+ auto& flags = params.flags;
+
+ if (flags.fence_wait.Value()) {
+ if (flags.increment_value.Value()) {
+ return NvResult::BadParameter;
+ }
+
+ if (!syncpoint_manager.IsFenceSignalled(params.fence)) {
+ gpu.PushGPUEntries(bind_id, Tegra::CommandList{BuildWaitCommandList(params.fence)});
+ }
}
- gpu.PushGPUEntries(std::move(entries));
+ params.fence.id = channel_syncpoint;
+
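+    // fence_increment queues two syncpoint increments (see BuildIncrementCommandList), and
+    // increment_value additionally raises the expected maximum by the guest-provided fence
+    // value; the returned fence carries the new syncpoint maximum.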
+ u32 increment{(flags.fence_increment.Value() != 0 ? 2 : 0) +
+ (flags.increment_value.Value() != 0 ? params.fence.value : 0)};
+ params.fence.value = syncpoint_manager.IncrementSyncpointMaxExt(channel_syncpoint, increment);
+ gpu.PushGPUEntries(bind_id, std::move(entries));
- if (params.flags.add_increment.Value()) {
- if (params.flags.suppress_wfi) {
- gpu.PushGPUEntries(Tegra::CommandList{
- BuildIncrementCommandList(params.fence_out, params.AddIncrementValue())});
+ if (flags.fence_increment.Value()) {
+ if (flags.suppress_wfi.Value()) {
+ gpu.PushGPUEntries(bind_id,
+ Tegra::CommandList{BuildIncrementCommandList(params.fence)});
} else {
- gpu.PushGPUEntries(Tegra::CommandList{
- BuildIncrementWithWfiCommandList(params.fence_out, params.AddIncrementValue())});
+ gpu.PushGPUEntries(bind_id,
+ Tegra::CommandList{BuildIncrementWithWfiCommandList(params.fence)});
}
}
+ flags.raw = 0;
+
std::memcpy(output.data(), &params, sizeof(IoctlSubmitGpfifo));
return NvResult::Success;
}
@@ -328,4 +356,19 @@ NvResult nvhost_gpu::ChannelSetTimeslice(const std::vector<u8>& input, std::vect
return NvResult::Success;
}
+Kernel::KEvent* nvhost_gpu::QueryEvent(u32 event_id) {
+ switch (event_id) {
+ case 1:
+ return sm_exception_breakpoint_int_report_event;
+ case 2:
+ return sm_exception_breakpoint_pause_report_event;
+ case 3:
+ return error_notifier_event;
+ default: {
+        LOG_CRITICAL(Service_NVDRV, "Unknown GPU Channel Event {}", event_id);
+ }
+ }
+ return nullptr;
+}
+
} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
index 8a9f7775a..1e4ecd55b 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
@@ -13,17 +13,31 @@
#include "core/hle/service/nvdrv/nvdata.h"
#include "video_core/dma_pusher.h"
+namespace Tegra {
+namespace Control {
+struct ChannelState;
+}
+} // namespace Tegra
+
namespace Service::Nvidia {
+
+namespace NvCore {
+class Container;
+class NvMap;
class SyncpointManager;
-}
+} // namespace NvCore
+
+class EventInterface;
+} // namespace Service::Nvidia
namespace Service::Nvidia::Devices {
+class nvhost_as_gpu;
class nvmap;
class nvhost_gpu final : public nvdevice {
public:
- explicit nvhost_gpu(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
- SyncpointManager& syncpoint_manager_);
+ explicit nvhost_gpu(Core::System& system_, EventInterface& events_interface_,
+ NvCore::Container& core);
~nvhost_gpu() override;
NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -36,7 +50,10 @@ public:
void OnOpen(DeviceFD fd) override;
void OnClose(DeviceFD fd) override;
+ Kernel::KEvent* QueryEvent(u32 event_id) override;
+
private:
+ friend class nvhost_as_gpu;
enum class CtxObjects : u32_le {
Ctx2D = 0x902D,
Ctx3D = 0xB197,
@@ -146,17 +163,13 @@ private:
u32_le num_entries{}; // number of fence objects being submitted
union {
u32_le raw;
- BitField<0, 1, u32_le> add_wait; // append a wait sync_point to the list
- BitField<1, 1, u32_le> add_increment; // append an increment to the list
- BitField<2, 1, u32_le> new_hw_format; // mostly ignored
- BitField<4, 1, u32_le> suppress_wfi; // suppress wait for interrupt
- BitField<8, 1, u32_le> increment; // increment the returned fence
+ BitField<0, 1, u32_le> fence_wait; // append a wait sync_point to the list
+ BitField<1, 1, u32_le> fence_increment; // append an increment to the list
+ BitField<2, 1, u32_le> new_hw_format; // mostly ignored
+ BitField<4, 1, u32_le> suppress_wfi; // suppress wait for interrupt
+ BitField<8, 1, u32_le> increment_value; // increment the returned fence
} flags;
- NvFence fence_out{}; // returned new fence object for others to wait on
-
- u32 AddIncrementValue() const {
- return flags.add_increment.Value() << 1;
- }
+ NvFence fence{}; // returned new fence object for others to wait on
};
static_assert(sizeof(IoctlSubmitGpfifo) == 16 + sizeof(NvFence),
"IoctlSubmitGpfifo is incorrect size");
@@ -191,9 +204,18 @@ private:
NvResult ChannelSetTimeout(const std::vector<u8>& input, std::vector<u8>& output);
NvResult ChannelSetTimeslice(const std::vector<u8>& input, std::vector<u8>& output);
- std::shared_ptr<nvmap> nvmap_dev;
- SyncpointManager& syncpoint_manager;
- NvFence channel_fence;
+ EventInterface& events_interface;
+ NvCore::Container& core;
+ NvCore::SyncpointManager& syncpoint_manager;
+ NvCore::NvMap& nvmap;
+ std::shared_ptr<Tegra::Control::ChannelState> channel_state;
+ u32 channel_syncpoint;
+ std::mutex channel_mutex;
+
+ // Events
+ Kernel::KEvent* sm_exception_breakpoint_int_report_event;
+ Kernel::KEvent* sm_exception_breakpoint_pause_report_event;
+ Kernel::KEvent* error_notifier_event;
};
} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
index a7385fce8..1703f9cc3 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
@@ -5,14 +5,14 @@
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
+#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/devices/nvhost_nvdec.h"
#include "video_core/renderer_base.h"
namespace Service::Nvidia::Devices {
-nvhost_nvdec::nvhost_nvdec(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
- SyncpointManager& syncpoint_manager_)
- : nvhost_nvdec_common{system_, std::move(nvmap_dev_), syncpoint_manager_} {}
+nvhost_nvdec::nvhost_nvdec(Core::System& system_, NvCore::Container& core_)
+ : nvhost_nvdec_common{system_, core_, NvCore::ChannelType::NvDec} {}
nvhost_nvdec::~nvhost_nvdec() = default;
NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -21,8 +21,9 @@ NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>&
case 0x0:
switch (command.cmd) {
case 0x1: {
- if (!fd_to_id.contains(fd)) {
- fd_to_id[fd] = next_id++;
+ auto& host1x_file = core.Host1xDeviceFile();
+ if (!host1x_file.fd_to_id.contains(fd)) {
+ host1x_file.fd_to_id[fd] = host1x_file.nvdec_next_id++;
}
return Submit(fd, input, output);
}
@@ -73,8 +74,9 @@ void nvhost_nvdec::OnOpen(DeviceFD fd) {
void nvhost_nvdec::OnClose(DeviceFD fd) {
LOG_INFO(Service_NVDRV, "NVDEC video stream ended");
- const auto iter = fd_to_id.find(fd);
- if (iter != fd_to_id.end()) {
+ auto& host1x_file = core.Host1xDeviceFile();
+ const auto iter = host1x_file.fd_to_id.find(fd);
+ if (iter != host1x_file.fd_to_id.end()) {
system.GPU().ClearCdmaInstance(iter->second);
}
system.AudioCore().SetNVDECActive(false);
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h
index 29b3e6a36..c1b4e53e8 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h
@@ -10,8 +10,7 @@ namespace Service::Nvidia::Devices {
class nvhost_nvdec final : public nvhost_nvdec_common {
public:
- explicit nvhost_nvdec(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
- SyncpointManager& syncpoint_manager_);
+ explicit nvhost_nvdec(Core::System& system_, NvCore::Container& core);
~nvhost_nvdec() override;
NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -23,9 +22,6 @@ public:
void OnOpen(DeviceFD fd) override;
void OnClose(DeviceFD fd) override;
-
-private:
- u32 next_id{};
};
} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
index 8b2cd9bf1..99eede702 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
@@ -8,10 +8,12 @@
#include "common/common_types.h"
#include "common/logging/log.h"
#include "core/core.h"
+#include "core/hle/service/nvdrv/core/container.h"
+#include "core/hle/service/nvdrv/core/nvmap.h"
+#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
#include "core/hle/service/nvdrv/devices/nvhost_nvdec_common.h"
-#include "core/hle/service/nvdrv/devices/nvmap.h"
-#include "core/hle/service/nvdrv/syncpoint_manager.h"
#include "core/memory.h"
+#include "video_core/host1x/host1x.h"
#include "video_core/memory_manager.h"
#include "video_core/renderer_base.h"
@@ -44,10 +46,22 @@ std::size_t WriteVectors(std::vector<u8>& dst, const std::vector<T>& src, std::s
}
} // Anonymous namespace
-nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
- SyncpointManager& syncpoint_manager_)
- : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)}, syncpoint_manager{syncpoint_manager_} {}
-nvhost_nvdec_common::~nvhost_nvdec_common() = default;
+nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system_, NvCore::Container& core_,
+ NvCore::ChannelType channel_type_)
+ : nvdevice{system_}, core{core_}, syncpoint_manager{core.GetSyncpointManager()},
+ nvmap{core.GetNvMapFile()}, channel_type{channel_type_} {
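+    // Reuse a channel syncpoint released by a previous nvdec/vic instance if one is queued,
+    // otherwise allocate a fresh one from the syncpoint manager.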
+ auto& syncpts_accumulated = core.Host1xDeviceFile().syncpts_accumulated;
+ if (syncpts_accumulated.empty()) {
+ channel_syncpoint = syncpoint_manager.AllocateSyncpoint(false);
+ } else {
+ channel_syncpoint = syncpts_accumulated.front();
+ syncpts_accumulated.pop_front();
+ }
+}
+
+nvhost_nvdec_common::~nvhost_nvdec_common() {
+ core.Host1xDeviceFile().syncpts_accumulated.push_back(channel_syncpoint);
+}
NvResult nvhost_nvdec_common::SetNVMAPfd(const std::vector<u8>& input) {
IoctlSetNvmapFD params{};
@@ -84,16 +98,16 @@ NvResult nvhost_nvdec_common::Submit(DeviceFD fd, const std::vector<u8>& input,
for (std::size_t i = 0; i < syncpt_increments.size(); i++) {
const SyncptIncr& syncpt_incr = syncpt_increments[i];
fence_thresholds[i] =
- syncpoint_manager.IncreaseSyncpoint(syncpt_incr.id, syncpt_incr.increments);
+ syncpoint_manager.IncrementSyncpointMaxExt(syncpt_incr.id, syncpt_incr.increments);
}
}
for (const auto& cmd_buffer : command_buffers) {
- const auto object = nvmap_dev->GetObject(cmd_buffer.memory_id);
+ const auto object = nvmap.GetHandle(cmd_buffer.memory_id);
ASSERT_OR_EXECUTE(object, return NvResult::InvalidState;);
Tegra::ChCommandHeaderList cmdlist(cmd_buffer.word_count);
- system.Memory().ReadBlock(object->addr + cmd_buffer.offset, cmdlist.data(),
+ system.Memory().ReadBlock(object->address + cmd_buffer.offset, cmdlist.data(),
cmdlist.size() * sizeof(u32));
- gpu.PushCommandBuffer(fd_to_id[fd], cmdlist);
+ gpu.PushCommandBuffer(core.Host1xDeviceFile().fd_to_id[fd], cmdlist);
}
std::memcpy(output.data(), &params, sizeof(IoctlSubmit));
// Some games expect command_buffers to be written back
@@ -112,10 +126,8 @@ NvResult nvhost_nvdec_common::GetSyncpoint(const std::vector<u8>& input, std::ve
std::memcpy(&params, input.data(), sizeof(IoctlGetSyncpoint));
LOG_DEBUG(Service_NVDRV, "called GetSyncpoint, id={}", params.param);
- if (device_syncpoints[params.param] == 0 && system.GPU().UseNvdec()) {
- device_syncpoints[params.param] = syncpoint_manager.AllocateSyncpoint();
- }
- params.value = device_syncpoints[params.param];
+ // const u32 id{NvCore::SyncpointManager::channel_syncpoints[static_cast<u32>(channel_type)]};
+ params.value = channel_syncpoint;
std::memcpy(output.data(), &params, sizeof(IoctlGetSyncpoint));
return NvResult::Success;
@@ -123,6 +135,7 @@ NvResult nvhost_nvdec_common::GetSyncpoint(const std::vector<u8>& input, std::ve
NvResult nvhost_nvdec_common::GetWaitbase(const std::vector<u8>& input, std::vector<u8>& output) {
IoctlGetWaitbase params{};
+    LOG_DEBUG(Service_NVDRV, "called GetWaitbase");
std::memcpy(&params, input.data(), sizeof(IoctlGetWaitbase));
params.value = 0; // Seems to be hard coded at 0
std::memcpy(output.data(), &params, sizeof(IoctlGetWaitbase));
@@ -136,28 +149,8 @@ NvResult nvhost_nvdec_common::MapBuffer(const std::vector<u8>& input, std::vecto
SliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer));
- auto& gpu = system.GPU();
-
for (auto& cmd_buffer : cmd_buffer_handles) {
- auto object{nvmap_dev->GetObject(cmd_buffer.map_handle)};
- if (!object) {
- LOG_ERROR(Service_NVDRV, "invalid cmd_buffer nvmap_handle={:X}", cmd_buffer.map_handle);
- std::memcpy(output.data(), &params, output.size());
- return NvResult::InvalidState;
- }
- if (object->dma_map_addr == 0) {
- // NVDEC and VIC memory is in the 32-bit address space
- // MapAllocate32 will attempt to map a lower 32-bit value in the shared gpu memory space
- const GPUVAddr low_addr = gpu.MemoryManager().MapAllocate32(object->addr, object->size);
- object->dma_map_addr = static_cast<u32>(low_addr);
- // Ensure that the dma_map_addr is indeed in the lower 32-bit address space.
- ASSERT(object->dma_map_addr == low_addr);
- }
- if (!object->dma_map_addr) {
- LOG_ERROR(Service_NVDRV, "failed to map size={}", object->size);
- } else {
- cmd_buffer.map_address = object->dma_map_addr;
- }
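+        // Pinning through the shared NvMap core returns the DMA address the handle is mapped
+        // to (mapping it on first use), replacing the old per-device MapAllocate32 path.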
+ cmd_buffer.map_address = nvmap.PinHandle(cmd_buffer.map_handle);
}
std::memcpy(output.data(), &params, sizeof(IoctlMapBuffer));
std::memcpy(output.data() + sizeof(IoctlMapBuffer), cmd_buffer_handles.data(),
@@ -167,11 +160,16 @@ NvResult nvhost_nvdec_common::MapBuffer(const std::vector<u8>& input, std::vecto
}
NvResult nvhost_nvdec_common::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output) {
- // This is intntionally stubbed.
- // Skip unmapping buffers here, as to not break the continuity of the VP9 reference frame
- // addresses, and risk invalidating data before the async GPU thread is done with it
+ IoctlMapBuffer params{};
+ std::memcpy(&params, input.data(), sizeof(IoctlMapBuffer));
+ std::vector<MapBufferEntry> cmd_buffer_handles(params.num_entries);
+
+ SliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer));
+ for (auto& cmd_buffer : cmd_buffer_handles) {
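+        // Drop the pin reference taken by MapBuffer for each submitted handle.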
+ nvmap.UnpinHandle(cmd_buffer.map_handle);
+ }
+
std::memset(output.data(), 0, output.size());
- LOG_DEBUG(Service_NVDRV, "(STUBBED) called");
return NvResult::Success;
}
@@ -182,4 +180,9 @@ NvResult nvhost_nvdec_common::SetSubmitTimeout(const std::vector<u8>& input,
return NvResult::Success;
}
+Kernel::KEvent* nvhost_nvdec_common::QueryEvent(u32 event_id) {
+    LOG_CRITICAL(Service_NVDRV, "Unknown HOST1X Event {}", event_id);
+ return nullptr;
+}
+
} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
index 12d39946d..fe76100c8 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
@@ -3,21 +3,26 @@
#pragma once
+#include <deque>
#include <vector>
#include "common/common_types.h"
#include "common/swap.h"
+#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
#include "core/hle/service/nvdrv/devices/nvdevice.h"
namespace Service::Nvidia {
-class SyncpointManager;
+
+namespace NvCore {
+class Container;
+class NvMap;
+} // namespace NvCore
namespace Devices {
-class nvmap;
class nvhost_nvdec_common : public nvdevice {
public:
- explicit nvhost_nvdec_common(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
- SyncpointManager& syncpoint_manager_);
+ explicit nvhost_nvdec_common(Core::System& system_, NvCore::Container& core,
+ NvCore::ChannelType channel_type);
~nvhost_nvdec_common() override;
protected:
@@ -110,11 +115,15 @@ protected:
NvResult UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output);
NvResult SetSubmitTimeout(const std::vector<u8>& input, std::vector<u8>& output);
- std::unordered_map<DeviceFD, u32> fd_to_id{};
+ Kernel::KEvent* QueryEvent(u32 event_id) override;
+
+ u32 channel_syncpoint;
s32_le nvmap_fd{};
u32_le submit_timeout{};
- std::shared_ptr<nvmap> nvmap_dev;
- SyncpointManager& syncpoint_manager;
+ NvCore::Container& core;
+ NvCore::SyncpointManager& syncpoint_manager;
+ NvCore::NvMap& nvmap;
+ NvCore::ChannelType channel_type;
std::array<u32, MaxSyncPoints> device_syncpoints{};
};
}; // namespace Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
index f58e8bada..73f97136e 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
@@ -4,13 +4,14 @@
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
+#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/devices/nvhost_vic.h"
#include "video_core/renderer_base.h"
namespace Service::Nvidia::Devices {
-nvhost_vic::nvhost_vic(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
- SyncpointManager& syncpoint_manager_)
- : nvhost_nvdec_common{system_, std::move(nvmap_dev_), syncpoint_manager_} {}
+
+nvhost_vic::nvhost_vic(Core::System& system_, NvCore::Container& core_)
+ : nvhost_nvdec_common{system_, core_, NvCore::ChannelType::VIC} {}
nvhost_vic::~nvhost_vic() = default;
@@ -19,11 +20,13 @@ NvResult nvhost_vic::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& i
switch (command.group) {
case 0x0:
switch (command.cmd) {
- case 0x1:
- if (!fd_to_id.contains(fd)) {
- fd_to_id[fd] = next_id++;
+ case 0x1: {
+ auto& host1x_file = core.Host1xDeviceFile();
+ if (!host1x_file.fd_to_id.contains(fd)) {
+ host1x_file.fd_to_id[fd] = host1x_file.vic_next_id++;
}
return Submit(fd, input, output);
+ }
case 0x2:
return GetSyncpoint(input, output);
case 0x3:
@@ -67,8 +70,9 @@ NvResult nvhost_vic::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& i
void nvhost_vic::OnOpen(DeviceFD fd) {}
void nvhost_vic::OnClose(DeviceFD fd) {
- const auto iter = fd_to_id.find(fd);
- if (iter != fd_to_id.end()) {
+ auto& host1x_file = core.Host1xDeviceFile();
+ const auto iter = host1x_file.fd_to_id.find(fd);
+ if (iter != host1x_file.fd_to_id.end()) {
system.GPU().ClearCdmaInstance(iter->second);
}
}
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.h b/src/core/hle/service/nvdrv/devices/nvhost_vic.h
index b41b195ae..f164caafb 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_vic.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.h
@@ -9,8 +9,7 @@ namespace Service::Nvidia::Devices {
class nvhost_vic final : public nvhost_nvdec_common {
public:
- explicit nvhost_vic(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
- SyncpointManager& syncpoint_manager_);
+ explicit nvhost_vic(Core::System& system_, NvCore::Container& core);
~nvhost_vic();
NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -22,8 +21,5 @@ public:
void OnOpen(DeviceFD fd) override;
void OnClose(DeviceFD fd) override;
-
-private:
- u32 next_id{};
};
} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp
index d8518149d..ddf273b5e 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp
@@ -2,19 +2,26 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
+#include <bit>
#include <cstring>
+#include "common/alignment.h"
#include "common/assert.h"
#include "common/logging/log.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_page_table.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/service/nvdrv/core/container.h"
+#include "core/hle/service/nvdrv/core/nvmap.h"
#include "core/hle/service/nvdrv/devices/nvmap.h"
+#include "core/memory.h"
+
+using Core::Memory::YUZU_PAGESIZE;
namespace Service::Nvidia::Devices {
-nvmap::nvmap(Core::System& system_) : nvdevice{system_} {
- // Handle 0 appears to be used when remapping, so we create a placeholder empty nvmap object to
- // represent this.
- CreateObject(0);
-}
+nvmap::nvmap(Core::System& system_, NvCore::Container& container_)
+ : nvdevice{system_}, container{container_}, file{container.GetNvMapFile()} {}
nvmap::~nvmap() = default;
@@ -62,39 +69,21 @@ NvResult nvmap::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
void nvmap::OnOpen(DeviceFD fd) {}
void nvmap::OnClose(DeviceFD fd) {}
-VAddr nvmap::GetObjectAddress(u32 handle) const {
- auto object = GetObject(handle);
- ASSERT(object);
- ASSERT(object->status == Object::Status::Allocated);
- return object->addr;
-}
-
-u32 nvmap::CreateObject(u32 size) {
- // Create a new nvmap object and obtain a handle to it.
- auto object = std::make_shared<Object>();
- object->id = next_id++;
- object->size = size;
- object->status = Object::Status::Created;
- object->refcount = 1;
-
- const u32 handle = next_handle++;
-
- handles.insert_or_assign(handle, std::move(object));
-
- return handle;
-}
-
NvResult nvmap::IocCreate(const std::vector<u8>& input, std::vector<u8>& output) {
IocCreateParams params;
std::memcpy(&params, input.data(), sizeof(params));
- LOG_DEBUG(Service_NVDRV, "size=0x{:08X}", params.size);
-
- if (!params.size) {
- LOG_ERROR(Service_NVDRV, "Size is 0");
- return NvResult::BadValue;
+ LOG_DEBUG(Service_NVDRV, "called, size=0x{:08X}", params.size);
+
+ std::shared_ptr<NvCore::NvMap::Handle> handle_description{};
+ auto result =
+ file.CreateHandle(Common::AlignUp(params.size, YUZU_PAGESIZE), handle_description);
+ if (result != NvResult::Success) {
+ LOG_CRITICAL(Service_NVDRV, "Failed to create Object");
+ return result;
}
-
- params.handle = CreateObject(params.size);
+ handle_description->orig_size = params.size; // Orig size is the unaligned size
+ params.handle = handle_description->id;
+ LOG_DEBUG(Service_NVDRV, "handle: {}, size: 0x{:X}", handle_description->id, params.size);
std::memcpy(output.data(), &params, sizeof(params));
return NvResult::Success;
@@ -103,63 +92,68 @@ NvResult nvmap::IocCreate(const std::vector<u8>& input, std::vector<u8>& output)
NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output) {
IocAllocParams params;
std::memcpy(&params, input.data(), sizeof(params));
- LOG_DEBUG(Service_NVDRV, "called, addr={:X}", params.addr);
+ LOG_DEBUG(Service_NVDRV, "called, addr={:X}", params.address);
if (!params.handle) {
- LOG_ERROR(Service_NVDRV, "Handle is 0");
+ LOG_CRITICAL(Service_NVDRV, "Handle is 0");
return NvResult::BadValue;
}
if ((params.align - 1) & params.align) {
- LOG_ERROR(Service_NVDRV, "Incorrect alignment used, alignment={:08X}", params.align);
+ LOG_CRITICAL(Service_NVDRV, "Incorrect alignment used, alignment={:08X}", params.align);
return NvResult::BadValue;
}
- const u32 min_alignment = 0x1000;
- if (params.align < min_alignment) {
- params.align = min_alignment;
+ // Force page size alignment at a minimum
+ if (params.align < YUZU_PAGESIZE) {
+ params.align = YUZU_PAGESIZE;
}
- auto object = GetObject(params.handle);
- if (!object) {
- LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
+ auto handle_description{file.GetHandle(params.handle)};
+ if (!handle_description) {
+ LOG_CRITICAL(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
return NvResult::BadValue;
}
- if (object->status == Object::Status::Allocated) {
- LOG_ERROR(Service_NVDRV, "Object is already allocated, handle={:08X}", params.handle);
+ if (handle_description->allocated) {
+ LOG_CRITICAL(Service_NVDRV, "Object is already allocated, handle={:08X}", params.handle);
return NvResult::InsufficientMemory;
}
- object->flags = params.flags;
- object->align = params.align;
- object->kind = params.kind;
- object->addr = params.addr;
- object->status = Object::Status::Allocated;
-
+ const auto result =
+ handle_description->Alloc(params.flags, params.align, params.kind, params.address);
+ if (result != NvResult::Success) {
+ LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle);
+ return result;
+ }
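+    // Lock the backing guest pages for device access so they cannot be remapped while the
+    // handle is allocated; IocFree undoes this with UnlockForDeviceAddressSpace.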
+ ASSERT(system.CurrentProcess()
+ ->PageTable()
+ .LockForDeviceAddressSpace(handle_description->address, handle_description->size)
+ .IsSuccess());
std::memcpy(output.data(), &params, sizeof(params));
- return NvResult::Success;
+ return result;
}
NvResult nvmap::IocGetId(const std::vector<u8>& input, std::vector<u8>& output) {
IocGetIdParams params;
std::memcpy(&params, input.data(), sizeof(params));
- LOG_WARNING(Service_NVDRV, "called");
+ LOG_DEBUG(Service_NVDRV, "called");
+ // See the comment in FromId for extra info on this function
if (!params.handle) {
- LOG_ERROR(Service_NVDRV, "Handle is zero");
+        LOG_CRITICAL(Service_NVDRV, "Handle is zero");
return NvResult::BadValue;
}
- auto object = GetObject(params.handle);
- if (!object) {
- LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
- return NvResult::BadValue;
+ auto handle_description{file.GetHandle(params.handle)};
+ if (!handle_description) {
+        LOG_CRITICAL(Service_NVDRV, "Handle not registered, handle={:08X}", params.handle);
+        return NvResult::AccessDenied; // This always returns EPERM, irrespective of whether the
+                                       // handle exists or not
}
- params.id = object->id;
-
+ params.id = handle_description->id;
std::memcpy(output.data(), &params, sizeof(params));
return NvResult::Success;
}
@@ -168,26 +162,29 @@ NvResult nvmap::IocFromId(const std::vector<u8>& input, std::vector<u8>& output)
IocFromIdParams params;
std::memcpy(&params, input.data(), sizeof(params));
- LOG_WARNING(Service_NVDRV, "(STUBBED) called");
+ LOG_DEBUG(Service_NVDRV, "called, id:{}", params.id);
- auto itr = std::find_if(handles.begin(), handles.end(),
- [&](const auto& entry) { return entry.second->id == params.id; });
- if (itr == handles.end()) {
- LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
+    // Handles and IDs are always the same value in nvmap; however, IDs can be used globally given
+    // the right permissions.
+    // Since we don't plan on ever supporting multiprocess, we can skip implementing handle refs,
+    // so this function just does simple validation and passes the handle id through.
+ if (!params.id) {
+ LOG_CRITICAL(Service_NVDRV, "Zero Id is invalid!");
return NvResult::BadValue;
}
- auto& object = itr->second;
- if (object->status != Object::Status::Allocated) {
- LOG_ERROR(Service_NVDRV, "Object is not allocated, handle={:08X}", params.handle);
+ auto handle_description{file.GetHandle(params.id)};
+ if (!handle_description) {
+ LOG_CRITICAL(Service_NVDRV, "Unregistered handle!");
return NvResult::BadValue;
}
- itr->second->refcount++;
-
- // Return the existing handle instead of creating a new one.
- params.handle = itr->first;
-
+ auto result = handle_description->Duplicate(false);
+ if (result != NvResult::Success) {
+ LOG_CRITICAL(Service_NVDRV, "Could not duplicate handle!");
+ return result;
+ }
+ params.handle = handle_description->id;
std::memcpy(output.data(), &params, sizeof(params));
return NvResult::Success;
}
@@ -198,35 +195,43 @@ NvResult nvmap::IocParam(const std::vector<u8>& input, std::vector<u8>& output)
IocParamParams params;
std::memcpy(&params, input.data(), sizeof(params));
- LOG_DEBUG(Service_NVDRV, "(STUBBED) called type={}", params.param);
+ LOG_DEBUG(Service_NVDRV, "called type={}", params.param);
- auto object = GetObject(params.handle);
- if (!object) {
- LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
+ if (!params.handle) {
+ LOG_CRITICAL(Service_NVDRV, "Invalid handle!");
return NvResult::BadValue;
}
- if (object->status != Object::Status::Allocated) {
- LOG_ERROR(Service_NVDRV, "Object is not allocated, handle={:08X}", params.handle);
+ auto handle_description{file.GetHandle(params.handle)};
+ if (!handle_description) {
+        LOG_CRITICAL(Service_NVDRV, "Handle not registered!");
return NvResult::BadValue;
}
- switch (static_cast<ParamTypes>(params.param)) {
- case ParamTypes::Size:
- params.result = object->size;
+ switch (params.param) {
+ case HandleParameterType::Size:
+ params.result = static_cast<u32_le>(handle_description->orig_size);
+ break;
+ case HandleParameterType::Alignment:
+ params.result = static_cast<u32_le>(handle_description->align);
break;
- case ParamTypes::Alignment:
- params.result = object->align;
+ case HandleParameterType::Base:
+ params.result = static_cast<u32_le>(-22); // posix EINVAL
break;
- case ParamTypes::Heap:
- // TODO(Subv): Seems to be a hardcoded value?
- params.result = 0x40000000;
+ case HandleParameterType::Heap:
+ if (handle_description->allocated)
+ params.result = 0x40000000;
+ else
+ params.result = 0;
break;
- case ParamTypes::Kind:
- params.result = object->kind;
+ case HandleParameterType::Kind:
+ params.result = handle_description->kind;
+ break;
+ case HandleParameterType::IsSharedMemMapped:
+ params.result = handle_description->is_shared_mem_mapped;
break;
default:
- UNIMPLEMENTED();
+ return NvResult::BadValue;
}
std::memcpy(output.data(), &params, sizeof(params));
@@ -234,46 +239,29 @@ NvResult nvmap::IocParam(const std::vector<u8>& input, std::vector<u8>& output)
}
NvResult nvmap::IocFree(const std::vector<u8>& input, std::vector<u8>& output) {
- // TODO(Subv): These flags are unconfirmed.
- enum FreeFlags {
- Freed = 0,
- NotFreedYet = 1,
- };
-
IocFreeParams params;
std::memcpy(&params, input.data(), sizeof(params));
- LOG_DEBUG(Service_NVDRV, "(STUBBED) called");
+ LOG_DEBUG(Service_NVDRV, "called");
- auto itr = handles.find(params.handle);
- if (itr == handles.end()) {
- LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
- return NvResult::BadValue;
- }
- if (!itr->second->refcount) {
- LOG_ERROR(
- Service_NVDRV,
- "There is no references to this object. The object is already freed. handle={:08X}",
- params.handle);
- return NvResult::BadValue;
+ if (!params.handle) {
+        LOG_CRITICAL(Service_NVDRV, "Attempted to free a null handle");
+ return NvResult::Success;
}
- itr->second->refcount--;
-
- params.size = itr->second->size;
-
- if (itr->second->refcount == 0) {
- params.flags = Freed;
- // The address of the nvmap is written to the output if we're finally freeing it, otherwise
- // 0 is written.
- params.address = itr->second->addr;
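+    // FreeHandle only yields freeInfo once the last reference is gone; duplicated handles
+    // simply drop a reference and report nothing back to the guest.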
+ if (auto freeInfo{file.FreeHandle(params.handle, false)}) {
+ ASSERT(system.CurrentProcess()
+ ->PageTable()
+ .UnlockForDeviceAddressSpace(freeInfo->address, freeInfo->size)
+ .IsSuccess());
+ params.address = freeInfo->address;
+ params.size = static_cast<u32>(freeInfo->size);
+ params.flags.raw = 0;
+ params.flags.map_uncached.Assign(freeInfo->was_uncached);
} else {
- params.flags = NotFreedYet;
- params.address = 0;
+        // This is possible when there are internal dups or other duplicate handles.
}
- handles.erase(params.handle);
-
std::memcpy(output.data(), &params, sizeof(params));
return NvResult::Success;
}
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.h b/src/core/hle/service/nvdrv/devices/nvmap.h
index d5360d6e5..e9bfd0358 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.h
+++ b/src/core/hle/service/nvdrv/devices/nvmap.h
@@ -9,15 +9,23 @@
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
+#include "core/hle/service/nvdrv/core/nvmap.h"
#include "core/hle/service/nvdrv/devices/nvdevice.h"
+namespace Service::Nvidia::NvCore {
+class Container;
+} // namespace Service::Nvidia::NvCore
+
namespace Service::Nvidia::Devices {
class nvmap final : public nvdevice {
public:
- explicit nvmap(Core::System& system_);
+ explicit nvmap(Core::System& system_, NvCore::Container& container);
~nvmap() override;
+ nvmap(const nvmap&) = delete;
+ nvmap& operator=(const nvmap&) = delete;
+
NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
std::vector<u8>& output) override;
NvResult Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -28,31 +36,15 @@ public:
void OnOpen(DeviceFD fd) override;
void OnClose(DeviceFD fd) override;
- /// Returns the allocated address of an nvmap object given its handle.
- VAddr GetObjectAddress(u32 handle) const;
-
- /// Represents an nvmap object.
- struct Object {
- enum class Status { Created, Allocated };
- u32 id;
- u32 size;
- u32 flags;
- u32 align;
- u8 kind;
- VAddr addr;
- Status status;
- u32 refcount;
- u32 dma_map_addr;
+ enum class HandleParameterType : u32_le {
+ Size = 1,
+ Alignment = 2,
+ Base = 3,
+ Heap = 4,
+ Kind = 5,
+ IsSharedMemMapped = 6
};
- std::shared_ptr<Object> GetObject(u32 handle) const {
- auto itr = handles.find(handle);
- if (itr != handles.end()) {
- return itr->second;
- }
- return {};
- }
-
private:
/// Id to use for the next handle that is created.
u32 next_handle = 0;
@@ -60,9 +52,6 @@ private:
/// Id to use for the next object that is created.
u32 next_id = 0;
- /// Mapping of currently allocated handles to the objects they represent.
- std::unordered_map<u32, std::shared_ptr<Object>> handles;
-
struct IocCreateParams {
// Input
u32_le size{};
@@ -83,11 +72,11 @@ private:
// Input
u32_le handle{};
u32_le heap_mask{};
- u32_le flags{};
+ NvCore::NvMap::Handle::Flags flags{};
u32_le align{};
u8 kind{};
INSERT_PADDING_BYTES(7);
- u64_le addr{};
+ u64_le address{};
};
static_assert(sizeof(IocAllocParams) == 32, "IocAllocParams has wrong size");
@@ -96,14 +85,14 @@ private:
INSERT_PADDING_BYTES(4);
u64_le address{};
u32_le size{};
- u32_le flags{};
+ NvCore::NvMap::Handle::Flags flags{};
};
static_assert(sizeof(IocFreeParams) == 24, "IocFreeParams has wrong size");
struct IocParamParams {
// Input
u32_le handle{};
- u32_le param{};
+ HandleParameterType param{};
// Output
u32_le result{};
};
@@ -117,14 +106,15 @@ private:
};
static_assert(sizeof(IocGetIdParams) == 8, "IocGetIdParams has wrong size");
- u32 CreateObject(u32 size);
-
NvResult IocCreate(const std::vector<u8>& input, std::vector<u8>& output);
NvResult IocAlloc(const std::vector<u8>& input, std::vector<u8>& output);
NvResult IocGetId(const std::vector<u8>& input, std::vector<u8>& output);
NvResult IocFromId(const std::vector<u8>& input, std::vector<u8>& output);
NvResult IocParam(const std::vector<u8>& input, std::vector<u8>& output);
NvResult IocFree(const std::vector<u8>& input, std::vector<u8>& output);
+
+ NvCore::Container& container;
+ NvCore::NvMap& file;
};
} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/nvdata.h b/src/core/hle/service/nvdrv/nvdata.h
index 1d00394c8..0e2f47075 100644
--- a/src/core/hle/service/nvdrv/nvdata.h
+++ b/src/core/hle/service/nvdrv/nvdata.h
@@ -1,5 +1,6 @@
-// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
#pragma once
@@ -78,11 +79,15 @@ enum class NvResult : u32 {
ModuleNotPresent = 0xA000E,
};
+// obtained from
+// https://github.com/skyline-emu/skyline/blob/nvdec-dev/app/src/main/cpp/skyline/services/nvdrv/devices/nvhost/ctrl.h#L47
enum class EventState {
- Free = 0,
- Registered = 1,
- Waiting = 2,
- Busy = 3,
+ Available = 0,
+ Waiting = 1,
+ Cancelling = 2,
+ Signalling = 3,
+ Signalled = 4,
+ Cancelled = 5,
};
union Ioctl {
diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp
index 756eb7453..5e7b7468f 100644
--- a/src/core/hle/service/nvdrv/nvdrv.cpp
+++ b/src/core/hle/service/nvdrv/nvdrv.cpp
@@ -1,5 +1,6 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
#include <utility>
@@ -8,6 +9,7 @@
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_writable_event.h"
+#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/devices/nvdevice.h"
#include "core/hle/service/nvdrv/devices/nvdisp_disp0.h"
#include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h"
@@ -15,17 +17,31 @@
#include "core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h"
#include "core/hle/service/nvdrv/devices/nvhost_gpu.h"
#include "core/hle/service/nvdrv/devices/nvhost_nvdec.h"
+#include "core/hle/service/nvdrv/devices/nvhost_nvdec_common.h"
#include "core/hle/service/nvdrv/devices/nvhost_nvjpg.h"
#include "core/hle/service/nvdrv/devices/nvhost_vic.h"
#include "core/hle/service/nvdrv/devices/nvmap.h"
#include "core/hle/service/nvdrv/nvdrv.h"
#include "core/hle/service/nvdrv/nvdrv_interface.h"
#include "core/hle/service/nvdrv/nvmemp.h"
-#include "core/hle/service/nvdrv/syncpoint_manager.h"
#include "core/hle/service/nvflinger/nvflinger.h"
+#include "video_core/gpu.h"
namespace Service::Nvidia {
+EventInterface::EventInterface(Module& module_) : module{module_}, guard{}, on_signal{} {}
+
+EventInterface::~EventInterface() = default;
+
+Kernel::KEvent* EventInterface::CreateEvent(std::string name) {
+ Kernel::KEvent* new_event = module.service_context.CreateEvent(std::move(name));
+ return new_event;
+}
+
+void EventInterface::FreeEvent(Kernel::KEvent* event) {
+ module.service_context.CloseEvent(event);
+}
+
void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger& nvflinger,
Core::System& system) {
auto module_ = std::make_shared<Module>(system);
@@ -38,34 +54,54 @@ void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger
}
Module::Module(Core::System& system)
- : syncpoint_manager{system.GPU()}, service_context{system, "nvdrv"} {
- for (u32 i = 0; i < MaxNvEvents; i++) {
- events_interface.events[i].event =
- service_context.CreateEvent(fmt::format("NVDRV::NvEvent_{}", i));
- events_interface.status[i] = EventState::Free;
- events_interface.registered[i] = false;
- }
- auto nvmap_dev = std::make_shared<Devices::nvmap>(system);
- devices["/dev/nvhost-as-gpu"] = std::make_shared<Devices::nvhost_as_gpu>(system, nvmap_dev);
- devices["/dev/nvhost-gpu"] =
- std::make_shared<Devices::nvhost_gpu>(system, nvmap_dev, syncpoint_manager);
- devices["/dev/nvhost-ctrl-gpu"] = std::make_shared<Devices::nvhost_ctrl_gpu>(system);
- devices["/dev/nvmap"] = nvmap_dev;
- devices["/dev/nvdisp_disp0"] = std::make_shared<Devices::nvdisp_disp0>(system, nvmap_dev);
- devices["/dev/nvhost-ctrl"] =
- std::make_shared<Devices::nvhost_ctrl>(system, events_interface, syncpoint_manager);
- devices["/dev/nvhost-nvdec"] =
- std::make_shared<Devices::nvhost_nvdec>(system, nvmap_dev, syncpoint_manager);
- devices["/dev/nvhost-nvjpg"] = std::make_shared<Devices::nvhost_nvjpg>(system);
- devices["/dev/nvhost-vic"] =
- std::make_shared<Devices::nvhost_vic>(system, nvmap_dev, syncpoint_manager);
+ : service_context{system, "nvdrv"}, events_interface{*this}, container{system.Host1x()} {
+ builders["/dev/nvhost-as-gpu"] = [this, &system](DeviceFD fd) {
+ std::shared_ptr<Devices::nvdevice> device =
+ std::make_shared<Devices::nvhost_as_gpu>(system, *this, container);
+ return open_files.emplace(fd, device).first;
+ };
+ builders["/dev/nvhost-gpu"] = [this, &system](DeviceFD fd) {
+ std::shared_ptr<Devices::nvdevice> device =
+ std::make_shared<Devices::nvhost_gpu>(system, events_interface, container);
+ return open_files.emplace(fd, device).first;
+ };
+ builders["/dev/nvhost-ctrl-gpu"] = [this, &system](DeviceFD fd) {
+ std::shared_ptr<Devices::nvdevice> device =
+ std::make_shared<Devices::nvhost_ctrl_gpu>(system, events_interface);
+ return open_files.emplace(fd, device).first;
+ };
+ builders["/dev/nvmap"] = [this, &system](DeviceFD fd) {
+ std::shared_ptr<Devices::nvdevice> device =
+ std::make_shared<Devices::nvmap>(system, container);
+ return open_files.emplace(fd, device).first;
+ };
+ builders["/dev/nvdisp_disp0"] = [this, &system](DeviceFD fd) {
+ std::shared_ptr<Devices::nvdevice> device =
+ std::make_shared<Devices::nvdisp_disp0>(system, container);
+ return open_files.emplace(fd, device).first;
+ };
+ builders["/dev/nvhost-ctrl"] = [this, &system](DeviceFD fd) {
+ std::shared_ptr<Devices::nvdevice> device =
+ std::make_shared<Devices::nvhost_ctrl>(system, events_interface, container);
+ return open_files.emplace(fd, device).first;
+ };
+ builders["/dev/nvhost-nvdec"] = [this, &system](DeviceFD fd) {
+ std::shared_ptr<Devices::nvdevice> device =
+ std::make_shared<Devices::nvhost_nvdec>(system, container);
+ return open_files.emplace(fd, device).first;
+ };
+ builders["/dev/nvhost-nvjpg"] = [this, &system](DeviceFD fd) {
+ std::shared_ptr<Devices::nvdevice> device = std::make_shared<Devices::nvhost_nvjpg>(system);
+ return open_files.emplace(fd, device).first;
+ };
+ builders["/dev/nvhost-vic"] = [this, &system](DeviceFD fd) {
+ std::shared_ptr<Devices::nvdevice> device =
+ std::make_shared<Devices::nvhost_vic>(system, container);
+ return open_files.emplace(fd, device).first;
+ };
}
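
The constructor no longer instantiates one shared object per device node; it registers a factory per path, and every Open() call builds a fresh device bound to its own file descriptor. A minimal sketch of the pattern with placeholder types (the real code uses Devices::nvdevice and DeviceFD):

    #include <functional>
    #include <memory>
    #include <string>
    #include <unordered_map>

    struct Device {};                                  // stands in for Devices::nvdevice
    using Fd = int;
    using Files = std::unordered_map<Fd, std::shared_ptr<Device>>;

    int main() {
        Files open_files;
        std::unordered_map<std::string, std::function<Files::iterator(Fd)>> builders;
        builders["/dev/nvmap"] = [&open_files](Fd fd) {
            return open_files.emplace(fd, std::make_shared<Device>()).first;
        };

        Fd next_fd = 1;
        const Fd fd = next_fd++;
        auto device = builders.at("/dev/nvmap")(fd)->second;  // one instance per open fd
        return device ? 0 : 1;
    }

Each open therefore gets independent per-fd state, which the old name-keyed devices map could not express.
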
-Module::~Module() {
- for (u32 i = 0; i < MaxNvEvents; i++) {
- service_context.CloseEvent(events_interface.events[i].event);
- }
-}
+Module::~Module() {}
NvResult Module::VerifyFD(DeviceFD fd) const {
if (fd < 0) {
@@ -82,18 +118,18 @@ NvResult Module::VerifyFD(DeviceFD fd) const {
}
DeviceFD Module::Open(const std::string& device_name) {
- if (devices.find(device_name) == devices.end()) {
+ auto it = builders.find(device_name);
+ if (it == builders.end()) {
LOG_ERROR(Service_NVDRV, "Trying to open unknown device {}", device_name);
return INVALID_NVDRV_FD;
}
- auto device = devices[device_name];
const DeviceFD fd = next_fd++;
+ auto& builder = it->second;
+ auto device = builder(fd)->second;
device->OnOpen(fd);
- open_files[fd] = std::move(device);
-
return fd;
}
@@ -168,22 +204,24 @@ NvResult Module::Close(DeviceFD fd) {
return NvResult::Success;
}
-void Module::SignalSyncpt(const u32 syncpoint_id, const u32 value) {
- for (u32 i = 0; i < MaxNvEvents; i++) {
- if (events_interface.assigned_syncpt[i] == syncpoint_id &&
- events_interface.assigned_value[i] == value) {
- events_interface.LiberateEvent(i);
- events_interface.events[i].event->GetWritableEvent().Signal();
- }
+NvResult Module::QueryEvent(DeviceFD fd, u32 event_id, Kernel::KEvent*& event) {
+ if (fd < 0) {
+ LOG_ERROR(Service_NVDRV, "Invalid DeviceFD={}!", fd);
+ return NvResult::InvalidState;
}
-}
-Kernel::KReadableEvent& Module::GetEvent(const u32 event_id) {
- return events_interface.events[event_id].event->GetReadableEvent();
-}
+ const auto itr = open_files.find(fd);
-Kernel::KWritableEvent& Module::GetEventWriteable(const u32 event_id) {
- return events_interface.events[event_id].event->GetWritableEvent();
+ if (itr == open_files.end()) {
+ LOG_ERROR(Service_NVDRV, "Could not find DeviceFD={}!", fd);
+ return NvResult::NotImplemented;
+ }
+
+ event = itr->second->QueryEvent(event_id);
+ if (!event) {
+ return NvResult::BadParameter;
+ }
+ return NvResult::Success;
}
} // namespace Service::Nvidia
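
Event lookup is now dispatched to whichever device owns the file descriptor; the Module only validates the fd and maps a null result to BadParameter. A simplified, compilable sketch of that dispatch (Event and Device stand in for Kernel::KEvent and Devices::nvdevice):

    #include <cstdint>
    #include <memory>
    #include <unordered_map>

    struct Event {};
    enum class Result { Success, NotImplemented, BadParameter };

    struct Device {
        virtual ~Device() = default;
        virtual Event* QueryEvent(std::uint32_t) { return nullptr; }  // default: no events
    };

    Result QueryEvent(const std::unordered_map<int, std::shared_ptr<Device>>& open_files,
                      int fd, std::uint32_t event_id, Event*& out) {
        const auto it = open_files.find(fd);
        if (it == open_files.end()) {
            return Result::NotImplemented;                 // unknown fd
        }
        out = it->second->QueryEvent(event_id);
        return out != nullptr ? Result::Success : Result::BadParameter;
    }

    int main() {
        std::unordered_map<int, std::shared_ptr<Device>> files{{1, std::make_shared<Device>()}};
        Event* ev = nullptr;
        return QueryEvent(files, 1, 0, ev) == Result::BadParameter ? 0 : 1;
    }
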
diff --git a/src/core/hle/service/nvdrv/nvdrv.h b/src/core/hle/service/nvdrv/nvdrv.h
index c929e5106..146d046a9 100644
--- a/src/core/hle/service/nvdrv/nvdrv.h
+++ b/src/core/hle/service/nvdrv/nvdrv.h
@@ -1,16 +1,20 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
#pragma once
+#include <functional>
+#include <list>
#include <memory>
+#include <string>
#include <unordered_map>
#include <vector>
#include "common/common_types.h"
#include "core/hle/service/kernel_helpers.h"
+#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/nvdata.h"
-#include "core/hle/service/nvdrv/syncpoint_manager.h"
#include "core/hle/service/nvflinger/ui/fence.h"
#include "core/hle/service/service.h"
@@ -28,81 +32,31 @@ class NVFlinger;
namespace Service::Nvidia {
+namespace NvCore {
+class Container;
class SyncpointManager;
+} // namespace NvCore
namespace Devices {
class nvdevice;
-}
+class nvhost_ctrl;
+} // namespace Devices
-/// Represents an Nvidia event
-struct NvEvent {
- Kernel::KEvent* event{};
- NvFence fence{};
-};
+class Module;
-struct EventInterface {
- // Mask representing currently busy events
- u64 events_mask{};
- // Each kernel event associated to an NV event
- std::array<NvEvent, MaxNvEvents> events;
- // The status of the current NVEvent
- std::array<EventState, MaxNvEvents> status{};
- // Tells if an NVEvent is registered or not
- std::array<bool, MaxNvEvents> registered{};
- // Tells the NVEvent that it has failed.
- std::array<bool, MaxNvEvents> failed{};
- // When an NVEvent is waiting on GPU interrupt, this is the sync_point
- // associated with it.
- std::array<u32, MaxNvEvents> assigned_syncpt{};
- // This is the value of the GPU interrupt for which the NVEvent is waiting
- // for.
- std::array<u32, MaxNvEvents> assigned_value{};
- // Constant to denote an unassigned syncpoint.
- static constexpr u32 unassigned_syncpt = 0xFFFFFFFF;
- std::optional<u32> GetFreeEvent() const {
- u64 mask = events_mask;
- for (u32 i = 0; i < MaxNvEvents; i++) {
- const bool is_free = (mask & 0x1) == 0;
- if (is_free) {
- if (status[i] == EventState::Registered || status[i] == EventState::Free) {
- return {i};
- }
- }
- mask = mask >> 1;
- }
- return std::nullopt;
- }
- void SetEventStatus(const u32 event_id, EventState new_status) {
- EventState old_status = status[event_id];
- if (old_status == new_status) {
- return;
- }
- status[event_id] = new_status;
- if (new_status == EventState::Registered) {
- registered[event_id] = true;
- }
- if (new_status == EventState::Waiting || new_status == EventState::Busy) {
- events_mask |= (1ULL << event_id);
- }
- }
- void RegisterEvent(const u32 event_id) {
- registered[event_id] = true;
- if (status[event_id] == EventState::Free) {
- status[event_id] = EventState::Registered;
- }
- }
- void UnregisterEvent(const u32 event_id) {
- registered[event_id] = false;
- if (status[event_id] == EventState::Registered) {
- status[event_id] = EventState::Free;
- }
- }
- void LiberateEvent(const u32 event_id) {
- status[event_id] = registered[event_id] ? EventState::Registered : EventState::Free;
- events_mask &= ~(1ULL << event_id);
- assigned_syncpt[event_id] = unassigned_syncpt;
- assigned_value[event_id] = 0;
- }
+class EventInterface {
+public:
+ explicit EventInterface(Module& module_);
+ ~EventInterface();
+
+ Kernel::KEvent* CreateEvent(std::string name);
+
+ void FreeEvent(Kernel::KEvent* event);
+
+private:
+ Module& module;
+ std::mutex guard;
+ std::list<Devices::nvhost_ctrl*> on_signal;
};
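
EventInterface shrinks from a fixed pool of MaxNvEvents pre-created kernel events to a thin factory over the module's ServiceContext, plus a guarded list that nvhost_ctrl instances can register on so syncpoint signals reach them. The exact use of on_signal is an assumption based on the member names; a sketch of that registration pattern:

    #include <list>
    #include <mutex>

    struct CtrlDevice;                       // stands in for Devices::nvhost_ctrl

    struct EventInterfaceSketch {
        std::mutex guard;
        std::list<CtrlDevice*> on_signal;

        void Register(CtrlDevice* dev) {
            std::scoped_lock lk{guard};
            on_signal.push_back(dev);
        }
        void Unregister(CtrlDevice* dev) {
            std::scoped_lock lk{guard};
            on_signal.remove(dev);
        }
    };
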
class Module final {
@@ -112,9 +66,9 @@ public:
/// Returns a pointer to one of the available devices, identified by its name.
template <typename T>
- std::shared_ptr<T> GetDevice(const std::string& name) {
- auto itr = devices.find(name);
- if (itr == devices.end())
+ std::shared_ptr<T> GetDevice(DeviceFD fd) {
+ auto itr = open_files.find(fd);
+ if (itr == open_files.end())
return nullptr;
return std::static_pointer_cast<T>(itr->second);
}
@@ -137,28 +91,27 @@ public:
/// Closes a device file descriptor and returns operation success.
NvResult Close(DeviceFD fd);
- void SignalSyncpt(const u32 syncpoint_id, const u32 value);
-
- Kernel::KReadableEvent& GetEvent(u32 event_id);
-
- Kernel::KWritableEvent& GetEventWriteable(u32 event_id);
+ NvResult QueryEvent(DeviceFD fd, u32 event_id, Kernel::KEvent*& event);
private:
- /// Manages syncpoints on the host
- SyncpointManager syncpoint_manager;
+ friend class EventInterface;
+ friend class Service::NVFlinger::NVFlinger;
/// Id to use for the next open file descriptor.
DeviceFD next_fd = 1;
+ using FilesContainerType = std::unordered_map<DeviceFD, std::shared_ptr<Devices::nvdevice>>;
/// Mapping of file descriptors to the devices they reference.
- std::unordered_map<DeviceFD, std::shared_ptr<Devices::nvdevice>> open_files;
+ FilesContainerType open_files;
- /// Mapping of device node names to their implementation.
- std::unordered_map<std::string, std::shared_ptr<Devices::nvdevice>> devices;
+ KernelHelpers::ServiceContext service_context;
EventInterface events_interface;
- KernelHelpers::ServiceContext service_context;
+ /// Manages syncpoints on the host
+ NvCore::Container container;
+
+ std::unordered_map<std::string, std::function<FilesContainerType::iterator(DeviceFD)>> builders;
};
/// Registers all NVDRV services with the specified service manager.
diff --git a/src/core/hle/service/nvdrv/nvdrv_interface.cpp b/src/core/hle/service/nvdrv/nvdrv_interface.cpp
index b5a980384..edbdfee43 100644
--- a/src/core/hle/service/nvdrv/nvdrv_interface.cpp
+++ b/src/core/hle/service/nvdrv/nvdrv_interface.cpp
@@ -1,10 +1,12 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
#include <cinttypes>
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/ipc_helpers.h"
+#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_readable_event.h"
#include "core/hle/service/nvdrv/nvdata.h"
#include "core/hle/service/nvdrv/nvdrv.h"
@@ -12,10 +14,6 @@
namespace Service::Nvidia {
-void NVDRV::SignalGPUInterruptSyncpt(const u32 syncpoint_id, const u32 value) {
- nvdrv->SignalSyncpt(syncpoint_id, value);
-}
-
void NVDRV::Open(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_NVDRV, "called");
IPC::ResponseBuilder rb{ctx, 4};
@@ -164,8 +162,7 @@ void NVDRV::Initialize(Kernel::HLERequestContext& ctx) {
void NVDRV::QueryEvent(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto fd = rp.Pop<DeviceFD>();
- const auto event_id = rp.Pop<u32>() & 0x00FF;
- LOG_WARNING(Service_NVDRV, "(STUBBED) called, fd={:X}, event_id={:X}", fd, event_id);
+ const auto event_id = rp.Pop<u32>();
if (!is_initialized) {
ServiceError(ctx, NvResult::NotInitialized);
@@ -173,24 +170,20 @@ void NVDRV::QueryEvent(Kernel::HLERequestContext& ctx) {
return;
}
- const auto nv_result = nvdrv->VerifyFD(fd);
- if (nv_result != NvResult::Success) {
- LOG_ERROR(Service_NVDRV, "Invalid FD specified DeviceFD={}!", fd);
- ServiceError(ctx, nv_result);
- return;
- }
+ Kernel::KEvent* event = nullptr;
+ NvResult result = nvdrv->QueryEvent(fd, event_id, event);
- if (event_id < MaxNvEvents) {
+ if (result == NvResult::Success) {
IPC::ResponseBuilder rb{ctx, 3, 1};
rb.Push(ResultSuccess);
- auto& event = nvdrv->GetEvent(event_id);
- event.Clear();
- rb.PushCopyObjects(event);
+ auto& readable_event = event->GetReadableEvent();
+ rb.PushCopyObjects(readable_event);
rb.PushEnum(NvResult::Success);
} else {
+ LOG_ERROR(Service_NVDRV, "Invalid event request!");
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(ResultSuccess);
- rb.PushEnum(NvResult::BadParameter);
+ rb.PushEnum(result);
}
}
diff --git a/src/core/hle/service/nvdrv/nvdrv_interface.h b/src/core/hle/service/nvdrv/nvdrv_interface.h
index cbd37b52b..cd58a4f35 100644
--- a/src/core/hle/service/nvdrv/nvdrv_interface.h
+++ b/src/core/hle/service/nvdrv/nvdrv_interface.h
@@ -18,8 +18,6 @@ public:
explicit NVDRV(Core::System& system_, std::shared_ptr<Module> nvdrv_, const char* name);
~NVDRV() override;
- void SignalGPUInterruptSyncpt(u32 syncpoint_id, u32 value);
-
private:
void Open(Kernel::HLERequestContext& ctx);
void Ioctl1(Kernel::HLERequestContext& ctx);
diff --git a/src/core/hle/service/nvdrv/syncpoint_manager.cpp b/src/core/hle/service/nvdrv/syncpoint_manager.cpp
deleted file mode 100644
index a6fa943e8..000000000
--- a/src/core/hle/service/nvdrv/syncpoint_manager.cpp
+++ /dev/null
@@ -1,38 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include "common/assert.h"
-#include "core/hle/service/nvdrv/syncpoint_manager.h"
-#include "video_core/gpu.h"
-
-namespace Service::Nvidia {
-
-SyncpointManager::SyncpointManager(Tegra::GPU& gpu_) : gpu{gpu_} {}
-
-SyncpointManager::~SyncpointManager() = default;
-
-u32 SyncpointManager::RefreshSyncpoint(u32 syncpoint_id) {
- syncpoints[syncpoint_id].min = gpu.GetSyncpointValue(syncpoint_id);
- return GetSyncpointMin(syncpoint_id);
-}
-
-u32 SyncpointManager::AllocateSyncpoint() {
- for (u32 syncpoint_id = 1; syncpoint_id < MaxSyncPoints; syncpoint_id++) {
- if (!syncpoints[syncpoint_id].is_allocated) {
- syncpoints[syncpoint_id].is_allocated = true;
- return syncpoint_id;
- }
- }
- ASSERT_MSG(false, "No more available syncpoints!");
- return {};
-}
-
-u32 SyncpointManager::IncreaseSyncpoint(u32 syncpoint_id, u32 value) {
- for (u32 index = 0; index < value; ++index) {
- syncpoints[syncpoint_id].max.fetch_add(1, std::memory_order_relaxed);
- }
-
- return GetSyncpointMax(syncpoint_id);
-}
-
-} // namespace Service::Nvidia
diff --git a/src/core/hle/service/nvdrv/syncpoint_manager.h b/src/core/hle/service/nvdrv/syncpoint_manager.h
deleted file mode 100644
index 7f080f76e..000000000
--- a/src/core/hle/service/nvdrv/syncpoint_manager.h
+++ /dev/null
@@ -1,84 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#pragma once
-
-#include <array>
-#include <atomic>
-
-#include "common/common_types.h"
-#include "core/hle/service/nvdrv/nvdata.h"
-
-namespace Tegra {
-class GPU;
-}
-
-namespace Service::Nvidia {
-
-class SyncpointManager final {
-public:
- explicit SyncpointManager(Tegra::GPU& gpu_);
- ~SyncpointManager();
-
- /**
- * Returns true if the specified syncpoint is expired for the given value.
- * @param syncpoint_id Syncpoint ID to check.
- * @param value Value to check against the specified syncpoint.
- * @returns True if the specified syncpoint is expired for the given value, otherwise False.
- */
- bool IsSyncpointExpired(u32 syncpoint_id, u32 value) const {
- return (GetSyncpointMax(syncpoint_id) - value) >= (GetSyncpointMin(syncpoint_id) - value);
- }
-
- /**
- * Gets the lower bound for the specified syncpoint.
- * @param syncpoint_id Syncpoint ID to get the lower bound for.
- * @returns The lower bound for the specified syncpoint.
- */
- u32 GetSyncpointMin(u32 syncpoint_id) const {
- return syncpoints.at(syncpoint_id).min.load(std::memory_order_relaxed);
- }
-
- /**
- * Gets the upper bound for the specified syncpoint.
- * @param syncpoint_id Syncpoint ID to get the upper bound for.
- * @returns The upper bound for the specified syncpoint.
- */
- u32 GetSyncpointMax(u32 syncpoint_id) const {
- return syncpoints.at(syncpoint_id).max.load(std::memory_order_relaxed);
- }
-
- /**
- * Refreshes the minimum value for the specified syncpoint.
- * @param syncpoint_id Syncpoint ID to be refreshed.
- * @returns The new syncpoint minimum value.
- */
- u32 RefreshSyncpoint(u32 syncpoint_id);
-
- /**
- * Allocates a new syncpoint.
- * @returns The syncpoint ID for the newly allocated syncpoint.
- */
- u32 AllocateSyncpoint();
-
- /**
- * Increases the maximum value for the specified syncpoint.
- * @param syncpoint_id Syncpoint ID to be increased.
- * @param value Value to increase the specified syncpoint by.
- * @returns The new syncpoint maximum value.
- */
- u32 IncreaseSyncpoint(u32 syncpoint_id, u32 value);
-
-private:
- struct Syncpoint {
- std::atomic<u32> min;
- std::atomic<u32> max;
- std::atomic<bool> is_allocated;
- };
-
- std::array<Syncpoint, MaxSyncPoints> syncpoints{};
-
- Tegra::GPU& gpu;
-};
-
-} // namespace Service::Nvidia
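
The deleted manager's expiry check relied on unsigned wrap-around so it keeps working after the 32-bit counters overflow: a fence value is expired when (max - value) >= (min - value). This is worth noting only to document the arithmetic the replacement in video_core/host1x takes over; a small worked example:

    #include <cassert>
    #include <cstdint>

    bool IsExpired(std::uint32_t min, std::uint32_t max, std::uint32_t value) {
        return (max - value) >= (min - value);
    }

    int main() {
        assert(IsExpired(10, 12, 9));          // min already passed the fence value
        assert(IsExpired(5, 7, 0xFFFFFFF0u));  // counters wrapped past 0, still expired
        assert(!IsExpired(10, 20, 15));        // fence value still ahead of min
    }
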
diff --git a/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp b/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp
index 4b3d5efd6..1ce67c771 100644
--- a/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp
+++ b/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp
@@ -5,15 +5,18 @@
// https://cs.android.com/android/platform/superproject/+/android-5.1.1_r38:frameworks/native/libs/gui/BufferQueueConsumer.cpp
#include "common/logging/log.h"
+#include "core/hle/service/nvdrv/core/nvmap.h"
#include "core/hle/service/nvflinger/buffer_item.h"
#include "core/hle/service/nvflinger/buffer_queue_consumer.h"
#include "core/hle/service/nvflinger/buffer_queue_core.h"
#include "core/hle/service/nvflinger/producer_listener.h"
+#include "core/hle/service/nvflinger/ui/graphic_buffer.h"
namespace Service::android {
-BufferQueueConsumer::BufferQueueConsumer(std::shared_ptr<BufferQueueCore> core_)
- : core{std::move(core_)}, slots{core->slots} {}
+BufferQueueConsumer::BufferQueueConsumer(std::shared_ptr<BufferQueueCore> core_,
+ Service::Nvidia::NvCore::NvMap& nvmap_)
+ : core{std::move(core_)}, slots{core->slots}, nvmap(nvmap_) {}
BufferQueueConsumer::~BufferQueueConsumer() = default;
@@ -133,6 +136,8 @@ Status BufferQueueConsumer::ReleaseBuffer(s32 slot, u64 frame_number, const Fenc
slots[slot].buffer_state = BufferState::Free;
+ nvmap.FreeHandle(slots[slot].graphic_buffer->BufferId(), true);
+
listener = core->connected_producer_listener;
LOG_DEBUG(Service_NVFlinger, "releasing slot {}", slot);
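
Releasing a slot now drops an nvmap reference on the buffer's backing handle; this pairs with the DuplicateHandle call QueueBuffer gains in buffer_queue_producer.cpp below, so the mapping stays alive exactly as long as a frame is in flight. A minimal sketch of that pairing (the real bookkeeping lives in NvCore::NvMap):

    #include <cassert>
    #include <cstdint>
    #include <unordered_map>

    struct HandleTable {
        std::unordered_map<std::uint32_t, int> refs;
        void Duplicate(std::uint32_t h) { ++refs[h]; }        // QueueBuffer
        void Free(std::uint32_t h) {                          // ReleaseBuffer
            if (--refs[h] == 0) refs.erase(h);                // last reference gone
        }
    };

    int main() {
        HandleTable nvmap;
        nvmap.refs[42] = 1;     // handle created by the guest application
        nvmap.Duplicate(42);    // frame queued: hold a reference
        nvmap.Free(42);         // frame consumed: drop it again
        assert(nvmap.refs.at(42) == 1);
    }
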
diff --git a/src/core/hle/service/nvflinger/buffer_queue_consumer.h b/src/core/hle/service/nvflinger/buffer_queue_consumer.h
index b598c314f..4ec06ca13 100644
--- a/src/core/hle/service/nvflinger/buffer_queue_consumer.h
+++ b/src/core/hle/service/nvflinger/buffer_queue_consumer.h
@@ -13,6 +13,10 @@
#include "core/hle/service/nvflinger/buffer_queue_defs.h"
#include "core/hle/service/nvflinger/status.h"
+namespace Service::Nvidia::NvCore {
+class NvMap;
+} // namespace Service::Nvidia::NvCore
+
namespace Service::android {
class BufferItem;
@@ -21,7 +25,8 @@ class IConsumerListener;
class BufferQueueConsumer final {
public:
- explicit BufferQueueConsumer(std::shared_ptr<BufferQueueCore> core_);
+ explicit BufferQueueConsumer(std::shared_ptr<BufferQueueCore> core_,
+ Service::Nvidia::NvCore::NvMap& nvmap_);
~BufferQueueConsumer();
Status AcquireBuffer(BufferItem* out_buffer, std::chrono::nanoseconds expected_present);
@@ -32,6 +37,7 @@ public:
private:
std::shared_ptr<BufferQueueCore> core;
BufferQueueDefs::SlotsType& slots;
+ Service::Nvidia::NvCore::NvMap& nvmap;
};
} // namespace Service::android
diff --git a/src/core/hle/service/nvflinger/buffer_queue_producer.cpp b/src/core/hle/service/nvflinger/buffer_queue_producer.cpp
index 337431488..d4ab23a10 100644
--- a/src/core/hle/service/nvflinger/buffer_queue_producer.cpp
+++ b/src/core/hle/service/nvflinger/buffer_queue_producer.cpp
@@ -14,7 +14,7 @@
#include "core/hle/kernel/k_writable_event.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/service/kernel_helpers.h"
-#include "core/hle/service/nvdrv/nvdrv.h"
+#include "core/hle/service/nvdrv/core/nvmap.h"
#include "core/hle/service/nvflinger/buffer_queue_core.h"
#include "core/hle/service/nvflinger/buffer_queue_producer.h"
#include "core/hle/service/nvflinger/consumer_listener.h"
@@ -26,8 +26,10 @@
namespace Service::android {
BufferQueueProducer::BufferQueueProducer(Service::KernelHelpers::ServiceContext& service_context_,
- std::shared_ptr<BufferQueueCore> buffer_queue_core_)
- : service_context{service_context_}, core{std::move(buffer_queue_core_)}, slots(core->slots) {
+ std::shared_ptr<BufferQueueCore> buffer_queue_core_,
+ Service::Nvidia::NvCore::NvMap& nvmap_)
+ : service_context{service_context_}, core{std::move(buffer_queue_core_)}, slots(core->slots),
+ nvmap(nvmap_) {
buffer_wait_event = service_context.CreateEvent("BufferQueue:WaitEvent");
}
@@ -530,6 +532,8 @@ Status BufferQueueProducer::QueueBuffer(s32 slot, const QueueBufferInput& input,
item.is_droppable = core->dequeue_buffer_cannot_block || async;
item.swap_interval = swap_interval;
+ nvmap.DuplicateHandle(item.graphic_buffer->BufferId(), true);
+
sticky_transform = sticky_transform_;
if (core->queue.empty()) {
diff --git a/src/core/hle/service/nvflinger/buffer_queue_producer.h b/src/core/hle/service/nvflinger/buffer_queue_producer.h
index 42d4722dc..0ba03a568 100644
--- a/src/core/hle/service/nvflinger/buffer_queue_producer.h
+++ b/src/core/hle/service/nvflinger/buffer_queue_producer.h
@@ -31,6 +31,10 @@ namespace Service::KernelHelpers {
class ServiceContext;
} // namespace Service::KernelHelpers
+namespace Service::Nvidia::NvCore {
+class NvMap;
+} // namespace Service::Nvidia::NvCore
+
namespace Service::android {
class BufferQueueCore;
@@ -39,7 +43,8 @@ class IProducerListener;
class BufferQueueProducer final : public IBinder {
public:
explicit BufferQueueProducer(Service::KernelHelpers::ServiceContext& service_context_,
- std::shared_ptr<BufferQueueCore> buffer_queue_core_);
+ std::shared_ptr<BufferQueueCore> buffer_queue_core_,
+ Service::Nvidia::NvCore::NvMap& nvmap_);
~BufferQueueProducer();
void Transact(Kernel::HLERequestContext& ctx, android::TransactionId code, u32 flags) override;
@@ -78,6 +83,8 @@ private:
s32 next_callback_ticket{};
s32 current_callback_ticket{};
std::condition_variable_any callback_condition;
+
+ Service::Nvidia::NvCore::NvMap& nvmap;
};
} // namespace Service::android
diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp
index 4246e5e25..aa14d2cbc 100644
--- a/src/core/hle/service/nvflinger/nvflinger.cpp
+++ b/src/core/hle/service/nvflinger/nvflinger.cpp
@@ -24,6 +24,8 @@
#include "core/hle/service/vi/layer/vi_layer.h"
#include "core/hle/service/vi/vi_results.h"
#include "video_core/gpu.h"
+#include "video_core/host1x/host1x.h"
+#include "video_core/host1x/syncpoint_manager.h"
namespace Service::NVFlinger {
@@ -105,10 +107,15 @@ NVFlinger::~NVFlinger() {
display.GetLayer(layer).Core().NotifyShutdown();
}
}
+
+ if (nvdrv) {
+ nvdrv->Close(disp_fd);
+ }
}
void NVFlinger::SetNVDrvInstance(std::shared_ptr<Nvidia::Module> instance) {
nvdrv = std::move(instance);
+ disp_fd = nvdrv->Open("/dev/nvdisp_disp0");
}
std::optional<u64> NVFlinger::OpenDisplay(std::string_view name) {
@@ -142,7 +149,7 @@ std::optional<u64> NVFlinger::CreateLayer(u64 display_id) {
void NVFlinger::CreateLayerAtId(VI::Display& display, u64 layer_id) {
const auto buffer_id = next_buffer_queue_id++;
- display.CreateLayer(layer_id, buffer_id);
+ display.CreateLayer(layer_id, buffer_id, nvdrv->container);
}
void NVFlinger::CloseLayer(u64 layer_id) {
@@ -262,30 +269,24 @@ void NVFlinger::Compose() {
return; // We are likely shutting down
}
- auto& gpu = system.GPU();
- const auto& multi_fence = buffer.fence;
- guard->unlock();
- for (u32 fence_id = 0; fence_id < multi_fence.num_fences; fence_id++) {
- const auto& fence = multi_fence.fences[fence_id];
- gpu.WaitFence(fence.id, fence.value);
- }
- guard->lock();
-
- MicroProfileFlip();
-
// Now send the buffer to the GPU for drawing.
// TODO(Subv): Support more than just disp0. The display device selection is probably based
// on which display we're drawing (Default, Internal, External, etc)
- auto nvdisp = nvdrv->GetDevice<Nvidia::Devices::nvdisp_disp0>("/dev/nvdisp_disp0");
+ auto nvdisp = nvdrv->GetDevice<Nvidia::Devices::nvdisp_disp0>(disp_fd);
ASSERT(nvdisp);
+ guard->unlock();
Common::Rectangle<int> crop_rect{
static_cast<int>(buffer.crop.Left()), static_cast<int>(buffer.crop.Top()),
static_cast<int>(buffer.crop.Right()), static_cast<int>(buffer.crop.Bottom())};
nvdisp->flip(igbp_buffer.BufferId(), igbp_buffer.Offset(), igbp_buffer.ExternalFormat(),
igbp_buffer.Width(), igbp_buffer.Height(), igbp_buffer.Stride(),
- static_cast<android::BufferTransformFlags>(buffer.transform), crop_rect);
+ static_cast<android::BufferTransformFlags>(buffer.transform), crop_rect,
+ buffer.fence.fences, buffer.fence.num_fences);
+
+ MicroProfileFlip();
+ guard->lock();
swap_interval = buffer.swap_interval;
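
Compose no longer blocks the NVFlinger thread on each fence before presenting; the fence array is handed to nvdisp_disp0::flip so the GPU side can resolve the waits itself, and the guard is only released around that call. A sketch of the shape of that change, with generic names rather than the real yuzu interfaces:

    #include <cstdint>
    #include <span>
    #include <vector>

    struct Fence { std::uint32_t id; std::uint32_t value; };

    // Old shape: the presenter stalled on every fence itself.
    void PresentBlocking(std::span<const Fence> fences, auto&& wait, auto&& flip) {
        for (const Fence& f : fences) wait(f);
        flip();
    }

    // New shape: forward the fences and let the display/GPU path resolve them.
    void PresentAsync(std::span<const Fence> fences, auto&& flip_with_fences) {
        flip_with_fences(fences);
    }

    int main() {
        std::vector<Fence> fences{{1, 5}, {2, 7}};
        PresentAsync(fences, [](std::span<const Fence>) { /* queue the flip */ });
        PresentBlocking(fences, [](const Fence&) {}, [] {});
    }
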
diff --git a/src/core/hle/service/nvflinger/nvflinger.h b/src/core/hle/service/nvflinger/nvflinger.h
index 3bbe5d92b..b62615de2 100644
--- a/src/core/hle/service/nvflinger/nvflinger.h
+++ b/src/core/hle/service/nvflinger/nvflinger.h
@@ -116,6 +116,7 @@ private:
void SplitVSync(std::stop_token stop_token);
std::shared_ptr<Nvidia::Module> nvdrv;
+ s32 disp_fd;
std::list<VI::Display> displays;
diff --git a/src/core/hle/service/vi/display/vi_display.cpp b/src/core/hle/service/vi/display/vi_display.cpp
index aa49aa775..288aafaaf 100644
--- a/src/core/hle/service/vi/display/vi_display.cpp
+++ b/src/core/hle/service/vi/display/vi_display.cpp
@@ -12,6 +12,7 @@
#include "core/hle/kernel/k_readable_event.h"
#include "core/hle/kernel/k_writable_event.h"
#include "core/hle/service/kernel_helpers.h"
+#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvflinger/buffer_item_consumer.h"
#include "core/hle/service/nvflinger/buffer_queue_consumer.h"
#include "core/hle/service/nvflinger/buffer_queue_core.h"
@@ -29,11 +30,13 @@ struct BufferQueue {
std::unique_ptr<android::BufferQueueConsumer> consumer;
};
-static BufferQueue CreateBufferQueue(KernelHelpers::ServiceContext& service_context) {
+static BufferQueue CreateBufferQueue(KernelHelpers::ServiceContext& service_context,
+ Service::Nvidia::NvCore::NvMap& nvmap) {
auto buffer_queue_core = std::make_shared<android::BufferQueueCore>();
- return {buffer_queue_core,
- std::make_unique<android::BufferQueueProducer>(service_context, buffer_queue_core),
- std::make_unique<android::BufferQueueConsumer>(buffer_queue_core)};
+ return {
+ buffer_queue_core,
+ std::make_unique<android::BufferQueueProducer>(service_context, buffer_queue_core, nvmap),
+ std::make_unique<android::BufferQueueConsumer>(buffer_queue_core, nvmap)};
}
Display::Display(u64 id, std::string name_,
@@ -74,10 +77,11 @@ void Display::SignalVSyncEvent() {
vsync_event->GetWritableEvent().Signal();
}
-void Display::CreateLayer(u64 layer_id, u32 binder_id) {
+void Display::CreateLayer(u64 layer_id, u32 binder_id,
+ Service::Nvidia::NvCore::Container& nv_core) {
ASSERT_MSG(layers.empty(), "Only one layer is supported per display at the moment");
- auto [core, producer, consumer] = CreateBufferQueue(service_context);
+ auto [core, producer, consumer] = CreateBufferQueue(service_context, nv_core.GetNvMapFile());
auto buffer_item_consumer = std::make_shared<android::BufferItemConsumer>(std::move(consumer));
buffer_item_consumer->Connect(false);
diff --git a/src/core/hle/service/vi/display/vi_display.h b/src/core/hle/service/vi/display/vi_display.h
index 8dbb0ef80..33d5f398c 100644
--- a/src/core/hle/service/vi/display/vi_display.h
+++ b/src/core/hle/service/vi/display/vi_display.h
@@ -27,6 +27,11 @@ namespace Service::NVFlinger {
class HosBinderDriverServer;
}
+namespace Service::Nvidia::NvCore {
+class Container;
+class NvMap;
+} // namespace Service::Nvidia::NvCore
+
namespace Service::VI {
class Layer;
@@ -93,7 +98,7 @@ public:
/// @param layer_id The ID to assign to the created layer.
/// @param binder_id The ID assigned to the buffer queue.
///
- void CreateLayer(u64 layer_id, u32 binder_id);
+ void CreateLayer(u64 layer_id, u32 binder_id, Service::Nvidia::NvCore::Container& core);
/// Closes and removes a layer from this display with the given ID.
///
diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp
index f083811ec..9c917cacf 100644
--- a/src/core/hle/service/vi/vi.cpp
+++ b/src/core/hle/service/vi/vi.cpp
@@ -58,6 +58,7 @@ static_assert(sizeof(DisplayInfo) == 0x60, "DisplayInfo has wrong size");
class NativeWindow final {
public:
constexpr explicit NativeWindow(u32 id_) : id{id_} {}
+ constexpr explicit NativeWindow(const NativeWindow& other) = default;
private:
const u32 magic = 2;
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 34ad7cadd..2ac792566 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -551,6 +551,11 @@ struct Memory::Impl {
[]() {});
}
+ [[nodiscard]] u8* GetPointerSilent(const VAddr vaddr) const {
+ return GetPointerImpl(
+ vaddr, []() {}, []() {});
+ }
+
/**
* Reads a particular data type out of memory at the given virtual address.
*
@@ -686,6 +691,10 @@ u8* Memory::GetPointer(VAddr vaddr) {
return impl->GetPointer(vaddr);
}
+u8* Memory::GetPointerSilent(VAddr vaddr) {
+ return impl->GetPointerSilent(vaddr);
+}
+
const u8* Memory::GetPointer(VAddr vaddr) const {
return impl->GetPointer(vaddr);
}
diff --git a/src/core/memory.h b/src/core/memory.h
index a11ff8766..81eac448b 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -114,6 +114,7 @@ public:
* If the address is not valid, nullptr will be returned.
*/
u8* GetPointer(VAddr vaddr);
+ u8* GetPointerSilent(VAddr vaddr);
template <typename T>
T* GetPointer(VAddr vaddr) {
diff --git a/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp b/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp
index 578bc8c1b..ce42475d4 100644
--- a/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp
+++ b/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp
@@ -964,9 +964,9 @@ private:
demote_endif_node.type = Type::EndIf;
demote_endif_node.data.end_if.merge = return_block_it->data.block;
- asl.insert(return_block_it, demote_endif_node);
- asl.insert(return_block_it, demote_node);
- asl.insert(return_block_it, demote_if_node);
+ const auto next_it_1 = asl.insert(return_block_it, demote_endif_node);
+ const auto next_it_2 = asl.insert(next_it_1, demote_node);
+ asl.insert(next_it_2, demote_if_node);
}
ObjectPool<Statement>& stmt_pool;
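
The shader fix above hinges on std::list::insert inserting before the given iterator and returning an iterator to the new element: inserting all three nodes before the same iterator reverses their relative order (the EndIf ended up ahead of the If), while chaining the returned iterators yields If -> Demote -> EndIf in front of the return block. A small demonstration:

    #include <cstdio>
    #include <list>

    int main() {
        // 0 stands in for the return block; 1/2/3 for the If/Demote/EndIf nodes.
        std::list<int> broken{0};
        auto pos = broken.begin();
        broken.insert(pos, 3);            // EndIf
        broken.insert(pos, 2);            // Demote
        broken.insert(pos, 1);            // If
        // broken: 3 2 1 0 -> the EndIf sits before the If.

        std::list<int> fixed{0};
        auto it1 = fixed.insert(fixed.begin(), 3);  // EndIf, right before the return block
        auto it2 = fixed.insert(it1, 2);            // Demote, before the EndIf
        fixed.insert(it2, 1);                       // If, before the Demote
        // fixed: 1 2 3 0 -> If, Demote, EndIf, return block.

        for (int v : broken) std::printf("%d ", v);
        std::printf("\n");
        for (int v : fixed) std::printf("%d ", v);
        std::printf("\n");
    }
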
diff --git a/src/shader_recompiler/ir_opt/texture_pass.cpp b/src/shader_recompiler/ir_opt/texture_pass.cpp
index 597112ba4..e8be58357 100644
--- a/src/shader_recompiler/ir_opt/texture_pass.cpp
+++ b/src/shader_recompiler/ir_opt/texture_pass.cpp
@@ -19,8 +19,10 @@ namespace {
struct ConstBufferAddr {
u32 index;
u32 offset;
+ u32 shift_left;
u32 secondary_index;
u32 secondary_offset;
+ u32 secondary_shift_left;
IR::U32 dynamic_offset;
u32 count;
bool has_secondary;
@@ -172,19 +174,41 @@ bool IsTextureInstruction(const IR::Inst& inst) {
return IndexedInstruction(inst) != IR::Opcode::Void;
}
-std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst);
+std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst, Environment& env);
-std::optional<ConstBufferAddr> Track(const IR::Value& value) {
- return IR::BreadthFirstSearch(value, TryGetConstBuffer);
+std::optional<ConstBufferAddr> Track(const IR::Value& value, Environment& env) {
+ return IR::BreadthFirstSearch(
+ value, [&env](const IR::Inst* inst) { return TryGetConstBuffer(inst, env); });
}
-std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst) {
+std::optional<u32> TryGetConstant(IR::Value& value, Environment& env) {
+ const IR::Inst* inst = value.InstRecursive();
+ if (inst->GetOpcode() != IR::Opcode::GetCbufU32) {
+ return std::nullopt;
+ }
+ const IR::Value index{inst->Arg(0)};
+ const IR::Value offset{inst->Arg(1)};
+ if (!index.IsImmediate()) {
+ return std::nullopt;
+ }
+ if (!offset.IsImmediate()) {
+ return std::nullopt;
+ }
+ const auto index_number = index.U32();
+ if (index_number != 1) {
+ return std::nullopt;
+ }
+ const auto offset_number = offset.U32();
+ return env.ReadCbufValue(index_number, offset_number);
+}
+
+std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst, Environment& env) {
switch (inst->GetOpcode()) {
default:
return std::nullopt;
case IR::Opcode::BitwiseOr32: {
- std::optional lhs{Track(inst->Arg(0))};
- std::optional rhs{Track(inst->Arg(1))};
+ std::optional lhs{Track(inst->Arg(0), env)};
+ std::optional rhs{Track(inst->Arg(1), env)};
if (!lhs || !rhs) {
return std::nullopt;
}
@@ -194,19 +218,62 @@ std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst) {
if (lhs->count > 1 || rhs->count > 1) {
return std::nullopt;
}
- if (lhs->index > rhs->index || lhs->offset > rhs->offset) {
+ if (lhs->shift_left > 0 || lhs->index > rhs->index || lhs->offset > rhs->offset) {
std::swap(lhs, rhs);
}
return ConstBufferAddr{
.index = lhs->index,
.offset = lhs->offset,
+ .shift_left = lhs->shift_left,
.secondary_index = rhs->index,
.secondary_offset = rhs->offset,
+ .secondary_shift_left = rhs->shift_left,
.dynamic_offset = {},
.count = 1,
.has_secondary = true,
};
}
+ case IR::Opcode::ShiftLeftLogical32: {
+ const IR::Value shift{inst->Arg(1)};
+ if (!shift.IsImmediate()) {
+ return std::nullopt;
+ }
+ std::optional lhs{Track(inst->Arg(0), env)};
+ if (lhs) {
+ lhs->shift_left = shift.U32();
+ }
+ return lhs;
+ break;
+ }
+ case IR::Opcode::BitwiseAnd32: {
+ IR::Value op1{inst->Arg(0)};
+ IR::Value op2{inst->Arg(1)};
+ if (op1.IsImmediate()) {
+ std::swap(op1, op2);
+ }
+ if (!op2.IsImmediate() && !op1.IsImmediate()) {
+ do {
+ auto try_index = TryGetConstant(op1, env);
+ if (try_index) {
+ op1 = op2;
+ op2 = IR::Value{*try_index};
+ break;
+ }
+ auto try_index_2 = TryGetConstant(op2, env);
+ if (try_index_2) {
+ op2 = IR::Value{*try_index_2};
+ break;
+ }
+ return std::nullopt;
+ } while (false);
+ }
+ std::optional lhs{Track(op1, env)};
+ if (lhs) {
+ lhs->shift_left = static_cast<u32>(std::countr_zero(op2.U32()));
+ }
+ return lhs;
+ break;
+ }
case IR::Opcode::GetCbufU32x2:
case IR::Opcode::GetCbufU32:
break;
@@ -222,8 +289,10 @@ std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst) {
return ConstBufferAddr{
.index = index.U32(),
.offset = offset.U32(),
+ .shift_left = 0,
.secondary_index = 0,
.secondary_offset = 0,
+ .secondary_shift_left = 0,
.dynamic_offset = {},
.count = 1,
.has_secondary = false,
@@ -247,8 +316,10 @@ std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst) {
return ConstBufferAddr{
.index = index.U32(),
.offset = base_offset,
+ .shift_left = 0,
.secondary_index = 0,
.secondary_offset = 0,
+ .secondary_shift_left = 0,
.dynamic_offset = dynamic_offset,
.count = 8,
.has_secondary = false,
@@ -258,7 +329,7 @@ std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst) {
TextureInst MakeInst(Environment& env, IR::Block* block, IR::Inst& inst) {
ConstBufferAddr addr;
if (IsBindless(inst)) {
- const std::optional<ConstBufferAddr> track_addr{Track(inst.Arg(0))};
+ const std::optional<ConstBufferAddr> track_addr{Track(inst.Arg(0), env)};
if (!track_addr) {
throw NotImplementedException("Failed to track bindless texture constant buffer");
}
@@ -267,8 +338,10 @@ TextureInst MakeInst(Environment& env, IR::Block* block, IR::Inst& inst) {
addr = ConstBufferAddr{
.index = env.TextureBoundBuffer(),
.offset = inst.Arg(0).U32(),
+ .shift_left = 0,
.secondary_index = 0,
.secondary_offset = 0,
+ .secondary_shift_left = 0,
.dynamic_offset = {},
.count = 1,
.has_secondary = false,
@@ -284,8 +357,9 @@ TextureInst MakeInst(Environment& env, IR::Block* block, IR::Inst& inst) {
TextureType ReadTextureType(Environment& env, const ConstBufferAddr& cbuf) {
const u32 secondary_index{cbuf.has_secondary ? cbuf.secondary_index : cbuf.index};
const u32 secondary_offset{cbuf.has_secondary ? cbuf.secondary_offset : cbuf.offset};
- const u32 lhs_raw{env.ReadCbufValue(cbuf.index, cbuf.offset)};
- const u32 rhs_raw{env.ReadCbufValue(secondary_index, secondary_offset)};
+ const u32 lhs_raw{env.ReadCbufValue(cbuf.index, cbuf.offset) << cbuf.shift_left};
+ const u32 rhs_raw{env.ReadCbufValue(secondary_index, secondary_offset)
+ << cbuf.secondary_shift_left};
return env.ReadTextureType(lhs_raw | rhs_raw);
}
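
The texture pass can now track bindless handles that are assembled from two constant-buffer reads with a shift applied to one side; the recorded shift is reapplied when the descriptor is read back, as in the lhs_raw/rhs_raw combination above. A worked example of the reconstruction (the concrete field layout is illustrative, not the hardware encoding):

    #include <cassert>
    #include <cstdint>

    int main() {
        const std::uint32_t lhs_raw = 0x12;       // value read from the first cbuf slot
        const std::uint32_t rhs_raw = 0x03;       // value read from the second cbuf slot
        const std::uint32_t shift_left = 20;      // recorded from ShiftLeftLogical32/BitwiseAnd32
        const std::uint32_t handle = (lhs_raw << shift_left) | rhs_raw;
        assert(handle == 0x01200003u);
    }
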
@@ -487,8 +561,10 @@ void TexturePass(Environment& env, IR::Program& program) {
.has_secondary = cbuf.has_secondary,
.cbuf_index = cbuf.index,
.cbuf_offset = cbuf.offset,
+ .shift_left = cbuf.shift_left,
.secondary_cbuf_index = cbuf.secondary_index,
.secondary_cbuf_offset = cbuf.secondary_offset,
+ .secondary_shift_left = cbuf.secondary_shift_left,
.count = cbuf.count,
.size_shift = DESCRIPTOR_SIZE_SHIFT,
});
@@ -499,8 +575,10 @@ void TexturePass(Environment& env, IR::Program& program) {
.has_secondary = cbuf.has_secondary,
.cbuf_index = cbuf.index,
.cbuf_offset = cbuf.offset,
+ .shift_left = cbuf.shift_left,
.secondary_cbuf_index = cbuf.secondary_index,
.secondary_cbuf_offset = cbuf.secondary_offset,
+ .secondary_shift_left = cbuf.secondary_shift_left,
.count = cbuf.count,
.size_shift = DESCRIPTOR_SIZE_SHIFT,
});
diff --git a/src/shader_recompiler/shader_info.h b/src/shader_recompiler/shader_info.h
index f5690805c..cc596da4f 100644
--- a/src/shader_recompiler/shader_info.h
+++ b/src/shader_recompiler/shader_info.h
@@ -61,8 +61,10 @@ struct TextureBufferDescriptor {
bool has_secondary;
u32 cbuf_index;
u32 cbuf_offset;
+ u32 shift_left;
u32 secondary_cbuf_index;
u32 secondary_cbuf_offset;
+ u32 secondary_shift_left;
u32 count;
u32 size_shift;
};
@@ -85,8 +87,10 @@ struct TextureDescriptor {
bool has_secondary;
u32 cbuf_index;
u32 cbuf_offset;
+ u32 shift_left;
u32 secondary_cbuf_index;
u32 secondary_cbuf_offset;
+ u32 secondary_shift_left;
u32 count;
u32 size_shift;
};
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index 5b3808351..40e6d1ec4 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -4,7 +4,7 @@
add_subdirectory(host_shaders)
if(LIBVA_FOUND)
- set_source_files_properties(command_classes/codecs/codec.cpp
+ set_source_files_properties(host1x/codecs/codec.cpp
PROPERTIES COMPILE_DEFINITIONS LIBVA_FOUND=1)
list(APPEND FFmpeg_LIBRARIES ${LIBVA_LIBRARIES})
endif()
@@ -15,26 +15,14 @@ add_library(video_core STATIC
buffer_cache/buffer_cache.h
cdma_pusher.cpp
cdma_pusher.h
- command_classes/codecs/codec.cpp
- command_classes/codecs/codec.h
- command_classes/codecs/h264.cpp
- command_classes/codecs/h264.h
- command_classes/codecs/vp8.cpp
- command_classes/codecs/vp8.h
- command_classes/codecs/vp9.cpp
- command_classes/codecs/vp9.h
- command_classes/codecs/vp9_types.h
- command_classes/host1x.cpp
- command_classes/host1x.h
- command_classes/nvdec.cpp
- command_classes/nvdec.h
- command_classes/nvdec_common.h
- command_classes/sync_manager.cpp
- command_classes/sync_manager.h
- command_classes/vic.cpp
- command_classes/vic.h
compatible_formats.cpp
compatible_formats.h
+ control/channel_state.cpp
+ control/channel_state.h
+ control/channel_state_cache.cpp
+ control/channel_state_cache.h
+ control/scheduler.cpp
+ control/scheduler.h
delayed_destruction_ring.h
dirty_flags.cpp
dirty_flags.h
@@ -54,7 +42,31 @@ add_library(video_core STATIC
engines/maxwell_3d.h
engines/maxwell_dma.cpp
engines/maxwell_dma.h
+ engines/puller.cpp
+ engines/puller.h
framebuffer_config.h
+ host1x/codecs/codec.cpp
+ host1x/codecs/codec.h
+ host1x/codecs/h264.cpp
+ host1x/codecs/h264.h
+ host1x/codecs/vp8.cpp
+ host1x/codecs/vp8.h
+ host1x/codecs/vp9.cpp
+ host1x/codecs/vp9.h
+ host1x/codecs/vp9_types.h
+ host1x/control.cpp
+ host1x/control.h
+ host1x/host1x.cpp
+ host1x/host1x.h
+ host1x/nvdec.cpp
+ host1x/nvdec.h
+ host1x/nvdec_common.h
+ host1x/sync_manager.cpp
+ host1x/sync_manager.h
+ host1x/syncpoint_manager.cpp
+ host1x/syncpoint_manager.h
+ host1x/vic.cpp
+ host1x/vic.h
macro/macro.cpp
macro/macro.h
macro/macro_hle.cpp
@@ -195,6 +207,7 @@ add_library(video_core STATIC
texture_cache/render_targets.h
texture_cache/samples_helper.h
texture_cache/slot_vector.h
+ texture_cache/texture_cache.cpp
texture_cache/texture_cache.h
texture_cache/texture_cache_base.h
texture_cache/types.h
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index f015dae56..8e26b3f95 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -5,7 +5,6 @@
#include <algorithm>
#include <array>
-#include <deque>
#include <memory>
#include <mutex>
#include <numeric>
@@ -23,6 +22,7 @@
#include "common/settings.h"
#include "core/memory.h"
#include "video_core/buffer_cache/buffer_base.h"
+#include "video_core/control/channel_state_cache.h"
#include "video_core/delayed_destruction_ring.h"
#include "video_core/dirty_flags.h"
#include "video_core/engines/kepler_compute.h"
@@ -56,7 +56,7 @@ using UniformBufferSizes = std::array<std::array<u32, NUM_GRAPHICS_UNIFORM_BUFFE
using ComputeUniformBufferSizes = std::array<u32, NUM_COMPUTE_UNIFORM_BUFFERS>;
template <typename P>
-class BufferCache {
+class BufferCache : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
// Page size for caching purposes.
// This is unrelated to the CPU page size and it can be changed as it seems optimal.
@@ -116,10 +116,7 @@ public:
static constexpr u32 DEFAULT_SKIP_CACHE_SIZE = static_cast<u32>(4_KiB);
explicit BufferCache(VideoCore::RasterizerInterface& rasterizer_,
- Tegra::Engines::Maxwell3D& maxwell3d_,
- Tegra::Engines::KeplerCompute& kepler_compute_,
- Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_,
- Runtime& runtime_);
+ Core::Memory::Memory& cpu_memory_, Runtime& runtime_);
void TickFrame();
@@ -129,7 +126,7 @@ public:
void DownloadMemory(VAddr cpu_addr, u64 size);
- bool InlineMemory(VAddr dest_address, size_t copy_size, std::span<u8> inlined_buffer);
+ bool InlineMemory(VAddr dest_address, size_t copy_size, std::span<const u8> inlined_buffer);
void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size);
@@ -353,7 +350,7 @@ private:
void NotifyBufferDeletion();
- [[nodiscard]] Binding StorageBufferBinding(GPUVAddr ssbo_addr) const;
+ [[nodiscard]] Binding StorageBufferBinding(GPUVAddr ssbo_addr, bool is_written = false) const;
[[nodiscard]] TextureBufferBinding GetTextureBufferBinding(GPUVAddr gpu_addr, u32 size,
PixelFormat format);
@@ -367,9 +364,6 @@ private:
void ClearDownload(IntervalType subtract_interval);
VideoCore::RasterizerInterface& rasterizer;
- Tegra::Engines::Maxwell3D& maxwell3d;
- Tegra::Engines::KeplerCompute& kepler_compute;
- Tegra::MemoryManager& gpu_memory;
Core::Memory::Memory& cpu_memory;
SlotVector<Buffer> slot_buffers;
@@ -444,12 +438,8 @@ private:
template <class P>
BufferCache<P>::BufferCache(VideoCore::RasterizerInterface& rasterizer_,
- Tegra::Engines::Maxwell3D& maxwell3d_,
- Tegra::Engines::KeplerCompute& kepler_compute_,
- Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_,
- Runtime& runtime_)
- : runtime{runtime_}, rasterizer{rasterizer_}, maxwell3d{maxwell3d_},
- kepler_compute{kepler_compute_}, gpu_memory{gpu_memory_}, cpu_memory{cpu_memory_} {
+ Core::Memory::Memory& cpu_memory_, Runtime& runtime_)
+ : runtime{runtime_}, rasterizer{rasterizer_}, cpu_memory{cpu_memory_} {
// Ensure the first slot is used for the null buffer
void(slot_buffers.insert(runtime, NullBufferParams{}));
common_ranges.clear();
@@ -552,8 +542,8 @@ void BufferCache<P>::ClearDownload(IntervalType subtract_interval) {
template <class P>
bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) {
- const std::optional<VAddr> cpu_src_address = gpu_memory.GpuToCpuAddress(src_address);
- const std::optional<VAddr> cpu_dest_address = gpu_memory.GpuToCpuAddress(dest_address);
+ const std::optional<VAddr> cpu_src_address = gpu_memory->GpuToCpuAddress(src_address);
+ const std::optional<VAddr> cpu_dest_address = gpu_memory->GpuToCpuAddress(dest_address);
if (!cpu_src_address || !cpu_dest_address) {
return false;
}
@@ -611,7 +601,7 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
template <class P>
bool BufferCache<P>::DMAClear(GPUVAddr dst_address, u64 amount, u32 value) {
- const std::optional<VAddr> cpu_dst_address = gpu_memory.GpuToCpuAddress(dst_address);
+ const std::optional<VAddr> cpu_dst_address = gpu_memory->GpuToCpuAddress(dst_address);
if (!cpu_dst_address) {
return false;
}
@@ -635,7 +625,7 @@ bool BufferCache<P>::DMAClear(GPUVAddr dst_address, u64 amount, u32 value) {
template <class P>
void BufferCache<P>::BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr,
u32 size) {
- const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
+ const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
const Binding binding{
.cpu_addr = *cpu_addr,
.size = size,
@@ -673,7 +663,7 @@ void BufferCache<P>::BindHostGeometryBuffers(bool is_indexed) {
if (is_indexed) {
BindHostIndexBuffer();
} else if constexpr (!HAS_FULL_INDEX_AND_PRIMITIVE_SUPPORT) {
- const auto& regs = maxwell3d.regs;
+ const auto& regs = maxwell3d->regs;
if (regs.draw.topology == Maxwell::PrimitiveTopology::Quads) {
runtime.BindQuadArrayIndexBuffer(regs.vertex_buffer.first, regs.vertex_buffer.count);
}
@@ -733,9 +723,9 @@ void BufferCache<P>::BindGraphicsStorageBuffer(size_t stage, size_t ssbo_index,
enabled_storage_buffers[stage] |= 1U << ssbo_index;
written_storage_buffers[stage] |= (is_written ? 1U : 0U) << ssbo_index;
- const auto& cbufs = maxwell3d.state.shader_stages[stage];
+ const auto& cbufs = maxwell3d->state.shader_stages[stage];
const GPUVAddr ssbo_addr = cbufs.const_buffers[cbuf_index].address + cbuf_offset;
- storage_buffers[stage][ssbo_index] = StorageBufferBinding(ssbo_addr);
+ storage_buffers[stage][ssbo_index] = StorageBufferBinding(ssbo_addr, is_written);
}
template <class P>
@@ -770,12 +760,12 @@ void BufferCache<P>::BindComputeStorageBuffer(size_t ssbo_index, u32 cbuf_index,
enabled_compute_storage_buffers |= 1U << ssbo_index;
written_compute_storage_buffers |= (is_written ? 1U : 0U) << ssbo_index;
- const auto& launch_desc = kepler_compute.launch_description;
+ const auto& launch_desc = kepler_compute->launch_description;
ASSERT(((launch_desc.const_buffer_enable_mask >> cbuf_index) & 1) != 0);
const auto& cbufs = launch_desc.const_buffer_config;
const GPUVAddr ssbo_addr = cbufs[cbuf_index].Address() + cbuf_offset;
- compute_storage_buffers[ssbo_index] = StorageBufferBinding(ssbo_addr);
+ compute_storage_buffers[ssbo_index] = StorageBufferBinding(ssbo_addr, is_written);
}
template <class P>
@@ -836,6 +826,19 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
const bool is_accuracy_normal =
Settings::values.gpu_accuracy.GetValue() == Settings::GPUAccuracy::Normal;
+ auto it = committed_ranges.begin();
+ while (it != committed_ranges.end()) {
+ auto& current_intervals = *it;
+ auto next_it = std::next(it);
+ while (next_it != committed_ranges.end()) {
+ for (auto& interval : *next_it) {
+ current_intervals.subtract(interval);
+ }
+ next_it++;
+ }
+ it++;
+ }
+
boost::container::small_vector<std::pair<BufferCopy, BufferId>, 1> downloads;
u64 total_size_bytes = 0;
u64 largest_copy = 0;
@@ -991,19 +994,19 @@ void BufferCache<P>::BindHostIndexBuffer() {
const u32 size = index_buffer.size;
SynchronizeBuffer(buffer, index_buffer.cpu_addr, size);
if constexpr (HAS_FULL_INDEX_AND_PRIMITIVE_SUPPORT) {
- const u32 new_offset = offset + maxwell3d.regs.index_array.first *
- maxwell3d.regs.index_array.FormatSizeInBytes();
+ const u32 new_offset = offset + maxwell3d->regs.index_array.first *
+ maxwell3d->regs.index_array.FormatSizeInBytes();
runtime.BindIndexBuffer(buffer, new_offset, size);
} else {
- runtime.BindIndexBuffer(maxwell3d.regs.draw.topology, maxwell3d.regs.index_array.format,
- maxwell3d.regs.index_array.first, maxwell3d.regs.index_array.count,
- buffer, offset, size);
+ runtime.BindIndexBuffer(maxwell3d->regs.draw.topology, maxwell3d->regs.index_array.format,
+ maxwell3d->regs.index_array.first,
+ maxwell3d->regs.index_array.count, buffer, offset, size);
}
}
template <class P>
void BufferCache<P>::BindHostVertexBuffers() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
for (u32 index = 0; index < NUM_VERTEX_BUFFERS; ++index) {
const Binding& binding = vertex_buffers[index];
Buffer& buffer = slot_buffers[binding.buffer_id];
@@ -1014,7 +1017,7 @@ void BufferCache<P>::BindHostVertexBuffers() {
}
flags[Dirty::VertexBuffer0 + index] = false;
- const u32 stride = maxwell3d.regs.vertex_array[index].stride;
+ const u32 stride = maxwell3d->regs.vertex_array[index].stride;
const u32 offset = buffer.Offset(binding.cpu_addr);
runtime.BindVertexBuffer(index, buffer, offset, binding.size, stride);
}
@@ -1154,7 +1157,7 @@ void BufferCache<P>::BindHostGraphicsTextureBuffers(size_t stage) {
template <class P>
void BufferCache<P>::BindHostTransformFeedbackBuffers() {
- if (maxwell3d.regs.tfb_enabled == 0) {
+ if (maxwell3d->regs.tfb_enabled == 0) {
return;
}
for (u32 index = 0; index < NUM_TRANSFORM_FEEDBACK_BUFFERS; ++index) {
@@ -1239,16 +1242,19 @@ void BufferCache<P>::BindHostComputeTextureBuffers() {
template <class P>
void BufferCache<P>::DoUpdateGraphicsBuffers(bool is_indexed) {
- if (is_indexed) {
- UpdateIndexBuffer();
- }
- UpdateVertexBuffers();
- UpdateTransformFeedbackBuffers();
- for (size_t stage = 0; stage < NUM_STAGES; ++stage) {
- UpdateUniformBuffers(stage);
- UpdateStorageBuffers(stage);
- UpdateTextureBuffers(stage);
- }
+ do {
+ has_deleted_buffers = false;
+ if (is_indexed) {
+ UpdateIndexBuffer();
+ }
+ UpdateVertexBuffers();
+ UpdateTransformFeedbackBuffers();
+ for (size_t stage = 0; stage < NUM_STAGES; ++stage) {
+ UpdateUniformBuffers(stage);
+ UpdateStorageBuffers(stage);
+ UpdateTextureBuffers(stage);
+ }
+ } while (has_deleted_buffers);
}
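
DoUpdateGraphicsBuffers now replays the whole update whenever a step deletes a buffer, since a deletion can invalidate bindings produced earlier in the same pass. A sketch of that retry-until-stable loop (the step bodies are placeholders):

    #include <array>
    #include <functional>

    int main() {
        bool has_deleted_buffers = false;
        int evictions_left = 1;            // pretend one step evicts a buffer exactly once
        const std::array<std::function<void()>, 2> steps{
            [&] { if (evictions_left-- > 0) has_deleted_buffers = true; },  // e.g. UpdateIndexBuffer
            [&] { /* UpdateVertexBuffers(), UpdateStorageBuffers(), ... */ },
        };
        do {
            has_deleted_buffers = false;
            for (const auto& step : steps) step();
        } while (has_deleted_buffers);     // repeat until a full pass completes cleanly
    }
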
template <class P>
@@ -1262,8 +1268,8 @@ template <class P>
void BufferCache<P>::UpdateIndexBuffer() {
// We have to check for the dirty flags and index count
// The index count is currently changed without updating the dirty flags
- const auto& index_array = maxwell3d.regs.index_array;
- auto& flags = maxwell3d.dirty.flags;
+ const auto& index_array = maxwell3d->regs.index_array;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::IndexBuffer] && last_index_count == index_array.count) {
return;
}
@@ -1272,7 +1278,7 @@ void BufferCache<P>::UpdateIndexBuffer() {
const GPUVAddr gpu_addr_begin = index_array.StartAddress();
const GPUVAddr gpu_addr_end = index_array.EndAddress();
- const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr_begin);
+ const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr_begin);
const u32 address_size = static_cast<u32>(gpu_addr_end - gpu_addr_begin);
const u32 draw_size = (index_array.count + index_array.first) * index_array.FormatSizeInBytes();
const u32 size = std::min(address_size, draw_size);
@@ -1289,8 +1295,8 @@ void BufferCache<P>::UpdateIndexBuffer() {
template <class P>
void BufferCache<P>::UpdateVertexBuffers() {
- auto& flags = maxwell3d.dirty.flags;
- if (!maxwell3d.dirty.flags[Dirty::VertexBuffers]) {
+ auto& flags = maxwell3d->dirty.flags;
+ if (!maxwell3d->dirty.flags[Dirty::VertexBuffers]) {
return;
}
flags[Dirty::VertexBuffers] = false;
@@ -1302,33 +1308,25 @@ void BufferCache<P>::UpdateVertexBuffers() {
template <class P>
void BufferCache<P>::UpdateVertexBuffer(u32 index) {
- if (!maxwell3d.dirty.flags[Dirty::VertexBuffer0 + index]) {
+ if (!maxwell3d->dirty.flags[Dirty::VertexBuffer0 + index]) {
return;
}
- const auto& array = maxwell3d.regs.vertex_array[index];
- const auto& limit = maxwell3d.regs.vertex_array_limit[index];
+ const auto& array = maxwell3d->regs.vertex_array[index];
+ const auto& limit = maxwell3d->regs.vertex_array_limit[index];
const GPUVAddr gpu_addr_begin = array.StartAddress();
const GPUVAddr gpu_addr_end = limit.LimitAddress() + 1;
- const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr_begin);
- u32 address_size = static_cast<u32>(gpu_addr_end - gpu_addr_begin);
- if (address_size >= 64_MiB) {
- // Reported vertex buffer size is very large, cap to mapped buffer size
- GPUVAddr submapped_addr_end = gpu_addr_begin;
-
- const auto ranges{gpu_memory.GetSubmappedRange(gpu_addr_begin, address_size)};
- if (ranges.size() > 0) {
- const auto& [addr, size] = *ranges.begin();
- submapped_addr_end = addr + size;
- }
-
- address_size =
- std::min(address_size, static_cast<u32>(submapped_addr_end - gpu_addr_begin));
- }
- const u32 size = address_size; // TODO: Analyze stride and number of vertices
- if (array.enable == 0 || size == 0 || !cpu_addr) {
+ const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr_begin);
+ u32 address_size = static_cast<u32>(
+ std::min(gpu_addr_end - gpu_addr_begin, static_cast<u64>(std::numeric_limits<u32>::max())));
+ if (array.enable == 0 || address_size == 0 || !cpu_addr) {
vertex_buffers[index] = NULL_BINDING;
return;
}
+ if (!gpu_memory->IsWithinGPUAddressRange(gpu_addr_end)) {
+ address_size =
+ static_cast<u32>(gpu_memory->MaxContinousRange(gpu_addr_begin, address_size));
+ }
+ const u32 size = address_size; // TODO: Analyze stride and number of vertices
vertex_buffers[index] = Binding{
.cpu_addr = *cpu_addr,
.size = size,
@@ -1382,7 +1380,7 @@ void BufferCache<P>::UpdateTextureBuffers(size_t stage) {
template <class P>
void BufferCache<P>::UpdateTransformFeedbackBuffers() {
- if (maxwell3d.regs.tfb_enabled == 0) {
+ if (maxwell3d->regs.tfb_enabled == 0) {
return;
}
for (u32 index = 0; index < NUM_TRANSFORM_FEEDBACK_BUFFERS; ++index) {
@@ -1392,10 +1390,10 @@ void BufferCache<P>::UpdateTransformFeedbackBuffers() {
template <class P>
void BufferCache<P>::UpdateTransformFeedbackBuffer(u32 index) {
- const auto& binding = maxwell3d.regs.tfb_bindings[index];
+ const auto& binding = maxwell3d->regs.tfb_bindings[index];
const GPUVAddr gpu_addr = binding.Address() + binding.buffer_offset;
const u32 size = binding.buffer_size;
- const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
+ const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
if (binding.buffer_enable == 0 || size == 0 || !cpu_addr) {
transform_feedback_buffers[index] = NULL_BINDING;
return;
@@ -1414,10 +1412,10 @@ void BufferCache<P>::UpdateComputeUniformBuffers() {
ForEachEnabledBit(enabled_compute_uniform_buffer_mask, [&](u32 index) {
Binding& binding = compute_uniform_buffers[index];
binding = NULL_BINDING;
- const auto& launch_desc = kepler_compute.launch_description;
+ const auto& launch_desc = kepler_compute->launch_description;
if (((launch_desc.const_buffer_enable_mask >> index) & 1) != 0) {
const auto& cbuf = launch_desc.const_buffer_config[index];
- const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(cbuf.Address());
+ const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(cbuf.Address());
if (cpu_addr) {
binding.cpu_addr = *cpu_addr;
binding.size = cbuf.size;
@@ -1567,6 +1565,8 @@ BufferId BufferCache<P>::CreateBuffer(VAddr cpu_addr, u32 wanted_size) {
const OverlapResult overlap = ResolveOverlaps(cpu_addr, wanted_size);
const u32 size = static_cast<u32>(overlap.end - overlap.begin);
const BufferId new_buffer_id = slot_buffers.insert(runtime, rasterizer, overlap.begin, size);
+ auto& new_buffer = slot_buffers[new_buffer_id];
+ runtime.ClearBuffer(new_buffer, 0, new_buffer.SizeBytes(), 0);
for (const BufferId overlap_id : overlap.ids) {
JoinOverlap(new_buffer_id, overlap_id, !overlap.has_stream_leap);
}
@@ -1695,7 +1695,7 @@ void BufferCache<P>::MappedUploadMemory(Buffer& buffer, u64 total_size_bytes,
template <class P>
bool BufferCache<P>::InlineMemory(VAddr dest_address, size_t copy_size,
- std::span<u8> inlined_buffer) {
+ std::span<const u8> inlined_buffer) {
const bool is_dirty = IsRegionRegistered(dest_address, copy_size);
if (!is_dirty) {
return false;
@@ -1831,7 +1831,7 @@ void BufferCache<P>::NotifyBufferDeletion() {
dirty_uniform_buffers.fill(~u32{0});
uniform_buffer_binding_sizes.fill({});
}
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
flags[Dirty::IndexBuffer] = true;
flags[Dirty::VertexBuffers] = true;
for (u32 index = 0; index < NUM_VERTEX_BUFFERS; ++index) {
@@ -1841,16 +1841,18 @@ void BufferCache<P>::NotifyBufferDeletion() {
}
template <class P>
-typename BufferCache<P>::Binding BufferCache<P>::StorageBufferBinding(GPUVAddr ssbo_addr) const {
- const GPUVAddr gpu_addr = gpu_memory.Read<u64>(ssbo_addr);
- const u32 size = gpu_memory.Read<u32>(ssbo_addr + 8);
- const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
+typename BufferCache<P>::Binding BufferCache<P>::StorageBufferBinding(GPUVAddr ssbo_addr,
+ bool is_written) const {
+ const GPUVAddr gpu_addr = gpu_memory->Read<u64>(ssbo_addr);
+ const u32 size = gpu_memory->Read<u32>(ssbo_addr + 8);
+ const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
if (!cpu_addr || size == 0) {
return NULL_BINDING;
}
+ const VAddr cpu_end = Common::AlignUp(*cpu_addr + size, Core::Memory::YUZU_PAGESIZE);
const Binding binding{
.cpu_addr = *cpu_addr,
- .size = size,
+ .size = is_written ? size : static_cast<u32>(cpu_end - *cpu_addr),
.buffer_id = BufferId{},
};
return binding;
@@ -1859,7 +1861,7 @@ typename BufferCache<P>::Binding BufferCache<P>::StorageBufferBinding(GPUVAddr s
template <class P>
typename BufferCache<P>::TextureBufferBinding BufferCache<P>::GetTextureBufferBinding(
GPUVAddr gpu_addr, u32 size, PixelFormat format) {
- const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
+ const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
TextureBufferBinding binding;
if (!cpu_addr || size == 0) {
binding.cpu_addr = 0;
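// A minimal standalone sketch of the read-only storage-buffer sizing introduced in
// StorageBufferBinding above, assuming a 4 KiB guest page (the exact value of
// Core::Memory::YUZU_PAGESIZE is an assumption here); kPageSize/AlignUpToPage are illustrative.
#include <cstdint>
constexpr std::uint64_t kPageSize = 0x1000;
constexpr std::uint64_t AlignUpToPage(std::uint64_t value) {
    return (value + kPageSize - 1) & ~(kPageSize - 1);
}
// An SSBO at cpu_addr = 0x100100 with size = 0x230:
//   written binding   -> size stays 0x230, so writeback remains tight
//   read-only binding -> AlignUpToPage(0x100100 + 0x230) - 0x100100 == 0xF00
static_assert(AlignUpToPage(0x100100 + 0x230) - 0x100100 == 0xF00);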
diff --git a/src/video_core/cdma_pusher.cpp b/src/video_core/cdma_pusher.cpp
index 8e890a85e..28a2d2090 100644
--- a/src/video_core/cdma_pusher.cpp
+++ b/src/video_core/cdma_pusher.cpp
@@ -2,20 +2,22 @@
// SPDX-License-Identifier: MIT
#include <bit>
-#include "command_classes/host1x.h"
-#include "command_classes/nvdec.h"
-#include "command_classes/vic.h"
#include "video_core/cdma_pusher.h"
-#include "video_core/command_classes/sync_manager.h"
#include "video_core/engines/maxwell_3d.h"
-#include "video_core/gpu.h"
+#include "video_core/host1x/control.h"
+#include "video_core/host1x/host1x.h"
+#include "video_core/host1x/nvdec.h"
+#include "video_core/host1x/nvdec_common.h"
+#include "video_core/host1x/sync_manager.h"
+#include "video_core/host1x/vic.h"
+#include "video_core/memory_manager.h"
namespace Tegra {
-CDmaPusher::CDmaPusher(GPU& gpu_)
- : gpu{gpu_}, nvdec_processor(std::make_shared<Nvdec>(gpu)),
- vic_processor(std::make_unique<Vic>(gpu, nvdec_processor)),
- host1x_processor(std::make_unique<Host1x>(gpu)),
- sync_manager(std::make_unique<SyncptIncrManager>(gpu)) {}
+CDmaPusher::CDmaPusher(Host1x::Host1x& host1x_)
+ : host1x{host1x_}, nvdec_processor(std::make_shared<Host1x::Nvdec>(host1x)),
+ vic_processor(std::make_unique<Host1x::Vic>(host1x, nvdec_processor)),
+ host1x_processor(std::make_unique<Host1x::Control>(host1x)),
+ sync_manager(std::make_unique<Host1x::SyncptIncrManager>(host1x)) {}
CDmaPusher::~CDmaPusher() = default;
@@ -109,16 +111,17 @@ void CDmaPusher::ExecuteCommand(u32 state_offset, u32 data) {
case ThiMethod::SetMethod1:
LOG_DEBUG(Service_NVDRV, "VIC method 0x{:X}, Args=({})",
static_cast<u32>(vic_thi_state.method_0), data);
- vic_processor->ProcessMethod(static_cast<Vic::Method>(vic_thi_state.method_0), data);
+ vic_processor->ProcessMethod(static_cast<Host1x::Vic::Method>(vic_thi_state.method_0),
+ data);
break;
default:
break;
}
break;
- case ChClassId::Host1x:
+ case ChClassId::Control:
// This device is mainly for syncpoint synchronization
LOG_DEBUG(Service_NVDRV, "Host1X Class Method");
- host1x_processor->ProcessMethod(static_cast<Host1x::Method>(offset), data);
+ host1x_processor->ProcessMethod(static_cast<Host1x::Control::Method>(offset), data);
break;
default:
UNIMPLEMENTED_MSG("Current class not implemented {:X}", static_cast<u32>(current_class));
diff --git a/src/video_core/cdma_pusher.h b/src/video_core/cdma_pusher.h
index d6ffef95f..83112dfce 100644
--- a/src/video_core/cdma_pusher.h
+++ b/src/video_core/cdma_pusher.h
@@ -12,11 +12,13 @@
namespace Tegra {
-class GPU;
+namespace Host1x {
+class Control;
class Host1x;
class Nvdec;
class SyncptIncrManager;
class Vic;
+} // namespace Host1x
enum class ChSubmissionMode : u32 {
SetClass = 0,
@@ -30,7 +32,7 @@ enum class ChSubmissionMode : u32 {
enum class ChClassId : u32 {
NoClass = 0x0,
- Host1x = 0x1,
+ Control = 0x1,
VideoEncodeMpeg = 0x20,
VideoEncodeNvEnc = 0x21,
VideoStreamingVi = 0x30,
@@ -88,7 +90,7 @@ enum class ThiMethod : u32 {
class CDmaPusher {
public:
- explicit CDmaPusher(GPU& gpu_);
+ explicit CDmaPusher(Host1x::Host1x& host1x);
~CDmaPusher();
/// Process the command entry
@@ -101,11 +103,11 @@ private:
/// Write arguments value to the ThiRegisters member at the specified offset
void ThiStateWrite(ThiRegisters& state, u32 offset, u32 argument);
- GPU& gpu;
- std::shared_ptr<Tegra::Nvdec> nvdec_processor;
- std::unique_ptr<Tegra::Vic> vic_processor;
- std::unique_ptr<Tegra::Host1x> host1x_processor;
- std::unique_ptr<SyncptIncrManager> sync_manager;
+ Host1x::Host1x& host1x;
+ std::shared_ptr<Tegra::Host1x::Nvdec> nvdec_processor;
+ std::unique_ptr<Tegra::Host1x::Vic> vic_processor;
+ std::unique_ptr<Tegra::Host1x::Control> host1x_processor;
+ std::unique_ptr<Host1x::SyncptIncrManager> sync_manager;
ChClassId current_class{};
ThiRegisters vic_thi_state{};
ThiRegisters nvdec_thi_state{};
diff --git a/src/video_core/command_classes/host1x.cpp b/src/video_core/command_classes/host1x.cpp
deleted file mode 100644
index 11855fe10..000000000
--- a/src/video_core/command_classes/host1x.cpp
+++ /dev/null
@@ -1,29 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include "common/assert.h"
-#include "video_core/command_classes/host1x.h"
-#include "video_core/gpu.h"
-
-Tegra::Host1x::Host1x(GPU& gpu_) : gpu(gpu_) {}
-
-Tegra::Host1x::~Host1x() = default;
-
-void Tegra::Host1x::ProcessMethod(Method method, u32 argument) {
- switch (method) {
- case Method::LoadSyncptPayload32:
- syncpoint_value = argument;
- break;
- case Method::WaitSyncpt:
- case Method::WaitSyncpt32:
- Execute(argument);
- break;
- default:
- UNIMPLEMENTED_MSG("Host1x method 0x{:X}", static_cast<u32>(method));
- break;
- }
-}
-
-void Tegra::Host1x::Execute(u32 data) {
- gpu.WaitFence(data, syncpoint_value);
-}
diff --git a/src/video_core/control/channel_state.cpp b/src/video_core/control/channel_state.cpp
new file mode 100644
index 000000000..cdecc3a91
--- /dev/null
+++ b/src/video_core/control/channel_state.cpp
@@ -0,0 +1,40 @@
+// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "common/assert.h"
+#include "video_core/control/channel_state.h"
+#include "video_core/dma_pusher.h"
+#include "video_core/engines/fermi_2d.h"
+#include "video_core/engines/kepler_compute.h"
+#include "video_core/engines/kepler_memory.h"
+#include "video_core/engines/maxwell_3d.h"
+#include "video_core/engines/maxwell_dma.h"
+#include "video_core/engines/puller.h"
+#include "video_core/memory_manager.h"
+
+namespace Tegra::Control {
+
+ChannelState::ChannelState(s32 bind_id_) : bind_id{bind_id_}, initialized{} {}
+
+void ChannelState::Init(Core::System& system, GPU& gpu) {
+ ASSERT(memory_manager);
+ dma_pusher = std::make_unique<Tegra::DmaPusher>(system, gpu, *memory_manager, *this);
+ maxwell_3d = std::make_unique<Engines::Maxwell3D>(system, *memory_manager);
+ fermi_2d = std::make_unique<Engines::Fermi2D>();
+ kepler_compute = std::make_unique<Engines::KeplerCompute>(system, *memory_manager);
+ maxwell_dma = std::make_unique<Engines::MaxwellDMA>(system, *memory_manager);
+ kepler_memory = std::make_unique<Engines::KeplerMemory>(system, *memory_manager);
+ initialized = true;
+}
+
+void ChannelState::BindRasterizer(VideoCore::RasterizerInterface* rasterizer) {
+ dma_pusher->BindRasterizer(rasterizer);
+ memory_manager->BindRasterizer(rasterizer);
+ maxwell_3d->BindRasterizer(rasterizer);
+ fermi_2d->BindRasterizer(rasterizer);
+ kepler_memory->BindRasterizer(rasterizer);
+ kepler_compute->BindRasterizer(rasterizer);
+ maxwell_dma->BindRasterizer(rasterizer);
+}
+
+} // namespace Tegra::Control
diff --git a/src/video_core/control/channel_state.h b/src/video_core/control/channel_state.h
new file mode 100644
index 000000000..3a7b9872c
--- /dev/null
+++ b/src/video_core/control/channel_state.h
@@ -0,0 +1,68 @@
+// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#pragma once
+
+#include <memory>
+
+#include "common/common_types.h"
+
+namespace Core {
+class System;
+}
+
+namespace VideoCore {
+class RasterizerInterface;
+}
+
+namespace Tegra {
+
+class GPU;
+
+namespace Engines {
+class Puller;
+class Fermi2D;
+class Maxwell3D;
+class MaxwellDMA;
+class KeplerCompute;
+class KeplerMemory;
+} // namespace Engines
+
+class MemoryManager;
+class DmaPusher;
+
+namespace Control {
+
+struct ChannelState {
+ explicit ChannelState(s32 bind_id);
+ ChannelState(const ChannelState& state) = delete;
+ ChannelState& operator=(const ChannelState&) = delete;
+ ChannelState(ChannelState&& other) noexcept = default;
+ ChannelState& operator=(ChannelState&& other) noexcept = default;
+
+ void Init(Core::System& system, GPU& gpu);
+
+ void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);
+
+ s32 bind_id = -1;
+ /// 3D engine
+ std::unique_ptr<Engines::Maxwell3D> maxwell_3d;
+ /// 2D engine
+ std::unique_ptr<Engines::Fermi2D> fermi_2d;
+ /// Compute engine
+ std::unique_ptr<Engines::KeplerCompute> kepler_compute;
+ /// DMA engine
+ std::unique_ptr<Engines::MaxwellDMA> maxwell_dma;
+ /// Inline memory engine
+ std::unique_ptr<Engines::KeplerMemory> kepler_memory;
+
+ std::shared_ptr<MemoryManager> memory_manager;
+
+ std::unique_ptr<DmaPusher> dma_pusher;
+
+ bool initialized{};
+};
+
+} // namespace Control
+
+} // namespace Tegra
diff --git a/src/video_core/control/channel_state_cache.cpp b/src/video_core/control/channel_state_cache.cpp
new file mode 100644
index 000000000..4ebeb6356
--- /dev/null
+++ b/src/video_core/control/channel_state_cache.cpp
@@ -0,0 +1,14 @@
+// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "video_core/control/channel_state_cache.inc"
+
+namespace VideoCommon {
+
+ChannelInfo::ChannelInfo(Tegra::Control::ChannelState& channel_state)
+ : maxwell3d{*channel_state.maxwell_3d}, kepler_compute{*channel_state.kepler_compute},
+ gpu_memory{*channel_state.memory_manager} {}
+
+template class VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo>;
+
+} // namespace VideoCommon
diff --git a/src/video_core/control/channel_state_cache.h b/src/video_core/control/channel_state_cache.h
new file mode 100644
index 000000000..584a0c26c
--- /dev/null
+++ b/src/video_core/control/channel_state_cache.h
@@ -0,0 +1,101 @@
+// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#pragma once
+
+#include <deque>
+#include <limits>
+#include <mutex>
+#include <optional>
+#include <unordered_map>
+#include <vector>
+
+#include "common/common_types.h"
+
+namespace Tegra {
+
+namespace Engines {
+class Maxwell3D;
+class KeplerCompute;
+} // namespace Engines
+
+class MemoryManager;
+
+namespace Control {
+struct ChannelState;
+}
+
+} // namespace Tegra
+
+namespace VideoCommon {
+
+class ChannelInfo {
+public:
+ ChannelInfo() = delete;
+ explicit ChannelInfo(Tegra::Control::ChannelState& state);
+ ChannelInfo(const ChannelInfo& state) = delete;
+ ChannelInfo& operator=(const ChannelInfo&) = delete;
+ ChannelInfo(ChannelInfo&& other) = default;
+ ChannelInfo& operator=(ChannelInfo&& other) = default;
+
+ Tegra::Engines::Maxwell3D& maxwell3d;
+ Tegra::Engines::KeplerCompute& kepler_compute;
+ Tegra::MemoryManager& gpu_memory;
+};
+
+template <class P>
+class ChannelSetupCaches {
+public:
+    /// Operations for setting the channel of execution.
+ virtual ~ChannelSetupCaches();
+
+ /// Create channel state.
+ virtual void CreateChannel(Tegra::Control::ChannelState& channel);
+
+ /// Bind a channel for execution.
+ void BindToChannel(s32 id);
+
+ /// Erase channel's state.
+ void EraseChannel(s32 id);
+
+ Tegra::MemoryManager* GetFromID(size_t id) const {
+ std::unique_lock<std::mutex> lk(config_mutex);
+ const auto ref = address_spaces.find(id);
+ return ref->second.gpu_memory;
+ }
+
+ std::optional<size_t> getStorageID(size_t id) const {
+ std::unique_lock<std::mutex> lk(config_mutex);
+ const auto ref = address_spaces.find(id);
+ if (ref == address_spaces.end()) {
+ return std::nullopt;
+ }
+ return ref->second.storage_id;
+ }
+
+protected:
+ static constexpr size_t UNSET_CHANNEL{std::numeric_limits<size_t>::max()};
+
+ P* channel_state;
+ size_t current_channel_id{UNSET_CHANNEL};
+ size_t current_address_space{};
+ Tegra::Engines::Maxwell3D* maxwell3d;
+ Tegra::Engines::KeplerCompute* kepler_compute;
+ Tegra::MemoryManager* gpu_memory;
+
+ std::deque<P> channel_storage;
+ std::deque<size_t> free_channel_ids;
+ std::unordered_map<s32, size_t> channel_map;
+ std::vector<size_t> active_channel_ids;
+ struct AddresSpaceRef {
+ size_t ref_count;
+ size_t storage_id;
+ Tegra::MemoryManager* gpu_memory;
+ };
+ std::unordered_map<size_t, AddresSpaceRef> address_spaces;
+ mutable std::mutex config_mutex;
+
+ virtual void OnGPUASRegister([[maybe_unused]] size_t map_id) {}
+};
+
+} // namespace VideoCommon
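// A hypothetical cache built on the helper above, illustrating the intended lifecycle.
// DummyCache and ExampleChannelLifetime are not part of this change; the real caches in
// video_core are expected to be driven the same way as channels are created, bound and freed.
class DummyCache : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {};

void ExampleChannelLifetime(DummyCache& cache, Tegra::Control::ChannelState& channel) {
    cache.CreateChannel(channel);         // registers channel.bind_id and its address space
    cache.BindToChannel(channel.bind_id); // maxwell3d/kepler_compute/gpu_memory now track it
    // ... issue work against the bound channel ...
    cache.EraseChannel(channel.bind_id);  // releases the slot and unbinds it if it was current
}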
diff --git a/src/video_core/control/channel_state_cache.inc b/src/video_core/control/channel_state_cache.inc
new file mode 100644
index 000000000..460313893
--- /dev/null
+++ b/src/video_core/control/channel_state_cache.inc
@@ -0,0 +1,86 @@
+// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <algorithm>
+
+#include "video_core/control/channel_state.h"
+#include "video_core/control/channel_state_cache.h"
+#include "video_core/engines/kepler_compute.h"
+#include "video_core/engines/maxwell_3d.h"
+#include "video_core/memory_manager.h"
+
+namespace VideoCommon {
+
+template <class P>
+ChannelSetupCaches<P>::~ChannelSetupCaches() = default;
+
+template <class P>
+void ChannelSetupCaches<P>::CreateChannel(struct Tegra::Control::ChannelState& channel) {
+ std::unique_lock<std::mutex> lk(config_mutex);
+ ASSERT(channel_map.find(channel.bind_id) == channel_map.end() && channel.bind_id >= 0);
+ auto new_id = [this, &channel]() {
+ if (!free_channel_ids.empty()) {
+ auto id = free_channel_ids.front();
+ free_channel_ids.pop_front();
+ new (&channel_storage[id]) P(channel);
+ return id;
+ }
+ channel_storage.emplace_back(channel);
+ return channel_storage.size() - 1;
+ }();
+ channel_map.emplace(channel.bind_id, new_id);
+ if (current_channel_id != UNSET_CHANNEL) {
+ channel_state = &channel_storage[current_channel_id];
+ }
+ active_channel_ids.push_back(new_id);
+ auto as_it = address_spaces.find(channel.memory_manager->GetID());
+ if (as_it != address_spaces.end()) {
+ as_it->second.ref_count++;
+ return;
+ }
+ AddresSpaceRef new_gpu_mem_ref{
+ .ref_count = 1,
+ .storage_id = address_spaces.size(),
+ .gpu_memory = channel.memory_manager.get(),
+ };
+ address_spaces.emplace(channel.memory_manager->GetID(), new_gpu_mem_ref);
+ OnGPUASRegister(channel.memory_manager->GetID());
+}
+
+/// Bind a channel for execution.
+template <class P>
+void ChannelSetupCaches<P>::BindToChannel(s32 id) {
+ std::unique_lock<std::mutex> lk(config_mutex);
+ auto it = channel_map.find(id);
+ ASSERT(it != channel_map.end() && id >= 0);
+ current_channel_id = it->second;
+ channel_state = &channel_storage[current_channel_id];
+ maxwell3d = &channel_state->maxwell3d;
+ kepler_compute = &channel_state->kepler_compute;
+ gpu_memory = &channel_state->gpu_memory;
+ current_address_space = gpu_memory->GetID();
+}
+
+/// Erase a channel's state.
+template <class P>
+void ChannelSetupCaches<P>::EraseChannel(s32 id) {
+ std::unique_lock<std::mutex> lk(config_mutex);
+ const auto it = channel_map.find(id);
+ ASSERT(it != channel_map.end() && id >= 0);
+ const auto this_id = it->second;
+ free_channel_ids.push_back(this_id);
+ channel_map.erase(it);
+ if (this_id == current_channel_id) {
+ current_channel_id = UNSET_CHANNEL;
+ channel_state = nullptr;
+ maxwell3d = nullptr;
+ kepler_compute = nullptr;
+ gpu_memory = nullptr;
+ } else if (current_channel_id != UNSET_CHANNEL) {
+ channel_state = &channel_storage[current_channel_id];
+ }
+ active_channel_ids.erase(
+ std::find(active_channel_ids.begin(), active_channel_ids.end(), this_id));
+}
+
+} // namespace VideoCommon
diff --git a/src/video_core/control/scheduler.cpp b/src/video_core/control/scheduler.cpp
new file mode 100644
index 000000000..f7cbe204e
--- /dev/null
+++ b/src/video_core/control/scheduler.cpp
@@ -0,0 +1,32 @@
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <memory>
+
+#include "common/assert.h"
+#include "video_core/control/channel_state.h"
+#include "video_core/control/scheduler.h"
+#include "video_core/gpu.h"
+
+namespace Tegra::Control {
+Scheduler::Scheduler(GPU& gpu_) : gpu{gpu_} {}
+
+Scheduler::~Scheduler() = default;
+
+void Scheduler::Push(s32 channel, CommandList&& entries) {
+ std::unique_lock lk(scheduling_guard);
+ auto it = channels.find(channel);
+ ASSERT(it != channels.end());
+ auto channel_state = it->second;
+ gpu.BindChannel(channel_state->bind_id);
+ channel_state->dma_pusher->Push(std::move(entries));
+ channel_state->dma_pusher->DispatchCalls();
+}
+
+void Scheduler::DeclareChannel(std::shared_ptr<ChannelState> new_channel) {
+ s32 channel = new_channel->bind_id;
+ std::unique_lock lk(scheduling_guard);
+ channels.emplace(channel, new_channel);
+}
+
+} // namespace Tegra::Control
diff --git a/src/video_core/control/scheduler.h b/src/video_core/control/scheduler.h
new file mode 100644
index 000000000..44addf61c
--- /dev/null
+++ b/src/video_core/control/scheduler.h
@@ -0,0 +1,37 @@
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#pragma once
+
+#include <memory>
+#include <mutex>
+#include <unordered_map>
+
+#include "video_core/dma_pusher.h"
+
+namespace Tegra {
+
+class GPU;
+
+namespace Control {
+
+struct ChannelState;
+
+class Scheduler {
+public:
+ explicit Scheduler(GPU& gpu_);
+ ~Scheduler();
+
+ void Push(s32 channel, CommandList&& entries);
+
+ void DeclareChannel(std::shared_ptr<ChannelState> new_channel);
+
+private:
+ std::unordered_map<s32, std::shared_ptr<ChannelState>> channels;
+ std::mutex scheduling_guard;
+ GPU& gpu;
+};
+
+} // namespace Control
+
+} // namespace Tegra
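// A minimal usage sketch of the scheduler above; SubmitToChannel is illustrative only and
// assumes the channel was already initialized through the headers added in this change.
void SubmitToChannel(Tegra::Control::Scheduler& scheduler,
                     std::shared_ptr<Tegra::Control::ChannelState> channel,
                     Tegra::CommandList&& entries) {
    scheduler.DeclareChannel(channel);                    // register the channel once
    scheduler.Push(channel->bind_id, std::move(entries)); // bind it and dispatch the pushbuffer
}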
diff --git a/src/video_core/dma_pusher.cpp b/src/video_core/dma_pusher.cpp
index 29b8582ab..9835e3ac1 100644
--- a/src/video_core/dma_pusher.cpp
+++ b/src/video_core/dma_pusher.cpp
@@ -12,7 +12,10 @@
namespace Tegra {
-DmaPusher::DmaPusher(Core::System& system_, GPU& gpu_) : gpu{gpu_}, system{system_} {}
+DmaPusher::DmaPusher(Core::System& system_, GPU& gpu_, MemoryManager& memory_manager_,
+ Control::ChannelState& channel_state_)
+ : gpu{gpu_}, system{system_}, memory_manager{memory_manager_}, puller{gpu_, memory_manager_,
+ *this, channel_state_} {}
DmaPusher::~DmaPusher() = default;
@@ -21,8 +24,6 @@ MICROPROFILE_DEFINE(DispatchCalls, "GPU", "Execute command buffer", MP_RGB(128,
void DmaPusher::DispatchCalls() {
MICROPROFILE_SCOPE(DispatchCalls);
- gpu.SyncGuestHost();
-
dma_pushbuffer_subindex = 0;
dma_state.is_last_call = true;
@@ -33,7 +34,6 @@ void DmaPusher::DispatchCalls() {
}
}
gpu.FlushCommands();
- gpu.SyncGuestHost();
gpu.OnCommandListEnd();
}
@@ -76,11 +76,11 @@ bool DmaPusher::Step() {
// Push buffer non-empty, read a word
command_headers.resize(command_list_header.size);
if (Settings::IsGPULevelHigh()) {
- gpu.MemoryManager().ReadBlock(dma_get, command_headers.data(),
- command_list_header.size * sizeof(u32));
+ memory_manager.ReadBlock(dma_get, command_headers.data(),
+ command_list_header.size * sizeof(u32));
} else {
- gpu.MemoryManager().ReadBlockUnsafe(dma_get, command_headers.data(),
- command_list_header.size * sizeof(u32));
+ memory_manager.ReadBlockUnsafe(dma_get, command_headers.data(),
+ command_list_header.size * sizeof(u32));
}
}
for (std::size_t index = 0; index < command_headers.size();) {
@@ -154,7 +154,7 @@ void DmaPusher::SetState(const CommandHeader& command_header) {
void DmaPusher::CallMethod(u32 argument) const {
if (dma_state.method < non_puller_methods) {
- gpu.CallMethod(GPU::MethodCall{
+ puller.CallPullerMethod(Engines::Puller::MethodCall{
dma_state.method,
argument,
dma_state.subchannel,
@@ -168,12 +168,16 @@ void DmaPusher::CallMethod(u32 argument) const {
void DmaPusher::CallMultiMethod(const u32* base_start, u32 num_methods) const {
if (dma_state.method < non_puller_methods) {
- gpu.CallMultiMethod(dma_state.method, dma_state.subchannel, base_start, num_methods,
- dma_state.method_count);
+ puller.CallMultiMethod(dma_state.method, dma_state.subchannel, base_start, num_methods,
+ dma_state.method_count);
} else {
subchannels[dma_state.subchannel]->CallMultiMethod(dma_state.method, base_start,
num_methods, dma_state.method_count);
}
}
+void DmaPusher::BindRasterizer(VideoCore::RasterizerInterface* rasterizer) {
+ puller.BindRasterizer(rasterizer);
+}
+
} // namespace Tegra
diff --git a/src/video_core/dma_pusher.h b/src/video_core/dma_pusher.h
index 872fd146a..938f0f11c 100644
--- a/src/video_core/dma_pusher.h
+++ b/src/video_core/dma_pusher.h
@@ -10,6 +10,7 @@
#include "common/bit_field.h"
#include "common/common_types.h"
#include "video_core/engines/engine_interface.h"
+#include "video_core/engines/puller.h"
namespace Core {
class System;
@@ -17,7 +18,12 @@ class System;
namespace Tegra {
+namespace Control {
+struct ChannelState;
+}
+
class GPU;
+class MemoryManager;
enum class SubmissionMode : u32 {
IncreasingOld = 0,
@@ -31,24 +37,32 @@ enum class SubmissionMode : u32 {
// Note that, traditionally, methods are treated as 4-byte addressable locations, and hence
// their numbers are written down multiplied by 4 in docs. Here we do not multiply by 4,
// so the values you see in docs may be 4 times the method indices used here.
+// Register documentation:
+// https://github.com/NVIDIA/open-gpu-doc/blob/ab27fc22db5de0d02a4cabe08e555663b62db4d4/classes/host/cla26f.h
+//
+// Register Description (approx):
+// https://github.com/NVIDIA/open-gpu-doc/blob/ab27fc22db5de0d02a4cabe08e555663b62db4d4/manuals/volta/gv100/dev_pbdma.ref.txt
enum class BufferMethods : u32 {
BindObject = 0x0,
+ Illegal = 0x1,
Nop = 0x2,
SemaphoreAddressHigh = 0x4,
SemaphoreAddressLow = 0x5,
- SemaphoreSequence = 0x6,
- SemaphoreTrigger = 0x7,
- NotifyIntr = 0x8,
+ SemaphoreSequencePayload = 0x6,
+ SemaphoreOperation = 0x7,
+ NonStallInterrupt = 0x8,
WrcacheFlush = 0x9,
- Unk28 = 0xA,
- UnkCacheFlush = 0xB,
+ MemOpA = 0xA,
+ MemOpB = 0xB,
+ MemOpC = 0xC,
+ MemOpD = 0xD,
RefCnt = 0x14,
SemaphoreAcquire = 0x1A,
SemaphoreRelease = 0x1B,
- FenceValue = 0x1C,
- FenceAction = 0x1D,
- WaitForInterrupt = 0x1E,
- Unk7c = 0x1F,
+ SyncpointPayload = 0x1C,
+ SyncpointOperation = 0x1D,
+ WaitForIdle = 0x1E,
+ CRCCheck = 0x1F,
Yield = 0x20,
NonPullerMethods = 0x40,
};
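// Illustrative check of the numbering note above: documentation lists these registers as byte
// offsets, which are 4x the method indices used in this enum.
static_assert(static_cast<u32>(BufferMethods::SemaphoreAcquire) * 4 == 0x68);
static_assert(static_cast<u32>(BufferMethods::SemaphoreRelease) * 4 == 0x6C);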
@@ -102,7 +116,8 @@ struct CommandList final {
*/
class DmaPusher final {
public:
- explicit DmaPusher(Core::System& system_, GPU& gpu_);
+ explicit DmaPusher(Core::System& system_, GPU& gpu_, MemoryManager& memory_manager_,
+ Control::ChannelState& channel_state_);
~DmaPusher();
void Push(CommandList&& entries) {
@@ -115,6 +130,8 @@ public:
subchannels[subchannel_id] = engine;
}
+ void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);
+
private:
static constexpr u32 non_puller_methods = 0x40;
static constexpr u32 max_subchannels = 8;
@@ -148,6 +165,8 @@ private:
GPU& gpu;
Core::System& system;
+ MemoryManager& memory_manager;
+ mutable Engines::Puller puller;
};
} // namespace Tegra
diff --git a/src/video_core/engines/engine_upload.cpp b/src/video_core/engines/engine_upload.cpp
index 6ff5b1eca..a34819234 100644
--- a/src/video_core/engines/engine_upload.cpp
+++ b/src/video_core/engines/engine_upload.cpp
@@ -3,6 +3,7 @@
#include <cstring>
+#include "common/algorithm.h"
#include "common/assert.h"
#include "video_core/engines/engine_upload.h"
#include "video_core/memory_manager.h"
@@ -34,21 +35,48 @@ void State::ProcessData(const u32 data, const bool is_last_call) {
if (!is_last_call) {
return;
}
+ ProcessData(inner_buffer);
+}
+
+void State::ProcessData(const u32* data, size_t num_data) {
+ std::span<const u8> read_buffer(reinterpret_cast<const u8*>(data), num_data * sizeof(u32));
+ ProcessData(read_buffer);
+}
+
+void State::ProcessData(std::span<const u8> read_buffer) {
const GPUVAddr address{regs.dest.Address()};
if (is_linear) {
- rasterizer->AccelerateInlineToMemory(address, copy_size, inner_buffer);
+ if (regs.line_count == 1) {
+ rasterizer->AccelerateInlineToMemory(address, copy_size, read_buffer);
+ } else {
+ for (u32 line = 0; line < regs.line_count; ++line) {
+ const GPUVAddr dest_line = address + static_cast<size_t>(line) * regs.dest.pitch;
+ memory_manager.WriteBlockUnsafe(
+ dest_line, read_buffer.data() + static_cast<size_t>(line) * regs.line_length_in,
+ regs.line_length_in);
+ }
+ memory_manager.InvalidateRegion(address, regs.dest.pitch * regs.line_count);
+ }
} else {
- UNIMPLEMENTED_IF(regs.dest.z != 0);
- UNIMPLEMENTED_IF(regs.dest.depth != 1);
- UNIMPLEMENTED_IF(regs.dest.BlockWidth() != 0);
- UNIMPLEMENTED_IF(regs.dest.BlockDepth() != 0);
+ u32 width = regs.dest.width;
+ u32 x_elements = regs.line_length_in;
+ u32 x_offset = regs.dest.x;
+ const u32 bpp_shift = Common::FoldRight(
+ 4U, [](u32 x, u32 y) { return std::min(x, static_cast<u32>(std::countr_zero(y))); },
+ width, x_elements, x_offset, static_cast<u32>(address));
+ width >>= bpp_shift;
+ x_elements >>= bpp_shift;
+ x_offset >>= bpp_shift;
+ const u32 bytes_per_pixel = 1U << bpp_shift;
const std::size_t dst_size = Tegra::Texture::CalculateSize(
- true, 1, regs.dest.width, regs.dest.height, 1, regs.dest.BlockHeight(), 0);
+ true, bytes_per_pixel, width, regs.dest.height, regs.dest.depth,
+ regs.dest.BlockHeight(), regs.dest.BlockDepth());
tmp_buffer.resize(dst_size);
memory_manager.ReadBlock(address, tmp_buffer.data(), dst_size);
- Tegra::Texture::SwizzleKepler(regs.dest.width, regs.dest.height, regs.dest.x, regs.dest.y,
- regs.dest.BlockHeight(), copy_size, inner_buffer.data(),
- tmp_buffer.data());
+ Tegra::Texture::SwizzleSubrect(tmp_buffer, read_buffer, bytes_per_pixel, width,
+ regs.dest.height, regs.dest.depth, x_offset, regs.dest.y,
+ x_elements, regs.line_count, regs.dest.BlockHeight(),
+ regs.dest.BlockDepth(), regs.line_length_in);
memory_manager.WriteBlock(address, tmp_buffer.data(), dst_size);
}
}
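// A standalone sketch of the bpp_shift derivation used above: the shift is the largest power of
// two (capped at 16 bytes, i.e. shift 4) dividing the width, the copied x extent, the x offset
// and the destination address, so the swizzle can operate on wider "pixels". BppShift is an
// illustrative stand-in for the Common::FoldRight expression in the diff.
#include <algorithm>
#include <bit>
#include <cstdint>
constexpr std::uint32_t BppShift(std::uint32_t width, std::uint32_t x_elements,
                                 std::uint32_t x_offset, std::uint32_t address) {
    return std::min({std::uint32_t{4}, static_cast<std::uint32_t>(std::countr_zero(width)),
                     static_cast<std::uint32_t>(std::countr_zero(x_elements)),
                     static_cast<std::uint32_t>(std::countr_zero(x_offset)),
                     static_cast<std::uint32_t>(std::countr_zero(address))});
}
static_assert(BppShift(256, 64, 16, 0x1000) == 4); // everything 16-byte aligned -> 16-byte texels
static_assert(BppShift(256, 64, 3, 0x1000) == 0);  // an odd x offset forces the 1-byte path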
diff --git a/src/video_core/engines/engine_upload.h b/src/video_core/engines/engine_upload.h
index 94ff3314a..f08f6e36a 100644
--- a/src/video_core/engines/engine_upload.h
+++ b/src/video_core/engines/engine_upload.h
@@ -3,6 +3,7 @@
#pragma once
+#include <span>
#include <vector>
#include "common/bit_field.h"
#include "common/common_types.h"
@@ -33,7 +34,7 @@ struct Registers {
u32 width;
u32 height;
u32 depth;
- u32 z;
+ u32 layer;
u32 x;
u32 y;
@@ -62,11 +63,14 @@ public:
void ProcessExec(bool is_linear_);
void ProcessData(u32 data, bool is_last_call);
+ void ProcessData(const u32* data, size_t num_data);
/// Binds a rasterizer to this engine.
void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);
private:
+ void ProcessData(std::span<const u8> read_buffer);
+
u32 write_offset = 0;
u32 copy_size = 0;
std::vector<u8> inner_buffer;
diff --git a/src/video_core/engines/kepler_compute.cpp b/src/video_core/engines/kepler_compute.cpp
index 5db254d94..7c50bdbe0 100644
--- a/src/video_core/engines/kepler_compute.cpp
+++ b/src/video_core/engines/kepler_compute.cpp
@@ -36,8 +36,6 @@ void KeplerCompute::CallMethod(u32 method, u32 method_argument, bool is_last_cal
}
case KEPLER_COMPUTE_REG_INDEX(data_upload): {
upload_state.ProcessData(method_argument, is_last_call);
- if (is_last_call) {
- }
break;
}
case KEPLER_COMPUTE_REG_INDEX(launch):
@@ -50,8 +48,15 @@ void KeplerCompute::CallMethod(u32 method, u32 method_argument, bool is_last_cal
void KeplerCompute::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
u32 methods_pending) {
- for (std::size_t i = 0; i < amount; i++) {
- CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1);
+ switch (method) {
+ case KEPLER_COMPUTE_REG_INDEX(data_upload):
+ upload_state.ProcessData(base_start, static_cast<size_t>(amount));
+ return;
+ default:
+ for (std::size_t i = 0; i < amount; i++) {
+ CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1);
+ }
+ break;
}
}
diff --git a/src/video_core/engines/kepler_memory.cpp b/src/video_core/engines/kepler_memory.cpp
index e2b029542..a3fbab1e5 100644
--- a/src/video_core/engines/kepler_memory.cpp
+++ b/src/video_core/engines/kepler_memory.cpp
@@ -33,8 +33,6 @@ void KeplerMemory::CallMethod(u32 method, u32 method_argument, bool is_last_call
}
case KEPLERMEMORY_REG_INDEX(data): {
upload_state.ProcessData(method_argument, is_last_call);
- if (is_last_call) {
- }
break;
}
}
@@ -42,8 +40,15 @@ void KeplerMemory::CallMethod(u32 method, u32 method_argument, bool is_last_call
void KeplerMemory::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
u32 methods_pending) {
- for (std::size_t i = 0; i < amount; i++) {
- CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1);
+ switch (method) {
+ case KEPLERMEMORY_REG_INDEX(data):
+ upload_state.ProcessData(base_start, static_cast<size_t>(amount));
+ return;
+ default:
+ for (std::size_t i = 0; i < amount; i++) {
+ CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1);
+ }
+ break;
}
}
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp
index 3a4646289..3c6e44a25 100644
--- a/src/video_core/engines/maxwell_3d.cpp
+++ b/src/video_core/engines/maxwell_3d.cpp
@@ -219,6 +219,8 @@ void Maxwell3D::ProcessMethodCall(u32 method, u32 argument, u32 nonshadow_argume
regs.index_array.count = regs.small_index_2.count;
regs.index_array.first = regs.small_index_2.first;
dirty.flags[VideoCommon::Dirty::IndexBuffer] = true;
+        // A macro calls this method repeatedly; should it increase the instance count?
+ // Used by Hades and likely other Vulkan games.
return DrawArrays();
case MAXWELL3D_REG_INDEX(topology_override):
use_topology_override = true;
@@ -237,11 +239,12 @@ void Maxwell3D::ProcessMethodCall(u32 method, u32 argument, u32 nonshadow_argume
return upload_state.ProcessExec(regs.exec_upload.linear != 0);
case MAXWELL3D_REG_INDEX(data_upload):
upload_state.ProcessData(argument, is_last_call);
- if (is_last_call) {
- }
return;
case MAXWELL3D_REG_INDEX(fragment_barrier):
return rasterizer->FragmentBarrier();
+ case MAXWELL3D_REG_INDEX(invalidate_texture_data_cache):
+ rasterizer->InvalidateGPUCache();
+ return rasterizer->WaitForIdle();
case MAXWELL3D_REG_INDEX(tiled_cache_barrier):
return rasterizer->TiledCacheBarrier();
}
@@ -311,6 +314,9 @@ void Maxwell3D::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 15:
ProcessCBMultiData(base_start, amount);
break;
+ case MAXWELL3D_REG_INDEX(data_upload):
+ upload_state.ProcessData(base_start, static_cast<size_t>(amount));
+ return;
default:
for (std::size_t i = 0; i < amount; i++) {
CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1);
@@ -447,18 +453,10 @@ void Maxwell3D::ProcessFirmwareCall4() {
}
void Maxwell3D::StampQueryResult(u64 payload, bool long_query) {
- struct LongQueryResult {
- u64_le value;
- u64_le timestamp;
- };
- static_assert(sizeof(LongQueryResult) == 16, "LongQueryResult has wrong size");
const GPUVAddr sequence_address{regs.query.QueryAddress()};
if (long_query) {
- // Write the 128-bit result structure in long mode. Note: We emulate an infinitely fast
- // GPU, this command may actually take a while to complete in real hardware due to GPU
- // wait queues.
- LongQueryResult query_result{payload, system.GPU().GetTicks()};
- memory_manager.WriteBlock(sequence_address, &query_result, sizeof(query_result));
+ memory_manager.Write<u64>(sequence_address + sizeof(u64), system.GPU().GetTicks());
+ memory_manager.Write<u64>(sequence_address, payload);
} else {
memory_manager.Write<u32>(sequence_address, static_cast<u32>(payload));
}
@@ -472,10 +470,25 @@ void Maxwell3D::ProcessQueryGet() {
switch (regs.query.query_get.operation) {
case Regs::QueryOperation::Release:
- if (regs.query.query_get.fence == 1) {
- rasterizer->SignalSemaphore(regs.query.QueryAddress(), regs.query.query_sequence);
+ if (regs.query.query_get.fence == 1 || regs.query.query_get.short_query != 0) {
+ const GPUVAddr sequence_address{regs.query.QueryAddress()};
+ const u32 payload = regs.query.query_sequence;
+ std::function<void()> operation([this, sequence_address, payload] {
+ memory_manager.Write<u32>(sequence_address, payload);
+ });
+ rasterizer->SignalFence(std::move(operation));
} else {
- StampQueryResult(regs.query.query_sequence, regs.query.query_get.short_query == 0);
+ struct LongQueryResult {
+ u64_le value;
+ u64_le timestamp;
+ };
+ const GPUVAddr sequence_address{regs.query.QueryAddress()};
+ const u32 payload = regs.query.query_sequence;
+ std::function<void()> operation([this, sequence_address, payload] {
+ memory_manager.Write<u64>(sequence_address + sizeof(u64), system.GPU().GetTicks());
+ memory_manager.Write<u64>(sequence_address, payload);
+ });
+ rasterizer->SyncOperation(std::move(operation));
}
break;
case Regs::QueryOperation::Acquire:
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp
index 0efe58282..3909d36c1 100644
--- a/src/video_core/engines/maxwell_dma.cpp
+++ b/src/video_core/engines/maxwell_dma.cpp
@@ -1,6 +1,7 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
+#include "common/algorithm.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/microprofile.h"
@@ -54,8 +55,6 @@ void MaxwellDMA::Launch() {
const LaunchDMA& launch = regs.launch_dma;
ASSERT(launch.interrupt_type == LaunchDMA::InterruptType::NONE);
ASSERT(launch.data_transfer_type == LaunchDMA::DataTransferType::NON_PIPELINED);
- ASSERT(regs.dst_params.origin.x == 0);
- ASSERT(regs.dst_params.origin.y == 0);
const bool is_src_pitch = launch.src_memory_layout == LaunchDMA::MemoryLayout::PITCH;
const bool is_dst_pitch = launch.dst_memory_layout == LaunchDMA::MemoryLayout::PITCH;
@@ -121,23 +120,40 @@ void MaxwellDMA::CopyPitchToPitch() {
void MaxwellDMA::CopyBlockLinearToPitch() {
UNIMPLEMENTED_IF(regs.src_params.block_size.width != 0);
- UNIMPLEMENTED_IF(regs.src_params.block_size.depth != 0);
UNIMPLEMENTED_IF(regs.src_params.layer != 0);
+ const bool is_remapping = regs.launch_dma.remap_enable != 0;
+
// Optimized path for micro copies.
const size_t dst_size = static_cast<size_t>(regs.pitch_out) * regs.line_count;
- if (dst_size < GOB_SIZE && regs.pitch_out <= GOB_SIZE_X &&
+ if (!is_remapping && dst_size < GOB_SIZE && regs.pitch_out <= GOB_SIZE_X &&
regs.src_params.height > GOB_SIZE_Y) {
FastCopyBlockLinearToPitch();
return;
}
// Deswizzle the input and copy it over.
- UNIMPLEMENTED_IF(regs.launch_dma.remap_enable != 0);
- const u32 bytes_per_pixel =
- regs.launch_dma.remap_enable ? regs.pitch_out / regs.line_length_in : 1;
const Parameters& src_params = regs.src_params;
- const u32 width = src_params.width;
+
+ const u32 num_remap_components = regs.remap_const.num_dst_components_minus_one + 1;
+ const u32 remap_components_size = regs.remap_const.component_size_minus_one + 1;
+
+ const u32 base_bpp = !is_remapping ? 1U : num_remap_components * remap_components_size;
+
+ u32 width = src_params.width;
+ u32 x_elements = regs.line_length_in;
+ u32 x_offset = src_params.origin.x;
+ u32 bpp_shift = 0U;
+ if (!is_remapping) {
+ bpp_shift = Common::FoldRight(
+ 4U, [](u32 x, u32 y) { return std::min(x, static_cast<u32>(std::countr_zero(y))); },
+ width, x_elements, x_offset, static_cast<u32>(regs.offset_in));
+ width >>= bpp_shift;
+ x_elements >>= bpp_shift;
+ x_offset >>= bpp_shift;
+ }
+
+ const u32 bytes_per_pixel = base_bpp << bpp_shift;
const u32 height = src_params.height;
const u32 depth = src_params.depth;
const u32 block_height = src_params.block_size.height;
@@ -155,30 +171,45 @@ void MaxwellDMA::CopyBlockLinearToPitch() {
memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);
- UnswizzleSubrect(regs.line_length_in, regs.line_count, regs.pitch_out, width, bytes_per_pixel,
- block_height, src_params.origin.x, src_params.origin.y, write_buffer.data(),
- read_buffer.data());
+ UnswizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, width, height, depth, x_offset,
+ src_params.origin.y, x_elements, regs.line_count, block_height, block_depth,
+ regs.pitch_out);
memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size);
}
void MaxwellDMA::CopyPitchToBlockLinear() {
UNIMPLEMENTED_IF_MSG(regs.dst_params.block_size.width != 0, "Block width is not one");
- UNIMPLEMENTED_IF(regs.launch_dma.remap_enable != 0);
+ UNIMPLEMENTED_IF(regs.dst_params.layer != 0);
+
+ const bool is_remapping = regs.launch_dma.remap_enable != 0;
+ const u32 num_remap_components = regs.remap_const.num_dst_components_minus_one + 1;
+ const u32 remap_components_size = regs.remap_const.component_size_minus_one + 1;
const auto& dst_params = regs.dst_params;
- const u32 bytes_per_pixel =
- regs.launch_dma.remap_enable ? regs.pitch_in / regs.line_length_in : 1;
- const u32 width = dst_params.width;
+
+ const u32 base_bpp = !is_remapping ? 1U : num_remap_components * remap_components_size;
+
+ u32 width = dst_params.width;
+ u32 x_elements = regs.line_length_in;
+ u32 x_offset = dst_params.origin.x;
+ u32 bpp_shift = 0U;
+ if (!is_remapping) {
+ bpp_shift = Common::FoldRight(
+ 4U, [](u32 x, u32 y) { return std::min(x, static_cast<u32>(std::countr_zero(y))); },
+ width, x_elements, x_offset, static_cast<u32>(regs.offset_out));
+ width >>= bpp_shift;
+ x_elements >>= bpp_shift;
+ x_offset >>= bpp_shift;
+ }
+
+ const u32 bytes_per_pixel = base_bpp << bpp_shift;
const u32 height = dst_params.height;
const u32 depth = dst_params.depth;
const u32 block_height = dst_params.block_size.height;
const u32 block_depth = dst_params.block_size.depth;
const size_t dst_size =
CalculateSize(true, bytes_per_pixel, width, height, depth, block_height, block_depth);
- const size_t dst_layer_size =
- CalculateSize(true, bytes_per_pixel, width, height, 1, block_height, block_depth);
-
const size_t src_size = static_cast<size_t>(regs.pitch_in) * regs.line_count;
if (read_buffer.size() < src_size) {
@@ -188,32 +219,23 @@ void MaxwellDMA::CopyPitchToBlockLinear() {
write_buffer.resize(dst_size);
}
+ memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
if (Settings::IsGPULevelExtreme()) {
- memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);
} else {
- memory_manager.ReadBlockUnsafe(regs.offset_in, read_buffer.data(), src_size);
memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size);
}
// If the input is linear and the output is tiled, swizzle the input and copy it over.
- if (regs.dst_params.block_size.depth > 0) {
- ASSERT(dst_params.layer == 0);
- SwizzleSliceToVoxel(regs.line_length_in, regs.line_count, regs.pitch_in, width, height,
- bytes_per_pixel, block_height, block_depth, dst_params.origin.x,
- dst_params.origin.y, write_buffer.data(), read_buffer.data());
- } else {
- SwizzleSubrect(regs.line_length_in, regs.line_count, regs.pitch_in, width, bytes_per_pixel,
- write_buffer.data() + dst_layer_size * dst_params.layer, read_buffer.data(),
- block_height, dst_params.origin.x, dst_params.origin.y);
- }
+ SwizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, width, height, depth, x_offset,
+ dst_params.origin.y, x_elements, regs.line_count, block_height, block_depth,
+ regs.pitch_in);
memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size);
}
void MaxwellDMA::FastCopyBlockLinearToPitch() {
- const u32 bytes_per_pixel =
- regs.launch_dma.remap_enable ? regs.pitch_out / regs.line_length_in : 1;
+ const u32 bytes_per_pixel = 1U;
const size_t src_size = GOB_SIZE;
const size_t dst_size = static_cast<size_t>(regs.pitch_out) * regs.line_count;
u32 pos_x = regs.src_params.origin.x;
@@ -239,9 +261,10 @@ void MaxwellDMA::FastCopyBlockLinearToPitch() {
memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size);
}
- UnswizzleSubrect(regs.line_length_in, regs.line_count, regs.pitch_out, regs.src_params.width,
- bytes_per_pixel, regs.src_params.block_size.height, pos_x, pos_y,
- write_buffer.data(), read_buffer.data());
+ UnswizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, regs.src_params.width,
+ regs.src_params.height, 1, pos_x, pos_y, regs.line_length_in, regs.line_count,
+ regs.src_params.block_size.height, regs.src_params.block_size.depth,
+ regs.pitch_out);
memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size);
}
@@ -249,16 +272,24 @@ void MaxwellDMA::FastCopyBlockLinearToPitch() {
void MaxwellDMA::ReleaseSemaphore() {
const auto type = regs.launch_dma.semaphore_type;
const GPUVAddr address = regs.semaphore.address;
+ const u32 payload = regs.semaphore.payload;
switch (type) {
case LaunchDMA::SemaphoreType::NONE:
break;
- case LaunchDMA::SemaphoreType::RELEASE_ONE_WORD_SEMAPHORE:
- memory_manager.Write<u32>(address, regs.semaphore.payload);
+ case LaunchDMA::SemaphoreType::RELEASE_ONE_WORD_SEMAPHORE: {
+ std::function<void()> operation(
+ [this, address, payload] { memory_manager.Write<u32>(address, payload); });
+ rasterizer->SignalFence(std::move(operation));
break;
- case LaunchDMA::SemaphoreType::RELEASE_FOUR_WORD_SEMAPHORE:
- memory_manager.Write<u64>(address, static_cast<u64>(regs.semaphore.payload));
- memory_manager.Write<u64>(address + 8, system.GPU().GetTicks());
+ }
+ case LaunchDMA::SemaphoreType::RELEASE_FOUR_WORD_SEMAPHORE: {
+ std::function<void()> operation([this, address, payload] {
+ memory_manager.Write<u64>(address + sizeof(u64), system.GPU().GetTicks());
+ memory_manager.Write<u64>(address, payload);
+ });
+ rasterizer->SignalFence(std::move(operation));
break;
+ }
default:
ASSERT_MSG(false, "Unknown semaphore type: {}", static_cast<u32>(type.Value()));
}
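// A minimal sketch of the deferred-write pattern adopted above: the semaphore payload write is
// captured in a std::function and handed to the fence machinery (SignalFence), which runs it
// once the host GPU has caught up. FakeFenceQueue is a simplified stand-in for the rasterizer's
// fence manager.
#include <cstdint>
#include <functional>
#include <utility>
#include <vector>

struct FakeFenceQueue {
    std::vector<std::function<void()>> pending;
    void SignalFence(std::function<void()>&& op) {
        pending.push_back(std::move(op));
    }
    void ReleaseFences() { // invoked once the host work is known to be complete
        for (auto& op : pending) {
            op();
        }
        pending.clear();
    }
};

void ReleaseOneWordSemaphore(FakeFenceQueue& fences, std::uint32_t* guest_word,
                             std::uint32_t payload) {
    fences.SignalFence([guest_word, payload] { *guest_word = payload; });
}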
diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h
index 074bac92c..bc48320ce 100644
--- a/src/video_core/engines/maxwell_dma.h
+++ b/src/video_core/engines/maxwell_dma.h
@@ -189,10 +189,16 @@ public:
BitField<4, 3, Swizzle> dst_y;
BitField<8, 3, Swizzle> dst_z;
BitField<12, 3, Swizzle> dst_w;
+ BitField<0, 12, u32> dst_components_raw;
BitField<16, 2, u32> component_size_minus_one;
BitField<20, 2, u32> num_src_components_minus_one;
BitField<24, 2, u32> num_dst_components_minus_one;
};
+
+ Swizzle GetComponent(size_t i) const {
+ const u32 raw = dst_components_raw;
+ return static_cast<Swizzle>((raw >> (i * 3)) & 0x7);
+ }
};
static_assert(sizeof(RemapConst) == 12);
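// The decode above, worked through: dst_components_raw packs the four 3-bit swizzle selectors
// (dst_x/y/z/w) into bits 0..11, so GetComponent(i) extracts bits [3*i, 3*i + 3).
// ExtractComponent below is a standalone restatement for illustration.
#include <cstddef>
#include <cstdint>
constexpr std::uint32_t ExtractComponent(std::uint32_t raw, std::size_t i) {
    return (raw >> (i * 3)) & 0x7;
}
// raw = 0x8D1 encodes the selectors {1, 2, 3, 4} for components x, y, z, w.
static_assert(ExtractComponent(0x8D1, 0) == 1 && ExtractComponent(0x8D1, 1) == 2 &&
              ExtractComponent(0x8D1, 2) == 3 && ExtractComponent(0x8D1, 3) == 4);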
diff --git a/src/video_core/engines/puller.cpp b/src/video_core/engines/puller.cpp
new file mode 100644
index 000000000..cca890792
--- /dev/null
+++ b/src/video_core/engines/puller.cpp
@@ -0,0 +1,306 @@
+// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "common/assert.h"
+#include "common/logging/log.h"
+#include "common/settings.h"
+#include "core/core.h"
+#include "video_core/control/channel_state.h"
+#include "video_core/dma_pusher.h"
+#include "video_core/engines/fermi_2d.h"
+#include "video_core/engines/kepler_compute.h"
+#include "video_core/engines/kepler_memory.h"
+#include "video_core/engines/maxwell_3d.h"
+#include "video_core/engines/maxwell_dma.h"
+#include "video_core/engines/puller.h"
+#include "video_core/gpu.h"
+#include "video_core/memory_manager.h"
+#include "video_core/rasterizer_interface.h"
+
+namespace Tegra::Engines {
+
+Puller::Puller(GPU& gpu_, MemoryManager& memory_manager_, DmaPusher& dma_pusher_,
+ Control::ChannelState& channel_state_)
+ : gpu{gpu_}, memory_manager{memory_manager_}, dma_pusher{dma_pusher_}, channel_state{
+ channel_state_} {}
+
+Puller::~Puller() = default;
+
+void Puller::ProcessBindMethod(const MethodCall& method_call) {
+ // Bind the current subchannel to the desired engine id.
+ LOG_DEBUG(HW_GPU, "Binding subchannel {} to engine {}", method_call.subchannel,
+ method_call.argument);
+ const auto engine_id = static_cast<EngineID>(method_call.argument);
+ bound_engines[method_call.subchannel] = static_cast<EngineID>(engine_id);
+ switch (engine_id) {
+ case EngineID::FERMI_TWOD_A:
+ dma_pusher.BindSubchannel(channel_state.fermi_2d.get(), method_call.subchannel);
+ break;
+ case EngineID::MAXWELL_B:
+ dma_pusher.BindSubchannel(channel_state.maxwell_3d.get(), method_call.subchannel);
+ break;
+ case EngineID::KEPLER_COMPUTE_B:
+ dma_pusher.BindSubchannel(channel_state.kepler_compute.get(), method_call.subchannel);
+ break;
+ case EngineID::MAXWELL_DMA_COPY_A:
+ dma_pusher.BindSubchannel(channel_state.maxwell_dma.get(), method_call.subchannel);
+ break;
+ case EngineID::KEPLER_INLINE_TO_MEMORY_B:
+ dma_pusher.BindSubchannel(channel_state.kepler_memory.get(), method_call.subchannel);
+ break;
+ default:
+ UNIMPLEMENTED_MSG("Unimplemented engine {:04X}", engine_id);
+ }
+}
+
+void Puller::ProcessFenceActionMethod() {
+ switch (regs.fence_action.op) {
+ case Puller::FenceOperation::Acquire:
+ // UNIMPLEMENTED_MSG("Channel Scheduling pending.");
+ // WaitFence(regs.fence_action.syncpoint_id, regs.fence_value);
+ rasterizer->ReleaseFences();
+ break;
+ case Puller::FenceOperation::Increment:
+ rasterizer->SignalSyncPoint(regs.fence_action.syncpoint_id);
+ break;
+ default:
+ UNIMPLEMENTED_MSG("Unimplemented operation {}", regs.fence_action.op.Value());
+ }
+}
+
+void Puller::ProcessSemaphoreTriggerMethod() {
+ const auto semaphoreOperationMask = 0xF;
+ const auto op =
+ static_cast<GpuSemaphoreOperation>(regs.semaphore_trigger & semaphoreOperationMask);
+ if (op == GpuSemaphoreOperation::WriteLong) {
+ const GPUVAddr sequence_address{regs.semaphore_address.SemaphoreAddress()};
+ const u32 payload = regs.semaphore_sequence;
+ std::function<void()> operation([this, sequence_address, payload] {
+ memory_manager.Write<u64>(sequence_address + sizeof(u64), gpu.GetTicks());
+ memory_manager.Write<u64>(sequence_address, payload);
+ });
+ rasterizer->SignalFence(std::move(operation));
+ } else {
+ do {
+ const u32 word{memory_manager.Read<u32>(regs.semaphore_address.SemaphoreAddress())};
+ regs.acquire_source = true;
+ regs.acquire_value = regs.semaphore_sequence;
+ if (op == GpuSemaphoreOperation::AcquireEqual) {
+ regs.acquire_active = true;
+ regs.acquire_mode = false;
+ if (word != regs.acquire_value) {
+ rasterizer->ReleaseFences();
+ continue;
+ }
+ } else if (op == GpuSemaphoreOperation::AcquireGequal) {
+ regs.acquire_active = true;
+ regs.acquire_mode = true;
+ if (word < regs.acquire_value) {
+ rasterizer->ReleaseFences();
+ continue;
+ }
+ } else if (op == GpuSemaphoreOperation::AcquireMask) {
+ if (word && regs.semaphore_sequence == 0) {
+ rasterizer->ReleaseFences();
+ continue;
+ }
+ } else {
+ LOG_ERROR(HW_GPU, "Invalid semaphore operation");
+ }
+ } while (false);
+ }
+}
+
+void Puller::ProcessSemaphoreRelease() {
+ const GPUVAddr sequence_address{regs.semaphore_address.SemaphoreAddress()};
+ const u32 payload = regs.semaphore_release;
+ std::function<void()> operation([this, sequence_address, payload] {
+ memory_manager.Write<u32>(sequence_address, payload);
+ });
+ rasterizer->SyncOperation(std::move(operation));
+}
+
+void Puller::ProcessSemaphoreAcquire() {
+ u32 word = memory_manager.Read<u32>(regs.semaphore_address.SemaphoreAddress());
+ const auto value = regs.semaphore_acquire;
+ while (word != value) {
+ regs.acquire_active = true;
+ regs.acquire_value = value;
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+ rasterizer->ReleaseFences();
+ word = memory_manager.Read<u32>(regs.semaphore_address.SemaphoreAddress());
+        // TODO(kemathe73): figure out how to handle acquire_timeout
+ regs.acquire_mode = false;
+ regs.acquire_source = false;
+ }
+}
+
+/// Calls a GPU puller method.
+void Puller::CallPullerMethod(const MethodCall& method_call) {
+ regs.reg_array[method_call.method] = method_call.argument;
+ const auto method = static_cast<BufferMethods>(method_call.method);
+
+ switch (method) {
+ case BufferMethods::BindObject: {
+ ProcessBindMethod(method_call);
+ break;
+ }
+ case BufferMethods::Nop:
+ case BufferMethods::SemaphoreAddressHigh:
+ case BufferMethods::SemaphoreAddressLow:
+ case BufferMethods::SemaphoreSequencePayload:
+ case BufferMethods::SyncpointPayload:
+ break;
+ case BufferMethods::WrcacheFlush:
+ case BufferMethods::RefCnt:
+ rasterizer->SignalReference();
+ break;
+ case BufferMethods::SyncpointOperation:
+ ProcessFenceActionMethod();
+ break;
+ case BufferMethods::WaitForIdle:
+ rasterizer->WaitForIdle();
+ break;
+ case BufferMethods::SemaphoreOperation: {
+ ProcessSemaphoreTriggerMethod();
+ break;
+ }
+ case BufferMethods::NonStallInterrupt: {
+ LOG_ERROR(HW_GPU, "Special puller engine method NonStallInterrupt not implemented");
+ break;
+ }
+ case BufferMethods::MemOpA: {
+ LOG_ERROR(HW_GPU, "Memory Operation A");
+ break;
+ }
+ case BufferMethods::MemOpB: {
+        // TODO: Implement this better.
+ rasterizer->InvalidateGPUCache();
+ break;
+ }
+ case BufferMethods::MemOpC:
+ case BufferMethods::MemOpD: {
+ LOG_ERROR(HW_GPU, "Memory Operation C,D");
+ break;
+ }
+ case BufferMethods::SemaphoreAcquire: {
+ ProcessSemaphoreAcquire();
+ break;
+ }
+ case BufferMethods::SemaphoreRelease: {
+ ProcessSemaphoreRelease();
+ break;
+ }
+ case BufferMethods::Yield: {
+ // TODO(Kmather73): Research and implement this method.
+ LOG_ERROR(HW_GPU, "Special puller engine method Yield not implemented");
+ break;
+ }
+ default:
+ LOG_ERROR(HW_GPU, "Special puller engine method {:X} not implemented", method);
+ break;
+ }
+}
+
+/// Calls a GPU engine method.
+void Puller::CallEngineMethod(const MethodCall& method_call) {
+ const EngineID engine = bound_engines[method_call.subchannel];
+
+ switch (engine) {
+ case EngineID::FERMI_TWOD_A:
+ channel_state.fermi_2d->CallMethod(method_call.method, method_call.argument,
+ method_call.IsLastCall());
+ break;
+ case EngineID::MAXWELL_B:
+ channel_state.maxwell_3d->CallMethod(method_call.method, method_call.argument,
+ method_call.IsLastCall());
+ break;
+ case EngineID::KEPLER_COMPUTE_B:
+ channel_state.kepler_compute->CallMethod(method_call.method, method_call.argument,
+ method_call.IsLastCall());
+ break;
+ case EngineID::MAXWELL_DMA_COPY_A:
+ channel_state.maxwell_dma->CallMethod(method_call.method, method_call.argument,
+ method_call.IsLastCall());
+ break;
+ case EngineID::KEPLER_INLINE_TO_MEMORY_B:
+ channel_state.kepler_memory->CallMethod(method_call.method, method_call.argument,
+ method_call.IsLastCall());
+ break;
+ default:
+ UNIMPLEMENTED_MSG("Unimplemented engine");
+ }
+}
+
+/// Calls a GPU engine multivalue method.
+void Puller::CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
+ u32 methods_pending) {
+ const EngineID engine = bound_engines[subchannel];
+
+ switch (engine) {
+ case EngineID::FERMI_TWOD_A:
+ channel_state.fermi_2d->CallMultiMethod(method, base_start, amount, methods_pending);
+ break;
+ case EngineID::MAXWELL_B:
+ channel_state.maxwell_3d->CallMultiMethod(method, base_start, amount, methods_pending);
+ break;
+ case EngineID::KEPLER_COMPUTE_B:
+ channel_state.kepler_compute->CallMultiMethod(method, base_start, amount, methods_pending);
+ break;
+ case EngineID::MAXWELL_DMA_COPY_A:
+ channel_state.maxwell_dma->CallMultiMethod(method, base_start, amount, methods_pending);
+ break;
+ case EngineID::KEPLER_INLINE_TO_MEMORY_B:
+ channel_state.kepler_memory->CallMultiMethod(method, base_start, amount, methods_pending);
+ break;
+ default:
+ UNIMPLEMENTED_MSG("Unimplemented engine");
+ }
+}
+
+/// Calls a GPU method.
+void Puller::CallMethod(const MethodCall& method_call) {
+ LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method_call.method,
+ method_call.subchannel);
+
+ ASSERT(method_call.subchannel < bound_engines.size());
+
+ if (ExecuteMethodOnEngine(method_call.method)) {
+ CallEngineMethod(method_call);
+ } else {
+ CallPullerMethod(method_call);
+ }
+}
+
+/// Calls a GPU multivalue method.
+void Puller::CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
+ u32 methods_pending) {
+ LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method, subchannel);
+
+ ASSERT(subchannel < bound_engines.size());
+
+ if (ExecuteMethodOnEngine(method)) {
+ CallEngineMultiMethod(method, subchannel, base_start, amount, methods_pending);
+ } else {
+ for (std::size_t i = 0; i < amount; i++) {
+ CallPullerMethod(MethodCall{
+ method,
+ base_start[i],
+ subchannel,
+ methods_pending - static_cast<u32>(i),
+ });
+ }
+ }
+}
+
+void Puller::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) {
+ rasterizer = rasterizer_;
+}
+
+/// Determines whether the method should be executed on a bound engine rather than the puller.
+[[nodiscard]] bool Puller::ExecuteMethodOnEngine(u32 method) {
+ const auto buffer_method = static_cast<BufferMethods>(method);
+ return buffer_method >= BufferMethods::NonPullerMethods;
+}
+
+} // namespace Tegra::Engines
diff --git a/src/video_core/engines/puller.h b/src/video_core/engines/puller.h
new file mode 100644
index 000000000..d4175ee94
--- /dev/null
+++ b/src/video_core/engines/puller.h
@@ -0,0 +1,177 @@
+// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#pragma once
+
+#include <array>
+#include <cstddef>
+#include <vector>
+#include "common/bit_field.h"
+#include "common/common_funcs.h"
+#include "common/common_types.h"
+#include "video_core/engines/engine_interface.h"
+
+namespace Core {
+class System;
+}
+
+namespace Tegra {
+class MemoryManager;
+class DmaPusher;
+
+enum class EngineID {
+ FERMI_TWOD_A = 0x902D, // 2D Engine
+ MAXWELL_B = 0xB197, // 3D Engine
+ KEPLER_COMPUTE_B = 0xB1C0,
+ KEPLER_INLINE_TO_MEMORY_B = 0xA140,
+ MAXWELL_DMA_COPY_A = 0xB0B5,
+};
+
+namespace Control {
+struct ChannelState;
+}
+} // namespace Tegra
+
+namespace VideoCore {
+class RasterizerInterface;
+}
+
+namespace Tegra::Engines {
+
+class Puller final {
+public:
+ struct MethodCall {
+ u32 method{};
+ u32 argument{};
+ u32 subchannel{};
+ u32 method_count{};
+
+ explicit MethodCall(u32 method_, u32 argument_, u32 subchannel_ = 0, u32 method_count_ = 0)
+ : method(method_), argument(argument_), subchannel(subchannel_),
+ method_count(method_count_) {}
+
+ [[nodiscard]] bool IsLastCall() const {
+ return method_count <= 1;
+ }
+ };
+
+ enum class FenceOperation : u32 {
+ Acquire = 0,
+ Increment = 1,
+ };
+
+ union FenceAction {
+ u32 raw;
+ BitField<0, 1, FenceOperation> op;
+ BitField<8, 24, u32> syncpoint_id;
+ };
+
+ explicit Puller(GPU& gpu_, MemoryManager& memory_manager_, DmaPusher& dma_pusher,
+ Control::ChannelState& channel_state);
+ ~Puller();
+
+ void CallMethod(const MethodCall& method_call);
+
+ void CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
+ u32 methods_pending);
+
+ void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);
+
+ void CallPullerMethod(const MethodCall& method_call);
+
+ void CallEngineMethod(const MethodCall& method_call);
+
+ void CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
+ u32 methods_pending);
+
+private:
+ Tegra::GPU& gpu;
+
+ MemoryManager& memory_manager;
+ DmaPusher& dma_pusher;
+ Control::ChannelState& channel_state;
+ VideoCore::RasterizerInterface* rasterizer = nullptr;
+
+ static constexpr std::size_t NUM_REGS = 0x800;
+ struct Regs {
+ static constexpr size_t NUM_REGS = 0x40;
+
+ union {
+ struct {
+ INSERT_PADDING_WORDS_NOINIT(0x4);
+ struct {
+ u32 address_high;
+ u32 address_low;
+
+ [[nodiscard]] GPUVAddr SemaphoreAddress() const {
+ return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
+ address_low);
+ }
+ } semaphore_address;
+
+ u32 semaphore_sequence;
+ u32 semaphore_trigger;
+ INSERT_PADDING_WORDS_NOINIT(0xC);
+
+            // The pusher and the puller share the reference counter; the pusher only has read
+            // access
+ u32 reference_count;
+ INSERT_PADDING_WORDS_NOINIT(0x5);
+
+ u32 semaphore_acquire;
+ u32 semaphore_release;
+ u32 fence_value;
+ FenceAction fence_action;
+ INSERT_PADDING_WORDS_NOINIT(0xE2);
+
+ // Puller state
+ u32 acquire_mode;
+ u32 acquire_source;
+ u32 acquire_active;
+ u32 acquire_timeout;
+ u32 acquire_value;
+ };
+ std::array<u32, NUM_REGS> reg_array;
+ };
+ } regs{};
+
+ void ProcessBindMethod(const MethodCall& method_call);
+ void ProcessFenceActionMethod();
+ void ProcessSemaphoreAcquire();
+ void ProcessSemaphoreRelease();
+ void ProcessSemaphoreTriggerMethod();
+ [[nodiscard]] bool ExecuteMethodOnEngine(u32 method);
+
+ /// Mapping of command subchannels to their bound engine ids
+ std::array<EngineID, 8> bound_engines{};
+
+ enum class GpuSemaphoreOperation {
+ AcquireEqual = 0x1,
+ WriteLong = 0x2,
+ AcquireGequal = 0x4,
+ AcquireMask = 0x8,
+ };
+
+#define ASSERT_REG_POSITION(field_name, position) \
+ static_assert(offsetof(Regs, field_name) == position * 4, \
+ "Field " #field_name " has invalid position")
+
+ ASSERT_REG_POSITION(semaphore_address, 0x4);
+ ASSERT_REG_POSITION(semaphore_sequence, 0x6);
+ ASSERT_REG_POSITION(semaphore_trigger, 0x7);
+ ASSERT_REG_POSITION(reference_count, 0x14);
+ ASSERT_REG_POSITION(semaphore_acquire, 0x1A);
+ ASSERT_REG_POSITION(semaphore_release, 0x1B);
+ ASSERT_REG_POSITION(fence_value, 0x1C);
+ ASSERT_REG_POSITION(fence_action, 0x1D);
+
+ ASSERT_REG_POSITION(acquire_mode, 0x100);
+ ASSERT_REG_POSITION(acquire_source, 0x101);
+ ASSERT_REG_POSITION(acquire_active, 0x102);
+ ASSERT_REG_POSITION(acquire_timeout, 0x103);
+ ASSERT_REG_POSITION(acquire_value, 0x104);
+
+#undef ASSERT_REG_POSITION
+};
+
+} // namespace Tegra::Engines
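
FenceAction, declared above, packs a one-bit operation code and a 24-bit syncpoint id into a single 32-bit register word. A hand-rolled decode of the same layout, as a sketch (the real code uses the BitField helpers rather than manual shifts):

    #include <cstdint>

    struct DecodedFenceAction {
        bool increment;         // bit 0: 0 = Acquire, 1 = Increment
        uint32_t syncpoint_id;  // bits 8..31
    };

    DecodedFenceAction DecodeFenceAction(uint32_t raw) {
        DecodedFenceAction out{};
        out.increment = (raw & 0x1u) != 0;
        out.syncpoint_id = (raw >> 8) & 0x00FF'FFFFu;
        return out;
    }
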
diff --git a/src/video_core/fence_manager.h b/src/video_core/fence_manager.h
index 1e9832ddd..c390ac91b 100644
--- a/src/video_core/fence_manager.h
+++ b/src/video_core/fence_manager.h
@@ -4,40 +4,24 @@
#pragma once
#include <algorithm>
+#include <cstring>
+#include <deque>
+#include <functional>
+#include <memory>
#include <queue>
#include "common/common_types.h"
#include "video_core/delayed_destruction_ring.h"
#include "video_core/gpu.h"
-#include "video_core/memory_manager.h"
+#include "video_core/host1x/host1x.h"
+#include "video_core/host1x/syncpoint_manager.h"
#include "video_core/rasterizer_interface.h"
namespace VideoCommon {
class FenceBase {
public:
- explicit FenceBase(u32 payload_, bool is_stubbed_)
- : address{}, payload{payload_}, is_semaphore{false}, is_stubbed{is_stubbed_} {}
-
- explicit FenceBase(GPUVAddr address_, u32 payload_, bool is_stubbed_)
- : address{address_}, payload{payload_}, is_semaphore{true}, is_stubbed{is_stubbed_} {}
-
- GPUVAddr GetAddress() const {
- return address;
- }
-
- u32 GetPayload() const {
- return payload;
- }
-
- bool IsSemaphore() const {
- return is_semaphore;
- }
-
-private:
- GPUVAddr address;
- u32 payload;
- bool is_semaphore;
+ explicit FenceBase(bool is_stubbed_) : is_stubbed{is_stubbed_} {}
protected:
bool is_stubbed;
@@ -57,30 +41,28 @@ public:
buffer_cache.AccumulateFlushes();
}
- void SignalSemaphore(GPUVAddr addr, u32 value) {
+ void SyncOperation(std::function<void()>&& func) {
+ uncommitted_operations.emplace_back(std::move(func));
+ }
+
+ void SignalFence(std::function<void()>&& func) {
TryReleasePendingFences();
const bool should_flush = ShouldFlush();
CommitAsyncFlushes();
- TFence new_fence = CreateFence(addr, value, !should_flush);
+ uncommitted_operations.emplace_back(std::move(func));
+ CommitOperations();
+ TFence new_fence = CreateFence(!should_flush);
fences.push(new_fence);
QueueFence(new_fence);
if (should_flush) {
rasterizer.FlushCommands();
}
- rasterizer.SyncGuestHost();
}
void SignalSyncPoint(u32 value) {
- TryReleasePendingFences();
- const bool should_flush = ShouldFlush();
- CommitAsyncFlushes();
- TFence new_fence = CreateFence(value, !should_flush);
- fences.push(new_fence);
- QueueFence(new_fence);
- if (should_flush) {
- rasterizer.FlushCommands();
- }
- rasterizer.SyncGuestHost();
+ syncpoint_manager.IncrementGuest(value);
+ std::function<void()> func([this, value] { syncpoint_manager.IncrementHost(value); });
+ SignalFence(std::move(func));
}
void WaitPendingFences() {
@@ -90,11 +72,10 @@ public:
WaitFence(current_fence);
}
PopAsyncFlushes();
- if (current_fence->IsSemaphore()) {
- gpu_memory.template Write<u32>(current_fence->GetAddress(),
- current_fence->GetPayload());
- } else {
- gpu.IncrementSyncPoint(current_fence->GetPayload());
+ auto operations = std::move(pending_operations.front());
+ pending_operations.pop_front();
+ for (auto& operation : operations) {
+ operation();
}
PopFence();
}
@@ -104,16 +85,14 @@ protected:
explicit FenceManager(VideoCore::RasterizerInterface& rasterizer_, Tegra::GPU& gpu_,
TTextureCache& texture_cache_, TTBufferCache& buffer_cache_,
TQueryCache& query_cache_)
- : rasterizer{rasterizer_}, gpu{gpu_}, gpu_memory{gpu.MemoryManager()},
+ : rasterizer{rasterizer_}, gpu{gpu_}, syncpoint_manager{gpu.Host1x().GetSyncpointManager()},
texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, query_cache{query_cache_} {}
virtual ~FenceManager() = default;
- /// Creates a Sync Point Fence Interface, does not create a backend fence if 'is_stubbed' is
+    /// Creates a Fence Interface; does not create a backend fence if 'is_stubbed' is
/// true
- virtual TFence CreateFence(u32 value, bool is_stubbed) = 0;
- /// Creates a Semaphore Fence Interface, does not create a backend fence if 'is_stubbed' is true
- virtual TFence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) = 0;
+ virtual TFence CreateFence(bool is_stubbed) = 0;
/// Queues a fence into the backend if the fence isn't stubbed.
virtual void QueueFence(TFence& fence) = 0;
/// Notifies that the backend fence has been signaled/reached in host GPU.
@@ -123,7 +102,7 @@ protected:
VideoCore::RasterizerInterface& rasterizer;
Tegra::GPU& gpu;
- Tegra::MemoryManager& gpu_memory;
+ Tegra::Host1x::SyncpointManager& syncpoint_manager;
TTextureCache& texture_cache;
TTBufferCache& buffer_cache;
TQueryCache& query_cache;
@@ -136,11 +115,10 @@ private:
return;
}
PopAsyncFlushes();
- if (current_fence->IsSemaphore()) {
- gpu_memory.template Write<u32>(current_fence->GetAddress(),
- current_fence->GetPayload());
- } else {
- gpu.IncrementSyncPoint(current_fence->GetPayload());
+ auto operations = std::move(pending_operations.front());
+ pending_operations.pop_front();
+ for (auto& operation : operations) {
+ operation();
}
PopFence();
}
@@ -159,16 +137,20 @@ private:
}
void PopAsyncFlushes() {
- std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
- texture_cache.PopAsyncFlushes();
- buffer_cache.PopAsyncFlushes();
+ {
+ std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+ texture_cache.PopAsyncFlushes();
+ buffer_cache.PopAsyncFlushes();
+ }
query_cache.PopAsyncFlushes();
}
void CommitAsyncFlushes() {
- std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
- texture_cache.CommitAsyncFlushes();
- buffer_cache.CommitAsyncFlushes();
+ {
+ std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+ texture_cache.CommitAsyncFlushes();
+ buffer_cache.CommitAsyncFlushes();
+ }
query_cache.CommitAsyncFlushes();
}
@@ -177,7 +159,13 @@ private:
fences.pop();
}
+ void CommitOperations() {
+ pending_operations.emplace_back(std::move(uncommitted_operations));
+ }
+
std::queue<TFence> fences;
+ std::deque<std::function<void()>> uncommitted_operations;
+ std::deque<std::deque<std::function<void()>>> pending_operations;
DelayedDestructionRing<TFence, 6> delayed_destruction_ring;
};
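
With these changes the fence manager no longer special-cases semaphore writes and syncpoint increments; callers queue arbitrary completion callbacks, each fence captures the batch queued so far, and the batch runs once the host signals that fence. A stripped-down sketch of just that bookkeeping (MiniFenceTracker and its members are illustrative names, not the yuzu API):

    #include <deque>
    #include <functional>

    class MiniFenceTracker {
    public:
        // Queue work that must run only after the next fence completes on the host.
        void QueueOperation(std::function<void()>&& op) {
            uncommitted.emplace_back(std::move(op));
        }

        // Emit a fence: everything queued so far becomes one pending batch.
        void SignalFence() {
            pending.emplace_back(std::move(uncommitted));
            uncommitted.clear();
        }

        // The host reached the oldest fence: run and drop its batch.
        void PopFence() {
            auto batch = std::move(pending.front());
            pending.pop_front();
            for (auto& op : batch) {
                op();
            }
        }

    private:
        std::deque<std::function<void()>> uncommitted;
        std::deque<std::deque<std::function<void()>>> pending;
    };
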
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index 33431f2a0..28b38273e 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -14,10 +14,11 @@
#include "core/core.h"
#include "core/core_timing.h"
#include "core/frontend/emu_window.h"
-#include "core/hardware_interrupt_manager.h"
#include "core/hle/service/nvdrv/nvdata.h"
#include "core/perf_stats.h"
#include "video_core/cdma_pusher.h"
+#include "video_core/control/channel_state.h"
+#include "video_core/control/scheduler.h"
#include "video_core/dma_pusher.h"
#include "video_core/engines/fermi_2d.h"
#include "video_core/engines/kepler_compute.h"
@@ -26,75 +27,64 @@
#include "video_core/engines/maxwell_dma.h"
#include "video_core/gpu.h"
#include "video_core/gpu_thread.h"
+#include "video_core/host1x/host1x.h"
+#include "video_core/host1x/syncpoint_manager.h"
#include "video_core/memory_manager.h"
#include "video_core/renderer_base.h"
#include "video_core/shader_notify.h"
namespace Tegra {
-MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192));
-
struct GPU::Impl {
explicit Impl(GPU& gpu_, Core::System& system_, bool is_async_, bool use_nvdec_)
- : gpu{gpu_}, system{system_}, memory_manager{std::make_unique<Tegra::MemoryManager>(
- system)},
- dma_pusher{std::make_unique<Tegra::DmaPusher>(system, gpu)}, use_nvdec{use_nvdec_},
- maxwell_3d{std::make_unique<Engines::Maxwell3D>(system, *memory_manager)},
- fermi_2d{std::make_unique<Engines::Fermi2D>()},
- kepler_compute{std::make_unique<Engines::KeplerCompute>(system, *memory_manager)},
- maxwell_dma{std::make_unique<Engines::MaxwellDMA>(system, *memory_manager)},
- kepler_memory{std::make_unique<Engines::KeplerMemory>(system, *memory_manager)},
+ : gpu{gpu_}, system{system_}, host1x{system.Host1x()}, use_nvdec{use_nvdec_},
shader_notify{std::make_unique<VideoCore::ShaderNotify>()}, is_async{is_async_},
- gpu_thread{system_, is_async_} {}
+ gpu_thread{system_, is_async_}, scheduler{std::make_unique<Control::Scheduler>(gpu)} {}
~Impl() = default;
- /// Binds a renderer to the GPU.
- void BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer_) {
- renderer = std::move(renderer_);
- rasterizer = renderer->ReadRasterizer();
-
- memory_manager->BindRasterizer(rasterizer);
- maxwell_3d->BindRasterizer(rasterizer);
- fermi_2d->BindRasterizer(rasterizer);
- kepler_compute->BindRasterizer(rasterizer);
- kepler_memory->BindRasterizer(rasterizer);
- maxwell_dma->BindRasterizer(rasterizer);
+ std::shared_ptr<Control::ChannelState> CreateChannel(s32 channel_id) {
+ auto channel_state = std::make_shared<Tegra::Control::ChannelState>(channel_id);
+ channels.emplace(channel_id, channel_state);
+ scheduler->DeclareChannel(channel_state);
+ return channel_state;
}
- /// Calls a GPU method.
- void CallMethod(const GPU::MethodCall& method_call) {
- LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method_call.method,
- method_call.subchannel);
+ void BindChannel(s32 channel_id) {
+ if (bound_channel == channel_id) {
+ return;
+ }
+ auto it = channels.find(channel_id);
+ ASSERT(it != channels.end());
+ bound_channel = channel_id;
+ current_channel = it->second.get();
- ASSERT(method_call.subchannel < bound_engines.size());
+ rasterizer->BindChannel(*current_channel);
+ }
- if (ExecuteMethodOnEngine(method_call.method)) {
- CallEngineMethod(method_call);
- } else {
- CallPullerMethod(method_call);
- }
+ std::shared_ptr<Control::ChannelState> AllocateChannel() {
+ return CreateChannel(new_channel_id++);
}
- /// Calls a GPU multivalue method.
- void CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
- u32 methods_pending) {
- LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method, subchannel);
+ void InitChannel(Control::ChannelState& to_init) {
+ to_init.Init(system, gpu);
+ to_init.BindRasterizer(rasterizer);
+ rasterizer->InitializeChannel(to_init);
+ }
- ASSERT(subchannel < bound_engines.size());
+ void InitAddressSpace(Tegra::MemoryManager& memory_manager) {
+ memory_manager.BindRasterizer(rasterizer);
+ }
- if (ExecuteMethodOnEngine(method)) {
- CallEngineMultiMethod(method, subchannel, base_start, amount, methods_pending);
- } else {
- for (std::size_t i = 0; i < amount; i++) {
- CallPullerMethod(GPU::MethodCall{
- method,
- base_start[i],
- subchannel,
- methods_pending - static_cast<u32>(i),
- });
- }
- }
+ void ReleaseChannel(Control::ChannelState& to_release) {
+ UNIMPLEMENTED();
+ }
+
+ /// Binds a renderer to the GPU.
+ void BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer_) {
+ renderer = std::move(renderer_);
+ rasterizer = renderer->ReadRasterizer();
+ host1x.MemoryManager().BindRasterizer(rasterizer);
}
/// Flush all current written commands into the host GPU for execution.
@@ -103,85 +93,82 @@ struct GPU::Impl {
}
/// Synchronizes CPU writes with Host GPU memory.
- void SyncGuestHost() {
- rasterizer->SyncGuestHost();
+ void InvalidateGPUCache() {
+ rasterizer->InvalidateGPUCache();
}
/// Signal the ending of command list.
void OnCommandListEnd() {
- if (is_async) {
- // This command only applies to asynchronous GPU mode
- gpu_thread.OnCommandListEnd();
- }
+ gpu_thread.OnCommandListEnd();
}
/// Request a host GPU memory flush from the CPU.
- [[nodiscard]] u64 RequestFlush(VAddr addr, std::size_t size) {
- std::unique_lock lck{flush_request_mutex};
- const u64 fence = ++last_flush_fence;
- flush_requests.emplace_back(fence, addr, size);
+ template <typename Func>
+ [[nodiscard]] u64 RequestSyncOperation(Func&& action) {
+ std::unique_lock lck{sync_request_mutex};
+ const u64 fence = ++last_sync_fence;
+ sync_requests.emplace_back(action);
return fence;
}
/// Obtains current flush request fence id.
- [[nodiscard]] u64 CurrentFlushRequestFence() const {
- return current_flush_fence.load(std::memory_order_relaxed);
+ [[nodiscard]] u64 CurrentSyncRequestFence() const {
+ return current_sync_fence.load(std::memory_order_relaxed);
+ }
+
+ void WaitForSyncOperation(const u64 fence) {
+ std::unique_lock lck{sync_request_mutex};
+ sync_request_cv.wait(lck, [this, fence] { return CurrentSyncRequestFence() >= fence; });
}
/// Tick pending requests within the GPU.
void TickWork() {
- std::unique_lock lck{flush_request_mutex};
- while (!flush_requests.empty()) {
- auto& request = flush_requests.front();
- const u64 fence = request.fence;
- const VAddr addr = request.addr;
- const std::size_t size = request.size;
- flush_requests.pop_front();
- flush_request_mutex.unlock();
- rasterizer->FlushRegion(addr, size);
- current_flush_fence.store(fence);
- flush_request_mutex.lock();
+ std::unique_lock lck{sync_request_mutex};
+ while (!sync_requests.empty()) {
+ auto request = std::move(sync_requests.front());
+ sync_requests.pop_front();
+ sync_request_mutex.unlock();
+ request();
+ current_sync_fence.fetch_add(1, std::memory_order_release);
+ sync_request_mutex.lock();
+ sync_request_cv.notify_all();
}
}
/// Returns a reference to the Maxwell3D GPU engine.
[[nodiscard]] Engines::Maxwell3D& Maxwell3D() {
- return *maxwell_3d;
+ ASSERT(current_channel);
+ return *current_channel->maxwell_3d;
}
/// Returns a const reference to the Maxwell3D GPU engine.
[[nodiscard]] const Engines::Maxwell3D& Maxwell3D() const {
- return *maxwell_3d;
+ ASSERT(current_channel);
+ return *current_channel->maxwell_3d;
}
/// Returns a reference to the KeplerCompute GPU engine.
[[nodiscard]] Engines::KeplerCompute& KeplerCompute() {
- return *kepler_compute;
+ ASSERT(current_channel);
+ return *current_channel->kepler_compute;
}
/// Returns a reference to the KeplerCompute GPU engine.
[[nodiscard]] const Engines::KeplerCompute& KeplerCompute() const {
- return *kepler_compute;
- }
-
- /// Returns a reference to the GPU memory manager.
- [[nodiscard]] Tegra::MemoryManager& MemoryManager() {
- return *memory_manager;
- }
-
- /// Returns a const reference to the GPU memory manager.
- [[nodiscard]] const Tegra::MemoryManager& MemoryManager() const {
- return *memory_manager;
+ ASSERT(current_channel);
+ return *current_channel->kepler_compute;
}
/// Returns a reference to the GPU DMA pusher.
[[nodiscard]] Tegra::DmaPusher& DmaPusher() {
- return *dma_pusher;
+ ASSERT(current_channel);
+ return *current_channel->dma_pusher;
}
/// Returns a const reference to the GPU DMA pusher.
[[nodiscard]] const Tegra::DmaPusher& DmaPusher() const {
- return *dma_pusher;
+ ASSERT(current_channel);
+ return *current_channel->dma_pusher;
}
/// Returns a reference to the underlying renderer.
@@ -204,77 +191,6 @@ struct GPU::Impl {
return *shader_notify;
}
- /// Allows the CPU/NvFlinger to wait on the GPU before presenting a frame.
- void WaitFence(u32 syncpoint_id, u32 value) {
- // Synced GPU, is always in sync
- if (!is_async) {
- return;
- }
- if (syncpoint_id == UINT32_MAX) {
- // TODO: Research what this does.
- LOG_ERROR(HW_GPU, "Waiting for syncpoint -1 not implemented");
- return;
- }
- MICROPROFILE_SCOPE(GPU_wait);
- std::unique_lock lock{sync_mutex};
- sync_cv.wait(lock, [=, this] {
- if (shutting_down.load(std::memory_order_relaxed)) {
- // We're shutting down, ensure no threads continue to wait for the next syncpoint
- return true;
- }
- return syncpoints.at(syncpoint_id).load() >= value;
- });
- }
-
- void IncrementSyncPoint(u32 syncpoint_id) {
- auto& syncpoint = syncpoints.at(syncpoint_id);
- syncpoint++;
- std::scoped_lock lock{sync_mutex};
- sync_cv.notify_all();
- auto& interrupt = syncpt_interrupts.at(syncpoint_id);
- if (!interrupt.empty()) {
- u32 value = syncpoint.load();
- auto it = interrupt.begin();
- while (it != interrupt.end()) {
- if (value >= *it) {
- TriggerCpuInterrupt(syncpoint_id, *it);
- it = interrupt.erase(it);
- continue;
- }
- it++;
- }
- }
- }
-
- [[nodiscard]] u32 GetSyncpointValue(u32 syncpoint_id) const {
- return syncpoints.at(syncpoint_id).load();
- }
-
- void RegisterSyncptInterrupt(u32 syncpoint_id, u32 value) {
- std::scoped_lock lock{sync_mutex};
- auto& interrupt = syncpt_interrupts.at(syncpoint_id);
- bool contains = std::any_of(interrupt.begin(), interrupt.end(),
- [value](u32 in_value) { return in_value == value; });
- if (contains) {
- return;
- }
- interrupt.emplace_back(value);
- }
-
- [[nodiscard]] bool CancelSyncptInterrupt(u32 syncpoint_id, u32 value) {
- std::scoped_lock lock{sync_mutex};
- auto& interrupt = syncpt_interrupts.at(syncpoint_id);
- const auto iter =
- std::find_if(interrupt.begin(), interrupt.end(),
- [value](u32 interrupt_value) { return value == interrupt_value; });
-
- if (iter == interrupt.end()) {
- return false;
- }
- interrupt.erase(iter);
- return true;
- }
-
[[nodiscard]] u64 GetTicks() const {
// These values were reverse engineered by fincs from NVN
// The gpu clock is reported in units of 385/625 nanoseconds
@@ -306,7 +222,7 @@ struct GPU::Impl {
/// This can be used to launch any necessary threads and register any necessary
/// core timing events.
void Start() {
- gpu_thread.StartThread(*renderer, renderer->Context(), *dma_pusher);
+ gpu_thread.StartThread(*renderer, renderer->Context(), *scheduler);
cpu_context = renderer->GetRenderWindow().CreateSharedContext();
cpu_context->MakeCurrent();
}
@@ -328,8 +244,8 @@ struct GPU::Impl {
}
/// Push GPU command entries to be processed
- void PushGPUEntries(Tegra::CommandList&& entries) {
- gpu_thread.SubmitList(std::move(entries));
+ void PushGPUEntries(s32 channel, Tegra::CommandList&& entries) {
+ gpu_thread.SubmitList(channel, std::move(entries));
}
/// Push GPU command buffer entries to be processed
@@ -339,7 +255,7 @@ struct GPU::Impl {
}
if (!cdma_pushers.contains(id)) {
- cdma_pushers.insert_or_assign(id, std::make_unique<Tegra::CDmaPusher>(gpu));
+ cdma_pushers.insert_or_assign(id, std::make_unique<Tegra::CDmaPusher>(host1x));
}
// SubmitCommandBuffer would make the nvdec operations async; this is not currently working
@@ -376,308 +292,55 @@ struct GPU::Impl {
gpu_thread.FlushAndInvalidateRegion(addr, size);
}
- void TriggerCpuInterrupt(u32 syncpoint_id, u32 value) const {
- auto& interrupt_manager = system.InterruptManager();
- interrupt_manager.GPUInterruptSyncpt(syncpoint_id, value);
- }
-
- void ProcessBindMethod(const GPU::MethodCall& method_call) {
- // Bind the current subchannel to the desired engine id.
- LOG_DEBUG(HW_GPU, "Binding subchannel {} to engine {}", method_call.subchannel,
- method_call.argument);
- const auto engine_id = static_cast<EngineID>(method_call.argument);
- bound_engines[method_call.subchannel] = static_cast<EngineID>(engine_id);
- switch (engine_id) {
- case EngineID::FERMI_TWOD_A:
- dma_pusher->BindSubchannel(fermi_2d.get(), method_call.subchannel);
- break;
- case EngineID::MAXWELL_B:
- dma_pusher->BindSubchannel(maxwell_3d.get(), method_call.subchannel);
- break;
- case EngineID::KEPLER_COMPUTE_B:
- dma_pusher->BindSubchannel(kepler_compute.get(), method_call.subchannel);
- break;
- case EngineID::MAXWELL_DMA_COPY_A:
- dma_pusher->BindSubchannel(maxwell_dma.get(), method_call.subchannel);
- break;
- case EngineID::KEPLER_INLINE_TO_MEMORY_B:
- dma_pusher->BindSubchannel(kepler_memory.get(), method_call.subchannel);
- break;
- default:
- UNIMPLEMENTED_MSG("Unimplemented engine {:04X}", engine_id);
- }
- }
-
- void ProcessFenceActionMethod() {
- switch (regs.fence_action.op) {
- case GPU::FenceOperation::Acquire:
- WaitFence(regs.fence_action.syncpoint_id, regs.fence_value);
- break;
- case GPU::FenceOperation::Increment:
- IncrementSyncPoint(regs.fence_action.syncpoint_id);
- break;
- default:
- UNIMPLEMENTED_MSG("Unimplemented operation {}", regs.fence_action.op.Value());
- }
- }
-
- void ProcessWaitForInterruptMethod() {
- // TODO(bunnei) ImplementMe
- LOG_WARNING(HW_GPU, "(STUBBED) called");
- }
-
- void ProcessSemaphoreTriggerMethod() {
- const auto semaphoreOperationMask = 0xF;
- const auto op =
- static_cast<GpuSemaphoreOperation>(regs.semaphore_trigger & semaphoreOperationMask);
- if (op == GpuSemaphoreOperation::WriteLong) {
- struct Block {
- u32 sequence;
- u32 zeros = 0;
- u64 timestamp;
- };
-
- Block block{};
- block.sequence = regs.semaphore_sequence;
- // TODO(Kmather73): Generate a real GPU timestamp and write it here instead of
- // CoreTiming
- block.timestamp = GetTicks();
- memory_manager->WriteBlock(regs.semaphore_address.SemaphoreAddress(), &block,
- sizeof(block));
- } else {
- const u32 word{memory_manager->Read<u32>(regs.semaphore_address.SemaphoreAddress())};
- if ((op == GpuSemaphoreOperation::AcquireEqual && word == regs.semaphore_sequence) ||
- (op == GpuSemaphoreOperation::AcquireGequal &&
- static_cast<s32>(word - regs.semaphore_sequence) > 0) ||
- (op == GpuSemaphoreOperation::AcquireMask && (word & regs.semaphore_sequence))) {
- // Nothing to do in this case
+ void RequestSwapBuffers(const Tegra::FramebufferConfig* framebuffer,
+ std::array<Service::Nvidia::NvFence, 4>& fences, size_t num_fences) {
+ size_t current_request_counter{};
+ {
+ std::unique_lock<std::mutex> lk(request_swap_mutex);
+ if (free_swap_counters.empty()) {
+ current_request_counter = request_swap_counters.size();
+ request_swap_counters.emplace_back(num_fences);
} else {
- regs.acquire_source = true;
- regs.acquire_value = regs.semaphore_sequence;
- if (op == GpuSemaphoreOperation::AcquireEqual) {
- regs.acquire_active = true;
- regs.acquire_mode = false;
- } else if (op == GpuSemaphoreOperation::AcquireGequal) {
- regs.acquire_active = true;
- regs.acquire_mode = true;
- } else if (op == GpuSemaphoreOperation::AcquireMask) {
- // TODO(kemathe) The acquire mask operation waits for a value that, ANDed with
- // semaphore_sequence, gives a non-0 result
- LOG_ERROR(HW_GPU, "Invalid semaphore operation AcquireMask not implemented");
- } else {
- LOG_ERROR(HW_GPU, "Invalid semaphore operation");
- }
+ current_request_counter = free_swap_counters.front();
+ request_swap_counters[current_request_counter] = num_fences;
+ free_swap_counters.pop_front();
}
}
- }
-
- void ProcessSemaphoreRelease() {
- memory_manager->Write<u32>(regs.semaphore_address.SemaphoreAddress(),
- regs.semaphore_release);
- }
-
- void ProcessSemaphoreAcquire() {
- const u32 word = memory_manager->Read<u32>(regs.semaphore_address.SemaphoreAddress());
- const auto value = regs.semaphore_acquire;
- if (word != value) {
- regs.acquire_active = true;
- regs.acquire_value = value;
- // TODO(kemathe73) figure out how to do the acquire_timeout
- regs.acquire_mode = false;
- regs.acquire_source = false;
- }
- }
-
- /// Calls a GPU puller method.
- void CallPullerMethod(const GPU::MethodCall& method_call) {
- regs.reg_array[method_call.method] = method_call.argument;
- const auto method = static_cast<BufferMethods>(method_call.method);
-
- switch (method) {
- case BufferMethods::BindObject: {
- ProcessBindMethod(method_call);
- break;
- }
- case BufferMethods::Nop:
- case BufferMethods::SemaphoreAddressHigh:
- case BufferMethods::SemaphoreAddressLow:
- case BufferMethods::SemaphoreSequence:
- break;
- case BufferMethods::UnkCacheFlush:
- rasterizer->SyncGuestHost();
- break;
- case BufferMethods::WrcacheFlush:
- rasterizer->SignalReference();
- break;
- case BufferMethods::FenceValue:
- break;
- case BufferMethods::RefCnt:
- rasterizer->SignalReference();
- break;
- case BufferMethods::FenceAction:
- ProcessFenceActionMethod();
- break;
- case BufferMethods::WaitForInterrupt:
- rasterizer->WaitForIdle();
- break;
- case BufferMethods::SemaphoreTrigger: {
- ProcessSemaphoreTriggerMethod();
- break;
- }
- case BufferMethods::NotifyIntr: {
- // TODO(Kmather73): Research and implement this method.
- LOG_ERROR(HW_GPU, "Special puller engine method NotifyIntr not implemented");
- break;
- }
- case BufferMethods::Unk28: {
- // TODO(Kmather73): Research and implement this method.
- LOG_ERROR(HW_GPU, "Special puller engine method Unk28 not implemented");
- break;
- }
- case BufferMethods::SemaphoreAcquire: {
- ProcessSemaphoreAcquire();
- break;
- }
- case BufferMethods::SemaphoreRelease: {
- ProcessSemaphoreRelease();
- break;
- }
- case BufferMethods::Yield: {
- // TODO(Kmather73): Research and implement this method.
- LOG_ERROR(HW_GPU, "Special puller engine method Yield not implemented");
- break;
- }
- default:
- LOG_ERROR(HW_GPU, "Special puller engine method {:X} not implemented", method);
- break;
- }
- }
-
- /// Calls a GPU engine method.
- void CallEngineMethod(const GPU::MethodCall& method_call) {
- const EngineID engine = bound_engines[method_call.subchannel];
-
- switch (engine) {
- case EngineID::FERMI_TWOD_A:
- fermi_2d->CallMethod(method_call.method, method_call.argument,
- method_call.IsLastCall());
- break;
- case EngineID::MAXWELL_B:
- maxwell_3d->CallMethod(method_call.method, method_call.argument,
- method_call.IsLastCall());
- break;
- case EngineID::KEPLER_COMPUTE_B:
- kepler_compute->CallMethod(method_call.method, method_call.argument,
- method_call.IsLastCall());
- break;
- case EngineID::MAXWELL_DMA_COPY_A:
- maxwell_dma->CallMethod(method_call.method, method_call.argument,
- method_call.IsLastCall());
- break;
- case EngineID::KEPLER_INLINE_TO_MEMORY_B:
- kepler_memory->CallMethod(method_call.method, method_call.argument,
- method_call.IsLastCall());
- break;
- default:
- UNIMPLEMENTED_MSG("Unimplemented engine");
- }
- }
-
- /// Calls a GPU engine multivalue method.
- void CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
- u32 methods_pending) {
- const EngineID engine = bound_engines[subchannel];
-
- switch (engine) {
- case EngineID::FERMI_TWOD_A:
- fermi_2d->CallMultiMethod(method, base_start, amount, methods_pending);
- break;
- case EngineID::MAXWELL_B:
- maxwell_3d->CallMultiMethod(method, base_start, amount, methods_pending);
- break;
- case EngineID::KEPLER_COMPUTE_B:
- kepler_compute->CallMultiMethod(method, base_start, amount, methods_pending);
- break;
- case EngineID::MAXWELL_DMA_COPY_A:
- maxwell_dma->CallMultiMethod(method, base_start, amount, methods_pending);
- break;
- case EngineID::KEPLER_INLINE_TO_MEMORY_B:
- kepler_memory->CallMultiMethod(method, base_start, amount, methods_pending);
- break;
- default:
- UNIMPLEMENTED_MSG("Unimplemented engine");
- }
- }
-
- /// Determines where the method should be executed.
- [[nodiscard]] bool ExecuteMethodOnEngine(u32 method) {
- const auto buffer_method = static_cast<BufferMethods>(method);
- return buffer_method >= BufferMethods::NonPullerMethods;
- }
-
- struct Regs {
- static constexpr size_t NUM_REGS = 0x40;
-
- union {
- struct {
- INSERT_PADDING_WORDS_NOINIT(0x4);
- struct {
- u32 address_high;
- u32 address_low;
-
- [[nodiscard]] GPUVAddr SemaphoreAddress() const {
- return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
- address_low);
+ const auto wait_fence =
+ RequestSyncOperation([this, current_request_counter, framebuffer, fences, num_fences] {
+ auto& syncpoint_manager = host1x.GetSyncpointManager();
+ if (num_fences == 0) {
+ renderer->SwapBuffers(framebuffer);
+ }
+ const auto executer = [this, current_request_counter,
+ framebuffer_copy = *framebuffer]() {
+ {
+ std::unique_lock<std::mutex> lk(request_swap_mutex);
+ if (--request_swap_counters[current_request_counter] != 0) {
+ return;
+ }
+ free_swap_counters.push_back(current_request_counter);
}
- } semaphore_address;
-
- u32 semaphore_sequence;
- u32 semaphore_trigger;
- INSERT_PADDING_WORDS_NOINIT(0xC);
-
- // The pusher and the puller share the reference counter, the pusher only has read
- // access
- u32 reference_count;
- INSERT_PADDING_WORDS_NOINIT(0x5);
-
- u32 semaphore_acquire;
- u32 semaphore_release;
- u32 fence_value;
- GPU::FenceAction fence_action;
- INSERT_PADDING_WORDS_NOINIT(0xE2);
-
- // Puller state
- u32 acquire_mode;
- u32 acquire_source;
- u32 acquire_active;
- u32 acquire_timeout;
- u32 acquire_value;
- };
- std::array<u32, NUM_REGS> reg_array;
- };
- } regs{};
+ renderer->SwapBuffers(&framebuffer_copy);
+ };
+ for (size_t i = 0; i < num_fences; i++) {
+ syncpoint_manager.RegisterGuestAction(fences[i].id, fences[i].value, executer);
+ }
+ });
+ gpu_thread.TickGPU();
+ WaitForSyncOperation(wait_fence);
+ }
GPU& gpu;
Core::System& system;
- std::unique_ptr<Tegra::MemoryManager> memory_manager;
- std::unique_ptr<Tegra::DmaPusher> dma_pusher;
+ Host1x::Host1x& host1x;
+
std::map<u32, std::unique_ptr<Tegra::CDmaPusher>> cdma_pushers;
std::unique_ptr<VideoCore::RendererBase> renderer;
VideoCore::RasterizerInterface* rasterizer = nullptr;
const bool use_nvdec;
- /// Mapping of command subchannels to their bound engine ids
- std::array<EngineID, 8> bound_engines{};
- /// 3D engine
- std::unique_ptr<Engines::Maxwell3D> maxwell_3d;
- /// 2D engine
- std::unique_ptr<Engines::Fermi2D> fermi_2d;
- /// Compute engine
- std::unique_ptr<Engines::KeplerCompute> kepler_compute;
- /// DMA engine
- std::unique_ptr<Engines::MaxwellDMA> maxwell_dma;
- /// Inline memory engine
- std::unique_ptr<Engines::KeplerMemory> kepler_memory;
+ s32 new_channel_id{1};
/// Shader build notifier
std::unique_ptr<VideoCore::ShaderNotify> shader_notify;
/// When true, we are about to shut down emulation session, so terminate outstanding tasks
@@ -692,51 +355,25 @@ struct GPU::Impl {
std::condition_variable sync_cv;
- struct FlushRequest {
- explicit FlushRequest(u64 fence_, VAddr addr_, std::size_t size_)
- : fence{fence_}, addr{addr_}, size{size_} {}
- u64 fence;
- VAddr addr;
- std::size_t size;
- };
-
- std::list<FlushRequest> flush_requests;
- std::atomic<u64> current_flush_fence{};
- u64 last_flush_fence{};
- std::mutex flush_request_mutex;
+ std::list<std::function<void()>> sync_requests;
+ std::atomic<u64> current_sync_fence{};
+ u64 last_sync_fence{};
+ std::mutex sync_request_mutex;
+ std::condition_variable sync_request_cv;
const bool is_async;
VideoCommon::GPUThread::ThreadManager gpu_thread;
std::unique_ptr<Core::Frontend::GraphicsContext> cpu_context;
-#define ASSERT_REG_POSITION(field_name, position) \
- static_assert(offsetof(Regs, field_name) == position * 4, \
- "Field " #field_name " has invalid position")
-
- ASSERT_REG_POSITION(semaphore_address, 0x4);
- ASSERT_REG_POSITION(semaphore_sequence, 0x6);
- ASSERT_REG_POSITION(semaphore_trigger, 0x7);
- ASSERT_REG_POSITION(reference_count, 0x14);
- ASSERT_REG_POSITION(semaphore_acquire, 0x1A);
- ASSERT_REG_POSITION(semaphore_release, 0x1B);
- ASSERT_REG_POSITION(fence_value, 0x1C);
- ASSERT_REG_POSITION(fence_action, 0x1D);
-
- ASSERT_REG_POSITION(acquire_mode, 0x100);
- ASSERT_REG_POSITION(acquire_source, 0x101);
- ASSERT_REG_POSITION(acquire_active, 0x102);
- ASSERT_REG_POSITION(acquire_timeout, 0x103);
- ASSERT_REG_POSITION(acquire_value, 0x104);
-
-#undef ASSERT_REG_POSITION
-
- enum class GpuSemaphoreOperation {
- AcquireEqual = 0x1,
- WriteLong = 0x2,
- AcquireGequal = 0x4,
- AcquireMask = 0x8,
- };
+ std::unique_ptr<Tegra::Control::Scheduler> scheduler;
+ std::unordered_map<s32, std::shared_ptr<Tegra::Control::ChannelState>> channels;
+ Tegra::Control::ChannelState* current_channel;
+ s32 bound_channel{-1};
+
+ std::deque<size_t> free_swap_counters;
+ std::deque<size_t> request_swap_counters;
+ std::mutex request_swap_mutex;
};
GPU::GPU(Core::System& system, bool is_async, bool use_nvdec)
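
RequestSwapBuffers above defers the renderer's SwapBuffers call until every guest fence attached to the frame has signalled, using a per-request countdown whose slot is recycled through a free list. The counting idea in isolation, as a sketch (SwapGate is an illustrative name; syncpoint registration and the renderer call are stubbed out):

    #include <cstddef>
    #include <deque>
    #include <functional>
    #include <mutex>
    #include <vector>

    class SwapGate {
    public:
        // Reserve a counter preloaded with the number of fences guarding this frame.
        size_t Open(size_t num_fences) {
            std::scoped_lock lk{mutex};
            if (free_slots.empty()) {
                counters.push_back(num_fences);
                return counters.size() - 1;
            }
            const size_t slot = free_slots.front();
            free_slots.pop_front();
            counters[slot] = num_fences;
            return slot;
        }

        // Called once per signalled fence; fires the swap when the last one arrives.
        void OnFenceSignalled(size_t slot, const std::function<void()>& do_swap) {
            {
                std::scoped_lock lk{mutex};
                if (--counters[slot] != 0) {
                    return;
                }
                free_slots.push_back(slot);
            }
            do_swap();
        }

    private:
        std::vector<size_t> counters;
        std::deque<size_t> free_slots;
        std::mutex mutex;
    };
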
@@ -744,25 +381,36 @@ GPU::GPU(Core::System& system, bool is_async, bool use_nvdec)
GPU::~GPU() = default;
-void GPU::BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer) {
- impl->BindRenderer(std::move(renderer));
+std::shared_ptr<Control::ChannelState> GPU::AllocateChannel() {
+ return impl->AllocateChannel();
+}
+
+void GPU::InitChannel(Control::ChannelState& to_init) {
+ impl->InitChannel(to_init);
+}
+
+void GPU::BindChannel(s32 channel_id) {
+ impl->BindChannel(channel_id);
}
-void GPU::CallMethod(const MethodCall& method_call) {
- impl->CallMethod(method_call);
+void GPU::ReleaseChannel(Control::ChannelState& to_release) {
+ impl->ReleaseChannel(to_release);
}
-void GPU::CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
- u32 methods_pending) {
- impl->CallMultiMethod(method, subchannel, base_start, amount, methods_pending);
+void GPU::InitAddressSpace(Tegra::MemoryManager& memory_manager) {
+ impl->InitAddressSpace(memory_manager);
+}
+
+void GPU::BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer) {
+ impl->BindRenderer(std::move(renderer));
}
void GPU::FlushCommands() {
impl->FlushCommands();
}
-void GPU::SyncGuestHost() {
- impl->SyncGuestHost();
+void GPU::InvalidateGPUCache() {
+ impl->InvalidateGPUCache();
}
void GPU::OnCommandListEnd() {
@@ -770,17 +418,32 @@ void GPU::OnCommandListEnd() {
}
u64 GPU::RequestFlush(VAddr addr, std::size_t size) {
- return impl->RequestFlush(addr, size);
+ return impl->RequestSyncOperation(
+ [this, addr, size]() { impl->rasterizer->FlushRegion(addr, size); });
}
-u64 GPU::CurrentFlushRequestFence() const {
- return impl->CurrentFlushRequestFence();
+u64 GPU::CurrentSyncRequestFence() const {
+ return impl->CurrentSyncRequestFence();
+}
+
+void GPU::WaitForSyncOperation(u64 fence) {
+ return impl->WaitForSyncOperation(fence);
}
void GPU::TickWork() {
impl->TickWork();
}
+/// Gets a mutable reference to the Host1x interface
+Host1x::Host1x& GPU::Host1x() {
+ return impl->host1x;
+}
+
+/// Gets an immutable reference to the Host1x interface.
+const Host1x::Host1x& GPU::Host1x() const {
+ return impl->host1x;
+}
+
Engines::Maxwell3D& GPU::Maxwell3D() {
return impl->Maxwell3D();
}
@@ -797,14 +460,6 @@ const Engines::KeplerCompute& GPU::KeplerCompute() const {
return impl->KeplerCompute();
}
-Tegra::MemoryManager& GPU::MemoryManager() {
- return impl->MemoryManager();
-}
-
-const Tegra::MemoryManager& GPU::MemoryManager() const {
- return impl->MemoryManager();
-}
-
Tegra::DmaPusher& GPU::DmaPusher() {
return impl->DmaPusher();
}
@@ -829,24 +484,9 @@ const VideoCore::ShaderNotify& GPU::ShaderNotify() const {
return impl->ShaderNotify();
}
-void GPU::WaitFence(u32 syncpoint_id, u32 value) {
- impl->WaitFence(syncpoint_id, value);
-}
-
-void GPU::IncrementSyncPoint(u32 syncpoint_id) {
- impl->IncrementSyncPoint(syncpoint_id);
-}
-
-u32 GPU::GetSyncpointValue(u32 syncpoint_id) const {
- return impl->GetSyncpointValue(syncpoint_id);
-}
-
-void GPU::RegisterSyncptInterrupt(u32 syncpoint_id, u32 value) {
- impl->RegisterSyncptInterrupt(syncpoint_id, value);
-}
-
-bool GPU::CancelSyncptInterrupt(u32 syncpoint_id, u32 value) {
- return impl->CancelSyncptInterrupt(syncpoint_id, value);
+void GPU::RequestSwapBuffers(const Tegra::FramebufferConfig* framebuffer,
+ std::array<Service::Nvidia::NvFence, 4>& fences, size_t num_fences) {
+ impl->RequestSwapBuffers(framebuffer, fences, num_fences);
}
u64 GPU::GetTicks() const {
@@ -881,8 +521,8 @@ void GPU::ReleaseContext() {
impl->ReleaseContext();
}
-void GPU::PushGPUEntries(Tegra::CommandList&& entries) {
- impl->PushGPUEntries(std::move(entries));
+void GPU::PushGPUEntries(s32 channel, Tegra::CommandList&& entries) {
+ impl->PushGPUEntries(channel, std::move(entries));
}
void GPU::PushCommandBuffer(u32 id, Tegra::ChCommandHeaderList& entries) {
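
Taken together, the channel rework means a client no longer drives one global DmaPusher: it allocates a channel, initializes it, binds it, and only then submits work tagged with that channel. A hedged caller-side sketch against the interface added in this diff (the explicit channel_id parameter is illustrative; the real id lives on the allocated ChannelState):

    // Hypothetical caller-side flow; follows the GPU interface added in this diff.
    void SetUpChannel(Tegra::GPU& gpu, Tegra::MemoryManager& address_space, s32 channel_id) {
        auto channel = gpu.AllocateChannel();   // creates a ChannelState and declares it to the scheduler
        gpu.InitChannel(*channel);              // builds the channel's engines and binds the rasterizer
        gpu.InitAddressSpace(address_space);    // attaches the rasterizer to this GPU address space
        gpu.BindChannel(channel_id);            // later engine accessors resolve through this channel
    }
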
diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h
index b939ba315..0a4a8b14f 100644
--- a/src/video_core/gpu.h
+++ b/src/video_core/gpu.h
@@ -89,73 +89,58 @@ class Maxwell3D;
class KeplerCompute;
} // namespace Engines
-enum class EngineID {
- FERMI_TWOD_A = 0x902D, // 2D Engine
- MAXWELL_B = 0xB197, // 3D Engine
- KEPLER_COMPUTE_B = 0xB1C0,
- KEPLER_INLINE_TO_MEMORY_B = 0xA140,
- MAXWELL_DMA_COPY_A = 0xB0B5,
-};
+namespace Control {
+struct ChannelState;
+}
+
+namespace Host1x {
+class Host1x;
+} // namespace Host1x
class MemoryManager;
class GPU final {
public:
- struct MethodCall {
- u32 method{};
- u32 argument{};
- u32 subchannel{};
- u32 method_count{};
-
- explicit MethodCall(u32 method_, u32 argument_, u32 subchannel_ = 0, u32 method_count_ = 0)
- : method(method_), argument(argument_), subchannel(subchannel_),
- method_count(method_count_) {}
-
- [[nodiscard]] bool IsLastCall() const {
- return method_count <= 1;
- }
- };
-
- enum class FenceOperation : u32 {
- Acquire = 0,
- Increment = 1,
- };
-
- union FenceAction {
- u32 raw;
- BitField<0, 1, FenceOperation> op;
- BitField<8, 24, u32> syncpoint_id;
- };
-
explicit GPU(Core::System& system, bool is_async, bool use_nvdec);
~GPU();
/// Binds a renderer to the GPU.
void BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer);
- /// Calls a GPU method.
- void CallMethod(const MethodCall& method_call);
-
- /// Calls a GPU multivalue method.
- void CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
- u32 methods_pending);
-
/// Flush all current written commands into the host GPU for execution.
void FlushCommands();
/// Synchronizes CPU writes with Host GPU memory.
- void SyncGuestHost();
+ void InvalidateGPUCache();
/// Signal the ending of command list.
void OnCommandListEnd();
+ std::shared_ptr<Control::ChannelState> AllocateChannel();
+
+ void InitChannel(Control::ChannelState& to_init);
+
+ void BindChannel(s32 channel_id);
+
+ void ReleaseChannel(Control::ChannelState& to_release);
+
+ void InitAddressSpace(Tegra::MemoryManager& memory_manager);
+
/// Request a host GPU memory flush from the CPU.
[[nodiscard]] u64 RequestFlush(VAddr addr, std::size_t size);
/// Obtains current flush request fence id.
- [[nodiscard]] u64 CurrentFlushRequestFence() const;
+ [[nodiscard]] u64 CurrentSyncRequestFence() const;
+
+ void WaitForSyncOperation(u64 fence);
/// Tick pending requests within the GPU.
void TickWork();
+ /// Gets a mutable reference to the Host1x interface
+ [[nodiscard]] Host1x::Host1x& Host1x();
+
+ /// Gets an immutable reference to the Host1x interface.
+ [[nodiscard]] const Host1x::Host1x& Host1x() const;
+
/// Returns a reference to the Maxwell3D GPU engine.
[[nodiscard]] Engines::Maxwell3D& Maxwell3D();
@@ -168,12 +153,6 @@ public:
/// Returns a reference to the KeplerCompute GPU engine.
[[nodiscard]] const Engines::KeplerCompute& KeplerCompute() const;
- /// Returns a reference to the GPU memory manager.
- [[nodiscard]] Tegra::MemoryManager& MemoryManager();
-
- /// Returns a const reference to the GPU memory manager.
- [[nodiscard]] const Tegra::MemoryManager& MemoryManager() const;
-
/// Returns a reference to the GPU DMA pusher.
[[nodiscard]] Tegra::DmaPusher& DmaPusher();
@@ -192,17 +171,6 @@ public:
/// Returns a const reference to the shader notifier.
[[nodiscard]] const VideoCore::ShaderNotify& ShaderNotify() const;
- /// Allows the CPU/NvFlinger to wait on the GPU before presenting a frame.
- void WaitFence(u32 syncpoint_id, u32 value);
-
- void IncrementSyncPoint(u32 syncpoint_id);
-
- [[nodiscard]] u32 GetSyncpointValue(u32 syncpoint_id) const;
-
- void RegisterSyncptInterrupt(u32 syncpoint_id, u32 value);
-
- [[nodiscard]] bool CancelSyncptInterrupt(u32 syncpoint_id, u32 value);
-
[[nodiscard]] u64 GetTicks() const;
[[nodiscard]] bool IsAsync() const;
@@ -211,6 +179,9 @@ public:
void RendererFrameEndNotify();
+ void RequestSwapBuffers(const Tegra::FramebufferConfig* framebuffer,
+ std::array<Service::Nvidia::NvFence, 4>& fences, size_t num_fences);
+
/// Performs any additional setup necessary in order to begin GPU emulation.
/// This can be used to launch any necessary threads and register any necessary
/// core timing events.
@@ -226,7 +197,7 @@ public:
void ReleaseContext();
/// Push GPU command entries to be processed
- void PushGPUEntries(Tegra::CommandList&& entries);
+ void PushGPUEntries(s32 channel, Tegra::CommandList&& entries);
/// Push GPU command buffer entries to be processed
void PushCommandBuffer(u32 id, Tegra::ChCommandHeaderList& entries);
@@ -248,7 +219,7 @@ public:
private:
struct Impl;
- std::unique_ptr<Impl> impl;
+ mutable std::unique_ptr<Impl> impl;
};
} // namespace Tegra
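
The old flush-request list that carried (addr, size) pairs is now a generic fence-counted work queue: RequestSyncOperation enqueues a callback and returns a fence number, TickWork runs callbacks and advances the counter, and WaitForSyncOperation blocks until the counter reaches the fence. A condensed, self-contained sketch of the same pattern (illustrative names only):

    #include <atomic>
    #include <condition_variable>
    #include <cstdint>
    #include <functional>
    #include <list>
    #include <mutex>

    class SyncRequestQueue {
    public:
        uint64_t Request(std::function<void()> work) {
            std::unique_lock lk{mutex};
            requests.emplace_back(std::move(work));
            return ++last_fence;
        }

        void Tick() {  // runs on the worker/GPU side
            std::unique_lock lk{mutex};
            while (!requests.empty()) {
                auto work = std::move(requests.front());
                requests.pop_front();
                lk.unlock();
                work();
                current_fence.fetch_add(1, std::memory_order_release);
                lk.lock();
                cv.notify_all();
            }
        }

        void Wait(uint64_t fence) {  // blocks the requester until its work has run
            std::unique_lock lk{mutex};
            cv.wait(lk, [&] { return current_fence.load(std::memory_order_acquire) >= fence; });
        }

    private:
        std::list<std::function<void()>> requests;
        std::atomic<uint64_t> current_fence{0};
        uint64_t last_fence{0};
        std::mutex mutex;
        std::condition_variable cv;
    };
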
diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp
index f0e48cfbd..1bd477011 100644
--- a/src/video_core/gpu_thread.cpp
+++ b/src/video_core/gpu_thread.cpp
@@ -8,6 +8,7 @@
#include "common/thread.h"
#include "core/core.h"
#include "core/frontend/emu_window.h"
+#include "video_core/control/scheduler.h"
#include "video_core/dma_pusher.h"
#include "video_core/gpu.h"
#include "video_core/gpu_thread.h"
@@ -18,7 +19,7 @@ namespace VideoCommon::GPUThread {
/// Runs the GPU thread
static void RunThread(std::stop_token stop_token, Core::System& system,
VideoCore::RendererBase& renderer, Core::Frontend::GraphicsContext& context,
- Tegra::DmaPusher& dma_pusher, SynchState& state) {
+ Tegra::Control::Scheduler& scheduler, SynchState& state) {
std::string name = "GPU";
MicroProfileOnThreadCreate(name.c_str());
SCOPE_EXIT({ MicroProfileOnThreadExit(); });
@@ -36,8 +37,7 @@ static void RunThread(std::stop_token stop_token, Core::System& system,
break;
}
if (auto* submit_list = std::get_if<SubmitListCommand>(&next.data)) {
- dma_pusher.Push(std::move(submit_list->entries));
- dma_pusher.DispatchCalls();
+ scheduler.Push(submit_list->channel, std::move(submit_list->entries));
} else if (const auto* data = std::get_if<SwapBuffersCommand>(&next.data)) {
renderer.SwapBuffers(data->framebuffer ? &*data->framebuffer : nullptr);
} else if (std::holds_alternative<OnCommandListEndCommand>(next.data)) {
@@ -68,14 +68,14 @@ ThreadManager::~ThreadManager() = default;
void ThreadManager::StartThread(VideoCore::RendererBase& renderer,
Core::Frontend::GraphicsContext& context,
- Tegra::DmaPusher& dma_pusher) {
+ Tegra::Control::Scheduler& scheduler) {
rasterizer = renderer.ReadRasterizer();
thread = std::jthread(RunThread, std::ref(system), std::ref(renderer), std::ref(context),
- std::ref(dma_pusher), std::ref(state));
+ std::ref(scheduler), std::ref(state));
}
-void ThreadManager::SubmitList(Tegra::CommandList&& entries) {
- PushCommand(SubmitListCommand(std::move(entries)));
+void ThreadManager::SubmitList(s32 channel, Tegra::CommandList&& entries) {
+ PushCommand(SubmitListCommand(channel, std::move(entries)));
}
void ThreadManager::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
@@ -93,8 +93,12 @@ void ThreadManager::FlushRegion(VAddr addr, u64 size) {
}
auto& gpu = system.GPU();
u64 fence = gpu.RequestFlush(addr, size);
- PushCommand(GPUTickCommand(), true);
- ASSERT(fence <= gpu.CurrentFlushRequestFence());
+ TickGPU();
+ gpu.WaitForSyncOperation(fence);
+}
+
+void ThreadManager::TickGPU() {
+ PushCommand(GPUTickCommand());
}
void ThreadManager::InvalidateRegion(VAddr addr, u64 size) {
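
On the GPU thread, command lists are no longer pushed straight into a single DmaPusher; each submission now carries its channel id and is handed to the scheduler, which keeps one pusher per declared channel. A minimal sketch of such a per-channel queue (this is not the Tegra::Control::Scheduler API, only the idea behind it):

    #include <cstdint>
    #include <memory>
    #include <unordered_map>
    #include <utility>
    #include <vector>

    // Illustrative stand-ins for Tegra::CommandList and Tegra::DmaPusher.
    using FakeCommandList = std::vector<uint32_t>;
    struct FakePusher {
        void Push(FakeCommandList&& entries) { queued.emplace_back(std::move(entries)); }
        void DispatchCalls() { queued.clear(); }  // decoding and dispatch would happen here
        std::vector<FakeCommandList> queued;
    };

    class MiniScheduler {
    public:
        void DeclareChannel(int channel_id) {
            pushers.emplace(channel_id, std::make_unique<FakePusher>());
        }

        void Push(int channel_id, FakeCommandList&& entries) {
            auto& pusher = *pushers.at(channel_id);  // each channel keeps its own pusher
            pusher.Push(std::move(entries));
            pusher.DispatchCalls();
        }

    private:
        std::unordered_map<int, std::unique_ptr<FakePusher>> pushers;
    };
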
diff --git a/src/video_core/gpu_thread.h b/src/video_core/gpu_thread.h
index 2f8210cb9..64628d3e3 100644
--- a/src/video_core/gpu_thread.h
+++ b/src/video_core/gpu_thread.h
@@ -15,7 +15,9 @@
namespace Tegra {
struct FramebufferConfig;
-class DmaPusher;
+namespace Control {
+class Scheduler;
+}
} // namespace Tegra
namespace Core {
@@ -34,8 +36,10 @@ namespace VideoCommon::GPUThread {
/// Command to signal to the GPU thread that a command list is ready for processing
struct SubmitListCommand final {
- explicit SubmitListCommand(Tegra::CommandList&& entries_) : entries{std::move(entries_)} {}
+ explicit SubmitListCommand(s32 channel_, Tegra::CommandList&& entries_)
+ : channel{channel_}, entries{std::move(entries_)} {}
+ s32 channel;
Tegra::CommandList entries;
};
@@ -112,10 +116,10 @@ public:
/// Creates and starts the GPU thread.
void StartThread(VideoCore::RendererBase& renderer, Core::Frontend::GraphicsContext& context,
- Tegra::DmaPusher& dma_pusher);
+ Tegra::Control::Scheduler& scheduler);
/// Push GPU command entries to be processed
- void SubmitList(Tegra::CommandList&& entries);
+ void SubmitList(s32 channel, Tegra::CommandList&& entries);
/// Swap buffers (render frame)
void SwapBuffers(const Tegra::FramebufferConfig* framebuffer);
@@ -131,6 +135,8 @@ public:
void OnCommandListEnd();
+ void TickGPU();
+
private:
/// Pushes a command to be executed by the GPU thread
u64 PushCommand(CommandData&& command_data, bool block = false);
diff --git a/src/video_core/command_classes/codecs/codec.cpp b/src/video_core/host1x/codecs/codec.cpp
index a5eb97b7f..42e7d6e4f 100644
--- a/src/video_core/command_classes/codecs/codec.cpp
+++ b/src/video_core/host1x/codecs/codec.cpp
@@ -6,11 +6,11 @@
#include <vector>
#include "common/assert.h"
#include "common/settings.h"
-#include "video_core/command_classes/codecs/codec.h"
-#include "video_core/command_classes/codecs/h264.h"
-#include "video_core/command_classes/codecs/vp8.h"
-#include "video_core/command_classes/codecs/vp9.h"
-#include "video_core/gpu.h"
+#include "video_core/host1x/codecs/codec.h"
+#include "video_core/host1x/codecs/h264.h"
+#include "video_core/host1x/codecs/vp8.h"
+#include "video_core/host1x/codecs/vp9.h"
+#include "video_core/host1x/host1x.h"
#include "video_core/memory_manager.h"
extern "C" {
@@ -73,10 +73,10 @@ void AVFrameDeleter(AVFrame* ptr) {
av_frame_free(&ptr);
}
-Codec::Codec(GPU& gpu_, const NvdecCommon::NvdecRegisters& regs)
- : gpu(gpu_), state{regs}, h264_decoder(std::make_unique<Decoder::H264>(gpu)),
- vp8_decoder(std::make_unique<Decoder::VP8>(gpu)),
- vp9_decoder(std::make_unique<Decoder::VP9>(gpu)) {}
+Codec::Codec(Host1x::Host1x& host1x_, const Host1x::NvdecCommon::NvdecRegisters& regs)
+ : host1x(host1x_), state{regs}, h264_decoder(std::make_unique<Decoder::H264>(host1x)),
+ vp8_decoder(std::make_unique<Decoder::VP8>(host1x)),
+ vp9_decoder(std::make_unique<Decoder::VP9>(host1x)) {}
Codec::~Codec() {
if (!initialized) {
@@ -168,11 +168,11 @@ void Codec::InitializeGpuDecoder() {
void Codec::Initialize() {
const AVCodecID codec = [&] {
switch (current_codec) {
- case NvdecCommon::VideoCodec::H264:
+ case Host1x::NvdecCommon::VideoCodec::H264:
return AV_CODEC_ID_H264;
- case NvdecCommon::VideoCodec::VP8:
+ case Host1x::NvdecCommon::VideoCodec::VP8:
return AV_CODEC_ID_VP8;
- case NvdecCommon::VideoCodec::VP9:
+ case Host1x::NvdecCommon::VideoCodec::VP9:
return AV_CODEC_ID_VP9;
default:
UNIMPLEMENTED_MSG("Unknown codec {}", current_codec);
@@ -197,7 +197,7 @@ void Codec::Initialize() {
initialized = true;
}
-void Codec::SetTargetCodec(NvdecCommon::VideoCodec codec) {
+void Codec::SetTargetCodec(Host1x::NvdecCommon::VideoCodec codec) {
if (current_codec != codec) {
current_codec = codec;
LOG_INFO(Service_NVDRV, "NVDEC video codec initialized to {}", GetCurrentCodecName());
@@ -215,11 +215,11 @@ void Codec::Decode() {
bool vp9_hidden_frame = false;
const auto& frame_data = [&]() {
switch (current_codec) {
- case Tegra::NvdecCommon::VideoCodec::H264:
+ case Tegra::Host1x::NvdecCommon::VideoCodec::H264:
return h264_decoder->ComposeFrame(state, is_first_frame);
- case Tegra::NvdecCommon::VideoCodec::VP8:
+ case Tegra::Host1x::NvdecCommon::VideoCodec::VP8:
return vp8_decoder->ComposeFrame(state);
- case Tegra::NvdecCommon::VideoCodec::VP9:
+ case Tegra::Host1x::NvdecCommon::VideoCodec::VP9:
vp9_decoder->ComposeFrame(state);
vp9_hidden_frame = vp9_decoder->WasFrameHidden();
return vp9_decoder->GetFrameBytes();
@@ -287,21 +287,21 @@ AVFramePtr Codec::GetCurrentFrame() {
return frame;
}
-NvdecCommon::VideoCodec Codec::GetCurrentCodec() const {
+Host1x::NvdecCommon::VideoCodec Codec::GetCurrentCodec() const {
return current_codec;
}
std::string_view Codec::GetCurrentCodecName() const {
switch (current_codec) {
- case NvdecCommon::VideoCodec::None:
+ case Host1x::NvdecCommon::VideoCodec::None:
return "None";
- case NvdecCommon::VideoCodec::H264:
+ case Host1x::NvdecCommon::VideoCodec::H264:
return "H264";
- case NvdecCommon::VideoCodec::VP8:
+ case Host1x::NvdecCommon::VideoCodec::VP8:
return "VP8";
- case NvdecCommon::VideoCodec::H265:
+ case Host1x::NvdecCommon::VideoCodec::H265:
return "H265";
- case NvdecCommon::VideoCodec::VP9:
+ case Host1x::NvdecCommon::VideoCodec::VP9:
return "VP9";
default:
return "Unknown";
diff --git a/src/video_core/command_classes/codecs/codec.h b/src/video_core/host1x/codecs/codec.h
index 0c2405465..0d45fb7fe 100644
--- a/src/video_core/command_classes/codecs/codec.h
+++ b/src/video_core/host1x/codecs/codec.h
@@ -6,8 +6,8 @@
#include <memory>
#include <string_view>
#include <queue>
-
-#include "video_core/command_classes/nvdec_common.h"
+#include "common/common_types.h"
+#include "video_core/host1x/nvdec_common.h"
extern "C" {
#if defined(__GNUC__) || defined(__clang__)
@@ -21,7 +21,6 @@ extern "C" {
}
namespace Tegra {
-class GPU;
void AVFrameDeleter(AVFrame* ptr);
using AVFramePtr = std::unique_ptr<AVFrame, decltype(&AVFrameDeleter)>;
@@ -32,16 +31,20 @@ class VP8;
class VP9;
} // namespace Decoder
+namespace Host1x {
+class Host1x;
+} // namespace Host1x
+
class Codec {
public:
- explicit Codec(GPU& gpu, const NvdecCommon::NvdecRegisters& regs);
+ explicit Codec(Host1x::Host1x& host1x, const Host1x::NvdecCommon::NvdecRegisters& regs);
~Codec();
/// Initialize the codec, returning success or failure
void Initialize();
/// Sets NVDEC video stream codec
- void SetTargetCodec(NvdecCommon::VideoCodec codec);
+ void SetTargetCodec(Host1x::NvdecCommon::VideoCodec codec);
/// Call decoders to construct headers, decode AVFrame with ffmpeg
void Decode();
@@ -50,7 +53,7 @@ public:
[[nodiscard]] AVFramePtr GetCurrentFrame();
/// Returns the value of current_codec
- [[nodiscard]] NvdecCommon::VideoCodec GetCurrentCodec() const;
+ [[nodiscard]] Host1x::NvdecCommon::VideoCodec GetCurrentCodec() const;
/// Return name of the current codec
[[nodiscard]] std::string_view GetCurrentCodecName() const;
@@ -63,14 +66,14 @@ private:
bool CreateGpuAvDevice();
bool initialized{};
- NvdecCommon::VideoCodec current_codec{NvdecCommon::VideoCodec::None};
+ Host1x::NvdecCommon::VideoCodec current_codec{Host1x::NvdecCommon::VideoCodec::None};
const AVCodec* av_codec{nullptr};
AVCodecContext* av_codec_ctx{nullptr};
AVBufferRef* av_gpu_decoder{nullptr};
- GPU& gpu;
- const NvdecCommon::NvdecRegisters& state;
+ Host1x::Host1x& host1x;
+ const Host1x::NvdecCommon::NvdecRegisters& state;
std::unique_ptr<Decoder::H264> h264_decoder;
std::unique_ptr<Decoder::VP8> vp8_decoder;
std::unique_ptr<Decoder::VP9> vp9_decoder;
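
The codec now reaches guest memory through Host1x instead of the GPU object, but its external flow is unchanged: select a codec, decode the submitted bitstream, then drain decoded frames. A usage sketch against the header above, assuming a live Host1x instance and a populated NVDEC register block already exist:

    // Usage sketch only; error handling and frame consumption are elided.
    void DecodeWithNvdec(Tegra::Host1x::Host1x& host1x,
                         const Tegra::Host1x::NvdecCommon::NvdecRegisters& regs) {
        Tegra::Codec codec{host1x, regs};
        codec.SetTargetCodec(Tegra::Host1x::NvdecCommon::VideoCodec::H264);
        codec.Decode();                                     // compose headers and run the FFmpeg decoder
        Tegra::AVFramePtr frame = codec.GetCurrentFrame();  // take ownership of a decoded AVFrame
        (void)frame;                                        // would be handed to the VIC / presentation path
    }
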
diff --git a/src/video_core/command_classes/codecs/h264.cpp b/src/video_core/host1x/codecs/h264.cpp
index e2acd54d4..e87bd65fa 100644
--- a/src/video_core/command_classes/codecs/h264.cpp
+++ b/src/video_core/host1x/codecs/h264.cpp
@@ -5,8 +5,8 @@
#include <bit>
#include "common/settings.h"
-#include "video_core/command_classes/codecs/h264.h"
-#include "video_core/gpu.h"
+#include "video_core/host1x/codecs/h264.h"
+#include "video_core/host1x/host1x.h"
#include "video_core/memory_manager.h"
namespace Tegra::Decoder {
@@ -24,19 +24,20 @@ constexpr std::array<u8, 16> zig_zag_scan{
};
} // Anonymous namespace
-H264::H264(GPU& gpu_) : gpu(gpu_) {}
+H264::H264(Host1x::Host1x& host1x_) : host1x{host1x_} {}
H264::~H264() = default;
-const std::vector<u8>& H264::ComposeFrame(const NvdecCommon::NvdecRegisters& state,
+const std::vector<u8>& H264::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state,
bool is_first_frame) {
H264DecoderContext context;
- gpu.MemoryManager().ReadBlock(state.picture_info_offset, &context, sizeof(H264DecoderContext));
+ host1x.MemoryManager().ReadBlock(state.picture_info_offset, &context,
+ sizeof(H264DecoderContext));
const s64 frame_number = context.h264_parameter_set.frame_number.Value();
if (!is_first_frame && frame_number != 0) {
frame.resize(context.stream_len);
- gpu.MemoryManager().ReadBlock(state.frame_bitstream_offset, frame.data(), frame.size());
+ host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset, frame.data(), frame.size());
return frame;
}
@@ -155,8 +156,8 @@ const std::vector<u8>& H264::ComposeFrame(const NvdecCommon::NvdecRegisters& sta
frame.resize(encoded_header.size() + context.stream_len);
std::memcpy(frame.data(), encoded_header.data(), encoded_header.size());
- gpu.MemoryManager().ReadBlock(state.frame_bitstream_offset,
- frame.data() + encoded_header.size(), context.stream_len);
+ host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset,
+ frame.data() + encoded_header.size(), context.stream_len);
return frame;
}
diff --git a/src/video_core/command_classes/codecs/h264.h b/src/video_core/host1x/codecs/h264.h
index 261574364..5cc86454e 100644
--- a/src/video_core/command_classes/codecs/h264.h
+++ b/src/video_core/host1x/codecs/h264.h
@@ -8,10 +8,14 @@
#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
-#include "video_core/command_classes/nvdec_common.h"
+#include "video_core/host1x/nvdec_common.h"
namespace Tegra {
-class GPU;
+
+namespace Host1x {
+class Host1x;
+} // namespace Host1x
+
namespace Decoder {
class H264BitWriter {
@@ -55,16 +59,16 @@ private:
class H264 {
public:
- explicit H264(GPU& gpu);
+ explicit H264(Host1x::Host1x& host1x);
~H264();
/// Compose the H264 frame for FFmpeg decoding
- [[nodiscard]] const std::vector<u8>& ComposeFrame(const NvdecCommon::NvdecRegisters& state,
- bool is_first_frame = false);
+ [[nodiscard]] const std::vector<u8>& ComposeFrame(
+ const Host1x::NvdecCommon::NvdecRegisters& state, bool is_first_frame = false);
private:
std::vector<u8> frame;
- GPU& gpu;
+ Host1x::Host1x& host1x;
struct H264ParameterSet {
s32 log2_max_pic_order_cnt_lsb_minus4; ///< 0x00
diff --git a/src/video_core/command_classes/codecs/vp8.cpp b/src/video_core/host1x/codecs/vp8.cpp
index c83b9bbc2..28fb12cb8 100644
--- a/src/video_core/command_classes/codecs/vp8.cpp
+++ b/src/video_core/host1x/codecs/vp8.cpp
@@ -3,18 +3,18 @@
#include <vector>
-#include "video_core/command_classes/codecs/vp8.h"
-#include "video_core/gpu.h"
+#include "video_core/host1x/codecs/vp8.h"
+#include "video_core/host1x/host1x.h"
#include "video_core/memory_manager.h"
namespace Tegra::Decoder {
-VP8::VP8(GPU& gpu_) : gpu(gpu_) {}
+VP8::VP8(Host1x::Host1x& host1x_) : host1x{host1x_} {}
VP8::~VP8() = default;
-const std::vector<u8>& VP8::ComposeFrame(const NvdecCommon::NvdecRegisters& state) {
+const std::vector<u8>& VP8::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state) {
VP8PictureInfo info;
- gpu.MemoryManager().ReadBlock(state.picture_info_offset, &info, sizeof(VP8PictureInfo));
+ host1x.MemoryManager().ReadBlock(state.picture_info_offset, &info, sizeof(VP8PictureInfo));
const bool is_key_frame = info.key_frame == 1u;
const auto bitstream_size = static_cast<size_t>(info.vld_buffer_size);
@@ -45,7 +45,7 @@ const std::vector<u8>& VP8::ComposeFrame(const NvdecCommon::NvdecRegisters& stat
frame[9] = static_cast<u8>(((info.frame_height >> 8) & 0x3f));
}
const u64 bitstream_offset = state.frame_bitstream_offset;
- gpu.MemoryManager().ReadBlock(bitstream_offset, frame.data() + header_size, bitstream_size);
+ host1x.MemoryManager().ReadBlock(bitstream_offset, frame.data() + header_size, bitstream_size);
return frame;
}
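
The key-frame path above packs the visible dimensions into the uncompressed VP8 header, two bytes per dimension with the high bits masked to six. A small sketch of that packing, assuming a zero scaling field and omitting the surrounding header bytes:

    #include <array>
    #include <cstdint>

    // VP8 stores width and height as 14-bit values, low byte first; the top two
    // bits of the second byte hold a scaling field, left at zero here.
    std::array<uint8_t, 4> PackVp8Dimensions(uint32_t width, uint32_t height) {
        return {
            static_cast<uint8_t>(width & 0xff),
            static_cast<uint8_t>((width >> 8) & 0x3f),
            static_cast<uint8_t>(height & 0xff),
            static_cast<uint8_t>((height >> 8) & 0x3f),
        };
    }
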
diff --git a/src/video_core/command_classes/codecs/vp8.h b/src/video_core/host1x/codecs/vp8.h
index 3357667b0..5bf07ecab 100644
--- a/src/video_core/command_classes/codecs/vp8.h
+++ b/src/video_core/host1x/codecs/vp8.h
@@ -8,23 +8,28 @@
#include "common/common_funcs.h"
#include "common/common_types.h"
-#include "video_core/command_classes/nvdec_common.h"
+#include "video_core/host1x/nvdec_common.h"
namespace Tegra {
-class GPU;
+
+namespace Host1x {
+class Host1x;
+} // namespace Host1x
+
namespace Decoder {
class VP8 {
public:
- explicit VP8(GPU& gpu);
+ explicit VP8(Host1x::Host1x& host1x);
~VP8();
/// Compose the VP8 frame for FFmpeg decoding
- [[nodiscard]] const std::vector<u8>& ComposeFrame(const NvdecCommon::NvdecRegisters& state);
+ [[nodiscard]] const std::vector<u8>& ComposeFrame(
+ const Host1x::NvdecCommon::NvdecRegisters& state);
private:
std::vector<u8> frame;
- GPU& gpu;
+ Host1x::Host1x& host1x;
struct VP8PictureInfo {
INSERT_PADDING_WORDS_NOINIT(14);
diff --git a/src/video_core/command_classes/codecs/vp9.cpp b/src/video_core/host1x/codecs/vp9.cpp
index c01431441..cf40c9012 100644
--- a/src/video_core/command_classes/codecs/vp9.cpp
+++ b/src/video_core/host1x/codecs/vp9.cpp
@@ -4,8 +4,8 @@
#include <algorithm> // for std::copy
#include <numeric>
#include "common/assert.h"
-#include "video_core/command_classes/codecs/vp9.h"
-#include "video_core/gpu.h"
+#include "video_core/host1x/codecs/vp9.h"
+#include "video_core/host1x/host1x.h"
#include "video_core/memory_manager.h"
namespace Tegra::Decoder {
@@ -236,7 +236,7 @@ constexpr std::array<u8, 254> map_lut{
}
} // Anonymous namespace
-VP9::VP9(GPU& gpu_) : gpu{gpu_} {}
+VP9::VP9(Host1x::Host1x& host1x_) : host1x{host1x_} {}
VP9::~VP9() = default;
@@ -355,9 +355,9 @@ void VP9::WriteMvProbabilityUpdate(VpxRangeEncoder& writer, u8 new_prob, u8 old_
}
}
-Vp9PictureInfo VP9::GetVp9PictureInfo(const NvdecCommon::NvdecRegisters& state) {
+Vp9PictureInfo VP9::GetVp9PictureInfo(const Host1x::NvdecCommon::NvdecRegisters& state) {
PictureInfo picture_info;
- gpu.MemoryManager().ReadBlock(state.picture_info_offset, &picture_info, sizeof(PictureInfo));
+ host1x.MemoryManager().ReadBlock(state.picture_info_offset, &picture_info, sizeof(PictureInfo));
Vp9PictureInfo vp9_info = picture_info.Convert();
InsertEntropy(state.vp9_entropy_probs_offset, vp9_info.entropy);
@@ -372,18 +372,19 @@ Vp9PictureInfo VP9::GetVp9PictureInfo(const NvdecCommon::NvdecRegisters& state)
void VP9::InsertEntropy(u64 offset, Vp9EntropyProbs& dst) {
EntropyProbs entropy;
- gpu.MemoryManager().ReadBlock(offset, &entropy, sizeof(EntropyProbs));
+ host1x.MemoryManager().ReadBlock(offset, &entropy, sizeof(EntropyProbs));
entropy.Convert(dst);
}
-Vp9FrameContainer VP9::GetCurrentFrame(const NvdecCommon::NvdecRegisters& state) {
+Vp9FrameContainer VP9::GetCurrentFrame(const Host1x::NvdecCommon::NvdecRegisters& state) {
Vp9FrameContainer current_frame{};
{
- gpu.SyncGuestHost();
+ // gpu.SyncGuestHost(); epic, why?
current_frame.info = GetVp9PictureInfo(state);
current_frame.bit_stream.resize(current_frame.info.bitstream_size);
- gpu.MemoryManager().ReadBlock(state.frame_bitstream_offset, current_frame.bit_stream.data(),
- current_frame.info.bitstream_size);
+ host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset,
+ current_frame.bit_stream.data(),
+ current_frame.info.bitstream_size);
}
if (!next_frame.bit_stream.empty()) {
Vp9FrameContainer temp{
@@ -769,7 +770,7 @@ VpxBitStreamWriter VP9::ComposeUncompressedHeader() {
return uncomp_writer;
}
-void VP9::ComposeFrame(const NvdecCommon::NvdecRegisters& state) {
+void VP9::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state) {
std::vector<u8> bitstream;
{
Vp9FrameContainer curr_frame = GetCurrentFrame(state);
diff --git a/src/video_core/command_classes/codecs/vp9.h b/src/video_core/host1x/codecs/vp9.h
index ecc40e8b1..d4083e8d3 100644
--- a/src/video_core/command_classes/codecs/vp9.h
+++ b/src/video_core/host1x/codecs/vp9.h
@@ -8,11 +8,15 @@
#include "common/common_types.h"
#include "common/stream.h"
-#include "video_core/command_classes/codecs/vp9_types.h"
-#include "video_core/command_classes/nvdec_common.h"
+#include "video_core/host1x/codecs/vp9_types.h"
+#include "video_core/host1x/nvdec_common.h"
namespace Tegra {
-class GPU;
+
+namespace Host1x {
+class Host1x;
+} // namespace Host1x
+
namespace Decoder {
/// The VpxRangeEncoder, and VpxBitStreamWriter classes are used to compose the
@@ -106,7 +110,7 @@ private:
class VP9 {
public:
- explicit VP9(GPU& gpu_);
+ explicit VP9(Host1x::Host1x& host1x);
~VP9();
VP9(const VP9&) = delete;
@@ -117,7 +121,7 @@ public:
/// Composes the VP9 frame from the GPU state information.
/// Based on the official VP9 spec documentation
- void ComposeFrame(const NvdecCommon::NvdecRegisters& state);
+ void ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state);
/// Returns true if the most recent frame was a hidden frame.
[[nodiscard]] bool WasFrameHidden() const {
@@ -162,19 +166,21 @@ private:
void WriteMvProbabilityUpdate(VpxRangeEncoder& writer, u8 new_prob, u8 old_prob);
/// Returns VP9 information from NVDEC provided offset and size
- [[nodiscard]] Vp9PictureInfo GetVp9PictureInfo(const NvdecCommon::NvdecRegisters& state);
+ [[nodiscard]] Vp9PictureInfo GetVp9PictureInfo(
+ const Host1x::NvdecCommon::NvdecRegisters& state);
/// Read and convert NVDEC provided entropy probs to Vp9EntropyProbs struct
void InsertEntropy(u64 offset, Vp9EntropyProbs& dst);
/// Returns frame to be decoded after buffering
- [[nodiscard]] Vp9FrameContainer GetCurrentFrame(const NvdecCommon::NvdecRegisters& state);
+ [[nodiscard]] Vp9FrameContainer GetCurrentFrame(
+ const Host1x::NvdecCommon::NvdecRegisters& state);
/// Use NVDEC provided information to compose the headers for the current frame
[[nodiscard]] std::vector<u8> ComposeCompressedHeader();
[[nodiscard]] VpxBitStreamWriter ComposeUncompressedHeader();
- GPU& gpu;
+ Host1x::Host1x& host1x;
std::vector<u8> frame;
std::array<s8, 4> loop_filter_ref_deltas{};
diff --git a/src/video_core/command_classes/codecs/vp9_types.h b/src/video_core/host1x/codecs/vp9_types.h
index bb3d8df6e..adad8ed7e 100644
--- a/src/video_core/command_classes/codecs/vp9_types.h
+++ b/src/video_core/host1x/codecs/vp9_types.h
@@ -9,7 +9,6 @@
#include "common/common_types.h"
namespace Tegra {
-class GPU;
namespace Decoder {
struct Vp9FrameDimensions {
diff --git a/src/video_core/host1x/control.cpp b/src/video_core/host1x/control.cpp
new file mode 100644
index 000000000..dceefdb7f
--- /dev/null
+++ b/src/video_core/host1x/control.cpp
@@ -0,0 +1,33 @@
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "common/assert.h"
+#include "video_core/host1x/control.h"
+#include "video_core/host1x/host1x.h"
+
+namespace Tegra::Host1x {
+
+Control::Control(Host1x& host1x_) : host1x(host1x_) {}
+
+Control::~Control() = default;
+
+void Control::ProcessMethod(Method method, u32 argument) {
+ switch (method) {
+ case Method::LoadSyncptPayload32:
+ syncpoint_value = argument;
+ break;
+ case Method::WaitSyncpt:
+ case Method::WaitSyncpt32:
+ Execute(argument);
+ break;
+ default:
+ UNIMPLEMENTED_MSG("Control method 0x{:X}", static_cast<u32>(method));
+ break;
+ }
+}
+
+void Control::Execute(u32 data) {
+ host1x.GetSyncpointManager().WaitHost(data, syncpoint_value);
+}
+
+} // namespace Tegra::Host1x
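
ProcessMethod only latches a payload or triggers a wait; the sole state the class keeps is the last loaded 32-bit value. A condensed sketch of that latch-then-wait dispatch, where the wait callback stands in for SyncpointManager::WaitHost and the enum's numeric ids are elided:

    #include <cstdint>
    #include <functional>

    class ControlSketch {
    public:
        enum class Method { LoadSyncptPayload32, WaitSyncpt, WaitSyncpt32 }; // ids elided

        explicit ControlSketch(std::function<void(uint32_t, uint32_t)> wait_on)
            : wait{std::move(wait_on)} {}

        void ProcessMethod(Method method, uint32_t argument) {
            switch (method) {
            case Method::LoadSyncptPayload32:
                syncpoint_value = argument; // remember the threshold for the next wait
                break;
            case Method::WaitSyncpt:
            case Method::WaitSyncpt32:
                wait(argument, syncpoint_value); // argument is the syncpoint id
                break;
            }
        }

    private:
        uint32_t syncpoint_value{};
        std::function<void(uint32_t, uint32_t)> wait;
    };
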
diff --git a/src/video_core/command_classes/host1x.h b/src/video_core/host1x/control.h
index bb48a4381..e117888a3 100644
--- a/src/video_core/command_classes/host1x.h
+++ b/src/video_core/host1x/control.h
@@ -1,15 +1,19 @@
-// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
#pragma once
#include "common/common_types.h"
namespace Tegra {
-class GPU;
+
+namespace Host1x {
+
+class Host1x;
class Nvdec;
-class Host1x {
+class Control {
public:
enum class Method : u32 {
WaitSyncpt = 0x8,
@@ -17,8 +21,8 @@ public:
WaitSyncpt32 = 0x50,
};
- explicit Host1x(GPU& gpu);
- ~Host1x();
+ explicit Control(Host1x& host1x);
+ ~Control();
/// Writes the method into the state; invokes Execute() if encountered
void ProcessMethod(Method method, u32 argument);
@@ -28,7 +32,9 @@ private:
void Execute(u32 data);
u32 syncpoint_value{};
- GPU& gpu;
+ Host1x& host1x;
};
+} // namespace Host1x
+
} // namespace Tegra
diff --git a/src/video_core/host1x/host1x.cpp b/src/video_core/host1x/host1x.cpp
new file mode 100644
index 000000000..7c317a85d
--- /dev/null
+++ b/src/video_core/host1x/host1x.cpp
@@ -0,0 +1,17 @@
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "core/core.h"
+#include "video_core/host1x/host1x.h"
+
+namespace Tegra {
+
+namespace Host1x {
+
+Host1x::Host1x(Core::System& system_)
+ : system{system_}, syncpoint_manager{}, memory_manager{system, 32, 12},
+ allocator{std::make_unique<Common::FlatAllocator<u32, 0, 32>>(1 << 12)} {}
+
+} // namespace Host1x
+
+} // namespace Tegra
diff --git a/src/video_core/host1x/host1x.h b/src/video_core/host1x/host1x.h
new file mode 100644
index 000000000..57082ae54
--- /dev/null
+++ b/src/video_core/host1x/host1x.h
@@ -0,0 +1,57 @@
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#pragma once
+
+#include "common/common_types.h"
+
+#include "common/address_space.h"
+#include "video_core/host1x/syncpoint_manager.h"
+#include "video_core/memory_manager.h"
+
+namespace Core {
+class System;
+} // namespace Core
+
+namespace Tegra {
+
+namespace Host1x {
+
+class Host1x {
+public:
+ explicit Host1x(Core::System& system);
+
+ SyncpointManager& GetSyncpointManager() {
+ return syncpoint_manager;
+ }
+
+ const SyncpointManager& GetSyncpointManager() const {
+ return syncpoint_manager;
+ }
+
+ Tegra::MemoryManager& MemoryManager() {
+ return memory_manager;
+ }
+
+ const Tegra::MemoryManager& MemoryManager() const {
+ return memory_manager;
+ }
+
+ Common::FlatAllocator<u32, 0, 32>& Allocator() {
+ return *allocator;
+ }
+
+ const Common::FlatAllocator<u32, 0, 32>& Allocator() const {
+ return *allocator;
+ }
+
+private:
+ Core::System& system;
+ SyncpointManager syncpoint_manager;
+ Tegra::MemoryManager memory_manager;
+ std::unique_ptr<Common::FlatAllocator<u32, 0, 32>> allocator;
+};
+
+} // namespace Host1x
+
+} // namespace Tegra
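
Host1x itself is just an aggregation point: it owns the syncpoint manager, a dedicated memory manager, and a flat allocator, and hands out paired const/non-const references. A reduced sketch of that shape, with stub member types rather than the real classes:

    #include <memory>

    struct SyncpointsStub {};
    struct GuestMemoryStub {};

    // One owner constructs the subsystems; clients only ever see references.
    class Host1xSketch {
    public:
        SyncpointsStub& GetSyncpointManager() { return syncpoints; }
        const SyncpointsStub& GetSyncpointManager() const { return syncpoints; }

        GuestMemoryStub& MemoryManager() { return *memory; }
        const GuestMemoryStub& MemoryManager() const { return *memory; }

    private:
        SyncpointsStub syncpoints;
        std::unique_ptr<GuestMemoryStub> memory = std::make_unique<GuestMemoryStub>();
    };
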
diff --git a/src/video_core/command_classes/nvdec.cpp b/src/video_core/host1x/nvdec.cpp
index 4fbbe3da6..a4bd5b79f 100644
--- a/src/video_core/command_classes/nvdec.cpp
+++ b/src/video_core/host1x/nvdec.cpp
@@ -2,15 +2,16 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/assert.h"
-#include "video_core/command_classes/nvdec.h"
-#include "video_core/gpu.h"
+#include "video_core/host1x/host1x.h"
+#include "video_core/host1x/nvdec.h"
-namespace Tegra {
+namespace Tegra::Host1x {
#define NVDEC_REG_INDEX(field_name) \
(offsetof(NvdecCommon::NvdecRegisters, field_name) / sizeof(u64))
-Nvdec::Nvdec(GPU& gpu_) : gpu(gpu_), state{}, codec(std::make_unique<Codec>(gpu, state)) {}
+Nvdec::Nvdec(Host1x& host1x_)
+ : host1x(host1x_), state{}, codec(std::make_unique<Codec>(host1x, state)) {}
Nvdec::~Nvdec() = default;
@@ -44,4 +45,4 @@ void Nvdec::Execute() {
}
}
-} // namespace Tegra
+} // namespace Tegra::Host1x
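
NVDEC_REG_INDEX turns a register-struct field into a method index by dividing its byte offset by sizeof(u64). A self-contained illustration of the trick; the field names and layout below are made up for the example:

    #include <cstddef>
    #include <cstdint>

    // The register file is a struct of u64 fields, so an index can be derived
    // from a field's byte offset.
    struct RegistersSketch {
        uint64_t set_codec_id;        // index 0
        uint64_t execute;             // index 1
        uint64_t picture_info_offset; // index 2
    };

    #define REG_INDEX(field) (offsetof(RegistersSketch, field) / sizeof(uint64_t))

    static_assert(REG_INDEX(execute) == 1, "offset-derived index matches position");
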
diff --git a/src/video_core/command_classes/nvdec.h b/src/video_core/host1x/nvdec.h
index 488531fc6..3949d5181 100644
--- a/src/video_core/command_classes/nvdec.h
+++ b/src/video_core/host1x/nvdec.h
@@ -6,14 +6,17 @@
#include <memory>
#include <vector>
#include "common/common_types.h"
-#include "video_core/command_classes/codecs/codec.h"
+#include "video_core/host1x/codecs/codec.h"
namespace Tegra {
-class GPU;
+
+namespace Host1x {
+
+class Host1x;
class Nvdec {
public:
- explicit Nvdec(GPU& gpu);
+ explicit Nvdec(Host1x& host1x);
~Nvdec();
/// Writes the method into the state; invokes Execute() if encountered
@@ -26,8 +29,11 @@ private:
/// Invoke codec to decode a frame
void Execute();
- GPU& gpu;
+ Host1x& host1x;
NvdecCommon::NvdecRegisters state;
std::unique_ptr<Codec> codec;
};
+
+} // namespace Host1x
+
} // namespace Tegra
diff --git a/src/video_core/command_classes/nvdec_common.h b/src/video_core/host1x/nvdec_common.h
index 521e5b52b..49d67ebbe 100644
--- a/src/video_core/command_classes/nvdec_common.h
+++ b/src/video_core/host1x/nvdec_common.h
@@ -7,7 +7,7 @@
#include "common/common_funcs.h"
#include "common/common_types.h"
-namespace Tegra::NvdecCommon {
+namespace Tegra::Host1x::NvdecCommon {
enum class VideoCodec : u64 {
None = 0x0,
@@ -94,4 +94,4 @@ ASSERT_REG_POSITION(vp9_curr_frame_mvs_offset, 0x176);
#undef ASSERT_REG_POSITION
-} // namespace Tegra::NvdecCommon
+} // namespace Tegra::Host1x::NvdecCommon
diff --git a/src/video_core/command_classes/sync_manager.cpp b/src/video_core/host1x/sync_manager.cpp
index 67e58046f..5ef9ea217 100644
--- a/src/video_core/command_classes/sync_manager.cpp
+++ b/src/video_core/host1x/sync_manager.cpp
@@ -3,10 +3,13 @@
#include <algorithm>
#include "sync_manager.h"
-#include "video_core/gpu.h"
+#include "video_core/host1x/host1x.h"
+#include "video_core/host1x/syncpoint_manager.h"
namespace Tegra {
-SyncptIncrManager::SyncptIncrManager(GPU& gpu_) : gpu(gpu_) {}
+namespace Host1x {
+
+SyncptIncrManager::SyncptIncrManager(Host1x& host1x_) : host1x(host1x_) {}
SyncptIncrManager::~SyncptIncrManager() = default;
void SyncptIncrManager::Increment(u32 id) {
@@ -36,8 +39,12 @@ void SyncptIncrManager::IncrementAllDone() {
if (!increments[done_count].complete) {
break;
}
- gpu.IncrementSyncPoint(increments[done_count].syncpt_id);
+ auto& syncpoint_manager = host1x.GetSyncpointManager();
+ syncpoint_manager.IncrementGuest(increments[done_count].syncpt_id);
+ syncpoint_manager.IncrementHost(increments[done_count].syncpt_id);
}
increments.erase(increments.begin(), increments.begin() + done_count);
}
+
+} // namespace Host1x
} // namespace Tegra
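
IncrementAllDone only applies the leading run of completed increments, so syncpoints advance in submission order, and each applied entry now bumps both the guest and host copies. A sketch of that ordered drain, with increment_fn standing in for the IncrementGuest/IncrementHost pair:

    #include <cstddef>
    #include <cstdint>
    #include <deque>

    struct PendingIncr {
        uint32_t syncpt_id;
        bool complete;
    };

    // Apply only the completed prefix; stop at the first unfinished increment so
    // ordering is preserved, then erase what was applied.
    template <typename IncrementFn>
    void DrainCompleted(std::deque<PendingIncr>& pending, IncrementFn&& increment_fn) {
        std::size_t done = 0;
        for (; done < pending.size(); ++done) {
            if (!pending[done].complete) {
                break;
            }
            increment_fn(pending[done].syncpt_id);
        }
        pending.erase(pending.begin(), pending.begin() + static_cast<std::ptrdiff_t>(done));
    }
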
diff --git a/src/video_core/command_classes/sync_manager.h b/src/video_core/host1x/sync_manager.h
index 6dfaae080..7bb77fa27 100644
--- a/src/video_core/command_classes/sync_manager.h
+++ b/src/video_core/host1x/sync_manager.h
@@ -8,7 +8,11 @@
#include "common/common_types.h"
namespace Tegra {
-class GPU;
+
+namespace Host1x {
+
+class Host1x;
+
struct SyncptIncr {
u32 id;
u32 class_id;
@@ -21,7 +25,7 @@ struct SyncptIncr {
class SyncptIncrManager {
public:
- explicit SyncptIncrManager(GPU& gpu);
+ explicit SyncptIncrManager(Host1x& host1x);
~SyncptIncrManager();
/// Add syncpoint id and increment all
@@ -41,7 +45,9 @@ private:
std::mutex increment_lock;
u32 current_id{};
- GPU& gpu;
+ Host1x& host1x;
};
+} // namespace Host1x
+
} // namespace Tegra
diff --git a/src/video_core/host1x/syncpoint_manager.cpp b/src/video_core/host1x/syncpoint_manager.cpp
new file mode 100644
index 000000000..326e8355a
--- /dev/null
+++ b/src/video_core/host1x/syncpoint_manager.cpp
@@ -0,0 +1,96 @@
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "common/microprofile.h"
+#include "video_core/host1x/syncpoint_manager.h"
+
+namespace Tegra {
+
+namespace Host1x {
+
+MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192));
+
+SyncpointManager::ActionHandle SyncpointManager::RegisterAction(
+ std::atomic<u32>& syncpoint, std::list<RegisteredAction>& action_storage, u32 expected_value,
+ std::function<void()>&& action) {
+ if (syncpoint.load(std::memory_order_acquire) >= expected_value) {
+ action();
+ return {};
+ }
+
+ std::unique_lock lk(guard);
+ if (syncpoint.load(std::memory_order_relaxed) >= expected_value) {
+ action();
+ return {};
+ }
+ auto it = action_storage.begin();
+ while (it != action_storage.end()) {
+ if (it->expected_value >= expected_value) {
+ break;
+ }
+ ++it;
+ }
+ return action_storage.emplace(it, expected_value, std::move(action));
+}
+
+void SyncpointManager::DeregisterAction(std::list<RegisteredAction>& action_storage,
+ ActionHandle& handle) {
+ std::unique_lock lk(guard);
+ action_storage.erase(handle);
+}
+
+void SyncpointManager::DeregisterGuestAction(u32 syncpoint_id, ActionHandle& handle) {
+ DeregisterAction(guest_action_storage[syncpoint_id], handle);
+}
+
+void SyncpointManager::DeregisterHostAction(u32 syncpoint_id, ActionHandle& handle) {
+ DeregisterAction(host_action_storage[syncpoint_id], handle);
+}
+
+void SyncpointManager::IncrementGuest(u32 syncpoint_id) {
+ Increment(syncpoints_guest[syncpoint_id], wait_guest_cv, guest_action_storage[syncpoint_id]);
+}
+
+void SyncpointManager::IncrementHost(u32 syncpoint_id) {
+ Increment(syncpoints_host[syncpoint_id], wait_host_cv, host_action_storage[syncpoint_id]);
+}
+
+void SyncpointManager::WaitGuest(u32 syncpoint_id, u32 expected_value) {
+ Wait(syncpoints_guest[syncpoint_id], wait_guest_cv, expected_value);
+}
+
+void SyncpointManager::WaitHost(u32 syncpoint_id, u32 expected_value) {
+ MICROPROFILE_SCOPE(GPU_wait);
+ Wait(syncpoints_host[syncpoint_id], wait_host_cv, expected_value);
+}
+
+void SyncpointManager::Increment(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv,
+ std::list<RegisteredAction>& action_storage) {
+ auto new_value{syncpoint.fetch_add(1, std::memory_order_acq_rel) + 1};
+
+ std::unique_lock lk(guard);
+ auto it = action_storage.begin();
+ while (it != action_storage.end()) {
+ if (it->expected_value > new_value) {
+ break;
+ }
+ it->action();
+ it = action_storage.erase(it);
+ }
+ wait_cv.notify_all();
+}
+
+void SyncpointManager::Wait(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv,
+ u32 expected_value) {
+ const auto pred = [&]() { return syncpoint.load(std::memory_order_acquire) >= expected_value; };
+ if (pred()) {
+ return;
+ }
+
+ std::unique_lock lk(guard);
+ wait_cv.wait(lk, pred);
+}
+
+} // namespace Host1x
+
+} // namespace Tegra
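
RegisterAction is the interesting part: the threshold is checked once without the lock, once more under it, and only then is the action inserted, kept sorted by expected_value so Increment can stop at the first entry that is still in the future. A compact sketch of that registration path under the same assumptions:

    #include <atomic>
    #include <cstdint>
    #include <functional>
    #include <list>
    #include <mutex>

    struct Pending {
        uint32_t expected_value;
        std::function<void()> action;
    };

    // Fast path: run immediately if the syncpoint already passed the threshold.
    // Slow path: insert before the first entry with an equal or later threshold.
    std::list<Pending>::iterator Register(std::atomic<uint32_t>& syncpoint,
                                          std::list<Pending>& storage, std::mutex& guard,
                                          uint32_t expected_value, std::function<void()> action) {
        if (syncpoint.load(std::memory_order_acquire) >= expected_value) {
            action();
            return storage.end();
        }
        std::scoped_lock lk{guard};
        if (syncpoint.load(std::memory_order_relaxed) >= expected_value) {
            action();
            return storage.end();
        }
        auto it = storage.begin();
        while (it != storage.end() && it->expected_value < expected_value) {
            ++it;
        }
        return storage.insert(it, Pending{expected_value, std::move(action)});
    }
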
diff --git a/src/video_core/host1x/syncpoint_manager.h b/src/video_core/host1x/syncpoint_manager.h
new file mode 100644
index 000000000..50a264e23
--- /dev/null
+++ b/src/video_core/host1x/syncpoint_manager.h
@@ -0,0 +1,98 @@
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#pragma once
+
+#include <array>
+#include <atomic>
+#include <condition_variable>
+#include <functional>
+#include <list>
+#include <mutex>
+
+#include "common/common_types.h"
+
+namespace Tegra {
+
+namespace Host1x {
+
+class SyncpointManager {
+public:
+ u32 GetGuestSyncpointValue(u32 id) const {
+ return syncpoints_guest[id].load(std::memory_order_acquire);
+ }
+
+ u32 GetHostSyncpointValue(u32 id) const {
+ return syncpoints_host[id].load(std::memory_order_acquire);
+ }
+
+ struct RegisteredAction {
+ explicit RegisteredAction(u32 expected_value_, std::function<void()>&& action_)
+ : expected_value{expected_value_}, action{std::move(action_)} {}
+ u32 expected_value;
+ std::function<void()> action;
+ };
+ using ActionHandle = std::list<RegisteredAction>::iterator;
+
+ template <typename Func>
+ ActionHandle RegisterGuestAction(u32 syncpoint_id, u32 expected_value, Func&& action) {
+ std::function<void()> func(action);
+ return RegisterAction(syncpoints_guest[syncpoint_id], guest_action_storage[syncpoint_id],
+ expected_value, std::move(func));
+ }
+
+ template <typename Func>
+ ActionHandle RegisterHostAction(u32 syncpoint_id, u32 expected_value, Func&& action) {
+ std::function<void()> func(action);
+ return RegisterAction(syncpoints_host[syncpoint_id], host_action_storage[syncpoint_id],
+ expected_value, std::move(func));
+ }
+
+ void DeregisterGuestAction(u32 syncpoint_id, ActionHandle& handle);
+
+ void DeregisterHostAction(u32 syncpoint_id, ActionHandle& handle);
+
+ void IncrementGuest(u32 syncpoint_id);
+
+ void IncrementHost(u32 syncpoint_id);
+
+ void WaitGuest(u32 syncpoint_id, u32 expected_value);
+
+ void WaitHost(u32 syncpoint_id, u32 expected_value);
+
+ bool IsReadyGuest(u32 syncpoint_id, u32 expected_value) const {
+ return syncpoints_guest[syncpoint_id].load(std::memory_order_acquire) >= expected_value;
+ }
+
+ bool IsReadyHost(u32 syncpoint_id, u32 expected_value) const {
+ return syncpoints_host[syncpoint_id].load(std::memory_order_acquire) >= expected_value;
+ }
+
+private:
+ void Increment(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv,
+ std::list<RegisteredAction>& action_storage);
+
+ ActionHandle RegisterAction(std::atomic<u32>& syncpoint,
+ std::list<RegisteredAction>& action_storage, u32 expected_value,
+ std::function<void()>&& action);
+
+ void DeregisterAction(std::list<RegisteredAction>& action_storage, ActionHandle& handle);
+
+ void Wait(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv, u32 expected_value);
+
+ static constexpr size_t NUM_MAX_SYNCPOINTS = 192;
+
+ std::array<std::atomic<u32>, NUM_MAX_SYNCPOINTS> syncpoints_guest{};
+ std::array<std::atomic<u32>, NUM_MAX_SYNCPOINTS> syncpoints_host{};
+
+ std::array<std::list<RegisteredAction>, NUM_MAX_SYNCPOINTS> guest_action_storage;
+ std::array<std::list<RegisteredAction>, NUM_MAX_SYNCPOINTS> host_action_storage;
+
+ std::mutex guard;
+ std::condition_variable wait_guest_cv;
+ std::condition_variable wait_host_cv;
+};
+
+} // namespace Host1x
+
+} // namespace Tegra
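
The wait side pairs an atomic counter with a condition variable: the predicate re-reads the counter, and the incrementer notifies while holding the same mutex so a wakeup cannot be lost between the check and the block. A minimal, self-contained sketch of that pairing, using one counter instead of the guest/host arrays:

    #include <atomic>
    #include <condition_variable>
    #include <cstdint>
    #include <mutex>

    struct CounterSketch {
        std::atomic<uint32_t> value{};
        std::mutex guard;
        std::condition_variable cv;

        void Increment() {
            value.fetch_add(1, std::memory_order_acq_rel);
            // Notify under the mutex, mirroring the code above, so a waiter that
            // just evaluated the predicate cannot miss this wakeup.
            std::lock_guard lk{guard};
            cv.notify_all();
        }

        void Wait(uint32_t expected) {
            const auto pred = [&] { return value.load(std::memory_order_acquire) >= expected; };
            if (pred()) {
                return; // already signalled, skip the lock entirely
            }
            std::unique_lock lk{guard};
            cv.wait(lk, pred);
        }
    };
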
diff --git a/src/video_core/command_classes/vic.cpp b/src/video_core/host1x/vic.cpp
index 7c17df353..ac0b7d20e 100644
--- a/src/video_core/command_classes/vic.cpp
+++ b/src/video_core/host1x/vic.cpp
@@ -18,14 +18,17 @@ extern "C" {
#include "common/bit_field.h"
#include "common/logging/log.h"
-#include "video_core/command_classes/nvdec.h"
-#include "video_core/command_classes/vic.h"
#include "video_core/engines/maxwell_3d.h"
-#include "video_core/gpu.h"
+#include "video_core/host1x/host1x.h"
+#include "video_core/host1x/nvdec.h"
+#include "video_core/host1x/vic.h"
#include "video_core/memory_manager.h"
#include "video_core/textures/decoders.h"
namespace Tegra {
+
+namespace Host1x {
+
namespace {
enum class VideoPixelFormat : u64_le {
RGBA8 = 0x1f,
@@ -46,8 +49,8 @@ union VicConfig {
BitField<46, 14, u64_le> surface_height_minus1;
};
-Vic::Vic(GPU& gpu_, std::shared_ptr<Nvdec> nvdec_processor_)
- : gpu(gpu_),
+Vic::Vic(Host1x& host1x_, std::shared_ptr<Nvdec> nvdec_processor_)
+ : host1x(host1x_),
nvdec_processor(std::move(nvdec_processor_)), converted_frame_buffer{nullptr, av_free} {}
Vic::~Vic() = default;
@@ -78,7 +81,7 @@ void Vic::Execute() {
LOG_ERROR(Service_NVDRV, "VIC Luma address not set.");
return;
}
- const VicConfig config{gpu.MemoryManager().Read<u64>(config_struct_address + 0x20)};
+ const VicConfig config{host1x.MemoryManager().Read<u64>(config_struct_address + 0x20)};
const AVFramePtr frame_ptr = nvdec_processor->GetFrame();
const auto* frame = frame_ptr.get();
if (!frame) {
@@ -153,15 +156,16 @@ void Vic::WriteRGBFrame(const AVFrame* frame, const VicConfig& config) {
const u32 block_height = static_cast<u32>(config.block_linear_height_log2);
const auto size = Texture::CalculateSize(true, 4, width, height, 1, block_height, 0);
luma_buffer.resize(size);
- Texture::SwizzleSubrect(width, height, width * 4, width, 4, luma_buffer.data(),
- converted_frame_buf_addr, block_height, 0, 0);
+ std::span<const u8> frame_buff(converted_frame_buf_addr, 4 * width * height);
+ Texture::SwizzleSubrect(luma_buffer, frame_buff, 4, width, height, 1, 0, 0, width, height,
+ block_height, 0, width * 4);
- gpu.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), size);
+ host1x.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), size);
} else {
// send pitch linear frame
const size_t linear_size = width * height * 4;
- gpu.MemoryManager().WriteBlock(output_surface_luma_address, converted_frame_buf_addr,
- linear_size);
+ host1x.MemoryManager().WriteBlock(output_surface_luma_address, converted_frame_buf_addr,
+ linear_size);
}
}
@@ -189,8 +193,8 @@ void Vic::WriteYUVFrame(const AVFrame* frame, const VicConfig& config) {
luma_buffer[dst + x] = luma_src[src + x];
}
}
- gpu.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(),
- luma_buffer.size());
+ host1x.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(),
+ luma_buffer.size());
// Chroma
const std::size_t half_height = frame_height / 2;
@@ -231,8 +235,10 @@ void Vic::WriteYUVFrame(const AVFrame* frame, const VicConfig& config) {
ASSERT(false);
break;
}
- gpu.MemoryManager().WriteBlock(output_surface_chroma_address, chroma_buffer.data(),
- chroma_buffer.size());
+ host1x.MemoryManager().WriteBlock(output_surface_chroma_address, chroma_buffer.data(),
+ chroma_buffer.size());
}
+} // namespace Host1x
+
} // namespace Tegra
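
WriteYUVFrame copies the decoded planes row by row because FFmpeg's linesize can be wider than the visible width, while the output surface expects packed rows. A small sketch of that repacking for a single byte-per-sample plane, where surface_width is an assumed output pitch rather than the real surface stride computation:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Copy width bytes out of each stride-sized source row into packed rows.
    std::vector<uint8_t> PackPlane(const uint8_t* src, std::size_t linesize,
                                   std::size_t width, std::size_t height,
                                   std::size_t surface_width) {
        std::vector<uint8_t> packed(surface_width * height);
        for (std::size_t y = 0; y < height; ++y) {
            const std::size_t src_row = y * linesize;
            const std::size_t dst_row = y * surface_width;
            for (std::size_t x = 0; x < width; ++x) {
                packed[dst_row + x] = src[src_row + x];
            }
        }
        return packed;
    }
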
diff --git a/src/video_core/command_classes/vic.h b/src/video_core/host1x/vic.h
index 010daa6b6..2b78786e8 100644
--- a/src/video_core/command_classes/vic.h
+++ b/src/video_core/host1x/vic.h
@@ -10,7 +10,10 @@
struct SwsContext;
namespace Tegra {
-class GPU;
+
+namespace Host1x {
+
+class Host1x;
class Nvdec;
union VicConfig;
@@ -25,7 +28,7 @@ public:
SetOutputSurfaceChromaUnusedOffset = 0x1ca
};
- explicit Vic(GPU& gpu, std::shared_ptr<Nvdec> nvdec_processor);
+ explicit Vic(Host1x& host1x, std::shared_ptr<Nvdec> nvdec_processor);
~Vic();
@@ -39,8 +42,8 @@ private:
void WriteYUVFrame(const AVFrame* frame, const VicConfig& config);
- GPU& gpu;
- std::shared_ptr<Tegra::Nvdec> nvdec_processor;
+ Host1x& host1x;
+ std::shared_ptr<Tegra::Host1x::Nvdec> nvdec_processor;
/// Avoid reallocation of the following buffers every frame, as their
/// size does not change during a stream
@@ -58,4 +61,6 @@ private:
s32 scaler_height{};
};
+} // namespace Host1x
+
} // namespace Tegra
diff --git a/src/video_core/macro/macro.cpp b/src/video_core/macro/macro.cpp
index 43f8b5904..f61d5998e 100644
--- a/src/video_core/macro/macro.cpp
+++ b/src/video_core/macro/macro.cpp
@@ -8,6 +8,7 @@
#include <boost/container_hash/hash.hpp>
+#include <fstream>
#include "common/assert.h"
#include "common/fs/fs.h"
#include "common/fs/path_util.h"
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index bf9eb735d..cca401c74 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -7,6 +7,7 @@
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
+#include "core/device_memory.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/memory.h"
@@ -16,172 +17,198 @@
namespace Tegra {
-MemoryManager::MemoryManager(Core::System& system_)
- : system{system_}, page_table(page_table_size) {}
+std::atomic<size_t> MemoryManager::unique_identifier_generator{};
+
+MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64 big_page_bits_,
+ u64 page_bits_)
+ : system{system_}, memory{system.Memory()}, device_memory{system.DeviceMemory()},
+ address_space_bits{address_space_bits_}, page_bits{page_bits_}, big_page_bits{big_page_bits_},
+ entries{}, big_entries{}, page_table{address_space_bits, address_space_bits + page_bits - 38,
+ page_bits != big_page_bits ? page_bits : 0},
+ unique_identifier{unique_identifier_generator.fetch_add(1, std::memory_order_acq_rel)} {
+ address_space_size = 1ULL << address_space_bits;
+ page_size = 1ULL << page_bits;
+ page_mask = page_size - 1ULL;
+ big_page_size = 1ULL << big_page_bits;
+ big_page_mask = big_page_size - 1ULL;
+ const u64 page_table_bits = address_space_bits - page_bits;
+ const u64 big_page_table_bits = address_space_bits - big_page_bits;
+ const u64 page_table_size = 1ULL << page_table_bits;
+ const u64 big_page_table_size = 1ULL << big_page_table_bits;
+ page_table_mask = page_table_size - 1;
+ big_page_table_mask = big_page_table_size - 1;
+
+ big_entries.resize(big_page_table_size / 32, 0);
+ big_page_table_cpu.resize(big_page_table_size);
+ big_page_continous.resize(big_page_table_size / continous_bits, 0);
+ entries.resize(page_table_size / 32, 0);
+}
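
Everything else in the constructor is derived from the three bit widths, and the /32 in the resize calls comes from packing 32 two-bit EntryType values into each u64 word. A worked example with illustrative numbers (a 32-bit address space and 12-bit pages, chosen for the arithmetic rather than taken from a particular instance):

    #include <cstdint>

    constexpr uint64_t address_space_bits = 32;
    constexpr uint64_t page_bits = 12;

    constexpr uint64_t page_size = 1ULL << page_bits;                             // 4 KiB
    constexpr uint64_t page_table_size = 1ULL << (address_space_bits - page_bits); // 1M pages
    // Two EntryType bits per page, 32 entries packed per u64 word:
    constexpr uint64_t entry_words = page_table_size / 32;

    static_assert(page_size == 4096);
    static_assert(page_table_size == 1048576);
    static_assert(entry_words == 32768);
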
MemoryManager::~MemoryManager() = default;
-void MemoryManager::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) {
- rasterizer = rasterizer_;
-}
-
-GPUVAddr MemoryManager::UpdateRange(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size) {
- u64 remaining_size{size};
- for (u64 offset{}; offset < size; offset += page_size) {
- if (remaining_size < page_size) {
- SetPageEntry(gpu_addr + offset, page_entry + offset, remaining_size);
- } else {
- SetPageEntry(gpu_addr + offset, page_entry + offset);
- }
- remaining_size -= page_size;
+template <bool is_big_page>
+MemoryManager::EntryType MemoryManager::GetEntry(size_t position) const {
+ if constexpr (is_big_page) {
+ position = position >> big_page_bits;
+ const u64 entry_mask = big_entries[position / 32];
+ const size_t sub_index = position % 32;
+ return static_cast<EntryType>((entry_mask >> (2 * sub_index)) & 0x03ULL);
+ } else {
+ position = position >> page_bits;
+ const u64 entry_mask = entries[position / 32];
+ const size_t sub_index = position % 32;
+ return static_cast<EntryType>((entry_mask >> (2 * sub_index)) & 0x03ULL);
}
- return gpu_addr;
}
-GPUVAddr MemoryManager::Map(VAddr cpu_addr, GPUVAddr gpu_addr, std::size_t size) {
- const auto it = std::ranges::lower_bound(map_ranges, gpu_addr, {}, &MapRange::first);
- if (it != map_ranges.end() && it->first == gpu_addr) {
- it->second = size;
+template <bool is_big_page>
+void MemoryManager::SetEntry(size_t position, MemoryManager::EntryType entry) {
+ if constexpr (is_big_page) {
+ position = position >> big_page_bits;
+ const u64 entry_mask = big_entries[position / 32];
+ const size_t sub_index = position % 32;
+ big_entries[position / 32] =
+ (~(3ULL << sub_index * 2) & entry_mask) | (static_cast<u64>(entry) << sub_index * 2);
} else {
- map_ranges.insert(it, MapRange{gpu_addr, size});
+ position = position >> page_bits;
+ const u64 entry_mask = entries[position / 32];
+ const size_t sub_index = position % 32;
+ entries[position / 32] =
+ (~(3ULL << sub_index * 2) & entry_mask) | (static_cast<u64>(entry) << sub_index * 2);
}
- return UpdateRange(gpu_addr, cpu_addr, size);
}
-GPUVAddr MemoryManager::MapAllocate(VAddr cpu_addr, std::size_t size, std::size_t align) {
- return Map(cpu_addr, *FindFreeRange(size, align), size);
+inline bool MemoryManager::IsBigPageContinous(size_t big_page_index) const {
+ const u64 entry_mask = big_page_continous[big_page_index / continous_bits];
+ const size_t sub_index = big_page_index % continous_bits;
+ return ((entry_mask >> sub_index) & 0x1ULL) != 0;
}
-GPUVAddr MemoryManager::MapAllocate32(VAddr cpu_addr, std::size_t size) {
- const std::optional<GPUVAddr> gpu_addr = FindFreeRange(size, 1, true);
- ASSERT(gpu_addr);
- return Map(cpu_addr, *gpu_addr, size);
+inline void MemoryManager::SetBigPageContinous(size_t big_page_index, bool value) {
+ const u64 continous_mask = big_page_continous[big_page_index / continous_bits];
+ const size_t sub_index = big_page_index % continous_bits;
+ big_page_continous[big_page_index / continous_bits] =
+ (~(1ULL << sub_index) & continous_mask) | (value ? 1ULL << sub_index : 0);
}
-void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) {
- if (size == 0) {
- return;
- }
- const auto it = std::ranges::lower_bound(map_ranges, gpu_addr, {}, &MapRange::first);
- if (it != map_ranges.end()) {
- ASSERT(it->first == gpu_addr);
- map_ranges.erase(it);
- } else {
- ASSERT_MSG(false, "Unmapping non-existent GPU address=0x{:x}", gpu_addr);
- }
- const auto submapped_ranges = GetSubmappedRange(gpu_addr, size);
-
- for (const auto& [map_addr, map_size] : submapped_ranges) {
- // Flush and invalidate through the GPU interface, to be asynchronous if possible.
- const std::optional<VAddr> cpu_addr = GpuToCpuAddress(map_addr);
- ASSERT(cpu_addr);
-
- rasterizer->UnmapMemory(*cpu_addr, map_size);
+template <MemoryManager::EntryType entry_type>
+GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr,
+ size_t size) {
+ u64 remaining_size{size};
+ if constexpr (entry_type == EntryType::Mapped) {
+ page_table.ReserveRange(gpu_addr, size);
}
-
- UpdateRange(gpu_addr, PageEntry::State::Unmapped, size);
-}
-
-std::optional<GPUVAddr> MemoryManager::AllocateFixed(GPUVAddr gpu_addr, std::size_t size) {
for (u64 offset{}; offset < size; offset += page_size) {
- if (!GetPageEntry(gpu_addr + offset).IsUnmapped()) {
- return std::nullopt;
+ const GPUVAddr current_gpu_addr = gpu_addr + offset;
+ [[maybe_unused]] const auto current_entry_type = GetEntry<false>(current_gpu_addr);
+ SetEntry<false>(current_gpu_addr, entry_type);
+ if (current_entry_type != entry_type) {
+ rasterizer->ModifyGPUMemory(unique_identifier, gpu_addr, page_size);
+ }
+ if constexpr (entry_type == EntryType::Mapped) {
+ const VAddr current_cpu_addr = cpu_addr + offset;
+ const auto index = PageEntryIndex<false>(current_gpu_addr);
+ const u32 sub_value = static_cast<u32>(current_cpu_addr >> cpu_page_bits);
+ page_table[index] = sub_value;
}
+ remaining_size -= page_size;
}
-
- return UpdateRange(gpu_addr, PageEntry::State::Allocated, size);
-}
-
-GPUVAddr MemoryManager::Allocate(std::size_t size, std::size_t align) {
- return *AllocateFixed(*FindFreeRange(size, align), size);
+ return gpu_addr;
}
-void MemoryManager::TryLockPage(PageEntry page_entry, std::size_t size) {
- if (!page_entry.IsValid()) {
- return;
+template <MemoryManager::EntryType entry_type>
+GPUVAddr MemoryManager::BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr,
+ size_t size) {
+ u64 remaining_size{size};
+ for (u64 offset{}; offset < size; offset += big_page_size) {
+ const GPUVAddr current_gpu_addr = gpu_addr + offset;
+ [[maybe_unused]] const auto current_entry_type = GetEntry<true>(current_gpu_addr);
+ SetEntry<true>(current_gpu_addr, entry_type);
+ if (current_entry_type != entry_type) {
+ rasterizer->ModifyGPUMemory(unique_identifier, gpu_addr, big_page_size);
+ }
+ if constexpr (entry_type == EntryType::Mapped) {
+ const VAddr current_cpu_addr = cpu_addr + offset;
+ const auto index = PageEntryIndex<true>(current_gpu_addr);
+ const u32 sub_value = static_cast<u32>(current_cpu_addr >> cpu_page_bits);
+ big_page_table_cpu[index] = sub_value;
+ const bool is_continous = ([&] {
+ uintptr_t base_ptr{
+ reinterpret_cast<uintptr_t>(memory.GetPointerSilent(current_cpu_addr))};
+ if (base_ptr == 0) {
+ return false;
+ }
+ for (VAddr start_cpu = current_cpu_addr + page_size;
+ start_cpu < current_cpu_addr + big_page_size; start_cpu += page_size) {
+ base_ptr += page_size;
+ auto next_ptr = reinterpret_cast<uintptr_t>(memory.GetPointerSilent(start_cpu));
+ if (next_ptr == 0 || base_ptr != next_ptr) {
+ return false;
+ }
+ }
+ return true;
+ })();
+ SetBigPageContinous(index, is_continous);
+ }
+ remaining_size -= big_page_size;
}
-
- ASSERT(system.CurrentProcess()
- ->PageTable()
- .LockForDeviceAddressSpace(page_entry.ToAddress(), size)
- .IsSuccess());
+ return gpu_addr;
}
-void MemoryManager::TryUnlockPage(PageEntry page_entry, std::size_t size) {
- if (!page_entry.IsValid()) {
- return;
- }
-
- ASSERT(system.CurrentProcess()
- ->PageTable()
- .UnlockForDeviceAddressSpace(page_entry.ToAddress(), size)
- .IsSuccess());
+void MemoryManager::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) {
+ rasterizer = rasterizer_;
}
-PageEntry MemoryManager::GetPageEntry(GPUVAddr gpu_addr) const {
- return page_table[PageEntryIndex(gpu_addr)];
+GPUVAddr MemoryManager::Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size,
+ bool is_big_pages) {
+ if (is_big_pages) [[likely]] {
+ return BigPageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size);
+ }
+ return PageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size);
}
-void MemoryManager::SetPageEntry(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size) {
- // TODO(bunnei): We should lock/unlock device regions. This currently causes issues due to
- // improper tracking, but should be fixed in the future.
-
- //// Unlock the old page
- // TryUnlockPage(page_table[PageEntryIndex(gpu_addr)], size);
-
- //// Lock the new page
- // TryLockPage(page_entry, size);
- auto& current_page = page_table[PageEntryIndex(gpu_addr)];
-
- if ((!current_page.IsValid() && page_entry.IsValid()) ||
- current_page.ToAddress() != page_entry.ToAddress()) {
- rasterizer->ModifyGPUMemory(gpu_addr, size);
+GPUVAddr MemoryManager::MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages) {
+ if (is_big_pages) [[likely]] {
+ return BigPageTableOp<EntryType::Reserved>(gpu_addr, 0, size);
}
-
- current_page = page_entry;
+ return PageTableOp<EntryType::Reserved>(gpu_addr, 0, size);
}
-std::optional<GPUVAddr> MemoryManager::FindFreeRange(std::size_t size, std::size_t align,
- bool start_32bit_address) const {
- if (!align) {
- align = page_size;
- } else {
- align = Common::AlignUp(align, page_size);
+void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) {
+ if (size == 0) {
+ return;
}
+ const auto submapped_ranges = GetSubmappedRange(gpu_addr, size);
- u64 available_size{};
- GPUVAddr gpu_addr{start_32bit_address ? address_space_start_low : address_space_start};
- while (gpu_addr + available_size < address_space_size) {
- if (GetPageEntry(gpu_addr + available_size).IsUnmapped()) {
- available_size += page_size;
-
- if (available_size >= size) {
- return gpu_addr;
- }
- } else {
- gpu_addr += available_size + page_size;
- available_size = 0;
+ for (const auto& [map_addr, map_size] : submapped_ranges) {
+ // Flush and invalidate through the GPU interface, to be asynchronous if possible.
+ const std::optional<VAddr> cpu_addr = GpuToCpuAddress(map_addr);
+ ASSERT(cpu_addr);
- const auto remainder{gpu_addr % align};
- if (remainder) {
- gpu_addr = (gpu_addr - remainder) + align;
- }
- }
+ rasterizer->UnmapMemory(*cpu_addr, map_size);
}
- return std::nullopt;
+ BigPageTableOp<EntryType::Free>(gpu_addr, 0, size);
+ PageTableOp<EntryType::Free>(gpu_addr, 0, size);
}
std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const {
- if (gpu_addr == 0) {
+ if (!IsWithinGPUAddressRange(gpu_addr)) [[unlikely]] {
return std::nullopt;
}
- const auto page_entry{GetPageEntry(gpu_addr)};
- if (!page_entry.IsValid()) {
- return std::nullopt;
+ if (GetEntry<true>(gpu_addr) != EntryType::Mapped) [[unlikely]] {
+ if (GetEntry<false>(gpu_addr) != EntryType::Mapped) {
+ return std::nullopt;
+ }
+
+ const VAddr cpu_addr_base = static_cast<VAddr>(page_table[PageEntryIndex<false>(gpu_addr)])
+ << cpu_page_bits;
+ return cpu_addr_base + (gpu_addr & page_mask);
}
- return page_entry.ToAddress() + (gpu_addr & page_mask);
+ const VAddr cpu_addr_base =
+ static_cast<VAddr>(big_page_table_cpu[PageEntryIndex<true>(gpu_addr)]) << cpu_page_bits;
+ return cpu_addr_base + (gpu_addr & big_page_mask);
}
std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr addr, std::size_t size) const {
@@ -189,7 +216,7 @@ std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr addr, std::size_t s
const size_t page_last{(addr + size + page_size - 1) >> page_bits};
while (page_index < page_last) {
const auto page_addr{GpuToCpuAddress(page_index << page_bits)};
- if (page_addr && *page_addr != 0) {
+ if (page_addr) {
return page_addr;
}
++page_index;
@@ -232,126 +259,298 @@ template void MemoryManager::Write<u32>(GPUVAddr addr, u32 data);
template void MemoryManager::Write<u64>(GPUVAddr addr, u64 data);
u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) {
- if (!GetPageEntry(gpu_addr).IsValid()) {
- return {};
- }
-
const auto address{GpuToCpuAddress(gpu_addr)};
if (!address) {
return {};
}
- return system.Memory().GetPointer(*address);
+ return memory.GetPointer(*address);
}
const u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) const {
- if (!GetPageEntry(gpu_addr).IsValid()) {
- return {};
- }
-
const auto address{GpuToCpuAddress(gpu_addr)};
if (!address) {
return {};
}
- return system.Memory().GetPointer(*address);
-}
-
-size_t MemoryManager::BytesToMapEnd(GPUVAddr gpu_addr) const noexcept {
- auto it = std::ranges::upper_bound(map_ranges, gpu_addr, {}, &MapRange::first);
- --it;
- return it->second - (gpu_addr - it->first);
-}
-
-void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size,
- bool is_safe) const {
+ return memory.GetPointer(*address);
+}
+
+#ifdef _MSC_VER // no need for gcc / clang but msvc's compiler is more conservative with inlining.
+#pragma inline_recursion(on)
+#endif
+
+template <bool is_big_pages, typename FuncMapped, typename FuncReserved, typename FuncUnmapped>
+inline void MemoryManager::MemoryOperation(GPUVAddr gpu_src_addr, std::size_t size,
+ FuncMapped&& func_mapped, FuncReserved&& func_reserved,
+ FuncUnmapped&& func_unmapped) const {
+ static constexpr bool BOOL_BREAK_MAPPED = std::is_same_v<FuncMapped, bool>;
+ static constexpr bool BOOL_BREAK_RESERVED = std::is_same_v<FuncReserved, bool>;
+ static constexpr bool BOOL_BREAK_UNMAPPED = std::is_same_v<FuncUnmapped, bool>;
+ u64 used_page_size;
+ u64 used_page_mask;
+ u64 used_page_bits;
+ if constexpr (is_big_pages) {
+ used_page_size = big_page_size;
+ used_page_mask = big_page_mask;
+ used_page_bits = big_page_bits;
+ } else {
+ used_page_size = page_size;
+ used_page_mask = page_mask;
+ used_page_bits = page_bits;
+ }
std::size_t remaining_size{size};
- std::size_t page_index{gpu_src_addr >> page_bits};
- std::size_t page_offset{gpu_src_addr & page_mask};
+ std::size_t page_index{gpu_src_addr >> used_page_bits};
+ std::size_t page_offset{gpu_src_addr & used_page_mask};
+ GPUVAddr current_address = gpu_src_addr;
while (remaining_size > 0) {
const std::size_t copy_amount{
- std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
- const auto page_addr{GpuToCpuAddress(page_index << page_bits)};
- if (page_addr && *page_addr != 0) {
- const auto src_addr{*page_addr + page_offset};
- if (is_safe) {
- // Flush must happen on the rasterizer interface, such that memory is always
- // synchronous when it is read (even when in asynchronous GPU mode).
- // Fixes Dead Cells title menu.
- rasterizer->FlushRegion(src_addr, copy_amount);
+ std::min(static_cast<std::size_t>(used_page_size) - page_offset, remaining_size)};
+ auto entry = GetEntry<is_big_pages>(current_address);
+ if (entry == EntryType::Mapped) [[likely]] {
+ if constexpr (BOOL_BREAK_MAPPED) {
+ if (func_mapped(page_index, page_offset, copy_amount)) {
+ return;
+ }
+ } else {
+ func_mapped(page_index, page_offset, copy_amount);
}
- system.Memory().ReadBlockUnsafe(src_addr, dest_buffer, copy_amount);
- } else {
- std::memset(dest_buffer, 0, copy_amount);
- }
+ } else if (entry == EntryType::Reserved) {
+ if constexpr (BOOL_BREAK_RESERVED) {
+ if (func_reserved(page_index, page_offset, copy_amount)) {
+ return;
+ }
+ } else {
+ func_reserved(page_index, page_offset, copy_amount);
+ }
+
+ } else [[unlikely]] {
+ if constexpr (BOOL_BREAK_UNMAPPED) {
+ if (func_unmapped(page_index, page_offset, copy_amount)) {
+ return;
+ }
+ } else {
+ func_unmapped(page_index, page_offset, copy_amount);
+ }
+ }
page_index++;
page_offset = 0;
- dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
remaining_size -= copy_amount;
+ current_address += copy_amount;
}
}
+template <bool is_safe>
+void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer,
+ std::size_t size) const {
+ auto set_to_zero = [&]([[maybe_unused]] std::size_t page_index,
+ [[maybe_unused]] std::size_t offset, std::size_t copy_amount) {
+ std::memset(dest_buffer, 0, copy_amount);
+ dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
+ };
+ auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+ const VAddr cpu_addr_base =
+ (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+ if constexpr (is_safe) {
+ rasterizer->FlushRegion(cpu_addr_base, copy_amount);
+ }
+ u8* physical = memory.GetPointer(cpu_addr_base);
+ std::memcpy(dest_buffer, physical, copy_amount);
+ dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
+ };
+ auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+ const VAddr cpu_addr_base =
+ (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
+ if constexpr (is_safe) {
+ rasterizer->FlushRegion(cpu_addr_base, copy_amount);
+ }
+ if (!IsBigPageContinous(page_index)) [[unlikely]] {
+ memory.ReadBlockUnsafe(cpu_addr_base, dest_buffer, copy_amount);
+ } else {
+ u8* physical = memory.GetPointer(cpu_addr_base);
+ std::memcpy(dest_buffer, physical, copy_amount);
+ }
+ dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
+ };
+ auto read_short_pages = [&](std::size_t page_index, std::size_t offset,
+ std::size_t copy_amount) {
+ GPUVAddr base = (page_index << big_page_bits) + offset;
+ MemoryOperation<false>(base, copy_amount, mapped_normal, set_to_zero, set_to_zero);
+ };
+ MemoryOperation<true>(gpu_src_addr, size, mapped_big, set_to_zero, read_short_pages);
+}
+
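
ReadBlockImpl, WriteBlockImpl and the flush/invalidate paths below all sit on top of MemoryOperation, whose core is a loop that cuts a span at page boundaries and hands each piece to a callback. A standalone sketch of that range-splitting loop, without the mapped/reserved/unmapped dispatch or the bool early-break variants:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    // Walk [addr, addr + size), invoking func(page_index, offset_in_page, length)
    // once per page-sized piece; only the first piece can start mid-page.
    template <typename Func>
    void ForEachPage(uint64_t addr, std::size_t size, uint64_t page_bits, Func&& func) {
        const uint64_t page_size = 1ULL << page_bits;
        const uint64_t page_mask = page_size - 1;
        std::size_t remaining = size;
        std::size_t page_index = addr >> page_bits;
        std::size_t offset = addr & page_mask;
        while (remaining > 0) {
            const std::size_t chunk = std::min<std::size_t>(page_size - offset, remaining);
            func(page_index, offset, chunk);
            ++page_index;
            offset = 0;
            remaining -= chunk;
        }
    }
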
void MemoryManager::ReadBlock(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size) const {
- ReadBlockImpl(gpu_src_addr, dest_buffer, size, true);
+ ReadBlockImpl<true>(gpu_src_addr, dest_buffer, size);
}
void MemoryManager::ReadBlockUnsafe(GPUVAddr gpu_src_addr, void* dest_buffer,
const std::size_t size) const {
- ReadBlockImpl(gpu_src_addr, dest_buffer, size, false);
+ ReadBlockImpl<false>(gpu_src_addr, dest_buffer, size);
}
-void MemoryManager::WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size,
- bool is_safe) {
- std::size_t remaining_size{size};
- std::size_t page_index{gpu_dest_addr >> page_bits};
- std::size_t page_offset{gpu_dest_addr & page_mask};
-
- while (remaining_size > 0) {
- const std::size_t copy_amount{
- std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
- const auto page_addr{GpuToCpuAddress(page_index << page_bits)};
- if (page_addr && *page_addr != 0) {
- const auto dest_addr{*page_addr + page_offset};
-
- if (is_safe) {
- // Invalidate must happen on the rasterizer interface, such that memory is always
- // synchronous when it is written (even when in asynchronous GPU mode).
- rasterizer->InvalidateRegion(dest_addr, copy_amount);
- }
- system.Memory().WriteBlockUnsafe(dest_addr, src_buffer, copy_amount);
+template <bool is_safe>
+void MemoryManager::WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer,
+ std::size_t size) {
+ auto just_advance = [&]([[maybe_unused]] std::size_t page_index,
+ [[maybe_unused]] std::size_t offset, std::size_t copy_amount) {
+ src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
+ };
+ auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+ const VAddr cpu_addr_base =
+ (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+ if constexpr (is_safe) {
+ rasterizer->InvalidateRegion(cpu_addr_base, copy_amount);
}
-
- page_index++;
- page_offset = 0;
+ u8* physical = memory.GetPointer(cpu_addr_base);
+ std::memcpy(physical, src_buffer, copy_amount);
src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
- remaining_size -= copy_amount;
- }
+ };
+ auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+ const VAddr cpu_addr_base =
+ (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
+ if constexpr (is_safe) {
+ rasterizer->InvalidateRegion(cpu_addr_base, copy_amount);
+ }
+ if (!IsBigPageContinous(page_index)) [[unlikely]] {
+ memory.WriteBlockUnsafe(cpu_addr_base, src_buffer, copy_amount);
+ } else {
+ u8* physical = memory.GetPointer(cpu_addr_base);
+ std::memcpy(physical, src_buffer, copy_amount);
+ }
+ src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
+ };
+ auto write_short_pages = [&](std::size_t page_index, std::size_t offset,
+ std::size_t copy_amount) {
+ GPUVAddr base = (page_index << big_page_bits) + offset;
+ MemoryOperation<false>(base, copy_amount, mapped_normal, just_advance, just_advance);
+ };
+ MemoryOperation<true>(gpu_dest_addr, size, mapped_big, just_advance, write_short_pages);
}
void MemoryManager::WriteBlock(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size) {
- WriteBlockImpl(gpu_dest_addr, src_buffer, size, true);
+ WriteBlockImpl<true>(gpu_dest_addr, src_buffer, size);
}
void MemoryManager::WriteBlockUnsafe(GPUVAddr gpu_dest_addr, const void* src_buffer,
std::size_t size) {
- WriteBlockImpl(gpu_dest_addr, src_buffer, size, false);
+ WriteBlockImpl<false>(gpu_dest_addr, src_buffer, size);
}
void MemoryManager::FlushRegion(GPUVAddr gpu_addr, size_t size) const {
- size_t remaining_size{size};
- size_t page_index{gpu_addr >> page_bits};
- size_t page_offset{gpu_addr & page_mask};
- while (remaining_size > 0) {
- const size_t num_bytes{std::min(page_size - page_offset, remaining_size)};
- if (const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; page_addr) {
- rasterizer->FlushRegion(*page_addr + page_offset, num_bytes);
+ auto do_nothing = [&]([[maybe_unused]] std::size_t page_index,
+ [[maybe_unused]] std::size_t offset,
+ [[maybe_unused]] std::size_t copy_amount) {};
+
+ auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+ const VAddr cpu_addr_base =
+ (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+ rasterizer->FlushRegion(cpu_addr_base, copy_amount);
+ };
+ auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+ const VAddr cpu_addr_base =
+ (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
+ rasterizer->FlushRegion(cpu_addr_base, copy_amount);
+ };
+ auto flush_short_pages = [&](std::size_t page_index, std::size_t offset,
+ std::size_t copy_amount) {
+ GPUVAddr base = (page_index << big_page_bits) + offset;
+ MemoryOperation<false>(base, copy_amount, mapped_normal, do_nothing, do_nothing);
+ };
+ MemoryOperation<true>(gpu_addr, size, mapped_big, do_nothing, flush_short_pages);
+}
+
+bool MemoryManager::IsMemoryDirty(GPUVAddr gpu_addr, size_t size) const {
+ bool result = false;
+ auto do_nothing = [&]([[maybe_unused]] std::size_t page_index,
+ [[maybe_unused]] std::size_t offset,
+ [[maybe_unused]] std::size_t copy_amount) { return false; };
+
+ auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+ const VAddr cpu_addr_base =
+ (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+ result |= rasterizer->MustFlushRegion(cpu_addr_base, copy_amount);
+ return result;
+ };
+ auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+ const VAddr cpu_addr_base =
+ (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
+ result |= rasterizer->MustFlushRegion(cpu_addr_base, copy_amount);
+ return result;
+ };
+ auto check_short_pages = [&](std::size_t page_index, std::size_t offset,
+ std::size_t copy_amount) {
+ GPUVAddr base = (page_index << big_page_bits) + offset;
+ MemoryOperation<false>(base, copy_amount, mapped_normal, do_nothing, do_nothing);
+ return result;
+ };
+ MemoryOperation<true>(gpu_addr, size, mapped_big, do_nothing, check_short_pages);
+ return result;
+}
+
+size_t MemoryManager::MaxContinousRange(GPUVAddr gpu_addr, size_t size) const {
+ std::optional<VAddr> old_page_addr{};
+ size_t range_so_far = 0;
+ bool result{false};
+ auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset,
+ std::size_t copy_amount) {
+ result = true;
+ return true;
+ };
+ auto short_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+ const VAddr cpu_addr_base =
+ (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+ if (old_page_addr && *old_page_addr != cpu_addr_base) {
+ result = true;
+ return true;
}
- ++page_index;
- page_offset = 0;
- remaining_size -= num_bytes;
- }
+ range_so_far += copy_amount;
+ old_page_addr = {cpu_addr_base + copy_amount};
+ return false;
+ };
+ auto big_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+ const VAddr cpu_addr_base =
+ (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
+ if (old_page_addr && *old_page_addr != cpu_addr_base) {
+ return true;
+ }
+ range_so_far += copy_amount;
+ old_page_addr = {cpu_addr_base + copy_amount};
+ return false;
+ };
+ auto check_short_pages = [&](std::size_t page_index, std::size_t offset,
+ std::size_t copy_amount) {
+ GPUVAddr base = (page_index << big_page_bits) + offset;
+ MemoryOperation<false>(base, copy_amount, short_check, fail, fail);
+ return result;
+ };
+ MemoryOperation<true>(gpu_addr, size, big_check, fail, check_short_pages);
+ return range_so_far;
+}
+
+void MemoryManager::InvalidateRegion(GPUVAddr gpu_addr, size_t size) const {
+ auto do_nothing = [&]([[maybe_unused]] std::size_t page_index,
+ [[maybe_unused]] std::size_t offset,
+ [[maybe_unused]] std::size_t copy_amount) {};
+
+ auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+ const VAddr cpu_addr_base =
+ (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+ rasterizer->InvalidateRegion(cpu_addr_base, copy_amount);
+ };
+ auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+ const VAddr cpu_addr_base =
+ (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
+ rasterizer->InvalidateRegion(cpu_addr_base, copy_amount);
+ };
+ auto invalidate_short_pages = [&](std::size_t page_index, std::size_t offset,
+ std::size_t copy_amount) {
+ GPUVAddr base = (page_index << big_page_bits) + offset;
+ MemoryOperation<false>(base, copy_amount, mapped_normal, do_nothing, do_nothing);
+ };
+ MemoryOperation<true>(gpu_addr, size, mapped_big, do_nothing, invalidate_short_pages);
}
void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std::size_t size) {
@@ -365,87 +564,134 @@ void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std
}
bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) const {
- const auto cpu_addr{GpuToCpuAddress(gpu_addr)};
- if (!cpu_addr) {
+ if (GetEntry<true>(gpu_addr) == EntryType::Mapped) [[likely]] {
+ size_t page_index = gpu_addr >> big_page_bits;
+ if (IsBigPageContinous(page_index)) [[likely]] {
+ const std::size_t page{(page_index & big_page_mask) + size};
+ return page <= big_page_size;
+ }
+ const std::size_t page{(gpu_addr & Core::Memory::YUZU_PAGEMASK) + size};
+ return page <= Core::Memory::YUZU_PAGESIZE;
+ }
+ if (GetEntry<false>(gpu_addr) != EntryType::Mapped) {
return false;
}
- const std::size_t page{(*cpu_addr & Core::Memory::YUZU_PAGEMASK) + size};
+ const std::size_t page{(gpu_addr & Core::Memory::YUZU_PAGEMASK) + size};
return page <= Core::Memory::YUZU_PAGESIZE;
}
bool MemoryManager::IsContinousRange(GPUVAddr gpu_addr, std::size_t size) const {
- size_t page_index{gpu_addr >> page_bits};
- const size_t page_last{(gpu_addr + size + page_size - 1) >> page_bits};
std::optional<VAddr> old_page_addr{};
- while (page_index != page_last) {
- const auto page_addr{GpuToCpuAddress(page_index << page_bits)};
- if (!page_addr || *page_addr == 0) {
- return false;
+ bool result{true};
+ auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset,
+ std::size_t copy_amount) {
+ result = false;
+ return true;
+ };
+ auto short_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+ const VAddr cpu_addr_base =
+ (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+ if (old_page_addr && *old_page_addr != cpu_addr_base) {
+ result = false;
+ return true;
}
- if (old_page_addr) {
- if (*old_page_addr + page_size != *page_addr) {
- return false;
- }
+ old_page_addr = {cpu_addr_base + copy_amount};
+ return false;
+ };
+ auto big_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+ const VAddr cpu_addr_base =
+ (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
+ if (old_page_addr && *old_page_addr != cpu_addr_base) {
+ result = false;
+ return true;
}
- old_page_addr = page_addr;
- ++page_index;
- }
- return true;
+ old_page_addr = {cpu_addr_base + copy_amount};
+ return false;
+ };
+ auto check_short_pages = [&](std::size_t page_index, std::size_t offset,
+ std::size_t copy_amount) {
+ GPUVAddr base = (page_index << big_page_bits) + offset;
+ MemoryOperation<false>(base, copy_amount, short_check, fail, fail);
+ return !result;
+ };
+ MemoryOperation<true>(gpu_addr, size, big_check, fail, check_short_pages);
+ return result;
}
bool MemoryManager::IsFullyMappedRange(GPUVAddr gpu_addr, std::size_t size) const {
- size_t page_index{gpu_addr >> page_bits};
- const size_t page_last{(gpu_addr + size + page_size - 1) >> page_bits};
- while (page_index < page_last) {
- if (!page_table[page_index].IsValid() || page_table[page_index].ToAddress() == 0) {
- return false;
- }
- ++page_index;
- }
- return true;
+ bool result{true};
+ auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset,
+ [[maybe_unused]] std::size_t copy_amount) {
+ result = false;
+ return true;
+ };
+ auto pass = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset,
+ [[maybe_unused]] std::size_t copy_amount) { return false; };
+ auto check_short_pages = [&](std::size_t page_index, std::size_t offset,
+ std::size_t copy_amount) {
+ GPUVAddr base = (page_index << big_page_bits) + offset;
+ MemoryOperation<false>(base, copy_amount, pass, pass, fail);
+ return !result;
+ };
+ MemoryOperation<true>(gpu_addr, size, pass, fail, check_short_pages);
+ return result;
}
std::vector<std::pair<GPUVAddr, std::size_t>> MemoryManager::GetSubmappedRange(
GPUVAddr gpu_addr, std::size_t size) const {
std::vector<std::pair<GPUVAddr, std::size_t>> result{};
- size_t page_index{gpu_addr >> page_bits};
- size_t remaining_size{size};
- size_t page_offset{gpu_addr & page_mask};
std::optional<std::pair<GPUVAddr, std::size_t>> last_segment{};
std::optional<VAddr> old_page_addr{};
- const auto extend_size = [&last_segment, &page_index, &page_offset](std::size_t bytes) {
- if (!last_segment) {
- const GPUVAddr new_base_addr = (page_index << page_bits) + page_offset;
- last_segment = {new_base_addr, bytes};
- } else {
- last_segment->second += bytes;
- }
- };
- const auto split = [&last_segment, &result] {
+ const auto split = [&last_segment, &result]([[maybe_unused]] std::size_t page_index,
+ [[maybe_unused]] std::size_t offset,
+ [[maybe_unused]] std::size_t copy_amount) {
if (last_segment) {
result.push_back(*last_segment);
last_segment = std::nullopt;
}
};
- while (remaining_size > 0) {
- const size_t num_bytes{std::min(page_size - page_offset, remaining_size)};
- const auto page_addr{GpuToCpuAddress(page_index << page_bits)};
- if (!page_addr || *page_addr == 0) {
- split();
- } else if (old_page_addr) {
- if (*old_page_addr + page_size != *page_addr) {
- split();
+ const auto extend_size_big = [this, &split, &old_page_addr,
+ &last_segment](std::size_t page_index, std::size_t offset,
+ std::size_t copy_amount) {
+ const VAddr cpu_addr_base =
+ (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
+ if (old_page_addr) {
+ if (*old_page_addr != cpu_addr_base) {
+ split(0, 0, 0);
+ }
+ }
+ old_page_addr = {cpu_addr_base + copy_amount};
+ if (!last_segment) {
+ const GPUVAddr new_base_addr = (page_index << big_page_bits) + offset;
+ last_segment = {new_base_addr, copy_amount};
+ } else {
+ last_segment->second += copy_amount;
+ }
+ };
+ const auto extend_size_short = [this, &split, &old_page_addr,
+ &last_segment](std::size_t page_index, std::size_t offset,
+ std::size_t copy_amount) {
+ const VAddr cpu_addr_base =
+ (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+ if (old_page_addr) {
+ if (*old_page_addr != cpu_addr_base) {
+ split(0, 0, 0);
}
- extend_size(num_bytes);
+ }
+ old_page_addr = {cpu_addr_base + copy_amount};
+ if (!last_segment) {
+ const GPUVAddr new_base_addr = (page_index << page_bits) + offset;
+ last_segment = {new_base_addr, copy_amount};
} else {
- extend_size(num_bytes);
+ last_segment->second += copy_amount;
}
- ++page_index;
- page_offset = 0;
- remaining_size -= num_bytes;
- old_page_addr = page_addr;
- }
- split();
+ };
+ auto do_short_pages = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+ GPUVAddr base = (page_index << big_page_bits) + offset;
+ MemoryOperation<false>(base, copy_amount, extend_size_short, split, split);
+ };
+ MemoryOperation<true>(gpu_addr, size, extend_size_big, split, do_short_pages);
+ split(0, 0, 0);
return result;
}
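The helpers above (FlushRegion, InvalidateRegion, IsGranularRange, IsContinousRange, IsFullyMappedRange, GetSubmappedRange) now all route through the templated MemoryOperation walker instead of hand-rolled per-page loops: the walker splits a GPU range at page boundaries and dispatches each (page_index, offset, copy_amount) piece to a mapped, reserved, or unmapped callback, with a true return stopping the walk early. IsContinousRange, for example, feeds a lambda that compares each piece's backing CPU address against the previous one and clears result on a mismatch. The sketch below is a simplified, self-contained illustration of that traversal shape with a single page size; WalkRange and its parameters are stand-ins for the example, not the real MemoryManager internals.

    // Simplified sketch: split [gpu_addr, gpu_addr + size) at page boundaries and
    // hand each piece to a callback, stopping early when the callback returns true.
    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    using GPUVAddr = std::uint64_t;

    template <typename Func>
    void WalkRange(GPUVAddr gpu_addr, std::size_t size, std::size_t page_bits, Func&& func) {
        const std::size_t page_size = std::size_t{1} << page_bits;
        const std::size_t page_mask = page_size - 1;
        std::size_t page_index = gpu_addr >> page_bits;
        std::size_t offset = gpu_addr & page_mask;
        std::size_t remaining = size;
        while (remaining > 0) {
            const std::size_t copy_amount = std::min(page_size - offset, remaining);
            if (func(page_index, offset, copy_amount)) {
                return; // Early exit, as the fail/check lambdas above request.
            }
            ++page_index;
            offset = 0;
            remaining -= copy_amount;
        }
    }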
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h
index 74f9ce175..f992e29f3 100644
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ -3,73 +3,39 @@
#pragma once
+#include <atomic>
#include <map>
#include <optional>
#include <vector>
#include "common/common_types.h"
+#include "common/multi_level_page_table.h"
+#include "common/virtual_buffer.h"
namespace VideoCore {
class RasterizerInterface;
}
namespace Core {
+class DeviceMemory;
+namespace Memory {
+class Memory;
+} // namespace Memory
class System;
-}
+} // namespace Core
namespace Tegra {
-class PageEntry final {
-public:
- enum class State : u32 {
- Unmapped = static_cast<u32>(-1),
- Allocated = static_cast<u32>(-2),
- };
-
- constexpr PageEntry() = default;
- constexpr PageEntry(State state_) : state{state_} {}
- constexpr PageEntry(VAddr addr) : state{static_cast<State>(addr >> ShiftBits)} {}
-
- [[nodiscard]] constexpr bool IsUnmapped() const {
- return state == State::Unmapped;
- }
-
- [[nodiscard]] constexpr bool IsAllocated() const {
- return state == State::Allocated;
- }
-
- [[nodiscard]] constexpr bool IsValid() const {
- return !IsUnmapped() && !IsAllocated();
- }
-
- [[nodiscard]] constexpr VAddr ToAddress() const {
- if (!IsValid()) {
- return {};
- }
-
- return static_cast<VAddr>(state) << ShiftBits;
- }
-
- [[nodiscard]] constexpr PageEntry operator+(u64 offset) const {
- // If this is a reserved value, offsets do not apply
- if (!IsValid()) {
- return *this;
- }
- return PageEntry{(static_cast<VAddr>(state) << ShiftBits) + offset};
- }
-
-private:
- static constexpr std::size_t ShiftBits{12};
-
- State state{State::Unmapped};
-};
-static_assert(sizeof(PageEntry) == 4, "PageEntry is too large");
-
class MemoryManager final {
public:
- explicit MemoryManager(Core::System& system_);
+ explicit MemoryManager(Core::System& system_, u64 address_space_bits_ = 40,
+ u64 big_page_bits_ = 16, u64 page_bits_ = 12);
~MemoryManager();
+ size_t GetID() const {
+ return unique_identifier;
+ }
+
/// Binds a renderer to the memory manager.
void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);
@@ -86,9 +52,6 @@ public:
[[nodiscard]] u8* GetPointer(GPUVAddr addr);
[[nodiscard]] const u8* GetPointer(GPUVAddr addr) const;
- /// Returns the number of bytes until the end of the memory map containing the given GPU address
- [[nodiscard]] size_t BytesToMapEnd(GPUVAddr gpu_addr) const noexcept;
-
/**
* ReadBlock and WriteBlock are full read and write operations over virtual
* GPU Memory. It's important to use these when GPU memory may not be continuous
@@ -135,54 +98,95 @@ public:
std::vector<std::pair<GPUVAddr, std::size_t>> GetSubmappedRange(GPUVAddr gpu_addr,
std::size_t size) const;
- [[nodiscard]] GPUVAddr Map(VAddr cpu_addr, GPUVAddr gpu_addr, std::size_t size);
- [[nodiscard]] GPUVAddr MapAllocate(VAddr cpu_addr, std::size_t size, std::size_t align);
- [[nodiscard]] GPUVAddr MapAllocate32(VAddr cpu_addr, std::size_t size);
- [[nodiscard]] std::optional<GPUVAddr> AllocateFixed(GPUVAddr gpu_addr, std::size_t size);
- [[nodiscard]] GPUVAddr Allocate(std::size_t size, std::size_t align);
+ GPUVAddr Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size, bool is_big_pages = true);
+ GPUVAddr MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages = true);
void Unmap(GPUVAddr gpu_addr, std::size_t size);
void FlushRegion(GPUVAddr gpu_addr, size_t size) const;
+ void InvalidateRegion(GPUVAddr gpu_addr, size_t size) const;
+
+ bool IsMemoryDirty(GPUVAddr gpu_addr, size_t size) const;
+
+ size_t MaxContinousRange(GPUVAddr gpu_addr, size_t size) const;
+
+ bool IsWithinGPUAddressRange(GPUVAddr gpu_addr) const {
+ return gpu_addr < address_space_size;
+ }
+
private:
- [[nodiscard]] PageEntry GetPageEntry(GPUVAddr gpu_addr) const;
- void SetPageEntry(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size = page_size);
- GPUVAddr UpdateRange(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size);
- [[nodiscard]] std::optional<GPUVAddr> FindFreeRange(std::size_t size, std::size_t align,
- bool start_32bit_address = false) const;
-
- void TryLockPage(PageEntry page_entry, std::size_t size);
- void TryUnlockPage(PageEntry page_entry, std::size_t size);
-
- void ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size,
- bool is_safe) const;
- void WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size,
- bool is_safe);
-
- [[nodiscard]] static constexpr std::size_t PageEntryIndex(GPUVAddr gpu_addr) {
- return (gpu_addr >> page_bits) & page_table_mask;
+ template <bool is_big_pages, typename FuncMapped, typename FuncReserved, typename FuncUnmapped>
+ inline void MemoryOperation(GPUVAddr gpu_src_addr, std::size_t size, FuncMapped&& func_mapped,
+ FuncReserved&& func_reserved, FuncUnmapped&& func_unmapped) const;
+
+ template <bool is_safe>
+ void ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size) const;
+
+ template <bool is_safe>
+ void WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size);
+
+ template <bool is_big_page>
+ [[nodiscard]] std::size_t PageEntryIndex(GPUVAddr gpu_addr) const {
+ if constexpr (is_big_page) {
+ return (gpu_addr >> big_page_bits) & big_page_table_mask;
+ } else {
+ return (gpu_addr >> page_bits) & page_table_mask;
+ }
}
- static constexpr u64 address_space_size = 1ULL << 40;
- static constexpr u64 address_space_start = 1ULL << 32;
- static constexpr u64 address_space_start_low = 1ULL << 16;
- static constexpr u64 page_bits{16};
- static constexpr u64 page_size{1 << page_bits};
- static constexpr u64 page_mask{page_size - 1};
- static constexpr u64 page_table_bits{24};
- static constexpr u64 page_table_size{1 << page_table_bits};
- static constexpr u64 page_table_mask{page_table_size - 1};
+ inline bool IsBigPageContinous(size_t big_page_index) const;
+ inline void SetBigPageContinous(size_t big_page_index, bool value);
Core::System& system;
+ Core::Memory::Memory& memory;
+ Core::DeviceMemory& device_memory;
+
+ const u64 address_space_bits;
+ const u64 page_bits;
+ u64 address_space_size;
+ u64 page_size;
+ u64 page_mask;
+ u64 page_table_mask;
+ static constexpr u64 cpu_page_bits{12};
+
+ const u64 big_page_bits;
+ u64 big_page_size;
+ u64 big_page_mask;
+ u64 big_page_table_mask;
VideoCore::RasterizerInterface* rasterizer = nullptr;
- std::vector<PageEntry> page_table;
+ enum class EntryType : u64 {
+ Free = 0,
+ Reserved = 1,
+ Mapped = 2,
+ };
+
+ std::vector<u64> entries;
+ std::vector<u64> big_entries;
+
+ template <EntryType entry_type>
+ GPUVAddr PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size);
+
+ template <EntryType entry_type>
+ GPUVAddr BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size);
+
+ template <bool is_big_page>
+ inline EntryType GetEntry(size_t position) const;
+
+ template <bool is_big_page>
+ inline void SetEntry(size_t position, EntryType entry);
+
+ Common::MultiLevelPageTable<u32> page_table;
+ Common::VirtualBuffer<u32> big_page_table_cpu;
+
+ std::vector<u64> big_page_continous;
+
+ constexpr static size_t continous_bits = 64;
- using MapRange = std::pair<GPUVAddr, size_t>;
- std::vector<MapRange> map_ranges;
+ const size_t unique_identifier;
- std::vector<std::pair<VAddr, std::size_t>> cache_invalidate_queue;
+ static std::atomic<size_t> unique_identifier_generator;
};
} // namespace Tegra
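The rewritten header keeps two translation structures per address space: a MultiLevelPageTable<u32> for small pages (page_bits_ = 12, 4 KiB by default) and a VirtualBuffer<u32> big_page_table_cpu for big pages (big_page_bits_ = 16, 64 KiB by default), plus big_page_continous, which packs one bit per big page (continous_bits = 64 bits per u64 word) recording whether the CPU memory backing that big page is contiguous, so IsGranularRange can take the fast path. Below is a minimal sketch of such a bitmap; the class and member names are illustrative only and not the real SetBigPageContinous/IsBigPageContinous implementation.

    // One bit per big page, 64 bits packed per u64 word.
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    class ContinuityBitmap {
    public:
        explicit ContinuityBitmap(std::size_t num_big_pages)
            : words((num_big_pages + 63) / 64, 0) {}

        void Set(std::size_t big_page_index, bool value) {
            const std::size_t word = big_page_index / 64;
            const std::uint64_t bit = std::uint64_t{1} << (big_page_index % 64);
            if (value) {
                words[word] |= bit;
            } else {
                words[word] &= ~bit;
            }
        }

        bool Test(std::size_t big_page_index) const {
            const std::size_t word = big_page_index / 64;
            const std::uint64_t bit = std::uint64_t{1} << (big_page_index % 64);
            return (words[word] & bit) != 0;
        }

    private:
        std::vector<std::uint64_t> words;
    };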
diff --git a/src/video_core/query_cache.h b/src/video_core/query_cache.h
index 889b606b3..b0ebe71b7 100644
--- a/src/video_core/query_cache.h
+++ b/src/video_core/query_cache.h
@@ -17,6 +17,7 @@
#include "common/assert.h"
#include "common/settings.h"
+#include "video_core/control/channel_state_cache.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/memory_manager.h"
#include "video_core/rasterizer_interface.h"
@@ -90,13 +91,10 @@ private:
};
template <class QueryCache, class CachedQuery, class CounterStream, class HostCounter>
-class QueryCacheBase {
+class QueryCacheBase : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
public:
- explicit QueryCacheBase(VideoCore::RasterizerInterface& rasterizer_,
- Tegra::Engines::Maxwell3D& maxwell3d_,
- Tegra::MemoryManager& gpu_memory_)
- : rasterizer{rasterizer_}, maxwell3d{maxwell3d_},
- gpu_memory{gpu_memory_}, streams{{CounterStream{static_cast<QueryCache&>(*this),
+ explicit QueryCacheBase(VideoCore::RasterizerInterface& rasterizer_)
+ : rasterizer{rasterizer_}, streams{{CounterStream{static_cast<QueryCache&>(*this),
VideoCore::QueryType::SamplesPassed}}} {}
void InvalidateRegion(VAddr addr, std::size_t size) {
@@ -117,13 +115,13 @@ public:
*/
void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) {
std::unique_lock lock{mutex};
- const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
+ const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
ASSERT(cpu_addr);
CachedQuery* query = TryGet(*cpu_addr);
if (!query) {
ASSERT_OR_EXECUTE(cpu_addr, return;);
- u8* const host_ptr = gpu_memory.GetPointer(gpu_addr);
+ u8* const host_ptr = gpu_memory->GetPointer(gpu_addr);
query = Register(type, *cpu_addr, host_ptr, timestamp.has_value());
}
@@ -137,8 +135,10 @@ public:
/// Updates counters from GPU state. Expected to be called once per draw, clear or dispatch.
void UpdateCounters() {
std::unique_lock lock{mutex};
- const auto& regs = maxwell3d.regs;
- Stream(VideoCore::QueryType::SamplesPassed).Update(regs.samplecnt_enable);
+ if (maxwell3d) {
+ const auto& regs = maxwell3d->regs;
+ Stream(VideoCore::QueryType::SamplesPassed).Update(regs.samplecnt_enable);
+ }
}
/// Resets a counter to zero. It doesn't disable the query after resetting.
@@ -264,8 +264,6 @@ private:
static constexpr unsigned YUZU_PAGEBITS = 12;
VideoCore::RasterizerInterface& rasterizer;
- Tegra::Engines::Maxwell3D& maxwell3d;
- Tegra::MemoryManager& gpu_memory;
std::recursive_mutex mutex;
diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h
index a04a76481..d2d40884c 100644
--- a/src/video_core/rasterizer_interface.h
+++ b/src/video_core/rasterizer_interface.h
@@ -16,6 +16,9 @@ class MemoryManager;
namespace Engines {
class AccelerateDMAInterface;
}
+namespace Control {
+struct ChannelState;
+}
} // namespace Tegra
namespace VideoCore {
@@ -59,7 +62,10 @@ public:
virtual void DisableGraphicsUniformBuffer(size_t stage, u32 index) = 0;
/// Signal a GPU based semaphore as a fence
- virtual void SignalSemaphore(GPUVAddr addr, u32 value) = 0;
+ virtual void SignalFence(std::function<void()>&& func) = 0;
+
+ /// Send an operation to be done after a certain number of flushes.
+ virtual void SyncOperation(std::function<void()>&& func) = 0;
/// Signal a GPU based syncpoint as a fence
virtual void SignalSyncPoint(u32 value) = 0;
@@ -86,13 +92,13 @@ public:
virtual void OnCPUWrite(VAddr addr, u64 size) = 0;
/// Sync memory between guest and host.
- virtual void SyncGuestHost() = 0;
+ virtual void InvalidateGPUCache() = 0;
/// Unmap memory range
virtual void UnmapMemory(VAddr addr, u64 size) = 0;
/// Remap GPU memory range. This means underneath backing memory changed
- virtual void ModifyGPUMemory(GPUVAddr addr, u64 size) = 0;
+ virtual void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) = 0;
/// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
/// and invalidated
@@ -123,7 +129,7 @@ public:
[[nodiscard]] virtual Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() = 0;
virtual void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size,
- std::span<u8> memory) = 0;
+ std::span<const u8> memory) = 0;
/// Attempt to use a faster method to display the framebuffer to screen
[[nodiscard]] virtual bool AccelerateDisplay(const Tegra::FramebufferConfig& config,
@@ -137,5 +143,11 @@ public:
/// Initialize disk cached resources for the game being emulated
virtual void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
const DiskResourceLoadCallback& callback) {}
+
+ virtual void InitializeChannel(Tegra::Control::ChannelState& channel) {}
+
+ virtual void BindChannel(Tegra::Control::ChannelState& channel) {}
+
+ virtual void ReleaseChannel(s32 channel_id) {}
};
} // namespace VideoCore
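SignalSemaphore(addr, value) is gone from the interface; pending GPU-side work now travels as std::function callbacks, with SignalFence attaching a callback to a fence and SyncOperation queueing one to run after the relevant flushes. The toy below only illustrates the shape of that contract: FakeRasterizer is invented for the example, it collapses both entry points into one queue, and it is not the real RasterizerInterface or FenceManager.

    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <vector>

    class FakeRasterizer {
    public:
        void SignalFence(std::function<void()>&& func) {
            pending.push_back(std::move(func));
        }
        void SyncOperation(std::function<void()>&& func) {
            // Simplification: the real interface distinguishes this from SignalFence.
            pending.push_back(std::move(func));
        }
        void ReleaseFences() {
            for (auto& func : pending) {
                func();
            }
            pending.clear();
        }

    private:
        std::vector<std::function<void()>> pending;
    };

    int main() {
        FakeRasterizer rasterizer;
        std::uint32_t semaphore_value = 0;
        // What used to be SignalSemaphore(addr, value) becomes a deferred write:
        rasterizer.SignalFence([&semaphore_value] { semaphore_value = 1; });
        rasterizer.ReleaseFences();
        std::cout << "semaphore_value = " << semaphore_value << '\n';
        return 0;
    }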
diff --git a/src/video_core/renderer_opengl/gl_compute_pipeline.cpp b/src/video_core/renderer_opengl/gl_compute_pipeline.cpp
index 1f0f156ed..26d066004 100644
--- a/src/video_core/renderer_opengl/gl_compute_pipeline.cpp
+++ b/src/video_core/renderer_opengl/gl_compute_pipeline.cpp
@@ -28,12 +28,11 @@ bool ComputePipelineKey::operator==(const ComputePipelineKey& rhs) const noexcep
}
ComputePipeline::ComputePipeline(const Device& device, TextureCache& texture_cache_,
- BufferCache& buffer_cache_, Tegra::MemoryManager& gpu_memory_,
- Tegra::Engines::KeplerCompute& kepler_compute_,
- ProgramManager& program_manager_, const Shader::Info& info_,
- std::string code, std::vector<u32> code_v)
- : texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, gpu_memory{gpu_memory_},
- kepler_compute{kepler_compute_}, program_manager{program_manager_}, info{info_} {
+ BufferCache& buffer_cache_, ProgramManager& program_manager_,
+ const Shader::Info& info_, std::string code,
+ std::vector<u32> code_v)
+ : texture_cache{texture_cache_}, buffer_cache{buffer_cache_},
+ program_manager{program_manager_}, info{info_} {
switch (device.GetShaderBackend()) {
case Settings::ShaderBackend::GLSL:
source_program = CreateProgram(code, GL_COMPUTE_SHADER);
@@ -86,7 +85,7 @@ void ComputePipeline::Configure() {
GLsizei texture_binding{};
GLsizei image_binding{};
- const auto& qmd{kepler_compute.launch_description};
+ const auto& qmd{kepler_compute->launch_description};
const auto& cbufs{qmd.const_buffer_config};
const bool via_header_index{qmd.linked_tsc != 0};
const auto read_handle{[&](const auto& desc, u32 index) {
@@ -101,12 +100,13 @@ void ComputePipeline::Configure() {
const u32 secondary_offset{desc.secondary_cbuf_offset + index_offset};
const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].Address() +
secondary_offset};
- const u32 lhs_raw{gpu_memory.Read<u32>(addr)};
- const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)};
+ const u32 lhs_raw{gpu_memory->Read<u32>(addr) << desc.shift_left};
+ const u32 rhs_raw{gpu_memory->Read<u32>(separate_addr)
+ << desc.secondary_shift_left};
return TexturePair(lhs_raw | rhs_raw, via_header_index);
}
}
- return TexturePair(gpu_memory.Read<u32>(addr), via_header_index);
+ return TexturePair(gpu_memory->Read<u32>(addr), via_header_index);
}};
const auto add_image{[&](const auto& desc, bool blacklist) {
for (u32 index = 0; index < desc.count; ++index) {
diff --git a/src/video_core/renderer_opengl/gl_compute_pipeline.h b/src/video_core/renderer_opengl/gl_compute_pipeline.h
index 723f27f11..6534dec32 100644
--- a/src/video_core/renderer_opengl/gl_compute_pipeline.h
+++ b/src/video_core/renderer_opengl/gl_compute_pipeline.h
@@ -49,10 +49,8 @@ static_assert(std::is_trivially_constructible_v<ComputePipelineKey>);
class ComputePipeline {
public:
explicit ComputePipeline(const Device& device, TextureCache& texture_cache_,
- BufferCache& buffer_cache_, Tegra::MemoryManager& gpu_memory_,
- Tegra::Engines::KeplerCompute& kepler_compute_,
- ProgramManager& program_manager_, const Shader::Info& info_,
- std::string code, std::vector<u32> code_v);
+ BufferCache& buffer_cache_, ProgramManager& program_manager_,
+ const Shader::Info& info_, std::string code, std::vector<u32> code_v);
void Configure();
@@ -60,11 +58,17 @@ public:
return writes_global_memory;
}
+ void SetEngine(Tegra::Engines::KeplerCompute* kepler_compute_,
+ Tegra::MemoryManager* gpu_memory_) {
+ kepler_compute = kepler_compute_;
+ gpu_memory = gpu_memory_;
+ }
+
private:
TextureCache& texture_cache;
BufferCache& buffer_cache;
- Tegra::MemoryManager& gpu_memory;
- Tegra::Engines::KeplerCompute& kepler_compute;
+ Tegra::MemoryManager* gpu_memory;
+ Tegra::Engines::KeplerCompute* kepler_compute;
ProgramManager& program_manager;
Shader::Info info;
diff --git a/src/video_core/renderer_opengl/gl_fence_manager.cpp b/src/video_core/renderer_opengl/gl_fence_manager.cpp
index 6e82c2e28..91463f854 100644
--- a/src/video_core/renderer_opengl/gl_fence_manager.cpp
+++ b/src/video_core/renderer_opengl/gl_fence_manager.cpp
@@ -10,10 +10,7 @@
namespace OpenGL {
-GLInnerFence::GLInnerFence(u32 payload_, bool is_stubbed_) : FenceBase{payload_, is_stubbed_} {}
-
-GLInnerFence::GLInnerFence(GPUVAddr address_, u32 payload_, bool is_stubbed_)
- : FenceBase{address_, payload_, is_stubbed_} {}
+GLInnerFence::GLInnerFence(bool is_stubbed_) : FenceBase{is_stubbed_} {}
GLInnerFence::~GLInnerFence() = default;
@@ -48,12 +45,8 @@ FenceManagerOpenGL::FenceManagerOpenGL(VideoCore::RasterizerInterface& rasterize
BufferCache& buffer_cache_, QueryCache& query_cache_)
: GenericFenceManager{rasterizer_, gpu_, texture_cache_, buffer_cache_, query_cache_} {}
-Fence FenceManagerOpenGL::CreateFence(u32 value, bool is_stubbed) {
- return std::make_shared<GLInnerFence>(value, is_stubbed);
-}
-
-Fence FenceManagerOpenGL::CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) {
- return std::make_shared<GLInnerFence>(addr, value, is_stubbed);
+Fence FenceManagerOpenGL::CreateFence(bool is_stubbed) {
+ return std::make_shared<GLInnerFence>(is_stubbed);
}
void FenceManagerOpenGL::QueueFence(Fence& fence) {
diff --git a/src/video_core/renderer_opengl/gl_fence_manager.h b/src/video_core/renderer_opengl/gl_fence_manager.h
index 14ff00db2..f1446e732 100644
--- a/src/video_core/renderer_opengl/gl_fence_manager.h
+++ b/src/video_core/renderer_opengl/gl_fence_manager.h
@@ -16,8 +16,7 @@ namespace OpenGL {
class GLInnerFence : public VideoCommon::FenceBase {
public:
- explicit GLInnerFence(u32 payload_, bool is_stubbed_);
- explicit GLInnerFence(GPUVAddr address_, u32 payload_, bool is_stubbed_);
+ explicit GLInnerFence(bool is_stubbed_);
~GLInnerFence();
void Queue();
@@ -40,8 +39,7 @@ public:
QueryCache& query_cache);
protected:
- Fence CreateFence(u32 value, bool is_stubbed) override;
- Fence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) override;
+ Fence CreateFence(bool is_stubbed) override;
void QueueFence(Fence& fence) override;
bool IsFenceSignaled(Fence& fence) const override;
void WaitFence(Fence& fence) override;
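With the address and payload removed, an OpenGL fence reduces to a GLsync object created when queued and later polled or waited on. The sketch below shows that minimal shape under the assumption of a current GL context loaded through glad; it omits the is_stubbed path and error handling and is not the actual GLInnerFence.

    #include <glad/glad.h>

    struct SimpleGLFence {
        GLsync sync = nullptr;

        void Queue() {
            sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
        }
        bool IsSignaled() const {
            GLsizei length = 0;
            GLint status = GL_UNSIGNALED;
            glGetSynciv(sync, GL_SYNC_STATUS, sizeof(GLint), &length, &status);
            return status == GL_SIGNALED;
        }
        void Wait() const {
            glClientWaitSync(sync, 0, GL_TIMEOUT_IGNORED);
        }
        void Release() {
            glDeleteSync(sync);
            sync = nullptr;
        }
    };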
diff --git a/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp b/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp
index 67eae369d..41493a7da 100644
--- a/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp
+++ b/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp
@@ -169,15 +169,15 @@ ConfigureFuncPtr ConfigureFunc(const std::array<Shader::Info, 5>& infos, u32 ena
}
} // Anonymous namespace
-GraphicsPipeline::GraphicsPipeline(
- const Device& device, TextureCache& texture_cache_, BufferCache& buffer_cache_,
- Tegra::MemoryManager& gpu_memory_, Tegra::Engines::Maxwell3D& maxwell3d_,
- ProgramManager& program_manager_, StateTracker& state_tracker_, ShaderWorker* thread_worker,
- VideoCore::ShaderNotify* shader_notify, std::array<std::string, 5> sources,
- std::array<std::vector<u32>, 5> sources_spirv, const std::array<const Shader::Info*, 5>& infos,
- const GraphicsPipelineKey& key_)
- : texture_cache{texture_cache_}, buffer_cache{buffer_cache_},
- gpu_memory{gpu_memory_}, maxwell3d{maxwell3d_}, program_manager{program_manager_},
+GraphicsPipeline::GraphicsPipeline(const Device& device, TextureCache& texture_cache_,
+ BufferCache& buffer_cache_, ProgramManager& program_manager_,
+ StateTracker& state_tracker_, ShaderWorker* thread_worker,
+ VideoCore::ShaderNotify* shader_notify,
+ std::array<std::string, 5> sources,
+ std::array<std::vector<u32>, 5> sources_spirv,
+ const std::array<const Shader::Info*, 5>& infos,
+ const GraphicsPipelineKey& key_)
+ : texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, program_manager{program_manager_},
state_tracker{state_tracker_}, key{key_} {
if (shader_notify) {
shader_notify->MarkShaderBuilding();
@@ -285,7 +285,7 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
buffer_cache.runtime.SetBaseStorageBindings(base_storage_bindings);
buffer_cache.runtime.SetEnableStorageBuffers(use_storage_buffers);
- const auto& regs{maxwell3d.regs};
+ const auto& regs{maxwell3d->regs};
const bool via_header_index{regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex};
const auto config_stage{[&](size_t stage) LAMBDA_FORCEINLINE {
const Shader::Info& info{stage_infos[stage]};
@@ -299,7 +299,7 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
++ssbo_index;
}
}
- const auto& cbufs{maxwell3d.state.shader_stages[stage].const_buffers};
+ const auto& cbufs{maxwell3d->state.shader_stages[stage].const_buffers};
const auto read_handle{[&](const auto& desc, u32 index) {
ASSERT(cbufs[desc.cbuf_index].enabled);
const u32 index_offset{index << desc.size_shift};
@@ -312,13 +312,14 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
const u32 second_offset{desc.secondary_cbuf_offset + index_offset};
const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].address +
second_offset};
- const u32 lhs_raw{gpu_memory.Read<u32>(addr)};
- const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)};
+ const u32 lhs_raw{gpu_memory->Read<u32>(addr) << desc.shift_left};
+ const u32 rhs_raw{gpu_memory->Read<u32>(separate_addr)
+ << desc.secondary_shift_left};
const u32 raw{lhs_raw | rhs_raw};
return TexturePair(raw, via_header_index);
}
}
- return TexturePair(gpu_memory.Read<u32>(addr), via_header_index);
+ return TexturePair(gpu_memory->Read<u32>(addr), via_header_index);
}};
const auto add_image{[&](const auto& desc, bool blacklist) LAMBDA_FORCEINLINE {
for (u32 index = 0; index < desc.count; ++index) {
diff --git a/src/video_core/renderer_opengl/gl_graphics_pipeline.h b/src/video_core/renderer_opengl/gl_graphics_pipeline.h
index 4ec15b966..a0f0e63cb 100644
--- a/src/video_core/renderer_opengl/gl_graphics_pipeline.h
+++ b/src/video_core/renderer_opengl/gl_graphics_pipeline.h
@@ -71,10 +71,9 @@ static_assert(std::is_trivially_constructible_v<GraphicsPipelineKey>);
class GraphicsPipeline {
public:
explicit GraphicsPipeline(const Device& device, TextureCache& texture_cache_,
- BufferCache& buffer_cache_, Tegra::MemoryManager& gpu_memory_,
- Tegra::Engines::Maxwell3D& maxwell3d_,
- ProgramManager& program_manager_, StateTracker& state_tracker_,
- ShaderWorker* thread_worker, VideoCore::ShaderNotify* shader_notify,
+ BufferCache& buffer_cache_, ProgramManager& program_manager_,
+ StateTracker& state_tracker_, ShaderWorker* thread_worker,
+ VideoCore::ShaderNotify* shader_notify,
std::array<std::string, 5> sources,
std::array<std::vector<u32>, 5> sources_spirv,
const std::array<const Shader::Info*, 5>& infos,
@@ -107,6 +106,11 @@ public:
};
}
+ void SetEngine(Tegra::Engines::Maxwell3D* maxwell3d_, Tegra::MemoryManager* gpu_memory_) {
+ maxwell3d = maxwell3d_;
+ gpu_memory = gpu_memory_;
+ }
+
private:
template <typename Spec>
void ConfigureImpl(bool is_indexed);
@@ -119,8 +123,8 @@ private:
TextureCache& texture_cache;
BufferCache& buffer_cache;
- Tegra::MemoryManager& gpu_memory;
- Tegra::Engines::Maxwell3D& maxwell3d;
+ Tegra::MemoryManager* gpu_memory;
+ Tegra::Engines::Maxwell3D* maxwell3d;
ProgramManager& program_manager;
StateTracker& state_tracker;
const GraphicsPipelineKey key;
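Both pipeline types now hold the engines and the memory manager as raw pointers and expect SetEngine to be called with the active channel's objects right before Configure, which is what the rasterizer does in Draw and DispatchCompute. A stripped-down sketch of that pattern follows; Maxwell3D and MemoryManager here are stand-in types, not the real engine classes.

    #include <cassert>

    struct Maxwell3D {};      // stand-in for Tegra::Engines::Maxwell3D
    struct MemoryManager {};  // stand-in for Tegra::MemoryManager

    class PipelineSketch {
    public:
        void SetEngine(Maxwell3D* maxwell3d_, MemoryManager* gpu_memory_) {
            maxwell3d = maxwell3d_;
            gpu_memory = gpu_memory_;
        }
        void Configure() {
            // The real Configure() reads maxwell3d->regs and gpu_memory; here we only
            // assert that the caller bound the active channel's engines first.
            assert(maxwell3d != nullptr && gpu_memory != nullptr);
        }

    private:
        Maxwell3D* maxwell3d = nullptr;
        MemoryManager* gpu_memory = nullptr;
    };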
diff --git a/src/video_core/renderer_opengl/gl_query_cache.cpp b/src/video_core/renderer_opengl/gl_query_cache.cpp
index ed40f5791..5070db441 100644
--- a/src/video_core/renderer_opengl/gl_query_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_query_cache.cpp
@@ -26,9 +26,8 @@ constexpr GLenum GetTarget(VideoCore::QueryType type) {
} // Anonymous namespace
-QueryCache::QueryCache(RasterizerOpenGL& rasterizer_, Tegra::Engines::Maxwell3D& maxwell3d_,
- Tegra::MemoryManager& gpu_memory_)
- : QueryCacheBase(rasterizer_, maxwell3d_, gpu_memory_), gl_rasterizer{rasterizer_} {}
+QueryCache::QueryCache(RasterizerOpenGL& rasterizer_)
+ : QueryCacheBase(rasterizer_), gl_rasterizer{rasterizer_} {}
QueryCache::~QueryCache() = default;
diff --git a/src/video_core/renderer_opengl/gl_query_cache.h b/src/video_core/renderer_opengl/gl_query_cache.h
index 8a49f1ef0..14ce59990 100644
--- a/src/video_core/renderer_opengl/gl_query_cache.h
+++ b/src/video_core/renderer_opengl/gl_query_cache.h
@@ -28,8 +28,7 @@ using CounterStream = VideoCommon::CounterStreamBase<QueryCache, HostCounter>;
class QueryCache final
: public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter> {
public:
- explicit QueryCache(RasterizerOpenGL& rasterizer_, Tegra::Engines::Maxwell3D& maxwell3d_,
- Tegra::MemoryManager& gpu_memory_);
+ explicit QueryCache(RasterizerOpenGL& rasterizer_);
~QueryCache();
OGLQuery AllocateQuery(VideoCore::QueryType type);
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index a0d048b0b..c2d80605d 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -16,7 +16,7 @@
#include "common/microprofile.h"
#include "common/scope_exit.h"
#include "common/settings.h"
-
+#include "video_core/control/channel_state.h"
#include "video_core/engines/kepler_compute.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/memory_manager.h"
@@ -56,22 +56,20 @@ RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra
Core::Memory::Memory& cpu_memory_, const Device& device_,
ScreenInfo& screen_info_, ProgramManager& program_manager_,
StateTracker& state_tracker_)
- : RasterizerAccelerated(cpu_memory_), gpu(gpu_), maxwell3d(gpu.Maxwell3D()),
- kepler_compute(gpu.KeplerCompute()), gpu_memory(gpu.MemoryManager()), device(device_),
- screen_info(screen_info_), program_manager(program_manager_), state_tracker(state_tracker_),
+ : RasterizerAccelerated(cpu_memory_), gpu(gpu_), device(device_), screen_info(screen_info_),
+ program_manager(program_manager_), state_tracker(state_tracker_),
texture_cache_runtime(device, program_manager, state_tracker),
- texture_cache(texture_cache_runtime, *this, maxwell3d, kepler_compute, gpu_memory),
- buffer_cache_runtime(device),
- buffer_cache(*this, maxwell3d, kepler_compute, gpu_memory, cpu_memory_, buffer_cache_runtime),
- shader_cache(*this, emu_window_, maxwell3d, kepler_compute, gpu_memory, device, texture_cache,
- buffer_cache, program_manager, state_tracker, gpu.ShaderNotify()),
- query_cache(*this, maxwell3d, gpu_memory), accelerate_dma(buffer_cache),
+ texture_cache(texture_cache_runtime, *this), buffer_cache_runtime(device),
+ buffer_cache(*this, cpu_memory_, buffer_cache_runtime),
+ shader_cache(*this, emu_window_, device, texture_cache, buffer_cache, program_manager,
+ state_tracker, gpu.ShaderNotify()),
+ query_cache(*this), accelerate_dma(buffer_cache),
fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache) {}
RasterizerOpenGL::~RasterizerOpenGL() = default;
void RasterizerOpenGL::SyncVertexFormats() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::VertexFormats]) {
return;
}
@@ -89,7 +87,7 @@ void RasterizerOpenGL::SyncVertexFormats() {
}
flags[Dirty::VertexFormat0 + index] = false;
- const auto attrib = maxwell3d.regs.vertex_attrib_format[index];
+ const auto attrib = maxwell3d->regs.vertex_attrib_format[index];
const auto gl_index = static_cast<GLuint>(index);
// Disable constant attributes.
@@ -113,13 +111,13 @@ void RasterizerOpenGL::SyncVertexFormats() {
}
void RasterizerOpenGL::SyncVertexInstances() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::VertexInstances]) {
return;
}
flags[Dirty::VertexInstances] = false;
- const auto& regs = maxwell3d.regs;
+ const auto& regs = maxwell3d->regs;
for (std::size_t index = 0; index < NUM_SUPPORTED_VERTEX_ATTRIBUTES; ++index) {
if (!flags[Dirty::VertexInstance0 + index]) {
continue;
@@ -140,11 +138,11 @@ void RasterizerOpenGL::LoadDiskResources(u64 title_id, std::stop_token stop_load
void RasterizerOpenGL::Clear() {
MICROPROFILE_SCOPE(OpenGL_Clears);
- if (!maxwell3d.ShouldExecute()) {
+ if (!maxwell3d->ShouldExecute()) {
return;
}
- const auto& regs = maxwell3d.regs;
+ const auto& regs = maxwell3d->regs;
bool use_color{};
bool use_depth{};
bool use_stencil{};
@@ -217,22 +215,26 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) {
if (!pipeline) {
return;
}
+
+ gpu.TickWork();
+
std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+ pipeline->SetEngine(maxwell3d, gpu_memory);
pipeline->Configure(is_indexed);
SyncState();
- const GLenum primitive_mode = MaxwellToGL::PrimitiveTopology(maxwell3d.regs.draw.topology);
+ const GLenum primitive_mode = MaxwellToGL::PrimitiveTopology(maxwell3d->regs.draw.topology);
BeginTransformFeedback(pipeline, primitive_mode);
- const GLuint base_instance = static_cast<GLuint>(maxwell3d.regs.vb_base_instance);
+ const GLuint base_instance = static_cast<GLuint>(maxwell3d->regs.vb_base_instance);
const GLsizei num_instances =
- static_cast<GLsizei>(is_instanced ? maxwell3d.mme_draw.instance_count : 1);
+ static_cast<GLsizei>(is_instanced ? maxwell3d->mme_draw.instance_count : 1);
if (is_indexed) {
- const GLint base_vertex = static_cast<GLint>(maxwell3d.regs.vb_element_base);
- const GLsizei num_vertices = static_cast<GLsizei>(maxwell3d.regs.index_array.count);
+ const GLint base_vertex = static_cast<GLint>(maxwell3d->regs.vb_element_base);
+ const GLsizei num_vertices = static_cast<GLsizei>(maxwell3d->regs.index_array.count);
const GLvoid* const offset = buffer_cache_runtime.IndexOffset();
- const GLenum format = MaxwellToGL::IndexFormat(maxwell3d.regs.index_array.format);
+ const GLenum format = MaxwellToGL::IndexFormat(maxwell3d->regs.index_array.format);
if (num_instances == 1 && base_instance == 0 && base_vertex == 0) {
glDrawElements(primitive_mode, num_vertices, format, offset);
} else if (num_instances == 1 && base_instance == 0) {
@@ -251,8 +253,8 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) {
base_instance);
}
} else {
- const GLint base_vertex = static_cast<GLint>(maxwell3d.regs.vertex_buffer.first);
- const GLsizei num_vertices = static_cast<GLsizei>(maxwell3d.regs.vertex_buffer.count);
+ const GLint base_vertex = static_cast<GLint>(maxwell3d->regs.vertex_buffer.first);
+ const GLsizei num_vertices = static_cast<GLsizei>(maxwell3d->regs.vertex_buffer.count);
if (num_instances == 1 && base_instance == 0) {
glDrawArrays(primitive_mode, base_vertex, num_vertices);
} else if (base_instance == 0) {
@@ -273,8 +275,9 @@ void RasterizerOpenGL::DispatchCompute() {
if (!pipeline) {
return;
}
+ pipeline->SetEngine(kepler_compute, gpu_memory);
pipeline->Configure();
- const auto& qmd{kepler_compute.launch_description};
+ const auto& qmd{kepler_compute->launch_description};
glDispatchCompute(qmd.grid_dim_x, qmd.grid_dim_y, qmd.grid_dim_z);
++num_queued_commands;
has_written_global_memory |= pipeline->WritesGlobalMemory();
@@ -359,7 +362,7 @@ void RasterizerOpenGL::OnCPUWrite(VAddr addr, u64 size) {
}
}
-void RasterizerOpenGL::SyncGuestHost() {
+void RasterizerOpenGL::InvalidateGPUCache() {
MICROPROFILE_SCOPE(OpenGL_CacheManagement);
shader_cache.SyncGuestHost();
{
@@ -380,40 +383,30 @@ void RasterizerOpenGL::UnmapMemory(VAddr addr, u64 size) {
shader_cache.OnCPUWrite(addr, size);
}
-void RasterizerOpenGL::ModifyGPUMemory(GPUVAddr addr, u64 size) {
+void RasterizerOpenGL::ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) {
{
std::scoped_lock lock{texture_cache.mutex};
- texture_cache.UnmapGPUMemory(addr, size);
+ texture_cache.UnmapGPUMemory(as_id, addr, size);
}
}
-void RasterizerOpenGL::SignalSemaphore(GPUVAddr addr, u32 value) {
- if (!gpu.IsAsync()) {
- gpu_memory.Write<u32>(addr, value);
- return;
- }
- fence_manager.SignalSemaphore(addr, value);
+void RasterizerOpenGL::SignalFence(std::function<void()>&& func) {
+ fence_manager.SignalFence(std::move(func));
+}
+
+void RasterizerOpenGL::SyncOperation(std::function<void()>&& func) {
+ fence_manager.SyncOperation(std::move(func));
}
void RasterizerOpenGL::SignalSyncPoint(u32 value) {
- if (!gpu.IsAsync()) {
- gpu.IncrementSyncPoint(value);
- return;
- }
fence_manager.SignalSyncPoint(value);
}
void RasterizerOpenGL::SignalReference() {
- if (!gpu.IsAsync()) {
- return;
- }
fence_manager.SignalOrdering();
}
void RasterizerOpenGL::ReleaseFences() {
- if (!gpu.IsAsync()) {
- return;
- }
fence_manager.WaitPendingFences();
}
@@ -430,6 +423,7 @@ void RasterizerOpenGL::WaitForIdle() {
}
void RasterizerOpenGL::FragmentBarrier() {
+ glTextureBarrier();
glMemoryBarrier(GL_FRAMEBUFFER_BARRIER_BIT | GL_TEXTURE_FETCH_BARRIER_BIT);
}
@@ -482,13 +476,13 @@ Tegra::Engines::AccelerateDMAInterface& RasterizerOpenGL::AccessAccelerateDMA()
}
void RasterizerOpenGL::AccelerateInlineToMemory(GPUVAddr address, size_t copy_size,
- std::span<u8> memory) {
- auto cpu_addr = gpu_memory.GpuToCpuAddress(address);
+ std::span<const u8> memory) {
+ auto cpu_addr = gpu_memory->GpuToCpuAddress(address);
if (!cpu_addr) [[unlikely]] {
- gpu_memory.WriteBlock(address, memory.data(), copy_size);
+ gpu_memory->WriteBlock(address, memory.data(), copy_size);
return;
}
- gpu_memory.WriteBlockUnsafe(address, memory.data(), copy_size);
+ gpu_memory->WriteBlockUnsafe(address, memory.data(), copy_size);
{
std::unique_lock<std::mutex> lock{buffer_cache.mutex};
if (!buffer_cache.InlineMemory(*cpu_addr, copy_size, memory)) {
@@ -551,8 +545,8 @@ void RasterizerOpenGL::SyncState() {
}
void RasterizerOpenGL::SyncViewport() {
- auto& flags = maxwell3d.dirty.flags;
- const auto& regs = maxwell3d.regs;
+ auto& flags = maxwell3d->dirty.flags;
+ const auto& regs = maxwell3d->regs;
const bool rescale_viewports = flags[VideoCommon::Dirty::RescaleViewports];
const bool dirty_viewport = flags[Dirty::Viewports] || rescale_viewports;
@@ -657,23 +651,23 @@ void RasterizerOpenGL::SyncViewport() {
}
void RasterizerOpenGL::SyncDepthClamp() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::DepthClampEnabled]) {
return;
}
flags[Dirty::DepthClampEnabled] = false;
- oglEnable(GL_DEPTH_CLAMP, maxwell3d.regs.view_volume_clip_control.depth_clamp_disabled == 0);
+ oglEnable(GL_DEPTH_CLAMP, maxwell3d->regs.view_volume_clip_control.depth_clamp_disabled == 0);
}
void RasterizerOpenGL::SyncClipEnabled(u32 clip_mask) {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::ClipDistances] && !flags[VideoCommon::Dirty::Shaders]) {
return;
}
flags[Dirty::ClipDistances] = false;
- clip_mask &= maxwell3d.regs.clip_distance_enabled;
+ clip_mask &= maxwell3d->regs.clip_distance_enabled;
if (clip_mask == last_clip_distance_mask) {
return;
}
@@ -689,8 +683,8 @@ void RasterizerOpenGL::SyncClipCoef() {
}
void RasterizerOpenGL::SyncCullMode() {
- auto& flags = maxwell3d.dirty.flags;
- const auto& regs = maxwell3d.regs;
+ auto& flags = maxwell3d->dirty.flags;
+ const auto& regs = maxwell3d->regs;
if (flags[Dirty::CullTest]) {
flags[Dirty::CullTest] = false;
@@ -705,23 +699,23 @@ void RasterizerOpenGL::SyncCullMode() {
}
void RasterizerOpenGL::SyncPrimitiveRestart() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::PrimitiveRestart]) {
return;
}
flags[Dirty::PrimitiveRestart] = false;
- if (maxwell3d.regs.primitive_restart.enabled) {
+ if (maxwell3d->regs.primitive_restart.enabled) {
glEnable(GL_PRIMITIVE_RESTART);
- glPrimitiveRestartIndex(maxwell3d.regs.primitive_restart.index);
+ glPrimitiveRestartIndex(maxwell3d->regs.primitive_restart.index);
} else {
glDisable(GL_PRIMITIVE_RESTART);
}
}
void RasterizerOpenGL::SyncDepthTestState() {
- auto& flags = maxwell3d.dirty.flags;
- const auto& regs = maxwell3d.regs;
+ auto& flags = maxwell3d->dirty.flags;
+ const auto& regs = maxwell3d->regs;
if (flags[Dirty::DepthMask]) {
flags[Dirty::DepthMask] = false;
@@ -740,13 +734,13 @@ void RasterizerOpenGL::SyncDepthTestState() {
}
void RasterizerOpenGL::SyncStencilTestState() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::StencilTest]) {
return;
}
flags[Dirty::StencilTest] = false;
- const auto& regs = maxwell3d.regs;
+ const auto& regs = maxwell3d->regs;
oglEnable(GL_STENCIL_TEST, regs.stencil_enable);
glStencilFuncSeparate(GL_FRONT, MaxwellToGL::ComparisonOp(regs.stencil_front_func_func),
@@ -771,23 +765,23 @@ void RasterizerOpenGL::SyncStencilTestState() {
}
void RasterizerOpenGL::SyncRasterizeEnable() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::RasterizeEnable]) {
return;
}
flags[Dirty::RasterizeEnable] = false;
- oglEnable(GL_RASTERIZER_DISCARD, maxwell3d.regs.rasterize_enable == 0);
+ oglEnable(GL_RASTERIZER_DISCARD, maxwell3d->regs.rasterize_enable == 0);
}
void RasterizerOpenGL::SyncPolygonModes() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::PolygonModes]) {
return;
}
flags[Dirty::PolygonModes] = false;
- const auto& regs = maxwell3d.regs;
+ const auto& regs = maxwell3d->regs;
if (regs.fill_rectangle) {
if (!GLAD_GL_NV_fill_rectangle) {
LOG_ERROR(Render_OpenGL, "GL_NV_fill_rectangle used and not supported");
@@ -820,7 +814,7 @@ void RasterizerOpenGL::SyncPolygonModes() {
}
void RasterizerOpenGL::SyncColorMask() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::ColorMasks]) {
return;
}
@@ -829,7 +823,7 @@ void RasterizerOpenGL::SyncColorMask() {
const bool force = flags[Dirty::ColorMaskCommon];
flags[Dirty::ColorMaskCommon] = false;
- const auto& regs = maxwell3d.regs;
+ const auto& regs = maxwell3d->regs;
if (regs.color_mask_common) {
if (!force && !flags[Dirty::ColorMask0]) {
return;
@@ -854,30 +848,30 @@ void RasterizerOpenGL::SyncColorMask() {
}
void RasterizerOpenGL::SyncMultiSampleState() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::MultisampleControl]) {
return;
}
flags[Dirty::MultisampleControl] = false;
- const auto& regs = maxwell3d.regs;
+ const auto& regs = maxwell3d->regs;
oglEnable(GL_SAMPLE_ALPHA_TO_COVERAGE, regs.multisample_control.alpha_to_coverage);
oglEnable(GL_SAMPLE_ALPHA_TO_ONE, regs.multisample_control.alpha_to_one);
}
void RasterizerOpenGL::SyncFragmentColorClampState() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::FragmentClampColor]) {
return;
}
flags[Dirty::FragmentClampColor] = false;
- glClampColor(GL_CLAMP_FRAGMENT_COLOR, maxwell3d.regs.frag_color_clamp ? GL_TRUE : GL_FALSE);
+ glClampColor(GL_CLAMP_FRAGMENT_COLOR, maxwell3d->regs.frag_color_clamp ? GL_TRUE : GL_FALSE);
}
void RasterizerOpenGL::SyncBlendState() {
- auto& flags = maxwell3d.dirty.flags;
- const auto& regs = maxwell3d.regs;
+ auto& flags = maxwell3d->dirty.flags;
+ const auto& regs = maxwell3d->regs;
if (flags[Dirty::BlendColor]) {
flags[Dirty::BlendColor] = false;
@@ -934,13 +928,13 @@ void RasterizerOpenGL::SyncBlendState() {
}
void RasterizerOpenGL::SyncLogicOpState() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::LogicOp]) {
return;
}
flags[Dirty::LogicOp] = false;
- const auto& regs = maxwell3d.regs;
+ const auto& regs = maxwell3d->regs;
if (regs.logic_op.enable) {
glEnable(GL_COLOR_LOGIC_OP);
glLogicOp(MaxwellToGL::LogicOp(regs.logic_op.operation));
@@ -950,7 +944,7 @@ void RasterizerOpenGL::SyncLogicOpState() {
}
void RasterizerOpenGL::SyncScissorTest() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::Scissors] && !flags[VideoCommon::Dirty::RescaleScissors]) {
return;
}
@@ -959,7 +953,7 @@ void RasterizerOpenGL::SyncScissorTest() {
const bool force = flags[VideoCommon::Dirty::RescaleScissors];
flags[VideoCommon::Dirty::RescaleScissors] = false;
- const auto& regs = maxwell3d.regs;
+ const auto& regs = maxwell3d->regs;
const auto& resolution = Settings::values.resolution_info;
const bool is_rescaling{texture_cache.IsRescaling()};
@@ -995,39 +989,39 @@ void RasterizerOpenGL::SyncScissorTest() {
}
void RasterizerOpenGL::SyncPointState() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::PointSize]) {
return;
}
flags[Dirty::PointSize] = false;
- oglEnable(GL_POINT_SPRITE, maxwell3d.regs.point_sprite_enable);
- oglEnable(GL_PROGRAM_POINT_SIZE, maxwell3d.regs.vp_point_size.enable);
+ oglEnable(GL_POINT_SPRITE, maxwell3d->regs.point_sprite_enable);
+ oglEnable(GL_PROGRAM_POINT_SIZE, maxwell3d->regs.vp_point_size.enable);
const bool is_rescaling{texture_cache.IsRescaling()};
const float scale = is_rescaling ? Settings::values.resolution_info.up_factor : 1.0f;
- glPointSize(std::max(1.0f, maxwell3d.regs.point_size * scale));
+ glPointSize(std::max(1.0f, maxwell3d->regs.point_size * scale));
}
void RasterizerOpenGL::SyncLineState() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::LineWidth]) {
return;
}
flags[Dirty::LineWidth] = false;
- const auto& regs = maxwell3d.regs;
+ const auto& regs = maxwell3d->regs;
oglEnable(GL_LINE_SMOOTH, regs.line_smooth_enable);
glLineWidth(regs.line_smooth_enable ? regs.line_width_smooth : regs.line_width_aliased);
}
void RasterizerOpenGL::SyncPolygonOffset() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::PolygonOffset]) {
return;
}
flags[Dirty::PolygonOffset] = false;
- const auto& regs = maxwell3d.regs;
+ const auto& regs = maxwell3d->regs;
oglEnable(GL_POLYGON_OFFSET_FILL, regs.polygon_offset_fill_enable);
oglEnable(GL_POLYGON_OFFSET_LINE, regs.polygon_offset_line_enable);
oglEnable(GL_POLYGON_OFFSET_POINT, regs.polygon_offset_point_enable);
@@ -1041,13 +1035,13 @@ void RasterizerOpenGL::SyncPolygonOffset() {
}
void RasterizerOpenGL::SyncAlphaTest() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::AlphaTest]) {
return;
}
flags[Dirty::AlphaTest] = false;
- const auto& regs = maxwell3d.regs;
+ const auto& regs = maxwell3d->regs;
if (regs.alpha_test_enabled) {
glEnable(GL_ALPHA_TEST);
glAlphaFunc(MaxwellToGL::ComparisonOp(regs.alpha_test_func), regs.alpha_test_ref);
@@ -1057,17 +1051,17 @@ void RasterizerOpenGL::SyncAlphaTest() {
}
void RasterizerOpenGL::SyncFramebufferSRGB() {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::FramebufferSRGB]) {
return;
}
flags[Dirty::FramebufferSRGB] = false;
- oglEnable(GL_FRAMEBUFFER_SRGB, maxwell3d.regs.framebuffer_srgb);
+ oglEnable(GL_FRAMEBUFFER_SRGB, maxwell3d->regs.framebuffer_srgb);
}
void RasterizerOpenGL::BeginTransformFeedback(GraphicsPipeline* program, GLenum primitive_mode) {
- const auto& regs = maxwell3d.regs;
+ const auto& regs = maxwell3d->regs;
if (regs.tfb_enabled == 0) {
return;
}
@@ -1086,11 +1080,48 @@ void RasterizerOpenGL::BeginTransformFeedback(GraphicsPipeline* program, GLenum
}
void RasterizerOpenGL::EndTransformFeedback() {
- if (maxwell3d.regs.tfb_enabled != 0) {
+ if (maxwell3d->regs.tfb_enabled != 0) {
glEndTransformFeedback();
}
}
+void RasterizerOpenGL::InitializeChannel(Tegra::Control::ChannelState& channel) {
+ CreateChannel(channel);
+ {
+ std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+ texture_cache.CreateChannel(channel);
+ buffer_cache.CreateChannel(channel);
+ }
+ shader_cache.CreateChannel(channel);
+ query_cache.CreateChannel(channel);
+ state_tracker.SetupTables(channel);
+}
+
+void RasterizerOpenGL::BindChannel(Tegra::Control::ChannelState& channel) {
+ const s32 channel_id = channel.bind_id;
+ BindToChannel(channel_id);
+ {
+ std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+ texture_cache.BindToChannel(channel_id);
+ buffer_cache.BindToChannel(channel_id);
+ }
+ shader_cache.BindToChannel(channel_id);
+ query_cache.BindToChannel(channel_id);
+ state_tracker.ChangeChannel(channel);
+ state_tracker.InvalidateState();
+}
+
+void RasterizerOpenGL::ReleaseChannel(s32 channel_id) {
+ EraseChannel(channel_id);
+ {
+ std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+ texture_cache.EraseChannel(channel_id);
+ buffer_cache.EraseChannel(channel_id);
+ }
+ shader_cache.EraseChannel(channel_id);
+ query_cache.EraseChannel(channel_id);
+}
+
AccelerateDMA::AccelerateDMA(BufferCache& buffer_cache_) : buffer_cache{buffer_cache_} {}
bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) {
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index 31a16fcba..45131b785 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -12,6 +12,7 @@
#include <glad/glad.h>
#include "common/common_types.h"
+#include "video_core/control/channel_state_cache.h"
#include "video_core/engines/maxwell_dma.h"
#include "video_core/rasterizer_accelerated.h"
#include "video_core/rasterizer_interface.h"
@@ -58,7 +59,8 @@ private:
BufferCache& buffer_cache;
};
-class RasterizerOpenGL : public VideoCore::RasterizerAccelerated {
+class RasterizerOpenGL : public VideoCore::RasterizerAccelerated,
+ protected VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
public:
explicit RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_,
Core::Memory::Memory& cpu_memory_, const Device& device_,
@@ -78,10 +80,11 @@ public:
bool MustFlushRegion(VAddr addr, u64 size) override;
void InvalidateRegion(VAddr addr, u64 size) override;
void OnCPUWrite(VAddr addr, u64 size) override;
- void SyncGuestHost() override;
+ void InvalidateGPUCache() override;
void UnmapMemory(VAddr addr, u64 size) override;
- void ModifyGPUMemory(GPUVAddr addr, u64 size) override;
- void SignalSemaphore(GPUVAddr addr, u32 value) override;
+ void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) override;
+ void SignalFence(std::function<void()>&& func) override;
+ void SyncOperation(std::function<void()>&& func) override;
void SignalSyncPoint(u32 value) override;
void SignalReference() override;
void ReleaseFences() override;
@@ -96,7 +99,7 @@ public:
const Tegra::Engines::Fermi2D::Config& copy_config) override;
Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override;
void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size,
- std::span<u8> memory) override;
+ std::span<const u8> memory) override;
bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr,
u32 pixel_stride) override;
void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
@@ -107,6 +110,12 @@ public:
return num_queued_commands > 0;
}
+ void InitializeChannel(Tegra::Control::ChannelState& channel) override;
+
+ void BindChannel(Tegra::Control::ChannelState& channel) override;
+
+ void ReleaseChannel(s32 channel_id) override;
+
private:
static constexpr size_t MAX_TEXTURES = 192;
static constexpr size_t MAX_IMAGES = 48;
@@ -191,9 +200,6 @@ private:
void EndTransformFeedback();
Tegra::GPU& gpu;
- Tegra::Engines::Maxwell3D& maxwell3d;
- Tegra::Engines::KeplerCompute& kepler_compute;
- Tegra::MemoryManager& gpu_memory;
const Device& device;
ScreenInfo& screen_info;
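RasterizerOpenGL no longer keeps lifetime references to Maxwell3D, KeplerCompute, or a single MemoryManager; it derives from ChannelSetupCaches and, like the texture, buffer, shader, and query caches, keeps per-channel state that is created, bound, and erased through the channel hooks shown earlier. The sketch below is a rough illustration of that bookkeeping pattern only, not the real VideoCommon::ChannelSetupCaches.

    #include <cstddef>
    #include <unordered_map>

    struct ChannelInfo {
        // The real ChannelInfo holds the channel's engine and memory-manager pointers.
        int placeholder = 0;
    };

    class ChannelSetupCachesSketch {
    public:
        void CreateChannel(std::size_t channel_id) {
            channels.emplace(channel_id, ChannelInfo{});
        }
        void BindToChannel(std::size_t channel_id) {
            current = &channels.at(channel_id);
        }
        void EraseChannel(std::size_t channel_id) {
            if (current == &channels.at(channel_id)) {
                current = nullptr;
            }
            channels.erase(channel_id);
        }

    private:
        std::unordered_map<std::size_t, ChannelInfo> channels;
        ChannelInfo* current = nullptr;
    };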
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp
index 0b8d8ec92..5a29a41d2 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp
@@ -151,16 +151,13 @@ void SetXfbState(VideoCommon::TransformFeedbackState& state, const Maxwell& regs
} // Anonymous namespace
ShaderCache::ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindow& emu_window_,
- Tegra::Engines::Maxwell3D& maxwell3d_,
- Tegra::Engines::KeplerCompute& kepler_compute_,
- Tegra::MemoryManager& gpu_memory_, const Device& device_,
- TextureCache& texture_cache_, BufferCache& buffer_cache_,
- ProgramManager& program_manager_, StateTracker& state_tracker_,
- VideoCore::ShaderNotify& shader_notify_)
- : VideoCommon::ShaderCache{rasterizer_, gpu_memory_, maxwell3d_, kepler_compute_},
- emu_window{emu_window_}, device{device_}, texture_cache{texture_cache_},
- buffer_cache{buffer_cache_}, program_manager{program_manager_}, state_tracker{state_tracker_},
- shader_notify{shader_notify_}, use_asynchronous_shaders{device.UseAsynchronousShaders()},
+ const Device& device_, TextureCache& texture_cache_,
+ BufferCache& buffer_cache_, ProgramManager& program_manager_,
+ StateTracker& state_tracker_, VideoCore::ShaderNotify& shader_notify_)
+ : VideoCommon::ShaderCache{rasterizer_}, emu_window{emu_window_}, device{device_},
+ texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, program_manager{program_manager_},
+ state_tracker{state_tracker_}, shader_notify{shader_notify_},
+ use_asynchronous_shaders{device.UseAsynchronousShaders()},
profile{
.supported_spirv = 0x00010000,
@@ -310,7 +307,7 @@ GraphicsPipeline* ShaderCache::CurrentGraphicsPipeline() {
current_pipeline = nullptr;
return nullptr;
}
- const auto& regs{maxwell3d.regs};
+ const auto& regs{maxwell3d->regs};
graphics_key.raw = 0;
graphics_key.early_z.Assign(regs.force_early_fragment_tests != 0 ? 1 : 0);
graphics_key.gs_input_topology.Assign(graphics_key.unique_hashes[4] != 0
@@ -351,13 +348,13 @@ GraphicsPipeline* ShaderCache::BuiltPipeline(GraphicsPipeline* pipeline) const n
}
// If something is using depth, we can assume that games are not rendering anything which
// will be used one time.
- if (maxwell3d.regs.zeta_enable) {
+ if (maxwell3d->regs.zeta_enable) {
return nullptr;
}
// If games are using a small index count, we can assume these are full screen quads.
// Usually these shaders are only used once for building textures so we can assume they
// can't be built async
- if (maxwell3d.regs.index_array.count <= 6 || maxwell3d.regs.vertex_buffer.count <= 6) {
+ if (maxwell3d->regs.index_array.count <= 6 || maxwell3d->regs.vertex_buffer.count <= 6) {
return pipeline;
}
return nullptr;
@@ -368,7 +365,7 @@ ComputePipeline* ShaderCache::CurrentComputePipeline() {
if (!shader) {
return nullptr;
}
- const auto& qmd{kepler_compute.launch_description};
+ const auto& qmd{kepler_compute->launch_description};
const ComputePipelineKey key{
.unique_hash = shader->unique_hash,
.shared_memory_size = qmd.shared_alloc,
@@ -480,9 +477,9 @@ std::unique_ptr<GraphicsPipeline> ShaderCache::CreateGraphicsPipeline(
previous_program = &program;
}
auto* const thread_worker{build_in_parallel ? workers.get() : nullptr};
- return std::make_unique<GraphicsPipeline>(
- device, texture_cache, buffer_cache, gpu_memory, maxwell3d, program_manager, state_tracker,
- thread_worker, &shader_notify, sources, sources_spirv, infos, key);
+ return std::make_unique<GraphicsPipeline>(device, texture_cache, buffer_cache, program_manager,
+ state_tracker, thread_worker, &shader_notify, sources,
+ sources_spirv, infos, key);
} catch (Shader::Exception& exception) {
LOG_ERROR(Render_OpenGL, "{}", exception.what());
@@ -491,9 +488,9 @@ std::unique_ptr<GraphicsPipeline> ShaderCache::CreateGraphicsPipeline(
std::unique_ptr<ComputePipeline> ShaderCache::CreateComputePipeline(
const ComputePipelineKey& key, const VideoCommon::ShaderInfo* shader) {
- const GPUVAddr program_base{kepler_compute.regs.code_loc.Address()};
- const auto& qmd{kepler_compute.launch_description};
- ComputeEnvironment env{kepler_compute, gpu_memory, program_base, qmd.program_start};
+ const GPUVAddr program_base{kepler_compute->regs.code_loc.Address()};
+ const auto& qmd{kepler_compute->launch_description};
+ ComputeEnvironment env{*kepler_compute, *gpu_memory, program_base, qmd.program_start};
env.SetCachedSize(shader->size_bytes);
main_pools.ReleaseContents();
@@ -536,9 +533,8 @@ std::unique_ptr<ComputePipeline> ShaderCache::CreateComputePipeline(
break;
}
- return std::make_unique<ComputePipeline>(device, texture_cache, buffer_cache, gpu_memory,
- kepler_compute, program_manager, program.info, code,
- code_spirv);
+ return std::make_unique<ComputePipeline>(device, texture_cache, buffer_cache, program_manager,
+ program.info, code, code_spirv);
} catch (Shader::Exception& exception) {
LOG_ERROR(Render_OpenGL, "{}", exception.what());
return nullptr;
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.h b/src/video_core/renderer_opengl/gl_shader_cache.h
index a14269dea..89f181fe3 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.h
+++ b/src/video_core/renderer_opengl/gl_shader_cache.h
@@ -30,12 +30,9 @@ using ShaderWorker = Common::StatefulThreadWorker<ShaderContext::Context>;
class ShaderCache : public VideoCommon::ShaderCache {
public:
explicit ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindow& emu_window_,
- Tegra::Engines::Maxwell3D& maxwell3d_,
- Tegra::Engines::KeplerCompute& kepler_compute_,
- Tegra::MemoryManager& gpu_memory_, const Device& device_,
- TextureCache& texture_cache_, BufferCache& buffer_cache_,
- ProgramManager& program_manager_, StateTracker& state_tracker_,
- VideoCore::ShaderNotify& shader_notify_);
+ const Device& device_, TextureCache& texture_cache_,
+ BufferCache& buffer_cache_, ProgramManager& program_manager_,
+ StateTracker& state_tracker_, VideoCore::ShaderNotify& shader_notify_);
~ShaderCache();
void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
diff --git a/src/video_core/renderer_opengl/gl_state_tracker.cpp b/src/video_core/renderer_opengl/gl_state_tracker.cpp
index 912725ef7..a8f3a0f57 100644
--- a/src/video_core/renderer_opengl/gl_state_tracker.cpp
+++ b/src/video_core/renderer_opengl/gl_state_tracker.cpp
@@ -7,8 +7,8 @@
#include "common/common_types.h"
#include "core/core.h"
+#include "video_core/control/channel_state.h"
#include "video_core/engines/maxwell_3d.h"
-#include "video_core/gpu.h"
#include "video_core/renderer_opengl/gl_state_tracker.h"
#define OFF(field_name) MAXWELL3D_REG_INDEX(field_name)
@@ -202,9 +202,8 @@ void SetupDirtyMisc(Tables& tables) {
} // Anonymous namespace
-StateTracker::StateTracker(Tegra::GPU& gpu) : flags{gpu.Maxwell3D().dirty.flags} {
- auto& dirty = gpu.Maxwell3D().dirty;
- auto& tables = dirty.tables;
+void StateTracker::SetupTables(Tegra::Control::ChannelState& channel_state) {
+ auto& tables{channel_state.maxwell_3d->dirty.tables};
SetupDirtyFlags(tables);
SetupDirtyColorMasks(tables);
SetupDirtyViewports(tables);
@@ -230,4 +229,14 @@ StateTracker::StateTracker(Tegra::GPU& gpu) : flags{gpu.Maxwell3D().dirty.flags}
SetupDirtyMisc(tables);
}
+void StateTracker::ChangeChannel(Tegra::Control::ChannelState& channel_state) {
+ flags = &channel_state.maxwell_3d->dirty.flags;
+}
+
+void StateTracker::InvalidateState() {
+ flags->set();
+}
+
+StateTracker::StateTracker() : flags{&default_flags} {}
+
} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_state_tracker.h b/src/video_core/renderer_opengl/gl_state_tracker.h
index 04e024f08..19bcf3f35 100644
--- a/src/video_core/renderer_opengl/gl_state_tracker.h
+++ b/src/video_core/renderer_opengl/gl_state_tracker.h
@@ -12,8 +12,10 @@
#include "video_core/engines/maxwell_3d.h"
namespace Tegra {
-class GPU;
+namespace Control {
+struct ChannelState;
}
+} // namespace Tegra
namespace OpenGL {
@@ -83,7 +85,7 @@ static_assert(Last <= std::numeric_limits<u8>::max());
class StateTracker {
public:
- explicit StateTracker(Tegra::GPU& gpu);
+ explicit StateTracker();
void BindIndexBuffer(GLuint new_index_buffer) {
if (index_buffer == new_index_buffer) {
@@ -121,94 +123,107 @@ public:
}
void NotifyScreenDrawVertexArray() {
- flags[OpenGL::Dirty::VertexFormats] = true;
- flags[OpenGL::Dirty::VertexFormat0 + 0] = true;
- flags[OpenGL::Dirty::VertexFormat0 + 1] = true;
+ (*flags)[OpenGL::Dirty::VertexFormats] = true;
+ (*flags)[OpenGL::Dirty::VertexFormat0 + 0] = true;
+ (*flags)[OpenGL::Dirty::VertexFormat0 + 1] = true;
- flags[VideoCommon::Dirty::VertexBuffers] = true;
- flags[VideoCommon::Dirty::VertexBuffer0] = true;
+ (*flags)[VideoCommon::Dirty::VertexBuffers] = true;
+ (*flags)[VideoCommon::Dirty::VertexBuffer0] = true;
- flags[OpenGL::Dirty::VertexInstances] = true;
- flags[OpenGL::Dirty::VertexInstance0 + 0] = true;
- flags[OpenGL::Dirty::VertexInstance0 + 1] = true;
+ (*flags)[OpenGL::Dirty::VertexInstances] = true;
+ (*flags)[OpenGL::Dirty::VertexInstance0 + 0] = true;
+ (*flags)[OpenGL::Dirty::VertexInstance0 + 1] = true;
}
void NotifyPolygonModes() {
- flags[OpenGL::Dirty::PolygonModes] = true;
- flags[OpenGL::Dirty::PolygonModeFront] = true;
- flags[OpenGL::Dirty::PolygonModeBack] = true;
+ (*flags)[OpenGL::Dirty::PolygonModes] = true;
+ (*flags)[OpenGL::Dirty::PolygonModeFront] = true;
+ (*flags)[OpenGL::Dirty::PolygonModeBack] = true;
}
void NotifyViewport0() {
- flags[OpenGL::Dirty::Viewports] = true;
- flags[OpenGL::Dirty::Viewport0] = true;
+ (*flags)[OpenGL::Dirty::Viewports] = true;
+ (*flags)[OpenGL::Dirty::Viewport0] = true;
}
void NotifyScissor0() {
- flags[OpenGL::Dirty::Scissors] = true;
- flags[OpenGL::Dirty::Scissor0] = true;
+ (*flags)[OpenGL::Dirty::Scissors] = true;
+ (*flags)[OpenGL::Dirty::Scissor0] = true;
}
void NotifyColorMask(size_t index) {
- flags[OpenGL::Dirty::ColorMasks] = true;
- flags[OpenGL::Dirty::ColorMask0 + index] = true;
+ (*flags)[OpenGL::Dirty::ColorMasks] = true;
+ (*flags)[OpenGL::Dirty::ColorMask0 + index] = true;
}
void NotifyBlend0() {
- flags[OpenGL::Dirty::BlendStates] = true;
- flags[OpenGL::Dirty::BlendState0] = true;
+ (*flags)[OpenGL::Dirty::BlendStates] = true;
+ (*flags)[OpenGL::Dirty::BlendState0] = true;
}
void NotifyFramebuffer() {
- flags[VideoCommon::Dirty::RenderTargets] = true;
+ (*flags)[VideoCommon::Dirty::RenderTargets] = true;
}
void NotifyFrontFace() {
- flags[OpenGL::Dirty::FrontFace] = true;
+ (*flags)[OpenGL::Dirty::FrontFace] = true;
}
void NotifyCullTest() {
- flags[OpenGL::Dirty::CullTest] = true;
+ (*flags)[OpenGL::Dirty::CullTest] = true;
}
void NotifyDepthMask() {
- flags[OpenGL::Dirty::DepthMask] = true;
+ (*flags)[OpenGL::Dirty::DepthMask] = true;
}
void NotifyDepthTest() {
- flags[OpenGL::Dirty::DepthTest] = true;
+ (*flags)[OpenGL::Dirty::DepthTest] = true;
}
void NotifyStencilTest() {
- flags[OpenGL::Dirty::StencilTest] = true;
+ (*flags)[OpenGL::Dirty::StencilTest] = true;
}
void NotifyPolygonOffset() {
- flags[OpenGL::Dirty::PolygonOffset] = true;
+ (*flags)[OpenGL::Dirty::PolygonOffset] = true;
}
void NotifyRasterizeEnable() {
- flags[OpenGL::Dirty::RasterizeEnable] = true;
+ (*flags)[OpenGL::Dirty::RasterizeEnable] = true;
}
void NotifyFramebufferSRGB() {
- flags[OpenGL::Dirty::FramebufferSRGB] = true;
+ (*flags)[OpenGL::Dirty::FramebufferSRGB] = true;
}
void NotifyLogicOp() {
- flags[OpenGL::Dirty::LogicOp] = true;
+ (*flags)[OpenGL::Dirty::LogicOp] = true;
}
void NotifyClipControl() {
- flags[OpenGL::Dirty::ClipControl] = true;
+ (*flags)[OpenGL::Dirty::ClipControl] = true;
}
void NotifyAlphaTest() {
- flags[OpenGL::Dirty::AlphaTest] = true;
+ (*flags)[OpenGL::Dirty::AlphaTest] = true;
}
+ void NotifyRange(u8 start, u8 end) {
+ for (auto flag = start; flag <= end; flag++) {
+ (*flags)[flag] = true;
+ }
+ }
+
+ void SetupTables(Tegra::Control::ChannelState& channel_state);
+
+ void ChangeChannel(Tegra::Control::ChannelState& channel_state);
+
+ void InvalidateState();
+
private:
- Tegra::Engines::Maxwell3D::DirtyState::Flags& flags;
+ Tegra::Engines::Maxwell3D::DirtyState::Flags* flags;
+ Tegra::Engines::Maxwell3D::DirtyState::Flags default_flags{};
GLuint framebuffer = 0;
GLuint index_buffer = 0;
diff --git a/src/video_core/renderer_opengl/maxwell_to_gl.h b/src/video_core/renderer_opengl/maxwell_to_gl.h
index dfe7f26ca..004421236 100644
--- a/src/video_core/renderer_opengl/maxwell_to_gl.h
+++ b/src/video_core/renderer_opengl/maxwell_to_gl.h
@@ -87,7 +87,7 @@ constexpr std::array<FormatTuple, VideoCore::Surface::MaxPixelFormat> FORMAT_TAB
{GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT}, // BC3_SRGB
{GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM}, // BC7_SRGB
{GL_RGBA4, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4_REV}, // A4B4G4R4_UNORM
- {GL_R8, GL_RED, GL_UNSIGNED_BYTE}, // R4G4_UNORM
+ {GL_R8, GL_RED, GL_UNSIGNED_BYTE}, // G4R4_UNORM
{GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR}, // ASTC_2D_4X4_SRGB
{GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR}, // ASTC_2D_8X8_SRGB
{GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR}, // ASTC_2D_8X5_SRGB
diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp
index 34f3f7a67..8bd5eba7e 100644
--- a/src/video_core/renderer_opengl/renderer_opengl.cpp
+++ b/src/video_core/renderer_opengl/renderer_opengl.cpp
@@ -131,7 +131,7 @@ RendererOpenGL::RendererOpenGL(Core::TelemetrySession& telemetry_session_,
Core::Memory::Memory& cpu_memory_, Tegra::GPU& gpu_,
std::unique_ptr<Core::Frontend::GraphicsContext> context_)
: RendererBase{emu_window_, std::move(context_)}, telemetry_session{telemetry_session_},
- emu_window{emu_window_}, cpu_memory{cpu_memory_}, gpu{gpu_}, state_tracker{gpu},
+ emu_window{emu_window_}, cpu_memory{cpu_memory_}, gpu{gpu_}, state_tracker{},
program_manager{device},
rasterizer(emu_window, gpu, cpu_memory, device, screen_info, program_manager, state_tracker) {
if (Settings::values.renderer_debug && GLAD_GL_KHR_debug) {
diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
index 6703b8e68..e7104d377 100644
--- a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
+++ b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
@@ -184,7 +184,7 @@ struct FormatTuple {
{VK_FORMAT_BC3_SRGB_BLOCK}, // BC3_SRGB
{VK_FORMAT_BC7_SRGB_BLOCK}, // BC7_SRGB
{VK_FORMAT_R4G4B4A4_UNORM_PACK16, Attachable}, // A4B4G4R4_UNORM
- {VK_FORMAT_R4G4_UNORM_PACK8}, // R4G4_UNORM
+ {VK_FORMAT_R4G4_UNORM_PACK8}, // G4R4_UNORM
{VK_FORMAT_ASTC_4x4_SRGB_BLOCK}, // ASTC_2D_4X4_SRGB
{VK_FORMAT_ASTC_8x8_SRGB_BLOCK}, // ASTC_2D_8X8_SRGB
{VK_FORMAT_ASTC_8x5_SRGB_BLOCK}, // ASTC_2D_8X5_SRGB
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
index 7c78d0299..d8131232a 100644
--- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp
+++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
@@ -102,13 +102,13 @@ RendererVulkan::RendererVulkan(Core::TelemetrySession& telemetry_session_,
debug_callback(Settings::values.renderer_debug ? CreateDebugCallback(instance) : nullptr),
surface(CreateSurface(instance, render_window)),
device(CreateDevice(instance, dld, *surface)), memory_allocator(device, false),
- state_tracker(gpu), scheduler(device, state_tracker),
+ state_tracker(), scheduler(device, state_tracker),
swapchain(*surface, device, scheduler, render_window.GetFramebufferLayout().width,
render_window.GetFramebufferLayout().height, false),
blit_screen(cpu_memory, render_window, device, memory_allocator, swapchain, scheduler,
screen_info),
- rasterizer(render_window, gpu, gpu.MemoryManager(), cpu_memory, screen_info, device,
- memory_allocator, state_tracker, scheduler) {
+ rasterizer(render_window, gpu, cpu_memory, screen_info, device, memory_allocator,
+ state_tracker, scheduler) {
Report();
} catch (const vk::Exception& exception) {
LOG_ERROR(Render_Vulkan, "Vulkan initialization failed with error: {}", exception.what());
@@ -142,7 +142,7 @@ void RendererVulkan::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
const auto recreate_swapchain = [&] {
if (!has_been_recreated) {
has_been_recreated = true;
- scheduler.WaitWorker();
+ scheduler.Finish();
}
const Layout::FramebufferLayout layout = render_window.GetFramebufferLayout();
swapchain.Create(layout.width, layout.height, is_srgb);
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.cpp b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
index 444c29f68..cb7fa2078 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.cpp
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
@@ -145,6 +145,11 @@ VkSemaphore BlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer,
// Finish any pending renderpass
scheduler.RequestOutsideRenderPassOperationContext();
+ if (const auto swapchain_images = swapchain.GetImageCount(); swapchain_images != image_count) {
+ image_count = swapchain_images;
+ Recreate();
+ }
+
const std::size_t image_index = swapchain.GetImageIndex();
scheduler.Wait(resource_ticks[image_index]);
@@ -448,15 +453,15 @@ vk::Framebuffer BlitScreen::CreateFramebuffer(const VkImageView& image_view, VkE
void BlitScreen::CreateStaticResources() {
CreateShaders();
+ CreateSampler();
+}
+
+void BlitScreen::CreateDynamicResources() {
CreateSemaphores();
CreateDescriptorPool();
CreateDescriptorSetLayout();
CreateDescriptorSets();
CreatePipelineLayout();
- CreateSampler();
-}
-
-void BlitScreen::CreateDynamicResources() {
CreateRenderPass();
CreateFramebuffers();
CreateGraphicsPipeline();
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.h b/src/video_core/renderer_vulkan/vk_blit_screen.h
index b8c67bef0..29e2ea925 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.h
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.h
@@ -109,7 +109,7 @@ private:
MemoryAllocator& memory_allocator;
Swapchain& swapchain;
Scheduler& scheduler;
- const std::size_t image_count;
+ std::size_t image_count;
const ScreenInfo& screen_info;
vk::ShaderModule vertex_shader;
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
index f17a5ccd6..241d7573e 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
@@ -26,8 +26,6 @@
namespace Vulkan {
-using Tegra::Texture::SWIZZLE_TABLE;
-
namespace {
constexpr u32 ASTC_BINDING_INPUT_BUFFER = 0;
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
index 6447210e2..7906e11a8 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
@@ -126,8 +126,8 @@ void ComputePipeline::Configure(Tegra::Engines::KeplerCompute& kepler_compute,
const u32 secondary_offset{desc.secondary_cbuf_offset + index_offset};
const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].Address() +
secondary_offset};
- const u32 lhs_raw{gpu_memory.Read<u32>(addr)};
- const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)};
+ const u32 lhs_raw{gpu_memory.Read<u32>(addr) << desc.shift_left};
+ const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr) << desc.secondary_shift_left};
return TexturePair(lhs_raw | rhs_raw, via_header_index);
}
}
diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.cpp b/src/video_core/renderer_vulkan/vk_fence_manager.cpp
index c249b34d4..0214b103a 100644
--- a/src/video_core/renderer_vulkan/vk_fence_manager.cpp
+++ b/src/video_core/renderer_vulkan/vk_fence_manager.cpp
@@ -11,11 +11,8 @@
namespace Vulkan {
-InnerFence::InnerFence(Scheduler& scheduler_, u32 payload_, bool is_stubbed_)
- : FenceBase{payload_, is_stubbed_}, scheduler{scheduler_} {}
-
-InnerFence::InnerFence(Scheduler& scheduler_, GPUVAddr address_, u32 payload_, bool is_stubbed_)
- : FenceBase{address_, payload_, is_stubbed_}, scheduler{scheduler_} {}
+InnerFence::InnerFence(Scheduler& scheduler_, bool is_stubbed_)
+ : FenceBase{is_stubbed_}, scheduler{scheduler_} {}
InnerFence::~InnerFence() = default;
@@ -48,12 +45,8 @@ FenceManager::FenceManager(VideoCore::RasterizerInterface& rasterizer_, Tegra::G
: GenericFenceManager{rasterizer_, gpu_, texture_cache_, buffer_cache_, query_cache_},
scheduler{scheduler_} {}
-Fence FenceManager::CreateFence(u32 value, bool is_stubbed) {
- return std::make_shared<InnerFence>(scheduler, value, is_stubbed);
-}
-
-Fence FenceManager::CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) {
- return std::make_shared<InnerFence>(scheduler, addr, value, is_stubbed);
+Fence FenceManager::CreateFence(bool is_stubbed) {
+ return std::make_shared<InnerFence>(scheduler, is_stubbed);
}
void FenceManager::QueueFence(Fence& fence) {
diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.h b/src/video_core/renderer_vulkan/vk_fence_manager.h
index 7c0bbd80a..7fe2afcd9 100644
--- a/src/video_core/renderer_vulkan/vk_fence_manager.h
+++ b/src/video_core/renderer_vulkan/vk_fence_manager.h
@@ -25,8 +25,7 @@ class Scheduler;
class InnerFence : public VideoCommon::FenceBase {
public:
- explicit InnerFence(Scheduler& scheduler_, u32 payload_, bool is_stubbed_);
- explicit InnerFence(Scheduler& scheduler_, GPUVAddr address_, u32 payload_, bool is_stubbed_);
+ explicit InnerFence(Scheduler& scheduler_, bool is_stubbed_);
~InnerFence();
void Queue();
@@ -50,8 +49,7 @@ public:
QueryCache& query_cache, const Device& device, Scheduler& scheduler);
protected:
- Fence CreateFence(u32 value, bool is_stubbed) override;
- Fence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) override;
+ Fence CreateFence(bool is_stubbed) override;
void QueueFence(Fence& fence) override;
bool IsFenceSignaled(Fence& fence) const override;
void WaitFence(Fence& fence) override;
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
index 5aca8f038..f47786f48 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
@@ -215,15 +215,14 @@ ConfigureFuncPtr ConfigureFunc(const std::array<vk::ShaderModule, NUM_STAGES>& m
} // Anonymous namespace
GraphicsPipeline::GraphicsPipeline(
- Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_, Scheduler& scheduler_,
- BufferCache& buffer_cache_, TextureCache& texture_cache_,
+ Scheduler& scheduler_, BufferCache& buffer_cache_, TextureCache& texture_cache_,
VideoCore::ShaderNotify* shader_notify, const Device& device_, DescriptorPool& descriptor_pool,
UpdateDescriptorQueue& update_descriptor_queue_, Common::ThreadWorker* worker_thread,
PipelineStatistics* pipeline_statistics, RenderPassCache& render_pass_cache,
const GraphicsPipelineCacheKey& key_, std::array<vk::ShaderModule, NUM_STAGES> stages,
const std::array<const Shader::Info*, NUM_STAGES>& infos)
- : key{key_}, maxwell3d{maxwell3d_}, gpu_memory{gpu_memory_}, device{device_},
- texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, scheduler{scheduler_},
+ : key{key_}, device{device_}, texture_cache{texture_cache_},
+ buffer_cache{buffer_cache_}, scheduler{scheduler_},
update_descriptor_queue{update_descriptor_queue_}, spv_modules{std::move(stages)} {
if (shader_notify) {
shader_notify->MarkShaderBuilding();
@@ -288,7 +287,7 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
buffer_cache.SetUniformBuffersState(enabled_uniform_buffer_masks, &uniform_buffer_sizes);
- const auto& regs{maxwell3d.regs};
+ const auto& regs{maxwell3d->regs};
const bool via_header_index{regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex};
const auto config_stage{[&](size_t stage) LAMBDA_FORCEINLINE {
const Shader::Info& info{stage_infos[stage]};
@@ -302,7 +301,7 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
++ssbo_index;
}
}
- const auto& cbufs{maxwell3d.state.shader_stages[stage].const_buffers};
+ const auto& cbufs{maxwell3d->state.shader_stages[stage].const_buffers};
const auto read_handle{[&](const auto& desc, u32 index) {
ASSERT(cbufs[desc.cbuf_index].enabled);
const u32 index_offset{index << desc.size_shift};
@@ -315,13 +314,14 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
const u32 second_offset{desc.secondary_cbuf_offset + index_offset};
const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].address +
second_offset};
- const u32 lhs_raw{gpu_memory.Read<u32>(addr)};
- const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)};
+ const u32 lhs_raw{gpu_memory->Read<u32>(addr) << desc.shift_left};
+ const u32 rhs_raw{gpu_memory->Read<u32>(separate_addr)
+ << desc.secondary_shift_left};
const u32 raw{lhs_raw | rhs_raw};
return TexturePair(raw, via_header_index);
}
}
- return TexturePair(gpu_memory.Read<u32>(addr), via_header_index);
+ return TexturePair(gpu_memory->Read<u32>(addr), via_header_index);
}};
const auto add_image{[&](const auto& desc, bool blacklist) LAMBDA_FORCEINLINE {
for (u32 index = 0; index < desc.count; ++index) {
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
index e8949a9ab..85602592b 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
@@ -69,15 +69,16 @@ class GraphicsPipeline {
static constexpr size_t NUM_STAGES = Tegra::Engines::Maxwell3D::Regs::MaxShaderStage;
public:
- explicit GraphicsPipeline(
- Tegra::Engines::Maxwell3D& maxwell3d, Tegra::MemoryManager& gpu_memory,
- Scheduler& scheduler, BufferCache& buffer_cache, TextureCache& texture_cache,
- VideoCore::ShaderNotify* shader_notify, const Device& device,
- DescriptorPool& descriptor_pool, UpdateDescriptorQueue& update_descriptor_queue,
- Common::ThreadWorker* worker_thread, PipelineStatistics* pipeline_statistics,
- RenderPassCache& render_pass_cache, const GraphicsPipelineCacheKey& key,
- std::array<vk::ShaderModule, NUM_STAGES> stages,
- const std::array<const Shader::Info*, NUM_STAGES>& infos);
+ explicit GraphicsPipeline(Scheduler& scheduler, BufferCache& buffer_cache,
+ TextureCache& texture_cache, VideoCore::ShaderNotify* shader_notify,
+ const Device& device, DescriptorPool& descriptor_pool,
+ UpdateDescriptorQueue& update_descriptor_queue,
+ Common::ThreadWorker* worker_thread,
+ PipelineStatistics* pipeline_statistics,
+ RenderPassCache& render_pass_cache,
+ const GraphicsPipelineCacheKey& key,
+ std::array<vk::ShaderModule, NUM_STAGES> stages,
+ const std::array<const Shader::Info*, NUM_STAGES>& infos);
GraphicsPipeline& operator=(GraphicsPipeline&&) noexcept = delete;
GraphicsPipeline(GraphicsPipeline&&) noexcept = delete;
@@ -109,6 +110,11 @@ public:
return [](GraphicsPipeline* pl, bool is_indexed) { pl->ConfigureImpl<Spec>(is_indexed); };
}
+ void SetEngine(Tegra::Engines::Maxwell3D* maxwell3d_, Tegra::MemoryManager* gpu_memory_) {
+ maxwell3d = maxwell3d_;
+ gpu_memory = gpu_memory_;
+ }
+
private:
template <typename Spec>
void ConfigureImpl(bool is_indexed);
@@ -120,8 +126,8 @@ private:
void Validate();
const GraphicsPipelineCacheKey key;
- Tegra::Engines::Maxwell3D& maxwell3d;
- Tegra::MemoryManager& gpu_memory;
+ Tegra::Engines::Maxwell3D* maxwell3d;
+ Tegra::MemoryManager* gpu_memory;
const Device& device;
TextureCache& texture_cache;
BufferCache& buffer_cache;
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index accbfc8e1..732e7b6f2 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -259,17 +259,15 @@ bool GraphicsPipelineCacheKey::operator==(const GraphicsPipelineCacheKey& rhs) c
return std::memcmp(&rhs, this, Size()) == 0;
}
-PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, Tegra::Engines::Maxwell3D& maxwell3d_,
- Tegra::Engines::KeplerCompute& kepler_compute_,
- Tegra::MemoryManager& gpu_memory_, const Device& device_,
+PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, const Device& device_,
Scheduler& scheduler_, DescriptorPool& descriptor_pool_,
UpdateDescriptorQueue& update_descriptor_queue_,
RenderPassCache& render_pass_cache_, BufferCache& buffer_cache_,
TextureCache& texture_cache_, VideoCore::ShaderNotify& shader_notify_)
- : VideoCommon::ShaderCache{rasterizer_, gpu_memory_, maxwell3d_, kepler_compute_},
- device{device_}, scheduler{scheduler_}, descriptor_pool{descriptor_pool_},
- update_descriptor_queue{update_descriptor_queue_}, render_pass_cache{render_pass_cache_},
- buffer_cache{buffer_cache_}, texture_cache{texture_cache_}, shader_notify{shader_notify_},
+ : VideoCommon::ShaderCache{rasterizer_}, device{device_}, scheduler{scheduler_},
+ descriptor_pool{descriptor_pool_}, update_descriptor_queue{update_descriptor_queue_},
+ render_pass_cache{render_pass_cache_}, buffer_cache{buffer_cache_},
+ texture_cache{texture_cache_}, shader_notify{shader_notify_},
use_asynchronous_shaders{Settings::values.use_asynchronous_shaders.GetValue()},
workers(std::max(std::thread::hardware_concurrency(), 2U) - 1, "VkPipelineBuilder"),
serialization_thread(1, "VkPipelineSerialization") {
@@ -337,7 +335,7 @@ GraphicsPipeline* PipelineCache::CurrentGraphicsPipeline() {
current_pipeline = nullptr;
return nullptr;
}
- graphics_key.state.Refresh(maxwell3d, device.IsExtExtendedDynamicStateSupported(),
+ graphics_key.state.Refresh(*maxwell3d, device.IsExtExtendedDynamicStateSupported(),
device.IsExtVertexInputDynamicStateSupported());
if (current_pipeline) {
@@ -357,7 +355,7 @@ ComputePipeline* PipelineCache::CurrentComputePipeline() {
if (!shader) {
return nullptr;
}
- const auto& qmd{kepler_compute.launch_description};
+ const auto& qmd{kepler_compute->launch_description};
const ComputePipelineCacheKey key{
.unique_hash = shader->unique_hash,
.shared_memory_size = qmd.shared_alloc,
@@ -486,13 +484,13 @@ GraphicsPipeline* PipelineCache::BuiltPipeline(GraphicsPipeline* pipeline) const
}
// If something is using depth, we can assume that games are not rendering anything which
// will be used one time.
- if (maxwell3d.regs.zeta_enable) {
+ if (maxwell3d->regs.zeta_enable) {
return nullptr;
}
// If games are using a small index count, we can assume these are full screen quads.
// Usually these shaders are only used once for building textures so we can assume they
// can't be built async
- if (maxwell3d.regs.index_array.count <= 6 || maxwell3d.regs.vertex_buffer.count <= 6) {
+ if (maxwell3d->regs.index_array.count <= 6 || maxwell3d->regs.vertex_buffer.count <= 6) {
return pipeline;
}
return nullptr;
@@ -557,10 +555,10 @@ std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline(
previous_stage = &program;
}
Common::ThreadWorker* const thread_worker{build_in_parallel ? &workers : nullptr};
- return std::make_unique<GraphicsPipeline>(
- maxwell3d, gpu_memory, scheduler, buffer_cache, texture_cache, &shader_notify, device,
- descriptor_pool, update_descriptor_queue, thread_worker, statistics, render_pass_cache, key,
- std::move(modules), infos);
+ return std::make_unique<GraphicsPipeline>(scheduler, buffer_cache, texture_cache,
+ &shader_notify, device, descriptor_pool,
+ update_descriptor_queue, thread_worker, statistics,
+ render_pass_cache, key, std::move(modules), infos);
} catch (const Shader::Exception& exception) {
LOG_ERROR(Render_Vulkan, "{}", exception.what());
@@ -592,9 +590,9 @@ std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline() {
std::unique_ptr<ComputePipeline> PipelineCache::CreateComputePipeline(
const ComputePipelineCacheKey& key, const ShaderInfo* shader) {
- const GPUVAddr program_base{kepler_compute.regs.code_loc.Address()};
- const auto& qmd{kepler_compute.launch_description};
- ComputeEnvironment env{kepler_compute, gpu_memory, program_base, qmd.program_start};
+ const GPUVAddr program_base{kepler_compute->regs.code_loc.Address()};
+ const auto& qmd{kepler_compute->launch_description};
+ ComputeEnvironment env{*kepler_compute, *gpu_memory, program_base, qmd.program_start};
env.SetCachedSize(shader->size_bytes);
main_pools.ReleaseContents();
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.h b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
index 127957dbf..61f9e9366 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.h
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
@@ -100,10 +100,8 @@ struct ShaderPools {
class PipelineCache : public VideoCommon::ShaderCache {
public:
- explicit PipelineCache(RasterizerVulkan& rasterizer, Tegra::Engines::Maxwell3D& maxwell3d,
- Tegra::Engines::KeplerCompute& kepler_compute,
- Tegra::MemoryManager& gpu_memory, const Device& device,
- Scheduler& scheduler, DescriptorPool& descriptor_pool,
+ explicit PipelineCache(RasterizerVulkan& rasterizer, const Device& device, Scheduler& scheduler,
+ DescriptorPool& descriptor_pool,
UpdateDescriptorQueue& update_descriptor_queue,
RenderPassCache& render_pass_cache, BufferCache& buffer_cache,
TextureCache& texture_cache, VideoCore::ShaderNotify& shader_notify_);
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp
index 2b859c6b8..7cb02631c 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp
@@ -65,10 +65,9 @@ void QueryPool::Reserve(std::pair<VkQueryPool, u32> query) {
usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false;
}
-QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_,
- Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_,
- const Device& device_, Scheduler& scheduler_)
- : QueryCacheBase{rasterizer_, maxwell3d_, gpu_memory_}, device{device_}, scheduler{scheduler_},
+QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_, const Device& device_,
+ Scheduler& scheduler_)
+ : QueryCacheBase{rasterizer_}, device{device_}, scheduler{scheduler_},
query_pools{
QueryPool{device_, scheduler_, QueryType::SamplesPassed},
} {}
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.h b/src/video_core/renderer_vulkan/vk_query_cache.h
index b0d86c4f8..26762ee09 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.h
+++ b/src/video_core/renderer_vulkan/vk_query_cache.h
@@ -52,9 +52,8 @@ private:
class QueryCache final
: public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter> {
public:
- explicit QueryCache(VideoCore::RasterizerInterface& rasterizer_,
- Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_,
- const Device& device_, Scheduler& scheduler_);
+ explicit QueryCache(VideoCore::RasterizerInterface& rasterizer_, const Device& device_,
+ Scheduler& scheduler_);
~QueryCache();
std::pair<VkQueryPool, u32> AllocateQuery(VideoCore::QueryType type);
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 7e40c2df1..acfd5da7d 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -11,6 +11,7 @@
#include "common/microprofile.h"
#include "common/scope_exit.h"
#include "common/settings.h"
+#include "video_core/control/channel_state.h"
#include "video_core/engines/kepler_compute.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/renderer_vulkan/blit_image.h"
@@ -148,14 +149,11 @@ DrawParams MakeDrawParams(const Maxwell& regs, u32 num_instances, bool is_instan
} // Anonymous namespace
RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_,
- Tegra::MemoryManager& gpu_memory_,
Core::Memory::Memory& cpu_memory_, ScreenInfo& screen_info_,
const Device& device_, MemoryAllocator& memory_allocator_,
StateTracker& state_tracker_, Scheduler& scheduler_)
- : RasterizerAccelerated{cpu_memory_}, gpu{gpu_},
- gpu_memory{gpu_memory_}, maxwell3d{gpu.Maxwell3D()}, kepler_compute{gpu.KeplerCompute()},
- screen_info{screen_info_}, device{device_}, memory_allocator{memory_allocator_},
- state_tracker{state_tracker_}, scheduler{scheduler_},
+ : RasterizerAccelerated{cpu_memory_}, gpu{gpu_}, screen_info{screen_info_}, device{device_},
+ memory_allocator{memory_allocator_}, state_tracker{state_tracker_}, scheduler{scheduler_},
staging_pool(device, memory_allocator, scheduler), descriptor_pool(device, scheduler),
update_descriptor_queue(device, scheduler),
blit_image(device, scheduler, state_tracker, descriptor_pool),
@@ -165,14 +163,13 @@ RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra
memory_allocator, staging_pool,
blit_image, astc_decoder_pass,
render_pass_cache},
- texture_cache(texture_cache_runtime, *this, maxwell3d, kepler_compute, gpu_memory),
+ texture_cache(texture_cache_runtime, *this),
buffer_cache_runtime(device, memory_allocator, scheduler, staging_pool,
update_descriptor_queue, descriptor_pool),
- buffer_cache(*this, maxwell3d, kepler_compute, gpu_memory, cpu_memory_, buffer_cache_runtime),
- pipeline_cache(*this, maxwell3d, kepler_compute, gpu_memory, device, scheduler,
- descriptor_pool, update_descriptor_queue, render_pass_cache, buffer_cache,
- texture_cache, gpu.ShaderNotify()),
- query_cache{*this, maxwell3d, gpu_memory, device, scheduler}, accelerate_dma{buffer_cache},
+ buffer_cache(*this, cpu_memory_, buffer_cache_runtime),
+ pipeline_cache(*this, device, scheduler, descriptor_pool, update_descriptor_queue,
+ render_pass_cache, buffer_cache, texture_cache, gpu.ShaderNotify()),
+ query_cache{*this, device, scheduler}, accelerate_dma{buffer_cache},
fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache, device, scheduler),
wfi_event(device.GetLogical().CreateEvent()) {
scheduler.SetQueryCache(query_cache);
@@ -193,14 +190,16 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
return;
}
std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+ // Update the engine, as the bound channel may be different.
+ pipeline->SetEngine(maxwell3d, gpu_memory);
pipeline->Configure(is_indexed);
BeginTransformFeedback();
UpdateDynamicStates();
- const auto& regs{maxwell3d.regs};
- const u32 num_instances{maxwell3d.mme_draw.instance_count};
+ const auto& regs{maxwell3d->regs};
+ const u32 num_instances{maxwell3d->mme_draw.instance_count};
const DrawParams draw_params{MakeDrawParams(regs, num_instances, is_instanced, is_indexed)};
scheduler.Record([draw_params](vk::CommandBuffer cmdbuf) {
if (draw_params.is_indexed) {
@@ -218,14 +217,14 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
void RasterizerVulkan::Clear() {
MICROPROFILE_SCOPE(Vulkan_Clearing);
- if (!maxwell3d.ShouldExecute()) {
+ if (!maxwell3d->ShouldExecute()) {
return;
}
FlushWork();
query_cache.UpdateCounters();
- auto& regs = maxwell3d.regs;
+ auto& regs = maxwell3d->regs;
const bool use_color = regs.clear_buffers.R || regs.clear_buffers.G || regs.clear_buffers.B ||
regs.clear_buffers.A;
const bool use_depth = regs.clear_buffers.Z;
@@ -248,8 +247,15 @@ void RasterizerVulkan::Clear() {
}
UpdateViewportsState(regs);
+ VkRect2D default_scissor;
+ default_scissor.offset.x = 0;
+ default_scissor.offset.y = 0;
+ default_scissor.extent.width = std::numeric_limits<s32>::max();
+ default_scissor.extent.height = std::numeric_limits<s32>::max();
+
VkClearRect clear_rect{
- .rect = GetScissorState(regs, 0, up_scale, down_shift),
+ .rect = regs.clear_flags.scissor ? GetScissorState(regs, 0, up_scale, down_shift)
+ : default_scissor,
.baseArrayLayer = regs.clear_buffers.layer,
.layerCount = 1,
};
@@ -339,9 +345,9 @@ void RasterizerVulkan::DispatchCompute() {
return;
}
std::scoped_lock lock{texture_cache.mutex, buffer_cache.mutex};
- pipeline->Configure(kepler_compute, gpu_memory, scheduler, buffer_cache, texture_cache);
+ pipeline->Configure(*kepler_compute, *gpu_memory, scheduler, buffer_cache, texture_cache);
- const auto& qmd{kepler_compute.launch_description};
+ const auto& qmd{kepler_compute->launch_description};
const std::array<u32, 3> dim{qmd.grid_dim_x, qmd.grid_dim_y, qmd.grid_dim_z};
scheduler.RequestOutsideRenderPassOperationContext();
scheduler.Record([dim](vk::CommandBuffer cmdbuf) { cmdbuf.Dispatch(dim[0], dim[1], dim[2]); });
@@ -422,7 +428,7 @@ void RasterizerVulkan::OnCPUWrite(VAddr addr, u64 size) {
}
}
-void RasterizerVulkan::SyncGuestHost() {
+void RasterizerVulkan::InvalidateGPUCache() {
pipeline_cache.SyncGuestHost();
{
std::scoped_lock lock{buffer_cache.mutex};
@@ -442,40 +448,30 @@ void RasterizerVulkan::UnmapMemory(VAddr addr, u64 size) {
pipeline_cache.OnCPUWrite(addr, size);
}
-void RasterizerVulkan::ModifyGPUMemory(GPUVAddr addr, u64 size) {
+void RasterizerVulkan::ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) {
{
std::scoped_lock lock{texture_cache.mutex};
- texture_cache.UnmapGPUMemory(addr, size);
+ texture_cache.UnmapGPUMemory(as_id, addr, size);
}
}
-void RasterizerVulkan::SignalSemaphore(GPUVAddr addr, u32 value) {
- if (!gpu.IsAsync()) {
- gpu_memory.Write<u32>(addr, value);
- return;
- }
- fence_manager.SignalSemaphore(addr, value);
+void RasterizerVulkan::SignalFence(std::function<void()>&& func) {
+ fence_manager.SignalFence(std::move(func));
+}
+
+void RasterizerVulkan::SyncOperation(std::function<void()>&& func) {
+ fence_manager.SyncOperation(std::move(func));
}
void RasterizerVulkan::SignalSyncPoint(u32 value) {
- if (!gpu.IsAsync()) {
- gpu.IncrementSyncPoint(value);
- return;
- }
fence_manager.SignalSyncPoint(value);
}
void RasterizerVulkan::SignalReference() {
- if (!gpu.IsAsync()) {
- return;
- }
fence_manager.SignalOrdering();
}
void RasterizerVulkan::ReleaseFences() {
- if (!gpu.IsAsync()) {
- return;
- }
fence_manager.WaitPendingFences();
}
@@ -552,13 +548,13 @@ Tegra::Engines::AccelerateDMAInterface& RasterizerVulkan::AccessAccelerateDMA()
}
void RasterizerVulkan::AccelerateInlineToMemory(GPUVAddr address, size_t copy_size,
- std::span<u8> memory) {
- auto cpu_addr = gpu_memory.GpuToCpuAddress(address);
+ std::span<const u8> memory) {
+ auto cpu_addr = gpu_memory->GpuToCpuAddress(address);
if (!cpu_addr) [[unlikely]] {
- gpu_memory.WriteBlock(address, memory.data(), copy_size);
+ gpu_memory->WriteBlock(address, memory.data(), copy_size);
return;
}
- gpu_memory.WriteBlockUnsafe(address, memory.data(), copy_size);
+ gpu_memory->WriteBlockUnsafe(address, memory.data(), copy_size);
{
std::unique_lock<std::mutex> lock{buffer_cache.mutex};
if (!buffer_cache.InlineMemory(*cpu_addr, copy_size, memory)) {
@@ -627,7 +623,7 @@ bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64
}
void RasterizerVulkan::UpdateDynamicStates() {
- auto& regs = maxwell3d.regs;
+ auto& regs = maxwell3d->regs;
UpdateViewportsState(regs);
UpdateScissorsState(regs);
UpdateDepthBias(regs);
@@ -651,7 +647,7 @@ void RasterizerVulkan::UpdateDynamicStates() {
}
void RasterizerVulkan::BeginTransformFeedback() {
- const auto& regs = maxwell3d.regs;
+ const auto& regs = maxwell3d->regs;
if (regs.tfb_enabled == 0) {
return;
}
@@ -667,7 +663,7 @@ void RasterizerVulkan::BeginTransformFeedback() {
}
void RasterizerVulkan::EndTransformFeedback() {
- const auto& regs = maxwell3d.regs;
+ const auto& regs = maxwell3d->regs;
if (regs.tfb_enabled == 0) {
return;
}
@@ -917,7 +913,7 @@ void RasterizerVulkan::UpdateStencilTestEnable(Tegra::Engines::Maxwell3D::Regs&
}
void RasterizerVulkan::UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs) {
- auto& dirty{maxwell3d.dirty.flags};
+ auto& dirty{maxwell3d->dirty.flags};
if (!dirty[Dirty::VertexInput]) {
return;
}
@@ -974,4 +970,41 @@ void RasterizerVulkan::UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs)
});
}
+void RasterizerVulkan::InitializeChannel(Tegra::Control::ChannelState& channel) {
+ CreateChannel(channel);
+ {
+ std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+ texture_cache.CreateChannel(channel);
+ buffer_cache.CreateChannel(channel);
+ }
+ pipeline_cache.CreateChannel(channel);
+ query_cache.CreateChannel(channel);
+ state_tracker.SetupTables(channel);
+}
+
+void RasterizerVulkan::BindChannel(Tegra::Control::ChannelState& channel) {
+ const s32 channel_id = channel.bind_id;
+ BindToChannel(channel_id);
+ {
+ std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+ texture_cache.BindToChannel(channel_id);
+ buffer_cache.BindToChannel(channel_id);
+ }
+ pipeline_cache.BindToChannel(channel_id);
+ query_cache.BindToChannel(channel_id);
+ state_tracker.ChangeChannel(channel);
+ state_tracker.InvalidateState();
+}
+
+void RasterizerVulkan::ReleaseChannel(s32 channel_id) {
+ EraseChannel(channel_id);
+ {
+ std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+ texture_cache.EraseChannel(channel_id);
+ buffer_cache.EraseChannel(channel_id);
+ }
+ pipeline_cache.EraseChannel(channel_id);
+ query_cache.EraseChannel(channel_id);
+}
+
} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index 0370ea39b..4cde3c983 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -8,6 +8,7 @@
#include <boost/container/static_vector.hpp>
#include "common/common_types.h"
+#include "video_core/control/channel_state_cache.h"
#include "video_core/engines/maxwell_dma.h"
#include "video_core/rasterizer_accelerated.h"
#include "video_core/rasterizer_interface.h"
@@ -54,13 +55,13 @@ private:
BufferCache& buffer_cache;
};
-class RasterizerVulkan final : public VideoCore::RasterizerAccelerated {
+class RasterizerVulkan final : public VideoCore::RasterizerAccelerated,
+ protected VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
public:
explicit RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_,
- Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_,
- ScreenInfo& screen_info_, const Device& device_,
- MemoryAllocator& memory_allocator_, StateTracker& state_tracker_,
- Scheduler& scheduler_);
+ Core::Memory::Memory& cpu_memory_, ScreenInfo& screen_info_,
+ const Device& device_, MemoryAllocator& memory_allocator_,
+ StateTracker& state_tracker_, Scheduler& scheduler_);
~RasterizerVulkan() override;
void Draw(bool is_indexed, bool is_instanced) override;
@@ -75,10 +76,11 @@ public:
bool MustFlushRegion(VAddr addr, u64 size) override;
void InvalidateRegion(VAddr addr, u64 size) override;
void OnCPUWrite(VAddr addr, u64 size) override;
- void SyncGuestHost() override;
+ void InvalidateGPUCache() override;
void UnmapMemory(VAddr addr, u64 size) override;
- void ModifyGPUMemory(GPUVAddr addr, u64 size) override;
- void SignalSemaphore(GPUVAddr addr, u32 value) override;
+ void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) override;
+ void SignalFence(std::function<void()>&& func) override;
+ void SyncOperation(std::function<void()>&& func) override;
void SignalSyncPoint(u32 value) override;
void SignalReference() override;
void ReleaseFences() override;
@@ -93,12 +95,18 @@ public:
const Tegra::Engines::Fermi2D::Config& copy_config) override;
Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override;
void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size,
- std::span<u8> memory) override;
+ std::span<const u8> memory) override;
bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr,
u32 pixel_stride) override;
void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
const VideoCore::DiskResourceLoadCallback& callback) override;
+ void InitializeChannel(Tegra::Control::ChannelState& channel) override;
+
+ void BindChannel(Tegra::Control::ChannelState& channel) override;
+
+ void ReleaseChannel(s32 channel_id) override;
+
private:
static constexpr size_t MAX_TEXTURES = 192;
static constexpr size_t MAX_IMAGES = 48;
@@ -134,9 +142,6 @@ private:
void UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs);
Tegra::GPU& gpu;
- Tegra::MemoryManager& gpu_memory;
- Tegra::Engines::Maxwell3D& maxwell3d;
- Tegra::Engines::KeplerCompute& kepler_compute;
ScreenInfo& screen_info;
const Device& device;
diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.cpp b/src/video_core/renderer_vulkan/vk_state_tracker.cpp
index 9ad096431..f234e1a31 100644
--- a/src/video_core/renderer_vulkan/vk_state_tracker.cpp
+++ b/src/video_core/renderer_vulkan/vk_state_tracker.cpp
@@ -7,9 +7,9 @@
#include "common/common_types.h"
#include "core/core.h"
+#include "video_core/control/channel_state.h"
#include "video_core/dirty_flags.h"
#include "video_core/engines/maxwell_3d.h"
-#include "video_core/gpu.h"
#include "video_core/renderer_vulkan/vk_state_tracker.h"
#define OFF(field_name) MAXWELL3D_REG_INDEX(field_name)
@@ -174,9 +174,8 @@ void SetupDirtyVertexBindings(Tables& tables) {
}
} // Anonymous namespace
-StateTracker::StateTracker(Tegra::GPU& gpu)
- : flags{gpu.Maxwell3D().dirty.flags}, invalidation_flags{MakeInvalidationFlags()} {
- auto& tables{gpu.Maxwell3D().dirty.tables};
+void StateTracker::SetupTables(Tegra::Control::ChannelState& channel_state) {
+ auto& tables{channel_state.maxwell_3d->dirty.tables};
SetupDirtyFlags(tables);
SetupDirtyViewports(tables);
SetupDirtyScissors(tables);
@@ -199,4 +198,15 @@ StateTracker::StateTracker(Tegra::GPU& gpu)
SetupDirtyVertexBindings(tables);
}
+void StateTracker::ChangeChannel(Tegra::Control::ChannelState& channel_state) {
+ flags = &channel_state.maxwell_3d->dirty.flags;
+}
+
+void StateTracker::InvalidateState() {
+ flags->set();
+}
+
+StateTracker::StateTracker()
+ : flags{&default_flags}, default_flags{}, invalidation_flags{MakeInvalidationFlags()} {}
+
} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.h b/src/video_core/renderer_vulkan/vk_state_tracker.h
index a85bc1c10..2296dea60 100644
--- a/src/video_core/renderer_vulkan/vk_state_tracker.h
+++ b/src/video_core/renderer_vulkan/vk_state_tracker.h
@@ -10,6 +10,12 @@
#include "video_core/dirty_flags.h"
#include "video_core/engines/maxwell_3d.h"
+namespace Tegra {
+namespace Control {
+struct ChannelState;
+}
+} // namespace Tegra
+
namespace Vulkan {
namespace Dirty {
@@ -53,19 +59,19 @@ class StateTracker {
using Maxwell = Tegra::Engines::Maxwell3D::Regs;
public:
- explicit StateTracker(Tegra::GPU& gpu);
+ explicit StateTracker();
void InvalidateCommandBufferState() {
- flags |= invalidation_flags;
+ (*flags) |= invalidation_flags;
current_topology = INVALID_TOPOLOGY;
}
void InvalidateViewports() {
- flags[Dirty::Viewports] = true;
+ (*flags)[Dirty::Viewports] = true;
}
void InvalidateScissors() {
- flags[Dirty::Scissors] = true;
+ (*flags)[Dirty::Scissors] = true;
}
bool TouchViewports() {
@@ -139,16 +145,23 @@ public:
return has_changed;
}
+ void SetupTables(Tegra::Control::ChannelState& channel_state);
+
+ void ChangeChannel(Tegra::Control::ChannelState& channel_state);
+
+ void InvalidateState();
+
private:
static constexpr auto INVALID_TOPOLOGY = static_cast<Maxwell::PrimitiveTopology>(~0u);
bool Exchange(std::size_t id, bool new_value) const noexcept {
- const bool is_dirty = flags[id];
- flags[id] = new_value;
+ const bool is_dirty = (*flags)[id];
+ (*flags)[id] = new_value;
return is_dirty;
}
- Tegra::Engines::Maxwell3D::DirtyState::Flags& flags;
+ Tegra::Engines::Maxwell3D::DirtyState::Flags* flags;
+ Tegra::Engines::Maxwell3D::DirtyState::Flags default_flags;
Tegra::Engines::Maxwell3D::DirtyState::Flags invalidation_flags;
Maxwell::PrimitiveTopology current_topology = INVALID_TOPOLOGY;
};
diff --git a/src/video_core/renderer_vulkan/vk_swapchain.cpp b/src/video_core/renderer_vulkan/vk_swapchain.cpp
index a69ae7725..706d9ba74 100644
--- a/src/video_core/renderer_vulkan/vk_swapchain.cpp
+++ b/src/video_core/renderer_vulkan/vk_swapchain.cpp
@@ -36,7 +36,8 @@ VkPresentModeKHR ChooseSwapPresentMode(vk::Span<VkPresentModeKHR> modes) {
// Mailbox (triple buffering) doesn't lock the application like fifo (vsync),
// prefer it if vsync option is not selected
const auto found_mailbox = std::find(modes.begin(), modes.end(), VK_PRESENT_MODE_MAILBOX_KHR);
- if (found_mailbox != modes.end() && !Settings::values.use_vsync.GetValue()) {
+ if (Settings::values.fullscreen_mode.GetValue() == Settings::FullscreenMode::Borderless &&
+ found_mailbox != modes.end() && !Settings::values.use_vsync.GetValue()) {
return VK_PRESENT_MODE_MAILBOX_KHR;
}
if (!Settings::values.use_speed_limit.GetValue()) {
@@ -156,8 +157,16 @@ void Swapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities, u3
present_mode = ChooseSwapPresentMode(present_modes);
u32 requested_image_count{capabilities.minImageCount + 1};
- if (capabilities.maxImageCount > 0 && requested_image_count > capabilities.maxImageCount) {
- requested_image_count = capabilities.maxImageCount;
+ // Ensure triple buffering if possible.
+ if (capabilities.maxImageCount > 0) {
+ if (requested_image_count > capabilities.maxImageCount) {
+ requested_image_count = capabilities.maxImageCount;
+ } else {
+ requested_image_count =
+ std::max(requested_image_count, std::min(3U, capabilities.maxImageCount));
+ }
+ } else {
+ requested_image_count = std::max(requested_image_count, 3U);
}
VkSwapchainCreateInfoKHR swapchain_ci{
.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index caca79d79..305ad8aee 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -592,7 +592,7 @@ void TryTransformSwizzleIfNeeded(PixelFormat format, std::array<SwizzleSource, 4
case PixelFormat::A5B5G5R1_UNORM:
std::ranges::transform(swizzle, swizzle.begin(), SwapSpecial);
break;
- case PixelFormat::R4G4_UNORM:
+ case PixelFormat::G4R4_UNORM:
std::ranges::transform(swizzle, swizzle.begin(), SwapGreenRed);
break;
default:
@@ -1474,13 +1474,14 @@ bool Image::BlitScaleHelper(bool scale_up) {
};
const VkExtent2D extent{
.width = std::max(scaled_width, info.size.width),
- .height = std::max(scaled_height, info.size.width),
+ .height = std::max(scaled_height, info.size.height),
};
auto* view_ptr = blit_view.get();
if (aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT) {
if (!blit_framebuffer) {
- blit_framebuffer = std::make_unique<Framebuffer>(*runtime, view_ptr, nullptr, extent);
+ blit_framebuffer =
+ std::make_unique<Framebuffer>(*runtime, view_ptr, nullptr, extent, scale_up);
}
const auto color_view = blit_view->Handle(Shader::TextureType::Color2D);
@@ -1488,7 +1489,8 @@ bool Image::BlitScaleHelper(bool scale_up) {
src_region, operation, BLIT_OPERATION);
} else if (aspect_mask == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
if (!blit_framebuffer) {
- blit_framebuffer = std::make_unique<Framebuffer>(*runtime, nullptr, view_ptr, extent);
+ blit_framebuffer =
+ std::make_unique<Framebuffer>(*runtime, nullptr, view_ptr, extent, scale_up);
}
runtime->blit_image_helper.BlitDepthStencil(blit_framebuffer.get(), blit_view->DepthView(),
blit_view->StencilView(), dst_region,
@@ -1756,34 +1758,42 @@ Framebuffer::Framebuffer(TextureCacheRuntime& runtime, std::span<ImageView*, NUM
.width = key.size.width,
.height = key.size.height,
}} {
- CreateFramebuffer(runtime, color_buffers, depth_buffer);
+ CreateFramebuffer(runtime, color_buffers, depth_buffer, key.is_rescaled);
if (runtime.device.HasDebuggingToolAttached()) {
framebuffer.SetObjectNameEXT(VideoCommon::Name(key).c_str());
}
}
Framebuffer::Framebuffer(TextureCacheRuntime& runtime, ImageView* color_buffer,
- ImageView* depth_buffer, VkExtent2D extent)
+ ImageView* depth_buffer, VkExtent2D extent, bool is_rescaled)
: render_area{extent} {
std::array<ImageView*, NUM_RT> color_buffers{color_buffer};
- CreateFramebuffer(runtime, color_buffers, depth_buffer);
+ CreateFramebuffer(runtime, color_buffers, depth_buffer, is_rescaled);
}
Framebuffer::~Framebuffer() = default;
void Framebuffer::CreateFramebuffer(TextureCacheRuntime& runtime,
std::span<ImageView*, NUM_RT> color_buffers,
- ImageView* depth_buffer) {
+ ImageView* depth_buffer, bool is_rescaled) {
std::vector<VkImageView> attachments;
RenderPassKey renderpass_key{};
s32 num_layers = 1;
+ const auto& resolution = runtime.resolution;
+
+ u32 width = 0;
+ u32 height = 0;
for (size_t index = 0; index < NUM_RT; ++index) {
const ImageView* const color_buffer = color_buffers[index];
if (!color_buffer) {
renderpass_key.color_formats[index] = PixelFormat::Invalid;
continue;
}
+ width = std::max(width, is_rescaled ? resolution.ScaleUp(color_buffer->size.width)
+ : color_buffer->size.width);
+ height = std::max(height, is_rescaled ? resolution.ScaleUp(color_buffer->size.height)
+ : color_buffer->size.height);
attachments.push_back(color_buffer->RenderTarget());
renderpass_key.color_formats[index] = color_buffer->format;
num_layers = std::max(num_layers, color_buffer->range.extent.layers);
@@ -1794,6 +1804,10 @@ void Framebuffer::CreateFramebuffer(TextureCacheRuntime& runtime,
}
const size_t num_colors = attachments.size();
if (depth_buffer) {
+ width = std::max(width, is_rescaled ? resolution.ScaleUp(depth_buffer->size.width)
+ : depth_buffer->size.width);
+ height = std::max(height, is_rescaled ? resolution.ScaleUp(depth_buffer->size.height)
+ : depth_buffer->size.height);
attachments.push_back(depth_buffer->RenderTarget());
renderpass_key.depth_format = depth_buffer->format;
num_layers = std::max(num_layers, depth_buffer->range.extent.layers);
@@ -1810,6 +1824,8 @@ void Framebuffer::CreateFramebuffer(TextureCacheRuntime& runtime,
renderpass_key.samples = samples;
renderpass = runtime.render_pass_cache.Get(renderpass_key);
+ render_area.width = std::min(render_area.width, width);
+ render_area.height = std::min(render_area.height, height);
num_color_buffers = static_cast<u32>(num_colors);
framebuffer = runtime.device.GetLogical().CreateFramebuffer({
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h
index 69f06ee7b..0b7ac0df1 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.h
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.h
@@ -268,7 +268,7 @@ public:
ImageView* depth_buffer, const VideoCommon::RenderTargets& key);
explicit Framebuffer(TextureCacheRuntime& runtime, ImageView* color_buffer,
- ImageView* depth_buffer, VkExtent2D extent);
+ ImageView* depth_buffer, VkExtent2D extent, bool is_rescaled);
~Framebuffer();
@@ -279,7 +279,8 @@ public:
Framebuffer& operator=(Framebuffer&&) = default;
void CreateFramebuffer(TextureCacheRuntime& runtime,
- std::span<ImageView*, NUM_RT> color_buffers, ImageView* depth_buffer);
+ std::span<ImageView*, NUM_RT> color_buffers, ImageView* depth_buffer,
+ bool is_rescaled = false);
[[nodiscard]] VkFramebuffer Handle() const noexcept {
return *framebuffer;
diff --git a/src/video_core/shader_cache.cpp b/src/video_core/shader_cache.cpp
index 164e4ee0e..f53066579 100644
--- a/src/video_core/shader_cache.cpp
+++ b/src/video_core/shader_cache.cpp
@@ -8,6 +8,7 @@
#include "common/assert.h"
#include "shader_recompiler/frontend/maxwell/control_flow.h"
#include "shader_recompiler/object_pool.h"
+#include "video_core/control/channel_state.h"
#include "video_core/dirty_flags.h"
#include "video_core/engines/kepler_compute.h"
#include "video_core/engines/maxwell_3d.h"
@@ -33,29 +34,25 @@ void ShaderCache::SyncGuestHost() {
RemovePendingShaders();
}
-ShaderCache::ShaderCache(VideoCore::RasterizerInterface& rasterizer_,
- Tegra::MemoryManager& gpu_memory_, Tegra::Engines::Maxwell3D& maxwell3d_,
- Tegra::Engines::KeplerCompute& kepler_compute_)
- : gpu_memory{gpu_memory_}, maxwell3d{maxwell3d_}, kepler_compute{kepler_compute_},
- rasterizer{rasterizer_} {}
+ShaderCache::ShaderCache(VideoCore::RasterizerInterface& rasterizer_) : rasterizer{rasterizer_} {}
bool ShaderCache::RefreshStages(std::array<u64, 6>& unique_hashes) {
- auto& dirty{maxwell3d.dirty.flags};
+ auto& dirty{maxwell3d->dirty.flags};
if (!dirty[VideoCommon::Dirty::Shaders]) {
return last_shaders_valid;
}
dirty[VideoCommon::Dirty::Shaders] = false;
- const GPUVAddr base_addr{maxwell3d.regs.code_address.CodeAddress()};
+ const GPUVAddr base_addr{maxwell3d->regs.code_address.CodeAddress()};
for (size_t index = 0; index < Tegra::Engines::Maxwell3D::Regs::MaxShaderProgram; ++index) {
- if (!maxwell3d.regs.IsShaderConfigEnabled(index)) {
+ if (!maxwell3d->regs.IsShaderConfigEnabled(index)) {
unique_hashes[index] = 0;
continue;
}
- const auto& shader_config{maxwell3d.regs.shader_config[index]};
+ const auto& shader_config{maxwell3d->regs.shader_config[index]};
const auto program{static_cast<Tegra::Engines::Maxwell3D::Regs::ShaderProgram>(index)};
const GPUVAddr shader_addr{base_addr + shader_config.offset};
- const std::optional<VAddr> cpu_shader_addr{gpu_memory.GpuToCpuAddress(shader_addr)};
+ const std::optional<VAddr> cpu_shader_addr{gpu_memory->GpuToCpuAddress(shader_addr)};
if (!cpu_shader_addr) {
LOG_ERROR(HW_GPU, "Invalid GPU address for shader 0x{:016x}", shader_addr);
last_shaders_valid = false;
@@ -64,7 +61,7 @@ bool ShaderCache::RefreshStages(std::array<u64, 6>& unique_hashes) {
const ShaderInfo* shader_info{TryGet(*cpu_shader_addr)};
if (!shader_info) {
const u32 start_address{shader_config.offset};
- GraphicsEnvironment env{maxwell3d, gpu_memory, program, base_addr, start_address};
+ GraphicsEnvironment env{*maxwell3d, *gpu_memory, program, base_addr, start_address};
shader_info = MakeShaderInfo(env, *cpu_shader_addr);
}
shader_infos[index] = shader_info;
@@ -75,10 +72,10 @@ bool ShaderCache::RefreshStages(std::array<u64, 6>& unique_hashes) {
}
const ShaderInfo* ShaderCache::ComputeShader() {
- const GPUVAddr program_base{kepler_compute.regs.code_loc.Address()};
- const auto& qmd{kepler_compute.launch_description};
+ const GPUVAddr program_base{kepler_compute->regs.code_loc.Address()};
+ const auto& qmd{kepler_compute->launch_description};
const GPUVAddr shader_addr{program_base + qmd.program_start};
- const std::optional<VAddr> cpu_shader_addr{gpu_memory.GpuToCpuAddress(shader_addr)};
+ const std::optional<VAddr> cpu_shader_addr{gpu_memory->GpuToCpuAddress(shader_addr)};
if (!cpu_shader_addr) {
LOG_ERROR(HW_GPU, "Invalid GPU address for shader 0x{:016x}", shader_addr);
return nullptr;
@@ -86,22 +83,22 @@ const ShaderInfo* ShaderCache::ComputeShader() {
if (const ShaderInfo* const shader = TryGet(*cpu_shader_addr)) {
return shader;
}
- ComputeEnvironment env{kepler_compute, gpu_memory, program_base, qmd.program_start};
+ ComputeEnvironment env{*kepler_compute, *gpu_memory, program_base, qmd.program_start};
return MakeShaderInfo(env, *cpu_shader_addr);
}
void ShaderCache::GetGraphicsEnvironments(GraphicsEnvironments& result,
const std::array<u64, NUM_PROGRAMS>& unique_hashes) {
size_t env_index{};
- const GPUVAddr base_addr{maxwell3d.regs.code_address.CodeAddress()};
+ const GPUVAddr base_addr{maxwell3d->regs.code_address.CodeAddress()};
for (size_t index = 0; index < NUM_PROGRAMS; ++index) {
if (unique_hashes[index] == 0) {
continue;
}
const auto program{static_cast<Tegra::Engines::Maxwell3D::Regs::ShaderProgram>(index)};
auto& env{result.envs[index]};
- const u32 start_address{maxwell3d.regs.shader_config[index].offset};
- env = GraphicsEnvironment{maxwell3d, gpu_memory, program, base_addr, start_address};
+ const u32 start_address{maxwell3d->regs.shader_config[index].offset};
+ env = GraphicsEnvironment{*maxwell3d, *gpu_memory, program, base_addr, start_address};
env.SetCachedSize(shader_infos[index]->size_bytes);
result.env_ptrs[env_index++] = &env;
}
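
In the shader cache the engines and GPU memory are no longer references fixed at construction but pointers (maxwell3d->, gpu_memory->) that the channel-state base rebinds whenever the active channel changes. A standalone sketch of that rebinding idea, with illustrative names only:

    #include <cstdint>
    #include <iostream>

    // Hypothetical engine/channel types for illustration only.
    struct Engine3D { std::uint64_t code_address; };
    struct Channel  { Engine3D engine3d; };

    class ChannelAwareCache {
    public:
        // Rebinds the engine pointer when the GPU switches channels.
        void BindChannel(Channel& channel) { engine3d = &channel.engine3d; }

    protected:
        Engine3D* engine3d{}; // accessed with '->', may change between draws
    };

    class ShaderCacheSketch : public ChannelAwareCache {
    public:
        std::uint64_t CodeAddress() const { return engine3d->code_address; }
    };

    int main() {
        Channel a{{0x1000}};
        Channel b{{0x2000}};
        ShaderCacheSketch cache;
        cache.BindChannel(a);
        std::cout << std::hex << cache.CodeAddress() << '\n'; // 1000
        cache.BindChannel(b);
        std::cout << std::hex << cache.CodeAddress() << '\n'; // 2000
    }
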
diff --git a/src/video_core/shader_cache.h b/src/video_core/shader_cache.h
index f67cea8c4..a4391202d 100644
--- a/src/video_core/shader_cache.h
+++ b/src/video_core/shader_cache.h
@@ -12,6 +12,7 @@
#include <vector>
#include "common/common_types.h"
+#include "video_core/control/channel_state_cache.h"
#include "video_core/rasterizer_interface.h"
#include "video_core/shader_environment.h"
@@ -19,6 +20,10 @@ namespace Tegra {
class MemoryManager;
}
+namespace Tegra::Control {
+struct ChannelState;
+}
+
namespace VideoCommon {
class GenericEnvironment;
@@ -28,7 +33,7 @@ struct ShaderInfo {
size_t size_bytes{};
};
-class ShaderCache {
+class ShaderCache : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
static constexpr u64 YUZU_PAGEBITS = 14;
static constexpr u64 YUZU_PAGESIZE = u64(1) << YUZU_PAGEBITS;
@@ -71,9 +76,7 @@ protected:
}
};
- explicit ShaderCache(VideoCore::RasterizerInterface& rasterizer_,
- Tegra::MemoryManager& gpu_memory_, Tegra::Engines::Maxwell3D& maxwell3d_,
- Tegra::Engines::KeplerCompute& kepler_compute_);
+ explicit ShaderCache(VideoCore::RasterizerInterface& rasterizer_);
/// @brief Update the hashes and information of shader stages
/// @param unique_hashes Shader hashes to store into when a stage is enabled
@@ -88,10 +91,6 @@ protected:
void GetGraphicsEnvironments(GraphicsEnvironments& result,
const std::array<u64, NUM_PROGRAMS>& unique_hashes);
- Tegra::MemoryManager& gpu_memory;
- Tegra::Engines::Maxwell3D& maxwell3d;
- Tegra::Engines::KeplerCompute& kepler_compute;
-
std::array<const ShaderInfo*, NUM_PROGRAMS> shader_infos{};
bool last_shaders_valid = false;
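
ShaderCache now derives from ChannelSetupCaches<ChannelInfo>, which, judging by how it is used here, owns one info object per channel and exposes the currently bound one; the derived cache then only keeps cache-wide members. A compact sketch of that storage layout under those assumptions:

    #include <cstddef>
    #include <deque>
    #include <unordered_map>

    // Sketch of a per-channel cache base; identifiers are assumptions, not yuzu's.
    template <class Info>
    class ChannelSetupCachesSketch {
    public:
        void CreateChannel(int bind_id) {
            channel_map.emplace(bind_id, channel_storage.size());
            channel_storage.emplace_back();
        }
        void BindToChannel(int bind_id) {
            channel_state = &channel_storage[channel_map.at(bind_id)];
        }

    protected:
        Info* channel_state{};            // state of the bound channel
        std::deque<Info> channel_storage; // one entry per created channel
        std::unordered_map<int, std::size_t> channel_map;
    };

    struct DemoInfo { int draws = 0; };

    int main() {
        ChannelSetupCachesSketch<DemoInfo> caches;
        caches.CreateChannel(7);
        caches.BindToChannel(7);
        return 0;
    }
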
diff --git a/src/video_core/surface.h b/src/video_core/surface.h
index 5fd82357c..57ca7f597 100644
--- a/src/video_core/surface.h
+++ b/src/video_core/surface.h
@@ -82,7 +82,7 @@ enum class PixelFormat {
BC3_SRGB,
BC7_SRGB,
A4B4G4R4_UNORM,
- R4G4_UNORM,
+ G4R4_UNORM,
ASTC_2D_4X4_SRGB,
ASTC_2D_8X8_SRGB,
ASTC_2D_8X5_SRGB,
@@ -218,7 +218,7 @@ constexpr std::array<u8, MaxPixelFormat> BLOCK_WIDTH_TABLE = {{
4, // BC3_SRGB
4, // BC7_SRGB
1, // A4B4G4R4_UNORM
- 1, // R4G4_UNORM
+ 1, // G4R4_UNORM
4, // ASTC_2D_4X4_SRGB
8, // ASTC_2D_8X8_SRGB
8, // ASTC_2D_8X5_SRGB
@@ -323,7 +323,7 @@ constexpr std::array<u8, MaxPixelFormat> BLOCK_HEIGHT_TABLE = {{
4, // BC3_SRGB
4, // BC7_SRGB
1, // A4B4G4R4_UNORM
- 1, // R4G4_UNORM
+ 1, // G4R4_UNORM
4, // ASTC_2D_4X4_SRGB
8, // ASTC_2D_8X8_SRGB
5, // ASTC_2D_8X5_SRGB
@@ -428,7 +428,7 @@ constexpr std::array<u8, MaxPixelFormat> BITS_PER_BLOCK_TABLE = {{
128, // BC3_SRGB
128, // BC7_UNORM
16, // A4B4G4R4_UNORM
- 8, // R4G4_UNORM
+ 8, // G4R4_UNORM
128, // ASTC_2D_4X4_SRGB
128, // ASTC_2D_8X8_SRGB
128, // ASTC_2D_8X5_SRGB
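
These per-format tables are indexed by PixelFormat, so renaming R4G4_UNORM to G4R4_UNORM only touches the trailing comments; the 1x1-block, 8-bit entries stay the same. As a worked example of how such tables are typically consumed (a sketch, not the yuzu helpers):

    #include <array>
    #include <cstddef>
    #include <cstdint>
    #include <iostream>

    // Illustrative mini-tables for two formats, in the same style as above.
    enum class Fmt : std::size_t { A4B4G4R4_UNORM, G4R4_UNORM };

    constexpr std::array<std::uint8_t, 2> BLOCK_W{1, 1};
    constexpr std::array<std::uint8_t, 2> BITS_PER_BLOCK{16, 8};

    constexpr std::uint32_t BytesPerBlock(Fmt f) {
        return BITS_PER_BLOCK[static_cast<std::size_t>(f)] / 8;
    }

    constexpr std::uint32_t RowPitch(Fmt f, std::uint32_t width) {
        const std::uint32_t bw = BLOCK_W[static_cast<std::size_t>(f)];
        return ((width + bw - 1) / bw) * BytesPerBlock(f);
    }

    int main() {
        std::cout << RowPitch(Fmt::G4R4_UNORM, 640) << '\n';     // 640 bytes per row
        std::cout << RowPitch(Fmt::A4B4G4R4_UNORM, 640) << '\n'; // 1280 bytes per row
    }
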
diff --git a/src/video_core/texture_cache/format_lookup_table.cpp b/src/video_core/texture_cache/format_lookup_table.cpp
index c71694d2a..ad935d386 100644
--- a/src/video_core/texture_cache/format_lookup_table.cpp
+++ b/src/video_core/texture_cache/format_lookup_table.cpp
@@ -63,7 +63,7 @@ PixelFormat PixelFormatFromTextureInfo(TextureFormat format, ComponentType red,
case Hash(TextureFormat::A4B4G4R4, UNORM):
return PixelFormat::A4B4G4R4_UNORM;
case Hash(TextureFormat::G4R4, UNORM):
- return PixelFormat::R4G4_UNORM;
+ return PixelFormat::G4R4_UNORM;
case Hash(TextureFormat::A5B5G5R1, UNORM):
return PixelFormat::A5B5G5R1_UNORM;
case Hash(TextureFormat::R8, UNORM):
diff --git a/src/video_core/texture_cache/formatter.h b/src/video_core/texture_cache/formatter.h
index 6881e4c90..acc854715 100644
--- a/src/video_core/texture_cache/formatter.h
+++ b/src/video_core/texture_cache/formatter.h
@@ -153,8 +153,8 @@ struct fmt::formatter<VideoCore::Surface::PixelFormat> : fmt::formatter<fmt::str
return "BC7_SRGB";
case PixelFormat::A4B4G4R4_UNORM:
return "A4B4G4R4_UNORM";
- case PixelFormat::R4G4_UNORM:
- return "R4G4_UNORM";
+ case PixelFormat::G4R4_UNORM:
+ return "G4R4_UNORM";
case PixelFormat::ASTC_2D_4X4_SRGB:
return "ASTC_2D_4X4_SRGB";
case PixelFormat::ASTC_2D_8X8_SRGB:
diff --git a/src/video_core/texture_cache/image_base.cpp b/src/video_core/texture_cache/image_base.cpp
index f61e09ac7..91512022f 100644
--- a/src/video_core/texture_cache/image_base.cpp
+++ b/src/video_core/texture_cache/image_base.cpp
@@ -7,6 +7,7 @@
#include <vector>
#include "common/common_types.h"
+#include "common/div_ceil.h"
#include "video_core/surface.h"
#include "video_core/texture_cache/formatter.h"
#include "video_core/texture_cache/image_base.h"
@@ -182,10 +183,6 @@ void AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_i
};
const bool is_lhs_compressed = lhs_block.width > 1 || lhs_block.height > 1;
const bool is_rhs_compressed = rhs_block.width > 1 || rhs_block.height > 1;
- if (is_lhs_compressed && is_rhs_compressed) {
- LOG_ERROR(HW_GPU, "Compressed to compressed image aliasing is not implemented");
- return;
- }
const s32 lhs_mips = lhs.info.resources.levels;
const s32 rhs_mips = rhs.info.resources.levels;
const s32 num_mips = std::min(lhs_mips - base->level, rhs_mips);
@@ -199,12 +196,12 @@ void AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_i
Extent3D lhs_size = MipSize(lhs.info.size, base->level + mip_level);
Extent3D rhs_size = MipSize(rhs.info.size, mip_level);
if (is_lhs_compressed) {
- lhs_size.width /= lhs_block.width;
- lhs_size.height /= lhs_block.height;
+ lhs_size.width = Common::DivCeil(lhs_size.width, lhs_block.width);
+ lhs_size.height = Common::DivCeil(lhs_size.height, lhs_block.height);
}
if (is_rhs_compressed) {
- rhs_size.width /= rhs_block.width;
- rhs_size.height /= rhs_block.height;
+ rhs_size.width = Common::DivCeil(rhs_size.width, rhs_block.width);
+ rhs_size.height = Common::DivCeil(rhs_size.height, rhs_block.height);
}
const Extent3D copy_size{
.width = std::min(lhs_size.width, rhs_size.width),
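
Switching from plain division to Common::DivCeil matters when a compressed mip's extent is not a multiple of its block size: a 10-texel-wide BC mip still spans three 4-texel blocks, and flooring to two would under-size the alias copy. A standalone restatement of the rounding:

    #include <cstdint>
    #include <iostream>

    // Round-up division, equivalent in spirit to Common::DivCeil.
    constexpr std::uint32_t DivCeil(std::uint32_t value, std::uint32_t divisor) {
        return (value + divisor - 1) / divisor;
    }

    int main() {
        constexpr std::uint32_t mip_width = 10; // texels
        constexpr std::uint32_t block_w = 4;    // BC block width
        std::cout << (mip_width / block_w) << '\n';       // 2 blocks: drops the partial block
        std::cout << DivCeil(mip_width, block_w) << '\n'; // 3 blocks: what the copy needs
    }
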
diff --git a/src/video_core/texture_cache/image_base.h b/src/video_core/texture_cache/image_base.h
index 1f85ec9da..620565684 100644
--- a/src/video_core/texture_cache/image_base.h
+++ b/src/video_core/texture_cache/image_base.h
@@ -88,6 +88,9 @@ struct ImageBase {
u32 scale_rating = 0;
u64 scale_tick = 0;
bool has_scaled = false;
+
+ size_t channel = 0;
+
ImageFlagBits flags = ImageFlagBits::CpuModified;
GPUVAddr gpu_addr = 0;
diff --git a/src/video_core/texture_cache/render_targets.h b/src/video_core/texture_cache/render_targets.h
index da8ffe9ec..1efbd6507 100644
--- a/src/video_core/texture_cache/render_targets.h
+++ b/src/video_core/texture_cache/render_targets.h
@@ -26,6 +26,7 @@ struct RenderTargets {
ImageViewId depth_buffer_id{};
std::array<u8, NUM_RT> draw_buffers{};
Extent2D size{};
+ bool is_rescaled{};
};
} // namespace VideoCommon
diff --git a/src/video_core/texture_cache/texture_cache.cpp b/src/video_core/texture_cache/texture_cache.cpp
new file mode 100644
index 000000000..8a9a32f44
--- /dev/null
+++ b/src/video_core/texture_cache/texture_cache.cpp
@@ -0,0 +1,15 @@
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "video_core/control/channel_state_cache.inc"
+#include "video_core/texture_cache/texture_cache_base.h"
+
+namespace VideoCommon {
+
+TextureCacheChannelInfo::TextureCacheChannelInfo(Tegra::Control::ChannelState& state) noexcept
+ : ChannelInfo(state), graphics_image_table{gpu_memory}, graphics_sampler_table{gpu_memory},
+ compute_image_table{gpu_memory}, compute_sampler_table{gpu_memory} {}
+
+template class VideoCommon::ChannelSetupCaches<VideoCommon::TextureCacheChannelInfo>;
+
+} // namespace VideoCommon
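
The new texture_cache.cpp includes channel_state_cache.inc and explicitly instantiates ChannelSetupCaches<TextureCacheChannelInfo>, so the template's method bodies are compiled once instead of in every includer. A minimal sketch of that header/.inc/explicit-instantiation split, with made-up names, written out as a single translation unit:

    // counter.h -- declaration only; users include just this.
    template <class T>
    class Counter {
    public:
        void Add(T value);
        T total{};
    };

    // counter.inc -- out-of-line definitions, included only where instantiated.
    template <class T>
    void Counter<T>::Add(T value) {
        total += value;
    }

    // counter_int.cpp -- one translation unit emits the code for Counter<int>.
    // In a real split this file would '#include "counter.h"' and '#include "counter.inc"'.
    template class Counter<int>;
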
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 1dbe01bc0..eaf4a1c95 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -1,5 +1,5 @@
-// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
#pragma once
@@ -7,6 +7,7 @@
#include "common/alignment.h"
#include "common/settings.h"
+#include "video_core/control/channel_state.h"
#include "video_core/dirty_flags.h"
#include "video_core/engines/kepler_compute.h"
#include "video_core/texture_cache/image_view_base.h"
@@ -29,12 +30,8 @@ using VideoCore::Surface::SurfaceType;
using namespace Common::Literals;
template <class P>
-TextureCache<P>::TextureCache(Runtime& runtime_, VideoCore::RasterizerInterface& rasterizer_,
- Tegra::Engines::Maxwell3D& maxwell3d_,
- Tegra::Engines::KeplerCompute& kepler_compute_,
- Tegra::MemoryManager& gpu_memory_)
- : runtime{runtime_}, rasterizer{rasterizer_}, maxwell3d{maxwell3d_},
- kepler_compute{kepler_compute_}, gpu_memory{gpu_memory_} {
+TextureCache<P>::TextureCache(Runtime& runtime_, VideoCore::RasterizerInterface& rasterizer_)
+ : runtime{runtime_}, rasterizer{rasterizer_} {
// Configure null sampler
TSCEntry sampler_descriptor{};
sampler_descriptor.min_filter.Assign(Tegra::Texture::TextureFilter::Linear);
@@ -93,7 +90,7 @@ void TextureCache<P>::RunGarbageCollector() {
const auto copies = FullDownloadCopies(image.info);
image.DownloadMemory(map, copies);
runtime.Finish();
- SwizzleImage(gpu_memory, image.gpu_addr, image.info, copies, map.mapped_span);
+ SwizzleImage(*gpu_memory, image.gpu_addr, image.info, copies, map.mapped_span);
}
if (True(image.flags & ImageFlagBits::Tracked)) {
UntrackImage(image, image_id);
@@ -152,22 +149,24 @@ void TextureCache<P>::MarkModification(ImageId id) noexcept {
template <class P>
template <bool has_blacklists>
void TextureCache<P>::FillGraphicsImageViews(std::span<ImageViewInOut> views) {
- FillImageViews<has_blacklists>(graphics_image_table, graphics_image_view_ids, views);
+ FillImageViews<has_blacklists>(channel_state->graphics_image_table,
+ channel_state->graphics_image_view_ids, views);
}
template <class P>
void TextureCache<P>::FillComputeImageViews(std::span<ImageViewInOut> views) {
- FillImageViews<true>(compute_image_table, compute_image_view_ids, views);
+ FillImageViews<true>(channel_state->compute_image_table, channel_state->compute_image_view_ids,
+ views);
}
template <class P>
typename P::Sampler* TextureCache<P>::GetGraphicsSampler(u32 index) {
- if (index > graphics_sampler_table.Limit()) {
+ if (index > channel_state->graphics_sampler_table.Limit()) {
LOG_DEBUG(HW_GPU, "Invalid sampler index={}", index);
return &slot_samplers[NULL_SAMPLER_ID];
}
- const auto [descriptor, is_new] = graphics_sampler_table.Read(index);
- SamplerId& id = graphics_sampler_ids[index];
+ const auto [descriptor, is_new] = channel_state->graphics_sampler_table.Read(index);
+ SamplerId& id = channel_state->graphics_sampler_ids[index];
if (is_new) {
id = FindSampler(descriptor);
}
@@ -176,12 +175,12 @@ typename P::Sampler* TextureCache<P>::GetGraphicsSampler(u32 index) {
template <class P>
typename P::Sampler* TextureCache<P>::GetComputeSampler(u32 index) {
- if (index > compute_sampler_table.Limit()) {
+ if (index > channel_state->compute_sampler_table.Limit()) {
LOG_DEBUG(HW_GPU, "Invalid sampler index={}", index);
return &slot_samplers[NULL_SAMPLER_ID];
}
- const auto [descriptor, is_new] = compute_sampler_table.Read(index);
- SamplerId& id = compute_sampler_ids[index];
+ const auto [descriptor, is_new] = channel_state->compute_sampler_table.Read(index);
+ SamplerId& id = channel_state->compute_sampler_ids[index];
if (is_new) {
id = FindSampler(descriptor);
}
@@ -191,34 +190,36 @@ typename P::Sampler* TextureCache<P>::GetComputeSampler(u32 index) {
template <class P>
void TextureCache<P>::SynchronizeGraphicsDescriptors() {
using SamplerIndex = Tegra::Engines::Maxwell3D::Regs::SamplerIndex;
- const bool linked_tsc = maxwell3d.regs.sampler_index == SamplerIndex::ViaHeaderIndex;
- const u32 tic_limit = maxwell3d.regs.tic.limit;
- const u32 tsc_limit = linked_tsc ? tic_limit : maxwell3d.regs.tsc.limit;
- if (graphics_sampler_table.Synchornize(maxwell3d.regs.tsc.Address(), tsc_limit)) {
- graphics_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID);
+ const bool linked_tsc = maxwell3d->regs.sampler_index == SamplerIndex::ViaHeaderIndex;
+ const u32 tic_limit = maxwell3d->regs.tic.limit;
+ const u32 tsc_limit = linked_tsc ? tic_limit : maxwell3d->regs.tsc.limit;
+ if (channel_state->graphics_sampler_table.Synchornize(maxwell3d->regs.tsc.Address(),
+ tsc_limit)) {
+ channel_state->graphics_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID);
}
- if (graphics_image_table.Synchornize(maxwell3d.regs.tic.Address(), tic_limit)) {
- graphics_image_view_ids.resize(tic_limit + 1, CORRUPT_ID);
+ if (channel_state->graphics_image_table.Synchornize(maxwell3d->regs.tic.Address(), tic_limit)) {
+ channel_state->graphics_image_view_ids.resize(tic_limit + 1, CORRUPT_ID);
}
}
template <class P>
void TextureCache<P>::SynchronizeComputeDescriptors() {
- const bool linked_tsc = kepler_compute.launch_description.linked_tsc;
- const u32 tic_limit = kepler_compute.regs.tic.limit;
- const u32 tsc_limit = linked_tsc ? tic_limit : kepler_compute.regs.tsc.limit;
- const GPUVAddr tsc_gpu_addr = kepler_compute.regs.tsc.Address();
- if (compute_sampler_table.Synchornize(tsc_gpu_addr, tsc_limit)) {
- compute_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID);
+ const bool linked_tsc = kepler_compute->launch_description.linked_tsc;
+ const u32 tic_limit = kepler_compute->regs.tic.limit;
+ const u32 tsc_limit = linked_tsc ? tic_limit : kepler_compute->regs.tsc.limit;
+ const GPUVAddr tsc_gpu_addr = kepler_compute->regs.tsc.Address();
+ if (channel_state->compute_sampler_table.Synchornize(tsc_gpu_addr, tsc_limit)) {
+ channel_state->compute_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID);
}
- if (compute_image_table.Synchornize(kepler_compute.regs.tic.Address(), tic_limit)) {
- compute_image_view_ids.resize(tic_limit + 1, CORRUPT_ID);
+ if (channel_state->compute_image_table.Synchornize(kepler_compute->regs.tic.Address(),
+ tic_limit)) {
+ channel_state->compute_image_view_ids.resize(tic_limit + 1, CORRUPT_ID);
}
}
template <class P>
bool TextureCache<P>::RescaleRenderTargets(bool is_clear) {
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
u32 scale_rating = 0;
bool rescaled = false;
std::array<ImageId, NUM_RT> tmp_color_images{};
@@ -315,7 +316,7 @@ bool TextureCache<P>::RescaleRenderTargets(bool is_clear) {
template <class P>
void TextureCache<P>::UpdateRenderTargets(bool is_clear) {
using namespace VideoCommon::Dirty;
- auto& flags = maxwell3d.dirty.flags;
+ auto& flags = maxwell3d->dirty.flags;
if (!flags[Dirty::RenderTargets]) {
for (size_t index = 0; index < NUM_RT; ++index) {
ImageViewId& color_buffer_id = render_targets.color_buffer_ids[index];
@@ -342,7 +343,7 @@ void TextureCache<P>::UpdateRenderTargets(bool is_clear) {
PrepareImageView(depth_buffer_id, true, is_clear && IsFullClear(depth_buffer_id));
for (size_t index = 0; index < NUM_RT; ++index) {
- render_targets.draw_buffers[index] = static_cast<u8>(maxwell3d.regs.rt_control.Map(index));
+ render_targets.draw_buffers[index] = static_cast<u8>(maxwell3d->regs.rt_control.Map(index));
}
u32 up_scale = 1;
u32 down_shift = 0;
@@ -351,9 +352,10 @@ void TextureCache<P>::UpdateRenderTargets(bool is_clear) {
down_shift = Settings::values.resolution_info.down_shift;
}
render_targets.size = Extent2D{
- (maxwell3d.regs.render_area.width * up_scale) >> down_shift,
- (maxwell3d.regs.render_area.height * up_scale) >> down_shift,
+ (maxwell3d->regs.render_area.width * up_scale) >> down_shift,
+ (maxwell3d->regs.render_area.height * up_scale) >> down_shift,
};
+ render_targets.is_rescaled = is_rescaling;
flags[Dirty::DepthBiasGlobal] = true;
}
@@ -458,7 +460,7 @@ void TextureCache<P>::DownloadMemory(VAddr cpu_addr, size_t size) {
const auto copies = FullDownloadCopies(image.info);
image.DownloadMemory(map, copies);
runtime.Finish();
- SwizzleImage(gpu_memory, image.gpu_addr, image.info, copies, map.mapped_span);
+ SwizzleImage(*gpu_memory, image.gpu_addr, image.info, copies, map.mapped_span);
}
}
@@ -477,12 +479,20 @@ void TextureCache<P>::UnmapMemory(VAddr cpu_addr, size_t size) {
}
template <class P>
-void TextureCache<P>::UnmapGPUMemory(GPUVAddr gpu_addr, size_t size) {
+void TextureCache<P>::UnmapGPUMemory(size_t as_id, GPUVAddr gpu_addr, size_t size) {
std::vector<ImageId> deleted_images;
- ForEachImageInRegionGPU(gpu_addr, size,
+ ForEachImageInRegionGPU(as_id, gpu_addr, size,
[&](ImageId id, Image&) { deleted_images.push_back(id); });
for (const ImageId id : deleted_images) {
Image& image = slot_images[id];
+ if (True(image.flags & ImageFlagBits::CpuModified)) {
+ return;
+ }
+ image.flags |= ImageFlagBits::CpuModified;
+ if (True(image.flags & ImageFlagBits::Tracked)) {
+ UntrackImage(image, id);
+ }
+ /*
if (True(image.flags & ImageFlagBits::Remapped)) {
continue;
}
@@ -490,6 +500,7 @@ void TextureCache<P>::UnmapGPUMemory(GPUVAddr gpu_addr, size_t size) {
if (True(image.flags & ImageFlagBits::Tracked)) {
UntrackImage(image, id);
}
+ */
}
}
@@ -655,7 +666,7 @@ void TextureCache<P>::PopAsyncFlushes() {
for (const ImageId image_id : download_ids) {
const ImageBase& image = slot_images[image_id];
const auto copies = FullDownloadCopies(image.info);
- SwizzleImage(gpu_memory, image.gpu_addr, image.info, copies, download_span);
+ SwizzleImage(*gpu_memory, image.gpu_addr, image.info, copies, download_span);
download_map.offset += image.unswizzled_size_bytes;
download_span = download_span.subspan(image.unswizzled_size_bytes);
}
@@ -714,26 +725,26 @@ void TextureCache<P>::UploadImageContents(Image& image, StagingBuffer& staging)
const GPUVAddr gpu_addr = image.gpu_addr;
if (True(image.flags & ImageFlagBits::AcceleratedUpload)) {
- gpu_memory.ReadBlockUnsafe(gpu_addr, mapped_span.data(), mapped_span.size_bytes());
+ gpu_memory->ReadBlockUnsafe(gpu_addr, mapped_span.data(), mapped_span.size_bytes());
const auto uploads = FullUploadSwizzles(image.info);
runtime.AccelerateImageUpload(image, staging, uploads);
} else if (True(image.flags & ImageFlagBits::Converted)) {
std::vector<u8> unswizzled_data(image.unswizzled_size_bytes);
- auto copies = UnswizzleImage(gpu_memory, gpu_addr, image.info, unswizzled_data);
+ auto copies = UnswizzleImage(*gpu_memory, gpu_addr, image.info, unswizzled_data);
ConvertImage(unswizzled_data, image.info, mapped_span, copies);
image.UploadMemory(staging, copies);
} else {
- const auto copies = UnswizzleImage(gpu_memory, gpu_addr, image.info, mapped_span);
+ const auto copies = UnswizzleImage(*gpu_memory, gpu_addr, image.info, mapped_span);
image.UploadMemory(staging, copies);
}
}
template <class P>
ImageViewId TextureCache<P>::FindImageView(const TICEntry& config) {
- if (!IsValidEntry(gpu_memory, config)) {
+ if (!IsValidEntry(*gpu_memory, config)) {
return NULL_IMAGE_VIEW_ID;
}
- const auto [pair, is_new] = image_views.try_emplace(config);
+ const auto [pair, is_new] = channel_state->image_views.try_emplace(config);
ImageViewId& image_view_id = pair->second;
if (is_new) {
image_view_id = CreateImageView(config);
@@ -777,9 +788,9 @@ ImageId TextureCache<P>::FindOrInsertImage(const ImageInfo& info, GPUVAddr gpu_a
template <class P>
ImageId TextureCache<P>::FindImage(const ImageInfo& info, GPUVAddr gpu_addr,
RelaxedOptions options) {
- std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
+ std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
if (!cpu_addr) {
- cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr, CalculateGuestSizeInBytes(info));
+ cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr, CalculateGuestSizeInBytes(info));
if (!cpu_addr) {
return ImageId{};
}
@@ -860,7 +871,7 @@ void TextureCache<P>::InvalidateScale(Image& image) {
image.scale_tick = frame_tick + 1;
}
const std::span<const ImageViewId> image_view_ids = image.image_view_ids;
- auto& dirty = maxwell3d.dirty.flags;
+ auto& dirty = maxwell3d->dirty.flags;
dirty[Dirty::RenderTargets] = true;
dirty[Dirty::ZetaBuffer] = true;
for (size_t rt = 0; rt < NUM_RT; ++rt) {
@@ -880,12 +891,15 @@ void TextureCache<P>::InvalidateScale(Image& image) {
}
image.image_view_ids.clear();
image.image_view_infos.clear();
- if constexpr (ENABLE_VALIDATION) {
- std::ranges::fill(graphics_image_view_ids, CORRUPT_ID);
- std::ranges::fill(compute_image_view_ids, CORRUPT_ID);
+ for (size_t c : active_channel_ids) {
+ auto& channel_info = channel_storage[c];
+ if constexpr (ENABLE_VALIDATION) {
+ std::ranges::fill(channel_info.graphics_image_view_ids, CORRUPT_ID);
+ std::ranges::fill(channel_info.compute_image_view_ids, CORRUPT_ID);
+ }
+ channel_info.graphics_image_table.Invalidate();
+ channel_info.compute_image_table.Invalidate();
}
- graphics_image_table.Invalidate();
- compute_image_table.Invalidate();
has_deleted_images = true;
}
@@ -929,10 +943,10 @@ bool TextureCache<P>::ScaleDown(Image& image) {
template <class P>
ImageId TextureCache<P>::InsertImage(const ImageInfo& info, GPUVAddr gpu_addr,
RelaxedOptions options) {
- std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
+ std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
if (!cpu_addr) {
const auto size = CalculateGuestSizeInBytes(info);
- cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr, size);
+ cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr, size);
if (!cpu_addr) {
const VAddr fake_addr = ~(1ULL << 40ULL) + virtual_invalid_space;
virtual_invalid_space += Common::AlignUp(size, 32);
@@ -1050,7 +1064,7 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
const ImageId new_image_id = slot_images.insert(runtime, new_info, gpu_addr, cpu_addr);
Image& new_image = slot_images[new_image_id];
- if (!gpu_memory.IsContinousRange(new_image.gpu_addr, new_image.guest_size_bytes)) {
+ if (!gpu_memory->IsContinousRange(new_image.gpu_addr, new_image.guest_size_bytes)) {
new_image.flags |= ImageFlagBits::Sparse;
}
@@ -1192,7 +1206,7 @@ SamplerId TextureCache<P>::FindSampler(const TSCEntry& config) {
if (std::ranges::all_of(config.raw, [](u64 value) { return value == 0; })) {
return NULL_SAMPLER_ID;
}
- const auto [pair, is_new] = samplers.try_emplace(config);
+ const auto [pair, is_new] = channel_state->samplers.try_emplace(config);
if (is_new) {
pair->second = slot_samplers.insert(runtime, config);
}
@@ -1201,7 +1215,7 @@ SamplerId TextureCache<P>::FindSampler(const TSCEntry& config) {
template <class P>
ImageViewId TextureCache<P>::FindColorBuffer(size_t index, bool is_clear) {
- const auto& regs = maxwell3d.regs;
+ const auto& regs = maxwell3d->regs;
if (index >= regs.rt_control.count) {
return ImageViewId{};
}
@@ -1219,7 +1233,7 @@ ImageViewId TextureCache<P>::FindColorBuffer(size_t index, bool is_clear) {
template <class P>
ImageViewId TextureCache<P>::FindDepthBuffer(bool is_clear) {
- const auto& regs = maxwell3d.regs;
+ const auto& regs = maxwell3d->regs;
if (!regs.zeta_enable) {
return ImageViewId{};
}
@@ -1316,11 +1330,17 @@ void TextureCache<P>::ForEachImageInRegion(VAddr cpu_addr, size_t size, Func&& f
template <class P>
template <typename Func>
-void TextureCache<P>::ForEachImageInRegionGPU(GPUVAddr gpu_addr, size_t size, Func&& func) {
+void TextureCache<P>::ForEachImageInRegionGPU(size_t as_id, GPUVAddr gpu_addr, size_t size,
+ Func&& func) {
using FuncReturn = typename std::invoke_result<Func, ImageId, Image&>::type;
static constexpr bool BOOL_BREAK = std::is_same_v<FuncReturn, bool>;
boost::container::small_vector<ImageId, 8> images;
- ForEachGPUPage(gpu_addr, size, [this, &images, gpu_addr, size, func](u64 page) {
+ auto storage_id = getStorageID(as_id);
+ if (!storage_id) {
+ return;
+ }
+ auto& gpu_page_table = gpu_page_table_storage[*storage_id];
+ ForEachGPUPage(gpu_addr, size, [this, gpu_page_table, &images, gpu_addr, size, func](u64 page) {
const auto it = gpu_page_table.find(page);
if (it == gpu_page_table.end()) {
if constexpr (BOOL_BREAK) {
@@ -1403,9 +1423,9 @@ template <typename Func>
void TextureCache<P>::ForEachSparseSegment(ImageBase& image, Func&& func) {
using FuncReturn = typename std::invoke_result<Func, GPUVAddr, VAddr, size_t>::type;
static constexpr bool RETURNS_BOOL = std::is_same_v<FuncReturn, bool>;
- const auto segments = gpu_memory.GetSubmappedRange(image.gpu_addr, image.guest_size_bytes);
+ const auto segments = gpu_memory->GetSubmappedRange(image.gpu_addr, image.guest_size_bytes);
for (const auto& [gpu_addr, size] : segments) {
- std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
+ std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
ASSERT(cpu_addr);
if constexpr (RETURNS_BOOL) {
if (func(gpu_addr, *cpu_addr, size)) {
@@ -1448,8 +1468,9 @@ void TextureCache<P>::RegisterImage(ImageId image_id) {
}
image.lru_index = lru_cache.Insert(image_id, frame_tick);
- ForEachGPUPage(image.gpu_addr, image.guest_size_bytes,
- [this, image_id](u64 page) { gpu_page_table[page].push_back(image_id); });
+ ForEachGPUPage(image.gpu_addr, image.guest_size_bytes, [this, image_id](u64 page) {
+ (*channel_state->gpu_page_table)[page].push_back(image_id);
+ });
if (False(image.flags & ImageFlagBits::Sparse)) {
auto map_id =
slot_map_views.insert(image.gpu_addr, image.cpu_addr, image.guest_size_bytes, image_id);
@@ -1480,9 +1501,9 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
image.flags &= ~ImageFlagBits::BadOverlap;
lru_cache.Free(image.lru_index);
const auto& clear_page_table =
- [this, image_id](
- u64 page,
- std::unordered_map<u64, std::vector<ImageId>, IdentityHash<u64>>& selected_page_table) {
+ [this, image_id](u64 page,
+ std::unordered_map<u64, std::vector<ImageId>, Common::IdentityHash<u64>>&
+ selected_page_table) {
const auto page_it = selected_page_table.find(page);
if (page_it == selected_page_table.end()) {
ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << YUZU_PAGEBITS);
@@ -1497,8 +1518,9 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
}
image_ids.erase(vector_it);
};
- ForEachGPUPage(image.gpu_addr, image.guest_size_bytes,
- [this, &clear_page_table](u64 page) { clear_page_table(page, gpu_page_table); });
+ ForEachGPUPage(image.gpu_addr, image.guest_size_bytes, [this, &clear_page_table](u64 page) {
+ clear_page_table(page, (*channel_state->gpu_page_table));
+ });
if (False(image.flags & ImageFlagBits::Sparse)) {
const auto map_id = image.map_view_id;
ForEachCPUPage(image.cpu_addr, image.guest_size_bytes, [this, map_id](u64 page) {
@@ -1631,7 +1653,7 @@ void TextureCache<P>::DeleteImage(ImageId image_id, bool immediate_delete) {
ASSERT_MSG(False(image.flags & ImageFlagBits::Registered), "Image was not unregistered");
// Mark render targets as dirty
- auto& dirty = maxwell3d.dirty.flags;
+ auto& dirty = maxwell3d->dirty.flags;
dirty[Dirty::RenderTargets] = true;
dirty[Dirty::ZetaBuffer] = true;
for (size_t rt = 0; rt < NUM_RT; ++rt) {
@@ -1681,24 +1703,30 @@ void TextureCache<P>::DeleteImage(ImageId image_id, bool immediate_delete) {
if (alloc_images.empty()) {
image_allocs_table.erase(alloc_it);
}
- if constexpr (ENABLE_VALIDATION) {
- std::ranges::fill(graphics_image_view_ids, CORRUPT_ID);
- std::ranges::fill(compute_image_view_ids, CORRUPT_ID);
+ for (size_t c : active_channel_ids) {
+ auto& channel_info = channel_storage[c];
+ if constexpr (ENABLE_VALIDATION) {
+ std::ranges::fill(channel_info.graphics_image_view_ids, CORRUPT_ID);
+ std::ranges::fill(channel_info.compute_image_view_ids, CORRUPT_ID);
+ }
+ channel_info.graphics_image_table.Invalidate();
+ channel_info.compute_image_table.Invalidate();
}
- graphics_image_table.Invalidate();
- compute_image_table.Invalidate();
has_deleted_images = true;
}
template <class P>
void TextureCache<P>::RemoveImageViewReferences(std::span<const ImageViewId> removed_views) {
- auto it = image_views.begin();
- while (it != image_views.end()) {
- const auto found = std::ranges::find(removed_views, it->second);
- if (found != removed_views.end()) {
- it = image_views.erase(it);
- } else {
- ++it;
+ for (size_t c : active_channel_ids) {
+ auto& channel_info = channel_storage[c];
+ auto it = channel_info.image_views.begin();
+ while (it != channel_info.image_views.end()) {
+ const auto found = std::ranges::find(removed_views, it->second);
+ if (found != removed_views.end()) {
+ it = channel_info.image_views.erase(it);
+ } else {
+ ++it;
+ }
}
}
}
@@ -1729,6 +1757,7 @@ void TextureCache<P>::SynchronizeAliases(ImageId image_id) {
boost::container::small_vector<const AliasedImage*, 1> aliased_images;
Image& image = slot_images[image_id];
bool any_rescaled = True(image.flags & ImageFlagBits::Rescaled);
+ bool any_modified = True(image.flags & ImageFlagBits::GpuModified);
u64 most_recent_tick = image.modification_tick;
for (const AliasedImage& aliased : image.aliased_images) {
ImageBase& aliased_image = slot_images[aliased.id];
@@ -1736,9 +1765,7 @@ void TextureCache<P>::SynchronizeAliases(ImageId image_id) {
most_recent_tick = std::max(most_recent_tick, aliased_image.modification_tick);
aliased_images.push_back(&aliased);
any_rescaled |= True(aliased_image.flags & ImageFlagBits::Rescaled);
- if (True(aliased_image.flags & ImageFlagBits::GpuModified)) {
- image.flags |= ImageFlagBits::GpuModified;
- }
+ any_modified |= True(aliased_image.flags & ImageFlagBits::GpuModified);
}
}
if (aliased_images.empty()) {
@@ -1753,6 +1780,9 @@ void TextureCache<P>::SynchronizeAliases(ImageId image_id) {
}
}
image.modification_tick = most_recent_tick;
+ if (any_modified) {
+ image.flags |= ImageFlagBits::GpuModified;
+ }
std::ranges::sort(aliased_images, [this](const AliasedImage* lhs, const AliasedImage* rhs) {
const ImageBase& lhs_image = slot_images[lhs->id];
const ImageBase& rhs_image = slot_images[rhs->id];
@@ -1931,6 +1961,7 @@ std::pair<FramebufferId, ImageViewId> TextureCache<P>::RenderTargetFromImage(
.color_buffer_ids = {color_view_id},
.depth_buffer_id = depth_view_id,
.size = {extent.width >> samples_x, extent.height >> samples_y},
+ .is_rescaled = is_rescaled,
});
return {framebuffer_id, view_id};
}
@@ -1943,7 +1974,7 @@ bool TextureCache<P>::IsFullClear(ImageViewId id) {
const ImageViewBase& image_view = slot_image_views[id];
const ImageBase& image = slot_images[image_view.image_id];
const Extent3D size = image_view.size;
- const auto& regs = maxwell3d.regs;
+ const auto& regs = maxwell3d->regs;
const auto& scissor = regs.scissor_test[0];
if (image.info.resources.levels > 1 || image.info.resources.layers > 1) {
// Images with multiple resources can't be cleared in a single call
@@ -1958,4 +1989,19 @@ bool TextureCache<P>::IsFullClear(ImageViewId id) {
scissor.max_y >= size.height;
}
+template <class P>
+void TextureCache<P>::CreateChannel(struct Tegra::Control::ChannelState& channel) {
+ VideoCommon::ChannelSetupCaches<TextureCacheChannelInfo>::CreateChannel(channel);
+ const auto it = channel_map.find(channel.bind_id);
+ auto* this_state = &channel_storage[it->second];
+ const auto& this_as_ref = address_spaces[channel.memory_manager->GetID()];
+ this_state->gpu_page_table = &gpu_page_table_storage[this_as_ref.storage_id];
+}
+
+/// Bind a channel for execution.
+template <class P>
+void TextureCache<P>::OnGPUASRegister([[maybe_unused]] size_t map_id) {
+ gpu_page_table_storage.emplace_back();
+}
+
} // namespace VideoCommon
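
UnmapGPUMemory and ForEachImageInRegionGPU now take an address-space id and resolve a per-address-space page table through getStorageID, while CreateChannel points the channel's gpu_page_table at the matching slot in gpu_page_table_storage. A small sketch of that indirection; the container and function names here are assumptions:

    #include <cstddef>
    #include <cstdint>
    #include <deque>
    #include <iostream>
    #include <optional>
    #include <unordered_map>
    #include <vector>

    using ImageId = std::uint32_t;
    using PageTable = std::unordered_map<std::uint64_t, std::vector<ImageId>>;

    std::deque<PageTable> page_table_storage;                 // one table per GPU address space
    std::unordered_map<std::size_t, std::size_t> as_to_slot;  // as_id -> index into the deque

    std::optional<std::size_t> GetStorageId(std::size_t as_id) {
        const auto it = as_to_slot.find(as_id);
        if (it == as_to_slot.end()) {
            return std::nullopt;
        }
        return it->second;
    }

    void OnAddressSpaceRegister(std::size_t as_id) {
        as_to_slot.emplace(as_id, page_table_storage.size());
        page_table_storage.emplace_back();
    }

    int main() {
        OnAddressSpaceRegister(3);
        if (const auto slot = GetStorageId(3)) {
            page_table_storage[*slot][0x42].push_back(ImageId{7});
            std::cout << page_table_storage[*slot][0x42].size() << '\n'; // 1
        }
    }
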
diff --git a/src/video_core/texture_cache/texture_cache_base.h b/src/video_core/texture_cache/texture_cache_base.h
index 7e6c6cef2..2fa8445eb 100644
--- a/src/video_core/texture_cache/texture_cache_base.h
+++ b/src/video_core/texture_cache/texture_cache_base.h
@@ -1,8 +1,10 @@
-// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
#pragma once
+#include <deque>
+#include <limits>
#include <mutex>
#include <span>
#include <type_traits>
@@ -11,9 +13,11 @@
#include <queue>
#include "common/common_types.h"
+#include "common/hash.h"
#include "common/literals.h"
#include "common/lru_cache.h"
#include "video_core/compatible_formats.h"
+#include "video_core/control/channel_state_cache.h"
#include "video_core/delayed_destruction_ring.h"
#include "video_core/engines/fermi_2d.h"
#include "video_core/surface.h"
@@ -26,6 +30,10 @@
#include "video_core/texture_cache/types.h"
#include "video_core/textures/texture.h"
+namespace Tegra::Control {
+struct ChannelState;
+}
+
namespace VideoCommon {
using Tegra::Texture::SwizzleSource;
@@ -44,8 +52,35 @@ struct ImageViewInOut {
ImageViewId id{};
};
+using TextureCacheGPUMap = std::unordered_map<u64, std::vector<ImageId>, Common::IdentityHash<u64>>;
+
+class TextureCacheChannelInfo : public ChannelInfo {
+public:
+ TextureCacheChannelInfo() = delete;
+ TextureCacheChannelInfo(Tegra::Control::ChannelState& state) noexcept;
+ TextureCacheChannelInfo(const TextureCacheChannelInfo& state) = delete;
+ TextureCacheChannelInfo& operator=(const TextureCacheChannelInfo&) = delete;
+ TextureCacheChannelInfo(TextureCacheChannelInfo&& other) noexcept = default;
+ TextureCacheChannelInfo& operator=(TextureCacheChannelInfo&& other) noexcept = default;
+
+ DescriptorTable<TICEntry> graphics_image_table{gpu_memory};
+ DescriptorTable<TSCEntry> graphics_sampler_table{gpu_memory};
+ std::vector<SamplerId> graphics_sampler_ids;
+ std::vector<ImageViewId> graphics_image_view_ids;
+
+ DescriptorTable<TICEntry> compute_image_table{gpu_memory};
+ DescriptorTable<TSCEntry> compute_sampler_table{gpu_memory};
+ std::vector<SamplerId> compute_sampler_ids;
+ std::vector<ImageViewId> compute_image_view_ids;
+
+ std::unordered_map<TICEntry, ImageViewId> image_views;
+ std::unordered_map<TSCEntry, SamplerId> samplers;
+
+ TextureCacheGPUMap* gpu_page_table;
+};
+
template <class P>
-class TextureCache {
+class TextureCache : public VideoCommon::ChannelSetupCaches<TextureCacheChannelInfo> {
/// Address shift for caching images into a hash table
static constexpr u64 YUZU_PAGEBITS = 20;
@@ -58,6 +93,8 @@ class TextureCache {
/// True when the API can provide info about the memory of the device.
static constexpr bool HAS_DEVICE_MEMORY_INFO = P::HAS_DEVICE_MEMORY_INFO;
+ static constexpr size_t UNSET_CHANNEL{std::numeric_limits<size_t>::max()};
+
static constexpr s64 TARGET_THRESHOLD = 4_GiB;
static constexpr s64 DEFAULT_EXPECTED_MEMORY = 1_GiB + 125_MiB;
static constexpr s64 DEFAULT_CRITICAL_MEMORY = 1_GiB + 625_MiB;
@@ -77,16 +114,8 @@ class TextureCache {
PixelFormat src_format;
};
- template <typename T>
- struct IdentityHash {
- [[nodiscard]] size_t operator()(T value) const noexcept {
- return static_cast<size_t>(value);
- }
- };
-
public:
- explicit TextureCache(Runtime&, VideoCore::RasterizerInterface&, Tegra::Engines::Maxwell3D&,
- Tegra::Engines::KeplerCompute&, Tegra::MemoryManager&);
+ explicit TextureCache(Runtime&, VideoCore::RasterizerInterface&);
/// Notify the cache that a new frame has been queued
void TickFrame();
@@ -142,7 +171,7 @@ public:
void UnmapMemory(VAddr cpu_addr, size_t size);
/// Remove images in a region
- void UnmapGPUMemory(GPUVAddr gpu_addr, size_t size);
+ void UnmapGPUMemory(size_t as_id, GPUVAddr gpu_addr, size_t size);
/// Blit an image with the given parameters
void BlitImage(const Tegra::Engines::Fermi2D::Surface& dst,
@@ -171,6 +200,9 @@ public:
[[nodiscard]] bool IsRescaling(const ImageViewBase& image_view) const noexcept;
+ /// Create channel state.
+ void CreateChannel(Tegra::Control::ChannelState& channel) final override;
+
std::mutex mutex;
private:
@@ -205,6 +237,8 @@ private:
}
}
+ void OnGPUASRegister(size_t map_id) final override;
+
/// Runs the Garbage Collector.
void RunGarbageCollector();
@@ -273,7 +307,7 @@ private:
void ForEachImageInRegion(VAddr cpu_addr, size_t size, Func&& func);
template <typename Func>
- void ForEachImageInRegionGPU(GPUVAddr gpu_addr, size_t size, Func&& func);
+ void ForEachImageInRegionGPU(size_t as_id, GPUVAddr gpu_addr, size_t size, Func&& func);
template <typename Func>
void ForEachSparseImageInRegion(GPUVAddr gpu_addr, size_t size, Func&& func);
@@ -338,31 +372,16 @@ private:
u64 GetScaledImageSizeBytes(ImageBase& image);
Runtime& runtime;
- VideoCore::RasterizerInterface& rasterizer;
- Tegra::Engines::Maxwell3D& maxwell3d;
- Tegra::Engines::KeplerCompute& kepler_compute;
- Tegra::MemoryManager& gpu_memory;
- DescriptorTable<TICEntry> graphics_image_table{gpu_memory};
- DescriptorTable<TSCEntry> graphics_sampler_table{gpu_memory};
- std::vector<SamplerId> graphics_sampler_ids;
- std::vector<ImageViewId> graphics_image_view_ids;
-
- DescriptorTable<TICEntry> compute_image_table{gpu_memory};
- DescriptorTable<TSCEntry> compute_sampler_table{gpu_memory};
- std::vector<SamplerId> compute_sampler_ids;
- std::vector<ImageViewId> compute_image_view_ids;
+ VideoCore::RasterizerInterface& rasterizer;
+ std::deque<TextureCacheGPUMap> gpu_page_table_storage;
RenderTargets render_targets;
- std::unordered_map<TICEntry, ImageViewId> image_views;
- std::unordered_map<TSCEntry, SamplerId> samplers;
std::unordered_map<RenderTargets, FramebufferId> framebuffers;
- std::unordered_map<u64, std::vector<ImageMapId>, IdentityHash<u64>> page_table;
- std::unordered_map<u64, std::vector<ImageId>, IdentityHash<u64>> gpu_page_table;
- std::unordered_map<u64, std::vector<ImageId>, IdentityHash<u64>> sparse_page_table;
-
+ std::unordered_map<u64, std::vector<ImageMapId>, Common::IdentityHash<u64>> page_table;
+ std::unordered_map<u64, std::vector<ImageId>, Common::IdentityHash<u64>> sparse_page_table;
std::unordered_map<ImageId, std::vector<ImageViewId>> sparse_views;
VAddr virtual_invalid_space{};
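
The page tables switch to Common::IdentityHash, replacing the local IdentityHash template that the hunk above removes; the keys are already page indices, so passing them through unchanged works as a hash. A sketch of such a functor in use:

    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <unordered_map>
    #include <vector>

    // Identity hash: pass the integral key straight through.
    template <typename T>
    struct IdentityHash {
        [[nodiscard]] std::size_t operator()(T value) const noexcept {
            return static_cast<std::size_t>(value);
        }
    };

    int main() {
        std::unordered_map<std::uint64_t, std::vector<int>, IdentityHash<std::uint64_t>> table;
        table[0xCAFE].push_back(1);
        std::cout << table.size() << '\n'; // 1
    }
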
diff --git a/src/video_core/texture_cache/util.cpp b/src/video_core/texture_cache/util.cpp
index 1820823b2..1223df5a0 100644
--- a/src/video_core/texture_cache/util.cpp
+++ b/src/video_core/texture_cache/util.cpp
@@ -517,7 +517,6 @@ void SwizzleBlockLinearImage(Tegra::MemoryManager& gpu_memory, GPUVAddr gpu_addr
const u32 host_bytes_per_layer = num_blocks_per_layer * bytes_per_block;
UNIMPLEMENTED_IF(info.tile_width_spacing > 0);
-
UNIMPLEMENTED_IF(copy.image_offset.x != 0);
UNIMPLEMENTED_IF(copy.image_offset.y != 0);
UNIMPLEMENTED_IF(copy.image_offset.z != 0);
@@ -755,7 +754,7 @@ bool IsValidEntry(const Tegra::MemoryManager& gpu_memory, const TICEntry& config
if (address == 0) {
return false;
}
- if (address > (1ULL << 48)) {
+ if (address >= (1ULL << 40)) {
return false;
}
if (gpu_memory.GpuToCpuAddress(address).has_value()) {
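
IsValidEntry now rejects addresses at or above the 40-bit GPU virtual address limit rather than only those above 2^48. A standalone restatement of the check:

    #include <cstdint>
    #include <iostream>

    // Reject null and anything outside a 40-bit GPU virtual address space.
    constexpr bool IsPlausibleGpuAddress(std::uint64_t address) {
        return address != 0 && address < (std::uint64_t{1} << 40);
    }

    int main() {
        std::cout << IsPlausibleGpuAddress(0) << '\n';                      // 0
        std::cout << IsPlausibleGpuAddress(std::uint64_t{1} << 40) << '\n'; // 0: exactly the limit
        std::cout << IsPlausibleGpuAddress(0x12345678) << '\n';             // 1
    }
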
diff --git a/src/video_core/textures/decoders.cpp b/src/video_core/textures/decoders.cpp
index 913f8ebcb..52d067a2d 100644
--- a/src/video_core/textures/decoders.cpp
+++ b/src/video_core/textures/decoders.cpp
@@ -35,7 +35,7 @@ void incrpdep(u32& value) {
template <bool TO_LINEAR, u32 BYTES_PER_PIXEL>
void SwizzleImpl(std::span<u8> output, std::span<const u8> input, u32 width, u32 height, u32 depth,
- u32 block_height, u32 block_depth, u32 stride_alignment) {
+ u32 block_height, u32 block_depth, u32 stride) {
// The origin of the transformation can be configured here, leave it as zero as the current API
// doesn't expose it.
static constexpr u32 origin_x = 0;
@@ -45,7 +45,6 @@ void SwizzleImpl(std::span<u8> output, std::span<const u8> input, u32 width, u32
// We can configure here a custom pitch
// As it's not exposed 'width * BYTES_PER_PIXEL' will be the expected pitch.
const u32 pitch = width * BYTES_PER_PIXEL;
- const u32 stride = Common::AlignUpLog2(width, stride_alignment) * BYTES_PER_PIXEL;
const u32 gobs_in_x = Common::DivCeilLog2(stride, GOB_SIZE_X_SHIFT);
const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height + block_depth);
@@ -89,6 +88,69 @@ void SwizzleImpl(std::span<u8> output, std::span<const u8> input, u32 width, u32
}
}
+template <bool TO_LINEAR, u32 BYTES_PER_PIXEL>
+void SwizzleSubrectImpl(std::span<u8> output, std::span<const u8> input, u32 width, u32 height,
+ u32 depth, u32 origin_x, u32 origin_y, u32 extent_x, u32 num_lines,
+ u32 block_height, u32 block_depth, u32 pitch_linear) {
+ // The origin of the transformation can be configured here, leave it as zero as the current API
+ // doesn't expose it.
+ static constexpr u32 origin_z = 0;
+
+ // We can configure here a custom pitch
+ // As it's not exposed 'width * BYTES_PER_PIXEL' will be the expected pitch.
+ const u32 pitch = pitch_linear;
+ const u32 stride = Common::AlignUpLog2(width * BYTES_PER_PIXEL, GOB_SIZE_X_SHIFT);
+
+ const u32 gobs_in_x = Common::DivCeilLog2(stride, GOB_SIZE_X_SHIFT);
+ const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height + block_depth);
+ const u32 slice_size =
+ Common::DivCeilLog2(height, block_height + GOB_SIZE_Y_SHIFT) * block_size;
+
+ const u32 block_height_mask = (1U << block_height) - 1;
+ const u32 block_depth_mask = (1U << block_depth) - 1;
+ const u32 x_shift = GOB_SIZE_SHIFT + block_height + block_depth;
+
+ u32 unprocessed_lines = num_lines;
+ u32 extent_y = std::min(num_lines, height - origin_y);
+
+ for (u32 slice = 0; slice < depth; ++slice) {
+ const u32 z = slice + origin_z;
+ const u32 offset_z = (z >> block_depth) * slice_size +
+ ((z & block_depth_mask) << (GOB_SIZE_SHIFT + block_height));
+ const u32 lines_in_y = std::min(unprocessed_lines, extent_y);
+ for (u32 line = 0; line < lines_in_y; ++line) {
+ const u32 y = line + origin_y;
+ const u32 swizzled_y = pdep<SWIZZLE_Y_BITS>(y);
+
+ const u32 block_y = y >> GOB_SIZE_Y_SHIFT;
+ const u32 offset_y = (block_y >> block_height) * block_size +
+ ((block_y & block_height_mask) << GOB_SIZE_SHIFT);
+
+ u32 swizzled_x = pdep<SWIZZLE_X_BITS>(origin_x * BYTES_PER_PIXEL);
+ for (u32 column = 0; column < extent_x;
+ ++column, incrpdep<SWIZZLE_X_BITS, BYTES_PER_PIXEL>(swizzled_x)) {
+ const u32 x = (column + origin_x) * BYTES_PER_PIXEL;
+ const u32 offset_x = (x >> GOB_SIZE_X_SHIFT) << x_shift;
+
+ const u32 base_swizzled_offset = offset_z + offset_y + offset_x;
+ const u32 swizzled_offset = base_swizzled_offset + (swizzled_x | swizzled_y);
+
+ const u32 unswizzled_offset =
+ slice * pitch * height + line * pitch + column * BYTES_PER_PIXEL;
+
+ u8* const dst = &output[TO_LINEAR ? swizzled_offset : unswizzled_offset];
+ const u8* const src = &input[TO_LINEAR ? unswizzled_offset : swizzled_offset];
+
+ std::memcpy(dst, src, BYTES_PER_PIXEL);
+ }
+ }
+ unprocessed_lines -= lines_in_y;
+ if (unprocessed_lines == 0) {
+ return;
+ }
+ }
+}
+
template <bool TO_LINEAR>
void Swizzle(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, u32 width,
u32 height, u32 depth, u32 block_height, u32 block_depth, u32 stride_alignment) {
@@ -111,122 +173,39 @@ void Swizzle(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixe
}
}
-template <u32 BYTES_PER_PIXEL>
-void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width,
- u8* swizzled_data, const u8* unswizzled_data, u32 block_height_bit,
- u32 offset_x, u32 offset_y) {
- const u32 block_height = 1U << block_height_bit;
- const u32 image_width_in_gobs =
- (swizzled_width * BYTES_PER_PIXEL + (GOB_SIZE_X - 1)) / GOB_SIZE_X;
- for (u32 line = 0; line < subrect_height; ++line) {
- const u32 dst_y = line + offset_y;
- const u32 gob_address_y =
- (dst_y / (GOB_SIZE_Y * block_height)) * GOB_SIZE * block_height * image_width_in_gobs +
- ((dst_y % (GOB_SIZE_Y * block_height)) / GOB_SIZE_Y) * GOB_SIZE;
-
- const u32 swizzled_y = pdep<SWIZZLE_Y_BITS>(dst_y);
- u32 swizzled_x = pdep<SWIZZLE_X_BITS>(offset_x * BYTES_PER_PIXEL);
- for (u32 x = 0; x < subrect_width;
- ++x, incrpdep<SWIZZLE_X_BITS, BYTES_PER_PIXEL>(swizzled_x)) {
- const u32 dst_x = x + offset_x;
- const u32 gob_address =
- gob_address_y + (dst_x * BYTES_PER_PIXEL / GOB_SIZE_X) * GOB_SIZE * block_height;
- const u32 swizzled_offset = gob_address + (swizzled_x | swizzled_y);
- const u32 unswizzled_offset = line * source_pitch + x * BYTES_PER_PIXEL;
-
- const u8* const source_line = unswizzled_data + unswizzled_offset;
- u8* const dest_addr = swizzled_data + swizzled_offset;
- std::memcpy(dest_addr, source_line, BYTES_PER_PIXEL);
- }
- }
-}
-
-template <u32 BYTES_PER_PIXEL>
-void UnswizzleSubrect(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 block_height,
- u32 origin_x, u32 origin_y, u8* output, const u8* input) {
- const u32 stride = width * BYTES_PER_PIXEL;
- const u32 gobs_in_x = (stride + GOB_SIZE_X - 1) / GOB_SIZE_X;
- const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height);
-
- const u32 block_height_mask = (1U << block_height) - 1;
- const u32 x_shift = GOB_SIZE_SHIFT + block_height;
-
- for (u32 line = 0; line < line_count; ++line) {
- const u32 src_y = line + origin_y;
- const u32 swizzled_y = pdep<SWIZZLE_Y_BITS>(src_y);
-
- const u32 block_y = src_y >> GOB_SIZE_Y_SHIFT;
- const u32 src_offset_y = (block_y >> block_height) * block_size +
- ((block_y & block_height_mask) << GOB_SIZE_SHIFT);
-
- u32 swizzled_x = pdep<SWIZZLE_X_BITS>(origin_x * BYTES_PER_PIXEL);
- for (u32 column = 0; column < line_length_in;
- ++column, incrpdep<SWIZZLE_X_BITS, BYTES_PER_PIXEL>(swizzled_x)) {
- const u32 src_x = (column + origin_x) * BYTES_PER_PIXEL;
- const u32 src_offset_x = (src_x >> GOB_SIZE_X_SHIFT) << x_shift;
-
- const u32 swizzled_offset = src_offset_y + src_offset_x + (swizzled_x | swizzled_y);
- const u32 unswizzled_offset = line * pitch + column * BYTES_PER_PIXEL;
-
- std::memcpy(output + unswizzled_offset, input + swizzled_offset, BYTES_PER_PIXEL);
- }
- }
-}
-
-template <u32 BYTES_PER_PIXEL>
-void SwizzleSliceToVoxel(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 height,
- u32 block_height, u32 block_depth, u32 origin_x, u32 origin_y, u8* output,
- const u8* input) {
- UNIMPLEMENTED_IF(origin_x > 0);
- UNIMPLEMENTED_IF(origin_y > 0);
-
- const u32 stride = width * BYTES_PER_PIXEL;
- const u32 gobs_in_x = (stride + GOB_SIZE_X - 1) / GOB_SIZE_X;
- const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height + block_depth);
-
- const u32 block_height_mask = (1U << block_height) - 1;
- const u32 x_shift = static_cast<u32>(GOB_SIZE_SHIFT) + block_height + block_depth;
-
- for (u32 line = 0; line < line_count; ++line) {
- const u32 swizzled_y = pdep<SWIZZLE_Y_BITS>(line);
- const u32 block_y = line / GOB_SIZE_Y;
- const u32 dst_offset_y =
- (block_y >> block_height) * block_size + (block_y & block_height_mask) * GOB_SIZE;
-
- u32 swizzled_x = 0;
- for (u32 x = 0; x < line_length_in; ++x, incrpdep<SWIZZLE_X_BITS, 1>(swizzled_x)) {
- const u32 dst_offset =
- ((x / GOB_SIZE_X) << x_shift) + dst_offset_y + (swizzled_x | swizzled_y);
- const u32 src_offset = x * BYTES_PER_PIXEL + line * pitch;
- std::memcpy(output + dst_offset, input + src_offset, BYTES_PER_PIXEL);
- }
- }
-}
} // Anonymous namespace
void UnswizzleTexture(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel,
u32 width, u32 height, u32 depth, u32 block_height, u32 block_depth,
u32 stride_alignment) {
+ const u32 stride = Common::AlignUpLog2(width, stride_alignment) * bytes_per_pixel;
+ const u32 new_bpp = std::min(4U, static_cast<u32>(std::countr_zero(width * bytes_per_pixel)));
+ width = (width * bytes_per_pixel) >> new_bpp;
+ bytes_per_pixel = 1U << new_bpp;
Swizzle<false>(output, input, bytes_per_pixel, width, height, depth, block_height, block_depth,
- stride_alignment);
+ stride);
}
void SwizzleTexture(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, u32 width,
u32 height, u32 depth, u32 block_height, u32 block_depth,
u32 stride_alignment) {
+ const u32 stride = Common::AlignUpLog2(width, stride_alignment) * bytes_per_pixel;
+ const u32 new_bpp = std::min(4U, static_cast<u32>(std::countr_zero(width * bytes_per_pixel)));
+ width = (width * bytes_per_pixel) >> new_bpp;
+ bytes_per_pixel = 1U << new_bpp;
Swizzle<true>(output, input, bytes_per_pixel, width, height, depth, block_height, block_depth,
- stride_alignment);
+ stride);
}
-void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width,
- u32 bytes_per_pixel, u8* swizzled_data, const u8* unswizzled_data,
- u32 block_height_bit, u32 offset_x, u32 offset_y) {
+void SwizzleSubrect(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, u32 width,
+ u32 height, u32 depth, u32 origin_x, u32 origin_y, u32 extent_x, u32 extent_y,
+ u32 block_height, u32 block_depth, u32 pitch_linear) {
switch (bytes_per_pixel) {
#define BPP_CASE(x) \
case x: \
- return SwizzleSubrect<x>(subrect_width, subrect_height, source_pitch, swizzled_width, \
- swizzled_data, unswizzled_data, block_height_bit, offset_x, \
- offset_y);
+ return SwizzleSubrectImpl<true, x>(output, input, width, height, depth, origin_x, \
+ origin_y, extent_x, extent_y, block_height, \
+ block_depth, pitch_linear);
BPP_CASE(1)
BPP_CASE(2)
BPP_CASE(3)
@@ -241,13 +220,15 @@ void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32
}
}
-void UnswizzleSubrect(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 bytes_per_pixel,
- u32 block_height, u32 origin_x, u32 origin_y, u8* output, const u8* input) {
+void UnswizzleSubrect(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel,
+ u32 width, u32 height, u32 depth, u32 origin_x, u32 origin_y, u32 extent_x,
+ u32 extent_y, u32 block_height, u32 block_depth, u32 pitch_linear) {
switch (bytes_per_pixel) {
#define BPP_CASE(x) \
case x: \
- return UnswizzleSubrect<x>(line_length_in, line_count, pitch, width, block_height, \
- origin_x, origin_y, output, input);
+ return SwizzleSubrectImpl<false, x>(output, input, width, height, depth, origin_x, \
+ origin_y, extent_x, extent_y, block_height, \
+ block_depth, pitch_linear);
BPP_CASE(1)
BPP_CASE(2)
BPP_CASE(3)
@@ -262,55 +243,6 @@ void UnswizzleSubrect(u32 line_length_in, u32 line_count, u32 pitch, u32 width,
}
}
-void SwizzleSliceToVoxel(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 height,
- u32 bytes_per_pixel, u32 block_height, u32 block_depth, u32 origin_x,
- u32 origin_y, u8* output, const u8* input) {
- switch (bytes_per_pixel) {
-#define BPP_CASE(x) \
- case x: \
- return SwizzleSliceToVoxel<x>(line_length_in, line_count, pitch, width, height, \
- block_height, block_depth, origin_x, origin_y, output, \
- input);
- BPP_CASE(1)
- BPP_CASE(2)
- BPP_CASE(3)
- BPP_CASE(4)
- BPP_CASE(6)
- BPP_CASE(8)
- BPP_CASE(12)
- BPP_CASE(16)
-#undef BPP_CASE
- default:
- ASSERT_MSG(false, "Invalid bytes_per_pixel={}", bytes_per_pixel);
- }
-}
-
-void SwizzleKepler(const u32 width, const u32 height, const u32 dst_x, const u32 dst_y,
- const u32 block_height_bit, const std::size_t copy_size, const u8* source_data,
- u8* swizzle_data) {
- const u32 block_height = 1U << block_height_bit;
- const u32 image_width_in_gobs{(width + GOB_SIZE_X - 1) / GOB_SIZE_X};
- std::size_t count = 0;
- for (std::size_t y = dst_y; y < height && count < copy_size; ++y) {
- const std::size_t gob_address_y =
- (y / (GOB_SIZE_Y * block_height)) * GOB_SIZE * block_height * image_width_in_gobs +
- ((y % (GOB_SIZE_Y * block_height)) / GOB_SIZE_Y) * GOB_SIZE;
- const u32 swizzled_y = pdep<SWIZZLE_Y_BITS>(static_cast<u32>(y));
- u32 swizzled_x = pdep<SWIZZLE_X_BITS>(dst_x);
- for (std::size_t x = dst_x; x < width && count < copy_size;
- ++x, incrpdep<SWIZZLE_X_BITS, 1>(swizzled_x)) {
- const std::size_t gob_address =
- gob_address_y + (x / GOB_SIZE_X) * GOB_SIZE * block_height;
- const std::size_t swizzled_offset = gob_address + (swizzled_x | swizzled_y);
- const u8* source_line = source_data + count;
- u8* dest_addr = swizzle_data + swizzled_offset;
- count++;
-
- *dest_addr = *source_line;
- }
- }
-}
-
std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height, u32 depth,
u32 block_height, u32 block_depth) {
if (tiled) {
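
UnswizzleTexture and SwizzleTexture now compute the row stride up front and then fold bytes_per_pixel into width, choosing the widest power-of-two element (at most 16 bytes) that divides the row in bytes, so SwizzleImpl copies fewer, larger elements. A worked restatement of that folding:

    #include <algorithm>
    #include <bit>      // std::countr_zero (C++20)
    #include <cstdint>
    #include <iostream>

    // Fold bytes_per_pixel into width, picking the largest power-of-two element
    // size (at most 16 bytes) that evenly divides the row in bytes.
    void FoldBpp(std::uint32_t& width, std::uint32_t& bytes_per_pixel) {
        const std::uint32_t row_bytes = width * bytes_per_pixel;
        const std::uint32_t new_bpp_log2 =
            std::min(4U, static_cast<std::uint32_t>(std::countr_zero(row_bytes)));
        width = row_bytes >> new_bpp_log2;
        bytes_per_pixel = 1U << new_bpp_log2;
    }

    int main() {
        std::uint32_t width = 100, bpp = 4; // 400-byte rows
        FoldBpp(width, bpp);
        std::cout << width << " x " << bpp << '\n'; // 25 x 16: copy 25 16-byte elements per row
    }
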
diff --git a/src/video_core/textures/decoders.h b/src/video_core/textures/decoders.h
index 31a11708f..e70407692 100644
--- a/src/video_core/textures/decoders.h
+++ b/src/video_core/textures/decoders.h
@@ -40,7 +40,6 @@ constexpr SwizzleTable MakeSwizzleTable() {
}
return table;
}
-constexpr SwizzleTable SWIZZLE_TABLE = MakeSwizzleTable();
/// Unswizzles a block linear texture into linear memory.
void UnswizzleTexture(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel,
@@ -57,34 +56,14 @@ std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height
u32 block_height, u32 block_depth);
/// Copies an untiled subrectangle into a tiled surface.
-void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width,
- u32 bytes_per_pixel, u8* swizzled_data, const u8* unswizzled_data,
- u32 block_height_bit, u32 offset_x, u32 offset_y);
+void SwizzleSubrect(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, u32 width,
+ u32 height, u32 depth, u32 origin_x, u32 origin_y, u32 extent_x, u32 extent_y,
+ u32 block_height, u32 block_depth, u32 pitch_linear);
/// Copies a tiled subrectangle into a linear surface.
-void UnswizzleSubrect(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 bytes_per_pixel,
- u32 block_height, u32 origin_x, u32 origin_y, u8* output, const u8* input);
-
-/// @brief Swizzles a 2D array of pixels into a 3D texture
-/// @param line_length_in Number of pixels per line
-/// @param line_count Number of lines
-/// @param pitch Number of bytes per line
-/// @param width Width of the swizzled texture
-/// @param height Height of the swizzled texture
-/// @param bytes_per_pixel Number of bytes used per pixel
-/// @param block_height Block height shift
-/// @param block_depth Block depth shift
-/// @param origin_x Column offset in pixels of the swizzled texture
-/// @param origin_y Row offset in pixels of the swizzled texture
-/// @param output Pointer to the pixels of the swizzled texture
-/// @param input Pointer to the 2D array of pixels used as input
-/// @pre input and output points to an array large enough to hold the number of bytes used
-void SwizzleSliceToVoxel(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 height,
- u32 bytes_per_pixel, u32 block_height, u32 block_depth, u32 origin_x,
- u32 origin_y, u8* output, const u8* input);
-
-void SwizzleKepler(u32 width, u32 height, u32 dst_x, u32 dst_y, u32 block_height,
- std::size_t copy_size, const u8* source_data, u8* swizzle_data);
+void UnswizzleSubrect(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel,
+ u32 width, u32 height, u32 depth, u32 origin_x, u32 origin_y, u32 extent_x,
+ u32 extent_y, u32 block_height, u32 block_depth, u32 pitch_linear);
/// Obtains the offset of the gob for positions 'dst_x' & 'dst_y'
u64 GetGOBOffset(u32 width, u32 height, u32 dst_x, u32 dst_y, u32 block_height,
diff --git a/src/video_core/vulkan_common/vulkan_wrapper.h b/src/video_core/vulkan_common/vulkan_wrapper.h
index 795f16bfb..1b3f493bd 100644
--- a/src/video_core/vulkan_common/vulkan_wrapper.h
+++ b/src/video_core/vulkan_common/vulkan_wrapper.h
@@ -519,9 +519,7 @@ public:
dld{rhs.dld} {}
/// Assign an allocation transfering ownership from another allocation.
- /// Releases any previously held allocation.
PoolAllocations& operator=(PoolAllocations&& rhs) noexcept {
- Release();
allocations = std::move(rhs.allocations);
num = rhs.num;
device = rhs.device;
@@ -530,11 +528,6 @@ public:
return *this;
}
- /// Destroys any held allocation.
- ~PoolAllocations() {
- Release();
- }
-
/// Returns the number of allocations.
std::size_t size() const noexcept {
return num;
@@ -557,19 +550,6 @@ public:
}
private:
- /// Destroys the held allocations if they exist.
- void Release() noexcept {
- if (!allocations) {
- return;
- }
- const Span<AllocationType> span(allocations.get(), num);
- const VkResult result = Free(device, pool, span, *dld);
- // There's no way to report errors from a destructor.
- if (result != VK_SUCCESS) {
- std::terminate();
- }
- }
-
std::unique_ptr<AllocationType[]> allocations;
std::size_t num = 0;
VkDevice device = nullptr;
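
With Release() removed from both the move assignment and the destructor, PoolAllocations no longer frees its handles itself; presumably the owning pool reclaims them on destruction, and freeing here as well could release the same allocation twice. A simplified sketch of the resulting move-only shape (stand-in types, not the Vulkan wrappers):

    #include <cstddef>
    #include <cstdint>
    #include <memory>
    #include <utility>

    // Simplified stand-in for a block of pool-owned handles: the wrapper moves
    // them around but never frees them; destroying the pool reclaims them.
    struct PoolAllocationsSketch {
        PoolAllocationsSketch() = default;
        PoolAllocationsSketch(PoolAllocationsSketch&&) noexcept = default;

        PoolAllocationsSketch& operator=(PoolAllocationsSketch&& rhs) noexcept {
            // No Release() here: overwriting the members is enough.
            handles = std::move(rhs.handles);
            num = std::exchange(rhs.num, 0);
            return *this;
        }
        // No user-written destructor either; unique_ptr only frees the array
        // memory, not the pool objects the handles refer to.

        std::unique_ptr<std::uint64_t[]> handles;
        std::size_t num = 0;
    };

    int main() {
        PoolAllocationsSketch a, b;
        a = std::move(b);
        return static_cast<int>(a.num);
    }
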
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp
index c63ce3a30..4146ebc2c 100644
--- a/src/yuzu/main.cpp
+++ b/src/yuzu/main.cpp
@@ -3,6 +3,7 @@
#include <cinttypes>
#include <clocale>
+#include <cmath>
#include <memory>
#include <thread>
#ifdef __APPLE__
@@ -3451,9 +3452,10 @@ void GMainWindow::UpdateStatusBar() {
}
if (!Settings::values.use_speed_limit) {
game_fps_label->setText(
- tr("Game: %1 FPS (Unlocked)").arg(results.average_game_fps, 0, 'f', 0));
+ tr("Game: %1 FPS (Unlocked)").arg(std::round(results.average_game_fps), 0, 'f', 0));
} else {
- game_fps_label->setText(tr("Game: %1 FPS").arg(results.average_game_fps, 0, 'f', 0));
+ game_fps_label->setText(
+ tr("Game: %1 FPS").arg(std::round(results.average_game_fps), 0, 'f', 0));
}
emu_frametime_label->setText(tr("Frame: %1 ms").arg(results.frametime * 1000.0, 0, 'f', 2));
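
The status bar now rounds the average FPS to the nearest integer before handing it to the formatter. A Qt-free sketch of the numeric difference between truncating and rounding a value just below the next integer:

    #include <cmath>
    #include <cstdio>

    int main() {
        const double average_game_fps = 59.7;
        // Truncation (what a plain integer cast would do) vs. rounding first.
        std::printf("truncated: %d FPS\n", static_cast<int>(average_game_fps)); // 59
        std::printf("rounded:   %.0f FPS\n", std::round(average_game_fps));     // 60
    }
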