Diffstat (limited to 'src/video_core')
-rw-r--r-- src/video_core/CMakeLists.txt | 25
-rw-r--r-- src/video_core/buffer_cache/buffer_block.h | 76
-rw-r--r-- src/video_core/buffer_cache/buffer_cache.h | 447
-rw-r--r-- src/video_core/buffer_cache/map_interval.h | 89
-rw-r--r-- src/video_core/dma_pusher.cpp | 3
-rw-r--r-- src/video_core/engines/const_buffer_info.h | 17
-rw-r--r-- src/video_core/engines/engine_upload.cpp | 6
-rw-r--r-- src/video_core/engines/engine_upload.h | 6
-rw-r--r-- src/video_core/engines/fermi_2d.cpp | 28
-rw-r--r-- src/video_core/engines/fermi_2d.h | 56
-rw-r--r-- src/video_core/engines/kepler_compute.cpp | 62
-rw-r--r-- src/video_core/engines/kepler_compute.h | 23
-rw-r--r-- src/video_core/engines/kepler_memory.cpp | 4
-rw-r--r-- src/video_core/engines/kepler_memory.h | 1
-rw-r--r-- src/video_core/engines/maxwell_3d.cpp | 362
-rw-r--r-- src/video_core/engines/maxwell_3d.h | 157
-rw-r--r-- src/video_core/engines/maxwell_dma.cpp | 54
-rw-r--r-- src/video_core/engines/maxwell_dma.h | 13
-rw-r--r-- src/video_core/engines/shader_bytecode.h | 218
-rw-r--r-- src/video_core/gpu.cpp | 98
-rw-r--r-- src/video_core/gpu.h | 64
-rw-r--r-- src/video_core/gpu_asynch.cpp | 14
-rw-r--r-- src/video_core/gpu_asynch.h | 8
-rw-r--r-- src/video_core/gpu_synch.cpp | 7
-rw-r--r-- src/video_core/gpu_synch.h | 9
-rw-r--r-- src/video_core/gpu_thread.cpp | 35
-rw-r--r-- src/video_core/gpu_thread.h | 35
-rw-r--r-- src/video_core/macro_interpreter.cpp | 26
-rw-r--r-- src/video_core/macro_interpreter.h | 8
-rw-r--r-- src/video_core/memory_manager.cpp | 29
-rw-r--r-- src/video_core/memory_manager.h | 12
-rw-r--r-- src/video_core/morton.cpp | 116
-rw-r--r-- src/video_core/morton.h | 3
-rw-r--r-- src/video_core/rasterizer_cache.h | 3
-rw-r--r-- src/video_core/rasterizer_interface.h | 16
-rw-r--r-- src/video_core/renderer_base.h | 3
-rw-r--r-- src/video_core/renderer_opengl/gl_buffer_cache.cpp | 122
-rw-r--r-- src/video_core/renderer_opengl/gl_buffer_cache.h | 77
-rw-r--r-- src/video_core/renderer_opengl/gl_device.cpp | 52
-rw-r--r-- src/video_core/renderer_opengl/gl_device.h | 21
-rw-r--r-- src/video_core/renderer_opengl/gl_framebuffer_cache.cpp | 75
-rw-r--r-- src/video_core/renderer_opengl/gl_framebuffer_cache.h | 68
-rw-r--r-- src/video_core/renderer_opengl/gl_global_cache.cpp | 101
-rw-r--r-- src/video_core/renderer_opengl/gl_global_cache.h | 82
-rw-r--r-- src/video_core/renderer_opengl/gl_rasterizer.cpp | 725
-rw-r--r-- src/video_core/renderer_opengl/gl_rasterizer.h | 99
-rw-r--r-- src/video_core/renderer_opengl/gl_rasterizer_cache.cpp | 1362
-rw-r--r-- src/video_core/renderer_opengl/gl_rasterizer_cache.h | 572
-rw-r--r-- src/video_core/renderer_opengl/gl_resource_manager.cpp | 24
-rw-r--r-- src/video_core/renderer_opengl/gl_resource_manager.h | 28
-rw-r--r-- src/video_core/renderer_opengl/gl_sampler_cache.h | 4
-rw-r--r-- src/video_core/renderer_opengl/gl_shader_cache.cpp | 357
-rw-r--r-- src/video_core/renderer_opengl/gl_shader_cache.h | 70
-rw-r--r-- src/video_core/renderer_opengl/gl_shader_decompiler.cpp | 1393
-rw-r--r-- src/video_core/renderer_opengl/gl_shader_decompiler.h | 25
-rw-r--r-- src/video_core/renderer_opengl/gl_shader_disk_cache.cpp | 63
-rw-r--r-- src/video_core/renderer_opengl/gl_shader_disk_cache.h | 97
-rw-r--r-- src/video_core/renderer_opengl/gl_shader_gen.cpp | 45
-rw-r--r-- src/video_core/renderer_opengl/gl_shader_gen.h | 5
-rw-r--r-- src/video_core/renderer_opengl/gl_shader_util.cpp | 24
-rw-r--r-- src/video_core/renderer_opengl/gl_state.cpp | 134
-rw-r--r-- src/video_core/renderer_opengl/gl_state.h | 62
-rw-r--r-- src/video_core/renderer_opengl/gl_stream_buffer.cpp | 5
-rw-r--r-- src/video_core/renderer_opengl/gl_stream_buffer.h | 3
-rw-r--r-- src/video_core/renderer_opengl/gl_texture_cache.cpp | 624
-rw-r--r-- src/video_core/renderer_opengl/gl_texture_cache.h | 147
-rw-r--r-- src/video_core/renderer_opengl/maxwell_to_gl.h | 4
-rw-r--r-- src/video_core/renderer_opengl/renderer_opengl.cpp | 117
-rw-r--r-- src/video_core/renderer_opengl/renderer_opengl.h | 8
-rw-r--r-- src/video_core/renderer_opengl/utils.cpp | 52
-rw-r--r-- src/video_core/renderer_opengl/utils.h | 43
-rw-r--r-- src/video_core/renderer_vulkan/vk_buffer_cache.cpp | 4
-rw-r--r-- src/video_core/renderer_vulkan/vk_buffer_cache.h | 2
-rw-r--r-- src/video_core/renderer_vulkan/vk_device.cpp | 301
-rw-r--r-- src/video_core/renderer_vulkan/vk_device.h | 62
-rw-r--r-- src/video_core/renderer_vulkan/vk_sampler_cache.h | 7
-rw-r--r-- src/video_core/renderer_vulkan/vk_scheduler.cpp | 16
-rw-r--r-- src/video_core/renderer_vulkan/vk_scheduler.h | 78
-rw-r--r-- src/video_core/renderer_vulkan/vk_shader_decompiler.cpp | 237
-rw-r--r-- src/video_core/renderer_vulkan/vk_stream_buffer.cpp | 8
-rw-r--r-- src/video_core/renderer_vulkan/vk_stream_buffer.h | 2
-rw-r--r-- src/video_core/shader/control_flow.cpp | 481
-rw-r--r-- src/video_core/shader/control_flow.h | 79
-rw-r--r-- src/video_core/shader/decode.cpp | 179
-rw-r--r-- src/video_core/shader/decode/arithmetic.cpp | 13
-rw-r--r-- src/video_core/shader/decode/arithmetic_half_immediate.cpp | 4
-rw-r--r-- src/video_core/shader/decode/conversion.cpp | 44
-rw-r--r-- src/video_core/shader/decode/decode_integer_set.cpp | 0
-rw-r--r-- src/video_core/shader/decode/ffma.cpp | 10
-rw-r--r-- src/video_core/shader/decode/float_set.cpp | 1
-rw-r--r-- src/video_core/shader/decode/float_set_predicate.cpp | 10
-rw-r--r-- src/video_core/shader/decode/half_set_predicate.cpp | 70
-rw-r--r-- src/video_core/shader/decode/hfma2.cpp | 4
-rw-r--r-- src/video_core/shader/decode/image.cpp | 164
-rw-r--r-- src/video_core/shader/decode/integer_set.cpp | 1
-rw-r--r-- src/video_core/shader/decode/integer_set_predicate.cpp | 1
-rw-r--r-- src/video_core/shader/decode/memory.cpp | 83
-rw-r--r-- src/video_core/shader/decode/other.cpp | 89
-rw-r--r-- src/video_core/shader/decode/predicate_set_register.cpp | 1
-rw-r--r-- src/video_core/shader/decode/shift.cpp | 19
-rw-r--r-- src/video_core/shader/decode/texture.cpp | 87
-rw-r--r-- src/video_core/shader/decode/warp.cpp | 102
-rw-r--r-- src/video_core/shader/decode/xmad.cpp | 12
-rw-r--r-- src/video_core/shader/node.h | 174
-rw-r--r-- src/video_core/shader/node_helper.cpp | 2
-rw-r--r-- src/video_core/shader/shader_ir.cpp | 150
-rw-r--r-- src/video_core/shader/shader_ir.h | 85
-rw-r--r-- src/video_core/shader/track.cpp | 37
-rw-r--r-- src/video_core/surface.cpp | 33
-rw-r--r-- src/video_core/surface.h | 227
-rw-r--r-- src/video_core/texture_cache.cpp | 386
-rw-r--r-- src/video_core/texture_cache.h | 586
-rw-r--r-- src/video_core/texture_cache/copy_params.h | 36
-rw-r--r-- src/video_core/texture_cache/surface_base.cpp | 302
-rw-r--r-- src/video_core/texture_cache/surface_base.h | 325
-rw-r--r-- src/video_core/texture_cache/surface_params.cpp | 389
-rw-r--r-- src/video_core/texture_cache/surface_params.h | 286
-rw-r--r-- src/video_core/texture_cache/surface_view.cpp | 23
-rw-r--r-- src/video_core/texture_cache/surface_view.h | 67
-rw-r--r-- src/video_core/texture_cache/texture_cache.h | 835
-rw-r--r-- src/video_core/textures/convert.cpp | 14
-rw-r--r-- src/video_core/textures/convert.h | 7
-rw-r--r-- src/video_core/textures/decoders.cpp | 54
-rw-r--r-- src/video_core/textures/decoders.h | 7
-rw-r--r-- src/video_core/textures/texture.h | 31
125 files changed, 9590 insertions, 5460 deletions
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index 2d4caa08d..e2f85c5f1 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -1,8 +1,12 @@
add_library(video_core STATIC
+ buffer_cache/buffer_block.h
+ buffer_cache/buffer_cache.h
+ buffer_cache/map_interval.h
dma_pusher.cpp
dma_pusher.h
debug_utils/debug_utils.cpp
debug_utils/debug_utils.h
+ engines/const_buffer_info.h
engines/engine_upload.cpp
engines/engine_upload.h
engines/fermi_2d.cpp
@@ -40,12 +44,10 @@ add_library(video_core STATIC
renderer_opengl/gl_buffer_cache.h
renderer_opengl/gl_device.cpp
renderer_opengl/gl_device.h
- renderer_opengl/gl_global_cache.cpp
- renderer_opengl/gl_global_cache.h
+ renderer_opengl/gl_framebuffer_cache.cpp
+ renderer_opengl/gl_framebuffer_cache.h
renderer_opengl/gl_rasterizer.cpp
renderer_opengl/gl_rasterizer.h
- renderer_opengl/gl_rasterizer_cache.cpp
- renderer_opengl/gl_rasterizer_cache.h
renderer_opengl/gl_resource_manager.cpp
renderer_opengl/gl_resource_manager.h
renderer_opengl/gl_sampler_cache.cpp
@@ -66,6 +68,8 @@ add_library(video_core STATIC
renderer_opengl/gl_state.h
renderer_opengl/gl_stream_buffer.cpp
renderer_opengl/gl_stream_buffer.h
+ renderer_opengl/gl_texture_cache.cpp
+ renderer_opengl/gl_texture_cache.h
renderer_opengl/maxwell_to_gl.h
renderer_opengl/renderer_opengl.cpp
renderer_opengl/renderer_opengl.h
@@ -87,6 +91,7 @@ add_library(video_core STATIC
shader/decode/conversion.cpp
shader/decode/memory.cpp
shader/decode/texture.cpp
+ shader/decode/image.cpp
shader/decode/float_set_predicate.cpp
shader/decode/integer_set_predicate.cpp
shader/decode/half_set_predicate.cpp
@@ -97,8 +102,11 @@ add_library(video_core STATIC
shader/decode/integer_set.cpp
shader/decode/half_set.cpp
shader/decode/video.cpp
+ shader/decode/warp.cpp
shader/decode/xmad.cpp
shader/decode/other.cpp
+ shader/control_flow.cpp
+ shader/control_flow.h
shader/decode.cpp
shader/node_helper.cpp
shader/node_helper.h
@@ -108,6 +116,13 @@ add_library(video_core STATIC
shader/track.cpp
surface.cpp
surface.h
+ texture_cache/surface_base.cpp
+ texture_cache/surface_base.h
+ texture_cache/surface_params.cpp
+ texture_cache/surface_params.h
+ texture_cache/surface_view.cpp
+ texture_cache/surface_view.h
+ texture_cache/texture_cache.h
textures/astc.cpp
textures/astc.h
textures/convert.cpp
@@ -115,8 +130,6 @@ add_library(video_core STATIC
textures/decoders.cpp
textures/decoders.h
textures/texture.h
- texture_cache.cpp
- texture_cache.h
video_core.cpp
video_core.h
)
diff --git a/src/video_core/buffer_cache/buffer_block.h b/src/video_core/buffer_cache/buffer_block.h
new file mode 100644
index 000000000..4b9193182
--- /dev/null
+++ b/src/video_core/buffer_cache/buffer_block.h
@@ -0,0 +1,76 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <unordered_set>
+#include <utility>
+
+#include "common/alignment.h"
+#include "common/common_types.h"
+#include "video_core/gpu.h"
+
+namespace VideoCommon {
+
+class BufferBlock {
+public:
+ bool Overlaps(const CacheAddr start, const CacheAddr end) const {
+ return (cache_addr < end) && (cache_addr_end > start);
+ }
+
+ bool IsInside(const CacheAddr other_start, const CacheAddr other_end) const {
+ return cache_addr <= other_start && other_end <= cache_addr_end;
+ }
+
+ u8* GetWritableHostPtr() const {
+ return FromCacheAddr(cache_addr);
+ }
+
+ u8* GetWritableHostPtr(std::size_t offset) const {
+ return FromCacheAddr(cache_addr + offset);
+ }
+
+ std::size_t GetOffset(const CacheAddr in_addr) {
+ return static_cast<std::size_t>(in_addr - cache_addr);
+ }
+
+ CacheAddr GetCacheAddr() const {
+ return cache_addr;
+ }
+
+ CacheAddr GetCacheAddrEnd() const {
+ return cache_addr_end;
+ }
+
+ void SetCacheAddr(const CacheAddr new_addr) {
+ cache_addr = new_addr;
+ cache_addr_end = new_addr + size;
+ }
+
+ std::size_t GetSize() const {
+ return size;
+ }
+
+ void SetEpoch(u64 new_epoch) {
+ epoch = new_epoch;
+ }
+
+ u64 GetEpoch() {
+ return epoch;
+ }
+
+protected:
+ explicit BufferBlock(CacheAddr cache_addr, const std::size_t size) : size{size} {
+ SetCacheAddr(cache_addr);
+ }
+ ~BufferBlock() = default;
+
+private:
+ CacheAddr cache_addr{};
+ CacheAddr cache_addr_end{};
+ std::size_t size{};
+ u64 epoch{};
+};
+
+} // namespace VideoCommon
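
BufferBlock treats [cache_addr, cache_addr_end) as a half-open interval, which is what keeps Overlaps and IsInside consistent with each other. A minimal standalone sketch of the same predicates; the CacheAddr alias here is a stand-in for the one defined in video_core/gpu.h:

```cpp
#include <cassert>
#include <cstdint>

using CacheAddr = std::uintptr_t; // stand-in for the alias from video_core/gpu.h

struct Range {
    CacheAddr begin, end; // half-open: [begin, end)

    bool Overlaps(CacheAddr start, CacheAddr last) const {
        return begin < last && end > start;
    }
    bool IsInside(CacheAddr start, CacheAddr last) const {
        return begin <= start && last <= end;
    }
};

int main() {
    const Range block{0x1000, 0x2000};
    assert(block.Overlaps(0x1800, 0x2800));  // partial overlap counts
    assert(!block.Overlaps(0x2000, 0x3000)); // the end bound is exclusive
    assert(block.IsInside(0x1200, 0x1400));  // fully contained sub-range
}
```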
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
new file mode 100644
index 000000000..2442ddfd6
--- /dev/null
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -0,0 +1,447 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <algorithm>
+#include <array>
+#include <cstring>
+#include <list>
+#include <memory>
+#include <mutex>
+#include <optional>
+#include <unordered_map>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
+#include <boost/icl/interval_map.hpp>
+#include <boost/icl/interval_set.hpp>
+#include <boost/range/iterator_range.hpp>
+
+#include "common/alignment.h"
+#include "common/common_types.h"
+#include "core/core.h"
+#include "video_core/buffer_cache/buffer_block.h"
+#include "video_core/buffer_cache/map_interval.h"
+#include "video_core/memory_manager.h"
+#include "video_core/rasterizer_interface.h"
+
+namespace VideoCommon {
+
+using MapInterval = std::shared_ptr<MapIntervalBase>;
+
+template <typename TBuffer, typename TBufferType, typename StreamBuffer>
+class BufferCache {
+public:
+ using BufferInfo = std::pair<const TBufferType*, u64>;
+
+ BufferInfo UploadMemory(GPUVAddr gpu_addr, std::size_t size, std::size_t alignment = 4,
+ bool is_written = false) {
+ std::lock_guard lock{mutex};
+
+ auto& memory_manager = system.GPU().MemoryManager();
+ const auto host_ptr = memory_manager.GetPointer(gpu_addr);
+ if (!host_ptr) {
+ return {GetEmptyBuffer(size), 0};
+ }
+ const auto cache_addr = ToCacheAddr(host_ptr);
+
+ // Cache management is a big overhead, so only cache entries above a given size.
+ // TODO: Figure out which size is the best for given games.
+ constexpr std::size_t max_stream_size = 0x800;
+ if (size < max_stream_size) {
+ if (!is_written && !IsRegionWritten(cache_addr, cache_addr + size - 1)) {
+ return StreamBufferUpload(host_ptr, size, alignment);
+ }
+ }
+
+ auto block = GetBlock(cache_addr, size);
+ auto map = MapAddress(block, gpu_addr, cache_addr, size);
+ if (is_written) {
+ map->MarkAsModified(true, GetModifiedTicks());
+ if (!map->IsWritten()) {
+ map->MarkAsWritten(true);
+ MarkRegionAsWritten(map->GetStart(), map->GetEnd() - 1);
+ }
+ } else {
+ if (map->IsWritten()) {
+ WriteBarrier();
+ }
+ }
+
+ const u64 offset = static_cast<u64>(block->GetOffset(cache_addr));
+
+ return {ToHandle(block), offset};
+ }
+
+ /// Uploads from host memory. Returns the OpenGL buffer where it's located and its offset.
+ BufferInfo UploadHostMemory(const void* raw_pointer, std::size_t size,
+ std::size_t alignment = 4) {
+ std::lock_guard lock{mutex};
+ return StreamBufferUpload(raw_pointer, size, alignment);
+ }
+
+ void Map(std::size_t max_size) {
+ std::lock_guard lock{mutex};
+
+ std::tie(buffer_ptr, buffer_offset_base, invalidated) = stream_buffer->Map(max_size, 4);
+ buffer_offset = buffer_offset_base;
+ }
+
+ /// Finishes the upload stream; returns true if the buffer bindings were invalidated.
+ bool Unmap() {
+ std::lock_guard lock{mutex};
+
+ stream_buffer->Unmap(buffer_offset - buffer_offset_base);
+ return std::exchange(invalidated, false);
+ }
+
+ void TickFrame() {
+ ++epoch;
+ while (!pending_destruction.empty()) {
+ if (pending_destruction.front()->GetEpoch() + 1 > epoch) {
+ break;
+ }
+ pending_destruction.pop_front();
+ }
+ }
+
+ /// Write any cached resources overlapping the specified region back to memory
+ void FlushRegion(CacheAddr addr, std::size_t size) {
+ std::lock_guard lock{mutex};
+
+ std::vector<MapInterval> objects = GetMapsInRange(addr, size);
+ std::sort(objects.begin(), objects.end(), [](const MapInterval& a, const MapInterval& b) {
+ return a->GetModificationTick() < b->GetModificationTick();
+ });
+ for (auto& object : objects) {
+ if (object->IsModified() && object->IsRegistered()) {
+ FlushMap(object);
+ }
+ }
+ }
+
+ /// Mark the specified region as being invalidated
+ void InvalidateRegion(CacheAddr addr, u64 size) {
+ std::lock_guard lock{mutex};
+
+ std::vector<MapInterval> objects = GetMapsInRange(addr, size);
+ for (auto& object : objects) {
+ if (object->IsRegistered()) {
+ Unregister(object);
+ }
+ }
+ }
+
+ virtual const TBufferType* GetEmptyBuffer(std::size_t size) = 0;
+
+protected:
+ explicit BufferCache(VideoCore::RasterizerInterface& rasterizer, Core::System& system,
+ std::unique_ptr<StreamBuffer> stream_buffer)
+ : rasterizer{rasterizer}, system{system}, stream_buffer{std::move(stream_buffer)},
+ stream_buffer_handle{this->stream_buffer->GetHandle()} {}
+
+ ~BufferCache() = default;
+
+ virtual const TBufferType* ToHandle(const TBuffer& storage) = 0;
+
+ virtual void WriteBarrier() = 0;
+
+ virtual TBuffer CreateBlock(CacheAddr cache_addr, std::size_t size) = 0;
+
+ virtual void UploadBlockData(const TBuffer& buffer, std::size_t offset, std::size_t size,
+ const u8* data) = 0;
+
+ virtual void DownloadBlockData(const TBuffer& buffer, std::size_t offset, std::size_t size,
+ u8* data) = 0;
+
+ virtual void CopyBlock(const TBuffer& src, const TBuffer& dst, std::size_t src_offset,
+ std::size_t dst_offset, std::size_t size) = 0;
+
+ /// Register an object into the cache
+ void Register(const MapInterval& new_map, bool inherit_written = false) {
+ const CacheAddr cache_ptr = new_map->GetStart();
+ const std::optional<VAddr> cpu_addr =
+ system.GPU().MemoryManager().GpuToCpuAddress(new_map->GetGpuAddress());
+ if (!cache_ptr || !cpu_addr) {
+ LOG_CRITICAL(HW_GPU, "Failed to register buffer with unmapped gpu_address 0x{:016x}",
+ new_map->GetGpuAddress());
+ return;
+ }
+ const std::size_t size = new_map->GetEnd() - new_map->GetStart();
+ new_map->SetCpuAddress(*cpu_addr);
+ new_map->MarkAsRegistered(true);
+ const IntervalType interval{new_map->GetStart(), new_map->GetEnd()};
+ mapped_addresses.insert({interval, new_map});
+ rasterizer.UpdatePagesCachedCount(*cpu_addr, size, 1);
+ if (inherit_written) {
+ MarkRegionAsWritten(new_map->GetStart(), new_map->GetEnd() - 1);
+ new_map->MarkAsWritten(true);
+ }
+ }
+
+ /// Unregisters an object from the cache
+ void Unregister(MapInterval& map) {
+ const std::size_t size = map->GetEnd() - map->GetStart();
+ rasterizer.UpdatePagesCachedCount(map->GetCpuAddress(), size, -1);
+ map->MarkAsRegistered(false);
+ if (map->IsWritten()) {
+ UnmarkRegionAsWritten(map->GetStart(), map->GetEnd() - 1);
+ }
+ const IntervalType delete_interval{map->GetStart(), map->GetEnd()};
+ mapped_addresses.erase(delete_interval);
+ }
+
+private:
+ MapInterval CreateMap(const CacheAddr start, const CacheAddr end, const GPUVAddr gpu_addr) {
+ return std::make_shared<MapIntervalBase>(start, end, gpu_addr);
+ }
+
+ MapInterval MapAddress(const TBuffer& block, const GPUVAddr gpu_addr,
+ const CacheAddr cache_addr, const std::size_t size) {
+
+ std::vector<MapInterval> overlaps = GetMapsInRange(cache_addr, size);
+ if (overlaps.empty()) {
+ const CacheAddr cache_addr_end = cache_addr + size;
+ MapInterval new_map = CreateMap(cache_addr, cache_addr_end, gpu_addr);
+ u8* host_ptr = FromCacheAddr(cache_addr);
+ UploadBlockData(block, block->GetOffset(cache_addr), size, host_ptr);
+ Register(new_map);
+ return new_map;
+ }
+
+ const CacheAddr cache_addr_end = cache_addr + size;
+ if (overlaps.size() == 1) {
+ MapInterval& current_map = overlaps[0];
+ if (current_map->IsInside(cache_addr, cache_addr_end)) {
+ return current_map;
+ }
+ }
+ CacheAddr new_start = cache_addr;
+ CacheAddr new_end = cache_addr_end;
+ bool write_inheritance = false;
+ bool modified_inheritance = false;
+ // Calculate new buffer parameters
+ for (auto& overlap : overlaps) {
+ new_start = std::min(overlap->GetStart(), new_start);
+ new_end = std::max(overlap->GetEnd(), new_end);
+ write_inheritance |= overlap->IsWritten();
+ modified_inheritance |= overlap->IsModified();
+ }
+ GPUVAddr new_gpu_addr = gpu_addr + new_start - cache_addr;
+ for (auto& overlap : overlaps) {
+ Unregister(overlap);
+ }
+ UpdateBlock(block, new_start, new_end, overlaps);
+ MapInterval new_map = CreateMap(new_start, new_end, new_gpu_addr);
+ if (modified_inheritance) {
+ new_map->MarkAsModified(true, GetModifiedTicks());
+ }
+ Register(new_map, write_inheritance);
+ return new_map;
+ }
+
+ void UpdateBlock(const TBuffer& block, CacheAddr start, CacheAddr end,
+ std::vector<MapInterval>& overlaps) {
+ const IntervalType base_interval{start, end};
+ IntervalSet interval_set{};
+ interval_set.add(base_interval);
+ for (auto& overlap : overlaps) {
+ const IntervalType subtract{overlap->GetStart(), overlap->GetEnd()};
+ interval_set.subtract(subtract);
+ }
+ for (auto& interval : interval_set) {
+ std::size_t size = interval.upper() - interval.lower();
+ if (size > 0) {
+ u8* host_ptr = FromCacheAddr(interval.lower());
+ UploadBlockData(block, block->GetOffset(interval.lower()), size, host_ptr);
+ }
+ }
+ }
+
+ std::vector<MapInterval> GetMapsInRange(CacheAddr addr, std::size_t size) {
+ if (size == 0) {
+ return {};
+ }
+
+ std::vector<MapInterval> objects{};
+ const IntervalType interval{addr, addr + size};
+ for (auto& pair : boost::make_iterator_range(mapped_addresses.equal_range(interval))) {
+ objects.push_back(pair.second);
+ }
+
+ return objects;
+ }
+
+ /// Returns a ticks counter used for tracking when cached objects were last modified
+ u64 GetModifiedTicks() {
+ return ++modified_ticks;
+ }
+
+ void FlushMap(MapInterval map) {
+ std::size_t size = map->GetEnd() - map->GetStart();
+ TBuffer block = blocks[map->GetStart() >> block_page_bits];
+ u8* host_ptr = FromCacheAddr(map->GetStart());
+ DownloadBlockData(block, block->GetOffset(map->GetStart()), size, host_ptr);
+ map->MarkAsModified(false, 0);
+ }
+
+ BufferInfo StreamBufferUpload(const void* raw_pointer, std::size_t size,
+ std::size_t alignment) {
+ AlignBuffer(alignment);
+ const std::size_t uploaded_offset = buffer_offset;
+ std::memcpy(buffer_ptr, raw_pointer, size);
+
+ buffer_ptr += size;
+ buffer_offset += size;
+ return {&stream_buffer_handle, uploaded_offset};
+ }
+
+ void AlignBuffer(std::size_t alignment) {
+ // Align the offset, not the mapped pointer
+ const std::size_t offset_aligned = Common::AlignUp(buffer_offset, alignment);
+ buffer_ptr += offset_aligned - buffer_offset;
+ buffer_offset = offset_aligned;
+ }
+
+ TBuffer EnlargeBlock(TBuffer buffer) {
+ const std::size_t old_size = buffer->GetSize();
+ const std::size_t new_size = old_size + block_page_size;
+ const CacheAddr cache_addr = buffer->GetCacheAddr();
+ TBuffer new_buffer = CreateBlock(cache_addr, new_size);
+ CopyBlock(buffer, new_buffer, 0, 0, old_size);
+ buffer->SetEpoch(epoch);
+ pending_destruction.push_back(buffer);
+ const CacheAddr cache_addr_end = cache_addr + new_size - 1;
+ u64 page_start = cache_addr >> block_page_bits;
+ const u64 page_end = cache_addr_end >> block_page_bits;
+ while (page_start <= page_end) {
+ blocks[page_start] = new_buffer;
+ ++page_start;
+ }
+ return new_buffer;
+ }
+
+ TBuffer MergeBlocks(TBuffer first, TBuffer second) {
+ const std::size_t size_1 = first->GetSize();
+ const std::size_t size_2 = second->GetSize();
+ const CacheAddr first_addr = first->GetCacheAddr();
+ const CacheAddr second_addr = second->GetCacheAddr();
+ const CacheAddr new_addr = std::min(first_addr, second_addr);
+ const std::size_t new_size = size_1 + size_2;
+ TBuffer new_buffer = CreateBlock(new_addr, new_size);
+ CopyBlock(first, new_buffer, 0, new_buffer->GetOffset(first_addr), size_1);
+ CopyBlock(second, new_buffer, 0, new_buffer->GetOffset(second_addr), size_2);
+ first->SetEpoch(epoch);
+ second->SetEpoch(epoch);
+ pending_destruction.push_back(first);
+ pending_destruction.push_back(second);
+ const CacheAddr cache_addr_end = new_addr + new_size - 1;
+ u64 page_start = new_addr >> block_page_bits;
+ const u64 page_end = cache_addr_end >> block_page_bits;
+ while (page_start <= page_end) {
+ blocks[page_start] = new_buffer;
+ ++page_start;
+ }
+ return new_buffer;
+ }
+
+ TBuffer GetBlock(const CacheAddr cache_addr, const std::size_t size) {
+ TBuffer found{};
+ const CacheAddr cache_addr_end = cache_addr + size - 1;
+ u64 page_start = cache_addr >> block_page_bits;
+ const u64 page_end = cache_addr_end >> block_page_bits;
+ while (page_start <= page_end) {
+ auto it = blocks.find(page_start);
+ if (it == blocks.end()) {
+ if (found) {
+ found = EnlargeBlock(found);
+ } else {
+ const CacheAddr start_addr = (page_start << block_page_bits);
+ found = CreateBlock(start_addr, block_page_size);
+ blocks[page_start] = found;
+ }
+ } else {
+ if (found) {
+ if (found == it->second) {
+ ++page_start;
+ continue;
+ }
+ found = MergeBlocks(found, it->second);
+ } else {
+ found = it->second;
+ }
+ }
+ ++page_start;
+ }
+ return found;
+ }
+
+ void MarkRegionAsWritten(const CacheAddr start, const CacheAddr end) {
+ u64 page_start = start >> write_page_bit;
+ const u64 page_end = end >> write_page_bit;
+ while (page_start <= page_end) {
+ auto it = written_pages.find(page_start);
+ if (it != written_pages.end()) {
+ it->second = it->second + 1;
+ } else {
+ written_pages[page_start] = 1;
+ }
+ page_start++;
+ }
+ }
+
+ void UnmarkRegionAsWritten(const CacheAddr start, const CacheAddr end) {
+ u64 page_start = start >> write_page_bit;
+ const u64 page_end = end >> write_page_bit;
+ while (page_start <= page_end) {
+ auto it = written_pages.find(page_start);
+ if (it != written_pages.end()) {
+ if (it->second > 1) {
+ it->second = it->second - 1;
+ } else {
+ written_pages.erase(it);
+ }
+ }
+ page_start++;
+ }
+ }
+
+ bool IsRegionWritten(const CacheAddr start, const CacheAddr end) const {
+ u64 page_start = start >> write_page_bit;
+ const u64 page_end = end >> write_page_bit;
+ while (page_start <= page_end) {
+ if (written_pages.count(page_start) > 0) {
+ return true;
+ }
+ page_start++;
+ }
+ return false;
+ }
+
+ VideoCore::RasterizerInterface& rasterizer;
+ Core::System& system;
+ std::unique_ptr<StreamBuffer> stream_buffer;
+
+ TBufferType stream_buffer_handle{};
+
+ bool invalidated = false;
+
+ u8* buffer_ptr = nullptr;
+ u64 buffer_offset = 0;
+ u64 buffer_offset_base = 0;
+
+ using IntervalSet = boost::icl::interval_set<CacheAddr>;
+ using IntervalCache = boost::icl::interval_map<CacheAddr, MapInterval>;
+ using IntervalType = typename IntervalCache::interval_type;
+ IntervalCache mapped_addresses{};
+
+ static constexpr u64 write_page_bit{11};
+ std::unordered_map<u64, u32> written_pages{};
+
+ static constexpr u64 block_page_bits{21};
+ static constexpr u64 block_page_size{1 << block_page_bits};
+ std::unordered_map<u64, TBuffer> blocks{};
+
+ std::list<TBuffer> pending_destruction{};
+ u64 epoch{};
+ u64 modified_ticks{};
+
+ std::recursive_mutex mutex;
+};
+
+} // namespace VideoCommon
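
The cache backs uploads with large blocks addressed through a page table: block_page_bits = 21 gives 2 MiB pages, and GetBlock walks the page range of an upload, creating, enlarging, or merging blocks until a single block backs every touched page. A small sketch of just that page arithmetic, with made-up addresses; it is illustrative, not the yuzu API:

```cpp
#include <cstdint>
#include <cstdio>

constexpr std::uint64_t block_page_bits = 21;
constexpr std::uint64_t block_page_size = 1ULL << block_page_bits; // 2 MiB

int main() {
    const std::uint64_t addr = 0x300000; // upload starts at 3 MiB
    const std::uint64_t size = 0x500000; // and spans 5 MiB
    const std::uint64_t page_start = addr >> block_page_bits;
    const std::uint64_t page_end = (addr + size - 1) >> block_page_bits;
    // Pages 1..3 inclusive: the upload touches three 2 MiB pages, so GetBlock
    // would coalesce whatever blocks currently back those pages into one.
    std::printf("pages %llu..%llu, page size %llu bytes\n",
                static_cast<unsigned long long>(page_start),
                static_cast<unsigned long long>(page_end),
                static_cast<unsigned long long>(block_page_size));
}
```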
diff --git a/src/video_core/buffer_cache/map_interval.h b/src/video_core/buffer_cache/map_interval.h
new file mode 100644
index 000000000..3a104d5cd
--- /dev/null
+++ b/src/video_core/buffer_cache/map_interval.h
@@ -0,0 +1,89 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "common/common_types.h"
+#include "video_core/gpu.h"
+
+namespace VideoCommon {
+
+class MapIntervalBase {
+public:
+ MapIntervalBase(const CacheAddr start, const CacheAddr end, const GPUVAddr gpu_addr)
+ : start{start}, end{end}, gpu_addr{gpu_addr} {}
+
+ void SetCpuAddress(VAddr new_cpu_addr) {
+ cpu_addr = new_cpu_addr;
+ }
+
+ VAddr GetCpuAddress() const {
+ return cpu_addr;
+ }
+
+ GPUVAddr GetGpuAddress() const {
+ return gpu_addr;
+ }
+
+ bool IsInside(const CacheAddr other_start, const CacheAddr other_end) const {
+ return (start <= other_start && other_end <= end);
+ }
+
+ bool operator==(const MapIntervalBase& rhs) const {
+ return std::tie(start, end) == std::tie(rhs.start, rhs.end);
+ }
+
+ bool operator!=(const MapIntervalBase& rhs) const {
+ return !operator==(rhs);
+ }
+
+ void MarkAsRegistered(const bool registered) {
+ is_registered = registered;
+ }
+
+ bool IsRegistered() const {
+ return is_registered;
+ }
+
+ CacheAddr GetStart() const {
+ return start;
+ }
+
+ CacheAddr GetEnd() const {
+ return end;
+ }
+
+ void MarkAsModified(const bool is_modified_, const u64 tick) {
+ is_modified = is_modified_;
+ ticks = tick;
+ }
+
+ bool IsModified() const {
+ return is_modified;
+ }
+
+ u64 GetModificationTick() const {
+ return ticks;
+ }
+
+ void MarkAsWritten(const bool is_written_) {
+ is_written = is_written_;
+ }
+
+ bool IsWritten() const {
+ return is_written;
+ }
+
+private:
+ CacheAddr start;
+ CacheAddr end;
+ GPUVAddr gpu_addr;
+ VAddr cpu_addr{};
+ bool is_written{};
+ bool is_modified{};
+ bool is_registered{};
+ u64 ticks{};
+};
+
+} // namespace VideoCommon
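
MapIntervalBase is the bookkeeping record behind MapAddress in buffer_cache.h above: when a new upload overlaps existing maps, the merged interval spans all of them and inherits their written/modified state. A hypothetical free function sketching only that inheritance step (the real logic also rebases the GPU address and re-uploads the uncovered sub-ranges):

```cpp
#include <algorithm>
#include <memory>
#include <vector>

#include "video_core/buffer_cache/map_interval.h"

// Hypothetical helper, not part of the cache: merge overlapping maps into one.
std::shared_ptr<VideoCommon::MapIntervalBase> Coalesce(
    CacheAddr start, CacheAddr end, GPUVAddr gpu_addr,
    const std::vector<std::shared_ptr<VideoCommon::MapIntervalBase>>& overlaps) {
    bool written = false;
    bool modified = false;
    for (const auto& overlap : overlaps) {
        start = std::min(start, overlap->GetStart());
        end = std::max(end, overlap->GetEnd());
        written |= overlap->IsWritten();   // any written overlap taints the merge
        modified |= overlap->IsModified(); // likewise for pending flushes
    }
    auto merged = std::make_shared<VideoCommon::MapIntervalBase>(start, end, gpu_addr);
    merged->MarkAsWritten(written);
    if (modified) {
        merged->MarkAsModified(true, /*tick=*/0); // the cache uses GetModifiedTicks()
    }
    return merged;
}
```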
diff --git a/src/video_core/dma_pusher.cpp b/src/video_core/dma_pusher.cpp
index 3175579cc..0094fd715 100644
--- a/src/video_core/dma_pusher.cpp
+++ b/src/video_core/dma_pusher.cpp
@@ -22,7 +22,7 @@ void DmaPusher::DispatchCalls() {
MICROPROFILE_SCOPE(DispatchCalls);
// On entering GPU code, assume all memory may be touched by the ARM core.
- gpu.Maxwell3D().dirty_flags.OnMemoryWrite();
+ gpu.Maxwell3D().dirty.OnMemoryWrite();
dma_pushbuffer_subindex = 0;
@@ -31,6 +31,7 @@ void DmaPusher::DispatchCalls() {
break;
}
}
+ gpu.FlushCommands();
}
bool DmaPusher::Step() {
diff --git a/src/video_core/engines/const_buffer_info.h b/src/video_core/engines/const_buffer_info.h
new file mode 100644
index 000000000..d8f672462
--- /dev/null
+++ b/src/video_core/engines/const_buffer_info.h
@@ -0,0 +1,17 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "common/common_types.h"
+
+namespace Tegra::Engines {
+
+struct ConstBufferInfo {
+ GPUVAddr address;
+ u32 size;
+ bool enabled;
+};
+
+} // namespace Tegra::Engines
diff --git a/src/video_core/engines/engine_upload.cpp b/src/video_core/engines/engine_upload.cpp
index 082a40cd9..d44ad0cd8 100644
--- a/src/video_core/engines/engine_upload.cpp
+++ b/src/video_core/engines/engine_upload.cpp
@@ -36,10 +36,10 @@ void State::ProcessData(const u32 data, const bool is_last_call) {
} else {
UNIMPLEMENTED_IF(regs.dest.z != 0);
UNIMPLEMENTED_IF(regs.dest.depth != 1);
- UNIMPLEMENTED_IF(regs.dest.BlockWidth() != 1);
- UNIMPLEMENTED_IF(regs.dest.BlockDepth() != 1);
+ UNIMPLEMENTED_IF(regs.dest.BlockWidth() != 0);
+ UNIMPLEMENTED_IF(regs.dest.BlockDepth() != 0);
const std::size_t dst_size = Tegra::Texture::CalculateSize(
- true, 1, regs.dest.width, regs.dest.height, 1, regs.dest.BlockHeight(), 1);
+ true, 1, regs.dest.width, regs.dest.height, 1, regs.dest.BlockHeight(), 0);
tmp_buffer.resize(dst_size);
memory_manager.ReadBlock(address, tmp_buffer.data(), dst_size);
Tegra::Texture::SwizzleKepler(regs.dest.width, regs.dest.height, regs.dest.x, regs.dest.y,
diff --git a/src/video_core/engines/engine_upload.h b/src/video_core/engines/engine_upload.h
index ef4f5839a..462da419e 100644
--- a/src/video_core/engines/engine_upload.h
+++ b/src/video_core/engines/engine_upload.h
@@ -39,15 +39,15 @@ struct Registers {
}
u32 BlockWidth() const {
- return 1U << block_width.Value();
+ return block_width.Value();
}
u32 BlockHeight() const {
- return 1U << block_height.Value();
+ return block_height.Value();
}
u32 BlockDepth() const {
- return 1U << block_depth.Value();
+ return block_depth.Value();
}
} dest;
};
diff --git a/src/video_core/engines/fermi_2d.cpp b/src/video_core/engines/fermi_2d.cpp
index 55966eef1..98a8b5337 100644
--- a/src/video_core/engines/fermi_2d.cpp
+++ b/src/video_core/engines/fermi_2d.cpp
@@ -4,15 +4,13 @@
#include "common/assert.h"
#include "common/logging/log.h"
-#include "common/math_util.h"
#include "video_core/engines/fermi_2d.h"
#include "video_core/memory_manager.h"
#include "video_core/rasterizer_interface.h"
namespace Tegra::Engines {
-Fermi2D::Fermi2D(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager)
- : rasterizer{rasterizer}, memory_manager{memory_manager} {}
+Fermi2D::Fermi2D(VideoCore::RasterizerInterface& rasterizer) : rasterizer{rasterizer} {}
void Fermi2D::CallMethod(const GPU::MethodCall& method_call) {
ASSERT_MSG(method_call.method < Regs::NUM_REGS,
@@ -35,21 +33,31 @@ void Fermi2D::HandleSurfaceCopy() {
static_cast<u32>(regs.operation));
// TODO(Subv): Only raw copies are implemented.
- ASSERT(regs.operation == Regs::Operation::SrcCopy);
+ ASSERT(regs.operation == Operation::SrcCopy);
const u32 src_blit_x1{static_cast<u32>(regs.blit_src_x >> 32)};
const u32 src_blit_y1{static_cast<u32>(regs.blit_src_y >> 32)};
- const u32 src_blit_x2{
- static_cast<u32>((regs.blit_src_x + (regs.blit_dst_width * regs.blit_du_dx)) >> 32)};
- const u32 src_blit_y2{
- static_cast<u32>((regs.blit_src_y + (regs.blit_dst_height * regs.blit_dv_dy)) >> 32)};
-
+ u32 src_blit_x2, src_blit_y2;
+ if (regs.blit_control.origin == Origin::Corner) {
+ src_blit_x2 =
+ static_cast<u32>((regs.blit_src_x + (regs.blit_du_dx * regs.blit_dst_width)) >> 32);
+ src_blit_y2 =
+ static_cast<u32>((regs.blit_src_y + (regs.blit_dv_dy * regs.blit_dst_height)) >> 32);
+ } else {
+ src_blit_x2 = static_cast<u32>((regs.blit_src_x >> 32) + regs.blit_dst_width);
+ src_blit_y2 = static_cast<u32>((regs.blit_src_y >> 32) + regs.blit_dst_height);
+ }
const Common::Rectangle<u32> src_rect{src_blit_x1, src_blit_y1, src_blit_x2, src_blit_y2};
const Common::Rectangle<u32> dst_rect{regs.blit_dst_x, regs.blit_dst_y,
regs.blit_dst_x + regs.blit_dst_width,
regs.blit_dst_y + regs.blit_dst_height};
+ Config copy_config;
+ copy_config.operation = regs.operation;
+ copy_config.filter = regs.blit_control.filter;
+ copy_config.src_rect = src_rect;
+ copy_config.dst_rect = dst_rect;
- if (!rasterizer.AccelerateSurfaceCopy(regs.src, regs.dst, src_rect, dst_rect)) {
+ if (!rasterizer.AccelerateSurfaceCopy(regs.src, regs.dst, copy_config)) {
UNIMPLEMENTED();
}
}
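
The blit source coordinates and the du_dx/dv_dy deltas are 32.32 fixed-point values, which is why HandleSurfaceCopy shifts everything right by 32 before building the rectangles. A sketch of the Corner-origin math with made-up values:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
    const std::uint64_t one = 1ULL << 32;      // 1.0 in 32.32 fixed point
    const std::uint64_t blit_src_x = 8 * one;  // source starts at x = 8
    const std::uint64_t blit_du_dx = one / 2;  // 0.5 source texels per dst pixel
    const std::uint32_t blit_dst_width = 64;

    const std::uint32_t x1 = static_cast<std::uint32_t>(blit_src_x >> 32);
    const std::uint32_t x2 = static_cast<std::uint32_t>(
        (blit_src_x + blit_du_dx * blit_dst_width) >> 32);
    // 64 dst pixels * 0.5 texels/pixel = 32 source texels: [8, 40)
    std::printf("src rect x: [%u, %u)\n", x1, x2);
}
```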
diff --git a/src/video_core/engines/fermi_2d.h b/src/video_core/engines/fermi_2d.h
index 45f59a4d9..0901cf2fa 100644
--- a/src/video_core/engines/fermi_2d.h
+++ b/src/video_core/engines/fermi_2d.h
@@ -9,6 +9,7 @@
#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
+#include "common/math_util.h"
#include "video_core/gpu.h"
namespace Tegra {
@@ -32,12 +33,32 @@ namespace Tegra::Engines {
class Fermi2D final {
public:
- explicit Fermi2D(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager);
+ explicit Fermi2D(VideoCore::RasterizerInterface& rasterizer);
~Fermi2D() = default;
/// Write the value to the register identified by method.
void CallMethod(const GPU::MethodCall& method_call);
+ enum class Origin : u32 {
+ Center = 0,
+ Corner = 1,
+ };
+
+ enum class Filter : u32 {
+ PointSample = 0, // Nearest
+ Linear = 1,
+ };
+
+ enum class Operation : u32 {
+ SrcCopyAnd = 0,
+ ROPAnd = 1,
+ Blend = 2,
+ SrcCopy = 3,
+ ROP = 4,
+ SrcCopyPremult = 5,
+ BlendPremult = 6,
+ };
+
struct Regs {
static constexpr std::size_t NUM_REGS = 0x258;
@@ -63,32 +84,19 @@ public:
}
u32 BlockWidth() const {
- // The block width is stored in log2 format.
- return 1 << block_width;
+ return block_width.Value();
}
u32 BlockHeight() const {
- // The block height is stored in log2 format.
- return 1 << block_height;
+ return block_height.Value();
}
u32 BlockDepth() const {
- // The block depth is stored in log2 format.
- return 1 << block_depth;
+ return block_depth.Value();
}
};
static_assert(sizeof(Surface) == 0x28, "Surface has incorrect size");
- enum class Operation : u32 {
- SrcCopyAnd = 0,
- ROPAnd = 1,
- Blend = 2,
- SrcCopy = 3,
- ROP = 4,
- SrcCopyPremult = 5,
- BlendPremult = 6,
- };
-
union {
struct {
INSERT_PADDING_WORDS(0x80);
@@ -105,7 +113,11 @@ public:
INSERT_PADDING_WORDS(0x177);
- u32 blit_control;
+ union {
+ u32 raw;
+ BitField<0, 1, Origin> origin;
+ BitField<4, 1, Filter> filter;
+ } blit_control;
INSERT_PADDING_WORDS(0x8);
@@ -124,9 +136,15 @@ public:
};
} regs{};
+ struct Config {
+ Operation operation;
+ Filter filter;
+ Common::Rectangle<u32> src_rect;
+ Common::Rectangle<u32> dst_rect;
+ };
+
private:
VideoCore::RasterizerInterface& rasterizer;
- MemoryManager& memory_manager;
/// Performs the copy from the source surface to the destination surface as configured in the
/// registers.
diff --git a/src/video_core/engines/kepler_compute.cpp b/src/video_core/engines/kepler_compute.cpp
index 7404a8163..63d449135 100644
--- a/src/video_core/engines/kepler_compute.cpp
+++ b/src/video_core/engines/kepler_compute.cpp
@@ -2,6 +2,7 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include <bitset>
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
@@ -37,7 +38,7 @@ void KeplerCompute::CallMethod(const GPU::MethodCall& method_call) {
const bool is_last_call = method_call.IsLastCall();
upload_state.ProcessData(method_call.argument, is_last_call);
if (is_last_call) {
- system.GPU().Maxwell3D().dirty_flags.OnMemoryWrite();
+ system.GPU().Maxwell3D().dirty.OnMemoryWrite();
}
break;
}
@@ -49,14 +50,67 @@ void KeplerCompute::CallMethod(const GPU::MethodCall& method_call) {
}
}
-void KeplerCompute::ProcessLaunch() {
+Tegra::Texture::FullTextureInfo KeplerCompute::GetTexture(std::size_t offset) const {
+ const std::bitset<8> cbuf_mask = launch_description.const_buffer_enable_mask.Value();
+ ASSERT(cbuf_mask[regs.tex_cb_index]);
+
+ const auto& texinfo = launch_description.const_buffer_config[regs.tex_cb_index];
+ ASSERT(texinfo.Address() != 0);
+
+ const GPUVAddr address = texinfo.Address() + offset * sizeof(Texture::TextureHandle);
+ ASSERT(address < texinfo.Address() + texinfo.size);
+
+ const Texture::TextureHandle tex_handle{memory_manager.Read<u32>(address)};
+ return GetTextureInfo(tex_handle, offset);
+}
+Texture::FullTextureInfo KeplerCompute::GetTextureInfo(const Texture::TextureHandle tex_handle,
+ std::size_t offset) const {
+ return Texture::FullTextureInfo{static_cast<u32>(offset), GetTICEntry(tex_handle.tic_id),
+ GetTSCEntry(tex_handle.tsc_id)};
+}
+
+u32 KeplerCompute::AccessConstBuffer32(u64 const_buffer, u64 offset) const {
+ const auto& buffer = launch_description.const_buffer_config[const_buffer];
+ u32 result;
+ std::memcpy(&result, memory_manager.GetPointer(buffer.Address() + offset), sizeof(u32));
+ return result;
+}
+
+void KeplerCompute::ProcessLaunch() {
const GPUVAddr launch_desc_loc = regs.launch_desc_loc.Address();
memory_manager.ReadBlockUnsafe(launch_desc_loc, &launch_description,
LaunchParams::NUM_LAUNCH_PARAMETERS * sizeof(u32));
- const GPUVAddr code_loc = regs.code_loc.Address() + launch_description.program_start;
- LOG_WARNING(HW_GPU, "Compute Kernel Execute at Address 0x{:016x}, STUBBED", code_loc);
+ const GPUVAddr code_addr = regs.code_loc.Address() + launch_description.program_start;
+ LOG_TRACE(HW_GPU, "Compute invocation launched at address 0x{:016x}", code_addr);
+
+ rasterizer.DispatchCompute(code_addr);
+}
+
+Texture::TICEntry KeplerCompute::GetTICEntry(u32 tic_index) const {
+ const GPUVAddr tic_address_gpu{regs.tic.Address() + tic_index * sizeof(Texture::TICEntry)};
+
+ Texture::TICEntry tic_entry;
+ memory_manager.ReadBlockUnsafe(tic_address_gpu, &tic_entry, sizeof(Texture::TICEntry));
+
+ const auto r_type{tic_entry.r_type.Value()};
+ const auto g_type{tic_entry.g_type.Value()};
+ const auto b_type{tic_entry.b_type.Value()};
+ const auto a_type{tic_entry.a_type.Value()};
+
+ // TODO(Subv): Different data types for separate components are not supported
+ DEBUG_ASSERT(r_type == g_type && r_type == b_type && r_type == a_type);
+
+ return tic_entry;
+}
+
+Texture::TSCEntry KeplerCompute::GetTSCEntry(u32 tsc_index) const {
+ const GPUVAddr tsc_address_gpu{regs.tsc.Address() + tsc_index * sizeof(Texture::TSCEntry)};
+
+ Texture::TSCEntry tsc_entry;
+ memory_manager.ReadBlockUnsafe(tsc_address_gpu, &tsc_entry, sizeof(Texture::TSCEntry));
+ return tsc_entry;
}
} // namespace Tegra::Engines
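
GetTexture reads a packed 32-bit Texture::TextureHandle out of the bound const buffer and splits it into a TIC index (image descriptor) and a TSC index (sampler descriptor). A sketch with plain masks, assuming the 20-bit TIC / 12-bit TSC split used by yuzu's BitField-based TextureHandle:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
    const std::uint32_t handle = (5u << 20) | 123u; // tsc_id = 5, tic_id = 123
    const std::uint32_t tic_id = handle & 0xFFFFFu; // bits 0..19
    const std::uint32_t tsc_id = handle >> 20;      // bits 20..31
    std::printf("tic=%u tsc=%u\n", tic_id, tsc_id); // prints: tic=123 tsc=5
}
```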
diff --git a/src/video_core/engines/kepler_compute.h b/src/video_core/engines/kepler_compute.h
index 6a3309a2c..90cf650d2 100644
--- a/src/video_core/engines/kepler_compute.h
+++ b/src/video_core/engines/kepler_compute.h
@@ -12,6 +12,7 @@
#include "common/common_types.h"
#include "video_core/engines/engine_upload.h"
#include "video_core/gpu.h"
+#include "video_core/textures/texture.h"
namespace Core {
class System;
@@ -111,7 +112,7 @@ public:
INSERT_PADDING_WORDS(0x3FE);
- u32 texture_const_buffer_index;
+ u32 tex_cb_index;
INSERT_PADDING_WORDS(0x374);
};
@@ -149,7 +150,7 @@ public:
union {
BitField<0, 8, u32> const_buffer_enable_mask;
BitField<29, 2, u32> cache_layout;
- } memory_config;
+ };
INSERT_PADDING_WORDS(0x8);
@@ -194,6 +195,14 @@ public:
/// Write the value to the register identified by method.
void CallMethod(const GPU::MethodCall& method_call);
+ Tegra::Texture::FullTextureInfo GetTexture(std::size_t offset) const;
+
+ /// Given a Texture Handle, returns the TSC and TIC entries.
+ Texture::FullTextureInfo GetTextureInfo(const Texture::TextureHandle tex_handle,
+ std::size_t offset) const;
+
+ u32 AccessConstBuffer32(u64 const_buffer, u64 offset) const;
+
private:
Core::System& system;
VideoCore::RasterizerInterface& rasterizer;
@@ -201,6 +210,12 @@ private:
Upload::State upload_state;
void ProcessLaunch();
+
+ /// Retrieves information about a specific TIC entry from the TIC buffer.
+ Texture::TICEntry GetTICEntry(u32 tic_index) const;
+
+ /// Retrieves information about a specific TSC entry from the TSC buffer.
+ Texture::TSCEntry GetTSCEntry(u32 tsc_index) const;
};
#define ASSERT_REG_POSITION(field_name, position) \
@@ -218,12 +233,12 @@ ASSERT_REG_POSITION(launch, 0xAF);
ASSERT_REG_POSITION(tsc, 0x557);
ASSERT_REG_POSITION(tic, 0x55D);
ASSERT_REG_POSITION(code_loc, 0x582);
-ASSERT_REG_POSITION(texture_const_buffer_index, 0x982);
+ASSERT_REG_POSITION(tex_cb_index, 0x982);
ASSERT_LAUNCH_PARAM_POSITION(program_start, 0x8);
ASSERT_LAUNCH_PARAM_POSITION(grid_dim_x, 0xC);
ASSERT_LAUNCH_PARAM_POSITION(shared_alloc, 0x11);
ASSERT_LAUNCH_PARAM_POSITION(block_dim_x, 0x12);
-ASSERT_LAUNCH_PARAM_POSITION(memory_config, 0x14);
+ASSERT_LAUNCH_PARAM_POSITION(const_buffer_enable_mask, 0x14);
ASSERT_LAUNCH_PARAM_POSITION(const_buffer_config, 0x1D);
#undef ASSERT_REG_POSITION
diff --git a/src/video_core/engines/kepler_memory.cpp b/src/video_core/engines/kepler_memory.cpp
index 0561f676c..fa4a7c5c1 100644
--- a/src/video_core/engines/kepler_memory.cpp
+++ b/src/video_core/engines/kepler_memory.cpp
@@ -15,7 +15,7 @@
namespace Tegra::Engines {
KeplerMemory::KeplerMemory(Core::System& system, MemoryManager& memory_manager)
- : system{system}, memory_manager{memory_manager}, upload_state{memory_manager, regs.upload} {}
+ : system{system}, upload_state{memory_manager, regs.upload} {}
KeplerMemory::~KeplerMemory() = default;
@@ -34,7 +34,7 @@ void KeplerMemory::CallMethod(const GPU::MethodCall& method_call) {
const bool is_last_call = method_call.IsLastCall();
upload_state.ProcessData(method_call.argument, is_last_call);
if (is_last_call) {
- system.GPU().Maxwell3D().dirty_flags.OnMemoryWrite();
+ system.GPU().Maxwell3D().dirty.OnMemoryWrite();
}
break;
}
diff --git a/src/video_core/engines/kepler_memory.h b/src/video_core/engines/kepler_memory.h
index f3bc675a9..e0e25c321 100644
--- a/src/video_core/engines/kepler_memory.h
+++ b/src/video_core/engines/kepler_memory.h
@@ -65,7 +65,6 @@ public:
private:
Core::System& system;
- MemoryManager& memory_manager;
Upload::State upload_state;
};
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp
index 39968d403..fb3d1112c 100644
--- a/src/video_core/engines/maxwell_3d.cpp
+++ b/src/video_core/engines/maxwell_3d.cpp
@@ -22,6 +22,7 @@ Maxwell3D::Maxwell3D(Core::System& system, VideoCore::RasterizerInterface& raste
MemoryManager& memory_manager)
: system{system}, rasterizer{rasterizer}, memory_manager{memory_manager},
macro_interpreter{*this}, upload_state{memory_manager, regs.upload} {
+ InitDirtySettings();
InitializeRegisterDefaults();
}
@@ -69,6 +70,10 @@ void Maxwell3D::InitializeRegisterDefaults() {
regs.stencil_back_func_mask = 0xFFFFFFFF;
regs.stencil_back_mask = 0xFFFFFFFF;
+ regs.depth_test_func = Regs::ComparisonOp::Always;
+ regs.cull.front_face = Regs::Cull::FrontFace::CounterClockWise;
+ regs.cull.cull_face = Regs::Cull::CullFace::Back;
+
// TODO(Rodrigo): Most games do not set a point size. I think this is a case of a
// register carrying a default value. Assume it's OpenGL's default (1).
regs.point_size = 1.0f;
@@ -84,23 +89,173 @@ void Maxwell3D::InitializeRegisterDefaults() {
// Commercial games seem to assume this value is enabled and nouveau sets this value manually.
regs.rt_separate_frag_data = 1;
+
+ // Some games (like Super Mario Odyssey) assume that SRGB is enabled.
+ regs.framebuffer_srgb = 1;
+}
+
+#define DIRTY_REGS_POS(field_name) (offsetof(Maxwell3D::DirtyRegs, field_name))
+
+void Maxwell3D::InitDirtySettings() {
+ const auto set_block = [this](const u32 start, const u32 range, const u8 position) {
+ const auto start_itr = dirty_pointers.begin() + start;
+ const auto end_itr = start_itr + range;
+ std::fill(start_itr, end_itr, position);
+ };
+ dirty.regs.fill(true);
+
+ // Init Render Targets
+ constexpr u32 registers_per_rt = sizeof(regs.rt[0]) / sizeof(u32);
+ constexpr u32 rt_start_reg = MAXWELL3D_REG_INDEX(rt);
+ constexpr u32 rt_end_reg = rt_start_reg + registers_per_rt * 8;
+ u32 rt_dirty_reg = DIRTY_REGS_POS(render_target);
+ for (u32 rt_reg = rt_start_reg; rt_reg < rt_end_reg; rt_reg += registers_per_rt) {
+ set_block(rt_reg, registers_per_rt, rt_dirty_reg);
+ rt_dirty_reg++;
+ }
+ constexpr u32 depth_buffer_flag = DIRTY_REGS_POS(depth_buffer);
+ dirty_pointers[MAXWELL3D_REG_INDEX(zeta_enable)] = depth_buffer_flag;
+ dirty_pointers[MAXWELL3D_REG_INDEX(zeta_width)] = depth_buffer_flag;
+ dirty_pointers[MAXWELL3D_REG_INDEX(zeta_height)] = depth_buffer_flag;
+ constexpr u32 registers_in_zeta = sizeof(regs.zeta) / sizeof(u32);
+ constexpr u32 zeta_reg = MAXWELL3D_REG_INDEX(zeta);
+ set_block(zeta_reg, registers_in_zeta, depth_buffer_flag);
+
+ // Init Vertex Arrays
+ constexpr u32 vertex_array_start = MAXWELL3D_REG_INDEX(vertex_array);
+ constexpr u32 vertex_array_size = sizeof(regs.vertex_array[0]) / sizeof(u32);
+ constexpr u32 vertex_array_end = vertex_array_start + vertex_array_size * Regs::NumVertexArrays;
+ u32 va_reg = DIRTY_REGS_POS(vertex_array);
+ u32 vi_reg = DIRTY_REGS_POS(vertex_instance);
+ for (u32 vertex_reg = vertex_array_start; vertex_reg < vertex_array_end;
+ vertex_reg += vertex_array_size) {
+ set_block(vertex_reg, 3, va_reg);
+ // The divisor concerns vertex array instances
+ dirty_pointers[vertex_reg + 3] = vi_reg;
+ va_reg++;
+ vi_reg++;
+ }
+ constexpr u32 vertex_limit_start = MAXWELL3D_REG_INDEX(vertex_array_limit);
+ constexpr u32 vertex_limit_size = sizeof(regs.vertex_array_limit[0]) / sizeof(u32);
+ constexpr u32 vertex_limit_end = vertex_limit_start + vertex_limit_size * Regs::NumVertexArrays;
+ va_reg = DIRTY_REGS_POS(vertex_array);
+ for (u32 vertex_reg = vertex_limit_start; vertex_reg < vertex_limit_end;
+ vertex_reg += vertex_limit_size) {
+ set_block(vertex_reg, vertex_limit_size, va_reg);
+ va_reg++;
+ }
+ constexpr u32 vertex_instance_start = MAXWELL3D_REG_INDEX(instanced_arrays);
+ constexpr u32 vertex_instance_size =
+ sizeof(regs.instanced_arrays.is_instanced[0]) / sizeof(u32);
+ constexpr u32 vertex_instance_end =
+ vertex_instance_start + vertex_instance_size * Regs::NumVertexArrays;
+ vi_reg = DIRTY_REGS_POS(vertex_instance);
+ for (u32 vertex_reg = vertex_instance_start; vertex_reg < vertex_instance_end;
+ vertex_reg += vertex_instance_size) {
+ set_block(vertex_reg, vertex_instance_size, vi_reg);
+ vi_reg++;
+ }
+ set_block(MAXWELL3D_REG_INDEX(vertex_attrib_format), regs.vertex_attrib_format.size(),
+ DIRTY_REGS_POS(vertex_attrib_format));
+
+ // Init Shaders
+ constexpr u32 shader_registers_count =
+ sizeof(regs.shader_config[0]) * Regs::MaxShaderProgram / sizeof(u32);
+ set_block(MAXWELL3D_REG_INDEX(shader_config[0]), shader_registers_count,
+ DIRTY_REGS_POS(shaders));
+
+ // State
+
+ // Viewport
+ constexpr u32 viewport_dirty_reg = DIRTY_REGS_POS(viewport);
+ constexpr u32 viewport_start = MAXWELL3D_REG_INDEX(viewports);
+ constexpr u32 viewport_size = sizeof(regs.viewports) / sizeof(u32);
+ set_block(viewport_start, viewport_size, viewport_dirty_reg);
+ constexpr u32 view_volume_start = MAXWELL3D_REG_INDEX(view_volume_clip_control);
+ constexpr u32 view_volume_size = sizeof(regs.view_volume_clip_control) / sizeof(u32);
+ set_block(view_volume_start, view_volume_size, viewport_dirty_reg);
+
+ // Viewport transformation
+ constexpr u32 viewport_trans_start = MAXWELL3D_REG_INDEX(viewport_transform);
+ constexpr u32 viewport_trans_size = sizeof(regs.viewport_transform) / sizeof(u32);
+ set_block(viewport_trans_start, viewport_trans_size, DIRTY_REGS_POS(viewport_transform));
+
+ // Cullmode
+ constexpr u32 cull_mode_start = MAXWELL3D_REG_INDEX(cull);
+ constexpr u32 cull_mode_size = sizeof(regs.cull) / sizeof(u32);
+ set_block(cull_mode_start, cull_mode_size, DIRTY_REGS_POS(cull_mode));
+
+ // Screen y control
+ dirty_pointers[MAXWELL3D_REG_INDEX(screen_y_control)] = DIRTY_REGS_POS(screen_y_control);
+
+ // Primitive Restart
+ constexpr u32 primitive_restart_start = MAXWELL3D_REG_INDEX(primitive_restart);
+ constexpr u32 primitive_restart_size = sizeof(regs.primitive_restart) / sizeof(u32);
+ set_block(primitive_restart_start, primitive_restart_size, DIRTY_REGS_POS(primitive_restart));
+
+ // Depth Test
+ constexpr u32 depth_test_dirty_reg = DIRTY_REGS_POS(depth_test);
+ dirty_pointers[MAXWELL3D_REG_INDEX(depth_test_enable)] = depth_test_dirty_reg;
+ dirty_pointers[MAXWELL3D_REG_INDEX(depth_write_enabled)] = depth_test_dirty_reg;
+ dirty_pointers[MAXWELL3D_REG_INDEX(depth_test_func)] = depth_test_dirty_reg;
+
+ // Stencil Test
+ constexpr u32 stencil_test_dirty_reg = DIRTY_REGS_POS(stencil_test);
+ dirty_pointers[MAXWELL3D_REG_INDEX(stencil_enable)] = stencil_test_dirty_reg;
+ dirty_pointers[MAXWELL3D_REG_INDEX(stencil_front_func_func)] = stencil_test_dirty_reg;
+ dirty_pointers[MAXWELL3D_REG_INDEX(stencil_front_func_ref)] = stencil_test_dirty_reg;
+ dirty_pointers[MAXWELL3D_REG_INDEX(stencil_front_func_mask)] = stencil_test_dirty_reg;
+ dirty_pointers[MAXWELL3D_REG_INDEX(stencil_front_op_fail)] = stencil_test_dirty_reg;
+ dirty_pointers[MAXWELL3D_REG_INDEX(stencil_front_op_zfail)] = stencil_test_dirty_reg;
+ dirty_pointers[MAXWELL3D_REG_INDEX(stencil_front_op_zpass)] = stencil_test_dirty_reg;
+ dirty_pointers[MAXWELL3D_REG_INDEX(stencil_front_mask)] = stencil_test_dirty_reg;
+ dirty_pointers[MAXWELL3D_REG_INDEX(stencil_two_side_enable)] = stencil_test_dirty_reg;
+ dirty_pointers[MAXWELL3D_REG_INDEX(stencil_back_func_func)] = stencil_test_dirty_reg;
+ dirty_pointers[MAXWELL3D_REG_INDEX(stencil_back_func_ref)] = stencil_test_dirty_reg;
+ dirty_pointers[MAXWELL3D_REG_INDEX(stencil_back_func_mask)] = stencil_test_dirty_reg;
+ dirty_pointers[MAXWELL3D_REG_INDEX(stencil_back_op_fail)] = stencil_test_dirty_reg;
+ dirty_pointers[MAXWELL3D_REG_INDEX(stencil_back_op_zfail)] = stencil_test_dirty_reg;
+ dirty_pointers[MAXWELL3D_REG_INDEX(stencil_back_op_zpass)] = stencil_test_dirty_reg;
+ dirty_pointers[MAXWELL3D_REG_INDEX(stencil_back_mask)] = stencil_test_dirty_reg;
+
+ // Color Mask
+ constexpr u32 color_mask_dirty_reg = DIRTY_REGS_POS(color_mask);
+ dirty_pointers[MAXWELL3D_REG_INDEX(color_mask_common)] = color_mask_dirty_reg;
+ set_block(MAXWELL3D_REG_INDEX(color_mask), sizeof(regs.color_mask) / sizeof(u32),
+ color_mask_dirty_reg);
+ // Blend State
+ constexpr u32 blend_state_dirty_reg = DIRTY_REGS_POS(blend_state);
+ set_block(MAXWELL3D_REG_INDEX(blend_color), sizeof(regs.blend_color) / sizeof(u32),
+ blend_state_dirty_reg);
+ dirty_pointers[MAXWELL3D_REG_INDEX(independent_blend_enable)] = blend_state_dirty_reg;
+ set_block(MAXWELL3D_REG_INDEX(blend), sizeof(regs.blend) / sizeof(u32), blend_state_dirty_reg);
+ set_block(MAXWELL3D_REG_INDEX(independent_blend), sizeof(regs.independent_blend) / sizeof(u32),
+ blend_state_dirty_reg);
+
+ // Scissor State
+ constexpr u32 scissor_test_dirty_reg = DIRTY_REGS_POS(scissor_test);
+ set_block(MAXWELL3D_REG_INDEX(scissor_test), sizeof(regs.scissor_test) / sizeof(u32),
+ scissor_test_dirty_reg);
+
+ // Polygon Offset
+ constexpr u32 polygon_offset_dirty_reg = DIRTY_REGS_POS(polygon_offset);
+ dirty_pointers[MAXWELL3D_REG_INDEX(polygon_offset_fill_enable)] = polygon_offset_dirty_reg;
+ dirty_pointers[MAXWELL3D_REG_INDEX(polygon_offset_line_enable)] = polygon_offset_dirty_reg;
+ dirty_pointers[MAXWELL3D_REG_INDEX(polygon_offset_point_enable)] = polygon_offset_dirty_reg;
+ dirty_pointers[MAXWELL3D_REG_INDEX(polygon_offset_units)] = polygon_offset_dirty_reg;
+ dirty_pointers[MAXWELL3D_REG_INDEX(polygon_offset_factor)] = polygon_offset_dirty_reg;
+ dirty_pointers[MAXWELL3D_REG_INDEX(polygon_offset_clamp)] = polygon_offset_dirty_reg;
}
-void Maxwell3D::CallMacroMethod(u32 method, std::vector<u32> parameters) {
+void Maxwell3D::CallMacroMethod(u32 method, std::size_t num_parameters, const u32* parameters) {
// Reset the current macro.
executing_macro = 0;
// Lookup the macro offset
- const u32 entry{(method - MacroRegistersStart) >> 1};
- const auto& search{macro_offsets.find(entry)};
- if (search == macro_offsets.end()) {
- LOG_CRITICAL(HW_GPU, "macro not found for method 0x{:X}!", method);
- UNREACHABLE();
- return;
- }
+ const u32 entry = ((method - MacroRegistersStart) >> 1) % macro_positions.size();
// Execute the current macro.
- macro_interpreter.Execute(search->second, std::move(parameters));
+ macro_interpreter.Execute(macro_positions[entry], num_parameters, parameters);
}
void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) {
@@ -108,6 +263,14 @@ void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) {
const u32 method = method_call.method;
+ if (method == cb_data_state.current) {
+ regs.reg_array[method] = method_call.argument;
+ ProcessCBData(method_call.argument);
+ return;
+ } else if (cb_data_state.current != null_cb_data) {
+ FinishCBData();
+ }
+
// It is an error to write to a register other than the current macro's ARG register before it
// has finished execution.
if (executing_macro != 0) {
@@ -129,7 +292,8 @@ void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) {
// Call the macro when there are no more parameters in the command buffer
if (method_call.IsLastCall()) {
- CallMacroMethod(executing_macro, std::move(macro_params));
+ CallMacroMethod(executing_macro, macro_params.size(), macro_params.data());
+ macro_params.clear();
}
return;
}
@@ -143,49 +307,19 @@ void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) {
if (regs.reg_array[method] != method_call.argument) {
regs.reg_array[method] = method_call.argument;
- // Color buffers
- constexpr u32 first_rt_reg = MAXWELL3D_REG_INDEX(rt);
- constexpr u32 registers_per_rt = sizeof(regs.rt[0]) / sizeof(u32);
- if (method >= first_rt_reg &&
- method < first_rt_reg + registers_per_rt * Regs::NumRenderTargets) {
- const std::size_t rt_index = (method - first_rt_reg) / registers_per_rt;
- dirty_flags.color_buffer.set(rt_index);
- }
-
- // Zeta buffer
- constexpr u32 registers_in_zeta = sizeof(regs.zeta) / sizeof(u32);
- if (method == MAXWELL3D_REG_INDEX(zeta_enable) ||
- method == MAXWELL3D_REG_INDEX(zeta_width) ||
- method == MAXWELL3D_REG_INDEX(zeta_height) ||
- (method >= MAXWELL3D_REG_INDEX(zeta) &&
- method < MAXWELL3D_REG_INDEX(zeta) + registers_in_zeta)) {
- dirty_flags.zeta_buffer = true;
- }
-
- // Shader
- constexpr u32 shader_registers_count =
- sizeof(regs.shader_config[0]) * Regs::MaxShaderProgram / sizeof(u32);
- if (method >= MAXWELL3D_REG_INDEX(shader_config[0]) &&
- method < MAXWELL3D_REG_INDEX(shader_config[0]) + shader_registers_count) {
- dirty_flags.shaders = true;
- }
-
- // Vertex format
- if (method >= MAXWELL3D_REG_INDEX(vertex_attrib_format) &&
- method < MAXWELL3D_REG_INDEX(vertex_attrib_format) + regs.vertex_attrib_format.size()) {
- dirty_flags.vertex_attrib_format = true;
- }
-
- // Vertex buffer
- if (method >= MAXWELL3D_REG_INDEX(vertex_array) &&
- method < MAXWELL3D_REG_INDEX(vertex_array) + 4 * Regs::NumVertexArrays) {
- dirty_flags.vertex_array.set((method - MAXWELL3D_REG_INDEX(vertex_array)) >> 2);
- } else if (method >= MAXWELL3D_REG_INDEX(vertex_array_limit) &&
- method < MAXWELL3D_REG_INDEX(vertex_array_limit) + 2 * Regs::NumVertexArrays) {
- dirty_flags.vertex_array.set((method - MAXWELL3D_REG_INDEX(vertex_array_limit)) >> 1);
- } else if (method >= MAXWELL3D_REG_INDEX(instanced_arrays) &&
- method < MAXWELL3D_REG_INDEX(instanced_arrays) + Regs::NumVertexArrays) {
- dirty_flags.vertex_array.set(method - MAXWELL3D_REG_INDEX(instanced_arrays));
+ const std::size_t dirty_reg = dirty_pointers[method];
+ if (dirty_reg) {
+ dirty.regs[dirty_reg] = true;
+ if (dirty_reg >= DIRTY_REGS_POS(vertex_array) &&
+ dirty_reg < DIRTY_REGS_POS(vertex_array_buffers)) {
+ dirty.vertex_array_buffers = true;
+ } else if (dirty_reg >= DIRTY_REGS_POS(vertex_instance) &&
+ dirty_reg < DIRTY_REGS_POS(vertex_instances)) {
+ dirty.vertex_instances = true;
+ } else if (dirty_reg >= DIRTY_REGS_POS(render_target) &&
+ dirty_reg < DIRTY_REGS_POS(render_settings)) {
+ dirty.render_settings = true;
+ }
}
}
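
The rework above replaces the per-write cascade of range comparisons with a table built once by InitDirtySettings: dirty_pointers maps every register index to the dirty flag it feeds, so a register write costs one lookup plus a flag store. A reduced sketch of the pattern; the sizes and flag layout are illustrative, not Maxwell3D's:

```cpp
#include <array>
#include <cstddef>
#include <cstdint>

constexpr std::size_t NUM_REGS = 0xE00; // register file size, illustrative
constexpr std::size_t NUM_FLAGS = 64;   // dirty flag count, illustrative

std::array<std::uint8_t, NUM_REGS> dirty_pointers{}; // 0 = register has no flag
std::array<bool, NUM_FLAGS> dirty_flags{};

void OnRegisterWrite(std::size_t method) {
    // One table lookup instead of a chain of range checks per write.
    if (const std::uint8_t flag = dirty_pointers[method]) {
        dirty_flags[flag] = true;
    }
}
```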
@@ -198,6 +332,10 @@ void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) {
ProcessMacroBind(method_call.argument);
break;
}
+ case MAXWELL3D_REG_INDEX(firmware[4]): {
+ ProcessFirmwareCall4();
+ break;
+ }
case MAXWELL3D_REG_INDEX(const_buffer.cb_data[0]):
case MAXWELL3D_REG_INDEX(const_buffer.cb_data[1]):
case MAXWELL3D_REG_INDEX(const_buffer.cb_data[2]):
@@ -214,7 +352,7 @@ void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) {
case MAXWELL3D_REG_INDEX(const_buffer.cb_data[13]):
case MAXWELL3D_REG_INDEX(const_buffer.cb_data[14]):
case MAXWELL3D_REG_INDEX(const_buffer.cb_data[15]): {
- ProcessCBData(method_call.argument);
+ StartCBData(method);
break;
}
case MAXWELL3D_REG_INDEX(cb_bind[0].raw_config): {
@@ -249,6 +387,10 @@ void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) {
ProcessQueryGet();
break;
}
+ case MAXWELL3D_REG_INDEX(condition.mode): {
+ ProcessQueryCondition();
+ break;
+ }
case MAXWELL3D_REG_INDEX(sync_info): {
ProcessSyncPoint();
break;
@@ -261,7 +403,7 @@ void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) {
const bool is_last_call = method_call.IsLastCall();
upload_state.ProcessData(method_call.argument, is_last_call);
if (is_last_call) {
- dirty_flags.OnMemoryWrite();
+ dirty.OnMemoryWrite();
}
break;
}
@@ -281,7 +423,15 @@ void Maxwell3D::ProcessMacroUpload(u32 data) {
}
void Maxwell3D::ProcessMacroBind(u32 data) {
- macro_offsets[regs.macros.entry] = data;
+ macro_positions[regs.macros.entry++] = data;
+}
+
+void Maxwell3D::ProcessFirmwareCall4() {
+ LOG_WARNING(HW_GPU, "(STUBBED) called");
+
+ // Firmware call 4 is a blob that changes some registers depending on its parameters.
+ // These registers don't affect emulation, so the call is stubbed by setting register 0xd00 to 1.
+ regs.reg_array[0xd00] = 1;
}
void Maxwell3D::ProcessQueryGet() {
@@ -302,6 +452,7 @@ void Maxwell3D::ProcessQueryGet() {
result = regs.query.query_sequence;
break;
default:
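+        // Default to a result of 1 for query selects that are not implemented yet.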
+ result = 1;
UNIMPLEMENTED_MSG("Unimplemented query select type {}",
static_cast<u32>(regs.query.query_get.select.Value()));
}
@@ -333,7 +484,6 @@ void Maxwell3D::ProcessQueryGet() {
query_result.timestamp = system.CoreTiming().GetTicks();
memory_manager.WriteBlock(sequence_address, &query_result, sizeof(query_result));
}
- dirty_flags.OnMemoryWrite();
break;
}
default:
@@ -342,16 +492,56 @@ void Maxwell3D::ProcessQueryGet() {
}
}
+void Maxwell3D::ProcessQueryCondition() {
+ const GPUVAddr condition_address{regs.condition.Address()};
+ switch (regs.condition.mode) {
+ case Regs::ConditionMode::Always: {
+ execute_on = true;
+ break;
+ }
+ case Regs::ConditionMode::Never: {
+ execute_on = false;
+ break;
+ }
+ case Regs::ConditionMode::ResNonZero: {
+ Regs::QueryCompare cmp;
+ memory_manager.ReadBlockUnsafe(condition_address, &cmp, sizeof(cmp));
+ execute_on = cmp.initial_sequence != 0U && cmp.initial_mode != 0U;
+ break;
+ }
+ case Regs::ConditionMode::Equal: {
+ Regs::QueryCompare cmp;
+ memory_manager.ReadBlockUnsafe(condition_address, &cmp, sizeof(cmp));
+ execute_on =
+ cmp.initial_sequence == cmp.current_sequence && cmp.initial_mode == cmp.current_mode;
+ break;
+ }
+ case Regs::ConditionMode::NotEqual: {
+ Regs::QueryCompare cmp;
+ memory_manager.ReadBlockUnsafe(condition_address, &cmp, sizeof(cmp));
+ execute_on =
+ cmp.initial_sequence != cmp.current_sequence || cmp.initial_mode != cmp.current_mode;
+ break;
+ }
+ default: {
+ UNIMPLEMENTED_MSG("Uninplemented Condition Mode!");
+ execute_on = true;
+ break;
+ }
+ }
+}
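A sketch of how a consumer might honor the new conditional-rendering state; only ShouldExecute() comes from this diff, and the surrounding rasterizer names are illustrative:

    void RasterizerOpenGL::DrawArrays() {
        if (!system.GPU().Maxwell3D().ShouldExecute()) {
            // The condition evaluated false: skip the draw without touching GL state.
            return;
        }
        // ... emit the draw call as usual ...
    }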
+
void Maxwell3D::ProcessSyncPoint() {
const u32 sync_point = regs.sync_info.sync_point.Value();
const u32 increment = regs.sync_info.increment.Value();
- const u32 cache_flush = regs.sync_info.unknown.Value();
- LOG_DEBUG(HW_GPU, "Syncpoint set {}, increment: {}, unk: {}", sync_point, increment,
- cache_flush);
+ [[maybe_unused]] const u32 cache_flush = regs.sync_info.unknown.Value();
+ if (increment) {
+ system.GPU().IncrementSyncPoint(sync_point);
+ }
}
void Maxwell3D::DrawArrays() {
- LOG_DEBUG(HW_GPU, "called, topology={}, count={}", static_cast<u32>(regs.draw.topology.Value()),
+ LOG_TRACE(HW_GPU, "called, topology={}, count={}", static_cast<u32>(regs.draw.topology.Value()),
regs.vertex_buffer.count);
ASSERT_MSG(!(regs.index_array.count && regs.vertex_buffer.count), "Both indexed and direct?");
@@ -396,34 +586,48 @@ void Maxwell3D::ProcessCBBind(Regs::ShaderStage stage) {
auto& shader = state.shader_stages[static_cast<std::size_t>(stage)];
auto& bind_data = regs.cb_bind[static_cast<std::size_t>(stage)];
- auto& buffer = shader.const_buffers[bind_data.index];
-
ASSERT(bind_data.index < Regs::MaxConstBuffers);
+ auto& buffer = shader.const_buffers[bind_data.index];
buffer.enabled = bind_data.valid.Value() != 0;
- buffer.index = bind_data.index;
buffer.address = regs.const_buffer.BufferAddress();
buffer.size = regs.const_buffer.cb_size;
}
void Maxwell3D::ProcessCBData(u32 value) {
+ const u32 id = cb_data_state.id;
+ cb_data_state.buffer[id][cb_data_state.counter] = value;
+ // Increment the current buffer position.
+ regs.const_buffer.cb_pos = regs.const_buffer.cb_pos + 4;
+ cb_data_state.counter++;
+}
+
+void Maxwell3D::StartCBData(u32 method) {
+ constexpr u32 first_cb_data = MAXWELL3D_REG_INDEX(const_buffer.cb_data[0]);
+ cb_data_state.start_pos = regs.const_buffer.cb_pos;
+ cb_data_state.id = method - first_cb_data;
+ cb_data_state.current = method;
+ cb_data_state.counter = 0;
+ ProcessCBData(regs.const_buffer.cb_data[cb_data_state.id]);
+}
+
+void Maxwell3D::FinishCBData() {
// Write the input value to the current const buffer at the current position.
const GPUVAddr buffer_address = regs.const_buffer.BufferAddress();
ASSERT(buffer_address != 0);
// Don't allow writing past the end of the buffer.
- ASSERT(regs.const_buffer.cb_pos + sizeof(u32) <= regs.const_buffer.cb_size);
-
- const GPUVAddr address{buffer_address + regs.const_buffer.cb_pos};
+ ASSERT(regs.const_buffer.cb_pos <= regs.const_buffer.cb_size);
- u8* ptr{memory_manager.GetPointer(address)};
- rasterizer.InvalidateRegion(ToCacheAddr(ptr), sizeof(u32));
- memory_manager.Write<u32>(address, value);
+ const GPUVAddr address{buffer_address + cb_data_state.start_pos};
+ const std::size_t size = regs.const_buffer.cb_pos - cb_data_state.start_pos;
- dirty_flags.OnMemoryWrite();
+ const u32 id = cb_data_state.id;
+ memory_manager.WriteBlock(address, cb_data_state.buffer[id].data(), size);
+ dirty.OnMemoryWrite();
- // Increment the current buffer position.
- regs.const_buffer.cb_pos = regs.const_buffer.cb_pos + 4;
+ cb_data_state.id = null_cb_data;
+ cb_data_state.current = null_cb_data;
}
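Taken together, StartCBData()/ProcessCBData()/FinishCBData() turn the old one-Write<u32>-per-method path into a batched upload: StartCBData() latches the starting cb_pos and the CB_DATA slot being written, ProcessCBData() only accumulates values into cb_data_state.buffer, and FinishCBData() (presumably invoked when a write to a different register ends the run) flushes everything with a single WriteBlock() and one dirty notification.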
Texture::TICEntry Maxwell3D::GetTICEntry(u32 tic_index) const {
@@ -432,14 +636,10 @@ Texture::TICEntry Maxwell3D::GetTICEntry(u32 tic_index) const {
Texture::TICEntry tic_entry;
memory_manager.ReadBlockUnsafe(tic_address_gpu, &tic_entry, sizeof(Texture::TICEntry));
- ASSERT_MSG(tic_entry.header_version == Texture::TICHeaderVersion::BlockLinear ||
- tic_entry.header_version == Texture::TICHeaderVersion::Pitch,
- "TIC versions other than BlockLinear or Pitch are unimplemented");
-
- const auto r_type = tic_entry.r_type.Value();
- const auto g_type = tic_entry.g_type.Value();
- const auto b_type = tic_entry.b_type.Value();
- const auto a_type = tic_entry.a_type.Value();
+ [[maybe_unused]] const auto r_type{tic_entry.r_type.Value()};
+ [[maybe_unused]] const auto g_type{tic_entry.g_type.Value()};
+ [[maybe_unused]] const auto b_type{tic_entry.b_type.Value()};
+ [[maybe_unused]] const auto a_type{tic_entry.a_type.Value()};
// TODO(Subv): Different data types for separate components are not supported
DEBUG_ASSERT(r_type == g_type && r_type == b_type && r_type == a_type);
diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h
index f342c78e6..e5ec90717 100644
--- a/src/video_core/engines/maxwell_3d.h
+++ b/src/video_core/engines/maxwell_3d.h
@@ -15,6 +15,7 @@
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/math_util.h"
+#include "video_core/engines/const_buffer_info.h"
#include "video_core/engines/engine_upload.h"
#include "video_core/gpu.h"
#include "video_core/macro_interpreter.h"
@@ -61,11 +62,13 @@ public:
static constexpr std::size_t NumVertexAttributes = 32;
static constexpr std::size_t NumVaryings = 31;
static constexpr std::size_t NumTextureSamplers = 32;
+ static constexpr std::size_t NumImages = 8; // TODO(Rodrigo): Investigate this number
static constexpr std::size_t NumClipDistances = 8;
static constexpr std::size_t MaxShaderProgram = 6;
static constexpr std::size_t MaxShaderStage = 5;
// Maximum number of const buffers per shader stage.
static constexpr std::size_t MaxConstBuffers = 18;
+ static constexpr std::size_t MaxConstBufferSize = 0x10000;
enum class QueryMode : u32 {
Write = 0,
@@ -88,6 +91,20 @@ public:
enum class QuerySelect : u32 {
Zero = 0,
+ TimeElapsed = 2,
+ TransformFeedbackPrimitivesGenerated = 11,
+ PrimitivesGenerated = 18,
+ SamplesPassed = 21,
+ TransformFeedbackUnknown = 26,
+ };
+
+ struct QueryCompare {
+ u32 initial_sequence;
+ u32 initial_mode;
+ u32 unknown1;
+ u32 unknown2;
+ u32 current_sequence;
+ u32 current_mode;
};
enum class QuerySyncCondition : u32 {
@@ -95,6 +112,14 @@ public:
GreaterThan = 1,
};
+ enum class ConditionMode : u32 {
+ Never = 0,
+ Always = 1,
+ ResNonZero = 2,
+ Equal = 3,
+ NotEqual = 4,
+ };
+
enum class ShaderProgram : u32 {
VertexA = 0,
VertexB = 1,
@@ -813,7 +838,18 @@ public:
BitField<4, 1, u32> alpha_to_one;
} multisample_control;
- INSERT_PADDING_WORDS(0x7);
+ INSERT_PADDING_WORDS(0x4);
+
+ struct {
+ u32 address_high;
+ u32 address_low;
+ ConditionMode mode;
+
+ GPUVAddr Address() const {
+ return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
+ address_low);
+ }
+ } condition;
struct {
u32 tsc_address_high;
@@ -1053,7 +1089,9 @@ public:
INSERT_PADDING_WORDS(14);
} shader_config[MaxShaderProgram];
- INSERT_PADDING_WORDS(0x80);
+ INSERT_PADDING_WORDS(0x60);
+
+ u32 firmware[0x20];
struct {
u32 cb_size;
@@ -1112,13 +1150,6 @@ public:
static_assert(std::is_trivially_copyable_v<Regs>, "Maxwell3D Regs must be trivially copyable");
struct State {
- struct ConstBufferInfo {
- GPUVAddr address;
- u32 index;
- u32 size;
- bool enabled;
- };
-
struct ShaderStageInfo {
std::array<ConstBufferInfo, Regs::MaxConstBuffers> const_buffers;
};
@@ -1129,23 +1160,77 @@ public:
State state{};
- struct DirtyFlags {
- std::bitset<8> color_buffer{0xFF};
- std::bitset<32> vertex_array{0xFFFFFFFF};
+ struct DirtyRegs {
+ static constexpr std::size_t NUM_REGS = 256;
+ union {
+ struct {
+ bool null_dirty;
+
+ // Vertex Attributes
+ bool vertex_attrib_format;
+
+ // Vertex Arrays
+ std::array<bool, 32> vertex_array;
+
+ bool vertex_array_buffers;
- bool vertex_attrib_format = true;
- bool zeta_buffer = true;
- bool shaders = true;
+ // Vertex Instances
+ std::array<bool, 32> vertex_instance;
+
+ bool vertex_instances;
+
+ // Render Targets
+ std::array<bool, 8> render_target;
+ bool depth_buffer;
+
+ bool render_settings;
+
+ // Shaders
+ bool shaders;
+
+ // Rasterizer State
+ bool viewport;
+ bool clip_coefficient;
+ bool cull_mode;
+ bool primitive_restart;
+ bool depth_test;
+ bool stencil_test;
+ bool blend_state;
+ bool scissor_test;
+ bool transform_feedback;
+ bool color_mask;
+ bool polygon_offset;
+
+ // Complementary
+ bool viewport_transform;
+ bool screen_y_control;
+
+ bool memory_general;
+ };
+ std::array<bool, NUM_REGS> regs;
+ };
+
+ void ResetVertexArrays() {
+ vertex_array.fill(true);
+ vertex_array_buffers = true;
+ }
+
+ void ResetRenderTargets() {
+ depth_buffer = true;
+ render_target.fill(true);
+ render_settings = true;
+ }
void OnMemoryWrite() {
- zeta_buffer = true;
shaders = true;
- color_buffer.set();
- vertex_array.set();
+ memory_general = true;
+ ResetRenderTargets();
+ ResetVertexArrays();
}
- };
- DirtyFlags dirty_flags;
+ } dirty{};
+
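+ /// Maps each register index to its slot in DirtyRegs::regs; 0 (null_dirty) means untracked.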
+ std::array<u8, Regs::NUM_REGS> dirty_pointers{};
/// Reads a register value located at the input method address
u32 GetRegisterValue(u32 method) const;
@@ -1174,6 +1259,10 @@ public:
return macro_memory;
}
+ bool ShouldExecute() const {
+ return execute_on;
+ }
+
private:
void InitializeRegisterDefaults();
@@ -1184,7 +1273,7 @@ private:
MemoryManager& memory_manager;
/// Start offsets of each macro in macro_memory
- std::unordered_map<u32, u32> macro_offsets;
+ std::array<u32, 0x80> macro_positions = {};
/// Memory for macro code
MacroMemory macro_memory;
@@ -1197,20 +1286,34 @@ private:
/// Interpreter for the macro codes uploaded to the GPU.
MacroInterpreter macro_interpreter;
+ static constexpr u32 null_cb_data = 0xFFFFFFFF;
+ struct {
+ std::array<std::array<u32, 0x4000>, 16> buffer;
+ u32 current{null_cb_data};
+ u32 id{null_cb_data};
+ u32 start_pos{};
+ u32 counter{};
+ } cb_data_state;
+
Upload::State upload_state;
+ bool execute_on{true};
+
/// Retrieves information about a specific TIC entry from the TIC buffer.
Texture::TICEntry GetTICEntry(u32 tic_index) const;
/// Retrieves information about a specific TSC entry from the TSC buffer.
Texture::TSCEntry GetTSCEntry(u32 tsc_index) const;
+ void InitDirtySettings();
+
/**
* Call a macro on this engine.
* @param method Method to call
+ * @param num_parameters Number of arguments
* @param parameters Arguments to the method call
*/
- void CallMacroMethod(u32 method, std::vector<u32> parameters);
+ void CallMacroMethod(u32 method, std::size_t num_parameters, const u32* parameters);
/// Handles writes to the macro uploading register.
void ProcessMacroUpload(u32 data);
@@ -1218,17 +1321,25 @@ private:
/// Handles writes to the macro bind register.
void ProcessMacroBind(u32 data);
+ /// Handles firmware blob 4
+ void ProcessFirmwareCall4();
+
/// Handles a write to the CLEAR_BUFFERS register.
void ProcessClearBuffers();
/// Handles a write to the QUERY_GET register.
void ProcessQueryGet();
+ /// Handles a write to the condition register (conditional rendering).
+ void ProcessQueryCondition();
+
/// Handles writes to syncing register.
void ProcessSyncPoint();
/// Handles a write to the CB_DATA[i] register.
+ void StartCBData(u32 method);
void ProcessCBData(u32 value);
+ void FinishCBData();
/// Handles a write to the CB_BIND register.
void ProcessCBBind(Regs::ShaderStage stage);
@@ -1295,6 +1406,7 @@ ASSERT_REG_POSITION(clip_distance_enabled, 0x544);
ASSERT_REG_POSITION(point_size, 0x546);
ASSERT_REG_POSITION(zeta_enable, 0x54E);
ASSERT_REG_POSITION(multisample_control, 0x54F);
+ASSERT_REG_POSITION(condition, 0x554);
ASSERT_REG_POSITION(tsc, 0x557);
ASSERT_REG_POSITION(polygon_offset_factor, 0x55b);
ASSERT_REG_POSITION(tic, 0x55D);
@@ -1324,6 +1436,7 @@ ASSERT_REG_POSITION(vertex_array[0], 0x700);
ASSERT_REG_POSITION(independent_blend, 0x780);
ASSERT_REG_POSITION(vertex_array_limit[0], 0x7C0);
ASSERT_REG_POSITION(shader_config[0], 0x800);
+ASSERT_REG_POSITION(firmware, 0x8C0);
ASSERT_REG_POSITION(const_buffer, 0x8E0);
ASSERT_REG_POSITION(cb_bind[0], 0x904);
ASSERT_REG_POSITION(tex_cb_index, 0x982);
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp
index 3a5dfef0c..ad8453c5f 100644
--- a/src/video_core/engines/maxwell_dma.cpp
+++ b/src/video_core/engines/maxwell_dma.cpp
@@ -5,18 +5,17 @@
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
+#include "core/settings.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/engines/maxwell_dma.h"
#include "video_core/memory_manager.h"
-#include "video_core/rasterizer_interface.h"
#include "video_core/renderer_base.h"
#include "video_core/textures/decoders.h"
namespace Tegra::Engines {
-MaxwellDMA::MaxwellDMA(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
- MemoryManager& memory_manager)
- : system{system}, rasterizer{rasterizer}, memory_manager{memory_manager} {}
+MaxwellDMA::MaxwellDMA(Core::System& system, MemoryManager& memory_manager)
+ : system{system}, memory_manager{memory_manager} {}
void MaxwellDMA::CallMethod(const GPU::MethodCall& method_call) {
ASSERT_MSG(method_call.method < Regs::NUM_REGS,
@@ -38,7 +37,7 @@ void MaxwellDMA::CallMethod(const GPU::MethodCall& method_call) {
}
void MaxwellDMA::HandleCopy() {
- LOG_WARNING(HW_GPU, "Requested a DMA copy");
+ LOG_TRACE(HW_GPU, "Requested a DMA copy");
const GPUVAddr source = regs.src_address.Address();
const GPUVAddr dest = regs.dst_address.Address();
@@ -58,7 +57,7 @@ void MaxwellDMA::HandleCopy() {
}
// All copies here update the main memory, so mark all rasterizer states as invalid.
- system.GPU().Maxwell3D().dirty_flags.OnMemoryWrite();
+ system.GPU().Maxwell3D().dirty.OnMemoryWrite();
if (regs.exec.is_dst_linear && regs.exec.is_src_linear) {
// When the enable_2d bit is disabled, the copy is performed as if we were copying a 1D
@@ -84,13 +83,17 @@ void MaxwellDMA::HandleCopy() {
ASSERT(regs.exec.enable_2d == 1);
if (regs.exec.is_dst_linear && !regs.exec.is_src_linear) {
- ASSERT(regs.src_params.size_z == 1);
+ ASSERT(regs.src_params.BlockDepth() == 0);
// If the input is tiled and the output is linear, deswizzle the input and copy it over.
- const u32 src_bytes_per_pixel = regs.src_pitch / regs.src_params.size_x;
+ const u32 bytes_per_pixel = regs.dst_pitch / regs.x_count;
const std::size_t src_size = Texture::CalculateSize(
- true, src_bytes_per_pixel, regs.src_params.size_x, regs.src_params.size_y,
+ true, bytes_per_pixel, regs.src_params.size_x, regs.src_params.size_y,
regs.src_params.size_z, regs.src_params.BlockHeight(), regs.src_params.BlockDepth());
+ const std::size_t src_layer_size = Texture::CalculateSize(
+ true, bytes_per_pixel, regs.src_params.size_x, regs.src_params.size_y, 1,
+ regs.src_params.BlockHeight(), regs.src_params.BlockDepth());
+
const std::size_t dst_size = regs.dst_pitch * regs.y_count;
if (read_buffer.size() < src_size) {
@@ -104,23 +107,23 @@ void MaxwellDMA::HandleCopy() {
memory_manager.ReadBlock(source, read_buffer.data(), src_size);
memory_manager.ReadBlock(dest, write_buffer.data(), dst_size);
- Texture::UnswizzleSubrect(regs.x_count, regs.y_count, regs.dst_pitch,
- regs.src_params.size_x, src_bytes_per_pixel, read_buffer.data(),
- write_buffer.data(), regs.src_params.BlockHeight(),
- regs.src_params.pos_x, regs.src_params.pos_y);
+ Texture::UnswizzleSubrect(
+ regs.x_count, regs.y_count, regs.dst_pitch, regs.src_params.size_x, bytes_per_pixel,
+ read_buffer.data() + src_layer_size * regs.src_params.pos_z, write_buffer.data(),
+ regs.src_params.BlockHeight(), regs.src_params.pos_x, regs.src_params.pos_y);
memory_manager.WriteBlock(dest, write_buffer.data(), dst_size);
} else {
- ASSERT(regs.dst_params.BlockDepth() == 1);
+ ASSERT(regs.dst_params.BlockDepth() == 0);
- const u32 src_bytes_per_pixel = regs.src_pitch / regs.x_count;
+ const u32 bytes_per_pixel = regs.src_pitch / regs.x_count;
const std::size_t dst_size = Texture::CalculateSize(
- true, src_bytes_per_pixel, regs.dst_params.size_x, regs.dst_params.size_y,
+ true, bytes_per_pixel, regs.dst_params.size_x, regs.dst_params.size_y,
regs.dst_params.size_z, regs.dst_params.BlockHeight(), regs.dst_params.BlockDepth());
const std::size_t dst_layer_size = Texture::CalculateSize(
- true, src_bytes_per_pixel, regs.dst_params.size_x, regs.dst_params.size_y, 1,
+ true, bytes_per_pixel, regs.dst_params.size_x, regs.dst_params.size_y, 1,
regs.dst_params.BlockHeight(), regs.dst_params.BlockDepth());
const std::size_t src_size = regs.src_pitch * regs.y_count;
@@ -133,14 +136,19 @@ void MaxwellDMA::HandleCopy() {
write_buffer.resize(dst_size);
}
- memory_manager.ReadBlock(source, read_buffer.data(), src_size);
- memory_manager.ReadBlock(dest, write_buffer.data(), dst_size);
+ if (Settings::values.use_accurate_gpu_emulation) {
+ memory_manager.ReadBlock(source, read_buffer.data(), src_size);
+ memory_manager.ReadBlock(dest, write_buffer.data(), dst_size);
+ } else {
+ memory_manager.ReadBlockUnsafe(source, read_buffer.data(), src_size);
+ memory_manager.ReadBlockUnsafe(dest, write_buffer.data(), dst_size);
+ }
// If the input is linear and the output is tiled, swizzle the input and copy it over.
- Texture::SwizzleSubrect(regs.x_count, regs.y_count, regs.src_pitch, regs.dst_params.size_x,
- src_bytes_per_pixel,
- write_buffer.data() + dst_layer_size * regs.dst_params.pos_z,
- read_buffer.data(), regs.dst_params.BlockHeight());
+ Texture::SwizzleSubrect(
+ regs.x_count, regs.y_count, regs.src_pitch, regs.dst_params.size_x, bytes_per_pixel,
+ write_buffer.data() + dst_layer_size * regs.dst_params.pos_z, read_buffer.data(),
+ regs.dst_params.BlockHeight(), regs.dst_params.pos_x, regs.dst_params.pos_y);
memory_manager.WriteBlock(dest, write_buffer.data(), dst_size);
}
diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h
index e5942f671..93808a9bb 100644
--- a/src/video_core/engines/maxwell_dma.h
+++ b/src/video_core/engines/maxwell_dma.h
@@ -20,10 +20,6 @@ namespace Tegra {
class MemoryManager;
}
-namespace VideoCore {
-class RasterizerInterface;
-}
-
namespace Tegra::Engines {
/**
@@ -33,8 +29,7 @@ namespace Tegra::Engines {
class MaxwellDMA final {
public:
- explicit MaxwellDMA(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
- MemoryManager& memory_manager);
+ explicit MaxwellDMA(Core::System& system, MemoryManager& memory_manager);
~MaxwellDMA() = default;
/// Write the value to the register identified by method.
@@ -59,11 +54,11 @@ public:
};
u32 BlockHeight() const {
- return 1 << block_height;
+ return block_height.Value();
}
u32 BlockDepth() const {
- return 1 << block_depth;
+ return block_depth.Value();
}
};
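Note that BlockHeight()/BlockDepth() now return the raw register value (the log2 exponent of the block dimensions) instead of 1 << value; the texture decoders are assumed to consume the exponent directly in this commit, which is why HandleCopy() above now asserts BlockDepth() == 0 rather than == 1.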
@@ -180,8 +175,6 @@ public:
private:
Core::System& system;
- VideoCore::RasterizerInterface& rasterizer;
-
MemoryManager& memory_manager;
std::vector<u8> read_buffer;
diff --git a/src/video_core/engines/shader_bytecode.h b/src/video_core/engines/shader_bytecode.h
index ffb3ec3e0..a6110bd86 100644
--- a/src/video_core/engines/shader_bytecode.h
+++ b/src/video_core/engines/shader_bytecode.h
@@ -4,6 +4,7 @@
#pragma once
+#include <array>
#include <bitset>
#include <optional>
#include <tuple>
@@ -77,7 +78,7 @@ union Attribute {
constexpr explicit Attribute(u64 value) : value(value) {}
enum class Index : u64 {
- PointSize = 6,
+ LayerViewportPointSize = 6,
Position = 7,
Attribute_0 = 8,
Attribute_31 = 39,
@@ -126,6 +127,15 @@ union Sampler {
u64 value{};
};
+union Image {
+ Image() = default;
+
+ constexpr explicit Image(u64 value) : value{value} {}
+
+ BitField<36, 13, u64> index;
+ u64 value{};
+};
+
} // namespace Tegra::Shader
namespace std {
@@ -344,6 +354,26 @@ enum class TextureMiscMode : u64 {
PTP,
};
+enum class SurfaceDataMode : u64 {
+ P = 0,
+ D_BA = 1,
+};
+
+enum class OutOfBoundsStore : u64 {
+ Ignore = 0,
+ Clamp = 1,
+ Trap = 2,
+};
+
+enum class ImageType : u64 {
+ Texture1D = 0,
+ TextureBuffer = 1,
+ Texture1DArray = 2,
+ Texture2D = 3,
+ Texture2DArray = 4,
+ Texture3D = 5,
+};
+
enum class IsberdMode : u64 {
None = 0,
Patch = 1,
@@ -398,7 +428,7 @@ enum class LmemLoadCacheManagement : u64 {
CV = 3,
};
-enum class LmemStoreCacheManagement : u64 {
+enum class StoreCacheManagement : u64 {
Default = 0,
CG = 1,
CS = 2,
@@ -508,6 +538,41 @@ enum class PhysicalAttributeDirection : u64 {
Output = 1,
};
+enum class VoteOperation : u64 {
+ All = 0, // allThreadsNV
+ Any = 1, // anyThreadNV
+ Eq = 2, // allThreadsEqualNV
+};
+
+enum class ImageAtomicSize : u64 {
+ U32 = 0,
+ S32 = 1,
+ U64 = 2,
+ F32 = 3,
+ S64 = 5,
+ SD32 = 6,
+ SD64 = 7,
+};
+
+enum class ImageAtomicOperation : u64 {
+ Add = 0,
+ Min = 1,
+ Max = 2,
+ Inc = 3,
+ Dec = 4,
+ And = 5,
+ Or = 6,
+ Xor = 7,
+ Exch = 8,
+};
+
+enum class ShuffleOperation : u64 {
+ Idx = 0, // shuffleNV
+ Up = 1, // shuffleUpNV
+ Down = 2, // shuffleDownNV
+ Bfly = 3, // shuffleXorNV
+};
+
union Instruction {
Instruction& operator=(const Instruction& instr) {
value = instr.value;
@@ -530,6 +595,27 @@ union Instruction {
BitField<48, 16, u64> opcode;
union {
+ BitField<8, 5, ConditionCode> cc;
+ BitField<13, 1, u64> trigger;
+ } nop;
+
+ union {
+ BitField<48, 2, VoteOperation> operation;
+ BitField<45, 3, u64> dest_pred;
+ BitField<39, 3, u64> value;
+ BitField<42, 1, u64> negate_value;
+ } vote;
+
+ union {
+ BitField<30, 2, ShuffleOperation> operation;
+ BitField<48, 3, u64> pred48;
+ BitField<28, 1, u64> is_index_imm;
+ BitField<29, 1, u64> is_mask_imm;
+ BitField<20, 5, u64> index_imm;
+ BitField<34, 13, u64> mask_imm;
+ } shfl;
+
+ union {
BitField<8, 8, Register> gpr;
BitField<20, 24, s64> offset;
} gmem;
@@ -627,6 +713,10 @@ union Instruction {
} shift;
union {
+ BitField<39, 1, u64> wrap;
+ } shr;
+
+ union {
BitField<39, 5, u64> shift_amount;
BitField<48, 1, u64> negate_b;
BitField<49, 1, u64> negate_a;
@@ -811,7 +901,7 @@ union Instruction {
} ld_l;
union {
- BitField<44, 2, LmemStoreCacheManagement> cache_management;
+ BitField<44, 2, StoreCacheManagement> cache_management;
} st_l;
union {
@@ -838,6 +928,7 @@ union Instruction {
union {
BitField<0, 3, u64> pred0;
BitField<3, 3, u64> pred3;
+ BitField<6, 1, u64> neg_b;
BitField<7, 1, u64> abs_a;
BitField<39, 3, u64> pred39;
BitField<42, 1, u64> neg_pred;
@@ -901,8 +992,6 @@ union Instruction {
} csetp;
union {
- BitField<35, 4, PredCondition> cond;
- BitField<49, 1, u64> h_and;
BitField<6, 1, u64> ftz;
BitField<45, 2, PredOperation> op;
BitField<3, 3, u64> pred3;
@@ -910,9 +999,21 @@ union Instruction {
BitField<43, 1, u64> negate_a;
BitField<44, 1, u64> abs_a;
BitField<47, 2, HalfType> type_a;
- BitField<31, 1, u64> negate_b;
- BitField<30, 1, u64> abs_b;
- BitField<28, 2, HalfType> type_b;
+ union {
+ BitField<35, 4, PredCondition> cond;
+ BitField<49, 1, u64> h_and;
+ BitField<31, 1, u64> negate_b;
+ BitField<30, 1, u64> abs_b;
+ BitField<28, 2, HalfType> type_b;
+ } reg;
+ union {
+ BitField<56, 1, u64> negate_b;
+ BitField<54, 1, u64> abs_b;
+ } cbuf;
+ union {
+ BitField<49, 4, PredCondition> cond;
+ BitField<53, 1, u64> h_and;
+ } cbuf_and_imm;
BitField<42, 1, u64> neg_pred;
BitField<39, 3, u64> pred39;
} hsetp2;
@@ -961,7 +1062,6 @@ union Instruction {
} iset;
union {
- BitField<41, 2, u64> selector; // i2i and i2f only
BitField<45, 1, u64> negate_a;
BitField<49, 1, u64> abs_a;
BitField<10, 2, Register::Size> src_size;
@@ -978,8 +1078,6 @@ union Instruction {
} f2i;
union {
- BitField<8, 2, Register::Size> src_size;
- BitField<10, 2, Register::Size> dst_size;
BitField<39, 4, u64> rounding;
// H0, H1 extract for F16 missing
BitField<41, 1, u64> selector; // Guessed as some games set it, TODO: reverse this value
@@ -989,6 +1087,13 @@ union Instruction {
}
} f2f;
+ union {
+ BitField<41, 2, u64> selector;
+ } int_src;
+
+ union {
+ BitField<41, 1, u64> selector;
+ } float_src;
} conversion;
union {
@@ -1232,8 +1337,23 @@ union Instruction {
} texs;
union {
+ BitField<28, 1, u64> is_array;
+ BitField<29, 2, TextureType> texture_type;
+ BitField<35, 1, u64> aoffi;
+ BitField<49, 1, u64> nodep_flag;
+ BitField<50, 1, u64> ms; // Multisample?
+ BitField<54, 1, u64> cl;
+ BitField<55, 1, u64> process_mode;
+
+ TextureProcessMode GetTextureProcessMode() const {
+ return process_mode == 0 ? TextureProcessMode::LZ : TextureProcessMode::LL;
+ }
+ } tld;
+
+ union {
BitField<49, 1, u64> nodep_flag;
BitField<53, 4, u64> texture_info;
+ BitField<59, 1, u64> fp32_flag;
TextureType GetTextureType() const {
// The TLDS instruction has a weird encoding for the texture type.
@@ -1281,6 +1401,43 @@ union Instruction {
} tlds;
union {
+ BitField<24, 2, StoreCacheManagement> cache_management;
+ BitField<33, 3, ImageType> image_type;
+ BitField<49, 2, OutOfBoundsStore> out_of_bounds_store;
+ BitField<51, 1, u64> is_immediate;
+ BitField<52, 1, SurfaceDataMode> mode;
+
+ BitField<20, 3, StoreType> store_data_layout;
+ BitField<20, 4, u64> component_mask_selector;
+
+ bool IsComponentEnabled(std::size_t component) const {
+ ASSERT(mode == SurfaceDataMode::P);
+ constexpr u8 R = 0b0001;
+ constexpr u8 G = 0b0010;
+ constexpr u8 B = 0b0100;
+ constexpr u8 A = 0b1000;
+ constexpr std::array<u8, 16> mask = {
+ 0, (R), (G), (R | G), (B), (R | B),
+ (G | B), (R | G | B), (A), (R | A), (G | A), (R | G | A),
+ (B | A), (R | B | A), (G | B | A), (R | G | B | A)};
+ return std::bitset<4>{mask.at(component_mask_selector)}.test(component);
+ }
+
+ StoreType GetStoreDataLayout() const {
+ ASSERT(mode == SurfaceDataMode::D_BA);
+ return store_data_layout;
+ }
+ } sust;
+
+ union {
+ BitField<28, 1, u64> is_ba;
+ BitField<51, 3, ImageAtomicSize> size;
+ BitField<33, 3, ImageType> image_type;
+ BitField<29, 4, ImageAtomicOperation> operation;
+ BitField<49, 2, OutOfBoundsStore> out_of_bounds_store;
+ } suatom_d;
+
+ union {
BitField<20, 24, u64> target;
BitField<5, 1, u64> constant_buffer;
@@ -1295,6 +1452,20 @@ union Instruction {
} bra;
union {
+ BitField<20, 24, u64> target;
+ BitField<5, 1, u64> constant_buffer;
+
+ s32 GetBranchExtend() const {
+ // Sign extend the branch target offset
+ u32 mask = 1U << (24 - 1);
+ u32 value = static_cast<u32>(target);
+ // The branch offset is relative to the next instruction and is stored in bytes, so
+ // divide it by the size of an instruction and add 1 to it.
+ return static_cast<s32>((value ^ mask) - mask) / sizeof(Instruction) + 1;
+ }
+ } brx;
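The (value ^ mask) - mask idiom sign-extends the 24-bit target. For example, with mask = 1 << 23, an encoded offset of 0xFFFFF8 becomes (0xFFFFF8 ^ 0x800000) - 0x800000 = 0x7FFFF8 - 0x800000 = -8, i.e. one 8-byte instruction backwards.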
+
+ union {
BitField<39, 1, u64> emit; // EmitVertex
BitField<40, 1, u64> cut; // EndPrimitive
} out;
@@ -1371,6 +1542,7 @@ union Instruction {
Attribute attribute;
Sampler sampler;
+ Image image;
u64 value;
};
@@ -1385,11 +1557,14 @@ public:
SYNC,
BRK,
DEPBAR,
+ VOTE,
+ SHFL,
BFE_C,
BFE_R,
BFE_IMM,
BFI_IMM_R,
BRA,
+ BRX,
PBK,
LD_A,
LD_L,
@@ -1408,12 +1583,16 @@ public:
TXQ, // Texture Query
TXQ_B, // Texture Query Bindless
TEXS, // Texture Fetch with scalar/non-vec4 source/destinations
+ TLD, // Texture Load
TLDS, // Texture Load with scalar/non-vec4 source/destinations
TLD4, // Texture Load 4
TLD4S, // Texture Load 4 with scalar / non - vec4 source / destinations
TMML_B, // Texture Mip Map Level
TMML, // Texture Mip Map Level
+ SUST, // Surface Store
+ SUATOM, // Surface Atomic Operation
EXIT,
+ NOP,
IPA,
OUT_R, // Emit vertex/primitive
ISBERD,
@@ -1456,7 +1635,9 @@ public:
HFMA2_RC,
HFMA2_RR,
HFMA2_IMM_R,
+ HSETP2_C,
HSETP2_R,
+ HSETP2_IMM,
HSET2_R,
POPC_C,
POPC_R,
@@ -1541,8 +1722,10 @@ public:
Hfma2,
Flow,
Synch,
+ Warp,
Memory,
Texture,
+ Image,
FloatSet,
FloatSetPredicate,
IntegerSet,
@@ -1661,10 +1844,13 @@ private:
INST("111000101001----", Id::SSY, Type::Flow, "SSY"),
INST("111000101010----", Id::PBK, Type::Flow, "PBK"),
INST("111000100100----", Id::BRA, Type::Flow, "BRA"),
+ INST("111000100101----", Id::BRX, Type::Flow, "BRX"),
INST("1111000011111---", Id::SYNC, Type::Flow, "SYNC"),
INST("111000110100---", Id::BRK, Type::Flow, "BRK"),
INST("111000110000----", Id::EXIT, Type::Flow, "EXIT"),
INST("1111000011110---", Id::DEPBAR, Type::Synch, "DEPBAR"),
+ INST("0101000011011---", Id::VOTE, Type::Warp, "VOTE"),
+ INST("1110111100010---", Id::SHFL, Type::Warp, "SHFL"),
INST("1110111111011---", Id::LD_A, Type::Memory, "LD_A"),
INST("1110111101001---", Id::LD_S, Type::Memory, "LD_S"),
INST("1110111101000---", Id::LD_L, Type::Memory, "LD_L"),
@@ -1682,11 +1868,15 @@ private:
INST("1101111101001---", Id::TXQ, Type::Texture, "TXQ"),
INST("1101111101010---", Id::TXQ_B, Type::Texture, "TXQ_B"),
INST("1101-00---------", Id::TEXS, Type::Texture, "TEXS"),
- INST("1101101---------", Id::TLDS, Type::Texture, "TLDS"),
+ INST("11011100--11----", Id::TLD, Type::Texture, "TLD"),
+ INST("1101-01---------", Id::TLDS, Type::Texture, "TLDS"),
INST("110010----111---", Id::TLD4, Type::Texture, "TLD4"),
INST("1101111100------", Id::TLD4S, Type::Texture, "TLD4S"),
INST("110111110110----", Id::TMML_B, Type::Texture, "TMML_B"),
INST("1101111101011---", Id::TMML, Type::Texture, "TMML"),
+ INST("11101011001-----", Id::SUST, Type::Image, "SUST"),
+ INST("1110101000------", Id::SUATOM, Type::Image, "SUATOM_D"),
+ INST("0101000010110---", Id::NOP, Type::Trivial, "NOP"),
INST("11100000--------", Id::IPA, Type::Trivial, "IPA"),
INST("1111101111100---", Id::OUT_R, Type::Trivial, "OUT_R"),
INST("1110111111010---", Id::ISBERD, Type::Trivial, "ISBERD"),
@@ -1735,7 +1925,9 @@ private:
INST("01100---1-------", Id::HFMA2_RC, Type::Hfma2, "HFMA2_RC"),
INST("0101110100000---", Id::HFMA2_RR, Type::Hfma2, "HFMA2_RR"),
INST("01110---0-------", Id::HFMA2_IMM_R, Type::Hfma2, "HFMA2_R_IMM"),
- INST("0101110100100---", Id::HSETP2_R, Type::HalfSetPredicate, "HSETP_R"),
+ INST("0111111-1-------", Id::HSETP2_C, Type::HalfSetPredicate, "HSETP2_C"),
+ INST("0101110100100---", Id::HSETP2_R, Type::HalfSetPredicate, "HSETP2_R"),
+ INST("0111111-0-------", Id::HSETP2_IMM, Type::HalfSetPredicate, "HSETP2_IMM"),
INST("0101110100011---", Id::HSET2_R, Type::HalfSet, "HSET2_R"),
INST("0101000010000---", Id::MUFU, Type::Arithmetic, "MUFU"),
INST("0100110010010---", Id::RRO_C, Type::Arithmetic, "RRO_C"),
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index 52706505b..2c47541cb 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -17,26 +17,15 @@
namespace Tegra {
-u32 FramebufferConfig::BytesPerPixel(PixelFormat format) {
- switch (format) {
- case PixelFormat::ABGR8:
- case PixelFormat::BGRA8:
- return 4;
- default:
- return 4;
- }
-
- UNREACHABLE();
-}
-
-GPU::GPU(Core::System& system, VideoCore::RendererBase& renderer) : renderer{renderer} {
+GPU::GPU(Core::System& system, VideoCore::RendererBase& renderer, bool is_async)
+ : system{system}, renderer{renderer}, is_async{is_async} {
auto& rasterizer{renderer.Rasterizer()};
- memory_manager = std::make_unique<Tegra::MemoryManager>(rasterizer);
+ memory_manager = std::make_unique<Tegra::MemoryManager>(system, rasterizer);
dma_pusher = std::make_unique<Tegra::DmaPusher>(*this);
maxwell_3d = std::make_unique<Engines::Maxwell3D>(system, rasterizer, *memory_manager);
- fermi_2d = std::make_unique<Engines::Fermi2D>(rasterizer, *memory_manager);
+ fermi_2d = std::make_unique<Engines::Fermi2D>(rasterizer);
kepler_compute = std::make_unique<Engines::KeplerCompute>(system, rasterizer, *memory_manager);
- maxwell_dma = std::make_unique<Engines::MaxwellDMA>(system, rasterizer, *memory_manager);
+ maxwell_dma = std::make_unique<Engines::MaxwellDMA>(system, *memory_manager);
kepler_memory = std::make_unique<Engines::KeplerMemory>(system, *memory_manager);
}
@@ -50,6 +39,14 @@ const Engines::Maxwell3D& GPU::Maxwell3D() const {
return *maxwell_3d;
}
+Engines::KeplerCompute& GPU::KeplerCompute() {
+ return *kepler_compute;
+}
+
+const Engines::KeplerCompute& GPU::KeplerCompute() const {
+ return *kepler_compute;
+}
+
MemoryManager& GPU::MemoryManager() {
return *memory_manager;
}
@@ -66,6 +63,55 @@ const DmaPusher& GPU::DmaPusher() const {
return *dma_pusher;
}
+void GPU::IncrementSyncPoint(const u32 syncpoint_id) {
+ syncpoints[syncpoint_id]++;
+ std::lock_guard lock{sync_mutex};
+ if (!syncpt_interrupts[syncpoint_id].empty()) {
+ u32 value = syncpoints[syncpoint_id].load();
+ auto it = syncpt_interrupts[syncpoint_id].begin();
+ while (it != syncpt_interrupts[syncpoint_id].end()) {
+ if (value >= *it) {
+ TriggerCpuInterrupt(syncpoint_id, *it);
+ it = syncpt_interrupts[syncpoint_id].erase(it);
+ continue;
+ }
+ it++;
+ }
+ }
+}
+
+u32 GPU::GetSyncpointValue(const u32 syncpoint_id) const {
+ return syncpoints[syncpoint_id].load();
+}
+
+void GPU::RegisterSyncptInterrupt(const u32 syncpoint_id, const u32 value) {
+ auto& interrupt = syncpt_interrupts[syncpoint_id];
+ bool contains = std::any_of(interrupt.begin(), interrupt.end(),
+ [value](u32 in_value) { return in_value == value; });
+ if (contains) {
+ return;
+ }
+ syncpt_interrupts[syncpoint_id].emplace_back(value);
+}
+
+bool GPU::CancelSyncptInterrupt(const u32 syncpoint_id, const u32 value) {
+ std::lock_guard lock{sync_mutex};
+ auto& interrupt = syncpt_interrupts[syncpoint_id];
+ const auto iter =
+ std::find_if(interrupt.begin(), interrupt.end(),
+ [value](u32 interrupt_value) { return value == interrupt_value; });
+
+ if (iter == interrupt.end()) {
+ return false;
+ }
+ interrupt.erase(iter);
+ return true;
+}
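A minimal usage sketch of the syncpoint API added here, assuming callers serialize registration through LockSync() (the nvdrv service is the expected client; everything except the GPU methods is illustrative):

    // Service thread: wait for syncpoint 'id' to reach 'threshold'.
    auto lock = gpu.LockSync();
    if (gpu.GetSyncpointValue(id) >= threshold) {
        SignalImmediately(); // hypothetical: already past the fence
    } else {
        gpu.RegisterSyncptInterrupt(id, threshold);
    }
    lock.unlock();

    // GPU thread: executing the fence method eventually calls
    gpu.IncrementSyncPoint(id); // services any registered thresholds now reached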
+
+void GPU::FlushCommands() {
+ renderer.Rasterizer().FlushCommands();
+}
+
u32 RenderTargetBytesPerPixel(RenderTargetFormat format) {
ASSERT(format != RenderTargetFormat::NONE);
@@ -143,12 +189,12 @@ enum class BufferMethods {
NotifyIntr = 0x8,
WrcacheFlush = 0x9,
Unk28 = 0xA,
- Unk2c = 0xB,
+ UnkCacheFlush = 0xB,
RefCnt = 0x14,
SemaphoreAcquire = 0x1A,
SemaphoreRelease = 0x1B,
- Unk70 = 0x1C,
- Unk74 = 0x1D,
+ FenceValue = 0x1C,
+ FenceAction = 0x1D,
Unk78 = 0x1E,
Unk7c = 0x1F,
Yield = 0x20,
@@ -194,6 +240,10 @@ void GPU::CallPullerMethod(const MethodCall& method_call) {
case BufferMethods::SemaphoreAddressLow:
case BufferMethods::SemaphoreSequence:
case BufferMethods::RefCnt:
+ case BufferMethods::UnkCacheFlush:
+ case BufferMethods::WrcacheFlush:
+ case BufferMethods::FenceValue:
+ case BufferMethods::FenceAction:
break;
case BufferMethods::SemaphoreTrigger: {
ProcessSemaphoreTriggerMethod();
@@ -204,21 +254,11 @@ void GPU::CallPullerMethod(const MethodCall& method_call) {
LOG_ERROR(HW_GPU, "Special puller engine method NotifyIntr not implemented");
break;
}
- case BufferMethods::WrcacheFlush: {
- // TODO(Kmather73): Research and implement this method.
- LOG_ERROR(HW_GPU, "Special puller engine method WrcacheFlush not implemented");
- break;
- }
case BufferMethods::Unk28: {
// TODO(Kmather73): Research and implement this method.
LOG_ERROR(HW_GPU, "Special puller engine method Unk28 not implemented");
break;
}
- case BufferMethods::Unk2c: {
- // TODO(Kmather73): Research and implement this method.
- LOG_ERROR(HW_GPU, "Special puller engine method Unk2c not implemented");
- break;
- }
case BufferMethods::SemaphoreAcquire: {
ProcessSemaphoreAcquire();
break;
diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h
index fe6628923..78bc0601a 100644
--- a/src/video_core/gpu.h
+++ b/src/video_core/gpu.h
@@ -5,8 +5,12 @@
#pragma once
#include <array>
+#include <atomic>
+#include <list>
#include <memory>
+#include <mutex>
#include "common/common_types.h"
+#include "core/hle/service/nvdrv/nvdata.h"
#include "core/hle/service/nvflinger/buffer_queue.h"
#include "video_core/dma_pusher.h"
@@ -15,6 +19,10 @@ inline CacheAddr ToCacheAddr(const void* host_ptr) {
return reinterpret_cast<CacheAddr>(host_ptr);
}
+inline u8* FromCacheAddr(CacheAddr cache_addr) {
+ return reinterpret_cast<u8*>(cache_addr);
+}
+
namespace Core {
class System;
}
@@ -87,14 +95,10 @@ class DebugContext;
struct FramebufferConfig {
enum class PixelFormat : u32 {
ABGR8 = 1,
+ RGB565 = 4,
BGRA8 = 5,
};
- /**
- * Returns the number of bytes per pixel.
- */
- static u32 BytesPerPixel(PixelFormat format);
-
VAddr address;
u32 offset;
u32 width;
@@ -127,7 +131,7 @@ class MemoryManager;
class GPU {
public:
- explicit GPU(Core::System& system, VideoCore::RendererBase& renderer);
+ explicit GPU(Core::System& system, VideoCore::RendererBase& renderer, bool is_async);
virtual ~GPU();
@@ -149,12 +153,20 @@ public:
/// Calls a GPU method.
void CallMethod(const MethodCall& method_call);
+ void FlushCommands();
+
/// Returns a reference to the Maxwell3D GPU engine.
Engines::Maxwell3D& Maxwell3D();
/// Returns a const reference to the Maxwell3D GPU engine.
const Engines::Maxwell3D& Maxwell3D() const;
+ /// Returns a reference to the KeplerCompute GPU engine.
+ Engines::KeplerCompute& KeplerCompute();
+
+ /// Returns a reference to the KeplerCompute GPU engine.
+ const Engines::KeplerCompute& KeplerCompute() const;
+
/// Returns a reference to the GPU memory manager.
Tegra::MemoryManager& MemoryManager();
@@ -164,6 +176,22 @@ public:
/// Returns a reference to the GPU DMA pusher.
Tegra::DmaPusher& DmaPusher();
+ void IncrementSyncPoint(u32 syncpoint_id);
+
+ u32 GetSyncpointValue(u32 syncpoint_id) const;
+
+ void RegisterSyncptInterrupt(u32 syncpoint_id, u32 value);
+
+ bool CancelSyncptInterrupt(u32 syncpoint_id, u32 value);
+
+ std::unique_lock<std::mutex> LockSync() {
+ return std::unique_lock{sync_mutex};
+ }
+
+ bool IsAsync() const {
+ return is_async;
+ }
+
/// Returns a const reference to the GPU DMA pusher.
const Tegra::DmaPusher& DmaPusher() const;
@@ -194,7 +222,12 @@ public:
u32 semaphore_acquire;
u32 semaphore_release;
- INSERT_PADDING_WORDS(0xE4);
+ u32 fence_value;
+ union {
+ BitField<4, 4, u32> operation;
+ BitField<8, 8, u32> id;
+ } fence_action;
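+ // These back the FenceValue (0x1C) and FenceAction (0x1D) puller methods.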
+ INSERT_PADDING_WORDS(0xE2);
// Puller state
u32 acquire_mode;
@@ -216,8 +249,7 @@ public:
virtual void PushGPUEntries(Tegra::CommandList&& entries) = 0;
/// Swap buffers (render frame)
- virtual void SwapBuffers(
- std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) = 0;
+ virtual void SwapBuffers(const Tegra::FramebufferConfig* framebuffer) = 0;
/// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
virtual void FlushRegion(CacheAddr addr, u64 size) = 0;
@@ -228,6 +260,9 @@ public:
/// Notify rasterizer that any caches of the specified region should be flushed and invalidated
virtual void FlushAndInvalidateRegion(CacheAddr addr, u64 size) = 0;
+protected:
+ virtual void TriggerCpuInterrupt(u32 syncpoint_id, u32 value) const = 0;
+
private:
void ProcessBindMethod(const MethodCall& method_call);
void ProcessSemaphoreTriggerMethod();
@@ -245,6 +280,7 @@ private:
protected:
std::unique_ptr<Tegra::DmaPusher> dma_pusher;
+ Core::System& system;
VideoCore::RendererBase& renderer;
private:
@@ -262,6 +298,14 @@ private:
std::unique_ptr<Engines::MaxwellDMA> maxwell_dma;
/// Inline memory engine
std::unique_ptr<Engines::KeplerMemory> kepler_memory;
+
+ std::array<std::atomic<u32>, Service::Nvidia::MaxSyncPoints> syncpoints{};
+
+ std::array<std::list<u32>, Service::Nvidia::MaxSyncPoints> syncpt_interrupts;
+
+ std::mutex sync_mutex;
+
+ const bool is_async;
};
#define ASSERT_REG_POSITION(field_name, position) \
@@ -274,6 +318,8 @@ ASSERT_REG_POSITION(semaphore_trigger, 0x7);
ASSERT_REG_POSITION(reference_count, 0x14);
ASSERT_REG_POSITION(semaphore_acquire, 0x1A);
ASSERT_REG_POSITION(semaphore_release, 0x1B);
+ASSERT_REG_POSITION(fence_value, 0x1C);
+ASSERT_REG_POSITION(fence_action, 0x1D);
ASSERT_REG_POSITION(acquire_mode, 0x100);
ASSERT_REG_POSITION(acquire_source, 0x101);
diff --git a/src/video_core/gpu_asynch.cpp b/src/video_core/gpu_asynch.cpp
index d4e2553a9..f2a3a390e 100644
--- a/src/video_core/gpu_asynch.cpp
+++ b/src/video_core/gpu_asynch.cpp
@@ -2,6 +2,8 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include "core/core.h"
+#include "core/hardware_interrupt_manager.h"
#include "video_core/gpu_asynch.h"
#include "video_core/gpu_thread.h"
#include "video_core/renderer_base.h"
@@ -9,7 +11,7 @@
namespace VideoCommon {
GPUAsynch::GPUAsynch(Core::System& system, VideoCore::RendererBase& renderer)
- : GPU(system, renderer), gpu_thread{system} {}
+ : GPU(system, renderer, true), gpu_thread{system} {}
GPUAsynch::~GPUAsynch() = default;
@@ -21,9 +23,8 @@ void GPUAsynch::PushGPUEntries(Tegra::CommandList&& entries) {
gpu_thread.SubmitList(std::move(entries));
}
-void GPUAsynch::SwapBuffers(
- std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) {
- gpu_thread.SwapBuffers(std::move(framebuffer));
+void GPUAsynch::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
+ gpu_thread.SwapBuffers(framebuffer);
}
void GPUAsynch::FlushRegion(CacheAddr addr, u64 size) {
@@ -38,4 +39,9 @@ void GPUAsynch::FlushAndInvalidateRegion(CacheAddr addr, u64 size) {
gpu_thread.FlushAndInvalidateRegion(addr, size);
}
+void GPUAsynch::TriggerCpuInterrupt(const u32 syncpoint_id, const u32 value) const {
+ auto& interrupt_manager = system.InterruptManager();
+ interrupt_manager.GPUInterruptSyncpt(syncpoint_id, value);
+}
+
} // namespace VideoCommon
diff --git a/src/video_core/gpu_asynch.h b/src/video_core/gpu_asynch.h
index 30be74cba..a12f9bac4 100644
--- a/src/video_core/gpu_asynch.h
+++ b/src/video_core/gpu_asynch.h
@@ -14,19 +14,21 @@ class RendererBase;
namespace VideoCommon {
/// Implementation of GPU interface that runs the GPU asynchronously
-class GPUAsynch : public Tegra::GPU {
+class GPUAsynch final : public Tegra::GPU {
public:
explicit GPUAsynch(Core::System& system, VideoCore::RendererBase& renderer);
~GPUAsynch() override;
void Start() override;
void PushGPUEntries(Tegra::CommandList&& entries) override;
- void SwapBuffers(
- std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) override;
+ void SwapBuffers(const Tegra::FramebufferConfig* framebuffer) override;
void FlushRegion(CacheAddr addr, u64 size) override;
void InvalidateRegion(CacheAddr addr, u64 size) override;
void FlushAndInvalidateRegion(CacheAddr addr, u64 size) override;
+protected:
+ void TriggerCpuInterrupt(u32 syncpoint_id, u32 value) const override;
+
private:
GPUThread::ThreadManager gpu_thread;
};
diff --git a/src/video_core/gpu_synch.cpp b/src/video_core/gpu_synch.cpp
index 45e43b1dc..d48221077 100644
--- a/src/video_core/gpu_synch.cpp
+++ b/src/video_core/gpu_synch.cpp
@@ -8,7 +8,7 @@
namespace VideoCommon {
GPUSynch::GPUSynch(Core::System& system, VideoCore::RendererBase& renderer)
- : GPU(system, renderer) {}
+ : GPU(system, renderer, false) {}
GPUSynch::~GPUSynch() = default;
@@ -19,9 +19,8 @@ void GPUSynch::PushGPUEntries(Tegra::CommandList&& entries) {
dma_pusher->DispatchCalls();
}
-void GPUSynch::SwapBuffers(
- std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) {
- renderer.SwapBuffers(std::move(framebuffer));
+void GPUSynch::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
+ renderer.SwapBuffers(framebuffer);
}
void GPUSynch::FlushRegion(CacheAddr addr, u64 size) {
diff --git a/src/video_core/gpu_synch.h b/src/video_core/gpu_synch.h
index 3031fcf72..5eb1c461c 100644
--- a/src/video_core/gpu_synch.h
+++ b/src/video_core/gpu_synch.h
@@ -13,18 +13,21 @@ class RendererBase;
namespace VideoCommon {
/// Implementation of GPU interface that runs the GPU synchronously
-class GPUSynch : public Tegra::GPU {
+class GPUSynch final : public Tegra::GPU {
public:
explicit GPUSynch(Core::System& system, VideoCore::RendererBase& renderer);
~GPUSynch() override;
void Start() override;
void PushGPUEntries(Tegra::CommandList&& entries) override;
- void SwapBuffers(
- std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) override;
+ void SwapBuffers(const Tegra::FramebufferConfig* framebuffer) override;
void FlushRegion(CacheAddr addr, u64 size) override;
void InvalidateRegion(CacheAddr addr, u64 size) override;
void FlushAndInvalidateRegion(CacheAddr addr, u64 size) override;
+
+protected:
+ void TriggerCpuInterrupt([[maybe_unused]] u32 syncpoint_id,
+ [[maybe_unused]] u32 value) const override {}
};
} // namespace VideoCommon
diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp
index 3f0939ec9..5f039e4fd 100644
--- a/src/video_core/gpu_thread.cpp
+++ b/src/video_core/gpu_thread.cpp
@@ -21,7 +21,8 @@ static void RunThread(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_p
MicroProfileOnThreadCreate("GpuThread");
// Wait for the first GPU command before acquiring the window context
- state.WaitForCommands();
+ while (state.queue.Empty()) {
+ }
// If emulation was stopped during disk shader loading, abort before trying to acquire context
if (!state.is_running) {
@@ -32,14 +33,13 @@ static void RunThread(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_p
CommandDataContainer next;
while (state.is_running) {
- state.WaitForCommands();
while (!state.queue.Empty()) {
state.queue.Pop(next);
if (const auto submit_list = std::get_if<SubmitListCommand>(&next.data)) {
dma_pusher.Push(std::move(submit_list->entries));
dma_pusher.DispatchCalls();
} else if (const auto data = std::get_if<SwapBuffersCommand>(&next.data)) {
- renderer.SwapBuffers(std::move(data->framebuffer));
+ renderer.SwapBuffers(data->framebuffer ? &*data->framebuffer : nullptr);
} else if (const auto data = std::get_if<FlushRegionCommand>(&next.data)) {
renderer.Rasterizer().FlushRegion(data->addr, data->size);
} else if (const auto data = std::get_if<InvalidateRegionCommand>(&next.data)) {
@@ -49,8 +49,7 @@ static void RunThread(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_p
} else {
UNREACHABLE();
}
- state.signaled_fence = next.fence;
- state.TrySynchronize();
+ state.signaled_fence.store(next.fence);
}
}
}
@@ -79,9 +78,9 @@ void ThreadManager::SubmitList(Tegra::CommandList&& entries) {
system.CoreTiming().ScheduleEvent(synchronization_ticks, synchronization_event, fence);
}
-void ThreadManager::SwapBuffers(
- std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) {
- PushCommand(SwapBuffersCommand(std::move(framebuffer)));
+void ThreadManager::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
+ PushCommand(SwapBuffersCommand(framebuffer ? *framebuffer
+ : std::optional<const Tegra::FramebufferConfig>{}));
}
void ThreadManager::FlushRegion(CacheAddr addr, u64 size) {
@@ -89,12 +88,7 @@ void ThreadManager::FlushRegion(CacheAddr addr, u64 size) {
}
void ThreadManager::InvalidateRegion(CacheAddr addr, u64 size) {
- if (state.queue.Empty()) {
- // It's quicker to invalidate a single region on the CPU if the queue is already empty
- system.Renderer().Rasterizer().InvalidateRegion(addr, size);
- } else {
- PushCommand(InvalidateRegionCommand(addr, size));
- }
+ system.Renderer().Rasterizer().InvalidateRegion(addr, size);
}
void ThreadManager::FlushAndInvalidateRegion(CacheAddr addr, u64 size) {
@@ -105,22 +99,13 @@ void ThreadManager::FlushAndInvalidateRegion(CacheAddr addr, u64 size) {
u64 ThreadManager::PushCommand(CommandData&& command_data) {
const u64 fence{++state.last_fence};
state.queue.Push(CommandDataContainer(std::move(command_data), fence));
- state.SignalCommands();
return fence;
}
MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192));
void SynchState::WaitForSynchronization(u64 fence) {
- if (signaled_fence >= fence) {
- return;
- }
-
- // Wait for the GPU to be idle (all commands to be executed)
- {
- MICROPROFILE_SCOPE(GPU_wait);
- std::unique_lock lock{synchronization_mutex};
- synchronization_condition.wait(lock, [this, fence] { return signaled_fence >= fence; });
- }
+ while (signaled_fence.load() < fence) {
+ }
}
} // namespace VideoCommon::GPUThread
diff --git a/src/video_core/gpu_thread.h b/src/video_core/gpu_thread.h
index 05a168a72..3ae0ec9f3 100644
--- a/src/video_core/gpu_thread.h
+++ b/src/video_core/gpu_thread.h
@@ -88,41 +88,9 @@ struct CommandDataContainer {
/// Struct used to synchronize the GPU thread
struct SynchState final {
std::atomic_bool is_running{true};
- std::atomic_int queued_frame_count{};
- std::mutex synchronization_mutex;
- std::mutex commands_mutex;
- std::condition_variable commands_condition;
- std::condition_variable synchronization_condition;
-
- /// Returns true if the gap in GPU commands is small enough that we can consider the CPU and GPU
- /// synchronized. This is entirely empirical.
- bool IsSynchronized() const {
- constexpr std::size_t max_queue_gap{5};
- return queue.Size() <= max_queue_gap;
- }
-
- void TrySynchronize() {
- if (IsSynchronized()) {
- std::lock_guard lock{synchronization_mutex};
- synchronization_condition.notify_one();
- }
- }
void WaitForSynchronization(u64 fence);
- void SignalCommands() {
- if (queue.Empty()) {
- return;
- }
-
- commands_condition.notify_one();
- }
-
- void WaitForCommands() {
- std::unique_lock lock{commands_mutex};
- commands_condition.wait(lock, [this] { return !queue.Empty(); });
- }
-
using CommandQueue = Common::SPSCQueue<CommandDataContainer>;
CommandQueue queue;
u64 last_fence{};
@@ -142,8 +110,7 @@ public:
void SubmitList(Tegra::CommandList&& entries);
/// Swap buffers (render frame)
- void SwapBuffers(
- std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer);
+ void SwapBuffers(const Tegra::FramebufferConfig* framebuffer);
/// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
void FlushRegion(CacheAddr addr, u64 size);
diff --git a/src/video_core/macro_interpreter.cpp b/src/video_core/macro_interpreter.cpp
index c766ed692..62afc0d11 100644
--- a/src/video_core/macro_interpreter.cpp
+++ b/src/video_core/macro_interpreter.cpp
@@ -4,17 +4,28 @@
#include "common/assert.h"
#include "common/logging/log.h"
+#include "common/microprofile.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/macro_interpreter.h"
+MICROPROFILE_DEFINE(MacroInterp, "GPU", "Execute macro interpreter", MP_RGB(128, 128, 192));
+
namespace Tegra {
MacroInterpreter::MacroInterpreter(Engines::Maxwell3D& maxwell3d) : maxwell3d(maxwell3d) {}
-void MacroInterpreter::Execute(u32 offset, std::vector<u32> parameters) {
+void MacroInterpreter::Execute(u32 offset, std::size_t num_parameters, const u32* parameters) {
+ MICROPROFILE_SCOPE(MacroInterp);
Reset();
+
registers[1] = parameters[0];
- this->parameters = std::move(parameters);
+
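+ // Reuse the parameter buffer across calls, growing it only when a macro
+ // passes more parameters than any previous invocation.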
+ if (num_parameters > parameters_capacity) {
+ parameters_capacity = num_parameters;
+ this->parameters = std::make_unique<u32[]>(num_parameters);
+ }
+ std::memcpy(this->parameters.get(), parameters, num_parameters * sizeof(u32));
+ this->num_parameters = num_parameters;
// Execute the code until we hit an exit condition.
bool keep_executing = true;
@@ -23,7 +34,7 @@ void MacroInterpreter::Execute(u32 offset, std::vector<u32> parameters) {
}
// Assert that the macro used all the input parameters
- ASSERT(next_parameter_index == this->parameters.size());
+ ASSERT(next_parameter_index == num_parameters);
}
void MacroInterpreter::Reset() {
@@ -31,7 +42,7 @@ void MacroInterpreter::Reset() {
pc = 0;
delayed_pc = {};
method_address.raw = 0;
- parameters.clear();
+ num_parameters = 0;
// The next parameter index starts at 1, because $r1 already has the value of the first
// parameter.
next_parameter_index = 1;
@@ -120,9 +131,7 @@ bool MacroInterpreter::Step(u32 offset, bool is_delay_slot) {
// An instruction with the Exit flag will not actually
// cause an exit if it's executed inside a delay slot.
- // TODO(Blinkhawk): Reversed to always exit. The behavior explained above requires further
- // testing on the MME code.
- if (opcode.is_exit) {
+ if (opcode.is_exit && !is_delay_slot) {
// Exit has a delay slot, execute the next instruction
Step(offset, true);
return false;
@@ -225,7 +234,8 @@ void MacroInterpreter::ProcessResult(ResultOperation operation, u32 reg, u32 res
}
u32 MacroInterpreter::FetchParameter() {
- return parameters.at(next_parameter_index++);
+ ASSERT(next_parameter_index < num_parameters);
+ return parameters[next_parameter_index++];
}
u32 MacroInterpreter::GetRegister(u32 register_id) const {
diff --git a/src/video_core/macro_interpreter.h b/src/video_core/macro_interpreter.h
index cde360288..76b6a895b 100644
--- a/src/video_core/macro_interpreter.h
+++ b/src/video_core/macro_interpreter.h
@@ -25,7 +25,7 @@ public:
* @param offset Offset to start execution at.
* @param parameters The parameters of the macro.
*/
- void Execute(u32 offset, std::vector<u32> parameters);
+ void Execute(u32 offset, std::size_t num_parameters, const u32* parameters);
private:
enum class Operation : u32 {
@@ -162,10 +162,12 @@ private:
MethodAddress method_address = {};
/// Input parameters of the current macro.
- std::vector<u32> parameters;
+ std::unique_ptr<u32[]> parameters;
+ std::size_t num_parameters = 0;
+ std::size_t parameters_capacity = 0;
/// Index of the next parameter that will be fetched by the 'parm' instruction.
u32 next_parameter_index = 0;
- bool carry_flag{};
+ bool carry_flag = false;
};
} // namespace Tegra
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index 5d8d126c1..bffae940c 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -5,13 +5,17 @@
#include "common/alignment.h"
#include "common/assert.h"
#include "common/logging/log.h"
+#include "core/core.h"
+#include "core/hle/kernel/process.h"
+#include "core/hle/kernel/vm_manager.h"
#include "core/memory.h"
#include "video_core/memory_manager.h"
#include "video_core/rasterizer_interface.h"
namespace Tegra {
-MemoryManager::MemoryManager(VideoCore::RasterizerInterface& rasterizer) : rasterizer{rasterizer} {
+MemoryManager::MemoryManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer)
+ : rasterizer{rasterizer}, system{system} {
std::fill(page_table.pointers.begin(), page_table.pointers.end(), nullptr);
std::fill(page_table.attributes.begin(), page_table.attributes.end(),
Common::PageType::Unmapped);
@@ -49,6 +53,11 @@ GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, u64 size) {
const GPUVAddr gpu_addr{FindFreeRegion(address_space_base, aligned_size)};
MapBackingMemory(gpu_addr, Memory::GetPointer(cpu_addr), aligned_size, cpu_addr);
+ ASSERT(system.CurrentProcess()
+ ->VMManager()
+ .SetMemoryAttribute(cpu_addr, size, Kernel::MemoryAttribute::DeviceMapped,
+ Kernel::MemoryAttribute::DeviceMapped)
+ .IsSuccess());
return gpu_addr;
}
@@ -59,7 +68,11 @@ GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, GPUVAddr gpu_addr, u64 size)
const u64 aligned_size{Common::AlignUp(size, page_size)};
MapBackingMemory(gpu_addr, Memory::GetPointer(cpu_addr), aligned_size, cpu_addr);
-
+ ASSERT(system.CurrentProcess()
+ ->VMManager()
+ .SetMemoryAttribute(cpu_addr, size, Kernel::MemoryAttribute::DeviceMapped,
+ Kernel::MemoryAttribute::DeviceMapped)
+ .IsSuccess());
return gpu_addr;
}
@@ -68,9 +81,16 @@ GPUVAddr MemoryManager::UnmapBuffer(GPUVAddr gpu_addr, u64 size) {
const u64 aligned_size{Common::AlignUp(size, page_size)};
const CacheAddr cache_addr{ToCacheAddr(GetPointer(gpu_addr))};
+ const auto cpu_addr = GpuToCpuAddress(gpu_addr);
+ ASSERT(cpu_addr);
rasterizer.FlushAndInvalidateRegion(cache_addr, aligned_size);
UnmapRange(gpu_addr, aligned_size);
+ ASSERT(system.CurrentProcess()
+ ->VMManager()
+ .SetMemoryAttribute(cpu_addr.value(), size, Kernel::MemoryAttribute::DeviceMapped,
+ Kernel::MemoryAttribute::None)
+ .IsSuccess());
return gpu_addr;
}
@@ -202,11 +222,12 @@ const u8* MemoryManager::GetPointer(GPUVAddr addr) const {
}
bool MemoryManager::IsBlockContinuous(const GPUVAddr start, const std::size_t size) const {
- const GPUVAddr end = start + size;
+ const std::size_t inner_size = size - 1;
+ const GPUVAddr end = start + inner_size;
const auto host_ptr_start = reinterpret_cast<std::uintptr_t>(GetPointer(start));
const auto host_ptr_end = reinterpret_cast<std::uintptr_t>(GetPointer(end));
const auto range = static_cast<std::size_t>(host_ptr_end - host_ptr_start);
- return range == size;
+ return range == inner_size;
}
void MemoryManager::ReadBlock(GPUVAddr src_addr, void* dest_buffer, const std::size_t size) const {
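The IsBlockContinuous change is an off-by-one fix: the old code translated start + size, which is the first byte past the block and may belong to a different mapping. A worked example, assuming 4 KiB pages:

// Assuming 4 KiB pages and a block at start = 0x0000 with size = 0x1000:
//   old: end = start + size       = 0x1000 -> first byte of the *next* page
//   new: end = start + (size - 1) = 0x0FFF -> last byte of *this* block
// Translating 0x1000 can resolve to an unrelated host mapping, so the old
// comparison `range == size` could misjudge contiguity; the inclusive end
// with `range == size - 1` only inspects bytes that belong to the block.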
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h
index 113f9d8f3..aea010087 100644
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ -14,6 +14,10 @@ namespace VideoCore {
class RasterizerInterface;
}
+namespace Core {
+class System;
+}
+
namespace Tegra {
/**
@@ -47,7 +51,7 @@ struct VirtualMemoryArea {
class MemoryManager final {
public:
- explicit MemoryManager(VideoCore::RasterizerInterface& rasterizer);
+ explicit MemoryManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer);
~MemoryManager();
GPUVAddr AllocateSpace(u64 size, u64 align);
@@ -163,8 +167,8 @@ private:
static constexpr u64 page_size{1 << page_bits};
static constexpr u64 page_mask{page_size - 1};
- /// Address space in bits, this is fairly arbitrary but sufficiently large.
- static constexpr u32 address_space_width{39};
+    /// Address space in bits, according to the Tegra X1 TRM
+ static constexpr u32 address_space_width{40};
/// Start address for mapping, this is fairly arbitrary but must be non-zero.
static constexpr GPUVAddr address_space_base{0x100000};
/// End of address space, based on address space in bits.
@@ -173,6 +177,8 @@ private:
Common::PageTable page_table{page_bits};
VMAMap vma_map;
VideoCore::RasterizerInterface& rasterizer;
+
+ Core::System& system;
};
} // namespace Tegra
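Widening the address space from 39 to 40 bits matches the Tegra X1's GPU MMU and doubles the derived address_space_end. A quick check of the arithmetic (computed here for illustration; the real constants remain in MemoryManager):

#include <cstdint>

constexpr std::uint32_t address_space_width = 40;
constexpr std::uint64_t address_space_end = 1ULL << address_space_width;
static_assert(address_space_end == 0x100'0000'0000ULL, "40 bits = 1 TiB of GPU VA");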
diff --git a/src/video_core/morton.cpp b/src/video_core/morton.cpp
index 3e91cbc83..084f85e67 100644
--- a/src/video_core/morton.cpp
+++ b/src/video_core/morton.cpp
@@ -25,8 +25,8 @@ static void MortonCopy(u32 stride, u32 block_height, u32 height, u32 block_depth
// With the BCn formats (DXT and DXN), each 4x4 tile is swizzled instead of just individual
// pixel values.
- const u32 tile_size_x{GetDefaultBlockWidth(format)};
- const u32 tile_size_y{GetDefaultBlockHeight(format)};
+ constexpr u32 tile_size_x{GetDefaultBlockWidth(format)};
+ constexpr u32 tile_size_y{GetDefaultBlockHeight(format)};
if constexpr (morton_to_linear) {
Tegra::Texture::UnswizzleTexture(buffer, addr, tile_size_x, tile_size_y, bytes_per_pixel,
@@ -186,99 +186,6 @@ static MortonCopyFn GetSwizzleFunction(MortonSwizzleMode mode, Surface::PixelFor
return morton_to_linear_fns[static_cast<std::size_t>(format)];
}
-static u32 MortonInterleave128(u32 x, u32 y) {
- // 128x128 Z-Order coordinate from 2D coordinates
- static constexpr u32 xlut[] = {
- 0x0000, 0x0001, 0x0002, 0x0003, 0x0008, 0x0009, 0x000a, 0x000b, 0x0040, 0x0041, 0x0042,
- 0x0043, 0x0048, 0x0049, 0x004a, 0x004b, 0x0800, 0x0801, 0x0802, 0x0803, 0x0808, 0x0809,
- 0x080a, 0x080b, 0x0840, 0x0841, 0x0842, 0x0843, 0x0848, 0x0849, 0x084a, 0x084b, 0x1000,
- 0x1001, 0x1002, 0x1003, 0x1008, 0x1009, 0x100a, 0x100b, 0x1040, 0x1041, 0x1042, 0x1043,
- 0x1048, 0x1049, 0x104a, 0x104b, 0x1800, 0x1801, 0x1802, 0x1803, 0x1808, 0x1809, 0x180a,
- 0x180b, 0x1840, 0x1841, 0x1842, 0x1843, 0x1848, 0x1849, 0x184a, 0x184b, 0x2000, 0x2001,
- 0x2002, 0x2003, 0x2008, 0x2009, 0x200a, 0x200b, 0x2040, 0x2041, 0x2042, 0x2043, 0x2048,
- 0x2049, 0x204a, 0x204b, 0x2800, 0x2801, 0x2802, 0x2803, 0x2808, 0x2809, 0x280a, 0x280b,
- 0x2840, 0x2841, 0x2842, 0x2843, 0x2848, 0x2849, 0x284a, 0x284b, 0x3000, 0x3001, 0x3002,
- 0x3003, 0x3008, 0x3009, 0x300a, 0x300b, 0x3040, 0x3041, 0x3042, 0x3043, 0x3048, 0x3049,
- 0x304a, 0x304b, 0x3800, 0x3801, 0x3802, 0x3803, 0x3808, 0x3809, 0x380a, 0x380b, 0x3840,
- 0x3841, 0x3842, 0x3843, 0x3848, 0x3849, 0x384a, 0x384b, 0x0000, 0x0001, 0x0002, 0x0003,
- 0x0008, 0x0009, 0x000a, 0x000b, 0x0040, 0x0041, 0x0042, 0x0043, 0x0048, 0x0049, 0x004a,
- 0x004b, 0x0800, 0x0801, 0x0802, 0x0803, 0x0808, 0x0809, 0x080a, 0x080b, 0x0840, 0x0841,
- 0x0842, 0x0843, 0x0848, 0x0849, 0x084a, 0x084b, 0x1000, 0x1001, 0x1002, 0x1003, 0x1008,
- 0x1009, 0x100a, 0x100b, 0x1040, 0x1041, 0x1042, 0x1043, 0x1048, 0x1049, 0x104a, 0x104b,
- 0x1800, 0x1801, 0x1802, 0x1803, 0x1808, 0x1809, 0x180a, 0x180b, 0x1840, 0x1841, 0x1842,
- 0x1843, 0x1848, 0x1849, 0x184a, 0x184b, 0x2000, 0x2001, 0x2002, 0x2003, 0x2008, 0x2009,
- 0x200a, 0x200b, 0x2040, 0x2041, 0x2042, 0x2043, 0x2048, 0x2049, 0x204a, 0x204b, 0x2800,
- 0x2801, 0x2802, 0x2803, 0x2808, 0x2809, 0x280a, 0x280b, 0x2840, 0x2841, 0x2842, 0x2843,
- 0x2848, 0x2849, 0x284a, 0x284b, 0x3000, 0x3001, 0x3002, 0x3003, 0x3008, 0x3009, 0x300a,
- 0x300b, 0x3040, 0x3041, 0x3042, 0x3043, 0x3048, 0x3049, 0x304a, 0x304b, 0x3800, 0x3801,
- 0x3802, 0x3803, 0x3808, 0x3809, 0x380a, 0x380b, 0x3840, 0x3841, 0x3842, 0x3843, 0x3848,
- 0x3849, 0x384a, 0x384b, 0x0000, 0x0001, 0x0002, 0x0003, 0x0008, 0x0009, 0x000a, 0x000b,
- 0x0040, 0x0041, 0x0042, 0x0043, 0x0048, 0x0049, 0x004a, 0x004b, 0x0800, 0x0801, 0x0802,
- 0x0803, 0x0808, 0x0809, 0x080a, 0x080b, 0x0840, 0x0841, 0x0842, 0x0843, 0x0848, 0x0849,
- 0x084a, 0x084b, 0x1000, 0x1001, 0x1002, 0x1003, 0x1008, 0x1009, 0x100a, 0x100b, 0x1040,
- 0x1041, 0x1042, 0x1043, 0x1048, 0x1049, 0x104a, 0x104b, 0x1800, 0x1801, 0x1802, 0x1803,
- 0x1808, 0x1809, 0x180a, 0x180b, 0x1840, 0x1841, 0x1842, 0x1843, 0x1848, 0x1849, 0x184a,
- 0x184b, 0x2000, 0x2001, 0x2002, 0x2003, 0x2008, 0x2009, 0x200a, 0x200b, 0x2040, 0x2041,
- 0x2042, 0x2043, 0x2048, 0x2049, 0x204a, 0x204b, 0x2800, 0x2801, 0x2802, 0x2803, 0x2808,
- 0x2809, 0x280a, 0x280b, 0x2840, 0x2841, 0x2842, 0x2843, 0x2848, 0x2849, 0x284a, 0x284b,
- 0x3000, 0x3001, 0x3002, 0x3003, 0x3008, 0x3009, 0x300a, 0x300b, 0x3040, 0x3041, 0x3042,
- 0x3043, 0x3048, 0x3049, 0x304a, 0x304b, 0x3800, 0x3801, 0x3802, 0x3803, 0x3808, 0x3809,
- 0x380a, 0x380b, 0x3840, 0x3841, 0x3842, 0x3843, 0x3848, 0x3849, 0x384a, 0x384b,
- };
- static constexpr u32 ylut[] = {
- 0x0000, 0x0004, 0x0010, 0x0014, 0x0020, 0x0024, 0x0030, 0x0034, 0x0080, 0x0084, 0x0090,
- 0x0094, 0x00a0, 0x00a4, 0x00b0, 0x00b4, 0x0100, 0x0104, 0x0110, 0x0114, 0x0120, 0x0124,
- 0x0130, 0x0134, 0x0180, 0x0184, 0x0190, 0x0194, 0x01a0, 0x01a4, 0x01b0, 0x01b4, 0x0200,
- 0x0204, 0x0210, 0x0214, 0x0220, 0x0224, 0x0230, 0x0234, 0x0280, 0x0284, 0x0290, 0x0294,
- 0x02a0, 0x02a4, 0x02b0, 0x02b4, 0x0300, 0x0304, 0x0310, 0x0314, 0x0320, 0x0324, 0x0330,
- 0x0334, 0x0380, 0x0384, 0x0390, 0x0394, 0x03a0, 0x03a4, 0x03b0, 0x03b4, 0x0400, 0x0404,
- 0x0410, 0x0414, 0x0420, 0x0424, 0x0430, 0x0434, 0x0480, 0x0484, 0x0490, 0x0494, 0x04a0,
- 0x04a4, 0x04b0, 0x04b4, 0x0500, 0x0504, 0x0510, 0x0514, 0x0520, 0x0524, 0x0530, 0x0534,
- 0x0580, 0x0584, 0x0590, 0x0594, 0x05a0, 0x05a4, 0x05b0, 0x05b4, 0x0600, 0x0604, 0x0610,
- 0x0614, 0x0620, 0x0624, 0x0630, 0x0634, 0x0680, 0x0684, 0x0690, 0x0694, 0x06a0, 0x06a4,
- 0x06b0, 0x06b4, 0x0700, 0x0704, 0x0710, 0x0714, 0x0720, 0x0724, 0x0730, 0x0734, 0x0780,
- 0x0784, 0x0790, 0x0794, 0x07a0, 0x07a4, 0x07b0, 0x07b4, 0x0000, 0x0004, 0x0010, 0x0014,
- 0x0020, 0x0024, 0x0030, 0x0034, 0x0080, 0x0084, 0x0090, 0x0094, 0x00a0, 0x00a4, 0x00b0,
- 0x00b4, 0x0100, 0x0104, 0x0110, 0x0114, 0x0120, 0x0124, 0x0130, 0x0134, 0x0180, 0x0184,
- 0x0190, 0x0194, 0x01a0, 0x01a4, 0x01b0, 0x01b4, 0x0200, 0x0204, 0x0210, 0x0214, 0x0220,
- 0x0224, 0x0230, 0x0234, 0x0280, 0x0284, 0x0290, 0x0294, 0x02a0, 0x02a4, 0x02b0, 0x02b4,
- 0x0300, 0x0304, 0x0310, 0x0314, 0x0320, 0x0324, 0x0330, 0x0334, 0x0380, 0x0384, 0x0390,
- 0x0394, 0x03a0, 0x03a4, 0x03b0, 0x03b4, 0x0400, 0x0404, 0x0410, 0x0414, 0x0420, 0x0424,
- 0x0430, 0x0434, 0x0480, 0x0484, 0x0490, 0x0494, 0x04a0, 0x04a4, 0x04b0, 0x04b4, 0x0500,
- 0x0504, 0x0510, 0x0514, 0x0520, 0x0524, 0x0530, 0x0534, 0x0580, 0x0584, 0x0590, 0x0594,
- 0x05a0, 0x05a4, 0x05b0, 0x05b4, 0x0600, 0x0604, 0x0610, 0x0614, 0x0620, 0x0624, 0x0630,
- 0x0634, 0x0680, 0x0684, 0x0690, 0x0694, 0x06a0, 0x06a4, 0x06b0, 0x06b4, 0x0700, 0x0704,
- 0x0710, 0x0714, 0x0720, 0x0724, 0x0730, 0x0734, 0x0780, 0x0784, 0x0790, 0x0794, 0x07a0,
- 0x07a4, 0x07b0, 0x07b4, 0x0000, 0x0004, 0x0010, 0x0014, 0x0020, 0x0024, 0x0030, 0x0034,
- 0x0080, 0x0084, 0x0090, 0x0094, 0x00a0, 0x00a4, 0x00b0, 0x00b4, 0x0100, 0x0104, 0x0110,
- 0x0114, 0x0120, 0x0124, 0x0130, 0x0134, 0x0180, 0x0184, 0x0190, 0x0194, 0x01a0, 0x01a4,
- 0x01b0, 0x01b4, 0x0200, 0x0204, 0x0210, 0x0214, 0x0220, 0x0224, 0x0230, 0x0234, 0x0280,
- 0x0284, 0x0290, 0x0294, 0x02a0, 0x02a4, 0x02b0, 0x02b4, 0x0300, 0x0304, 0x0310, 0x0314,
- 0x0320, 0x0324, 0x0330, 0x0334, 0x0380, 0x0384, 0x0390, 0x0394, 0x03a0, 0x03a4, 0x03b0,
- 0x03b4, 0x0400, 0x0404, 0x0410, 0x0414, 0x0420, 0x0424, 0x0430, 0x0434, 0x0480, 0x0484,
- 0x0490, 0x0494, 0x04a0, 0x04a4, 0x04b0, 0x04b4, 0x0500, 0x0504, 0x0510, 0x0514, 0x0520,
- 0x0524, 0x0530, 0x0534, 0x0580, 0x0584, 0x0590, 0x0594, 0x05a0, 0x05a4, 0x05b0, 0x05b4,
- 0x0600, 0x0604, 0x0610, 0x0614, 0x0620, 0x0624, 0x0630, 0x0634, 0x0680, 0x0684, 0x0690,
- 0x0694, 0x06a0, 0x06a4, 0x06b0, 0x06b4, 0x0700, 0x0704, 0x0710, 0x0714, 0x0720, 0x0724,
- 0x0730, 0x0734, 0x0780, 0x0784, 0x0790, 0x0794, 0x07a0, 0x07a4, 0x07b0, 0x07b4,
- };
- return xlut[x % 128] + ylut[y % 128];
-}
-
-static u32 GetMortonOffset128(u32 x, u32 y, u32 bytes_per_pixel) {
- // Calculates the offset of the position of the pixel in Morton order
- // Framebuffer images are split into 128x128 tiles.
-
- constexpr u32 block_height = 128;
- const u32 coarse_x = x & ~127;
-
- const u32 i = MortonInterleave128(x, y);
-
- const u32 offset = coarse_x * block_height;
-
- return (i + offset) * bytes_per_pixel;
-}
-
void MortonSwizzle(MortonSwizzleMode mode, Surface::PixelFormat format, u32 stride,
u32 block_height, u32 height, u32 block_depth, u32 depth, u32 tile_width_spacing,
u8* buffer, u8* addr) {
@@ -286,23 +193,4 @@ void MortonSwizzle(MortonSwizzleMode mode, Surface::PixelFormat format, u32 stri
tile_width_spacing, buffer, addr);
}
-void MortonCopyPixels128(MortonSwizzleMode mode, u32 width, u32 height, u32 bytes_per_pixel,
- u32 linear_bytes_per_pixel, u8* morton_data, u8* linear_data) {
- const bool morton_to_linear = mode == MortonSwizzleMode::MortonToLinear;
- u8* data_ptrs[2];
- for (u32 y = 0; y < height; ++y) {
- for (u32 x = 0; x < width; ++x) {
- const u32 coarse_y = y & ~127;
- const u32 morton_offset =
- GetMortonOffset128(x, y, bytes_per_pixel) + coarse_y * width * bytes_per_pixel;
- const u32 linear_pixel_index = (x + y * width) * linear_bytes_per_pixel;
-
- data_ptrs[morton_to_linear ? 1 : 0] = morton_data + morton_offset;
- data_ptrs[morton_to_linear ? 0 : 1] = &linear_data[linear_pixel_index];
-
- std::memcpy(data_ptrs[0], data_ptrs[1], bytes_per_pixel);
- }
- }
-}
-
} // namespace VideoCore
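The deleted MortonInterleave128/GetMortonOffset128 pair was a lookup-table form of Z-order (Morton) addressing that no longer had callers. For reference, the generic bit-interleave those tables precomputed looks like this; the removed tables baked in a Tegra-specific 128x128 bit layout, so this sketch shows only the underlying concept:

#include <cstdint>

// Conceptual Z-order interleave: x bits land in even positions, y bits in odd.
constexpr std::uint32_t SpreadBits(std::uint32_t v) {
    v &= 0x0000ffff;
    v = (v | (v << 8)) & 0x00ff00ff;
    v = (v | (v << 4)) & 0x0f0f0f0f;
    v = (v | (v << 2)) & 0x33333333;
    v = (v | (v << 1)) & 0x55555555;
    return v;
}

constexpr std::uint32_t MortonInterleave(std::uint32_t x, std::uint32_t y) {
    return SpreadBits(x) | (SpreadBits(y) << 1);
}

static_assert(MortonInterleave(3, 1) == 0b0111, "x -> even bits, y -> odd bits");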
diff --git a/src/video_core/morton.h b/src/video_core/morton.h
index ee5b45555..b714a7e3f 100644
--- a/src/video_core/morton.h
+++ b/src/video_core/morton.h
@@ -15,7 +15,4 @@ void MortonSwizzle(MortonSwizzleMode mode, VideoCore::Surface::PixelFormat forma
u32 block_height, u32 height, u32 block_depth, u32 depth, u32 tile_width_spacing,
u8* buffer, u8* addr);
-void MortonCopyPixels128(MortonSwizzleMode mode, u32 width, u32 height, u32 bytes_per_pixel,
- u32 linear_bytes_per_pixel, u8* morton_data, u8* linear_data);
-
} // namespace VideoCore
diff --git a/src/video_core/rasterizer_cache.h b/src/video_core/rasterizer_cache.h
index 0c4ea1494..6de1597a2 100644
--- a/src/video_core/rasterizer_cache.h
+++ b/src/video_core/rasterizer_cache.h
@@ -169,6 +169,8 @@ protected:
object->MarkAsModified(false, *this);
}
+ std::recursive_mutex mutex;
+
private:
/// Returns a list of cached objects from the specified memory region, ordered by access time
std::vector<T> GetSortedObjectsFromRegion(CacheAddr addr, u64 size) {
@@ -208,5 +210,4 @@ private:
IntervalCache interval_cache; ///< Cache of objects
u64 modified_ticks{}; ///< Counter of cache state ticks, used for in-order flushing
VideoCore::RasterizerInterface& rasterizer;
- std::recursive_mutex mutex;
};
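Hoisting the recursive_mutex from private to protected lets derived caches hold the same lock across their own compound operations instead of racing the base class. A self-contained illustration (not the real class hierarchy):

#include <mutex>

class CacheBase {
protected:
    std::recursive_mutex mutex; // also taken by the base class internally
};

class DerivedCache : public CacheBase {
public:
    void CompoundOperation() {
        std::lock_guard lock{mutex}; // same lock the base class uses
        // ... multiple cache lookups and updates happen atomically here ...
    }
};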
diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h
index d7b86df38..6b3f2d50a 100644
--- a/src/video_core/rasterizer_interface.h
+++ b/src/video_core/rasterizer_interface.h
@@ -10,6 +10,10 @@
#include "video_core/engines/fermi_2d.h"
#include "video_core/gpu.h"
+namespace Tegra {
+class MemoryManager;
+}
+
namespace VideoCore {
enum class LoadCallbackStage {
@@ -30,6 +34,9 @@ public:
/// Clear the current framebuffer
virtual void Clear() = 0;
+ /// Dispatches a compute shader invocation
+ virtual void DispatchCompute(GPUVAddr code_addr) = 0;
+
/// Notify rasterizer that all caches should be flushed to Switch memory
virtual void FlushAll() = 0;
@@ -43,11 +50,16 @@ public:
/// and invalidated
virtual void FlushAndInvalidateRegion(CacheAddr addr, u64 size) = 0;
+ /// Notify the rasterizer to send all written commands to the host GPU.
+ virtual void FlushCommands() = 0;
+
+ /// Notify rasterizer that a frame is about to finish
+ virtual void TickFrame() = 0;
+
/// Attempt to use a faster method to perform a surface copy
virtual bool AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src,
const Tegra::Engines::Fermi2D::Regs::Surface& dst,
- const Common::Rectangle<u32>& src_rect,
- const Common::Rectangle<u32>& dst_rect) {
+ const Tegra::Engines::Fermi2D::Config& copy_config) {
return false;
}
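Backends now have three more pure-virtual hooks to implement. A minimal standalone sketch of the new surface, with stub bodies and the rest of the interface elided:

#include <cstdint>

using GPUVAddr = std::uint64_t;

// Self-contained sketch; the real interface declares more members than shown.
class RasterizerInterfaceSketch {
public:
    virtual ~RasterizerInterfaceSketch() = default;
    virtual void DispatchCompute(GPUVAddr code_addr) = 0;
    virtual void FlushCommands() = 0;
    virtual void TickFrame() = 0;
};

class NullRasterizer final : public RasterizerInterfaceSketch {
public:
    void DispatchCompute(GPUVAddr) override { /* record/launch compute work */ }
    void FlushCommands() override { /* submit queued GPU commands */ }
    void TickFrame() override { /* advance per-frame cache epochs */ }
};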
diff --git a/src/video_core/renderer_base.h b/src/video_core/renderer_base.h
index 1d54c3723..af1bebc4f 100644
--- a/src/video_core/renderer_base.h
+++ b/src/video_core/renderer_base.h
@@ -36,8 +36,7 @@ public:
virtual ~RendererBase();
/// Swap buffers (render frame)
- virtual void SwapBuffers(
- std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) = 0;
+ virtual void SwapBuffers(const Tegra::FramebufferConfig* framebuffer) = 0;
/// Initialize the renderer
virtual bool Init() = 0;
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
index 48b86f3bd..f8a807c84 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
@@ -2,101 +2,71 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
-#include <cstring>
#include <memory>
-#include "common/alignment.h"
-#include "core/core.h"
-#include "video_core/memory_manager.h"
+#include <glad/glad.h>
+
+#include "common/assert.h"
+#include "common/microprofile.h"
+#include "video_core/rasterizer_interface.h"
#include "video_core/renderer_opengl/gl_buffer_cache.h"
#include "video_core/renderer_opengl/gl_rasterizer.h"
+#include "video_core/renderer_opengl/gl_resource_manager.h"
namespace OpenGL {
-CachedBufferEntry::CachedBufferEntry(VAddr cpu_addr, std::size_t size, GLintptr offset,
- std::size_t alignment, u8* host_ptr)
- : RasterizerCacheObject{host_ptr}, cpu_addr{cpu_addr}, size{size}, offset{offset},
- alignment{alignment} {}
-
-OGLBufferCache::OGLBufferCache(RasterizerOpenGL& rasterizer, std::size_t size)
- : RasterizerCache{rasterizer}, stream_buffer(size, true) {}
-
-GLintptr OGLBufferCache::UploadMemory(GPUVAddr gpu_addr, std::size_t size, std::size_t alignment,
- bool cache) {
- auto& memory_manager = Core::System::GetInstance().GPU().MemoryManager();
-
- // Cache management is a big overhead, so only cache entries with a given size.
- // TODO: Figure out which size is the best for given games.
- cache &= size >= 2048;
-
- const auto& host_ptr{memory_manager.GetPointer(gpu_addr)};
- if (cache) {
- auto entry = TryGet(host_ptr);
- if (entry) {
- if (entry->GetSize() >= size && entry->GetAlignment() == alignment) {
- return entry->GetOffset();
- }
- Unregister(entry);
- }
- }
-
- AlignBuffer(alignment);
- const GLintptr uploaded_offset = buffer_offset;
-
- if (!host_ptr) {
- return uploaded_offset;
- }
-
- std::memcpy(buffer_ptr, host_ptr, size);
- buffer_ptr += size;
- buffer_offset += size;
-
- if (cache) {
- auto entry = std::make_shared<CachedBufferEntry>(
- *memory_manager.GpuToCpuAddress(gpu_addr), size, uploaded_offset, alignment, host_ptr);
- Register(entry);
- }
-
- return uploaded_offset;
+MICROPROFILE_DEFINE(OpenGL_Buffer_Download, "OpenGL", "Buffer Download", MP_RGB(192, 192, 128));
+
+CachedBufferBlock::CachedBufferBlock(CacheAddr cache_addr, const std::size_t size)
+ : VideoCommon::BufferBlock{cache_addr, size} {
+ gl_buffer.Create();
+ glNamedBufferData(gl_buffer.handle, static_cast<GLsizeiptr>(size), nullptr, GL_DYNAMIC_DRAW);
}
-GLintptr OGLBufferCache::UploadHostMemory(const void* raw_pointer, std::size_t size,
- std::size_t alignment) {
- AlignBuffer(alignment);
- std::memcpy(buffer_ptr, raw_pointer, size);
- const GLintptr uploaded_offset = buffer_offset;
+CachedBufferBlock::~CachedBufferBlock() = default;
+
+OGLBufferCache::OGLBufferCache(RasterizerOpenGL& rasterizer, Core::System& system,
+ std::size_t stream_size)
+ : VideoCommon::BufferCache<Buffer, GLuint, OGLStreamBuffer>{
+ rasterizer, system, std::make_unique<OGLStreamBuffer>(stream_size, true)} {}
+
+OGLBufferCache::~OGLBufferCache() = default;
- buffer_ptr += size;
- buffer_offset += size;
- return uploaded_offset;
+Buffer OGLBufferCache::CreateBlock(CacheAddr cache_addr, std::size_t size) {
+ return std::make_shared<CachedBufferBlock>(cache_addr, size);
}
-bool OGLBufferCache::Map(std::size_t max_size) {
- bool invalidate;
- std::tie(buffer_ptr, buffer_offset_base, invalidate) =
- stream_buffer.Map(static_cast<GLsizeiptr>(max_size), 4);
- buffer_offset = buffer_offset_base;
+void OGLBufferCache::WriteBarrier() {
+ glMemoryBarrier(GL_ALL_BARRIER_BITS);
+}
+
+const GLuint* OGLBufferCache::ToHandle(const Buffer& buffer) {
+ return buffer->GetHandle();
+}
- if (invalidate) {
- InvalidateAll();
- }
- return invalidate;
+const GLuint* OGLBufferCache::GetEmptyBuffer(std::size_t) {
+ static const GLuint null_buffer = 0;
+ return &null_buffer;
}
-void OGLBufferCache::Unmap() {
- stream_buffer.Unmap(buffer_offset - buffer_offset_base);
+void OGLBufferCache::UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
+ const u8* data) {
+ glNamedBufferSubData(*buffer->GetHandle(), static_cast<GLintptr>(offset),
+ static_cast<GLsizeiptr>(size), data);
}
-GLuint OGLBufferCache::GetHandle() const {
- return stream_buffer.GetHandle();
+void OGLBufferCache::DownloadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
+ u8* data) {
+ MICROPROFILE_SCOPE(OpenGL_Buffer_Download);
+ glGetNamedBufferSubData(*buffer->GetHandle(), static_cast<GLintptr>(offset),
+ static_cast<GLsizeiptr>(size), data);
}
-void OGLBufferCache::AlignBuffer(std::size_t alignment) {
- // Align the offset, not the mapped pointer
- const GLintptr offset_aligned =
- static_cast<GLintptr>(Common::AlignUp(static_cast<std::size_t>(buffer_offset), alignment));
- buffer_ptr += offset_aligned - buffer_offset;
- buffer_offset = offset_aligned;
+void OGLBufferCache::CopyBlock(const Buffer& src, const Buffer& dst, std::size_t src_offset,
+ std::size_t dst_offset, std::size_t size) {
+ glCopyNamedBufferSubData(*src->GetHandle(), *dst->GetHandle(),
+ static_cast<GLintptr>(src_offset), static_cast<GLintptr>(dst_offset),
+ static_cast<GLsizeiptr>(size));
}
} // namespace OpenGL
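Call sites change shape with this rework: UploadMemory now returns a pair of buffer handle and offset rather than an offset into one global stream buffer. A hypothetical call site, assuming the first element is the const GLuint* produced by ToHandle (the real usage is in gl_rasterizer.cpp further down):

// Hypothetical helper; names are assumed, real usage is SetupVertexBuffer.
void BindVertexBufferSketch(OGLBufferCache& buffer_cache, GLuint vao, GLuint binding,
                            GPUVAddr gpu_addr, std::size_t size, GLsizei stride) {
    // UploadMemory copies guest memory into a cached block and reports where.
    const auto [buffer, offset] = buffer_cache.UploadMemory(gpu_addr, size);
    // The handle comes back by pointer so binding can be deferred; here it is
    // resolved immediately.
    glVertexArrayVertexBuffer(vao, binding, *buffer, static_cast<GLintptr>(offset), stride);
}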
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.h b/src/video_core/renderer_opengl/gl_buffer_cache.h
index f2347581b..022e7bfa9 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.h
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.h
@@ -4,80 +4,63 @@
#pragma once
-#include <cstddef>
#include <memory>
-#include <tuple>
#include "common/common_types.h"
+#include "video_core/buffer_cache/buffer_cache.h"
#include "video_core/rasterizer_cache.h"
#include "video_core/renderer_opengl/gl_resource_manager.h"
#include "video_core/renderer_opengl/gl_stream_buffer.h"
+namespace Core {
+class System;
+}
+
namespace OpenGL {
+class OGLStreamBuffer;
class RasterizerOpenGL;
-class CachedBufferEntry final : public RasterizerCacheObject {
-public:
- explicit CachedBufferEntry(VAddr cpu_addr, std::size_t size, GLintptr offset,
- std::size_t alignment, u8* host_ptr);
-
- VAddr GetCpuAddr() const override {
- return cpu_addr;
- }
+class CachedBufferBlock;
- std::size_t GetSizeInBytes() const override {
- return size;
- }
-
- std::size_t GetSize() const {
- return size;
- }
+using Buffer = std::shared_ptr<CachedBufferBlock>;
- GLintptr GetOffset() const {
- return offset;
- }
+class CachedBufferBlock : public VideoCommon::BufferBlock {
+public:
+ explicit CachedBufferBlock(CacheAddr cache_addr, const std::size_t size);
+ ~CachedBufferBlock();
- std::size_t GetAlignment() const {
- return alignment;
+ const GLuint* GetHandle() const {
+ return &gl_buffer.handle;
}
private:
- VAddr cpu_addr{};
- std::size_t size{};
- GLintptr offset{};
- std::size_t alignment{};
+ OGLBuffer gl_buffer{};
};
-class OGLBufferCache final : public RasterizerCache<std::shared_ptr<CachedBufferEntry>> {
+class OGLBufferCache final : public VideoCommon::BufferCache<Buffer, GLuint, OGLStreamBuffer> {
public:
- explicit OGLBufferCache(RasterizerOpenGL& rasterizer, std::size_t size);
-
- /// Uploads data from a guest GPU address. Returns host's buffer offset where it's been
- /// allocated.
- GLintptr UploadMemory(GPUVAddr gpu_addr, std::size_t size, std::size_t alignment = 4,
- bool cache = true);
+ explicit OGLBufferCache(RasterizerOpenGL& rasterizer, Core::System& system,
+ std::size_t stream_size);
+ ~OGLBufferCache();
- /// Uploads from a host memory. Returns host's buffer offset where it's been allocated.
- GLintptr UploadHostMemory(const void* raw_pointer, std::size_t size, std::size_t alignment = 4);
+ const GLuint* GetEmptyBuffer(std::size_t) override;
- bool Map(std::size_t max_size);
- void Unmap();
+protected:
+ Buffer CreateBlock(CacheAddr cache_addr, std::size_t size) override;
- GLuint GetHandle() const;
+ void WriteBarrier() override;
-protected:
- void AlignBuffer(std::size_t alignment);
+ const GLuint* ToHandle(const Buffer& buffer) override;
- // We do not have to flush this cache as things in it are never modified by us.
- void FlushObjectInner(const std::shared_ptr<CachedBufferEntry>& object) override {}
+ void UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
+ const u8* data) override;
-private:
- OGLStreamBuffer stream_buffer;
+ void DownloadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
+ u8* data) override;
- u8* buffer_ptr = nullptr;
- GLintptr buffer_offset = 0;
- GLintptr buffer_offset_base = 0;
+ void CopyBlock(const Buffer& src, const Buffer& dst, std::size_t src_offset,
+ std::size_t dst_offset, std::size_t size) override;
};
} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_device.cpp b/src/video_core/renderer_opengl/gl_device.cpp
index 65a88b06c..4f59a87b4 100644
--- a/src/video_core/renderer_opengl/gl_device.cpp
+++ b/src/video_core/renderer_opengl/gl_device.cpp
@@ -14,51 +14,64 @@
namespace OpenGL {
namespace {
+
template <typename T>
T GetInteger(GLenum pname) {
GLint temporary;
glGetIntegerv(pname, &temporary);
return static_cast<T>(temporary);
}
+
+bool TestProgram(const GLchar* glsl) {
+ const GLuint shader{glCreateShaderProgramv(GL_VERTEX_SHADER, 1, &glsl)};
+ GLint link_status;
+ glGetProgramiv(shader, GL_LINK_STATUS, &link_status);
+ glDeleteProgram(shader);
+ return link_status == GL_TRUE;
+}
+
} // Anonymous namespace
Device::Device() {
uniform_buffer_alignment = GetInteger<std::size_t>(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT);
+ shader_storage_alignment = GetInteger<std::size_t>(GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT);
max_vertex_attributes = GetInteger<u32>(GL_MAX_VERTEX_ATTRIBS);
max_varyings = GetInteger<u32>(GL_MAX_VARYING_VECTORS);
+ has_warp_intrinsics = GLAD_GL_NV_gpu_shader5 && GLAD_GL_NV_shader_thread_group &&
+ GLAD_GL_NV_shader_thread_shuffle;
+ has_vertex_viewport_layer = GLAD_GL_ARB_shader_viewport_layer_array;
has_variable_aoffi = TestVariableAoffi();
has_component_indexing_bug = TestComponentIndexingBug();
+ has_precise_bug = TestPreciseBug();
+
+ LOG_INFO(Render_OpenGL, "Renderer_VariableAOFFI: {}", has_variable_aoffi);
+ LOG_INFO(Render_OpenGL, "Renderer_ComponentIndexingBug: {}", has_component_indexing_bug);
+ LOG_INFO(Render_OpenGL, "Renderer_PreciseBug: {}", has_precise_bug);
}
Device::Device(std::nullptr_t) {
uniform_buffer_alignment = 0;
max_vertex_attributes = 16;
max_varyings = 15;
+ has_warp_intrinsics = true;
+ has_vertex_viewport_layer = true;
has_variable_aoffi = true;
has_component_indexing_bug = false;
+ has_precise_bug = false;
}
bool Device::TestVariableAoffi() {
- const GLchar* AOFFI_TEST = R"(#version 430 core
+ return TestProgram(R"(#version 430 core
// This is a unit test, please ignore me on apitrace bug reports.
uniform sampler2D tex;
uniform ivec2 variable_offset;
+out vec4 output_attribute;
void main() {
- gl_Position = textureOffset(tex, vec2(0), variable_offset);
-}
-)";
- const GLuint shader{glCreateShaderProgramv(GL_VERTEX_SHADER, 1, &AOFFI_TEST)};
- GLint link_status{};
- glGetProgramiv(shader, GL_LINK_STATUS, &link_status);
- glDeleteProgram(shader);
-
- const bool supported{link_status == GL_TRUE};
- LOG_INFO(Render_OpenGL, "Renderer_VariableAOFFI: {}", supported);
- return supported;
+ output_attribute = textureOffset(tex, vec2(0), variable_offset);
+})");
}
bool Device::TestComponentIndexingBug() {
- constexpr char log_message[] = "Renderer_ComponentIndexingBug: {}";
const GLchar* COMPONENT_TEST = R"(#version 430 core
layout (std430, binding = 0) buffer OutputBuffer {
uint output_value;
@@ -98,12 +111,21 @@ void main() {
GLuint result;
glGetNamedBufferSubData(ssbo.handle, 0, sizeof(result), &result);
if (result != values.at(index)) {
- LOG_INFO(Render_OpenGL, log_message, true);
return true;
}
}
- LOG_INFO(Render_OpenGL, log_message, false);
return false;
}
+bool Device::TestPreciseBug() {
+ return !TestProgram(R"(#version 430 core
+in vec3 coords;
+out float out_value;
+uniform sampler2DShadow tex;
+void main() {
+ precise float tmp_value = vec4(texture(tex, coords)).x;
+ out_value = tmp_value;
+})");
+}
+
} // namespace OpenGL
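TestProgram centralizes the probe pattern: compile a throwaway separable vertex program and treat a failed link as evidence of the bug or missing feature. Adding another probe would look like this; the GLSL body below is invented for illustration and is not an actual yuzu probe:

bool Device::TestHypotheticalBug() {
    // A driver exhibiting the hypothetical bug would fail to link this program.
    return !TestProgram(R"(#version 430 core
out vec4 output_attribute;
void main() {
    output_attribute = vec4(0.0);
})");
}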
diff --git a/src/video_core/renderer_opengl/gl_device.h b/src/video_core/renderer_opengl/gl_device.h
index 8c8c93760..ba6dcd3be 100644
--- a/src/video_core/renderer_opengl/gl_device.h
+++ b/src/video_core/renderer_opengl/gl_device.h
@@ -18,6 +18,10 @@ public:
return uniform_buffer_alignment;
}
+ std::size_t GetShaderStorageBufferAlignment() const {
+ return shader_storage_alignment;
+ }
+
u32 GetMaxVertexAttributes() const {
return max_vertex_attributes;
}
@@ -26,6 +30,14 @@ public:
return max_varyings;
}
+ bool HasWarpIntrinsics() const {
+ return has_warp_intrinsics;
+ }
+
+ bool HasVertexViewportLayer() const {
+ return has_vertex_viewport_layer;
+ }
+
bool HasVariableAoffi() const {
return has_variable_aoffi;
}
@@ -34,15 +46,24 @@ public:
return has_component_indexing_bug;
}
+ bool HasPreciseBug() const {
+ return has_precise_bug;
+ }
+
private:
static bool TestVariableAoffi();
static bool TestComponentIndexingBug();
+ static bool TestPreciseBug();
std::size_t uniform_buffer_alignment{};
+ std::size_t shader_storage_alignment{};
u32 max_vertex_attributes{};
u32 max_varyings{};
+ bool has_warp_intrinsics{};
+ bool has_vertex_viewport_layer{};
bool has_variable_aoffi{};
bool has_component_indexing_bug{};
+ bool has_precise_bug{};
};
} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_framebuffer_cache.cpp b/src/video_core/renderer_opengl/gl_framebuffer_cache.cpp
new file mode 100644
index 000000000..7c926bd48
--- /dev/null
+++ b/src/video_core/renderer_opengl/gl_framebuffer_cache.cpp
@@ -0,0 +1,75 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <tuple>
+
+#include "common/cityhash.h"
+#include "common/scope_exit.h"
+#include "video_core/engines/maxwell_3d.h"
+#include "video_core/renderer_opengl/gl_framebuffer_cache.h"
+#include "video_core/renderer_opengl/gl_state.h"
+
+namespace OpenGL {
+
+using Maxwell = Tegra::Engines::Maxwell3D::Regs;
+
+FramebufferCacheOpenGL::FramebufferCacheOpenGL() = default;
+
+FramebufferCacheOpenGL::~FramebufferCacheOpenGL() = default;
+
+GLuint FramebufferCacheOpenGL::GetFramebuffer(const FramebufferCacheKey& key) {
+ const auto [entry, is_cache_miss] = cache.try_emplace(key);
+ auto& framebuffer{entry->second};
+ if (is_cache_miss) {
+ framebuffer = CreateFramebuffer(key);
+ }
+ return framebuffer.handle;
+}
+
+OGLFramebuffer FramebufferCacheOpenGL::CreateFramebuffer(const FramebufferCacheKey& key) {
+ OGLFramebuffer framebuffer;
+ framebuffer.Create();
+
+ // TODO(Rodrigo): Use DSA here after Nvidia fixes their framebuffer DSA bugs.
+ local_state.draw.draw_framebuffer = framebuffer.handle;
+ local_state.ApplyFramebufferState();
+
+ if (key.is_single_buffer) {
+ if (key.color_attachments[0] != GL_NONE && key.colors[0]) {
+ key.colors[0]->Attach(key.color_attachments[0], GL_DRAW_FRAMEBUFFER);
+ glDrawBuffer(key.color_attachments[0]);
+ } else {
+ glDrawBuffer(GL_NONE);
+ }
+ } else {
+ for (std::size_t index = 0; index < Maxwell::NumRenderTargets; ++index) {
+ if (key.colors[index]) {
+ key.colors[index]->Attach(GL_COLOR_ATTACHMENT0 + static_cast<GLenum>(index),
+ GL_DRAW_FRAMEBUFFER);
+ }
+ }
+ glDrawBuffers(key.colors_count, key.color_attachments.data());
+ }
+
+ if (key.zeta) {
+ key.zeta->Attach(key.stencil_enable ? GL_DEPTH_STENCIL_ATTACHMENT : GL_DEPTH_ATTACHMENT,
+ GL_DRAW_FRAMEBUFFER);
+ }
+
+ return framebuffer;
+}
+
+std::size_t FramebufferCacheKey::Hash() const {
+ static_assert(sizeof(*this) % sizeof(u64) == 0, "Unaligned struct");
+ return static_cast<std::size_t>(
+ Common::CityHash64(reinterpret_cast<const char*>(this), sizeof(*this)));
+}
+
+bool FramebufferCacheKey::operator==(const FramebufferCacheKey& rhs) const {
+ return std::tie(is_single_buffer, stencil_enable, colors_count, color_attachments, colors,
+ zeta) == std::tie(rhs.is_single_buffer, rhs.stencil_enable, rhs.colors_count,
+ rhs.color_attachments, rhs.colors, rhs.zeta);
+}
+
+} // namespace OpenGL
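Hashing the key by its raw bytes (Common::CityHash64 over sizeof(*this)) only works if every byte is defined, which is why the struct is alignas(sizeof(u64)), statically asserted to a multiple of u64, and value-initializes its members; stray padding would make equal keys hash differently. A reduced sketch with a stand-in byte hash:

#include <cstddef>
#include <cstdint>

// Stand-in for the byte-hashing scheme; the real code uses Common::CityHash64.
struct alignas(8) KeySketch {
    bool is_single_buffer = false; // value-initialized: no indeterminate bytes
    bool stencil_enable = false;
    std::uint16_t colors_count = 0;
    std::uint32_t filler = 0;      // explicit padding keeps sizeof a u64 multiple
};
static_assert(sizeof(KeySketch) % sizeof(std::uint64_t) == 0, "Unaligned struct");

std::size_t HashBytes(const KeySketch& key) {
    std::size_t h = 0xcbf29ce484222325ull; // FNV-1a, illustration only
    const auto* p = reinterpret_cast<const unsigned char*>(&key);
    for (std::size_t i = 0; i < sizeof(key); ++i) {
        h = (h ^ p[i]) * 0x100000001b3ull;
    }
    return h;
}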
diff --git a/src/video_core/renderer_opengl/gl_framebuffer_cache.h b/src/video_core/renderer_opengl/gl_framebuffer_cache.h
new file mode 100644
index 000000000..a3a996353
--- /dev/null
+++ b/src/video_core/renderer_opengl/gl_framebuffer_cache.h
@@ -0,0 +1,68 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <array>
+#include <cstddef>
+#include <unordered_map>
+
+#include <glad/glad.h>
+
+#include "common/common_types.h"
+#include "video_core/engines/maxwell_3d.h"
+#include "video_core/renderer_opengl/gl_resource_manager.h"
+#include "video_core/renderer_opengl/gl_state.h"
+#include "video_core/renderer_opengl/gl_texture_cache.h"
+
+namespace OpenGL {
+
+struct alignas(sizeof(u64)) FramebufferCacheKey {
+ bool is_single_buffer = false;
+ bool stencil_enable = false;
+ u16 colors_count = 0;
+
+ std::array<GLenum, Tegra::Engines::Maxwell3D::Regs::NumRenderTargets> color_attachments{};
+ std::array<View, Tegra::Engines::Maxwell3D::Regs::NumRenderTargets> colors;
+ View zeta;
+
+ std::size_t Hash() const;
+
+ bool operator==(const FramebufferCacheKey& rhs) const;
+
+ bool operator!=(const FramebufferCacheKey& rhs) const {
+ return !operator==(rhs);
+ }
+};
+
+} // namespace OpenGL
+
+namespace std {
+
+template <>
+struct hash<OpenGL::FramebufferCacheKey> {
+ std::size_t operator()(const OpenGL::FramebufferCacheKey& k) const noexcept {
+ return k.Hash();
+ }
+};
+
+} // namespace std
+
+namespace OpenGL {
+
+class FramebufferCacheOpenGL {
+public:
+ FramebufferCacheOpenGL();
+ ~FramebufferCacheOpenGL();
+
+ GLuint GetFramebuffer(const FramebufferCacheKey& key);
+
+private:
+ OGLFramebuffer CreateFramebuffer(const FramebufferCacheKey& key);
+
+ OpenGLState local_state;
+ std::unordered_map<FramebufferCacheKey, OGLFramebuffer> cache;
+};
+
+} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_global_cache.cpp b/src/video_core/renderer_opengl/gl_global_cache.cpp
deleted file mode 100644
index ea4a593af..000000000
--- a/src/video_core/renderer_opengl/gl_global_cache.cpp
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2018 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include <glad/glad.h>
-
-#include "common/logging/log.h"
-#include "core/core.h"
-#include "video_core/memory_manager.h"
-#include "video_core/renderer_opengl/gl_global_cache.h"
-#include "video_core/renderer_opengl/gl_rasterizer.h"
-#include "video_core/renderer_opengl/gl_shader_decompiler.h"
-#include "video_core/renderer_opengl/utils.h"
-
-namespace OpenGL {
-
-CachedGlobalRegion::CachedGlobalRegion(VAddr cpu_addr, u8* host_ptr, u32 size, u32 max_size)
- : RasterizerCacheObject{host_ptr}, cpu_addr{cpu_addr}, host_ptr{host_ptr}, size{size},
- max_size{max_size} {
- buffer.Create();
- LabelGLObject(GL_BUFFER, buffer.handle, cpu_addr, "GlobalMemory");
-}
-
-CachedGlobalRegion::~CachedGlobalRegion() = default;
-
-void CachedGlobalRegion::Reload(u32 size_) {
- size = size_;
- if (size > max_size) {
- size = max_size;
- LOG_CRITICAL(HW_GPU, "Global region size {} exceeded the supported size {}!", size_,
- max_size);
- }
- glNamedBufferData(buffer.handle, size, host_ptr, GL_STREAM_DRAW);
-}
-
-void CachedGlobalRegion::Flush() {
- LOG_DEBUG(Render_OpenGL, "Flushing {} bytes to CPU memory address 0x{:16}", size, cpu_addr);
- glGetNamedBufferSubData(buffer.handle, 0, static_cast<GLsizeiptr>(size), host_ptr);
-}
-
-GlobalRegion GlobalRegionCacheOpenGL::TryGetReservedGlobalRegion(CacheAddr addr, u32 size) const {
- const auto search{reserve.find(addr)};
- if (search == reserve.end()) {
- return {};
- }
- return search->second;
-}
-
-GlobalRegion GlobalRegionCacheOpenGL::GetUncachedGlobalRegion(GPUVAddr addr, u8* host_ptr,
- u32 size) {
- GlobalRegion region{TryGetReservedGlobalRegion(ToCacheAddr(host_ptr), size)};
- if (!region) {
- // No reserved surface available, create a new one and reserve it
- auto& memory_manager{Core::System::GetInstance().GPU().MemoryManager()};
- const auto cpu_addr{memory_manager.GpuToCpuAddress(addr)};
- ASSERT(cpu_addr);
-
- region = std::make_shared<CachedGlobalRegion>(*cpu_addr, host_ptr, size, max_ssbo_size);
- ReserveGlobalRegion(region);
- }
- region->Reload(size);
- return region;
-}
-
-void GlobalRegionCacheOpenGL::ReserveGlobalRegion(GlobalRegion region) {
- reserve.insert_or_assign(region->GetCacheAddr(), std::move(region));
-}
-
-GlobalRegionCacheOpenGL::GlobalRegionCacheOpenGL(RasterizerOpenGL& rasterizer)
- : RasterizerCache{rasterizer} {
- GLint max_ssbo_size_;
- glGetIntegerv(GL_MAX_SHADER_STORAGE_BLOCK_SIZE, &max_ssbo_size_);
- max_ssbo_size = static_cast<u32>(max_ssbo_size_);
-}
-
-GlobalRegion GlobalRegionCacheOpenGL::GetGlobalRegion(
- const GLShader::GlobalMemoryEntry& global_region,
- Tegra::Engines::Maxwell3D::Regs::ShaderStage stage) {
-
- auto& gpu{Core::System::GetInstance().GPU()};
- auto& memory_manager{gpu.MemoryManager()};
- const auto cbufs{gpu.Maxwell3D().state.shader_stages[static_cast<std::size_t>(stage)]};
- const auto addr{cbufs.const_buffers[global_region.GetCbufIndex()].address +
- global_region.GetCbufOffset()};
- const auto actual_addr{memory_manager.Read<u64>(addr)};
- const auto size{memory_manager.Read<u32>(addr + 8)};
-
- // Look up global region in the cache based on address
- const auto& host_ptr{memory_manager.GetPointer(actual_addr)};
- GlobalRegion region{TryGet(host_ptr)};
-
- if (!region) {
- // No global region found - create a new one
- region = GetUncachedGlobalRegion(actual_addr, host_ptr, size);
- Register(region);
- }
-
- return region;
-}
-
-} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_global_cache.h b/src/video_core/renderer_opengl/gl_global_cache.h
deleted file mode 100644
index 2d467a240..000000000
--- a/src/video_core/renderer_opengl/gl_global_cache.h
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2018 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <memory>
-#include <unordered_map>
-
-#include <glad/glad.h>
-
-#include "common/assert.h"
-#include "common/common_types.h"
-#include "video_core/engines/maxwell_3d.h"
-#include "video_core/rasterizer_cache.h"
-#include "video_core/renderer_opengl/gl_resource_manager.h"
-
-namespace OpenGL {
-
-namespace GLShader {
-class GlobalMemoryEntry;
-}
-
-class RasterizerOpenGL;
-class CachedGlobalRegion;
-using GlobalRegion = std::shared_ptr<CachedGlobalRegion>;
-
-class CachedGlobalRegion final : public RasterizerCacheObject {
-public:
- explicit CachedGlobalRegion(VAddr cpu_addr, u8* host_ptr, u32 size, u32 max_size);
- ~CachedGlobalRegion();
-
- VAddr GetCpuAddr() const override {
- return cpu_addr;
- }
-
- std::size_t GetSizeInBytes() const override {
- return size;
- }
-
- /// Gets the GL program handle for the buffer
- GLuint GetBufferHandle() const {
- return buffer.handle;
- }
-
- /// Reloads the global region from guest memory
- void Reload(u32 size_);
-
- void Flush();
-
-private:
- VAddr cpu_addr{};
- u8* host_ptr{};
- u32 size{};
- u32 max_size{};
-
- OGLBuffer buffer;
-};
-
-class GlobalRegionCacheOpenGL final : public RasterizerCache<GlobalRegion> {
-public:
- explicit GlobalRegionCacheOpenGL(RasterizerOpenGL& rasterizer);
-
- /// Gets the current specified shader stage program
- GlobalRegion GetGlobalRegion(const GLShader::GlobalMemoryEntry& descriptor,
- Tegra::Engines::Maxwell3D::Regs::ShaderStage stage);
-
-protected:
- void FlushObjectInner(const GlobalRegion& object) override {
- object->Flush();
- }
-
-private:
- GlobalRegion TryGetReservedGlobalRegion(CacheAddr addr, u32 size) const;
- GlobalRegion GetUncachedGlobalRegion(GPUVAddr addr, u8* host_ptr, u32 size);
- void ReserveGlobalRegion(GlobalRegion region);
-
- std::unordered_map<CacheAddr, GlobalRegion> reserve;
- u32 max_ssbo_size{};
-};
-
-} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index ca410287a..4dd08bccb 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -4,6 +4,7 @@
#include <algorithm>
#include <array>
+#include <bitset>
#include <memory>
#include <string>
#include <string_view>
@@ -19,7 +20,9 @@
#include "core/core.h"
#include "core/hle/kernel/process.h"
#include "core/settings.h"
+#include "video_core/engines/kepler_compute.h"
#include "video_core/engines/maxwell_3d.h"
+#include "video_core/memory_manager.h"
#include "video_core/renderer_opengl/gl_rasterizer.h"
#include "video_core/renderer_opengl/gl_shader_cache.h"
#include "video_core/renderer_opengl/gl_shader_gen.h"
@@ -29,8 +32,10 @@
namespace OpenGL {
using Maxwell = Tegra::Engines::Maxwell3D::Regs;
-using PixelFormat = VideoCore::Surface::PixelFormat;
-using SurfaceType = VideoCore::Surface::SurfaceType;
+
+using VideoCore::Surface::PixelFormat;
+using VideoCore::Surface::SurfaceTarget;
+using VideoCore::Surface::SurfaceType;
MICROPROFILE_DEFINE(OpenGL_VAO, "OpenGL", "Vertex Format Setup", MP_RGB(128, 128, 192));
MICROPROFILE_DEFINE(OpenGL_VB, "OpenGL", "Vertex Buffer Setup", MP_RGB(128, 128, 192));
@@ -78,36 +83,31 @@ struct DrawParameters {
}
};
-struct FramebufferCacheKey {
- bool is_single_buffer = false;
- bool stencil_enable = false;
-
- std::array<GLenum, Maxwell::NumRenderTargets> color_attachments{};
- std::array<GLuint, Tegra::Engines::Maxwell3D::Regs::NumRenderTargets> colors{};
- u32 colors_count = 0;
-
- GLuint zeta = 0;
-
- auto Tie() const {
- return std::tie(is_single_buffer, stencil_enable, color_attachments, colors, colors_count,
- zeta);
+static std::size_t GetConstBufferSize(const Tegra::Engines::ConstBufferInfo& buffer,
+ const GLShader::ConstBufferEntry& entry) {
+ if (!entry.IsIndirect()) {
+ return entry.GetSize();
}
- bool operator<(const FramebufferCacheKey& rhs) const {
- return Tie() < rhs.Tie();
+ if (buffer.size > Maxwell::MaxConstBufferSize) {
+ LOG_WARNING(Render_OpenGL, "Indirect constbuffer size {} exceeds maximum {}", buffer.size,
+ Maxwell::MaxConstBufferSize);
+ return Maxwell::MaxConstBufferSize;
}
-};
+
+ return buffer.size;
+}
RasterizerOpenGL::RasterizerOpenGL(Core::System& system, Core::Frontend::EmuWindow& emu_window,
ScreenInfo& info)
- : res_cache{*this}, shader_cache{*this, system, emu_window, device},
- global_cache{*this}, system{system}, screen_info{info},
- buffer_cache(*this, STREAM_BUFFER_SIZE) {
+ : texture_cache{system, *this, device}, shader_cache{*this, system, emu_window, device},
+ system{system}, screen_info{info}, buffer_cache{*this, system, STREAM_BUFFER_SIZE} {
OpenGLState::ApplyDefaultState();
shader_program_manager = std::make_unique<GLShader::ProgramManager>();
state.draw.shader_program = 0;
state.Apply();
+ clear_framebuffer.Create();
LOG_DEBUG(Render_OpenGL, "Sync fixed function OpenGL state here");
CheckExtensions();
@@ -121,21 +121,16 @@ void RasterizerOpenGL::CheckExtensions() {
Render_OpenGL,
"Anisotropic filter is not supported! This can cause graphical issues in some games.");
}
- if (!GLAD_GL_ARB_buffer_storage) {
- LOG_WARNING(
- Render_OpenGL,
- "Buffer storage control is not supported! This can cause performance degradation.");
- }
}
GLuint RasterizerOpenGL::SetupVertexFormat() {
auto& gpu = system.GPU().Maxwell3D();
const auto& regs = gpu.regs;
- if (!gpu.dirty_flags.vertex_attrib_format) {
+ if (!gpu.dirty.vertex_attrib_format) {
return state.draw.vertex_array;
}
- gpu.dirty_flags.vertex_attrib_format = false;
+ gpu.dirty.vertex_attrib_format = false;
MICROPROFILE_SCOPE(OpenGL_VAO);
@@ -152,8 +147,6 @@ GLuint RasterizerOpenGL::SetupVertexFormat() {
state.draw.vertex_array = vao;
state.ApplyVertexArrayState();
- glVertexArrayElementBuffer(vao, buffer_cache.GetHandle());
-
// Use the vertex array as-is, assumes that the data is formatted correctly for OpenGL.
// Enables the first 16 vertex attributes always, as we don't know which ones are actually
// used until shader time. Note, Tegra technically supports 32, but we're capping this to 16
@@ -191,7 +184,7 @@ GLuint RasterizerOpenGL::SetupVertexFormat() {
}
// Rebinding the VAO invalidates the vertex buffer bindings.
- gpu.dirty_flags.vertex_array.set();
+ gpu.dirty.ResetVertexArrays();
state.draw.vertex_array = vao_entry.handle;
return vao_entry.handle;
@@ -199,17 +192,20 @@ GLuint RasterizerOpenGL::SetupVertexFormat() {
void RasterizerOpenGL::SetupVertexBuffer(GLuint vao) {
auto& gpu = system.GPU().Maxwell3D();
- const auto& regs = gpu.regs;
-
- if (gpu.dirty_flags.vertex_array.none())
+ if (!gpu.dirty.vertex_array_buffers)
return;
+ gpu.dirty.vertex_array_buffers = false;
+
+ const auto& regs = gpu.regs;
MICROPROFILE_SCOPE(OpenGL_VB);
// Upload all guest vertex arrays sequentially to our buffer
for (u32 index = 0; index < Maxwell::NumVertexArrays; ++index) {
- if (!gpu.dirty_flags.vertex_array[index])
+ if (!gpu.dirty.vertex_array[index])
continue;
+ gpu.dirty.vertex_array[index] = false;
+ gpu.dirty.vertex_instance[index] = false;
const auto& vertex_array = regs.vertex_array[index];
if (!vertex_array.IsEnabled())
@@ -220,11 +216,11 @@ void RasterizerOpenGL::SetupVertexBuffer(GLuint vao) {
ASSERT(end > start);
const u64 size = end - start + 1;
- const GLintptr vertex_buffer_offset = buffer_cache.UploadMemory(start, size);
+ const auto [vertex_buffer, vertex_buffer_offset] = buffer_cache.UploadMemory(start, size);
// Bind the vertex array to the buffer at the current offset.
- glVertexArrayVertexBuffer(vao, index, buffer_cache.GetHandle(), vertex_buffer_offset,
- vertex_array.stride);
+ vertex_array_pushbuffer.SetVertexBuffer(index, vertex_buffer, vertex_buffer_offset,
+ vertex_array.stride);
if (regs.instanced_arrays.IsInstancingEnabled(index) && vertex_array.divisor != 0) {
// Enable vertex buffer instancing with the specified divisor.
@@ -234,11 +230,47 @@ void RasterizerOpenGL::SetupVertexBuffer(GLuint vao) {
glVertexArrayBindingDivisor(vao, index, 0);
}
}
+}
+
+void RasterizerOpenGL::SetupVertexInstances(GLuint vao) {
+ auto& gpu = system.GPU().Maxwell3D();
+
+ if (!gpu.dirty.vertex_instances)
+ return;
+ gpu.dirty.vertex_instances = false;
+
+ const auto& regs = gpu.regs;
+    // Configure the instancing divisor for each dirty vertex array
+ for (u32 index = 0; index < Maxwell::NumVertexArrays; ++index) {
+ if (!gpu.dirty.vertex_instance[index])
+ continue;
+
+ gpu.dirty.vertex_instance[index] = false;
+
+ if (regs.instanced_arrays.IsInstancingEnabled(index) &&
+ regs.vertex_array[index].divisor != 0) {
+ // Enable vertex buffer instancing with the specified divisor.
+ glVertexArrayBindingDivisor(vao, index, regs.vertex_array[index].divisor);
+ } else {
+ // Disable the vertex buffer instancing.
+ glVertexArrayBindingDivisor(vao, index, 0);
+ }
+ }
+}
- gpu.dirty_flags.vertex_array.reset();
+GLintptr RasterizerOpenGL::SetupIndexBuffer() {
+ if (accelerate_draw != AccelDraw::Indexed) {
+ return 0;
+ }
+ MICROPROFILE_SCOPE(OpenGL_Index);
+ const auto& regs = system.GPU().Maxwell3D().regs;
+ const std::size_t size = CalculateIndexBufferSize();
+ const auto [buffer, offset] = buffer_cache.UploadMemory(regs.index_array.IndexStart(), size);
+ vertex_array_pushbuffer.SetIndexBuffer(buffer);
+ return offset;
}
-DrawParameters RasterizerOpenGL::SetupDraw() {
+DrawParameters RasterizerOpenGL::SetupDraw(GLintptr index_buffer_offset) {
const auto& gpu = system.GPU().Maxwell3D();
const auto& regs = gpu.regs;
const bool is_indexed = accelerate_draw == AccelDraw::Indexed;
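The dirty_flags bitsets give way to a structured dirty registry, and every consumer follows the same test-clear-act pattern so redundant GL calls are skipped. Reduced to a standalone sketch (the real flags live on Maxwell3D):

#include <array>
#include <cstddef>

struct DirtySketch {
    bool vertex_instances = true;            // aggregate "any divisor dirty"
    std::array<bool, 32> vertex_instance{};  // per-array dirty bits
};

void SetupInstancesSketch(DirtySketch& dirty) {
    if (!dirty.vertex_instances) {
        return;                              // fast path: nothing changed
    }
    dirty.vertex_instances = false;          // consume the aggregate flag
    for (std::size_t i = 0; i < dirty.vertex_instance.size(); ++i) {
        if (!dirty.vertex_instance[i]) {
            continue;
        }
        dirty.vertex_instance[i] = false;    // consume the per-array flag
        // ... re-emit glVertexArrayBindingDivisor for array i here ...
    }
}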
@@ -250,11 +282,9 @@ DrawParameters RasterizerOpenGL::SetupDraw() {
params.primitive_mode = MaxwellToGL::PrimitiveTopology(regs.draw.topology);
if (is_indexed) {
- MICROPROFILE_SCOPE(OpenGL_Index);
params.index_format = MaxwellToGL::IndexFormat(regs.index_array.format);
params.count = regs.index_array.count;
- params.index_buffer_offset =
- buffer_cache.UploadMemory(regs.index_array.IndexStart(), CalculateIndexBufferSize());
+ params.index_buffer_offset = index_buffer_offset;
params.base_vertex = static_cast<GLint>(regs.vb_element_base);
} else {
params.count = regs.vertex_buffer.count;
@@ -270,10 +300,6 @@ void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) {
BaseBindings base_bindings;
std::array<bool, Maxwell::NumClipDistances> clip_distances{};
- // Prepare packed bindings
- bind_ubo_pushbuffer.Setup(base_bindings.cbuf);
- bind_ssbo_pushbuffer.Setup(base_bindings.gmem);
-
for (std::size_t index = 0; index < Maxwell::MaxShaderProgram; ++index) {
const auto& shader_config = gpu.regs.shader_config[index];
const Maxwell::ShaderProgram program{static_cast<Maxwell::ShaderProgram>(index)};
@@ -294,16 +320,21 @@ void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) {
GLShader::MaxwellUniformData ubo{};
ubo.SetFromRegs(gpu, stage);
- const GLintptr offset =
+ const auto [buffer, offset] =
buffer_cache.UploadHostMemory(&ubo, sizeof(ubo), device.GetUniformBufferAlignment());
// Bind the emulation info buffer
- bind_ubo_pushbuffer.Push(buffer_cache.GetHandle(), offset,
- static_cast<GLsizeiptr>(sizeof(ubo)));
+ bind_ubo_pushbuffer.Push(buffer, offset, static_cast<GLsizeiptr>(sizeof(ubo)));
Shader shader{shader_cache.GetStageProgram(program)};
- const auto [program_handle, next_bindings] =
- shader->GetProgramHandle(primitive_mode, base_bindings);
+
+ const auto stage_enum = static_cast<Maxwell::ShaderStage>(stage);
+ SetupDrawConstBuffers(stage_enum, shader);
+ SetupDrawGlobalMemory(stage_enum, shader);
+ const auto texture_buffer_usage{SetupDrawTextures(stage_enum, shader, base_bindings)};
+
+ const ProgramVariant variant{base_bindings, primitive_mode, texture_buffer_usage};
+ const auto [program_handle, next_bindings] = shader->GetProgramHandle(variant);
switch (program) {
case Maxwell::ShaderProgram::VertexA:
@@ -321,11 +352,6 @@ void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) {
shader_config.enable.Value(), shader_config.offset);
}
- const auto stage_enum = static_cast<Maxwell::ShaderStage>(stage);
- SetupConstBuffers(stage_enum, shader, program_handle, base_bindings);
- SetupGlobalRegions(stage_enum, shader, program_handle, base_bindings);
- SetupTextures(stage_enum, shader, program_handle, base_bindings);
-
// Workaround for Intel drivers.
// When a clip distance is enabled but not set in the shader it crops parts of the screen
// (sometimes it's half the screen, sometimes three quarters). To avoid this, enable the
@@ -343,50 +369,9 @@ void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) {
base_bindings = next_bindings;
}
- bind_ubo_pushbuffer.Bind();
- bind_ssbo_pushbuffer.Bind();
-
SyncClipEnabled(clip_distances);
- gpu.dirty_flags.shaders = false;
-}
-
-void RasterizerOpenGL::SetupCachedFramebuffer(const FramebufferCacheKey& fbkey,
- OpenGLState& current_state) {
- const auto [entry, is_cache_miss] = framebuffer_cache.try_emplace(fbkey);
- auto& framebuffer = entry->second;
-
- if (is_cache_miss)
- framebuffer.Create();
-
- current_state.draw.draw_framebuffer = framebuffer.handle;
- current_state.ApplyFramebufferState();
-
- if (!is_cache_miss)
- return;
-
- if (fbkey.is_single_buffer) {
- if (fbkey.color_attachments[0] != GL_NONE) {
- glFramebufferTexture(GL_DRAW_FRAMEBUFFER, fbkey.color_attachments[0], fbkey.colors[0],
- 0);
- }
- glDrawBuffer(fbkey.color_attachments[0]);
- } else {
- for (std::size_t index = 0; index < Maxwell::NumRenderTargets; ++index) {
- if (fbkey.colors[index]) {
- glFramebufferTexture(GL_DRAW_FRAMEBUFFER,
- GL_COLOR_ATTACHMENT0 + static_cast<GLenum>(index),
- fbkey.colors[index], 0);
- }
- }
- glDrawBuffers(fbkey.colors_count, fbkey.color_attachments.data());
- }
-
- if (fbkey.zeta) {
- GLenum zeta_attachment =
- fbkey.stencil_enable ? GL_DEPTH_STENCIL_ATTACHMENT : GL_DEPTH_ATTACHMENT;
- glFramebufferTexture(GL_DRAW_FRAMEBUFFER, zeta_attachment, fbkey.zeta, 0);
- }
+ gpu.dirty.shaders = false;
}
std::size_t RasterizerOpenGL::CalculateVertexArraysSize() const {
@@ -469,18 +454,22 @@ std::pair<bool, bool> RasterizerOpenGL::ConfigureFramebuffers(
const FramebufferConfigState fb_config_state{using_color_fb, using_depth_fb, preserve_contents,
single_color_target};
- if (fb_config_state == current_framebuffer_config_state &&
- gpu.dirty_flags.color_buffer.none() && !gpu.dirty_flags.zeta_buffer) {
+ if (fb_config_state == current_framebuffer_config_state && !gpu.dirty.render_settings) {
// Only skip if the previous ConfigureFramebuffers call was from the same kind (multiple or
// single color targets). This is done because the guest registers may not change but the
// host framebuffer may contain different attachments
return current_depth_stencil_usage;
}
+ gpu.dirty.render_settings = false;
current_framebuffer_config_state = fb_config_state;
- Surface depth_surface;
+ texture_cache.GuardRenderTargets(true);
+
+ View depth_surface{};
if (using_depth_fb) {
- depth_surface = res_cache.GetDepthBufferSurface(preserve_contents);
+ depth_surface = texture_cache.GetDepthBufferSurface(preserve_contents);
+ } else {
+ texture_cache.SetEmptyDepthBuffer();
}
UNIMPLEMENTED_IF(regs.rt_separate_frag_data == 0);
@@ -493,42 +482,38 @@ std::pair<bool, bool> RasterizerOpenGL::ConfigureFramebuffers(
if (using_color_fb) {
if (single_color_target) {
// Used when just a single color attachment is enabled, e.g. for clearing a color buffer
- Surface color_surface =
- res_cache.GetColorBufferSurface(*single_color_target, preserve_contents);
+ View color_surface{
+ texture_cache.GetColorBufferSurface(*single_color_target, preserve_contents)};
if (color_surface) {
// Assume that a surface will be written to if it is used as a framebuffer, even if
// the shader doesn't actually write to it.
- color_surface->MarkAsModified(true, res_cache);
- // Workaround for and issue in nvidia drivers
- // https://devtalk.nvidia.com/default/topic/776591/opengl/gl_framebuffer_srgb-functions-incorrectly/
- state.framebuffer_srgb.enabled |= color_surface->GetSurfaceParams().srgb_conversion;
+ texture_cache.MarkColorBufferInUse(*single_color_target);
}
fbkey.is_single_buffer = true;
fbkey.color_attachments[0] =
GL_COLOR_ATTACHMENT0 + static_cast<GLenum>(*single_color_target);
- fbkey.colors[0] = color_surface != nullptr ? color_surface->Texture().handle : 0;
+ fbkey.colors[0] = color_surface;
+ for (std::size_t index = 0; index < Maxwell::NumRenderTargets; ++index) {
+ if (index != *single_color_target) {
+ texture_cache.SetEmptyColorBuffer(index);
+ }
+ }
} else {
// Multiple color attachments are enabled
for (std::size_t index = 0; index < Maxwell::NumRenderTargets; ++index) {
- Surface color_surface = res_cache.GetColorBufferSurface(index, preserve_contents);
+ View color_surface{texture_cache.GetColorBufferSurface(index, preserve_contents)};
if (color_surface) {
// Assume that a surface will be written to if it is used as a framebuffer, even
// if the shader doesn't actually write to it.
- color_surface->MarkAsModified(true, res_cache);
- // Enable sRGB only for supported formats
- // Workaround for and issue in nvidia drivers
- // https://devtalk.nvidia.com/default/topic/776591/opengl/gl_framebuffer_srgb-functions-incorrectly/
- state.framebuffer_srgb.enabled |=
- color_surface->GetSurfaceParams().srgb_conversion;
+ texture_cache.MarkColorBufferInUse(index);
}
fbkey.color_attachments[index] =
GL_COLOR_ATTACHMENT0 + regs.rt_control.GetMap(index);
- fbkey.colors[index] =
- color_surface != nullptr ? color_surface->Texture().handle : 0;
+ fbkey.colors[index] = color_surface;
}
fbkey.is_single_buffer = false;
fbkey.colors_count = regs.rt_control.count;
@@ -541,26 +526,84 @@ std::pair<bool, bool> RasterizerOpenGL::ConfigureFramebuffers(
if (depth_surface) {
// Assume that a surface will be written to if it is used as a framebuffer, even if
// the shader doesn't actually write to it.
- depth_surface->MarkAsModified(true, res_cache);
+ texture_cache.MarkDepthBufferInUse();
- fbkey.zeta = depth_surface->Texture().handle;
- fbkey.stencil_enable = regs.stencil_enable &&
- depth_surface->GetSurfaceParams().type == SurfaceType::DepthStencil;
+ fbkey.zeta = depth_surface;
+ fbkey.stencil_enable = depth_surface->GetSurfaceParams().type == SurfaceType::DepthStencil;
}
- SetupCachedFramebuffer(fbkey, current_state);
+ texture_cache.GuardRenderTargets(false);
+
+ current_state.draw.draw_framebuffer = framebuffer_cache.GetFramebuffer(fbkey);
SyncViewport(current_state);
return current_depth_stencil_usage = {static_cast<bool>(depth_surface), fbkey.stencil_enable};
}
+void RasterizerOpenGL::ConfigureClearFramebuffer(OpenGLState& current_state, bool using_color_fb,
+ bool using_depth_fb, bool using_stencil_fb) {
+ auto& gpu = system.GPU().Maxwell3D();
+ const auto& regs = gpu.regs;
+
+ texture_cache.GuardRenderTargets(true);
+ View color_surface{};
+ if (using_color_fb) {
+ color_surface = texture_cache.GetColorBufferSurface(regs.clear_buffers.RT, false);
+ }
+ View depth_surface{};
+ if (using_depth_fb || using_stencil_fb) {
+ depth_surface = texture_cache.GetDepthBufferSurface(false);
+ }
+ texture_cache.GuardRenderTargets(false);
+
+ current_state.draw.draw_framebuffer = clear_framebuffer.handle;
+ current_state.ApplyFramebufferState();
+
+ if (color_surface) {
+ color_surface->Attach(GL_COLOR_ATTACHMENT0, GL_DRAW_FRAMEBUFFER);
+ } else {
+ glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
+ }
+
+ if (depth_surface) {
+ const auto& params = depth_surface->GetSurfaceParams();
+ switch (params.type) {
+ case VideoCore::Surface::SurfaceType::Depth:
+ depth_surface->Attach(GL_DEPTH_ATTACHMENT, GL_DRAW_FRAMEBUFFER);
+ glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0);
+ break;
+ case VideoCore::Surface::SurfaceType::DepthStencil:
+ depth_surface->Attach(GL_DEPTH_STENCIL_ATTACHMENT, GL_DRAW_FRAMEBUFFER);
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ } else {
+ glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0,
+ 0);
+ }
+}
+
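A minimal sketch of the attach/detach pattern ConfigureClearFramebuffer relies on: binding texture 0 to an attachment point detaches it, so a color-only clear cannot touch a stale depth/stencil attachment left by a previous clear (AttachColorOnly is an illustrative name, not part of this change):

    void AttachColorOnly(GLuint fbo, GLuint color_texture) {
        glBindFramebuffer(GL_DRAW_FRAMEBUFFER, fbo);
        // Attach the surface to be cleared...
        glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
                               color_texture, 0);
        // ...and detach whatever depth/stencil texture an earlier clear left behind.
        glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D,
                               0, 0);
    }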
void RasterizerOpenGL::Clear() {
- const auto& regs = system.GPU().Maxwell3D().regs;
+ const auto& maxwell3d = system.GPU().Maxwell3D();
+
+ if (!maxwell3d.ShouldExecute()) {
+ return;
+ }
+
+ const auto& regs = maxwell3d.regs;
bool use_color{};
bool use_depth{};
bool use_stencil{};
- OpenGLState clear_state;
+ OpenGLState prev_state{OpenGLState::GetCurState()};
+ SCOPE_EXIT({
+ prev_state.AllDirty();
+ prev_state.Apply();
+ });
+
+ OpenGLState clear_state{OpenGLState::GetCurState()};
+ clear_state.SetDefaultViewports();
if (regs.clear_buffers.R || regs.clear_buffers.G || regs.clear_buffers.B ||
regs.clear_buffers.A) {
use_color = true;
@@ -580,11 +623,13 @@ void RasterizerOpenGL::Clear() {
// true.
clear_state.depth.test_enabled = true;
clear_state.depth.test_func = GL_ALWAYS;
+ clear_state.depth.write_mask = GL_TRUE;
}
if (regs.clear_buffers.S) {
ASSERT_MSG(regs.zeta_enable != 0, "Tried to clear stencil but buffer is not enabled!");
use_stencil = true;
clear_state.stencil.test_enabled = true;
+
if (regs.clear_flags.stencil) {
// Stencil affects the clear so fill it with the used masks
clear_state.stencil.front.test_func = GL_ALWAYS;
@@ -616,8 +661,9 @@ void RasterizerOpenGL::Clear() {
return;
}
- const auto [clear_depth, clear_stencil] = ConfigureFramebuffers(
- clear_state, use_color, use_depth || use_stencil, false, regs.clear_buffers.RT.Value());
+ ConfigureClearFramebuffer(clear_state, use_color, use_depth, use_stencil);
+
+ SyncViewport(clear_state);
if (regs.clear_flags.scissor) {
SyncScissorTest(clear_state);
}
@@ -626,20 +672,18 @@ void RasterizerOpenGL::Clear() {
clear_state.EmulateViewportWithScissor();
}
- clear_state.ApplyColorMask();
- clear_state.ApplyDepth();
- clear_state.ApplyStencilTest();
- clear_state.ApplyViewport();
+ clear_state.AllDirty();
+ clear_state.Apply();
if (use_color) {
- glClearBufferfv(GL_COLOR, regs.clear_buffers.RT, regs.clear_color);
+ glClearBufferfv(GL_COLOR, 0, regs.clear_color);
}
- if (clear_depth && clear_stencil) {
+ if (use_depth && use_stencil) {
glClearBufferfi(GL_DEPTH_STENCIL, 0, regs.clear_depth, regs.clear_stencil);
- } else if (clear_depth) {
+ } else if (use_depth) {
glClearBufferfv(GL_DEPTH, 0, &regs.clear_depth);
- } else if (clear_stencil) {
+ } else if (use_stencil) {
glClearBufferiv(GL_STENCIL, 0, &regs.clear_stencil);
}
}
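The prev_state/SCOPE_EXIT pair above is the usual save-and-restore idiom; a minimal hand-rolled equivalent, assuming only that OpenGLState can snapshot and reapply the full GL state (as GetCurState/AllDirty/Apply do above):

    struct StateRestorer {
        OpenGLState saved{OpenGLState::GetCurState()};
        ~StateRestorer() {
            saved.AllDirty(); // force every state section to be reapplied
            saved.Apply();
        }
    }; // constructing one at the top of Clear() restores state on every return path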
@@ -650,9 +694,11 @@ void RasterizerOpenGL::DrawArrays() {
MICROPROFILE_SCOPE(OpenGL_Drawing);
auto& gpu = system.GPU().Maxwell3D();
- const auto& regs = gpu.regs;
- ConfigureFramebuffers(state);
+ if (!gpu.ShouldExecute()) {
+ return;
+ }
+
SyncColorMask();
SyncFragmentColorClampState();
SyncMultiSampleState();
@@ -684,31 +730,102 @@ void RasterizerOpenGL::DrawArrays() {
Maxwell::MaxShaderStage;
// Add space for at least 18 constant buffers
- buffer_size +=
- Maxwell::MaxConstBuffers * (MaxConstbufferSize + device.GetUniformBufferAlignment());
+ buffer_size += Maxwell::MaxConstBuffers *
+ (Maxwell::MaxConstBufferSize + device.GetUniformBufferAlignment());
- const bool invalidate = buffer_cache.Map(buffer_size);
- if (invalidate) {
- // As all cached buffers are invalidated, we need to recheck their state.
- gpu.dirty_flags.vertex_array.set();
- }
+ // Map the stream buffer to stage this draw's uploads.
+ buffer_cache.Map(buffer_size);
+ // Prepare vertex array format.
const GLuint vao = SetupVertexFormat();
+ vertex_array_pushbuffer.Setup(vao);
+
+ // Upload vertex and index data.
SetupVertexBuffer(vao);
+ SetupVertexInstances(vao);
+ const GLintptr index_buffer_offset = SetupIndexBuffer();
+
+ // Set up draw parameters; SetupDraw chooses the appropriate glDraw* call automatically.
+ const DrawParameters params = SetupDraw(index_buffer_offset);
- DrawParameters params = SetupDraw();
+ // Prepare packed bindings.
+ bind_ubo_pushbuffer.Setup(0);
+ bind_ssbo_pushbuffer.Setup(0);
+
+ // Set up shaders and the resources they use.
+ texture_cache.GuardSamplers(true);
SetupShaders(params.primitive_mode);
+ texture_cache.GuardSamplers(false);
- buffer_cache.Unmap();
+ ConfigureFramebuffers(state);
+
+ // Signal the buffer cache that no more data will be uploaded.
+ const bool invalidate = buffer_cache.Unmap();
+
+ // Now that we are no longer uploading data, we can safely bind the buffers to OpenGL.
+ vertex_array_pushbuffer.Bind();
+ bind_ubo_pushbuffer.Bind();
+ bind_ssbo_pushbuffer.Bind();
+
+ if (invalidate) {
+ // As all cached buffers are invalidated, we need to recheck their state.
+ gpu.dirty.ResetVertexArrays();
+ }
shader_program_manager->ApplyTo(state);
state.Apply();
- res_cache.SignalPreDrawCall();
+ if (texture_cache.TextureBarrier()) {
+ glTextureBarrier();
+ }
+
params.DispatchDraw();
- res_cache.SignalPostDrawCall();
accelerate_draw = AccelDraw::Disabled;
+ gpu.dirty.memory_general = false;
+}
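The draw path above is deliberately ordered: all uploads happen between Map and Unmap, and GL binding points are only touched afterwards, because Unmap may invalidate previously cached buffers (hence the dirty.ResetVertexArrays call). A compressed outline, using only the methods shown above:

    buffer_cache.Map(buffer_size);        // open the staging window
    // ...record vertex/index/constbuffer/global memory uploads...
    const bool invalidate = buffer_cache.Unmap();
    vertex_array_pushbuffer.Bind();       // safe: no further uploads can move the buffers
    bind_ubo_pushbuffer.Bind();
    bind_ssbo_pushbuffer.Bind();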
+
+void RasterizerOpenGL::DispatchCompute(GPUVAddr code_addr) {
+ if (!GLAD_GL_ARB_compute_variable_group_size) {
+ LOG_ERROR(Render_OpenGL, "Compute is currently not supported on this device due to the "
+ "lack of GL_ARB_compute_variable_group_size");
+ return;
+ }
+
+ auto kernel = shader_cache.GetComputeKernel(code_addr);
+ ProgramVariant variant;
+ variant.texture_buffer_usage = SetupComputeTextures(kernel);
+ SetupComputeImages(kernel);
+
+ const auto [program, next_bindings] = kernel->GetProgramHandle(variant);
+ state.draw.shader_program = program;
+ state.draw.program_pipeline = 0;
+
+ const std::size_t buffer_size =
+ Tegra::Engines::KeplerCompute::NumConstBuffers *
+ (Maxwell::MaxConstBufferSize + device.GetUniformBufferAlignment());
+ buffer_cache.Map(buffer_size);
+
+ bind_ubo_pushbuffer.Setup(0);
+ bind_ssbo_pushbuffer.Setup(0);
+
+ SetupComputeConstBuffers(kernel);
+ SetupComputeGlobalMemory(kernel);
+
+ buffer_cache.Unmap();
+
+ bind_ubo_pushbuffer.Bind();
+ bind_ssbo_pushbuffer.Bind();
+
+ state.ApplyTextures();
+ state.ApplyImages();
+ state.ApplyShaderProgram();
+ state.ApplyProgramPipeline();
+
+ const auto& launch_desc = system.GPU().KeplerCompute().launch_description;
+ glDispatchComputeGroupSizeARB(launch_desc.grid_dim_x, launch_desc.grid_dim_y,
+ launch_desc.grid_dim_z, launch_desc.block_dim_x,
+ launch_desc.block_dim_y, launch_desc.block_dim_z);
}
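With ARB_compute_variable_group_size the work-group size is a dispatch-time argument rather than a GLSL layout qualifier; the invocation count per axis is the product of grid and block dimensions. A worked example with illustrative numbers:

    // 1024x1024 invocations = 64x64 groups of 16x16 threads each:
    glDispatchComputeGroupSizeARB(64, 64, 1,   // grid (number of work groups)
                                  16, 16, 1);  // block (invocations per group)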
void RasterizerOpenGL::FlushAll() {}
@@ -718,8 +835,8 @@ void RasterizerOpenGL::FlushRegion(CacheAddr addr, u64 size) {
if (!addr || !size) {
return;
}
- res_cache.FlushRegion(addr, size);
- global_cache.FlushRegion(addr, size);
+ texture_cache.FlushRegion(addr, size);
+ buffer_cache.FlushRegion(addr, size);
}
void RasterizerOpenGL::InvalidateRegion(CacheAddr addr, u64 size) {
@@ -727,23 +844,31 @@ void RasterizerOpenGL::InvalidateRegion(CacheAddr addr, u64 size) {
if (!addr || !size) {
return;
}
- res_cache.InvalidateRegion(addr, size);
+ texture_cache.InvalidateRegion(addr, size);
shader_cache.InvalidateRegion(addr, size);
- global_cache.InvalidateRegion(addr, size);
buffer_cache.InvalidateRegion(addr, size);
}
void RasterizerOpenGL::FlushAndInvalidateRegion(CacheAddr addr, u64 size) {
- FlushRegion(addr, size);
+ if (Settings::values.use_accurate_gpu_emulation) {
+ FlushRegion(addr, size);
+ }
InvalidateRegion(addr, size);
}
+void RasterizerOpenGL::FlushCommands() {
+ glFlush();
+}
+
+void RasterizerOpenGL::TickFrame() {
+ buffer_cache.TickFrame();
+}
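TickFrame gives the buffer cache a once-per-frame hook; stream buffers are commonly recycled on such boundaries. A hedged sketch of the idea only (OGLBufferCache's internals are not part of this diff; epoch, kFramesInFlight and ReclaimRegionsOlderThan are hypothetical):

    void SketchBufferCache::TickFrame() {
        ++epoch; // stamp this frame
        // Regions last written kFramesInFlight frames ago can no longer be
        // referenced by in-flight GL commands, so their space is reusable.
        ReclaimRegionsOlderThan(epoch - kFramesInFlight);
    }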
+
bool RasterizerOpenGL::AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src,
const Tegra::Engines::Fermi2D::Regs::Surface& dst,
- const Common::Rectangle<u32>& src_rect,
- const Common::Rectangle<u32>& dst_rect) {
+ const Tegra::Engines::Fermi2D::Config& copy_config) {
MICROPROFILE_SCOPE(OpenGL_Blits);
- res_cache.FermiCopySurface(src, dst, src_rect, dst_rect);
+ texture_cache.DoFermiCopy(src, dst, copy_config);
return true;
}
@@ -755,7 +880,8 @@ bool RasterizerOpenGL::AccelerateDisplay(const Tegra::FramebufferConfig& config,
MICROPROFILE_SCOPE(OpenGL_CacheManagement);
- const auto& surface{res_cache.TryFindFramebufferSurface(Memory::GetPointer(framebuffer_addr))};
+ const auto surface{
+ texture_cache.TryFindFramebufferSurface(Memory::GetPointer(framebuffer_addr))};
if (!surface) {
return {};
}
@@ -771,109 +897,204 @@ bool RasterizerOpenGL::AccelerateDisplay(const Tegra::FramebufferConfig& config,
LOG_WARNING(Render_OpenGL, "Framebuffer pixel_format is different");
}
- screen_info.display_texture = surface->Texture().handle;
+ screen_info.display_texture = surface->GetTexture();
+ screen_info.display_srgb = surface->GetSurfaceParams().srgb_conversion;
return true;
}
-void RasterizerOpenGL::SetupConstBuffers(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage,
- const Shader& shader, GLuint program_handle,
- BaseBindings base_bindings) {
+void RasterizerOpenGL::SetupDrawConstBuffers(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage,
+ const Shader& shader) {
MICROPROFILE_SCOPE(OpenGL_UBO);
- const auto& gpu = system.GPU();
- const auto& maxwell3d = gpu.Maxwell3D();
- const auto& shader_stage = maxwell3d.state.shader_stages[static_cast<std::size_t>(stage)];
- const auto& entries = shader->GetShaderEntries().const_buffers;
-
- // Upload only the enabled buffers from the 16 constbuffers of each shader stage
- for (u32 bindpoint = 0; bindpoint < entries.size(); ++bindpoint) {
- const auto& used_buffer = entries[bindpoint];
- const auto& buffer = shader_stage.const_buffers[used_buffer.GetIndex()];
-
- if (!buffer.enabled) {
- // Set values to zero to unbind buffers
- bind_ubo_pushbuffer.Push(0, 0, 0);
- continue;
- }
-
- std::size_t size = 0;
+ const auto& stages = system.GPU().Maxwell3D().state.shader_stages;
+ const auto& shader_stage = stages[static_cast<std::size_t>(stage)];
+ for (const auto& entry : shader->GetShaderEntries().const_buffers) {
+ const auto& buffer = shader_stage.const_buffers[entry.GetIndex()];
+ SetupConstBuffer(buffer, entry);
+ }
+}
- if (used_buffer.IsIndirect()) {
- // Buffer is accessed indirectly, so upload the entire thing
- size = buffer.size;
+void RasterizerOpenGL::SetupComputeConstBuffers(const Shader& kernel) {
+ MICROPROFILE_SCOPE(OpenGL_UBO);
+ const auto& launch_desc = system.GPU().KeplerCompute().launch_description;
+ for (const auto& entry : kernel->GetShaderEntries().const_buffers) {
+ const auto& config = launch_desc.const_buffer_config[entry.GetIndex()];
+ const std::bitset<8> mask = launch_desc.const_buffer_enable_mask.Value();
+ Tegra::Engines::ConstBufferInfo buffer;
+ buffer.address = config.Address();
+ buffer.size = config.size;
+ buffer.enabled = mask[entry.GetIndex()];
+ SetupConstBuffer(buffer, entry);
+ }
+}
- if (size > MaxConstbufferSize) {
- LOG_WARNING(Render_OpenGL, "Indirect constbuffer size {} exceeds maximum {}", size,
- MaxConstbufferSize);
- size = MaxConstbufferSize;
- }
- } else {
- // Buffer is accessed directly, upload just what we use
- size = used_buffer.GetSize();
- }
+void RasterizerOpenGL::SetupConstBuffer(const Tegra::Engines::ConstBufferInfo& buffer,
+ const GLShader::ConstBufferEntry& entry) {
+ if (!buffer.enabled) {
+ // Bind an empty dummy buffer to disabled constbuffer slots.
+ bind_ubo_pushbuffer.Push(buffer_cache.GetEmptyBuffer(sizeof(float)), 0, sizeof(float));
+ return;
+ }
- // Align the actual size so it ends up being a multiple of vec4 to meet the OpenGL std140
- // UBO alignment requirements.
- size = Common::AlignUp(size, sizeof(GLvec4));
- ASSERT_MSG(size <= MaxConstbufferSize, "Constbuffer too big");
+ // Align the actual size so it ends up being a multiple of vec4 to meet the OpenGL std140
+ // UBO alignment requirements.
+ const std::size_t size = Common::AlignUp(GetConstBufferSize(buffer, entry), sizeof(GLvec4));
- const GLintptr const_buffer_offset =
- buffer_cache.UploadMemory(buffer.address, size, device.GetUniformBufferAlignment());
+ const auto alignment = device.GetUniformBufferAlignment();
+ const auto [cbuf, offset] = buffer_cache.UploadMemory(buffer.address, size, alignment);
+ bind_ubo_pushbuffer.Push(cbuf, offset, size);
+}
- bind_ubo_pushbuffer.Push(buffer_cache.GetHandle(), const_buffer_offset, size);
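Common::AlignUp rounds the used size up to the next multiple of sizeof(GLvec4) == 16 bytes, meeting the std140 requirement mentioned in the comment above. A worked example (assuming GLvec4 is a four-float array, as elsewhere in the renderer):

    // AlignUp(value, align) == ((value + align - 1) / align) * align
    // A shader that reads 100 bytes of constbuffer data gets a 112-byte upload:
    //   AlignUp(100, sizeof(GLvec4)) == AlignUp(100, 16) == 112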
+void RasterizerOpenGL::SetupDrawGlobalMemory(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage,
+ const Shader& shader) {
+ auto& gpu{system.GPU()};
+ auto& memory_manager{gpu.MemoryManager()};
+ const auto cbufs{gpu.Maxwell3D().state.shader_stages[static_cast<std::size_t>(stage)]};
+ for (const auto& entry : shader->GetShaderEntries().global_memory_entries) {
+ const auto addr{cbufs.const_buffers[entry.GetCbufIndex()].address + entry.GetCbufOffset()};
+ const auto gpu_addr{memory_manager.Read<u64>(addr)};
+ const auto size{memory_manager.Read<u32>(addr + 8)};
+ SetupGlobalMemory(entry, gpu_addr, size);
}
}
-void RasterizerOpenGL::SetupGlobalRegions(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage,
- const Shader& shader, GLenum primitive_mode,
- BaseBindings base_bindings) {
- const auto& entries = shader->GetShaderEntries().global_memory_entries;
- for (std::size_t bindpoint = 0; bindpoint < entries.size(); ++bindpoint) {
- const auto& entry{entries[bindpoint]};
- const auto& region{global_cache.GetGlobalRegion(entry, stage)};
- if (entry.IsWritten()) {
- region->MarkAsModified(true, global_cache);
- }
- bind_ssbo_pushbuffer.Push(region->GetBufferHandle(), 0,
- static_cast<GLsizeiptr>(region->GetSizeInBytes()));
+void RasterizerOpenGL::SetupComputeGlobalMemory(const Shader& kernel) {
+ auto& gpu{system.GPU()};
+ auto& memory_manager{gpu.MemoryManager()};
+ const auto cbufs{gpu.KeplerCompute().launch_description.const_buffer_config};
+ for (const auto& entry : kernel->GetShaderEntries().global_memory_entries) {
+ const auto addr{cbufs[entry.GetCbufIndex()].Address() + entry.GetCbufOffset()};
+ const auto gpu_addr{memory_manager.Read<u64>(addr)};
+ const auto size{memory_manager.Read<u32>(addr + 8)};
+ SetupGlobalMemory(entry, gpu_addr, size);
}
}
-void RasterizerOpenGL::SetupTextures(Maxwell::ShaderStage stage, const Shader& shader,
- GLuint program_handle, BaseBindings base_bindings) {
+void RasterizerOpenGL::SetupGlobalMemory(const GLShader::GlobalMemoryEntry& entry,
+ GPUVAddr gpu_addr, std::size_t size) {
+ const auto alignment{device.GetShaderStorageBufferAlignment()};
+ const auto [ssbo, buffer_offset] =
+ buffer_cache.UploadMemory(gpu_addr, size, alignment, entry.IsWritten());
+ bind_ssbo_pushbuffer.Push(ssbo, buffer_offset, static_cast<GLsizeiptr>(size));
+}
+
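The two memory_manager reads above imply a small descriptor stored in guest constbuffer memory: a 64-bit GPU address followed, 8 bytes in, by a 32-bit size. As a sketch (the struct name is illustrative; only the offsets come from the code):

    struct GlobalMemoryDescriptor {
        u64 gpu_addr; // read at cbuf_addr + 0
        u32 size;     // read at cbuf_addr + 8
    };
    static_assert(offsetof(GlobalMemoryDescriptor, size) == 8);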
+TextureBufferUsage RasterizerOpenGL::SetupDrawTextures(Maxwell::ShaderStage stage,
+ const Shader& shader,
+ BaseBindings base_bindings) {
MICROPROFILE_SCOPE(OpenGL_Texture);
const auto& gpu = system.GPU();
const auto& maxwell3d = gpu.Maxwell3D();
const auto& entries = shader->GetShaderEntries().samplers;
- ASSERT_MSG(base_bindings.sampler + entries.size() <= std::size(state.texture_units),
+ ASSERT_MSG(base_bindings.sampler + entries.size() <= std::size(state.textures),
"Exceeded the number of active textures.");
+ TextureBufferUsage texture_buffer_usage{0};
+
for (u32 bindpoint = 0; bindpoint < entries.size(); ++bindpoint) {
const auto& entry = entries[bindpoint];
- Tegra::Texture::FullTextureInfo texture;
- if (entry.IsBindless()) {
+ const auto texture = [&]() {
+ if (!entry.IsBindless()) {
+ return maxwell3d.GetStageTexture(stage, entry.GetOffset());
+ }
const auto cbuf = entry.GetBindlessCBuf();
Tegra::Texture::TextureHandle tex_handle;
tex_handle.raw = maxwell3d.AccessConstBuffer32(stage, cbuf.first, cbuf.second);
- texture = maxwell3d.GetTextureInfo(tex_handle, entry.GetOffset());
- } else {
- texture = maxwell3d.GetStageTexture(stage, entry.GetOffset());
+ return maxwell3d.GetTextureInfo(tex_handle, entry.GetOffset());
+ }();
+
+ if (SetupTexture(base_bindings.sampler + bindpoint, texture, entry)) {
+ texture_buffer_usage.set(bindpoint);
}
- const u32 current_bindpoint = base_bindings.sampler + bindpoint;
+ }
- state.texture_units[current_bindpoint].sampler = sampler_cache.GetSampler(texture.tsc);
+ return texture_buffer_usage;
+}
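The returned mask feeds ProgramVariant (see DispatchCompute above): a binding that turns out to be a texture buffer must be declared with a buffer sampler type in the generated GLSL, so shaders are specialized per usage mask. A sketch of the consumer side, under that assumption:

    ProgramVariant variant;
    variant.texture_buffer_usage = SetupDrawTextures(stage, shader, base_bindings);
    // Identical guest shaders with different masks yield different host programs.
    const auto [program, next_bindings] = shader->GetProgramHandle(variant);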
- if (Surface surface = res_cache.GetTextureSurface(texture, entry); surface) {
- state.texture_units[current_bindpoint].texture =
- surface->Texture(entry.IsArray()).handle;
- surface->UpdateSwizzle(texture.tic.x_source, texture.tic.y_source, texture.tic.z_source,
- texture.tic.w_source);
- } else {
- // Can occur when texture addr is null or its memory is unmapped/invalid
- state.texture_units[current_bindpoint].texture = 0;
+TextureBufferUsage RasterizerOpenGL::SetupComputeTextures(const Shader& kernel) {
+ MICROPROFILE_SCOPE(OpenGL_Texture);
+ const auto& compute = system.GPU().KeplerCompute();
+ const auto& entries = kernel->GetShaderEntries().samplers;
+
+ ASSERT_MSG(entries.size() <= std::size(state.textures),
+ "Exceeded the number of active textures.");
+
+ TextureBufferUsage texture_buffer_usage{0};
+
+ for (u32 bindpoint = 0; bindpoint < entries.size(); ++bindpoint) {
+ const auto& entry = entries[bindpoint];
+ const auto texture = [&]() {
+ if (!entry.IsBindless()) {
+ return compute.GetTexture(entry.GetOffset());
+ }
+ const auto cbuf = entry.GetBindlessCBuf();
+ Tegra::Texture::TextureHandle tex_handle;
+ tex_handle.raw = compute.AccessConstBuffer32(cbuf.first, cbuf.second);
+ return compute.GetTextureInfo(tex_handle, entry.GetOffset());
+ }();
+
+ if (SetupTexture(bindpoint, texture, entry)) {
+ texture_buffer_usage.set(bindpoint);
}
}
+
+ return texture_buffer_usage;
+}
+
+bool RasterizerOpenGL::SetupTexture(u32 binding, const Tegra::Texture::FullTextureInfo& texture,
+ const GLShader::SamplerEntry& entry) {
+ state.samplers[binding] = sampler_cache.GetSampler(texture.tsc);
+
+ const auto view = texture_cache.GetTextureSurface(texture.tic, entry);
+ if (!view) {
+ // Can occur when the texture address is null or its memory is unmapped/invalid
+ state.textures[binding] = 0;
+ return false;
+ }
+ state.textures[binding] = view->GetTexture();
+
+ if (view->GetSurfaceParams().IsBuffer()) {
+ return true;
+ }
+
+ // Apply swizzle to textures that are not buffers.
+ view->ApplySwizzle(texture.tic.x_source, texture.tic.y_source, texture.tic.z_source,
+ texture.tic.w_source);
+ return false;
+}
+
+void RasterizerOpenGL::SetupComputeImages(const Shader& shader) {
+ const auto& compute = system.GPU().KeplerCompute();
+ const auto& entries = shader->GetShaderEntries().images;
+ for (u32 bindpoint = 0; bindpoint < entries.size(); ++bindpoint) {
+ const auto& entry = entries[bindpoint];
+ const auto tic = [&]() {
+ if (!entry.IsBindless()) {
+ return compute.GetTexture(entry.GetOffset()).tic;
+ }
+ const auto cbuf = entry.GetBindlessCBuf();
+ Tegra::Texture::TextureHandle tex_handle;
+ tex_handle.raw = compute.AccessConstBuffer32(cbuf.first, cbuf.second);
+ return compute.GetTextureInfo(tex_handle, entry.GetOffset()).tic;
+ }();
+ SetupImage(bindpoint, tic, entry);
+ }
+}
+
+void RasterizerOpenGL::SetupImage(u32 binding, const Tegra::Texture::TICEntry& tic,
+ const GLShader::ImageEntry& entry) {
+ const auto view = texture_cache.GetImageSurface(tic, entry);
+ if (!view) {
+ state.images[binding] = 0;
+ return;
+ }
+ if (!tic.IsBuffer()) {
+ view->ApplySwizzle(tic.x_source, tic.y_source, tic.z_source, tic.w_source);
+ }
+ if (entry.IsWritten()) {
+ view->MarkAsModified(texture_cache.Tick());
+ }
+ state.images[binding] = view->GetTexture();
}
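MarkAsModified stamps the view with the cache's current tick, which is how the texture cache orders GPU-side writes against guest memory. A minimal sketch of tick-based tracking, assuming a monotonically increasing counter (names are illustrative):

    class ViewSketch {
    public:
        void MarkAsModified(u64 tick) {
            modification_tick = tick; // host copy is now newer than guest RAM
        }
        bool IsModifiedSince(u64 flush_tick) const {
            return modification_tick > flush_tick; // needs flushing
        }
    private:
        u64 modification_tick = 0;
    };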
void RasterizerOpenGL::SyncViewport(OpenGLState& current_state) {
@@ -917,10 +1138,11 @@ void RasterizerOpenGL::SyncClipCoef() {
}
void RasterizerOpenGL::SyncCullMode() {
- const auto& regs = system.GPU().Maxwell3D().regs;
+ auto& maxwell3d = system.GPU().Maxwell3D();
- state.cull.enabled = regs.cull.enabled != 0;
+ const auto& regs = maxwell3d.regs;
+ state.cull.enabled = regs.cull.enabled != 0;
if (state.cull.enabled) {
state.cull.front_face = MaxwellToGL::FrontFace(regs.cull.front_face);
state.cull.mode = MaxwellToGL::CullFace(regs.cull.cull_face);
@@ -953,15 +1175,23 @@ void RasterizerOpenGL::SyncDepthTestState() {
state.depth.test_enabled = regs.depth_test_enable != 0;
state.depth.write_mask = regs.depth_write_enabled ? GL_TRUE : GL_FALSE;
- if (!state.depth.test_enabled)
+ if (!state.depth.test_enabled) {
return;
+ }
state.depth.test_func = MaxwellToGL::ComparisonOp(regs.depth_test_func);
}
void RasterizerOpenGL::SyncStencilTestState() {
- const auto& regs = system.GPU().Maxwell3D().regs;
+ auto& maxwell3d = system.GPU().Maxwell3D();
+ if (!maxwell3d.dirty.stencil_test) {
+ return;
+ }
+ maxwell3d.dirty.stencil_test = false;
+
+ const auto& regs = maxwell3d.regs;
state.stencil.test_enabled = regs.stencil_enable != 0;
+ state.MarkDirtyStencilState();
if (!regs.stencil_enable) {
return;
@@ -994,7 +1224,12 @@ void RasterizerOpenGL::SyncStencilTestState() {
}
void RasterizerOpenGL::SyncColorMask() {
- const auto& regs = system.GPU().Maxwell3D().regs;
+ auto& maxwell3d = system.GPU().Maxwell3D();
+ if (!maxwell3d.dirty.color_mask) {
+ return;
+ }
+ const auto& regs = maxwell3d.regs;
+
const std::size_t count =
regs.independent_blend_enable ? Tegra::Engines::Maxwell3D::Regs::NumRenderTargets : 1;
for (std::size_t i = 0; i < count; i++) {
@@ -1005,6 +1240,9 @@ void RasterizerOpenGL::SyncColorMask() {
dest.blue_enabled = (source.B == 0) ? GL_FALSE : GL_TRUE;
dest.alpha_enabled = (source.A == 0) ? GL_FALSE : GL_TRUE;
}
+
+ state.MarkDirtyColorMask();
+ maxwell3d.dirty.color_mask = false;
}
void RasterizerOpenGL::SyncMultiSampleState() {
@@ -1019,7 +1257,11 @@ void RasterizerOpenGL::SyncFragmentColorClampState() {
}
void RasterizerOpenGL::SyncBlendState() {
- const auto& regs = system.GPU().Maxwell3D().regs;
+ auto& maxwell3d = system.GPU().Maxwell3D();
+ if (!maxwell3d.dirty.blend_state) {
+ return;
+ }
+ const auto& regs = maxwell3d.regs;
state.blend_color.red = regs.blend_color.r;
state.blend_color.green = regs.blend_color.g;
@@ -1042,6 +1284,8 @@ void RasterizerOpenGL::SyncBlendState() {
for (std::size_t i = 1; i < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets; i++) {
state.blend[i].enabled = false;
}
+ maxwell3d.dirty.blend_state = false;
+ state.MarkDirtyBlendState();
return;
}
@@ -1058,6 +1302,9 @@ void RasterizerOpenGL::SyncBlendState() {
blend.src_a_func = MaxwellToGL::BlendFunc(src.factor_source_a);
blend.dst_a_func = MaxwellToGL::BlendFunc(src.factor_dest_a);
}
+
+ state.MarkDirtyBlendState();
+ maxwell3d.dirty.blend_state = false;
}
void RasterizerOpenGL::SyncLogicOpState() {
@@ -1109,13 +1356,21 @@ void RasterizerOpenGL::SyncPointState() {
}
void RasterizerOpenGL::SyncPolygonOffset() {
- const auto& regs = system.GPU().Maxwell3D().regs;
+ auto& maxwell3d = system.GPU().Maxwell3D();
+ if (!maxwell3d.dirty.polygon_offset) {
+ return;
+ }
+ const auto& regs = maxwell3d.regs;
+
state.polygon_offset.fill_enable = regs.polygon_offset_fill_enable != 0;
state.polygon_offset.line_enable = regs.polygon_offset_line_enable != 0;
state.polygon_offset.point_enable = regs.polygon_offset_point_enable != 0;
state.polygon_offset.units = regs.polygon_offset_units;
state.polygon_offset.factor = regs.polygon_offset_factor;
state.polygon_offset.clamp = regs.polygon_offset_clamp;
+
+ state.MarkDirtyPolygonOffset();
+ maxwell3d.dirty.polygon_offset = false;
}
void RasterizerOpenGL::SyncAlphaTest() {
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index 2817f65c9..eada752e0 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -17,19 +17,22 @@
#include <glad/glad.h>
#include "common/common_types.h"
+#include "video_core/engines/const_buffer_info.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/rasterizer_cache.h"
#include "video_core/rasterizer_interface.h"
#include "video_core/renderer_opengl/gl_buffer_cache.h"
#include "video_core/renderer_opengl/gl_device.h"
-#include "video_core/renderer_opengl/gl_global_cache.h"
-#include "video_core/renderer_opengl/gl_rasterizer_cache.h"
+#include "video_core/renderer_opengl/gl_framebuffer_cache.h"
#include "video_core/renderer_opengl/gl_resource_manager.h"
#include "video_core/renderer_opengl/gl_sampler_cache.h"
#include "video_core/renderer_opengl/gl_shader_cache.h"
+#include "video_core/renderer_opengl/gl_shader_decompiler.h"
#include "video_core/renderer_opengl/gl_shader_manager.h"
#include "video_core/renderer_opengl/gl_state.h"
+#include "video_core/renderer_opengl/gl_texture_cache.h"
#include "video_core/renderer_opengl/utils.h"
+#include "video_core/textures/texture.h"
namespace Core {
class System;
@@ -39,11 +42,14 @@ namespace Core::Frontend {
class EmuWindow;
}
+namespace Tegra {
+class MemoryManager;
+}
+
namespace OpenGL {
struct ScreenInfo;
struct DrawParameters;
-struct FramebufferCacheKey;
class RasterizerOpenGL : public VideoCore::RasterizerInterface {
public:
@@ -53,14 +59,16 @@ public:
void DrawArrays() override;
void Clear() override;
+ void DispatchCompute(GPUVAddr code_addr) override;
void FlushAll() override;
void FlushRegion(CacheAddr addr, u64 size) override;
void InvalidateRegion(CacheAddr addr, u64 size) override;
void FlushAndInvalidateRegion(CacheAddr addr, u64 size) override;
+ void FlushCommands() override;
+ void TickFrame() override;
bool AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src,
const Tegra::Engines::Fermi2D::Regs::Surface& dst,
- const Common::Rectangle<u32>& src_rect,
- const Common::Rectangle<u32>& dst_rect) override;
+ const Tegra::Engines::Fermi2D::Config& copy_config) override;
bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr,
u32 pixel_stride) override;
bool AccelerateDrawBatch(bool is_indexed) override;
@@ -68,11 +76,6 @@ public:
void LoadDiskResources(const std::atomic_bool& stop_loading,
const VideoCore::DiskResourceLoadCallback& callback) override;
- /// Maximum supported size that a constbuffer can have in bytes.
- static constexpr std::size_t MaxConstbufferSize = 0x10000;
- static_assert(MaxConstbufferSize % sizeof(GLvec4) == 0,
- "The maximum size of a constbuffer must be a multiple of the size of GLvec4");
-
private:
struct FramebufferConfigState {
bool using_color_fb{};
@@ -93,29 +96,64 @@ private:
/**
* Configures the color and depth framebuffer states.
- * @param use_color_fb If true, configure color framebuffers.
- * @param using_depth_fb If true, configure the depth/stencil framebuffer.
- * @param preserve_contents If true, tries to preserve data from a previously used framebuffer.
+ *
+ * @param current_state The current OpenGL state.
+ * @param using_color_fb If true, configure color framebuffers.
+ * @param using_depth_fb If true, configure the depth/stencil framebuffer.
+ * @param preserve_contents If true, tries to preserve data from a previously used
+ * framebuffer.
* @param single_color_target Specifies if a single color buffer target should be used.
+ *
* @returns If depth (first) or stencil (second) are being stored in the bound zeta texture
- * (requires using_depth_fb to be true)
+ * (requires using_depth_fb to be true)
*/
std::pair<bool, bool> ConfigureFramebuffers(
- OpenGLState& current_state, bool use_color_fb = true, bool using_depth_fb = true,
+ OpenGLState& current_state, bool using_color_fb = true, bool using_depth_fb = true,
bool preserve_contents = true, std::optional<std::size_t> single_color_target = {});
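A sketch of the call-site pattern the returned pair supports (hypothetical usage; the real caller stores it in current_depth_stencil_usage):

    const auto [depth_stored, stencil_stored] = ConfigureFramebuffers(state);
    // depth_stored/stencil_stored tell later passes what the bound zeta
    // texture actually holds, e.g. whether a stencil clear is meaningful.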
+ void ConfigureClearFramebuffer(OpenGLState& current_state, bool using_color_fb,
+ bool using_depth_fb, bool using_stencil_fb);
+
/// Configures the current constbuffers to use for the draw command.
- void SetupConstBuffers(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage, const Shader& shader,
- GLuint program_handle, BaseBindings base_bindings);
+ void SetupDrawConstBuffers(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage,
+ const Shader& shader);
+
+ /// Configures the current constbuffers to use for the kernel invocation.
+ void SetupComputeConstBuffers(const Shader& kernel);
+
+ /// Configures a constant buffer.
+ void SetupConstBuffer(const Tegra::Engines::ConstBufferInfo& buffer,
+ const GLShader::ConstBufferEntry& entry);
/// Configures the current global memory entries to use for the draw command.
- void SetupGlobalRegions(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage,
- const Shader& shader, GLenum primitive_mode,
- BaseBindings base_bindings);
+ void SetupDrawGlobalMemory(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage,
+ const Shader& shader);
+
+ /// Configures the current global memory entries to use for the kernel invocation.
+ void SetupComputeGlobalMemory(const Shader& kernel);
+
+ /// Configures a global memory region.
+ void SetupGlobalMemory(const GLShader::GlobalMemoryEntry& entry, GPUVAddr gpu_addr,
+ std::size_t size);
- /// Configures the current textures to use for the draw command.
- void SetupTextures(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage, const Shader& shader,
- GLuint program_handle, BaseBindings base_bindings);
+ /// Configures the current textures to use for the draw command. Returns the shader's texture
+ /// buffer usage.
+ TextureBufferUsage SetupDrawTextures(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage,
+ const Shader& shader, BaseBindings base_bindings);
+
+ /// Configures the textures used in a compute shader. Returns texture buffer usage.
+ TextureBufferUsage SetupComputeTextures(const Shader& kernel);
+
+ /// Configures a texture. Returns true when the texture is a texture buffer.
+ bool SetupTexture(u32 binding, const Tegra::Texture::FullTextureInfo& texture,
+ const GLShader::SamplerEntry& entry);
+
+ /// Configures images in a compute shader.
+ void SetupComputeImages(const Shader& shader);
+
+ /// Configures an image.
+ void SetupImage(u32 binding, const Tegra::Texture::TICEntry& tic,
+ const GLShader::ImageEntry& entry);
/// Syncs the viewport and depth range to match the guest state
void SyncViewport(OpenGLState& current_state);
@@ -176,10 +214,10 @@ private:
const Device device;
OpenGLState state;
- RasterizerCacheOpenGL res_cache;
+ TextureCacheOpenGL texture_cache;
ShaderCacheOpenGL shader_cache;
- GlobalRegionCacheOpenGL global_cache;
SamplerCacheOpenGL sampler_cache;
+ FramebufferCacheOpenGL framebuffer_cache;
Core::System& system;
ScreenInfo& screen_info;
@@ -190,13 +228,13 @@ private:
OGLVertexArray>
vertex_array_cache;
- std::map<FramebufferCacheKey, OGLFramebuffer> framebuffer_cache;
FramebufferConfigState current_framebuffer_config_state;
std::pair<bool, bool> current_depth_stencil_usage{};
static constexpr std::size_t STREAM_BUFFER_SIZE = 128 * 1024 * 1024;
OGLBufferCache buffer_cache;
+ VertexArrayPushBuffer vertex_array_pushbuffer;
BindBuffersRangePushBuffer bind_ubo_pushbuffer{GL_UNIFORM_BUFFER};
BindBuffersRangePushBuffer bind_ssbo_pushbuffer{GL_SHADER_STORAGE_BUFFER};
@@ -208,16 +246,19 @@ private:
GLuint SetupVertexFormat();
void SetupVertexBuffer(GLuint vao);
+ void SetupVertexInstances(GLuint vao);
- DrawParameters SetupDraw();
+ GLintptr SetupIndexBuffer();
- void SetupShaders(GLenum primitive_mode);
+ DrawParameters SetupDraw(GLintptr index_buffer_offset);
- void SetupCachedFramebuffer(const FramebufferCacheKey& fbkey, OpenGLState& current_state);
+ void SetupShaders(GLenum primitive_mode);
enum class AccelDraw { Disabled, Arrays, Indexed };
AccelDraw accelerate_draw = AccelDraw::Disabled;
+ OGLFramebuffer clear_framebuffer;
+
using CachedPageMap = boost::icl::interval_map<u64, int>;
CachedPageMap cached_pages;
};
diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp
deleted file mode 100644
index a7681902e..000000000
--- a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp
+++ /dev/null
@@ -1,1362 +0,0 @@
-// Copyright 2018 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include <algorithm>
-#include <optional>
-#include <glad/glad.h>
-
-#include "common/alignment.h"
-#include "common/assert.h"
-#include "common/logging/log.h"
-#include "common/microprofile.h"
-#include "common/scope_exit.h"
-#include "core/core.h"
-#include "core/hle/kernel/process.h"
-#include "core/settings.h"
-#include "video_core/engines/maxwell_3d.h"
-#include "video_core/memory_manager.h"
-#include "video_core/morton.h"
-#include "video_core/renderer_opengl/gl_rasterizer.h"
-#include "video_core/renderer_opengl/gl_rasterizer_cache.h"
-#include "video_core/renderer_opengl/utils.h"
-#include "video_core/surface.h"
-#include "video_core/textures/convert.h"
-#include "video_core/textures/decoders.h"
-
-namespace OpenGL {
-
-using VideoCore::MortonSwizzle;
-using VideoCore::MortonSwizzleMode;
-using VideoCore::Surface::ComponentTypeFromDepthFormat;
-using VideoCore::Surface::ComponentTypeFromRenderTarget;
-using VideoCore::Surface::ComponentTypeFromTexture;
-using VideoCore::Surface::PixelFormatFromDepthFormat;
-using VideoCore::Surface::PixelFormatFromRenderTargetFormat;
-using VideoCore::Surface::PixelFormatFromTextureFormat;
-using VideoCore::Surface::SurfaceTargetFromTextureType;
-
-struct FormatTuple {
- GLint internal_format;
- GLenum format;
- GLenum type;
- ComponentType component_type;
- bool compressed;
-};
-
-static void ApplyTextureDefaults(GLuint texture, u32 max_mip_level) {
- glTextureParameteri(texture, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
- glTextureParameteri(texture, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
- glTextureParameteri(texture, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
- glTextureParameteri(texture, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
- glTextureParameteri(texture, GL_TEXTURE_MAX_LEVEL, max_mip_level - 1);
- if (max_mip_level == 1) {
- glTextureParameterf(texture, GL_TEXTURE_LOD_BIAS, 1000.0);
- }
-}
-
-void SurfaceParams::InitCacheParameters(GPUVAddr gpu_addr_) {
- auto& memory_manager{Core::System::GetInstance().GPU().MemoryManager()};
-
- gpu_addr = gpu_addr_;
- host_ptr = memory_manager.GetPointer(gpu_addr_);
- size_in_bytes = SizeInBytesRaw();
-
- if (IsPixelFormatASTC(pixel_format)) {
- // ASTC is decompressed in software and emulated as RGBA8
- size_in_bytes_gl = width * height * depth * 4;
- } else {
- size_in_bytes_gl = SizeInBytesGL();
- }
-}
-
-std::size_t SurfaceParams::InnerMipmapMemorySize(u32 mip_level, bool force_gl, bool layer_only,
- bool uncompressed) const {
- const u32 tile_x{GetDefaultBlockWidth(pixel_format)};
- const u32 tile_y{GetDefaultBlockHeight(pixel_format)};
- const u32 bytes_per_pixel{GetBytesPerPixel(pixel_format)};
- u32 m_depth = (layer_only ? 1U : depth);
- u32 m_width = MipWidth(mip_level);
- u32 m_height = MipHeight(mip_level);
- m_width = uncompressed ? m_width : std::max(1U, (m_width + tile_x - 1) / tile_x);
- m_height = uncompressed ? m_height : std::max(1U, (m_height + tile_y - 1) / tile_y);
- m_depth = std::max(1U, m_depth >> mip_level);
- u32 m_block_height = MipBlockHeight(mip_level);
- u32 m_block_depth = MipBlockDepth(mip_level);
- return Tegra::Texture::CalculateSize(force_gl ? false : is_tiled, bytes_per_pixel, m_width,
- m_height, m_depth, m_block_height, m_block_depth);
-}
-
-std::size_t SurfaceParams::InnerMemorySize(bool force_gl, bool layer_only,
- bool uncompressed) const {
- std::size_t block_size_bytes = Tegra::Texture::GetGOBSize() * block_height * block_depth;
- std::size_t size = 0;
- for (u32 i = 0; i < max_mip_level; i++) {
- size += InnerMipmapMemorySize(i, force_gl, layer_only, uncompressed);
- }
- if (!force_gl && is_tiled) {
- size = Common::AlignUp(size, block_size_bytes);
- }
- return size;
-}
-
-/*static*/ SurfaceParams SurfaceParams::CreateForTexture(
- const Tegra::Texture::FullTextureInfo& config, const GLShader::SamplerEntry& entry) {
- SurfaceParams params{};
- params.is_tiled = config.tic.IsTiled();
- params.block_width = params.is_tiled ? config.tic.BlockWidth() : 0,
- params.block_height = params.is_tiled ? config.tic.BlockHeight() : 0,
- params.block_depth = params.is_tiled ? config.tic.BlockDepth() : 0,
- params.tile_width_spacing = params.is_tiled ? (1 << config.tic.tile_width_spacing.Value()) : 1;
- params.srgb_conversion = config.tic.IsSrgbConversionEnabled();
- params.pixel_format = PixelFormatFromTextureFormat(config.tic.format, config.tic.r_type.Value(),
- params.srgb_conversion);
-
- if (config.tsc.depth_compare_enabled) {
- // Some titles create an 'R16U' (normalized 16-bit) texture with depth_compare enabled,
- // then attempt to sample from it via a shadow sampler. Convert format to Z16 (which also
- // causes GetFormatType to properly return 'Depth' below).
- if (GetFormatType(params.pixel_format) == SurfaceType::ColorTexture) {
- switch (params.pixel_format) {
- case PixelFormat::R16S:
- case PixelFormat::R16U:
- case PixelFormat::R16F:
- params.pixel_format = PixelFormat::Z16;
- break;
- case PixelFormat::R32F:
- params.pixel_format = PixelFormat::Z32F;
- break;
- default:
- LOG_WARNING(HW_GPU, "Color texture format being used with depth compare: {}",
- static_cast<u32>(params.pixel_format));
- break;
- }
- }
- }
-
- params.component_type = ComponentTypeFromTexture(config.tic.r_type.Value());
- params.type = GetFormatType(params.pixel_format);
- UNIMPLEMENTED_IF(params.type == SurfaceType::ColorTexture && config.tsc.depth_compare_enabled);
-
- params.width = Common::AlignUp(config.tic.Width(), GetCompressionFactor(params.pixel_format));
- params.height = Common::AlignUp(config.tic.Height(), GetCompressionFactor(params.pixel_format));
- if (!params.is_tiled) {
- params.pitch = config.tic.Pitch();
- }
- params.unaligned_height = config.tic.Height();
- params.target = SurfaceTargetFromTextureType(config.tic.texture_type);
- params.identity = SurfaceClass::Uploaded;
-
- switch (params.target) {
- case SurfaceTarget::Texture1D:
- case SurfaceTarget::Texture2D:
- params.depth = 1;
- break;
- case SurfaceTarget::TextureCubemap:
- params.depth = config.tic.Depth() * 6;
- break;
- case SurfaceTarget::Texture3D:
- params.depth = config.tic.Depth();
- break;
- case SurfaceTarget::Texture2DArray:
- params.depth = config.tic.Depth();
- if (!entry.IsArray()) {
- // TODO(bunnei): We have seen games re-use a Texture2D as Texture2DArray with depth of
- // one, but sample the texture in the shader as if it were not an array texture. This
- // probably is valid on hardware, but we still need to write a test to confirm this. In
- // emulation, the workaround here is to continue to treat this as a Texture2D. An
- // example game that does this is Super Mario Odyssey (in Cloud Kingdom).
- ASSERT(params.depth == 1);
- params.target = SurfaceTarget::Texture2D;
- }
- break;
- case SurfaceTarget::TextureCubeArray:
- params.depth = config.tic.Depth() * 6;
- if (!entry.IsArray()) {
- ASSERT(params.depth == 6);
- params.target = SurfaceTarget::TextureCubemap;
- }
- break;
- default:
- LOG_CRITICAL(HW_GPU, "Unknown depth for target={}", static_cast<u32>(params.target));
- UNREACHABLE();
- params.depth = 1;
- break;
- }
-
- params.is_layered = SurfaceTargetIsLayered(params.target);
- params.is_array = SurfaceTargetIsArray(params.target);
- params.max_mip_level = config.tic.max_mip_level + 1;
- params.rt = {};
-
- params.InitCacheParameters(config.tic.Address());
-
- return params;
-}
-
-/*static*/ SurfaceParams SurfaceParams::CreateForFramebuffer(std::size_t index) {
- const auto& config{Core::System::GetInstance().GPU().Maxwell3D().regs.rt[index]};
- SurfaceParams params{};
-
- params.is_tiled =
- config.memory_layout.type == Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout::BlockLinear;
- params.block_width = 1 << config.memory_layout.block_width;
- params.block_height = 1 << config.memory_layout.block_height;
- params.block_depth = 1 << config.memory_layout.block_depth;
- params.tile_width_spacing = 1;
- params.pixel_format = PixelFormatFromRenderTargetFormat(config.format);
- params.srgb_conversion = config.format == Tegra::RenderTargetFormat::BGRA8_SRGB ||
- config.format == Tegra::RenderTargetFormat::RGBA8_SRGB;
- params.component_type = ComponentTypeFromRenderTarget(config.format);
- params.type = GetFormatType(params.pixel_format);
- if (params.is_tiled) {
- params.width = config.width;
- } else {
- params.pitch = config.width;
- const u32 bpp = params.GetFormatBpp() / 8;
- params.width = params.pitch / bpp;
- }
- params.height = config.height;
- params.unaligned_height = config.height;
- params.target = SurfaceTarget::Texture2D;
- params.identity = SurfaceClass::RenderTarget;
- params.depth = 1;
- params.max_mip_level = 1;
- params.is_layered = false;
-
- // Render target specific parameters, not used for caching
- params.rt.index = static_cast<u32>(index);
- params.rt.array_mode = config.array_mode;
- params.rt.layer_stride = config.layer_stride;
- params.rt.volume = config.volume;
- params.rt.base_layer = config.base_layer;
-
- params.InitCacheParameters(config.Address());
-
- return params;
-}
-
-/*static*/ SurfaceParams SurfaceParams::CreateForDepthBuffer(
- u32 zeta_width, u32 zeta_height, GPUVAddr zeta_address, Tegra::DepthFormat format,
- u32 block_width, u32 block_height, u32 block_depth,
- Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout type) {
- SurfaceParams params{};
-
- params.is_tiled = type == Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout::BlockLinear;
- params.block_width = 1 << std::min(block_width, 5U);
- params.block_height = 1 << std::min(block_height, 5U);
- params.block_depth = 1 << std::min(block_depth, 5U);
- params.tile_width_spacing = 1;
- params.pixel_format = PixelFormatFromDepthFormat(format);
- params.component_type = ComponentTypeFromDepthFormat(format);
- params.type = GetFormatType(params.pixel_format);
- params.srgb_conversion = false;
- params.width = zeta_width;
- params.height = zeta_height;
- params.unaligned_height = zeta_height;
- params.target = SurfaceTarget::Texture2D;
- params.identity = SurfaceClass::DepthBuffer;
- params.depth = 1;
- params.max_mip_level = 1;
- params.is_layered = false;
- params.rt = {};
-
- params.InitCacheParameters(zeta_address);
-
- return params;
-}
-
-/*static*/ SurfaceParams SurfaceParams::CreateForFermiCopySurface(
- const Tegra::Engines::Fermi2D::Regs::Surface& config) {
- SurfaceParams params{};
-
- params.is_tiled = !config.linear;
- params.block_width = params.is_tiled ? std::min(config.BlockWidth(), 32U) : 0,
- params.block_height = params.is_tiled ? std::min(config.BlockHeight(), 32U) : 0,
- params.block_depth = params.is_tiled ? std::min(config.BlockDepth(), 32U) : 0,
- params.tile_width_spacing = 1;
- params.pixel_format = PixelFormatFromRenderTargetFormat(config.format);
- params.srgb_conversion = config.format == Tegra::RenderTargetFormat::BGRA8_SRGB ||
- config.format == Tegra::RenderTargetFormat::RGBA8_SRGB;
- params.component_type = ComponentTypeFromRenderTarget(config.format);
- params.type = GetFormatType(params.pixel_format);
- params.width = config.width;
- params.pitch = config.pitch;
- params.height = config.height;
- params.unaligned_height = config.height;
- params.target = SurfaceTarget::Texture2D;
- params.identity = SurfaceClass::Copy;
- params.depth = 1;
- params.max_mip_level = 1;
- params.rt = {};
-
- params.InitCacheParameters(config.Address());
-
- return params;
-}
-
-static constexpr std::array<FormatTuple, VideoCore::Surface::MaxPixelFormat> tex_format_tuples = {{
- {GL_RGBA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV, ComponentType::UNorm, false}, // ABGR8U
- {GL_RGBA8, GL_RGBA, GL_BYTE, ComponentType::SNorm, false}, // ABGR8S
- {GL_RGBA8UI, GL_RGBA_INTEGER, GL_UNSIGNED_BYTE, ComponentType::UInt, false}, // ABGR8UI
- {GL_RGB8, GL_RGB, GL_UNSIGNED_SHORT_5_6_5_REV, ComponentType::UNorm, false}, // B5G6R5U
- {GL_RGB10_A2, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, ComponentType::UNorm,
- false}, // A2B10G10R10U
- {GL_RGB5_A1, GL_RGBA, GL_UNSIGNED_SHORT_1_5_5_5_REV, ComponentType::UNorm, false}, // A1B5G5R5U
- {GL_R8, GL_RED, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // R8U
- {GL_R8UI, GL_RED_INTEGER, GL_UNSIGNED_BYTE, ComponentType::UInt, false}, // R8UI
- {GL_RGBA16F, GL_RGBA, GL_HALF_FLOAT, ComponentType::Float, false}, // RGBA16F
- {GL_RGBA16, GL_RGBA, GL_UNSIGNED_SHORT, ComponentType::UNorm, false}, // RGBA16U
- {GL_RGBA16UI, GL_RGBA_INTEGER, GL_UNSIGNED_SHORT, ComponentType::UInt, false}, // RGBA16UI
- {GL_R11F_G11F_B10F, GL_RGB, GL_UNSIGNED_INT_10F_11F_11F_REV, ComponentType::Float,
- false}, // R11FG11FB10F
- {GL_RGBA32UI, GL_RGBA_INTEGER, GL_UNSIGNED_INT, ComponentType::UInt, false}, // RGBA32UI
- {GL_COMPRESSED_RGBA_S3TC_DXT1_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
- true}, // DXT1
- {GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
- true}, // DXT23
- {GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
- true}, // DXT45
- {GL_COMPRESSED_RED_RGTC1, GL_RED, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm, true}, // DXN1
- {GL_COMPRESSED_RG_RGTC2, GL_RG, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
- true}, // DXN2UNORM
- {GL_COMPRESSED_SIGNED_RG_RGTC2, GL_RG, GL_INT, ComponentType::SNorm, true}, // DXN2SNORM
- {GL_COMPRESSED_RGBA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
- true}, // BC7U
- {GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT, GL_RGB, GL_UNSIGNED_INT_8_8_8_8, ComponentType::Float,
- true}, // BC6H_UF16
- {GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT, GL_RGB, GL_UNSIGNED_INT_8_8_8_8, ComponentType::Float,
- true}, // BC6H_SF16
- {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_4X4
- {GL_RGBA8, GL_BGRA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // BGRA8
- {GL_RGBA32F, GL_RGBA, GL_FLOAT, ComponentType::Float, false}, // RGBA32F
- {GL_RG32F, GL_RG, GL_FLOAT, ComponentType::Float, false}, // RG32F
- {GL_R32F, GL_RED, GL_FLOAT, ComponentType::Float, false}, // R32F
- {GL_R16F, GL_RED, GL_HALF_FLOAT, ComponentType::Float, false}, // R16F
- {GL_R16, GL_RED, GL_UNSIGNED_SHORT, ComponentType::UNorm, false}, // R16U
- {GL_R16_SNORM, GL_RED, GL_SHORT, ComponentType::SNorm, false}, // R16S
- {GL_R16UI, GL_RED_INTEGER, GL_UNSIGNED_SHORT, ComponentType::UInt, false}, // R16UI
- {GL_R16I, GL_RED_INTEGER, GL_SHORT, ComponentType::SInt, false}, // R16I
- {GL_RG16, GL_RG, GL_UNSIGNED_SHORT, ComponentType::UNorm, false}, // RG16
- {GL_RG16F, GL_RG, GL_HALF_FLOAT, ComponentType::Float, false}, // RG16F
- {GL_RG16UI, GL_RG_INTEGER, GL_UNSIGNED_SHORT, ComponentType::UInt, false}, // RG16UI
- {GL_RG16I, GL_RG_INTEGER, GL_SHORT, ComponentType::SInt, false}, // RG16I
- {GL_RG16_SNORM, GL_RG, GL_SHORT, ComponentType::SNorm, false}, // RG16S
- {GL_RGB32F, GL_RGB, GL_FLOAT, ComponentType::Float, false}, // RGB32F
- {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV, ComponentType::UNorm,
- false}, // RGBA8_SRGB
- {GL_RG8, GL_RG, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // RG8U
- {GL_RG8, GL_RG, GL_BYTE, ComponentType::SNorm, false}, // RG8S
- {GL_RG32UI, GL_RG_INTEGER, GL_UNSIGNED_INT, ComponentType::UInt, false}, // RG32UI
- {GL_R32UI, GL_RED_INTEGER, GL_UNSIGNED_INT, ComponentType::UInt, false}, // R32UI
- {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_8X8
- {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_8X5
- {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_5X4
- {GL_SRGB8_ALPHA8, GL_BGRA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // BGRA8_SRGB
- // Compressed sRGB formats
- {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
- true}, // DXT1_SRGB
- {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
- true}, // DXT23_SRGB
- {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
- true}, // DXT45_SRGB
- {GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
- true}, // BC7U_SRGB
- {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_4X4_SRGB
- {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_8X8_SRGB
- {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_8X5_SRGB
- {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_5X4_SRGB
- {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_5X5
- {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_5X5_SRGB
- {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_10X8
- {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_10X8_SRGB
-
- // Depth formats
- {GL_DEPTH_COMPONENT32F, GL_DEPTH_COMPONENT, GL_FLOAT, ComponentType::Float, false}, // Z32F
- {GL_DEPTH_COMPONENT16, GL_DEPTH_COMPONENT, GL_UNSIGNED_SHORT, ComponentType::UNorm,
- false}, // Z16
-
- // DepthStencil formats
- {GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8, ComponentType::UNorm,
- false}, // Z24S8
- {GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8, ComponentType::UNorm,
- false}, // S8Z24
- {GL_DEPTH32F_STENCIL8, GL_DEPTH_STENCIL, GL_FLOAT_32_UNSIGNED_INT_24_8_REV,
- ComponentType::Float, false}, // Z32FS8
-}};
-
-static GLenum SurfaceTargetToGL(SurfaceTarget target) {
- switch (target) {
- case SurfaceTarget::Texture1D:
- return GL_TEXTURE_1D;
- case SurfaceTarget::Texture2D:
- return GL_TEXTURE_2D;
- case SurfaceTarget::Texture3D:
- return GL_TEXTURE_3D;
- case SurfaceTarget::Texture1DArray:
- return GL_TEXTURE_1D_ARRAY;
- case SurfaceTarget::Texture2DArray:
- return GL_TEXTURE_2D_ARRAY;
- case SurfaceTarget::TextureCubemap:
- return GL_TEXTURE_CUBE_MAP;
- case SurfaceTarget::TextureCubeArray:
- return GL_TEXTURE_CUBE_MAP_ARRAY;
- }
- LOG_CRITICAL(Render_OpenGL, "Unimplemented texture target={}", static_cast<u32>(target));
- UNREACHABLE();
- return {};
-}
-
-static const FormatTuple& GetFormatTuple(PixelFormat pixel_format, ComponentType component_type) {
- ASSERT(static_cast<std::size_t>(pixel_format) < tex_format_tuples.size());
- auto& format = tex_format_tuples[static_cast<unsigned int>(pixel_format)];
- ASSERT(component_type == format.component_type);
-
- return format;
-}
-
- /// Returns the array/non-array counterpart of the given texture target
-constexpr GLenum GetArrayDiscrepantTarget(SurfaceTarget target) {
- switch (target) {
- case SurfaceTarget::Texture1D:
- return GL_TEXTURE_1D_ARRAY;
- case SurfaceTarget::Texture2D:
- return GL_TEXTURE_2D_ARRAY;
- case SurfaceTarget::Texture3D:
- return GL_NONE;
- case SurfaceTarget::Texture1DArray:
- return GL_TEXTURE_1D;
- case SurfaceTarget::Texture2DArray:
- return GL_TEXTURE_2D;
- case SurfaceTarget::TextureCubemap:
- return GL_TEXTURE_CUBE_MAP_ARRAY;
- case SurfaceTarget::TextureCubeArray:
- return GL_TEXTURE_CUBE_MAP;
- }
- return GL_NONE;
-}
-
-Common::Rectangle<u32> SurfaceParams::GetRect(u32 mip_level) const {
- u32 actual_height{std::max(1U, unaligned_height >> mip_level)};
- if (IsPixelFormatASTC(pixel_format)) {
- // ASTC formats must stop at the ASTC block size boundary
- actual_height = Common::AlignDown(actual_height, GetASTCBlockSize(pixel_format).second);
- }
- return {0, actual_height, MipWidth(mip_level), 0};
-}
-
-void SwizzleFunc(const MortonSwizzleMode& mode, const SurfaceParams& params,
- std::vector<u8>& gl_buffer, u32 mip_level) {
- u32 depth = params.MipDepth(mip_level);
- if (params.target == SurfaceTarget::Texture2D) {
- // TODO(Blinkhawk): Eliminate this condition once all texture types are implemented.
- depth = 1U;
- }
- if (params.is_layered) {
- u64 offset = params.GetMipmapLevelOffset(mip_level);
- u64 offset_gl = 0;
- const u64 layer_size = params.LayerMemorySize();
- const u64 gl_size = params.LayerSizeGL(mip_level);
- for (u32 i = 0; i < params.depth; i++) {
- MortonSwizzle(mode, params.pixel_format, params.MipWidth(mip_level),
- params.MipBlockHeight(mip_level), params.MipHeight(mip_level),
- params.MipBlockDepth(mip_level), 1, params.tile_width_spacing,
- gl_buffer.data() + offset_gl, params.host_ptr + offset);
- offset += layer_size;
- offset_gl += gl_size;
- }
- } else {
- const u64 offset = params.GetMipmapLevelOffset(mip_level);
- MortonSwizzle(mode, params.pixel_format, params.MipWidth(mip_level),
- params.MipBlockHeight(mip_level), params.MipHeight(mip_level),
- params.MipBlockDepth(mip_level), depth, params.tile_width_spacing,
- gl_buffer.data(), params.host_ptr + offset);
- }
-}
-
-void RasterizerCacheOpenGL::FastCopySurface(const Surface& src_surface,
- const Surface& dst_surface) {
- const auto& src_params{src_surface->GetSurfaceParams()};
- const auto& dst_params{dst_surface->GetSurfaceParams()};
-
- const u32 width{std::min(src_params.width, dst_params.width)};
- const u32 height{std::min(src_params.height, dst_params.height)};
-
- glCopyImageSubData(src_surface->Texture().handle, SurfaceTargetToGL(src_params.target), 0, 0, 0,
- 0, dst_surface->Texture().handle, SurfaceTargetToGL(dst_params.target), 0, 0,
- 0, 0, width, height, 1);
-
- dst_surface->MarkAsModified(true, *this);
-}
-
-MICROPROFILE_DEFINE(OpenGL_CopySurface, "OpenGL", "CopySurface", MP_RGB(128, 192, 64));
-void RasterizerCacheOpenGL::CopySurface(const Surface& src_surface, const Surface& dst_surface,
- const GLuint copy_pbo_handle, const GLenum src_attachment,
- const GLenum dst_attachment,
- const std::size_t cubemap_face) {
- MICROPROFILE_SCOPE(OpenGL_CopySurface);
- ASSERT_MSG(dst_attachment == 0, "Unimplemented");
-
- const auto& src_params{src_surface->GetSurfaceParams()};
- const auto& dst_params{dst_surface->GetSurfaceParams()};
-
- const auto source_format = GetFormatTuple(src_params.pixel_format, src_params.component_type);
- const auto dest_format = GetFormatTuple(dst_params.pixel_format, dst_params.component_type);
-
- const std::size_t buffer_size = std::max(src_params.size_in_bytes, dst_params.size_in_bytes);
-
- glBindBuffer(GL_PIXEL_PACK_BUFFER, copy_pbo_handle);
- glBufferData(GL_PIXEL_PACK_BUFFER, buffer_size, nullptr, GL_STREAM_COPY);
- if (source_format.compressed) {
- glGetCompressedTextureImage(src_surface->Texture().handle, src_attachment,
- static_cast<GLsizei>(src_params.size_in_bytes), nullptr);
- } else {
- glGetTextureImage(src_surface->Texture().handle, src_attachment, source_format.format,
- source_format.type, static_cast<GLsizei>(src_params.size_in_bytes),
- nullptr);
- }
- // If the new texture is bigger than the previous one, we need to fill in the rest with data
- // from the CPU.
- if (src_params.size_in_bytes < dst_params.size_in_bytes) {
- // Upload the rest of the memory.
- if (dst_params.is_tiled) {
- // TODO(Subv): We might have to de-tile the subtexture and re-tile it with the rest
- // of the data in this case. Games like Super Mario Odyssey seem to hit this case
- // when drawing, it re-uses the memory of a previous texture as a bigger framebuffer
- // but it doesn't clear it beforehand, the texture is already full of zeros.
- LOG_DEBUG(HW_GPU, "Trying to upload extra texture data from the CPU during "
- "reinterpretation but the texture is tiled.");
- }
- const std::size_t remaining_size = dst_params.size_in_bytes - src_params.size_in_bytes;
- auto& memory_manager{Core::System::GetInstance().GPU().MemoryManager()};
- glBufferSubData(GL_PIXEL_PACK_BUFFER, src_params.size_in_bytes, remaining_size,
- memory_manager.GetPointer(dst_params.gpu_addr + src_params.size_in_bytes));
- }
-
- glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
-
- const GLsizei width{static_cast<GLsizei>(
- std::min(src_params.GetRect().GetWidth(), dst_params.GetRect().GetWidth()))};
- const GLsizei height{static_cast<GLsizei>(
- std::min(src_params.GetRect().GetHeight(), dst_params.GetRect().GetHeight()))};
-
- glBindBuffer(GL_PIXEL_UNPACK_BUFFER, copy_pbo_handle);
- if (dest_format.compressed) {
- LOG_CRITICAL(HW_GPU, "Compressed copy is unimplemented!");
- UNREACHABLE();
- } else {
- switch (dst_params.target) {
- case SurfaceTarget::Texture1D:
- glTextureSubImage1D(dst_surface->Texture().handle, 0, 0, width, dest_format.format,
- dest_format.type, nullptr);
- break;
- case SurfaceTarget::Texture2D:
- glTextureSubImage2D(dst_surface->Texture().handle, 0, 0, 0, width, height,
- dest_format.format, dest_format.type, nullptr);
- break;
- case SurfaceTarget::Texture3D:
- case SurfaceTarget::Texture2DArray:
- case SurfaceTarget::TextureCubeArray:
- glTextureSubImage3D(dst_surface->Texture().handle, 0, 0, 0, 0, width, height,
- static_cast<GLsizei>(dst_params.depth), dest_format.format,
- dest_format.type, nullptr);
- break;
- case SurfaceTarget::TextureCubemap:
- glTextureSubImage3D(dst_surface->Texture().handle, 0, 0, 0,
- static_cast<GLint>(cubemap_face), width, height, 1,
- dest_format.format, dest_format.type, nullptr);
- break;
- default:
- LOG_CRITICAL(Render_OpenGL, "Unimplemented surface target={}",
- static_cast<u32>(dst_params.target));
- UNREACHABLE();
- }
- glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
- }
-
- dst_surface->MarkAsModified(true, *this);
-}
-
-CachedSurface::CachedSurface(const SurfaceParams& params)
- : RasterizerCacheObject{params.host_ptr}, params{params},
- gl_target{SurfaceTargetToGL(params.target)}, cached_size_in_bytes{params.size_in_bytes} {
-
- const auto optional_cpu_addr{
- Core::System::GetInstance().GPU().MemoryManager().GpuToCpuAddress(params.gpu_addr)};
- ASSERT_MSG(optional_cpu_addr, "optional_cpu_addr is invalid");
- cpu_addr = *optional_cpu_addr;
-
- texture.Create(gl_target);
-
- // TODO(Rodrigo): Using params.GetRect() returns a different size than using its Mip*(0)
- // alternatives. This signals a bug in those functions.
- const auto width = static_cast<GLsizei>(params.MipWidth(0));
- const auto height = static_cast<GLsizei>(params.MipHeight(0));
- memory_size = params.MemorySize();
- reinterpreted = false;
-
- const auto& format_tuple = GetFormatTuple(params.pixel_format, params.component_type);
- gl_internal_format = format_tuple.internal_format;
-
- switch (params.target) {
- case SurfaceTarget::Texture1D:
- glTextureStorage1D(texture.handle, params.max_mip_level, format_tuple.internal_format,
- width);
- break;
- case SurfaceTarget::Texture2D:
- case SurfaceTarget::TextureCubemap:
- glTextureStorage2D(texture.handle, params.max_mip_level, format_tuple.internal_format,
- width, height);
- break;
- case SurfaceTarget::Texture3D:
- case SurfaceTarget::Texture2DArray:
- case SurfaceTarget::TextureCubeArray:
- glTextureStorage3D(texture.handle, params.max_mip_level, format_tuple.internal_format,
- width, height, params.depth);
- break;
- default:
- LOG_CRITICAL(Render_OpenGL, "Unimplemented surface target={}",
- static_cast<u32>(params.target));
- UNREACHABLE();
- glTextureStorage2D(texture.handle, params.max_mip_level, format_tuple.internal_format,
- width, height);
- }
-
- ApplyTextureDefaults(texture.handle, params.max_mip_level);
-
- OpenGL::LabelGLObject(GL_TEXTURE, texture.handle, params.gpu_addr, params.IdentityString());
-}
-
-MICROPROFILE_DEFINE(OpenGL_SurfaceLoad, "OpenGL", "Surface Load", MP_RGB(128, 192, 64));
-void CachedSurface::LoadGLBuffer(RasterizerTemporaryMemory& res_cache_tmp_mem) {
- MICROPROFILE_SCOPE(OpenGL_SurfaceLoad);
- auto& gl_buffer = res_cache_tmp_mem.gl_buffer;
- if (gl_buffer.size() < params.max_mip_level)
- gl_buffer.resize(params.max_mip_level);
- for (u32 i = 0; i < params.max_mip_level; i++)
- gl_buffer[i].resize(params.GetMipmapSizeGL(i));
- if (params.is_tiled) {
- ASSERT_MSG(params.block_width == 1, "Block width is defined as {} on texture type {}",
- params.block_width, static_cast<u32>(params.target));
- for (u32 i = 0; i < params.max_mip_level; i++)
- SwizzleFunc(MortonSwizzleMode::MortonToLinear, params, gl_buffer[i], i);
- } else {
- const u32 bpp = params.GetFormatBpp() / 8;
- const u32 copy_size = (params.width * bpp + GetDefaultBlockWidth(params.pixel_format) - 1) /
- GetDefaultBlockWidth(params.pixel_format);
- if (params.pitch == copy_size) {
- std::memcpy(gl_buffer[0].data(), params.host_ptr, params.size_in_bytes_gl);
- } else {
- const u32 height = (params.height + GetDefaultBlockHeight(params.pixel_format) - 1) /
- GetDefaultBlockHeight(params.pixel_format);
- const u8* start{params.host_ptr};
- u8* write_to = gl_buffer[0].data();
- for (u32 h = height; h > 0; h--) {
- std::memcpy(write_to, start, copy_size);
- start += params.pitch;
- write_to += copy_size;
- }
- }
- }
- for (u32 i = 0; i < params.max_mip_level; i++) {
- const u32 width = params.MipWidth(i);
- const u32 height = params.MipHeight(i);
- const u32 depth = params.MipDepth(i);
- if (VideoCore::Surface::IsPixelFormatASTC(params.pixel_format)) {
- // Reserve size for RGBA8 conversion
- constexpr std::size_t rgba_bpp = 4;
- gl_buffer[i].resize(std::max(gl_buffer[i].size(), width * height * depth * rgba_bpp));
- }
- Tegra::Texture::ConvertFromGuestToHost(gl_buffer[i].data(), params.pixel_format, width,
- height, depth, true, true);
- }
-}
-
-MICROPROFILE_DEFINE(OpenGL_SurfaceFlush, "OpenGL", "Surface Flush", MP_RGB(128, 192, 64));
-void CachedSurface::FlushGLBuffer(RasterizerTemporaryMemory& res_cache_tmp_mem) {
- MICROPROFILE_SCOPE(OpenGL_SurfaceFlush);
-
- ASSERT_MSG(!IsPixelFormatASTC(params.pixel_format), "Unimplemented");
-
- auto& gl_buffer = res_cache_tmp_mem.gl_buffer;
- // The OpenGL temporary buffer needs to be big enough to hold the raw texture size
- gl_buffer[0].resize(GetSizeInBytes());
-
- const FormatTuple& tuple = GetFormatTuple(params.pixel_format, params.component_type);
- const u32 align = std::clamp(params.RowAlign(0), 1U, 8U);
- glPixelStorei(GL_PACK_ALIGNMENT, align);
- glPixelStorei(GL_PACK_ROW_LENGTH, static_cast<GLint>(params.width));
- ASSERT(!tuple.compressed);
- glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
- glGetTextureImage(texture.handle, 0, tuple.format, tuple.type,
- static_cast<GLsizei>(gl_buffer[0].size()), gl_buffer[0].data());
- glPixelStorei(GL_PACK_ROW_LENGTH, 0);
- Tegra::Texture::ConvertFromHostToGuest(gl_buffer[0].data(), params.pixel_format, params.width,
- params.height, params.depth, true, true);
- if (params.is_tiled) {
- ASSERT_MSG(params.block_width == 1, "Block width is defined as {} on texture type {}",
- params.block_width, static_cast<u32>(params.target));
-
- SwizzleFunc(MortonSwizzleMode::LinearToMorton, params, gl_buffer[0], 0);
- } else {
- const u32 bpp = params.GetFormatBpp() / 8;
- const u32 copy_size = params.width * bpp;
- if (params.pitch == copy_size) {
- std::memcpy(params.host_ptr, gl_buffer[0].data(), GetSizeInBytes());
- } else {
- u8* start{params.host_ptr};
- const u8* read_to = gl_buffer[0].data();
- for (u32 h = params.height; h > 0; h--) {
- std::memcpy(start, read_to, copy_size);
- start += params.pitch;
- read_to += copy_size;
- }
- }
- }
-}
-
-void CachedSurface::UploadGLMipmapTexture(RasterizerTemporaryMemory& res_cache_tmp_mem, u32 mip_map,
- GLuint read_fb_handle, GLuint draw_fb_handle) {
- const auto& rect{params.GetRect(mip_map)};
-
- auto& gl_buffer = res_cache_tmp_mem.gl_buffer;
-
- // Load data from memory to the surface
- const auto x0 = static_cast<GLint>(rect.left);
- const auto y0 = static_cast<GLint>(rect.bottom);
- auto buffer_offset =
- static_cast<std::size_t>(static_cast<std::size_t>(y0) * params.MipWidth(mip_map) +
- static_cast<std::size_t>(x0)) *
- GetBytesPerPixel(params.pixel_format);
-
- const FormatTuple& tuple = GetFormatTuple(params.pixel_format, params.component_type);
-
- const u32 align = std::clamp(params.RowAlign(mip_map), 1U, 8U);
- glPixelStorei(GL_UNPACK_ALIGNMENT, align);
- glPixelStorei(GL_UNPACK_ROW_LENGTH, static_cast<GLint>(params.MipWidth(mip_map)));
-
- const auto image_size = static_cast<GLsizei>(params.GetMipmapSizeGL(mip_map, false));
- if (tuple.compressed) {
- switch (params.target) {
- case SurfaceTarget::Texture2D:
- glCompressedTextureSubImage2D(
- texture.handle, mip_map, 0, 0, static_cast<GLsizei>(params.MipWidth(mip_map)),
- static_cast<GLsizei>(params.MipHeight(mip_map)), tuple.internal_format, image_size,
- &gl_buffer[mip_map][buffer_offset]);
- break;
- case SurfaceTarget::Texture3D:
- glCompressedTextureSubImage3D(
- texture.handle, mip_map, 0, 0, 0, static_cast<GLsizei>(params.MipWidth(mip_map)),
- static_cast<GLsizei>(params.MipHeight(mip_map)),
- static_cast<GLsizei>(params.MipDepth(mip_map)), tuple.internal_format, image_size,
- &gl_buffer[mip_map][buffer_offset]);
- break;
- case SurfaceTarget::Texture2DArray:
- case SurfaceTarget::TextureCubeArray:
- glCompressedTextureSubImage3D(
- texture.handle, mip_map, 0, 0, 0, static_cast<GLsizei>(params.MipWidth(mip_map)),
- static_cast<GLsizei>(params.MipHeight(mip_map)), static_cast<GLsizei>(params.depth),
- tuple.internal_format, image_size, &gl_buffer[mip_map][buffer_offset]);
- break;
- case SurfaceTarget::TextureCubemap: {
- const auto layer_size = static_cast<GLsizei>(params.LayerSizeGL(mip_map));
- for (std::size_t face = 0; face < params.depth; ++face) {
- glCompressedTextureSubImage3D(
- texture.handle, mip_map, 0, 0, static_cast<GLint>(face),
- static_cast<GLsizei>(params.MipWidth(mip_map)),
- static_cast<GLsizei>(params.MipHeight(mip_map)), 1, tuple.internal_format,
- layer_size, &gl_buffer[mip_map][buffer_offset]);
- buffer_offset += layer_size;
- }
- break;
- }
- default:
- LOG_CRITICAL(Render_OpenGL, "Unimplemented surface target={}",
- static_cast<u32>(params.target));
- UNREACHABLE();
- glCompressedTextureSubImage2D(
- texture.handle, mip_map, 0, 0, static_cast<GLsizei>(params.MipWidth(mip_map)),
- static_cast<GLsizei>(params.MipHeight(mip_map)), tuple.internal_format,
- static_cast<GLsizei>(params.size_in_bytes_gl), &gl_buffer[mip_map][buffer_offset]);
- }
- } else {
- switch (params.target) {
- case SurfaceTarget::Texture1D:
- glTextureSubImage1D(texture.handle, mip_map, x0, static_cast<GLsizei>(rect.GetWidth()),
- tuple.format, tuple.type, &gl_buffer[mip_map][buffer_offset]);
- break;
- case SurfaceTarget::Texture2D:
- glTextureSubImage2D(texture.handle, mip_map, x0, y0,
- static_cast<GLsizei>(rect.GetWidth()),
- static_cast<GLsizei>(rect.GetHeight()), tuple.format, tuple.type,
- &gl_buffer[mip_map][buffer_offset]);
- break;
- case SurfaceTarget::Texture3D:
- glTextureSubImage3D(texture.handle, mip_map, x0, y0, 0,
- static_cast<GLsizei>(rect.GetWidth()),
- static_cast<GLsizei>(rect.GetHeight()), params.MipDepth(mip_map),
- tuple.format, tuple.type, &gl_buffer[mip_map][buffer_offset]);
- break;
- case SurfaceTarget::Texture2DArray:
- case SurfaceTarget::TextureCubeArray:
- glTextureSubImage3D(texture.handle, mip_map, x0, y0, 0,
- static_cast<GLsizei>(rect.GetWidth()),
- static_cast<GLsizei>(rect.GetHeight()), params.depth, tuple.format,
- tuple.type, &gl_buffer[mip_map][buffer_offset]);
- break;
- case SurfaceTarget::TextureCubemap: {
- for (std::size_t face = 0; face < params.depth; ++face) {
- glTextureSubImage3D(texture.handle, mip_map, x0, y0, static_cast<GLint>(face),
- static_cast<GLsizei>(rect.GetWidth()),
- static_cast<GLsizei>(rect.GetHeight()), 1, tuple.format,
- tuple.type, &gl_buffer[mip_map][buffer_offset]);
- buffer_offset += params.LayerSizeGL(mip_map);
- }
- break;
- }
- default:
- LOG_CRITICAL(Render_OpenGL, "Unimplemented surface target={}",
- static_cast<u32>(params.target));
- UNREACHABLE();
- glTextureSubImage2D(texture.handle, mip_map, x0, y0,
- static_cast<GLsizei>(rect.GetWidth()),
- static_cast<GLsizei>(rect.GetHeight()), tuple.format, tuple.type,
- &gl_buffer[mip_map][buffer_offset]);
- }
- }
-
- glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
-}
-
-void CachedSurface::EnsureTextureDiscrepantView() {
- if (discrepant_view.handle != 0)
- return;
-
- const GLenum target{GetArrayDiscrepantTarget(params.target)};
- ASSERT(target != GL_NONE);
-
- const GLuint num_layers{target == GL_TEXTURE_CUBE_MAP_ARRAY ? 6u : 1u};
- constexpr GLuint min_layer = 0;
- constexpr GLuint min_level = 0;
-
- glGenTextures(1, &discrepant_view.handle);
- glTextureView(discrepant_view.handle, target, texture.handle, gl_internal_format, min_level,
- params.max_mip_level, min_layer, num_layers);
- ApplyTextureDefaults(discrepant_view.handle, params.max_mip_level);
- glTextureParameteriv(discrepant_view.handle, GL_TEXTURE_SWIZZLE_RGBA,
- reinterpret_cast<const GLint*>(swizzle.data()));
-}
-
-MICROPROFILE_DEFINE(OpenGL_TextureUL, "OpenGL", "Texture Upload", MP_RGB(128, 192, 64));
-void CachedSurface::UploadGLTexture(RasterizerTemporaryMemory& res_cache_tmp_mem,
- GLuint read_fb_handle, GLuint draw_fb_handle) {
- MICROPROFILE_SCOPE(OpenGL_TextureUL);
-
- for (u32 i = 0; i < params.max_mip_level; i++)
- UploadGLMipmapTexture(res_cache_tmp_mem, i, read_fb_handle, draw_fb_handle);
-}
-
-void CachedSurface::UpdateSwizzle(Tegra::Texture::SwizzleSource swizzle_x,
- Tegra::Texture::SwizzleSource swizzle_y,
- Tegra::Texture::SwizzleSource swizzle_z,
- Tegra::Texture::SwizzleSource swizzle_w) {
- const GLenum new_x = MaxwellToGL::SwizzleSource(swizzle_x);
- const GLenum new_y = MaxwellToGL::SwizzleSource(swizzle_y);
- const GLenum new_z = MaxwellToGL::SwizzleSource(swizzle_z);
- const GLenum new_w = MaxwellToGL::SwizzleSource(swizzle_w);
- if (swizzle[0] == new_x && swizzle[1] == new_y && swizzle[2] == new_z && swizzle[3] == new_w) {
- return;
- }
- swizzle = {new_x, new_y, new_z, new_w};
- const auto swizzle_data = reinterpret_cast<const GLint*>(swizzle.data());
- glTextureParameteriv(texture.handle, GL_TEXTURE_SWIZZLE_RGBA, swizzle_data);
- if (discrepant_view.handle != 0) {
- glTextureParameteriv(discrepant_view.handle, GL_TEXTURE_SWIZZLE_RGBA, swizzle_data);
- }
-}
-
-RasterizerCacheOpenGL::RasterizerCacheOpenGL(RasterizerOpenGL& rasterizer)
- : RasterizerCache{rasterizer} {
- read_framebuffer.Create();
- draw_framebuffer.Create();
- copy_pbo.Create();
-}
-
-Surface RasterizerCacheOpenGL::GetTextureSurface(const Tegra::Texture::FullTextureInfo& config,
- const GLShader::SamplerEntry& entry) {
- return GetSurface(SurfaceParams::CreateForTexture(config, entry));
-}
-
-Surface RasterizerCacheOpenGL::GetDepthBufferSurface(bool preserve_contents) {
- auto& gpu{Core::System::GetInstance().GPU().Maxwell3D()};
- const auto& regs{gpu.regs};
-
- if (!gpu.dirty_flags.zeta_buffer) {
- return last_depth_buffer;
- }
- gpu.dirty_flags.zeta_buffer = false;
-
- if (!regs.zeta.Address() || !regs.zeta_enable) {
- return last_depth_buffer = {};
- }
-
- SurfaceParams depth_params{SurfaceParams::CreateForDepthBuffer(
- regs.zeta_width, regs.zeta_height, regs.zeta.Address(), regs.zeta.format,
- regs.zeta.memory_layout.block_width, regs.zeta.memory_layout.block_height,
- regs.zeta.memory_layout.block_depth, regs.zeta.memory_layout.type)};
-
- return last_depth_buffer = GetSurface(depth_params, preserve_contents);
-}
-
-Surface RasterizerCacheOpenGL::GetColorBufferSurface(std::size_t index, bool preserve_contents) {
- auto& gpu{Core::System::GetInstance().GPU().Maxwell3D()};
- const auto& regs{gpu.regs};
-
- if (!gpu.dirty_flags.color_buffer[index]) {
- return current_color_buffers[index];
- }
- gpu.dirty_flags.color_buffer.reset(index);
-
- ASSERT(index < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets);
-
- if (index >= regs.rt_control.count) {
- return current_color_buffers[index] = {};
- }
-
- if (regs.rt[index].Address() == 0 || regs.rt[index].format == Tegra::RenderTargetFormat::NONE) {
- return current_color_buffers[index] = {};
- }
-
- const SurfaceParams color_params{SurfaceParams::CreateForFramebuffer(index)};
-
- return current_color_buffers[index] = GetSurface(color_params, preserve_contents);
-}
-
-void RasterizerCacheOpenGL::LoadSurface(const Surface& surface) {
- surface->LoadGLBuffer(temporal_memory);
- surface->UploadGLTexture(temporal_memory, read_framebuffer.handle, draw_framebuffer.handle);
- surface->MarkAsModified(false, *this);
- surface->MarkForReload(false);
-}
-
-Surface RasterizerCacheOpenGL::GetSurface(const SurfaceParams& params, bool preserve_contents) {
- if (!params.IsValid()) {
- return {};
- }
-
- // Look up surface in the cache based on address
- Surface surface{TryGet(params.host_ptr)};
- if (surface) {
- if (surface->GetSurfaceParams().IsCompatibleSurface(params)) {
- // Use the cached surface as-is, unless it is out of sync with memory
- if (surface->MustReload())
- LoadSurface(surface);
- return surface;
- } else if (preserve_contents) {
- // If surface parameters changed and we care about keeping the previous data, recreate
- // the surface from the old one
- Surface new_surface{RecreateSurface(surface, params)};
- Unregister(surface);
- Register(new_surface);
- if (new_surface->IsUploaded()) {
- RegisterReinterpretSurface(new_surface);
- }
- return new_surface;
- } else {
- // Delete the old surface before creating a new one to prevent collisions.
- Unregister(surface);
- }
- }
-
- // No cached surface found - get a new one
- surface = GetUncachedSurface(params);
- Register(surface);
-
- // Only load surface from memory if we care about the contents
- if (preserve_contents) {
- LoadSurface(surface);
- }
-
- return surface;
-}
-
-Surface RasterizerCacheOpenGL::GetUncachedSurface(const SurfaceParams& params) {
- Surface surface{TryGetReservedSurface(params)};
- if (!surface) {
- // No reserved surface available, create a new one and reserve it
- surface = std::make_shared<CachedSurface>(params);
- ReserveSurface(surface);
- }
- return surface;
-}
-
-void RasterizerCacheOpenGL::FastLayeredCopySurface(const Surface& src_surface,
- const Surface& dst_surface) {
- const auto& init_params{src_surface->GetSurfaceParams()};
- const auto& dst_params{dst_surface->GetSurfaceParams()};
- auto& memory_manager{Core::System::GetInstance().GPU().MemoryManager()};
- GPUVAddr address{init_params.gpu_addr};
- const std::size_t layer_size{dst_params.LayerMemorySize()};
- for (u32 layer = 0; layer < dst_params.depth; layer++) {
- for (u32 mipmap = 0; mipmap < dst_params.max_mip_level; mipmap++) {
- const GPUVAddr sub_address{address + dst_params.GetMipmapLevelOffset(mipmap)};
- const Surface& copy{TryGet(memory_manager.GetPointer(sub_address))};
- if (!copy) {
- continue;
- }
- const auto& src_params{copy->GetSurfaceParams()};
- const u32 width{std::min(src_params.width, dst_params.MipWidth(mipmap))};
- const u32 height{std::min(src_params.height, dst_params.MipHeight(mipmap))};
-
- glCopyImageSubData(copy->Texture().handle, SurfaceTargetToGL(src_params.target), 0, 0,
- 0, 0, dst_surface->Texture().handle,
- SurfaceTargetToGL(dst_params.target), mipmap, 0, 0, layer, width,
- height, 1);
- }
- address += layer_size;
- }
-
- dst_surface->MarkAsModified(true, *this);
-}
-
-static bool BlitSurface(const Surface& src_surface, const Surface& dst_surface,
- const Common::Rectangle<u32>& src_rect,
- const Common::Rectangle<u32>& dst_rect, GLuint read_fb_handle,
- GLuint draw_fb_handle, GLenum src_attachment = 0, GLenum dst_attachment = 0,
- std::size_t cubemap_face = 0) {
-
- const auto& src_params{src_surface->GetSurfaceParams()};
- const auto& dst_params{dst_surface->GetSurfaceParams()};
-
- OpenGLState prev_state{OpenGLState::GetCurState()};
- SCOPE_EXIT({ prev_state.Apply(); });
-
- OpenGLState state;
- state.draw.read_framebuffer = read_fb_handle;
- state.draw.draw_framebuffer = draw_fb_handle;
- state.Apply();
-
- u32 buffers{};
-
- if (src_params.type == SurfaceType::ColorTexture) {
- switch (src_params.target) {
- case SurfaceTarget::Texture2D:
- glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + src_attachment,
- GL_TEXTURE_2D, src_surface->Texture().handle, 0);
- glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D,
- 0, 0);
- break;
- case SurfaceTarget::TextureCubemap:
- glFramebufferTexture2D(
- GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + src_attachment,
- static_cast<GLenum>(GL_TEXTURE_CUBE_MAP_POSITIVE_X + cubemap_face),
- src_surface->Texture().handle, 0);
- glFramebufferTexture2D(
- GL_READ_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT,
- static_cast<GLenum>(GL_TEXTURE_CUBE_MAP_POSITIVE_X + cubemap_face), 0, 0);
- break;
- case SurfaceTarget::Texture2DArray:
- glFramebufferTextureLayer(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + src_attachment,
- src_surface->Texture().handle, 0, 0);
- glFramebufferTextureLayer(GL_READ_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, 0, 0, 0);
- break;
- case SurfaceTarget::Texture3D:
- glFramebufferTexture3D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + src_attachment,
- SurfaceTargetToGL(src_params.target),
- src_surface->Texture().handle, 0, 0);
- glFramebufferTexture3D(GL_READ_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT,
- SurfaceTargetToGL(src_params.target), 0, 0, 0);
- break;
- default:
- glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + src_attachment,
- GL_TEXTURE_2D, src_surface->Texture().handle, 0);
- glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D,
- 0, 0);
- break;
- }
-
- switch (dst_params.target) {
- case SurfaceTarget::Texture2D:
- glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + dst_attachment,
- GL_TEXTURE_2D, dst_surface->Texture().handle, 0);
- glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D,
- 0, 0);
- break;
- case SurfaceTarget::TextureCubemap:
- glFramebufferTexture2D(
- GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + dst_attachment,
- static_cast<GLenum>(GL_TEXTURE_CUBE_MAP_POSITIVE_X + cubemap_face),
- dst_surface->Texture().handle, 0);
- glFramebufferTexture2D(
- GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT,
- static_cast<GLenum>(GL_TEXTURE_CUBE_MAP_POSITIVE_X + cubemap_face), 0, 0);
- break;
- case SurfaceTarget::Texture2DArray:
- glFramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + dst_attachment,
- dst_surface->Texture().handle, 0, 0);
- glFramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, 0, 0, 0);
- break;
-
- case SurfaceTarget::Texture3D:
- glFramebufferTexture3D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + dst_attachment,
- SurfaceTargetToGL(dst_params.target),
- dst_surface->Texture().handle, 0, 0);
- glFramebufferTexture3D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT,
- SurfaceTargetToGL(dst_params.target), 0, 0, 0);
- break;
- default:
- glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + dst_attachment,
- GL_TEXTURE_2D, dst_surface->Texture().handle, 0);
- glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D,
- 0, 0);
- break;
- }
-
- buffers = GL_COLOR_BUFFER_BIT;
- } else if (src_params.type == SurfaceType::Depth) {
- glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + src_attachment,
- GL_TEXTURE_2D, 0, 0);
- glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D,
- src_surface->Texture().handle, 0);
- glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0);
-
- glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + dst_attachment,
- GL_TEXTURE_2D, 0, 0);
- glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D,
- dst_surface->Texture().handle, 0);
- glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0);
-
- buffers = GL_DEPTH_BUFFER_BIT;
- } else if (src_params.type == SurfaceType::DepthStencil) {
- glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + src_attachment,
- GL_TEXTURE_2D, 0, 0);
- glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D,
- src_surface->Texture().handle, 0);
-
- glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + dst_attachment,
- GL_TEXTURE_2D, 0, 0);
- glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D,
- dst_surface->Texture().handle, 0);
-
- buffers = GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT;
- }
-
- glBlitFramebuffer(src_rect.left, src_rect.top, src_rect.right, src_rect.bottom, dst_rect.left,
- dst_rect.top, dst_rect.right, dst_rect.bottom, buffers,
- buffers == GL_COLOR_BUFFER_BIT ? GL_LINEAR : GL_NEAREST);
-
- return true;
-}
-
-void RasterizerCacheOpenGL::FermiCopySurface(
- const Tegra::Engines::Fermi2D::Regs::Surface& src_config,
- const Tegra::Engines::Fermi2D::Regs::Surface& dst_config,
- const Common::Rectangle<u32>& src_rect, const Common::Rectangle<u32>& dst_rect) {
-
- const auto& src_params = SurfaceParams::CreateForFermiCopySurface(src_config);
- const auto& dst_params = SurfaceParams::CreateForFermiCopySurface(dst_config);
-
- ASSERT(src_params.pixel_format == dst_params.pixel_format);
- ASSERT(src_params.block_height == dst_params.block_height);
- ASSERT(src_params.is_tiled == dst_params.is_tiled);
- ASSERT(src_params.depth == dst_params.depth);
- ASSERT(src_params.target == dst_params.target);
- ASSERT(src_params.rt.index == dst_params.rt.index);
-
- auto src_surface = GetSurface(src_params, true);
- auto dst_surface = GetSurface(dst_params, true);
-
- BlitSurface(src_surface, dst_surface, src_rect, dst_rect, read_framebuffer.handle,
- draw_framebuffer.handle);
-
- dst_surface->MarkAsModified(true, *this);
-}
-
-void RasterizerCacheOpenGL::AccurateCopySurface(const Surface& src_surface,
- const Surface& dst_surface) {
- const auto& src_params{src_surface->GetSurfaceParams()};
- const auto& dst_params{dst_surface->GetSurfaceParams()};
-
- // Flush enough memory for both the source and destination surface
- FlushRegion(ToCacheAddr(src_params.host_ptr),
- std::max(src_params.MemorySize(), dst_params.MemorySize()));
-
- LoadSurface(dst_surface);
-}
-
-Surface RasterizerCacheOpenGL::RecreateSurface(const Surface& old_surface,
- const SurfaceParams& new_params) {
- // Verify surface is compatible for blitting
- auto old_params{old_surface->GetSurfaceParams()};
-
- // Get a new surface with the new parameters, and blit the previous surface to it
- Surface new_surface{GetUncachedSurface(new_params)};
-
- // With use_accurate_gpu_emulation enabled, do an accurate surface copy
- if (Settings::values.use_accurate_gpu_emulation) {
- AccurateCopySurface(old_surface, new_surface);
- return new_surface;
- }
-
- const bool old_compressed =
- GetFormatTuple(old_params.pixel_format, old_params.component_type).compressed;
- const bool new_compressed =
- GetFormatTuple(new_params.pixel_format, new_params.component_type).compressed;
- const bool compatible_formats =
- GetFormatBpp(old_params.pixel_format) == GetFormatBpp(new_params.pixel_format) &&
- !(old_compressed || new_compressed);
- // For compatible surfaces, we can just do a fast glCopyImageSubData-based copy
- if (old_params.target == new_params.target && old_params.depth == new_params.depth &&
- old_params.depth == 1 && compatible_formats) {
- FastCopySurface(old_surface, new_surface);
- return new_surface;
- }
-
- switch (new_params.target) {
- case SurfaceTarget::Texture2D:
- CopySurface(old_surface, new_surface, copy_pbo.handle);
- break;
- case SurfaceTarget::Texture3D:
- AccurateCopySurface(old_surface, new_surface);
- break;
- case SurfaceTarget::TextureCubemap:
- case SurfaceTarget::Texture2DArray:
- case SurfaceTarget::TextureCubeArray:
- if (compatible_formats) {
- FastLayeredCopySurface(old_surface, new_surface);
- } else {
- AccurateCopySurface(old_surface, new_surface);
- }
- break;
- default:
- LOG_CRITICAL(Render_OpenGL, "Unimplemented surface target={}",
- static_cast<u32>(new_params.target));
- UNREACHABLE();
- }
-
- return new_surface;
-}
-
-Surface RasterizerCacheOpenGL::TryFindFramebufferSurface(const u8* host_ptr) const {
- return TryGet(host_ptr);
-}
-
-void RasterizerCacheOpenGL::ReserveSurface(const Surface& surface) {
- const auto& surface_reserve_key{SurfaceReserveKey::Create(surface->GetSurfaceParams())};
- surface_reserve[surface_reserve_key] = surface;
-}
-
-Surface RasterizerCacheOpenGL::TryGetReservedSurface(const SurfaceParams& params) {
- const auto& surface_reserve_key{SurfaceReserveKey::Create(params)};
- auto search{surface_reserve.find(surface_reserve_key)};
- if (search != surface_reserve.end()) {
- return search->second;
- }
- return {};
-}
-
-static std::optional<u32> TryFindBestMipMap(std::size_t memory, const SurfaceParams& params,
- u32 height) {
- for (u32 i = 0; i < params.max_mip_level; i++) {
- if (memory == params.GetMipmapSingleSize(i) && params.MipHeight(i) == height) {
- return {i};
- }
- }
- return {};
-}
-
-static std::optional<u32> TryFindBestLayer(GPUVAddr addr, const SurfaceParams& params, u32 mipmap) {
- const std::size_t size{params.LayerMemorySize()};
- GPUVAddr start{params.gpu_addr + params.GetMipmapLevelOffset(mipmap)};
- for (u32 i = 0; i < params.depth; i++) {
- if (start == addr) {
- return {i};
- }
- start += size;
- }
- return {};
-}
-
-static bool LayerFitReinterpretSurface(RasterizerCacheOpenGL& cache, const Surface render_surface,
- const Surface blitted_surface) {
- const auto& dst_params = blitted_surface->GetSurfaceParams();
- const auto& src_params = render_surface->GetSurfaceParams();
- const std::size_t src_memory_size = src_params.size_in_bytes;
- const std::optional<u32> level =
- TryFindBestMipMap(src_memory_size, dst_params, src_params.height);
- if (level.has_value()) {
- if (src_params.width == dst_params.MipWidthGobAligned(*level) &&
- src_params.height == dst_params.MipHeight(*level) &&
- src_params.block_height >= dst_params.MipBlockHeight(*level)) {
- const std::optional<u32> slot =
- TryFindBestLayer(render_surface->GetSurfaceParams().gpu_addr, dst_params, *level);
- if (slot.has_value()) {
- glCopyImageSubData(render_surface->Texture().handle,
- SurfaceTargetToGL(src_params.target), 0, 0, 0, 0,
- blitted_surface->Texture().handle,
- SurfaceTargetToGL(dst_params.target), *level, 0, 0, *slot,
- dst_params.MipWidth(*level), dst_params.MipHeight(*level), 1);
- blitted_surface->MarkAsModified(true, cache);
- return true;
- }
- }
- }
- return false;
-}
-
-static bool IsReinterpretInvalid(const Surface render_surface, const Surface blitted_surface) {
- const VAddr bound1 = blitted_surface->GetCpuAddr() + blitted_surface->GetMemorySize();
- const VAddr bound2 = render_surface->GetCpuAddr() + render_surface->GetMemorySize();
- if (bound2 > bound1)
- return true;
- const auto& dst_params = blitted_surface->GetSurfaceParams();
- const auto& src_params = render_surface->GetSurfaceParams();
- return (dst_params.component_type != src_params.component_type);
-}
-
-static bool IsReinterpretInvalidSecond(const Surface render_surface,
- const Surface blitted_surface) {
- const auto& dst_params = blitted_surface->GetSurfaceParams();
- const auto& src_params = render_surface->GetSurfaceParams();
- return (dst_params.height > src_params.height && dst_params.width > src_params.width);
-}
-
-bool RasterizerCacheOpenGL::PartialReinterpretSurface(Surface triggering_surface,
- Surface intersect) {
- if (IsReinterpretInvalid(triggering_surface, intersect)) {
- Unregister(intersect);
- return false;
- }
- if (!LayerFitReinterpretSurface(*this, triggering_surface, intersect)) {
- if (IsReinterpretInvalidSecond(triggering_surface, intersect)) {
- Unregister(intersect);
- return false;
- }
- FlushObject(intersect);
- FlushObject(triggering_surface);
- intersect->MarkForReload(true);
- }
- return true;
-}
-
-void RasterizerCacheOpenGL::SignalPreDrawCall() {
- if (texception && GLAD_GL_ARB_texture_barrier) {
- glTextureBarrier();
- }
- texception = false;
-}
-
-void RasterizerCacheOpenGL::SignalPostDrawCall() {
- for (u32 i = 0; i < Maxwell::NumRenderTargets; i++) {
- if (current_color_buffers[i] != nullptr) {
- Surface intersect =
- CollideOnReinterpretedSurface(current_color_buffers[i]->GetCacheAddr());
- if (intersect != nullptr) {
- PartialReinterpretSurface(current_color_buffers[i], intersect);
- texception = true;
- }
- }
- }
-}
-
-} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.h b/src/video_core/renderer_opengl/gl_rasterizer_cache.h
deleted file mode 100644
index 6263ef3e7..000000000
--- a/src/video_core/renderer_opengl/gl_rasterizer_cache.h
+++ /dev/null
@@ -1,572 +0,0 @@
-// Copyright 2018 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <array>
-#include <memory>
-#include <string>
-#include <tuple>
-#include <vector>
-
-#include "common/alignment.h"
-#include "common/bit_util.h"
-#include "common/common_types.h"
-#include "common/hash.h"
-#include "common/math_util.h"
-#include "video_core/engines/fermi_2d.h"
-#include "video_core/engines/maxwell_3d.h"
-#include "video_core/rasterizer_cache.h"
-#include "video_core/renderer_opengl/gl_resource_manager.h"
-#include "video_core/renderer_opengl/gl_shader_gen.h"
-#include "video_core/surface.h"
-#include "video_core/textures/decoders.h"
-#include "video_core/textures/texture.h"
-
-namespace OpenGL {
-
-class CachedSurface;
-using Surface = std::shared_ptr<CachedSurface>;
-using SurfaceSurfaceRect_Tuple = std::tuple<Surface, Surface, Common::Rectangle<u32>>;
-
-using SurfaceTarget = VideoCore::Surface::SurfaceTarget;
-using SurfaceType = VideoCore::Surface::SurfaceType;
-using PixelFormat = VideoCore::Surface::PixelFormat;
-using ComponentType = VideoCore::Surface::ComponentType;
-using Maxwell = Tegra::Engines::Maxwell3D::Regs;
-
-struct SurfaceParams {
- enum class SurfaceClass {
- Uploaded,
- RenderTarget,
- DepthBuffer,
- Copy,
- };
-
- static std::string SurfaceTargetName(SurfaceTarget target) {
- switch (target) {
- case SurfaceTarget::Texture1D:
- return "Texture1D";
- case SurfaceTarget::Texture2D:
- return "Texture2D";
- case SurfaceTarget::Texture3D:
- return "Texture3D";
- case SurfaceTarget::Texture1DArray:
- return "Texture1DArray";
- case SurfaceTarget::Texture2DArray:
- return "Texture2DArray";
- case SurfaceTarget::TextureCubemap:
- return "TextureCubemap";
- case SurfaceTarget::TextureCubeArray:
- return "TextureCubeArray";
- default:
- LOG_CRITICAL(HW_GPU, "Unimplemented surface_target={}", static_cast<u32>(target));
- UNREACHABLE();
- return fmt::format("TextureUnknown({})", static_cast<u32>(target));
- }
- }
-
- u32 GetFormatBpp() const {
- return VideoCore::Surface::GetFormatBpp(pixel_format);
- }
-
- /// Returns the rectangle corresponding to this surface
- Common::Rectangle<u32> GetRect(u32 mip_level = 0) const;
-
- /// Returns the total size of this surface in bytes, adjusted for compression
- std::size_t SizeInBytesRaw(bool ignore_tiled = false) const {
- const u32 compression_factor{GetCompressionFactor(pixel_format)};
- const u32 bytes_per_pixel{GetBytesPerPixel(pixel_format)};
- const size_t uncompressed_size{
- Tegra::Texture::CalculateSize((ignore_tiled ? false : is_tiled), bytes_per_pixel, width,
- height, depth, block_height, block_depth)};
-
- // Divide by compression_factor^2, as both height and width are scaled by this factor
- return uncompressed_size / (compression_factor * compression_factor);
- }
-
- /// Returns the size of this surface as an OpenGL texture in bytes
- std::size_t SizeInBytesGL() const {
- return SizeInBytesRaw(true);
- }
-
- /// Returns the size of this surface as a cube face in bytes
- std::size_t SizeInBytesCubeFace() const {
- return size_in_bytes / 6;
- }
-
- /// Returns the size of this surface as an OpenGL cube face in bytes
- std::size_t SizeInBytesCubeFaceGL() const {
- return size_in_bytes_gl / 6;
- }
-
- /// Returns the exact size of memory occupied by the texture in VRAM, including mipmaps.
- std::size_t MemorySize() const {
- std::size_t size = InnerMemorySize(false, is_layered);
- if (is_layered)
- return size * depth;
- return size;
- }
-
- /// Returns true if the parameters constitute a valid rasterizer surface.
- bool IsValid() const {
- return gpu_addr && host_ptr && height && width;
- }
-
- /// Returns the exact size of the memory occupied by a layer in a texture in VRAM, including
- /// mipmaps.
- std::size_t LayerMemorySize() const {
- return InnerMemorySize(false, true);
- }
-
- /// Returns the size of a layer of this surface in OpenGL.
- std::size_t LayerSizeGL(u32 mip_level) const {
- return InnerMipmapMemorySize(mip_level, true, is_layered, false);
- }
-
- std::size_t GetMipmapSizeGL(u32 mip_level, bool ignore_compressed = true) const {
- std::size_t size = InnerMipmapMemorySize(mip_level, true, is_layered, ignore_compressed);
- if (is_layered)
- return size * depth;
- return size;
- }
-
- std::size_t GetMipmapLevelOffset(u32 mip_level) const {
- std::size_t offset = 0;
- for (u32 i = 0; i < mip_level; i++)
- offset += InnerMipmapMemorySize(i, false, is_layered);
- return offset;
- }
-
- std::size_t GetMipmapLevelOffsetGL(u32 mip_level) const {
- std::size_t offset = 0;
- for (u32 i = 0; i < mip_level; i++)
- offset += InnerMipmapMemorySize(i, true, is_layered);
- return offset;
- }
-
- std::size_t GetMipmapSingleSize(u32 mip_level) const {
- return InnerMipmapMemorySize(mip_level, false, is_layered);
- }
-
- u32 MipWidth(u32 mip_level) const {
- return std::max(1U, width >> mip_level);
- }
-
- u32 MipWidthGobAligned(u32 mip_level) const {
- return Common::AlignUp(std::max(1U, width >> mip_level), 64U * 8U / GetFormatBpp());
- }
-
- u32 MipHeight(u32 mip_level) const {
- return std::max(1U, height >> mip_level);
- }
-
- u32 MipDepth(u32 mip_level) const {
- return is_layered ? depth : std::max(1U, depth >> mip_level);
- }
-
- // Auto block resizing algorithm from:
- // https://cgit.freedesktop.org/mesa/mesa/tree/src/gallium/drivers/nouveau/nv50/nv50_miptree.c
- u32 MipBlockHeight(u32 mip_level) const {
- if (mip_level == 0)
- return block_height;
- u32 alt_height = MipHeight(mip_level);
- u32 h = GetDefaultBlockHeight(pixel_format);
- u32 blocks_in_y = (alt_height + h - 1) / h;
- u32 bh = 16;
- while (bh > 1 && blocks_in_y <= bh * 4) {
- bh >>= 1;
- }
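- // Worked example (illustrative): a mip level 96 texels tall in a format with
- // 8-texel-high blocks gives blocks_in_y = 12; bh halves 16 -> 8 -> 4 -> 2 and
- // stops at 2, the first value for which blocks_in_y exceeds bh * 4.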
- return bh;
- }
-
- u32 MipBlockDepth(u32 mip_level) const {
- if (mip_level == 0) {
- return block_depth;
- }
-
- if (is_layered) {
- return 1;
- }
-
- const u32 mip_depth = MipDepth(mip_level);
- u32 bd = 32;
- while (bd > 1 && mip_depth * 2 <= bd) {
- bd >>= 1;
- }
-
- if (bd == 32) {
- const u32 bh = MipBlockHeight(mip_level);
- if (bh >= 4) {
- return 16;
- }
- }
-
- return bd;
- }
-
- u32 RowAlign(u32 mip_level) const {
- const u32 m_width = MipWidth(mip_level);
- const u32 bytes_per_pixel = GetBytesPerPixel(pixel_format);
- const u32 l2 = Common::CountTrailingZeroes32(m_width * bytes_per_pixel);
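- // Worked example (illustrative): a 100-texel-wide mip of a 4-byte format spans
- // 400 bytes per row; the largest power of two dividing 400 is 16, so this
- // returns 16, which callers then clamp to the [1, 8] range accepted by
- // GL pack/unpack alignment.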
- return (1U << l2);
- }
-
- /// Creates SurfaceParams from a texture configuration
- static SurfaceParams CreateForTexture(const Tegra::Texture::FullTextureInfo& config,
- const GLShader::SamplerEntry& entry);
-
- /// Creates SurfaceParams from a framebuffer configuration
- static SurfaceParams CreateForFramebuffer(std::size_t index);
-
- /// Creates SurfaceParams for a depth buffer configuration
- static SurfaceParams CreateForDepthBuffer(
- u32 zeta_width, u32 zeta_height, GPUVAddr zeta_address, Tegra::DepthFormat format,
- u32 block_width, u32 block_height, u32 block_depth,
- Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout type);
-
- /// Creates SurfaceParams for a Fermi2D surface copy
- static SurfaceParams CreateForFermiCopySurface(
- const Tegra::Engines::Fermi2D::Regs::Surface& config);
-
- /// Checks if surfaces are compatible for caching
- bool IsCompatibleSurface(const SurfaceParams& other) const {
- if (std::tie(pixel_format, type, width, height, target, depth, is_tiled) ==
- std::tie(other.pixel_format, other.type, other.width, other.height, other.target,
- other.depth, other.is_tiled)) {
- if (!is_tiled)
- return true;
- return std::tie(block_height, block_depth, tile_width_spacing) ==
- std::tie(other.block_height, other.block_depth, other.tile_width_spacing);
- }
- return false;
- }
-
- /// Initializes parameters for caching; should be called after everything else has been
- /// initialized
- void InitCacheParameters(GPUVAddr gpu_addr);
-
- std::string TargetName() const {
- switch (target) {
- case SurfaceTarget::Texture1D:
- return "1D";
- case SurfaceTarget::Texture2D:
- return "2D";
- case SurfaceTarget::Texture3D:
- return "3D";
- case SurfaceTarget::Texture1DArray:
- return "1DArray";
- case SurfaceTarget::Texture2DArray:
- return "2DArray";
- case SurfaceTarget::TextureCubemap:
- return "Cube";
- default:
- LOG_CRITICAL(HW_GPU, "Unimplemented surface_target={}", static_cast<u32>(target));
- UNREACHABLE();
- return fmt::format("TUK({})", static_cast<u32>(target));
- }
- }
-
- std::string ClassName() const {
- switch (identity) {
- case SurfaceClass::Uploaded:
- return "UP";
- case SurfaceClass::RenderTarget:
- return "RT";
- case SurfaceClass::DepthBuffer:
- return "DB";
- case SurfaceClass::Copy:
- return "CP";
- default:
- LOG_CRITICAL(HW_GPU, "Unimplemented surface_class={}", static_cast<u32>(identity));
- UNREACHABLE();
- return fmt::format("CUK({})", static_cast<u32>(identity));
- }
- }
-
- std::string IdentityString() const {
- return ClassName() + '_' + TargetName() + '_' + (is_tiled ? 'T' : 'L');
- }
-
- bool is_tiled;
- u32 block_width;
- u32 block_height;
- u32 block_depth;
- u32 tile_width_spacing;
- PixelFormat pixel_format;
- ComponentType component_type;
- SurfaceType type;
- u32 width;
- u32 height;
- u32 depth;
- u32 unaligned_height;
- u32 pitch;
- SurfaceTarget target;
- SurfaceClass identity;
- u32 max_mip_level;
- bool is_layered;
- bool is_array;
- bool srgb_conversion;
- // Parameters used for caching
- u8* host_ptr;
- GPUVAddr gpu_addr;
- std::size_t size_in_bytes;
- std::size_t size_in_bytes_gl;
-
- // Render target specific parameters, not used in caching
- struct {
- u32 index;
- u32 array_mode;
- u32 volume;
- u32 layer_stride;
- u32 base_layer;
- } rt;
-
-private:
- std::size_t InnerMipmapMemorySize(u32 mip_level, bool force_gl = false, bool layer_only = false,
- bool uncompressed = false) const;
- std::size_t InnerMemorySize(bool force_gl = false, bool layer_only = false,
- bool uncompressed = false) const;
-};
-
-} // namespace OpenGL
-
-/// Hashable variation of SurfaceParams, used for a key in the surface cache
-struct SurfaceReserveKey : Common::HashableStruct<OpenGL::SurfaceParams> {
- static SurfaceReserveKey Create(const OpenGL::SurfaceParams& params) {
- SurfaceReserveKey res;
- res.state = params;
- res.state.identity = {}; // Ignore the origin of the texture
- res.state.gpu_addr = {}; // Ignore GPU vaddr in caching
- res.state.rt = {}; // Ignore rt config in caching
- return res;
- }
-};
-namespace std {
-template <>
-struct hash<SurfaceReserveKey> {
- std::size_t operator()(const SurfaceReserveKey& k) const {
- return k.Hash();
- }
-};
-} // namespace std
-
-namespace OpenGL {
-
-class RasterizerOpenGL;
-
-// This is used to store temporary big buffers so that they are not
-// created/destroyed all the time.
-struct RasterizerTemporaryMemory {
- std::vector<std::vector<u8>> gl_buffer;
-};
-
-class CachedSurface final : public RasterizerCacheObject {
-public:
- explicit CachedSurface(const SurfaceParams& params);
-
- VAddr GetCpuAddr() const override {
- return cpu_addr;
- }
-
- std::size_t GetSizeInBytes() const override {
- return cached_size_in_bytes;
- }
-
- std::size_t GetMemorySize() const {
- return memory_size;
- }
-
- const OGLTexture& Texture() const {
- return texture;
- }
-
- const OGLTexture& Texture(bool as_array) {
- if (params.is_array == as_array) {
- return texture;
- } else {
- EnsureTextureDiscrepantView();
- return discrepant_view;
- }
- }
-
- GLenum Target() const {
- return gl_target;
- }
-
- const SurfaceParams& GetSurfaceParams() const {
- return params;
- }
-
- // Read/Write data in Switch memory to/from gl_buffer
- void LoadGLBuffer(RasterizerTemporaryMemory& res_cache_tmp_mem);
- void FlushGLBuffer(RasterizerTemporaryMemory& res_cache_tmp_mem);
-
- // Upload data in gl_buffer to this surface's texture
- void UploadGLTexture(RasterizerTemporaryMemory& res_cache_tmp_mem, GLuint read_fb_handle,
- GLuint draw_fb_handle);
-
- void UpdateSwizzle(Tegra::Texture::SwizzleSource swizzle_x,
- Tegra::Texture::SwizzleSource swizzle_y,
- Tegra::Texture::SwizzleSource swizzle_z,
- Tegra::Texture::SwizzleSource swizzle_w);
-
- void MarkReinterpreted() {
- reinterpreted = true;
- }
-
- bool IsReinterpreted() const {
- return reinterpreted;
- }
-
- void MarkForReload(bool reload) {
- must_reload = reload;
- }
-
- bool MustReload() const {
- return must_reload;
- }
-
- bool IsUploaded() const {
- return params.identity == SurfaceParams::SurfaceClass::Uploaded;
- }
-
-private:
- void UploadGLMipmapTexture(RasterizerTemporaryMemory& res_cache_tmp_mem, u32 mip_map,
- GLuint read_fb_handle, GLuint draw_fb_handle);
-
- void EnsureTextureDiscrepantView();
-
- OGLTexture texture;
- OGLTexture discrepant_view;
- SurfaceParams params{};
- GLenum gl_target{};
- GLenum gl_internal_format{};
- std::size_t cached_size_in_bytes{};
- std::array<GLenum, 4> swizzle{GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA};
- std::size_t memory_size;
- bool reinterpreted = false;
- bool must_reload = false;
- VAddr cpu_addr{};
-};
-
-class RasterizerCacheOpenGL final : public RasterizerCache<Surface> {
-public:
- explicit RasterizerCacheOpenGL(RasterizerOpenGL& rasterizer);
-
- /// Get a surface based on the texture configuration
- Surface GetTextureSurface(const Tegra::Texture::FullTextureInfo& config,
- const GLShader::SamplerEntry& entry);
-
- /// Get the depth surface based on the framebuffer configuration
- Surface GetDepthBufferSurface(bool preserve_contents);
-
- /// Get the color surface based on the framebuffer configuration and the specified render target
- Surface GetColorBufferSurface(std::size_t index, bool preserve_contents);
-
- /// Tries to find a framebuffer surface using the provided CPU address
- Surface TryFindFramebufferSurface(const u8* host_ptr) const;
-
- /// Copies the contents of one surface to another
- void FermiCopySurface(const Tegra::Engines::Fermi2D::Regs::Surface& src_config,
- const Tegra::Engines::Fermi2D::Regs::Surface& dst_config,
- const Common::Rectangle<u32>& src_rect,
- const Common::Rectangle<u32>& dst_rect);
-
- void SignalPreDrawCall();
- void SignalPostDrawCall();
-
-protected:
- void FlushObjectInner(const Surface& object) override {
- object->FlushGLBuffer(temporal_memory);
- }
-
-private:
- void LoadSurface(const Surface& surface);
- Surface GetSurface(const SurfaceParams& params, bool preserve_contents = true);
-
- /// Gets an uncached surface, creating it if need be
- Surface GetUncachedSurface(const SurfaceParams& params);
-
- /// Recreates a surface with new parameters
- Surface RecreateSurface(const Surface& old_surface, const SurfaceParams& new_params);
-
- /// Reserves a unique surface that can be reused later
- void ReserveSurface(const Surface& surface);
-
- /// Tries to get a reserved surface for the specified parameters
- Surface TryGetReservedSurface(const SurfaceParams& params);
-
- // Partially reinterprets a surface based on a triggering_surface that collides with it.
- // Returns true if the reinterpretation was successful, false otherwise.
- bool PartialReinterpretSurface(Surface triggering_surface, Surface intersect);
-
- /// Performs a slow but accurate surface copy, flushing to RAM and reinterpreting the data
- void AccurateCopySurface(const Surface& src_surface, const Surface& dst_surface);
- void FastLayeredCopySurface(const Surface& src_surface, const Surface& dst_surface);
- void FastCopySurface(const Surface& src_surface, const Surface& dst_surface);
- void CopySurface(const Surface& src_surface, const Surface& dst_surface,
- const GLuint copy_pbo_handle, const GLenum src_attachment = 0,
- const GLenum dst_attachment = 0, const std::size_t cubemap_face = 0);
-
- /// The surface reserve is a "backup" cache; this is where we put unique surfaces that have
- /// previously been used. This is to prevent surfaces from being constantly created and
- /// destroyed when used with different surface parameters.
- std::unordered_map<SurfaceReserveKey, Surface> surface_reserve;
-
- OGLFramebuffer read_framebuffer;
- OGLFramebuffer draw_framebuffer;
-
- bool texception = false;
-
- /// Use a Pixel Buffer Object to download the previous texture and then upload it to the new one
- /// using the new format.
- OGLBuffer copy_pbo;
-
- std::array<Surface, Maxwell::NumRenderTargets> last_color_buffers;
- std::array<Surface, Maxwell::NumRenderTargets> current_color_buffers;
- Surface last_depth_buffer;
-
- RasterizerTemporaryMemory temporal_memory;
-
- using SurfaceIntervalCache = boost::icl::interval_map<CacheAddr, Surface>;
- using SurfaceInterval = typename SurfaceIntervalCache::interval_type;
-
- static auto GetReinterpretInterval(const Surface& object) {
- return SurfaceInterval::right_open(object->GetCacheAddr() + 1,
- object->GetCacheAddr() + object->GetMemorySize() - 1);
- }
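- // Note (illustrative reading): the interval is shrunk by one byte at each end,
- // presumably so that a surface starting at exactly the same address, or just past
- // the end, is not treated as a collision.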
-
- // Reinterpreted surfaces are very fragile, as the game may keep rendering into them.
- SurfaceIntervalCache reinterpreted_surfaces;
-
- void RegisterReinterpretSurface(Surface reinterpret_surface) {
- auto interval = GetReinterpretInterval(reinterpret_surface);
- reinterpreted_surfaces.insert({interval, reinterpret_surface});
- reinterpret_surface->MarkReinterpreted();
- }
-
- Surface CollideOnReinterpretedSurface(CacheAddr addr) const {
- const SurfaceInterval interval{addr};
- for (auto& pair :
- boost::make_iterator_range(reinterpreted_surfaces.equal_range(interval))) {
- return pair.second;
- }
- return nullptr;
- }
-
- void Register(const Surface& object) override {
- RasterizerCache<Surface>::Register(object);
- }
-
- /// Unregisters an object from the cache
- void Unregister(const Surface& object) override {
- if (object->IsReinterpreted()) {
- auto interval = GetReinterpretInterval(object);
- reinterpreted_surfaces.erase(interval);
- }
- RasterizerCache<Surface>::Unregister(object);
- }
-};
-
-} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_resource_manager.cpp b/src/video_core/renderer_opengl/gl_resource_manager.cpp
index bfe666a73..5c96c1d46 100644
--- a/src/video_core/renderer_opengl/gl_resource_manager.cpp
+++ b/src/video_core/renderer_opengl/gl_resource_manager.cpp
@@ -33,6 +33,24 @@ void OGLTexture::Release() {
handle = 0;
}
+void OGLTextureView::Create() {
+ if (handle != 0)
+ return;
+
+ MICROPROFILE_SCOPE(OpenGL_ResourceCreation);
+ glGenTextures(1, &handle);
+}
+
+void OGLTextureView::Release() {
+ if (handle == 0)
+ return;
+
+ MICROPROFILE_SCOPE(OpenGL_ResourceDeletion);
+ glDeleteTextures(1, &handle);
+ OpenGLState::GetCurState().UnbindTexture(handle).Apply();
+ handle = 0;
+}
+
void OGLSampler::Create() {
if (handle != 0)
return;
@@ -130,6 +148,12 @@ void OGLBuffer::Release() {
handle = 0;
}
+void OGLBuffer::MakeStreamCopy(std::size_t buffer_size) {
+ ASSERT_OR_EXECUTE((handle != 0 && buffer_size != 0), { return; });
+
+ glNamedBufferData(handle, buffer_size, nullptr, GL_STREAM_COPY);
+}
+
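+// Usage sketch (illustrative): after buffer.Create(), a caller can size a PBO with
+// buffer.MakeStreamCopy(copy_size) instead of binding it and calling
+// glBufferData(..., nullptr, GL_STREAM_COPY) by hand.
+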
void OGLSync::Create() {
if (handle != 0)
return;
diff --git a/src/video_core/renderer_opengl/gl_resource_manager.h b/src/video_core/renderer_opengl/gl_resource_manager.h
index fbb93ee49..3a85a1d4c 100644
--- a/src/video_core/renderer_opengl/gl_resource_manager.h
+++ b/src/video_core/renderer_opengl/gl_resource_manager.h
@@ -36,6 +36,31 @@ public:
GLuint handle = 0;
};
+class OGLTextureView : private NonCopyable {
+public:
+ OGLTextureView() = default;
+
+ OGLTextureView(OGLTextureView&& o) noexcept : handle(std::exchange(o.handle, 0)) {}
+
+ ~OGLTextureView() {
+ Release();
+ }
+
+ OGLTextureView& operator=(OGLTextureView&& o) noexcept {
+ Release();
+ handle = std::exchange(o.handle, 0);
+ return *this;
+ }
+
+ /// Creates a new internal OpenGL resource and stores the handle
+ void Create();
+
+ /// Deletes the internal OpenGL resource
+ void Release();
+
+ GLuint handle = 0;
+};
+
class OGLSampler : private NonCopyable {
public:
OGLSampler() = default;
@@ -161,6 +186,9 @@ public:
/// Deletes the internal OpenGL resource
void Release();
+ // Converts the buffer into a stream copy buffer with a fixed size
+ void MakeStreamCopy(std::size_t buffer_size);
+
GLuint handle = 0;
};
diff --git a/src/video_core/renderer_opengl/gl_sampler_cache.h b/src/video_core/renderer_opengl/gl_sampler_cache.h
index defbc2d81..34ee37f00 100644
--- a/src/video_core/renderer_opengl/gl_sampler_cache.h
+++ b/src/video_core/renderer_opengl/gl_sampler_cache.h
@@ -17,9 +17,9 @@ public:
~SamplerCacheOpenGL();
protected:
- OGLSampler CreateSampler(const Tegra::Texture::TSCEntry& tsc) const;
+ OGLSampler CreateSampler(const Tegra::Texture::TSCEntry& tsc) const override;
- GLuint ToSamplerType(const OGLSampler& sampler) const;
+ GLuint ToSamplerType(const OGLSampler& sampler) const override;
};
} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp
index ac8a9e6b7..0dbc4c02f 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp
@@ -23,13 +23,13 @@ namespace OpenGL {
using VideoCommon::Shader::ProgramCode;
-// One UBO is always reserved for emulation values
-constexpr u32 RESERVED_UBOS = 1;
+// One UBO is always reserved for emulation values on graphics stage shaders
+constexpr u32 STAGE_RESERVED_UBOS = 1;
struct UnspecializedShader {
std::string code;
GLShader::ShaderEntries entries;
- Maxwell::ShaderProgram program_type;
+ ProgramType program_type;
};
namespace {
@@ -55,15 +55,17 @@ ProgramCode GetShaderCode(Tegra::MemoryManager& memory_manager, const GPUVAddr g
}
/// Gets the shader type from a Maxwell program type
-constexpr GLenum GetShaderType(Maxwell::ShaderProgram program_type) {
+constexpr GLenum GetShaderType(ProgramType program_type) {
switch (program_type) {
- case Maxwell::ShaderProgram::VertexA:
- case Maxwell::ShaderProgram::VertexB:
+ case ProgramType::VertexA:
+ case ProgramType::VertexB:
return GL_VERTEX_SHADER;
- case Maxwell::ShaderProgram::Geometry:
+ case ProgramType::Geometry:
return GL_GEOMETRY_SHADER;
- case Maxwell::ShaderProgram::Fragment:
+ case ProgramType::Fragment:
return GL_FRAGMENT_SHADER;
+ case ProgramType::Compute:
+ return GL_COMPUTE_SHADER;
default:
return GL_NONE;
}
@@ -100,18 +102,44 @@ constexpr std::tuple<const char*, const char*, u32> GetPrimitiveDescription(GLen
}
}
+ProgramType GetProgramType(Maxwell::ShaderProgram program) {
+ switch (program) {
+ case Maxwell::ShaderProgram::VertexA:
+ return ProgramType::VertexA;
+ case Maxwell::ShaderProgram::VertexB:
+ return ProgramType::VertexB;
+ case Maxwell::ShaderProgram::TesselationControl:
+ return ProgramType::TessellationControl;
+ case Maxwell::ShaderProgram::TesselationEval:
+ return ProgramType::TessellationEval;
+ case Maxwell::ShaderProgram::Geometry:
+ return ProgramType::Geometry;
+ case Maxwell::ShaderProgram::Fragment:
+ return ProgramType::Fragment;
+ }
+ UNREACHABLE();
+ return {};
+}
+
/// Calculates the size of a program stream
std::size_t CalculateProgramSize(const GLShader::ProgramCode& program) {
constexpr std::size_t start_offset = 10;
+ // This is the encoded version of BRA that jumps to itself. All Nvidia
+ // shaders end with one.
+ constexpr u64 self_jumping_branch = 0xE2400FFFFF07000FULL;
+ constexpr u64 mask = 0xFFFFFFFFFF7FFFFFULL;
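+ // The mask clears bit 23 of the instruction word, so the comparison below matches
+ // the self-jumping branch regardless of that bit's value (presumably a flag within
+ // the branch encoding).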
std::size_t offset = start_offset;
std::size_t size = start_offset * sizeof(u64);
while (offset < program.size()) {
const u64 instruction = program[offset];
if (!IsSchedInstruction(offset, start_offset)) {
- if (instruction == 0 || (instruction >> 52) == 0x50b) {
+ if ((instruction & mask) == self_jumping_branch) {
// End on the self-jumping branch that terminates Maxwell shaders
break;
}
+ if (instruction == 0) {
+ break;
+ }
}
size += sizeof(u64);
offset++;
@@ -121,11 +149,13 @@ std::size_t CalculateProgramSize(const GLShader::ProgramCode& program) {
}
/// Hashes one (or two) program streams
-u64 GetUniqueIdentifier(Maxwell::ShaderProgram program_type, const ProgramCode& code,
- const ProgramCode& code_b) {
- u64 unique_identifier =
- Common::CityHash64(reinterpret_cast<const char*>(code.data()), CalculateProgramSize(code));
- if (program_type != Maxwell::ShaderProgram::VertexA) {
+u64 GetUniqueIdentifier(ProgramType program_type, const ProgramCode& code,
+ const ProgramCode& code_b, std::size_t size_a = 0, std::size_t size_b = 0) {
+ if (size_a == 0) {
+ size_a = CalculateProgramSize(code);
+ }
+ u64 unique_identifier = Common::CityHash64(reinterpret_cast<const char*>(code.data()), size_a);
+ if (program_type != ProgramType::VertexA) {
return unique_identifier;
}
// VertexA programs include two programs
@@ -133,46 +163,70 @@ u64 GetUniqueIdentifier(Maxwell::ShaderProgram program_type, const ProgramCode&
std::size_t seed = 0;
boost::hash_combine(seed, unique_identifier);
- const u64 identifier_b = Common::CityHash64(reinterpret_cast<const char*>(code_b.data()),
- CalculateProgramSize(code_b));
+ if (size_b == 0) {
+ size_b = CalculateProgramSize(code_b);
+ }
+ const u64 identifier_b =
+ Common::CityHash64(reinterpret_cast<const char*>(code_b.data()), size_b);
boost::hash_combine(seed, identifier_b);
return static_cast<u64>(seed);
}
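// Sketch of the resulting key (illustrative): for a VertexA+VertexB pair this is
// hash_combine(hash_combine(0, CityHash64(code_a, size_a)), CityHash64(code_b, size_b)),
// so an identical pair of programs always produces the same cache key.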
/// Creates an unspecialized program from code streams
-GLShader::ProgramResult CreateProgram(const Device& device, Maxwell::ShaderProgram program_type,
+GLShader::ProgramResult CreateProgram(const Device& device, ProgramType program_type,
ProgramCode program_code, ProgramCode program_code_b) {
GLShader::ShaderSetup setup(program_code);
- if (program_type == Maxwell::ShaderProgram::VertexA) {
+ setup.program.size_a = CalculateProgramSize(program_code);
+ setup.program.size_b = 0;
+ if (program_type == ProgramType::VertexA) {
// VertexB is always enabled, so when VertexA is enabled, we have two vertex shaders.
// Conventional HW does not support this, so we combine VertexA and VertexB into one
// stage here.
setup.SetProgramB(program_code_b);
+ setup.program.size_b = CalculateProgramSize(program_code_b);
}
- setup.program.unique_identifier =
- GetUniqueIdentifier(program_type, program_code, program_code_b);
+ setup.program.unique_identifier = GetUniqueIdentifier(
+ program_type, program_code, program_code_b, setup.program.size_a, setup.program.size_b);
switch (program_type) {
- case Maxwell::ShaderProgram::VertexA:
- case Maxwell::ShaderProgram::VertexB:
+ case ProgramType::VertexA:
+ case ProgramType::VertexB:
return GLShader::GenerateVertexShader(device, setup);
- case Maxwell::ShaderProgram::Geometry:
+ case ProgramType::Geometry:
return GLShader::GenerateGeometryShader(device, setup);
- case Maxwell::ShaderProgram::Fragment:
+ case ProgramType::Fragment:
return GLShader::GenerateFragmentShader(device, setup);
+ case ProgramType::Compute:
+ return GLShader::GenerateComputeShader(device, setup);
default:
- LOG_CRITICAL(HW_GPU, "Unimplemented program_type={}", static_cast<u32>(program_type));
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unimplemented program_type={}", static_cast<u32>(program_type));
return {};
}
}
CachedProgram SpecializeShader(const std::string& code, const GLShader::ShaderEntries& entries,
- Maxwell::ShaderProgram program_type, BaseBindings base_bindings,
- GLenum primitive_mode, bool hint_retrievable = false) {
+ ProgramType program_type, const ProgramVariant& variant,
+ bool hint_retrievable = false) {
+ auto base_bindings{variant.base_bindings};
+ const auto primitive_mode{variant.primitive_mode};
+ const auto texture_buffer_usage{variant.texture_buffer_usage};
+
std::string source = "#version 430 core\n"
- "#extension GL_ARB_separate_shader_objects : enable\n\n";
- source += fmt::format("#define EMULATION_UBO_BINDING {}\n", base_bindings.cbuf++);
+ "#extension GL_ARB_separate_shader_objects : enable\n"
+ "#extension GL_NV_gpu_shader5 : enable\n"
+ "#extension GL_NV_shader_thread_group : enable\n"
+ "#extension GL_NV_shader_thread_shuffle : enable\n";
+ if (entries.shader_viewport_layer_array) {
+ source += "#extension GL_ARB_shader_viewport_layer_array : enable\n";
+ }
+ if (program_type == ProgramType::Compute) {
+ source += "#extension GL_ARB_compute_variable_group_size : require\n";
+ }
+ source += '\n';
+
+ if (program_type != ProgramType::Compute) {
+ source += fmt::format("#define EMULATION_UBO_BINDING {}\n", base_bindings.cbuf++);
+ }
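+
+ // At this point the preamble of, e.g., a fragment shader reads
+ // (binding index illustrative):
+ //   #version 430 core
+ //   #extension GL_ARB_separate_shader_objects : enable
+ //   #extension GL_NV_gpu_shader5 : enable
+ //   ...
+ //   #define EMULATION_UBO_BINDING 0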
for (const auto& cbuf : entries.const_buffers) {
source +=
@@ -186,15 +240,34 @@ CachedProgram SpecializeShader(const std::string& code, const GLShader::ShaderEn
source += fmt::format("#define SAMPLER_BINDING_{} {}\n", sampler.GetIndex(),
base_bindings.sampler++);
}
+ for (const auto& image : entries.images) {
+ source +=
+ fmt::format("#define IMAGE_BINDING_{} {}\n", image.GetIndex(), base_bindings.image++);
+ }
+
+ // Transform 1D textures into buffer samplers by declaring the corresponding
+ // preprocessor macros.
+ for (std::size_t i = 0; i < texture_buffer_usage.size(); ++i) {
+ if (!texture_buffer_usage.test(i)) {
+ continue;
+ }
+ source += fmt::format("#define SAMPLER_{}_IS_BUFFER\n", i);
+ }
+ if (texture_buffer_usage.any()) {
+ source += '\n';
+ }
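+ // e.g. when samplers 0 and 2 are buffer textures this appends:
+ //   #define SAMPLER_0_IS_BUFFER
+ //   #define SAMPLER_2_IS_BUFFER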
- if (program_type == Maxwell::ShaderProgram::Geometry) {
+ if (program_type == ProgramType::Geometry) {
const auto [glsl_topology, debug_name, max_vertices] =
GetPrimitiveDescription(primitive_mode);
- source += "layout (" + std::string(glsl_topology) + ") in;\n";
+ source += "layout (" + std::string(glsl_topology) + ") in;\n\n";
source += "#define MAX_VERTEX_INPUT " + std::to_string(max_vertices) + '\n';
}
+ if (program_type == ProgramType::Compute) {
+ source += "layout (local_size_variable) in;\n";
+ }
+ source += '\n';
source += code;
OGLShader shader;
@@ -221,131 +294,97 @@ std::set<GLenum> GetSupportedFormats() {
} // Anonymous namespace
-CachedShader::CachedShader(const Device& device, VAddr cpu_addr, u64 unique_identifier,
- Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache,
- const PrecompiledPrograms& precompiled_programs,
- ProgramCode&& program_code, ProgramCode&& program_code_b, u8* host_ptr)
- : RasterizerCacheObject{host_ptr}, host_ptr{host_ptr}, cpu_addr{cpu_addr},
- unique_identifier{unique_identifier}, program_type{program_type}, disk_cache{disk_cache},
- precompiled_programs{precompiled_programs} {
- const std::size_t code_size{CalculateProgramSize(program_code)};
- const std::size_t code_size_b{program_code_b.empty() ? 0
- : CalculateProgramSize(program_code_b)};
- GLShader::ProgramResult program_result{
- CreateProgram(device, program_type, program_code, program_code_b)};
- if (program_result.first.empty()) {
+CachedShader::CachedShader(const ShaderParameters& params, ProgramType program_type,
+ GLShader::ProgramResult result)
+ : RasterizerCacheObject{params.host_ptr}, cpu_addr{params.cpu_addr},
+ unique_identifier{params.unique_identifier}, program_type{program_type},
+ disk_cache{params.disk_cache}, precompiled_programs{params.precompiled_programs},
+ entries{result.second}, code{std::move(result.first)}, shader_length{entries.shader_length} {}
+
+Shader CachedShader::CreateStageFromMemory(const ShaderParameters& params,
+ Maxwell::ShaderProgram program_type,
+ ProgramCode&& program_code,
+ ProgramCode&& program_code_b) {
+ const auto code_size{CalculateProgramSize(program_code)};
+ const auto code_size_b{CalculateProgramSize(program_code_b)};
+ auto result{
+ CreateProgram(params.device, GetProgramType(program_type), program_code, program_code_b)};
+ if (result.first.empty()) {
// TODO(Rodrigo): Unimplemented shader stages hit here, avoid using these for now
- return;
+ return {};
}
- code = program_result.first;
- entries = program_result.second;
- shader_length = entries.shader_length;
+ params.disk_cache.SaveRaw(ShaderDiskCacheRaw(
+ params.unique_identifier, GetProgramType(program_type),
+ static_cast<u32>(code_size / sizeof(u64)), static_cast<u32>(code_size_b / sizeof(u64)),
+ std::move(program_code), std::move(program_code_b)));
- const ShaderDiskCacheRaw raw(unique_identifier, program_type,
- static_cast<u32>(code_size / sizeof(u64)),
- static_cast<u32>(code_size_b / sizeof(u64)),
- std::move(program_code), std::move(program_code_b));
- disk_cache.SaveRaw(raw);
+ return std::shared_ptr<CachedShader>(
+ new CachedShader(params, GetProgramType(program_type), std::move(result)));
}
-CachedShader::CachedShader(VAddr cpu_addr, u64 unique_identifier,
- Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache,
- const PrecompiledPrograms& precompiled_programs,
- GLShader::ProgramResult result, u8* host_ptr)
- : RasterizerCacheObject{host_ptr}, cpu_addr{cpu_addr}, unique_identifier{unique_identifier},
- program_type{program_type}, disk_cache{disk_cache}, precompiled_programs{
- precompiled_programs} {
- code = std::move(result.first);
- entries = result.second;
- shader_length = entries.shader_length;
+Shader CachedShader::CreateStageFromCache(const ShaderParameters& params,
+ Maxwell::ShaderProgram program_type,
+ GLShader::ProgramResult result) {
+ return std::shared_ptr<CachedShader>(
+ new CachedShader(params, GetProgramType(program_type), std::move(result)));
}
-std::tuple<GLuint, BaseBindings> CachedShader::GetProgramHandle(GLenum primitive_mode,
- BaseBindings base_bindings) {
- GLuint handle{};
- if (program_type == Maxwell::ShaderProgram::Geometry) {
- handle = GetGeometryShader(primitive_mode, base_bindings);
- } else {
- const auto [entry, is_cache_miss] = programs.try_emplace(base_bindings);
- auto& program = entry->second;
- if (is_cache_miss) {
- program = TryLoadProgram(primitive_mode, base_bindings);
- if (!program) {
- program =
- SpecializeShader(code, entries, program_type, base_bindings, primitive_mode);
- disk_cache.SaveUsage(GetUsage(primitive_mode, base_bindings));
- }
-
- LabelGLObject(GL_PROGRAM, program->handle, cpu_addr);
- }
+Shader CachedShader::CreateKernelFromMemory(const ShaderParameters& params, ProgramCode&& code) {
+ auto result{CreateProgram(params.device, ProgramType::Compute, code, {})};
- handle = program->handle;
- }
+ const auto code_size{CalculateProgramSize(code)};
+ params.disk_cache.SaveRaw(ShaderDiskCacheRaw(params.unique_identifier, ProgramType::Compute,
+ static_cast<u32>(code_size / sizeof(u64)), 0,
+ std::move(code), {}));
- base_bindings.cbuf += static_cast<u32>(entries.const_buffers.size()) + RESERVED_UBOS;
- base_bindings.gmem += static_cast<u32>(entries.global_memory_entries.size());
- base_bindings.sampler += static_cast<u32>(entries.samplers.size());
+ return std::shared_ptr<CachedShader>(
+ new CachedShader(params, ProgramType::Compute, std::move(result)));
+}
- return {handle, base_bindings};
+Shader CachedShader::CreateKernelFromCache(const ShaderParameters& params,
+ GLShader::ProgramResult result) {
+ return std::shared_ptr<CachedShader>(
+ new CachedShader(params, ProgramType::Compute, std::move(result)));
}
-GLuint CachedShader::GetGeometryShader(GLenum primitive_mode, BaseBindings base_bindings) {
- const auto [entry, is_cache_miss] = geometry_programs.try_emplace(base_bindings);
- auto& programs = entry->second;
+std::tuple<GLuint, BaseBindings> CachedShader::GetProgramHandle(const ProgramVariant& variant) {
+ const auto [entry, is_cache_miss] = programs.try_emplace(variant);
+ auto& program = entry->second;
+ if (is_cache_miss) {
+ program = TryLoadProgram(variant);
+ if (!program) {
+ program = SpecializeShader(code, entries, program_type, variant);
+ disk_cache.SaveUsage(GetUsage(variant));
+ }
- switch (primitive_mode) {
- case GL_POINTS:
- return LazyGeometryProgram(programs.points, base_bindings, primitive_mode);
- case GL_LINES:
- case GL_LINE_STRIP:
- return LazyGeometryProgram(programs.lines, base_bindings, primitive_mode);
- case GL_LINES_ADJACENCY:
- case GL_LINE_STRIP_ADJACENCY:
- return LazyGeometryProgram(programs.lines_adjacency, base_bindings, primitive_mode);
- case GL_TRIANGLES:
- case GL_TRIANGLE_STRIP:
- case GL_TRIANGLE_FAN:
- return LazyGeometryProgram(programs.triangles, base_bindings, primitive_mode);
- case GL_TRIANGLES_ADJACENCY:
- case GL_TRIANGLE_STRIP_ADJACENCY:
- return LazyGeometryProgram(programs.triangles_adjacency, base_bindings, primitive_mode);
- default:
- UNREACHABLE_MSG("Unknown primitive mode.");
- return LazyGeometryProgram(programs.points, base_bindings, primitive_mode);
+ LabelGLObject(GL_PROGRAM, program->handle, cpu_addr);
}
-}
-GLuint CachedShader::LazyGeometryProgram(CachedProgram& target_program, BaseBindings base_bindings,
- GLenum primitive_mode) {
- if (target_program) {
- return target_program->handle;
- }
- const auto [glsl_name, debug_name, vertices] = GetPrimitiveDescription(primitive_mode);
- target_program = TryLoadProgram(primitive_mode, base_bindings);
- if (!target_program) {
- target_program =
- SpecializeShader(code, entries, program_type, base_bindings, primitive_mode);
- disk_cache.SaveUsage(GetUsage(primitive_mode, base_bindings));
+ auto base_bindings = variant.base_bindings;
+ base_bindings.cbuf += static_cast<u32>(entries.const_buffers.size());
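+ // Compute programs never declare EMULATION_UBO_BINDING (see SpecializeShader),
+ // so the stage-reserved UBO slots only apply to graphics stages.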
+ if (program_type != ProgramType::Compute) {
+ base_bindings.cbuf += STAGE_RESERVED_UBOS;
}
+ base_bindings.gmem += static_cast<u32>(entries.global_memory_entries.size());
+ base_bindings.sampler += static_cast<u32>(entries.samplers.size());
- LabelGLObject(GL_PROGRAM, target_program->handle, cpu_addr, debug_name);
-
- return target_program->handle;
-};
+ return {program->handle, base_bindings};
+}
-CachedProgram CachedShader::TryLoadProgram(GLenum primitive_mode,
- BaseBindings base_bindings) const {
- const auto found = precompiled_programs.find(GetUsage(primitive_mode, base_bindings));
+CachedProgram CachedShader::TryLoadProgram(const ProgramVariant& variant) const {
+ const auto found = precompiled_programs.find(GetUsage(variant));
if (found == precompiled_programs.end()) {
return {};
}
return found->second;
}
-ShaderDiskCacheUsage CachedShader::GetUsage(GLenum primitive_mode,
- BaseBindings base_bindings) const {
- return {unique_identifier, base_bindings, primitive_mode};
+ShaderDiskCacheUsage CachedShader::GetUsage(const ProgramVariant& variant) const {
+ ShaderDiskCacheUsage usage;
+ usage.unique_identifier = unique_identifier;
+ usage.variant = variant;
+ return usage;
}
ShaderCacheOpenGL::ShaderCacheOpenGL(RasterizerOpenGL& rasterizer, Core::System& system,
@@ -411,8 +450,7 @@ void ShaderCacheOpenGL::LoadDiskCache(const std::atomic_bool& stop_loading,
}
if (!shader) {
shader = SpecializeShader(unspecialized.code, unspecialized.entries,
- unspecialized.program_type, usage.bindings,
- usage.primitive, true);
+ unspecialized.program_type, usage.variant, true);
}
std::scoped_lock lock(mutex);
@@ -547,7 +585,7 @@ std::unordered_map<u64, UnspecializedShader> ShaderCacheOpenGL::GenerateUnspecia
}
Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) {
- if (!system.GPU().Maxwell3D().dirty_flags.shaders) {
+ if (!system.GPU().Maxwell3D().dirty.shaders) {
return last_shaders[static_cast<std::size_t>(program)];
}
@@ -564,28 +602,55 @@ Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) {
// No shader found - create a new one
ProgramCode program_code{GetShaderCode(memory_manager, program_addr, host_ptr)};
ProgramCode program_code_b;
- if (program == Maxwell::ShaderProgram::VertexA) {
+ const bool is_program_a{program == Maxwell::ShaderProgram::VertexA};
+ if (is_program_a) {
const GPUVAddr program_addr_b{GetShaderAddress(system, Maxwell::ShaderProgram::VertexB)};
program_code_b = GetShaderCode(memory_manager, program_addr_b,
memory_manager.GetPointer(program_addr_b));
}
- const u64 unique_identifier = GetUniqueIdentifier(program, program_code, program_code_b);
- const VAddr cpu_addr{*memory_manager.GpuToCpuAddress(program_addr)};
+ const auto unique_identifier =
+ GetUniqueIdentifier(GetProgramType(program), program_code, program_code_b);
+ const auto cpu_addr{*memory_manager.GpuToCpuAddress(program_addr)};
+ const ShaderParameters params{disk_cache, precompiled_programs, device, cpu_addr,
+ host_ptr, unique_identifier};
+
const auto found = precompiled_shaders.find(unique_identifier);
- if (found != precompiled_shaders.end()) {
- // Create a shader from the cache
- shader = std::make_shared<CachedShader>(cpu_addr, unique_identifier, program, disk_cache,
- precompiled_programs, found->second, host_ptr);
+ if (found == precompiled_shaders.end()) {
+ shader = CachedShader::CreateStageFromMemory(params, program, std::move(program_code),
+ std::move(program_code_b));
} else {
- // Create a shader from guest memory
- shader = std::make_shared<CachedShader>(
- device, cpu_addr, unique_identifier, program, disk_cache, precompiled_programs,
- std::move(program_code), std::move(program_code_b), host_ptr);
+ shader = CachedShader::CreateStageFromCache(params, program, found->second);
}
Register(shader);
return last_shaders[static_cast<std::size_t>(program)] = shader;
}
+Shader ShaderCacheOpenGL::GetComputeKernel(GPUVAddr code_addr) {
+ auto& memory_manager{system.GPU().MemoryManager()};
+ const auto host_ptr{memory_manager.GetPointer(code_addr)};
+ auto kernel = TryGet(host_ptr);
+ if (kernel) {
+ return kernel;
+ }
+
+ // No kernel found - create a new one
+ auto code{GetShaderCode(memory_manager, code_addr, host_ptr)};
+ const auto unique_identifier{GetUniqueIdentifier(ProgramType::Compute, code, {})};
+ const auto cpu_addr{*memory_manager.GpuToCpuAddress(code_addr)};
+ const ShaderParameters params{disk_cache, precompiled_programs, device, cpu_addr,
+ host_ptr, unique_identifier};
+
+ const auto found = precompiled_shaders.find(unique_identifier);
+ if (found == precompiled_shaders.end()) {
+ kernel = CachedShader::CreateKernelFromMemory(params, std::move(code));
+ } else {
+ kernel = CachedShader::CreateKernelFromCache(params, found->second);
+ }
+
+ Register(kernel);
+ return kernel;
+}
+
} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.h b/src/video_core/renderer_opengl/gl_shader_cache.h
index 09bd0761d..de195cc5d 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.h
+++ b/src/video_core/renderer_opengl/gl_shader_cache.h
@@ -6,6 +6,7 @@
#include <array>
#include <atomic>
+#include <bitset>
#include <memory>
#include <set>
#include <tuple>
@@ -41,17 +42,29 @@ using Maxwell = Tegra::Engines::Maxwell3D::Regs;
using PrecompiledPrograms = std::unordered_map<ShaderDiskCacheUsage, CachedProgram>;
using PrecompiledShaders = std::unordered_map<u64, GLShader::ProgramResult>;
+struct ShaderParameters {
+ ShaderDiskCacheOpenGL& disk_cache;
+ const PrecompiledPrograms& precompiled_programs;
+ const Device& device;
+ VAddr cpu_addr;
+ u8* host_ptr;
+ u64 unique_identifier;
+};
+
class CachedShader final : public RasterizerCacheObject {
public:
- explicit CachedShader(const Device& device, VAddr cpu_addr, u64 unique_identifier,
- Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache,
- const PrecompiledPrograms& precompiled_programs,
- ProgramCode&& program_code, ProgramCode&& program_code_b, u8* host_ptr);
+ static Shader CreateStageFromMemory(const ShaderParameters& params,
+ Maxwell::ShaderProgram program_type,
+ ProgramCode&& program_code, ProgramCode&& program_code_b);
+
+ static Shader CreateStageFromCache(const ShaderParameters& params,
+ Maxwell::ShaderProgram program_type,
+ GLShader::ProgramResult result);
+
+ static Shader CreateKernelFromMemory(const ShaderParameters& params, ProgramCode&& code);
- explicit CachedShader(VAddr cpu_addr, u64 unique_identifier,
- Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache,
- const PrecompiledPrograms& precompiled_programs,
- GLShader::ProgramResult result, u8* host_ptr);
+ static Shader CreateKernelFromCache(const ShaderParameters& params,
+ GLShader::ProgramResult result);
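+
+ // Typical call site (cf. ShaderCacheOpenGL::GetComputeKernel):
+ //   const ShaderParameters params{disk_cache, precompiled_programs, device,
+ //                                 cpu_addr, host_ptr, unique_identifier};
+ //   Shader kernel = CachedShader::CreateKernelFromMemory(params, std::move(code));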
VAddr GetCpuAddr() const override {
return cpu_addr;
@@ -67,49 +80,27 @@ public:
}
/// Gets the GL program handle for the shader
- std::tuple<GLuint, BaseBindings> GetProgramHandle(GLenum primitive_mode,
- BaseBindings base_bindings);
+ std::tuple<GLuint, BaseBindings> GetProgramHandle(const ProgramVariant& variant);
private:
- // Geometry programs. These are needed because GLSL needs an input topology but it's not
- // declared by the hardware. Workaround this issue by generating a different shader per input
- // topology class.
- struct GeometryPrograms {
- CachedProgram points;
- CachedProgram lines;
- CachedProgram lines_adjacency;
- CachedProgram triangles;
- CachedProgram triangles_adjacency;
- };
+ explicit CachedShader(const ShaderParameters& params, ProgramType program_type,
+ GLShader::ProgramResult result);
- GLuint GetGeometryShader(GLenum primitive_mode, BaseBindings base_bindings);
+ CachedProgram TryLoadProgram(const ProgramVariant& variant) const;
- /// Generates a geometry shader or returns one that already exists.
- GLuint LazyGeometryProgram(CachedProgram& target_program, BaseBindings base_bindings,
- GLenum primitive_mode);
+ ShaderDiskCacheUsage GetUsage(const ProgramVariant& variant) const;
- CachedProgram TryLoadProgram(GLenum primitive_mode, BaseBindings base_bindings) const;
-
- ShaderDiskCacheUsage GetUsage(GLenum primitive_mode, BaseBindings base_bindings) const;
-
- u8* host_ptr{};
VAddr cpu_addr{};
u64 unique_identifier{};
- Maxwell::ShaderProgram program_type{};
+ ProgramType program_type{};
ShaderDiskCacheOpenGL& disk_cache;
const PrecompiledPrograms& precompiled_programs;
- std::size_t shader_length{};
GLShader::ShaderEntries entries;
-
std::string code;
+ std::size_t shader_length{};
- std::unordered_map<BaseBindings, CachedProgram> programs;
- std::unordered_map<BaseBindings, GeometryPrograms> geometry_programs;
-
- std::unordered_map<u32, GLuint> cbuf_resource_cache;
- std::unordered_map<u32, GLuint> gmem_resource_cache;
- std::unordered_map<u32, GLint> uniform_cache;
+ std::unordered_map<ProgramVariant, CachedProgram> programs;
};
class ShaderCacheOpenGL final : public RasterizerCache<Shader> {
@@ -124,6 +115,9 @@ public:
/// Gets the current specified shader stage program
Shader GetStageProgram(Maxwell::ShaderProgram program);
+ /// Gets the compute kernel at the passed address
+ Shader GetComputeKernel(GPUVAddr code_addr);
+
protected:
// We do not have to flush this cache as things in it are never modified by us.
void FlushObjectInner(const Shader& object) override {}
diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
index 739477cc9..76439e7ab 100644
--- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
@@ -14,6 +14,7 @@
#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
+#include "common/logging/log.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/renderer_opengl/gl_device.h"
#include "video_core/renderer_opengl/gl_rasterizer.h"
@@ -36,19 +37,18 @@ using namespace std::string_literals;
using namespace VideoCommon::Shader;
using Maxwell = Tegra::Engines::Maxwell3D::Regs;
-using ShaderStage = Tegra::Engines::Maxwell3D::Regs::ShaderStage;
using Operation = const OperationNode&;
-enum class Type { Bool, Bool2, Float, Int, Uint, HalfFloat };
+enum class Type { Void, Bool, Bool2, Float, Int, Uint, HalfFloat };
struct TextureAoffi {};
using TextureArgument = std::pair<Type, Node>;
using TextureIR = std::variant<TextureAoffi, TextureArgument>;
constexpr u32 MAX_CONSTBUFFER_ELEMENTS =
- static_cast<u32>(RasterizerOpenGL::MaxConstbufferSize) / (4 * sizeof(float));
+ static_cast<u32>(Maxwell::MaxConstBufferSize) / (4 * sizeof(float));
-class ShaderWriter {
+class ShaderWriter final {
public:
void AddExpression(std::string_view text) {
DEBUG_ASSERT(scope >= 0);
@@ -93,9 +93,157 @@ private:
u32 temporary_index = 1;
};
+class Expression final {
+public:
+ Expression(std::string code, Type type) : code{std::move(code)}, type{type} {
+ ASSERT(type != Type::Void);
+ }
+ Expression() : type{Type::Void} {}
+
+ Type GetType() const {
+ return type;
+ }
+
+ std::string GetCode() const {
+ return code;
+ }
+
+ void CheckVoid() const {
+ ASSERT(type == Type::Void);
+ }
+
+ std::string As(Type type) const {
+ switch (type) {
+ case Type::Bool:
+ return AsBool();
+ case Type::Bool2:
+ return AsBool2();
+ case Type::Float:
+ return AsFloat();
+ case Type::Int:
+ return AsInt();
+ case Type::Uint:
+ return AsUint();
+ case Type::HalfFloat:
+ return AsHalfFloat();
+ default:
+ UNREACHABLE_MSG("Invalid type");
+ return code;
+ }
+ }
+
+ std::string AsBool() const {
+ switch (type) {
+ case Type::Bool:
+ return code;
+ default:
+ UNREACHABLE_MSG("Incompatible types");
+ return code;
+ }
+ }
+
+ std::string AsBool2() const {
+ switch (type) {
+ case Type::Bool2:
+ return code;
+ default:
+ UNREACHABLE_MSG("Incompatible types");
+ return code;
+ }
+ }
+
+ std::string AsFloat() const {
+ switch (type) {
+ case Type::Float:
+ return code;
+ case Type::Uint:
+ return fmt::format("utof({})", code);
+ case Type::Int:
+ return fmt::format("itof({})", code);
+ case Type::HalfFloat:
+ return fmt::format("utof(packHalf2x16({}))", code);
+ default:
+ UNREACHABLE_MSG("Incompatible types");
+ return code;
+ }
+ }
+
+ std::string AsInt() const {
+ switch (type) {
+ case Type::Float:
+ return fmt::format("ftoi({})", code);
+ case Type::Uint:
+ return fmt::format("int({})", code);
+ case Type::Int:
+ return code;
+ case Type::HalfFloat:
+ return fmt::format("int(packHalf2x16({}))", code);
+ default:
+ UNREACHABLE_MSG("Incompatible types");
+ return code;
+ }
+ }
+
+ std::string AsUint() const {
+ switch (type) {
+ case Type::Float:
+ return fmt::format("ftou({})", code);
+ case Type::Uint:
+ return code;
+ case Type::Int:
+ return fmt::format("uint({})", code);
+ case Type::HalfFloat:
+ return fmt::format("packHalf2x16({})", code);
+ default:
+ UNREACHABLE_MSG("Incompatible types");
+ return code;
+ }
+ }
+
+ std::string AsHalfFloat() const {
+ switch (type) {
+ case Type::Float:
+ return fmt::format("unpackHalf2x16(ftou({}))", code);
+ case Type::Uint:
+ return fmt::format("unpackHalf2x16({})", code);
+ case Type::Int:
+ return fmt::format("unpackHalf2x16(int({}))", code);
+ case Type::HalfFloat:
+ return code;
+ default:
+ UNREACHABLE_MSG("Incompatible types");
+ return code;
+ }
+ }
+
+private:
+ std::string code;
+ Type type{};
+};
+
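+// Expression pairs a GLSL snippet with its type; callers ask for the form they
+// need and the bit-preserving conversions are inserted automatically, e.g.
+//   Expression{"r0", Type::Float}.AsUint() yields "ftou(r0)".
+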
+constexpr const char* GetTypeString(Type type) {
+ switch (type) {
+ case Type::Bool:
+ return "bool";
+ case Type::Bool2:
+ return "bvec2";
+ case Type::Float:
+ return "float";
+ case Type::Int:
+ return "int";
+ case Type::Uint:
+ return "uint";
+ case Type::HalfFloat:
+ return "vec2";
+ default:
+ UNREACHABLE_MSG("Invalid type");
+ return "<invalid type>";
+ }
+}
+
/// Generates code to use for a swizzle operation.
constexpr const char* GetSwizzle(u32 element) {
- constexpr std::array<const char*, 4> swizzle = {".x", ".y", ".z", ".w"};
+ constexpr std::array swizzle = {".x", ".y", ".z", ".w"};
return swizzle.at(element);
}
@@ -134,8 +282,8 @@ constexpr bool IsGenericAttribute(Attribute::Index index) {
return index >= Attribute::Index::Attribute_0 && index <= Attribute::Index::Attribute_31;
}
-constexpr Attribute::Index ToGenericAttribute(u32 value) {
- return static_cast<Attribute::Index>(value + static_cast<u32>(Attribute::Index::Attribute_0));
+constexpr Attribute::Index ToGenericAttribute(u64 value) {
+ return static_cast<Attribute::Index>(value + static_cast<u64>(Attribute::Index::Attribute_0));
}
u32 GetGenericAttributeIndex(Attribute::Index index) {
@@ -143,9 +291,31 @@ u32 GetGenericAttributeIndex(Attribute::Index index) {
return static_cast<u32>(index) - static_cast<u32>(Attribute::Index::Attribute_0);
}
+constexpr const char* GetFlowStackPrefix(MetaStackClass stack) {
+ switch (stack) {
+ case MetaStackClass::Ssy:
+ return "ssy";
+ case MetaStackClass::Pbk:
+ return "pbk";
+ }
+ return {};
+}
+
+std::string FlowStackName(MetaStackClass stack) {
+ return fmt::format("{}_flow_stack", GetFlowStackPrefix(stack));
+}
+
+std::string FlowStackTopName(MetaStackClass stack) {
+ return fmt::format("{}_flow_stack_top", GetFlowStackPrefix(stack));
+}
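+
+// e.g. FlowStackName(MetaStackClass::Ssy) == "ssy_flow_stack",
+// FlowStackTopName(MetaStackClass::Pbk) == "pbk_flow_stack_top".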
+
+constexpr bool IsVertexShader(ProgramType stage) {
+ return stage == ProgramType::VertexA || stage == ProgramType::VertexB;
+}
+
class GLSLDecompiler final {
public:
- explicit GLSLDecompiler(const Device& device, const ShaderIR& ir, ShaderStage stage,
+ explicit GLSLDecompiler(const Device& device, const ShaderIR& ir, ProgramType stage,
std::string suffix)
: device{device}, ir{ir}, stage{stage}, suffix{suffix}, header{ir.GetHeader()} {}
@@ -155,6 +325,7 @@ public:
DeclareRegisters();
DeclarePredicates();
DeclareLocalMemory();
+ DeclareSharedMemory();
DeclareInternalFlags();
DeclareInputAttributes();
DeclareOutputAttributes();
@@ -162,19 +333,24 @@ public:
DeclareGlobalMemory();
DeclareSamplers();
DeclarePhysicalAttributeReader();
+ DeclareImages();
code.AddLine("void execute_{}() {{", suffix);
++code.scope;
// VM's program counter
const auto first_address = ir.GetBasicBlocks().begin()->first;
- code.AddLine("uint jmp_to = {}u;", first_address);
+ code.AddLine("uint jmp_to = {}U;", first_address);
// TODO(Subv): Figure out the actual depth of the flow stack, for now it seems
// unlikely that shaders will use 20 nested SSYs and PBKs.
- constexpr u32 FLOW_STACK_SIZE = 20;
- code.AddLine("uint flow_stack[{}];", FLOW_STACK_SIZE);
- code.AddLine("uint flow_stack_top = 0u;");
+ if (!ir.IsFlowStackDisabled()) {
+ constexpr u32 FLOW_STACK_SIZE = 20;
+ for (const auto stack : std::array{MetaStackClass::Ssy, MetaStackClass::Pbk}) {
+ code.AddLine("uint {}[{}];", FlowStackName(stack), FLOW_STACK_SIZE);
+ code.AddLine("uint {} = 0U;", FlowStackTopName(stack));
+ }
+ }
code.AddLine("while (true) {{");
++code.scope;
@@ -183,7 +359,7 @@ public:
for (const auto& pair : ir.GetBasicBlocks()) {
const auto [address, bb] = pair;
- code.AddLine("case 0x{:x}u: {{", address);
+ code.AddLine("case 0x{:X}U: {{", address);
++code.scope;
VisitBlock(bb);
@@ -214,30 +390,30 @@ public:
for (const auto& sampler : ir.GetSamplers()) {
entries.samplers.emplace_back(sampler);
}
- for (const auto& gmem_pair : ir.GetGlobalMemory()) {
- const auto& [base, usage] = gmem_pair;
+ for (const auto& [offset, image] : ir.GetImages()) {
+ entries.images.emplace_back(image);
+ }
+ for (const auto& [base, usage] : ir.GetGlobalMemory()) {
entries.global_memory_entries.emplace_back(base.cbuf_index, base.cbuf_offset,
usage.is_read, usage.is_written);
}
entries.clip_distances = ir.GetClipDistances();
+ entries.shader_viewport_layer_array =
+ IsVertexShader(stage) && (ir.UsesLayer() || ir.UsesViewportIndex());
entries.shader_length = ir.GetLength();
return entries;
}
private:
- using OperationDecompilerFn = std::string (GLSLDecompiler::*)(Operation);
- using OperationDecompilersArray =
- std::array<OperationDecompilerFn, static_cast<std::size_t>(OperationCode::Amount)>;
-
void DeclareVertex() {
- if (stage != ShaderStage::Vertex)
+ if (!IsVertexShader(stage))
return;
DeclareVertexRedeclarations();
}
void DeclareGeometry() {
- if (stage != ShaderStage::Geometry) {
+ if (stage != ProgramType::Geometry) {
return;
}
@@ -256,21 +432,34 @@ private:
}
void DeclareVertexRedeclarations() {
- bool clip_distances_declared = false;
-
code.AddLine("out gl_PerVertex {{");
++code.scope;
code.AddLine("vec4 gl_Position;");
- for (const auto o : ir.GetOutputAttributes()) {
- if (o == Attribute::Index::PointSize)
- code.AddLine("float gl_PointSize;");
- if (!clip_distances_declared && (o == Attribute::Index::ClipDistances0123 ||
- o == Attribute::Index::ClipDistances4567)) {
+ for (const auto attribute : ir.GetOutputAttributes()) {
+ if (attribute == Attribute::Index::ClipDistances0123 ||
+ attribute == Attribute::Index::ClipDistances4567) {
code.AddLine("float gl_ClipDistance[];");
- clip_distances_declared = true;
+ break;
+ }
+ }
+ if (!IsVertexShader(stage) || device.HasVertexViewportLayer()) {
+ if (ir.UsesLayer()) {
+ code.AddLine("int gl_Layer;");
+ }
+ if (ir.UsesViewportIndex()) {
+ code.AddLine("int gl_ViewportIndex;");
}
+ } else if ((ir.UsesLayer() || ir.UsesViewportIndex()) && IsVertexShader(stage) &&
+ !device.HasVertexViewportLayer()) {
+ LOG_ERROR(
+ Render_OpenGL,
+ "GL_ARB_shader_viewport_layer_array is not available and its required by a shader");
+ }
+
+ if (ir.UsesPointSize()) {
+ code.AddLine("float gl_PointSize;");
}
--code.scope;
@@ -281,7 +470,7 @@ private:
void DeclareRegisters() {
const auto& registers = ir.GetRegisters();
for (const u32 gpr : registers) {
- code.AddLine("float {} = 0;", GetRegister(gpr));
+ code.AddLine("float {} = 0.0f;", GetRegister(gpr));
}
if (!registers.empty()) {
code.AddNewLine();
@@ -299,11 +488,23 @@ private:
}
void DeclareLocalMemory() {
- if (const u64 local_memory_size = header.GetLocalMemorySize(); local_memory_size > 0) {
- const auto element_count = Common::AlignUp(local_memory_size, 4) / 4;
- code.AddLine("float {}[{}];", GetLocalMemory(), element_count);
- code.AddNewLine();
+ // TODO(Rodrigo): Unstub kernel local memory size and pass it from a register at
+ // specialization time.
+ const u64 local_memory_size =
+ stage == ProgramType::Compute ? 0x400 : header.GetLocalMemorySize();
+ if (local_memory_size == 0) {
+ return;
}
+ const auto element_count = Common::AlignUp(local_memory_size, 4) / 4;
+ code.AddLine("uint {}[{}];", GetLocalMemory(), element_count);
+ code.AddNewLine();
+ }
+
+ void DeclareSharedMemory() {
+ if (stage != ProgramType::Compute) {
+ return;
+ }
+ code.AddLine("shared uint {}[];", GetSharedMemory());
}
void DeclareInternalFlags() {
@@ -325,8 +526,6 @@ private:
return "noperspective ";
default:
case AttributeUse::Unused:
- UNREACHABLE_MSG("Unused attribute being fetched");
- return {};
UNIMPLEMENTED_MSG("Unknown attribute usage index={}", static_cast<u32>(attribute));
return {};
}
@@ -357,12 +556,12 @@ private:
const u32 location{GetGenericAttributeIndex(index)};
std::string name{GetInputAttribute(index)};
- if (stage == ShaderStage::Geometry) {
+ if (stage == ProgramType::Geometry) {
name = "gs_" + name + "[]";
}
std::string suffix;
- if (stage == ShaderStage::Fragment) {
+ if (stage == ProgramType::Fragment) {
const auto input_mode{header.ps.GetAttributeUse(location)};
if (skip_unused && input_mode == AttributeUse::Unused) {
return;
@@ -374,7 +573,7 @@ private:
}
void DeclareOutputAttributes() {
- if (ir.HasPhysicalAttributes() && stage != ShaderStage::Fragment) {
+ if (ir.HasPhysicalAttributes() && stage != ProgramType::Fragment) {
for (u32 i = 0; i < GetNumPhysicalVaryings(); ++i) {
DeclareOutputAttribute(ToGenericAttribute(i));
}
@@ -403,7 +602,7 @@ private:
const auto [index, size] = entry;
code.AddLine("layout (std140, binding = CBUF_BINDING_{}) uniform {} {{", index,
GetConstBufferBlock(index));
- code.AddLine(" vec4 {}[MAX_CONSTBUFFER_ELEMENTS];", GetConstBuffer(index));
+ code.AddLine(" uvec4 {}[{}];", GetConstBuffer(index), MAX_CONSTBUFFER_ELEMENTS);
code.AddLine("}};");
code.AddNewLine();
}
@@ -424,7 +623,7 @@ private:
code.AddLine("layout (std430, binding = GMEM_BINDING_{}_{}) {} buffer {} {{",
base.cbuf_index, base.cbuf_offset, qualifier, GetGlobalMemoryBlock(base));
- code.AddLine(" float {}[];", GetGlobalMemory(base));
+ code.AddLine(" uint {}[];", GetGlobalMemory(base));
code.AddLine("}};");
code.AddNewLine();
}
@@ -433,9 +632,13 @@ private:
void DeclareSamplers() {
const auto& samplers = ir.GetSamplers();
for (const auto& sampler : samplers) {
- std::string sampler_type = [&sampler] {
+ const std::string name{GetSampler(sampler)};
+ const std::string description{"layout (binding = SAMPLER_BINDING_" +
+ std::to_string(sampler.GetIndex()) + ") uniform"};
+ std::string sampler_type = [&]() {
switch (sampler.GetType()) {
case Tegra::Shader::TextureType::Texture1D:
+ // Special cased, read below.
return "sampler1D";
case Tegra::Shader::TextureType::Texture2D:
return "sampler2D";
@@ -455,8 +658,19 @@ private:
sampler_type += "Shadow";
}
- code.AddLine("layout (binding = SAMPLER_BINDING_{}) uniform {} {};", sampler.GetIndex(),
- sampler_type, GetSampler(sampler));
+ if (sampler.GetType() == Tegra::Shader::TextureType::Texture1D) {
+                // 1D textures can be aliased to texture buffers. Hide both declarations behind a
+                // preprocessor flag and let the GPU state select one or the other, since the
+                // shader alone doesn't carry enough information to determine the texture type.
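+                // The emitted declaration roughly reads (index and name illustrative):
+                //   #ifdef SAMPLER_3_IS_BUFFER
+                //   layout (binding = SAMPLER_BINDING_3) uniform samplerBuffer tex3;
+                //   #else
+                //   layout (binding = SAMPLER_BINDING_3) uniform sampler1D tex3;
+                //   #endif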
+ EmitIfdefIsBuffer(sampler);
+ code.AddLine("{} samplerBuffer {};", description, name);
+ code.AddLine("#else");
+ code.AddLine("{} {} {};", description, sampler_type, name);
+ code.AddLine("#endif");
+ } else {
+ // The other texture types (2D, 3D and cubes) don't have this issue.
+ code.AddLine("{} {} {};", description, sampler_type, name);
+ }
}
if (!samplers.empty()) {
code.AddNewLine();
@@ -467,7 +681,7 @@ private:
if (!ir.HasPhysicalAttributes()) {
return;
}
- code.AddLine("float readPhysicalAttribute(uint physical_address) {{");
+ code.AddLine("float ReadPhysicalAttribute(uint physical_address) {{");
++code.scope;
code.AddLine("switch (physical_address) {{");
@@ -476,15 +690,16 @@ private:
for (u32 index = 0; index < num_attributes; ++index) {
const auto attribute{ToGenericAttribute(index)};
for (u32 element = 0; element < 4; ++element) {
- constexpr u32 generic_base{0x80};
- constexpr u32 generic_stride{16};
- constexpr u32 element_stride{4};
+ constexpr u32 generic_base = 0x80;
+ constexpr u32 generic_stride = 16;
+ constexpr u32 element_stride = 4;
const u32 address{generic_base + index * generic_stride + element * element_stride};
- const bool declared{stage != ShaderStage::Fragment ||
- header.ps.GetAttributeUse(index) != AttributeUse::Unused};
- const std::string value{declared ? ReadAttribute(attribute, element) : "0"};
- code.AddLine("case 0x{:x}: return {};", address, value);
+ const bool declared = stage != ProgramType::Fragment ||
+ header.ps.GetAttributeUse(index) != AttributeUse::Unused;
+ const std::string value =
+ declared ? ReadAttribute(attribute, element).AsFloat() : "0.0f";
+ code.AddLine("case 0x{:X}U: return {};", address, value);
}
}
@@ -496,15 +711,68 @@ private:
code.AddNewLine();
}
+ void DeclareImages() {
+ const auto& images{ir.GetImages()};
+ for (const auto& [offset, image] : images) {
+ const char* image_type = [&] {
+ switch (image.GetType()) {
+ case Tegra::Shader::ImageType::Texture1D:
+ return "image1D";
+ case Tegra::Shader::ImageType::TextureBuffer:
+ return "imageBuffer";
+ case Tegra::Shader::ImageType::Texture1DArray:
+ return "image1DArray";
+ case Tegra::Shader::ImageType::Texture2D:
+ return "image2D";
+ case Tegra::Shader::ImageType::Texture2DArray:
+ return "image2DArray";
+ case Tegra::Shader::ImageType::Texture3D:
+ return "image3D";
+ default:
+ UNREACHABLE();
+ return "image1D";
+ }
+ }();
+
+ const auto [type_prefix, format] = [&]() -> std::pair<const char*, const char*> {
+ if (!image.IsSizeKnown()) {
+ return {"", ""};
+ }
+ switch (image.GetSize()) {
+ case Tegra::Shader::ImageAtomicSize::U32:
+ return {"u", "r32ui, "};
+ case Tegra::Shader::ImageAtomicSize::S32:
+ return {"i", "r32i, "};
+ default:
+ UNIMPLEMENTED_MSG("Unimplemented atomic size={}",
+ static_cast<u32>(image.GetSize()));
+ return {"", ""};
+ }
+ }();
+
+ std::string qualifier = "coherent volatile";
+ if (image.IsRead() && !image.IsWritten()) {
+ qualifier += " readonly";
+ } else if (image.IsWritten() && !image.IsRead()) {
+ qualifier += " writeonly";
+ }
+
+ code.AddLine("layout (binding = IMAGE_BINDING_{}) {} uniform "
+ "{} {};",
+ image.GetIndex(), qualifier, image_type, GetImage(image));
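+            // e.g. a read-only u32 image at index 0 declares roughly:
+            //   layout (r32ui, binding = IMAGE_BINDING_0) coherent volatile readonly
+            //   uniform uimage2D <name>;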
+ }
+ if (!images.empty()) {
+ code.AddNewLine();
+ }
+ }
+
void VisitBlock(const NodeBlock& bb) {
for (const auto& node : bb) {
- if (const std::string expr = Visit(node); !expr.empty()) {
- code.AddLine(expr);
- }
+ Visit(node).CheckVoid();
}
}
- std::string Visit(const Node& node) {
+ Expression Visit(const Node& node) {
if (const auto operation = std::get_if<OperationNode>(&*node)) {
const auto operation_index = static_cast<std::size_t>(operation->GetCode());
if (operation_index >= operation_decompilers.size()) {
@@ -522,18 +790,18 @@ private:
if (const auto gpr = std::get_if<GprNode>(&*node)) {
const u32 index = gpr->GetIndex();
if (index == Register::ZeroIndex) {
- return "0";
+ return {"0U", Type::Uint};
}
- return GetRegister(index);
+ return {GetRegister(index), Type::Float};
}
if (const auto immediate = std::get_if<ImmediateNode>(&*node)) {
const u32 value = immediate->GetValue();
if (value < 10) {
-                // For eyecandy avoid using hex numbers on single digits
+                // For readability, avoid hex formatting for single-digit values
- return fmt::format("utof({}u)", immediate->GetValue());
+ return {fmt::format("{}U", immediate->GetValue()), Type::Uint};
}
- return fmt::format("utof(0x{:x}u)", immediate->GetValue());
+ return {fmt::format("0x{:X}U", immediate->GetValue()), Type::Uint};
}
if (const auto predicate = std::get_if<PredicateNode>(&*node)) {
@@ -548,17 +816,18 @@ private:
}
}();
if (predicate->IsNegated()) {
- return fmt::format("!({})", value);
+ return {fmt::format("!({})", value), Type::Bool};
}
- return value;
+ return {value, Type::Bool};
}
if (const auto abuf = std::get_if<AbufNode>(&*node)) {
- UNIMPLEMENTED_IF_MSG(abuf->IsPhysicalBuffer() && stage == ShaderStage::Geometry,
+ UNIMPLEMENTED_IF_MSG(abuf->IsPhysicalBuffer() && stage == ProgramType::Geometry,
"Physical attributes in geometry shaders are not implemented");
if (abuf->IsPhysicalBuffer()) {
- return fmt::format("readPhysicalAttribute(ftou({}))",
- Visit(abuf->GetPhysicalAddress()));
+ return {fmt::format("ReadPhysicalAttribute({})",
+ Visit(abuf->GetPhysicalAddress()).AsUint()),
+ Type::Float};
}
return ReadAttribute(abuf->GetIndex(), abuf->GetElement(), abuf->GetBuffer());
}
@@ -569,56 +838,70 @@ private:
// Direct access
const u32 offset_imm = immediate->GetValue();
ASSERT_MSG(offset_imm % 4 == 0, "Unaligned cbuf direct access");
- return fmt::format("{}[{}][{}]", GetConstBuffer(cbuf->GetIndex()),
- offset_imm / (4 * 4), (offset_imm / 4) % 4);
+ return {fmt::format("{}[{}][{}]", GetConstBuffer(cbuf->GetIndex()),
+ offset_imm / (4 * 4), (offset_imm / 4) % 4),
+ Type::Uint};
}
if (std::holds_alternative<OperationNode>(*offset)) {
// Indirect access
const std::string final_offset = code.GenerateTemporary();
- code.AddLine("uint {} = ftou({}) >> 2;", final_offset, Visit(offset));
+ code.AddLine("uint {} = {} >> 2;", final_offset, Visit(offset).AsUint());
if (!device.HasComponentIndexingBug()) {
- return fmt::format("{}[{} >> 2][{} & 3]", GetConstBuffer(cbuf->GetIndex()),
- final_offset, final_offset);
+ return {fmt::format("{}[{} >> 2][{} & 3]", GetConstBuffer(cbuf->GetIndex()),
+ final_offset, final_offset),
+ Type::Uint};
}
-            // AMD's proprietary GLSL compiler emits ill code for variable component access.
-            // To bypass this driver bug generate 4 ifs, one per each component.
+            // AMD's proprietary GLSL compiler emits broken code for variable component access.
+            // To bypass this driver bug, generate four ifs, one per component.
const std::string pack = code.GenerateTemporary();
- code.AddLine("vec4 {} = {}[{} >> 2];", pack, GetConstBuffer(cbuf->GetIndex()),
+ code.AddLine("uvec4 {} = {}[{} >> 2];", pack, GetConstBuffer(cbuf->GetIndex()),
final_offset);
const std::string result = code.GenerateTemporary();
- code.AddLine("float {};", result);
+ code.AddLine("uint {};", result);
for (u32 swizzle = 0; swizzle < 4; ++swizzle) {
code.AddLine("if (({} & 3) == {}) {} = {}{};", final_offset, swizzle, result,
pack, GetSwizzle(swizzle));
}
- return result;
+ return {result, Type::Uint};
}
UNREACHABLE_MSG("Unmanaged offset node type");
}
if (const auto gmem = std::get_if<GmemNode>(&*node)) {
- const std::string real = Visit(gmem->GetRealAddress());
- const std::string base = Visit(gmem->GetBaseAddress());
- const std::string final_offset = fmt::format("(ftou({}) - ftou({})) / 4", real, base);
- return fmt::format("{}[{}]", GetGlobalMemory(gmem->GetDescriptor()), final_offset);
+ const std::string real = Visit(gmem->GetRealAddress()).AsUint();
+ const std::string base = Visit(gmem->GetBaseAddress()).AsUint();
+ const std::string final_offset = fmt::format("({} - {}) >> 2", real, base);
+ return {fmt::format("{}[{}]", GetGlobalMemory(gmem->GetDescriptor()), final_offset),
+ Type::Uint};
}
if (const auto lmem = std::get_if<LmemNode>(&*node)) {
- return fmt::format("{}[ftou({}) / 4]", GetLocalMemory(), Visit(lmem->GetAddress()));
+ if (stage == ProgramType::Compute) {
+ LOG_WARNING(Render_OpenGL, "Local memory is stubbed on compute shaders");
+ }
+ return {
+ fmt::format("{}[{} >> 2]", GetLocalMemory(), Visit(lmem->GetAddress()).AsUint()),
+ Type::Uint};
+ }
+
+ if (const auto smem = std::get_if<SmemNode>(&*node)) {
+ return {
+ fmt::format("{}[{} >> 2]", GetSharedMemory(), Visit(smem->GetAddress()).AsUint()),
+ Type::Uint};
}
if (const auto internal_flag = std::get_if<InternalFlagNode>(&*node)) {
- return GetInternalFlag(internal_flag->GetFlag());
+ return {GetInternalFlag(internal_flag->GetFlag()), Type::Bool};
}
if (const auto conditional = std::get_if<ConditionalNode>(&*node)) {
-            // It's invalid to call conditional on nested nodes, use an operation instead
+            // It's invalid to call a conditional on nested nodes; use an operation instead
- code.AddLine("if ({}) {{", Visit(conditional->GetCondition()));
+ code.AddLine("if ({}) {{", Visit(conditional->GetCondition()).AsBool());
++code.scope;
VisitBlock(conditional->GetCode());
@@ -629,20 +912,21 @@ private:
}
if (const auto comment = std::get_if<CommentNode>(&*node)) {
- return "// " + comment->GetText();
+ code.AddLine("// " + comment->GetText());
+ return {};
}
UNREACHABLE();
return {};
}
- std::string ReadAttribute(Attribute::Index attribute, u32 element, const Node& buffer = {}) {
+ Expression ReadAttribute(Attribute::Index attribute, u32 element, const Node& buffer = {}) {
const auto GeometryPass = [&](std::string_view name) {
- if (stage == ShaderStage::Geometry && buffer) {
+ if (stage == ProgramType::Geometry && buffer) {
-                // TODO(Rodrigo): Guard geometry inputs against out of bound reads. Some games
-                // set an 0x80000000 index for those and the shader fails to build. Find out why
-                // this happens and what's its intent.
+                // TODO(Rodrigo): Guard geometry inputs against out-of-bounds reads. Some games
+                // set a 0x80000000 index for those and the shader fails to build. Find out why
+                // this happens and what its intent is.
- return fmt::format("gs_{}[ftou({}) % MAX_VERTEX_INPUT]", name, Visit(buffer));
+ return fmt::format("gs_{}[{} % MAX_VERTEX_INPUT]", name, Visit(buffer).AsUint());
}
return std::string(name);
};
@@ -650,72 +934,79 @@ private:
switch (attribute) {
case Attribute::Index::Position:
switch (stage) {
- case ShaderStage::Geometry:
- return fmt::format("gl_in[ftou({})].gl_Position{}", Visit(buffer),
- GetSwizzle(element));
- case ShaderStage::Fragment:
- return element == 3 ? "1.0f" : ("gl_FragCoord"s + GetSwizzle(element));
+ case ProgramType::Geometry:
+ return {fmt::format("gl_in[{}].gl_Position{}", Visit(buffer).AsUint(),
+ GetSwizzle(element)),
+ Type::Float};
+ case ProgramType::Fragment:
+ return {element == 3 ? "1.0f" : ("gl_FragCoord"s + GetSwizzle(element)),
+ Type::Float};
default:
UNREACHABLE();
}
case Attribute::Index::PointCoord:
switch (element) {
case 0:
- return "gl_PointCoord.x";
+ return {"gl_PointCoord.x", Type::Float};
case 1:
- return "gl_PointCoord.y";
+ return {"gl_PointCoord.y", Type::Float};
case 2:
case 3:
- return "0";
+ return {"0.0f", Type::Float};
}
UNREACHABLE();
- return "0";
+ return {"0", Type::Int};
case Attribute::Index::TessCoordInstanceIDVertexID:
// TODO(Subv): Find out what the values are for the first two elements when inside a
// vertex shader, and what's the value of the fourth element when inside a Tess Eval
// shader.
- ASSERT(stage == ShaderStage::Vertex);
+ ASSERT(IsVertexShader(stage));
switch (element) {
case 2:
// Config pack's first value is instance_id.
- return "uintBitsToFloat(config_pack[0])";
+ return {"config_pack[0]", Type::Uint};
case 3:
- return "uintBitsToFloat(gl_VertexID)";
+ return {"gl_VertexID", Type::Int};
}
UNIMPLEMENTED_MSG("Unmanaged TessCoordInstanceIDVertexID element={}", element);
- return "0";
+ return {"0", Type::Int};
case Attribute::Index::FrontFacing:
// TODO(Subv): Find out what the values are for the other elements.
- ASSERT(stage == ShaderStage::Fragment);
+ ASSERT(stage == ProgramType::Fragment);
switch (element) {
case 3:
- return "itof(gl_FrontFacing ? -1 : 0)";
+ return {"(gl_FrontFacing ? -1 : 0)", Type::Int};
}
UNIMPLEMENTED_MSG("Unmanaged FrontFacing element={}", element);
- return "0";
+ return {"0", Type::Int};
default:
if (IsGenericAttribute(attribute)) {
- return GeometryPass(GetInputAttribute(attribute)) + GetSwizzle(element);
+ return {GeometryPass(GetInputAttribute(attribute)) + GetSwizzle(element),
+ Type::Float};
}
break;
}
UNIMPLEMENTED_MSG("Unhandled input attribute: {}", static_cast<u32>(attribute));
- return "0";
+ return {"0", Type::Int};
}
- std::string ApplyPrecise(Operation operation, const std::string& value) {
+ Expression ApplyPrecise(Operation operation, std::string value, Type type) {
if (!IsPrecise(operation)) {
- return value;
+ return {std::move(value), type};
}
- // There's a bug in NVidia's proprietary drivers that makes precise fail on fragment shaders
- const std::string precise = stage != ShaderStage::Fragment ? "precise " : "";
+        // Old Nvidia drivers have a bug where precise interacts badly with texture sampling.
+        // Texture sampling is most common in fragment shaders, so precise is disabled there on
+        // bugged drivers. Some vertex shaders fail to build for the same reason, but those are
+        // rare enough to ignore for now. Note: only bugged drivers skip precise.
+ const bool disable_precise = device.HasPreciseBug() && stage == ProgramType::Fragment;
- const std::string temporary = code.GenerateTemporary();
- code.AddLine("{}float {} = {};", precise, temporary, value);
- return temporary;
+ std::string temporary = code.GenerateTemporary();
+ code.AddLine("{}{} {} = {};", disable_precise ? "" : "precise ", GetTypeString(type),
+ temporary, value);
+ return {std::move(temporary), type};
}
- std::string VisitOperand(Operation operation, std::size_t operand_index) {
+ Expression VisitOperand(Operation operation, std::size_t operand_index) {
const auto& operand = operation[operand_index];
const bool parent_precise = IsPrecise(operation);
const bool child_precise = IsPrecise(operand);
@@ -724,102 +1015,98 @@ private:
return Visit(operand);
}
- const std::string temporary = code.GenerateTemporary();
- code.AddLine("float {} = {};", temporary, Visit(operand));
- return temporary;
- }
-
- std::string VisitOperand(Operation operation, std::size_t operand_index, Type type) {
- return CastOperand(VisitOperand(operation, operand_index), type);
- }
-
- std::string CastOperand(const std::string& value, Type type) const {
- switch (type) {
- case Type::Bool:
- case Type::Bool2:
- case Type::Float:
- return value;
- case Type::Int:
- return fmt::format("ftoi({})", value);
- case Type::Uint:
- return fmt::format("ftou({})", value);
- case Type::HalfFloat:
- return fmt::format("toHalf2({})", value);
- }
- UNREACHABLE();
- return value;
+ Expression value = Visit(operand);
+ std::string temporary = code.GenerateTemporary();
+ code.AddLine("{} {} = {};", GetTypeString(value.GetType()), temporary, value.GetCode());
+ return {std::move(temporary), value.GetType()};
}
- std::string BitwiseCastResult(const std::string& value, Type type,
- bool needs_parenthesis = false) {
- switch (type) {
- case Type::Bool:
- case Type::Bool2:
- case Type::Float:
- if (needs_parenthesis) {
- return fmt::format("({})", value);
+ std::optional<Expression> GetOutputAttribute(const AbufNode* abuf) {
+ switch (const auto attribute = abuf->GetIndex()) {
+ case Attribute::Index::Position:
+ return {{"gl_Position"s + GetSwizzle(abuf->GetElement()), Type::Float}};
+ case Attribute::Index::LayerViewportPointSize:
+ switch (abuf->GetElement()) {
+ case 0:
+ UNIMPLEMENTED();
+ return {};
+ case 1:
+ if (IsVertexShader(stage) && !device.HasVertexViewportLayer()) {
+ return {};
+ }
+ return {{"gl_Layer", Type::Int}};
+ case 2:
+ if (IsVertexShader(stage) && !device.HasVertexViewportLayer()) {
+ return {};
+ }
+ return {{"gl_ViewportIndex", Type::Int}};
+ case 3:
+ UNIMPLEMENTED_MSG("Requires some state changes for gl_PointSize to work in shader");
+ return {{"gl_PointSize", Type::Float}};
}
- return value;
- case Type::Int:
- return fmt::format("itof({})", value);
- case Type::Uint:
- return fmt::format("utof({})", value);
- case Type::HalfFloat:
- return fmt::format("fromHalf2({})", value);
+ return {};
+ case Attribute::Index::ClipDistances0123:
+ return {{fmt::format("gl_ClipDistance[{}]", abuf->GetElement()), Type::Float}};
+ case Attribute::Index::ClipDistances4567:
+ return {{fmt::format("gl_ClipDistance[{}]", abuf->GetElement() + 4), Type::Float}};
+ default:
+ if (IsGenericAttribute(attribute)) {
+ return {
+ {GetOutputAttribute(attribute) + GetSwizzle(abuf->GetElement()), Type::Float}};
+ }
+ UNIMPLEMENTED_MSG("Unhandled output attribute: {}", static_cast<u32>(attribute));
+ return {};
}
- UNREACHABLE();
- return value;
}
- std::string GenerateUnary(Operation operation, const std::string& func, Type result_type,
- Type type_a, bool needs_parenthesis = true) {
- const std::string op_str = fmt::format("{}({})", func, VisitOperand(operation, 0, type_a));
-
- return ApplyPrecise(operation, BitwiseCastResult(op_str, result_type, needs_parenthesis));
+ Expression GenerateUnary(Operation operation, std::string_view func, Type result_type,
+ Type type_a) {
+ std::string op_str = fmt::format("{}({})", func, VisitOperand(operation, 0).As(type_a));
+ return ApplyPrecise(operation, std::move(op_str), result_type);
}
- std::string GenerateBinaryInfix(Operation operation, const std::string& func, Type result_type,
- Type type_a, Type type_b) {
- const std::string op_a = VisitOperand(operation, 0, type_a);
- const std::string op_b = VisitOperand(operation, 1, type_b);
- const std::string op_str = fmt::format("({} {} {})", op_a, func, op_b);
+ Expression GenerateBinaryInfix(Operation operation, std::string_view func, Type result_type,
+ Type type_a, Type type_b) {
+ const std::string op_a = VisitOperand(operation, 0).As(type_a);
+ const std::string op_b = VisitOperand(operation, 1).As(type_b);
+ std::string op_str = fmt::format("({} {} {})", op_a, func, op_b);
- return ApplyPrecise(operation, BitwiseCastResult(op_str, result_type));
+ return ApplyPrecise(operation, std::move(op_str), result_type);
}
- std::string GenerateBinaryCall(Operation operation, const std::string& func, Type result_type,
- Type type_a, Type type_b) {
- const std::string op_a = VisitOperand(operation, 0, type_a);
- const std::string op_b = VisitOperand(operation, 1, type_b);
- const std::string op_str = fmt::format("{}({}, {})", func, op_a, op_b);
+ Expression GenerateBinaryCall(Operation operation, std::string_view func, Type result_type,
+ Type type_a, Type type_b) {
+ const std::string op_a = VisitOperand(operation, 0).As(type_a);
+ const std::string op_b = VisitOperand(operation, 1).As(type_b);
+ std::string op_str = fmt::format("{}({}, {})", func, op_a, op_b);
- return ApplyPrecise(operation, BitwiseCastResult(op_str, result_type));
+ return ApplyPrecise(operation, std::move(op_str), result_type);
}
- std::string GenerateTernary(Operation operation, const std::string& func, Type result_type,
- Type type_a, Type type_b, Type type_c) {
- const std::string op_a = VisitOperand(operation, 0, type_a);
- const std::string op_b = VisitOperand(operation, 1, type_b);
- const std::string op_c = VisitOperand(operation, 2, type_c);
- const std::string op_str = fmt::format("{}({}, {}, {})", func, op_a, op_b, op_c);
+ Expression GenerateTernary(Operation operation, std::string_view func, Type result_type,
+ Type type_a, Type type_b, Type type_c) {
+ const std::string op_a = VisitOperand(operation, 0).As(type_a);
+ const std::string op_b = VisitOperand(operation, 1).As(type_b);
+ const std::string op_c = VisitOperand(operation, 2).As(type_c);
+ std::string op_str = fmt::format("{}({}, {}, {})", func, op_a, op_b, op_c);
- return ApplyPrecise(operation, BitwiseCastResult(op_str, result_type));
+ return ApplyPrecise(operation, std::move(op_str), result_type);
}
- std::string GenerateQuaternary(Operation operation, const std::string& func, Type result_type,
- Type type_a, Type type_b, Type type_c, Type type_d) {
- const std::string op_a = VisitOperand(operation, 0, type_a);
- const std::string op_b = VisitOperand(operation, 1, type_b);
- const std::string op_c = VisitOperand(operation, 2, type_c);
- const std::string op_d = VisitOperand(operation, 3, type_d);
- const std::string op_str = fmt::format("{}({}, {}, {}, {})", func, op_a, op_b, op_c, op_d);
+ Expression GenerateQuaternary(Operation operation, const std::string& func, Type result_type,
+ Type type_a, Type type_b, Type type_c, Type type_d) {
+ const std::string op_a = VisitOperand(operation, 0).As(type_a);
+ const std::string op_b = VisitOperand(operation, 1).As(type_b);
+ const std::string op_c = VisitOperand(operation, 2).As(type_c);
+ const std::string op_d = VisitOperand(operation, 3).As(type_d);
+ std::string op_str = fmt::format("{}({}, {}, {}, {})", func, op_a, op_b, op_c, op_d);
- return ApplyPrecise(operation, BitwiseCastResult(op_str, result_type));
+ return ApplyPrecise(operation, std::move(op_str), result_type);
}
std::string GenerateTexture(Operation operation, const std::string& function_suffix,
const std::vector<TextureIR>& extras) {
- constexpr std::array<const char*, 4> coord_constructors = {"float", "vec2", "vec3", "vec4"};
+ constexpr std::array coord_constructors = {"float", "vec2", "vec3", "vec4"};
const auto meta = std::get_if<MetaTexture>(&operation.GetMeta());
ASSERT(meta);
@@ -836,17 +1123,17 @@ private:
expr += coord_constructors.at(count + (has_array ? 1 : 0) + (has_shadow ? 1 : 0) - 1);
expr += '(';
for (std::size_t i = 0; i < count; ++i) {
- expr += Visit(operation[i]);
+ expr += Visit(operation[i]).AsFloat();
const std::size_t next = i + 1;
if (next < count)
expr += ", ";
}
if (has_array) {
- expr += ", float(ftoi(" + Visit(meta->array) + "))";
+ expr += ", float(" + Visit(meta->array).AsInt() + ')';
}
if (has_shadow) {
- expr += ", " + Visit(meta->depth_compare);
+ expr += ", " + Visit(meta->depth_compare).AsFloat();
}
expr += ')';
@@ -877,11 +1164,11 @@ private:
// required to be constant)
expr += std::to_string(static_cast<s32>(immediate->GetValue()));
} else {
- expr += fmt::format("ftoi({})", Visit(operand));
+ expr += Visit(operand).AsInt();
}
break;
case Type::Float:
- expr += Visit(operand);
+ expr += Visit(operand).AsFloat();
break;
default: {
const auto type_int = static_cast<u32>(type);
@@ -897,7 +1184,7 @@ private:
if (aoffi.empty()) {
return {};
}
- constexpr std::array<const char*, 3> coord_constructors = {"int", "ivec2", "ivec3"};
+ constexpr std::array coord_constructors = {"int", "ivec2", "ivec3"};
std::string expr = ", ";
expr += coord_constructors.at(aoffi.size() - 1);
expr += '(';
@@ -910,7 +1197,7 @@ private:
expr += std::to_string(static_cast<s32>(immediate->GetValue()));
} else if (device.HasVariableAoffi()) {
// Avoid using variable AOFFI on unsupported devices.
- expr += fmt::format("ftoi({})", Visit(operand));
+ expr += Visit(operand).AsInt();
} else {
// Insert 0 on devices not supporting variable AOFFI.
expr += '0';
@@ -924,318 +1211,391 @@ private:
return expr;
}
- std::string Assign(Operation operation) {
+ std::string BuildIntegerCoordinates(Operation operation) {
+ constexpr std::array constructors{"int(", "ivec2(", "ivec3(", "ivec4("};
+ const std::size_t coords_count{operation.GetOperandsCount()};
+ std::string expr = constructors.at(coords_count - 1);
+ for (std::size_t i = 0; i < coords_count; ++i) {
+ expr += VisitOperand(operation, i).AsInt();
+ if (i + 1 < coords_count) {
+ expr += ", ";
+ }
+ }
+ expr += ')';
+ return expr;
+ }
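+    // e.g. two coordinate operands yield "ivec2(<x>, <y>)".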
+
+ std::string BuildImageValues(Operation operation) {
+ const auto meta{std::get<MetaImage>(operation.GetMeta())};
+ const auto [constructors, type] = [&]() -> std::pair<std::array<const char*, 4>, Type> {
+ constexpr std::array float_constructors{"float", "vec2", "vec3", "vec4"};
+ if (!meta.image.IsSizeKnown()) {
+ return {float_constructors, Type::Float};
+ }
+ switch (meta.image.GetSize()) {
+ case Tegra::Shader::ImageAtomicSize::U32:
+ return {{"uint", "uvec2", "uvec3", "uvec4"}, Type::Uint};
+ case Tegra::Shader::ImageAtomicSize::S32:
+ return {{"int", "ivec2", "ivec3", "ivec4"}, Type::Uint};
+ default:
+ UNIMPLEMENTED_MSG("Unimplemented image size={}",
+ static_cast<u32>(meta.image.GetSize()));
+ return {float_constructors, Type::Float};
+ }
+ }();
+
+ const std::size_t values_count{meta.values.size()};
+ std::string expr = fmt::format("{}(", constructors.at(values_count - 1));
+ for (std::size_t i = 0; i < values_count; ++i) {
+ expr += Visit(meta.values.at(i)).As(type);
+ if (i + 1 < values_count) {
+ expr += ", ";
+ }
+ }
+ expr += ')';
+ return expr;
+ }
+
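+    // Emits an imageAtomic* call, casting the single value operand to the image's atomic size.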
+ Expression AtomicImage(Operation operation, const char* opname) {
+ constexpr std::array constructors{"int(", "ivec2(", "ivec3(", "ivec4("};
+ const auto meta{std::get<MetaImage>(operation.GetMeta())};
+ ASSERT(meta.values.size() == 1);
+ ASSERT(meta.image.IsSizeKnown());
+
+ const auto type = [&]() {
+ switch (const auto size = meta.image.GetSize()) {
+ case Tegra::Shader::ImageAtomicSize::U32:
+ return Type::Uint;
+ case Tegra::Shader::ImageAtomicSize::S32:
+ return Type::Int;
+ default:
+ UNIMPLEMENTED_MSG("Unimplemented image size={}", static_cast<u32>(size));
+ return Type::Uint;
+ }
+ }();
+
+ return {fmt::format("{}({}, {}, {})", opname, GetImage(meta.image),
+ BuildIntegerCoordinates(operation), Visit(meta.values[0]).As(type)),
+ type};
+ }
+
+ Expression Assign(Operation operation) {
const Node& dest = operation[0];
const Node& src = operation[1];
- std::string target;
+ Expression target;
if (const auto gpr = std::get_if<GprNode>(&*dest)) {
if (gpr->GetIndex() == Register::ZeroIndex) {
// Writing to Register::ZeroIndex is a no op
return {};
}
- target = GetRegister(gpr->GetIndex());
+ target = {GetRegister(gpr->GetIndex()), Type::Float};
} else if (const auto abuf = std::get_if<AbufNode>(&*dest)) {
UNIMPLEMENTED_IF(abuf->IsPhysicalBuffer());
-
- target = [&]() -> std::string {
- switch (const auto attribute = abuf->GetIndex(); abuf->GetIndex()) {
- case Attribute::Index::Position:
- return "gl_Position"s + GetSwizzle(abuf->GetElement());
- case Attribute::Index::PointSize:
- return "gl_PointSize";
- case Attribute::Index::ClipDistances0123:
- return fmt::format("gl_ClipDistance[{}]", abuf->GetElement());
- case Attribute::Index::ClipDistances4567:
- return fmt::format("gl_ClipDistance[{}]", abuf->GetElement() + 4);
- default:
- if (IsGenericAttribute(attribute)) {
- return GetOutputAttribute(attribute) + GetSwizzle(abuf->GetElement());
- }
- UNIMPLEMENTED_MSG("Unhandled output attribute: {}",
- static_cast<u32>(attribute));
- return "0";
- }
- }();
+ auto output = GetOutputAttribute(abuf);
+ if (!output) {
+ return {};
+ }
+ target = std::move(*output);
} else if (const auto lmem = std::get_if<LmemNode>(&*dest)) {
- target = fmt::format("{}[ftou({}) / 4]", GetLocalMemory(), Visit(lmem->GetAddress()));
+ if (stage == ProgramType::Compute) {
+ LOG_WARNING(Render_OpenGL, "Local memory is stubbed on compute shaders");
+ }
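+            // Local memory is a uint array; ">> 2" converts the byte address to a word index.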
+ target = {
+ fmt::format("{}[{} >> 2]", GetLocalMemory(), Visit(lmem->GetAddress()).AsUint()),
+ Type::Uint};
+ } else if (const auto smem = std::get_if<SmemNode>(&*dest)) {
+ ASSERT(stage == ProgramType::Compute);
+ target = {
+ fmt::format("{}[{} >> 2]", GetSharedMemory(), Visit(smem->GetAddress()).AsUint()),
+ Type::Uint};
} else if (const auto gmem = std::get_if<GmemNode>(&*dest)) {
- const std::string real = Visit(gmem->GetRealAddress());
- const std::string base = Visit(gmem->GetBaseAddress());
- const std::string final_offset = fmt::format("(ftou({}) - ftou({})) / 4", real, base);
- target = fmt::format("{}[{}]", GetGlobalMemory(gmem->GetDescriptor()), final_offset);
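+            // Subtract the base address and shift to index the 32-bit global memory array.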
+ const std::string real = Visit(gmem->GetRealAddress()).AsUint();
+ const std::string base = Visit(gmem->GetBaseAddress()).AsUint();
+ const std::string final_offset = fmt::format("({} - {}) >> 2", real, base);
+ target = {fmt::format("{}[{}]", GetGlobalMemory(gmem->GetDescriptor()), final_offset),
+ Type::Uint};
} else {
UNREACHABLE_MSG("Assign called without a proper target");
}
- code.AddLine("{} = {};", target, Visit(src));
+ code.AddLine("{} = {};", target.GetCode(), Visit(src).As(target.GetType()));
return {};
}
template <Type type>
- std::string Add(Operation operation) {
+ Expression Add(Operation operation) {
return GenerateBinaryInfix(operation, "+", type, type, type);
}
template <Type type>
- std::string Mul(Operation operation) {
+ Expression Mul(Operation operation) {
return GenerateBinaryInfix(operation, "*", type, type, type);
}
template <Type type>
- std::string Div(Operation operation) {
+ Expression Div(Operation operation) {
return GenerateBinaryInfix(operation, "/", type, type, type);
}
template <Type type>
- std::string Fma(Operation operation) {
+ Expression Fma(Operation operation) {
return GenerateTernary(operation, "fma", type, type, type, type);
}
template <Type type>
- std::string Negate(Operation operation) {
- return GenerateUnary(operation, "-", type, type, true);
+ Expression Negate(Operation operation) {
+ return GenerateUnary(operation, "-", type, type);
}
template <Type type>
- std::string Absolute(Operation operation) {
- return GenerateUnary(operation, "abs", type, type, false);
+ Expression Absolute(Operation operation) {
+ return GenerateUnary(operation, "abs", type, type);
}
- std::string FClamp(Operation operation) {
+ Expression FClamp(Operation operation) {
return GenerateTernary(operation, "clamp", Type::Float, Type::Float, Type::Float,
Type::Float);
}
+ Expression FCastHalf0(Operation operation) {
+ return {fmt::format("({})[0]", VisitOperand(operation, 0).AsHalfFloat()), Type::Float};
+ }
+
+ Expression FCastHalf1(Operation operation) {
+ return {fmt::format("({})[1]", VisitOperand(operation, 0).AsHalfFloat()), Type::Float};
+ }
+
template <Type type>
- std::string Min(Operation operation) {
+ Expression Min(Operation operation) {
return GenerateBinaryCall(operation, "min", type, type, type);
}
template <Type type>
- std::string Max(Operation operation) {
+ Expression Max(Operation operation) {
return GenerateBinaryCall(operation, "max", type, type, type);
}
- std::string Select(Operation operation) {
- const std::string condition = Visit(operation[0]);
- const std::string true_case = Visit(operation[1]);
- const std::string false_case = Visit(operation[2]);
- const std::string op_str = fmt::format("({} ? {} : {})", condition, true_case, false_case);
+ Expression Select(Operation operation) {
+ const std::string condition = Visit(operation[0]).AsBool();
+ const std::string true_case = Visit(operation[1]).AsUint();
+ const std::string false_case = Visit(operation[2]).AsUint();
+ std::string op_str = fmt::format("({} ? {} : {})", condition, true_case, false_case);
- return ApplyPrecise(operation, op_str);
+ return ApplyPrecise(operation, std::move(op_str), Type::Uint);
}
- std::string FCos(Operation operation) {
- return GenerateUnary(operation, "cos", Type::Float, Type::Float, false);
+ Expression FCos(Operation operation) {
+ return GenerateUnary(operation, "cos", Type::Float, Type::Float);
}
- std::string FSin(Operation operation) {
- return GenerateUnary(operation, "sin", Type::Float, Type::Float, false);
+ Expression FSin(Operation operation) {
+ return GenerateUnary(operation, "sin", Type::Float, Type::Float);
}
- std::string FExp2(Operation operation) {
- return GenerateUnary(operation, "exp2", Type::Float, Type::Float, false);
+ Expression FExp2(Operation operation) {
+ return GenerateUnary(operation, "exp2", Type::Float, Type::Float);
}
- std::string FLog2(Operation operation) {
- return GenerateUnary(operation, "log2", Type::Float, Type::Float, false);
+ Expression FLog2(Operation operation) {
+ return GenerateUnary(operation, "log2", Type::Float, Type::Float);
}
- std::string FInverseSqrt(Operation operation) {
- return GenerateUnary(operation, "inversesqrt", Type::Float, Type::Float, false);
+ Expression FInverseSqrt(Operation operation) {
+ return GenerateUnary(operation, "inversesqrt", Type::Float, Type::Float);
}
- std::string FSqrt(Operation operation) {
- return GenerateUnary(operation, "sqrt", Type::Float, Type::Float, false);
+ Expression FSqrt(Operation operation) {
+ return GenerateUnary(operation, "sqrt", Type::Float, Type::Float);
}
- std::string FRoundEven(Operation operation) {
- return GenerateUnary(operation, "roundEven", Type::Float, Type::Float, false);
+ Expression FRoundEven(Operation operation) {
+ return GenerateUnary(operation, "roundEven", Type::Float, Type::Float);
}
- std::string FFloor(Operation operation) {
- return GenerateUnary(operation, "floor", Type::Float, Type::Float, false);
+ Expression FFloor(Operation operation) {
+ return GenerateUnary(operation, "floor", Type::Float, Type::Float);
}
- std::string FCeil(Operation operation) {
- return GenerateUnary(operation, "ceil", Type::Float, Type::Float, false);
+ Expression FCeil(Operation operation) {
+ return GenerateUnary(operation, "ceil", Type::Float, Type::Float);
}
- std::string FTrunc(Operation operation) {
- return GenerateUnary(operation, "trunc", Type::Float, Type::Float, false);
+ Expression FTrunc(Operation operation) {
+ return GenerateUnary(operation, "trunc", Type::Float, Type::Float);
}
template <Type type>
- std::string FCastInteger(Operation operation) {
- return GenerateUnary(operation, "float", Type::Float, type, false);
+ Expression FCastInteger(Operation operation) {
+ return GenerateUnary(operation, "float", Type::Float, type);
}
- std::string ICastFloat(Operation operation) {
- return GenerateUnary(operation, "int", Type::Int, Type::Float, false);
+ Expression ICastFloat(Operation operation) {
+ return GenerateUnary(operation, "int", Type::Int, Type::Float);
}
- std::string ICastUnsigned(Operation operation) {
- return GenerateUnary(operation, "int", Type::Int, Type::Uint, false);
+ Expression ICastUnsigned(Operation operation) {
+ return GenerateUnary(operation, "int", Type::Int, Type::Uint);
}
template <Type type>
- std::string LogicalShiftLeft(Operation operation) {
+ Expression LogicalShiftLeft(Operation operation) {
return GenerateBinaryInfix(operation, "<<", type, type, Type::Uint);
}
- std::string ILogicalShiftRight(Operation operation) {
- const std::string op_a = VisitOperand(operation, 0, Type::Uint);
- const std::string op_b = VisitOperand(operation, 1, Type::Uint);
- const std::string op_str = fmt::format("int({} >> {})", op_a, op_b);
+ Expression ILogicalShiftRight(Operation operation) {
+ const std::string op_a = VisitOperand(operation, 0).AsUint();
+ const std::string op_b = VisitOperand(operation, 1).AsUint();
+ std::string op_str = fmt::format("int({} >> {})", op_a, op_b);
- return ApplyPrecise(operation, BitwiseCastResult(op_str, Type::Int));
+ return ApplyPrecise(operation, std::move(op_str), Type::Int);
}
- std::string IArithmeticShiftRight(Operation operation) {
+ Expression IArithmeticShiftRight(Operation operation) {
return GenerateBinaryInfix(operation, ">>", Type::Int, Type::Int, Type::Uint);
}
template <Type type>
- std::string BitwiseAnd(Operation operation) {
+ Expression BitwiseAnd(Operation operation) {
return GenerateBinaryInfix(operation, "&", type, type, type);
}
template <Type type>
- std::string BitwiseOr(Operation operation) {
+ Expression BitwiseOr(Operation operation) {
return GenerateBinaryInfix(operation, "|", type, type, type);
}
template <Type type>
- std::string BitwiseXor(Operation operation) {
+ Expression BitwiseXor(Operation operation) {
return GenerateBinaryInfix(operation, "^", type, type, type);
}
template <Type type>
- std::string BitwiseNot(Operation operation) {
- return GenerateUnary(operation, "~", type, type, false);
+ Expression BitwiseNot(Operation operation) {
+ return GenerateUnary(operation, "~", type, type);
}
- std::string UCastFloat(Operation operation) {
- return GenerateUnary(operation, "uint", Type::Uint, Type::Float, false);
+ Expression UCastFloat(Operation operation) {
+ return GenerateUnary(operation, "uint", Type::Uint, Type::Float);
}
- std::string UCastSigned(Operation operation) {
- return GenerateUnary(operation, "uint", Type::Uint, Type::Int, false);
+ Expression UCastSigned(Operation operation) {
+ return GenerateUnary(operation, "uint", Type::Uint, Type::Int);
}
- std::string UShiftRight(Operation operation) {
+ Expression UShiftRight(Operation operation) {
return GenerateBinaryInfix(operation, ">>", Type::Uint, Type::Uint, Type::Uint);
}
template <Type type>
- std::string BitfieldInsert(Operation operation) {
+ Expression BitfieldInsert(Operation operation) {
return GenerateQuaternary(operation, "bitfieldInsert", type, type, type, Type::Int,
Type::Int);
}
template <Type type>
- std::string BitfieldExtract(Operation operation) {
+ Expression BitfieldExtract(Operation operation) {
return GenerateTernary(operation, "bitfieldExtract", type, type, Type::Int, Type::Int);
}
template <Type type>
- std::string BitCount(Operation operation) {
- return GenerateUnary(operation, "bitCount", type, type, false);
+ Expression BitCount(Operation operation) {
+ return GenerateUnary(operation, "bitCount", type, type);
}
- std::string HNegate(Operation operation) {
+ Expression HNegate(Operation operation) {
const auto GetNegate = [&](std::size_t index) {
- return VisitOperand(operation, index, Type::Bool) + " ? -1 : 1";
+ return VisitOperand(operation, index).AsBool() + " ? -1 : 1";
};
- const std::string value =
- fmt::format("({} * vec2({}, {}))", VisitOperand(operation, 0, Type::HalfFloat),
- GetNegate(1), GetNegate(2));
- return BitwiseCastResult(value, Type::HalfFloat);
- }
-
- std::string HClamp(Operation operation) {
- const std::string value = VisitOperand(operation, 0, Type::HalfFloat);
- const std::string min = VisitOperand(operation, 1, Type::Float);
- const std::string max = VisitOperand(operation, 2, Type::Float);
- const std::string clamped = fmt::format("clamp({}, vec2({}), vec2({}))", value, min, max);
-
- return ApplyPrecise(operation, BitwiseCastResult(clamped, Type::HalfFloat));
- }
-
- std::string HUnpack(Operation operation) {
- const std::string operand{VisitOperand(operation, 0, Type::HalfFloat)};
- const auto value = [&]() -> std::string {
- switch (std::get<Tegra::Shader::HalfType>(operation.GetMeta())) {
- case Tegra::Shader::HalfType::H0_H1:
- return operand;
- case Tegra::Shader::HalfType::F32:
- return fmt::format("vec2(fromHalf2({}))", operand);
- case Tegra::Shader::HalfType::H0_H0:
- return fmt::format("vec2({}[0])", operand);
- case Tegra::Shader::HalfType::H1_H1:
- return fmt::format("vec2({}[1])", operand);
- }
- UNREACHABLE();
- return "0";
- }();
- return fmt::format("fromHalf2({})", value);
+ return {fmt::format("({} * vec2({}, {}))", VisitOperand(operation, 0).AsHalfFloat(),
+ GetNegate(1), GetNegate(2)),
+ Type::HalfFloat};
}
- std::string HMergeF32(Operation operation) {
- return fmt::format("float(toHalf2({})[0])", Visit(operation[0]));
+ Expression HClamp(Operation operation) {
+ const std::string value = VisitOperand(operation, 0).AsHalfFloat();
+ const std::string min = VisitOperand(operation, 1).AsFloat();
+ const std::string max = VisitOperand(operation, 2).AsFloat();
+ std::string clamped = fmt::format("clamp({}, vec2({}), vec2({}))", value, min, max);
+
+ return ApplyPrecise(operation, std::move(clamped), Type::HalfFloat);
}
- std::string HMergeH0(Operation operation) {
- return fmt::format("fromHalf2(vec2(toHalf2({})[0], toHalf2({})[1]))", Visit(operation[1]),
- Visit(operation[0]));
+ Expression HCastFloat(Operation operation) {
+ return {fmt::format("vec2({})", VisitOperand(operation, 0).AsFloat()), Type::HalfFloat};
}
- std::string HMergeH1(Operation operation) {
- return fmt::format("fromHalf2(vec2(toHalf2({})[0], toHalf2({})[1]))", Visit(operation[0]),
- Visit(operation[1]));
+ Expression HUnpack(Operation operation) {
+ Expression operand = VisitOperand(operation, 0);
+ switch (std::get<Tegra::Shader::HalfType>(operation.GetMeta())) {
+ case Tegra::Shader::HalfType::H0_H1:
+ return operand;
+ case Tegra::Shader::HalfType::F32:
+ return {fmt::format("vec2({})", operand.AsFloat()), Type::HalfFloat};
+ case Tegra::Shader::HalfType::H0_H0:
+ return {fmt::format("vec2({}[0])", operand.AsHalfFloat()), Type::HalfFloat};
+ case Tegra::Shader::HalfType::H1_H1:
+ return {fmt::format("vec2({}[1])", operand.AsHalfFloat()), Type::HalfFloat};
+        }
+        UNREACHABLE();
+        return {"0", Type::Int};
}
- std::string HPack2(Operation operation) {
- return fmt::format("utof(packHalf2x16(vec2({}, {})))", Visit(operation[0]),
- Visit(operation[1]));
+ Expression HMergeF32(Operation operation) {
+ return {fmt::format("float({}[0])", VisitOperand(operation, 0).AsHalfFloat()), Type::Float};
+ }
+
+ Expression HMergeH0(Operation operation) {
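+        // Take the low 16 bits (H0) from src and keep dest's high 16 bits.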
+ std::string dest = VisitOperand(operation, 0).AsUint();
+ std::string src = VisitOperand(operation, 1).AsUint();
+ return {fmt::format("(({} & 0x0000FFFFU) | ({} & 0xFFFF0000U))", src, dest), Type::Uint};
+ }
+
+ Expression HMergeH1(Operation operation) {
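+        // Keep dest's low 16 bits and take the high 16 bits (H1) from src.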
+ std::string dest = VisitOperand(operation, 0).AsUint();
+ std::string src = VisitOperand(operation, 1).AsUint();
+ return {fmt::format("(({} & 0x0000FFFFU) | ({} & 0xFFFF0000U))", dest, src), Type::Uint};
+ }
+
+ Expression HPack2(Operation operation) {
+ return {fmt::format("vec2({}, {})", VisitOperand(operation, 0).AsFloat(),
+ VisitOperand(operation, 1).AsFloat()),
+ Type::HalfFloat};
}
template <Type type>
- std::string LogicalLessThan(Operation operation) {
+ Expression LogicalLessThan(Operation operation) {
return GenerateBinaryInfix(operation, "<", Type::Bool, type, type);
}
template <Type type>
- std::string LogicalEqual(Operation operation) {
+ Expression LogicalEqual(Operation operation) {
return GenerateBinaryInfix(operation, "==", Type::Bool, type, type);
}
template <Type type>
- std::string LogicalLessEqual(Operation operation) {
+ Expression LogicalLessEqual(Operation operation) {
return GenerateBinaryInfix(operation, "<=", Type::Bool, type, type);
}
template <Type type>
- std::string LogicalGreaterThan(Operation operation) {
+ Expression LogicalGreaterThan(Operation operation) {
return GenerateBinaryInfix(operation, ">", Type::Bool, type, type);
}
template <Type type>
- std::string LogicalNotEqual(Operation operation) {
+ Expression LogicalNotEqual(Operation operation) {
return GenerateBinaryInfix(operation, "!=", Type::Bool, type, type);
}
template <Type type>
- std::string LogicalGreaterEqual(Operation operation) {
+ Expression LogicalGreaterEqual(Operation operation) {
return GenerateBinaryInfix(operation, ">=", Type::Bool, type, type);
}
- std::string LogicalFIsNan(Operation operation) {
- return GenerateUnary(operation, "isnan", Type::Bool, Type::Float, false);
+ Expression LogicalFIsNan(Operation operation) {
+ return GenerateUnary(operation, "isnan", Type::Bool, Type::Float);
}
- std::string LogicalAssign(Operation operation) {
+ Expression LogicalAssign(Operation operation) {
const Node& dest = operation[0];
const Node& src = operation[1];
@@ -1256,82 +1616,80 @@ private:
target = GetInternalFlag(flag->GetFlag());
}
- code.AddLine("{} = {};", target, Visit(src));
+ code.AddLine("{} = {};", target, Visit(src).AsBool());
return {};
}
- std::string LogicalAnd(Operation operation) {
+ Expression LogicalAnd(Operation operation) {
return GenerateBinaryInfix(operation, "&&", Type::Bool, Type::Bool, Type::Bool);
}
- std::string LogicalOr(Operation operation) {
+ Expression LogicalOr(Operation operation) {
return GenerateBinaryInfix(operation, "||", Type::Bool, Type::Bool, Type::Bool);
}
- std::string LogicalXor(Operation operation) {
+ Expression LogicalXor(Operation operation) {
return GenerateBinaryInfix(operation, "^^", Type::Bool, Type::Bool, Type::Bool);
}
- std::string LogicalNegate(Operation operation) {
- return GenerateUnary(operation, "!", Type::Bool, Type::Bool, false);
+ Expression LogicalNegate(Operation operation) {
+ return GenerateUnary(operation, "!", Type::Bool, Type::Bool);
}
- std::string LogicalPick2(Operation operation) {
- const std::string pair = VisitOperand(operation, 0, Type::Bool2);
- return fmt::format("{}[{}]", pair, VisitOperand(operation, 1, Type::Uint));
+ Expression LogicalPick2(Operation operation) {
+ return {fmt::format("{}[{}]", VisitOperand(operation, 0).AsBool2(),
+ VisitOperand(operation, 1).AsUint()),
+ Type::Bool};
}
- std::string LogicalAll2(Operation operation) {
+ Expression LogicalAnd2(Operation operation) {
return GenerateUnary(operation, "all", Type::Bool, Type::Bool2);
}
- std::string LogicalAny2(Operation operation) {
- return GenerateUnary(operation, "any", Type::Bool, Type::Bool2);
- }
-
template <bool with_nan>
- std::string GenerateHalfComparison(Operation operation, const std::string& compare_op) {
- const std::string comparison{GenerateBinaryCall(operation, compare_op, Type::Bool2,
- Type::HalfFloat, Type::HalfFloat)};
+ Expression GenerateHalfComparison(Operation operation, std::string_view compare_op) {
+ Expression comparison = GenerateBinaryCall(operation, compare_op, Type::Bool2,
+ Type::HalfFloat, Type::HalfFloat);
if constexpr (!with_nan) {
return comparison;
}
- return fmt::format("halfFloatNanComparison({}, {}, {})", comparison,
- VisitOperand(operation, 0, Type::HalfFloat),
- VisitOperand(operation, 1, Type::HalfFloat));
+ return {fmt::format("HalfFloatNanComparison({}, {}, {})", comparison.AsBool2(),
+ VisitOperand(operation, 0).AsHalfFloat(),
+ VisitOperand(operation, 1).AsHalfFloat()),
+ Type::Bool2};
}
template <bool with_nan>
- std::string Logical2HLessThan(Operation operation) {
+ Expression Logical2HLessThan(Operation operation) {
return GenerateHalfComparison<with_nan>(operation, "lessThan");
}
template <bool with_nan>
- std::string Logical2HEqual(Operation operation) {
+ Expression Logical2HEqual(Operation operation) {
return GenerateHalfComparison<with_nan>(operation, "equal");
}
template <bool with_nan>
- std::string Logical2HLessEqual(Operation operation) {
+ Expression Logical2HLessEqual(Operation operation) {
return GenerateHalfComparison<with_nan>(operation, "lessThanEqual");
}
template <bool with_nan>
- std::string Logical2HGreaterThan(Operation operation) {
+ Expression Logical2HGreaterThan(Operation operation) {
return GenerateHalfComparison<with_nan>(operation, "greaterThan");
}
template <bool with_nan>
- std::string Logical2HNotEqual(Operation operation) {
+ Expression Logical2HNotEqual(Operation operation) {
return GenerateHalfComparison<with_nan>(operation, "notEqual");
}
template <bool with_nan>
- std::string Logical2HGreaterEqual(Operation operation) {
+ Expression Logical2HGreaterEqual(Operation operation) {
return GenerateHalfComparison<with_nan>(operation, "greaterThanEqual");
}
- std::string Texture(Operation operation) {
+ Expression Texture(Operation operation) {
const auto meta = std::get_if<MetaTexture>(&operation.GetMeta());
ASSERT(meta);
@@ -1340,10 +1698,10 @@ private:
if (meta->sampler.IsShadow()) {
expr = "vec4(" + expr + ')';
}
- return expr + GetSwizzle(meta->element);
+ return {expr + GetSwizzle(meta->element), Type::Float};
}
- std::string TextureLod(Operation operation) {
+ Expression TextureLod(Operation operation) {
const auto meta = std::get_if<MetaTexture>(&operation.GetMeta());
ASSERT(meta);
@@ -1352,54 +1710,54 @@ private:
if (meta->sampler.IsShadow()) {
expr = "vec4(" + expr + ')';
}
- return expr + GetSwizzle(meta->element);
+ return {expr + GetSwizzle(meta->element), Type::Float};
}
- std::string TextureGather(Operation operation) {
+ Expression TextureGather(Operation operation) {
const auto meta = std::get_if<MetaTexture>(&operation.GetMeta());
ASSERT(meta);
const auto type = meta->sampler.IsShadow() ? Type::Float : Type::Int;
- return GenerateTexture(operation, "Gather",
- {TextureArgument{type, meta->component}, TextureAoffi{}}) +
- GetSwizzle(meta->element);
+ return {GenerateTexture(operation, "Gather",
+ {TextureArgument{type, meta->component}, TextureAoffi{}}) +
+ GetSwizzle(meta->element),
+ Type::Float};
}
- std::string TextureQueryDimensions(Operation operation) {
+ Expression TextureQueryDimensions(Operation operation) {
const auto meta = std::get_if<MetaTexture>(&operation.GetMeta());
ASSERT(meta);
const std::string sampler = GetSampler(meta->sampler);
- const std::string lod = VisitOperand(operation, 0, Type::Int);
+ const std::string lod = VisitOperand(operation, 0).AsInt();
switch (meta->element) {
case 0:
case 1:
- return fmt::format("itof(int(textureSize({}, {}){}))", sampler, lod,
- GetSwizzle(meta->element));
- case 2:
- return "0";
+ return {fmt::format("textureSize({}, {}){}", sampler, lod, GetSwizzle(meta->element)),
+ Type::Int};
case 3:
- return fmt::format("itof(textureQueryLevels({}))", sampler);
+ return {fmt::format("textureQueryLevels({})", sampler), Type::Int};
}
UNREACHABLE();
- return "0";
+ return {"0", Type::Int};
}
- std::string TextureQueryLod(Operation operation) {
+ Expression TextureQueryLod(Operation operation) {
const auto meta = std::get_if<MetaTexture>(&operation.GetMeta());
ASSERT(meta);
if (meta->element < 2) {
- return fmt::format("itof(int(({} * vec2(256)){}))",
- GenerateTexture(operation, "QueryLod", {}),
- GetSwizzle(meta->element));
+ return {fmt::format("int(({} * vec2(256)){})",
+ GenerateTexture(operation, "QueryLod", {}),
+ GetSwizzle(meta->element)),
+ Type::Int};
}
- return "0";
+ return {"0", Type::Int};
}
- std::string TexelFetch(Operation operation) {
- constexpr std::array<const char*, 4> constructors = {"int", "ivec2", "ivec3", "ivec4"};
+ Expression TexelFetch(Operation operation) {
+ constexpr std::array constructors = {"int", "ivec2", "ivec3", "ivec4"};
const auto meta = std::get_if<MetaTexture>(&operation.GetMeta());
ASSERT(meta);
UNIMPLEMENTED_IF(meta->sampler.IsArray());
@@ -1412,57 +1770,117 @@ private:
expr += constructors.at(operation.GetOperandsCount() - 1);
expr += '(';
for (std::size_t i = 0; i < count; ++i) {
- expr += VisitOperand(operation, i, Type::Int);
+ expr += VisitOperand(operation, i).AsInt();
const std::size_t next = i + 1;
if (next == count)
expr += ')';
else if (next < count)
expr += ", ";
}
+
+ // Store a copy of the expression without the lod to be used with texture buffers
+ std::string expr_buffer = expr;
+
if (meta->lod) {
expr += ", ";
- expr += CastOperand(Visit(meta->lod), Type::Int);
+ expr += Visit(meta->lod).AsInt();
}
expr += ')';
+ expr += GetSwizzle(meta->element);
+
+ expr_buffer += ')';
+ expr_buffer += GetSwizzle(meta->element);
+
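+        // When the sampler is really a texture buffer the lod argument is invalid, so emit both
+        // variants and let the preprocessor pick one via SAMPLER_x_IS_BUFFER.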
+ const std::string tmp{code.GenerateTemporary()};
+ EmitIfdefIsBuffer(meta->sampler);
+ code.AddLine("float {} = {};", tmp, expr_buffer);
+ code.AddLine("#else");
+ code.AddLine("float {} = {};", tmp, expr);
+ code.AddLine("#endif");
- return expr + GetSwizzle(meta->element);
+ return {tmp, Type::Float};
}
- std::string Branch(Operation operation) {
+ Expression ImageStore(Operation operation) {
+ const auto meta{std::get<MetaImage>(operation.GetMeta())};
+ code.AddLine("imageStore({}, {}, {});", GetImage(meta.image),
+ BuildIntegerCoordinates(operation), BuildImageValues(operation));
+ return {};
+ }
+
+ Expression AtomicImageAdd(Operation operation) {
+ return AtomicImage(operation, "imageAtomicAdd");
+ }
+
+ Expression AtomicImageMin(Operation operation) {
+ return AtomicImage(operation, "imageAtomicMin");
+ }
+
+ Expression AtomicImageMax(Operation operation) {
+ return AtomicImage(operation, "imageAtomicMax");
+ }
+ Expression AtomicImageAnd(Operation operation) {
+ return AtomicImage(operation, "imageAtomicAnd");
+ }
+
+ Expression AtomicImageOr(Operation operation) {
+ return AtomicImage(operation, "imageAtomicOr");
+ }
+
+ Expression AtomicImageXor(Operation operation) {
+ return AtomicImage(operation, "imageAtomicXor");
+ }
+
+ Expression AtomicImageExchange(Operation operation) {
+ return AtomicImage(operation, "imageAtomicExchange");
+ }
+
+ Expression Branch(Operation operation) {
const auto target = std::get_if<ImmediateNode>(&*operation[0]);
UNIMPLEMENTED_IF(!target);
- code.AddLine("jmp_to = 0x{:x}u;", target->GetValue());
+ code.AddLine("jmp_to = 0x{:X}U;", target->GetValue());
+ code.AddLine("break;");
+ return {};
+ }
+
+ Expression BranchIndirect(Operation operation) {
+ const std::string op_a = VisitOperand(operation, 0).AsUint();
+
+ code.AddLine("jmp_to = {};", op_a);
code.AddLine("break;");
return {};
}
- std::string PushFlowStack(Operation operation) {
+ Expression PushFlowStack(Operation operation) {
+ const auto stack = std::get<MetaStackClass>(operation.GetMeta());
const auto target = std::get_if<ImmediateNode>(&*operation[0]);
UNIMPLEMENTED_IF(!target);
- code.AddLine("flow_stack[flow_stack_top++] = 0x{:x}u;", target->GetValue());
+ code.AddLine("{}[{}++] = 0x{:X}U;", FlowStackName(stack), FlowStackTopName(stack),
+ target->GetValue());
return {};
}
- std::string PopFlowStack(Operation operation) {
- code.AddLine("jmp_to = flow_stack[--flow_stack_top];");
+ Expression PopFlowStack(Operation operation) {
+ const auto stack = std::get<MetaStackClass>(operation.GetMeta());
+ code.AddLine("jmp_to = {}[--{}];", FlowStackName(stack), FlowStackTopName(stack));
code.AddLine("break;");
return {};
}
- std::string Exit(Operation operation) {
- if (stage != ShaderStage::Fragment) {
+ Expression Exit(Operation operation) {
+ if (stage != ProgramType::Fragment) {
code.AddLine("return;");
return {};
}
const auto& used_registers = ir.GetRegisters();
- const auto SafeGetRegister = [&](u32 reg) -> std::string {
+ const auto SafeGetRegister = [&](u32 reg) -> Expression {
// TODO(Rodrigo): Replace with contains once C++20 releases
if (used_registers.find(reg) != used_registers.end()) {
- return GetRegister(reg);
+ return {GetRegister(reg), Type::Float};
}
- return "0.0f";
+ return {"0.0f", Type::Float};
};
UNIMPLEMENTED_IF_MSG(header.ps.omap.sample_mask != 0, "Sample mask write is unimplemented");
@@ -1475,7 +1893,7 @@ private:
for (u32 component = 0; component < 4; ++component) {
if (header.ps.IsColorComponentOutputEnabled(render_target, component)) {
code.AddLine("FragColor{}[{}] = {};", render_target, component,
- SafeGetRegister(current_reg));
+ SafeGetRegister(current_reg).AsFloat());
++current_reg;
}
}
@@ -1484,14 +1902,14 @@ private:
if (header.ps.omap.depth) {
// The depth output is always 2 registers after the last color output, and current_reg
// already contains one past the last color register.
- code.AddLine("gl_FragDepth = {};", SafeGetRegister(current_reg + 1));
+ code.AddLine("gl_FragDepth = {};", SafeGetRegister(current_reg + 1).AsFloat());
}
code.AddLine("return;");
return {};
}
- std::string Discard(Operation operation) {
+ Expression Discard(Operation operation) {
// Enclose "discard" in a conditional, so that GLSL compilation does not complain
// about unexecuted instructions that may follow this.
code.AddLine("if (true) {{");
@@ -1502,8 +1920,8 @@ private:
return {};
}
- std::string EmitVertex(Operation operation) {
- ASSERT_MSG(stage == ShaderStage::Geometry,
+ Expression EmitVertex(Operation operation) {
+ ASSERT_MSG(stage == ProgramType::Geometry,
"EmitVertex is expected to be used in a geometry shader.");
// If a geometry shader is attached, it will always flip (it's the last stage before
@@ -1513,30 +1931,109 @@ private:
return {};
}
- std::string EndPrimitive(Operation operation) {
- ASSERT_MSG(stage == ShaderStage::Geometry,
+ Expression EndPrimitive(Operation operation) {
+ ASSERT_MSG(stage == ProgramType::Geometry,
"EndPrimitive is expected to be used in a geometry shader.");
code.AddLine("EndPrimitive();");
return {};
}
- std::string YNegate(Operation operation) {
+ Expression YNegate(Operation operation) {
// Config pack's third value is Y_NEGATE's state.
- return "uintBitsToFloat(config_pack[2])";
+ return {"config_pack[2]", Type::Uint};
}
template <u32 element>
- std::string LocalInvocationId(Operation) {
- return "utof(gl_LocalInvocationID"s + GetSwizzle(element) + ')';
+ Expression LocalInvocationId(Operation) {
+ return {"gl_LocalInvocationID"s + GetSwizzle(element), Type::Uint};
}
template <u32 element>
- std::string WorkGroupId(Operation) {
- return "utof(gl_WorkGroupID"s + GetSwizzle(element) + ')';
+ Expression WorkGroupId(Operation) {
+ return {"gl_WorkGroupID"s + GetSwizzle(element), Type::Uint};
}
- static constexpr OperationDecompilersArray operation_decompilers = {
+ Expression BallotThread(Operation operation) {
+ const std::string value = VisitOperand(operation, 0).AsBool();
+ if (!device.HasWarpIntrinsics()) {
+ LOG_ERROR(Render_OpenGL, "Nvidia vote intrinsics are required by this shader");
+ // Stub on non-Nvidia devices by simulating all threads voting the same as the active
+ // one.
+ return {fmt::format("({} ? 0xFFFFFFFFU : 0U)", value), Type::Uint};
+ }
+ return {fmt::format("ballotThreadNV({})", value), Type::Uint};
+ }
+
+ Expression Vote(Operation operation, const char* func) {
+ const std::string value = VisitOperand(operation, 0).AsBool();
+ if (!device.HasWarpIntrinsics()) {
+ LOG_ERROR(Render_OpenGL, "Nvidia vote intrinsics are required by this shader");
+ // Stub with a warp size of one.
+ return {value, Type::Bool};
+ }
+ return {fmt::format("{}({})", func, value), Type::Bool};
+ }
+
+ Expression VoteAll(Operation operation) {
+ return Vote(operation, "allThreadsNV");
+ }
+
+ Expression VoteAny(Operation operation) {
+ return Vote(operation, "anyThreadNV");
+ }
+
+ Expression VoteEqual(Operation operation) {
+ if (!device.HasWarpIntrinsics()) {
+ LOG_ERROR(Render_OpenGL, "Nvidia vote intrinsics are required by this shader");
+            // We must return true here, since a stub for a theoretical warp size of 1 will
+            // always produce an equal result across all of its votes.
+ return {"true", Type::Bool};
+ }
+ return Vote(operation, "allThreadsEqualNV");
+ }
+
+ template <const std::string_view& func>
+ Expression Shuffle(Operation operation) {
+ const std::string value = VisitOperand(operation, 0).AsFloat();
+ if (!device.HasWarpIntrinsics()) {
+ LOG_ERROR(Render_OpenGL, "Nvidia shuffle intrinsics are required by this shader");
+ // On a "single-thread" device we are either on the same thread or out of bounds. Both
+ // cases return the passed value.
+ return {value, Type::Float};
+ }
+
+ const std::string index = VisitOperand(operation, 1).AsUint();
+ const std::string width = VisitOperand(operation, 2).AsUint();
+ return {fmt::format("{}({}, {}, {})", func, value, index, width), Type::Float};
+ }
+
+ template <const std::string_view& func>
+ Expression InRangeShuffle(Operation operation) {
+ const std::string index = VisitOperand(operation, 0).AsUint();
+ const std::string width = VisitOperand(operation, 1).AsUint();
+ if (!device.HasWarpIntrinsics()) {
+ // On a "single-thread" device we are only in bounds when the requested index is 0.
+ return {fmt::format("({} == 0U)", index), Type::Bool};
+ }
+
+ const std::string in_range = code.GenerateTemporary();
+ code.AddLine("bool {};", in_range);
+ code.AddLine("{}(0U, {}, {}, {});", func, index, width, in_range);
+ return {in_range, Type::Bool};
+ }
+
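+    // Non-type template parameters of reference type need objects with static storage duration,
+    // so the shuffle function names live in this non-instantiable holder.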
+ struct Func final {
+ Func() = delete;
+ ~Func() = delete;
+
+ static constexpr std::string_view ShuffleIndexed = "shuffleNV";
+ static constexpr std::string_view ShuffleUp = "shuffleUpNV";
+ static constexpr std::string_view ShuffleDown = "shuffleDownNV";
+ static constexpr std::string_view ShuffleButterfly = "shuffleXorNV";
+ };
+
+ static constexpr std::array operation_decompilers = {
&GLSLDecompiler::Assign,
&GLSLDecompiler::Select,
@@ -1548,6 +2045,8 @@ private:
&GLSLDecompiler::Negate<Type::Float>,
&GLSLDecompiler::Absolute<Type::Float>,
&GLSLDecompiler::FClamp,
+ &GLSLDecompiler::FCastHalf0,
+ &GLSLDecompiler::FCastHalf1,
&GLSLDecompiler::Min<Type::Float>,
&GLSLDecompiler::Max<Type::Float>,
&GLSLDecompiler::FCos,
@@ -1608,6 +2107,7 @@ private:
&GLSLDecompiler::Absolute<Type::HalfFloat>,
&GLSLDecompiler::HNegate,
&GLSLDecompiler::HClamp,
+ &GLSLDecompiler::HCastFloat,
&GLSLDecompiler::HUnpack,
&GLSLDecompiler::HMergeF32,
&GLSLDecompiler::HMergeH0,
@@ -1620,8 +2120,7 @@ private:
&GLSLDecompiler::LogicalXor,
&GLSLDecompiler::LogicalNegate,
&GLSLDecompiler::LogicalPick2,
- &GLSLDecompiler::LogicalAll2,
- &GLSLDecompiler::LogicalAny2,
+ &GLSLDecompiler::LogicalAnd2,
&GLSLDecompiler::LogicalLessThan<Type::Float>,
&GLSLDecompiler::LogicalEqual<Type::Float>,
@@ -1665,7 +2164,17 @@ private:
&GLSLDecompiler::TextureQueryLod,
&GLSLDecompiler::TexelFetch,
+ &GLSLDecompiler::ImageStore,
+ &GLSLDecompiler::AtomicImageAdd,
+ &GLSLDecompiler::AtomicImageMin,
+ &GLSLDecompiler::AtomicImageMax,
+ &GLSLDecompiler::AtomicImageAnd,
+ &GLSLDecompiler::AtomicImageOr,
+ &GLSLDecompiler::AtomicImageXor,
+ &GLSLDecompiler::AtomicImageExchange,
+
&GLSLDecompiler::Branch,
+ &GLSLDecompiler::BranchIndirect,
&GLSLDecompiler::PushFlowStack,
&GLSLDecompiler::PopFlowStack,
&GLSLDecompiler::Exit,
@@ -1681,7 +2190,23 @@ private:
&GLSLDecompiler::WorkGroupId<0>,
&GLSLDecompiler::WorkGroupId<1>,
&GLSLDecompiler::WorkGroupId<2>,
+
+ &GLSLDecompiler::BallotThread,
+ &GLSLDecompiler::VoteAll,
+ &GLSLDecompiler::VoteAny,
+ &GLSLDecompiler::VoteEqual,
+
+ &GLSLDecompiler::Shuffle<Func::ShuffleIndexed>,
+ &GLSLDecompiler::Shuffle<Func::ShuffleUp>,
+ &GLSLDecompiler::Shuffle<Func::ShuffleDown>,
+ &GLSLDecompiler::Shuffle<Func::ShuffleButterfly>,
+
+ &GLSLDecompiler::InRangeShuffle<Func::ShuffleIndexed>,
+ &GLSLDecompiler::InRangeShuffle<Func::ShuffleUp>,
+ &GLSLDecompiler::InRangeShuffle<Func::ShuffleDown>,
+ &GLSLDecompiler::InRangeShuffle<Func::ShuffleButterfly>,
};
+ static_assert(operation_decompilers.size() == static_cast<std::size_t>(OperationCode::Amount));
std::string GetRegister(u32 index) const {
return GetDeclarationWithSuffix(index, "gpr");
@@ -1720,9 +2245,13 @@ private:
return "lmem_" + suffix;
}
+ std::string GetSharedMemory() const {
+ return fmt::format("smem_{}", suffix);
+ }
+
std::string GetInternalFlag(InternalFlag flag) const {
- constexpr std::array<const char*, 4> InternalFlagNames = {"zero_flag", "sign_flag",
- "carry_flag", "overflow_flag"};
+ constexpr std::array InternalFlagNames = {"zero_flag", "sign_flag", "carry_flag",
+ "overflow_flag"};
const auto index = static_cast<u32>(flag);
ASSERT(index < static_cast<u32>(InternalFlag::Amount));
@@ -1733,12 +2262,20 @@ private:
return GetDeclarationWithSuffix(static_cast<u32>(sampler.GetIndex()), "sampler");
}
+ std::string GetImage(const Image& image) const {
+ return GetDeclarationWithSuffix(static_cast<u32>(image.GetIndex()), "image");
+ }
+
+ void EmitIfdefIsBuffer(const Sampler& sampler) {
+ code.AddLine("#ifdef SAMPLER_{}_IS_BUFFER", sampler.GetIndex());
+ }
+
std::string GetDeclarationWithSuffix(u32 index, const std::string& name) const {
return fmt::format("{}_{}_{}", name, index, suffix);
}
u32 GetNumPhysicalInputAttributes() const {
- return stage == ShaderStage::Vertex ? GetNumPhysicalAttributes() : GetNumPhysicalVaryings();
+ return IsVertexShader(stage) ? GetNumPhysicalAttributes() : GetNumPhysicalVaryings();
}
u32 GetNumPhysicalAttributes() const {
@@ -1751,7 +2288,7 @@ private:
const Device& device;
const ShaderIR& ir;
- const ShaderStage stage;
+ const ProgramType stage;
const std::string suffix;
const Header header;
@@ -1762,27 +2299,19 @@ private:
std::string GetCommonDeclarations() {
return fmt::format(
- "#define MAX_CONSTBUFFER_ELEMENTS {}\n"
"#define ftoi floatBitsToInt\n"
"#define ftou floatBitsToUint\n"
"#define itof intBitsToFloat\n"
"#define utof uintBitsToFloat\n\n"
- "float fromHalf2(vec2 pair) {{\n"
- " return utof(packHalf2x16(pair));\n"
- "}}\n\n"
- "vec2 toHalf2(float value) {{\n"
- " return unpackHalf2x16(ftou(value));\n"
- "}}\n\n"
- "bvec2 halfFloatNanComparison(bvec2 comparison, vec2 pair1, vec2 pair2) {{\n"
+ "bvec2 HalfFloatNanComparison(bvec2 comparison, vec2 pair1, vec2 pair2) {{\n"
" bvec2 is_nan1 = isnan(pair1);\n"
" bvec2 is_nan2 = isnan(pair2);\n"
" return bvec2(comparison.x || is_nan1.x || is_nan2.x, comparison.y || is_nan1.y || "
"is_nan2.y);\n"
- "}}\n",
- MAX_CONSTBUFFER_ELEMENTS);
+ "}}\n\n");
}
-ProgramResult Decompile(const Device& device, const ShaderIR& ir, Maxwell::ShaderStage stage,
+ProgramResult Decompile(const Device& device, const ShaderIR& ir, ProgramType stage,
const std::string& suffix) {
GLSLDecompiler decompiler(device, ir, stage, suffix);
decompiler.Decompile();
diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.h b/src/video_core/renderer_opengl/gl_shader_decompiler.h
index c1569e737..2ea02f5bf 100644
--- a/src/video_core/renderer_opengl/gl_shader_decompiler.h
+++ b/src/video_core/renderer_opengl/gl_shader_decompiler.h
@@ -12,14 +12,26 @@
#include "video_core/engines/maxwell_3d.h"
#include "video_core/shader/shader_ir.h"
-namespace OpenGL {
-class Device;
-}
-
namespace VideoCommon::Shader {
class ShaderIR;
}
+namespace OpenGL {
+
+class Device;
+
+enum class ProgramType : u32 {
+ VertexA = 0,
+ VertexB = 1,
+ TessellationControl = 2,
+ TessellationEval = 3,
+ Geometry = 4,
+ Fragment = 5,
+ Compute = 6
+};
+
+} // namespace OpenGL
+
namespace OpenGL::GLShader {
struct ShaderEntries;
@@ -27,6 +39,7 @@ struct ShaderEntries;
using Maxwell = Tegra::Engines::Maxwell3D::Regs;
using ProgramResult = std::pair<std::string, ShaderEntries>;
using SamplerEntry = VideoCommon::Shader::Sampler;
+using ImageEntry = VideoCommon::Shader::Image;
class ConstBufferEntry : public VideoCommon::Shader::ConstBuffer {
public:
@@ -74,14 +87,16 @@ struct ShaderEntries {
std::vector<ConstBufferEntry> const_buffers;
std::vector<SamplerEntry> samplers;
std::vector<SamplerEntry> bindless_samplers;
+ std::vector<ImageEntry> images;
std::vector<GlobalMemoryEntry> global_memory_entries;
std::array<bool, Maxwell::NumClipDistances> clip_distances{};
+ bool shader_viewport_layer_array{};
std::size_t shader_length{};
};
std::string GetCommonDeclarations();
ProgramResult Decompile(const Device& device, const VideoCommon::Shader::ShaderIR& ir,
- Maxwell::ShaderStage stage, const std::string& suffix);
+ ProgramType stage, const std::string& suffix);
} // namespace OpenGL::GLShader
diff --git a/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp b/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp
index ee4a45ca2..f141c4e3b 100644
--- a/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp
@@ -34,11 +34,11 @@ enum class PrecompiledEntryKind : u32 {
Dump,
};
-constexpr u32 NativeVersion = 1;
+constexpr u32 NativeVersion = 4;
// Make sure sizes don't change by accident
-static_assert(sizeof(BaseBindings) == 12);
-static_assert(sizeof(ShaderDiskCacheUsage) == 24);
+static_assert(sizeof(BaseBindings) == 16);
+static_assert(sizeof(ShaderDiskCacheUsage) == 40);
namespace {
@@ -51,7 +51,7 @@ ShaderCacheVersionHash GetShaderCacheVersionHash() {
} // namespace
-ShaderDiskCacheRaw::ShaderDiskCacheRaw(u64 unique_identifier, Maxwell::ShaderProgram program_type,
+ShaderDiskCacheRaw::ShaderDiskCacheRaw(u64 unique_identifier, ProgramType program_type,
u32 program_code_size, u32 program_code_size_b,
ProgramCode program_code, ProgramCode program_code_b)
: unique_identifier{unique_identifier}, program_type{program_type},
@@ -332,11 +332,37 @@ std::optional<ShaderDiskCacheDecompiled> ShaderDiskCacheOpenGL::LoadDecompiledEn
static_cast<Tegra::Shader::TextureType>(type), is_array, is_shadow, is_bindless);
}
+ u32 images_count{};
+ if (!LoadObjectFromPrecompiled(images_count)) {
+ return {};
+ }
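+    // Each image entry is read field by field so the format doesn't depend on struct packing.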
+ for (u32 i = 0; i < images_count; ++i) {
+ u64 offset{};
+ u64 index{};
+ u32 type{};
+ u8 is_bindless{};
+ u8 is_written{};
+ u8 is_read{};
+ u8 is_size_known{};
+ u32 size{};
+ if (!LoadObjectFromPrecompiled(offset) || !LoadObjectFromPrecompiled(index) ||
+ !LoadObjectFromPrecompiled(type) || !LoadObjectFromPrecompiled(is_bindless) ||
+ !LoadObjectFromPrecompiled(is_written) || !LoadObjectFromPrecompiled(is_read) ||
+ !LoadObjectFromPrecompiled(is_size_known) || !LoadObjectFromPrecompiled(size)) {
+ return {};
+ }
+ entry.entries.images.emplace_back(
+ static_cast<std::size_t>(offset), static_cast<std::size_t>(index),
+ static_cast<Tegra::Shader::ImageType>(type), is_bindless != 0, is_written != 0,
+ is_read != 0,
+ is_size_known ? std::make_optional(static_cast<Tegra::Shader::ImageAtomicSize>(size))
+ : std::nullopt);
+ }
+
u32 global_memory_count{};
if (!LoadObjectFromPrecompiled(global_memory_count)) {
return {};
}
-
for (u32 i = 0; i < global_memory_count; ++i) {
u32 cbuf_index{};
u32 cbuf_offset{};
@@ -356,11 +382,16 @@ std::optional<ShaderDiskCacheDecompiled> ShaderDiskCacheOpenGL::LoadDecompiledEn
}
}
+ bool shader_viewport_layer_array{};
+ if (!LoadObjectFromPrecompiled(shader_viewport_layer_array)) {
+ return {};
+ }
+ entry.entries.shader_viewport_layer_array = shader_viewport_layer_array;
+
u64 shader_length{};
if (!LoadObjectFromPrecompiled(shader_length)) {
return {};
}
-
entry.entries.shader_length = static_cast<std::size_t>(shader_length);
return entry;
@@ -400,6 +431,22 @@ bool ShaderDiskCacheOpenGL::SaveDecompiledFile(u64 unique_identifier, const std:
}
}
+ if (!SaveObjectToPrecompiled(static_cast<u32>(entries.images.size()))) {
+ return false;
+ }
+ for (const auto& image : entries.images) {
+ const u32 size = image.IsSizeKnown() ? static_cast<u32>(image.GetSize()) : 0U;
+ if (!SaveObjectToPrecompiled(static_cast<u64>(image.GetOffset())) ||
+ !SaveObjectToPrecompiled(static_cast<u64>(image.GetIndex())) ||
+ !SaveObjectToPrecompiled(static_cast<u32>(image.GetType())) ||
+ !SaveObjectToPrecompiled(static_cast<u8>(image.IsBindless() ? 1 : 0)) ||
+ !SaveObjectToPrecompiled(static_cast<u8>(image.IsWritten() ? 1 : 0)) ||
+ !SaveObjectToPrecompiled(static_cast<u8>(image.IsRead() ? 1 : 0)) ||
+ !SaveObjectToPrecompiled(image.IsSizeKnown()) || !SaveObjectToPrecompiled(size)) {
+ return false;
+ }
+ }
+
if (!SaveObjectToPrecompiled(static_cast<u32>(entries.global_memory_entries.size()))) {
return false;
}
@@ -417,6 +464,10 @@ bool ShaderDiskCacheOpenGL::SaveDecompiledFile(u64 unique_identifier, const std:
}
}
+ if (!SaveObjectToPrecompiled(entries.shader_viewport_layer_array)) {
+ return false;
+ }
+
if (!SaveObjectToPrecompiled(static_cast<u64>(entries.shader_length))) {
return false;
}
diff --git a/src/video_core/renderer_opengl/gl_shader_disk_cache.h b/src/video_core/renderer_opengl/gl_shader_disk_cache.h
index ecd72ba58..cc8bbd61e 100644
--- a/src/video_core/renderer_opengl/gl_shader_disk_cache.h
+++ b/src/video_core/renderer_opengl/gl_shader_disk_cache.h
@@ -4,6 +4,7 @@
#pragma once
+#include <bitset>
#include <optional>
#include <string>
#include <tuple>
@@ -17,7 +18,6 @@
#include "common/assert.h"
#include "common/common_types.h"
#include "core/file_sys/vfs_vector.h"
-#include "video_core/engines/maxwell_3d.h"
#include "video_core/renderer_opengl/gl_shader_gen.h"
namespace Core {
@@ -30,22 +30,23 @@ class IOFile;
namespace OpenGL {
-using ProgramCode = std::vector<u64>;
-using Maxwell = Tegra::Engines::Maxwell3D::Regs;
-
struct ShaderDiskCacheUsage;
struct ShaderDiskCacheDump;
+using ProgramCode = std::vector<u64>;
using ShaderDumpsMap = std::unordered_map<ShaderDiskCacheUsage, ShaderDiskCacheDump>;
+using TextureBufferUsage = std::bitset<64>;
/// Allocated bindings used by an OpenGL shader program
struct BaseBindings {
u32 cbuf{};
u32 gmem{};
u32 sampler{};
+ u32 image{};
bool operator==(const BaseBindings& rhs) const {
- return std::tie(cbuf, gmem, sampler) == std::tie(rhs.cbuf, rhs.gmem, rhs.sampler);
+ return std::tie(cbuf, gmem, sampler, image) ==
+ std::tie(rhs.cbuf, rhs.gmem, rhs.sampler, rhs.image);
}
bool operator!=(const BaseBindings& rhs) const {
@@ -53,15 +54,29 @@ struct BaseBindings {
}
};
-/// Describes how a shader is used
+/// Describes the different variants a single program can be compiled into.
+struct ProgramVariant {
+ BaseBindings base_bindings;
+ GLenum primitive_mode{};
+ TextureBufferUsage texture_buffer_usage{};
+
+ bool operator==(const ProgramVariant& rhs) const {
+ return std::tie(base_bindings, primitive_mode, texture_buffer_usage) ==
+ std::tie(rhs.base_bindings, rhs.primitive_mode, rhs.texture_buffer_usage);
+ }
+
+ bool operator!=(const ProgramVariant& rhs) const {
+ return !operator==(rhs);
+ }
+};
+
+/// Describes how a shader is used.
struct ShaderDiskCacheUsage {
u64 unique_identifier{};
- BaseBindings bindings;
- GLenum primitive{};
+ ProgramVariant variant;
bool operator==(const ShaderDiskCacheUsage& rhs) const {
- return std::tie(unique_identifier, bindings, primitive) ==
- std::tie(rhs.unique_identifier, rhs.bindings, rhs.primitive);
+ return std::tie(unique_identifier, variant) == std::tie(rhs.unique_identifier, rhs.variant);
}
bool operator!=(const ShaderDiskCacheUsage& rhs) const {
@@ -76,7 +91,19 @@ namespace std {
template <>
struct hash<OpenGL::BaseBindings> {
std::size_t operator()(const OpenGL::BaseBindings& bindings) const noexcept {
- return bindings.cbuf | bindings.gmem << 8 | bindings.sampler << 16;
+ return static_cast<std::size_t>(bindings.cbuf) ^
+ (static_cast<std::size_t>(bindings.gmem) << 8) ^
+ (static_cast<std::size_t>(bindings.sampler) << 16) ^
+ (static_cast<std::size_t>(bindings.image) << 24);
+ }
+};
+
+template <>
+struct hash<OpenGL::ProgramVariant> {
+ std::size_t operator()(const OpenGL::ProgramVariant& variant) const noexcept {
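+        // XOR-combine the sub-hashes; the primitive mode is shifted so small enum values spread.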
+ return std::hash<OpenGL::BaseBindings>()(variant.base_bindings) ^
+ std::hash<OpenGL::TextureBufferUsage>()(variant.texture_buffer_usage) ^
+ (static_cast<std::size_t>(variant.primitive_mode) << 6);
}
};
@@ -84,7 +111,7 @@ template <>
struct hash<OpenGL::ShaderDiskCacheUsage> {
std::size_t operator()(const OpenGL::ShaderDiskCacheUsage& usage) const noexcept {
return static_cast<std::size_t>(usage.unique_identifier) ^
- std::hash<OpenGL::BaseBindings>()(usage.bindings) ^ usage.primitive << 16;
+ std::hash<OpenGL::ProgramVariant>()(usage.variant);
}
};
@@ -95,7 +122,7 @@ namespace OpenGL {
/// Describes how a shader is used by the guest GPU
class ShaderDiskCacheRaw {
public:
- explicit ShaderDiskCacheRaw(u64 unique_identifier, Maxwell::ShaderProgram program_type,
+ explicit ShaderDiskCacheRaw(u64 unique_identifier, ProgramType program_type,
u32 program_code_size, u32 program_code_size_b,
ProgramCode program_code, ProgramCode program_code_b);
ShaderDiskCacheRaw();
@@ -110,30 +137,13 @@ public:
}
bool HasProgramA() const {
- return program_type == Maxwell::ShaderProgram::VertexA;
+ return program_type == ProgramType::VertexA;
}
- Maxwell::ShaderProgram GetProgramType() const {
+ ProgramType GetProgramType() const {
return program_type;
}
- Maxwell::ShaderStage GetProgramStage() const {
- switch (program_type) {
- case Maxwell::ShaderProgram::VertexA:
- case Maxwell::ShaderProgram::VertexB:
- return Maxwell::ShaderStage::Vertex;
- case Maxwell::ShaderProgram::TesselationControl:
- return Maxwell::ShaderStage::TesselationControl;
- case Maxwell::ShaderProgram::TesselationEval:
- return Maxwell::ShaderStage::TesselationEval;
- case Maxwell::ShaderProgram::Geometry:
- return Maxwell::ShaderStage::Geometry;
- case Maxwell::ShaderProgram::Fragment:
- return Maxwell::ShaderStage::Fragment;
- }
- UNREACHABLE();
- }
-
const ProgramCode& GetProgramCode() const {
return program_code;
}
@@ -144,7 +154,7 @@ public:
private:
u64 unique_identifier{};
- Maxwell::ShaderProgram program_type{};
+ ProgramType program_type{};
u32 program_code_size{};
u32 program_code_size_b{};
@@ -275,26 +285,17 @@ private:
return LoadArrayFromPrecompiled(&object, 1);
}
- bool LoadObjectFromPrecompiled(bool& object) {
- u8 value;
- const bool read_ok = LoadArrayFromPrecompiled(&value, 1);
- if (!read_ok) {
- return false;
- }
-
- object = value != 0;
- return true;
- }
-
- // Core system
Core::System& system;
- // Stored transferable shaders
- std::map<u64, std::unordered_set<ShaderDiskCacheUsage>> transferable;
- // Stores whole precompiled cache which will be read from/saved to the precompiled cache file
+
+    // Stores the whole precompiled cache, which will be read from or saved to the precompiled
+    // cache file
FileSys::VectorVfsFile precompiled_cache_virtual_file;
// Stores the current offset of the precompiled cache file for IO purposes
std::size_t precompiled_cache_virtual_file_offset = 0;
+ // Stored transferable shaders
+ std::unordered_map<u64, std::unordered_set<ShaderDiskCacheUsage>> transferable;
+
// The cache has been loaded at boot
bool tried_to_load{};
};
diff --git a/src/video_core/renderer_opengl/gl_shader_gen.cpp b/src/video_core/renderer_opengl/gl_shader_gen.cpp
index 9148629ec..3a8d9e1da 100644
--- a/src/video_core/renderer_opengl/gl_shader_gen.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_gen.cpp
@@ -14,7 +14,8 @@ using Tegra::Engines::Maxwell3D;
using VideoCommon::Shader::ProgramCode;
using VideoCommon::Shader::ShaderIR;
-static constexpr u32 PROGRAM_OFFSET{10};
+static constexpr u32 PROGRAM_OFFSET = 10;
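+// Compute programs have no Maxwell shader header, so decoding starts at the first instruction.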
+static constexpr u32 COMPUTE_OFFSET = 0;
ProgramResult GenerateVertexShader(const Device& device, const ShaderSetup& setup) {
const std::string id = fmt::format("{:016x}", setup.program.unique_identifier);
@@ -29,17 +30,15 @@ layout (std140, binding = EMULATION_UBO_BINDING) uniform vs_config {
};
)";
- const ShaderIR program_ir(setup.program.code, PROGRAM_OFFSET);
- ProgramResult program =
- Decompile(device, program_ir, Maxwell3D::Regs::ShaderStage::Vertex, "vertex");
+ const ShaderIR program_ir(setup.program.code, PROGRAM_OFFSET, setup.program.size_a);
+ const auto stage = setup.IsDualProgram() ? ProgramType::VertexA : ProgramType::VertexB;
+ ProgramResult program = Decompile(device, program_ir, stage, "vertex");
out += program.first;
if (setup.IsDualProgram()) {
- const ShaderIR program_ir_b(setup.program.code_b, PROGRAM_OFFSET);
- ProgramResult program_b =
- Decompile(device, program_ir_b, Maxwell3D::Regs::ShaderStage::Vertex, "vertex_b");
-
+ const ShaderIR program_ir_b(setup.program.code_b, PROGRAM_OFFSET, setup.program.size_b);
+ ProgramResult program_b = Decompile(device, program_ir_b, ProgramType::VertexB, "vertex_b");
out += program_b.first;
}
@@ -80,9 +79,9 @@ layout (std140, binding = EMULATION_UBO_BINDING) uniform gs_config {
};
)";
- const ShaderIR program_ir(setup.program.code, PROGRAM_OFFSET);
- ProgramResult program =
- Decompile(device, program_ir, Maxwell3D::Regs::ShaderStage::Geometry, "geometry");
+
+ const ShaderIR program_ir(setup.program.code, PROGRAM_OFFSET, setup.program.size_a);
+ ProgramResult program = Decompile(device, program_ir, ProgramType::Geometry, "geometry");
out += program.first;
out += R"(
@@ -115,10 +114,8 @@ layout (std140, binding = EMULATION_UBO_BINDING) uniform fs_config {
};
)";
- const ShaderIR program_ir(setup.program.code, PROGRAM_OFFSET);
- ProgramResult program =
- Decompile(device, program_ir, Maxwell3D::Regs::ShaderStage::Fragment, "fragment");
-
+ const ShaderIR program_ir(setup.program.code, PROGRAM_OFFSET, setup.program.size_a);
+ ProgramResult program = Decompile(device, program_ir, ProgramType::Fragment, "fragment");
out += program.first;
out += R"(
@@ -130,4 +127,22 @@ void main() {
return {std::move(out), std::move(program.second)};
}
+ProgramResult GenerateComputeShader(const Device& device, const ShaderSetup& setup) {
+ const std::string id = fmt::format("{:016x}", setup.program.unique_identifier);
+
+ std::string out = "// Shader Unique Id: CS" + id + "\n\n";
+ out += GetCommonDeclarations();
+
+ const ShaderIR program_ir(setup.program.code, COMPUTE_OFFSET, setup.program.size_a);
+ ProgramResult program = Decompile(device, program_ir, ProgramType::Compute, "compute");
+ out += program.first;
+
+ out += R"(
+void main() {
+ execute_compute();
+}
+)";
+ return {std::move(out), std::move(program.second)};
+}
+
} // namespace OpenGL::GLShader
diff --git a/src/video_core/renderer_opengl/gl_shader_gen.h b/src/video_core/renderer_opengl/gl_shader_gen.h
index 0536c8a03..3833e88ab 100644
--- a/src/video_core/renderer_opengl/gl_shader_gen.h
+++ b/src/video_core/renderer_opengl/gl_shader_gen.h
@@ -27,6 +27,8 @@ struct ShaderSetup {
ProgramCode code;
ProgramCode code_b; // Used for dual vertex shaders
u64 unique_identifier;
+ std::size_t size_a;
+ std::size_t size_b;
} program;
/// Used in scenarios where we have a dual vertex shaders
@@ -52,4 +54,7 @@ ProgramResult GenerateGeometryShader(const Device& device, const ShaderSetup& se
/// Generates the GLSL fragment shader program source code for the given FS program
ProgramResult GenerateFragmentShader(const Device& device, const ShaderSetup& setup);
+/// Generates the GLSL compute shader program source code for the given CS program
+ProgramResult GenerateComputeShader(const Device& device, const ShaderSetup& setup);
+
} // namespace OpenGL::GLShader
diff --git a/src/video_core/renderer_opengl/gl_shader_util.cpp b/src/video_core/renderer_opengl/gl_shader_util.cpp
index 5f3fe067e..9e74eda0d 100644
--- a/src/video_core/renderer_opengl/gl_shader_util.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_util.cpp
@@ -10,21 +10,25 @@
namespace OpenGL::GLShader {
-GLuint LoadShader(const char* source, GLenum type) {
- const char* debug_type;
+namespace {
+const char* GetStageDebugName(GLenum type) {
switch (type) {
case GL_VERTEX_SHADER:
- debug_type = "vertex";
- break;
+ return "vertex";
case GL_GEOMETRY_SHADER:
- debug_type = "geometry";
- break;
+ return "geometry";
case GL_FRAGMENT_SHADER:
- debug_type = "fragment";
- break;
- default:
- UNREACHABLE();
+ return "fragment";
+ case GL_COMPUTE_SHADER:
+ return "compute";
}
+ UNIMPLEMENTED();
+ return "unknown";
+}
+} // Anonymous namespace
+
+GLuint LoadShader(const char* source, GLenum type) {
+ const char* debug_type = GetStageDebugName(type);
const GLuint shader_id = glCreateShader(type);
glShaderSource(shader_id, 1, &source, nullptr);
LOG_DEBUG(Render_OpenGL, "Compiling {} shader...", debug_type);
diff --git a/src/video_core/renderer_opengl/gl_state.cpp b/src/video_core/renderer_opengl/gl_state.cpp
index d86e137ac..bf86b5a0b 100644
--- a/src/video_core/renderer_opengl/gl_state.cpp
+++ b/src/video_core/renderer_opengl/gl_state.cpp
@@ -6,14 +6,16 @@
#include <glad/glad.h>
#include "common/assert.h"
#include "common/logging/log.h"
+#include "common/microprofile.h"
#include "video_core/renderer_opengl/gl_state.h"
+MICROPROFILE_DEFINE(OpenGL_State, "OpenGL", "State Change", MP_RGB(192, 128, 128));
+
namespace OpenGL {
using Maxwell = Tegra::Engines::Maxwell3D::Regs;
OpenGLState OpenGLState::cur_state;
-bool OpenGLState::s_rgb_used;
namespace {
@@ -31,6 +33,25 @@ bool UpdateTie(T1 current_value, const T2 new_value) {
return changed;
}
+template <typename T>
+std::optional<std::pair<GLuint, GLsizei>> UpdateArray(T& current_values, const T& new_values) {
+ std::optional<std::size_t> first;
+ std::size_t last;
+ for (std::size_t i = 0; i < std::size(current_values); ++i) {
+ if (!UpdateValue(current_values[i], new_values[i])) {
+ continue;
+ }
+ if (!first) {
+ first = i;
+ }
+ last = i;
+ }
+ if (!first) {
+ return std::nullopt;
+ }
+ return std::make_pair(static_cast<GLuint>(*first), static_cast<GLsizei>(last - *first + 1));
+}
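+
+// Illustrative example: if only units 3..5 changed, UpdateArray returns
+// {first = 3, count = 3}, so the caller can issue a single ranged multi-bind
+// (e.g. glBindTextures(3, 3, textures.data() + 3)) instead of one call per slot.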
+
void Enable(GLenum cap, bool enable) {
if (enable) {
glEnable(cap);
@@ -131,10 +152,6 @@ OpenGLState::OpenGLState() {
logic_op.enabled = false;
logic_op.operation = GL_COPY;
- for (auto& texture_unit : texture_units) {
- texture_unit.Reset();
- }
-
draw.read_framebuffer = 0;
draw.draw_framebuffer = 0;
draw.vertex_array = 0;
@@ -162,6 +179,25 @@ OpenGLState::OpenGLState() {
alpha_test.ref = 0.0f;
}
+void OpenGLState::SetDefaultViewports() {
+ for (auto& item : viewports) {
+ item.x = 0;
+ item.y = 0;
+ item.width = 0;
+ item.height = 0;
+ item.depth_range_near = 0.0f;
+ item.depth_range_far = 1.0f;
+ item.scissor.enabled = false;
+ item.scissor.x = 0;
+ item.scissor.y = 0;
+ item.scissor.width = 0;
+ item.scissor.height = 0;
+ }
+
+ depth_clamp.far_plane = false;
+ depth_clamp.near_plane = false;
+}
+
void OpenGLState::ApplyDefaultState() {
glEnable(GL_BLEND);
glDisable(GL_FRAMEBUFFER_SRGB);
@@ -245,8 +281,6 @@ void OpenGLState::ApplySRgb() const {
return;
cur_state.framebuffer_srgb.enabled = framebuffer_srgb.enabled;
if (framebuffer_srgb.enabled) {
- // Track if sRGB is used
- s_rgb_used = true;
glEnable(GL_FRAMEBUFFER_SRGB);
} else {
glDisable(GL_FRAMEBUFFER_SRGB);
@@ -474,56 +508,25 @@ void OpenGLState::ApplyAlphaTest() const {
}
void OpenGLState::ApplyTextures() const {
- bool has_delta{};
- std::size_t first{};
- std::size_t last{};
- std::array<GLuint, Maxwell::NumTextureSamplers> textures;
-
- for (std::size_t i = 0; i < std::size(texture_units); ++i) {
- const auto& texture_unit = texture_units[i];
- auto& cur_state_texture_unit = cur_state.texture_units[i];
- textures[i] = texture_unit.texture;
- if (cur_state_texture_unit.texture == textures[i]) {
- continue;
- }
- cur_state_texture_unit.texture = textures[i];
- if (!has_delta) {
- first = i;
- has_delta = true;
- }
- last = i;
- }
- if (has_delta) {
- glBindTextures(static_cast<GLuint>(first), static_cast<GLsizei>(last - first + 1),
- textures.data() + first);
+ if (const auto update = UpdateArray(cur_state.textures, textures)) {
+ glBindTextures(update->first, update->second, textures.data() + update->first);
}
}
void OpenGLState::ApplySamplers() const {
- bool has_delta{};
- std::size_t first{};
- std::size_t last{};
- std::array<GLuint, Maxwell::NumTextureSamplers> samplers;
-
- for (std::size_t i = 0; i < std::size(samplers); ++i) {
- samplers[i] = texture_units[i].sampler;
- if (cur_state.texture_units[i].sampler == texture_units[i].sampler) {
- continue;
- }
- cur_state.texture_units[i].sampler = texture_units[i].sampler;
- if (!has_delta) {
- first = i;
- has_delta = true;
- }
- last = i;
+ if (const auto update = UpdateArray(cur_state.samplers, samplers)) {
+ glBindSamplers(update->first, update->second, samplers.data() + update->first);
}
- if (has_delta) {
- glBindSamplers(static_cast<GLuint>(first), static_cast<GLsizei>(last - first + 1),
- samplers.data() + first);
+}
+
+void OpenGLState::ApplyImages() const {
+ if (const auto update = UpdateArray(cur_state.images, images)) {
+ glBindImageTextures(update->first, update->second, images.data() + update->first);
}
}
-void OpenGLState::Apply() const {
+void OpenGLState::Apply() {
+ MICROPROFILE_SCOPE(OpenGL_State);
ApplyFramebufferState();
ApplyVertexArrayState();
ApplyShaderProgram();
@@ -532,19 +535,32 @@ void OpenGLState::Apply() const {
ApplyPointSize();
ApplyFragmentColorClamp();
ApplyMultisample();
+ if (dirty.color_mask) {
+ ApplyColorMask();
+ dirty.color_mask = false;
+ }
ApplyDepthClamp();
- ApplyColorMask();
ApplyViewport();
- ApplyStencilTest();
+ if (dirty.stencil_state) {
+ ApplyStencilTest();
+ dirty.stencil_state = false;
+ }
ApplySRgb();
ApplyCulling();
ApplyDepth();
ApplyPrimitiveRestart();
- ApplyBlending();
+ if (dirty.blend_state) {
+ ApplyBlending();
+ dirty.blend_state = false;
+ }
ApplyLogicOp();
ApplyTextures();
ApplySamplers();
- ApplyPolygonOffset();
+ ApplyImages();
+ if (dirty.polygon_offset) {
+ ApplyPolygonOffset();
+ dirty.polygon_offset = false;
+ }
ApplyAlphaTest();
}
@@ -571,18 +587,18 @@ void OpenGLState::EmulateViewportWithScissor() {
}
OpenGLState& OpenGLState::UnbindTexture(GLuint handle) {
- for (auto& unit : texture_units) {
- if (unit.texture == handle) {
- unit.Unbind();
+ for (auto& texture : textures) {
+ if (texture == handle) {
+ texture = 0;
}
}
return *this;
}
OpenGLState& OpenGLState::ResetSampler(GLuint handle) {
- for (auto& unit : texture_units) {
- if (unit.sampler == handle) {
- unit.sampler = 0;
+ for (auto& sampler : samplers) {
+ if (sampler == handle) {
+ sampler = 0;
}
}
return *this;
diff --git a/src/video_core/renderer_opengl/gl_state.h b/src/video_core/renderer_opengl/gl_state.h
index b0140495d..c358d3b38 100644
--- a/src/video_core/renderer_opengl/gl_state.h
+++ b/src/video_core/renderer_opengl/gl_state.h
@@ -118,21 +118,9 @@ public:
GLenum operation;
} logic_op;
- // 3 texture units - one for each that is used in PICA fragment shader emulation
- struct TextureUnit {
- GLuint texture; // GL_TEXTURE_BINDING_2D
- GLuint sampler; // GL_SAMPLER_BINDING
-
- void Unbind() {
- texture = 0;
- }
-
- void Reset() {
- Unbind();
- sampler = 0;
- }
- };
- std::array<TextureUnit, Tegra::Engines::Maxwell3D::Regs::NumTextureSamplers> texture_units;
+ std::array<GLuint, Tegra::Engines::Maxwell3D::Regs::NumTextureSamplers> textures{};
+ std::array<GLuint, Tegra::Engines::Maxwell3D::Regs::NumTextureSamplers> samplers{};
+ std::array<GLuint, Tegra::Engines::Maxwell3D::Regs::NumImages> images{};
struct {
GLuint read_framebuffer; // GL_READ_FRAMEBUFFER_BINDING
@@ -187,16 +175,9 @@ public:
return cur_state;
}
- static bool GetsRGBUsed() {
- return s_rgb_used;
- }
-
- static void ClearsRGBUsed() {
- s_rgb_used = false;
- }
-
+ void SetDefaultViewports();
/// Apply this state as the current OpenGL state
- void Apply() const;
+ void Apply();
void ApplyFramebufferState() const;
void ApplyVertexArrayState() const;
@@ -219,6 +200,7 @@ public:
void ApplyLogicOp() const;
void ApplyTextures() const;
void ApplySamplers() const;
+ void ApplyImages() const;
void ApplyDepthClamp() const;
void ApplyPolygonOffset() const;
void ApplyAlphaTest() const;
@@ -237,11 +219,39 @@ public:
/// Viewport does not affect glClearBuffer, so emulate the viewport using the scissor test
void EmulateViewportWithScissor();
+ void MarkDirtyBlendState() {
+ dirty.blend_state = true;
+ }
+
+ void MarkDirtyStencilState() {
+ dirty.stencil_state = true;
+ }
+
+ void MarkDirtyPolygonOffset() {
+ dirty.polygon_offset = true;
+ }
+
+ void MarkDirtyColorMask() {
+ dirty.color_mask = true;
+ }
+
+ void AllDirty() {
+ dirty.blend_state = true;
+ dirty.stencil_state = true;
+ dirty.polygon_offset = true;
+ dirty.color_mask = true;
+ }
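+
+ // Usage sketch (illustrative, with a hypothetical instance `state`): callers
+ // that change GL state outside this tracker must dirty the matching group,
+ // e.g. state.MarkDirtyBlendState() after raw glBlendFunc* calls, so that the
+ // next state.Apply() re-emits that group instead of skipping it as clean.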
+
private:
static OpenGLState cur_state;
- // Workaround for sRGB problems caused by QT not supporting srgb output
- static bool s_rgb_used;
+ struct {
+ bool blend_state;
+ bool stencil_state;
+ bool viewport_state;
+ bool polygon_offset;
+ bool color_mask;
+ } dirty{};
};
} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_stream_buffer.cpp b/src/video_core/renderer_opengl/gl_stream_buffer.cpp
index d0b14b3f6..35ba334e4 100644
--- a/src/video_core/renderer_opengl/gl_stream_buffer.cpp
+++ b/src/video_core/renderer_opengl/gl_stream_buffer.cpp
@@ -15,7 +15,8 @@ MICROPROFILE_DEFINE(OpenGL_StreamBuffer, "OpenGL", "Stream Buffer Orphaning",
namespace OpenGL {
-OGLStreamBuffer::OGLStreamBuffer(GLsizeiptr size, bool vertex_data_usage, bool prefer_coherent)
+OGLStreamBuffer::OGLStreamBuffer(GLsizeiptr size, bool vertex_data_usage, bool prefer_coherent,
+ bool use_persistent)
: buffer_size(size) {
gl_buffer.Create();
@@ -29,7 +30,7 @@ OGLStreamBuffer::OGLStreamBuffer(GLsizeiptr size, bool vertex_data_usage, bool p
allocate_size *= 2;
}
- if (GLAD_GL_ARB_buffer_storage) {
+ if (use_persistent) {
persistent = true;
coherent = prefer_coherent;
const GLbitfield flags =
diff --git a/src/video_core/renderer_opengl/gl_stream_buffer.h b/src/video_core/renderer_opengl/gl_stream_buffer.h
index 3d18ecb4d..f8383cbd4 100644
--- a/src/video_core/renderer_opengl/gl_stream_buffer.h
+++ b/src/video_core/renderer_opengl/gl_stream_buffer.h
@@ -13,7 +13,8 @@ namespace OpenGL {
class OGLStreamBuffer : private NonCopyable {
public:
- explicit OGLStreamBuffer(GLsizeiptr size, bool vertex_data_usage, bool prefer_coherent = false);
+ explicit OGLStreamBuffer(GLsizeiptr size, bool vertex_data_usage, bool prefer_coherent = false,
+ bool use_persistent = true);
~OGLStreamBuffer();
GLuint GetHandle() const;
diff --git a/src/video_core/renderer_opengl/gl_texture_cache.cpp b/src/video_core/renderer_opengl/gl_texture_cache.cpp
new file mode 100644
index 000000000..4f135fe03
--- /dev/null
+++ b/src/video_core/renderer_opengl/gl_texture_cache.cpp
@@ -0,0 +1,624 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/assert.h"
+#include "common/bit_util.h"
+#include "common/common_types.h"
+#include "common/microprofile.h"
+#include "common/scope_exit.h"
+#include "core/core.h"
+#include "video_core/morton.h"
+#include "video_core/renderer_opengl/gl_resource_manager.h"
+#include "video_core/renderer_opengl/gl_state.h"
+#include "video_core/renderer_opengl/gl_texture_cache.h"
+#include "video_core/renderer_opengl/utils.h"
+#include "video_core/texture_cache/surface_base.h"
+#include "video_core/texture_cache/texture_cache.h"
+#include "video_core/textures/convert.h"
+#include "video_core/textures/texture.h"
+
+namespace OpenGL {
+
+using Tegra::Texture::SwizzleSource;
+using VideoCore::MortonSwizzleMode;
+
+using VideoCore::Surface::ComponentType;
+using VideoCore::Surface::PixelFormat;
+using VideoCore::Surface::SurfaceCompression;
+using VideoCore::Surface::SurfaceTarget;
+using VideoCore::Surface::SurfaceType;
+
+MICROPROFILE_DEFINE(OpenGL_Texture_Upload, "OpenGL", "Texture Upload", MP_RGB(128, 192, 128));
+MICROPROFILE_DEFINE(OpenGL_Texture_Download, "OpenGL", "Texture Download", MP_RGB(128, 192, 128));
+MICROPROFILE_DEFINE(OpenGL_Texture_Buffer_Copy, "OpenGL", "Texture Buffer Copy",
+ MP_RGB(128, 192, 128));
+
+namespace {
+
+struct FormatTuple {
+ GLint internal_format;
+ GLenum format;
+ GLenum type;
+ ComponentType component_type;
+ bool compressed;
+};
+
+constexpr std::array<FormatTuple, VideoCore::Surface::MaxPixelFormat> tex_format_tuples = {{
+ {GL_RGBA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV, ComponentType::UNorm, false}, // ABGR8U
+ {GL_RGBA8, GL_RGBA, GL_BYTE, ComponentType::SNorm, false}, // ABGR8S
+ {GL_RGBA8UI, GL_RGBA_INTEGER, GL_UNSIGNED_BYTE, ComponentType::UInt, false}, // ABGR8UI
+ {GL_RGB565, GL_RGB, GL_UNSIGNED_SHORT_5_6_5_REV, ComponentType::UNorm, false}, // B5G6R5U
+ {GL_RGB10_A2, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, ComponentType::UNorm,
+ false}, // A2B10G10R10U
+ {GL_RGB5_A1, GL_RGBA, GL_UNSIGNED_SHORT_1_5_5_5_REV, ComponentType::UNorm, false}, // A1B5G5R5U
+ {GL_R8, GL_RED, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // R8U
+ {GL_R8UI, GL_RED_INTEGER, GL_UNSIGNED_BYTE, ComponentType::UInt, false}, // R8UI
+ {GL_RGBA16F, GL_RGBA, GL_HALF_FLOAT, ComponentType::Float, false}, // RGBA16F
+ {GL_RGBA16, GL_RGBA, GL_UNSIGNED_SHORT, ComponentType::UNorm, false}, // RGBA16U
+ {GL_RGBA16UI, GL_RGBA_INTEGER, GL_UNSIGNED_SHORT, ComponentType::UInt, false}, // RGBA16UI
+ {GL_R11F_G11F_B10F, GL_RGB, GL_UNSIGNED_INT_10F_11F_11F_REV, ComponentType::Float,
+ false}, // R11FG11FB10F
+ {GL_RGBA32UI, GL_RGBA_INTEGER, GL_UNSIGNED_INT, ComponentType::UInt, false}, // RGBA32UI
+ {GL_COMPRESSED_RGBA_S3TC_DXT1_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
+ true}, // DXT1
+ {GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
+ true}, // DXT23
+ {GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
+ true}, // DXT45
+ {GL_COMPRESSED_RED_RGTC1, GL_RED, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm, true}, // DXN1
+ {GL_COMPRESSED_RG_RGTC2, GL_RG, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
+ true}, // DXN2UNORM
+ {GL_COMPRESSED_SIGNED_RG_RGTC2, GL_RG, GL_INT, ComponentType::SNorm, true}, // DXN2SNORM
+ {GL_COMPRESSED_RGBA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
+ true}, // BC7U
+ {GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT, GL_RGB, GL_UNSIGNED_INT_8_8_8_8, ComponentType::Float,
+ true}, // BC6H_UF16
+ {GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT, GL_RGB, GL_UNSIGNED_INT_8_8_8_8, ComponentType::Float,
+ true}, // BC6H_SF16
+ {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_4X4
+ {GL_RGBA8, GL_BGRA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // BGRA8
+ {GL_RGBA32F, GL_RGBA, GL_FLOAT, ComponentType::Float, false}, // RGBA32F
+ {GL_RG32F, GL_RG, GL_FLOAT, ComponentType::Float, false}, // RG32F
+ {GL_R32F, GL_RED, GL_FLOAT, ComponentType::Float, false}, // R32F
+ {GL_R16F, GL_RED, GL_HALF_FLOAT, ComponentType::Float, false}, // R16F
+ {GL_R16, GL_RED, GL_UNSIGNED_SHORT, ComponentType::UNorm, false}, // R16U
+ {GL_R16_SNORM, GL_RED, GL_SHORT, ComponentType::SNorm, false}, // R16S
+ {GL_R16UI, GL_RED_INTEGER, GL_UNSIGNED_SHORT, ComponentType::UInt, false}, // R16UI
+ {GL_R16I, GL_RED_INTEGER, GL_SHORT, ComponentType::SInt, false}, // R16I
+ {GL_RG16, GL_RG, GL_UNSIGNED_SHORT, ComponentType::UNorm, false}, // RG16
+ {GL_RG16F, GL_RG, GL_HALF_FLOAT, ComponentType::Float, false}, // RG16F
+ {GL_RG16UI, GL_RG_INTEGER, GL_UNSIGNED_SHORT, ComponentType::UInt, false}, // RG16UI
+ {GL_RG16I, GL_RG_INTEGER, GL_SHORT, ComponentType::SInt, false}, // RG16I
+ {GL_RG16_SNORM, GL_RG, GL_SHORT, ComponentType::SNorm, false}, // RG16S
+ {GL_RGB32F, GL_RGB, GL_FLOAT, ComponentType::Float, false}, // RGB32F
+ {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV, ComponentType::UNorm,
+ false}, // RGBA8_SRGB
+ {GL_RG8, GL_RG, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // RG8U
+ {GL_RG8, GL_RG, GL_BYTE, ComponentType::SNorm, false}, // RG8S
+ {GL_RG32UI, GL_RG_INTEGER, GL_UNSIGNED_INT, ComponentType::UInt, false}, // RG32UI
+ {GL_R32UI, GL_RED_INTEGER, GL_UNSIGNED_INT, ComponentType::UInt, false}, // R32UI
+ {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_8X8
+ {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_8X5
+ {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_5X4
+ {GL_SRGB8_ALPHA8, GL_BGRA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // BGRA8_SRGB
+ // Compressed sRGB formats
+ {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
+ true}, // DXT1_SRGB
+ {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
+ true}, // DXT23_SRGB
+ {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
+ true}, // DXT45_SRGB
+ {GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
+ true}, // BC7U_SRGB
+ {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_4X4_SRGB
+ {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_8X8_SRGB
+ {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_8X5_SRGB
+ {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_5X4_SRGB
+ {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_5X5
+ {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_5X5_SRGB
+ {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_10X8
+ {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_10X8_SRGB
+
+ // Depth formats
+ {GL_DEPTH_COMPONENT32F, GL_DEPTH_COMPONENT, GL_FLOAT, ComponentType::Float, false}, // Z32F
+ {GL_DEPTH_COMPONENT16, GL_DEPTH_COMPONENT, GL_UNSIGNED_SHORT, ComponentType::UNorm,
+ false}, // Z16
+
+ // DepthStencil formats
+ {GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8, ComponentType::UNorm,
+ false}, // Z24S8
+ {GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8, ComponentType::UNorm,
+ false}, // S8Z24
+ {GL_DEPTH32F_STENCIL8, GL_DEPTH_STENCIL, GL_FLOAT_32_UNSIGNED_INT_24_8_REV,
+ ComponentType::Float, false}, // Z32FS8
+}};
+
+const FormatTuple& GetFormatTuple(PixelFormat pixel_format, ComponentType component_type) {
+ ASSERT(static_cast<std::size_t>(pixel_format) < tex_format_tuples.size());
+ const auto& format{tex_format_tuples[static_cast<std::size_t>(pixel_format)]};
+ return format;
+}
+
+GLenum GetTextureTarget(const SurfaceTarget& target) {
+ switch (target) {
+ case SurfaceTarget::TextureBuffer:
+ return GL_TEXTURE_BUFFER;
+ case SurfaceTarget::Texture1D:
+ return GL_TEXTURE_1D;
+ case SurfaceTarget::Texture2D:
+ return GL_TEXTURE_2D;
+ case SurfaceTarget::Texture3D:
+ return GL_TEXTURE_3D;
+ case SurfaceTarget::Texture1DArray:
+ return GL_TEXTURE_1D_ARRAY;
+ case SurfaceTarget::Texture2DArray:
+ return GL_TEXTURE_2D_ARRAY;
+ case SurfaceTarget::TextureCubemap:
+ return GL_TEXTURE_CUBE_MAP;
+ case SurfaceTarget::TextureCubeArray:
+ return GL_TEXTURE_CUBE_MAP_ARRAY;
+ }
+ UNREACHABLE();
+ return {};
+}
+
+GLint GetSwizzleSource(SwizzleSource source) {
+ switch (source) {
+ case SwizzleSource::Zero:
+ return GL_ZERO;
+ case SwizzleSource::R:
+ return GL_RED;
+ case SwizzleSource::G:
+ return GL_GREEN;
+ case SwizzleSource::B:
+ return GL_BLUE;
+ case SwizzleSource::A:
+ return GL_ALPHA;
+ case SwizzleSource::OneInt:
+ case SwizzleSource::OneFloat:
+ return GL_ONE;
+ }
+ UNREACHABLE();
+ return GL_NONE;
+}
+
+void ApplyTextureDefaults(const SurfaceParams& params, GLuint texture) {
+ if (params.IsBuffer()) {
+ return;
+ }
+ glTextureParameteri(texture, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ glTextureParameteri(texture, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ glTextureParameteri(texture, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTextureParameteri(texture, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ glTextureParameteri(texture, GL_TEXTURE_MAX_LEVEL, params.num_levels - 1);
+ if (params.num_levels == 1) {
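+ // Presumably: a large positive bias clamps sampling to the single existing
+ // level, avoiding fetches from mip levels that were never allocated.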
+ glTextureParameterf(texture, GL_TEXTURE_LOD_BIAS, 1000.0f);
+ }
+}
+
+OGLTexture CreateTexture(const SurfaceParams& params, GLenum target, GLenum internal_format,
+ OGLBuffer& texture_buffer) {
+ OGLTexture texture;
+ texture.Create(target);
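+
+ // Note: storage is allocated immutably (glTextureStorage* / glNamedBufferStorage),
+ // which is also what later permits glTextureView in CachedSurfaceView.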
+
+ switch (params.target) {
+ case SurfaceTarget::Texture1D:
+ glTextureStorage1D(texture.handle, params.emulated_levels, internal_format, params.width);
+ break;
+ case SurfaceTarget::TextureBuffer:
+ texture_buffer.Create();
+ glNamedBufferStorage(texture_buffer.handle, params.width * params.GetBytesPerPixel(),
+ nullptr, GL_DYNAMIC_STORAGE_BIT);
+ glTextureBuffer(texture.handle, internal_format, texture_buffer.handle);
+ break;
+ case SurfaceTarget::Texture2D:
+ case SurfaceTarget::TextureCubemap:
+ glTextureStorage2D(texture.handle, params.emulated_levels, internal_format, params.width,
+ params.height);
+ break;
+ case SurfaceTarget::Texture3D:
+ case SurfaceTarget::Texture2DArray:
+ case SurfaceTarget::TextureCubeArray:
+ glTextureStorage3D(texture.handle, params.emulated_levels, internal_format, params.width,
+ params.height, params.depth);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ ApplyTextureDefaults(params, texture.handle);
+
+ return texture;
+}
+
+} // Anonymous namespace
+
+CachedSurface::CachedSurface(const GPUVAddr gpu_addr, const SurfaceParams& params)
+ : VideoCommon::SurfaceBase<View>(gpu_addr, params) {
+ const auto& tuple{GetFormatTuple(params.pixel_format, params.component_type)};
+ internal_format = tuple.internal_format;
+ format = tuple.format;
+ type = tuple.type;
+ is_compressed = tuple.compressed;
+ target = GetTextureTarget(params.target);
+ texture = CreateTexture(params, target, internal_format, texture_buffer);
+ DecorateSurfaceName();
+ main_view = CreateViewInner(
+ ViewParams(params.target, 0, params.is_layered ? params.depth : 1, 0, params.num_levels),
+ true);
+}
+
+CachedSurface::~CachedSurface() = default;
+
+void CachedSurface::DownloadTexture(std::vector<u8>& staging_buffer) {
+ MICROPROFILE_SCOPE(OpenGL_Texture_Download);
+
+ SCOPE_EXIT({ glPixelStorei(GL_PACK_ROW_LENGTH, 0); });
+
+ for (u32 level = 0; level < params.emulated_levels; ++level) {
+ glPixelStorei(GL_PACK_ALIGNMENT, std::min(8U, params.GetRowAlignment(level)));
+ glPixelStorei(GL_PACK_ROW_LENGTH, static_cast<GLint>(params.GetMipWidth(level)));
+ const std::size_t mip_offset = params.GetHostMipmapLevelOffset(level);
+ if (is_compressed) {
+ glGetCompressedTextureImage(texture.handle, level,
+ static_cast<GLsizei>(params.GetHostMipmapSize(level)),
+ staging_buffer.data() + mip_offset);
+ } else {
+ glGetTextureImage(texture.handle, level, format, type,
+ static_cast<GLsizei>(params.GetHostMipmapSize(level)),
+ staging_buffer.data() + mip_offset);
+ }
+ }
+}
+
+void CachedSurface::UploadTexture(const std::vector<u8>& staging_buffer) {
+ MICROPROFILE_SCOPE(OpenGL_Texture_Upload);
+ SCOPE_EXIT({ glPixelStorei(GL_UNPACK_ROW_LENGTH, 0); });
+ for (u32 level = 0; level < params.emulated_levels; ++level) {
+ UploadTextureMipmap(level, staging_buffer);
+ }
+}
+
+void CachedSurface::UploadTextureMipmap(u32 level, const std::vector<u8>& staging_buffer) {
+ glPixelStorei(GL_UNPACK_ALIGNMENT, std::min(8U, params.GetRowAlignment(level)));
+ glPixelStorei(GL_UNPACK_ROW_LENGTH, static_cast<GLint>(params.GetMipWidth(level)));
+
+ auto compression_type = params.GetCompressionType();
+
+ const std::size_t mip_offset = compression_type == SurfaceCompression::Converted
+ ? params.GetConvertedMipmapOffset(level)
+ : params.GetHostMipmapLevelOffset(level);
+ const u8* buffer{staging_buffer.data() + mip_offset};
+ if (is_compressed) {
+ const auto image_size{static_cast<GLsizei>(params.GetHostMipmapSize(level))};
+ switch (params.target) {
+ case SurfaceTarget::Texture2D:
+ glCompressedTextureSubImage2D(texture.handle, level, 0, 0,
+ static_cast<GLsizei>(params.GetMipWidth(level)),
+ static_cast<GLsizei>(params.GetMipHeight(level)),
+ internal_format, image_size, buffer);
+ break;
+ case SurfaceTarget::Texture3D:
+ case SurfaceTarget::Texture2DArray:
+ case SurfaceTarget::TextureCubeArray:
+ glCompressedTextureSubImage3D(texture.handle, level, 0, 0, 0,
+ static_cast<GLsizei>(params.GetMipWidth(level)),
+ static_cast<GLsizei>(params.GetMipHeight(level)),
+ static_cast<GLsizei>(params.GetMipDepth(level)),
+ internal_format, image_size, buffer);
+ break;
+ case SurfaceTarget::TextureCubemap: {
+ const std::size_t layer_size{params.GetHostLayerSize(level)};
+ for (std::size_t face = 0; face < params.depth; ++face) {
+ glCompressedTextureSubImage3D(texture.handle, level, 0, 0, static_cast<GLint>(face),
+ static_cast<GLsizei>(params.GetMipWidth(level)),
+ static_cast<GLsizei>(params.GetMipHeight(level)), 1,
+ internal_format, static_cast<GLsizei>(layer_size),
+ buffer);
+ buffer += layer_size;
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ switch (params.target) {
+ case SurfaceTarget::Texture1D:
+ glTextureSubImage1D(texture.handle, level, 0, params.GetMipWidth(level), format, type,
+ buffer);
+ break;
+ case SurfaceTarget::TextureBuffer:
+ ASSERT(level == 0);
+ glNamedBufferSubData(texture_buffer.handle, 0,
+ params.GetMipWidth(level) * params.GetBytesPerPixel(), buffer);
+ break;
+ case SurfaceTarget::Texture1DArray:
+ case SurfaceTarget::Texture2D:
+ glTextureSubImage2D(texture.handle, level, 0, 0, params.GetMipWidth(level),
+ params.GetMipHeight(level), format, type, buffer);
+ break;
+ case SurfaceTarget::Texture3D:
+ case SurfaceTarget::Texture2DArray:
+ case SurfaceTarget::TextureCubeArray:
+ glTextureSubImage3D(
+ texture.handle, level, 0, 0, 0, static_cast<GLsizei>(params.GetMipWidth(level)),
+ static_cast<GLsizei>(params.GetMipHeight(level)),
+ static_cast<GLsizei>(params.GetMipDepth(level)), format, type, buffer);
+ break;
+ case SurfaceTarget::TextureCubemap:
+ for (std::size_t face = 0; face < params.depth; ++face) {
+ glTextureSubImage3D(texture.handle, level, 0, 0, static_cast<GLint>(face),
+ params.GetMipWidth(level), params.GetMipHeight(level), 1,
+ format, type, buffer);
+ buffer += params.GetHostLayerSize(level);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+void CachedSurface::DecorateSurfaceName() {
+ LabelGLObject(GL_TEXTURE, texture.handle, GetGpuAddr(), params.TargetName());
+}
+
+void CachedSurfaceView::DecorateViewName(GPUVAddr gpu_addr, std::string prefix) {
+ LabelGLObject(GL_TEXTURE, texture_view.handle, gpu_addr, prefix);
+}
+
+View CachedSurface::CreateView(const ViewParams& view_key) {
+ return CreateViewInner(view_key, false);
+}
+
+View CachedSurface::CreateViewInner(const ViewParams& view_key, const bool is_proxy) {
+ auto view = std::make_shared<CachedSurfaceView>(*this, view_key, is_proxy);
+ views[view_key] = view;
+ if (!is_proxy)
+ view->DecorateViewName(gpu_addr, params.TargetName() + "V:" + std::to_string(view_count++));
+ return view;
+}
+
+CachedSurfaceView::CachedSurfaceView(CachedSurface& surface, const ViewParams& params,
+ const bool is_proxy)
+ : VideoCommon::ViewBase(params), surface{surface}, is_proxy{is_proxy} {
+ target = GetTextureTarget(params.target);
+ if (!is_proxy) {
+ texture_view = CreateTextureView();
+ }
+ swizzle = EncodeSwizzle(SwizzleSource::R, SwizzleSource::G, SwizzleSource::B, SwizzleSource::A);
+}
+
+CachedSurfaceView::~CachedSurfaceView() = default;
+
+void CachedSurfaceView::Attach(GLenum attachment, GLenum target) const {
+ ASSERT(params.num_layers == 1 && params.num_levels == 1);
+
+ const auto& owner_params = surface.GetSurfaceParams();
+
+ switch (owner_params.target) {
+ case SurfaceTarget::Texture1D:
+ glFramebufferTexture1D(target, attachment, surface.GetTarget(), surface.GetTexture(),
+ params.base_level);
+ break;
+ case SurfaceTarget::Texture2D:
+ glFramebufferTexture2D(target, attachment, surface.GetTarget(), surface.GetTexture(),
+ params.base_level);
+ break;
+ case SurfaceTarget::Texture1DArray:
+ case SurfaceTarget::Texture2DArray:
+ case SurfaceTarget::TextureCubemap:
+ case SurfaceTarget::TextureCubeArray:
+ glFramebufferTextureLayer(target, attachment, surface.GetTexture(), params.base_level,
+ params.base_layer);
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+void CachedSurfaceView::ApplySwizzle(SwizzleSource x_source, SwizzleSource y_source,
+ SwizzleSource z_source, SwizzleSource w_source) {
+ u32 new_swizzle = EncodeSwizzle(x_source, y_source, z_source, w_source);
+ if (new_swizzle == swizzle)
+ return;
+ swizzle = new_swizzle;
+ const std::array<GLint, 4> gl_swizzle = {GetSwizzleSource(x_source), GetSwizzleSource(y_source),
+ GetSwizzleSource(z_source),
+ GetSwizzleSource(w_source)};
+ const GLuint handle = GetTexture();
+ glTextureParameteriv(handle, GL_TEXTURE_SWIZZLE_RGBA, gl_swizzle.data());
+}
+
+OGLTextureView CachedSurfaceView::CreateTextureView() const {
+ const auto& owner_params = surface.GetSurfaceParams();
+ OGLTextureView texture_view;
+ texture_view.Create();
+
+ const GLuint handle{texture_view.handle};
+ const FormatTuple& tuple{
+ GetFormatTuple(owner_params.pixel_format, owner_params.component_type)};
+
+ glTextureView(handle, target, surface.texture.handle, tuple.internal_format, params.base_level,
+ params.num_levels, params.base_layer, params.num_layers);
+
+ ApplyTextureDefaults(owner_params, handle);
+
+ return texture_view;
+}
+
+TextureCacheOpenGL::TextureCacheOpenGL(Core::System& system,
+ VideoCore::RasterizerInterface& rasterizer,
+ const Device& device)
+ : TextureCacheBase{system, rasterizer} {
+ src_framebuffer.Create();
+ dst_framebuffer.Create();
+}
+
+TextureCacheOpenGL::~TextureCacheOpenGL() = default;
+
+Surface TextureCacheOpenGL::CreateSurface(GPUVAddr gpu_addr, const SurfaceParams& params) {
+ return std::make_shared<CachedSurface>(gpu_addr, params);
+}
+
+void TextureCacheOpenGL::ImageCopy(Surface& src_surface, Surface& dst_surface,
+ const VideoCommon::CopyParams& copy_params) {
+ const auto& src_params = src_surface->GetSurfaceParams();
+ const auto& dst_params = dst_surface->GetSurfaceParams();
+ if (src_params.type != dst_params.type) {
+ // Incompatible surface types; a conversion fallback is needed instead of a raw image copy
+ return;
+ }
+ const auto src_handle = src_surface->GetTexture();
+ const auto src_target = src_surface->GetTarget();
+ const auto dst_handle = dst_surface->GetTexture();
+ const auto dst_target = dst_surface->GetTarget();
+ glCopyImageSubData(src_handle, src_target, copy_params.source_level, copy_params.source_x,
+ copy_params.source_y, copy_params.source_z, dst_handle, dst_target,
+ copy_params.dest_level, copy_params.dest_x, copy_params.dest_y,
+ copy_params.dest_z, copy_params.width, copy_params.height,
+ copy_params.depth);
+}
+
+void TextureCacheOpenGL::ImageBlit(View& src_view, View& dst_view,
+ const Tegra::Engines::Fermi2D::Config& copy_config) {
+ const auto& src_params{src_view->GetSurfaceParams()};
+ const auto& dst_params{dst_view->GetSurfaceParams()};
+
+ OpenGLState prev_state{OpenGLState::GetCurState()};
+ SCOPE_EXIT({
+ prev_state.AllDirty();
+ prev_state.Apply();
+ });
+
+ OpenGLState state;
+ state.draw.read_framebuffer = src_framebuffer.handle;
+ state.draw.draw_framebuffer = dst_framebuffer.handle;
+ state.AllDirty();
+ state.Apply();
+
+ u32 buffers{};
+
+ UNIMPLEMENTED_IF(src_params.target == SurfaceTarget::Texture3D);
+ UNIMPLEMENTED_IF(dst_params.target == SurfaceTarget::Texture3D);
+
+ if (src_params.type == SurfaceType::ColorTexture) {
+ src_view->Attach(GL_COLOR_ATTACHMENT0, GL_READ_FRAMEBUFFER);
+ glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0,
+ 0);
+
+ dst_view->Attach(GL_COLOR_ATTACHMENT0, GL_DRAW_FRAMEBUFFER);
+ glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0,
+ 0);
+
+ buffers = GL_COLOR_BUFFER_BIT;
+ } else if (src_params.type == SurfaceType::Depth) {
+ glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
+ src_view->Attach(GL_DEPTH_ATTACHMENT, GL_READ_FRAMEBUFFER);
+ glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0);
+
+ glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
+ dst_view->Attach(GL_DEPTH_ATTACHMENT, GL_DRAW_FRAMEBUFFER);
+ glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0);
+
+ buffers = GL_DEPTH_BUFFER_BIT;
+ } else if (src_params.type == SurfaceType::DepthStencil) {
+ glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
+ src_view->Attach(GL_DEPTH_STENCIL_ATTACHMENT, GL_READ_FRAMEBUFFER);
+
+ glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
+ dst_view->Attach(GL_DEPTH_STENCIL_ATTACHMENT, GL_DRAW_FRAMEBUFFER);
+
+ buffers = GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT;
+ }
+
+ const Common::Rectangle<u32>& src_rect = copy_config.src_rect;
+ const Common::Rectangle<u32>& dst_rect = copy_config.dst_rect;
+ const bool is_linear = copy_config.filter == Tegra::Engines::Fermi2D::Filter::Linear;
+
+ glBlitFramebuffer(src_rect.left, src_rect.top, src_rect.right, src_rect.bottom, dst_rect.left,
+ dst_rect.top, dst_rect.right, dst_rect.bottom, buffers,
+ is_linear && (buffers == GL_COLOR_BUFFER_BIT) ? GL_LINEAR : GL_NEAREST);
+}
+
+void TextureCacheOpenGL::BufferCopy(Surface& src_surface, Surface& dst_surface) {
+ MICROPROFILE_SCOPE(OpenGL_Texture_Buffer_Copy);
+ const auto& src_params = src_surface->GetSurfaceParams();
+ const auto& dst_params = dst_surface->GetSurfaceParams();
+ UNIMPLEMENTED_IF(src_params.num_levels > 1 || dst_params.num_levels > 1);
+
+ const auto source_format = GetFormatTuple(src_params.pixel_format, src_params.component_type);
+ const auto dest_format = GetFormatTuple(dst_params.pixel_format, dst_params.component_type);
+
+ const std::size_t source_size = src_surface->GetHostSizeInBytes();
+ const std::size_t dest_size = dst_surface->GetHostSizeInBytes();
+
+ const std::size_t buffer_size = std::max(source_size, dest_size);
+
+ GLuint copy_pbo_handle = FetchPBO(buffer_size);
+
+ glBindBuffer(GL_PIXEL_PACK_BUFFER, copy_pbo_handle);
+
+ if (source_format.compressed) {
+ glGetCompressedTextureImage(src_surface->GetTexture(), 0, static_cast<GLsizei>(source_size),
+ nullptr);
+ } else {
+ glGetTextureImage(src_surface->GetTexture(), 0, source_format.format, source_format.type,
+ static_cast<GLsizei>(source_size), nullptr);
+ }
+ glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER, copy_pbo_handle);
+
+ const GLsizei width = static_cast<GLsizei>(dst_params.width);
+ const GLsizei height = static_cast<GLsizei>(dst_params.height);
+ const GLsizei depth = static_cast<GLsizei>(dst_params.depth);
+ if (dest_format.compressed) {
+ LOG_CRITICAL(HW_GPU, "Compressed buffer copy is unimplemented!");
+ UNREACHABLE();
+ } else {
+ switch (dst_params.target) {
+ case SurfaceTarget::Texture1D:
+ glTextureSubImage1D(dst_surface->GetTexture(), 0, 0, width, dest_format.format,
+ dest_format.type, nullptr);
+ break;
+ case SurfaceTarget::Texture2D:
+ glTextureSubImage2D(dst_surface->GetTexture(), 0, 0, 0, width, height,
+ dest_format.format, dest_format.type, nullptr);
+ break;
+ case SurfaceTarget::Texture3D:
+ case SurfaceTarget::Texture2DArray:
+ case SurfaceTarget::TextureCubeArray:
+ glTextureSubImage3D(dst_surface->GetTexture(), 0, 0, 0, 0, width, height, depth,
+ dest_format.format, dest_format.type, nullptr);
+ break;
+ case SurfaceTarget::TextureCubemap:
+ glTextureSubImage3D(dst_surface->GetTexture(), 0, 0, 0, 0, width, height, depth,
+ dest_format.format, dest_format.type, nullptr);
+ break;
+ default:
+ LOG_CRITICAL(Render_OpenGL, "Unimplemented surface target={}",
+ static_cast<u32>(dst_params.target));
+ UNREACHABLE();
+ }
+ }
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
+
+ glTextureBarrier();
+}
+
+GLuint TextureCacheOpenGL::FetchPBO(std::size_t buffer_size) {
+ ASSERT_OR_EXECUTE(buffer_size > 0, { return 0; });
+ const u32 l2 = Common::Log2Ceil64(static_cast<u64>(buffer_size));
+ OGLBuffer& cp = copy_pbo_cache[l2];
+ if (cp.handle == 0) {
+ const std::size_t ceil_size = 1ULL << l2;
+ cp.Create();
+ cp.MakeStreamCopy(ceil_size);
+ }
+ return cp.handle;
+}
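+
+// Worked example (illustrative): buffer_size = 3000 yields Log2Ceil64(3000) = 12,
+// so a 4096-byte PBO is created once and reused for every request in (2048, 4096],
+// bounding the cache to at most one buffer per power-of-two size class.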
+
+} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_texture_cache.h b/src/video_core/renderer_opengl/gl_texture_cache.h
new file mode 100644
index 000000000..8e13ab38b
--- /dev/null
+++ b/src/video_core/renderer_opengl/gl_texture_cache.h
@@ -0,0 +1,147 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <array>
+#include <functional>
+#include <memory>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+#include <glad/glad.h>
+
+#include "common/common_types.h"
+#include "video_core/engines/shader_bytecode.h"
+#include "video_core/renderer_opengl/gl_device.h"
+#include "video_core/renderer_opengl/gl_resource_manager.h"
+#include "video_core/texture_cache/texture_cache.h"
+
+namespace OpenGL {
+
+using VideoCommon::SurfaceParams;
+using VideoCommon::ViewParams;
+
+class CachedSurfaceView;
+class CachedSurface;
+class TextureCacheOpenGL;
+
+using Surface = std::shared_ptr<CachedSurface>;
+using View = std::shared_ptr<CachedSurfaceView>;
+using TextureCacheBase = VideoCommon::TextureCache<Surface, View>;
+
+class CachedSurface final : public VideoCommon::SurfaceBase<View> {
+ friend CachedSurfaceView;
+
+public:
+ explicit CachedSurface(GPUVAddr gpu_addr, const SurfaceParams& params);
+ ~CachedSurface();
+
+ void UploadTexture(const std::vector<u8>& staging_buffer) override;
+ void DownloadTexture(std::vector<u8>& staging_buffer) override;
+
+ GLenum GetTarget() const {
+ return target;
+ }
+
+ GLuint GetTexture() const {
+ return texture.handle;
+ }
+
+protected:
+ void DecorateSurfaceName() override;
+
+ View CreateView(const ViewParams& view_key) override;
+ View CreateViewInner(const ViewParams& view_key, bool is_proxy);
+
+private:
+ void UploadTextureMipmap(u32 level, const std::vector<u8>& staging_buffer);
+
+ GLenum internal_format{};
+ GLenum format{};
+ GLenum type{};
+ bool is_compressed{};
+ GLenum target{};
+ u32 view_count{};
+
+ OGLTexture texture;
+ OGLBuffer texture_buffer;
+};
+
+class CachedSurfaceView final : public VideoCommon::ViewBase {
+public:
+ explicit CachedSurfaceView(CachedSurface& surface, const ViewParams& params, bool is_proxy);
+ ~CachedSurfaceView();
+
+ /// Attaches this texture view to the framebuffer currently bound at the given target
+ void Attach(GLenum attachment, GLenum target) const;
+
+ void ApplySwizzle(Tegra::Texture::SwizzleSource x_source,
+ Tegra::Texture::SwizzleSource y_source,
+ Tegra::Texture::SwizzleSource z_source,
+ Tegra::Texture::SwizzleSource w_source);
+
+ void DecorateViewName(GPUVAddr gpu_addr, std::string prefix);
+
+ void MarkAsModified(u64 tick) {
+ surface.MarkAsModified(true, tick);
+ }
+
+ GLuint GetTexture() const {
+ if (is_proxy) {
+ return surface.GetTexture();
+ }
+ return texture_view.handle;
+ }
+
+ const SurfaceParams& GetSurfaceParams() const {
+ return surface.GetSurfaceParams();
+ }
+
+private:
+ u32 EncodeSwizzle(Tegra::Texture::SwizzleSource x_source,
+ Tegra::Texture::SwizzleSource y_source,
+ Tegra::Texture::SwizzleSource z_source,
+ Tegra::Texture::SwizzleSource w_source) const {
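+ // e.g. assuming Tegra's encoding (R = 2, G = 3, B = 4, A = 5), the identity
+ // swizzle packs to (2 << 24) | (3 << 16) | (4 << 8) | 5 = 0x02030405.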
+ return (static_cast<u32>(x_source) << 24) | (static_cast<u32>(y_source) << 16) |
+ (static_cast<u32>(z_source) << 8) | static_cast<u32>(w_source);
+ }
+
+ OGLTextureView CreateTextureView() const;
+
+ CachedSurface& surface;
+ GLenum target{};
+
+ OGLTextureView texture_view;
+ u32 swizzle{};
+ bool is_proxy{};
+};
+
+class TextureCacheOpenGL final : public TextureCacheBase {
+public:
+ explicit TextureCacheOpenGL(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
+ const Device& device);
+ ~TextureCacheOpenGL();
+
+protected:
+ Surface CreateSurface(GPUVAddr gpu_addr, const SurfaceParams& params) override;
+
+ void ImageCopy(Surface& src_surface, Surface& dst_surface,
+ const VideoCommon::CopyParams& copy_params) override;
+
+ void ImageBlit(View& src_view, View& dst_view,
+ const Tegra::Engines::Fermi2D::Config& copy_config) override;
+
+ void BufferCopy(Surface& src_surface, Surface& dst_surface) override;
+
+private:
+ GLuint FetchPBO(std::size_t buffer_size);
+
+ OGLFramebuffer src_framebuffer;
+ OGLFramebuffer dst_framebuffer;
+ std::unordered_map<u32, OGLBuffer> copy_pbo_cache;
+};
+
+} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/maxwell_to_gl.h b/src/video_core/renderer_opengl/maxwell_to_gl.h
index ea77dd211..9ed738171 100644
--- a/src/video_core/renderer_opengl/maxwell_to_gl.h
+++ b/src/video_core/renderer_opengl/maxwell_to_gl.h
@@ -145,7 +145,7 @@ inline GLenum TextureFilterMode(Tegra::Texture::TextureFilter filter_mode,
case Tegra::Texture::TextureMipmapFilter::None:
return GL_LINEAR;
case Tegra::Texture::TextureMipmapFilter::Nearest:
- return GL_NEAREST_MIPMAP_LINEAR;
+ return GL_LINEAR_MIPMAP_NEAREST;
case Tegra::Texture::TextureMipmapFilter::Linear:
return GL_LINEAR_MIPMAP_LINEAR;
}
@@ -157,7 +157,7 @@ inline GLenum TextureFilterMode(Tegra::Texture::TextureFilter filter_mode,
case Tegra::Texture::TextureMipmapFilter::Nearest:
return GL_NEAREST_MIPMAP_NEAREST;
case Tegra::Texture::TextureMipmapFilter::Linear:
- return GL_LINEAR_MIPMAP_NEAREST;
+ return GL_NEAREST_MIPMAP_LINEAR;
}
}
}
diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp
index 3451d321d..1e6ef66ab 100644
--- a/src/video_core/renderer_opengl/renderer_opengl.cpp
+++ b/src/video_core/renderer_opengl/renderer_opengl.cpp
@@ -18,7 +18,6 @@
#include "core/perf_stats.h"
#include "core/settings.h"
#include "core/telemetry_session.h"
-#include "core/tracer/recorder.h"
#include "video_core/morton.h"
#include "video_core/renderer_opengl/gl_rasterizer.h"
#include "video_core/renderer_opengl/renderer_opengl.h"
@@ -102,21 +101,19 @@ RendererOpenGL::RendererOpenGL(Core::Frontend::EmuWindow& emu_window, Core::Syst
RendererOpenGL::~RendererOpenGL() = default;
-/// Swap buffers (render frame)
-void RendererOpenGL::SwapBuffers(
- std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) {
-
+void RendererOpenGL::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
system.GetPerfStats().EndSystemFrame();
// Maintain the rasterizer's state as a priority
OpenGLState prev_state = OpenGLState::GetCurState();
+ state.AllDirty();
state.Apply();
if (framebuffer) {
// If framebuffer is provided, reload it from memory to a texture
- if (screen_info.texture.width != (GLsizei)framebuffer->get().width ||
- screen_info.texture.height != (GLsizei)framebuffer->get().height ||
- screen_info.texture.pixel_format != framebuffer->get().pixel_format) {
+ if (screen_info.texture.width != static_cast<GLsizei>(framebuffer->width) ||
+ screen_info.texture.height != static_cast<GLsizei>(framebuffer->height) ||
+ screen_info.texture.pixel_format != framebuffer->pixel_format) {
// Reallocate texture if the framebuffer size has changed.
// This is expected to not happen very often and hence should not be a
// performance problem.
@@ -131,6 +128,8 @@ void RendererOpenGL::SwapBuffers(
DrawScreen(render_window.GetFramebufferLayout());
+ rasterizer->TickFrame();
+
render_window.SwapBuffers();
}
@@ -140,6 +139,7 @@ void RendererOpenGL::SwapBuffers(
system.GetPerfStats().BeginSystemFrame();
// Restore the rasterizer state
+ prev_state.AllDirty();
prev_state.Apply();
}
@@ -147,43 +147,43 @@ void RendererOpenGL::SwapBuffers(
* Loads framebuffer from emulated memory into the active OpenGL texture.
*/
void RendererOpenGL::LoadFBToScreenInfo(const Tegra::FramebufferConfig& framebuffer) {
- const u32 bytes_per_pixel{Tegra::FramebufferConfig::BytesPerPixel(framebuffer.pixel_format)};
- const u64 size_in_bytes{framebuffer.stride * framebuffer.height * bytes_per_pixel};
- const VAddr framebuffer_addr{framebuffer.address + framebuffer.offset};
-
// Framebuffer orientation handling
framebuffer_transform_flags = framebuffer.transform_flags;
framebuffer_crop_rect = framebuffer.crop_rect;
- // Ensure no bad interactions with GL_UNPACK_ALIGNMENT, which by default
- // only allows rows to have a memory alignement of 4.
- ASSERT(framebuffer.stride % 4 == 0);
-
- if (!rasterizer->AccelerateDisplay(framebuffer, framebuffer_addr, framebuffer.stride)) {
- // Reset the screen info's display texture to its own permanent texture
- screen_info.display_texture = screen_info.texture.resource.handle;
-
- rasterizer->FlushRegion(ToCacheAddr(Memory::GetPointer(framebuffer_addr)), size_in_bytes);
-
- constexpr u32 linear_bpp = 4;
- VideoCore::MortonCopyPixels128(VideoCore::MortonSwizzleMode::MortonToLinear,
- framebuffer.width, framebuffer.height, bytes_per_pixel,
- linear_bpp, Memory::GetPointer(framebuffer_addr),
- gl_framebuffer_data.data());
-
- glPixelStorei(GL_UNPACK_ROW_LENGTH, static_cast<GLint>(framebuffer.stride));
+ const VAddr framebuffer_addr{framebuffer.address + framebuffer.offset};
+ if (rasterizer->AccelerateDisplay(framebuffer, framebuffer_addr, framebuffer.stride)) {
+ return;
+ }
- // Update existing texture
- // TODO: Test what happens on hardware when you change the framebuffer dimensions so that
- // they differ from the LCD resolution.
- // TODO: Applications could theoretically crash yuzu here by specifying too large
- // framebuffer sizes. We should make sure that this cannot happen.
- glTextureSubImage2D(screen_info.texture.resource.handle, 0, 0, 0, framebuffer.width,
- framebuffer.height, screen_info.texture.gl_format,
- screen_info.texture.gl_type, gl_framebuffer_data.data());
+ // Reset the screen info's display texture to its own permanent texture
+ screen_info.display_texture = screen_info.texture.resource.handle;
- glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
- }
+ const auto pixel_format{
+ VideoCore::Surface::PixelFormatFromGPUPixelFormat(framebuffer.pixel_format)};
+ const u32 bytes_per_pixel{VideoCore::Surface::GetBytesPerPixel(pixel_format)};
+ const u64 size_in_bytes{framebuffer.stride * framebuffer.height * bytes_per_pixel};
+ const auto host_ptr{Memory::GetPointer(framebuffer_addr)};
+ rasterizer->FlushRegion(ToCacheAddr(host_ptr), size_in_bytes);
+
+ // TODO(Rodrigo): Read this from HLE
+ constexpr u32 block_height_log2 = 4;
+ VideoCore::MortonSwizzle(VideoCore::MortonSwizzleMode::MortonToLinear, pixel_format,
+ framebuffer.stride, block_height_log2, framebuffer.height, 0, 1, 1,
+ gl_framebuffer_data.data(), host_ptr);
+
+ glPixelStorei(GL_UNPACK_ROW_LENGTH, static_cast<GLint>(framebuffer.stride));
+
+ // Update existing texture
+ // TODO: Test what happens on hardware when you change the framebuffer dimensions so that
+ // they differ from the LCD resolution.
+ // TODO: Applications could theoretically crash yuzu here by specifying too large
+ // framebuffer sizes. We should make sure that this cannot happen.
+ glTextureSubImage2D(screen_info.texture.resource.handle, 0, 0, 0, framebuffer.width,
+ framebuffer.height, screen_info.texture.gl_format,
+ screen_info.texture.gl_type, gl_framebuffer_data.data());
+
+ glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
}
/**
@@ -206,6 +206,7 @@ void RendererOpenGL::InitOpenGLObjects() {
// Link shaders and get variable locations
shader.CreateFromSource(vertex_shader, nullptr, fragment_shader);
state.draw.shader_program = shader.handle;
+ state.AllDirty();
state.Apply();
uniform_modelview_matrix = glGetUniformLocation(shader.handle, "modelview_matrix");
uniform_color_texture = glGetUniformLocation(shader.handle, "color_texture");
@@ -263,8 +264,6 @@ void RendererOpenGL::CreateRasterizer() {
if (rasterizer) {
return;
}
- // Initialize sRGB Usage
- OpenGLState::ClearsRGBUsed();
rasterizer = std::make_unique<RasterizerOpenGL>(system, emu_window, screen_info);
}
@@ -274,22 +273,29 @@ void RendererOpenGL::ConfigureFramebufferTexture(TextureInfo& texture,
texture.height = framebuffer.height;
texture.pixel_format = framebuffer.pixel_format;
+ const auto pixel_format{
+ VideoCore::Surface::PixelFormatFromGPUPixelFormat(framebuffer.pixel_format)};
+ const u32 bytes_per_pixel{VideoCore::Surface::GetBytesPerPixel(pixel_format)};
+ gl_framebuffer_data.resize(texture.width * texture.height * bytes_per_pixel);
+
GLint internal_format;
switch (framebuffer.pixel_format) {
case Tegra::FramebufferConfig::PixelFormat::ABGR8:
internal_format = GL_RGBA8;
texture.gl_format = GL_RGBA;
texture.gl_type = GL_UNSIGNED_INT_8_8_8_8_REV;
- gl_framebuffer_data.resize(texture.width * texture.height * 4);
+ break;
+ case Tegra::FramebufferConfig::PixelFormat::RGB565:
+ internal_format = GL_RGB565;
+ texture.gl_format = GL_RGB;
+ texture.gl_type = GL_UNSIGNED_SHORT_5_6_5;
break;
default:
internal_format = GL_RGBA8;
texture.gl_format = GL_RGBA;
texture.gl_type = GL_UNSIGNED_INT_8_8_8_8_REV;
- gl_framebuffer_data.resize(texture.width * texture.height * 4);
- LOG_CRITICAL(Render_OpenGL, "Unknown framebuffer pixel format: {}",
- static_cast<u32>(framebuffer.pixel_format));
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unknown framebuffer pixel format: {}",
+ static_cast<u32>(framebuffer.pixel_format));
}
texture.resource.Release();
@@ -335,19 +341,17 @@ void RendererOpenGL::DrawScreenTriangles(const ScreenInfo& screen_info, float x,
ScreenRectVertex(x + w, y + h, texcoords.bottom * scale_u, right * scale_v),
}};
- state.texture_units[0].texture = screen_info.display_texture;
- // Workaround brigthness problems in SMO by enabling sRGB in the final output
- // if it has been used in the frame. Needed because of this bug in QT: QTBUG-50987
- state.framebuffer_srgb.enabled = OpenGLState::GetsRGBUsed();
+ state.textures[0] = screen_info.display_texture;
+ state.framebuffer_srgb.enabled = screen_info.display_srgb;
+ state.AllDirty();
state.Apply();
glNamedBufferSubData(vertex_buffer.handle, 0, sizeof(vertices), vertices.data());
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
// Restore default state
state.framebuffer_srgb.enabled = false;
- state.texture_units[0].texture = 0;
+ state.textures[0] = 0;
+ state.AllDirty();
state.Apply();
- // Clear sRGB state for the next frame
- OpenGLState::ClearsRGBUsed();
}
/**
@@ -389,6 +393,7 @@ void RendererOpenGL::CaptureScreenshot() {
GLuint old_read_fb = state.draw.read_framebuffer;
GLuint old_draw_fb = state.draw.draw_framebuffer;
state.draw.read_framebuffer = state.draw.draw_framebuffer = screenshot_framebuffer.handle;
+ state.AllDirty();
state.Apply();
Layout::FramebufferLayout layout{renderer_settings.screenshot_framebuffer_layout};
@@ -396,8 +401,8 @@ void RendererOpenGL::CaptureScreenshot() {
GLuint renderbuffer;
glGenRenderbuffers(1, &renderbuffer);
glBindRenderbuffer(GL_RENDERBUFFER, renderbuffer);
- glRenderbufferStorage(GL_RENDERBUFFER, state.GetsRGBUsed() ? GL_SRGB8 : GL_RGB8, layout.width,
- layout.height);
+ glRenderbufferStorage(GL_RENDERBUFFER, screen_info.display_srgb ? GL_SRGB8 : GL_RGB8,
+ layout.width, layout.height);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, renderbuffer);
DrawScreen(layout);
@@ -408,6 +413,7 @@ void RendererOpenGL::CaptureScreenshot() {
screenshot_framebuffer.Release();
state.draw.read_framebuffer = old_read_fb;
state.draw.draw_framebuffer = old_draw_fb;
+ state.AllDirty();
state.Apply();
glDeleteRenderbuffers(1, &renderbuffer);
@@ -472,7 +478,6 @@ static void APIENTRY DebugHandler(GLenum source, GLenum type, GLuint id, GLenum
}
}
-/// Initialize the renderer
bool RendererOpenGL::Init() {
Core::Frontend::ScopeAcquireWindowContext acquire_context{render_window};
diff --git a/src/video_core/renderer_opengl/renderer_opengl.h b/src/video_core/renderer_opengl/renderer_opengl.h
index 4aebf2321..cf26628ca 100644
--- a/src/video_core/renderer_opengl/renderer_opengl.h
+++ b/src/video_core/renderer_opengl/renderer_opengl.h
@@ -38,19 +38,19 @@ struct TextureInfo {
/// Structure used for storing information about the display target for the Switch screen
struct ScreenInfo {
- GLuint display_texture;
+ GLuint display_texture{};
+ bool display_srgb{};
const Common::Rectangle<float> display_texcoords{0.0f, 0.0f, 1.0f, 1.0f};
TextureInfo texture;
};
-class RendererOpenGL : public VideoCore::RendererBase {
+class RendererOpenGL final : public VideoCore::RendererBase {
public:
explicit RendererOpenGL(Core::Frontend::EmuWindow& emu_window, Core::System& system);
~RendererOpenGL() override;
/// Swap buffers (render frame)
- void SwapBuffers(
- std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) override;
+ void SwapBuffers(const Tegra::FramebufferConfig* framebuffer) override;
/// Initialize the renderer
bool Init() override;
diff --git a/src/video_core/renderer_opengl/utils.cpp b/src/video_core/renderer_opengl/utils.cpp
index f23fc9f9d..c504a2c1a 100644
--- a/src/video_core/renderer_opengl/utils.cpp
+++ b/src/video_core/renderer_opengl/utils.cpp
@@ -5,35 +5,75 @@
#include <string>
#include <fmt/format.h>
#include <glad/glad.h>
+
#include "common/assert.h"
#include "common/common_types.h"
+#include "common/scope_exit.h"
#include "video_core/renderer_opengl/utils.h"
namespace OpenGL {
+VertexArrayPushBuffer::VertexArrayPushBuffer() = default;
+
+VertexArrayPushBuffer::~VertexArrayPushBuffer() = default;
+
+void VertexArrayPushBuffer::Setup(GLuint vao_) {
+ vao = vao_;
+ index_buffer = nullptr;
+ vertex_buffers.clear();
+}
+
+void VertexArrayPushBuffer::SetIndexBuffer(const GLuint* buffer) {
+ index_buffer = buffer;
+}
+
+void VertexArrayPushBuffer::SetVertexBuffer(GLuint binding_index, const GLuint* buffer,
+ GLintptr offset, GLsizei stride) {
+ vertex_buffers.push_back(Entry{binding_index, buffer, offset, stride});
+}
+
+void VertexArrayPushBuffer::Bind() {
+ if (index_buffer) {
+ glVertexArrayElementBuffer(vao, *index_buffer);
+ }
+
+ // TODO(Rodrigo): Find a way to ARB_multi_bind this
+ for (const auto& entry : vertex_buffers) {
+ glVertexArrayVertexBuffer(vao, entry.binding_index, *entry.buffer, entry.offset,
+ entry.stride);
+ }
+}
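+
+// Design note (presumed intent): buffer handles are captured as const GLuint*
+// rather than by value so that a handle created or recreated between Setup()
+// and Bind() is dereferenced to its final name only at bind time.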
+
BindBuffersRangePushBuffer::BindBuffersRangePushBuffer(GLenum target) : target{target} {}
BindBuffersRangePushBuffer::~BindBuffersRangePushBuffer() = default;
void BindBuffersRangePushBuffer::Setup(GLuint first_) {
first = first_;
- buffers.clear();
+ buffer_pointers.clear();
offsets.clear();
sizes.clear();
}
-void BindBuffersRangePushBuffer::Push(GLuint buffer, GLintptr offset, GLsizeiptr size) {
- buffers.push_back(buffer);
+void BindBuffersRangePushBuffer::Push(const GLuint* buffer, GLintptr offset, GLsizeiptr size) {
+ buffer_pointers.push_back(buffer);
offsets.push_back(offset);
sizes.push_back(size);
}
-void BindBuffersRangePushBuffer::Bind() const {
- const std::size_t count{buffers.size()};
+void BindBuffersRangePushBuffer::Bind() {
+ // Ensure sizes are valid.
+ const std::size_t count{buffer_pointers.size()};
DEBUG_ASSERT(count == offsets.size() && count == sizes.size());
if (count == 0) {
return;
}
+
+ // Dereference buffers.
+ buffers.resize(count);
+ std::transform(buffer_pointers.begin(), buffer_pointers.end(), buffers.begin(),
+ [](const GLuint* pointer) { return *pointer; });
+
glBindBuffersRange(target, first, static_cast<GLsizei>(count), buffers.data(), offsets.data(),
sizes.data());
}
@@ -63,4 +103,4 @@ void LabelGLObject(GLenum identifier, GLuint handle, VAddr addr, std::string_vie
glObjectLabel(identifier, handle, -1, static_cast<const GLchar*>(object_label.c_str()));
}
-} // namespace OpenGL
\ No newline at end of file
+} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/utils.h b/src/video_core/renderer_opengl/utils.h
index b3e9fc499..6c2b45546 100644
--- a/src/video_core/renderer_opengl/utils.h
+++ b/src/video_core/renderer_opengl/utils.h
@@ -11,20 +11,49 @@
namespace OpenGL {
-class BindBuffersRangePushBuffer {
+class VertexArrayPushBuffer final {
public:
- BindBuffersRangePushBuffer(GLenum target);
+ explicit VertexArrayPushBuffer();
+ ~VertexArrayPushBuffer();
+
+ void Setup(GLuint vao_);
+
+ void SetIndexBuffer(const GLuint* buffer);
+
+ void SetVertexBuffer(GLuint binding_index, const GLuint* buffer, GLintptr offset,
+ GLsizei stride);
+
+ void Bind();
+
+private:
+ struct Entry {
+ GLuint binding_index{};
+ const GLuint* buffer{};
+ GLintptr offset{};
+ GLsizei stride{};
+ };
+
+ GLuint vao{};
+ const GLuint* index_buffer{};
+ std::vector<Entry> vertex_buffers;
+};
+
+class BindBuffersRangePushBuffer final {
+public:
+ explicit BindBuffersRangePushBuffer(GLenum target);
~BindBuffersRangePushBuffer();
void Setup(GLuint first_);
- void Push(GLuint buffer, GLintptr offset, GLsizeiptr size);
+ void Push(const GLuint* buffer, GLintptr offset, GLsizeiptr size);
- void Bind() const;
+ void Bind();
private:
- GLenum target;
- GLuint first;
+ GLenum target{};
+ GLuint first{};
+ std::vector<const GLuint*> buffer_pointers;
+
std::vector<GLuint> buffers;
std::vector<GLintptr> offsets;
std::vector<GLsizeiptr> sizes;
@@ -32,4 +61,4 @@ private:
void LabelGLObject(GLenum identifier, GLuint handle, VAddr addr, std::string_view extra_info = {});
-} // namespace OpenGL \ No newline at end of file
+} // namespace OpenGL
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
index 02a9f5ecb..d2e9f4031 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
@@ -109,8 +109,8 @@ void VKBufferCache::Reserve(std::size_t max_size) {
}
}
-VKExecutionContext VKBufferCache::Send(VKExecutionContext exctx) {
- return stream_buffer->Send(exctx, buffer_offset - buffer_offset_base);
+void VKBufferCache::Send() {
+ stream_buffer->Send(buffer_offset - buffer_offset_base);
}
void VKBufferCache::AlignBuffer(std::size_t alignment) {
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h
index 3edf460df..49f13bcdc 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.h
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h
@@ -77,7 +77,7 @@ public:
void Reserve(std::size_t max_size);
/// Ensures that the set data is sent to the device.
- [[nodiscard]] VKExecutionContext Send(VKExecutionContext exctx);
+ void Send();
/// Returns the buffer cache handle.
vk::Buffer GetBuffer() const {
diff --git a/src/video_core/renderer_vulkan/vk_device.cpp b/src/video_core/renderer_vulkan/vk_device.cpp
index 3b966ddc3..897cbb4e8 100644
--- a/src/video_core/renderer_vulkan/vk_device.cpp
+++ b/src/video_core/renderer_vulkan/vk_device.cpp
@@ -2,9 +2,10 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
-#include <map>
+#include <bitset>
#include <optional>
#include <set>
+#include <string_view>
#include <vector>
#include "common/assert.h"
#include "video_core/renderer_vulkan/declarations.h"
@@ -12,13 +13,32 @@
namespace Vulkan {
+namespace {
+
+template <typename T>
+void SetNext(void**& next, T& data) {
+ *next = &data;
+ next = &data.pNext;
+}
+
+template <typename T>
+T GetFeatures(vk::PhysicalDevice physical, vk::DispatchLoaderDynamic dldi) {
+ vk::PhysicalDeviceFeatures2 features;
+ T extension_features;
+ features.pNext = &extension_features;
+ physical.getFeatures2(&features, dldi);
+ return extension_features;
+}
+
+} // Anonymous namespace
+
namespace Alternatives {
-constexpr std::array<vk::Format, 3> Depth24UnormS8Uint = {
- vk::Format::eD32SfloatS8Uint, vk::Format::eD16UnormS8Uint, {}};
-constexpr std::array<vk::Format, 3> Depth16UnormS8Uint = {
- vk::Format::eD24UnormS8Uint, vk::Format::eD32SfloatS8Uint, {}};
-constexpr std::array<vk::Format, 2> Astc = {vk::Format::eA8B8G8R8UnormPack32, {}};
+constexpr std::array Depth24UnormS8Uint = {vk::Format::eD32SfloatS8Uint,
+ vk::Format::eD16UnormS8Uint, vk::Format{}};
+constexpr std::array Depth16UnormS8Uint = {vk::Format::eD24UnormS8Uint,
+ vk::Format::eD32SfloatS8Uint, vk::Format{}};
+constexpr std::array Astc = {vk::Format::eA8B8G8R8UnormPack32, vk::Format{}};
} // namespace Alternatives
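The SetNext() helper above maintains a Vulkan pNext chain: it stores the new structure's address through the current tail pointer, then advances the tail to that structure's own pNext member so the next call extends the chain. A small self-contained sketch of the idiom (FeatureA/FeatureB are stand-ins, not Vulkan types):

#include <cassert>

// Stand-in feature structs; the real code chains Vulkan feature structures.
struct FeatureA { void* pNext = nullptr; };
struct FeatureB { void* pNext = nullptr; };
struct Features2 { void* pNext = nullptr; };

template <typename T>
void SetNext(void**& next, T& data) {
    *next = &data;      // Hook the new struct into the chain
    next = &data.pNext; // The next call will extend from here
}

int main() {
    Features2 features2;
    void** next = &features2.pNext;

    FeatureA a;
    FeatureB b;
    SetNext(next, a); // features2.pNext -> a
    SetNext(next, b); // a.pNext -> b

    assert(features2.pNext == &a);
    assert(a.pNext == &b);
}

Create() below uses exactly this shape to append the vertex divisor, float16, std430 layout and uint8 index feature structures only when the device supports them.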
@@ -58,16 +78,53 @@ VKDevice::VKDevice(const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDevice phy
VKDevice::~VKDevice() = default;
bool VKDevice::Create(const vk::DispatchLoaderDynamic& dldi, vk::Instance instance) {
- vk::PhysicalDeviceFeatures device_features;
- device_features.vertexPipelineStoresAndAtomics = true;
- device_features.independentBlend = true;
- device_features.textureCompressionASTC_LDR = is_optimal_astc_supported;
-
const auto queue_cis = GetDeviceQueueCreateInfos();
- const std::vector<const char*> extensions = LoadExtensions(dldi);
- const vk::DeviceCreateInfo device_ci({}, static_cast<u32>(queue_cis.size()), queue_cis.data(),
- 0, nullptr, static_cast<u32>(extensions.size()),
- extensions.data(), &device_features);
+ const std::vector extensions = LoadExtensions(dldi);
+
+ vk::PhysicalDeviceFeatures2 features2;
+ void** next = &features2.pNext;
+ auto& features = features2.features;
+ features.vertexPipelineStoresAndAtomics = true;
+ features.independentBlend = true;
+ features.depthClamp = true;
+ features.samplerAnisotropy = true;
+ features.largePoints = true;
+ features.textureCompressionASTC_LDR = is_optimal_astc_supported;
+
+ vk::PhysicalDeviceVertexAttributeDivisorFeaturesEXT vertex_divisor;
+ vertex_divisor.vertexAttributeInstanceRateDivisor = true;
+ vertex_divisor.vertexAttributeInstanceRateZeroDivisor = true;
+ SetNext(next, vertex_divisor);
+
+ vk::PhysicalDeviceFloat16Int8FeaturesKHR float16_int8;
+ if (is_float16_supported) {
+ float16_int8.shaderFloat16 = true;
+ SetNext(next, float16_int8);
+ } else {
+ LOG_INFO(Render_Vulkan, "Device doesn't support float16 natively");
+ }
+
+ vk::PhysicalDeviceUniformBufferStandardLayoutFeaturesKHR std430_layout;
+ if (khr_uniform_buffer_standard_layout) {
+ std430_layout.uniformBufferStandardLayout = true;
+ SetNext(next, std430_layout);
+ } else {
+ LOG_INFO(Render_Vulkan, "Device doesn't support packed UBOs");
+ }
+
+ vk::PhysicalDeviceIndexTypeUint8FeaturesEXT index_type_uint8;
+ if (ext_index_type_uint8) {
+ index_type_uint8.indexTypeUint8 = true;
+ SetNext(next, index_type_uint8);
+ } else {
+ LOG_INFO(Render_Vulkan, "Device doesn't support uint8 indexes");
+ }
+
+ vk::DeviceCreateInfo device_ci({}, static_cast<u32>(queue_cis.size()), queue_cis.data(), 0,
+ nullptr, static_cast<u32>(extensions.size()), extensions.data(),
+ nullptr);
+ device_ci.pNext = &features2;
+
vk::Device dummy_logical;
if (physical.createDevice(&device_ci, nullptr, &dummy_logical, dldi) != vk::Result::eSuccess) {
LOG_CRITICAL(Render_Vulkan, "Logical device failed to be created!");
@@ -78,6 +135,17 @@ bool VKDevice::Create(const vk::DispatchLoaderDynamic& dldi, vk::Instance instan
logical = UniqueDevice(
dummy_logical, vk::ObjectDestroy<vk::NoParent, vk::DispatchLoaderDynamic>(nullptr, dld));
+ if (khr_driver_properties) {
+ vk::PhysicalDeviceDriverPropertiesKHR driver;
+ vk::PhysicalDeviceProperties2 properties;
+ properties.pNext = &driver;
+ physical.getProperties2(&properties, dld);
+ driver_id = driver.driverID;
+ LOG_INFO(Render_Vulkan, "Driver: {} {}", driver.driverName, driver.driverInfo);
+ } else {
+ LOG_INFO(Render_Vulkan, "Driver: Unknown");
+ }
+
graphics_queue = logical->getQueue(graphics_family, 0, dld);
present_queue = logical->getQueue(present_family, 0, dld);
return true;
@@ -92,20 +160,19 @@ vk::Format VKDevice::GetSupportedFormat(vk::Format wanted_format,
// The wanted format is not supported by hardware; search for alternatives.
const vk::Format* alternatives = GetFormatAlternatives(wanted_format);
if (alternatives == nullptr) {
- LOG_CRITICAL(Render_Vulkan,
- "Format={} with usage={} and type={} has no defined alternatives and host "
- "hardware does not support it",
- vk::to_string(wanted_format), vk::to_string(wanted_usage),
- static_cast<u32>(format_type));
- UNREACHABLE();
+ UNREACHABLE_MSG("Format={} with usage={} and type={} has no defined alternatives and host "
+ "hardware does not support it",
+ vk::to_string(wanted_format), vk::to_string(wanted_usage),
+ static_cast<u32>(format_type));
return wanted_format;
}
std::size_t i = 0;
for (vk::Format alternative = alternatives[0]; alternative != vk::Format{};
alternative = alternatives[++i]) {
- if (!IsFormatSupported(alternative, wanted_usage, format_type))
+ if (!IsFormatSupported(alternative, wanted_usage, format_type)) {
continue;
+ }
LOG_WARNING(Render_Vulkan,
"Emulating format={} with alternative format={} with usage={} and type={}",
static_cast<u32>(wanted_format), static_cast<u32>(alternative),
@@ -114,12 +181,10 @@ vk::Format VKDevice::GetSupportedFormat(vk::Format wanted_format,
}
// No alternatives found, panic
- LOG_CRITICAL(Render_Vulkan,
- "Format={} with usage={} and type={} is not supported by the host hardware and "
- "doesn't support any of the alternatives",
- static_cast<u32>(wanted_format), static_cast<u32>(wanted_usage),
- static_cast<u32>(format_type));
- UNREACHABLE();
+ UNREACHABLE_MSG("Format={} with usage={} and type={} is not supported by the host hardware and "
+ "doesn't support any of the alternatives",
+ static_cast<u32>(wanted_format), static_cast<u32>(wanted_usage),
+ static_cast<u32>(format_type));
return wanted_format;
}
@@ -132,7 +197,7 @@ bool VKDevice::IsOptimalAstcSupported(const vk::PhysicalDeviceFeatures& features
vk::FormatFeatureFlagBits::eSampledImage | vk::FormatFeatureFlagBits::eBlitSrc |
vk::FormatFeatureFlagBits::eBlitDst | vk::FormatFeatureFlagBits::eTransferSrc |
vk::FormatFeatureFlagBits::eTransferDst};
- constexpr std::array<vk::Format, 9> astc_formats = {
+ constexpr std::array astc_formats = {
vk::Format::eAstc4x4UnormBlock, vk::Format::eAstc4x4SrgbBlock,
vk::Format::eAstc8x8SrgbBlock, vk::Format::eAstc8x6SrgbBlock,
vk::Format::eAstc5x4SrgbBlock, vk::Format::eAstc5x5UnormBlock,
@@ -151,76 +216,120 @@ bool VKDevice::IsFormatSupported(vk::Format wanted_format, vk::FormatFeatureFlag
FormatType format_type) const {
const auto it = format_properties.find(wanted_format);
if (it == format_properties.end()) {
- LOG_CRITICAL(Render_Vulkan, "Unimplemented format query={}", vk::to_string(wanted_format));
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unimplemented format query={}", vk::to_string(wanted_format));
return true;
}
- const vk::FormatFeatureFlags supported_usage = GetFormatFeatures(it->second, format_type);
+ const auto supported_usage = GetFormatFeatures(it->second, format_type);
return (supported_usage & wanted_usage) == wanted_usage;
}
bool VKDevice::IsSuitable(const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDevice physical,
vk::SurfaceKHR surface) {
- bool has_swapchain{};
+ LOG_INFO(Render_Vulkan, "{}", physical.getProperties(dldi).deviceName);
+ bool is_suitable = true;
+
+ constexpr std::array required_extensions = {VK_KHR_SWAPCHAIN_EXTENSION_NAME,
+ VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME};
+ std::bitset<required_extensions.size()> available_extensions{};
+
for (const auto& prop : physical.enumerateDeviceExtensionProperties(nullptr, dldi)) {
- has_swapchain |= prop.extensionName == std::string(VK_KHR_SWAPCHAIN_EXTENSION_NAME);
+ for (std::size_t i = 0; i < required_extensions.size(); ++i) {
+ if (available_extensions[i]) {
+ continue;
+ }
+ available_extensions[i] =
+ required_extensions[i] == std::string_view{prop.extensionName};
+ }
}
- if (!has_swapchain) {
- // The device doesn't support creating swapchains.
- return false;
+ if (!available_extensions.all()) {
+ for (std::size_t i = 0; i < required_extensions.size(); ++i) {
+ if (available_extensions[i]) {
+ continue;
+ }
+ LOG_INFO(Render_Vulkan, "Missing required extension: {}", required_extensions[i]);
+ is_suitable = false;
+ }
}
bool has_graphics{}, has_present{};
const auto queue_family_properties = physical.getQueueFamilyProperties(dldi);
for (u32 i = 0; i < static_cast<u32>(queue_family_properties.size()); ++i) {
const auto& family = queue_family_properties[i];
- if (family.queueCount == 0)
+ if (family.queueCount == 0) {
continue;
-
+ }
has_graphics |=
(family.queueFlags & vk::QueueFlagBits::eGraphics) != static_cast<vk::QueueFlagBits>(0);
has_present |= physical.getSurfaceSupportKHR(i, surface, dldi) != 0;
}
if (!has_graphics || !has_present) {
- // The device doesn't have a graphics and present queue.
- return false;
+ LOG_INFO(Render_Vulkan, "Device lacks a graphics and present queue");
+ is_suitable = false;
}
// TODO(Rodrigo): Check if the device matches all requirements.
const auto properties{physical.getProperties(dldi)};
- const auto limits{properties.limits};
- if (limits.maxUniformBufferRange < 65536) {
- return false;
+ const auto& limits{properties.limits};
+
+ constexpr u32 required_ubo_size = 65536;
+ if (limits.maxUniformBufferRange < required_ubo_size) {
+ LOG_INFO(Render_Vulkan, "Device UBO size {} is too small, {} is required)",
+ limits.maxUniformBufferRange, required_ubo_size);
+ is_suitable = false;
}
- const vk::PhysicalDeviceFeatures features{physical.getFeatures(dldi)};
- if (!features.vertexPipelineStoresAndAtomics || !features.independentBlend) {
- return false;
+ const auto features{physical.getFeatures(dldi)};
+ const std::array feature_report = {
+ std::make_pair(features.vertexPipelineStoresAndAtomics, "vertexPipelineStoresAndAtomics"),
+ std::make_pair(features.independentBlend, "independentBlend"),
+ std::make_pair(features.depthClamp, "depthClamp"),
+ std::make_pair(features.samplerAnisotropy, "samplerAnisotropy"),
+ std::make_pair(features.largePoints, "largePoints"),
+ };
+ for (const auto& [supported, name] : feature_report) {
+ if (supported) {
+ continue;
+ }
+ LOG_INFO(Render_Vulkan, "Missing required feature: {}", name);
+ is_suitable = false;
}
- // Device is suitable.
- return true;
+ return is_suitable;
}
std::vector<const char*> VKDevice::LoadExtensions(const vk::DispatchLoaderDynamic& dldi) {
std::vector<const char*> extensions;
- extensions.reserve(2);
+ extensions.reserve(7);
extensions.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME);
+ extensions.push_back(VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME);
const auto Test = [&](const vk::ExtensionProperties& extension,
std::optional<std::reference_wrapper<bool>> status, const char* name,
- u32 revision) {
- if (extension.extensionName != std::string(name)) {
+ bool push) {
+ if (extension.extensionName != std::string_view(name)) {
return;
}
- extensions.push_back(name);
+ if (push) {
+ extensions.push_back(name);
+ }
if (status) {
status->get() = true;
}
};
+ bool khr_shader_float16_int8{};
for (const auto& extension : physical.enumerateDeviceExtensionProperties(nullptr, dldi)) {
- Test(extension, ext_scalar_block_layout, VK_EXT_SCALAR_BLOCK_LAYOUT_EXTENSION_NAME, 1);
+ Test(extension, khr_uniform_buffer_standard_layout,
+ VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_EXTENSION_NAME, true);
+ Test(extension, ext_index_type_uint8, VK_EXT_INDEX_TYPE_UINT8_EXTENSION_NAME, true);
+ Test(extension, khr_driver_properties, VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME, true);
+ Test(extension, khr_shader_float16_int8, VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME, false);
+ }
+
+ if (khr_shader_float16_int8) {
+ is_float16_supported =
+ GetFeatures<vk::PhysicalDeviceFloat16Int8FeaturesKHR>(physical, dldi).shaderFloat16;
+ extensions.push_back(VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME);
}
return extensions;
@@ -250,9 +359,10 @@ void VKDevice::SetupFamilies(const vk::DispatchLoaderDynamic& dldi, vk::SurfaceK
}
void VKDevice::SetupProperties(const vk::DispatchLoaderDynamic& dldi) {
- const vk::PhysicalDeviceProperties props = physical.getProperties(dldi);
+ const auto props = physical.getProperties(dldi);
device_type = props.deviceType;
uniform_buffer_alignment = static_cast<u64>(props.limits.minUniformBufferOffsetAlignment);
+ storage_buffer_alignment = static_cast<u64>(props.limits.minStorageBufferOffsetAlignment);
max_storage_buffer_range = static_cast<u64>(props.limits.maxStorageBufferRange);
}
@@ -273,42 +383,53 @@ std::vector<vk::DeviceQueueCreateInfo> VKDevice::GetDeviceQueueCreateInfos() con
return queue_cis;
}
-std::map<vk::Format, vk::FormatProperties> VKDevice::GetFormatProperties(
+std::unordered_map<vk::Format, vk::FormatProperties> VKDevice::GetFormatProperties(
const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDevice physical) {
- static constexpr std::array formats{vk::Format::eA8B8G8R8UnormPack32,
- vk::Format::eB5G6R5UnormPack16,
- vk::Format::eA2B10G10R10UnormPack32,
- vk::Format::eR32G32B32A32Sfloat,
- vk::Format::eR16G16Unorm,
- vk::Format::eR16G16Snorm,
- vk::Format::eR8G8B8A8Srgb,
- vk::Format::eR8Unorm,
- vk::Format::eB10G11R11UfloatPack32,
- vk::Format::eR32Sfloat,
- vk::Format::eR16Sfloat,
- vk::Format::eR16G16B16A16Sfloat,
- vk::Format::eD32Sfloat,
- vk::Format::eD16Unorm,
- vk::Format::eD16UnormS8Uint,
- vk::Format::eD24UnormS8Uint,
- vk::Format::eD32SfloatS8Uint,
- vk::Format::eBc1RgbaUnormBlock,
- vk::Format::eBc2UnormBlock,
- vk::Format::eBc3UnormBlock,
- vk::Format::eBc4UnormBlock,
- vk::Format::eBc5UnormBlock,
- vk::Format::eBc5SnormBlock,
- vk::Format::eBc7UnormBlock,
- vk::Format::eAstc4x4UnormBlock,
- vk::Format::eAstc4x4SrgbBlock,
- vk::Format::eAstc8x8SrgbBlock,
- vk::Format::eAstc8x6SrgbBlock,
- vk::Format::eAstc5x4SrgbBlock,
- vk::Format::eAstc5x5UnormBlock,
- vk::Format::eAstc5x5SrgbBlock,
- vk::Format::eAstc10x8UnormBlock,
- vk::Format::eAstc10x8SrgbBlock};
- std::map<vk::Format, vk::FormatProperties> format_properties;
+ constexpr std::array formats{vk::Format::eA8B8G8R8UnormPack32,
+ vk::Format::eA8B8G8R8SnormPack32,
+ vk::Format::eA8B8G8R8SrgbPack32,
+ vk::Format::eB5G6R5UnormPack16,
+ vk::Format::eA2B10G10R10UnormPack32,
+ vk::Format::eR32G32B32A32Sfloat,
+ vk::Format::eR16G16B16A16Uint,
+ vk::Format::eR16G16Unorm,
+ vk::Format::eR16G16Snorm,
+ vk::Format::eR16G16Sfloat,
+ vk::Format::eR16Unorm,
+ vk::Format::eR8G8B8A8Srgb,
+ vk::Format::eR8G8Unorm,
+ vk::Format::eR8G8Snorm,
+ vk::Format::eR8Unorm,
+ vk::Format::eB10G11R11UfloatPack32,
+ vk::Format::eR32Sfloat,
+ vk::Format::eR16Sfloat,
+ vk::Format::eR16G16B16A16Sfloat,
+ vk::Format::eB8G8R8A8Unorm,
+ vk::Format::eD32Sfloat,
+ vk::Format::eD16Unorm,
+ vk::Format::eD16UnormS8Uint,
+ vk::Format::eD24UnormS8Uint,
+ vk::Format::eD32SfloatS8Uint,
+ vk::Format::eBc1RgbaUnormBlock,
+ vk::Format::eBc2UnormBlock,
+ vk::Format::eBc3UnormBlock,
+ vk::Format::eBc4UnormBlock,
+ vk::Format::eBc5UnormBlock,
+ vk::Format::eBc5SnormBlock,
+ vk::Format::eBc7UnormBlock,
+ vk::Format::eBc1RgbaSrgbBlock,
+ vk::Format::eBc3SrgbBlock,
+ vk::Format::eBc7SrgbBlock,
+ vk::Format::eAstc4x4UnormBlock,
+ vk::Format::eAstc4x4SrgbBlock,
+ vk::Format::eAstc8x8SrgbBlock,
+ vk::Format::eAstc8x6SrgbBlock,
+ vk::Format::eAstc5x4SrgbBlock,
+ vk::Format::eAstc5x5UnormBlock,
+ vk::Format::eAstc5x5SrgbBlock,
+ vk::Format::eAstc10x8UnormBlock,
+ vk::Format::eAstc10x8SrgbBlock};
+ std::unordered_map<vk::Format, vk::FormatProperties> format_properties;
for (const auto format : formats) {
format_properties.emplace(format, physical.getFormatProperties(format, dldi));
}
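A note on the IsSuitable() rewrite earlier in this file: instead of returning at the first missing capability, it now records extension availability in a std::bitset sized to the required list and logs every gap before rejecting the device. A self-contained sketch of that check, with string literals standing in for vk::ExtensionProperties:

#include <array>
#include <bitset>
#include <cstddef>
#include <cstdio>
#include <string_view>
#include <vector>

int main() {
    // Stand-ins for VK_KHR_swapchain and VK_EXT_vertex_attribute_divisor.
    constexpr std::array required = {std::string_view{"VK_KHR_swapchain"},
                                     std::string_view{"VK_EXT_vertex_attribute_divisor"}};
    // Stand-in for physical.enumerateDeviceExtensionProperties().
    const std::vector<std::string_view> reported = {"VK_KHR_swapchain", "VK_KHR_maintenance1"};

    std::bitset<required.size()> available;
    for (const std::string_view name : reported) {
        for (std::size_t i = 0; i < required.size(); ++i) {
            if (!available[i] && required[i] == name) {
                available[i] = true;
            }
        }
    }

    // Report every missing extension instead of bailing out at the first one.
    for (std::size_t i = 0; i < required.size(); ++i) {
        if (!available[i]) {
            std::printf("Missing required extension: %s\n", required[i].data());
        }
    }
    return available.all() ? 0 : 1;
}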
diff --git a/src/video_core/renderer_vulkan/vk_device.h b/src/video_core/renderer_vulkan/vk_device.h
index 537825d8b..010d4c3d6 100644
--- a/src/video_core/renderer_vulkan/vk_device.h
+++ b/src/video_core/renderer_vulkan/vk_device.h
@@ -4,7 +4,7 @@
#pragma once
-#include <map>
+#include <unordered_map>
#include <vector>
#include "common/common_types.h"
#include "video_core/renderer_vulkan/declarations.h"
@@ -69,16 +69,26 @@ public:
return present_family;
}
- /// Returns if the device is integrated with the host CPU.
+ /// Returns true if the device is integrated with the host CPU.
bool IsIntegrated() const {
return device_type == vk::PhysicalDeviceType::eIntegratedGpu;
}
+ /// Returns the driver ID.
+ vk::DriverIdKHR GetDriverID() const {
+ return driver_id;
+ }
+
/// Returns the uniform buffer alignment requirement.
u64 GetUniformBufferAlignment() const {
return uniform_buffer_alignment;
}
+ /// Returns the storage buffer alignment requirement.
+ u64 GetStorageBufferAlignment() const {
+ return storage_buffer_alignment;
+ }
+
/// Returns the maximum range for storage buffers.
u64 GetMaxStorageBufferRange() const {
return max_storage_buffer_range;
@@ -89,9 +99,19 @@ public:
return is_optimal_astc_supported;
}
+ /// Returns true if the device supports float16 natively.
+ bool IsFloat16Supported() const {
+ return is_float16_supported;
+ }
+
/// Returns true if the device supports VK_EXT_scalar_block_layout.
- bool IsExtScalarBlockLayoutSupported() const {
- return ext_scalar_block_layout;
+ bool IsKhrUniformBufferStandardLayoutSupported() const {
+ return khr_uniform_buffer_standard_layout;
+ }
+
+ /// Returns true if the device supports VK_EXT_index_type_uint8.
+ bool IsExtIndexTypeUint8Supported() const {
+ return ext_index_type_uint8;
}
/// Checks if the physical device is suitable.
@@ -123,22 +143,28 @@ private:
FormatType format_type) const;
/// Returns the device properties for Vulkan formats.
- static std::map<vk::Format, vk::FormatProperties> GetFormatProperties(
+ static std::unordered_map<vk::Format, vk::FormatProperties> GetFormatProperties(
const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDevice physical);
- const vk::PhysicalDevice physical; ///< Physical device.
- vk::DispatchLoaderDynamic dld; ///< Device function pointers.
- UniqueDevice logical; ///< Logical device.
- vk::Queue graphics_queue; ///< Main graphics queue.
- vk::Queue present_queue; ///< Main present queue.
- u32 graphics_family{}; ///< Main graphics queue family index.
- u32 present_family{}; ///< Main present queue family index.
- vk::PhysicalDeviceType device_type; ///< Physical device type.
- u64 uniform_buffer_alignment{}; ///< Uniform buffer alignment requeriment.
- u64 max_storage_buffer_range{}; ///< Max storage buffer size.
- bool is_optimal_astc_supported{}; ///< Support for native ASTC.
- bool ext_scalar_block_layout{}; ///< Support for VK_EXT_scalar_block_layout.
- std::map<vk::Format, vk::FormatProperties> format_properties; ///< Format properties dictionary.
+ const vk::PhysicalDevice physical; ///< Physical device.
+ vk::DispatchLoaderDynamic dld; ///< Device function pointers.
+ UniqueDevice logical; ///< Logical device.
+ vk::Queue graphics_queue; ///< Main graphics queue.
+ vk::Queue present_queue; ///< Main present queue.
+ u32 graphics_family{}; ///< Main graphics queue family index.
+ u32 present_family{}; ///< Main present queue family index.
+ vk::PhysicalDeviceType device_type; ///< Physical device type.
+ vk::DriverIdKHR driver_id{}; ///< Driver ID.
+ u64 uniform_buffer_alignment{}; ///< Uniform buffer alignment requirement.
+ u64 storage_buffer_alignment{}; ///< Storage buffer alignment requirement.
+ u64 max_storage_buffer_range{}; ///< Max storage buffer size.
+ bool is_optimal_astc_supported{}; ///< Support for native ASTC.
+ bool is_float16_supported{}; ///< Support for float16 arithmetic.
+ bool khr_uniform_buffer_standard_layout{}; ///< Support for std430 on UBOs.
+ bool ext_index_type_uint8{}; ///< Support for VK_EXT_index_type_uint8.
+ bool khr_driver_properties{}; ///< Support for VK_KHR_driver_properties.
+ std::unordered_map<vk::Format, vk::FormatProperties>
+ format_properties; ///< Format properties dictionary.
};
} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_sampler_cache.h b/src/video_core/renderer_vulkan/vk_sampler_cache.h
index 771b05c73..1f73b716b 100644
--- a/src/video_core/renderer_vulkan/vk_sampler_cache.h
+++ b/src/video_core/renderer_vulkan/vk_sampler_cache.h
@@ -4,9 +4,6 @@
#pragma once
-#include <unordered_map>
-
-#include "common/common_types.h"
#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/sampler_cache.h"
#include "video_core/textures/texture.h"
@@ -21,9 +18,9 @@ public:
~VKSamplerCache();
protected:
- UniqueSampler CreateSampler(const Tegra::Texture::TSCEntry& tsc) const;
+ UniqueSampler CreateSampler(const Tegra::Texture::TSCEntry& tsc) const override;
- vk::Sampler ToSamplerType(const UniqueSampler& sampler) const;
+ vk::Sampler ToSamplerType(const UniqueSampler& sampler) const override;
private:
const VKDevice& device;
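The override specifiers added above are not cosmetic: they make the compiler confirm that CreateSampler and ToSamplerType still match the virtuals declared in the shared sampler cache base class. A tiny sketch of the failure mode this guards against (Base/Derived are illustrative):

struct Base {
    virtual ~Base() = default;
    virtual int Create(int key) const {
        return key;
    }
};

struct Derived : Base {
    // If Base::Create() ever changed its signature, this declaration would
    // fail to compile instead of silently becoming an unrelated overload.
    int Create(int key) const override {
        return key * 2;
    }
};

int main() {
    Derived derived;
    const Base& base = derived;
    return base.Create(21) == 42 ? 0 : 1;
}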
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.cpp b/src/video_core/renderer_vulkan/vk_scheduler.cpp
index f1fea1871..0f8116458 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.cpp
+++ b/src/video_core/renderer_vulkan/vk_scheduler.cpp
@@ -19,23 +19,19 @@ VKScheduler::VKScheduler(const VKDevice& device, VKResourceManager& resource_man
VKScheduler::~VKScheduler() = default;
-VKExecutionContext VKScheduler::GetExecutionContext() const {
- return VKExecutionContext(current_fence, current_cmdbuf);
-}
-
-VKExecutionContext VKScheduler::Flush(vk::Semaphore semaphore) {
+void VKScheduler::Flush(bool release_fence, vk::Semaphore semaphore) {
SubmitExecution(semaphore);
- current_fence->Release();
+ if (release_fence) {
+ current_fence->Release();
+ }
AllocateNewContext();
- return GetExecutionContext();
}
-VKExecutionContext VKScheduler::Finish(vk::Semaphore semaphore) {
+void VKScheduler::Finish(bool release_fence, vk::Semaphore semaphore) {
SubmitExecution(semaphore);
current_fence->Wait();
- current_fence->Release();
+ if (release_fence) {
+ current_fence->Release();
+ }
AllocateNewContext();
- return GetExecutionContext();
}
void VKScheduler::SubmitExecution(vk::Semaphore semaphore) {
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.h b/src/video_core/renderer_vulkan/vk_scheduler.h
index cfaf5376f..0e5b49c7f 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.h
+++ b/src/video_core/renderer_vulkan/vk_scheduler.h
@@ -10,10 +10,43 @@
namespace Vulkan {
class VKDevice;
-class VKExecutionContext;
class VKFence;
class VKResourceManager;
+class VKFenceView {
+public:
+ VKFenceView() = default;
+ VKFenceView(VKFence* const& fence) : fence{fence} {}
+
+ VKFence* operator->() const noexcept {
+ return fence;
+ }
+
+ operator VKFence&() const noexcept {
+ return *fence;
+ }
+
+private:
+ VKFence* const& fence;
+};
+
+class VKCommandBufferView {
+public:
+ VKCommandBufferView() = default;
+ VKCommandBufferView(const vk::CommandBuffer& cmdbuf) : cmdbuf{cmdbuf} {}
+
+ const vk::CommandBuffer* operator->() const noexcept {
+ return &cmdbuf;
+ }
+
+ operator vk::CommandBuffer() const noexcept {
+ return cmdbuf;
+ }
+
+private:
+ const vk::CommandBuffer& cmdbuf;
+};
+
/// The scheduler abstracts command buffer and fence management with an interface that's able to do
/// OpenGL-like operations on Vulkan command buffers.
class VKScheduler {
@@ -21,16 +54,21 @@ public:
explicit VKScheduler(const VKDevice& device, VKResourceManager& resource_manager);
~VKScheduler();
- /// Gets the current execution context.
- [[nodiscard]] VKExecutionContext GetExecutionContext() const;
+ /// Gets a reference to the current fence.
+ VKFenceView GetFence() const {
+ return current_fence;
+ }
+
+ /// Gets a reference to the current command buffer.
+ VKCommandBufferView GetCommandBuffer() const {
+ return current_cmdbuf;
+ }
- /// Sends the current execution context to the GPU. It invalidates the current execution context
- /// and returns a new one.
- VKExecutionContext Flush(vk::Semaphore semaphore = nullptr);
+ /// Sends the current execution context to the GPU.
+ void Flush(bool release_fence = true, vk::Semaphore semaphore = nullptr);
- /// Sends the current execution context to the GPU and waits for it to complete. It invalidates
- /// the current execution context and returns a new one.
- VKExecutionContext Finish(vk::Semaphore semaphore = nullptr);
+ /// Sends the current execution context to the GPU and waits for it to complete.
+ void Finish(bool release_fence = true, vk::Semaphore semaphore = nullptr);
private:
void SubmitExecution(vk::Semaphore semaphore);
@@ -44,26 +82,4 @@ private:
VKFence* next_fence = nullptr;
};
-class VKExecutionContext {
- friend class VKScheduler;
-
-public:
- VKExecutionContext() = default;
-
- VKFence& GetFence() const {
- return *fence;
- }
-
- vk::CommandBuffer GetCommandBuffer() const {
- return cmdbuf;
- }
-
-private:
- explicit VKExecutionContext(VKFence* fence, vk::CommandBuffer cmdbuf)
- : fence{fence}, cmdbuf{cmdbuf} {}
-
- VKFence* fence{};
- vk::CommandBuffer cmdbuf;
-};
-
} // namespace Vulkan
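VKFenceView and VKCommandBufferView replace VKExecutionContext by aliasing the scheduler's current fence and command buffer instead of copying them: each view stores a reference to the scheduler's member, so it keeps tracking whatever object AllocateNewContext() installs next. A minimal sketch of that aliasing:

#include <cassert>

struct Fence {};

// Sketch of the view idea: the view holds a reference to the *pointer*, not
// a copy of it, so reseating the pointer updates every outstanding view.
class FenceView {
public:
    explicit FenceView(Fence* const& fence) : fence{fence} {}

    Fence* Get() const noexcept {
        return fence;
    }

private:
    Fence* const& fence;
};

int main() {
    Fence first, second;
    Fence* current = &first;

    const FenceView view{current};
    assert(view.Get() == &first);

    current = &second; // The scheduler allocates a new context
    assert(view.Get() == &second); // The view follows automatically
}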
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
index 547883425..f7fbbb6e4 100644
--- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
+++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
@@ -132,20 +132,16 @@ public:
branch_labels.push_back(label);
}
- // TODO(Rodrigo): Figure out the actual depth of the flow stack, for now it seems unlikely
- // that shaders will use 20 nested SSYs and PBKs.
- constexpr u32 FLOW_STACK_SIZE = 20;
- const Id flow_stack_type = TypeArray(t_uint, Constant(t_uint, FLOW_STACK_SIZE));
jmp_to = Emit(OpVariable(TypePointer(spv::StorageClass::Function, t_uint),
spv::StorageClass::Function, Constant(t_uint, first_address)));
- flow_stack = Emit(OpVariable(TypePointer(spv::StorageClass::Function, flow_stack_type),
- spv::StorageClass::Function, ConstantNull(flow_stack_type)));
- flow_stack_top =
- Emit(OpVariable(t_func_uint, spv::StorageClass::Function, Constant(t_uint, 0)));
+ std::tie(ssy_flow_stack, ssy_flow_stack_top) = CreateFlowStack();
+ std::tie(pbk_flow_stack, pbk_flow_stack_top) = CreateFlowStack();
Name(jmp_to, "jmp_to");
- Name(flow_stack, "flow_stack");
- Name(flow_stack_top, "flow_stack_top");
+ Name(ssy_flow_stack, "ssy_flow_stack");
+ Name(ssy_flow_stack_top, "ssy_flow_stack_top");
+ Name(pbk_flow_stack, "pbk_flow_stack");
+ Name(pbk_flow_stack_top, "pbk_flow_stack_top");
Emit(OpBranch(loop_label));
Emit(loop_label);
@@ -209,10 +205,6 @@ public:
}
private:
- using OperationDecompilerFn = Id (SPIRVDecompiler::*)(Operation);
- using OperationDecompilersArray =
- std::array<OperationDecompilerFn, static_cast<std::size_t>(OperationCode::Amount)>;
-
static constexpr auto INTERNAL_FLAGS_COUNT = static_cast<std::size_t>(InternalFlag::Amount);
void AllocateBindings() {
@@ -378,8 +370,8 @@ private:
u32 binding = const_buffers_base_binding;
for (const auto& entry : ir.GetConstantBuffers()) {
const auto [index, size] = entry;
- const Id type =
- device.IsExtScalarBlockLayoutSupported() ? t_cbuf_scalar_ubo : t_cbuf_std140_ubo;
+ const Id type = device.IsKhrUniformBufferStandardLayoutSupported() ? t_cbuf_scalar_ubo
+ : t_cbuf_std140_ubo;
const Id id = OpVariable(type, spv::StorageClass::Uniform);
AddGlobalVariable(Name(id, fmt::format("cbuf_{}", index)));
@@ -434,20 +426,17 @@ private:
instance_index = DeclareBuiltIn(spv::BuiltIn::InstanceIndex, spv::StorageClass::Input,
t_in_uint, "instance_index");
- bool is_point_size_declared = false;
bool is_clip_distances_declared = false;
for (const auto index : ir.GetOutputAttributes()) {
- if (index == Attribute::Index::PointSize) {
- is_point_size_declared = true;
- } else if (index == Attribute::Index::ClipDistances0123 ||
- index == Attribute::Index::ClipDistances4567) {
+ if (index == Attribute::Index::ClipDistances0123 ||
+ index == Attribute::Index::ClipDistances4567) {
is_clip_distances_declared = true;
}
}
std::vector<Id> members;
members.push_back(t_float4);
- if (is_point_size_declared) {
+ if (ir.UsesPointSize()) {
members.push_back(t_float);
}
if (is_clip_distances_declared) {
@@ -470,7 +459,7 @@ private:
position_index = MemberDecorateBuiltIn(spv::BuiltIn::Position, "position", true);
point_size_index =
- MemberDecorateBuiltIn(spv::BuiltIn::PointSize, "point_size", is_point_size_declared);
+ MemberDecorateBuiltIn(spv::BuiltIn::PointSize, "point_size", ir.UsesPointSize());
clip_distances_index = MemberDecorateBuiltIn(spv::BuiltIn::ClipDistance, "clip_distances",
is_clip_distances_declared);
@@ -576,7 +565,7 @@ private:
const Id buffer_id = constant_buffers.at(cbuf->GetIndex());
Id pointer{};
- if (device.IsExtScalarBlockLayoutSupported()) {
+ if (device.IsKhrUniformBufferStandardLayoutSupported()) {
const Id buffer_offset = Emit(OpShiftRightLogical(
t_uint, BitcastTo<Type::Uint>(Visit(offset)), Constant(t_uint, 2u)));
pointer = Emit(
@@ -716,7 +705,8 @@ private:
case Attribute::Index::Position:
return AccessElement(t_out_float, per_vertex, position_index,
abuf->GetElement());
- case Attribute::Index::PointSize:
+ case Attribute::Index::LayerViewportPointSize:
+ UNIMPLEMENTED_IF(abuf->GetElement() != 3);
return AccessElement(t_out_float, per_vertex, point_size_index);
case Attribute::Index::ClipDistances0123:
return AccessElement(t_out_float, per_vertex, clip_distances_index,
@@ -745,6 +735,16 @@ private:
return {};
}
+ Id FCastHalf0(Operation operation) {
+ UNIMPLEMENTED();
+ return {};
+ }
+
+ Id FCastHalf1(Operation operation) {
+ UNIMPLEMENTED();
+ return {};
+ }
+
Id HNegate(Operation operation) {
UNIMPLEMENTED();
return {};
@@ -755,6 +755,11 @@ private:
return {};
}
+ Id HCastFloat(Operation operation) {
+ UNIMPLEMENTED();
+ return {};
+ }
+
Id HUnpack(Operation operation) {
UNIMPLEMENTED();
return {};
@@ -810,12 +815,7 @@ private:
return {};
}
- Id LogicalAll2(Operation operation) {
- UNIMPLEMENTED();
- return {};
- }
-
- Id LogicalAny2(Operation operation) {
+ Id LogicalAnd2(Operation operation) {
UNIMPLEMENTED();
return {};
}
@@ -939,6 +939,46 @@ private:
return {};
}
+ Id ImageStore(Operation operation) {
+ UNIMPLEMENTED();
+ return {};
+ }
+
+ Id AtomicImageAdd(Operation operation) {
+ UNIMPLEMENTED();
+ return {};
+ }
+
+ Id AtomicImageMin(Operation operation) {
+ UNIMPLEMENTED();
+ return {};
+ }
+
+ Id AtomicImageMax(Operation operation) {
+ UNIMPLEMENTED();
+ return {};
+ }
+
+ Id AtomicImageAnd(Operation operation) {
+ UNIMPLEMENTED();
+ return {};
+ }
+
+ Id AtomicImageOr(Operation operation) {
+ UNIMPLEMENTED();
+ return {};
+ }
+
+ Id AtomicImageXor(Operation operation) {
+ UNIMPLEMENTED();
+ return {};
+ }
+
+ Id AtomicImageExchange(Operation operation) {
+ UNIMPLEMENTED();
+ return {};
+ }
+
Id Branch(Operation operation) {
const auto target = std::get_if<ImmediateNode>(&*operation[0]);
UNIMPLEMENTED_IF(!target);
@@ -948,10 +988,19 @@ private:
return {};
}
+ Id BranchIndirect(Operation operation) {
+ const Id op_a = VisitOperand<Type::Uint>(operation, 0);
+
+ Emit(OpStore(jmp_to, op_a));
+ BranchingOp([&]() { Emit(OpBranch(continue_label)); });
+ return {};
+ }
+
Id PushFlowStack(Operation operation) {
const auto target = std::get_if<ImmediateNode>(&*operation[0]);
ASSERT(target);
+ const auto [flow_stack, flow_stack_top] = GetFlowStack(operation);
const Id current = Emit(OpLoad(t_uint, flow_stack_top));
const Id next = Emit(OpIAdd(t_uint, current, Constant(t_uint, 1)));
const Id access = Emit(OpAccessChain(t_func_uint, flow_stack, current));
@@ -962,6 +1011,7 @@ private:
}
Id PopFlowStack(Operation operation) {
+ const auto [flow_stack, flow_stack_top] = GetFlowStack(operation);
const Id current = Emit(OpLoad(t_uint, flow_stack_top));
const Id previous = Emit(OpISub(t_uint, current, Constant(t_uint, 1)));
const Id access = Emit(OpAccessChain(t_func_uint, flow_stack, previous));
@@ -1057,6 +1107,66 @@ private:
return {};
}
+ Id BallotThread(Operation) {
+ UNIMPLEMENTED();
+ return {};
+ }
+
+ Id VoteAll(Operation) {
+ UNIMPLEMENTED();
+ return {};
+ }
+
+ Id VoteAny(Operation) {
+ UNIMPLEMENTED();
+ return {};
+ }
+
+ Id VoteEqual(Operation) {
+ UNIMPLEMENTED();
+ return {};
+ }
+
+ Id ShuffleIndexed(Operation) {
+ UNIMPLEMENTED();
+ return {};
+ }
+
+ Id ShuffleUp(Operation) {
+ UNIMPLEMENTED();
+ return {};
+ }
+
+ Id ShuffleDown(Operation) {
+ UNIMPLEMENTED();
+ return {};
+ }
+
+ Id ShuffleButterfly(Operation) {
+ UNIMPLEMENTED();
+ return {};
+ }
+
+ Id InRangeShuffleIndexed(Operation) {
+ UNIMPLEMENTED();
+ return {};
+ }
+
+ Id InRangeShuffleUp(Operation) {
+ UNIMPLEMENTED();
+ return {};
+ }
+
+ Id InRangeShuffleDown(Operation) {
+ UNIMPLEMENTED();
+ return {};
+ }
+
+ Id InRangeShuffleButterfly(Operation) {
+ UNIMPLEMENTED();
+ return {};
+ }
+
Id DeclareBuiltIn(spv::BuiltIn builtin, spv::StorageClass storage, Id type,
const std::string& name) {
const Id id = OpVariable(type, storage);
@@ -1172,7 +1282,32 @@ private:
Emit(skip_label);
}
- static constexpr OperationDecompilersArray operation_decompilers = {
+ std::tuple<Id, Id> CreateFlowStack() {
+ // TODO(Rodrigo): Figure out the actual depth of the flow stack, for now it seems unlikely
+ // that shaders will use 20 nested SSYs and PBKs.
+ constexpr u32 FLOW_STACK_SIZE = 20;
+ constexpr auto storage_class = spv::StorageClass::Function;
+
+ const Id flow_stack_type = TypeArray(t_uint, Constant(t_uint, FLOW_STACK_SIZE));
+ const Id stack = Emit(OpVariable(TypePointer(storage_class, flow_stack_type), storage_class,
+ ConstantNull(flow_stack_type)));
+ const Id top = Emit(OpVariable(t_func_uint, storage_class, Constant(t_uint, 0)));
+ return std::tie(stack, top);
+ }
+
+ std::pair<Id, Id> GetFlowStack(Operation operation) {
+ const auto stack_class = std::get<MetaStackClass>(operation.GetMeta());
+ switch (stack_class) {
+ case MetaStackClass::Ssy:
+ return {ssy_flow_stack, ssy_flow_stack_top};
+ case MetaStackClass::Pbk:
+ return {pbk_flow_stack, pbk_flow_stack_top};
+ }
+ UNREACHABLE();
+ return {};
+ }
+
+ static constexpr std::array operation_decompilers = {
&SPIRVDecompiler::Assign,
&SPIRVDecompiler::Ternary<&Module::OpSelect, Type::Float, Type::Bool, Type::Float,
@@ -1185,6 +1320,8 @@ private:
&SPIRVDecompiler::Unary<&Module::OpFNegate, Type::Float>,
&SPIRVDecompiler::Unary<&Module::OpFAbs, Type::Float>,
&SPIRVDecompiler::Ternary<&Module::OpFClamp, Type::Float>,
+ &SPIRVDecompiler::FCastHalf0,
+ &SPIRVDecompiler::FCastHalf1,
&SPIRVDecompiler::Binary<&Module::OpFMin, Type::Float>,
&SPIRVDecompiler::Binary<&Module::OpFMax, Type::Float>,
&SPIRVDecompiler::Unary<&Module::OpCos, Type::Float>,
@@ -1245,6 +1382,7 @@ private:
&SPIRVDecompiler::Unary<&Module::OpFAbs, Type::HalfFloat>,
&SPIRVDecompiler::HNegate,
&SPIRVDecompiler::HClamp,
+ &SPIRVDecompiler::HCastFloat,
&SPIRVDecompiler::HUnpack,
&SPIRVDecompiler::HMergeF32,
&SPIRVDecompiler::HMergeH0,
@@ -1257,8 +1395,7 @@ private:
&SPIRVDecompiler::Binary<&Module::OpLogicalNotEqual, Type::Bool>,
&SPIRVDecompiler::Unary<&Module::OpLogicalNot, Type::Bool>,
&SPIRVDecompiler::LogicalPick2,
- &SPIRVDecompiler::LogicalAll2,
- &SPIRVDecompiler::LogicalAny2,
+ &SPIRVDecompiler::LogicalAnd2,
&SPIRVDecompiler::Binary<&Module::OpFOrdLessThan, Type::Bool, Type::Float>,
&SPIRVDecompiler::Binary<&Module::OpFOrdEqual, Type::Bool, Type::Float>,
@@ -1303,7 +1440,17 @@ private:
&SPIRVDecompiler::TextureQueryLod,
&SPIRVDecompiler::TexelFetch,
+ &SPIRVDecompiler::ImageStore,
+ &SPIRVDecompiler::AtomicImageAdd,
+ &SPIRVDecompiler::AtomicImageMin,
+ &SPIRVDecompiler::AtomicImageMax,
+ &SPIRVDecompiler::AtomicImageAnd,
+ &SPIRVDecompiler::AtomicImageOr,
+ &SPIRVDecompiler::AtomicImageXor,
+ &SPIRVDecompiler::AtomicImageExchange,
+
&SPIRVDecompiler::Branch,
+ &SPIRVDecompiler::BranchIndirect,
&SPIRVDecompiler::PushFlowStack,
&SPIRVDecompiler::PopFlowStack,
&SPIRVDecompiler::Exit,
@@ -1319,7 +1466,23 @@ private:
&SPIRVDecompiler::WorkGroupId<0>,
&SPIRVDecompiler::WorkGroupId<1>,
&SPIRVDecompiler::WorkGroupId<2>,
+
+ &SPIRVDecompiler::BallotThread,
+ &SPIRVDecompiler::VoteAll,
+ &SPIRVDecompiler::VoteAny,
+ &SPIRVDecompiler::VoteEqual,
+
+ &SPIRVDecompiler::ShuffleIndexed,
+ &SPIRVDecompiler::ShuffleUp,
+ &SPIRVDecompiler::ShuffleDown,
+ &SPIRVDecompiler::ShuffleButterfly,
+
+ &SPIRVDecompiler::InRangeShuffleIndexed,
+ &SPIRVDecompiler::InRangeShuffleUp,
+ &SPIRVDecompiler::InRangeShuffleDown,
+ &SPIRVDecompiler::InRangeShuffleButterfly,
};
+ static_assert(operation_decompilers.size() == static_cast<std::size_t>(OperationCode::Amount));
const VKDevice& device;
const ShaderIR& ir;
@@ -1414,8 +1577,10 @@ private:
Id execute_function{};
Id jmp_to{};
- Id flow_stack_top{};
- Id flow_stack{};
+ Id ssy_flow_stack_top{};
+ Id pbk_flow_stack_top{};
+ Id ssy_flow_stack{};
+ Id pbk_flow_stack{};
Id continue_label{};
std::map<u32, Id> labels;
};
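Splitting the single flow stack into SSY and PBK pairs above means a BRK can no longer pop a sync target pushed by SSY (and vice versa); GetFlowStack() selects the right pair from the operation's MetaStackClass. A small sketch of the invariant with plain std::stack in place of the SPIR-V variables:

#include <cassert>
#include <cstdint>
#include <stack>

enum class StackClass { Ssy, Pbk };

struct FlowStacks {
    std::stack<std::uint32_t> ssy;
    std::stack<std::uint32_t> pbk;

    // Counterpart of GetFlowStack(): pick the stack by the operation's class.
    std::stack<std::uint32_t>& Get(StackClass stack_class) {
        return stack_class == StackClass::Ssy ? ssy : pbk;
    }
};

int main() {
    FlowStacks stacks;
    stacks.Get(StackClass::Ssy).push(0x100); // SSY records a sync target
    stacks.Get(StackClass::Pbk).push(0x200); // PBK records a break target

    // BRK consumes only the PBK stack; the SSY target stays intact.
    stacks.Get(StackClass::Pbk).pop();
    assert(stacks.Get(StackClass::Ssy).top() == 0x100);
    assert(stacks.Get(StackClass::Pbk).empty());
}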
diff --git a/src/video_core/renderer_vulkan/vk_stream_buffer.cpp b/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
index 58ffa42f2..62f1427f5 100644
--- a/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
+++ b/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
@@ -46,12 +46,12 @@ std::tuple<u8*, u64, bool> VKStreamBuffer::Reserve(u64 size) {
return {mapped_pointer + offset, offset, invalidation_mark.has_value()};
}
-VKExecutionContext VKStreamBuffer::Send(VKExecutionContext exctx, u64 size) {
+void VKStreamBuffer::Send(u64 size) {
ASSERT_MSG(size <= mapped_size, "Reserved size is too small");
if (invalidation_mark) {
// TODO(Rodrigo): Find a better way to invalidate than waiting for all watches to finish.
- exctx = scheduler.Flush();
+ scheduler.Flush();
std::for_each(watches.begin(), watches.begin() + *invalidation_mark,
[&](auto& resource) { resource->Wait(); });
invalidation_mark = std::nullopt;
@@ -62,11 +62,9 @@ VKExecutionContext VKStreamBuffer::Send(VKExecutionContext exctx, u64 size) {
ReserveWatches(WATCHES_RESERVE_CHUNK);
}
// Add a watch for this allocation.
- watches[used_watches++]->Watch(exctx.GetFence());
+ watches[used_watches++]->Watch(scheduler.GetFence());
offset += size;
-
- return exctx;
}
void VKStreamBuffer::CreateBuffers(VKMemoryManager& memory_manager, vk::BufferUsageFlags usage) {
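With VKExecutionContext gone, Send() above fetches the fence directly from the scheduler and ties one watch to each allocation; invalidation then waits only on the watches recorded so far. A reduced sketch of that bookkeeping (Fence and Watch are stand-ins for the resource manager types):

#include <cstddef>
#include <memory>
#include <vector>

struct Fence {};

struct Watch {
    const Fence* fence = nullptr;
    void WatchFence(const Fence& f) { fence = &f; }
    void Wait() const { /* the real code blocks until the fence signals */ }
};

int main() {
    std::vector<std::unique_ptr<Watch>> watches;
    std::size_t used_watches = 0;

    // One watch per Send(), tied to the fence of the submitting context.
    Fence frame_fence;
    watches.push_back(std::make_unique<Watch>());
    watches[used_watches++]->WatchFence(frame_fence);

    // Invalidation waits only on allocations that are actually in flight.
    for (std::size_t i = 0; i < used_watches; ++i) {
        watches[i]->Wait();
    }
}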
diff --git a/src/video_core/renderer_vulkan/vk_stream_buffer.h b/src/video_core/renderer_vulkan/vk_stream_buffer.h
index 69d036ccd..842e54162 100644
--- a/src/video_core/renderer_vulkan/vk_stream_buffer.h
+++ b/src/video_core/renderer_vulkan/vk_stream_buffer.h
@@ -37,7 +37,7 @@ public:
std::tuple<u8*, u64, bool> Reserve(u64 size);
/// Ensures that "size" bytes of memory are available to the GPU, potentially recording a copy.
- [[nodiscard]] VKExecutionContext Send(VKExecutionContext exctx, u64 size);
+ void Send(u64 size);
vk::Buffer GetBuffer() const {
return *buffer;
diff --git a/src/video_core/shader/control_flow.cpp b/src/video_core/shader/control_flow.cpp
new file mode 100644
index 000000000..ec3a76690
--- /dev/null
+++ b/src/video_core/shader/control_flow.cpp
@@ -0,0 +1,481 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <list>
+#include <map>
+#include <stack>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "video_core/shader/control_flow.h"
+#include "video_core/shader/shader_ir.h"
+
+namespace VideoCommon::Shader {
+namespace {
+using Tegra::Shader::Instruction;
+using Tegra::Shader::OpCode;
+
+constexpr s32 unassigned_branch = -2;
+
+struct Query {
+ u32 address{};
+ std::stack<u32> ssy_stack{};
+ std::stack<u32> pbk_stack{};
+};
+
+struct BlockStack {
+ BlockStack() = default;
+ explicit BlockStack(const Query& q) : ssy_stack{q.ssy_stack}, pbk_stack{q.pbk_stack} {}
+ std::stack<u32> ssy_stack{};
+ std::stack<u32> pbk_stack{};
+};
+
+struct BlockBranchInfo {
+ Condition condition{};
+ s32 address{exit_branch};
+ bool kill{};
+ bool is_sync{};
+ bool is_brk{};
+ bool ignore{};
+};
+
+struct BlockInfo {
+ u32 start{};
+ u32 end{};
+ bool visited{};
+ BlockBranchInfo branch{};
+
+ bool IsInside(const u32 address) const {
+ return start <= address && address <= end;
+ }
+};
+
+struct CFGRebuildState {
+ explicit CFGRebuildState(const ProgramCode& program_code, const std::size_t program_size,
+ const u32 start)
+ : start{start}, program_code{program_code}, program_size{program_size} {}
+
+ u32 start{};
+ std::vector<BlockInfo> block_info{};
+ std::list<u32> inspect_queries{};
+ std::list<Query> queries{};
+ std::unordered_map<u32, u32> registered{};
+ std::unordered_set<u32> labels{};
+ std::map<u32, u32> ssy_labels{};
+ std::map<u32, u32> pbk_labels{};
+ std::unordered_map<u32, BlockStack> stacks{};
+ const ProgramCode& program_code;
+ const std::size_t program_size;
+};
+
+enum class BlockCollision : u32 { None, Found, Inside };
+
+std::pair<BlockCollision, u32> TryGetBlock(CFGRebuildState& state, u32 address) {
+ const auto& blocks = state.block_info;
+ for (u32 index = 0; index < blocks.size(); index++) {
+ if (blocks[index].start == address) {
+ return {BlockCollision::Found, index};
+ }
+ if (blocks[index].IsInside(address)) {
+ return {BlockCollision::Inside, index};
+ }
+ }
+ return {BlockCollision::None, 0xFFFFFFFF};
+}
+
+struct ParseInfo {
+ BlockBranchInfo branch_info{};
+ u32 end_address{};
+};
+
+BlockInfo& CreateBlockInfo(CFGRebuildState& state, u32 start, u32 end) {
+ auto& it = state.block_info.emplace_back();
+ it.start = start;
+ it.end = end;
+ const u32 index = static_cast<u32>(state.block_info.size() - 1);
+ state.registered.insert({start, index});
+ return it;
+}
+
+Pred GetPredicate(u32 index, bool negated) {
+ return static_cast<Pred>(index + (negated ? 8 : 0));
+}
+
+/**
+ * Returns whether the instruction at the specified offset is a 'sched' instruction.
+ * A sched instruction precedes each group of three executable instructions.
+ */
+constexpr bool IsSchedInstruction(u32 offset, u32 main_offset) {
+ constexpr u32 SchedPeriod = 4;
+ u32 absolute_offset = offset - main_offset;
+
+ return (absolute_offset % SchedPeriod) == 0;
+}
+
+enum class ParseResult : u32 {
+ ControlCaught,
+ BlockEnd,
+ AbnormalFlow,
+};
+
+std::pair<ParseResult, ParseInfo> ParseCode(CFGRebuildState& state, u32 address) {
+ u32 offset = static_cast<u32>(address);
+ const u32 end_address = static_cast<u32>(state.program_size / sizeof(Instruction));
+ ParseInfo parse_info{};
+
+ const auto insert_label = [](CFGRebuildState& state, u32 address) {
+ const auto pair = state.labels.emplace(address);
+ if (pair.second) {
+ state.inspect_queries.push_back(address);
+ }
+ };
+
+ while (true) {
+ if (offset >= end_address) {
+ // ASSERT_OR_EXECUTE can't be used, as it ignores the break
+ ASSERT_MSG(false, "Shader exceeded the program size limit!");
+ parse_info.branch_info.address = exit_branch;
+ parse_info.branch_info.ignore = false;
+ break;
+ }
+ if (state.registered.count(offset) != 0) {
+ parse_info.branch_info.address = offset;
+ parse_info.branch_info.ignore = true;
+ break;
+ }
+ if (IsSchedInstruction(offset, state.start)) {
+ offset++;
+ continue;
+ }
+ const Instruction instr = {state.program_code[offset]};
+ const auto opcode = OpCode::Decode(instr);
+ if (!opcode || opcode->get().GetType() != OpCode::Type::Flow) {
+ offset++;
+ continue;
+ }
+
+ switch (opcode->get().GetId()) {
+ case OpCode::Id::EXIT: {
+ const auto pred_index = static_cast<u32>(instr.pred.pred_index);
+ parse_info.branch_info.condition.predicate =
+ GetPredicate(pred_index, instr.negate_pred != 0);
+ if (parse_info.branch_info.condition.predicate == Pred::NeverExecute) {
+ offset++;
+ continue;
+ }
+ const ConditionCode cc = instr.flow_condition_code;
+ parse_info.branch_info.condition.cc = cc;
+ if (cc == ConditionCode::F) {
+ offset++;
+ continue;
+ }
+ parse_info.branch_info.address = exit_branch;
+ parse_info.branch_info.kill = false;
+ parse_info.branch_info.is_sync = false;
+ parse_info.branch_info.is_brk = false;
+ parse_info.branch_info.ignore = false;
+ parse_info.end_address = offset;
+
+ return {ParseResult::ControlCaught, parse_info};
+ }
+ case OpCode::Id::BRA: {
+ if (instr.bra.constant_buffer != 0) {
+ return {ParseResult::AbnormalFlow, parse_info};
+ }
+ const auto pred_index = static_cast<u32>(instr.pred.pred_index);
+ parse_info.branch_info.condition.predicate =
+ GetPredicate(pred_index, instr.negate_pred != 0);
+ if (parse_info.branch_info.condition.predicate == Pred::NeverExecute) {
+ offset++;
+ continue;
+ }
+ const ConditionCode cc = instr.flow_condition_code;
+ parse_info.branch_info.condition.cc = cc;
+ if (cc == ConditionCode::F) {
+ offset++;
+ continue;
+ }
+ const u32 branch_offset = offset + instr.bra.GetBranchTarget();
+ if (branch_offset == 0) {
+ parse_info.branch_info.address = exit_branch;
+ } else {
+ parse_info.branch_info.address = branch_offset;
+ }
+ insert_label(state, branch_offset);
+ parse_info.branch_info.kill = false;
+ parse_info.branch_info.is_sync = false;
+ parse_info.branch_info.is_brk = false;
+ parse_info.branch_info.ignore = false;
+ parse_info.end_address = offset;
+
+ return {ParseResult::ControlCaught, parse_info};
+ }
+ case OpCode::Id::SYNC: {
+ const auto pred_index = static_cast<u32>(instr.pred.pred_index);
+ parse_info.branch_info.condition.predicate =
+ GetPredicate(pred_index, instr.negate_pred != 0);
+ if (parse_info.branch_info.condition.predicate == Pred::NeverExecute) {
+ offset++;
+ continue;
+ }
+ const ConditionCode cc = instr.flow_condition_code;
+ parse_info.branch_info.condition.cc = cc;
+ if (cc == ConditionCode::F) {
+ offset++;
+ continue;
+ }
+ parse_info.branch_info.address = unassigned_branch;
+ parse_info.branch_info.kill = false;
+ parse_info.branch_info.is_sync = true;
+ parse_info.branch_info.is_brk = false;
+ parse_info.branch_info.ignore = false;
+ parse_info.end_address = offset;
+
+ return {ParseResult::ControlCaught, parse_info};
+ }
+ case OpCode::Id::BRK: {
+ const auto pred_index = static_cast<u32>(instr.pred.pred_index);
+ parse_info.branch_info.condition.predicate =
+ GetPredicate(pred_index, instr.negate_pred != 0);
+ if (parse_info.branch_info.condition.predicate == Pred::NeverExecute) {
+ offset++;
+ continue;
+ }
+ const ConditionCode cc = instr.flow_condition_code;
+ parse_info.branch_info.condition.cc = cc;
+ if (cc == ConditionCode::F) {
+ offset++;
+ continue;
+ }
+ parse_info.branch_info.address = unassigned_branch;
+ parse_info.branch_info.kill = false;
+ parse_info.branch_info.is_sync = false;
+ parse_info.branch_info.is_brk = true;
+ parse_info.branch_info.ignore = false;
+ parse_info.end_address = offset;
+
+ return {ParseResult::ControlCaught, parse_info};
+ }
+ case OpCode::Id::KIL: {
+ const auto pred_index = static_cast<u32>(instr.pred.pred_index);
+ parse_info.branch_info.condition.predicate =
+ GetPredicate(pred_index, instr.negate_pred != 0);
+ if (parse_info.branch_info.condition.predicate == Pred::NeverExecute) {
+ offset++;
+ continue;
+ }
+ const ConditionCode cc = instr.flow_condition_code;
+ parse_info.branch_info.condition.cc = cc;
+ if (cc == ConditionCode::F) {
+ offset++;
+ continue;
+ }
+ parse_info.branch_info.address = exit_branch;
+ parse_info.branch_info.kill = true;
+ parse_info.branch_info.is_sync = false;
+ parse_info.branch_info.is_brk = false;
+ parse_info.branch_info.ignore = false;
+ parse_info.end_address = offset;
+
+ return {ParseResult::ControlCaught, parse_info};
+ }
+ case OpCode::Id::SSY: {
+ const u32 target = offset + instr.bra.GetBranchTarget();
+ insert_label(state, target);
+ state.ssy_labels.emplace(offset, target);
+ break;
+ }
+ case OpCode::Id::PBK: {
+ const u32 target = offset + instr.bra.GetBranchTarget();
+ insert_label(state, target);
+ state.pbk_labels.emplace(offset, target);
+ break;
+ }
+ case OpCode::Id::BRX: {
+ return {ParseResult::AbnormalFlow, parse_info};
+ }
+ default:
+ break;
+ }
+
+ offset++;
+ }
+ parse_info.branch_info.kill = false;
+ parse_info.branch_info.is_sync = false;
+ parse_info.branch_info.is_brk = false;
+ parse_info.end_address = offset - 1;
+ return {ParseResult::BlockEnd, parse_info};
+}
+
+bool TryInspectAddress(CFGRebuildState& state) {
+ if (state.inspect_queries.empty()) {
+ return false;
+ }
+
+ const u32 address = state.inspect_queries.front();
+ state.inspect_queries.pop_front();
+ const auto [result, block_index] = TryGetBlock(state, address);
+ switch (result) {
+ case BlockCollision::Found: {
+ return true;
+ }
+ case BlockCollision::Inside: {
+ // This case is the tricky one:
+ // we need to split the block into two separate blocks.
+ const u32 end = state.block_info[block_index].end;
+ BlockInfo& new_block = CreateBlockInfo(state, address, end);
+ BlockInfo& current_block = state.block_info[block_index];
+ current_block.end = address - 1;
+ new_block.branch = current_block.branch;
+ BlockBranchInfo forward_branch{};
+ forward_branch.address = address;
+ forward_branch.ignore = true;
+ current_block.branch = forward_branch;
+ return true;
+ }
+ default:
+ break;
+ }
+ const auto [parse_result, parse_info] = ParseCode(state, address);
+ if (parse_result == ParseResult::AbnormalFlow) {
+ // On abnormal flow, return false to abort the CFG reconstruction.
+ return false;
+ }
+
+ BlockInfo& block_info = CreateBlockInfo(state, address, parse_info.end_address);
+ block_info.branch = parse_info.branch_info;
+ if (parse_info.branch_info.condition.IsUnconditional()) {
+ return true;
+ }
+
+ const u32 fallthrough_address = parse_info.end_address + 1;
+ state.inspect_queries.push_front(fallthrough_address);
+ return true;
+}
+
+bool TryQuery(CFGRebuildState& state) {
+ const auto gather_labels = [](std::stack<u32>& cc, std::map<u32, u32>& labels,
+ BlockInfo& block) {
+ auto gather_start = labels.lower_bound(block.start);
+ const auto gather_end = labels.upper_bound(block.end);
+ while (gather_start != gather_end) {
+ cc.push(gather_start->second);
+ ++gather_start;
+ }
+ };
+ if (state.queries.empty()) {
+ return false;
+ }
+
+ Query& q = state.queries.front();
+ const u32 block_index = state.registered[q.address];
+ BlockInfo& block = state.block_info[block_index];
+ // If the block has been visited, check that the stacks match; otherwise gather
+ // the ssy/pbk labels into the current stack and check whether the branch at the
+ // end of the block consumes a label. Schedule new queries accordingly.
+ if (block.visited) {
+ BlockStack& stack = state.stacks[q.address];
+ const bool all_okay = (stack.ssy_stack.empty() || q.ssy_stack == stack.ssy_stack) &&
+ (stack.pbk_stack.empty() || q.pbk_stack == stack.pbk_stack);
+ state.queries.pop_front();
+ return all_okay;
+ }
+ block.visited = true;
+ state.stacks.insert_or_assign(q.address, BlockStack{q});
+
+ Query q2(q);
+ state.queries.pop_front();
+ gather_labels(q2.ssy_stack, state.ssy_labels, block);
+ gather_labels(q2.pbk_stack, state.pbk_labels, block);
+ if (!block.branch.condition.IsUnconditional()) {
+ q2.address = block.end + 1;
+ state.queries.push_back(q2);
+ }
+
+ Query conditional_query{q2};
+ if (block.branch.is_sync) {
+ if (block.branch.address == unassigned_branch) {
+ block.branch.address = conditional_query.ssy_stack.top();
+ }
+ conditional_query.ssy_stack.pop();
+ }
+ if (block.branch.is_brk) {
+ if (block.branch.address == unassigned_branch) {
+ block.branch.address = conditional_query.pbk_stack.top();
+ }
+ conditional_query.pbk_stack.pop();
+ }
+ conditional_query.address = block.branch.address;
+ state.queries.push_back(std::move(conditional_query));
+ return true;
+}
+} // Anonymous namespace
+
+std::optional<ShaderCharacteristics> ScanFlow(const ProgramCode& program_code,
+ std::size_t program_size, u32 start_address) {
+ CFGRebuildState state{program_code, program_size, start_address};
+
+ // Inspect Code and generate blocks
+ state.labels.clear();
+ state.labels.emplace(start_address);
+ state.inspect_queries.push_back(state.start);
+ while (!state.inspect_queries.empty()) {
+ if (!TryInspectAddress(state)) {
+ return {};
+ }
+ }
+
+ // Decompile Stacks
+ state.queries.push_back(Query{state.start, {}, {}});
+ bool decompiled = true;
+ while (!state.queries.empty()) {
+ if (!TryQuery(state)) {
+ decompiled = false;
+ break;
+ }
+ }
+
+ // Sort and organize results
+ std::sort(state.block_info.begin(), state.block_info.end(),
+ [](const BlockInfo& a, const BlockInfo& b) { return a.start < b.start; });
+ ShaderCharacteristics result_out{};
+ result_out.decompilable = decompiled;
+ result_out.start = start_address;
+ result_out.end = start_address;
+ for (const auto& block : state.block_info) {
+ ShaderBlock new_block{};
+ new_block.start = block.start;
+ new_block.end = block.end;
+ new_block.ignore_branch = block.branch.ignore;
+ if (!new_block.ignore_branch) {
+ new_block.branch.cond = block.branch.condition;
+ new_block.branch.kills = block.branch.kill;
+ new_block.branch.address = block.branch.address;
+ }
+ result_out.end = std::max(result_out.end, block.end);
+ result_out.blocks.push_back(new_block);
+ }
+ if (result_out.decompilable) {
+ result_out.labels = std::move(state.labels);
+ return {std::move(result_out)};
+ }
+
+ // If it's not decompilable, merge the unlabelled blocks together
+ auto back = result_out.blocks.begin();
+ auto next = std::next(back);
+ while (next != result_out.blocks.end()) {
+ if (state.labels.count(next->start) == 0 && next->start == back->end + 1) {
+ back->end = next->end;
+ next = result_out.blocks.erase(next);
+ continue;
+ }
+ back = next;
+ ++next;
+ }
+ return {std::move(result_out)};
+}
+} // namespace VideoCommon::Shader
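ParseCode() above steps over one slot in every four through IsSchedInstruction(), since Maxwell interleaves a scheduling control word ahead of each group of three executable instructions. A worked check of the arithmetic:

#include <cassert>
#include <cstdint>

// Mirror of the helper in control_flow.cpp, reduced to its arithmetic.
constexpr bool IsSchedInstruction(std::uint32_t offset, std::uint32_t main_offset) {
    constexpr std::uint32_t SchedPeriod = 4;
    return ((offset - main_offset) % SchedPeriod) == 0;
}

int main() {
    constexpr std::uint32_t main_offset = 10;
    assert(IsSchedInstruction(10, main_offset));  // Slot 0: sched word
    assert(!IsSchedInstruction(11, main_offset)); // Slots 1..3: real instructions
    assert(!IsSchedInstruction(13, main_offset));
    assert(IsSchedInstruction(14, main_offset));  // Next group's sched slot
}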
diff --git a/src/video_core/shader/control_flow.h b/src/video_core/shader/control_flow.h
new file mode 100644
index 000000000..b0a5e4f8c
--- /dev/null
+++ b/src/video_core/shader/control_flow.h
@@ -0,0 +1,79 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <list>
+#include <optional>
+#include <unordered_set>
+
+#include "video_core/engines/shader_bytecode.h"
+#include "video_core/shader/shader_ir.h"
+
+namespace VideoCommon::Shader {
+
+using Tegra::Shader::ConditionCode;
+using Tegra::Shader::Pred;
+
+constexpr s32 exit_branch = -1;
+
+struct Condition {
+ Pred predicate{Pred::UnusedIndex};
+ ConditionCode cc{ConditionCode::T};
+
+ bool IsUnconditional() const {
+ return predicate == Pred::UnusedIndex && cc == ConditionCode::T;
+ }
+
+ bool operator==(const Condition& other) const {
+ return std::tie(predicate, cc) == std::tie(other.predicate, other.cc);
+ }
+
+ bool operator!=(const Condition& other) const {
+ return !operator==(other);
+ }
+};
+
+struct ShaderBlock {
+ struct Branch {
+ Condition cond{};
+ bool kills{};
+ s32 address{};
+
+ bool operator==(const Branch& b) const {
+ return std::tie(cond, kills, address) == std::tie(b.cond, b.kills, b.address);
+ }
+
+ bool operator!=(const Branch& b) const {
+ return !operator==(b);
+ }
+ };
+
+ u32 start{};
+ u32 end{};
+ bool ignore_branch{};
+ Branch branch{};
+
+ bool operator==(const ShaderBlock& sb) const {
+ return std::tie(start, end, ignore_branch, branch) ==
+ std::tie(sb.start, sb.end, sb.ignore_branch, sb.branch);
+ }
+
+ bool operator!=(const ShaderBlock& sb) const {
+ return !operator==(sb);
+ }
+};
+
+struct ShaderCharacteristics {
+ std::list<ShaderBlock> blocks{};
+ bool decompilable{};
+ u32 start{};
+ u32 end{};
+ std::unordered_set<u32> labels{};
+};
+
+std::optional<ShaderCharacteristics> ScanFlow(const ProgramCode& program_code,
+ std::size_t program_size, u32 start_address);
+
+} // namespace VideoCommon::Shader
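
The ssy_stack/pbk_stack pops in control_flow.cpp above simulate the hardware's two flow stacks at analysis time. A standalone sketch of that bookkeeping, assuming SSY/PBK push a target and SYNC/BRK consume one:

#include <cstdint>
#include <stack>

enum class FlowOp { Ssy, Pbk, Sync, Brk };

struct FlowStacks {
    std::stack<uint32_t> ssy; // re-convergence targets pushed by SSY
    std::stack<uint32_t> pbk; // break targets pushed by PBK

    // Returns the branch target for SYNC/BRK, mirroring how TryQuery
    // resolves an unassigned branch address from the matching stack.
    uint32_t Apply(FlowOp op, uint32_t target) {
        switch (op) {
        case FlowOp::Ssy: ssy.push(target); return 0;
        case FlowOp::Pbk: pbk.push(target); return 0;
        case FlowOp::Sync: { const uint32_t t = ssy.top(); ssy.pop(); return t; }
        case FlowOp::Brk: { const uint32_t t = pbk.top(); pbk.pop(); return t; }
        }
        return 0;
    }
};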
diff --git a/src/video_core/shader/decode.cpp b/src/video_core/shader/decode.cpp
index a0554c97e..47a9fd961 100644
--- a/src/video_core/shader/decode.cpp
+++ b/src/video_core/shader/decode.cpp
@@ -11,6 +11,7 @@
#include "common/common_types.h"
#include "video_core/engines/shader_bytecode.h"
#include "video_core/engines/shader_header.h"
+#include "video_core/shader/control_flow.h"
#include "video_core/shader/node_helper.h"
#include "video_core/shader/shader_ir.h"
@@ -21,20 +22,6 @@ using Tegra::Shader::OpCode;
namespace {
-/// Merges exit method of two parallel branches.
-constexpr ExitMethod ParallelExit(ExitMethod a, ExitMethod b) {
- if (a == ExitMethod::Undetermined) {
- return b;
- }
- if (b == ExitMethod::Undetermined) {
- return a;
- }
- if (a == b) {
- return a;
- }
- return ExitMethod::Conditional;
-}
-
/**
* Returns whether the instruction at the specified offset is a 'sched' instruction.
* Sched instructions always appear before a sequence of 3 instructions.
@@ -51,85 +38,104 @@ constexpr bool IsSchedInstruction(u32 offset, u32 main_offset) {
void ShaderIR::Decode() {
std::memcpy(&header, program_code.data(), sizeof(Tegra::Shader::Header));
- std::set<u32> labels;
- const ExitMethod exit_method = Scan(main_offset, MAX_PROGRAM_LENGTH, labels);
- if (exit_method != ExitMethod::AlwaysEnd) {
- UNREACHABLE_MSG("Program does not always end");
- }
-
- if (labels.empty()) {
- basic_blocks.insert({main_offset, DecodeRange(main_offset, MAX_PROGRAM_LENGTH)});
+ disable_flow_stack = false;
+ const auto info = ScanFlow(program_code, program_size, main_offset);
+ if (info) {
+ const auto& shader_info = *info;
+ coverage_begin = shader_info.start;
+ coverage_end = shader_info.end;
+ if (shader_info.decompilable) {
+ disable_flow_stack = true;
+ const auto insert_block = [this](NodeBlock& nodes, u32 label) {
+ if (label == static_cast<u32>(exit_branch)) {
+ return;
+ }
+ basic_blocks.insert({label, nodes});
+ };
+ const auto& blocks = shader_info.blocks;
+ NodeBlock current_block;
+ u32 current_label = static_cast<u32>(exit_branch);
+ for (auto& block : blocks) {
+ if (shader_info.labels.count(block.start) != 0) {
+ insert_block(current_block, current_label);
+ current_block.clear();
+ current_label = block.start;
+ }
+ if (!block.ignore_branch) {
+ DecodeRangeInner(current_block, block.start, block.end);
+ InsertControlFlow(current_block, block);
+ } else {
+ DecodeRangeInner(current_block, block.start, block.end + 1);
+ }
+ }
+ insert_block(current_block, current_label);
+ return;
+ }
+ LOG_WARNING(HW_GPU, "Flow Stack Removing Failed! Falling back to old method");
+ // we can't decompile it, fallback to standard method
+ for (const auto& block : shader_info.blocks) {
+ basic_blocks.insert({block.start, DecodeRange(block.start, block.end + 1)});
+ }
return;
}
+ LOG_WARNING(HW_GPU, "Flow Analysis Failed! Falling back to brute force compiling");
+
+ // Now we need to deal with an undecompilable shader. Brute force it by
+ // decoding a basic block at every possible address.
+ coverage_begin = main_offset;
+ const u32 shader_end = static_cast<u32>(program_size / sizeof(u64));
+ coverage_end = shader_end;
+ for (u32 label = main_offset; label < shader_end; label++) {
+ basic_blocks.insert({label, DecodeRange(label, label + 1)});
+ }
+}
- labels.insert(main_offset);
-
- for (const u32 label : labels) {
- const auto next_it = labels.lower_bound(label + 1);
- const u32 next_label = next_it == labels.end() ? MAX_PROGRAM_LENGTH : *next_it;
+NodeBlock ShaderIR::DecodeRange(u32 begin, u32 end) {
+ NodeBlock basic_block;
+ DecodeRangeInner(basic_block, begin, end);
+ return basic_block;
+}
- basic_blocks.insert({label, DecodeRange(label, next_label)});
+void ShaderIR::DecodeRangeInner(NodeBlock& bb, u32 begin, u32 end) {
+ for (u32 pc = begin; pc < (begin > end ? MAX_PROGRAM_LENGTH : end);) {
+ pc = DecodeInstr(bb, pc);
}
}
-ExitMethod ShaderIR::Scan(u32 begin, u32 end, std::set<u32>& labels) {
- const auto [iter, inserted] =
- exit_method_map.emplace(std::make_pair(begin, end), ExitMethod::Undetermined);
- ExitMethod& exit_method = iter->second;
- if (!inserted)
- return exit_method;
-
- for (u32 offset = begin; offset != end && offset != MAX_PROGRAM_LENGTH; ++offset) {
- coverage_begin = std::min(coverage_begin, offset);
- coverage_end = std::max(coverage_end, offset + 1);
-
- const Instruction instr = {program_code[offset]};
- const auto opcode = OpCode::Decode(instr);
- if (!opcode)
- continue;
- switch (opcode->get().GetId()) {
- case OpCode::Id::EXIT: {
- // The EXIT instruction can be predicated, which means that the shader can conditionally
- // end on this instruction. We have to consider the case where the condition is not met
- // and check the exit method of that other basic block.
- using Tegra::Shader::Pred;
- if (instr.pred.pred_index == static_cast<u64>(Pred::UnusedIndex)) {
- return exit_method = ExitMethod::AlwaysEnd;
- } else {
- const ExitMethod not_met = Scan(offset + 1, end, labels);
- return exit_method = ParallelExit(ExitMethod::AlwaysEnd, not_met);
- }
+void ShaderIR::InsertControlFlow(NodeBlock& bb, const ShaderBlock& block) {
+ const auto apply_conditions = [&](const Condition& cond, Node n) -> Node {
+ Node result = n;
+ if (cond.cc != ConditionCode::T) {
+ result = Conditional(GetConditionCode(cond.cc), {result});
}
- case OpCode::Id::BRA: {
- const u32 target = offset + instr.bra.GetBranchTarget();
- labels.insert(target);
- const ExitMethod no_jmp = Scan(offset + 1, end, labels);
- const ExitMethod jmp = Scan(target, end, labels);
- return exit_method = ParallelExit(no_jmp, jmp);
- }
- case OpCode::Id::SSY:
- case OpCode::Id::PBK: {
- // The SSY and PBK use a similar encoding as the BRA instruction.
- UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0,
- "Constant buffer branching is not supported");
- const u32 target = offset + instr.bra.GetBranchTarget();
- labels.insert(target);
- // Continue scanning for an exit method.
- break;
+ if (cond.predicate != Pred::UnusedIndex) {
+ u32 pred = static_cast<u32>(cond.predicate);
+ const bool is_neg = pred > 7;
+ if (is_neg) {
+ pred -= 8;
+ }
+ result = Conditional(GetPredicate(pred, is_neg), {result});
}
- default:
- break;
+ return result;
+ };
+ if (block.branch.address < 0) {
+ if (block.branch.kills) {
+ Node n = Operation(OperationCode::Discard);
+ n = apply_conditions(block.branch.cond, n);
+ bb.push_back(n);
+ global_code.push_back(n);
+ return;
}
+ Node n = Operation(OperationCode::Exit);
+ n = apply_conditions(block.branch.cond, n);
+ bb.push_back(n);
+ global_code.push_back(n);
+ return;
}
- return exit_method = ExitMethod::AlwaysReturn;
-}
-
-NodeBlock ShaderIR::DecodeRange(u32 begin, u32 end) {
- NodeBlock basic_block;
- for (u32 pc = begin; pc < (begin > end ? MAX_PROGRAM_LENGTH : end);) {
- pc = DecodeInstr(basic_block, pc);
- }
- return basic_block;
+ Node n = Operation(OperationCode::Branch, Immediate(block.branch.address));
+ n = apply_conditions(block.branch.cond, n);
+ bb.push_back(n);
+ global_code.push_back(n);
}
u32 ShaderIR::DecodeInstr(NodeBlock& bb, u32 pc) {
@@ -140,15 +146,18 @@ u32 ShaderIR::DecodeInstr(NodeBlock& bb, u32 pc) {
const Instruction instr = {program_code[pc]};
const auto opcode = OpCode::Decode(instr);
+ const u32 nv_address = ConvertAddressToNvidiaSpace(pc);
// Decoding failure
if (!opcode) {
UNIMPLEMENTED_MSG("Unhandled instruction: {0:x}", instr.value);
+ bb.push_back(Comment(fmt::format("{:05x} Unimplemented Shader instruction (0x{:016x})",
+ nv_address, instr.value)));
return pc + 1;
}
- bb.push_back(
- Comment(fmt::format("{}: {} (0x{:016x})", pc, opcode->get().GetName(), instr.value)));
+ bb.push_back(Comment(
+ fmt::format("{:05x} {} (0x{:016x})", nv_address, opcode->get().GetName(), instr.value)));
using Tegra::Shader::Pred;
UNIMPLEMENTED_IF_MSG(instr.pred.full_pred == Pred::NeverExecute,
@@ -167,8 +176,10 @@ u32 ShaderIR::DecodeInstr(NodeBlock& bb, u32 pc) {
{OpCode::Type::Ffma, &ShaderIR::DecodeFfma},
{OpCode::Type::Hfma2, &ShaderIR::DecodeHfma2},
{OpCode::Type::Conversion, &ShaderIR::DecodeConversion},
+ {OpCode::Type::Warp, &ShaderIR::DecodeWarp},
{OpCode::Type::Memory, &ShaderIR::DecodeMemory},
{OpCode::Type::Texture, &ShaderIR::DecodeTexture},
+ {OpCode::Type::Image, &ShaderIR::DecodeImage},
{OpCode::Type::FloatSetPredicate, &ShaderIR::DecodeFloatSetPredicate},
{OpCode::Type::IntegerSetPredicate, &ShaderIR::DecodeIntegerSetPredicate},
{OpCode::Type::HalfSetPredicate, &ShaderIR::DecodeHalfSetPredicate},
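
The table above dispatches on the opcode family through pointers-to-member. A compact, hypothetical illustration of the same pattern:

#include <unordered_map>

enum class Type { Arithmetic, Warp, Image };

class Decoder {
public:
    int Decode(Type type, int pc) {
        using Fn = int (Decoder::*)(int);
        // One handler per opcode family, like the table in DecodeInstr above.
        static const std::unordered_map<Type, Fn> table{
            {Type::Arithmetic, &Decoder::DecodeArithmetic},
            {Type::Warp, &Decoder::DecodeWarp},
            {Type::Image, &Decoder::DecodeImage},
        };
        const auto it = table.find(type);
        return it != table.end() ? (this->*it->second)(pc) : pc + 1;
    }

private:
    int DecodeArithmetic(int pc) { return pc + 1; }
    int DecodeWarp(int pc) { return pc + 1; }
    int DecodeImage(int pc) { return pc + 1; }
};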
diff --git a/src/video_core/shader/decode/arithmetic.cpp b/src/video_core/shader/decode/arithmetic.cpp
index 87d8fecaa..1473c282a 100644
--- a/src/video_core/shader/decode/arithmetic.cpp
+++ b/src/video_core/shader/decode/arithmetic.cpp
@@ -42,11 +42,14 @@ u32 ShaderIR::DecodeArithmetic(NodeBlock& bb, u32 pc) {
case OpCode::Id::FMUL_R:
case OpCode::Id::FMUL_IMM: {
// FMUL does not have 'abs' bits and only the second operand has a 'neg' bit.
- UNIMPLEMENTED_IF_MSG(instr.fmul.tab5cb8_2 != 0, "FMUL tab5cb8_2({}) is not implemented",
- instr.fmul.tab5cb8_2.Value());
- UNIMPLEMENTED_IF_MSG(
- instr.fmul.tab5c68_0 != 1, "FMUL tab5cb8_0({}) is not implemented",
- instr.fmul.tab5c68_0.Value()); // SMO typical sends 1 here which seems to be the default
+ if (instr.fmul.tab5cb8_2 != 0) {
+ LOG_WARNING(HW_GPU, "FMUL tab5cb8_2({}) is not implemented",
+ instr.fmul.tab5cb8_2.Value());
+ }
+ if (instr.fmul.tab5c68_0 != 1) {
+ LOG_WARNING(HW_GPU, "FMUL tab5cb8_0({}) is not implemented",
+ instr.fmul.tab5c68_0.Value());
+ }
op_b = GetOperandAbsNegFloat(op_b, false, instr.fmul.negate_b);
diff --git a/src/video_core/shader/decode/arithmetic_half_immediate.cpp b/src/video_core/shader/decode/arithmetic_half_immediate.cpp
index 7bcf38f23..6466fc011 100644
--- a/src/video_core/shader/decode/arithmetic_half_immediate.cpp
+++ b/src/video_core/shader/decode/arithmetic_half_immediate.cpp
@@ -23,7 +23,9 @@ u32 ShaderIR::DecodeArithmeticHalfImmediate(NodeBlock& bb, u32 pc) {
LOG_WARNING(HW_GPU, "{} FTZ not implemented", opcode->get().GetName());
}
} else {
- UNIMPLEMENTED_IF(instr.alu_half_imm.precision != Tegra::Shader::HalfPrecision::None);
+ if (instr.alu_half_imm.precision != Tegra::Shader::HalfPrecision::None) {
+ LOG_WARNING(HW_GPU, "{} FTZ not implemented", opcode->get().GetName());
+ }
}
Node op_a = UnpackHalfFloat(GetRegister(instr.gpr8), instr.alu_half_imm.type_a);
diff --git a/src/video_core/shader/decode/conversion.cpp b/src/video_core/shader/decode/conversion.cpp
index 4221f0c58..32facd6ba 100644
--- a/src/video_core/shader/decode/conversion.cpp
+++ b/src/video_core/shader/decode/conversion.cpp
@@ -14,6 +14,12 @@ using Tegra::Shader::Instruction;
using Tegra::Shader::OpCode;
using Tegra::Shader::Register;
+namespace {
+constexpr OperationCode GetFloatSelector(u64 selector) {
+ return selector == 0 ? OperationCode::FCastHalf0 : OperationCode::FCastHalf1;
+}
+} // Anonymous namespace
+
u32 ShaderIR::DecodeConversion(NodeBlock& bb, u32 pc) {
const Instruction instr = {program_code[pc]};
const auto opcode = OpCode::Decode(instr);
@@ -22,7 +28,7 @@ u32 ShaderIR::DecodeConversion(NodeBlock& bb, u32 pc) {
case OpCode::Id::I2I_R:
case OpCode::Id::I2I_C:
case OpCode::Id::I2I_IMM: {
- UNIMPLEMENTED_IF(instr.conversion.selector);
+ UNIMPLEMENTED_IF(instr.conversion.int_src.selector != 0);
UNIMPLEMENTED_IF(instr.conversion.dst_size != Register::Size::Word);
UNIMPLEMENTED_IF(instr.alu.saturate_d);
@@ -57,8 +63,8 @@ u32 ShaderIR::DecodeConversion(NodeBlock& bb, u32 pc) {
case OpCode::Id::I2F_R:
case OpCode::Id::I2F_C:
case OpCode::Id::I2F_IMM: {
- UNIMPLEMENTED_IF(instr.conversion.dst_size != Register::Size::Word);
- UNIMPLEMENTED_IF(instr.conversion.selector);
+ UNIMPLEMENTED_IF(instr.conversion.int_src.selector != 0);
+ UNIMPLEMENTED_IF(instr.conversion.dst_size == Register::Size::Long);
UNIMPLEMENTED_IF_MSG(instr.generates_cc,
"Condition codes generation in I2F is not implemented");
@@ -82,14 +88,19 @@ u32 ShaderIR::DecodeConversion(NodeBlock& bb, u32 pc) {
value = GetOperandAbsNegFloat(value, false, instr.conversion.negate_a);
SetInternalFlagsFromFloat(bb, value, instr.generates_cc);
+
+ if (instr.conversion.dst_size == Register::Size::Short) {
+ value = Operation(OperationCode::HCastFloat, PRECISE, value);
+ }
+
SetRegister(bb, instr.gpr0, value);
break;
}
case OpCode::Id::F2F_R:
case OpCode::Id::F2F_C:
case OpCode::Id::F2F_IMM: {
- UNIMPLEMENTED_IF(instr.conversion.f2f.dst_size != Register::Size::Word);
- UNIMPLEMENTED_IF(instr.conversion.f2f.src_size != Register::Size::Word);
+ UNIMPLEMENTED_IF(instr.conversion.dst_size == Register::Size::Long);
+ UNIMPLEMENTED_IF(instr.conversion.src_size == Register::Size::Long);
UNIMPLEMENTED_IF_MSG(instr.generates_cc,
"Condition codes generation in F2F is not implemented");
@@ -107,6 +118,13 @@ u32 ShaderIR::DecodeConversion(NodeBlock& bb, u32 pc) {
}
}();
+ if (instr.conversion.src_size == Register::Size::Short) {
+ value = Operation(GetFloatSelector(instr.conversion.float_src.selector), NO_PRECISE,
+ std::move(value));
+ } else {
+ ASSERT(instr.conversion.float_src.selector == 0);
+ }
+
value = GetOperandAbsNegFloat(value, instr.conversion.abs_a, instr.conversion.negate_a);
value = [&]() {
@@ -124,19 +142,24 @@ u32 ShaderIR::DecodeConversion(NodeBlock& bb, u32 pc) {
default:
UNIMPLEMENTED_MSG("Unimplemented F2F rounding mode {}",
static_cast<u32>(instr.conversion.f2f.rounding.Value()));
- return Immediate(0);
+ return value;
}
}();
value = GetSaturatedFloat(value, instr.alu.saturate_d);
SetInternalFlagsFromFloat(bb, value, instr.generates_cc);
+
+ if (instr.conversion.dst_size == Register::Size::Short) {
+ value = Operation(OperationCode::HCastFloat, PRECISE, value);
+ }
+
SetRegister(bb, instr.gpr0, value);
break;
}
case OpCode::Id::F2I_R:
case OpCode::Id::F2I_C:
case OpCode::Id::F2I_IMM: {
- UNIMPLEMENTED_IF(instr.conversion.src_size != Register::Size::Word);
+ UNIMPLEMENTED_IF(instr.conversion.src_size == Register::Size::Long);
UNIMPLEMENTED_IF_MSG(instr.generates_cc,
"Condition codes generation in F2I is not implemented");
Node value = [&]() {
@@ -153,6 +176,13 @@ u32 ShaderIR::DecodeConversion(NodeBlock& bb, u32 pc) {
}
}();
+ if (instr.conversion.src_size == Register::Size::Short) {
+ value = Operation(GetFloatSelector(instr.conversion.float_src.selector), NO_PRECISE,
+ std::move(value));
+ } else {
+ ASSERT(instr.conversion.float_src.selector == 0);
+ }
+
value = GetOperandAbsNegFloat(value, instr.conversion.abs_a, instr.conversion.negate_a);
value = [&]() {
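
FCastHalf0/FCastHalf1 (declared in node.h below) pick one component of a packed half pair and widen it to float. A plain C++ sketch of the intended semantics, assuming standard IEEE binary16 packing; HalfToFloat is a simplified local helper (no NaN/infinity handling):

#include <cmath>
#include <cstdint>

// Minimal binary16 -> float conversion, for illustration only.
float HalfToFloat(uint16_t h) {
    const int sign = (h >> 15) & 1;
    const int exponent = (h >> 10) & 0x1f;
    const int mantissa = h & 0x3ff;
    float value;
    if (exponent == 0) {
        value = std::ldexp(static_cast<float>(mantissa), -24); // subnormal
    } else {
        value = std::ldexp(static_cast<float>(mantissa + 0x400), exponent - 25);
    }
    return sign ? -value : value;
}

// selector == 0 -> low half (FCastHalf0), selector == 1 -> high half
// (FCastHalf1), matching GetFloatSelector above.
float CastHalf(uint32_t packed_pair, uint64_t selector) {
    const uint16_t bits = selector == 0 ? static_cast<uint16_t>(packed_pair & 0xffff)
                                        : static_cast<uint16_t>(packed_pair >> 16);
    return HalfToFloat(bits);
}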
diff --git a/src/video_core/shader/decode/decode_integer_set.cpp b/src/video_core/shader/decode/decode_integer_set.cpp
deleted file mode 100644
index e69de29bb..000000000
--- a/src/video_core/shader/decode/decode_integer_set.cpp
+++ /dev/null
diff --git a/src/video_core/shader/decode/ffma.cpp b/src/video_core/shader/decode/ffma.cpp
index 29be25ca3..ca2f39e8d 100644
--- a/src/video_core/shader/decode/ffma.cpp
+++ b/src/video_core/shader/decode/ffma.cpp
@@ -18,10 +18,12 @@ u32 ShaderIR::DecodeFfma(NodeBlock& bb, u32 pc) {
const auto opcode = OpCode::Decode(instr);
UNIMPLEMENTED_IF_MSG(instr.ffma.cc != 0, "FFMA cc not implemented");
- UNIMPLEMENTED_IF_MSG(instr.ffma.tab5980_0 != 1, "FFMA tab5980_0({}) not implemented",
- instr.ffma.tab5980_0.Value()); // Seems to be 1 by default based on SMO
- UNIMPLEMENTED_IF_MSG(instr.ffma.tab5980_1 != 0, "FFMA tab5980_1({}) not implemented",
- instr.ffma.tab5980_1.Value());
+ if (instr.ffma.tab5980_0 != 1) {
+ LOG_WARNING(HW_GPU, "FFMA tab5980_0({}) not implemented", instr.ffma.tab5980_0.Value());
+ }
+ if (instr.ffma.tab5980_1 != 0) {
+ LOG_WARNING(HW_GPU, "FFMA tab5980_1({}) not implemented", instr.ffma.tab5980_1.Value());
+ }
const Node op_a = GetRegister(instr.gpr8);
diff --git a/src/video_core/shader/decode/float_set.cpp b/src/video_core/shader/decode/float_set.cpp
index f5013e44a..5614e8a0d 100644
--- a/src/video_core/shader/decode/float_set.cpp
+++ b/src/video_core/shader/decode/float_set.cpp
@@ -15,7 +15,6 @@ using Tegra::Shader::OpCode;
u32 ShaderIR::DecodeFloatSet(NodeBlock& bb, u32 pc) {
const Instruction instr = {program_code[pc]};
- const auto opcode = OpCode::Decode(instr);
const Node op_a = GetOperandAbsNegFloat(GetRegister(instr.gpr8), instr.fset.abs_a != 0,
instr.fset.neg_a != 0);
diff --git a/src/video_core/shader/decode/float_set_predicate.cpp b/src/video_core/shader/decode/float_set_predicate.cpp
index 2323052b0..200c2c983 100644
--- a/src/video_core/shader/decode/float_set_predicate.cpp
+++ b/src/video_core/shader/decode/float_set_predicate.cpp
@@ -16,10 +16,9 @@ using Tegra::Shader::Pred;
u32 ShaderIR::DecodeFloatSetPredicate(NodeBlock& bb, u32 pc) {
const Instruction instr = {program_code[pc]};
- const auto opcode = OpCode::Decode(instr);
- const Node op_a = GetOperandAbsNegFloat(GetRegister(instr.gpr8), instr.fsetp.abs_a != 0,
- instr.fsetp.neg_a != 0);
+ Node op_a = GetOperandAbsNegFloat(GetRegister(instr.gpr8), instr.fsetp.abs_a != 0,
+ instr.fsetp.neg_a != 0);
Node op_b = [&]() {
if (instr.is_b_imm) {
return GetImmediate19(instr);
@@ -29,12 +28,13 @@ u32 ShaderIR::DecodeFloatSetPredicate(NodeBlock& bb, u32 pc) {
return GetConstBuffer(instr.cbuf34.index, instr.cbuf34.GetOffset());
}
}();
- op_b = GetOperandAbsNegFloat(op_b, instr.fsetp.abs_b, false);
+ op_b = GetOperandAbsNegFloat(std::move(op_b), instr.fsetp.abs_b, instr.fsetp.neg_b);
// We can't use the constant predicate as destination.
ASSERT(instr.fsetp.pred3 != static_cast<u64>(Pred::UnusedIndex));
- const Node predicate = GetPredicateComparisonFloat(instr.fsetp.cond, op_a, op_b);
+ const Node predicate =
+ GetPredicateComparisonFloat(instr.fsetp.cond, std::move(op_a), std::move(op_b));
const Node second_pred = GetPredicate(instr.fsetp.pred39, instr.fsetp.neg_pred != 0);
const OperationCode combiner = GetPredicateCombiner(instr.fsetp.op);
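
Collapsed to scalars, the node graph above computes pred3 = combiner(op_a cond op_b, pred39). A sketch of that data flow with hypothetical helper names:

#include <functional>

// pred3 = combiner(comparison, second_pred) -- the same shape as the
// FSETP nodes built above, reduced to plain booleans.
bool FsetpScalar(float op_a, float op_b,
                 const std::function<bool(float, float)>& cond,
                 const std::function<bool(bool, bool)>& combiner, bool second_pred) {
    const bool comparison = cond(op_a, op_b);
    return combiner(comparison, second_pred);
}

// Example: FSETP.LT.AND ->
//   FsetpScalar(a, b, std::less<float>{}, std::logical_and<bool>{}, p39);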
diff --git a/src/video_core/shader/decode/half_set_predicate.cpp b/src/video_core/shader/decode/half_set_predicate.cpp
index d59d15bd8..840694527 100644
--- a/src/video_core/shader/decode/half_set_predicate.cpp
+++ b/src/video_core/shader/decode/half_set_predicate.cpp
@@ -18,43 +18,55 @@ u32 ShaderIR::DecodeHalfSetPredicate(NodeBlock& bb, u32 pc) {
const Instruction instr = {program_code[pc]};
const auto opcode = OpCode::Decode(instr);
- UNIMPLEMENTED_IF(instr.hsetp2.ftz != 0);
+ DEBUG_ASSERT(instr.hsetp2.ftz == 0);
Node op_a = UnpackHalfFloat(GetRegister(instr.gpr8), instr.hsetp2.type_a);
op_a = GetOperandAbsNegHalf(op_a, instr.hsetp2.abs_a, instr.hsetp2.negate_a);
- Node op_b = [&]() {
- switch (opcode->get().GetId()) {
- case OpCode::Id::HSETP2_R:
- return GetOperandAbsNegHalf(GetRegister(instr.gpr20), instr.hsetp2.abs_a,
- instr.hsetp2.negate_b);
- default:
- UNREACHABLE();
- return Immediate(0);
- }
- }();
- op_b = UnpackHalfFloat(op_b, instr.hsetp2.type_b);
-
- // We can't use the constant predicate as destination.
- ASSERT(instr.hsetp2.pred3 != static_cast<u64>(Pred::UnusedIndex));
-
- const Node second_pred = GetPredicate(instr.hsetp2.pred39, instr.hsetp2.neg_pred != 0);
+ Tegra::Shader::PredCondition cond{};
+ bool h_and{};
+ Node op_b{};
+ switch (opcode->get().GetId()) {
+ case OpCode::Id::HSETP2_C:
+ cond = instr.hsetp2.cbuf_and_imm.cond;
+ h_and = instr.hsetp2.cbuf_and_imm.h_and;
+ op_b = GetOperandAbsNegHalf(GetConstBuffer(instr.cbuf34.index, instr.cbuf34.GetOffset()),
+ instr.hsetp2.cbuf.abs_b, instr.hsetp2.cbuf.negate_b);
+ break;
+ case OpCode::Id::HSETP2_IMM:
+ cond = instr.hsetp2.cbuf_and_imm.cond;
+ h_and = instr.hsetp2.cbuf_and_imm.h_and;
+ op_b = UnpackHalfImmediate(instr, true);
+ break;
+ case OpCode::Id::HSETP2_R:
+ cond = instr.hsetp2.reg.cond;
+ h_and = instr.hsetp2.reg.h_and;
+ op_b =
+ GetOperandAbsNegHalf(UnpackHalfFloat(GetRegister(instr.gpr20), instr.hsetp2.reg.type_b),
+ instr.hsetp2.reg.abs_b, instr.hsetp2.reg.negate_b);
+ break;
+ default:
+ UNREACHABLE();
+ op_b = Immediate(0);
+ }
const OperationCode combiner = GetPredicateCombiner(instr.hsetp2.op);
- const OperationCode pair_combiner =
- instr.hsetp2.h_and ? OperationCode::LogicalAll2 : OperationCode::LogicalAny2;
-
- const Node comparison = GetPredicateComparisonHalf(instr.hsetp2.cond, op_a, op_b);
- const Node first_pred = Operation(pair_combiner, comparison);
+ const Node combined_pred = GetPredicate(instr.hsetp2.pred39, instr.hsetp2.neg_pred);
- // Set the primary predicate to the result of Predicate OP SecondPredicate
- const Node value = Operation(combiner, first_pred, second_pred);
- SetPredicate(bb, instr.hsetp2.pred3, value);
+ const auto Write = [&](u64 dest, Node src) {
+ SetPredicate(bb, dest, Operation(combiner, std::move(src), combined_pred));
+ };
- if (instr.hsetp2.pred0 != static_cast<u64>(Pred::UnusedIndex)) {
- // Set the secondary predicate to the result of !Predicate OP SecondPredicate, if enabled
- const Node negated_pred = Operation(OperationCode::LogicalNegate, first_pred);
- SetPredicate(bb, instr.hsetp2.pred0, Operation(combiner, negated_pred, second_pred));
+ const Node comparison = GetPredicateComparisonHalf(cond, op_a, op_b);
+ const u64 first = instr.hsetp2.pred3;
+ const u64 second = instr.hsetp2.pred0;
+ if (h_and) {
+ Node joined = Operation(OperationCode::LogicalAnd2, comparison);
+ Write(first, joined);
+ Write(second, Operation(OperationCode::LogicalNegate, std::move(joined)));
+ } else {
+ Write(first, Operation(OperationCode::LogicalPick2, comparison, Immediate(0U)));
+ Write(second, Operation(OperationCode::LogicalPick2, comparison, Immediate(1U)));
}
return pc;
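
The rewritten HSETP2 produces two predicates from one packed comparison: with h_and, both components are ANDed and the second destination receives the negation; otherwise LogicalPick2 routes component 0 and component 1. A scalar sketch (the final combiner with pred39 is omitted):

#include <array>
#include <utility>

// comparison holds the per-component results of the packed half comparison.
std::pair<bool, bool> Hsetp2Results(std::array<bool, 2> comparison, bool h_and) {
    if (h_and) {
        const bool joined = comparison[0] && comparison[1]; // LogicalAnd2
        return {joined, !joined};
    }
    // LogicalPick2 with indices 0 and 1
    return {comparison[0], comparison[1]};
}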
diff --git a/src/video_core/shader/decode/hfma2.cpp b/src/video_core/shader/decode/hfma2.cpp
index c3bcf1ae9..5b44cb79c 100644
--- a/src/video_core/shader/decode/hfma2.cpp
+++ b/src/video_core/shader/decode/hfma2.cpp
@@ -22,9 +22,9 @@ u32 ShaderIR::DecodeHfma2(NodeBlock& bb, u32 pc) {
const auto opcode = OpCode::Decode(instr);
if (opcode->get().GetId() == OpCode::Id::HFMA2_RR) {
- UNIMPLEMENTED_IF(instr.hfma2.rr.precision != HalfPrecision::None);
+ DEBUG_ASSERT(instr.hfma2.rr.precision == HalfPrecision::None);
} else {
- UNIMPLEMENTED_IF(instr.hfma2.precision != HalfPrecision::None);
+ DEBUG_ASSERT(instr.hfma2.precision == HalfPrecision::None);
}
constexpr auto identity = HalfType::H0_H1;
diff --git a/src/video_core/shader/decode/image.cpp b/src/video_core/shader/decode/image.cpp
new file mode 100644
index 000000000..d54fb88c9
--- /dev/null
+++ b/src/video_core/shader/decode/image.cpp
@@ -0,0 +1,164 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <algorithm>
+#include <vector>
+#include <fmt/format.h>
+
+#include "common/assert.h"
+#include "common/bit_field.h"
+#include "common/common_types.h"
+#include "common/logging/log.h"
+#include "video_core/engines/shader_bytecode.h"
+#include "video_core/shader/node_helper.h"
+#include "video_core/shader/shader_ir.h"
+
+namespace VideoCommon::Shader {
+
+using Tegra::Shader::Instruction;
+using Tegra::Shader::OpCode;
+
+namespace {
+std::size_t GetImageTypeNumCoordinates(Tegra::Shader::ImageType image_type) {
+ switch (image_type) {
+ case Tegra::Shader::ImageType::Texture1D:
+ case Tegra::Shader::ImageType::TextureBuffer:
+ return 1;
+ case Tegra::Shader::ImageType::Texture1DArray:
+ case Tegra::Shader::ImageType::Texture2D:
+ return 2;
+ case Tegra::Shader::ImageType::Texture2DArray:
+ case Tegra::Shader::ImageType::Texture3D:
+ return 3;
+ }
+ UNREACHABLE();
+ return 1;
+}
+} // Anonymous namespace
+
+u32 ShaderIR::DecodeImage(NodeBlock& bb, u32 pc) {
+ const Instruction instr = {program_code[pc]};
+ const auto opcode = OpCode::Decode(instr);
+
+ switch (opcode->get().GetId()) {
+ case OpCode::Id::SUST: {
+ UNIMPLEMENTED_IF(instr.sust.mode != Tegra::Shader::SurfaceDataMode::P);
+ UNIMPLEMENTED_IF(instr.sust.out_of_bounds_store != Tegra::Shader::OutOfBoundsStore::Ignore);
+ UNIMPLEMENTED_IF(instr.sust.component_mask_selector != 0xf); // Ensure we have an RGBA store
+
+ std::vector<Node> values;
+ constexpr std::size_t hardcoded_size{4};
+ for (std::size_t i = 0; i < hardcoded_size; ++i) {
+ values.push_back(GetRegister(instr.gpr0.Value() + i));
+ }
+
+ std::vector<Node> coords;
+ const std::size_t num_coords{GetImageTypeNumCoordinates(instr.sust.image_type)};
+ for (std::size_t i = 0; i < num_coords; ++i) {
+ coords.push_back(GetRegister(instr.gpr8.Value() + i));
+ }
+
+ const auto type{instr.sust.image_type};
+ auto& image{instr.sust.is_immediate ? GetImage(instr.image, type)
+ : GetBindlessImage(instr.gpr39, type)};
+ image.MarkWrite();
+
+ MetaImage meta{image, values};
+ bb.push_back(Operation(OperationCode::ImageStore, meta, std::move(coords)));
+ break;
+ }
+ case OpCode::Id::SUATOM: {
+ UNIMPLEMENTED_IF(instr.suatom_d.is_ba != 0);
+
+ Node value = GetRegister(instr.gpr0);
+
+ std::vector<Node> coords;
+ const std::size_t num_coords{GetImageTypeNumCoordinates(instr.suatom_d.image_type)};
+ for (std::size_t i = 0; i < num_coords; ++i) {
+ coords.push_back(GetRegister(instr.gpr8.Value() + i));
+ }
+
+ const OperationCode operation_code = [instr] {
+ switch (instr.suatom_d.operation) {
+ case Tegra::Shader::ImageAtomicOperation::Add:
+ return OperationCode::AtomicImageAdd;
+ case Tegra::Shader::ImageAtomicOperation::Min:
+ return OperationCode::AtomicImageMin;
+ case Tegra::Shader::ImageAtomicOperation::Max:
+ return OperationCode::AtomicImageMax;
+ case Tegra::Shader::ImageAtomicOperation::And:
+ return OperationCode::AtomicImageAnd;
+ case Tegra::Shader::ImageAtomicOperation::Or:
+ return OperationCode::AtomicImageOr;
+ case Tegra::Shader::ImageAtomicOperation::Xor:
+ return OperationCode::AtomicImageXor;
+ case Tegra::Shader::ImageAtomicOperation::Exch:
+ return OperationCode::AtomicImageExchange;
+ default:
+ UNIMPLEMENTED_MSG("Unimplemented operation={}",
+ static_cast<u32>(instr.suatom_d.operation.Value()));
+ return OperationCode::AtomicImageAdd;
+ }
+ }();
+
+ const auto& image{GetImage(instr.image, instr.suatom_d.image_type, instr.suatom_d.size)};
+ MetaImage meta{image, {std::move(value)}};
+ SetRegister(bb, instr.gpr0, Operation(operation_code, meta, std::move(coords)));
+ break;
+ }
+ default:
+ UNIMPLEMENTED_MSG("Unhandled image instruction: {}", opcode->get().GetName());
+ }
+
+ return pc;
+}
+
+Image& ShaderIR::GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type,
+ std::optional<Tegra::Shader::ImageAtomicSize> size) {
+ const auto offset{static_cast<std::size_t>(image.index.Value())};
+ if (const auto image = TryUseExistingImage(offset, type, size)) {
+ return *image;
+ }
+
+ const std::size_t next_index{used_images.size()};
+ return used_images.emplace(offset, Image{offset, next_index, type, size}).first->second;
+}
+
+Image& ShaderIR::GetBindlessImage(Tegra::Shader::Register reg, Tegra::Shader::ImageType type,
+ std::optional<Tegra::Shader::ImageAtomicSize> size) {
+ const Node image_register{GetRegister(reg)};
+ const auto [base_image, cbuf_index, cbuf_offset]{
+ TrackCbuf(image_register, global_code, static_cast<s64>(global_code.size()))};
+ const auto cbuf_key{(static_cast<u64>(cbuf_index) << 32) | static_cast<u64>(cbuf_offset)};
+
+ if (const auto image = TryUseExistingImage(cbuf_key, type, size)) {
+ return *image;
+ }
+
+ const std::size_t next_index{used_images.size()};
+ return used_images.emplace(cbuf_key, Image{cbuf_index, cbuf_offset, next_index, type, size})
+ .first->second;
+}
+
+Image* ShaderIR::TryUseExistingImage(u64 offset, Tegra::Shader::ImageType type,
+ std::optional<Tegra::Shader::ImageAtomicSize> size) {
+ auto it = used_images.find(offset);
+ if (it == used_images.end()) {
+ return nullptr;
+ }
+ auto& image = it->second;
+ ASSERT(image.GetType() == type);
+
+ if (size) {
+ // The size is known here: if it was known before it must match, otherwise record it.
+ if (image.IsSizeKnown()) {
+ ASSERT(image.GetSize() == size);
+ } else {
+ image.SetSize(*size);
+ }
+ }
+ return &image;
+}
+
+} // namespace VideoCommon::Shader
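
Bindless images are deduplicated by packing the tracked constant-buffer index and offset into a single 64-bit key, the same scheme GetBindlessSampler uses later in this diff. A small sketch of that keying (types hypothetical):

#include <cstdint>
#include <unordered_map>

struct ImageEntry { /* type, size, ... */ };

uint64_t MakeBindlessKey(uint32_t cbuf_index, uint32_t cbuf_offset) {
    // Same packing as GetBindlessImage: index in the high word, offset low.
    return (static_cast<uint64_t>(cbuf_index) << 32) | cbuf_offset;
}

ImageEntry& DeduplicateImage(std::unordered_map<uint64_t, ImageEntry>& used,
                             uint32_t index, uint32_t offset) {
    // try_emplace keeps the first registration, like used_images above.
    return used.try_emplace(MakeBindlessKey(index, offset)).first->second;
}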
diff --git a/src/video_core/shader/decode/integer_set.cpp b/src/video_core/shader/decode/integer_set.cpp
index 46e3d5905..59809bcd8 100644
--- a/src/video_core/shader/decode/integer_set.cpp
+++ b/src/video_core/shader/decode/integer_set.cpp
@@ -14,7 +14,6 @@ using Tegra::Shader::OpCode;
u32 ShaderIR::DecodeIntegerSet(NodeBlock& bb, u32 pc) {
const Instruction instr = {program_code[pc]};
- const auto opcode = OpCode::Decode(instr);
const Node op_a = GetRegister(instr.gpr8);
const Node op_b = [&]() {
diff --git a/src/video_core/shader/decode/integer_set_predicate.cpp b/src/video_core/shader/decode/integer_set_predicate.cpp
index dd20775d7..25e48fef8 100644
--- a/src/video_core/shader/decode/integer_set_predicate.cpp
+++ b/src/video_core/shader/decode/integer_set_predicate.cpp
@@ -16,7 +16,6 @@ using Tegra::Shader::Pred;
u32 ShaderIR::DecodeIntegerSetPredicate(NodeBlock& bb, u32 pc) {
const Instruction instr = {program_code[pc]};
- const auto opcode = OpCode::Decode(instr);
const Node op_a = GetRegister(instr.gpr8);
diff --git a/src/video_core/shader/decode/memory.cpp b/src/video_core/shader/decode/memory.cpp
index 80fc0ccfc..7923d4d69 100644
--- a/src/video_core/shader/decode/memory.cpp
+++ b/src/video_core/shader/decode/memory.cpp
@@ -35,7 +35,7 @@ u32 GetUniformTypeElementsCount(Tegra::Shader::UniformType uniform_type) {
return 1;
}
}
-} // namespace
+} // Anonymous namespace
u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
const Instruction instr = {program_code[pc]};
@@ -95,10 +95,10 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
const Node op_b =
GetConstBufferIndirect(instr.cbuf36.index, instr.cbuf36.GetOffset() + 4, index);
- SetTemporal(bb, 0, op_a);
- SetTemporal(bb, 1, op_b);
- SetRegister(bb, instr.gpr0, GetTemporal(0));
- SetRegister(bb, instr.gpr0.Value() + 1, GetTemporal(1));
+ SetTemporary(bb, 0, op_a);
+ SetTemporary(bb, 1, op_b);
+ SetRegister(bb, instr.gpr0, GetTemporary(0));
+ SetRegister(bb, instr.gpr0.Value() + 1, GetTemporary(1));
break;
}
default:
@@ -106,16 +106,17 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
}
break;
}
- case OpCode::Id::LD_L: {
- LOG_DEBUG(HW_GPU, "LD_L cache management mode: {}",
- static_cast<u64>(instr.ld_l.unknown.Value()));
-
- const auto GetLmem = [&](s32 offset) {
+ case OpCode::Id::LD_L:
+ LOG_DEBUG(HW_GPU, "LD_L cache management mode: {}", static_cast<u64>(instr.ld_l.unknown));
+ [[fallthrough]];
+ case OpCode::Id::LD_S: {
+ const auto GetMemory = [&](s32 offset) {
ASSERT(offset % 4 == 0);
const Node immediate_offset = Immediate(static_cast<s32>(instr.smem_imm) + offset);
const Node address = Operation(OperationCode::IAdd, NO_PRECISE, GetRegister(instr.gpr8),
immediate_offset);
- return GetLocalMemory(address);
+ return opcode->get().GetId() == OpCode::Id::LD_S ? GetSharedMemory(address)
+ : GetLocalMemory(address);
};
switch (instr.ldst_sl.type.Value()) {
@@ -135,14 +136,16 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
return 0;
}
}();
- for (u32 i = 0; i < count; ++i)
- SetTemporal(bb, i, GetLmem(i * 4));
- for (u32 i = 0; i < count; ++i)
- SetRegister(bb, instr.gpr0.Value() + i, GetTemporal(i));
+ for (u32 i = 0; i < count; ++i) {
+ SetTemporary(bb, i, GetMemory(i * 4));
+ }
+ for (u32 i = 0; i < count; ++i) {
+ SetRegister(bb, instr.gpr0.Value() + i, GetTemporary(i));
+ }
break;
}
default:
- UNIMPLEMENTED_MSG("LD_L Unhandled type: {}",
+ UNIMPLEMENTED_MSG("{} Unhandled type: {}", opcode->get().GetName(),
static_cast<u32>(instr.ldst_sl.type.Value()));
}
break;
@@ -172,10 +175,10 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
Operation(OperationCode::UAdd, NO_PRECISE, real_address_base, it_offset);
const Node gmem = MakeNode<GmemNode>(real_address, base_address, descriptor);
- SetTemporal(bb, i, gmem);
+ SetTemporary(bb, i, gmem);
}
for (u32 i = 0; i < count; ++i) {
- SetRegister(bb, instr.gpr0.Value() + i, GetTemporal(i));
+ SetRegister(bb, instr.gpr0.Value() + i, GetTemporary(i));
}
break;
}
@@ -209,27 +212,34 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
break;
}
- case OpCode::Id::ST_L: {
+ case OpCode::Id::ST_L:
LOG_DEBUG(HW_GPU, "ST_L cache management mode: {}",
static_cast<u64>(instr.st_l.cache_management.Value()));
-
- const auto GetLmemAddr = [&](s32 offset) {
+ [[fallthrough]];
+ case OpCode::Id::ST_S: {
+ const auto GetAddress = [&](s32 offset) {
ASSERT(offset % 4 == 0);
const Node immediate = Immediate(static_cast<s32>(instr.smem_imm) + offset);
return Operation(OperationCode::IAdd, NO_PRECISE, GetRegister(instr.gpr8), immediate);
};
+ const auto set_memory = opcode->get().GetId() == OpCode::Id::ST_L
+ ? &ShaderIR::SetLocalMemory
+ : &ShaderIR::SetSharedMemory;
+
switch (instr.ldst_sl.type.Value()) {
case Tegra::Shader::StoreType::Bits128:
- SetLocalMemory(bb, GetLmemAddr(12), GetRegister(instr.gpr0.Value() + 3));
- SetLocalMemory(bb, GetLmemAddr(8), GetRegister(instr.gpr0.Value() + 2));
+ (this->*set_memory)(bb, GetAddress(12), GetRegister(instr.gpr0.Value() + 3));
+ (this->*set_memory)(bb, GetAddress(8), GetRegister(instr.gpr0.Value() + 2));
+ [[fallthrough]];
case Tegra::Shader::StoreType::Bits64:
- SetLocalMemory(bb, GetLmemAddr(4), GetRegister(instr.gpr0.Value() + 1));
+ (this->*set_memory)(bb, GetAddress(4), GetRegister(instr.gpr0.Value() + 1));
+ [[fallthrough]];
case Tegra::Shader::StoreType::Bits32:
- SetLocalMemory(bb, GetLmemAddr(0), GetRegister(instr.gpr0));
+ (this->*set_memory)(bb, GetAddress(0), GetRegister(instr.gpr0));
break;
default:
- UNIMPLEMENTED_MSG("ST_L Unhandled type: {}",
+ UNIMPLEMENTED_MSG("{} unhandled type: {}", opcode->get().GetName(),
static_cast<u32>(instr.ldst_sl.type.Value()));
}
break;
@@ -253,11 +263,11 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
TrackAndGetGlobalMemory(bb, instr, true);
// Encode in temporary registers like this: real_base_address, {registers_to_be_written...}
- SetTemporal(bb, 0, real_address_base);
+ SetTemporary(bb, 0, real_address_base);
const u32 count = GetUniformTypeElementsCount(type);
for (u32 i = 0; i < count; ++i) {
- SetTemporal(bb, i + 1, GetRegister(instr.gpr0.Value() + i));
+ SetTemporary(bb, i + 1, GetRegister(instr.gpr0.Value() + i));
}
for (u32 i = 0; i < count; ++i) {
const Node it_offset = Immediate(i * 4);
@@ -265,7 +275,7 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
Operation(OperationCode::UAdd, NO_PRECISE, real_address_base, it_offset);
const Node gmem = MakeNode<GmemNode>(real_address, base_address, descriptor);
- bb.push_back(Operation(OperationCode::Assign, gmem, GetTemporal(i + 1)));
+ bb.push_back(Operation(OperationCode::Assign, gmem, GetTemporary(i + 1)));
}
break;
}
@@ -297,18 +307,13 @@ std::tuple<Node, Node, GlobalMemoryBase> ShaderIR::TrackAndGetGlobalMemory(NodeB
const auto addr_register{GetRegister(instr.gmem.gpr)};
const auto immediate_offset{static_cast<u32>(instr.gmem.offset)};
- const Node base_address{
- TrackCbuf(addr_register, global_code, static_cast<s64>(global_code.size()))};
- const auto cbuf = std::get_if<CbufNode>(&*base_address);
- ASSERT(cbuf != nullptr);
- const auto cbuf_offset_imm = std::get_if<ImmediateNode>(&*cbuf->GetOffset());
- ASSERT(cbuf_offset_imm != nullptr);
- const auto cbuf_offset = cbuf_offset_imm->GetValue();
+ const auto [base_address, index, offset] =
+ TrackCbuf(addr_register, global_code, static_cast<s64>(global_code.size()));
+ ASSERT(base_address != nullptr);
- bb.push_back(
- Comment(fmt::format("Base address is c[0x{:x}][0x{:x}]", cbuf->GetIndex(), cbuf_offset)));
+ bb.push_back(Comment(fmt::format("Base address is c[0x{:x}][0x{:x}]", index, offset)));
- const GlobalMemoryBase descriptor{cbuf->GetIndex(), cbuf_offset};
+ const GlobalMemoryBase descriptor{index, offset};
const auto& [entry, is_new] = used_global_memory.try_emplace(descriptor);
auto& usage = entry->second;
if (is_write) {
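
ST_L and ST_S now share one body by selecting the write routine through a pointer-to-member and falling through the width cases. A condensed sketch of the dispatch idea (class and methods hypothetical):

class Emitter {
public:
    void Store(bool is_local, int addr, int value) {
        // Pick the backing memory once, then reuse the same write path,
        // like the set_memory pointer in DecodeMemory above.
        void (Emitter::*set_memory)(int, int) =
            is_local ? &Emitter::SetLocalMemory : &Emitter::SetSharedMemory;
        (this->*set_memory)(addr, value);
    }

private:
    void SetLocalMemory(int addr, int value) { /* ... */ }
    void SetSharedMemory(int addr, int value) { /* ... */ }
};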
diff --git a/src/video_core/shader/decode/other.cpp b/src/video_core/shader/decode/other.cpp
index 6fc07f213..d46e0f823 100644
--- a/src/video_core/shader/decode/other.cpp
+++ b/src/video_core/shader/decode/other.cpp
@@ -22,6 +22,12 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) {
const auto opcode = OpCode::Decode(instr);
switch (opcode->get().GetId()) {
+ case OpCode::Id::NOP: {
+ UNIMPLEMENTED_IF(instr.nop.cc != Tegra::Shader::ConditionCode::T);
+ UNIMPLEMENTED_IF(instr.nop.trigger != 0);
+ // With the previous preconditions, this instruction is a no-operation.
+ break;
+ }
case OpCode::Id::EXIT: {
const Tegra::Shader::ConditionCode cc = instr.flow_condition_code;
UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, "EXIT condition code used: {}",
@@ -68,6 +74,13 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) {
case SystemVariable::InvocationInfo:
LOG_WARNING(HW_GPU, "MOV_SYS instruction with InvocationInfo is incomplete");
return Immediate(0u);
+ case SystemVariable::Tid: {
+ Node value = Immediate(0);
+ value = BitfieldInsert(value, Operation(OperationCode::LocalInvocationIdX), 0, 9);
+ value = BitfieldInsert(value, Operation(OperationCode::LocalInvocationIdY), 16, 9);
+ value = BitfieldInsert(value, Operation(OperationCode::LocalInvocationIdZ), 26, 5);
+ return value;
+ }
case SystemVariable::TidX:
return Operation(OperationCode::LocalInvocationIdX);
case SystemVariable::TidY:
@@ -91,11 +104,46 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) {
break;
}
case OpCode::Id::BRA: {
- UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0,
- "BRA with constant buffers are not implemented");
+ Node branch;
+ if (instr.bra.constant_buffer == 0) {
+ const u32 target = pc + instr.bra.GetBranchTarget();
+ branch = Operation(OperationCode::Branch, Immediate(target));
+ } else {
+ const u32 target = pc + 1;
+ const Node op_a = GetConstBuffer(instr.cbuf36.index, instr.cbuf36.GetOffset());
+ const Node convert = SignedOperation(OperationCode::IArithmeticShiftRight, true,
+ PRECISE, op_a, Immediate(3));
+ const Node operand =
+ Operation(OperationCode::IAdd, PRECISE, convert, Immediate(target));
+ branch = Operation(OperationCode::BranchIndirect, operand);
+ }
- const u32 target = pc + instr.bra.GetBranchTarget();
- const Node branch = Operation(OperationCode::Branch, Immediate(target));
+ const Tegra::Shader::ConditionCode cc = instr.flow_condition_code;
+ if (cc != Tegra::Shader::ConditionCode::T) {
+ bb.push_back(Conditional(GetConditionCode(cc), {branch}));
+ } else {
+ bb.push_back(branch);
+ }
+ break;
+ }
+ case OpCode::Id::BRX: {
+ Node operand;
+ if (instr.brx.constant_buffer != 0) {
+ const s32 target = pc + 1;
+ const Node index = GetRegister(instr.gpr8);
+ const Node op_a =
+ GetConstBufferIndirect(instr.cbuf36.index, instr.cbuf36.GetOffset() + 0, index);
+ const Node convert = SignedOperation(OperationCode::IArithmeticShiftRight, true,
+ PRECISE, op_a, Immediate(3));
+ operand = Operation(OperationCode::IAdd, PRECISE, convert, Immediate(target));
+ } else {
+ const s32 target = pc + instr.brx.GetBranchExtend();
+ const Node op_a = GetRegister(instr.gpr8);
+ const Node convert = SignedOperation(OperationCode::IArithmeticShiftRight, true,
+ PRECISE, op_a, Immediate(3));
+ operand = Operation(OperationCode::IAdd, PRECISE, convert, Immediate(target));
+ }
+ const Node branch = Operation(OperationCode::BranchIndirect, operand);
const Tegra::Shader::ConditionCode cc = instr.flow_condition_code;
if (cc != Tegra::Shader::ConditionCode::T) {
@@ -109,22 +157,28 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) {
UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0,
"Constant buffer flow is not supported");
- // The SSY opcode tells the GPU where to re-converge divergent execution paths, it sets the
- // target of the jump that the SYNC instruction will make. The SSY opcode has a similar
- // structure to the BRA opcode.
+ if (disable_flow_stack) {
+ break;
+ }
+
+ // The SSY opcode tells the GPU where to re-converge divergent execution paths with SYNC.
const u32 target = pc + instr.bra.GetBranchTarget();
- bb.push_back(Operation(OperationCode::PushFlowStack, Immediate(target)));
+ bb.push_back(
+ Operation(OperationCode::PushFlowStack, MetaStackClass::Ssy, Immediate(target)));
break;
}
case OpCode::Id::PBK: {
UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0,
"Constant buffer PBK is not supported");
- // PBK pushes to a stack the address where BRK will jump to. This shares stack with SSY but
- // using SYNC on a PBK address will kill the shader execution. We don't emulate this because
- // it's very unlikely a driver will emit such invalid shader.
+ if (disable_flow_stack) {
+ break;
+ }
+
+ // PBK pushes onto a stack the address that BRK will jump to.
const u32 target = pc + instr.bra.GetBranchTarget();
- bb.push_back(Operation(OperationCode::PushFlowStack, Immediate(target)));
+ bb.push_back(
+ Operation(OperationCode::PushFlowStack, MetaStackClass::Pbk, Immediate(target)));
break;
}
case OpCode::Id::SYNC: {
@@ -132,17 +186,24 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) {
UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, "SYNC condition code used: {}",
static_cast<u32>(cc));
+ if (disable_flow_stack) {
+ break;
+ }
+
// The SYNC opcode jumps to the address previously set by the SSY opcode
- bb.push_back(Operation(OperationCode::PopFlowStack));
+ bb.push_back(Operation(OperationCode::PopFlowStack, MetaStackClass::Ssy));
break;
}
case OpCode::Id::BRK: {
const Tegra::Shader::ConditionCode cc = instr.flow_condition_code;
UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, "BRK condition code used: {}",
static_cast<u32>(cc));
+ if (disable_flow_stack) {
+ break;
+ }
// The BRK opcode jumps to the address previously set by the PBK opcode
- bb.push_back(Operation(OperationCode::PopFlowStack));
+ bb.push_back(Operation(OperationCode::PopFlowStack, MetaStackClass::Pbk));
break;
}
case OpCode::Id::IPA: {
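
BRX resolves its destination at run time: the value read from a register (or an indirectly addressed constant-buffer word) is a byte offset, shifted right by 3 to convert to 8-byte instruction units and added to a base. The address math above, as scalars:

#include <cstdint>

// target_base is pc + instr.brx.GetBranchExtend() (register form) or pc + 1
// (constant-buffer form); operand is the value BRX reads at run time.
int32_t BrxTarget(int32_t target_base, int32_t operand) {
    return (operand >> 3) + target_base; // >> 3: byte offset -> instruction index
}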
diff --git a/src/video_core/shader/decode/predicate_set_register.cpp b/src/video_core/shader/decode/predicate_set_register.cpp
index febbfeb50..84dbc50fe 100644
--- a/src/video_core/shader/decode/predicate_set_register.cpp
+++ b/src/video_core/shader/decode/predicate_set_register.cpp
@@ -15,7 +15,6 @@ using Tegra::Shader::OpCode;
u32 ShaderIR::DecodePredicateSetRegister(NodeBlock& bb, u32 pc) {
const Instruction instr = {program_code[pc]};
- const auto opcode = OpCode::Decode(instr);
UNIMPLEMENTED_IF_MSG(instr.generates_cc,
"Condition codes generation in PSET is not implemented");
diff --git a/src/video_core/shader/decode/shift.cpp b/src/video_core/shader/decode/shift.cpp
index 2ac16eeb0..f6ee68a54 100644
--- a/src/video_core/shader/decode/shift.cpp
+++ b/src/video_core/shader/decode/shift.cpp
@@ -17,8 +17,8 @@ u32 ShaderIR::DecodeShift(NodeBlock& bb, u32 pc) {
const Instruction instr = {program_code[pc]};
const auto opcode = OpCode::Decode(instr);
- const Node op_a = GetRegister(instr.gpr8);
- const Node op_b = [&]() {
+ Node op_a = GetRegister(instr.gpr8);
+ Node op_b = [&]() {
if (instr.is_b_imm) {
return Immediate(instr.alu.GetSignedImm20_20());
} else if (instr.is_b_gpr) {
@@ -32,16 +32,23 @@ u32 ShaderIR::DecodeShift(NodeBlock& bb, u32 pc) {
case OpCode::Id::SHR_C:
case OpCode::Id::SHR_R:
case OpCode::Id::SHR_IMM: {
- const Node value = SignedOperation(OperationCode::IArithmeticShiftRight,
- instr.shift.is_signed, PRECISE, op_a, op_b);
+ if (instr.shr.wrap) {
+ op_b = Operation(OperationCode::UBitwiseAnd, std::move(op_b), Immediate(0x1f));
+ } else {
+ op_b = Operation(OperationCode::IMax, std::move(op_b), Immediate(0));
+ op_b = Operation(OperationCode::IMin, std::move(op_b), Immediate(31));
+ }
+
+ Node value = SignedOperation(OperationCode::IArithmeticShiftRight, instr.shift.is_signed,
+ std::move(op_a), std::move(op_b));
SetInternalFlagsFromInteger(bb, value, instr.generates_cc);
- SetRegister(bb, instr.gpr0, value);
+ SetRegister(bb, instr.gpr0, std::move(value));
break;
}
case OpCode::Id::SHL_C:
case OpCode::Id::SHL_R:
case OpCode::Id::SHL_IMM: {
- const Node value = Operation(OperationCode::ILogicalShiftLeft, PRECISE, op_a, op_b);
+ const Node value = Operation(OperationCode::ILogicalShiftLeft, op_a, op_b);
SetInternalFlagsFromInteger(bb, value, instr.generates_cc);
SetRegister(bb, instr.gpr0, value);
break;
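
The new SHR path models the hardware's two shift-amount modes: wrap masks the amount to five bits, while the default clamps it into [0, 31]. A scalar sketch:

#include <algorithm>
#include <cstdint>

int32_t ShiftRight(int32_t value, int32_t amount, bool wrap, bool is_signed) {
    if (wrap) {
        amount &= 0x1f;                     // UBitwiseAnd with 0x1f
    } else {
        amount = std::clamp(amount, 0, 31); // IMax then IMin
    }
    if (is_signed) {
        return value >> amount;             // arithmetic shift
    }
    return static_cast<int32_t>(static_cast<uint32_t>(value) >> amount);
}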
diff --git a/src/video_core/shader/decode/texture.cpp b/src/video_core/shader/decode/texture.cpp
index 4a356dbd4..0b934a069 100644
--- a/src/video_core/shader/decode/texture.cpp
+++ b/src/video_core/shader/decode/texture.cpp
@@ -181,10 +181,10 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
const Node value =
Operation(OperationCode::TextureQueryDimensions, meta,
GetRegister(instr.gpr8.Value() + (is_bindless ? 1 : 0)));
- SetTemporal(bb, indexer++, value);
+ SetTemporary(bb, indexer++, value);
}
for (u32 i = 0; i < indexer; ++i) {
- SetRegister(bb, instr.gpr0.Value() + i, GetTemporal(i));
+ SetRegister(bb, instr.gpr0.Value() + i, GetTemporary(i));
}
break;
}
@@ -238,13 +238,25 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
auto params = coords;
MetaTexture meta{sampler, {}, {}, {}, {}, {}, {}, element};
const Node value = Operation(OperationCode::TextureQueryLod, meta, std::move(params));
- SetTemporal(bb, indexer++, value);
+ SetTemporary(bb, indexer++, value);
}
for (u32 i = 0; i < indexer; ++i) {
- SetRegister(bb, instr.gpr0.Value() + i, GetTemporal(i));
+ SetRegister(bb, instr.gpr0.Value() + i, GetTemporary(i));
}
break;
}
+ case OpCode::Id::TLD: {
+ UNIMPLEMENTED_IF_MSG(instr.tld.aoffi, "AOFFI is not implemented");
+ UNIMPLEMENTED_IF_MSG(instr.tld.ms, "MS is not implemented");
+ UNIMPLEMENTED_IF_MSG(instr.tld.cl, "CL is not implemented");
+
+ if (instr.tld.nodep_flag) {
+ LOG_WARNING(HW_GPU, "TLD.NODEP implementation is incomplete");
+ }
+
+ WriteTexInstructionFloat(bb, instr, GetTldCode(instr));
+ break;
+ }
case OpCode::Id::TLDS: {
const Tegra::Shader::TextureType texture_type{instr.tlds.GetTextureType()};
const bool is_array{instr.tlds.IsArrayTexture()};
@@ -257,7 +269,13 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
LOG_WARNING(HW_GPU, "TLDS.NODEP implementation is incomplete");
}
- WriteTexsInstructionFloat(bb, instr, GetTldsCode(instr, texture_type, is_array));
+ const Node4 components = GetTldsCode(instr, texture_type, is_array);
+
+ if (instr.tlds.fp32_flag) {
+ WriteTexsInstructionFloat(bb, instr, components);
+ } else {
+ WriteTexsInstructionHalfFloat(bb, instr, components);
+ }
break;
}
default:
@@ -290,13 +308,9 @@ const Sampler& ShaderIR::GetSampler(const Tegra::Shader::Sampler& sampler, Textu
const Sampler& ShaderIR::GetBindlessSampler(const Tegra::Shader::Register& reg, TextureType type,
bool is_array, bool is_shadow) {
const Node sampler_register = GetRegister(reg);
- const Node base_sampler =
+ const auto [base_sampler, cbuf_index, cbuf_offset] =
TrackCbuf(sampler_register, global_code, static_cast<s64>(global_code.size()));
- const auto cbuf = std::get_if<CbufNode>(&*base_sampler);
- const auto cbuf_offset_imm = std::get_if<ImmediateNode>(&*cbuf->GetOffset());
- ASSERT(cbuf_offset_imm != nullptr);
- const auto cbuf_offset = cbuf_offset_imm->GetValue();
- const auto cbuf_index = cbuf->GetIndex();
+ ASSERT(base_sampler != nullptr);
const auto cbuf_key = (static_cast<u64>(cbuf_index) << 32) | static_cast<u64>(cbuf_offset);
// If this sampler has already been used, return the existing mapping.
@@ -322,11 +336,11 @@ void ShaderIR::WriteTexInstructionFloat(NodeBlock& bb, Instruction instr, const
// Skip disabled components
continue;
}
- SetTemporal(bb, dest_elem++, components[elem]);
+ SetTemporary(bb, dest_elem++, components[elem]);
}
// After writing values in temporaries, move them to the real registers
for (u32 i = 0; i < dest_elem; ++i) {
- SetRegister(bb, instr.gpr0.Value() + i, GetTemporal(i));
+ SetRegister(bb, instr.gpr0.Value() + i, GetTemporary(i));
}
}
@@ -339,17 +353,17 @@ void ShaderIR::WriteTexsInstructionFloat(NodeBlock& bb, Instruction instr,
for (u32 component = 0; component < 4; ++component) {
if (!instr.texs.IsComponentEnabled(component))
continue;
- SetTemporal(bb, dest_elem++, components[component]);
+ SetTemporary(bb, dest_elem++, components[component]);
}
for (u32 i = 0; i < dest_elem; ++i) {
if (i < 2) {
// Write the first two swizzle components to gpr0 and gpr0+1
- SetRegister(bb, instr.gpr0.Value() + i % 2, GetTemporal(i));
+ SetRegister(bb, instr.gpr0.Value() + i % 2, GetTemporary(i));
} else {
ASSERT(instr.texs.HasTwoDestinations());
// Write the rest of the swizzle components to gpr28 and gpr28+1
- SetRegister(bb, instr.gpr28.Value() + i % 2, GetTemporal(i));
+ SetRegister(bb, instr.gpr28.Value() + i % 2, GetTemporary(i));
}
}
}
@@ -377,11 +391,11 @@ void ShaderIR::WriteTexsInstructionHalfFloat(NodeBlock& bb, Instruction instr,
return;
}
- SetTemporal(bb, 0, first_value);
- SetTemporal(bb, 1, Operation(OperationCode::HPack2, values[2], values[3]));
+ SetTemporary(bb, 0, first_value);
+ SetTemporary(bb, 1, Operation(OperationCode::HPack2, values[2], values[3]));
- SetRegister(bb, instr.gpr0, GetTemporal(0));
- SetRegister(bb, instr.gpr28, GetTemporal(1));
+ SetRegister(bb, instr.gpr0, GetTemporary(0));
+ SetRegister(bb, instr.gpr28, GetTemporary(1));
}
Node4 ShaderIR::GetTextureCode(Instruction instr, TextureType texture_type,
@@ -575,6 +589,39 @@ Node4 ShaderIR::GetTld4Code(Instruction instr, TextureType texture_type, bool de
return values;
}
+Node4 ShaderIR::GetTldCode(Tegra::Shader::Instruction instr) {
+ const auto texture_type{instr.tld.texture_type};
+ const bool is_array{instr.tld.is_array};
+ const bool lod_enabled{instr.tld.GetTextureProcessMode() == TextureProcessMode::LL};
+ const std::size_t coord_count{GetCoordCount(texture_type)};
+
+ u64 gpr8_cursor{instr.gpr8.Value()};
+ const Node array_register{is_array ? GetRegister(gpr8_cursor++) : nullptr};
+
+ std::vector<Node> coords;
+ coords.reserve(coord_count);
+ for (std::size_t i = 0; i < coord_count; ++i) {
+ coords.push_back(GetRegister(gpr8_cursor++));
+ }
+
+ u64 gpr20_cursor{instr.gpr20.Value()};
+ // const Node bindless_register{is_bindless ? GetRegister(gpr20_cursor++) : nullptr};
+ const Node lod{lod_enabled ? GetRegister(gpr20_cursor++) : Immediate(0u)};
+ // const Node aoffi_register{is_aoffi ? GetRegister(gpr20_cursor++) : nullptr};
+ // const Node multisample{is_multisample ? GetRegister(gpr20_cursor++) : nullptr};
+
+ const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, false);
+
+ Node4 values;
+ for (u32 element = 0; element < values.size(); ++element) {
+ auto coords_copy = coords;
+ MetaTexture meta{sampler, array_register, {}, {}, {}, lod, {}, element};
+ values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy));
+ }
+
+ return values;
+}
+
Node4 ShaderIR::GetTldsCode(Instruction instr, TextureType texture_type, bool is_array) {
const std::size_t type_coord_count = GetCoordCount(texture_type);
const bool lod_enabled = instr.tlds.GetTextureProcessMode() == TextureProcessMode::LL;
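
GetTldCode consumes source registers with cursors: gpr8 yields the optional array index followed by the coordinates, and gpr20 yields the optional LOD (the commented-out reads mark operands this patch does not support yet). A sketch of that operand layout with hypothetical types:

#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>

struct TldOperands {
    std::optional<uint32_t> array_index;
    std::vector<uint32_t> coords;
    uint32_t lod;
};

TldOperands ReadTldOperands(const uint32_t* regs, uint32_t gpr8, uint32_t gpr20,
                            bool is_array, std::size_t coord_count, bool lod_enabled) {
    TldOperands out{};
    uint32_t cursor = gpr8; // consume consecutive registers, like gpr8_cursor
    if (is_array) {
        out.array_index = regs[cursor++];
    }
    for (std::size_t i = 0; i < coord_count; ++i) {
        out.coords.push_back(regs[cursor++]);
    }
    out.lod = lod_enabled ? regs[gpr20] : 0; // gpr20_cursor would advance similarly
    return out;
}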
diff --git a/src/video_core/shader/decode/warp.cpp b/src/video_core/shader/decode/warp.cpp
new file mode 100644
index 000000000..a8e481b3c
--- /dev/null
+++ b/src/video_core/shader/decode/warp.cpp
@@ -0,0 +1,102 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "video_core/engines/shader_bytecode.h"
+#include "video_core/shader/node_helper.h"
+#include "video_core/shader/shader_ir.h"
+
+namespace VideoCommon::Shader {
+
+using Tegra::Shader::Instruction;
+using Tegra::Shader::OpCode;
+using Tegra::Shader::Pred;
+using Tegra::Shader::ShuffleOperation;
+using Tegra::Shader::VoteOperation;
+
+namespace {
+OperationCode GetOperationCode(VoteOperation vote_op) {
+ switch (vote_op) {
+ case VoteOperation::All:
+ return OperationCode::VoteAll;
+ case VoteOperation::Any:
+ return OperationCode::VoteAny;
+ case VoteOperation::Eq:
+ return OperationCode::VoteEqual;
+ default:
+ UNREACHABLE_MSG("Invalid vote operation={}", static_cast<u64>(vote_op));
+ return OperationCode::VoteAll;
+ }
+}
+} // Anonymous namespace
+
+u32 ShaderIR::DecodeWarp(NodeBlock& bb, u32 pc) {
+ const Instruction instr = {program_code[pc]};
+ const auto opcode = OpCode::Decode(instr);
+
+ switch (opcode->get().GetId()) {
+ case OpCode::Id::VOTE: {
+ const Node value = GetPredicate(instr.vote.value, instr.vote.negate_value != 0);
+ const Node active = Operation(OperationCode::BallotThread, value);
+ const Node vote = Operation(GetOperationCode(instr.vote.operation), value);
+ SetRegister(bb, instr.gpr0, active);
+ SetPredicate(bb, instr.vote.dest_pred, vote);
+ break;
+ }
+ case OpCode::Id::SHFL: {
+ Node mask = instr.shfl.is_mask_imm ? Immediate(static_cast<u32>(instr.shfl.mask_imm))
+ : GetRegister(instr.gpr39);
+ Node width = [&] {
+ // Convert the obscure SHFL mask back into GL_NV_shader_thread_shuffle's width. This
+ // was derived by reversing Nvidia's math. It won't work in all cases, since SHFL takes
+ // parameters that don't map cleanly onto GLSL's interface, but it should cover the
+ // code emitted by Nvidia's compiler.
+ if (instr.shfl.operation == ShuffleOperation::Up) {
+ return Operation(
+ OperationCode::ILogicalShiftRight,
+ Operation(OperationCode::IAdd, std::move(mask), Immediate(-0x2000)),
+ Immediate(8));
+ } else {
+ return Operation(OperationCode::ILogicalShiftRight,
+ Operation(OperationCode::IAdd, Immediate(0x201F),
+ Operation(OperationCode::INegate, std::move(mask))),
+ Immediate(8));
+ }
+ }();
+
+ const auto [operation, in_range] = [instr]() -> std::pair<OperationCode, OperationCode> {
+ switch (instr.shfl.operation) {
+ case ShuffleOperation::Idx:
+ return {OperationCode::ShuffleIndexed, OperationCode::InRangeShuffleIndexed};
+ case ShuffleOperation::Up:
+ return {OperationCode::ShuffleUp, OperationCode::InRangeShuffleUp};
+ case ShuffleOperation::Down:
+ return {OperationCode::ShuffleDown, OperationCode::InRangeShuffleDown};
+ case ShuffleOperation::Bfly:
+ return {OperationCode::ShuffleButterfly, OperationCode::InRangeShuffleButterfly};
+ }
+ UNREACHABLE_MSG("Invalid SHFL operation: {}",
+ static_cast<u64>(instr.shfl.operation.Value()));
+ return {};
+ }();
+
+ // Setting the predicate before the register is intentional to avoid overwriting.
+ Node index = instr.shfl.is_index_imm ? Immediate(static_cast<u32>(instr.shfl.index_imm))
+ : GetRegister(instr.gpr20);
+ SetPredicate(bb, instr.shfl.pred48, Operation(in_range, index, width));
+ SetRegister(
+ bb, instr.gpr0,
+ Operation(operation, GetRegister(instr.gpr8), std::move(index), std::move(width)));
+ break;
+ }
+ default:
+ UNIMPLEMENTED_MSG("Unhandled warp instruction: {}", opcode->get().GetName());
+ break;
+ }
+
+ return pc;
+}
+
+} // namespace VideoCommon::Shader
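
In scalar form, the width recovery above is two affine transforms of the mask, derived (as the comment notes) by reversing Nvidia's encoding:

#include <cstdint>

uint32_t ShflWidth(uint32_t mask, bool is_up) {
    if (is_up) {
        return (mask - 0x2000) >> 8; // ShuffleOperation::Up
    }
    return (0x201f - mask) >> 8;     // Idx, Down, and Bfly
}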
diff --git a/src/video_core/shader/decode/xmad.cpp b/src/video_core/shader/decode/xmad.cpp
index 93dee77d1..206961909 100644
--- a/src/video_core/shader/decode/xmad.cpp
+++ b/src/video_core/shader/decode/xmad.cpp
@@ -73,8 +73,8 @@ u32 ShaderIR::DecodeXmad(NodeBlock& bb, u32 pc) {
if (is_psl) {
product = Operation(OperationCode::ILogicalShiftLeft, NO_PRECISE, product, Immediate(16));
}
- SetTemporal(bb, 0, product);
- product = GetTemporal(0);
+ SetTemporary(bb, 0, product);
+ product = GetTemporary(0);
const Node original_c = op_c;
const Tegra::Shader::XmadMode set_mode = mode; // Workaround to clang compile error
@@ -98,13 +98,13 @@ u32 ShaderIR::DecodeXmad(NodeBlock& bb, u32 pc) {
}
}();
- SetTemporal(bb, 1, op_c);
- op_c = GetTemporal(1);
+ SetTemporary(bb, 1, op_c);
+ op_c = GetTemporary(1);
// TODO(Rodrigo): Use an appropriate sign for this operation
Node sum = Operation(OperationCode::IAdd, product, op_c);
- SetTemporal(bb, 2, sum);
- sum = GetTemporal(2);
+ SetTemporary(bb, 2, sum);
+ sum = GetTemporary(2);
if (is_merge) {
const Node a = BitfieldExtract(sum, 0, 16);
const Node b =
diff --git a/src/video_core/shader/node.h b/src/video_core/shader/node.h
index c002f90f9..abf2cb1ab 100644
--- a/src/video_core/shader/node.h
+++ b/src/video_core/shader/node.h
@@ -7,6 +7,7 @@
#include <array>
#include <cstddef>
#include <memory>
+#include <optional>
#include <string>
#include <tuple>
#include <utility>
@@ -30,6 +31,8 @@ enum class OperationCode {
FNegate, /// (MetaArithmetic, float a) -> float
FAbsolute, /// (MetaArithmetic, float a) -> float
FClamp, /// (MetaArithmetic, float value, float min, float max) -> float
+ FCastHalf0, /// (MetaArithmetic, f16vec2 a) -> float
+ FCastHalf1, /// (MetaArithmetic, f16vec2 a) -> float
FMin, /// (MetaArithmetic, float a, float b) -> float
FMax, /// (MetaArithmetic, float a, float b) -> float
FCos, /// (MetaArithmetic, float a) -> float
@@ -83,17 +86,18 @@ enum class OperationCode {
UBitfieldExtract, /// (MetaArithmetic, uint value, int offset, int count) -> uint
UBitCount, /// (MetaArithmetic, uint) -> uint
- HAdd, /// (MetaArithmetic, f16vec2 a, f16vec2 b) -> f16vec2
- HMul, /// (MetaArithmetic, f16vec2 a, f16vec2 b) -> f16vec2
- HFma, /// (MetaArithmetic, f16vec2 a, f16vec2 b, f16vec2 c) -> f16vec2
- HAbsolute, /// (f16vec2 a) -> f16vec2
- HNegate, /// (f16vec2 a, bool first, bool second) -> f16vec2
- HClamp, /// (f16vec2 src, float min, float max) -> f16vec2
- HUnpack, /// (Tegra::Shader::HalfType, T value) -> f16vec2
- HMergeF32, /// (f16vec2 src) -> float
- HMergeH0, /// (f16vec2 dest, f16vec2 src) -> f16vec2
- HMergeH1, /// (f16vec2 dest, f16vec2 src) -> f16vec2
- HPack2, /// (float a, float b) -> f16vec2
+ HAdd, /// (MetaArithmetic, f16vec2 a, f16vec2 b) -> f16vec2
+ HMul, /// (MetaArithmetic, f16vec2 a, f16vec2 b) -> f16vec2
+ HFma, /// (MetaArithmetic, f16vec2 a, f16vec2 b, f16vec2 c) -> f16vec2
+ HAbsolute, /// (f16vec2 a) -> f16vec2
+ HNegate, /// (f16vec2 a, bool first, bool second) -> f16vec2
+ HClamp, /// (f16vec2 src, float min, float max) -> f16vec2
+ HCastFloat, /// (MetaArithmetic, float a) -> f16vec2
+ HUnpack, /// (Tegra::Shader::HalfType, T value) -> f16vec2
+ HMergeF32, /// (f16vec2 src) -> float
+ HMergeH0, /// (f16vec2 dest, f16vec2 src) -> f16vec2
+ HMergeH1, /// (f16vec2 dest, f16vec2 src) -> f16vec2
+ HPack2, /// (float a, float b) -> f16vec2
LogicalAssign, /// (bool& dst, bool src) -> void
LogicalAnd, /// (bool a, bool b) -> bool
@@ -101,8 +105,7 @@ enum class OperationCode {
LogicalXor, /// (bool a, bool b) -> bool
LogicalNegate, /// (bool a) -> bool
LogicalPick2, /// (bool2 pair, uint index) -> bool
- LogicalAll2, /// (bool2 a) -> bool
- LogicalAny2, /// (bool2 a) -> bool
+ LogicalAnd2, /// (bool2 a) -> bool
LogicalFLessThan, /// (float a, float b) -> bool
LogicalFEqual, /// (float a, float b) -> bool
@@ -146,11 +149,21 @@ enum class OperationCode {
TextureQueryLod, /// (MetaTexture, float[N] coords) -> float4
TexelFetch, /// (MetaTexture, int[N], int) -> float4
- Branch, /// (uint branch_target) -> void
- PushFlowStack, /// (uint branch_target) -> void
- PopFlowStack, /// () -> void
- Exit, /// () -> void
- Discard, /// () -> void
+ ImageStore, /// (MetaImage, int[N] values) -> void
+ AtomicImageAdd, /// (MetaImage, int[N] coords) -> void
+ AtomicImageMin, /// (MetaImage, int[N] coords) -> void
+ AtomicImageMax, /// (MetaImage, int[N] coords) -> void
+ AtomicImageAnd, /// (MetaImage, int[N] coords) -> void
+ AtomicImageOr, /// (MetaImage, int[N] coords) -> void
+ AtomicImageXor, /// (MetaImage, int[N] coords) -> void
+ AtomicImageExchange, /// (MetaImage, int[N] coords) -> void
+
+ Branch, /// (uint branch_target) -> void
+ BranchIndirect, /// (uint branch_target) -> void
+ PushFlowStack, /// (uint branch_target) -> void
+ PopFlowStack, /// () -> void
+ Exit, /// () -> void
+ Discard, /// () -> void
EmitVertex, /// () -> void
EndPrimitive, /// () -> void
@@ -163,6 +176,21 @@ enum class OperationCode {
WorkGroupIdY, /// () -> uint
WorkGroupIdZ, /// () -> uint
+ BallotThread, /// (bool) -> uint
+ VoteAll, /// (bool) -> bool
+ VoteAny, /// (bool) -> bool
+ VoteEqual, /// (bool) -> bool
+
+ ShuffleIndexed, /// (uint value, uint index, uint width) -> uint
+ ShuffleUp, /// (uint value, uint index, uint width) -> uint
+ ShuffleDown, /// (uint value, uint index, uint width) -> uint
+ ShuffleButterfly, /// (uint value, uint index, uint width) -> uint
+
+ InRangeShuffleIndexed, /// (uint index, uint width) -> bool
+ InRangeShuffleUp, /// (uint index, uint width) -> bool
+ InRangeShuffleDown, /// (uint index, uint width) -> bool
+ InRangeShuffleButterfly, /// (uint index, uint width) -> bool
+
Amount,
};
@@ -174,6 +202,11 @@ enum class InternalFlag {
Amount = 4,
};
+enum class MetaStackClass {
+ Ssy,
+ Pbk,
+};
+
class OperationNode;
class ConditionalNode;
class GprNode;
@@ -183,12 +216,13 @@ class PredicateNode;
class AbufNode;
class CbufNode;
class LmemNode;
+class SmemNode;
class GmemNode;
class CommentNode;
using NodeData =
std::variant<OperationNode, ConditionalNode, GprNode, ImmediateNode, InternalFlagNode,
- PredicateNode, AbufNode, CbufNode, LmemNode, GmemNode, CommentNode>;
+ PredicateNode, AbufNode, CbufNode, LmemNode, SmemNode, GmemNode, CommentNode>;
using Node = std::shared_ptr<NodeData>;
using Node4 = std::array<Node, 4>;
using NodeBlock = std::vector<Node>;
@@ -258,6 +292,87 @@ private:
bool is_bindless{}; ///< Whether this sampler belongs to a bindless texture or not.
};
+class Image final {
+public:
+ constexpr explicit Image(std::size_t offset, std::size_t index, Tegra::Shader::ImageType type,
+ std::optional<Tegra::Shader::ImageAtomicSize> size)
+ : offset{offset}, index{index}, type{type}, is_bindless{false}, size{size} {}
+
+ constexpr explicit Image(u32 cbuf_index, u32 cbuf_offset, std::size_t index,
+ Tegra::Shader::ImageType type,
+ std::optional<Tegra::Shader::ImageAtomicSize> size)
+ : offset{(static_cast<u64>(cbuf_index) << 32) | cbuf_offset}, index{index}, type{type},
+ is_bindless{true}, size{size} {}
+
+ constexpr explicit Image(std::size_t offset, std::size_t index, Tegra::Shader::ImageType type,
+ bool is_bindless, bool is_written, bool is_read,
+ std::optional<Tegra::Shader::ImageAtomicSize> size)
+ : offset{offset}, index{index}, type{type}, is_bindless{is_bindless},
+ is_written{is_written}, is_read{is_read}, size{size} {}
+
+ void MarkWrite() {
+ is_written = true;
+ }
+
+ void MarkRead() {
+ is_read = true;
+ }
+
+ void SetSize(Tegra::Shader::ImageAtomicSize size_) {
+ size = size_;
+ }
+
+ constexpr std::size_t GetOffset() const {
+ return offset;
+ }
+
+ constexpr std::size_t GetIndex() const {
+ return index;
+ }
+
+ constexpr Tegra::Shader::ImageType GetType() const {
+ return type;
+ }
+
+ constexpr bool IsBindless() const {
+ return is_bindless;
+ }
+
+ constexpr bool IsWritten() const {
+ return is_written;
+ }
+
+ constexpr bool IsRead() const {
+ return is_read;
+ }
+
+ constexpr std::pair<u32, u32> GetBindlessCBuf() const {
+ return {static_cast<u32>(offset >> 32), static_cast<u32>(offset)};
+ }
+
+ constexpr bool IsSizeKnown() const {
+ return size.has_value();
+ }
+
+ constexpr Tegra::Shader::ImageAtomicSize GetSize() const {
+ return size.value();
+ }
+
+ constexpr bool operator<(const Image& rhs) const {
+ return std::tie(offset, index, type, size, is_bindless) <
+ std::tie(rhs.offset, rhs.index, rhs.type, rhs.size, rhs.is_bindless);
+ }
+
+private:
+ u64 offset{};
+ std::size_t index{};
+ Tegra::Shader::ImageType type{};
+ bool is_bindless{};
+ bool is_written{};
+ bool is_read{};
+ std::optional<Tegra::Shader::ImageAtomicSize> size{};
+};
+
struct GlobalMemoryBase {
u32 cbuf_index{};
u32 cbuf_offset{};
@@ -284,8 +399,14 @@ struct MetaTexture {
u32 element{};
};
+struct MetaImage {
+ const Image& image;
+ std::vector<Node> values;
+};
+
/// Parameters that modify an operation but are not part of any particular operand
-using Meta = std::variant<MetaArithmetic, MetaTexture, Tegra::Shader::HalfType>;
+using Meta =
+ std::variant<MetaArithmetic, MetaTexture, MetaImage, MetaStackClass, Tegra::Shader::HalfType>;
/// Holds any kind of operation that can be done in the IR
class OperationNode final {
@@ -473,6 +594,19 @@ private:
Node address;
};
+/// Shared memory node
+class SmemNode final {
+public:
+ explicit SmemNode(Node address) : address{std::move(address)} {}
+
+ const Node& GetAddress() const {
+ return address;
+ }
+
+private:
+ Node address;
+};
+
/// Global memory node
class GmemNode final {
public:
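
The bindless Image constructor above packs the constant buffer slot into the 64-bit offset
field, and GetBindlessCBuf splits it back apart. A self-contained round trip of that
encoding:

    #include <cassert>
    #include <cstdint>
    #include <utility>

    int main() {
        const std::uint32_t cbuf_index = 3; // illustrative values
        const std::uint32_t cbuf_offset = 0x120;
        const std::uint64_t packed =
            (static_cast<std::uint64_t>(cbuf_index) << 32) | cbuf_offset;
        // Image::GetBindlessCBuf returns {offset >> 32, static_cast<u32>(offset)}.
        const std::pair<std::uint32_t, std::uint32_t> unpacked{
            static_cast<std::uint32_t>(packed >> 32), static_cast<std::uint32_t>(packed)};
        assert(unpacked.first == cbuf_index);
        assert(unpacked.second == cbuf_offset);
    }
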
diff --git a/src/video_core/shader/node_helper.cpp b/src/video_core/shader/node_helper.cpp
index 6fccbbba3..b3dcd291c 100644
--- a/src/video_core/shader/node_helper.cpp
+++ b/src/video_core/shader/node_helper.cpp
@@ -12,7 +12,7 @@
namespace VideoCommon::Shader {
Node Conditional(Node condition, std::vector<Node> code) {
- return MakeNode<ConditionalNode>(condition, std::move(code));
+ return MakeNode<ConditionalNode>(std::move(condition), std::move(code));
}
Node Comment(std::string text) {
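
The std::move added here is representative of dozens of call sites in this change: Node is
a std::shared_ptr alias, so copying one costs an atomic reference-count increment, while
moving transfers the existing reference for free. A standalone illustration:

    #include <memory>
    #include <utility>
    #include <vector>

    using Node = std::shared_ptr<int>; // stand-in for the IR's Node alias

    void StoreCopy(std::vector<Node>& out, Node n) {
        out.push_back(n); // copy: bumps the atomic refcount
    }

    void StoreMove(std::vector<Node>& out, Node n) {
        out.push_back(std::move(n)); // move: steals the reference, no refcount traffic
    }

    int main() {
        std::vector<Node> nodes;
        Node node = std::make_shared<int>(42);
        StoreCopy(nodes, node);            // node remains valid
        StoreMove(nodes, std::move(node)); // node is now null
    }
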
diff --git a/src/video_core/shader/shader_ir.cpp b/src/video_core/shader/shader_ir.cpp
index 11b545cca..bbbab0bca 100644
--- a/src/video_core/shader/shader_ir.cpp
+++ b/src/video_core/shader/shader_ir.cpp
@@ -22,8 +22,8 @@ using Tegra::Shader::PredCondition;
using Tegra::Shader::PredOperation;
using Tegra::Shader::Register;
-ShaderIR::ShaderIR(const ProgramCode& program_code, u32 main_offset)
- : program_code{program_code}, main_offset{main_offset} {
+ShaderIR::ShaderIR(const ProgramCode& program_code, u32 main_offset, const std::size_t size)
+ : program_code{program_code}, main_offset{main_offset}, program_size{size} {
Decode();
}
@@ -61,8 +61,17 @@ Node ShaderIR::GetConstBufferIndirect(u64 index_, u64 offset_, Node node) {
const auto [entry, is_new] = used_cbufs.try_emplace(index);
entry->second.MarkAsUsedIndirect();
- const Node final_offset = Operation(OperationCode::UAdd, NO_PRECISE, node, Immediate(offset));
- return MakeNode<CbufNode>(index, final_offset);
+ Node final_offset = [&] {
+ // Attempt to inline the constant buffer access when its offset has no variable
+ // component. This is done so that LDC calls can be tracked.
+ if (const auto gpr = std::get_if<GprNode>(&*node)) {
+ if (gpr->GetIndex() == Register::ZeroIndex) {
+ return Immediate(offset);
+ }
+ }
+ return Operation(OperationCode::UAdd, NO_PRECISE, std::move(node), Immediate(offset));
+ }();
+ return MakeNode<CbufNode>(index, std::move(final_offset));
}
Node ShaderIR::GetPredicate(u64 pred_, bool negated) {
@@ -80,7 +89,7 @@ Node ShaderIR::GetPredicate(bool immediate) {
Node ShaderIR::GetInputAttribute(Attribute::Index index, u64 element, Node buffer) {
used_input_attributes.emplace(index);
- return MakeNode<AbufNode>(index, static_cast<u32>(element), buffer);
+ return MakeNode<AbufNode>(index, static_cast<u32>(element), std::move(buffer));
}
Node ShaderIR::GetPhysicalInputAttribute(Tegra::Shader::Register physical_address, Node buffer) {
@@ -89,6 +98,22 @@ Node ShaderIR::GetPhysicalInputAttribute(Tegra::Shader::Register physical_addres
}
Node ShaderIR::GetOutputAttribute(Attribute::Index index, u64 element, Node buffer) {
+ if (index == Attribute::Index::LayerViewportPointSize) {
+ switch (element) {
+ case 0:
+ UNIMPLEMENTED();
+ break;
+ case 1:
+ uses_layer = true;
+ break;
+ case 2:
+ uses_viewport_index = true;
+ break;
+ case 3:
+ uses_point_size = true;
+ break;
+ }
+ }
if (index == Attribute::Index::ClipDistances0123 ||
index == Attribute::Index::ClipDistances4567) {
const auto clip_index =
@@ -97,7 +122,7 @@ Node ShaderIR::GetOutputAttribute(Attribute::Index index, u64 element, Node buff
}
used_output_attributes.insert(index);
- return MakeNode<AbufNode>(index, static_cast<u32>(element), buffer);
+ return MakeNode<AbufNode>(index, static_cast<u32>(element), std::move(buffer));
}
Node ShaderIR::GetInternalFlag(InternalFlag flag, bool negated) {
@@ -109,19 +134,23 @@ Node ShaderIR::GetInternalFlag(InternalFlag flag, bool negated) {
}
Node ShaderIR::GetLocalMemory(Node address) {
- return MakeNode<LmemNode>(address);
+ return MakeNode<LmemNode>(std::move(address));
+}
+
+Node ShaderIR::GetSharedMemory(Node address) {
+ return MakeNode<SmemNode>(std::move(address));
}
-Node ShaderIR::GetTemporal(u32 id) {
+Node ShaderIR::GetTemporary(u32 id) {
return GetRegister(Register::ZeroIndex + 1 + id);
}
Node ShaderIR::GetOperandAbsNegFloat(Node value, bool absolute, bool negate) {
if (absolute) {
- value = Operation(OperationCode::FAbsolute, NO_PRECISE, value);
+ value = Operation(OperationCode::FAbsolute, NO_PRECISE, std::move(value));
}
if (negate) {
- value = Operation(OperationCode::FNegate, NO_PRECISE, value);
+ value = Operation(OperationCode::FNegate, NO_PRECISE, std::move(value));
}
return value;
}
@@ -130,24 +159,26 @@ Node ShaderIR::GetSaturatedFloat(Node value, bool saturate) {
if (!saturate) {
return value;
}
- const Node positive_zero = Immediate(std::copysignf(0, 1));
- const Node positive_one = Immediate(1.0f);
- return Operation(OperationCode::FClamp, NO_PRECISE, value, positive_zero, positive_one);
+
+ Node positive_zero = Immediate(std::copysignf(0, 1));
+ Node positive_one = Immediate(1.0f);
+ return Operation(OperationCode::FClamp, NO_PRECISE, std::move(value), std::move(positive_zero),
+ std::move(positive_one));
}
-Node ShaderIR::ConvertIntegerSize(Node value, Tegra::Shader::Register::Size size, bool is_signed) {
+Node ShaderIR::ConvertIntegerSize(Node value, Register::Size size, bool is_signed) {
switch (size) {
case Register::Size::Byte:
- value = SignedOperation(OperationCode::ILogicalShiftLeft, is_signed, NO_PRECISE, value,
- Immediate(24));
- value = SignedOperation(OperationCode::IArithmeticShiftRight, is_signed, NO_PRECISE, value,
- Immediate(24));
+ value = SignedOperation(OperationCode::ILogicalShiftLeft, is_signed, NO_PRECISE,
+ std::move(value), Immediate(24));
+ value = SignedOperation(OperationCode::IArithmeticShiftRight, is_signed, NO_PRECISE,
+ std::move(value), Immediate(24));
return value;
case Register::Size::Short:
- value = SignedOperation(OperationCode::ILogicalShiftLeft, is_signed, NO_PRECISE, value,
- Immediate(16));
- value = SignedOperation(OperationCode::IArithmeticShiftRight, is_signed, NO_PRECISE, value,
- Immediate(16));
+ value = SignedOperation(OperationCode::ILogicalShiftLeft, is_signed, NO_PRECISE,
+ std::move(value), Immediate(16));
+ value = SignedOperation(OperationCode::IArithmeticShiftRight, is_signed, NO_PRECISE,
+ std::move(value), Immediate(16));
case Register::Size::Word:
// Default - do nothing
return value;
@@ -163,27 +194,29 @@ Node ShaderIR::GetOperandAbsNegInteger(Node value, bool absolute, bool negate, b
return value;
}
if (absolute) {
- value = Operation(OperationCode::IAbsolute, NO_PRECISE, value);
+ value = Operation(OperationCode::IAbsolute, NO_PRECISE, std::move(value));
}
if (negate) {
- value = Operation(OperationCode::INegate, NO_PRECISE, value);
+ value = Operation(OperationCode::INegate, NO_PRECISE, std::move(value));
}
return value;
}
Node ShaderIR::UnpackHalfImmediate(Instruction instr, bool has_negation) {
- const Node value = Immediate(instr.half_imm.PackImmediates());
+ Node value = Immediate(instr.half_imm.PackImmediates());
if (!has_negation) {
return value;
}
- const Node first_negate = GetPredicate(instr.half_imm.first_negate != 0);
- const Node second_negate = GetPredicate(instr.half_imm.second_negate != 0);
- return Operation(OperationCode::HNegate, NO_PRECISE, value, first_negate, second_negate);
+ Node first_negate = GetPredicate(instr.half_imm.first_negate != 0);
+ Node second_negate = GetPredicate(instr.half_imm.second_negate != 0);
+
+ return Operation(OperationCode::HNegate, NO_PRECISE, std::move(value), std::move(first_negate),
+ std::move(second_negate));
}
Node ShaderIR::UnpackHalfFloat(Node value, Tegra::Shader::HalfType type) {
- return Operation(OperationCode::HUnpack, type, value);
+ return Operation(OperationCode::HUnpack, type, std::move(value));
}
Node ShaderIR::HalfMerge(Node dest, Node src, Tegra::Shader::HalfMerge merge) {
@@ -191,11 +224,11 @@ Node ShaderIR::HalfMerge(Node dest, Node src, Tegra::Shader::HalfMerge merge) {
case Tegra::Shader::HalfMerge::H0_H1:
return src;
case Tegra::Shader::HalfMerge::F32:
- return Operation(OperationCode::HMergeF32, src);
+ return Operation(OperationCode::HMergeF32, std::move(src));
case Tegra::Shader::HalfMerge::Mrg_H0:
- return Operation(OperationCode::HMergeH0, dest, src);
+ return Operation(OperationCode::HMergeH0, std::move(dest), std::move(src));
case Tegra::Shader::HalfMerge::Mrg_H1:
- return Operation(OperationCode::HMergeH1, dest, src);
+ return Operation(OperationCode::HMergeH1, std::move(dest), std::move(src));
}
UNREACHABLE();
return src;
@@ -203,10 +236,10 @@ Node ShaderIR::HalfMerge(Node dest, Node src, Tegra::Shader::HalfMerge merge) {
Node ShaderIR::GetOperandAbsNegHalf(Node value, bool absolute, bool negate) {
if (absolute) {
- value = Operation(OperationCode::HAbsolute, NO_PRECISE, value);
+ value = Operation(OperationCode::HAbsolute, NO_PRECISE, std::move(value));
}
if (negate) {
- value = Operation(OperationCode::HNegate, NO_PRECISE, value, GetPredicate(true),
+ value = Operation(OperationCode::HNegate, NO_PRECISE, std::move(value), GetPredicate(true),
GetPredicate(true));
}
return value;
@@ -216,9 +249,11 @@ Node ShaderIR::GetSaturatedHalfFloat(Node value, bool saturate) {
if (!saturate) {
return value;
}
- const Node positive_zero = Immediate(std::copysignf(0, 1));
- const Node positive_one = Immediate(1.0f);
- return Operation(OperationCode::HClamp, NO_PRECISE, value, positive_zero, positive_one);
+
+ Node positive_zero = Immediate(std::copysignf(0, 1));
+ Node positive_one = Immediate(1.0f);
+ return Operation(OperationCode::HClamp, NO_PRECISE, std::move(value), std::move(positive_zero),
+ std::move(positive_one));
}
Node ShaderIR::GetPredicateComparisonFloat(PredCondition condition, Node op_a, Node op_b) {
@@ -246,7 +281,6 @@ Node ShaderIR::GetPredicateComparisonFloat(PredCondition condition, Node op_a, N
condition == PredCondition::LessEqualWithNan ||
condition == PredCondition::GreaterThanWithNan ||
condition == PredCondition::GreaterEqualWithNan) {
-
predicate = Operation(OperationCode::LogicalOr, predicate,
Operation(OperationCode::LogicalFIsNan, op_a));
predicate = Operation(OperationCode::LogicalOr, predicate,
@@ -275,7 +309,8 @@ Node ShaderIR::GetPredicateComparisonInteger(PredCondition condition, bool is_si
UNIMPLEMENTED_IF_MSG(comparison == PredicateComparisonTable.end(),
"Unknown predicate comparison operation");
- Node predicate = SignedOperation(comparison->second, is_signed, NO_PRECISE, op_a, op_b);
+ Node predicate = SignedOperation(comparison->second, is_signed, NO_PRECISE, std::move(op_a),
+ std::move(op_b));
UNIMPLEMENTED_IF_MSG(condition == PredCondition::LessThanWithNan ||
condition == PredCondition::NotEqualWithNan ||
@@ -305,9 +340,7 @@ Node ShaderIR::GetPredicateComparisonHalf(Tegra::Shader::PredCondition condition
UNIMPLEMENTED_IF_MSG(comparison == PredicateComparisonTable.end(),
"Unknown predicate comparison operation");
- const Node predicate = Operation(comparison->second, NO_PRECISE, op_a, op_b);
-
- return predicate;
+ return Operation(comparison->second, NO_PRECISE, std::move(op_a), std::move(op_b));
}
OperationCode ShaderIR::GetPredicateCombiner(PredOperation operation) {
@@ -333,31 +366,37 @@ Node ShaderIR::GetConditionCode(Tegra::Shader::ConditionCode cc) {
}
void ShaderIR::SetRegister(NodeBlock& bb, Register dest, Node src) {
- bb.push_back(Operation(OperationCode::Assign, GetRegister(dest), src));
+ bb.push_back(Operation(OperationCode::Assign, GetRegister(dest), std::move(src)));
}
void ShaderIR::SetPredicate(NodeBlock& bb, u64 dest, Node src) {
- bb.push_back(Operation(OperationCode::LogicalAssign, GetPredicate(dest), src));
+ bb.push_back(Operation(OperationCode::LogicalAssign, GetPredicate(dest), std::move(src)));
}
void ShaderIR::SetInternalFlag(NodeBlock& bb, InternalFlag flag, Node value) {
- bb.push_back(Operation(OperationCode::LogicalAssign, GetInternalFlag(flag), value));
+ bb.push_back(Operation(OperationCode::LogicalAssign, GetInternalFlag(flag), std::move(value)));
}
void ShaderIR::SetLocalMemory(NodeBlock& bb, Node address, Node value) {
- bb.push_back(Operation(OperationCode::Assign, GetLocalMemory(address), value));
+ bb.push_back(
+ Operation(OperationCode::Assign, GetLocalMemory(std::move(address)), std::move(value)));
+}
+
+void ShaderIR::SetSharedMemory(NodeBlock& bb, Node address, Node value) {
+ bb.push_back(
+ Operation(OperationCode::Assign, GetSharedMemory(std::move(address)), std::move(value)));
}
-void ShaderIR::SetTemporal(NodeBlock& bb, u32 id, Node value) {
- SetRegister(bb, Register::ZeroIndex + 1 + id, value);
+void ShaderIR::SetTemporary(NodeBlock& bb, u32 id, Node value) {
+ SetRegister(bb, Register::ZeroIndex + 1 + id, std::move(value));
}
void ShaderIR::SetInternalFlagsFromFloat(NodeBlock& bb, Node value, bool sets_cc) {
if (!sets_cc) {
return;
}
- const Node zerop = Operation(OperationCode::LogicalFEqual, value, Immediate(0.0f));
- SetInternalFlag(bb, InternalFlag::Zero, zerop);
+ Node zerop = Operation(OperationCode::LogicalFEqual, std::move(value), Immediate(0.0f));
+ SetInternalFlag(bb, InternalFlag::Zero, std::move(zerop));
LOG_WARNING(HW_GPU, "Condition codes implementation is incomplete");
}
@@ -365,13 +404,18 @@ void ShaderIR::SetInternalFlagsFromInteger(NodeBlock& bb, Node value, bool sets_
if (!sets_cc) {
return;
}
- const Node zerop = Operation(OperationCode::LogicalIEqual, value, Immediate(0));
- SetInternalFlag(bb, InternalFlag::Zero, zerop);
+ Node zerop = Operation(OperationCode::LogicalIEqual, std::move(value), Immediate(0));
+ SetInternalFlag(bb, InternalFlag::Zero, std::move(zerop));
LOG_WARNING(HW_GPU, "Condition codes implementation is incomplete");
}
Node ShaderIR::BitfieldExtract(Node value, u32 offset, u32 bits) {
- return Operation(OperationCode::UBitfieldExtract, NO_PRECISE, value, Immediate(offset),
+ return Operation(OperationCode::UBitfieldExtract, NO_PRECISE, std::move(value),
+ Immediate(offset), Immediate(bits));
+}
+
+Node ShaderIR::BitfieldInsert(Node base, Node insert, u32 offset, u32 bits) {
+ return Operation(OperationCode::UBitfieldInsert, NO_PRECISE, base, insert, Immediate(offset),
Immediate(bits));
}
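
BitfieldInsert complements BitfieldExtract, corresponding to GLSL's bitfieldExtract and
bitfieldInsert. A reference sketch of the integer semantics (bits == 32 handled explicitly
to avoid an undefined shift):

    #include <cstdint>

    std::uint32_t ExtractBits(std::uint32_t value, std::uint32_t offset, std::uint32_t bits) {
        const std::uint32_t mask = bits >= 32 ? ~0U : (1U << bits) - 1;
        return (value >> offset) & mask;
    }

    std::uint32_t InsertBits(std::uint32_t base, std::uint32_t insert, std::uint32_t offset,
                             std::uint32_t bits) {
        const std::uint32_t mask = (bits >= 32 ? ~0U : (1U << bits) - 1) << offset;
        return (base & ~mask) | ((insert << offset) & mask);
    }
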
diff --git a/src/video_core/shader/shader_ir.h b/src/video_core/shader/shader_ir.h
index edcf2288e..6aed9bb84 100644
--- a/src/video_core/shader/shader_ir.h
+++ b/src/video_core/shader/shader_ir.h
@@ -5,13 +5,10 @@
#pragma once
#include <array>
-#include <cstring>
#include <map>
#include <optional>
#include <set>
-#include <string>
#include <tuple>
-#include <variant>
#include <vector>
#include "common/common_types.h"
@@ -22,18 +19,12 @@
namespace VideoCommon::Shader {
+struct ShaderBlock;
+
using ProgramCode = std::vector<u64>;
constexpr u32 MAX_PROGRAM_LENGTH = 0x1000;
-/// Describes the behaviour of code path of a given entry point and a return point.
-enum class ExitMethod {
- Undetermined, ///< Internal value. Only occur when analyzing JMP loop.
- AlwaysReturn, ///< All code paths reach the return point.
- Conditional, ///< Code path reaches the return point or an END instruction conditionally.
- AlwaysEnd, ///< All code paths reach a END instruction.
-};
-
class ConstBuffer {
public:
explicit ConstBuffer(u32 max_offset, bool is_indirect)
@@ -73,7 +64,7 @@ struct GlobalMemoryUsage {
class ShaderIR final {
public:
- explicit ShaderIR(const ProgramCode& program_code, u32 main_offset);
+ explicit ShaderIR(const ProgramCode& program_code, u32 main_offset, std::size_t size);
~ShaderIR();
const std::map<u32, NodeBlock>& GetBasicBlocks() const {
@@ -104,6 +95,10 @@ public:
return used_samplers;
}
+ const std::map<u64, Image>& GetImages() const {
+ return used_images;
+ }
+
const std::array<bool, Tegra::Engines::Maxwell3D::Regs::NumClipDistances>& GetClipDistances()
const {
return used_clip_distances;
@@ -117,6 +112,18 @@ public:
return static_cast<std::size_t>(coverage_end * sizeof(u64));
}
+ bool UsesLayer() const {
+ return uses_layer;
+ }
+
+ bool UsesViewportIndex() const {
+ return uses_viewport_index;
+ }
+
+ bool UsesPointSize() const {
+ return uses_point_size;
+ }
+
bool HasPhysicalAttributes() const {
return uses_physical_attributes;
}
@@ -125,12 +132,20 @@ public:
return header;
}
+ bool IsFlowStackDisabled() const {
+ return disable_flow_stack;
+ }
+
+ u32 ConvertAddressToNvidiaSpace(const u32 address) const {
+ return (address - main_offset) * sizeof(Tegra::Shader::Instruction);
+ }
+
private:
void Decode();
- ExitMethod Scan(u32 begin, u32 end, std::set<u32>& labels);
-
NodeBlock DecodeRange(u32 begin, u32 end);
+ void DecodeRangeInner(NodeBlock& bb, u32 begin, u32 end);
+ void InsertControlFlow(NodeBlock& bb, const ShaderBlock& block);
/**
* Decodes a single instruction from Tegra to IR.
@@ -152,8 +167,10 @@ private:
u32 DecodeFfma(NodeBlock& bb, u32 pc);
u32 DecodeHfma2(NodeBlock& bb, u32 pc);
u32 DecodeConversion(NodeBlock& bb, u32 pc);
+ u32 DecodeWarp(NodeBlock& bb, u32 pc);
u32 DecodeMemory(NodeBlock& bb, u32 pc);
u32 DecodeTexture(NodeBlock& bb, u32 pc);
+ u32 DecodeImage(NodeBlock& bb, u32 pc);
u32 DecodeFloatSetPredicate(NodeBlock& bb, u32 pc);
u32 DecodeIntegerSetPredicate(NodeBlock& bb, u32 pc);
u32 DecodeHalfSetPredicate(NodeBlock& bb, u32 pc);
@@ -191,8 +208,10 @@ private:
Node GetInternalFlag(InternalFlag flag, bool negated = false);
/// Generates a node representing a local memory address
Node GetLocalMemory(Node address);
- /// Generates a temporal, internally it uses a post-RZ register
- Node GetTemporal(u32 id);
+ /// Generates a node representing a shared memory address
+ Node GetSharedMemory(Node address);
+ /// Generates a temporary; internally it uses a post-RZ register
+ Node GetTemporary(u32 id);
/// Sets a register. src value must be a number-evaluated node.
void SetRegister(NodeBlock& bb, Tegra::Shader::Register dest, Node src);
@@ -200,10 +219,12 @@ private:
void SetPredicate(NodeBlock& bb, u64 dest, Node src);
/// Sets an internal flag. src value must be a bool-evaluated node
void SetInternalFlag(NodeBlock& bb, InternalFlag flag, Node value);
- /// Sets a local memory address. address and value must be a number-evaluated node
+ /// Sets a local memory address with a value.
void SetLocalMemory(NodeBlock& bb, Node address, Node value);
- /// Sets a temporal. Internally it uses a post-RZ register
- void SetTemporal(NodeBlock& bb, u32 id, Node value);
+ /// Sets a shared memory address with a value.
+ void SetSharedMemory(NodeBlock& bb, Node address, Node value);
+ /// Sets a temporary. Internally it uses a post-RZ register
+ void SetTemporary(NodeBlock& bb, u32 id, Node value);
/// Sets internal flags from a float
void SetInternalFlagsFromFloat(NodeBlock& bb, Node value, bool sets_cc = true);
@@ -254,9 +275,24 @@ private:
Tegra::Shader::TextureType type, bool is_array,
bool is_shadow);
+ /// Accesses an image.
+ Image& GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type,
+ std::optional<Tegra::Shader::ImageAtomicSize> size = {});
+
+ /// Access a bindless image sampler.
+ Image& GetBindlessImage(Tegra::Shader::Register reg, Tegra::Shader::ImageType type,
+ std::optional<Tegra::Shader::ImageAtomicSize> size = {});
+
+ /// Tries to access an existing image, updating its state as needed
+ Image* TryUseExistingImage(u64 offset, Tegra::Shader::ImageType type,
+ std::optional<Tegra::Shader::ImageAtomicSize> size);
+
/// Extracts a sequence of bits from a node
Node BitfieldExtract(Node value, u32 offset, u32 bits);
+ /// Inserts a sequence of bits from a node
+ Node BitfieldInsert(Node base, Node insert, u32 offset, u32 bits);
+
void WriteTexInstructionFloat(NodeBlock& bb, Tegra::Shader::Instruction instr,
const Node4& components);
@@ -277,6 +313,8 @@ private:
Node4 GetTld4Code(Tegra::Shader::Instruction instr, Tegra::Shader::TextureType texture_type,
bool depth_compare, bool is_array, bool is_aoffi);
+ Node4 GetTldCode(Tegra::Shader::Instruction instr);
+
Node4 GetTldsCode(Tegra::Shader::Instruction instr, Tegra::Shader::TextureType texture_type,
bool is_array);
@@ -301,7 +339,7 @@ private:
void WriteLop3Instruction(NodeBlock& bb, Tegra::Shader::Register dest, Node op_a, Node op_b,
Node op_c, Node imm_lut, bool sets_cc);
- Node TrackCbuf(Node tracked, const NodeBlock& code, s64 cursor) const;
+ std::tuple<Node, u32, u32> TrackCbuf(Node tracked, const NodeBlock& code, s64 cursor) const;
std::optional<u32> TrackImmediate(Node tracked, const NodeBlock& code, s64 cursor) const;
@@ -313,10 +351,11 @@ private:
const ProgramCode& program_code;
const u32 main_offset;
+ const std::size_t program_size;
+ bool disable_flow_stack{};
u32 coverage_begin{};
u32 coverage_end{};
- std::map<std::pair<u32, u32>, ExitMethod> exit_method_map;
std::map<u32, NodeBlock> basic_blocks;
NodeBlock global_code;
@@ -327,8 +366,12 @@ private:
std::set<Tegra::Shader::Attribute::Index> used_output_attributes;
std::map<u32, ConstBuffer> used_cbufs;
std::set<Sampler> used_samplers;
+ std::map<u64, Image> used_images;
std::array<bool, Tegra::Engines::Maxwell3D::Regs::NumClipDistances> used_clip_distances{};
std::map<GlobalMemoryBase, GlobalMemoryUsage> used_global_memory;
+ bool uses_layer{};
+ bool uses_viewport_index{};
+ bool uses_point_size{};
bool uses_physical_attributes{}; // Shader uses AL2P or physical attribute read/writes
Tegra::Shader::Header header;
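
ConvertAddressToNvidiaSpace above maps an IR instruction index to a byte offset:
ProgramCode stores one u64 per instruction, so the conversion is simply
(address - main_offset) * 8. A compile-time check of that arithmetic:

    #include <cstdint>

    constexpr std::uint32_t ToNvidiaSpace(std::uint32_t address, std::uint32_t main_offset) {
        return (address - main_offset) * sizeof(std::uint64_t); // Instruction is a 64-bit word
    }

    // Two instructions past the entry point sit 16 bytes in.
    static_assert(ToNvidiaSpace(12, 10) == 16, "two instructions = 16 bytes");
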
diff --git a/src/video_core/shader/track.cpp b/src/video_core/shader/track.cpp
index fc957d980..55f5949e4 100644
--- a/src/video_core/shader/track.cpp
+++ b/src/video_core/shader/track.cpp
@@ -15,56 +15,63 @@ namespace {
std::pair<Node, s64> FindOperation(const NodeBlock& code, s64 cursor,
OperationCode operation_code) {
for (; cursor >= 0; --cursor) {
- const Node node = code.at(cursor);
+ Node node = code.at(cursor);
+
if (const auto operation = std::get_if<OperationNode>(&*node)) {
if (operation->GetCode() == operation_code) {
- return {node, cursor};
+ return {std::move(node), cursor};
}
}
+
if (const auto conditional = std::get_if<ConditionalNode>(&*node)) {
const auto& conditional_code = conditional->GetCode();
- const auto [found, internal_cursor] = FindOperation(
+ auto [found, internal_cursor] = FindOperation(
conditional_code, static_cast<s64>(conditional_code.size() - 1), operation_code);
if (found) {
- return {found, cursor};
+ return {std::move(found), cursor};
}
}
}
return {};
}
-} // namespace
+} // Anonymous namespace
-Node ShaderIR::TrackCbuf(Node tracked, const NodeBlock& code, s64 cursor) const {
+std::tuple<Node, u32, u32> ShaderIR::TrackCbuf(Node tracked, const NodeBlock& code,
+ s64 cursor) const {
if (const auto cbuf = std::get_if<CbufNode>(&*tracked)) {
- // Cbuf found, but it has to be immediate
- return std::holds_alternative<ImmediateNode>(*cbuf->GetOffset()) ? tracked : nullptr;
+ // Constant buffer found; check whether its offset is an immediate
+ const auto offset = cbuf->GetOffset();
+ if (const auto immediate = std::get_if<ImmediateNode>(&*offset)) {
+ return {tracked, cbuf->GetIndex(), immediate->GetValue()};
+ }
+ return {};
}
if (const auto gpr = std::get_if<GprNode>(&*tracked)) {
if (gpr->GetIndex() == Tegra::Shader::Register::ZeroIndex) {
- return nullptr;
+ return {};
}
// Reduce the cursor by one to avoid infinite loops when the instruction sets the same
// register that it uses as an operand
const auto [source, new_cursor] = TrackRegister(gpr, code, cursor - 1);
if (!source) {
- return nullptr;
+ return {};
}
return TrackCbuf(source, code, new_cursor);
}
if (const auto operation = std::get_if<OperationNode>(&*tracked)) {
- for (std::size_t i = 0; i < operation->GetOperandsCount(); ++i) {
- if (const auto found = TrackCbuf((*operation)[i], code, cursor)) {
- // Cbuf found in operand
+ for (std::size_t i = operation->GetOperandsCount(); i > 0; --i) {
+ if (auto found = TrackCbuf((*operation)[i - 1], code, cursor); std::get<0>(found)) {
+ // Cbuf found in operand.
return found;
}
}
- return nullptr;
+ return {};
}
if (const auto conditional = std::get_if<ConditionalNode>(&*tracked)) {
const auto& conditional_code = conditional->GetCode();
return TrackCbuf(tracked, conditional_code, static_cast<s64>(conditional_code.size()));
}
- return nullptr;
+ return {};
}
std::optional<u32> ShaderIR::TrackImmediate(Node tracked, const NodeBlock& code, s64 cursor) const {
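
The operand walk in TrackCbuf now iterates backwards with the `i > 0` / `i - 1` pattern,
the standard way to reverse-iterate with an unsigned index without wrapping around at
zero. The pattern in isolation:

    #include <cstddef>
    #include <vector>

    // Returns the last non-zero element, scanning from the back.
    int LastNonZero(const std::vector<int>& v) {
        for (std::size_t i = v.size(); i > 0; --i) { // i - 1 is the real index
            if (v[i - 1] != 0) {
                return v[i - 1];
            }
        }
        return 0;
    }
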
diff --git a/src/video_core/surface.cpp b/src/video_core/surface.cpp
index 6384fa8d2..53d0142cb 100644
--- a/src/video_core/surface.cpp
+++ b/src/video_core/surface.cpp
@@ -12,6 +12,8 @@ SurfaceTarget SurfaceTargetFromTextureType(Tegra::Texture::TextureType texture_t
switch (texture_type) {
case Tegra::Texture::TextureType::Texture1D:
return SurfaceTarget::Texture1D;
+ case Tegra::Texture::TextureType::Texture1DBuffer:
+ return SurfaceTarget::TextureBuffer;
case Tegra::Texture::TextureType::Texture2D:
case Tegra::Texture::TextureType::Texture2DNoMipmap:
return SurfaceTarget::Texture2D;
@@ -35,6 +37,7 @@ SurfaceTarget SurfaceTargetFromTextureType(Tegra::Texture::TextureType texture_t
bool SurfaceTargetIsLayered(SurfaceTarget target) {
switch (target) {
case SurfaceTarget::Texture1D:
+ case SurfaceTarget::TextureBuffer:
case SurfaceTarget::Texture2D:
case SurfaceTarget::Texture3D:
return false;
@@ -53,6 +56,7 @@ bool SurfaceTargetIsLayered(SurfaceTarget target) {
bool SurfaceTargetIsArray(SurfaceTarget target) {
switch (target) {
case SurfaceTarget::Texture1D:
+ case SurfaceTarget::TextureBuffer:
case SurfaceTarget::Texture2D:
case SurfaceTarget::Texture3D:
case SurfaceTarget::TextureCubemap:
@@ -304,8 +308,8 @@ PixelFormat PixelFormatFromTextureFormat(Tegra::Texture::TextureFormat format,
return PixelFormat::Z32F;
case Tegra::Texture::TextureFormat::Z16:
return PixelFormat::Z16;
- case Tegra::Texture::TextureFormat::Z24S8:
- return PixelFormat::Z24S8;
+ case Tegra::Texture::TextureFormat::S8Z24:
+ return PixelFormat::S8Z24;
case Tegra::Texture::TextureFormat::ZF32_X24S8:
return PixelFormat::Z32FS8;
case Tegra::Texture::TextureFormat::DXT1:
@@ -441,11 +445,12 @@ PixelFormat PixelFormatFromGPUPixelFormat(Tegra::FramebufferConfig::PixelFormat
switch (format) {
case Tegra::FramebufferConfig::PixelFormat::ABGR8:
return PixelFormat::ABGR8U;
+ case Tegra::FramebufferConfig::PixelFormat::RGB565:
+ return PixelFormat::B5G6R5U;
case Tegra::FramebufferConfig::PixelFormat::BGRA8:
return PixelFormat::BGRA8;
default:
- LOG_CRITICAL(HW_GPU, "Unimplemented format={}", static_cast<u32>(format));
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unimplemented format={}", static_cast<u32>(format));
return PixelFormat::ABGR8U;
}
}
@@ -508,6 +513,26 @@ bool IsPixelFormatASTC(PixelFormat format) {
}
}
+bool IsPixelFormatSRGB(PixelFormat format) {
+ switch (format) {
+ case PixelFormat::RGBA8_SRGB:
+ case PixelFormat::BGRA8_SRGB:
+ case PixelFormat::DXT1_SRGB:
+ case PixelFormat::DXT23_SRGB:
+ case PixelFormat::DXT45_SRGB:
+ case PixelFormat::BC7U_SRGB:
+ case PixelFormat::ASTC_2D_4X4_SRGB:
+ case PixelFormat::ASTC_2D_8X8_SRGB:
+ case PixelFormat::ASTC_2D_8X5_SRGB:
+ case PixelFormat::ASTC_2D_5X4_SRGB:
+ case PixelFormat::ASTC_2D_5X5_SRGB:
+ case PixelFormat::ASTC_2D_10X8_SRGB:
+ return true;
+ default:
+ return false;
+ }
+}
+
std::pair<u32, u32> GetASTCBlockSize(PixelFormat format) {
return {GetDefaultBlockWidth(format), GetDefaultBlockHeight(format)};
}
diff --git a/src/video_core/surface.h b/src/video_core/surface.h
index b783e4b27..19268b7cd 100644
--- a/src/video_core/surface.h
+++ b/src/video_core/surface.h
@@ -114,6 +114,7 @@ enum class SurfaceType {
enum class SurfaceTarget {
Texture1D,
+ TextureBuffer,
Texture2D,
Texture3D,
Texture1DArray,
@@ -122,71 +123,71 @@ enum class SurfaceTarget {
TextureCubeArray,
};
-constexpr std::array<u32, MaxPixelFormat> compression_factor_table = {{
- 1, // ABGR8U
- 1, // ABGR8S
- 1, // ABGR8UI
- 1, // B5G6R5U
- 1, // A2B10G10R10U
- 1, // A1B5G5R5U
- 1, // R8U
- 1, // R8UI
- 1, // RGBA16F
- 1, // RGBA16U
- 1, // RGBA16UI
- 1, // R11FG11FB10F
- 1, // RGBA32UI
- 4, // DXT1
- 4, // DXT23
- 4, // DXT45
- 4, // DXN1
- 4, // DXN2UNORM
- 4, // DXN2SNORM
- 4, // BC7U
- 4, // BC6H_UF16
- 4, // BC6H_SF16
- 4, // ASTC_2D_4X4
- 1, // BGRA8
- 1, // RGBA32F
- 1, // RG32F
- 1, // R32F
- 1, // R16F
- 1, // R16U
- 1, // R16S
- 1, // R16UI
- 1, // R16I
- 1, // RG16
- 1, // RG16F
- 1, // RG16UI
- 1, // RG16I
- 1, // RG16S
- 1, // RGB32F
- 1, // RGBA8_SRGB
- 1, // RG8U
- 1, // RG8S
- 1, // RG32UI
- 1, // R32UI
- 4, // ASTC_2D_8X8
- 4, // ASTC_2D_8X5
- 4, // ASTC_2D_5X4
- 1, // BGRA8_SRGB
- 4, // DXT1_SRGB
- 4, // DXT23_SRGB
- 4, // DXT45_SRGB
- 4, // BC7U_SRGB
- 4, // ASTC_2D_4X4_SRGB
- 4, // ASTC_2D_8X8_SRGB
- 4, // ASTC_2D_8X5_SRGB
- 4, // ASTC_2D_5X4_SRGB
- 4, // ASTC_2D_5X5
- 4, // ASTC_2D_5X5_SRGB
- 4, // ASTC_2D_10X8
- 4, // ASTC_2D_10X8_SRGB
- 1, // Z32F
- 1, // Z16
- 1, // Z24S8
- 1, // S8Z24
- 1, // Z32FS8
+constexpr std::array<u32, MaxPixelFormat> compression_factor_shift_table = {{
+ 0, // ABGR8U
+ 0, // ABGR8S
+ 0, // ABGR8UI
+ 0, // B5G6R5U
+ 0, // A2B10G10R10U
+ 0, // A1B5G5R5U
+ 0, // R8U
+ 0, // R8UI
+ 0, // RGBA16F
+ 0, // RGBA16U
+ 0, // RGBA16UI
+ 0, // R11FG11FB10F
+ 0, // RGBA32UI
+ 2, // DXT1
+ 2, // DXT23
+ 2, // DXT45
+ 2, // DXN1
+ 2, // DXN2UNORM
+ 2, // DXN2SNORM
+ 2, // BC7U
+ 2, // BC6H_UF16
+ 2, // BC6H_SF16
+ 2, // ASTC_2D_4X4
+ 0, // BGRA8
+ 0, // RGBA32F
+ 0, // RG32F
+ 0, // R32F
+ 0, // R16F
+ 0, // R16U
+ 0, // R16S
+ 0, // R16UI
+ 0, // R16I
+ 0, // RG16
+ 0, // RG16F
+ 0, // RG16UI
+ 0, // RG16I
+ 0, // RG16S
+ 0, // RGB32F
+ 0, // RGBA8_SRGB
+ 0, // RG8U
+ 0, // RG8S
+ 0, // RG32UI
+ 0, // R32UI
+ 2, // ASTC_2D_8X8
+ 2, // ASTC_2D_8X5
+ 2, // ASTC_2D_5X4
+ 0, // BGRA8_SRGB
+ 2, // DXT1_SRGB
+ 2, // DXT23_SRGB
+ 2, // DXT45_SRGB
+ 2, // BC7U_SRGB
+ 2, // ASTC_2D_4X4_SRGB
+ 2, // ASTC_2D_8X8_SRGB
+ 2, // ASTC_2D_8X5_SRGB
+ 2, // ASTC_2D_5X4_SRGB
+ 2, // ASTC_2D_5X5
+ 2, // ASTC_2D_5X5_SRGB
+ 2, // ASTC_2D_10X8
+ 2, // ASTC_2D_10X8_SRGB
+ 0, // Z32F
+ 0, // Z16
+ 0, // Z24S8
+ 0, // S8Z24
+ 0, // Z32FS8
}};
/**
@@ -195,12 +196,14 @@ constexpr std::array<u32, MaxPixelFormat> compression_factor_table = {{
* compressed image. This is used for maintaining proper surface sizes for compressed
* texture formats.
*/
-static constexpr u32 GetCompressionFactor(PixelFormat format) {
- if (format == PixelFormat::Invalid)
- return 0;
+inline constexpr u32 GetCompressionFactorShift(PixelFormat format) {
+ DEBUG_ASSERT(format != PixelFormat::Invalid);
+ DEBUG_ASSERT(static_cast<std::size_t>(format) < compression_factor_shift_table.size());
+ return compression_factor_shift_table[static_cast<std::size_t>(format)];
+}
- ASSERT(static_cast<std::size_t>(format) < compression_factor_table.size());
- return compression_factor_table[static_cast<std::size_t>(format)];
+inline constexpr u32 GetCompressionFactor(PixelFormat format) {
+ return 1U << GetCompressionFactorShift(format);
}
constexpr std::array<u32, MaxPixelFormat> block_width_table = {{
@@ -436,6 +439,88 @@ static constexpr u32 GetBytesPerPixel(PixelFormat pixel_format) {
return GetFormatBpp(pixel_format) / CHAR_BIT;
}
+enum class SurfaceCompression {
+ None, // Not compressed
+ Compressed, // Texture is compressed
+ Converted, // Texture is converted before upload or after download
+ Rearranged, // Texture is swizzled before upload or after download
+};
+
+constexpr std::array<SurfaceCompression, MaxPixelFormat> compression_type_table = {{
+ SurfaceCompression::None, // ABGR8U
+ SurfaceCompression::None, // ABGR8S
+ SurfaceCompression::None, // ABGR8UI
+ SurfaceCompression::None, // B5G6R5U
+ SurfaceCompression::None, // A2B10G10R10U
+ SurfaceCompression::None, // A1B5G5R5U
+ SurfaceCompression::None, // R8U
+ SurfaceCompression::None, // R8UI
+ SurfaceCompression::None, // RGBA16F
+ SurfaceCompression::None, // RGBA16U
+ SurfaceCompression::None, // RGBA16UI
+ SurfaceCompression::None, // R11FG11FB10F
+ SurfaceCompression::None, // RGBA32UI
+ SurfaceCompression::Compressed, // DXT1
+ SurfaceCompression::Compressed, // DXT23
+ SurfaceCompression::Compressed, // DXT45
+ SurfaceCompression::Compressed, // DXN1
+ SurfaceCompression::Compressed, // DXN2UNORM
+ SurfaceCompression::Compressed, // DXN2SNORM
+ SurfaceCompression::Compressed, // BC7U
+ SurfaceCompression::Compressed, // BC6H_UF16
+ SurfaceCompression::Compressed, // BC6H_SF16
+ SurfaceCompression::Converted, // ASTC_2D_4X4
+ SurfaceCompression::None, // BGRA8
+ SurfaceCompression::None, // RGBA32F
+ SurfaceCompression::None, // RG32F
+ SurfaceCompression::None, // R32F
+ SurfaceCompression::None, // R16F
+ SurfaceCompression::None, // R16U
+ SurfaceCompression::None, // R16S
+ SurfaceCompression::None, // R16UI
+ SurfaceCompression::None, // R16I
+ SurfaceCompression::None, // RG16
+ SurfaceCompression::None, // RG16F
+ SurfaceCompression::None, // RG16UI
+ SurfaceCompression::None, // RG16I
+ SurfaceCompression::None, // RG16S
+ SurfaceCompression::None, // RGB32F
+ SurfaceCompression::None, // RGBA8_SRGB
+ SurfaceCompression::None, // RG8U
+ SurfaceCompression::None, // RG8S
+ SurfaceCompression::None, // RG32UI
+ SurfaceCompression::None, // R32UI
+ SurfaceCompression::Converted, // ASTC_2D_8X8
+ SurfaceCompression::Converted, // ASTC_2D_8X5
+ SurfaceCompression::Converted, // ASTC_2D_5X4
+ SurfaceCompression::None, // BGRA8_SRGB
+ SurfaceCompression::Compressed, // DXT1_SRGB
+ SurfaceCompression::Compressed, // DXT23_SRGB
+ SurfaceCompression::Compressed, // DXT45_SRGB
+ SurfaceCompression::Compressed, // BC7U_SRGB
+ SurfaceCompression::Converted, // ASTC_2D_4X4_SRGB
+ SurfaceCompression::Converted, // ASTC_2D_8X8_SRGB
+ SurfaceCompression::Converted, // ASTC_2D_8X5_SRGB
+ SurfaceCompression::Converted, // ASTC_2D_5X4_SRGB
+ SurfaceCompression::Converted, // ASTC_2D_5X5
+ SurfaceCompression::Converted, // ASTC_2D_5X5_SRGB
+ SurfaceCompression::Converted, // ASTC_2D_10X8
+ SurfaceCompression::Converted, // ASTC_2D_10X8_SRGB
+ SurfaceCompression::None, // Z32F
+ SurfaceCompression::None, // Z16
+ SurfaceCompression::None, // Z24S8
+ SurfaceCompression::Rearranged, // S8Z24
+ SurfaceCompression::None, // Z32FS8
+}};
+
+constexpr SurfaceCompression GetFormatCompressionType(PixelFormat format) {
+ if (format == PixelFormat::Invalid) {
+ return SurfaceCompression::None;
+ }
+ DEBUG_ASSERT(static_cast<std::size_t>(format) < compression_type_table.size());
+ return compression_type_table[static_cast<std::size_t>(format)];
+}
+
SurfaceTarget SurfaceTargetFromTextureType(Tegra::Texture::TextureType texture_type);
bool SurfaceTargetIsLayered(SurfaceTarget target);
@@ -462,6 +547,8 @@ SurfaceType GetFormatType(PixelFormat pixel_format);
bool IsPixelFormatASTC(PixelFormat format);
+bool IsPixelFormatSRGB(PixelFormat format);
+
std::pair<u32, u32> GetASTCBlockSize(PixelFormat format);
/// Returns true if the specified PixelFormat is a BCn format, e.g. DXT or DXN
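
On the compression_factor_shift_table change above: every compressed format in the table
uses factor 4 (shift 2) and every uncompressed one factor 1 (shift 0), so the factor is
always a power of two and can be stored as a shift. GetCompressionFactor reconstructs the
old values, and callers can align dimensions cheaply. A small sketch, assuming the usual
power-of-two behaviour of Common::AlignUp:

    #include <cstdint>

    constexpr std::uint32_t AlignUp(std::uint32_t value, std::uint32_t align) {
        return (value + align - 1) & ~(align - 1); // align must be a power of two
    }

    constexpr std::uint32_t shift = 2;            // e.g. a DXT/ASTC-class format
    constexpr std::uint32_t factor = 1U << shift; // GetCompressionFactor(format) == 4
    static_assert(AlignUp(30, factor) == 32, "30 texels round up to 8 compressed blocks");
    static_assert(AlignUp(32, factor) == 32, "already block-aligned");
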
diff --git a/src/video_core/texture_cache.cpp b/src/video_core/texture_cache.cpp
deleted file mode 100644
index e96eba7cc..000000000
--- a/src/video_core/texture_cache.cpp
+++ /dev/null
@@ -1,386 +0,0 @@
-// Copyright 2019 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include "common/alignment.h"
-#include "common/assert.h"
-#include "common/cityhash.h"
-#include "common/common_types.h"
-#include "core/core.h"
-#include "video_core/surface.h"
-#include "video_core/texture_cache.h"
-#include "video_core/textures/decoders.h"
-#include "video_core/textures/texture.h"
-
-namespace VideoCommon {
-
-using VideoCore::Surface::SurfaceTarget;
-
-using VideoCore::Surface::ComponentTypeFromDepthFormat;
-using VideoCore::Surface::ComponentTypeFromRenderTarget;
-using VideoCore::Surface::ComponentTypeFromTexture;
-using VideoCore::Surface::PixelFormatFromDepthFormat;
-using VideoCore::Surface::PixelFormatFromRenderTargetFormat;
-using VideoCore::Surface::PixelFormatFromTextureFormat;
-using VideoCore::Surface::SurfaceTargetFromTextureType;
-
-constexpr u32 GetMipmapSize(bool uncompressed, u32 mip_size, u32 tile) {
- return uncompressed ? mip_size : std::max(1U, (mip_size + tile - 1) / tile);
-}
-
-SurfaceParams SurfaceParams::CreateForTexture(Core::System& system,
- const Tegra::Texture::FullTextureInfo& config) {
- SurfaceParams params;
- params.is_tiled = config.tic.IsTiled();
- params.block_width = params.is_tiled ? config.tic.BlockWidth() : 0,
- params.block_height = params.is_tiled ? config.tic.BlockHeight() : 0,
- params.block_depth = params.is_tiled ? config.tic.BlockDepth() : 0,
- params.tile_width_spacing = params.is_tiled ? (1 << config.tic.tile_width_spacing.Value()) : 1;
- params.pixel_format =
- PixelFormatFromTextureFormat(config.tic.format, config.tic.r_type.Value(), false);
- params.component_type = ComponentTypeFromTexture(config.tic.r_type.Value());
- params.type = GetFormatType(params.pixel_format);
- params.target = SurfaceTargetFromTextureType(config.tic.texture_type);
- params.width = Common::AlignUp(config.tic.Width(), GetCompressionFactor(params.pixel_format));
- params.height = Common::AlignUp(config.tic.Height(), GetCompressionFactor(params.pixel_format));
- params.depth = config.tic.Depth();
- if (params.target == SurfaceTarget::TextureCubemap ||
- params.target == SurfaceTarget::TextureCubeArray) {
- params.depth *= 6;
- }
- params.pitch = params.is_tiled ? 0 : config.tic.Pitch();
- params.unaligned_height = config.tic.Height();
- params.num_levels = config.tic.max_mip_level + 1;
-
- params.CalculateCachedValues();
- return params;
-}
-
-SurfaceParams SurfaceParams::CreateForDepthBuffer(
- Core::System& system, u32 zeta_width, u32 zeta_height, Tegra::DepthFormat format,
- u32 block_width, u32 block_height, u32 block_depth,
- Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout type) {
- SurfaceParams params;
- params.is_tiled = type == Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout::BlockLinear;
- params.block_width = 1 << std::min(block_width, 5U);
- params.block_height = 1 << std::min(block_height, 5U);
- params.block_depth = 1 << std::min(block_depth, 5U);
- params.tile_width_spacing = 1;
- params.pixel_format = PixelFormatFromDepthFormat(format);
- params.component_type = ComponentTypeFromDepthFormat(format);
- params.type = GetFormatType(params.pixel_format);
- params.width = zeta_width;
- params.height = zeta_height;
- params.unaligned_height = zeta_height;
- params.target = SurfaceTarget::Texture2D;
- params.depth = 1;
- params.num_levels = 1;
-
- params.CalculateCachedValues();
- return params;
-}
-
-SurfaceParams SurfaceParams::CreateForFramebuffer(Core::System& system, std::size_t index) {
- const auto& config{system.GPU().Maxwell3D().regs.rt[index]};
- SurfaceParams params;
- params.is_tiled =
- config.memory_layout.type == Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout::BlockLinear;
- params.block_width = 1 << config.memory_layout.block_width;
- params.block_height = 1 << config.memory_layout.block_height;
- params.block_depth = 1 << config.memory_layout.block_depth;
- params.tile_width_spacing = 1;
- params.pixel_format = PixelFormatFromRenderTargetFormat(config.format);
- params.component_type = ComponentTypeFromRenderTarget(config.format);
- params.type = GetFormatType(params.pixel_format);
- if (params.is_tiled) {
- params.width = config.width;
- } else {
- const u32 bpp = GetFormatBpp(params.pixel_format) / CHAR_BIT;
- params.pitch = config.width;
- params.width = params.pitch / bpp;
- }
- params.height = config.height;
- params.depth = 1;
- params.unaligned_height = config.height;
- params.target = SurfaceTarget::Texture2D;
- params.num_levels = 1;
-
- params.CalculateCachedValues();
- return params;
-}
-
-SurfaceParams SurfaceParams::CreateForFermiCopySurface(
- const Tegra::Engines::Fermi2D::Regs::Surface& config) {
- SurfaceParams params{};
- params.is_tiled = !config.linear;
- params.block_width = params.is_tiled ? std::min(config.BlockWidth(), 32U) : 0,
- params.block_height = params.is_tiled ? std::min(config.BlockHeight(), 32U) : 0,
- params.block_depth = params.is_tiled ? std::min(config.BlockDepth(), 32U) : 0,
- params.tile_width_spacing = 1;
- params.pixel_format = PixelFormatFromRenderTargetFormat(config.format);
- params.component_type = ComponentTypeFromRenderTarget(config.format);
- params.type = GetFormatType(params.pixel_format);
- params.width = config.width;
- params.height = config.height;
- params.unaligned_height = config.height;
- // TODO(Rodrigo): Try to guess the surface target from depth and layer parameters
- params.target = SurfaceTarget::Texture2D;
- params.depth = 1;
- params.num_levels = 1;
-
- params.CalculateCachedValues();
- return params;
-}
-
-u32 SurfaceParams::GetMipWidth(u32 level) const {
- return std::max(1U, width >> level);
-}
-
-u32 SurfaceParams::GetMipHeight(u32 level) const {
- return std::max(1U, height >> level);
-}
-
-u32 SurfaceParams::GetMipDepth(u32 level) const {
- return IsLayered() ? depth : std::max(1U, depth >> level);
-}
-
-bool SurfaceParams::IsLayered() const {
- switch (target) {
- case SurfaceTarget::Texture1DArray:
- case SurfaceTarget::Texture2DArray:
- case SurfaceTarget::TextureCubeArray:
- case SurfaceTarget::TextureCubemap:
- return true;
- default:
- return false;
- }
-}
-
-u32 SurfaceParams::GetMipBlockHeight(u32 level) const {
- // Auto block resizing algorithm from:
- // https://cgit.freedesktop.org/mesa/mesa/tree/src/gallium/drivers/nouveau/nv50/nv50_miptree.c
- if (level == 0) {
- return block_height;
- }
- const u32 height{GetMipHeight(level)};
- const u32 default_block_height{GetDefaultBlockHeight(pixel_format)};
- const u32 blocks_in_y{(height + default_block_height - 1) / default_block_height};
- u32 block_height = 16;
- while (block_height > 1 && blocks_in_y <= block_height * 4) {
- block_height >>= 1;
- }
- return block_height;
-}
-
-u32 SurfaceParams::GetMipBlockDepth(u32 level) const {
- if (level == 0)
- return block_depth;
- if (target != SurfaceTarget::Texture3D)
- return 1;
-
- const u32 depth{GetMipDepth(level)};
- u32 block_depth = 32;
- while (block_depth > 1 && depth * 2 <= block_depth) {
- block_depth >>= 1;
- }
- if (block_depth == 32 && GetMipBlockHeight(level) >= 4) {
- return 16;
- }
- return block_depth;
-}
-
-std::size_t SurfaceParams::GetGuestMipmapLevelOffset(u32 level) const {
- std::size_t offset = 0;
- for (u32 i = 0; i < level; i++) {
- offset += GetInnerMipmapMemorySize(i, false, IsLayered(), false);
- }
- return offset;
-}
-
-std::size_t SurfaceParams::GetHostMipmapLevelOffset(u32 level) const {
- std::size_t offset = 0;
- for (u32 i = 0; i < level; i++) {
- offset += GetInnerMipmapMemorySize(i, true, false, false);
- }
- return offset;
-}
-
-std::size_t SurfaceParams::GetGuestLayerSize() const {
- return GetInnerMemorySize(false, true, false);
-}
-
-std::size_t SurfaceParams::GetHostLayerSize(u32 level) const {
- return GetInnerMipmapMemorySize(level, true, IsLayered(), false);
-}
-
-bool SurfaceParams::IsFamiliar(const SurfaceParams& view_params) const {
- if (std::tie(is_tiled, tile_width_spacing, pixel_format, component_type, type) !=
- std::tie(view_params.is_tiled, view_params.tile_width_spacing, view_params.pixel_format,
- view_params.component_type, view_params.type)) {
- return false;
- }
-
- const SurfaceTarget view_target{view_params.target};
- if (view_target == target) {
- return true;
- }
-
- switch (target) {
- case SurfaceTarget::Texture1D:
- case SurfaceTarget::Texture2D:
- case SurfaceTarget::Texture3D:
- return false;
- case SurfaceTarget::Texture1DArray:
- return view_target == SurfaceTarget::Texture1D;
- case SurfaceTarget::Texture2DArray:
- return view_target == SurfaceTarget::Texture2D;
- case SurfaceTarget::TextureCubemap:
- return view_target == SurfaceTarget::Texture2D ||
- view_target == SurfaceTarget::Texture2DArray;
- case SurfaceTarget::TextureCubeArray:
- return view_target == SurfaceTarget::Texture2D ||
- view_target == SurfaceTarget::Texture2DArray ||
- view_target == SurfaceTarget::TextureCubemap;
- default:
- UNIMPLEMENTED_MSG("Unimplemented texture family={}", static_cast<u32>(target));
- return false;
- }
-}
-
-bool SurfaceParams::IsPixelFormatZeta() const {
- return pixel_format >= VideoCore::Surface::PixelFormat::MaxColorFormat &&
- pixel_format < VideoCore::Surface::PixelFormat::MaxDepthStencilFormat;
-}
-
-void SurfaceParams::CalculateCachedValues() {
- guest_size_in_bytes = GetInnerMemorySize(false, false, false);
-
- // ASTC is uncompressed in software, in emulated as RGBA8
- if (IsPixelFormatASTC(pixel_format)) {
- host_size_in_bytes = width * height * depth * 4;
- } else {
- host_size_in_bytes = GetInnerMemorySize(true, false, false);
- }
-
- switch (target) {
- case SurfaceTarget::Texture1D:
- case SurfaceTarget::Texture2D:
- case SurfaceTarget::Texture3D:
- num_layers = 1;
- break;
- case SurfaceTarget::Texture1DArray:
- case SurfaceTarget::Texture2DArray:
- case SurfaceTarget::TextureCubemap:
- case SurfaceTarget::TextureCubeArray:
- num_layers = depth;
- break;
- default:
- UNREACHABLE();
- }
-}
-
-std::size_t SurfaceParams::GetInnerMipmapMemorySize(u32 level, bool as_host_size, bool layer_only,
- bool uncompressed) const {
- const bool tiled{as_host_size ? false : is_tiled};
- const u32 tile_x{GetDefaultBlockWidth(pixel_format)};
- const u32 tile_y{GetDefaultBlockHeight(pixel_format)};
- const u32 width{GetMipmapSize(uncompressed, GetMipWidth(level), tile_x)};
- const u32 height{GetMipmapSize(uncompressed, GetMipHeight(level), tile_y)};
- const u32 depth{layer_only ? 1U : GetMipDepth(level)};
- return Tegra::Texture::CalculateSize(tiled, GetBytesPerPixel(pixel_format), width, height,
- depth, GetMipBlockHeight(level), GetMipBlockDepth(level));
-}
-
-std::size_t SurfaceParams::GetInnerMemorySize(bool as_host_size, bool layer_only,
- bool uncompressed) const {
- std::size_t size = 0;
- for (u32 level = 0; level < num_levels; ++level) {
- size += GetInnerMipmapMemorySize(level, as_host_size, layer_only, uncompressed);
- }
- if (!as_host_size && is_tiled) {
- size = Common::AlignUp(size, Tegra::Texture::GetGOBSize() * block_height * block_depth);
- }
- return size;
-}
-
-std::map<u64, std::pair<u32, u32>> SurfaceParams::CreateViewOffsetMap() const {
- std::map<u64, std::pair<u32, u32>> view_offset_map;
- switch (target) {
- case SurfaceTarget::Texture1D:
- case SurfaceTarget::Texture2D:
- case SurfaceTarget::Texture3D: {
- constexpr u32 layer = 0;
- for (u32 level = 0; level < num_levels; ++level) {
- const std::size_t offset{GetGuestMipmapLevelOffset(level)};
- view_offset_map.insert({offset, {layer, level}});
- }
- break;
- }
- case SurfaceTarget::Texture1DArray:
- case SurfaceTarget::Texture2DArray:
- case SurfaceTarget::TextureCubemap:
- case SurfaceTarget::TextureCubeArray: {
- const std::size_t layer_size{GetGuestLayerSize()};
- for (u32 level = 0; level < num_levels; ++level) {
- const std::size_t level_offset{GetGuestMipmapLevelOffset(level)};
- for (u32 layer = 0; layer < num_layers; ++layer) {
- const auto layer_offset{static_cast<std::size_t>(layer_size * layer)};
- const std::size_t offset{level_offset + layer_offset};
- view_offset_map.insert({offset, {layer, level}});
- }
- }
- break;
- }
- default:
- UNIMPLEMENTED_MSG("Unimplemented surface target {}", static_cast<u32>(target));
- }
- return view_offset_map;
-}
-
-bool SurfaceParams::IsViewValid(const SurfaceParams& view_params, u32 layer, u32 level) const {
- return IsDimensionValid(view_params, level) && IsDepthValid(view_params, level) &&
- IsInBounds(view_params, layer, level);
-}
-
-bool SurfaceParams::IsDimensionValid(const SurfaceParams& view_params, u32 level) const {
- return view_params.width == GetMipWidth(level) && view_params.height == GetMipHeight(level);
-}
-
-bool SurfaceParams::IsDepthValid(const SurfaceParams& view_params, u32 level) const {
- if (view_params.target != SurfaceTarget::Texture3D) {
- return true;
- }
- return view_params.depth == GetMipDepth(level);
-}
-
-bool SurfaceParams::IsInBounds(const SurfaceParams& view_params, u32 layer, u32 level) const {
- return layer + view_params.num_layers <= num_layers &&
- level + view_params.num_levels <= num_levels;
-}
-
-std::size_t HasheableSurfaceParams::Hash() const {
- return static_cast<std::size_t>(
- Common::CityHash64(reinterpret_cast<const char*>(this), sizeof(*this)));
-}
-
-bool HasheableSurfaceParams::operator==(const HasheableSurfaceParams& rhs) const {
- return std::tie(is_tiled, block_width, block_height, block_depth, tile_width_spacing, width,
- height, depth, pitch, unaligned_height, num_levels, pixel_format,
- component_type, type, target) ==
- std::tie(rhs.is_tiled, rhs.block_width, rhs.block_height, rhs.block_depth,
- rhs.tile_width_spacing, rhs.width, rhs.height, rhs.depth, rhs.pitch,
- rhs.unaligned_height, rhs.num_levels, rhs.pixel_format, rhs.component_type,
- rhs.type, rhs.target);
-}
-
-std::size_t ViewKey::Hash() const {
- return static_cast<std::size_t>(
- Common::CityHash64(reinterpret_cast<const char*>(this), sizeof(*this)));
-}
-
-bool ViewKey::operator==(const ViewKey& rhs) const {
- return std::tie(base_layer, num_layers, base_level, num_levels) ==
- std::tie(rhs.base_layer, rhs.num_layers, rhs.base_level, rhs.num_levels);
-}
-
-} // namespace VideoCommon
diff --git a/src/video_core/texture_cache.h b/src/video_core/texture_cache.h
deleted file mode 100644
index 041551691..000000000
--- a/src/video_core/texture_cache.h
+++ /dev/null
@@ -1,586 +0,0 @@
-// Copyright 2019 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <list>
-#include <memory>
-#include <set>
-#include <tuple>
-#include <type_traits>
-#include <unordered_map>
-
-#include <boost/icl/interval_map.hpp>
-#include <boost/range/iterator_range.hpp>
-
-#include "common/assert.h"
-#include "common/common_types.h"
-#include "core/memory.h"
-#include "video_core/engines/fermi_2d.h"
-#include "video_core/engines/maxwell_3d.h"
-#include "video_core/gpu.h"
-#include "video_core/rasterizer_interface.h"
-#include "video_core/surface.h"
-
-namespace Core {
-class System;
-}
-
-namespace Tegra::Texture {
-struct FullTextureInfo;
-}
-
-namespace VideoCore {
-class RasterizerInterface;
-}
-
-namespace VideoCommon {
-
-class HasheableSurfaceParams {
-public:
- std::size_t Hash() const;
-
- bool operator==(const HasheableSurfaceParams& rhs) const;
-
-protected:
- // Avoid creation outside of a managed environment.
- HasheableSurfaceParams() = default;
-
- bool is_tiled;
- u32 block_width;
- u32 block_height;
- u32 block_depth;
- u32 tile_width_spacing;
- u32 width;
- u32 height;
- u32 depth;
- u32 pitch;
- u32 unaligned_height;
- u32 num_levels;
- VideoCore::Surface::PixelFormat pixel_format;
- VideoCore::Surface::ComponentType component_type;
- VideoCore::Surface::SurfaceType type;
- VideoCore::Surface::SurfaceTarget target;
-};
-
-class SurfaceParams final : public HasheableSurfaceParams {
-public:
- /// Creates SurfaceCachedParams from a texture configuration.
- static SurfaceParams CreateForTexture(Core::System& system,
- const Tegra::Texture::FullTextureInfo& config);
-
- /// Creates SurfaceCachedParams for a depth buffer configuration.
- static SurfaceParams CreateForDepthBuffer(
- Core::System& system, u32 zeta_width, u32 zeta_height, Tegra::DepthFormat format,
- u32 block_width, u32 block_height, u32 block_depth,
- Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout type);
-
- /// Creates SurfaceCachedParams from a framebuffer configuration.
- static SurfaceParams CreateForFramebuffer(Core::System& system, std::size_t index);
-
- /// Creates SurfaceCachedParams from a Fermi2D surface configuration.
- static SurfaceParams CreateForFermiCopySurface(
- const Tegra::Engines::Fermi2D::Regs::Surface& config);
-
- bool IsTiled() const {
- return is_tiled;
- }
-
- u32 GetBlockWidth() const {
- return block_width;
- }
-
- u32 GetTileWidthSpacing() const {
- return tile_width_spacing;
- }
-
- u32 GetWidth() const {
- return width;
- }
-
- u32 GetHeight() const {
- return height;
- }
-
- u32 GetDepth() const {
- return depth;
- }
-
- u32 GetPitch() const {
- return pitch;
- }
-
- u32 GetNumLevels() const {
- return num_levels;
- }
-
- VideoCore::Surface::PixelFormat GetPixelFormat() const {
- return pixel_format;
- }
-
- VideoCore::Surface::ComponentType GetComponentType() const {
- return component_type;
- }
-
- VideoCore::Surface::SurfaceTarget GetTarget() const {
- return target;
- }
-
- VideoCore::Surface::SurfaceType GetType() const {
- return type;
- }
-
- std::size_t GetGuestSizeInBytes() const {
- return guest_size_in_bytes;
- }
-
- std::size_t GetHostSizeInBytes() const {
- return host_size_in_bytes;
- }
-
- u32 GetNumLayers() const {
- return num_layers;
- }
-
- /// Returns the width of a given mipmap level.
- u32 GetMipWidth(u32 level) const;
-
- /// Returns the height of a given mipmap level.
- u32 GetMipHeight(u32 level) const;
-
- /// Returns the depth of a given mipmap level.
- u32 GetMipDepth(u32 level) const;
-
- /// Returns true if these parameters are from a layered surface.
- bool IsLayered() const;
-
- /// Returns the block height of a given mipmap level.
- u32 GetMipBlockHeight(u32 level) const;
-
- /// Returns the block depth of a given mipmap level.
- u32 GetMipBlockDepth(u32 level) const;
-
- /// Returns the offset in bytes in guest memory of a given mipmap level.
- std::size_t GetGuestMipmapLevelOffset(u32 level) const;
-
- /// Returns the offset in bytes in host memory (linear) of a given mipmap level.
- std::size_t GetHostMipmapLevelOffset(u32 level) const;
-
- /// Returns the size of a layer in bytes in guest memory.
- std::size_t GetGuestLayerSize() const;
-
- /// Returns the size of a layer in bytes in host memory for a given mipmap level.
- std::size_t GetHostLayerSize(u32 level) const;
-
-    /// Returns true if another surface can be familiar with this. This is a loosely defined term
-    /// that reflects the possibility of these two surface parameters being part of a bigger
-    /// superset.
- bool IsFamiliar(const SurfaceParams& view_params) const;
-
- /// Returns true if the pixel format is a depth and/or stencil format.
- bool IsPixelFormatZeta() const;
-
- /// Creates a map that redirects an address difference to a layer and mipmap level.
- std::map<u64, std::pair<u32, u32>> CreateViewOffsetMap() const;
-
-    /// Returns true if the passed surface view parameters are equal to or a valid subset of this.
- bool IsViewValid(const SurfaceParams& view_params, u32 layer, u32 level) const;
-
-private:
- /// Calculates values that can be deduced from HasheableSurfaceParams.
- void CalculateCachedValues();
-
- /// Returns the size of a given mipmap level.
- std::size_t GetInnerMipmapMemorySize(u32 level, bool as_host_size, bool layer_only,
- bool uncompressed) const;
-
- /// Returns the size of all mipmap levels and aligns as needed.
- std::size_t GetInnerMemorySize(bool as_host_size, bool layer_only, bool uncompressed) const;
-
-    /// Returns true if the passed view width and height match the dimensions of these params at a
-    /// given mipmap level.
- bool IsDimensionValid(const SurfaceParams& view_params, u32 level) const;
-
-    /// Returns true if the passed view depth matches the depth of these params at a given mipmap level.
- bool IsDepthValid(const SurfaceParams& view_params, u32 level) const;
-
- /// Returns true if the passed view layers and mipmap levels are in bounds.
- bool IsInBounds(const SurfaceParams& view_params, u32 layer, u32 level) const;
-
- std::size_t guest_size_in_bytes;
- std::size_t host_size_in_bytes;
- u32 num_layers;
-};
-
-struct ViewKey {
- std::size_t Hash() const;
-
- bool operator==(const ViewKey& rhs) const;
-
- u32 base_layer{};
- u32 num_layers{};
- u32 base_level{};
- u32 num_levels{};
-};
-
-} // namespace VideoCommon
-
-namespace std {
-
-template <>
-struct hash<VideoCommon::SurfaceParams> {
- std::size_t operator()(const VideoCommon::SurfaceParams& k) const noexcept {
- return k.Hash();
- }
-};
-
-template <>
-struct hash<VideoCommon::ViewKey> {
- std::size_t operator()(const VideoCommon::ViewKey& k) const noexcept {
- return k.Hash();
- }
-};
-
-} // namespace std
-
-namespace VideoCommon {
-
-template <typename TView, typename TExecutionContext>
-class SurfaceBase {
- static_assert(std::is_trivially_copyable_v<TExecutionContext>);
-
-public:
- virtual void LoadBuffer() = 0;
-
- virtual TExecutionContext FlushBuffer(TExecutionContext exctx) = 0;
-
- virtual TExecutionContext UploadTexture(TExecutionContext exctx) = 0;
-
- TView* TryGetView(VAddr view_addr, const SurfaceParams& view_params) {
- if (view_addr < cpu_addr || !params.IsFamiliar(view_params)) {
-            // It can't be a view if it starts at a lower address.
- return {};
- }
-
- const auto relative_offset{static_cast<u64>(view_addr - cpu_addr)};
- const auto it{view_offset_map.find(relative_offset)};
- if (it == view_offset_map.end()) {
- // Couldn't find an aligned view.
- return {};
- }
- const auto [layer, level] = it->second;
-
- if (!params.IsViewValid(view_params, layer, level)) {
- return {};
- }
-
- return GetView(layer, view_params.GetNumLayers(), level, view_params.GetNumLevels());
- }
-
- VAddr GetCpuAddr() const {
- ASSERT(is_registered);
- return cpu_addr;
- }
-
- u8* GetHostPtr() const {
- ASSERT(is_registered);
- return host_ptr;
- }
-
- CacheAddr GetCacheAddr() const {
- ASSERT(is_registered);
- return cache_addr;
- }
-
- std::size_t GetSizeInBytes() const {
- return params.GetGuestSizeInBytes();
- }
-
- void MarkAsModified(bool is_modified_) {
- is_modified = is_modified_;
- }
-
- const SurfaceParams& GetSurfaceParams() const {
- return params;
- }
-
- TView* GetView(VAddr view_addr, const SurfaceParams& view_params) {
- TView* view{TryGetView(view_addr, view_params)};
- ASSERT(view != nullptr);
- return view;
- }
-
- void Register(VAddr cpu_addr_, u8* host_ptr_) {
- ASSERT(!is_registered);
- is_registered = true;
- cpu_addr = cpu_addr_;
- host_ptr = host_ptr_;
- cache_addr = ToCacheAddr(host_ptr_);
- }
-
- void Register(VAddr cpu_addr_) {
- Register(cpu_addr_, Memory::GetPointer(cpu_addr_));
- }
-
- void Unregister() {
- ASSERT(is_registered);
- is_registered = false;
- }
-
- bool IsRegistered() const {
- return is_registered;
- }
-
-protected:
- explicit SurfaceBase(const SurfaceParams& params)
- : params{params}, view_offset_map{params.CreateViewOffsetMap()} {}
-
- ~SurfaceBase() = default;
-
- virtual std::unique_ptr<TView> CreateView(const ViewKey& view_key) = 0;
-
- bool IsModified() const {
- return is_modified;
- }
-
- const SurfaceParams params;
-
-private:
- TView* GetView(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels) {
- const ViewKey key{base_layer, num_layers, base_level, num_levels};
- const auto [entry, is_cache_miss] = views.try_emplace(key);
- auto& view{entry->second};
- if (is_cache_miss) {
- view = CreateView(key);
- }
- return view.get();
- }
-
- const std::map<u64, std::pair<u32, u32>> view_offset_map;
-
- VAddr cpu_addr{};
- u8* host_ptr{};
- CacheAddr cache_addr{};
- bool is_modified{};
- bool is_registered{};
- std::unordered_map<ViewKey, std::unique_ptr<TView>> views;
-};
-
-template <typename TSurface, typename TView, typename TExecutionContext>
-class TextureCache {
- static_assert(std::is_trivially_copyable_v<TExecutionContext>);
- using ResultType = std::tuple<TView*, TExecutionContext>;
- using IntervalMap = boost::icl::interval_map<CacheAddr, std::set<TSurface*>>;
- using IntervalType = typename IntervalMap::interval_type;
-
-public:
- void InvalidateRegion(CacheAddr addr, std::size_t size) {
- for (TSurface* surface : GetSurfacesInRegion(addr, size)) {
- if (!surface->IsRegistered()) {
- // Skip duplicates
- continue;
- }
- Unregister(surface);
- }
- }
-
- ResultType GetTextureSurface(TExecutionContext exctx,
- const Tegra::Texture::FullTextureInfo& config) {
- auto& memory_manager{system.GPU().MemoryManager()};
- const auto cpu_addr{memory_manager.GpuToCpuAddress(config.tic.Address())};
- if (!cpu_addr) {
- return {{}, exctx};
- }
- const auto params{SurfaceParams::CreateForTexture(system, config)};
- return GetSurfaceView(exctx, *cpu_addr, params, true);
- }
-
- ResultType GetDepthBufferSurface(TExecutionContext exctx, bool preserve_contents) {
- const auto& regs{system.GPU().Maxwell3D().regs};
- if (!regs.zeta.Address() || !regs.zeta_enable) {
- return {{}, exctx};
- }
-
- auto& memory_manager{system.GPU().MemoryManager()};
- const auto cpu_addr{memory_manager.GpuToCpuAddress(regs.zeta.Address())};
- if (!cpu_addr) {
- return {{}, exctx};
- }
-
- const auto depth_params{SurfaceParams::CreateForDepthBuffer(
- system, regs.zeta_width, regs.zeta_height, regs.zeta.format,
- regs.zeta.memory_layout.block_width, regs.zeta.memory_layout.block_height,
- regs.zeta.memory_layout.block_depth, regs.zeta.memory_layout.type)};
- return GetSurfaceView(exctx, *cpu_addr, depth_params, preserve_contents);
- }
-
- ResultType GetColorBufferSurface(TExecutionContext exctx, std::size_t index,
- bool preserve_contents) {
- ASSERT(index < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets);
-
- const auto& regs{system.GPU().Maxwell3D().regs};
- if (index >= regs.rt_control.count || regs.rt[index].Address() == 0 ||
- regs.rt[index].format == Tegra::RenderTargetFormat::NONE) {
- return {{}, exctx};
- }
-
- auto& memory_manager{system.GPU().MemoryManager()};
- const auto& config{system.GPU().Maxwell3D().regs.rt[index]};
- const auto cpu_addr{memory_manager.GpuToCpuAddress(
- config.Address() + config.base_layer * config.layer_stride * sizeof(u32))};
- if (!cpu_addr) {
- return {{}, exctx};
- }
-
- return GetSurfaceView(exctx, *cpu_addr, SurfaceParams::CreateForFramebuffer(system, index),
- preserve_contents);
- }
-
- ResultType GetFermiSurface(TExecutionContext exctx,
- const Tegra::Engines::Fermi2D::Regs::Surface& config) {
- const auto cpu_addr{system.GPU().MemoryManager().GpuToCpuAddress(config.Address())};
- ASSERT(cpu_addr);
- return GetSurfaceView(exctx, *cpu_addr, SurfaceParams::CreateForFermiCopySurface(config),
- true);
- }
-
- TSurface* TryFindFramebufferSurface(const u8* host_ptr) const {
- const auto it{registered_surfaces.find(ToCacheAddr(host_ptr))};
- return it != registered_surfaces.end() ? *it->second.begin() : nullptr;
- }
-
-protected:
- TextureCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer)
- : system{system}, rasterizer{rasterizer} {}
-
- ~TextureCache() = default;
-
- virtual ResultType TryFastGetSurfaceView(TExecutionContext exctx, VAddr cpu_addr, u8* host_ptr,
- const SurfaceParams& params, bool preserve_contents,
- const std::vector<TSurface*>& overlaps) = 0;
-
- virtual std::unique_ptr<TSurface> CreateSurface(const SurfaceParams& params) = 0;
-
- void Register(TSurface* surface, VAddr cpu_addr, u8* host_ptr) {
- surface->Register(cpu_addr, host_ptr);
- registered_surfaces.add({GetSurfaceInterval(surface), {surface}});
- rasterizer.UpdatePagesCachedCount(surface->GetCpuAddr(), surface->GetSizeInBytes(), 1);
- }
-
- void Unregister(TSurface* surface) {
- registered_surfaces.subtract({GetSurfaceInterval(surface), {surface}});
- rasterizer.UpdatePagesCachedCount(surface->GetCpuAddr(), surface->GetSizeInBytes(), -1);
- surface->Unregister();
- }
-
- TSurface* GetUncachedSurface(const SurfaceParams& params) {
- if (TSurface* surface = TryGetReservedSurface(params); surface)
- return surface;
- // No reserved surface available, create a new one and reserve it
- auto new_surface{CreateSurface(params)};
- TSurface* surface{new_surface.get()};
- ReserveSurface(params, std::move(new_surface));
- return surface;
- }
-
- Core::System& system;
-
-private:
- ResultType GetSurfaceView(TExecutionContext exctx, VAddr cpu_addr, const SurfaceParams& params,
- bool preserve_contents) {
- const auto host_ptr{Memory::GetPointer(cpu_addr)};
- const auto cache_addr{ToCacheAddr(host_ptr)};
- const auto overlaps{GetSurfacesInRegion(cache_addr, params.GetGuestSizeInBytes())};
- if (overlaps.empty()) {
- return LoadSurfaceView(exctx, cpu_addr, host_ptr, params, preserve_contents);
- }
-
- if (overlaps.size() == 1) {
- if (TView* view = overlaps[0]->TryGetView(cpu_addr, params); view)
- return {view, exctx};
- }
-
- TView* fast_view;
- std::tie(fast_view, exctx) =
- TryFastGetSurfaceView(exctx, cpu_addr, host_ptr, params, preserve_contents, overlaps);
-
- for (TSurface* surface : overlaps) {
- if (!fast_view) {
- // Flush even when we don't care about the contents, to preserve memory not written
- // by the new surface.
- exctx = surface->FlushBuffer(exctx);
- }
- Unregister(surface);
- }
-
- if (fast_view) {
- return {fast_view, exctx};
- }
-
- return LoadSurfaceView(exctx, cpu_addr, host_ptr, params, preserve_contents);
- }
-
- ResultType LoadSurfaceView(TExecutionContext exctx, VAddr cpu_addr, u8* host_ptr,
- const SurfaceParams& params, bool preserve_contents) {
- TSurface* new_surface{GetUncachedSurface(params)};
- Register(new_surface, cpu_addr, host_ptr);
- if (preserve_contents) {
- exctx = LoadSurface(exctx, new_surface);
- }
- return {new_surface->GetView(cpu_addr, params), exctx};
- }
-
- TExecutionContext LoadSurface(TExecutionContext exctx, TSurface* surface) {
- surface->LoadBuffer();
- exctx = surface->UploadTexture(exctx);
- surface->MarkAsModified(false);
- return exctx;
- }
-
- std::vector<TSurface*> GetSurfacesInRegion(CacheAddr cache_addr, std::size_t size) const {
- if (size == 0) {
- return {};
- }
- const IntervalType interval{cache_addr, cache_addr + size};
-
- std::vector<TSurface*> surfaces;
- for (auto& pair : boost::make_iterator_range(registered_surfaces.equal_range(interval))) {
- surfaces.push_back(*pair.second.begin());
- }
- return surfaces;
- }
-
- void ReserveSurface(const SurfaceParams& params, std::unique_ptr<TSurface> surface) {
- surface_reserve[params].push_back(std::move(surface));
- }
-
- TSurface* TryGetReservedSurface(const SurfaceParams& params) {
- auto search{surface_reserve.find(params)};
- if (search == surface_reserve.end()) {
- return {};
- }
- for (auto& surface : search->second) {
- if (!surface->IsRegistered()) {
- return surface.get();
- }
- }
- return {};
- }
-
- IntervalType GetSurfaceInterval(TSurface* surface) const {
- return IntervalType::right_open(surface->GetCacheAddr(),
- surface->GetCacheAddr() + surface->GetSizeInBytes());
- }
-
- VideoCore::RasterizerInterface& rasterizer;
-
- IntervalMap registered_surfaces;
-
-    /// The surface reserve is a "backup" cache: this is where we put unique surfaces that have
-    /// previously been used. This prevents surfaces from being constantly created and destroyed
-    /// when they are used with different surface parameters.
- std::unordered_map<SurfaceParams, std::list<std::unique_ptr<TSurface>>> surface_reserve;
-};
-
-} // namespace VideoCommon
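Two details of the removed cache are worth noting: every lookup threads a
TExecutionContext value through and returns it in a (view, context) tuple, and
registered surfaces are tracked in a boost::icl::interval_map keyed by cache
address. A self-contained sketch of the interval query pattern used by
GetSurfacesInRegion, with simplified stand-in types:

    #include <cstddef>
    #include <cstdint>
    #include <set>
    #include <vector>

    #include <boost/icl/interval_map.hpp>
    #include <boost/range/iterator_range.hpp>

    using CacheAddr = std::uintptr_t;

    struct Surface {};
    using IntervalMap = boost::icl::interval_map<CacheAddr, std::set<Surface*>>;
    using IntervalType = IntervalMap::interval_type;

    std::vector<Surface*> SurfacesInRegion(const IntervalMap& map, CacheAddr addr,
                                           std::size_t size) {
        std::vector<Surface*> result;
        if (size == 0) {
            return result;
        }
        const IntervalType interval{addr, addr + size};
        for (auto& pair : boost::make_iterator_range(map.equal_range(interval))) {
            // Each overlapping interval carries a set of surfaces; take one
            // representative per entry, as the removed code does.
            result.push_back(*pair.second.begin());
        }
        return result;
    }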
diff --git a/src/video_core/texture_cache/copy_params.h b/src/video_core/texture_cache/copy_params.h
new file mode 100644
index 000000000..9c21a0649
--- /dev/null
+++ b/src/video_core/texture_cache/copy_params.h
@@ -0,0 +1,36 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "common/common_types.h"
+
+namespace VideoCommon {
+
+struct CopyParams {
+ constexpr CopyParams(u32 source_x, u32 source_y, u32 source_z, u32 dest_x, u32 dest_y,
+ u32 dest_z, u32 source_level, u32 dest_level, u32 width, u32 height,
+ u32 depth)
+ : source_x{source_x}, source_y{source_y}, source_z{source_z}, dest_x{dest_x},
+ dest_y{dest_y}, dest_z{dest_z}, source_level{source_level},
+ dest_level{dest_level}, width{width}, height{height}, depth{depth} {}
+
+ constexpr CopyParams(u32 width, u32 height, u32 depth, u32 level)
+ : source_x{}, source_y{}, source_z{}, dest_x{}, dest_y{}, dest_z{}, source_level{level},
+ dest_level{level}, width{width}, height{height}, depth{depth} {}
+
+ u32 source_x;
+ u32 source_y;
+ u32 source_z;
+ u32 dest_x;
+ u32 dest_y;
+ u32 dest_z;
+ u32 source_level;
+ u32 dest_level;
+ u32 width;
+ u32 height;
+ u32 depth;
+};
+
+} // namespace VideoCommon
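For reference, the two constructors above cover the two call sites the cache
needs: a fully specified copy between two surfaces, and a whole-level copy
where all coordinates are zero. Illustrative values only:

    // Copy a 16x16x1 region from level 0 at (0,0,0) to level 1 at (8,8,0).
    VideoCommon::CopyParams full_copy(0, 0, 0, 8, 8, 0, 0, 1, 16, 16, 1);

    // Describe all of level 0 of a 16x16x1 surface (source == dest).
    VideoCommon::CopyParams whole_level(16, 16, 1, 0);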
diff --git a/src/video_core/texture_cache/surface_base.cpp b/src/video_core/texture_cache/surface_base.cpp
new file mode 100644
index 000000000..683c49207
--- /dev/null
+++ b/src/video_core/texture_cache/surface_base.cpp
@@ -0,0 +1,302 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "common/microprofile.h"
+#include "video_core/memory_manager.h"
+#include "video_core/texture_cache/surface_base.h"
+#include "video_core/texture_cache/surface_params.h"
+#include "video_core/textures/convert.h"
+
+namespace VideoCommon {
+
+MICROPROFILE_DEFINE(GPU_Load_Texture, "GPU", "Texture Load", MP_RGB(128, 192, 128));
+MICROPROFILE_DEFINE(GPU_Flush_Texture, "GPU", "Texture Flush", MP_RGB(128, 192, 128));
+
+using Tegra::Texture::ConvertFromGuestToHost;
+using VideoCore::MortonSwizzleMode;
+using VideoCore::Surface::SurfaceCompression;
+
+StagingCache::StagingCache() = default;
+
+StagingCache::~StagingCache() = default;
+
+SurfaceBaseImpl::SurfaceBaseImpl(GPUVAddr gpu_addr, const SurfaceParams& params)
+ : params{params}, host_memory_size{params.GetHostSizeInBytes()}, gpu_addr{gpu_addr},
+ mipmap_sizes(params.num_levels), mipmap_offsets(params.num_levels) {
+ std::size_t offset = 0;
+ for (u32 level = 0; level < params.num_levels; ++level) {
+ const std::size_t mipmap_size{params.GetGuestMipmapSize(level)};
+ mipmap_sizes[level] = mipmap_size;
+ mipmap_offsets[level] = offset;
+ offset += mipmap_size;
+ }
+ layer_size = offset;
+ if (params.is_layered) {
+ if (params.is_tiled) {
+ layer_size =
+ SurfaceParams::AlignLayered(layer_size, params.block_height, params.block_depth);
+ }
+ guest_memory_size = layer_size * params.depth;
+ } else {
+ guest_memory_size = layer_size;
+ }
+}
+
+MatchTopologyResult SurfaceBaseImpl::MatchesTopology(const SurfaceParams& rhs) const {
+ const u32 src_bpp{params.GetBytesPerPixel()};
+ const u32 dst_bpp{rhs.GetBytesPerPixel()};
+ const bool ib1 = params.IsBuffer();
+ const bool ib2 = rhs.IsBuffer();
+ if (std::tie(src_bpp, params.is_tiled, ib1) == std::tie(dst_bpp, rhs.is_tiled, ib2)) {
+ const bool cb1 = params.IsCompressed();
+ const bool cb2 = rhs.IsCompressed();
+ if (cb1 == cb2) {
+ return MatchTopologyResult::FullMatch;
+ }
+ return MatchTopologyResult::CompressUnmatch;
+ }
+ return MatchTopologyResult::None;
+}
+
+MatchStructureResult SurfaceBaseImpl::MatchesStructure(const SurfaceParams& rhs) const {
+    // Buffer surface check
+ if (params.IsBuffer()) {
+ const std::size_t wd1 = params.width * params.GetBytesPerPixel();
+ const std::size_t wd2 = rhs.width * rhs.GetBytesPerPixel();
+ if (wd1 == wd2) {
+ return MatchStructureResult::FullMatch;
+ }
+ return MatchStructureResult::None;
+ }
+
+    // Linear surface check
+ if (!params.is_tiled) {
+ if (std::tie(params.height, params.pitch) == std::tie(rhs.height, rhs.pitch)) {
+ if (params.width == rhs.width) {
+ return MatchStructureResult::FullMatch;
+ } else {
+ return MatchStructureResult::SemiMatch;
+ }
+ }
+ return MatchStructureResult::None;
+ }
+
+    // Tiled surface check
+ if (std::tie(params.depth, params.block_width, params.block_height, params.block_depth,
+ params.tile_width_spacing, params.num_levels) ==
+ std::tie(rhs.depth, rhs.block_width, rhs.block_height, rhs.block_depth,
+ rhs.tile_width_spacing, rhs.num_levels)) {
+ if (std::tie(params.width, params.height) == std::tie(rhs.width, rhs.height)) {
+ return MatchStructureResult::FullMatch;
+ }
+ const u32 ws = SurfaceParams::ConvertWidth(rhs.GetBlockAlignedWidth(), params.pixel_format,
+ rhs.pixel_format);
+ const u32 hs =
+ SurfaceParams::ConvertHeight(rhs.height, params.pixel_format, rhs.pixel_format);
+ const u32 w1 = params.GetBlockAlignedWidth();
+ if (std::tie(w1, params.height) == std::tie(ws, hs)) {
+ return MatchStructureResult::SemiMatch;
+ }
+ }
+ return MatchStructureResult::None;
+}
+
+std::optional<std::pair<u32, u32>> SurfaceBaseImpl::GetLayerMipmap(
+ const GPUVAddr candidate_gpu_addr) const {
+ if (gpu_addr == candidate_gpu_addr) {
+ return {{0, 0}};
+ }
+ if (candidate_gpu_addr < gpu_addr) {
+ return {};
+ }
+ const auto relative_address{static_cast<GPUVAddr>(candidate_gpu_addr - gpu_addr)};
+ const auto layer{static_cast<u32>(relative_address / layer_size)};
+ const GPUVAddr mipmap_address = relative_address - layer_size * layer;
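+    // mipmap_offsets was built in ascending order in the constructor, so an
+    // exact-match binary search can find the level starting at this address.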
+ const auto mipmap_it =
+ Common::BinaryFind(mipmap_offsets.begin(), mipmap_offsets.end(), mipmap_address);
+ if (mipmap_it == mipmap_offsets.end()) {
+ return {};
+ }
+ const auto level{static_cast<u32>(std::distance(mipmap_offsets.begin(), mipmap_it))};
+ return std::make_pair(layer, level);
+}
+
+std::vector<CopyParams> SurfaceBaseImpl::BreakDownLayered(const SurfaceParams& in_params) const {
+ const u32 layers{params.depth};
+ const u32 mipmaps{params.num_levels};
+ std::vector<CopyParams> result;
+ result.reserve(static_cast<std::size_t>(layers) * static_cast<std::size_t>(mipmaps));
+
+ for (u32 layer = 0; layer < layers; layer++) {
+ for (u32 level = 0; level < mipmaps; level++) {
+ const u32 width = SurfaceParams::IntersectWidth(params, in_params, level, level);
+ const u32 height = SurfaceParams::IntersectHeight(params, in_params, level, level);
+ result.emplace_back(width, height, layer, level);
+ }
+ }
+ return result;
+}
+
+std::vector<CopyParams> SurfaceBaseImpl::BreakDownNonLayered(const SurfaceParams& in_params) const {
+ const u32 mipmaps{params.num_levels};
+ std::vector<CopyParams> result;
+ result.reserve(mipmaps);
+
+ for (u32 level = 0; level < mipmaps; level++) {
+ const u32 width = SurfaceParams::IntersectWidth(params, in_params, level, level);
+ const u32 height = SurfaceParams::IntersectHeight(params, in_params, level, level);
+ const u32 depth{std::min(params.GetMipDepth(level), in_params.GetMipDepth(level))};
+ result.emplace_back(width, height, depth, level);
+ }
+ return result;
+}
+
+void SurfaceBaseImpl::SwizzleFunc(MortonSwizzleMode mode, u8* memory, const SurfaceParams& params,
+ u8* buffer, u32 level) {
+ const u32 width{params.GetMipWidth(level)};
+ const u32 height{params.GetMipHeight(level)};
+ const u32 block_height{params.GetMipBlockHeight(level)};
+ const u32 block_depth{params.GetMipBlockDepth(level)};
+
+ std::size_t guest_offset{mipmap_offsets[level]};
+ if (params.is_layered) {
+ std::size_t host_offset{0};
+ const std::size_t guest_stride = layer_size;
+ const std::size_t host_stride = params.GetHostLayerSize(level);
+ for (u32 layer = 0; layer < params.depth; ++layer) {
+ MortonSwizzle(mode, params.pixel_format, width, block_height, height, block_depth, 1,
+ params.tile_width_spacing, buffer + host_offset, memory + guest_offset);
+ guest_offset += guest_stride;
+ host_offset += host_stride;
+ }
+ } else {
+ MortonSwizzle(mode, params.pixel_format, width, block_height, height, block_depth,
+ params.GetMipDepth(level), params.tile_width_spacing, buffer,
+ memory + guest_offset);
+ }
+}
+
+void SurfaceBaseImpl::LoadBuffer(Tegra::MemoryManager& memory_manager,
+ StagingCache& staging_cache) {
+ MICROPROFILE_SCOPE(GPU_Load_Texture);
+ auto& staging_buffer = staging_cache.GetBuffer(0);
+ u8* host_ptr;
+ is_continuous = memory_manager.IsBlockContinuous(gpu_addr, guest_memory_size);
+
+    // Handle continuity
+ if (is_continuous) {
+ // Use physical memory directly
+ host_ptr = memory_manager.GetPointer(gpu_addr);
+ if (!host_ptr) {
+ return;
+ }
+ } else {
+        // Use an extra temporary buffer
+ auto& tmp_buffer = staging_cache.GetBuffer(1);
+ tmp_buffer.resize(guest_memory_size);
+ host_ptr = tmp_buffer.data();
+ memory_manager.ReadBlockUnsafe(gpu_addr, host_ptr, guest_memory_size);
+ }
+
+ if (params.is_tiled) {
+ ASSERT_MSG(params.block_width == 0, "Block width is defined as {} on texture target {}",
+ params.block_width, static_cast<u32>(params.target));
+ for (u32 level = 0; level < params.num_levels; ++level) {
+ const std::size_t host_offset{params.GetHostMipmapLevelOffset(level)};
+ SwizzleFunc(MortonSwizzleMode::MortonToLinear, host_ptr, params,
+ staging_buffer.data() + host_offset, level);
+ }
+ } else {
+ ASSERT_MSG(params.num_levels == 1, "Linear mipmap loading is not implemented");
+ const u32 bpp{params.GetBytesPerPixel()};
+ const u32 block_width{params.GetDefaultBlockWidth()};
+ const u32 block_height{params.GetDefaultBlockHeight()};
+ const u32 width{(params.width + block_width - 1) / block_width};
+ const u32 height{(params.height + block_height - 1) / block_height};
+ const u32 copy_size{width * bpp};
+ if (params.pitch == copy_size) {
+ std::memcpy(staging_buffer.data(), host_ptr, params.GetHostSizeInBytes());
+ } else {
+ const u8* start{host_ptr};
+ u8* write_to{staging_buffer.data()};
+ for (u32 h = height; h > 0; --h) {
+ std::memcpy(write_to, start, copy_size);
+ start += params.pitch;
+ write_to += copy_size;
+ }
+ }
+ }
+
+ auto compression_type = params.GetCompressionType();
+ if (compression_type == SurfaceCompression::None ||
+ compression_type == SurfaceCompression::Compressed)
+ return;
+
+ for (u32 level_up = params.num_levels; level_up > 0; --level_up) {
+ const u32 level = level_up - 1;
+ const std::size_t in_host_offset{params.GetHostMipmapLevelOffset(level)};
+ const std::size_t out_host_offset = compression_type == SurfaceCompression::Rearranged
+ ? in_host_offset
+ : params.GetConvertedMipmapOffset(level);
+ u8* in_buffer = staging_buffer.data() + in_host_offset;
+ u8* out_buffer = staging_buffer.data() + out_host_offset;
+ ConvertFromGuestToHost(in_buffer, out_buffer, params.pixel_format,
+ params.GetMipWidth(level), params.GetMipHeight(level),
+ params.GetMipDepth(level), true, true);
+ }
+}
+
+void SurfaceBaseImpl::FlushBuffer(Tegra::MemoryManager& memory_manager,
+ StagingCache& staging_cache) {
+ MICROPROFILE_SCOPE(GPU_Flush_Texture);
+ auto& staging_buffer = staging_cache.GetBuffer(0);
+ u8* host_ptr;
+
+    // Handle continuity
+ if (is_continuous) {
+ // Use physical memory directly
+ host_ptr = memory_manager.GetPointer(gpu_addr);
+ if (!host_ptr) {
+ return;
+ }
+ } else {
+        // Use an extra temporary buffer
+ auto& tmp_buffer = staging_cache.GetBuffer(1);
+ tmp_buffer.resize(guest_memory_size);
+ host_ptr = tmp_buffer.data();
+ }
+
+ if (params.is_tiled) {
+ ASSERT_MSG(params.block_width == 0, "Block width is defined as {}", params.block_width);
+ for (u32 level = 0; level < params.num_levels; ++level) {
+ const std::size_t host_offset{params.GetHostMipmapLevelOffset(level)};
+ SwizzleFunc(MortonSwizzleMode::LinearToMorton, host_ptr, params,
+ staging_buffer.data() + host_offset, level);
+ }
+ } else {
+ ASSERT(params.target == SurfaceTarget::Texture2D);
+ ASSERT(params.num_levels == 1);
+
+ const u32 bpp{params.GetBytesPerPixel()};
+ const u32 copy_size{params.width * bpp};
+ if (params.pitch == copy_size) {
+ std::memcpy(host_ptr, staging_buffer.data(), guest_memory_size);
+ } else {
+ u8* start{host_ptr};
+ const u8* read_to{staging_buffer.data()};
+ for (u32 h = params.height; h > 0; --h) {
+ std::memcpy(start, read_to, copy_size);
+ start += params.pitch;
+ read_to += copy_size;
+ }
+ }
+ }
+ if (!is_continuous) {
+ memory_manager.WriteBlockUnsafe(gpu_addr, host_ptr, guest_memory_size);
+ }
+}
+
+} // namespace VideoCommon
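The address resolution in GetLayerMipmap above relies on the [layer][mipmap]
layout computed in the constructor: the layer index is the quotient by
layer_size, and the mipmap is an exact binary search of mipmap_offsets. A
hedged standalone stand-in (not the class member itself), with std::lower_bound
playing the role of Common::BinaryFind:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <optional>
    #include <utility>
    #include <vector>

    std::optional<std::pair<std::uint32_t, std::uint32_t>> LayerMipmapOf(
        std::uint64_t relative_address, std::size_t layer_size,
        const std::vector<std::size_t>& mipmap_offsets) {
        const auto layer = static_cast<std::uint32_t>(relative_address / layer_size);
        const auto mipmap_address =
            static_cast<std::size_t>(relative_address - layer * layer_size);
        const auto it =
            std::lower_bound(mipmap_offsets.begin(), mipmap_offsets.end(), mipmap_address);
        if (it == mipmap_offsets.end() || *it != mipmap_address) {
            return std::nullopt; // Address does not start exactly at a mipmap boundary
        }
        const auto level = static_cast<std::uint32_t>(std::distance(mipmap_offsets.begin(), it));
        return std::make_pair(layer, level);
    }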
diff --git a/src/video_core/texture_cache/surface_base.h b/src/video_core/texture_cache/surface_base.h
new file mode 100644
index 000000000..5e497e49f
--- /dev/null
+++ b/src/video_core/texture_cache/surface_base.h
@@ -0,0 +1,325 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <algorithm>
+#include <unordered_map>
+#include <vector>
+
+#include "common/assert.h"
+#include "common/binary_find.h"
+#include "common/common_types.h"
+#include "video_core/gpu.h"
+#include "video_core/morton.h"
+#include "video_core/texture_cache/copy_params.h"
+#include "video_core/texture_cache/surface_params.h"
+#include "video_core/texture_cache/surface_view.h"
+
+namespace Tegra {
+class MemoryManager;
+}
+
+namespace VideoCommon {
+
+using VideoCore::MortonSwizzleMode;
+using VideoCore::Surface::SurfaceTarget;
+
+enum class MatchStructureResult : u32 {
+ FullMatch = 0,
+ SemiMatch = 1,
+ None = 2,
+};
+
+enum class MatchTopologyResult : u32 {
+ FullMatch = 0,
+ CompressUnmatch = 1,
+ None = 2,
+};
+
+class StagingCache {
+public:
+ explicit StagingCache();
+ ~StagingCache();
+
+ std::vector<u8>& GetBuffer(std::size_t index) {
+ return staging_buffer[index];
+ }
+
+ const std::vector<u8>& GetBuffer(std::size_t index) const {
+ return staging_buffer[index];
+ }
+
+ void SetSize(std::size_t size) {
+ staging_buffer.resize(size);
+ }
+
+private:
+ std::vector<std::vector<u8>> staging_buffer;
+};
+
+class SurfaceBaseImpl {
+public:
+ void LoadBuffer(Tegra::MemoryManager& memory_manager, StagingCache& staging_cache);
+
+ void FlushBuffer(Tegra::MemoryManager& memory_manager, StagingCache& staging_cache);
+
+ GPUVAddr GetGpuAddr() const {
+ return gpu_addr;
+ }
+
+ bool Overlaps(const CacheAddr start, const CacheAddr end) const {
+ return (cache_addr < end) && (cache_addr_end > start);
+ }
+
+ bool IsInside(const GPUVAddr other_start, const GPUVAddr other_end) {
+ const GPUVAddr gpu_addr_end = gpu_addr + guest_memory_size;
+ return (gpu_addr <= other_start && other_end <= gpu_addr_end);
+ }
+
+ // Use only when recycling a surface
+ void SetGpuAddr(const GPUVAddr new_addr) {
+ gpu_addr = new_addr;
+ }
+
+ VAddr GetCpuAddr() const {
+ return cpu_addr;
+ }
+
+ void SetCpuAddr(const VAddr new_addr) {
+ cpu_addr = new_addr;
+ }
+
+ CacheAddr GetCacheAddr() const {
+ return cache_addr;
+ }
+
+ CacheAddr GetCacheAddrEnd() const {
+ return cache_addr_end;
+ }
+
+ void SetCacheAddr(const CacheAddr new_addr) {
+ cache_addr = new_addr;
+ cache_addr_end = new_addr + guest_memory_size;
+ }
+
+ const SurfaceParams& GetSurfaceParams() const {
+ return params;
+ }
+
+ std::size_t GetSizeInBytes() const {
+ return guest_memory_size;
+ }
+
+ std::size_t GetHostSizeInBytes() const {
+ return host_memory_size;
+ }
+
+ std::size_t GetMipmapSize(const u32 level) const {
+ return mipmap_sizes[level];
+ }
+
+ void MarkAsContinuous(const bool is_continuous) {
+ this->is_continuous = is_continuous;
+ }
+
+ bool IsContinuous() const {
+ return is_continuous;
+ }
+
+ bool IsLinear() const {
+ return !params.is_tiled;
+ }
+
+ bool MatchFormat(VideoCore::Surface::PixelFormat pixel_format) const {
+ return params.pixel_format == pixel_format;
+ }
+
+ VideoCore::Surface::PixelFormat GetFormat() const {
+ return params.pixel_format;
+ }
+
+ bool MatchTarget(VideoCore::Surface::SurfaceTarget target) const {
+ return params.target == target;
+ }
+
+ MatchTopologyResult MatchesTopology(const SurfaceParams& rhs) const;
+
+ MatchStructureResult MatchesStructure(const SurfaceParams& rhs) const;
+
+ bool MatchesSubTexture(const SurfaceParams& rhs, const GPUVAddr other_gpu_addr) const {
+ return std::tie(gpu_addr, params.target, params.num_levels) ==
+ std::tie(other_gpu_addr, rhs.target, rhs.num_levels) &&
+ params.target == SurfaceTarget::Texture2D && params.num_levels == 1;
+ }
+
+ std::optional<std::pair<u32, u32>> GetLayerMipmap(const GPUVAddr candidate_gpu_addr) const;
+
+ std::vector<CopyParams> BreakDown(const SurfaceParams& in_params) const {
+ return params.is_layered ? BreakDownLayered(in_params) : BreakDownNonLayered(in_params);
+ }
+
+protected:
+ explicit SurfaceBaseImpl(GPUVAddr gpu_addr, const SurfaceParams& params);
+ ~SurfaceBaseImpl() = default;
+
+ virtual void DecorateSurfaceName() = 0;
+
+ const SurfaceParams params;
+ std::size_t layer_size;
+ std::size_t guest_memory_size;
+ const std::size_t host_memory_size;
+ GPUVAddr gpu_addr{};
+ CacheAddr cache_addr{};
+ CacheAddr cache_addr_end{};
+ VAddr cpu_addr{};
+ bool is_continuous{};
+
+ std::vector<std::size_t> mipmap_sizes;
+ std::vector<std::size_t> mipmap_offsets;
+
+private:
+ void SwizzleFunc(MortonSwizzleMode mode, u8* memory, const SurfaceParams& params, u8* buffer,
+ u32 level);
+
+ std::vector<CopyParams> BreakDownLayered(const SurfaceParams& in_params) const;
+
+ std::vector<CopyParams> BreakDownNonLayered(const SurfaceParams& in_params) const;
+};
+
+template <typename TView>
+class SurfaceBase : public SurfaceBaseImpl {
+public:
+ virtual void UploadTexture(const std::vector<u8>& staging_buffer) = 0;
+
+ virtual void DownloadTexture(std::vector<u8>& staging_buffer) = 0;
+
+ void MarkAsModified(bool is_modified_, u64 tick) {
+ is_modified = is_modified_ || is_target;
+ modification_tick = tick;
+ }
+
+ void MarkAsRenderTarget(bool is_target_, u32 index_) {
+ is_target = is_target_;
+ index = index_;
+ }
+
+ void MarkAsPicked(bool is_picked_) {
+ is_picked = is_picked_;
+ }
+
+ bool IsModified() const {
+ return is_modified;
+ }
+
+ bool IsProtected() const {
+        // Only 3D slices need to be protected
+ return is_target && params.block_depth > 0;
+ }
+
+ bool IsRenderTarget() const {
+ return is_target;
+ }
+
+ u32 GetRenderTarget() const {
+ return index;
+ }
+
+ bool IsRegistered() const {
+ return is_registered;
+ }
+
+ bool IsPicked() const {
+ return is_picked;
+ }
+
+ void MarkAsRegistered(bool is_reg) {
+ is_registered = is_reg;
+ }
+
+ u64 GetModificationTick() const {
+ return modification_tick;
+ }
+
+ TView EmplaceOverview(const SurfaceParams& overview_params) {
+ const u32 num_layers{(params.is_layered && !overview_params.is_layered) ? 1 : params.depth};
+ return GetView(ViewParams(overview_params.target, 0, num_layers, 0, params.num_levels));
+ }
+
+ std::optional<TView> EmplaceIrregularView(const SurfaceParams& view_params,
+ const GPUVAddr view_addr,
+ const std::size_t candidate_size, const u32 mipmap,
+ const u32 layer) {
+ const auto layer_mipmap{GetLayerMipmap(view_addr + candidate_size)};
+ if (!layer_mipmap) {
+ return {};
+ }
+ const u32 end_layer{layer_mipmap->first};
+ const u32 end_mipmap{layer_mipmap->second};
+ if (layer != end_layer) {
+ if (mipmap == 0 && end_mipmap == 0) {
+ return GetView(ViewParams(view_params.target, layer, end_layer - layer + 1, 0, 1));
+ }
+ return {};
+ } else {
+ return GetView(
+ ViewParams(view_params.target, layer, 1, mipmap, end_mipmap - mipmap + 1));
+ }
+ }
+
+ std::optional<TView> EmplaceView(const SurfaceParams& view_params, const GPUVAddr view_addr,
+ const std::size_t candidate_size) {
+ if (params.target == SurfaceTarget::Texture3D ||
+ (params.num_levels == 1 && !params.is_layered) ||
+ view_params.target == SurfaceTarget::Texture3D) {
+ return {};
+ }
+ const auto layer_mipmap{GetLayerMipmap(view_addr)};
+ if (!layer_mipmap) {
+ return {};
+ }
+ const u32 layer{layer_mipmap->first};
+ const u32 mipmap{layer_mipmap->second};
+ if (GetMipmapSize(mipmap) != candidate_size) {
+ return EmplaceIrregularView(view_params, view_addr, candidate_size, mipmap, layer);
+ }
+ return GetView(ViewParams(view_params.target, layer, 1, mipmap, 1));
+ }
+
+ TView GetMainView() const {
+ return main_view;
+ }
+
+protected:
+ explicit SurfaceBase(const GPUVAddr gpu_addr, const SurfaceParams& params)
+ : SurfaceBaseImpl(gpu_addr, params) {}
+
+ ~SurfaceBase() = default;
+
+ virtual TView CreateView(const ViewParams& view_key) = 0;
+
+ TView main_view;
+ std::unordered_map<ViewParams, TView> views;
+
+private:
+ TView GetView(const ViewParams& key) {
+ const auto [entry, is_cache_miss] = views.try_emplace(key);
+ auto& view{entry->second};
+ if (is_cache_miss) {
+ view = CreateView(key);
+ }
+ return view;
+ }
+
+ static constexpr u32 NO_RT = 0xFFFFFFFF;
+
+ bool is_modified{};
+ bool is_target{};
+ bool is_registered{};
+ bool is_picked{};
+ u32 index{NO_RT};
+ u64 modification_tick{};
+};
+
+} // namespace VideoCommon
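The private GetView above memoizes views with try_emplace: the bool it returns
distinguishes a fresh insertion from a hit, so CreateView only runs on a cache
miss. A minimal sketch of the same pattern with illustrative types:

    #include <string>
    #include <unordered_map>

    std::string ExpensiveCreate(int key) {
        return "view-" + std::to_string(key); // Stand-in for CreateView(key)
    }

    std::string GetOrCreate(std::unordered_map<int, std::string>& views, int key) {
        const auto [entry, is_cache_miss] = views.try_emplace(key);
        if (is_cache_miss) {
            entry->second = ExpensiveCreate(key); // Only built once per key
        }
        return entry->second;
    }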
diff --git a/src/video_core/texture_cache/surface_params.cpp b/src/video_core/texture_cache/surface_params.cpp
new file mode 100644
index 000000000..1e4d3fb79
--- /dev/null
+++ b/src/video_core/texture_cache/surface_params.cpp
@@ -0,0 +1,389 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <map>
+
+#include "common/alignment.h"
+#include "common/bit_util.h"
+#include "core/core.h"
+#include "video_core/engines/shader_bytecode.h"
+#include "video_core/surface.h"
+#include "video_core/texture_cache/surface_params.h"
+
+namespace VideoCommon {
+
+using VideoCore::Surface::ComponentTypeFromDepthFormat;
+using VideoCore::Surface::ComponentTypeFromRenderTarget;
+using VideoCore::Surface::ComponentTypeFromTexture;
+using VideoCore::Surface::PixelFormat;
+using VideoCore::Surface::PixelFormatFromDepthFormat;
+using VideoCore::Surface::PixelFormatFromRenderTargetFormat;
+using VideoCore::Surface::PixelFormatFromTextureFormat;
+using VideoCore::Surface::SurfaceTarget;
+using VideoCore::Surface::SurfaceTargetFromTextureType;
+using VideoCore::Surface::SurfaceType;
+
+namespace {
+
+SurfaceTarget TextureTypeToSurfaceTarget(Tegra::Shader::TextureType type, bool is_array) {
+ switch (type) {
+ case Tegra::Shader::TextureType::Texture1D:
+ return is_array ? SurfaceTarget::Texture1DArray : SurfaceTarget::Texture1D;
+ case Tegra::Shader::TextureType::Texture2D:
+ return is_array ? SurfaceTarget::Texture2DArray : SurfaceTarget::Texture2D;
+ case Tegra::Shader::TextureType::Texture3D:
+ ASSERT(!is_array);
+ return SurfaceTarget::Texture3D;
+ case Tegra::Shader::TextureType::TextureCube:
+ return is_array ? SurfaceTarget::TextureCubeArray : SurfaceTarget::TextureCubemap;
+ default:
+ UNREACHABLE();
+ return SurfaceTarget::Texture2D;
+ }
+}
+
+SurfaceTarget ImageTypeToSurfaceTarget(Tegra::Shader::ImageType type) {
+ switch (type) {
+ case Tegra::Shader::ImageType::Texture1D:
+ return SurfaceTarget::Texture1D;
+ case Tegra::Shader::ImageType::TextureBuffer:
+ return SurfaceTarget::TextureBuffer;
+ case Tegra::Shader::ImageType::Texture1DArray:
+ return SurfaceTarget::Texture1DArray;
+ case Tegra::Shader::ImageType::Texture2D:
+ return SurfaceTarget::Texture2D;
+ case Tegra::Shader::ImageType::Texture2DArray:
+ return SurfaceTarget::Texture2DArray;
+ case Tegra::Shader::ImageType::Texture3D:
+ return SurfaceTarget::Texture3D;
+ default:
+ UNREACHABLE();
+ return SurfaceTarget::Texture2D;
+ }
+}
+
+constexpr u32 GetMipmapSize(bool uncompressed, u32 mip_size, u32 tile) {
+ return uncompressed ? mip_size : std::max(1U, (mip_size + tile - 1) / tile);
+}
+
+} // Anonymous namespace
+
+SurfaceParams SurfaceParams::CreateForTexture(const Tegra::Texture::TICEntry& tic,
+ const VideoCommon::Shader::Sampler& entry) {
+ SurfaceParams params;
+ params.is_tiled = tic.IsTiled();
+ params.srgb_conversion = tic.IsSrgbConversionEnabled();
+ params.block_width = params.is_tiled ? tic.BlockWidth() : 0,
+ params.block_height = params.is_tiled ? tic.BlockHeight() : 0,
+ params.block_depth = params.is_tiled ? tic.BlockDepth() : 0,
+ params.tile_width_spacing = params.is_tiled ? (1 << tic.tile_width_spacing.Value()) : 1;
+ params.pixel_format =
+ PixelFormatFromTextureFormat(tic.format, tic.r_type.Value(), params.srgb_conversion);
+ params.type = GetFormatType(params.pixel_format);
+ if (entry.IsShadow() && params.type == SurfaceType::ColorTexture) {
+ switch (params.pixel_format) {
+ case PixelFormat::R16U:
+ case PixelFormat::R16F: {
+ params.pixel_format = PixelFormat::Z16;
+ break;
+ }
+ case PixelFormat::R32F: {
+ params.pixel_format = PixelFormat::Z32F;
+ break;
+ }
+ default: {
+ UNIMPLEMENTED_MSG("Unimplemented shadow convert format: {}",
+ static_cast<u32>(params.pixel_format));
+ }
+ }
+ params.type = GetFormatType(params.pixel_format);
+ }
+ params.component_type = ComponentTypeFromTexture(tic.r_type.Value());
+ params.type = GetFormatType(params.pixel_format);
+    // TODO: for 1D buffers we should use the TIC info.
+ if (tic.IsBuffer()) {
+ params.target = SurfaceTarget::TextureBuffer;
+ params.width = tic.Width();
+ params.pitch = params.width * params.GetBytesPerPixel();
+ params.height = 1;
+ params.depth = 1;
+ params.num_levels = 1;
+ params.emulated_levels = 1;
+ params.is_layered = false;
+ } else {
+ params.target = TextureTypeToSurfaceTarget(entry.GetType(), entry.IsArray());
+ params.width = tic.Width();
+ params.height = tic.Height();
+ params.depth = tic.Depth();
+ params.pitch = params.is_tiled ? 0 : tic.Pitch();
+ if (params.target == SurfaceTarget::TextureCubemap ||
+ params.target == SurfaceTarget::TextureCubeArray) {
+ params.depth *= 6;
+ }
+ params.num_levels = tic.max_mip_level + 1;
+ params.emulated_levels = std::min(params.num_levels, params.MaxPossibleMipmap());
+ params.is_layered = params.IsLayered();
+ }
+ return params;
+}
+
+SurfaceParams SurfaceParams::CreateForImage(const Tegra::Texture::TICEntry& tic,
+ const VideoCommon::Shader::Image& entry) {
+ SurfaceParams params;
+ params.is_tiled = tic.IsTiled();
+ params.srgb_conversion = tic.IsSrgbConversionEnabled();
+ params.block_width = params.is_tiled ? tic.BlockWidth() : 0,
+ params.block_height = params.is_tiled ? tic.BlockHeight() : 0,
+ params.block_depth = params.is_tiled ? tic.BlockDepth() : 0,
+ params.tile_width_spacing = params.is_tiled ? (1 << tic.tile_width_spacing.Value()) : 1;
+ params.pixel_format =
+ PixelFormatFromTextureFormat(tic.format, tic.r_type.Value(), params.srgb_conversion);
+ params.type = GetFormatType(params.pixel_format);
+ params.component_type = ComponentTypeFromTexture(tic.r_type.Value());
+ params.type = GetFormatType(params.pixel_format);
+ params.target = ImageTypeToSurfaceTarget(entry.GetType());
+    // TODO: for 1D buffers we should use the TIC info.
+ if (tic.IsBuffer()) {
+ params.target = SurfaceTarget::TextureBuffer;
+ params.width = tic.Width();
+ params.pitch = params.width * params.GetBytesPerPixel();
+ params.height = 1;
+ params.depth = 1;
+ params.num_levels = 1;
+ params.emulated_levels = 1;
+ params.is_layered = false;
+ } else {
+ params.width = tic.Width();
+ params.height = tic.Height();
+ params.depth = tic.Depth();
+ params.pitch = params.is_tiled ? 0 : tic.Pitch();
+ if (params.target == SurfaceTarget::TextureCubemap ||
+ params.target == SurfaceTarget::TextureCubeArray) {
+ params.depth *= 6;
+ }
+ params.num_levels = tic.max_mip_level + 1;
+ params.emulated_levels = std::min(params.num_levels, params.MaxPossibleMipmap());
+ params.is_layered = params.IsLayered();
+ }
+ return params;
+}
+
+SurfaceParams SurfaceParams::CreateForDepthBuffer(
+ Core::System& system, u32 zeta_width, u32 zeta_height, Tegra::DepthFormat format,
+ u32 block_width, u32 block_height, u32 block_depth,
+ Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout type) {
+ SurfaceParams params;
+ params.is_tiled = type == Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout::BlockLinear;
+ params.srgb_conversion = false;
+ params.block_width = std::min(block_width, 5U);
+ params.block_height = std::min(block_height, 5U);
+ params.block_depth = std::min(block_depth, 5U);
+ params.tile_width_spacing = 1;
+ params.pixel_format = PixelFormatFromDepthFormat(format);
+ params.component_type = ComponentTypeFromDepthFormat(format);
+ params.type = GetFormatType(params.pixel_format);
+ params.width = zeta_width;
+ params.height = zeta_height;
+ params.target = SurfaceTarget::Texture2D;
+ params.depth = 1;
+ params.pitch = 0;
+ params.num_levels = 1;
+ params.emulated_levels = 1;
+ params.is_layered = false;
+ return params;
+}
+
+SurfaceParams SurfaceParams::CreateForFramebuffer(Core::System& system, std::size_t index) {
+ const auto& config{system.GPU().Maxwell3D().regs.rt[index]};
+ SurfaceParams params;
+ params.is_tiled =
+ config.memory_layout.type == Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout::BlockLinear;
+ params.srgb_conversion = config.format == Tegra::RenderTargetFormat::BGRA8_SRGB ||
+ config.format == Tegra::RenderTargetFormat::RGBA8_SRGB;
+ params.block_width = config.memory_layout.block_width;
+ params.block_height = config.memory_layout.block_height;
+ params.block_depth = config.memory_layout.block_depth;
+ params.tile_width_spacing = 1;
+ params.pixel_format = PixelFormatFromRenderTargetFormat(config.format);
+ params.component_type = ComponentTypeFromRenderTarget(config.format);
+ params.type = GetFormatType(params.pixel_format);
+ if (params.is_tiled) {
+ params.pitch = 0;
+ params.width = config.width;
+ } else {
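+        // For linear render targets the width register holds the pitch in
+        // bytes, so the pixel width is derived by dividing by bytes per pixel.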
+ const u32 bpp = GetFormatBpp(params.pixel_format) / CHAR_BIT;
+ params.pitch = config.width;
+ params.width = params.pitch / bpp;
+ }
+ params.height = config.height;
+ params.depth = 1;
+ params.target = SurfaceTarget::Texture2D;
+ params.num_levels = 1;
+ params.emulated_levels = 1;
+ params.is_layered = false;
+ return params;
+}
+
+SurfaceParams SurfaceParams::CreateForFermiCopySurface(
+ const Tegra::Engines::Fermi2D::Regs::Surface& config) {
+ SurfaceParams params{};
+ params.is_tiled = !config.linear;
+ params.srgb_conversion = config.format == Tegra::RenderTargetFormat::BGRA8_SRGB ||
+ config.format == Tegra::RenderTargetFormat::RGBA8_SRGB;
+ params.block_width = params.is_tiled ? std::min(config.BlockWidth(), 5U) : 0,
+ params.block_height = params.is_tiled ? std::min(config.BlockHeight(), 5U) : 0,
+ params.block_depth = params.is_tiled ? std::min(config.BlockDepth(), 5U) : 0,
+ params.tile_width_spacing = 1;
+ params.pixel_format = PixelFormatFromRenderTargetFormat(config.format);
+ params.component_type = ComponentTypeFromRenderTarget(config.format);
+ params.type = GetFormatType(params.pixel_format);
+ params.width = config.width;
+ params.height = config.height;
+ params.pitch = config.pitch;
+ // TODO(Rodrigo): Try to guess the surface target from depth and layer parameters
+ params.target = SurfaceTarget::Texture2D;
+ params.depth = 1;
+ params.num_levels = 1;
+ params.emulated_levels = 1;
+ params.is_layered = params.IsLayered();
+ return params;
+}
+
+bool SurfaceParams::IsLayered() const {
+ switch (target) {
+ case SurfaceTarget::Texture1DArray:
+ case SurfaceTarget::Texture2DArray:
+ case SurfaceTarget::TextureCubemap:
+ case SurfaceTarget::TextureCubeArray:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Auto block resizing algorithm from:
+// https://cgit.freedesktop.org/mesa/mesa/tree/src/gallium/drivers/nouveau/nv50/nv50_miptree.c
+u32 SurfaceParams::GetMipBlockHeight(u32 level) const {
+ if (level == 0) {
+ return this->block_height;
+ }
+
+ const u32 height_new{GetMipHeight(level)};
+ const u32 default_block_height{GetDefaultBlockHeight()};
+ const u32 blocks_in_y{(height_new + default_block_height - 1) / default_block_height};
+ const u32 block_height_new = Common::Log2Ceil32(blocks_in_y);
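+    // Clamp between one GOB (2^3 lines) and 16 GOBs (2^7 lines), then rebase
+    // from log2(lines) to log2(GOBs); a GOB is eight lines tall.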
+ return std::clamp(block_height_new, 3U, 7U) - 3U;
+}
+
+u32 SurfaceParams::GetMipBlockDepth(u32 level) const {
+ if (level == 0) {
+ return this->block_depth;
+ }
+ if (is_layered) {
+ return 0;
+ }
+
+ const u32 depth_new{GetMipDepth(level)};
+ const u32 block_depth_new = Common::Log2Ceil32(depth_new);
+ if (block_depth_new > 4) {
+ return 5 - (GetMipBlockHeight(level) >= 2);
+ }
+ return block_depth_new;
+}
+
+std::size_t SurfaceParams::GetGuestMipmapLevelOffset(u32 level) const {
+ std::size_t offset = 0;
+ for (u32 i = 0; i < level; i++) {
+ offset += GetInnerMipmapMemorySize(i, false, false);
+ }
+ return offset;
+}
+
+std::size_t SurfaceParams::GetHostMipmapLevelOffset(u32 level) const {
+ std::size_t offset = 0;
+ for (u32 i = 0; i < level; i++) {
+ offset += GetInnerMipmapMemorySize(i, true, false) * GetNumLayers();
+ }
+ return offset;
+}
+
+std::size_t SurfaceParams::GetConvertedMipmapOffset(u32 level) const {
+ std::size_t offset = 0;
+ for (u32 i = 0; i < level; i++) {
+ offset += GetConvertedMipmapSize(i);
+ }
+ return offset;
+}
+
+std::size_t SurfaceParams::GetConvertedMipmapSize(u32 level) const {
+ constexpr std::size_t rgba8_bpp = 4ULL;
+ const std::size_t width_t = GetMipWidth(level);
+ const std::size_t height_t = GetMipHeight(level);
+ const std::size_t depth_t = is_layered ? depth : GetMipDepth(level);
+ return width_t * height_t * depth_t * rgba8_bpp;
+}
+
+std::size_t SurfaceParams::GetLayerSize(bool as_host_size, bool uncompressed) const {
+ std::size_t size = 0;
+ for (u32 level = 0; level < num_levels; ++level) {
+ size += GetInnerMipmapMemorySize(level, as_host_size, uncompressed);
+ }
+ if (is_tiled && is_layered) {
+ return Common::AlignBits(size,
+ Tegra::Texture::GetGOBSizeShift() + block_height + block_depth);
+ }
+ return size;
+}
+
+std::size_t SurfaceParams::GetInnerMipmapMemorySize(u32 level, bool as_host_size,
+ bool uncompressed) const {
+ const u32 width{GetMipmapSize(uncompressed, GetMipWidth(level), GetDefaultBlockWidth())};
+ const u32 height{GetMipmapSize(uncompressed, GetMipHeight(level), GetDefaultBlockHeight())};
+ const u32 depth{is_layered ? 1U : GetMipDepth(level)};
+ if (is_tiled) {
+ return Tegra::Texture::CalculateSize(!as_host_size, GetBytesPerPixel(), width, height,
+ depth, GetMipBlockHeight(level),
+ GetMipBlockDepth(level));
+ } else if (as_host_size || IsBuffer()) {
+ return GetBytesPerPixel() * width * height * depth;
+ } else {
+        // Linear texture case
+ return pitch * height * depth;
+ }
+}
+
+bool SurfaceParams::operator==(const SurfaceParams& rhs) const {
+ return std::tie(is_tiled, block_width, block_height, block_depth, tile_width_spacing, width,
+ height, depth, pitch, num_levels, pixel_format, component_type, type, target) ==
+ std::tie(rhs.is_tiled, rhs.block_width, rhs.block_height, rhs.block_depth,
+ rhs.tile_width_spacing, rhs.width, rhs.height, rhs.depth, rhs.pitch,
+ rhs.num_levels, rhs.pixel_format, rhs.component_type, rhs.type, rhs.target);
+}
+
+std::string SurfaceParams::TargetName() const {
+ switch (target) {
+ case SurfaceTarget::Texture1D:
+ return "1D";
+ case SurfaceTarget::TextureBuffer:
+ return "TexBuffer";
+ case SurfaceTarget::Texture2D:
+ return "2D";
+ case SurfaceTarget::Texture3D:
+ return "3D";
+ case SurfaceTarget::Texture1DArray:
+ return "1DArray";
+ case SurfaceTarget::Texture2DArray:
+ return "2DArray";
+ case SurfaceTarget::TextureCubemap:
+ return "Cube";
+ case SurfaceTarget::TextureCubeArray:
+ return "CubeArray";
+ default:
+ LOG_CRITICAL(HW_GPU, "Unimplemented surface_target={}", static_cast<u32>(target));
+ UNREACHABLE();
+ return fmt::format("TUK({})", static_cast<u32>(target));
+ }
+}
+
+} // namespace VideoCommon
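As a worked example of the converted-size math above: for a hypothetical 60x35
ASTC texture converted to RGBA8 (4 bytes per pixel, depth 1),
GetConvertedMipmapSize(0) = 60 * 35 * 1 * 4 = 8400 bytes; level 1 shrinks to
30x17, so GetConvertedMipmapSize(1) = 30 * 17 * 1 * 4 = 2040 bytes; and
GetConvertedMipmapOffset(2) sums the two, giving 10440 bytes.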
diff --git a/src/video_core/texture_cache/surface_params.h b/src/video_core/texture_cache/surface_params.h
new file mode 100644
index 000000000..c58e7f8a4
--- /dev/null
+++ b/src/video_core/texture_cache/surface_params.h
@@ -0,0 +1,286 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "common/alignment.h"
+#include "common/bit_util.h"
+#include "common/cityhash.h"
+#include "common/common_types.h"
+#include "video_core/engines/fermi_2d.h"
+#include "video_core/engines/maxwell_3d.h"
+#include "video_core/shader/shader_ir.h"
+#include "video_core/surface.h"
+#include "video_core/textures/decoders.h"
+
+namespace VideoCommon {
+
+using VideoCore::Surface::SurfaceCompression;
+
+class SurfaceParams {
+public:
+ /// Creates SurfaceCachedParams from a texture configuration.
+ static SurfaceParams CreateForTexture(const Tegra::Texture::TICEntry& tic,
+ const VideoCommon::Shader::Sampler& entry);
+
+ /// Creates SurfaceCachedParams from an image configuration.
+ static SurfaceParams CreateForImage(const Tegra::Texture::TICEntry& tic,
+ const VideoCommon::Shader::Image& entry);
+
+ /// Creates SurfaceCachedParams for a depth buffer configuration.
+ static SurfaceParams CreateForDepthBuffer(
+ Core::System& system, u32 zeta_width, u32 zeta_height, Tegra::DepthFormat format,
+ u32 block_width, u32 block_height, u32 block_depth,
+ Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout type);
+
+ /// Creates SurfaceCachedParams from a framebuffer configuration.
+ static SurfaceParams CreateForFramebuffer(Core::System& system, std::size_t index);
+
+ /// Creates SurfaceCachedParams from a Fermi2D surface configuration.
+ static SurfaceParams CreateForFermiCopySurface(
+ const Tegra::Engines::Fermi2D::Regs::Surface& config);
+
+ std::size_t Hash() const {
+ return static_cast<std::size_t>(
+ Common::CityHash64(reinterpret_cast<const char*>(this), sizeof(*this)));
+ }
+
+ bool operator==(const SurfaceParams& rhs) const;
+
+ bool operator!=(const SurfaceParams& rhs) const {
+ return !operator==(rhs);
+ }
+
+ std::size_t GetGuestSizeInBytes() const {
+ return GetInnerMemorySize(false, false, false);
+ }
+
+ std::size_t GetHostSizeInBytes() const {
+ std::size_t host_size_in_bytes;
+ if (GetCompressionType() == SurfaceCompression::Converted) {
+            // ASTC is decompressed in software and emulated as RGBA8
+ host_size_in_bytes = 0;
+ for (u32 level = 0; level < num_levels; ++level) {
+ host_size_in_bytes += GetConvertedMipmapSize(level);
+ }
+ } else {
+ host_size_in_bytes = GetInnerMemorySize(true, false, false);
+ }
+ return host_size_in_bytes;
+ }
+
+ u32 GetBlockAlignedWidth() const {
+ return Common::AlignUp(width, 64 / GetBytesPerPixel());
+ }
+
+ /// Returns the width of a given mipmap level.
+ u32 GetMipWidth(u32 level) const {
+ return std::max(1U, width >> level);
+ }
+
+ /// Returns the height of a given mipmap level.
+ u32 GetMipHeight(u32 level) const {
+ return std::max(1U, height >> level);
+ }
+
+ /// Returns the depth of a given mipmap level.
+ u32 GetMipDepth(u32 level) const {
+ return is_layered ? depth : std::max(1U, depth >> level);
+ }
+
+ /// Returns the block height of a given mipmap level.
+ u32 GetMipBlockHeight(u32 level) const;
+
+ /// Returns the block depth of a given mipmap level.
+ u32 GetMipBlockDepth(u32 level) const;
+
+ /// Returns the best possible row/pitch alignment for the surface.
+ u32 GetRowAlignment(u32 level) const {
+ const u32 bpp =
+ GetCompressionType() == SurfaceCompression::Converted ? 4 : GetBytesPerPixel();
+ return 1U << Common::CountTrailingZeroes32(GetMipWidth(level) * bpp);
+ }
+
+ /// Returns the offset in bytes in guest memory of a given mipmap level.
+ std::size_t GetGuestMipmapLevelOffset(u32 level) const;
+
+ /// Returns the offset in bytes in host memory (linear) of a given mipmap level.
+ std::size_t GetHostMipmapLevelOffset(u32 level) const;
+
+ /// Returns the offset in bytes in host memory (linear) of a given mipmap level
+    /// for a texture that is converted on the host GPU.
+ std::size_t GetConvertedMipmapOffset(u32 level) const;
+
+ /// Returns the size in bytes in guest memory of a given mipmap level.
+ std::size_t GetGuestMipmapSize(u32 level) const {
+ return GetInnerMipmapMemorySize(level, false, false);
+ }
+
+ /// Returns the size in bytes in host memory (linear) of a given mipmap level.
+ std::size_t GetHostMipmapSize(u32 level) const {
+ return GetInnerMipmapMemorySize(level, true, false) * GetNumLayers();
+ }
+
+ std::size_t GetConvertedMipmapSize(u32 level) const;
+
+ /// Returns the size of a layer in bytes in guest memory.
+ std::size_t GetGuestLayerSize() const {
+ return GetLayerSize(false, false);
+ }
+
+ /// Returns the size of a layer in bytes in host memory for a given mipmap level.
+ std::size_t GetHostLayerSize(u32 level) const {
+ ASSERT(target != VideoCore::Surface::SurfaceTarget::Texture3D);
+ return GetInnerMipmapMemorySize(level, true, false);
+ }
+
+    /// Returns the maximum possible number of mipmap levels the texture can have on the host GPU
+ u32 MaxPossibleMipmap() const {
+ const u32 max_mipmap_w = Common::Log2Ceil32(width) + 1U;
+ const u32 max_mipmap_h = Common::Log2Ceil32(height) + 1U;
+ const u32 max_mipmap = std::max(max_mipmap_w, max_mipmap_h);
+ if (target != VideoCore::Surface::SurfaceTarget::Texture3D)
+ return max_mipmap;
+ return std::max(max_mipmap, Common::Log2Ceil32(depth) + 1U);
+ }
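+    // For example, a 1024x1024 texture yields Log2Ceil32(1024) + 1 = 11, the
+    // full chain 1024, 512, ..., 1. For non-power-of-two sizes this is an
+    // upper bound rather than the exact chain length.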
+
+    /// Returns whether the guest surface is compressed.
+ bool IsCompressed() const {
+ return GetDefaultBlockHeight() > 1 || GetDefaultBlockWidth() > 1;
+ }
+
+ /// Returns the default block width.
+ u32 GetDefaultBlockWidth() const {
+ return VideoCore::Surface::GetDefaultBlockWidth(pixel_format);
+ }
+
+ /// Returns the default block height.
+ u32 GetDefaultBlockHeight() const {
+ return VideoCore::Surface::GetDefaultBlockHeight(pixel_format);
+ }
+
+ /// Returns the bits per pixel.
+ u32 GetBitsPerPixel() const {
+ return VideoCore::Surface::GetFormatBpp(pixel_format);
+ }
+
+ /// Returns the bytes per pixel.
+ u32 GetBytesPerPixel() const {
+ return VideoCore::Surface::GetBytesPerPixel(pixel_format);
+ }
+
+ /// Returns true if the pixel format is a depth and/or stencil format.
+ bool IsPixelFormatZeta() const {
+ return pixel_format >= VideoCore::Surface::PixelFormat::MaxColorFormat &&
+ pixel_format < VideoCore::Surface::PixelFormat::MaxDepthStencilFormat;
+ }
+
+ /// Returns how the compression should be handled for this texture.
+ SurfaceCompression GetCompressionType() const {
+ return VideoCore::Surface::GetFormatCompressionType(pixel_format);
+ }
+
+    /// Returns true if the surface is a TextureBuffer type of surface.
+ bool IsBuffer() const {
+ return target == VideoCore::Surface::SurfaceTarget::TextureBuffer;
+ }
+
+    /// Returns the debug name of the texture for use in graphics debuggers.
+ std::string TargetName() const;
+
+    // Helper used for out-of-class size calculations
+ static std::size_t AlignLayered(const std::size_t out_size, const u32 block_height,
+ const u32 block_depth) {
+ return Common::AlignBits(out_size,
+ Tegra::Texture::GetGOBSizeShift() + block_height + block_depth);
+ }
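+    // For example, with block_height = 4 and block_depth = 0 the size is
+    // aligned to 1 << (9 + 4 + 0) = 8192 bytes, i.e. one 16-GOB block.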
+
+    /// Converts a width from one surface format into another. This helps represent the
+    /// equivalent value between compressed and non-compressed textures.
+ static u32 ConvertWidth(u32 width, VideoCore::Surface::PixelFormat pixel_format_from,
+ VideoCore::Surface::PixelFormat pixel_format_to) {
+ const u32 bw1 = VideoCore::Surface::GetDefaultBlockWidth(pixel_format_from);
+ const u32 bw2 = VideoCore::Surface::GetDefaultBlockWidth(pixel_format_to);
+ return (width * bw2 + bw1 - 1) / bw1;
+ }
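+    // For example, converting a 128-texel-wide DXT1 width (block width 4) to
+    // RGBA8 (block width 1) yields (128 * 1 + 4 - 1) / 4 = 32; ConvertHeight
+    // below behaves the same way using block heights.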
+
+    /// Converts a height from one surface format into another. This helps represent the
+    /// equivalent value between compressed and non-compressed textures.
+ static u32 ConvertHeight(u32 height, VideoCore::Surface::PixelFormat pixel_format_from,
+ VideoCore::Surface::PixelFormat pixel_format_to) {
+ const u32 bh1 = VideoCore::Surface::GetDefaultBlockHeight(pixel_format_from);
+ const u32 bh2 = VideoCore::Surface::GetDefaultBlockHeight(pixel_format_to);
+ return (height * bh2 + bh1 - 1) / bh1;
+ }
+
+    // Finds the maximum possible width between two 2D layers of different formats
+ static u32 IntersectWidth(const SurfaceParams& src_params, const SurfaceParams& dst_params,
+ const u32 src_level, const u32 dst_level) {
+ const u32 bw1 = src_params.GetDefaultBlockWidth();
+ const u32 bw2 = dst_params.GetDefaultBlockWidth();
+ const u32 t_src_width = (src_params.GetMipWidth(src_level) * bw2 + bw1 - 1) / bw1;
+ const u32 t_dst_width = (dst_params.GetMipWidth(dst_level) * bw1 + bw2 - 1) / bw2;
+ return std::min(t_src_width, t_dst_width);
+ }
+
+    // Finds the maximum possible height between two 2D layers of different formats
+ static u32 IntersectHeight(const SurfaceParams& src_params, const SurfaceParams& dst_params,
+ const u32 src_level, const u32 dst_level) {
+ const u32 bh1 = src_params.GetDefaultBlockHeight();
+ const u32 bh2 = dst_params.GetDefaultBlockHeight();
+ const u32 t_src_height = (src_params.GetMipHeight(src_level) * bh2 + bh1 - 1) / bh1;
+ const u32 t_dst_height = (dst_params.GetMipHeight(dst_level) * bh1 + bh2 - 1) / bh2;
+ return std::min(t_src_height, t_dst_height);
+ }
+
+ bool is_tiled;
+ bool srgb_conversion;
+ bool is_layered;
+ u32 block_width;
+ u32 block_height;
+ u32 block_depth;
+ u32 tile_width_spacing;
+ u32 width;
+ u32 height;
+ u32 depth;
+ u32 pitch;
+ u32 num_levels;
+ u32 emulated_levels;
+ VideoCore::Surface::PixelFormat pixel_format;
+ VideoCore::Surface::ComponentType component_type;
+ VideoCore::Surface::SurfaceType type;
+ VideoCore::Surface::SurfaceTarget target;
+
+private:
+ /// Returns the size of a given mipmap level inside a layer.
+ std::size_t GetInnerMipmapMemorySize(u32 level, bool as_host_size, bool uncompressed) const;
+
+ /// Returns the size of all mipmap levels and aligns as needed.
+ std::size_t GetInnerMemorySize(bool as_host_size, bool layer_only, bool uncompressed) const {
+ return GetLayerSize(as_host_size, uncompressed) * (layer_only ? 1U : depth);
+ }
+
+ /// Returns the size of a layer
+ std::size_t GetLayerSize(bool as_host_size, bool uncompressed) const;
+
+ std::size_t GetNumLayers() const {
+ return is_layered ? depth : 1;
+ }
+
+ /// Returns true if these parameters are from a layered surface.
+ bool IsLayered() const;
+};
+
+} // namespace VideoCommon
+
+namespace std {
+
+template <>
+struct hash<VideoCommon::SurfaceParams> {
+ std::size_t operator()(const VideoCommon::SurfaceParams& k) const noexcept {
+ return k.Hash();
+ }
+};
+
+} // namespace std
diff --git a/src/video_core/texture_cache/surface_view.cpp b/src/video_core/texture_cache/surface_view.cpp
new file mode 100644
index 000000000..57a1f5803
--- /dev/null
+++ b/src/video_core/texture_cache/surface_view.cpp
@@ -0,0 +1,23 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <tuple>
+
+#include "common/common_types.h"
+#include "video_core/texture_cache/surface_view.h"
+
+namespace VideoCommon {
+
+std::size_t ViewParams::Hash() const {
+ return static_cast<std::size_t>(base_layer) ^ (static_cast<std::size_t>(num_layers) << 16) ^
+ (static_cast<std::size_t>(base_level) << 24) ^
+ (static_cast<std::size_t>(num_levels) << 32) ^ (static_cast<std::size_t>(target) << 36);
+}
+
+bool ViewParams::operator==(const ViewParams& rhs) const {
+ return std::tie(base_layer, num_layers, base_level, num_levels, target) ==
+ std::tie(rhs.base_layer, rhs.num_layers, rhs.base_level, rhs.num_levels, rhs.target);
+}
+
+} // namespace VideoCommon
diff --git a/src/video_core/texture_cache/surface_view.h b/src/video_core/texture_cache/surface_view.h
new file mode 100644
index 000000000..b17fd11a9
--- /dev/null
+++ b/src/video_core/texture_cache/surface_view.h
@@ -0,0 +1,67 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <functional>
+
+#include "common/common_types.h"
+#include "video_core/surface.h"
+#include "video_core/texture_cache/surface_params.h"
+
+namespace VideoCommon {
+
+struct ViewParams {
+ constexpr explicit ViewParams(VideoCore::Surface::SurfaceTarget target, u32 base_layer,
+ u32 num_layers, u32 base_level, u32 num_levels)
+ : target{target}, base_layer{base_layer}, num_layers{num_layers}, base_level{base_level},
+ num_levels{num_levels} {}
+
+ std::size_t Hash() const;
+
+ bool operator==(const ViewParams& rhs) const;
+
+ bool IsLayered() const {
+ switch (target) {
+ case VideoCore::Surface::SurfaceTarget::Texture1DArray:
+ case VideoCore::Surface::SurfaceTarget::Texture2DArray:
+ case VideoCore::Surface::SurfaceTarget::TextureCubemap:
+ case VideoCore::Surface::SurfaceTarget::TextureCubeArray:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ VideoCore::Surface::SurfaceTarget target{};
+ u32 base_layer{};
+ u32 num_layers{};
+ u32 base_level{};
+ u32 num_levels{};
+};
+
+class ViewBase {
+public:
+ constexpr explicit ViewBase(const ViewParams& params) : params{params} {}
+
+ constexpr const ViewParams& GetViewParams() const {
+ return params;
+ }
+
+protected:
+ ViewParams params;
+};
+
+} // namespace VideoCommon
+
+namespace std {
+
+template <>
+struct hash<VideoCommon::ViewParams> {
+ std::size_t operator()(const VideoCommon::ViewParams& k) const noexcept {
+ return k.Hash();
+ }
+};
+
+} // namespace std
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
new file mode 100644
index 000000000..877c6635d
--- /dev/null
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -0,0 +1,835 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <algorithm>
+#include <array>
+#include <memory>
+#include <mutex>
+#include <set>
+#include <tuple>
+#include <unordered_map>
+#include <vector>
+
+#include <boost/icl/interval_map.hpp>
+#include <boost/range/iterator_range.hpp>
+
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "common/math_util.h"
+#include "core/core.h"
+#include "core/memory.h"
+#include "core/settings.h"
+#include "video_core/engines/fermi_2d.h"
+#include "video_core/engines/maxwell_3d.h"
+#include "video_core/gpu.h"
+#include "video_core/memory_manager.h"
+#include "video_core/rasterizer_interface.h"
+#include "video_core/surface.h"
+#include "video_core/texture_cache/copy_params.h"
+#include "video_core/texture_cache/surface_base.h"
+#include "video_core/texture_cache/surface_params.h"
+#include "video_core/texture_cache/surface_view.h"
+
+namespace Tegra::Texture {
+struct FullTextureInfo;
+}
+
+namespace VideoCore {
+class RasterizerInterface;
+}
+
+namespace VideoCommon {
+
+using VideoCore::Surface::PixelFormat;
+
+using VideoCore::Surface::SurfaceTarget;
+using RenderTargetConfig = Tegra::Engines::Maxwell3D::Regs::RenderTargetConfig;
+
+template <typename TSurface, typename TView>
+class TextureCache {
+ using IntervalMap = boost::icl::interval_map<CacheAddr, std::set<TSurface>>;
+ using IntervalType = typename IntervalMap::interval_type;
+
+public:
+ void InvalidateRegion(CacheAddr addr, std::size_t size) {
+ std::lock_guard lock{mutex};
+
+ for (const auto& surface : GetSurfacesInRegion(addr, size)) {
+ Unregister(surface);
+ }
+ }
+
+ /***
+     * `Guard` guarantees that render targets don't unregister themselves if they
+     * collide. Protection is currently only done on 3D slices.
+ ***/
+ void GuardRenderTargets(bool new_guard) {
+ guard_render_targets = new_guard;
+ }
+
+ void GuardSamplers(bool new_guard) {
+ guard_samplers = new_guard;
+ }
+
+ void FlushRegion(CacheAddr addr, std::size_t size) {
+ std::lock_guard lock{mutex};
+
+ auto surfaces = GetSurfacesInRegion(addr, size);
+ if (surfaces.empty()) {
+ return;
+ }
+ std::sort(surfaces.begin(), surfaces.end(), [](const TSurface& a, const TSurface& b) {
+ return a->GetModificationTick() < b->GetModificationTick();
+ });
+ for (const auto& surface : surfaces) {
+ FlushSurface(surface);
+ }
+ }
+
+ TView GetTextureSurface(const Tegra::Texture::TICEntry& tic,
+ const VideoCommon::Shader::Sampler& entry) {
+ std::lock_guard lock{mutex};
+ const auto gpu_addr{tic.Address()};
+ if (!gpu_addr) {
+ return {};
+ }
+ const auto params{SurfaceParams::CreateForTexture(tic, entry)};
+ const auto [surface, view] = GetSurface(gpu_addr, params, true, false);
+ if (guard_samplers) {
+ sampled_textures.push_back(surface);
+ }
+ return view;
+ }
+
+ TView GetImageSurface(const Tegra::Texture::TICEntry& tic,
+ const VideoCommon::Shader::Image& entry) {
+ std::lock_guard lock{mutex};
+ const auto gpu_addr{tic.Address()};
+ if (!gpu_addr) {
+ return {};
+ }
+ const auto params{SurfaceParams::CreateForImage(tic, entry)};
+ const auto [surface, view] = GetSurface(gpu_addr, params, true, false);
+ if (guard_samplers) {
+ sampled_textures.push_back(surface);
+ }
+ return view;
+ }
+
+ bool TextureBarrier() {
+ const bool any_rt =
+ std::any_of(sampled_textures.begin(), sampled_textures.end(),
+ [](const auto& surface) { return surface->IsRenderTarget(); });
+ sampled_textures.clear();
+ return any_rt;
+ }
+
+ TView GetDepthBufferSurface(bool preserve_contents) {
+ std::lock_guard lock{mutex};
+ auto& maxwell3d = system.GPU().Maxwell3D();
+
+ if (!maxwell3d.dirty.depth_buffer) {
+ return depth_buffer.view;
+ }
+ maxwell3d.dirty.depth_buffer = false;
+
+ const auto& regs{maxwell3d.regs};
+ const auto gpu_addr{regs.zeta.Address()};
+ if (!gpu_addr || !regs.zeta_enable) {
+ SetEmptyDepthBuffer();
+ return {};
+ }
+ const auto depth_params{SurfaceParams::CreateForDepthBuffer(
+ system, regs.zeta_width, regs.zeta_height, regs.zeta.format,
+ regs.zeta.memory_layout.block_width, regs.zeta.memory_layout.block_height,
+ regs.zeta.memory_layout.block_depth, regs.zeta.memory_layout.type)};
+ auto surface_view = GetSurface(gpu_addr, depth_params, preserve_contents, true);
+ if (depth_buffer.target)
+ depth_buffer.target->MarkAsRenderTarget(false, NO_RT);
+ depth_buffer.target = surface_view.first;
+ depth_buffer.view = surface_view.second;
+ if (depth_buffer.target)
+ depth_buffer.target->MarkAsRenderTarget(true, DEPTH_RT);
+ return surface_view.second;
+ }
+
+ TView GetColorBufferSurface(std::size_t index, bool preserve_contents) {
+ std::lock_guard lock{mutex};
+ ASSERT(index < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets);
+ auto& maxwell3d = system.GPU().Maxwell3D();
+ if (!maxwell3d.dirty.render_target[index]) {
+ return render_targets[index].view;
+ }
+ maxwell3d.dirty.render_target[index] = false;
+
+ const auto& regs{maxwell3d.regs};
+ if (index >= regs.rt_control.count || regs.rt[index].Address() == 0 ||
+ regs.rt[index].format == Tegra::RenderTargetFormat::NONE) {
+ SetEmptyColorBuffer(index);
+ return {};
+ }
+
+ const auto& config{regs.rt[index]};
+ const auto gpu_addr{config.Address()};
+ if (!gpu_addr) {
+ SetEmptyColorBuffer(index);
+ return {};
+ }
+
+ auto surface_view = GetSurface(gpu_addr, SurfaceParams::CreateForFramebuffer(system, index),
+ preserve_contents, true);
+ if (render_targets[index].target)
+ render_targets[index].target->MarkAsRenderTarget(false, NO_RT);
+ render_targets[index].target = surface_view.first;
+ render_targets[index].view = surface_view.second;
+ if (render_targets[index].target)
+ render_targets[index].target->MarkAsRenderTarget(true, static_cast<u32>(index));
+ return surface_view.second;
+ }
+
+ void MarkColorBufferInUse(std::size_t index) {
+ if (auto& render_target = render_targets[index].target) {
+ render_target->MarkAsModified(true, Tick());
+ }
+ }
+
+ void MarkDepthBufferInUse() {
+ if (depth_buffer.target) {
+ depth_buffer.target->MarkAsModified(true, Tick());
+ }
+ }
+
+ void SetEmptyDepthBuffer() {
+ if (depth_buffer.target == nullptr) {
+ return;
+ }
+ depth_buffer.target->MarkAsRenderTarget(false, NO_RT);
+ depth_buffer.target = nullptr;
+ depth_buffer.view = nullptr;
+ }
+
+ void SetEmptyColorBuffer(std::size_t index) {
+ if (render_targets[index].target == nullptr) {
+ return;
+ }
+ render_targets[index].target->MarkAsRenderTarget(false, NO_RT);
+ render_targets[index].target = nullptr;
+ render_targets[index].view = nullptr;
+ }
+
+ void DoFermiCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src_config,
+ const Tegra::Engines::Fermi2D::Regs::Surface& dst_config,
+ const Tegra::Engines::Fermi2D::Config& copy_config) {
+ std::lock_guard lock{mutex};
+ std::pair<TSurface, TView> dst_surface = GetFermiSurface(dst_config);
+ std::pair<TSurface, TView> src_surface = GetFermiSurface(src_config);
+ ImageBlit(src_surface.second, dst_surface.second, copy_config);
+ dst_surface.first->MarkAsModified(true, Tick());
+ }
+
+ TSurface TryFindFramebufferSurface(const u8* host_ptr) {
+ const CacheAddr cache_addr = ToCacheAddr(host_ptr);
+ if (!cache_addr) {
+ return nullptr;
+ }
+ const CacheAddr page = cache_addr >> registry_page_bits;
+ std::vector<TSurface>& list = registry[page];
+ for (auto& surface : list) {
+ if (surface->GetCacheAddr() == cache_addr) {
+ return surface;
+ }
+ }
+ return nullptr;
+ }
+
+ u64 Tick() {
+ return ++ticks;
+ }
+
+protected:
+ TextureCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer)
+ : system{system}, rasterizer{rasterizer} {
+ for (std::size_t i = 0; i < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets; i++) {
+ SetEmptyColorBuffer(i);
+ }
+
+ SetEmptyDepthBuffer();
+ staging_cache.SetSize(2);
+
+ const auto make_siblings = [this](PixelFormat a, PixelFormat b) {
+ siblings_table[static_cast<std::size_t>(a)] = b;
+ siblings_table[static_cast<std::size_t>(b)] = a;
+ };
+ std::fill(siblings_table.begin(), siblings_table.end(), PixelFormat::Invalid);
+ make_siblings(PixelFormat::Z16, PixelFormat::R16U);
+ make_siblings(PixelFormat::Z32F, PixelFormat::R32F);
+ make_siblings(PixelFormat::Z32FS8, PixelFormat::RG32F);
+
+ sampled_textures.reserve(64);
+ }
+
+ ~TextureCache() = default;
+
+ virtual TSurface CreateSurface(GPUVAddr gpu_addr, const SurfaceParams& params) = 0;
+
+ virtual void ImageCopy(TSurface& src_surface, TSurface& dst_surface,
+ const CopyParams& copy_params) = 0;
+
+ virtual void ImageBlit(TView& src_view, TView& dst_view,
+ const Tegra::Engines::Fermi2D::Config& copy_config) = 0;
+
+ // Depending on the backend, a buffer copy can be slow as it means deoptimizing the texture
+    // and reading it from a separate buffer.
+ virtual void BufferCopy(TSurface& src_surface, TSurface& dst_surface) = 0;
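+
+    // A backend derives from this template and implements the hooks above. A
+    // minimal sketch, assuming hypothetical `GLSurface`/`GLView` types (the
+    // names are illustrative, not part of this header):
+    //
+    //     class GLTextureCache final : public TextureCache<GLSurface, GLView> {
+    //     public:
+    //         GLTextureCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer)
+    //             : TextureCache{system, rasterizer} {}
+    //
+    //     protected:
+    //         GLSurface CreateSurface(GPUVAddr gpu_addr, const SurfaceParams& params) override;
+    //         void ImageCopy(GLSurface& src, GLSurface& dst, const CopyParams& copy) override;
+    //         void ImageBlit(GLView& src, GLView& dst,
+    //                        const Tegra::Engines::Fermi2D::Config& config) override;
+    //         void BufferCopy(GLSurface& src, GLSurface& dst) override;
+    //     };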
+
+ void ManageRenderTargetUnregister(TSurface& surface) {
+ auto& maxwell3d = system.GPU().Maxwell3D();
+ const u32 index = surface->GetRenderTarget();
+ if (index == DEPTH_RT) {
+ maxwell3d.dirty.depth_buffer = true;
+ } else {
+ maxwell3d.dirty.render_target[index] = true;
+ }
+ maxwell3d.dirty.render_settings = true;
+ }
+
+ void Register(TSurface surface) {
+ const GPUVAddr gpu_addr = surface->GetGpuAddr();
+ const CacheAddr cache_ptr = ToCacheAddr(system.GPU().MemoryManager().GetPointer(gpu_addr));
+ const std::size_t size = surface->GetSizeInBytes();
+ const std::optional<VAddr> cpu_addr =
+ system.GPU().MemoryManager().GpuToCpuAddress(gpu_addr);
+ if (!cache_ptr || !cpu_addr) {
+ LOG_CRITICAL(HW_GPU, "Failed to register surface with unmapped gpu_address 0x{:016x}",
+ gpu_addr);
+ return;
+ }
+ const bool continuous = system.GPU().MemoryManager().IsBlockContinuous(gpu_addr, size);
+ surface->MarkAsContinuous(continuous);
+ surface->SetCacheAddr(cache_ptr);
+ surface->SetCpuAddr(*cpu_addr);
+ RegisterInnerCache(surface);
+ surface->MarkAsRegistered(true);
+ rasterizer.UpdatePagesCachedCount(*cpu_addr, size, 1);
+ }
+
+ void Unregister(TSurface surface) {
+ if (guard_render_targets && surface->IsProtected()) {
+ return;
+ }
+ if (!guard_render_targets && surface->IsRenderTarget()) {
+ ManageRenderTargetUnregister(surface);
+ }
+ const std::size_t size = surface->GetSizeInBytes();
+ const VAddr cpu_addr = surface->GetCpuAddr();
+ rasterizer.UpdatePagesCachedCount(cpu_addr, size, -1);
+ UnregisterInnerCache(surface);
+ surface->MarkAsRegistered(false);
+ ReserveSurface(surface->GetSurfaceParams(), surface);
+ }
+
+ TSurface GetUncachedSurface(const GPUVAddr gpu_addr, const SurfaceParams& params) {
+ if (const auto surface = TryGetReservedSurface(params); surface) {
+ surface->SetGpuAddr(gpu_addr);
+ return surface;
+ }
+        // No reserved surface available, create a new one (it is reserved once unregistered)
+ auto new_surface{CreateSurface(gpu_addr, params)};
+ return new_surface;
+ }
+
+ std::pair<TSurface, TView> GetFermiSurface(
+ const Tegra::Engines::Fermi2D::Regs::Surface& config) {
+ SurfaceParams params = SurfaceParams::CreateForFermiCopySurface(config);
+ const GPUVAddr gpu_addr = config.Address();
+ return GetSurface(gpu_addr, params, true, false);
+ }
+
+ Core::System& system;
+
+private:
+ enum class RecycleStrategy : u32 {
+ Ignore = 0,
+ Flush = 1,
+ BufferCopy = 3,
+ };
+
+ /**
+     * `PickStrategy` selects a proper strategy to deal with a texture recycle.
+     * @param overlaps The overlapping surfaces registered in the cache.
+     * @param params The parameters of the new surface.
+     * @param gpu_addr The starting address of the new surface.
+     * @param untopological Tells the recycler that the texture has no way to match the overlaps
+     * due to topological reasons.
+ **/
+ RecycleStrategy PickStrategy(std::vector<TSurface>& overlaps, const SurfaceParams& params,
+ const GPUVAddr gpu_addr, const MatchTopologyResult untopological) {
+ if (Settings::values.use_accurate_gpu_emulation) {
+ return RecycleStrategy::Flush;
+ }
+ // 3D Textures decision
+ if (params.block_depth > 1 || params.target == SurfaceTarget::Texture3D) {
+ return RecycleStrategy::Flush;
+ }
+ for (auto s : overlaps) {
+ const auto& s_params = s->GetSurfaceParams();
+ if (s_params.block_depth > 1 || s_params.target == SurfaceTarget::Texture3D) {
+ return RecycleStrategy::Flush;
+ }
+ }
+ // Untopological decision
+ if (untopological == MatchTopologyResult::CompressUnmatch) {
+ return RecycleStrategy::Flush;
+ }
+ if (untopological == MatchTopologyResult::FullMatch && !params.is_tiled) {
+ return RecycleStrategy::Flush;
+ }
+ return RecycleStrategy::Ignore;
+ }
+
+ /**
+     * `RecycleSurface` decides what to do with textures we can't resolve in the cache.
+     * It has two implemented strategies: Ignore and Flush. Ignore simply unregisters all the
+     * overlaps and loads the new texture. Flush flushes all the overlaps into memory and loads
+     * the new surface from that data.
+     * @param overlaps The overlapping surfaces registered in the cache.
+     * @param params The parameters of the new surface.
+     * @param gpu_addr The starting address of the new surface.
+     * @param preserve_contents Tells if the new surface should be loaded from memory or left
+     * blank.
+     * @param untopological Tells the recycler that the texture has no way to match the overlaps
+     * due to topological reasons.
+ **/
+ std::pair<TSurface, TView> RecycleSurface(std::vector<TSurface>& overlaps,
+ const SurfaceParams& params, const GPUVAddr gpu_addr,
+ const bool preserve_contents,
+ const MatchTopologyResult untopological) {
+ const bool do_load = preserve_contents && Settings::values.use_accurate_gpu_emulation;
+ for (auto& surface : overlaps) {
+ Unregister(surface);
+ }
+ switch (PickStrategy(overlaps, params, gpu_addr, untopological)) {
+ case RecycleStrategy::Ignore: {
+ return InitializeSurface(gpu_addr, params, do_load);
+ }
+ case RecycleStrategy::Flush: {
+ std::sort(overlaps.begin(), overlaps.end(),
+ [](const TSurface& a, const TSurface& b) -> bool {
+ return a->GetModificationTick() < b->GetModificationTick();
+ });
+ for (auto& surface : overlaps) {
+ FlushSurface(surface);
+ }
+ return InitializeSurface(gpu_addr, params, preserve_contents);
+ }
+ case RecycleStrategy::BufferCopy: {
+ auto new_surface = GetUncachedSurface(gpu_addr, params);
+ BufferCopy(overlaps[0], new_surface);
+ return {new_surface, new_surface->GetMainView()};
+ }
+ default: {
+ UNIMPLEMENTED_MSG("Unimplemented Texture Cache Recycling Strategy!");
+ return InitializeSurface(gpu_addr, params, do_load);
+ }
+ }
+ }
+
+ /**
+     * `RebuildSurface` takes a single surface and recreates it into another that
+     * may differ in format, target or width alignment.
+     * @param current_surface The registered surface in the cache which we want to convert.
+     * @param params The new surface params which we'll use to recreate the surface.
+     * @param is_render True if the surface is being rebuilt for use as a render target.
+ **/
+ std::pair<TSurface, TView> RebuildSurface(TSurface current_surface, const SurfaceParams& params,
+ bool is_render) {
+ const auto gpu_addr = current_surface->GetGpuAddr();
+ const auto& cr_params = current_surface->GetSurfaceParams();
+ TSurface new_surface;
+ if (cr_params.pixel_format != params.pixel_format && !is_render &&
+ GetSiblingFormat(cr_params.pixel_format) == params.pixel_format) {
+ SurfaceParams new_params = params;
+ new_params.pixel_format = cr_params.pixel_format;
+ new_params.component_type = cr_params.component_type;
+ new_params.type = cr_params.type;
+ new_surface = GetUncachedSurface(gpu_addr, new_params);
+ } else {
+ new_surface = GetUncachedSurface(gpu_addr, params);
+ }
+ const auto& final_params = new_surface->GetSurfaceParams();
+ if (cr_params.type != final_params.type ||
+ (cr_params.component_type != final_params.component_type)) {
+ BufferCopy(current_surface, new_surface);
+ } else {
+ std::vector<CopyParams> bricks = current_surface->BreakDown(final_params);
+ for (auto& brick : bricks) {
+ ImageCopy(current_surface, new_surface, brick);
+ }
+ }
+ Unregister(current_surface);
+ Register(new_surface);
+ new_surface->MarkAsModified(current_surface->IsModified(), Tick());
+ return {new_surface, new_surface->GetMainView()};
+ }
+
+ /**
+     * `ManageStructuralMatch` takes a single surface and checks it against the new surface's
+     * params. If it's an exact match, we return the main view of the registered surface. If the
+     * formats don't match, we rebuild the surface; we call this last case a `Mirage`. If the
+     * formats match but the targets don't, we create an overview View of the registered surface.
+     * @param current_surface The registered surface in the cache which we want to convert.
+     * @param params The new surface params which we want to check.
+     * @param is_render True if the surface is being used as a render target.
+ **/
+ std::pair<TSurface, TView> ManageStructuralMatch(TSurface current_surface,
+ const SurfaceParams& params, bool is_render) {
+ const bool is_mirage = !current_surface->MatchFormat(params.pixel_format);
+ const bool matches_target = current_surface->MatchTarget(params.target);
+ const auto match_check = [&]() -> std::pair<TSurface, TView> {
+ if (matches_target) {
+ return {current_surface, current_surface->GetMainView()};
+ }
+ return {current_surface, current_surface->EmplaceOverview(params)};
+ };
+ if (!is_mirage) {
+ return match_check();
+ }
+ if (!is_render && GetSiblingFormat(current_surface->GetFormat()) == params.pixel_format) {
+ return match_check();
+ }
+ return RebuildSurface(current_surface, params, is_render);
+ }
+
+ /**
+     * `TryReconstructSurface` is unlike `RebuildSurface`: there we know the registered surface
+     * matches the candidate in some way, but here we have no guarantees. We check whether the
+     * overlaps are sublayers/mipmaps of the new surface; if they all match, we recreate a
+     * surface from them, otherwise we return nothing.
+     * @param overlaps The overlapping surfaces registered in the cache.
+     * @param params The parameters of the new surface.
+     * @param gpu_addr The starting address of the new surface.
+ **/
+ std::optional<std::pair<TSurface, TView>> TryReconstructSurface(std::vector<TSurface>& overlaps,
+ const SurfaceParams& params,
+ const GPUVAddr gpu_addr) {
+ if (params.target == SurfaceTarget::Texture3D) {
+ return {};
+ }
+ bool modified = false;
+ TSurface new_surface = GetUncachedSurface(gpu_addr, params);
+ u32 passed_tests = 0;
+ for (auto& surface : overlaps) {
+ const SurfaceParams& src_params = surface->GetSurfaceParams();
+ if (src_params.is_layered || src_params.num_levels > 1) {
+            // We send these cases to the recycler as they are more complex to handle
+ return {};
+ }
+ const std::size_t candidate_size = surface->GetSizeInBytes();
+ auto mipmap_layer{new_surface->GetLayerMipmap(surface->GetGpuAddr())};
+ if (!mipmap_layer) {
+ continue;
+ }
+ const auto [layer, mipmap] = *mipmap_layer;
+ if (new_surface->GetMipmapSize(mipmap) != candidate_size) {
+ continue;
+ }
+ modified |= surface->IsModified();
+        // Now we have all the data set up
+ const u32 width = SurfaceParams::IntersectWidth(src_params, params, 0, mipmap);
+ const u32 height = SurfaceParams::IntersectHeight(src_params, params, 0, mipmap);
+ const CopyParams copy_params(0, 0, 0, 0, 0, layer, 0, mipmap, width, height, 1);
+ passed_tests++;
+ ImageCopy(surface, new_surface, copy_params);
+ }
+        if (passed_tests == 0) {
+            return {};
+        }
+        // With accurate GPU emulation all tests should pass; otherwise we recycle
+        if (Settings::values.use_accurate_gpu_emulation && passed_tests != overlaps.size()) {
+            return {};
+        }
+ for (auto surface : overlaps) {
+ Unregister(surface);
+ }
+ new_surface->MarkAsModified(modified, Tick());
+ Register(new_surface);
+ return {{new_surface, new_surface->GetMainView()}};
+ }
+
+ /**
+     * `GetSurface` takes the starting address and parameters of a candidate surface and tries
+     * to find a matching surface within the cache. This is done in three big steps. The first is
+     * to check the first-level cache for an exact match; if that fails, we move to step 2.
+     * Step 2 checks whether there are any overlaps at all; if there are none, we just load the
+     * texture from memory, else we move to step 3. Step 3 consists of figuring out the
+     * relationship between the candidate texture and the overlaps. We divide the scenarios
+     * depending on whether there is one overlap or many. If there are many, we try to
+     * reconstruct a new surface out of them based on the candidate's parameters; if we fail, we
+     * recycle. When there's only one overlap, we have to check whether the candidate is a view
+     * (layer/mipmap) of the overlap or whether the registered surface is a mipmap/layer of the
+     * candidate. In the latter case we reconstruct a new surface.
+     * @param gpu_addr The starting address of the candidate surface.
+     * @param params The parameters of the candidate surface.
+     * @param preserve_contents Tells if the new surface should be loaded from memory or left
+     * blank.
+     * @param is_render True if the surface is being retrieved for use as a render target.
+ **/
+ std::pair<TSurface, TView> GetSurface(const GPUVAddr gpu_addr, const SurfaceParams& params,
+ bool preserve_contents, bool is_render) {
+ const auto host_ptr{system.GPU().MemoryManager().GetPointer(gpu_addr)};
+ const auto cache_addr{ToCacheAddr(host_ptr)};
+
+ // Step 0: guarantee a valid surface
+ if (!cache_addr) {
+ // Return a null surface if it's invalid
+ SurfaceParams new_params = params;
+ new_params.width = 1;
+ new_params.height = 1;
+ new_params.depth = 1;
+ new_params.block_height = 0;
+ new_params.block_depth = 0;
+ return InitializeSurface(gpu_addr, new_params, false);
+ }
+
+ // Step 1
+ // Check Level 1 Cache for a fast structural match. If candidate surface
+ // matches at certain level we are pretty much done.
+ if (const auto iter = l1_cache.find(cache_addr); iter != l1_cache.end()) {
+ TSurface& current_surface = iter->second;
+ const auto topological_result = current_surface->MatchesTopology(params);
+ if (topological_result != MatchTopologyResult::FullMatch) {
+ std::vector<TSurface> overlaps{current_surface};
+ return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+ topological_result);
+ }
+ const auto struct_result = current_surface->MatchesStructure(params);
+ if (struct_result != MatchStructureResult::None &&
+ (params.target != SurfaceTarget::Texture3D ||
+ current_surface->MatchTarget(params.target))) {
+ if (struct_result == MatchStructureResult::FullMatch) {
+ return ManageStructuralMatch(current_surface, params, is_render);
+ } else {
+ return RebuildSurface(current_surface, params, is_render);
+ }
+ }
+ }
+
+ // Step 2
+ // Obtain all possible overlaps in the memory region
+ const std::size_t candidate_size = params.GetGuestSizeInBytes();
+ auto overlaps{GetSurfacesInRegion(cache_addr, candidate_size)};
+
+        // If none are found, we are done; we just create the surface and load it.
+ if (overlaps.empty()) {
+ return InitializeSurface(gpu_addr, params, preserve_contents);
+ }
+
+ // Step 3
+        // Now we need to figure out the relationship between the texture and its overlaps.
+        // We do a topological test to ensure we can find some relationship; if it fails,
+        // we immediately recycle the texture.
+ for (const auto& surface : overlaps) {
+ const auto topological_result = surface->MatchesTopology(params);
+ if (topological_result != MatchTopologyResult::FullMatch) {
+ return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+ topological_result);
+ }
+ }
+
+ // Split cases between 1 overlap or many.
+ if (overlaps.size() == 1) {
+ TSurface current_surface = overlaps[0];
+            // First check if the surface is within the overlap. If not, it means one of two
+            // things: either the candidate surface is a supertexture of the overlap or they
+            // don't match in any known way.
+ if (!current_surface->IsInside(gpu_addr, gpu_addr + candidate_size)) {
+ if (current_surface->GetGpuAddr() == gpu_addr) {
+ std::optional<std::pair<TSurface, TView>> view =
+ TryReconstructSurface(overlaps, params, gpu_addr);
+ if (view) {
+ return *view;
+ }
+ }
+ return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+ MatchTopologyResult::FullMatch);
+ }
+ // Now we check if the candidate is a mipmap/layer of the overlap
+ std::optional<TView> view =
+ current_surface->EmplaceView(params, gpu_addr, candidate_size);
+ if (view) {
+ const bool is_mirage = !current_surface->MatchFormat(params.pixel_format);
+ if (is_mirage) {
+ // On a mirage view, we need to recreate the surface under this new view
+ // and then obtain a view again.
+ SurfaceParams new_params = current_surface->GetSurfaceParams();
+ const u32 wh = SurfaceParams::ConvertWidth(
+ new_params.width, new_params.pixel_format, params.pixel_format);
+ const u32 hh = SurfaceParams::ConvertHeight(
+ new_params.height, new_params.pixel_format, params.pixel_format);
+ new_params.width = wh;
+ new_params.height = hh;
+ new_params.pixel_format = params.pixel_format;
+ std::pair<TSurface, TView> pair =
+ RebuildSurface(current_surface, new_params, is_render);
+ std::optional<TView> mirage_view =
+ pair.first->EmplaceView(params, gpu_addr, candidate_size);
+ if (mirage_view)
+ return {pair.first, *mirage_view};
+ return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+ MatchTopologyResult::FullMatch);
+ }
+ return {current_surface, *view};
+ }
+ } else {
+ // If there are many overlaps, odds are they are subtextures of the candidate
+ // surface. We try to construct a new surface based on the candidate parameters,
+ // using the overlaps. If a single overlap fails, this will fail.
+ std::optional<std::pair<TSurface, TView>> view =
+ TryReconstructSurface(overlaps, params, gpu_addr);
+ if (view) {
+ return *view;
+ }
+ }
+ // We failed all the tests, recycle the overlaps into a new texture.
+ return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+ MatchTopologyResult::FullMatch);
+ }
+
+ std::pair<TSurface, TView> InitializeSurface(GPUVAddr gpu_addr, const SurfaceParams& params,
+ bool preserve_contents) {
+ auto new_surface{GetUncachedSurface(gpu_addr, params)};
+ Register(new_surface);
+ if (preserve_contents) {
+ LoadSurface(new_surface);
+ }
+ return {new_surface, new_surface->GetMainView()};
+ }
+
+ void LoadSurface(const TSurface& surface) {
+ staging_cache.GetBuffer(0).resize(surface->GetHostSizeInBytes());
+ surface->LoadBuffer(system.GPU().MemoryManager(), staging_cache);
+ surface->UploadTexture(staging_cache.GetBuffer(0));
+ surface->MarkAsModified(false, Tick());
+ }
+
+ void FlushSurface(const TSurface& surface) {
+ if (!surface->IsModified()) {
+ return;
+ }
+ staging_cache.GetBuffer(0).resize(surface->GetHostSizeInBytes());
+ surface->DownloadTexture(staging_cache.GetBuffer(0));
+ surface->FlushBuffer(system.GPU().MemoryManager(), staging_cache);
+ surface->MarkAsModified(false, Tick());
+ }
+
+ void RegisterInnerCache(TSurface& surface) {
+ const CacheAddr cache_addr = surface->GetCacheAddr();
+ CacheAddr start = cache_addr >> registry_page_bits;
+ const CacheAddr end = (surface->GetCacheAddrEnd() - 1) >> registry_page_bits;
+ l1_cache[cache_addr] = surface;
+ while (start <= end) {
+ registry[start].push_back(surface);
+ start++;
+ }
+ }
+
+ void UnregisterInnerCache(TSurface& surface) {
+ const CacheAddr cache_addr = surface->GetCacheAddr();
+ CacheAddr start = cache_addr >> registry_page_bits;
+ const CacheAddr end = (surface->GetCacheAddrEnd() - 1) >> registry_page_bits;
+ l1_cache.erase(cache_addr);
+ while (start <= end) {
+ auto& reg{registry[start]};
+ reg.erase(std::find(reg.begin(), reg.end(), surface));
+ start++;
+ }
+ }
+
+ std::vector<TSurface> GetSurfacesInRegion(const CacheAddr cache_addr, const std::size_t size) {
+ if (size == 0) {
+ return {};
+ }
+ const CacheAddr cache_addr_end = cache_addr + size;
+ CacheAddr start = cache_addr >> registry_page_bits;
+ const CacheAddr end = (cache_addr_end - 1) >> registry_page_bits;
+ std::vector<TSurface> surfaces;
+ while (start <= end) {
+ std::vector<TSurface>& list = registry[start];
+ for (auto& surface : list) {
+ if (!surface->IsPicked() && surface->Overlaps(cache_addr, cache_addr_end)) {
+ surface->MarkAsPicked(true);
+ surfaces.push_back(surface);
+ }
+ }
+ start++;
+ }
+ for (auto& surface : surfaces) {
+ surface->MarkAsPicked(false);
+ }
+ return surfaces;
+ }
+
+ void ReserveSurface(const SurfaceParams& params, TSurface surface) {
+ surface_reserve[params].push_back(std::move(surface));
+ }
+
+ TSurface TryGetReservedSurface(const SurfaceParams& params) {
+ auto search{surface_reserve.find(params)};
+ if (search == surface_reserve.end()) {
+ return {};
+ }
+ for (auto& surface : search->second) {
+ if (!surface->IsRegistered()) {
+ return surface;
+ }
+ }
+ return {};
+ }
+
+ constexpr PixelFormat GetSiblingFormat(PixelFormat format) const {
+ return siblings_table[static_cast<std::size_t>(format)];
+ }
+
+ struct FramebufferTargetInfo {
+ TSurface target;
+ TView view;
+ };
+
+ VideoCore::RasterizerInterface& rasterizer;
+
+ u64 ticks{};
+
+    // Guards the cache against protection conflicts.
+ bool guard_render_targets{};
+ bool guard_samplers{};
+
+    // The siblings table is for formats that can be interchanged with one another
+    // without causing issues. This is only valid when a conflict occurs on a
+    // non-rendering use.
+ std::array<PixelFormat, static_cast<std::size_t>(PixelFormat::Max)> siblings_table;
+
+    // The internal cache is different for the texture cache: it's based on buckets
+    // of 1 MiB. This fits the purpose of this cache better, as textures are normally
+    // large in size.
+ static constexpr u64 registry_page_bits{20};
+ static constexpr u64 registry_page_size{1 << registry_page_bits};
+ std::unordered_map<CacheAddr, std::vector<TSurface>> registry;
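+    // Example: with 1 MiB buckets a surface covering cache addresses
+    // [0x12FF000, 0x1302000) lands in buckets 0x12FF000 >> 20 = 0x12 through
+    // (0x1302000 - 1) >> 20 = 0x13, so it is registered in both.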
+
+ static constexpr u32 DEPTH_RT = 8;
+ static constexpr u32 NO_RT = 0xFFFFFFFF;
+
+    // The L1 cache is used for fast texture lookup before checking the overlaps.
+    // This avoids calculating sizes and other data.
+ std::unordered_map<CacheAddr, TSurface> l1_cache;
+
+    /// The surface reserve is a "backup" cache; this is where we put unique surfaces that have
+ /// previously been used. This is to prevent surfaces from being constantly created and
+ /// destroyed when used with different surface parameters.
+ std::unordered_map<SurfaceParams, std::vector<TSurface>> surface_reserve;
+ std::array<FramebufferTargetInfo, Tegra::Engines::Maxwell3D::Regs::NumRenderTargets>
+ render_targets;
+ FramebufferTargetInfo depth_buffer;
+
+ std::vector<TSurface> sampled_textures;
+
+ StagingCache staging_cache;
+ std::recursive_mutex mutex;
+};
+
+} // namespace VideoCommon
diff --git a/src/video_core/textures/convert.cpp b/src/video_core/textures/convert.cpp
index 82050bd51..f3efa7eb0 100644
--- a/src/video_core/textures/convert.cpp
+++ b/src/video_core/textures/convert.cpp
@@ -62,19 +62,19 @@ static void ConvertZ24S8ToS8Z24(u8* data, u32 width, u32 height) {
SwapS8Z24ToZ24S8<true>(data, width, height);
}
-void ConvertFromGuestToHost(u8* data, PixelFormat pixel_format, u32 width, u32 height, u32 depth,
- bool convert_astc, bool convert_s8z24) {
+void ConvertFromGuestToHost(u8* in_data, u8* out_data, PixelFormat pixel_format, u32 width,
+ u32 height, u32 depth, bool convert_astc, bool convert_s8z24) {
if (convert_astc && IsPixelFormatASTC(pixel_format)) {
// Convert ASTC pixel formats to RGBA8, as most desktop GPUs do not support ASTC.
u32 block_width{};
u32 block_height{};
std::tie(block_width, block_height) = GetASTCBlockSize(pixel_format);
- const std::vector<u8> rgba8_data =
- Tegra::Texture::ASTC::Decompress(data, width, height, depth, block_width, block_height);
- std::copy(rgba8_data.begin(), rgba8_data.end(), data);
+ const std::vector<u8> rgba8_data = Tegra::Texture::ASTC::Decompress(
+ in_data, width, height, depth, block_width, block_height);
+ std::copy(rgba8_data.begin(), rgba8_data.end(), out_data);
} else if (convert_s8z24 && pixel_format == PixelFormat::S8Z24) {
- Tegra::Texture::ConvertS8Z24ToZ24S8(data, width, height);
+ Tegra::Texture::ConvertS8Z24ToZ24S8(in_data, width, height);
}
}
@@ -90,4 +90,4 @@ void ConvertFromHostToGuest(u8* data, PixelFormat pixel_format, u32 width, u32 h
}
}
-} // namespace Tegra::Texture \ No newline at end of file
+} // namespace Tegra::Texture
diff --git a/src/video_core/textures/convert.h b/src/video_core/textures/convert.h
index 12542e71c..d5d6c77bb 100644
--- a/src/video_core/textures/convert.h
+++ b/src/video_core/textures/convert.h
@@ -12,10 +12,11 @@ enum class PixelFormat;
namespace Tegra::Texture {
-void ConvertFromGuestToHost(u8* data, VideoCore::Surface::PixelFormat pixel_format, u32 width,
- u32 height, u32 depth, bool convert_astc, bool convert_s8z24);
+void ConvertFromGuestToHost(u8* in_data, u8* out_data, VideoCore::Surface::PixelFormat pixel_format,
+ u32 width, u32 height, u32 depth, bool convert_astc,
+ bool convert_s8z24);
void ConvertFromHostToGuest(u8* data, VideoCore::Surface::PixelFormat pixel_format, u32 width,
u32 height, u32 depth, bool convert_astc, bool convert_s8z24);
-} // namespace Tegra::Texture \ No newline at end of file
+} // namespace Tegra::Texture
diff --git a/src/video_core/textures/decoders.cpp b/src/video_core/textures/decoders.cpp
index 217805386..7df5f1452 100644
--- a/src/video_core/textures/decoders.cpp
+++ b/src/video_core/textures/decoders.cpp
@@ -36,10 +36,16 @@ struct alignas(64) SwizzleTable {
std::array<std::array<u16, M>, N> values{};
};
-constexpr u32 gob_size_x = 64;
-constexpr u32 gob_size_y = 8;
-constexpr u32 gob_size_z = 1;
-constexpr u32 gob_size = gob_size_x * gob_size_y * gob_size_z;
+constexpr u32 gob_size_x_shift = 6;
+constexpr u32 gob_size_y_shift = 3;
+constexpr u32 gob_size_z_shift = 0;
+constexpr u32 gob_size_shift = gob_size_x_shift + gob_size_y_shift + gob_size_z_shift;
+
+constexpr u32 gob_size_x = 1U << gob_size_x_shift;
+constexpr u32 gob_size_y = 1U << gob_size_y_shift;
+constexpr u32 gob_size_z = 1U << gob_size_z_shift;
+constexpr u32 gob_size = 1U << gob_size_shift;
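+// A GOB therefore spans 1 << 6 = 64 bytes in x, 1 << 3 = 8 rows in y and a
+// single slice in z, for a total of 1 << 9 = 512 bytes.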
+
constexpr u32 fast_swizzle_align = 16;
constexpr auto legacy_swizzle_table = SwizzleTable<gob_size_y, gob_size_x, gob_size_z>();
@@ -171,14 +177,16 @@ void SwizzledData(u8* const swizzled_data, u8* const unswizzled_data, const bool
void CopySwizzledData(u32 width, u32 height, u32 depth, u32 bytes_per_pixel,
u32 out_bytes_per_pixel, u8* const swizzled_data, u8* const unswizzled_data,
bool unswizzle, u32 block_height, u32 block_depth, u32 width_spacing) {
+ const u32 block_height_size{1U << block_height};
+ const u32 block_depth_size{1U << block_depth};
if (bytes_per_pixel % 3 != 0 && (width * bytes_per_pixel) % fast_swizzle_align == 0) {
SwizzledData<true>(swizzled_data, unswizzled_data, unswizzle, width, height, depth,
- bytes_per_pixel, out_bytes_per_pixel, block_height, block_depth,
- width_spacing);
+ bytes_per_pixel, out_bytes_per_pixel, block_height_size,
+ block_depth_size, width_spacing);
} else {
SwizzledData<false>(swizzled_data, unswizzled_data, unswizzle, width, height, depth,
- bytes_per_pixel, out_bytes_per_pixel, block_height, block_depth,
- width_spacing);
+ bytes_per_pixel, out_bytes_per_pixel, block_height_size,
+ block_depth_size, width_spacing);
}
}
@@ -248,18 +256,22 @@ std::vector<u8> UnswizzleTexture(u8* address, u32 tile_size_x, u32 tile_size_y,
}
void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width,
- u32 bytes_per_pixel, u8* swizzled_data, u8* unswizzled_data, u32 block_height) {
+ u32 bytes_per_pixel, u8* swizzled_data, u8* unswizzled_data,
+ u32 block_height_bit, u32 offset_x, u32 offset_y) {
+ const u32 block_height = 1U << block_height_bit;
const u32 image_width_in_gobs{(swizzled_width * bytes_per_pixel + (gob_size_x - 1)) /
gob_size_x};
for (u32 line = 0; line < subrect_height; ++line) {
+ const u32 dst_y = line + offset_y;
const u32 gob_address_y =
- (line / (gob_size_y * block_height)) * gob_size * block_height * image_width_in_gobs +
- ((line % (gob_size_y * block_height)) / gob_size_y) * gob_size;
- const auto& table = legacy_swizzle_table[line % gob_size_y];
+ (dst_y / (gob_size_y * block_height)) * gob_size * block_height * image_width_in_gobs +
+ ((dst_y % (gob_size_y * block_height)) / gob_size_y) * gob_size;
+ const auto& table = legacy_swizzle_table[dst_y % gob_size_y];
for (u32 x = 0; x < subrect_width; ++x) {
+ const u32 dst_x = x + offset_x;
const u32 gob_address =
- gob_address_y + (x * bytes_per_pixel / gob_size_x) * gob_size * block_height;
- const u32 swizzled_offset = gob_address + table[(x * bytes_per_pixel) % gob_size_x];
+ gob_address_y + (dst_x * bytes_per_pixel / gob_size_x) * gob_size * block_height;
+ const u32 swizzled_offset = gob_address + table[(dst_x * bytes_per_pixel) % gob_size_x];
u8* source_line = unswizzled_data + line * source_pitch + x * bytes_per_pixel;
u8* dest_addr = swizzled_data + swizzled_offset;
@@ -269,8 +281,9 @@ void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32
}
void UnswizzleSubrect(u32 subrect_width, u32 subrect_height, u32 dest_pitch, u32 swizzled_width,
- u32 bytes_per_pixel, u8* swizzled_data, u8* unswizzled_data, u32 block_height,
- u32 offset_x, u32 offset_y) {
+ u32 bytes_per_pixel, u8* swizzled_data, u8* unswizzled_data,
+ u32 block_height_bit, u32 offset_x, u32 offset_y) {
+ const u32 block_height = 1U << block_height_bit;
for (u32 line = 0; line < subrect_height; ++line) {
const u32 y2 = line + offset_y;
const u32 gob_address_y = (y2 / (gob_size_y * block_height)) * gob_size * block_height +
@@ -289,8 +302,9 @@ void UnswizzleSubrect(u32 subrect_width, u32 subrect_height, u32 dest_pitch, u32
}
void SwizzleKepler(const u32 width, const u32 height, const u32 dst_x, const u32 dst_y,
- const u32 block_height, const std::size_t copy_size, const u8* source_data,
+ const u32 block_height_bit, const std::size_t copy_size, const u8* source_data,
u8* swizzle_data) {
+ const u32 block_height = 1U << block_height_bit;
const u32 image_width_in_gobs{(width + gob_size_x - 1) / gob_size_x};
std::size_t count = 0;
for (std::size_t y = dst_y; y < height && count < copy_size; ++y) {
@@ -356,9 +370,9 @@ std::vector<u8> DecodeTexture(const std::vector<u8>& texture_data, TextureFormat
std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height, u32 depth,
u32 block_height, u32 block_depth) {
if (tiled) {
- const u32 aligned_width = Common::AlignUp(width * bytes_per_pixel, gob_size_x);
- const u32 aligned_height = Common::AlignUp(height, gob_size_y * block_height);
- const u32 aligned_depth = Common::AlignUp(depth, gob_size_z * block_depth);
+ const u32 aligned_width = Common::AlignBits(width * bytes_per_pixel, gob_size_x_shift);
+ const u32 aligned_height = Common::AlignBits(height, gob_size_y_shift + block_height);
+ const u32 aligned_depth = Common::AlignBits(depth, gob_size_z_shift + block_depth);
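+        // AlignBits(v, n) rounds v up to a multiple of 1 << n; e.g. a height of
+        // 100 with block_height = 4 is aligned to 1 << (3 + 4) = 128.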
return aligned_width * aligned_height * aligned_depth;
} else {
return width * height * depth * bytes_per_pixel;
diff --git a/src/video_core/textures/decoders.h b/src/video_core/textures/decoders.h
index e072d8401..f1e3952bc 100644
--- a/src/video_core/textures/decoders.h
+++ b/src/video_core/textures/decoders.h
@@ -12,8 +12,8 @@ namespace Tegra::Texture {
// GOB size constant. Calculated as 64 bytes in x multiplied by 8 y coords; represents
// a small rect of (64/bytes_per_pixel)x8.
-inline std::size_t GetGOBSize() {
- return 512;
+inline std::size_t GetGOBSizeShift() {
+ return 9;
}
/// Unswizzles a swizzled texture without changing its format.
@@ -44,7 +44,8 @@ std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height
/// Copies an untiled subrectangle into a tiled surface.
void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width,
- u32 bytes_per_pixel, u8* swizzled_data, u8* unswizzled_data, u32 block_height);
+ u32 bytes_per_pixel, u8* swizzled_data, u8* unswizzled_data, u32 block_height,
+ u32 offset_x, u32 offset_y);
/// Copies a tiled subrectangle into a linear surface.
void UnswizzleSubrect(u32 subrect_width, u32 subrect_height, u32 dest_pitch, u32 swizzled_width,
diff --git a/src/video_core/textures/texture.h b/src/video_core/textures/texture.h
index 219bfd559..e36bc2c04 100644
--- a/src/video_core/textures/texture.h
+++ b/src/video_core/textures/texture.h
@@ -52,9 +52,9 @@ enum class TextureFormat : u32 {
DXT45 = 0x26,
DXN1 = 0x27,
DXN2 = 0x28,
- Z24S8 = 0x29,
+ S8Z24 = 0x29,
X8Z24 = 0x2a,
- S8Z24 = 0x2b,
+ Z24S8 = 0x2b,
X4V4Z24__COV4R4V = 0x2c,
X4V4Z24__COV8R8V = 0x2d,
V8Z24__COV4R12V = 0x2e,
@@ -172,12 +172,16 @@ struct TICEntry {
BitField<26, 1, u32> use_header_opt_control;
BitField<27, 1, u32> depth_texture;
BitField<28, 4, u32> max_mip_level;
+
+ BitField<0, 16, u32> buffer_high_width_minus_one;
};
union {
BitField<0, 16, u32> width_minus_1;
BitField<22, 1, u32> srgb_conversion;
BitField<23, 4, TextureType> texture_type;
BitField<29, 3, u32> border_size;
+
+ BitField<0, 16, u32> buffer_low_width_minus_one;
};
union {
BitField<0, 16, u32> height_minus_1;
@@ -206,7 +210,10 @@ struct TICEntry {
}
u32 Width() const {
- return width_minus_1 + 1;
+ if (header_version != TICHeaderVersion::OneDBuffer) {
+ return width_minus_1 + 1;
+ }
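+        // 1D buffers split a 32-bit width across two 16-bit fields; e.g. a high
+        // field of 0x1 with a low field of 0x0 encodes ((0x1 << 16) | 0x0) + 1 =
+        // 65537 texels.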
+ return ((buffer_high_width_minus_one << 16) | buffer_low_width_minus_one) + 1;
}
u32 Height() const {
@@ -219,20 +226,17 @@ struct TICEntry {
u32 BlockWidth() const {
ASSERT(IsTiled());
- // The block height is stored in log2 format.
- return 1 << block_width;
+ return block_width;
}
u32 BlockHeight() const {
ASSERT(IsTiled());
- // The block height is stored in log2 format.
- return 1 << block_height;
+ return block_height;
}
u32 BlockDepth() const {
ASSERT(IsTiled());
- // The block height is stored in log2 format.
- return 1 << block_depth;
+ return block_depth;
}
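+    // These getters now return the raw log2 exponents stored in the TIC entry;
+    // callers that need the actual dimension in GOBs must shift, e.g.
+    // 1U << BlockHeight().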
bool IsTiled() const {
@@ -240,6 +244,15 @@ struct TICEntry {
header_version == TICHeaderVersion::BlockLinearColorKey;
}
+ bool IsLineal() const {
+ return header_version == TICHeaderVersion::Pitch ||
+ header_version == TICHeaderVersion::PitchColorKey;
+ }
+
+ bool IsBuffer() const {
+ return header_version == TICHeaderVersion::OneDBuffer;
+ }
+
bool IsSrgbConversionEnabled() const {
return srgb_conversion != 0;
}