Diffstat (limited to 'src/video_core')
24 files changed, 1194 insertions, 957 deletions
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt index a780215c1..3f906a517 100644 --- a/src/video_core/CMakeLists.txt +++ b/src/video_core/CMakeLists.txt @@ -21,6 +21,8 @@ add_library(video_core STATIC macro_interpreter.h memory_manager.cpp memory_manager.h + morton.cpp + morton.h rasterizer_cache.cpp rasterizer_cache.h rasterizer_interface.h @@ -62,7 +64,6 @@ add_library(video_core STATIC textures/decoders.cpp textures/decoders.h textures/texture.h - utils.h video_core.cpp video_core.h ) diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h index 9e480dc39..4f137e693 100644 --- a/src/video_core/engines/maxwell_3d.h +++ b/src/video_core/engines/maxwell_3d.h @@ -389,6 +389,13 @@ public: ReverseSubtract = 3, Min = 4, Max = 5, + + // These values are used by Nouveau and some games. + AddGL = 0x8006, + SubtractGL = 0x8007, + ReverseSubtractGL = 0x8008, + MinGL = 0x800a, + MaxGL = 0x800b }; enum class Factor : u32 { @@ -624,7 +631,16 @@ public: } } zeta; - INSERT_PADDING_WORDS(0x5B); + INSERT_PADDING_WORDS(0x41); + + union { + BitField<0, 4, u32> stencil; + BitField<4, 4, u32> unknown; + BitField<8, 4, u32> scissor; + BitField<12, 4, u32> viewport; + } clear_flags; + + INSERT_PADDING_WORDS(0x19); std::array<VertexAttribute, NumVertexAttributes> vertex_attrib_format; @@ -1127,6 +1143,7 @@ ASSERT_REG_POSITION(stencil_back_func_mask, 0x3D7); ASSERT_REG_POSITION(color_mask_common, 0x3E4); ASSERT_REG_POSITION(rt_separate_frag_data, 0x3EB); ASSERT_REG_POSITION(zeta, 0x3F8); +ASSERT_REG_POSITION(clear_flags, 0x43E); ASSERT_REG_POSITION(vertex_attrib_format, 0x458); ASSERT_REG_POSITION(rt_control, 0x487); ASSERT_REG_POSITION(zeta_width, 0x48a); diff --git a/src/video_core/engines/shader_bytecode.h b/src/video_core/engines/shader_bytecode.h index 5b84bcb24..52d03aee8 100644 --- a/src/video_core/engines/shader_bytecode.h +++ b/src/video_core/engines/shader_bytecode.h @@ -82,6 +82,8 @@ union Attribute { Position = 7, Attribute_0 = 8, Attribute_31 = 39, + ClipDistances0123 = 44, + ClipDistances4567 = 45, PointCoord = 46, // This attribute contains a tuple of (~, ~, InstanceId, VertexId) when inside a vertex // shader, and a tuple of (TessCoord.x, TessCoord.y, TessCoord.z, ~) when inside a Tess Eval @@ -153,6 +155,7 @@ enum class PredCondition : u64 { NotEqual = 5, GreaterEqual = 6, LessThanWithNan = 9, + LessEqualWithNan = 11, GreaterThanWithNan = 12, NotEqualWithNan = 13, GreaterEqualWithNan = 14, @@ -261,7 +264,7 @@ enum class FlowCondition : u64 { Fcsm_Tr = 0x1C, // TODO(bunnei): What is this used for? 
}; -enum class ControlCode : u64 { +enum class ConditionCode : u64 { F = 0, LT = 1, EQ = 2, @@ -574,7 +577,6 @@ union Instruction { BitField<39, 2, u64> tab5cb8_2; BitField<41, 3, u64> tab5c68_1; BitField<44, 2, u64> tab5c68_0; - BitField<47, 1, u64> cc; BitField<48, 1, u64> negate_b; } fmul; @@ -836,7 +838,7 @@ union Instruction { union { BitField<0, 3, u64> pred0; BitField<3, 3, u64> pred3; - BitField<8, 5, ControlCode> cc; // flag in cc + BitField<8, 5, ConditionCode> cc; // flag in cc BitField<39, 3, u64> pred39; BitField<42, 1, u64> neg_pred39; BitField<45, 4, PredOperation> op; // op with pred39 @@ -1246,7 +1248,7 @@ union Instruction { BitField<60, 1, u64> is_b_gpr; BitField<59, 1, u64> is_c_gpr; BitField<20, 24, s64> smem_imm; - BitField<0, 5, ControlCode> flow_control_code; + BitField<0, 5, ConditionCode> flow_condition_code; Attribute attribute; Sampler sampler; diff --git a/src/video_core/engines/shader_header.h b/src/video_core/engines/shader_header.h index a0e015c4b..99c34649f 100644 --- a/src/video_core/engines/shader_header.h +++ b/src/video_core/engines/shader_header.h @@ -62,7 +62,16 @@ struct Header { INSERT_PADDING_BYTES(1); // ImapSystemValuesB INSERT_PADDING_BYTES(16); // ImapGenericVector[32] INSERT_PADDING_BYTES(2); // ImapColor - INSERT_PADDING_BYTES(2); // ImapSystemValuesC + union { + BitField<0, 8, u16> clip_distances; + BitField<8, 1, u16> point_sprite_s; + BitField<9, 1, u16> point_sprite_t; + BitField<10, 1, u16> fog_coordinate; + BitField<12, 1, u16> tessellation_eval_point_u; + BitField<13, 1, u16> tessellation_eval_point_v; + BitField<14, 1, u16> instance_id; + BitField<15, 1, u16> vertex_id; + }; INSERT_PADDING_BYTES(5); // ImapFixedFncTexture[10] INSERT_PADDING_BYTES(1); // ImapReserved INSERT_PADDING_BYTES(3); // OmapSystemValuesA diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp index 83c7e5b0b..51b3904f6 100644 --- a/src/video_core/gpu.cpp +++ b/src/video_core/gpu.cpp @@ -17,6 +17,8 @@ u32 FramebufferConfig::BytesPerPixel(PixelFormat format) { switch (format) { case PixelFormat::ABGR8: return 4; + default: + return 4; } UNREACHABLE(); diff --git a/src/video_core/macro_interpreter.cpp b/src/video_core/macro_interpreter.cpp index 335a8d407..2b0dea5cd 100644 --- a/src/video_core/macro_interpreter.cpp +++ b/src/video_core/macro_interpreter.cpp @@ -35,6 +35,7 @@ void MacroInterpreter::Reset() { // The next parameter index starts at 1, because $r1 already has the value of the first // parameter. next_parameter_index = 1; + carry_flag = false; } bool MacroInterpreter::Step(u32 offset, bool is_delay_slot) { @@ -135,14 +136,28 @@ MacroInterpreter::Opcode MacroInterpreter::GetOpcode(u32 offset) const { return {macro_memory[offset + pc / sizeof(u32)]}; } -u32 MacroInterpreter::GetALUResult(ALUOperation operation, u32 src_a, u32 src_b) const { +u32 MacroInterpreter::GetALUResult(ALUOperation operation, u32 src_a, u32 src_b) { switch (operation) { - case ALUOperation::Add: - return src_a + src_b; - // TODO(Subv): Implement AddWithCarry - case ALUOperation::Subtract: - return src_a - src_b; - // TODO(Subv): Implement SubtractWithBorrow + case ALUOperation::Add: { + const u64 result{static_cast<u64>(src_a) + src_b}; + carry_flag = result > 0xffffffff; + return static_cast<u32>(result); + } + case ALUOperation::AddWithCarry: { + const u64 result{static_cast<u64>(src_a) + src_b + (carry_flag ? 
1ULL : 0ULL)}; + carry_flag = result > 0xffffffff; + return static_cast<u32>(result); + } + case ALUOperation::Subtract: { + const u64 result{static_cast<u64>(src_a) - src_b}; + carry_flag = result < 0x100000000; + return static_cast<u32>(result); + } + case ALUOperation::SubtractWithBorrow: { + const u64 result{static_cast<u64>(src_a) - src_b - (carry_flag ? 0ULL : 1ULL)}; + carry_flag = result < 0x100000000; + return static_cast<u32>(result); + } case ALUOperation::Xor: return src_a ^ src_b; case ALUOperation::Or: diff --git a/src/video_core/macro_interpreter.h b/src/video_core/macro_interpreter.h index 62d1ce289..cde360288 100644 --- a/src/video_core/macro_interpreter.h +++ b/src/video_core/macro_interpreter.h @@ -117,7 +117,7 @@ private: bool Step(u32 offset, bool is_delay_slot); /// Calculates the result of an ALU operation. src_a OP src_b; - u32 GetALUResult(ALUOperation operation, u32 src_a, u32 src_b) const; + u32 GetALUResult(ALUOperation operation, u32 src_a, u32 src_b); /// Performs the result operation on the input result and stores it in the specified register /// (if necessary). @@ -165,5 +165,7 @@ private: std::vector<u32> parameters; /// Index of the next parameter that will be fetched by the 'parm' instruction. u32 next_parameter_index = 0; + + bool carry_flag{}; }; } // namespace Tegra diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp index 77a20bb84..47247f097 100644 --- a/src/video_core/memory_manager.cpp +++ b/src/video_core/memory_manager.cpp @@ -9,6 +9,13 @@ namespace Tegra { +MemoryManager::MemoryManager() { + // Mark the first page as reserved, so that 0 is not a valid GPUVAddr. Otherwise, games might + // try to use 0 as a valid address, which is also used to mean nullptr. This fixes a bug with + // Undertale using 0 for a render target. + PageSlot(0) = static_cast<u64>(PageStatus::Reserved); +} + GPUVAddr MemoryManager::AllocateSpace(u64 size, u64 align) { const std::optional<GPUVAddr> gpu_addr{FindFreeBlock(0, size, align, PageStatus::Unmapped)}; diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h index 4eb338aa2..fb03497ca 100644 --- a/src/video_core/memory_manager.h +++ b/src/video_core/memory_manager.h @@ -18,7 +18,7 @@ using GPUVAddr = u64; class MemoryManager final { public: - MemoryManager() = default; + MemoryManager(); GPUVAddr AllocateSpace(u64 size, u64 align); GPUVAddr AllocateSpace(GPUVAddr gpu_addr, u64 size, u64 align); @@ -37,6 +37,7 @@ private: enum class PageStatus : u64 { Unmapped = 0xFFFFFFFFFFFFFFFFULL, Allocated = 0xFFFFFFFFFFFFFFFEULL, + Reserved = 0xFFFFFFFFFFFFFFFDULL, }; std::optional<GPUVAddr> FindFreeBlock(GPUVAddr region_start, u64 size, u64 align, diff --git a/src/video_core/morton.cpp b/src/video_core/morton.cpp new file mode 100644 index 000000000..f14abba7d --- /dev/null +++ b/src/video_core/morton.cpp @@ -0,0 +1,353 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#include <array> +#include <cstring> +#include "common/assert.h" +#include "common/common_types.h" +#include "core/memory.h" +#include "video_core/morton.h" +#include "video_core/surface.h" +#include "video_core/textures/decoders.h" + +namespace VideoCore { + +using Surface::GetBytesPerPixel; +using Surface::PixelFormat; + +using MortonCopyFn = void (*)(u32, u32, u32, u32, u32, u8*, std::size_t, VAddr); +using ConversionArray = std::array<MortonCopyFn, Surface::MaxPixelFormat>; + +template <bool morton_to_linear, PixelFormat format> +static void MortonCopy(u32 stride, u32 block_height, u32 height, u32 block_depth, u32 depth, + u8* buffer, std::size_t buffer_size, VAddr addr) { + constexpr u32 bytes_per_pixel = GetBytesPerPixel(format); + + // With the BCn formats (DXT and DXN), each 4x4 tile is swizzled instead of just individual + // pixel values. + const u32 tile_size_x{GetDefaultBlockWidth(format)}; + const u32 tile_size_y{GetDefaultBlockHeight(format)}; + + if constexpr (morton_to_linear) { + Tegra::Texture::UnswizzleTexture(buffer, addr, tile_size_x, tile_size_y, bytes_per_pixel, + stride, height, depth, block_height, block_depth); + } else { + Tegra::Texture::CopySwizzledData((stride + tile_size_x - 1) / tile_size_x, + (height + tile_size_y - 1) / tile_size_y, depth, + bytes_per_pixel, bytes_per_pixel, Memory::GetPointer(addr), + buffer, false, block_height, block_depth); + } +} + +static constexpr ConversionArray morton_to_linear_fns = { + // clang-format off + MortonCopy<true, PixelFormat::ABGR8U>, + MortonCopy<true, PixelFormat::ABGR8S>, + MortonCopy<true, PixelFormat::ABGR8UI>, + MortonCopy<true, PixelFormat::B5G6R5U>, + MortonCopy<true, PixelFormat::A2B10G10R10U>, + MortonCopy<true, PixelFormat::A1B5G5R5U>, + MortonCopy<true, PixelFormat::R8U>, + MortonCopy<true, PixelFormat::R8UI>, + MortonCopy<true, PixelFormat::RGBA16F>, + MortonCopy<true, PixelFormat::RGBA16U>, + MortonCopy<true, PixelFormat::RGBA16UI>, + MortonCopy<true, PixelFormat::R11FG11FB10F>, + MortonCopy<true, PixelFormat::RGBA32UI>, + MortonCopy<true, PixelFormat::DXT1>, + MortonCopy<true, PixelFormat::DXT23>, + MortonCopy<true, PixelFormat::DXT45>, + MortonCopy<true, PixelFormat::DXN1>, + MortonCopy<true, PixelFormat::DXN2UNORM>, + MortonCopy<true, PixelFormat::DXN2SNORM>, + MortonCopy<true, PixelFormat::BC7U>, + MortonCopy<true, PixelFormat::BC6H_UF16>, + MortonCopy<true, PixelFormat::BC6H_SF16>, + MortonCopy<true, PixelFormat::ASTC_2D_4X4>, + MortonCopy<true, PixelFormat::G8R8U>, + MortonCopy<true, PixelFormat::G8R8S>, + MortonCopy<true, PixelFormat::BGRA8>, + MortonCopy<true, PixelFormat::RGBA32F>, + MortonCopy<true, PixelFormat::RG32F>, + MortonCopy<true, PixelFormat::R32F>, + MortonCopy<true, PixelFormat::R16F>, + MortonCopy<true, PixelFormat::R16U>, + MortonCopy<true, PixelFormat::R16S>, + MortonCopy<true, PixelFormat::R16UI>, + MortonCopy<true, PixelFormat::R16I>, + MortonCopy<true, PixelFormat::RG16>, + MortonCopy<true, PixelFormat::RG16F>, + MortonCopy<true, PixelFormat::RG16UI>, + MortonCopy<true, PixelFormat::RG16I>, + MortonCopy<true, PixelFormat::RG16S>, + MortonCopy<true, PixelFormat::RGB32F>, + MortonCopy<true, PixelFormat::RGBA8_SRGB>, + MortonCopy<true, PixelFormat::RG8U>, + MortonCopy<true, PixelFormat::RG8S>, + MortonCopy<true, PixelFormat::RG32UI>, + MortonCopy<true, PixelFormat::R32UI>, + MortonCopy<true, PixelFormat::ASTC_2D_8X8>, + MortonCopy<true, PixelFormat::ASTC_2D_8X5>, + MortonCopy<true, PixelFormat::ASTC_2D_5X4>, + MortonCopy<true, PixelFormat::BGRA8_SRGB>, + MortonCopy<true, 
PixelFormat::DXT1_SRGB>, + MortonCopy<true, PixelFormat::DXT23_SRGB>, + MortonCopy<true, PixelFormat::DXT45_SRGB>, + MortonCopy<true, PixelFormat::BC7U_SRGB>, + MortonCopy<true, PixelFormat::ASTC_2D_4X4_SRGB>, + MortonCopy<true, PixelFormat::ASTC_2D_8X8_SRGB>, + MortonCopy<true, PixelFormat::ASTC_2D_8X5_SRGB>, + MortonCopy<true, PixelFormat::ASTC_2D_5X4_SRGB>, + MortonCopy<true, PixelFormat::ASTC_2D_5X5>, + MortonCopy<true, PixelFormat::ASTC_2D_5X5_SRGB>, + MortonCopy<true, PixelFormat::ASTC_2D_10X8>, + MortonCopy<true, PixelFormat::ASTC_2D_10X8_SRGB>, + MortonCopy<true, PixelFormat::Z32F>, + MortonCopy<true, PixelFormat::Z16>, + MortonCopy<true, PixelFormat::Z24S8>, + MortonCopy<true, PixelFormat::S8Z24>, + MortonCopy<true, PixelFormat::Z32FS8>, + // clang-format on +}; + +static constexpr ConversionArray linear_to_morton_fns = { + // clang-format off + MortonCopy<false, PixelFormat::ABGR8U>, + MortonCopy<false, PixelFormat::ABGR8S>, + MortonCopy<false, PixelFormat::ABGR8UI>, + MortonCopy<false, PixelFormat::B5G6R5U>, + MortonCopy<false, PixelFormat::A2B10G10R10U>, + MortonCopy<false, PixelFormat::A1B5G5R5U>, + MortonCopy<false, PixelFormat::R8U>, + MortonCopy<false, PixelFormat::R8UI>, + MortonCopy<false, PixelFormat::RGBA16F>, + MortonCopy<false, PixelFormat::RGBA16U>, + MortonCopy<false, PixelFormat::RGBA16UI>, + MortonCopy<false, PixelFormat::R11FG11FB10F>, + MortonCopy<false, PixelFormat::RGBA32UI>, + MortonCopy<false, PixelFormat::DXT1>, + MortonCopy<false, PixelFormat::DXT23>, + MortonCopy<false, PixelFormat::DXT45>, + MortonCopy<false, PixelFormat::DXN1>, + MortonCopy<false, PixelFormat::DXN2UNORM>, + MortonCopy<false, PixelFormat::DXN2SNORM>, + MortonCopy<false, PixelFormat::BC7U>, + MortonCopy<false, PixelFormat::BC6H_UF16>, + MortonCopy<false, PixelFormat::BC6H_SF16>, + // TODO(Subv): Swizzling ASTC formats are not supported + nullptr, + MortonCopy<false, PixelFormat::G8R8U>, + MortonCopy<false, PixelFormat::G8R8S>, + MortonCopy<false, PixelFormat::BGRA8>, + MortonCopy<false, PixelFormat::RGBA32F>, + MortonCopy<false, PixelFormat::RG32F>, + MortonCopy<false, PixelFormat::R32F>, + MortonCopy<false, PixelFormat::R16F>, + MortonCopy<false, PixelFormat::R16U>, + MortonCopy<false, PixelFormat::R16S>, + MortonCopy<false, PixelFormat::R16UI>, + MortonCopy<false, PixelFormat::R16I>, + MortonCopy<false, PixelFormat::RG16>, + MortonCopy<false, PixelFormat::RG16F>, + MortonCopy<false, PixelFormat::RG16UI>, + MortonCopy<false, PixelFormat::RG16I>, + MortonCopy<false, PixelFormat::RG16S>, + MortonCopy<false, PixelFormat::RGB32F>, + MortonCopy<false, PixelFormat::RGBA8_SRGB>, + MortonCopy<false, PixelFormat::RG8U>, + MortonCopy<false, PixelFormat::RG8S>, + MortonCopy<false, PixelFormat::RG32UI>, + MortonCopy<false, PixelFormat::R32UI>, + nullptr, + nullptr, + nullptr, + MortonCopy<false, PixelFormat::BGRA8_SRGB>, + MortonCopy<false, PixelFormat::DXT1_SRGB>, + MortonCopy<false, PixelFormat::DXT23_SRGB>, + MortonCopy<false, PixelFormat::DXT45_SRGB>, + MortonCopy<false, PixelFormat::BC7U_SRGB>, + nullptr, + nullptr, + nullptr, + nullptr, + nullptr, + nullptr, + nullptr, + nullptr, + MortonCopy<false, PixelFormat::Z32F>, + MortonCopy<false, PixelFormat::Z16>, + MortonCopy<false, PixelFormat::Z24S8>, + MortonCopy<false, PixelFormat::S8Z24>, + MortonCopy<false, PixelFormat::Z32FS8>, + // clang-format on +}; + +constexpr MortonCopyFn GetSwizzleFunction(MortonSwizzleMode mode, Surface::PixelFormat format) { + switch (mode) { + case MortonSwizzleMode::MortonToLinear: + return 
morton_to_linear_fns[static_cast<std::size_t>(format)]; + case MortonSwizzleMode::LinearToMorton: + return linear_to_morton_fns[static_cast<std::size_t>(format)]; + } +} + +/// 8x8 Z-Order coordinate from 2D coordinates +static u32 MortonInterleave(u32 x, u32 y) { + static const u32 xlut[] = {0x00, 0x01, 0x04, 0x05, 0x10, 0x11, 0x14, 0x15}; + static const u32 ylut[] = {0x00, 0x02, 0x08, 0x0a, 0x20, 0x22, 0x28, 0x2a}; + return xlut[x % 8] + ylut[y % 8]; +} + +/// Calculates the offset of the position of the pixel in Morton order +static u32 GetMortonOffset(u32 x, u32 y, u32 bytes_per_pixel) { + // Images are split into 8x8 tiles. Each tile is composed of four 4x4 subtiles each + // of which is composed of four 2x2 subtiles each of which is composed of four texels. + // Each structure is embedded into the next-bigger one in a diagonal pattern, e.g. + // texels are laid out in a 2x2 subtile like this: + // 2 3 + // 0 1 + // + // The full 8x8 tile has the texels arranged like this: + // + // 42 43 46 47 58 59 62 63 + // 40 41 44 45 56 57 60 61 + // 34 35 38 39 50 51 54 55 + // 32 33 36 37 48 49 52 53 + // 10 11 14 15 26 27 30 31 + // 08 09 12 13 24 25 28 29 + // 02 03 06 07 18 19 22 23 + // 00 01 04 05 16 17 20 21 + // + // This pattern is what's called Z-order curve, or Morton order. + + const unsigned int block_height = 8; + const unsigned int coarse_x = x & ~7; + + u32 i = MortonInterleave(x, y); + + const unsigned int offset = coarse_x * block_height; + + return (i + offset) * bytes_per_pixel; +} + +static u32 MortonInterleave128(u32 x, u32 y) { + // 128x128 Z-Order coordinate from 2D coordinates + static constexpr u32 xlut[] = { + 0x0000, 0x0001, 0x0002, 0x0003, 0x0008, 0x0009, 0x000a, 0x000b, 0x0040, 0x0041, 0x0042, + 0x0043, 0x0048, 0x0049, 0x004a, 0x004b, 0x0800, 0x0801, 0x0802, 0x0803, 0x0808, 0x0809, + 0x080a, 0x080b, 0x0840, 0x0841, 0x0842, 0x0843, 0x0848, 0x0849, 0x084a, 0x084b, 0x1000, + 0x1001, 0x1002, 0x1003, 0x1008, 0x1009, 0x100a, 0x100b, 0x1040, 0x1041, 0x1042, 0x1043, + 0x1048, 0x1049, 0x104a, 0x104b, 0x1800, 0x1801, 0x1802, 0x1803, 0x1808, 0x1809, 0x180a, + 0x180b, 0x1840, 0x1841, 0x1842, 0x1843, 0x1848, 0x1849, 0x184a, 0x184b, 0x2000, 0x2001, + 0x2002, 0x2003, 0x2008, 0x2009, 0x200a, 0x200b, 0x2040, 0x2041, 0x2042, 0x2043, 0x2048, + 0x2049, 0x204a, 0x204b, 0x2800, 0x2801, 0x2802, 0x2803, 0x2808, 0x2809, 0x280a, 0x280b, + 0x2840, 0x2841, 0x2842, 0x2843, 0x2848, 0x2849, 0x284a, 0x284b, 0x3000, 0x3001, 0x3002, + 0x3003, 0x3008, 0x3009, 0x300a, 0x300b, 0x3040, 0x3041, 0x3042, 0x3043, 0x3048, 0x3049, + 0x304a, 0x304b, 0x3800, 0x3801, 0x3802, 0x3803, 0x3808, 0x3809, 0x380a, 0x380b, 0x3840, + 0x3841, 0x3842, 0x3843, 0x3848, 0x3849, 0x384a, 0x384b, 0x0000, 0x0001, 0x0002, 0x0003, + 0x0008, 0x0009, 0x000a, 0x000b, 0x0040, 0x0041, 0x0042, 0x0043, 0x0048, 0x0049, 0x004a, + 0x004b, 0x0800, 0x0801, 0x0802, 0x0803, 0x0808, 0x0809, 0x080a, 0x080b, 0x0840, 0x0841, + 0x0842, 0x0843, 0x0848, 0x0849, 0x084a, 0x084b, 0x1000, 0x1001, 0x1002, 0x1003, 0x1008, + 0x1009, 0x100a, 0x100b, 0x1040, 0x1041, 0x1042, 0x1043, 0x1048, 0x1049, 0x104a, 0x104b, + 0x1800, 0x1801, 0x1802, 0x1803, 0x1808, 0x1809, 0x180a, 0x180b, 0x1840, 0x1841, 0x1842, + 0x1843, 0x1848, 0x1849, 0x184a, 0x184b, 0x2000, 0x2001, 0x2002, 0x2003, 0x2008, 0x2009, + 0x200a, 0x200b, 0x2040, 0x2041, 0x2042, 0x2043, 0x2048, 0x2049, 0x204a, 0x204b, 0x2800, + 0x2801, 0x2802, 0x2803, 0x2808, 0x2809, 0x280a, 0x280b, 0x2840, 0x2841, 0x2842, 0x2843, + 0x2848, 0x2849, 0x284a, 0x284b, 0x3000, 0x3001, 0x3002, 0x3003, 0x3008, 0x3009, 0x300a, + 
0x300b, 0x3040, 0x3041, 0x3042, 0x3043, 0x3048, 0x3049, 0x304a, 0x304b, 0x3800, 0x3801, + 0x3802, 0x3803, 0x3808, 0x3809, 0x380a, 0x380b, 0x3840, 0x3841, 0x3842, 0x3843, 0x3848, + 0x3849, 0x384a, 0x384b, 0x0000, 0x0001, 0x0002, 0x0003, 0x0008, 0x0009, 0x000a, 0x000b, + 0x0040, 0x0041, 0x0042, 0x0043, 0x0048, 0x0049, 0x004a, 0x004b, 0x0800, 0x0801, 0x0802, + 0x0803, 0x0808, 0x0809, 0x080a, 0x080b, 0x0840, 0x0841, 0x0842, 0x0843, 0x0848, 0x0849, + 0x084a, 0x084b, 0x1000, 0x1001, 0x1002, 0x1003, 0x1008, 0x1009, 0x100a, 0x100b, 0x1040, + 0x1041, 0x1042, 0x1043, 0x1048, 0x1049, 0x104a, 0x104b, 0x1800, 0x1801, 0x1802, 0x1803, + 0x1808, 0x1809, 0x180a, 0x180b, 0x1840, 0x1841, 0x1842, 0x1843, 0x1848, 0x1849, 0x184a, + 0x184b, 0x2000, 0x2001, 0x2002, 0x2003, 0x2008, 0x2009, 0x200a, 0x200b, 0x2040, 0x2041, + 0x2042, 0x2043, 0x2048, 0x2049, 0x204a, 0x204b, 0x2800, 0x2801, 0x2802, 0x2803, 0x2808, + 0x2809, 0x280a, 0x280b, 0x2840, 0x2841, 0x2842, 0x2843, 0x2848, 0x2849, 0x284a, 0x284b, + 0x3000, 0x3001, 0x3002, 0x3003, 0x3008, 0x3009, 0x300a, 0x300b, 0x3040, 0x3041, 0x3042, + 0x3043, 0x3048, 0x3049, 0x304a, 0x304b, 0x3800, 0x3801, 0x3802, 0x3803, 0x3808, 0x3809, + 0x380a, 0x380b, 0x3840, 0x3841, 0x3842, 0x3843, 0x3848, 0x3849, 0x384a, 0x384b, + }; + static constexpr u32 ylut[] = { + 0x0000, 0x0004, 0x0010, 0x0014, 0x0020, 0x0024, 0x0030, 0x0034, 0x0080, 0x0084, 0x0090, + 0x0094, 0x00a0, 0x00a4, 0x00b0, 0x00b4, 0x0100, 0x0104, 0x0110, 0x0114, 0x0120, 0x0124, + 0x0130, 0x0134, 0x0180, 0x0184, 0x0190, 0x0194, 0x01a0, 0x01a4, 0x01b0, 0x01b4, 0x0200, + 0x0204, 0x0210, 0x0214, 0x0220, 0x0224, 0x0230, 0x0234, 0x0280, 0x0284, 0x0290, 0x0294, + 0x02a0, 0x02a4, 0x02b0, 0x02b4, 0x0300, 0x0304, 0x0310, 0x0314, 0x0320, 0x0324, 0x0330, + 0x0334, 0x0380, 0x0384, 0x0390, 0x0394, 0x03a0, 0x03a4, 0x03b0, 0x03b4, 0x0400, 0x0404, + 0x0410, 0x0414, 0x0420, 0x0424, 0x0430, 0x0434, 0x0480, 0x0484, 0x0490, 0x0494, 0x04a0, + 0x04a4, 0x04b0, 0x04b4, 0x0500, 0x0504, 0x0510, 0x0514, 0x0520, 0x0524, 0x0530, 0x0534, + 0x0580, 0x0584, 0x0590, 0x0594, 0x05a0, 0x05a4, 0x05b0, 0x05b4, 0x0600, 0x0604, 0x0610, + 0x0614, 0x0620, 0x0624, 0x0630, 0x0634, 0x0680, 0x0684, 0x0690, 0x0694, 0x06a0, 0x06a4, + 0x06b0, 0x06b4, 0x0700, 0x0704, 0x0710, 0x0714, 0x0720, 0x0724, 0x0730, 0x0734, 0x0780, + 0x0784, 0x0790, 0x0794, 0x07a0, 0x07a4, 0x07b0, 0x07b4, 0x0000, 0x0004, 0x0010, 0x0014, + 0x0020, 0x0024, 0x0030, 0x0034, 0x0080, 0x0084, 0x0090, 0x0094, 0x00a0, 0x00a4, 0x00b0, + 0x00b4, 0x0100, 0x0104, 0x0110, 0x0114, 0x0120, 0x0124, 0x0130, 0x0134, 0x0180, 0x0184, + 0x0190, 0x0194, 0x01a0, 0x01a4, 0x01b0, 0x01b4, 0x0200, 0x0204, 0x0210, 0x0214, 0x0220, + 0x0224, 0x0230, 0x0234, 0x0280, 0x0284, 0x0290, 0x0294, 0x02a0, 0x02a4, 0x02b0, 0x02b4, + 0x0300, 0x0304, 0x0310, 0x0314, 0x0320, 0x0324, 0x0330, 0x0334, 0x0380, 0x0384, 0x0390, + 0x0394, 0x03a0, 0x03a4, 0x03b0, 0x03b4, 0x0400, 0x0404, 0x0410, 0x0414, 0x0420, 0x0424, + 0x0430, 0x0434, 0x0480, 0x0484, 0x0490, 0x0494, 0x04a0, 0x04a4, 0x04b0, 0x04b4, 0x0500, + 0x0504, 0x0510, 0x0514, 0x0520, 0x0524, 0x0530, 0x0534, 0x0580, 0x0584, 0x0590, 0x0594, + 0x05a0, 0x05a4, 0x05b0, 0x05b4, 0x0600, 0x0604, 0x0610, 0x0614, 0x0620, 0x0624, 0x0630, + 0x0634, 0x0680, 0x0684, 0x0690, 0x0694, 0x06a0, 0x06a4, 0x06b0, 0x06b4, 0x0700, 0x0704, + 0x0710, 0x0714, 0x0720, 0x0724, 0x0730, 0x0734, 0x0780, 0x0784, 0x0790, 0x0794, 0x07a0, + 0x07a4, 0x07b0, 0x07b4, 0x0000, 0x0004, 0x0010, 0x0014, 0x0020, 0x0024, 0x0030, 0x0034, + 0x0080, 0x0084, 0x0090, 0x0094, 0x00a0, 0x00a4, 0x00b0, 0x00b4, 0x0100, 0x0104, 0x0110, + 0x0114, 
0x0120, 0x0124, 0x0130, 0x0134, 0x0180, 0x0184, 0x0190, 0x0194, 0x01a0, 0x01a4, + 0x01b0, 0x01b4, 0x0200, 0x0204, 0x0210, 0x0214, 0x0220, 0x0224, 0x0230, 0x0234, 0x0280, + 0x0284, 0x0290, 0x0294, 0x02a0, 0x02a4, 0x02b0, 0x02b4, 0x0300, 0x0304, 0x0310, 0x0314, + 0x0320, 0x0324, 0x0330, 0x0334, 0x0380, 0x0384, 0x0390, 0x0394, 0x03a0, 0x03a4, 0x03b0, + 0x03b4, 0x0400, 0x0404, 0x0410, 0x0414, 0x0420, 0x0424, 0x0430, 0x0434, 0x0480, 0x0484, + 0x0490, 0x0494, 0x04a0, 0x04a4, 0x04b0, 0x04b4, 0x0500, 0x0504, 0x0510, 0x0514, 0x0520, + 0x0524, 0x0530, 0x0534, 0x0580, 0x0584, 0x0590, 0x0594, 0x05a0, 0x05a4, 0x05b0, 0x05b4, + 0x0600, 0x0604, 0x0610, 0x0614, 0x0620, 0x0624, 0x0630, 0x0634, 0x0680, 0x0684, 0x0690, + 0x0694, 0x06a0, 0x06a4, 0x06b0, 0x06b4, 0x0700, 0x0704, 0x0710, 0x0714, 0x0720, 0x0724, + 0x0730, 0x0734, 0x0780, 0x0784, 0x0790, 0x0794, 0x07a0, 0x07a4, 0x07b0, 0x07b4, + }; + return xlut[x % 128] + ylut[y % 128]; +} + +static u32 GetMortonOffset128(u32 x, u32 y, u32 bytes_per_pixel) { + // Calculates the offset of the position of the pixel in Morton order + // Framebuffer images are split into 128x128 tiles. + + constexpr u32 block_height = 128; + const u32 coarse_x = x & ~127; + + const u32 i = MortonInterleave128(x, y); + + const u32 offset = coarse_x * block_height; + + return (i + offset) * bytes_per_pixel; +} + +void MortonSwizzle(MortonSwizzleMode mode, Surface::PixelFormat format, u32 stride, + u32 block_height, u32 height, u32 block_depth, u32 depth, u8* buffer, + std::size_t buffer_size, VAddr addr) { + + GetSwizzleFunction(mode, format)(stride, block_height, height, block_depth, depth, buffer, + buffer_size, addr); +} + +void MortonCopyPixels128(u32 width, u32 height, u32 bytes_per_pixel, u32 linear_bytes_per_pixel, + u8* morton_data, u8* linear_data, bool morton_to_linear) { + u8* data_ptrs[2]; + for (u32 y = 0; y < height; ++y) { + for (u32 x = 0; x < width; ++x) { + const u32 coarse_y = y & ~127; + const u32 morton_offset = + GetMortonOffset128(x, y, bytes_per_pixel) + coarse_y * width * bytes_per_pixel; + const u32 linear_pixel_index = (x + y * width) * linear_bytes_per_pixel; + + data_ptrs[morton_to_linear ? 1 : 0] = morton_data + morton_offset; + data_ptrs[morton_to_linear ? 0 : 1] = &linear_data[linear_pixel_index]; + + std::memcpy(data_ptrs[0], data_ptrs[1], bytes_per_pixel); + } + } +} + +} // namespace VideoCore
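
The xlut/ylut tables in MortonInterleave() above are a precomputed form of the usual 2D Z-order bit interleave: the low three bits of x are spread onto the even bit positions and the low three bits of y onto the odd ones. A minimal standalone sketch of the equivalent computation (the helper name below is illustrative and not part of this change):

    #include <cassert>
    #include <cstdint>

    // Interleaves the low three bits of x and y, matching the 8x8 xlut/ylut tables:
    // x bits go to positions 0, 2, 4 and y bits to positions 1, 3, 5.
    static std::uint32_t InterleaveBits3(std::uint32_t x, std::uint32_t y) {
        std::uint32_t result = 0;
        for (std::uint32_t bit = 0; bit < 3; ++bit) {
            result |= ((x >> bit) & 1) << (2 * bit);     // x -> even positions
            result |= ((y >> bit) & 1) << (2 * bit + 1); // y -> odd positions
        }
        return result;
    }

    int main() {
        // Agrees with the table-based MortonInterleave(x, y) for x, y in [0, 7],
        // e.g. xlut[3] + ylut[5] = 0x05 + 0x22 = 0x27.
        assert(InterleaveBits3(3, 5) == 0x27);
        return 0;
    }
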
\ No newline at end of file diff --git a/src/video_core/morton.h b/src/video_core/morton.h new file mode 100644 index 000000000..b9b9eca86 --- /dev/null +++ b/src/video_core/morton.h @@ -0,0 +1,21 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include "common/common_types.h" +#include "video_core/surface.h" + +namespace VideoCore { + +enum class MortonSwizzleMode { MortonToLinear, LinearToMorton }; + +void MortonSwizzle(MortonSwizzleMode mode, VideoCore::Surface::PixelFormat format, u32 stride, + u32 block_height, u32 height, u32 block_depth, u32 depth, u8* buffer, + std::size_t buffer_size, VAddr addr); + +void MortonCopyPixels128(u32 width, u32 height, u32 bytes_per_pixel, u32 linear_bytes_per_pixel, + u8* morton_data, u8* linear_data, bool morton_to_linear); + +} // namespace VideoCore
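
morton.h exposes the swizzling helpers that gl_rasterizer_cache.cpp previously kept as per-format GL function tables. A sketch of how a caller drives the new entry point, mirroring SwizzleFunc() further down in this diff (the wrapper name and parameter values are illustrative placeholders, not part of the change):

    #include <vector>
    #include "video_core/morton.h"
    #include "video_core/surface.h"

    // Unswizzles one mip level of guest texture memory at `addr` into a linear buffer.
    // MortonSwizzleMode::LinearToMorton would perform the reverse copy when flushing.
    void DeswizzleMip(VideoCore::Surface::PixelFormat format, u32 width, u32 height,
                      u32 block_height, u32 block_depth, VAddr addr, std::vector<u8>& linear) {
        VideoCore::MortonSwizzle(VideoCore::MortonSwizzleMode::MortonToLinear, format, width,
                                 block_height, height, block_depth, /*depth=*/1, linear.data(),
                                 linear.size(), addr);
    }
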
\ No newline at end of file diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index ae6aaee4c..630a58e49 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp @@ -98,14 +98,9 @@ RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& window, ScreenInfo has_ARB_direct_state_access = true; } else if (extension == "GL_ARB_multi_bind") { has_ARB_multi_bind = true; - } else if (extension == "GL_ARB_separate_shader_objects") { - has_ARB_separate_shader_objects = true; - } else if (extension == "GL_ARB_vertex_attrib_binding") { - has_ARB_vertex_attrib_binding = true; } } - ASSERT_MSG(has_ARB_separate_shader_objects, "has_ARB_separate_shader_objects is unsupported"); OpenGLState::ApplyDefaultState(); // Create render framebuffer @@ -542,6 +537,30 @@ void RasterizerOpenGL::Clear() { ASSERT_MSG(regs.zeta_enable != 0, "Tried to clear stencil but buffer is not enabled!"); use_stencil = true; clear_state.stencil.test_enabled = true; + if (regs.clear_flags.stencil) { + // Stencil affects the clear so fill it with the used masks + clear_state.stencil.front.test_func = GL_ALWAYS; + clear_state.stencil.front.test_mask = regs.stencil_front_func_mask; + clear_state.stencil.front.action_stencil_fail = GL_KEEP; + clear_state.stencil.front.action_depth_fail = GL_KEEP; + clear_state.stencil.front.action_depth_pass = GL_KEEP; + clear_state.stencil.front.write_mask = regs.stencil_front_mask; + if (regs.stencil_two_side_enable) { + clear_state.stencil.back.test_func = GL_ALWAYS; + clear_state.stencil.back.test_mask = regs.stencil_back_func_mask; + clear_state.stencil.back.action_stencil_fail = GL_KEEP; + clear_state.stencil.back.action_depth_fail = GL_KEEP; + clear_state.stencil.back.action_depth_pass = GL_KEEP; + clear_state.stencil.back.write_mask = regs.stencil_back_mask; + } else { + clear_state.stencil.back.test_func = GL_ALWAYS; + clear_state.stencil.back.test_mask = 0xFFFFFFFF; + clear_state.stencil.back.write_mask = 0xFFFFFFFF; + clear_state.stencil.back.action_stencil_fail = GL_KEEP; + clear_state.stencil.back.action_depth_fail = GL_KEEP; + clear_state.stencil.back.action_depth_pass = GL_KEEP; + } + } } if (!use_color && !use_depth && !use_stencil) { @@ -553,6 +572,14 @@ void RasterizerOpenGL::Clear() { ConfigureFramebuffers(clear_state, use_color, use_depth || use_stencil, false, regs.clear_buffers.RT.Value()); + if (regs.clear_flags.scissor) { + SyncScissorTest(clear_state); + } + + if (regs.clear_flags.viewport) { + clear_state.EmulateViewportWithScissor(); + } + clear_state.Apply(); if (use_color) { @@ -588,7 +615,7 @@ void RasterizerOpenGL::DrawArrays() { SyncLogicOpState(); SyncCullMode(); SyncPrimitiveRestart(); - SyncScissorTest(); + SyncScissorTest(state); // Alpha Testing is synced on shaders. SyncTransformFeedback(); SyncPointState(); @@ -815,7 +842,7 @@ void RasterizerOpenGL::SamplerInfo::SyncWithConfig(const Tegra::Texture::TSCEntr } const u32 bias = config.mip_lod_bias.Value(); // Sign extend the 13-bit value. 
- const u32 mask = 1U << (13 - 1); + constexpr u32 mask = 1U << (13 - 1); const float bias_lod = static_cast<s32>((bias ^ mask) - mask) / 256.f; if (lod_bias != bias_lod) { lod_bias = bias_lod; @@ -947,8 +974,8 @@ void RasterizerOpenGL::SyncViewport(OpenGLState& current_state) { auto& viewport = current_state.viewports[i]; viewport.x = viewport_rect.left; viewport.y = viewport_rect.bottom; - viewport.width = static_cast<GLfloat>(viewport_rect.GetWidth()); - viewport.height = static_cast<GLfloat>(viewport_rect.GetHeight()); + viewport.width = viewport_rect.GetWidth(); + viewport.height = viewport_rect.GetHeight(); viewport.depth_range_far = regs.viewports[i].depth_range_far; viewport.depth_range_near = regs.viewports[i].depth_range_near; } @@ -1120,11 +1147,11 @@ void RasterizerOpenGL::SyncLogicOpState() { state.logic_op.operation = MaxwellToGL::LogicOp(regs.logic_op.operation); } -void RasterizerOpenGL::SyncScissorTest() { +void RasterizerOpenGL::SyncScissorTest(OpenGLState& current_state) { const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs; for (std::size_t i = 0; i < Tegra::Engines::Maxwell3D::Regs::NumViewports; i++) { const auto& src = regs.scissor_test[i]; - auto& dst = state.viewports[i].scissor; + auto& dst = current_state.viewports[i].scissor; dst.enabled = (src.enable != 0); if (dst.enabled == 0) { return; diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h index 6e78ab4cd..f4354289c 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.h +++ b/src/video_core/renderer_opengl/gl_rasterizer.h @@ -91,19 +91,20 @@ private: void SyncWithConfig(const Tegra::Texture::TSCEntry& info); private: - Tegra::Texture::TextureFilter mag_filter; - Tegra::Texture::TextureFilter min_filter; - Tegra::Texture::TextureMipmapFilter mip_filter; - Tegra::Texture::WrapMode wrap_u; - Tegra::Texture::WrapMode wrap_v; - Tegra::Texture::WrapMode wrap_p; - bool uses_depth_compare; - Tegra::Texture::DepthCompareFunc depth_compare_func; - GLvec4 border_color; - float min_lod; - float max_lod; - float lod_bias; - float max_anisotropic; + Tegra::Texture::TextureFilter mag_filter = Tegra::Texture::TextureFilter::Nearest; + Tegra::Texture::TextureFilter min_filter = Tegra::Texture::TextureFilter::Nearest; + Tegra::Texture::TextureMipmapFilter mip_filter = Tegra::Texture::TextureMipmapFilter::None; + Tegra::Texture::WrapMode wrap_u = Tegra::Texture::WrapMode::ClampToEdge; + Tegra::Texture::WrapMode wrap_v = Tegra::Texture::WrapMode::ClampToEdge; + Tegra::Texture::WrapMode wrap_p = Tegra::Texture::WrapMode::ClampToEdge; + bool uses_depth_compare = false; + Tegra::Texture::DepthCompareFunc depth_compare_func = + Tegra::Texture::DepthCompareFunc::Always; + GLvec4 border_color = {}; + float min_lod = 0.0f; + float max_lod = 16.0f; + float lod_bias = 0.0f; + float max_anisotropic = 1.0f; }; /** @@ -171,7 +172,7 @@ private: void SyncMultiSampleState(); /// Syncs the scissor test state to match the guest state - void SyncScissorTest(); + void SyncScissorTest(OpenGLState& current_state); /// Syncs the transform feedback state to match the guest state void SyncTransformFeedback(); @@ -187,8 +188,6 @@ private: bool has_ARB_direct_state_access = false; bool has_ARB_multi_bind = false; - bool has_ARB_separate_shader_objects = false; - bool has_ARB_vertex_attrib_binding = false; OpenGLState state; diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp index 9ca82c06c..d458f77e4 
100644 --- a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp @@ -15,6 +15,7 @@ #include "core/memory.h" #include "core/settings.h" #include "video_core/engines/maxwell_3d.h" +#include "video_core/morton.h" #include "video_core/renderer_opengl/gl_rasterizer.h" #include "video_core/renderer_opengl/gl_rasterizer_cache.h" #include "video_core/renderer_opengl/gl_state.h" @@ -22,10 +23,11 @@ #include "video_core/surface.h" #include "video_core/textures/astc.h" #include "video_core/textures/decoders.h" -#include "video_core/utils.h" namespace OpenGL { +using VideoCore::MortonSwizzle; +using VideoCore::MortonSwizzleMode; using VideoCore::Surface::ComponentTypeFromDepthFormat; using VideoCore::Surface::ComponentTypeFromRenderTarget; using VideoCore::Surface::ComponentTypeFromTexture; @@ -265,11 +267,11 @@ static constexpr std::array<FormatTuple, VideoCore::Surface::MaxPixelFormat> tex {GL_COMPRESSED_RG_RGTC2, GL_RG, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm, true}, // DXN2UNORM {GL_COMPRESSED_SIGNED_RG_RGTC2, GL_RG, GL_INT, ComponentType::SNorm, true}, // DXN2SNORM - {GL_COMPRESSED_RGBA_BPTC_UNORM_ARB, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm, + {GL_COMPRESSED_RGBA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm, true}, // BC7U - {GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_ARB, GL_RGB, GL_UNSIGNED_INT_8_8_8_8, - ComponentType::Float, true}, // BC6H_UF16 - {GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_ARB, GL_RGB, GL_UNSIGNED_INT_8_8_8_8, ComponentType::Float, + {GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT, GL_RGB, GL_UNSIGNED_INT_8_8_8_8, ComponentType::Float, + true}, // BC6H_UF16 + {GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT, GL_RGB, GL_UNSIGNED_INT_8_8_8_8, ComponentType::Float, true}, // BC6H_SF16 {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_4X4 {GL_RG8, GL_RG, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // G8R8U @@ -306,8 +308,8 @@ static constexpr std::array<FormatTuple, VideoCore::Surface::MaxPixelFormat> tex true}, // DXT23_SRGB {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm, true}, // DXT45_SRGB - {GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_ARB, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, - ComponentType::UNorm, true}, // BC7U_SRGB + {GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm, + true}, // BC7U_SRGB {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_4X4_SRGB {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_8X8_SRGB {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_8X5_SRGB @@ -346,7 +348,7 @@ static GLenum SurfaceTargetToGL(SurfaceTarget target) { case SurfaceTarget::TextureCubemap: return GL_TEXTURE_CUBE_MAP; case SurfaceTarget::TextureCubeArray: - return GL_TEXTURE_CUBE_MAP_ARRAY_ARB; + return GL_TEXTURE_CUBE_MAP_ARRAY; } LOG_CRITICAL(Render_OpenGL, "Unimplemented texture target={}", static_cast<u32>(target)); UNREACHABLE(); @@ -370,174 +372,7 @@ MathUtil::Rectangle<u32> SurfaceParams::GetRect(u32 mip_level) const { return {0, actual_height, MipWidth(mip_level), 0}; } -template <bool morton_to_gl, PixelFormat format> -void MortonCopy(u32 stride, u32 block_height, u32 height, u32 block_depth, u32 depth, u8* gl_buffer, - std::size_t gl_buffer_size, VAddr addr) { - constexpr u32 bytes_per_pixel = GetBytesPerPixel(format); - - // With the BCn formats (DXT and DXN), each 4x4 tile is swizzled 
instead of just individual - // pixel values. - const u32 tile_size_x{GetDefaultBlockWidth(format)}; - const u32 tile_size_y{GetDefaultBlockHeight(format)}; - - if (morton_to_gl) { - Tegra::Texture::UnswizzleTexture(gl_buffer, addr, tile_size_x, tile_size_y, bytes_per_pixel, - stride, height, depth, block_height, block_depth); - } else { - Tegra::Texture::CopySwizzledData((stride + tile_size_x - 1) / tile_size_x, - (height + tile_size_y - 1) / tile_size_y, depth, - bytes_per_pixel, bytes_per_pixel, Memory::GetPointer(addr), - gl_buffer, false, block_height, block_depth); - } -} - -using GLConversionArray = std::array<void (*)(u32, u32, u32, u32, u32, u8*, std::size_t, VAddr), - VideoCore::Surface::MaxPixelFormat>; - -static constexpr GLConversionArray morton_to_gl_fns = { - // clang-format off - MortonCopy<true, PixelFormat::ABGR8U>, - MortonCopy<true, PixelFormat::ABGR8S>, - MortonCopy<true, PixelFormat::ABGR8UI>, - MortonCopy<true, PixelFormat::B5G6R5U>, - MortonCopy<true, PixelFormat::A2B10G10R10U>, - MortonCopy<true, PixelFormat::A1B5G5R5U>, - MortonCopy<true, PixelFormat::R8U>, - MortonCopy<true, PixelFormat::R8UI>, - MortonCopy<true, PixelFormat::RGBA16F>, - MortonCopy<true, PixelFormat::RGBA16U>, - MortonCopy<true, PixelFormat::RGBA16UI>, - MortonCopy<true, PixelFormat::R11FG11FB10F>, - MortonCopy<true, PixelFormat::RGBA32UI>, - MortonCopy<true, PixelFormat::DXT1>, - MortonCopy<true, PixelFormat::DXT23>, - MortonCopy<true, PixelFormat::DXT45>, - MortonCopy<true, PixelFormat::DXN1>, - MortonCopy<true, PixelFormat::DXN2UNORM>, - MortonCopy<true, PixelFormat::DXN2SNORM>, - MortonCopy<true, PixelFormat::BC7U>, - MortonCopy<true, PixelFormat::BC6H_UF16>, - MortonCopy<true, PixelFormat::BC6H_SF16>, - MortonCopy<true, PixelFormat::ASTC_2D_4X4>, - MortonCopy<true, PixelFormat::G8R8U>, - MortonCopy<true, PixelFormat::G8R8S>, - MortonCopy<true, PixelFormat::BGRA8>, - MortonCopy<true, PixelFormat::RGBA32F>, - MortonCopy<true, PixelFormat::RG32F>, - MortonCopy<true, PixelFormat::R32F>, - MortonCopy<true, PixelFormat::R16F>, - MortonCopy<true, PixelFormat::R16U>, - MortonCopy<true, PixelFormat::R16S>, - MortonCopy<true, PixelFormat::R16UI>, - MortonCopy<true, PixelFormat::R16I>, - MortonCopy<true, PixelFormat::RG16>, - MortonCopy<true, PixelFormat::RG16F>, - MortonCopy<true, PixelFormat::RG16UI>, - MortonCopy<true, PixelFormat::RG16I>, - MortonCopy<true, PixelFormat::RG16S>, - MortonCopy<true, PixelFormat::RGB32F>, - MortonCopy<true, PixelFormat::RGBA8_SRGB>, - MortonCopy<true, PixelFormat::RG8U>, - MortonCopy<true, PixelFormat::RG8S>, - MortonCopy<true, PixelFormat::RG32UI>, - MortonCopy<true, PixelFormat::R32UI>, - MortonCopy<true, PixelFormat::ASTC_2D_8X8>, - MortonCopy<true, PixelFormat::ASTC_2D_8X5>, - MortonCopy<true, PixelFormat::ASTC_2D_5X4>, - MortonCopy<true, PixelFormat::BGRA8_SRGB>, - MortonCopy<true, PixelFormat::DXT1_SRGB>, - MortonCopy<true, PixelFormat::DXT23_SRGB>, - MortonCopy<true, PixelFormat::DXT45_SRGB>, - MortonCopy<true, PixelFormat::BC7U_SRGB>, - MortonCopy<true, PixelFormat::ASTC_2D_4X4_SRGB>, - MortonCopy<true, PixelFormat::ASTC_2D_8X8_SRGB>, - MortonCopy<true, PixelFormat::ASTC_2D_8X5_SRGB>, - MortonCopy<true, PixelFormat::ASTC_2D_5X4_SRGB>, - MortonCopy<true, PixelFormat::ASTC_2D_5X5>, - MortonCopy<true, PixelFormat::ASTC_2D_5X5_SRGB>, - MortonCopy<true, PixelFormat::ASTC_2D_10X8>, - MortonCopy<true, PixelFormat::ASTC_2D_10X8_SRGB>, - MortonCopy<true, PixelFormat::Z32F>, - MortonCopy<true, PixelFormat::Z16>, - MortonCopy<true, PixelFormat::Z24S8>, - MortonCopy<true, 
PixelFormat::S8Z24>, - MortonCopy<true, PixelFormat::Z32FS8>, - // clang-format on -}; - -static constexpr GLConversionArray gl_to_morton_fns = { - // clang-format off - MortonCopy<false, PixelFormat::ABGR8U>, - MortonCopy<false, PixelFormat::ABGR8S>, - MortonCopy<false, PixelFormat::ABGR8UI>, - MortonCopy<false, PixelFormat::B5G6R5U>, - MortonCopy<false, PixelFormat::A2B10G10R10U>, - MortonCopy<false, PixelFormat::A1B5G5R5U>, - MortonCopy<false, PixelFormat::R8U>, - MortonCopy<false, PixelFormat::R8UI>, - MortonCopy<false, PixelFormat::RGBA16F>, - MortonCopy<false, PixelFormat::RGBA16U>, - MortonCopy<false, PixelFormat::RGBA16UI>, - MortonCopy<false, PixelFormat::R11FG11FB10F>, - MortonCopy<false, PixelFormat::RGBA32UI>, - MortonCopy<false, PixelFormat::DXT1>, - MortonCopy<false, PixelFormat::DXT23>, - MortonCopy<false, PixelFormat::DXT45>, - MortonCopy<false, PixelFormat::DXN1>, - MortonCopy<false, PixelFormat::DXN2UNORM>, - MortonCopy<false, PixelFormat::DXN2SNORM>, - MortonCopy<false, PixelFormat::BC7U>, - MortonCopy<false, PixelFormat::BC6H_UF16>, - MortonCopy<false, PixelFormat::BC6H_SF16>, - // TODO(Subv): Swizzling ASTC formats are not supported - nullptr, - MortonCopy<false, PixelFormat::G8R8U>, - MortonCopy<false, PixelFormat::G8R8S>, - MortonCopy<false, PixelFormat::BGRA8>, - MortonCopy<false, PixelFormat::RGBA32F>, - MortonCopy<false, PixelFormat::RG32F>, - MortonCopy<false, PixelFormat::R32F>, - MortonCopy<false, PixelFormat::R16F>, - MortonCopy<false, PixelFormat::R16U>, - MortonCopy<false, PixelFormat::R16S>, - MortonCopy<false, PixelFormat::R16UI>, - MortonCopy<false, PixelFormat::R16I>, - MortonCopy<false, PixelFormat::RG16>, - MortonCopy<false, PixelFormat::RG16F>, - MortonCopy<false, PixelFormat::RG16UI>, - MortonCopy<false, PixelFormat::RG16I>, - MortonCopy<false, PixelFormat::RG16S>, - MortonCopy<false, PixelFormat::RGB32F>, - MortonCopy<false, PixelFormat::RGBA8_SRGB>, - MortonCopy<false, PixelFormat::RG8U>, - MortonCopy<false, PixelFormat::RG8S>, - MortonCopy<false, PixelFormat::RG32UI>, - MortonCopy<false, PixelFormat::R32UI>, - nullptr, - nullptr, - nullptr, - MortonCopy<false, PixelFormat::BGRA8_SRGB>, - MortonCopy<false, PixelFormat::DXT1_SRGB>, - MortonCopy<false, PixelFormat::DXT23_SRGB>, - MortonCopy<false, PixelFormat::DXT45_SRGB>, - MortonCopy<false, PixelFormat::BC7U_SRGB>, - nullptr, - nullptr, - nullptr, - nullptr, - nullptr, - nullptr, - nullptr, - nullptr, - MortonCopy<false, PixelFormat::Z32F>, - MortonCopy<false, PixelFormat::Z16>, - MortonCopy<false, PixelFormat::Z24S8>, - MortonCopy<false, PixelFormat::S8Z24>, - MortonCopy<false, PixelFormat::Z32FS8>, - // clang-format on -}; - -void SwizzleFunc(const GLConversionArray& functions, const SurfaceParams& params, +void SwizzleFunc(const MortonSwizzleMode& mode, const SurfaceParams& params, std::vector<u8>& gl_buffer, u32 mip_level) { u32 depth = params.MipDepth(mip_level); if (params.target == SurfaceTarget::Texture2D) { @@ -550,19 +385,19 @@ void SwizzleFunc(const GLConversionArray& functions, const SurfaceParams& params const u64 layer_size = params.LayerMemorySize(); const u64 gl_size = params.LayerSizeGL(mip_level); for (u32 i = 0; i < params.depth; i++) { - functions[static_cast<std::size_t>(params.pixel_format)]( - params.MipWidth(mip_level), params.MipBlockHeight(mip_level), - params.MipHeight(mip_level), params.MipBlockDepth(mip_level), 1, - gl_buffer.data() + offset_gl, gl_size, params.addr + offset); + MortonSwizzle(mode, params.pixel_format, params.MipWidth(mip_level), + 
params.MipBlockHeight(mip_level), params.MipHeight(mip_level), + params.MipBlockDepth(mip_level), 1, gl_buffer.data() + offset_gl, gl_size, + params.addr + offset); offset += layer_size; offset_gl += gl_size; } } else { const u64 offset = params.GetMipmapLevelOffset(mip_level); - functions[static_cast<std::size_t>(params.pixel_format)]( - params.MipWidth(mip_level), params.MipBlockHeight(mip_level), - params.MipHeight(mip_level), params.MipBlockDepth(mip_level), depth, gl_buffer.data(), - gl_buffer.size(), params.addr + offset); + MortonSwizzle(mode, params.pixel_format, params.MipWidth(mip_level), + params.MipBlockHeight(mip_level), params.MipHeight(mip_level), + params.MipBlockDepth(mip_level), depth, gl_buffer.data(), gl_buffer.size(), + params.addr + offset); } } @@ -726,7 +561,7 @@ static void CopySurface(const Surface& src_surface, const Surface& dst_surface, const std::size_t buffer_size = std::max(src_params.size_in_bytes, dst_params.size_in_bytes); glBindBuffer(GL_PIXEL_PACK_BUFFER, copy_pbo_handle); - glBufferData(GL_PIXEL_PACK_BUFFER, buffer_size, nullptr, GL_STREAM_DRAW_ARB); + glBufferData(GL_PIXEL_PACK_BUFFER, buffer_size, nullptr, GL_STREAM_DRAW); if (source_format.compressed) { glGetCompressedTextureImage(src_surface->Texture().handle, src_attachment, static_cast<GLsizei>(src_params.size_in_bytes), nullptr); @@ -996,7 +831,7 @@ void CachedSurface::LoadGLBuffer() { ASSERT_MSG(params.block_width == 1, "Block width is defined as {} on texture type {}", params.block_width, static_cast<u32>(params.target)); for (u32 i = 0; i < params.max_mip_level; i++) - SwizzleFunc(morton_to_gl_fns, params, gl_buffer[i], i); + SwizzleFunc(MortonSwizzleMode::MortonToLinear, params, gl_buffer[i], i); } else { const auto texture_src_data{Memory::GetPointer(params.addr)}; const auto texture_src_data_end{texture_src_data + params.size_in_bytes_gl}; @@ -1035,7 +870,7 @@ void CachedSurface::FlushGLBuffer() { ASSERT_MSG(params.block_width == 1, "Block width is defined as {} on texture type {}", params.block_width, static_cast<u32>(params.target)); - SwizzleFunc(gl_to_morton_fns, params, gl_buffer[0], 0); + SwizzleFunc(MortonSwizzleMode::LinearToMorton, params, gl_buffer[0], 0); } else { std::memcpy(Memory::GetPointer(GetAddr()), gl_buffer[0].data(), GetSizeInBytes()); } @@ -1275,6 +1110,31 @@ Surface RasterizerCacheOpenGL::GetUncachedSurface(const SurfaceParams& params) { return surface; } +void RasterizerCacheOpenGL::FastLayeredCopySurface(const Surface& src_surface, + const Surface& dst_surface) { + const auto& init_params{src_surface->GetSurfaceParams()}; + const auto& dst_params{dst_surface->GetSurfaceParams()}; + VAddr address = init_params.addr; + const std::size_t layer_size = dst_params.LayerMemorySize(); + for (u32 layer = 0; layer < dst_params.depth; layer++) { + for (u32 mipmap = 0; mipmap < dst_params.max_mip_level; mipmap++) { + const VAddr sub_address = address + dst_params.GetMipmapLevelOffset(mipmap); + const Surface& copy = TryGet(sub_address); + if (!copy) + continue; + const auto& src_params{copy->GetSurfaceParams()}; + const u32 width{std::min(src_params.width, dst_params.MipWidth(mipmap))}; + const u32 height{std::min(src_params.height, dst_params.MipHeight(mipmap))}; + + glCopyImageSubData(copy->Texture().handle, SurfaceTargetToGL(src_params.target), 0, 0, + 0, 0, dst_surface->Texture().handle, + SurfaceTargetToGL(dst_params.target), mipmap, 0, 0, layer, width, + height, 1); + } + address += layer_size; + } +} + void RasterizerCacheOpenGL::FermiCopySurface( const 
Tegra::Engines::Fermi2D::Regs::Surface& src_config, const Tegra::Engines::Fermi2D::Regs::Surface& dst_config) { @@ -1340,11 +1200,13 @@ Surface RasterizerCacheOpenGL::RecreateSurface(const Surface& old_surface, CopySurface(old_surface, new_surface, copy_pbo.handle); } break; - case SurfaceTarget::TextureCubemap: case SurfaceTarget::Texture3D: + AccurateCopySurface(old_surface, new_surface); + break; + case SurfaceTarget::TextureCubemap: case SurfaceTarget::Texture2DArray: case SurfaceTarget::TextureCubeArray: - AccurateCopySurface(old_surface, new_surface); + FastLayeredCopySurface(old_surface, new_surface); break; default: LOG_CRITICAL(Render_OpenGL, "Unimplemented surface target={}", diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.h b/src/video_core/renderer_opengl/gl_rasterizer_cache.h index 494f6b903..9ac79c5a4 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer_cache.h +++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.h @@ -350,6 +350,7 @@ private: /// Performs a slow but accurate surface copy, flushing to RAM and reinterpreting the data void AccurateCopySurface(const Surface& src_surface, const Surface& dst_surface); + void FastLayeredCopySurface(const Surface& src_surface, const Surface& dst_surface); /// The surface reserve is a "backup" cache, this is where we put unique surfaces that have /// previously been used. This is to prevent surfaces from being constantly created and diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp index a85a7c0c5..038b25c75 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.cpp +++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp @@ -84,6 +84,7 @@ CachedShader::CachedShader(VAddr addr, Maxwell::ShaderProgram program_type) } entries = program_result.second; + shader_length = entries.shader_length; if (program_type != Maxwell::ShaderProgram::Geometry) { OGLShader shader; diff --git a/src/video_core/renderer_opengl/gl_shader_cache.h b/src/video_core/renderer_opengl/gl_shader_cache.h index ffbf21831..08f470de3 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.h +++ b/src/video_core/renderer_opengl/gl_shader_cache.h @@ -30,7 +30,7 @@ public: } std::size_t GetSizeInBytes() const override { - return GLShader::MAX_PROGRAM_CODE_LENGTH * sizeof(u64); + return shader_length; } // We do not have to flush this cache as things in it are never modified by us. 
@@ -82,6 +82,7 @@ private: u32 max_vertices, const std::string& debug_name); VAddr addr; + std::size_t shader_length; Maxwell::ShaderProgram program_type; GLShader::ShaderSetup setup; GLShader::ShaderEntries entries; diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp index 30050b585..3a75f9d16 100644 --- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp +++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp @@ -34,6 +34,17 @@ constexpr u32 PROGRAM_HEADER_SIZE = sizeof(Tegra::Shader::Header); constexpr u32 MAX_GEOMETRY_BUFFERS = 6; constexpr u32 MAX_ATTRIBUTES = 0x100; // Size in vec4s, this value is untested +static const char* INTERNAL_FLAG_NAMES[] = {"zero_flag", "sign_flag", "carry_flag", + "overflow_flag"}; + +enum class InternalFlag : u64 { + ZeroFlag = 0, + SignFlag = 1, + CarryFlag = 2, + OverflowFlag = 3, + Amount +}; + class DecompileFail : public std::runtime_error { public: using std::runtime_error::runtime_error; @@ -49,8 +60,7 @@ static std::string GetTopologyName(Tegra::Shader::OutputTopology topology) { case Tegra::Shader::OutputTopology::TriangleStrip: return "triangle_strip"; default: - LOG_CRITICAL(Render_OpenGL, "Unknown output topology {}", static_cast<u32>(topology)); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unknown output topology: {}", static_cast<u32>(topology)); return "points"; } } @@ -85,7 +95,8 @@ struct Subroutine { class ControlFlowAnalyzer { public: ControlFlowAnalyzer(const ProgramCode& program_code, u32 main_offset, const std::string& suffix) - : program_code(program_code) { + : program_code(program_code), shader_coverage_begin(main_offset), + shader_coverage_end(main_offset + 1) { // Recursively finds all subroutines. const Subroutine& program_main = AddSubroutine(main_offset, PROGRAM_END, suffix); @@ -97,10 +108,16 @@ public: return std::move(subroutines); } + std::size_t GetShaderLength() const { + return shader_coverage_end * sizeof(u64); + } + private: const ProgramCode& program_code; std::set<Subroutine> subroutines; std::map<std::pair<u32, u32>, ExitMethod> exit_method_map; + u32 shader_coverage_begin; + u32 shader_coverage_end; /// Adds and analyzes a new subroutine if it is not added yet. const Subroutine& AddSubroutine(u32 begin, u32 end, const std::string& suffix) { @@ -142,6 +159,9 @@ private: return exit_method; for (u32 offset = begin; offset != end && offset != PROGRAM_END; ++offset) { + shader_coverage_begin = std::min(shader_coverage_begin, offset); + shader_coverage_end = std::max(shader_coverage_end, offset + 1); + const Instruction instr = {program_code[offset]}; if (const auto opcode = OpCode::Decode(instr)) { switch (opcode->get().GetId()) { @@ -167,8 +187,8 @@ private: case OpCode::Id::SSY: case OpCode::Id::PBK: { // The SSY and PBK use a similar encoding as the BRA instruction. - ASSERT_MSG(instr.bra.constant_buffer == 0, - "Constant buffer branching is not supported"); + UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0, + "Constant buffer branching is not supported"); const u32 target = offset + instr.bra.GetBranchTarget(); labels.insert(target); // Continue scanning for an exit method. @@ -258,14 +278,6 @@ private: const std::string& suffix; }; -enum class InternalFlag : u64 { - ZeroFlag = 0, - CarryFlag = 1, - OverflowFlag = 2, - NaNFlag = 3, - Amount -}; - /** * Used to manage shader registers that are emulated with GLSL. This class keeps track of the state * of all registers (e.g. 
whether they are currently being used as Floats or Integers), and @@ -299,8 +311,7 @@ public: // Default - do nothing return value; default: - LOG_CRITICAL(HW_GPU, "Unimplemented conversion size {}", static_cast<u32>(size)); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unimplemented conversion size: {}", static_cast<u32>(size)); } } @@ -363,7 +374,7 @@ public: u64 value_num_components, bool is_saturated = false, u64 dest_elem = 0, Register::Size size = Register::Size::Word, bool sets_cc = false) { - ASSERT_MSG(!is_saturated, "Unimplemented"); + UNIMPLEMENTED_IF(is_saturated); const std::string func{is_signed ? "intBitsToFloat" : "uintBitsToFloat"}; @@ -373,7 +384,7 @@ public: if (sets_cc) { const std::string zero_condition = "( " + ConvertIntegerSize(value, size) + " == 0 )"; SetInternalFlag(InternalFlag::ZeroFlag, zero_condition); - LOG_WARNING(HW_GPU, "Control Codes Imcomplete."); + LOG_WARNING(HW_GPU, "Condition codes implementation is incomplete."); } } @@ -392,7 +403,7 @@ public: Tegra::Shader::HalfMerge merge, u64 dest_num_components, u64 value_num_components, bool is_saturated = false, u64 dest_elem = 0) { - ASSERT_MSG(!is_saturated, "Unimplemented"); + UNIMPLEMENTED_IF(is_saturated); const std::string result = [&]() { switch (merge) { @@ -456,24 +467,25 @@ public: shader.AddLine("lmem[" + index + "] = " + func + '(' + value + ");"); } - std::string GetControlCode(const Tegra::Shader::ControlCode cc) const { + std::string GetConditionCode(const Tegra::Shader::ConditionCode cc) const { switch (cc) { - case Tegra::Shader::ControlCode::NEU: + case Tegra::Shader::ConditionCode::NEU: return "!(" + GetInternalFlag(InternalFlag::ZeroFlag) + ')'; default: - LOG_CRITICAL(HW_GPU, "Unimplemented Control Code {}", static_cast<u32>(cc)); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unimplemented condition code: {}", static_cast<u32>(cc)); return "false"; } } - std::string GetInternalFlag(const InternalFlag ii) const { - const u32 code = static_cast<u32>(ii); - return "internalFlag_" + std::to_string(code) + suffix; + std::string GetInternalFlag(const InternalFlag flag) const { + const auto index = static_cast<u32>(flag); + ASSERT(index < static_cast<u32>(InternalFlag::Amount)); + + return std::string(INTERNAL_FLAG_NAMES[index]) + '_' + suffix; } - void SetInternalFlag(const InternalFlag ii, const std::string& value) const { - shader.AddLine(GetInternalFlag(ii) + " = " + value + ';'); + void SetInternalFlag(const InternalFlag flag, const std::string& value) const { + shader.AddLine(GetInternalFlag(flag) + " = " + value + ';'); } /** @@ -488,27 +500,42 @@ public: const Register& buf_reg) { const std::string dest = GetOutputAttribute(attribute); const std::string src = GetRegisterAsFloat(val_reg); + if (dest.empty()) + return; - if (!dest.empty()) { - // Can happen with unknown/unimplemented output attributes, in which case we ignore the - // instruction for now. - if (stage == Maxwell3D::Regs::ShaderStage::Geometry) { - // TODO(Rodrigo): nouveau sets some attributes after setting emitting a geometry - // shader. These instructions use a dirty register as buffer index, to avoid some - // drivers from complaining about out of boundary writes, guard them. 
- const std::string buf_index{"((" + GetRegisterAsInteger(buf_reg) + ") % " + - std::to_string(MAX_GEOMETRY_BUFFERS) + ')'}; - shader.AddLine("amem[" + buf_index + "][" + - std::to_string(static_cast<u32>(attribute)) + ']' + - GetSwizzle(elem) + " = " + src + ';'); - } else { - if (attribute == Attribute::Index::PointSize) { - fixed_pipeline_output_attributes_used.insert(attribute); - shader.AddLine(dest + " = " + src + ';'); - } else { - shader.AddLine(dest + GetSwizzle(elem) + " = " + src + ';'); - } - } + // Can happen with unknown/unimplemented output attributes, in which case we ignore the + // instruction for now. + if (stage == Maxwell3D::Regs::ShaderStage::Geometry) { + // TODO(Rodrigo): nouveau sets some attributes after setting emitting a geometry + // shader. These instructions use a dirty register as buffer index, to avoid some + // drivers from complaining about out of boundary writes, guard them. + const std::string buf_index{"((" + GetRegisterAsInteger(buf_reg) + ") % " + + std::to_string(MAX_GEOMETRY_BUFFERS) + ')'}; + shader.AddLine("amem[" + buf_index + "][" + + std::to_string(static_cast<u32>(attribute)) + ']' + GetSwizzle(elem) + + " = " + src + ';'); + return; + } + + switch (attribute) { + case Attribute::Index::ClipDistances0123: + case Attribute::Index::ClipDistances4567: { + const u64 index = attribute == Attribute::Index::ClipDistances4567 ? 4 : 0 + elem; + UNIMPLEMENTED_IF_MSG( + ((header.vtg.clip_distances >> index) & 1) == 0, + "Shader is setting gl_ClipDistance{} without enabling it in the header", index); + + fixed_pipeline_output_attributes_used.insert(attribute); + shader.AddLine(dest + '[' + std::to_string(index) + "] = " + src + ';'); + break; + } + case Attribute::Index::PointSize: + fixed_pipeline_output_attributes_used.insert(attribute); + shader.AddLine(dest + " = " + src + ';'); + break; + default: + shader.AddLine(dest + GetSwizzle(elem) + " = " + src + ';'); + break; } } @@ -624,8 +651,8 @@ private: /// Generates declarations for internal flags. void GenerateInternalFlags() { - for (u32 ii = 0; ii < static_cast<u64>(InternalFlag::Amount); ii++) { - const InternalFlag code = static_cast<InternalFlag>(ii); + for (u32 flag = 0; flag < static_cast<u32>(InternalFlag::Amount); flag++) { + const InternalFlag code = static_cast<InternalFlag>(flag); declarations.AddLine("bool " + GetInternalFlag(code) + " = false;"); } declarations.AddNewLine(); @@ -728,12 +755,19 @@ private: void GenerateVertex() { if (stage != Maxwell3D::Regs::ShaderStage::Vertex) return; + bool clip_distances_declared = false; + declarations.AddLine("out gl_PerVertex {"); ++declarations.scope; declarations.AddLine("vec4 gl_Position;"); for (auto& o : fixed_pipeline_output_attributes_used) { if (o == Attribute::Index::PointSize) declarations.AddLine("float gl_PointSize;"); + if (!clip_distances_declared && (o == Attribute::Index::ClipDistances0123 || + o == Attribute::Index::ClipDistances4567)) { + declarations.AddLine("float gl_ClipDistance[];"); + clip_distances_declared = true; + } } --declarations.scope; declarations.AddLine("};"); @@ -761,8 +795,7 @@ private: u64 dest_num_components, u64 value_num_components, u64 dest_elem, bool precise) { if (reg == Register::ZeroIndex) { - LOG_CRITICAL(HW_GPU, "Cannot set Register::ZeroIndex"); - UNREACHABLE(); + // Setting RZ is a nop in hardware. 
return; } @@ -847,16 +880,13 @@ private: if (declr_input_attribute.count(attribute) == 0) { declr_input_attribute[attribute] = input_mode; } else { - if (declr_input_attribute[attribute] != input_mode) { - LOG_CRITICAL(HW_GPU, "Same Input multiple input modes"); - UNREACHABLE(); - } + UNIMPLEMENTED_IF_MSG(declr_input_attribute[attribute] != input_mode, + "Multiple input modes for the same attribute"); } return GeometryPass("input_attribute_" + std::to_string(index)); } - LOG_CRITICAL(HW_GPU, "Unhandled input attribute: {}", static_cast<u32>(attribute)); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unhandled input attribute: {}", static_cast<u32>(attribute)); } return "vec4(0, 0, 0, 0)"; @@ -882,24 +912,20 @@ private: break; } default: { - LOG_CRITICAL(HW_GPU, "Unhandled Ipa InterpMode: {}", static_cast<u32>(interp_mode)); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unhandled IPA interp mode: {}", static_cast<u32>(interp_mode)); } } switch (sample_mode) { - case Tegra::Shader::IpaSampleMode::Centroid: { - // Note not implemented, it can be implemented with the "centroid " keyword in glsl; - LOG_CRITICAL(HW_GPU, "Ipa Sampler Mode: centroid, not implemented"); - UNREACHABLE(); + case Tegra::Shader::IpaSampleMode::Centroid: + // It can be implemented with the "centroid " keyword in glsl + UNIMPLEMENTED_MSG("Unimplemented IPA sampler mode centroid"); break; - } - case Tegra::Shader::IpaSampleMode::Default: { + case Tegra::Shader::IpaSampleMode::Default: // Default, n/a break; - } default: { - LOG_CRITICAL(HW_GPU, "Unhandled Ipa SampleMode: {}", static_cast<u32>(sample_mode)); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unimplemented IPA sampler mode: {}", static_cast<u32>(sample_mode)); + break; } } return out; @@ -912,6 +938,10 @@ private: return "gl_PointSize"; case Attribute::Index::Position: return "position"; + case Attribute::Index::ClipDistances0123: + case Attribute::Index::ClipDistances4567: { + return "gl_ClipDistance"; + } default: const u32 index{static_cast<u32>(attribute) - static_cast<u32>(Attribute::Index::Attribute_0)}; @@ -920,8 +950,7 @@ private: return "output_attribute_" + std::to_string(index); } - LOG_CRITICAL(HW_GPU, "Unhandled output attribute: {}", index); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unhandled output attribute={}", index); return {}; } } @@ -951,9 +980,10 @@ private: class GLSLGenerator { public: GLSLGenerator(const std::set<Subroutine>& subroutines, const ProgramCode& program_code, - u32 main_offset, Maxwell3D::Regs::ShaderStage stage, const std::string& suffix) + u32 main_offset, Maxwell3D::Regs::ShaderStage stage, const std::string& suffix, + std::size_t shader_length) : subroutines(subroutines), program_code(program_code), main_offset(main_offset), - stage(stage), suffix(suffix) { + stage(stage), suffix(suffix), shader_length(shader_length) { std::memcpy(&header, program_code.data(), sizeof(Tegra::Shader::Header)); local_memory_size = header.GetLocalMemorySize(); regs.SetLocalMemory(local_memory_size); @@ -966,7 +996,7 @@ public: /// Returns entries in the shader that are useful for external functions ShaderEntries GetEntries() const { - return {regs.GetConstBuffersDeclarations(), regs.GetSamplers()}; + return {regs.GetConstBuffersDeclarations(), regs.GetSamplers(), shader_length}; } private: @@ -1071,19 +1101,26 @@ private: const std::string& op_a, const std::string& op_b) const { using Tegra::Shader::PredCondition; static const std::unordered_map<PredCondition, const char*> PredicateComparisonStrings = { - {PredCondition::LessThan, "<"}, {PredCondition::Equal, "=="}, - 
{PredCondition::LessEqual, "<="}, {PredCondition::GreaterThan, ">"}, - {PredCondition::NotEqual, "!="}, {PredCondition::GreaterEqual, ">="}, - {PredCondition::LessThanWithNan, "<"}, {PredCondition::NotEqualWithNan, "!="}, - {PredCondition::GreaterThanWithNan, ">"}, {PredCondition::GreaterEqualWithNan, ">="}}; + {PredCondition::LessThan, "<"}, + {PredCondition::Equal, "=="}, + {PredCondition::LessEqual, "<="}, + {PredCondition::GreaterThan, ">"}, + {PredCondition::NotEqual, "!="}, + {PredCondition::GreaterEqual, ">="}, + {PredCondition::LessThanWithNan, "<"}, + {PredCondition::NotEqualWithNan, "!="}, + {PredCondition::LessEqualWithNan, "<="}, + {PredCondition::GreaterThanWithNan, ">"}, + {PredCondition::GreaterEqualWithNan, ">="}}; const auto& comparison{PredicateComparisonStrings.find(condition)}; - ASSERT_MSG(comparison != PredicateComparisonStrings.end(), - "Unknown predicate comparison operation"); + UNIMPLEMENTED_IF_MSG(comparison == PredicateComparisonStrings.end(), + "Unknown predicate comparison operation"); std::string predicate{'(' + op_a + ") " + comparison->second + " (" + op_b + ')'}; if (condition == PredCondition::LessThanWithNan || condition == PredCondition::NotEqualWithNan || + condition == PredCondition::LessEqualWithNan || condition == PredCondition::GreaterThanWithNan || condition == PredCondition::GreaterEqualWithNan) { predicate += " || isnan(" + op_a + ") || isnan(" + op_b + ')'; @@ -1107,7 +1144,7 @@ private: }; auto op = PredicateOperationStrings.find(operation); - ASSERT_MSG(op != PredicateOperationStrings.end(), "Unknown predicate operation"); + UNIMPLEMENTED_IF_MSG(op == PredicateOperationStrings.end(), "Unknown predicate operation"); return op->second; } @@ -1205,8 +1242,7 @@ private: break; } default: - LOG_CRITICAL(HW_GPU, "Unimplemented logic operation: {}", static_cast<u32>(logic_op)); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unimplemented logic operation={}", static_cast<u32>(logic_op)); } if (dest != Tegra::Shader::Register::ZeroIndex) { @@ -1224,9 +1260,8 @@ private: SetPredicate(static_cast<u64>(predicate), '(' + result + ") != 0"); break; default: - LOG_CRITICAL(HW_GPU, "Unimplemented predicate result mode: {}", - static_cast<u32>(predicate_mode)); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unimplemented predicate result mode: {}", + static_cast<u32>(predicate_mode)); } } @@ -1264,6 +1299,7 @@ private: shader.AddLine('{'); ++shader.scope; shader.AddLine(coord); + shader.AddLine("vec4 texture_tmp = " + texture + ';'); // TEXS has two destination registers and a swizzle. 
The first two elements in the swizzle // go into gpr0+0 and gpr0+1, and the rest goes into gpr28+0 and gpr28+1 @@ -1276,37 +1312,32 @@ private: if (written_components < 2) { // Write the first two swizzle components to gpr0 and gpr0+1 - regs.SetRegisterToFloat(instr.gpr0, component, texture, 1, 4, false, + regs.SetRegisterToFloat(instr.gpr0, component, "texture_tmp", 1, 4, false, written_components % 2); } else { ASSERT(instr.texs.HasTwoDestinations()); // Write the rest of the swizzle components to gpr28 and gpr28+1 - regs.SetRegisterToFloat(instr.gpr28, component, texture, 1, 4, false, + regs.SetRegisterToFloat(instr.gpr28, component, "texture_tmp", 1, 4, false, written_components % 2); } ++written_components; } - --shader.scope; shader.AddLine('}'); } static u32 TextureCoordinates(Tegra::Shader::TextureType texture_type) { switch (texture_type) { - case Tegra::Shader::TextureType::Texture1D: { + case Tegra::Shader::TextureType::Texture1D: return 1; - } - case Tegra::Shader::TextureType::Texture2D: { + case Tegra::Shader::TextureType::Texture2D: return 2; - } case Tegra::Shader::TextureType::Texture3D: - case Tegra::Shader::TextureType::TextureCube: { + case Tegra::Shader::TextureType::TextureCube: return 3; - } default: - LOG_CRITICAL(HW_GPU, "Unhandled texture type {}", static_cast<u32>(texture_type)); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unhandled texture type: {}", static_cast<u32>(texture_type)); return 0; } } @@ -1342,7 +1373,7 @@ private: void EmitFragmentOutputsWrite() { ASSERT(stage == Maxwell3D::Regs::ShaderStage::Fragment); - ASSERT_MSG(header.ps.omap.sample_mask == 0, "Samplemask write is unimplemented"); + UNIMPLEMENTED_IF_MSG(header.ps.omap.sample_mask != 0, "Samplemask write is unimplemented"); shader.AddLine("if (alpha_test[0] != 0) {"); ++shader.scope; @@ -1408,7 +1439,7 @@ private: case Tegra::Shader::VideoType::Size32: // TODO(Rodrigo): From my hardware tests it becomes a bit "mad" when // this type is used (1 * 1 + 0 == 0x5b800000). Until a better - // explanation is found: assert. + // explanation is found: abort. UNIMPLEMENTED(); return zero; case Tegra::Shader::VideoType::Invalid: @@ -1464,8 +1495,7 @@ private: // Decoding failure if (!opcode) { - LOG_CRITICAL(HW_GPU, "Unhandled instruction: {0:x}", instr.value); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unhandled instruction: {0:x}", instr.value); return offset + 1; } @@ -1473,8 +1503,8 @@ private: fmt::format("// {}: {} (0x{:016x})", offset, opcode->get().GetName(), instr.value)); using Tegra::Shader::Pred; - ASSERT_MSG(instr.pred.full_pred != Pred::NeverExecute, - "NeverExecute predicate not implemented"); + UNIMPLEMENTED_IF_MSG(instr.pred.full_pred == Pred::NeverExecute, + "NeverExecute predicate not implemented"); // Some instructions (like SSY) don't have a predicate field, they are always // unconditionally executed. @@ -1517,37 +1547,36 @@ private: case OpCode::Id::FMUL_R: case OpCode::Id::FMUL_IMM: { // FMUL does not have 'abs' bits and only the second operand has a 'neg' bit. 
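// A note on the conversions that follow: 'generates_cc' means the instruction also
// wants to update the condition-code flags. Only a zero flag is currently modelled
// when sets_cc is requested (see the sets_cc handling above), so CC writeback is now
// reported as unimplemented instead of hitting UNREACHABLE(), following the pattern:
//     UNIMPLEMENTED_IF_MSG(instr.generates_cc,
//                          "Condition codes generation in FMUL is not implemented");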
- ASSERT_MSG(instr.fmul.tab5cb8_2 == 0, "FMUL tab5cb8_2({}) is not implemented", - instr.fmul.tab5cb8_2.Value()); - ASSERT_MSG(instr.fmul.tab5c68_1 == 0, "FMUL tab5cb8_1({}) is not implemented", - instr.fmul.tab5c68_1.Value()); - ASSERT_MSG(instr.fmul.tab5c68_0 == 1, "FMUL tab5cb8_0({}) is not implemented", - instr.fmul.tab5c68_0 - .Value()); // SMO typical sends 1 here which seems to be the default - ASSERT_MSG(instr.fmul.cc == 0, "FMUL cc is not implemented"); + UNIMPLEMENTED_IF_MSG(instr.fmul.tab5cb8_2 != 0, + "FMUL tab5cb8_2({}) is not implemented", + instr.fmul.tab5cb8_2.Value()); + UNIMPLEMENTED_IF_MSG(instr.fmul.tab5c68_1 != 0, + "FMUL tab5cb8_1({}) is not implemented", + instr.fmul.tab5c68_1.Value()); + UNIMPLEMENTED_IF_MSG( + instr.fmul.tab5c68_0 != 1, "FMUL tab5cb8_0({}) is not implemented", + instr.fmul.tab5c68_0 + .Value()); // SMO typical sends 1 here which seems to be the default + UNIMPLEMENTED_IF_MSG(instr.generates_cc, + "Condition codes generation in FMUL is not implemented"); op_b = GetOperandAbsNeg(op_b, false, instr.fmul.negate_b); regs.SetRegisterToFloat(instr.gpr0, 0, op_a + " * " + op_b, 1, 1, instr.alu.saturate_d, 0, true); - if (instr.generates_cc) { - LOG_CRITICAL(HW_GPU, "FMUL Generates an unhandled Control Code"); - UNREACHABLE(); - } break; } case OpCode::Id::FADD_C: case OpCode::Id::FADD_R: case OpCode::Id::FADD_IMM: { + UNIMPLEMENTED_IF_MSG(instr.generates_cc, + "Condition codes generation in FADD is not implemented"); + op_a = GetOperandAbsNeg(op_a, instr.alu.abs_a, instr.alu.negate_a); op_b = GetOperandAbsNeg(op_b, instr.alu.abs_b, instr.alu.negate_b); regs.SetRegisterToFloat(instr.gpr0, 0, op_a + " + " + op_b, 1, 1, instr.alu.saturate_d, 0, true); - if (instr.generates_cc) { - LOG_CRITICAL(HW_GPU, "FADD Generates an unhandled Control Code"); - UNREACHABLE(); - } break; } case OpCode::Id::MUFU: { @@ -1582,15 +1611,17 @@ private: instr.alu.saturate_d, 0, true); break; default: - LOG_CRITICAL(HW_GPU, "Unhandled MUFU sub op: {0:x}", - static_cast<unsigned>(instr.sub_op.Value())); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unhandled MUFU sub op={0:x}", + static_cast<unsigned>(instr.sub_op.Value())); } break; } case OpCode::Id::FMNMX_C: case OpCode::Id::FMNMX_R: case OpCode::Id::FMNMX_IMM: { + UNIMPLEMENTED_IF_MSG(instr.generates_cc, + "Condition codes generation in FMNMX is not implemented"); + op_a = GetOperandAbsNeg(op_a, instr.alu.abs_a, instr.alu.negate_a); op_b = GetOperandAbsNeg(op_b, instr.alu.abs_b, instr.alu.negate_b); @@ -1601,10 +1632,6 @@ private: '(' + condition + ") ? 
min(" + parameters + ") : max(" + parameters + ')', 1, 1, false, 0, true); - if (instr.generates_cc) { - LOG_CRITICAL(HW_GPU, "FMNMX Generates an unhandled Control Code"); - UNREACHABLE(); - } break; } case OpCode::Id::RRO_C: @@ -1617,9 +1644,7 @@ private: break; } default: { - LOG_CRITICAL(HW_GPU, "Unhandled arithmetic instruction: {}", - opcode->get().GetName()); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unhandled arithmetic instruction: {}", opcode->get().GetName()); } } break; @@ -1631,17 +1656,19 @@ private: break; } case OpCode::Id::FMUL32_IMM: { + UNIMPLEMENTED_IF_MSG(instr.op_32.generates_cc, + "Condition codes generation in FMUL32 is not implemented"); + regs.SetRegisterToFloat(instr.gpr0, 0, regs.GetRegisterAsFloat(instr.gpr8) + " * " + GetImmediate32(instr), 1, 1, instr.fmul32.saturate, 0, true); - if (instr.op_32.generates_cc) { - LOG_CRITICAL(HW_GPU, "FMUL32 Generates an unhandled Control Code"); - UNREACHABLE(); - } break; } case OpCode::Id::FADD32I: { + UNIMPLEMENTED_IF_MSG(instr.op_32.generates_cc, + "Condition codes generation in FADD32I is not implemented"); + std::string op_a = regs.GetRegisterAsFloat(instr.gpr8); std::string op_b = GetImmediate32(instr); @@ -1662,23 +1689,22 @@ private: } regs.SetRegisterToFloat(instr.gpr0, 0, op_a + " + " + op_b, 1, 1, false, 0, true); - if (instr.op_32.generates_cc) { - LOG_CRITICAL(HW_GPU, "FADD32 Generates an unhandled Control Code"); - UNREACHABLE(); - } break; } } break; } case OpCode::Type::Bfe: { - ASSERT_MSG(!instr.bfe.negate_b, "Unimplemented"); + UNIMPLEMENTED_IF(instr.bfe.negate_b); std::string op_a = instr.bfe.negate_a ? "-" : ""; op_a += regs.GetRegisterAsInteger(instr.gpr8); switch (opcode->get().GetId()) { case OpCode::Id::BFE_IMM: { + UNIMPLEMENTED_IF_MSG(instr.generates_cc, + "Condition codes generation in BFE is not implemented"); + std::string inner_shift = '(' + op_a + " << " + std::to_string(instr.bfe.GetLeftShiftValue()) + ')'; std::string outer_shift = @@ -1686,15 +1712,10 @@ private: std::to_string(instr.bfe.GetLeftShiftValue() + instr.bfe.shift_position) + ')'; regs.SetRegisterToInteger(instr.gpr0, true, 0, outer_shift, 1, 1); - if (instr.generates_cc) { - LOG_CRITICAL(HW_GPU, "BFE Generates an unhandled Control Code"); - UNREACHABLE(); - } break; } default: { - LOG_CRITICAL(HW_GPU, "Unhandled BFE instruction: {}", opcode->get().GetName()); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unhandled BFE instruction: {}", opcode->get().GetName()); } } @@ -1719,6 +1740,9 @@ private: case OpCode::Id::SHR_C: case OpCode::Id::SHR_R: case OpCode::Id::SHR_IMM: { + UNIMPLEMENTED_IF_MSG(instr.generates_cc, + "Condition codes generation in SHR is not implemented"); + if (!instr.shift.is_signed) { // Logical shift right op_a = "uint(" + op_a + ')'; @@ -1727,24 +1751,17 @@ private: // Cast to int is superfluous for arithmetic shift, it's only for a logical shift regs.SetRegisterToInteger(instr.gpr0, true, 0, "int(" + op_a + " >> " + op_b + ')', 1, 1); - if (instr.generates_cc) { - LOG_CRITICAL(HW_GPU, "SHR Generates an unhandled Control Code"); - UNREACHABLE(); - } break; } case OpCode::Id::SHL_C: case OpCode::Id::SHL_R: case OpCode::Id::SHL_IMM: + UNIMPLEMENTED_IF_MSG(instr.generates_cc, + "Condition codes generation in SHL is not implemented"); regs.SetRegisterToInteger(instr.gpr0, true, 0, op_a + " << " + op_b, 1, 1); - if (instr.generates_cc) { - LOG_CRITICAL(HW_GPU, "SHL Generates an unhandled Control Code"); - UNREACHABLE(); - } break; default: { - LOG_CRITICAL(HW_GPU, "Unhandled shift instruction: {}", opcode->get().GetName()); - 
UNREACHABLE(); + UNIMPLEMENTED_MSG("Unhandled shift instruction: {}", opcode->get().GetName()); } } break; @@ -1755,17 +1772,19 @@ private: switch (opcode->get().GetId()) { case OpCode::Id::IADD32I: + UNIMPLEMENTED_IF_MSG(instr.op_32.generates_cc, + "Condition codes generation in IADD32I is not implemented"); + if (instr.iadd32i.negate_a) op_a = "-(" + op_a + ')'; regs.SetRegisterToInteger(instr.gpr0, true, 0, op_a + " + " + op_b, 1, 1, instr.iadd32i.saturate != 0); - if (instr.op_32.generates_cc) { - LOG_CRITICAL(HW_GPU, "IADD32 Generates an unhandled Control Code"); - UNREACHABLE(); - } break; case OpCode::Id::LOP32I: { + UNIMPLEMENTED_IF_MSG(instr.op_32.generates_cc, + "Condition codes generation in LOP32I is not implemented"); + if (instr.alu.lop32i.invert_a) op_a = "~(" + op_a + ')'; @@ -1775,16 +1794,11 @@ private: WriteLogicOperation(instr.gpr0, instr.alu.lop32i.operation, op_a, op_b, Tegra::Shader::PredicateResultMode::None, Tegra::Shader::Pred::UnusedIndex); - if (instr.op_32.generates_cc) { - LOG_CRITICAL(HW_GPU, "LOP32I Generates an unhandled Control Code"); - UNREACHABLE(); - } break; } default: { - LOG_CRITICAL(HW_GPU, "Unhandled ArithmeticIntegerImmediate instruction: {}", - opcode->get().GetName()); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unhandled ArithmeticIntegerImmediate instruction: {}", + opcode->get().GetName()); } } break; @@ -1807,6 +1821,9 @@ private: case OpCode::Id::IADD_C: case OpCode::Id::IADD_R: case OpCode::Id::IADD_IMM: { + UNIMPLEMENTED_IF_MSG(instr.generates_cc, + "Condition codes generation in IADD is not implemented"); + if (instr.alu_integer.negate_a) op_a = "-(" + op_a + ')'; @@ -1815,15 +1832,14 @@ private: regs.SetRegisterToInteger(instr.gpr0, true, 0, op_a + " + " + op_b, 1, 1, instr.alu.saturate_d); - if (instr.generates_cc) { - LOG_CRITICAL(HW_GPU, "IADD Generates an unhandled Control Code"); - UNREACHABLE(); - } break; } case OpCode::Id::IADD3_C: case OpCode::Id::IADD3_R: case OpCode::Id::IADD3_IMM: { + UNIMPLEMENTED_IF_MSG(instr.generates_cc, + "Condition codes generation in IADD3 is not implemented"); + std::string op_c = regs.GetRegisterAsInteger(instr.gpr39); auto apply_height = [](auto height, auto& oprand) { @@ -1837,9 +1853,8 @@ private: oprand = "((" + oprand + ") >> 16)"; break; default: - LOG_CRITICAL(HW_GPU, "Unhandled IADD3 height: {}", - static_cast<u32>(height.Value())); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unhandled IADD3 height: {}", + static_cast<u32>(height.Value())); } }; @@ -1880,16 +1895,14 @@ private: } regs.SetRegisterToInteger(instr.gpr0, true, 0, result, 1, 1); - - if (instr.generates_cc) { - LOG_CRITICAL(HW_GPU, "IADD3 Generates an unhandled Control Code"); - UNREACHABLE(); - } break; } case OpCode::Id::ISCADD_C: case OpCode::Id::ISCADD_R: case OpCode::Id::ISCADD_IMM: { + UNIMPLEMENTED_IF_MSG(instr.generates_cc, + "Condition codes generation in ISCADD is not implemented"); + if (instr.alu_integer.negate_a) op_a = "-(" + op_a + ')'; @@ -1900,10 +1913,6 @@ private: regs.SetRegisterToInteger(instr.gpr0, true, 0, "((" + op_a + " << " + shift + ") + " + op_b + ')', 1, 1); - if (instr.generates_cc) { - LOG_CRITICAL(HW_GPU, "ISCADD Generates an unhandled Control Code"); - UNREACHABLE(); - } break; } case OpCode::Id::POPC_C: @@ -1927,6 +1936,9 @@ private: case OpCode::Id::LOP_C: case OpCode::Id::LOP_R: case OpCode::Id::LOP_IMM: { + UNIMPLEMENTED_IF_MSG(instr.generates_cc, + "Condition codes generation in LOP is not implemented"); + if (instr.alu.lop.invert_a) op_a = "~(" + op_a + ')'; @@ -1935,15 +1947,14 @@ private: 
WriteLogicOperation(instr.gpr0, instr.alu.lop.operation, op_a, op_b, instr.alu.lop.pred_result_mode, instr.alu.lop.pred48); - if (instr.generates_cc) { - LOG_CRITICAL(HW_GPU, "LOP Generates an unhandled Control Code"); - UNREACHABLE(); - } break; } case OpCode::Id::LOP3_C: case OpCode::Id::LOP3_R: case OpCode::Id::LOP3_IMM: { + UNIMPLEMENTED_IF_MSG(instr.generates_cc, + "Condition codes generation in LOP3 is not implemented"); + const std::string op_c = regs.GetRegisterAsInteger(instr.gpr39); std::string lut; @@ -1954,17 +1965,15 @@ private: } WriteLop3Instruction(instr.gpr0, op_a, op_b, op_c, lut); - if (instr.generates_cc) { - LOG_CRITICAL(HW_GPU, "LOP3 Generates an unhandled Control Code"); - UNREACHABLE(); - } break; } case OpCode::Id::IMNMX_C: case OpCode::Id::IMNMX_R: case OpCode::Id::IMNMX_IMM: { - ASSERT_MSG(instr.imnmx.exchange == Tegra::Shader::IMinMaxExchange::None, - "Unimplemented"); + UNIMPLEMENTED_IF(instr.imnmx.exchange != Tegra::Shader::IMinMaxExchange::None); + UNIMPLEMENTED_IF_MSG(instr.generates_cc, + "Condition codes generation in IMNMX is not implemented"); + const std::string condition = GetPredicateCondition(instr.imnmx.pred, instr.imnmx.negate_pred != 0); const std::string parameters = op_a + ',' + op_b; @@ -1972,10 +1981,6 @@ private: '(' + condition + ") ? min(" + parameters + ") : max(" + parameters + ')', 1, 1); - if (instr.generates_cc) { - LOG_CRITICAL(HW_GPU, "IMNMX Generates an unhandled Control Code"); - UNREACHABLE(); - } break; } case OpCode::Id::LEA_R2: @@ -2030,24 +2035,19 @@ private: op_b = regs.GetRegisterAsInteger(instr.gpr8); op_a = std::to_string(instr.lea.imm.entry_a); op_c = std::to_string(instr.lea.imm.entry_b); - LOG_CRITICAL(HW_GPU, "Unhandled LEA subinstruction: {}", - opcode->get().GetName()); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unhandled LEA subinstruction: {}", opcode->get().GetName()); } } - if (instr.lea.pred48 != static_cast<u64>(Pred::UnusedIndex)) { - LOG_ERROR(HW_GPU, "Unhandled LEA Predicate"); - UNREACHABLE(); - } + UNIMPLEMENTED_IF_MSG(instr.lea.pred48 != static_cast<u64>(Pred::UnusedIndex), + "Unhandled LEA Predicate"); const std::string value = '(' + op_a + " + (" + op_b + "*(1 << " + op_c + ")))"; regs.SetRegisterToInteger(instr.gpr0, true, 0, value, 1, 1); break; } default: { - LOG_CRITICAL(HW_GPU, "Unhandled ArithmeticInteger instruction: {}", - opcode->get().GetName()); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unhandled ArithmeticInteger instruction: {}", + opcode->get().GetName()); } } @@ -2056,7 +2056,7 @@ private: case OpCode::Type::ArithmeticHalf: { if (opcode->get().GetId() == OpCode::Id::HADD2_C || opcode->get().GetId() == OpCode::Id::HADD2_R) { - ASSERT_MSG(instr.alu_half.ftz == 0, "Unimplemented"); + UNIMPLEMENTED_IF(instr.alu_half.ftz != 0); } const bool negate_a = opcode->get().GetId() != OpCode::Id::HMUL2_R && instr.alu_half.negate_a != 0; @@ -2094,9 +2094,8 @@ private: case OpCode::Id::HMUL2_R: return '(' + op_a + " * " + op_b + ')'; default: - LOG_CRITICAL(HW_GPU, "Unhandled half float instruction: {}", - opcode->get().GetName()); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unhandled half float instruction: {}", + opcode->get().GetName()); return std::string("0"); } }(); @@ -2107,10 +2106,10 @@ private: } case OpCode::Type::ArithmeticHalfImmediate: { if (opcode->get().GetId() == OpCode::Id::HADD2_IMM) { - ASSERT_MSG(instr.alu_half_imm.ftz == 0, "Unimplemented"); + UNIMPLEMENTED_IF(instr.alu_half_imm.ftz != 0); } else { - ASSERT_MSG(instr.alu_half_imm.precision == Tegra::Shader::HalfPrecision::None, - "Unimplemented"); 
+ UNIMPLEMENTED_IF(instr.alu_half_imm.precision != + Tegra::Shader::HalfPrecision::None); } const std::string op_a = GetHalfFloat( @@ -2140,11 +2139,14 @@ private: std::string op_b = instr.ffma.negate_b ? "-" : ""; std::string op_c = instr.ffma.negate_c ? "-" : ""; - ASSERT_MSG(instr.ffma.cc == 0, "FFMA cc not implemented"); - ASSERT_MSG(instr.ffma.tab5980_0 == 1, "FFMA tab5980_0({}) not implemented", - instr.ffma.tab5980_0.Value()); // Seems to be 1 by default based on SMO - ASSERT_MSG(instr.ffma.tab5980_1 == 0, "FFMA tab5980_1({}) not implemented", - instr.ffma.tab5980_1.Value()); + UNIMPLEMENTED_IF_MSG(instr.ffma.cc != 0, "FFMA cc not implemented"); + UNIMPLEMENTED_IF_MSG( + instr.ffma.tab5980_0 != 1, "FFMA tab5980_0({}) not implemented", + instr.ffma.tab5980_0.Value()); // Seems to be 1 by default based on SMO + UNIMPLEMENTED_IF_MSG(instr.ffma.tab5980_1 != 0, "FFMA tab5980_1({}) not implemented", + instr.ffma.tab5980_1.Value()); + UNIMPLEMENTED_IF_MSG(instr.generates_cc, + "Condition codes generation in FFMA is not implemented"); switch (opcode->get().GetId()) { case OpCode::Id::FFMA_CR: { @@ -2170,27 +2172,19 @@ private: break; } default: { - LOG_CRITICAL(HW_GPU, "Unhandled FFMA instruction: {}", opcode->get().GetName()); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unhandled FFMA instruction: {}", opcode->get().GetName()); } } regs.SetRegisterToFloat(instr.gpr0, 0, "fma(" + op_a + ", " + op_b + ", " + op_c + ')', 1, 1, instr.alu.saturate_d, 0, true); - if (instr.generates_cc) { - LOG_CRITICAL(HW_GPU, "FFMA Generates an unhandled Control Code"); - UNREACHABLE(); - } - break; } case OpCode::Type::Hfma2: { if (opcode->get().GetId() == OpCode::Id::HFMA2_RR) { - ASSERT_MSG(instr.hfma2.rr.precision == Tegra::Shader::HalfPrecision::None, - "Unimplemented"); + UNIMPLEMENTED_IF(instr.hfma2.rr.precision != Tegra::Shader::HalfPrecision::None); } else { - ASSERT_MSG(instr.hfma2.precision == Tegra::Shader::HalfPrecision::None, - "Unimplemented"); + UNIMPLEMENTED_IF(instr.hfma2.precision != Tegra::Shader::HalfPrecision::None); } const bool saturate = opcode->get().GetId() == OpCode::Id::HFMA2_RR ? 
instr.hfma2.rr.saturate != 0 @@ -2240,7 +2234,7 @@ private: case OpCode::Type::Conversion: { switch (opcode->get().GetId()) { case OpCode::Id::I2I_R: { - ASSERT_MSG(!instr.conversion.selector, "Unimplemented"); + UNIMPLEMENTED_IF(instr.conversion.selector); std::string op_a = regs.GetRegisterAsInteger( instr.gpr20, 0, instr.conversion.is_input_signed, instr.conversion.src_size); @@ -2260,8 +2254,10 @@ private: } case OpCode::Id::I2F_R: case OpCode::Id::I2F_C: { - ASSERT_MSG(instr.conversion.dest_size == Register::Size::Word, "Unimplemented"); - ASSERT_MSG(!instr.conversion.selector, "Unimplemented"); + UNIMPLEMENTED_IF(instr.conversion.dest_size != Register::Size::Word); + UNIMPLEMENTED_IF(instr.conversion.selector); + UNIMPLEMENTED_IF_MSG(instr.generates_cc, + "Condition codes generation in I2F is not implemented"); std::string op_a{}; @@ -2286,16 +2282,13 @@ private: } regs.SetRegisterToFloat(instr.gpr0, 0, op_a, 1, 1); - - if (instr.generates_cc) { - LOG_CRITICAL(HW_GPU, "I2F Generates an unhandled Control Code"); - UNREACHABLE(); - } break; } case OpCode::Id::F2F_R: { - ASSERT_MSG(instr.conversion.dest_size == Register::Size::Word, "Unimplemented"); - ASSERT_MSG(instr.conversion.src_size == Register::Size::Word, "Unimplemented"); + UNIMPLEMENTED_IF(instr.conversion.dest_size != Register::Size::Word); + UNIMPLEMENTED_IF(instr.conversion.src_size != Register::Size::Word); + UNIMPLEMENTED_IF_MSG(instr.generates_cc, + "Condition codes generation in F2F is not implemented"); std::string op_a = regs.GetRegisterAsFloat(instr.gpr20); if (instr.conversion.abs_a) { @@ -2322,23 +2315,19 @@ private: op_a = "trunc(" + op_a + ')'; break; default: - LOG_CRITICAL(HW_GPU, "Unimplemented f2f rounding mode {}", - static_cast<u32>(instr.conversion.f2f.rounding.Value())); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unimplemented F2F rounding mode {}", + static_cast<u32>(instr.conversion.f2f.rounding.Value())); break; } regs.SetRegisterToFloat(instr.gpr0, 0, op_a, 1, 1, instr.alu.saturate_d); - - if (instr.generates_cc) { - LOG_CRITICAL(HW_GPU, "F2F Generates an unhandled Control Code"); - UNREACHABLE(); - } break; } case OpCode::Id::F2I_R: case OpCode::Id::F2I_C: { - ASSERT_MSG(instr.conversion.src_size == Register::Size::Word, "Unimplemented"); + UNIMPLEMENTED_IF(instr.conversion.src_size != Register::Size::Word); + UNIMPLEMENTED_IF_MSG(instr.generates_cc, + "Condition codes generation in F2I is not implemented"); std::string op_a{}; if (instr.is_b_gpr) { @@ -2369,9 +2358,8 @@ private: op_a = "trunc(" + op_a + ')'; break; default: - LOG_CRITICAL(HW_GPU, "Unimplemented f2i rounding mode {}", - static_cast<u32>(instr.conversion.f2i.rounding.Value())); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unimplemented F2I rounding mode {}", + static_cast<u32>(instr.conversion.f2i.rounding.Value())); break; } @@ -2383,16 +2371,10 @@ private: regs.SetRegisterToInteger(instr.gpr0, instr.conversion.is_output_signed, 0, op_a, 1, 1, false, 0, instr.conversion.dest_size); - if (instr.generates_cc) { - LOG_CRITICAL(HW_GPU, "F2I Generates an unhandled Control Code"); - UNREACHABLE(); - } break; } default: { - LOG_CRITICAL(HW_GPU, "Unhandled conversion instruction: {}", - opcode->get().GetName()); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unhandled conversion instruction: {}", opcode->get().GetName()); } } break; @@ -2401,10 +2383,10 @@ private: switch (opcode->get().GetId()) { case OpCode::Id::LD_A: { // Note: Shouldn't this be interp mode flat? As in no interpolation made. 
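// Two guards follow for LD_A: gpr8 would supply a dynamic (indirect) attribute index,
// and the immediate operand is a byte offset, so offsets that are not 32-bit aligned
// cannot be expressed through the per-component GLSL attributes. Both cases are now
// reported as unimplemented rather than asserting.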
- ASSERT_MSG(instr.gpr8.Value() == Register::ZeroIndex, - "Indirect attribute loads are not supported"); - ASSERT_MSG((instr.attribute.fmt20.immediate.Value() % sizeof(u32)) == 0, - "Unaligned attribute loads are not supported"); + UNIMPLEMENTED_IF_MSG(instr.gpr8.Value() != Register::ZeroIndex, + "Indirect attribute loads are not supported"); + UNIMPLEMENTED_IF_MSG((instr.attribute.fmt20.immediate.Value() % sizeof(u32)) != 0, + "Unaligned attribute loads are not supported"); Tegra::Shader::IpaMode input_mode{Tegra::Shader::IpaInterpMode::Perspective, Tegra::Shader::IpaSampleMode::Default}; @@ -2431,7 +2413,7 @@ private: break; } case OpCode::Id::LD_C: { - ASSERT_MSG(instr.ld_c.unknown == 0, "Unimplemented"); + UNIMPLEMENTED_IF(instr.ld_c.unknown != 0); // Add an extra scope and declare the index register inside to prevent // overwriting it in case it is used as an output of the LD instruction. @@ -2459,9 +2441,8 @@ private: break; } default: - LOG_CRITICAL(HW_GPU, "Unhandled type: {}", - static_cast<unsigned>(instr.ld_c.type.Value())); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unhandled type: {}", + static_cast<unsigned>(instr.ld_c.type.Value())); } --shader.scope; @@ -2469,6 +2450,9 @@ private: break; } case OpCode::Id::LD_L: { + UNIMPLEMENTED_IF_MSG(instr.ld_l.unknown == 1, "LD_L Unhandled mode: {}", + static_cast<unsigned>(instr.ld_l.unknown.Value())); + // Add an extra scope and declare the index register inside to prevent // overwriting it in case it is used as an output of the LD instruction. shader.AddLine('{'); @@ -2481,20 +2465,13 @@ private: const std::string op_a = regs.GetLocalMemoryAsFloat("index"); - if (instr.ld_l.unknown != 1) { - LOG_CRITICAL(HW_GPU, "LD_L Unhandled mode: {}", - static_cast<unsigned>(instr.ld_l.unknown.Value())); - UNREACHABLE(); - } - switch (instr.ldst_sl.type.Value()) { case Tegra::Shader::StoreType::Bytes32: regs.SetRegisterToFloat(instr.gpr0, 0, op_a, 1, 1); break; default: - LOG_CRITICAL(HW_GPU, "LD_L Unhandled type: {}", - static_cast<unsigned>(instr.ldst_sl.type.Value())); - UNREACHABLE(); + UNIMPLEMENTED_MSG("LD_L Unhandled type: {}", + static_cast<unsigned>(instr.ldst_sl.type.Value())); } --shader.scope; @@ -2502,10 +2479,10 @@ private: break; } case OpCode::Id::ST_A: { - ASSERT_MSG(instr.gpr8.Value() == Register::ZeroIndex, - "Indirect attribute loads are not supported"); - ASSERT_MSG((instr.attribute.fmt20.immediate.Value() % sizeof(u32)) == 0, - "Unaligned attribute loads are not supported"); + UNIMPLEMENTED_IF_MSG(instr.gpr8.Value() != Register::ZeroIndex, + "Indirect attribute loads are not supported"); + UNIMPLEMENTED_IF_MSG((instr.attribute.fmt20.immediate.Value() % sizeof(u32)) != 0, + "Unaligned attribute loads are not supported"); u64 next_element = instr.attribute.fmt20.element; u64 next_index = static_cast<u64>(instr.attribute.fmt20.index.Value()); @@ -2530,6 +2507,9 @@ private: break; } case OpCode::Id::ST_L: { + UNIMPLEMENTED_IF_MSG(instr.st_l.unknown == 0, "ST_L Unhandled mode: {}", + static_cast<unsigned>(instr.st_l.unknown.Value())); + // Add an extra scope and declare the index register inside to prevent // overwriting it in case it is used as an output of the LD instruction. 
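// For the supported Bytes32 case the generated GLSL mirrors LD_L above: the byte
// address is turned into a 32-bit word index into the per-stage lmem array, roughly
// (identifiers illustrative; the value may pass through a bit-cast helper):
//     {
//         uint index = (address / 4);
//         lmem[index] = value;
//     }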
shader.AddLine('{'); @@ -2540,20 +2520,13 @@ private: shader.AddLine("uint index = (" + op + " / 4);"); - if (instr.st_l.unknown != 0) { - LOG_CRITICAL(HW_GPU, "ST_L Unhandled mode: {}", - static_cast<unsigned>(instr.st_l.unknown.Value())); - UNREACHABLE(); - } - switch (instr.ldst_sl.type.Value()) { case Tegra::Shader::StoreType::Bytes32: regs.SetLocalMemoryAsFloat("index", regs.GetRegisterAsFloat(instr.gpr0)); break; default: - LOG_CRITICAL(HW_GPU, "ST_L Unhandled type: {}", - static_cast<unsigned>(instr.ldst_sl.type.Value())); - UNREACHABLE(); + UNIMPLEMENTED_MSG("ST_L Unhandled type: {}", + static_cast<unsigned>(instr.ldst_sl.type.Value())); } --shader.scope; @@ -2565,78 +2538,99 @@ private: std::string coord; const bool is_array = instr.tex.array != 0; - ASSERT_MSG(!instr.tex.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP), - "NODEP is not implemented"); - ASSERT_MSG(!instr.tex.UsesMiscMode(Tegra::Shader::TextureMiscMode::AOFFI), - "AOFFI is not implemented"); + UNIMPLEMENTED_IF_MSG(instr.tex.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP), + "NODEP is not implemented"); + UNIMPLEMENTED_IF_MSG(instr.tex.UsesMiscMode(Tegra::Shader::TextureMiscMode::AOFFI), + "AOFFI is not implemented"); const bool depth_compare = instr.tex.UsesMiscMode(Tegra::Shader::TextureMiscMode::DC); u32 num_coordinates = TextureCoordinates(texture_type); - if (depth_compare) - num_coordinates += 1; + u32 start_index = 0; + std::string array_elem; + if (is_array) { + array_elem = regs.GetRegisterAsInteger(instr.gpr8); + start_index = 1; + } + const auto process_mode = instr.tex.GetTextureProcessMode(); + u32 start_index_b = 0; + std::string lod_value; + if (process_mode != Tegra::Shader::TextureProcessMode::LZ && + process_mode != Tegra::Shader::TextureProcessMode::None) { + start_index_b = 1; + lod_value = regs.GetRegisterAsFloat(instr.gpr20); + } + + std::string depth_value; + if (depth_compare) { + depth_value = regs.GetRegisterAsFloat(instr.gpr20.Value() + start_index_b); + } + + bool depth_compare_extra = false; switch (num_coordinates) { case 1: { + const std::string x = regs.GetRegisterAsFloat(instr.gpr8.Value() + start_index); if (is_array) { - const std::string index = regs.GetRegisterAsInteger(instr.gpr8); - const std::string x = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1); - coord = "vec2 coords = vec2(" + x + ", " + index + ");"; + if (depth_compare) { + coord = "vec3 coords = vec3(" + x + ", " + depth_value + ", " + + array_elem + ");"; + } else { + coord = "vec2 coords = vec2(" + x + ", " + array_elem + ");"; + } } else { - const std::string x = regs.GetRegisterAsFloat(instr.gpr8); - coord = "float coords = " + x + ';'; + if (depth_compare) { + coord = "vec2 coords = vec2(" + x + ", " + depth_value + ");"; + } else { + coord = "float coords = " + x + ';'; + } } break; } case 2: { + const std::string x = regs.GetRegisterAsFloat(instr.gpr8.Value() + start_index); + const std::string y = + regs.GetRegisterAsFloat(instr.gpr8.Value() + start_index + 1); if (is_array) { - const std::string index = regs.GetRegisterAsInteger(instr.gpr8); - const std::string x = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1); - const std::string y = regs.GetRegisterAsFloat(instr.gpr8.Value() + 2); - coord = "vec3 coords = vec3(" + x + ", " + y + ", " + index + ");"; + if (depth_compare) { + coord = "vec4 coords = vec4(" + x + ", " + y + ", " + depth_value + + ", " + array_elem + ");"; + } else { + coord = "vec3 coords = vec3(" + x + ", " + y + ", " + array_elem + ");"; + } } else { - const std::string x = 
regs.GetRegisterAsFloat(instr.gpr8); - const std::string y = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1); - coord = "vec2 coords = vec2(" + x + ", " + y + ");"; + if (depth_compare) { + coord = + "vec3 coords = vec3(" + x + ", " + y + ", " + depth_value + ");"; + } else { + coord = "vec2 coords = vec2(" + x + ", " + y + ");"; + } } break; } case 3: { - if (depth_compare) { - if (is_array) { - const std::string index = regs.GetRegisterAsInteger(instr.gpr8); - const std::string x = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1); - const std::string y = regs.GetRegisterAsFloat(instr.gpr20); - const std::string z = regs.GetRegisterAsFloat(instr.gpr20.Value() + 1); - coord = "vec4 coords = vec4(" + x + ", " + y + ", " + z + ", " + index + - ");"; - } else { - const std::string x = regs.GetRegisterAsFloat(instr.gpr8); - const std::string y = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1); - const std::string z = regs.GetRegisterAsFloat(instr.gpr20); - coord = "vec3 coords = vec3(" + x + ", " + y + ", " + z + ");"; - } + const std::string x = regs.GetRegisterAsFloat(instr.gpr8.Value() + start_index); + const std::string y = + regs.GetRegisterAsFloat(instr.gpr8.Value() + start_index + 1); + const std::string z = + regs.GetRegisterAsFloat(instr.gpr8.Value() + start_index + 2); + if (is_array) { + depth_compare_extra = depth_compare; + coord = "vec4 coords = vec4(" + x + ", " + y + ", " + z + ", " + + array_elem + ");"; } else { - if (is_array) { - const std::string index = regs.GetRegisterAsInteger(instr.gpr8); - const std::string x = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1); - const std::string y = regs.GetRegisterAsFloat(instr.gpr8.Value() + 2); - const std::string z = regs.GetRegisterAsFloat(instr.gpr8.Value() + 3); - coord = "vec4 coords = vec4(" + x + ", " + y + ", " + z + ", " + index + - ");"; + if (depth_compare) { + coord = "vec4 coords = vec4(" + x + ", " + y + ", " + z + ", " + + depth_value + ");"; } else { - const std::string x = regs.GetRegisterAsFloat(instr.gpr8); - const std::string y = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1); - const std::string z = regs.GetRegisterAsFloat(instr.gpr8.Value() + 2); coord = "vec3 coords = vec3(" + x + ", " + y + ", " + z + ");"; } } break; } default: - LOG_CRITICAL(HW_GPU, "Unhandled coordinates number {}", - static_cast<u32>(num_coordinates)); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unhandled coordinates number {}", + static_cast<u32>(num_coordinates)); // Fallback to interpreting as a 2D texture for now const std::string x = regs.GetRegisterAsFloat(instr.gpr8); @@ -2644,126 +2638,172 @@ private: coord = "vec2 coords = vec2(" + x + ", " + y + ");"; texture_type = Tegra::Shader::TextureType::Texture2D; } - // TODO: make sure coordinates are always indexed to gpr8 and gpr20 is always bias - // or lod. - std::string op_c; const std::string sampler = GetSampler(instr.sampler, texture_type, is_array, depth_compare); // Add an extra scope and declare the texture coords inside to prevent // overwriting them in case they are used as outputs of the texs instruction. 
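// The operand layout assumed by the coordinate gathering above, in rough terms:
//     gpr8 + 0        array index (only when .array is set)
//     gpr8 + N...     texture coordinates (N = 1 with an array index, otherwise 0)
//     gpr20 + 0       LOD or bias value (LB/LBA/LL/LLA process modes only)
//     gpr20 + 0 or 1  depth-compare reference when DC is set (it follows the LOD
//                     operand when one is present)
// e.g. a 2D array TEX with LL and DC reads its coordinates from gpr8+1/gpr8+2, the
// LOD from gpr20 and the reference value from gpr20+1.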
- shader.AddLine("{"); + shader.AddLine('{'); ++shader.scope; shader.AddLine(coord); std::string texture; switch (instr.tex.GetTextureProcessMode()) { case Tegra::Shader::TextureProcessMode::None: { - texture = "texture(" + sampler + ", coords)"; + if (!depth_compare_extra) { + texture = "texture(" + sampler + ", coords)"; + } else { + texture = "texture(" + sampler + ", coords, " + depth_value + ')'; + } break; } case Tegra::Shader::TextureProcessMode::LZ: { - texture = "textureLod(" + sampler + ", coords, 0.0)"; + if (!depth_compare_extra) { + texture = "textureLod(" + sampler + ", coords, 0.0)"; + } else { + texture = "texture(" + sampler + ", coords, " + depth_value + ')'; + } break; } case Tegra::Shader::TextureProcessMode::LB: case Tegra::Shader::TextureProcessMode::LBA: { - if (depth_compare) { - if (is_array) - op_c = regs.GetRegisterAsFloat(instr.gpr20.Value() + 2); - else - op_c = regs.GetRegisterAsFloat(instr.gpr20.Value() + 1); + // TODO: Figure if A suffix changes the equation at all. + if (!depth_compare_extra) { + texture = "texture(" + sampler + ", coords, " + lod_value + ')'; } else { - op_c = regs.GetRegisterAsFloat(instr.gpr20); + texture = "texture(" + sampler + ", coords, " + depth_value + ')'; + LOG_WARNING(HW_GPU, + "OpenGL Limitation: can't set bias value along depth compare"); } - // TODO: Figure if A suffix changes the equation at all. - texture = "texture(" + sampler + ", coords, " + op_c + ')'; break; } case Tegra::Shader::TextureProcessMode::LL: case Tegra::Shader::TextureProcessMode::LLA: { - if (num_coordinates <= 2) { - op_c = regs.GetRegisterAsFloat(instr.gpr20); + // TODO: Figure if A suffix changes the equation at all. + if (!depth_compare_extra) { + texture = "textureLod(" + sampler + ", coords, " + lod_value + ')'; } else { - op_c = regs.GetRegisterAsFloat(instr.gpr20.Value() + 1); + texture = "texture(" + sampler + ", coords, " + depth_value + ')'; + LOG_WARNING(HW_GPU, + "OpenGL Limitation: can't set lod value along depth compare"); } - // TODO: Figure if A suffix changes the equation at all. 
- texture = "textureLod(" + sampler + ", coords, " + op_c + ')'; break; } default: { - texture = "texture(" + sampler + ", coords)"; - LOG_CRITICAL(HW_GPU, "Unhandled texture process mode {}", - static_cast<u32>(instr.tex.GetTextureProcessMode())); - UNREACHABLE(); + if (!depth_compare_extra) { + texture = "texture(" + sampler + ", coords)"; + } else { + texture = "texture(" + sampler + ", coords, " + depth_value + ')'; + } + UNIMPLEMENTED_MSG("Unhandled texture process mode {}", + static_cast<u32>(instr.tex.GetTextureProcessMode())); } } if (!depth_compare) { + shader.AddLine("vec4 texture_tmp = " + texture + ';'); std::size_t dest_elem{}; for (std::size_t elem = 0; elem < 4; ++elem) { if (!instr.tex.IsComponentEnabled(elem)) { // Skip disabled components continue; } - regs.SetRegisterToFloat(instr.gpr0, elem, texture, 1, 4, false, dest_elem); + regs.SetRegisterToFloat(instr.gpr0, elem, "texture_tmp", 1, 4, false, + dest_elem); ++dest_elem; } } else { regs.SetRegisterToFloat(instr.gpr0, 0, texture, 1, 1, false); } --shader.scope; - shader.AddLine("}"); + shader.AddLine('}'); break; } case OpCode::Id::TEXS: { - std::string coord; Tegra::Shader::TextureType texture_type{instr.texs.GetTextureType()}; bool is_array{instr.texs.IsArrayTexture()}; - ASSERT_MSG(!instr.texs.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP), - "NODEP is not implemented"); + UNIMPLEMENTED_IF_MSG(instr.texs.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP), + "NODEP is not implemented"); const bool depth_compare = instr.texs.UsesMiscMode(Tegra::Shader::TextureMiscMode::DC); u32 num_coordinates = TextureCoordinates(texture_type); - if (depth_compare) - num_coordinates += 1; + const auto process_mode = instr.texs.GetTextureProcessMode(); + std::string lod_value; + std::string coord; + u32 lod_offset = 0; + if (process_mode == Tegra::Shader::TextureProcessMode::LL) { + if (num_coordinates > 2) { + lod_value = regs.GetRegisterAsFloat(instr.gpr20.Value() + 1); + lod_offset = 2; + } else { + lod_value = regs.GetRegisterAsFloat(instr.gpr20); + lod_offset = 1; + } + } switch (num_coordinates) { + case 1: { + coord = "float coords = " + regs.GetRegisterAsFloat(instr.gpr8) + ';'; + break; + } case 2: { if (is_array) { - const std::string index = regs.GetRegisterAsInteger(instr.gpr8); - const std::string x = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1); - const std::string y = regs.GetRegisterAsFloat(instr.gpr20); - coord = "vec3 coords = vec3(" + x + ", " + y + ", " + index + ");"; + if (depth_compare) { + const std::string index = regs.GetRegisterAsInteger(instr.gpr8); + const std::string x = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1); + const std::string y = regs.GetRegisterAsFloat(instr.gpr20); + const std::string z = regs.GetRegisterAsFloat(instr.gpr20.Value() + 1); + coord = "vec4 coords = vec4(" + x + ", " + y + ", " + z + ", " + index + + ");"; + } else { + const std::string index = regs.GetRegisterAsInteger(instr.gpr8); + const std::string x = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1); + const std::string y = regs.GetRegisterAsFloat(instr.gpr20); + coord = "vec3 coords = vec3(" + x + ", " + y + ", " + index + ");"; + } } else { - const std::string x = regs.GetRegisterAsFloat(instr.gpr8); - const std::string y = regs.GetRegisterAsFloat(instr.gpr20); - coord = "vec2 coords = vec2(" + x + ", " + y + ");"; + if (lod_offset != 0) { + if (depth_compare) { + const std::string x = regs.GetRegisterAsFloat(instr.gpr8); + const std::string y = + regs.GetRegisterAsFloat(instr.gpr8.Value() + 1); + const 
std::string z = + regs.GetRegisterAsFloat(instr.gpr20.Value() + lod_offset); + coord = "vec3 coords = vec3(" + x + ", " + y + ", " + z + ");"; + } else { + const std::string x = regs.GetRegisterAsFloat(instr.gpr8); + const std::string y = + regs.GetRegisterAsFloat(instr.gpr8.Value() + 1); + coord = "vec2 coords = vec2(" + x + ", " + y + ");"; + } + } else { + if (depth_compare) { + const std::string x = regs.GetRegisterAsFloat(instr.gpr8); + const std::string y = + regs.GetRegisterAsFloat(instr.gpr8.Value() + 1); + const std::string z = regs.GetRegisterAsFloat(instr.gpr20); + coord = "vec3 coords = vec3(" + x + ", " + y + ", " + z + ");"; + } else { + const std::string x = regs.GetRegisterAsFloat(instr.gpr8); + const std::string y = regs.GetRegisterAsFloat(instr.gpr20); + coord = "vec2 coords = vec2(" + x + ", " + y + ");"; + } + } } break; } case 3: { - if (is_array) { - const std::string index = regs.GetRegisterAsInteger(instr.gpr8); - const std::string x = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1); - const std::string y = regs.GetRegisterAsFloat(instr.gpr8.Value() + 2); - const std::string z = regs.GetRegisterAsFloat(instr.gpr20); - coord = - "vec4 coords = vec4(" + x + ", " + y + ", " + z + ", " + index + ");"; - } else { - const std::string x = regs.GetRegisterAsFloat(instr.gpr8); - const std::string y = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1); - const std::string z = regs.GetRegisterAsFloat(instr.gpr20); - coord = "vec3 coords = vec3(" + x + ", " + y + ", " + z + ");"; - } + const std::string x = regs.GetRegisterAsFloat(instr.gpr8); + const std::string y = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1); + const std::string z = regs.GetRegisterAsFloat(instr.gpr20); + coord = "vec3 coords = vec3(" + x + ", " + y + ", " + z + ");"; break; } default: - LOG_CRITICAL(HW_GPU, "Unhandled coordinates number {}", - static_cast<u32>(num_coordinates)); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unhandled coordinates number {}", + static_cast<u32>(num_coordinates)); // Fallback to interpreting as a 2D texture for now const std::string x = regs.GetRegisterAsFloat(instr.gpr8); @@ -2775,7 +2815,7 @@ private: const std::string sampler = GetSampler(instr.sampler, texture_type, is_array, depth_compare); std::string texture; - switch (instr.texs.GetTextureProcessMode()) { + switch (process_mode) { case Tegra::Shader::TextureProcessMode::None: { texture = "texture(" + sampler + ", coords)"; break; @@ -2789,15 +2829,13 @@ private: break; } case Tegra::Shader::TextureProcessMode::LL: { - const std::string op_c = regs.GetRegisterAsFloat(instr.gpr20.Value() + 1); - texture = "textureLod(" + sampler + ", coords, " + op_c + ')'; + texture = "textureLod(" + sampler + ", coords, " + lod_value + ')'; break; } default: { texture = "texture(" + sampler + ", coords)"; - LOG_CRITICAL(HW_GPU, "Unhandled texture process mode {}", - static_cast<u32>(instr.texs.GetTextureProcessMode())); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unhandled texture process mode {}", + static_cast<u32>(instr.texs.GetTextureProcessMode())); } } if (!depth_compare) { @@ -2805,47 +2843,48 @@ private: } else { WriteTexsInstruction(instr, coord, "vec4(" + texture + ')'); } + break; } case OpCode::Id::TLDS: { - std::string coord; const Tegra::Shader::TextureType texture_type{instr.tlds.GetTextureType()}; const bool is_array{instr.tlds.IsArrayTexture()}; ASSERT(texture_type == Tegra::Shader::TextureType::Texture2D); ASSERT(is_array == false); - ASSERT_MSG(!instr.tlds.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP), - "NODEP is not 
implemented"); - ASSERT_MSG(!instr.tlds.UsesMiscMode(Tegra::Shader::TextureMiscMode::AOFFI), - "AOFFI is not implemented"); - ASSERT_MSG(!instr.tlds.UsesMiscMode(Tegra::Shader::TextureMiscMode::MZ), - "MZ is not implemented"); + UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP), + "NODEP is not implemented"); + UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(Tegra::Shader::TextureMiscMode::AOFFI), + "AOFFI is not implemented"); + UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(Tegra::Shader::TextureMiscMode::MZ), + "MZ is not implemented"); - u32 op_c_offset = 0; + u32 extra_op_offset = 0; + + // Scope to avoid variable name overlaps. + shader.AddLine('{'); + ++shader.scope; + std::string coords; switch (texture_type) { case Tegra::Shader::TextureType::Texture1D: { const std::string x = regs.GetRegisterAsInteger(instr.gpr8); - coord = "int coords = " + x + ';'; + coords = "float coords = " + x + ';'; break; } case Tegra::Shader::TextureType::Texture2D: { - if (is_array) { - LOG_CRITICAL(HW_GPU, "Unhandled 2d array texture"); - UNREACHABLE(); - } else { - const std::string x = regs.GetRegisterAsInteger(instr.gpr8); - const std::string y = regs.GetRegisterAsInteger(instr.gpr20); - coord = "ivec2 coords = ivec2(" + x + ", " + y + ");"; - op_c_offset = 1; - } + UNIMPLEMENTED_IF_MSG(is_array, "Unhandled 2d array texture"); + + const std::string x = regs.GetRegisterAsInteger(instr.gpr8); + const std::string y = regs.GetRegisterAsInteger(instr.gpr20); + // shader.AddLine("ivec2 coords = ivec2(" + x + ", " + y + ");"); + coords = "ivec2 coords = ivec2(" + x + ", " + y + ");"; + extra_op_offset = 1; break; } default: - LOG_CRITICAL(HW_GPU, "Unhandled texture type {}", - static_cast<u32>(texture_type)); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unhandled texture type {}", static_cast<u32>(texture_type)); } const std::string sampler = GetSampler(instr.sampler, texture_type, is_array, false); @@ -2856,19 +2895,22 @@ private: break; } case Tegra::Shader::TextureProcessMode::LL: { - const std::string op_c = - regs.GetRegisterAsInteger(instr.gpr20.Value() + op_c_offset); - texture = "texelFetch(" + sampler + ", coords, " + op_c + ')'; + shader.AddLine( + "float lod = " + + regs.GetRegisterAsInteger(instr.gpr20.Value() + extra_op_offset) + ';'); + texture = "texelFetch(" + sampler + ", coords, lod)"; break; } default: { texture = "texelFetch(" + sampler + ", coords, 0)"; - LOG_CRITICAL(HW_GPU, "Unhandled texture process mode {}", - static_cast<u32>(instr.tlds.GetTextureProcessMode())); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unhandled texture process mode {}", + static_cast<u32>(instr.tlds.GetTextureProcessMode())); } } - WriteTexsInstruction(instr, coord, texture); + WriteTexsInstruction(instr, coords, texture); + + --shader.scope; + shader.AddLine('}'); break; } case OpCode::Id::TLD4: { @@ -2876,14 +2918,14 @@ private: ASSERT(instr.tld4.array == 0); std::string coord; - ASSERT_MSG(!instr.tld4.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP), - "NODEP is not implemented"); - ASSERT_MSG(!instr.tld4.UsesMiscMode(Tegra::Shader::TextureMiscMode::AOFFI), - "AOFFI is not implemented"); - ASSERT_MSG(!instr.tld4.UsesMiscMode(Tegra::Shader::TextureMiscMode::NDV), - "NDV is not implemented"); - ASSERT_MSG(!instr.tld4.UsesMiscMode(Tegra::Shader::TextureMiscMode::PTP), - "PTP is not implemented"); + UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP), + "NODEP is not implemented"); + 
UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(Tegra::Shader::TextureMiscMode::AOFFI), + "AOFFI is not implemented"); + UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(Tegra::Shader::TextureMiscMode::NDV), + "NDV is not implemented"); + UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(Tegra::Shader::TextureMiscMode::PTP), + "PTP is not implemented"); const bool depth_compare = instr.tld4.UsesMiscMode(Tegra::Shader::TextureMiscMode::DC); auto texture_type = instr.tld4.texture_type.Value(); @@ -2891,61 +2933,71 @@ private: if (depth_compare) num_coordinates += 1; + // Add an extra scope and declare the texture coords inside to prevent + // overwriting them in case they are used as outputs of the texs instruction. + shader.AddLine('{'); + ++shader.scope; + switch (num_coordinates) { case 2: { const std::string x = regs.GetRegisterAsFloat(instr.gpr8); const std::string y = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1); - coord = "vec2 coords = vec2(" + x + ", " + y + ");"; + shader.AddLine("vec2 coords = vec2(" + x + ", " + y + ");"); break; } case 3: { const std::string x = regs.GetRegisterAsFloat(instr.gpr8); const std::string y = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1); const std::string z = regs.GetRegisterAsFloat(instr.gpr8.Value() + 2); - coord = "vec3 coords = vec3(" + x + ", " + y + ", " + z + ");"; + shader.AddLine("vec3 coords = vec3(" + x + ", " + y + ", " + z + ");"); break; } default: - LOG_CRITICAL(HW_GPU, "Unhandled coordinates number {}", - static_cast<u32>(num_coordinates)); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unhandled coordinates number {}", + static_cast<u32>(num_coordinates)); const std::string x = regs.GetRegisterAsFloat(instr.gpr8); const std::string y = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1); - coord = "vec2 coords = vec2(" + x + ", " + y + ");"; + shader.AddLine("vec2 coords = vec2(" + x + ", " + y + ");"); texture_type = Tegra::Shader::TextureType::Texture2D; } const std::string sampler = GetSampler(instr.sampler, texture_type, false, depth_compare); - // Add an extra scope and declare the texture coords inside to prevent - // overwriting them in case they are used as outputs of the texs instruction. - shader.AddLine("{"); - ++shader.scope; - shader.AddLine(coord); + const std::string texture = "textureGather(" + sampler + ", coords, " + std::to_string(instr.tld4.component) + ')'; + if (!depth_compare) { + shader.AddLine("vec4 texture_tmp = " + texture + ';'); std::size_t dest_elem{}; for (std::size_t elem = 0; elem < 4; ++elem) { if (!instr.tex.IsComponentEnabled(elem)) { // Skip disabled components continue; } - regs.SetRegisterToFloat(instr.gpr0, elem, texture, 1, 4, false, dest_elem); + regs.SetRegisterToFloat(instr.gpr0, elem, "texture_tmp", 1, 4, false, + dest_elem); ++dest_elem; } } else { regs.SetRegisterToFloat(instr.gpr0, 0, texture, 1, 1, false); } --shader.scope; - shader.AddLine("}"); + shader.AddLine('}'); break; } case OpCode::Id::TLD4S: { - ASSERT_MSG(!instr.tld4s.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP), - "NODEP is not implemented"); - ASSERT_MSG(!instr.tld4s.UsesMiscMode(Tegra::Shader::TextureMiscMode::AOFFI), - "AOFFI is not implemented"); + UNIMPLEMENTED_IF_MSG( + instr.tld4s.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP), + "NODEP is not implemented"); + UNIMPLEMENTED_IF_MSG( + instr.tld4s.UsesMiscMode(Tegra::Shader::TextureMiscMode::AOFFI), + "AOFFI is not implemented"); + + // Scope to avoid variable name overlaps. 
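// As with TEX/TEXS/TLDS above, the extra braces give the generated statements their
// own scope so that coords (and the texture_tmp temporary emitted by
// WriteTexsInstruction) cannot collide with the destination registers written
// afterwards; the emitted GLSL looks roughly like:
//     {
//         vec2 coords = vec2(...);
//         vec4 texture_tmp = textureGather(...);
//         // destination registers are then assigned from texture_tmp
//     }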
+ shader.AddLine('{'); + ++shader.scope; + std::string coords; const bool depth_compare = instr.tld4s.UsesMiscMode(Tegra::Shader::TextureMiscMode::DC); @@ -2954,28 +3006,32 @@ private: // TODO(Subv): Figure out how the sampler type is encoded in the TLD4S instruction. const std::string sampler = GetSampler( instr.sampler, Tegra::Shader::TextureType::Texture2D, false, depth_compare); - std::string coord; if (!depth_compare) { - coord = "vec2 coords = vec2(" + op_a + ", " + op_b + ");"; + coords = "vec2 coords = vec2(" + op_a + ", " + op_b + ");"; } else { // Note: TLD4S coordinate encoding works just like TEXS's - const std::string op_c = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1); - coord = "vec3 coords = vec3(" + op_a + ", " + op_c + ", " + op_b + ");"; + const std::string op_y = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1); + coords = "vec3 coords = vec3(" + op_a + ", " + op_y + ", " + op_b + ");"; } const std::string texture = "textureGather(" + sampler + ", coords, " + std::to_string(instr.tld4s.component) + ')'; if (!depth_compare) { - WriteTexsInstruction(instr, coord, texture); + WriteTexsInstruction(instr, coords, texture); } else { - WriteTexsInstruction(instr, coord, "vec4(" + texture + ')'); + WriteTexsInstruction(instr, coords, "vec4(" + texture + ')'); } + + --shader.scope; + shader.AddLine('}'); break; } case OpCode::Id::TXQ: { - ASSERT_MSG(!instr.txq.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP), - "NODEP is not implemented"); + UNIMPLEMENTED_IF_MSG(instr.txq.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP), + "NODEP is not implemented"); + ++shader.scope; + shader.AddLine('{'); // TODO: the new commits on the texture refactor, change the way samplers work. // Sadly, not all texture instructions specify the type of texture their sampler // uses. This must be fixed at a later instance. 
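// With the Dimension query rewritten below, a 2D TXQ now expands to roughly the
// following GLSL (identifiers illustrative): the first two destination registers
// receive the texture size at the level passed in gpr8, the third is zeroed and the
// fourth holds the mip count:
//     ivec2 sizes = textureSize(samp, gpr8);
//     gpr0 = sizes.x;  gpr1 = sizes.y;  gpr2 = 0;  gpr3 = textureQueryLevels(samp);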
@@ -2983,23 +3039,30 @@ private: GetSampler(instr.sampler, Tegra::Shader::TextureType::Texture2D, false, false); switch (instr.txq.query_type) { case Tegra::Shader::TextureQueryType::Dimension: { - const std::string texture = "textureQueryLevels(" + sampler + ')'; - regs.SetRegisterToInteger(instr.gpr0, true, 0, texture, 1, 1); + const std::string texture = "textureSize(" + sampler + ", " + + regs.GetRegisterAsInteger(instr.gpr8) + ')'; + const std::string mip_level = "textureQueryLevels(" + sampler + ')'; + shader.AddLine("ivec2 sizes = " + texture + ';'); + regs.SetRegisterToInteger(instr.gpr0, true, 0, "sizes.x", 1, 1); + regs.SetRegisterToInteger(instr.gpr0.Value() + 1, true, 0, "sizes.y", 1, 1); + regs.SetRegisterToInteger(instr.gpr0.Value() + 2, true, 0, "0", 1, 1); + regs.SetRegisterToInteger(instr.gpr0.Value() + 3, true, 0, mip_level, 1, 1); break; } default: { - LOG_CRITICAL(HW_GPU, "Unhandled texture query type: {}", - static_cast<u32>(instr.txq.query_type.Value())); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unhandled texture query type: {}", + static_cast<u32>(instr.txq.query_type.Value())); } } + --shader.scope; + shader.AddLine('}'); break; } case OpCode::Id::TMML: { - ASSERT_MSG(!instr.tmml.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP), - "NODEP is not implemented"); - ASSERT_MSG(!instr.tmml.UsesMiscMode(Tegra::Shader::TextureMiscMode::NDV), - "NDV is not implemented"); + UNIMPLEMENTED_IF_MSG(instr.tmml.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP), + "NODEP is not implemented"); + UNIMPLEMENTED_IF_MSG(instr.tmml.UsesMiscMode(Tegra::Shader::TextureMiscMode::NDV), + "NDV is not implemented"); const std::string x = regs.GetRegisterAsFloat(instr.gpr8); const bool is_array = instr.tmml.array != 0; @@ -3021,9 +3084,7 @@ private: break; } default: - LOG_CRITICAL(HW_GPU, "Unhandled texture type {}", - static_cast<u32>(texture_type)); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unhandled texture type {}", static_cast<u32>(texture_type)); // Fallback to interpreting as a 2D texture for now const std::string y = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1); @@ -3046,8 +3107,7 @@ private: break; } default: { - LOG_CRITICAL(HW_GPU, "Unhandled memory instruction: {}", opcode->get().GetName()); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unhandled memory instruction: {}", opcode->get().GetName()); } } break; @@ -3133,7 +3193,7 @@ private: break; } case OpCode::Type::HalfSetPredicate: { - ASSERT_MSG(instr.hsetp2.ftz == 0, "Unimplemented"); + UNIMPLEMENTED_IF(instr.hsetp2.ftz != 0); const std::string op_a = GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr8, 0, false), instr.hsetp2.type_a, @@ -3178,6 +3238,9 @@ private: break; } case OpCode::Type::PredicateSetRegister: { + UNIMPLEMENTED_IF_MSG(instr.generates_cc, + "Condition codes generation in PSET is not implemented"); + const std::string op_a = GetPredicateCondition(instr.pset.pred12, instr.pset.neg_pred12 != 0); const std::string op_b = @@ -3198,12 +3261,6 @@ private: const std::string value = '(' + result + ") ? 
1.0 : 0.0"; regs.SetRegisterToFloat(instr.gpr0, 0, value, 1, 1); } - - if (instr.generates_cc) { - LOG_CRITICAL(HW_GPU, "PSET Generates an unhandled Control Code"); - UNREACHABLE(); - } - break; } case OpCode::Type::PredicateSetPredicate: { @@ -3241,21 +3298,19 @@ private: const std::string pred = GetPredicateCondition(instr.csetp.pred39, instr.csetp.neg_pred39 != 0); const std::string combiner = GetPredicateCombiner(instr.csetp.op); - const std::string control_code = regs.GetControlCode(instr.csetp.cc); + const std::string condition_code = regs.GetConditionCode(instr.csetp.cc); if (instr.csetp.pred3 != static_cast<u64>(Pred::UnusedIndex)) { SetPredicate(instr.csetp.pred3, - '(' + control_code + ") " + combiner + " (" + pred + ')'); + '(' + condition_code + ") " + combiner + " (" + pred + ')'); } if (instr.csetp.pred0 != static_cast<u64>(Pred::UnusedIndex)) { SetPredicate(instr.csetp.pred0, - "!(" + control_code + ") " + combiner + " (" + pred + ')'); + "!(" + condition_code + ") " + combiner + " (" + pred + ')'); } break; } default: { - LOG_CRITICAL(HW_GPU, "Unhandled predicate instruction: {}", - opcode->get().GetName()); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unhandled predicate instruction: {}", opcode->get().GetName()); } } break; @@ -3363,7 +3418,7 @@ private: break; } case OpCode::Type::HalfSet: { - ASSERT_MSG(instr.hset2.ftz == 0, "Unimplemented"); + UNIMPLEMENTED_IF(instr.hset2.ftz != 0); const std::string op_a = GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr8, 0, false), instr.hset2.type_a, @@ -3407,15 +3462,17 @@ private: break; } case OpCode::Type::Xmad: { - ASSERT_MSG(!instr.xmad.sign_a, "Unimplemented"); - ASSERT_MSG(!instr.xmad.sign_b, "Unimplemented"); + UNIMPLEMENTED_IF(instr.xmad.sign_a); + UNIMPLEMENTED_IF(instr.xmad.sign_b); + UNIMPLEMENTED_IF_MSG(instr.generates_cc, + "Condition codes generation in XMAD is not implemented"); std::string op_a{regs.GetRegisterAsInteger(instr.gpr8, 0, instr.xmad.sign_a)}; std::string op_b; std::string op_c; // TODO(bunnei): Needs to be fixed once op_a or op_b is signed - ASSERT_MSG(instr.xmad.sign_a == instr.xmad.sign_b, "Unimplemented"); + UNIMPLEMENTED_IF(instr.xmad.sign_a != instr.xmad.sign_b); const bool is_signed{instr.xmad.sign_a == 1}; bool is_merge{}; @@ -3448,8 +3505,7 @@ private: break; } default: { - LOG_CRITICAL(HW_GPU, "Unhandled XMAD instruction: {}", opcode->get().GetName()); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unhandled XMAD instruction: {}", opcode->get().GetName()); } } @@ -3485,9 +3541,8 @@ private: op_c = "((" + op_c + ") + (" + src2 + "<< 16))"; break; default: { - LOG_CRITICAL(HW_GPU, "Unhandled XMAD mode: {}", - static_cast<u32>(instr.xmad.mode.Value())); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unhandled XMAD mode: {}", + static_cast<u32>(instr.xmad.mode.Value())); } } @@ -3497,25 +3552,19 @@ private: } regs.SetRegisterToInteger(instr.gpr0, is_signed, 0, sum, 1, 1); - if (instr.generates_cc) { - LOG_CRITICAL(HW_GPU, "XMAD Generates an unhandled Control Code"); - UNREACHABLE(); - } break; } default: { switch (opcode->get().GetId()) { case OpCode::Id::EXIT: { + const Tegra::Shader::ConditionCode cc = instr.flow_condition_code; + UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, + "EXIT condition code used: {}", static_cast<u32>(cc)); + if (stage == Maxwell3D::Regs::ShaderStage::Fragment) { EmitFragmentOutputsWrite(); } - const Tegra::Shader::ControlCode cc = instr.flow_control_code; - if (cc != Tegra::Shader::ControlCode::T) { - LOG_CRITICAL(HW_GPU, "EXIT Control Code used: {}", static_cast<u32>(cc)); - 
UNREACHABLE(); - } - switch (instr.flow.cond) { case Tegra::Shader::FlowCondition::Always: shader.AddLine("return true;"); @@ -3530,26 +3579,24 @@ private: case Tegra::Shader::FlowCondition::Fcsm_Tr: // TODO(bunnei): What is this used for? If we assume this condition is not // satisfied, dual vertex shaders in Farming Simulator make more sense - LOG_CRITICAL(HW_GPU, "Skipping unknown FlowCondition::Fcsm_Tr"); + UNIMPLEMENTED_MSG("Skipping unknown FlowCondition::Fcsm_Tr"); break; default: - LOG_CRITICAL(HW_GPU, "Unhandled flow condition: {}", - static_cast<u32>(instr.flow.cond.Value())); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unhandled flow condition: {}", + static_cast<u32>(instr.flow.cond.Value())); } break; } case OpCode::Id::KIL: { - ASSERT(instr.flow.cond == Tegra::Shader::FlowCondition::Always); + UNIMPLEMENTED_IF(instr.flow.cond != Tegra::Shader::FlowCondition::Always); + + const Tegra::Shader::ConditionCode cc = instr.flow_condition_code; + UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, + "KIL condition code used: {}", static_cast<u32>(cc)); // Enclose "discard" in a conditional, so that GLSL compilation does not complain // about unexecuted instructions that may follow this. - const Tegra::Shader::ControlCode cc = instr.flow_control_code; - if (cc != Tegra::Shader::ControlCode::T) { - LOG_CRITICAL(HW_GPU, "KIL Control Code used: {}", static_cast<u32>(cc)); - UNREACHABLE(); - } shader.AddLine("if (true) {"); ++shader.scope; shader.AddLine("discard;"); @@ -3559,7 +3606,8 @@ break; } case OpCode::Id::OUT_R: { - ASSERT(instr.gpr20.Value() == Register::ZeroIndex); + UNIMPLEMENTED_IF_MSG(instr.gpr20.Value() != Register::ZeroIndex, + "Stream buffer is not supported"); ASSERT_MSG(stage == Maxwell3D::Regs::ShaderStage::Geometry, "OUT is expected to be used in a geometry shader."); @@ -3586,18 +3634,17 @@ break; } default: { - LOG_CRITICAL(HW_GPU, "Unhandled system move: {}", - static_cast<u32>(instr.sys20.Value())); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unhandled system move: {}", + static_cast<u32>(instr.sys20.Value())); } } break; } case OpCode::Id::ISBERD: { - ASSERT(instr.isberd.o == 0); - ASSERT(instr.isberd.skew == 0); - ASSERT(instr.isberd.shift == Tegra::Shader::IsberdShift::None); - ASSERT(instr.isberd.mode == Tegra::Shader::IsberdMode::None); + UNIMPLEMENTED_IF(instr.isberd.o != 0); + UNIMPLEMENTED_IF(instr.isberd.skew != 0); + UNIMPLEMENTED_IF(instr.isberd.shift != Tegra::Shader::IsberdShift::None); + UNIMPLEMENTED_IF(instr.isberd.mode != Tegra::Shader::IsberdMode::None); ASSERT_MSG(stage == Maxwell3D::Regs::ShaderStage::Geometry, "ISBERD is expected to be used in a geometry shader."); LOG_WARNING(HW_GPU, "ISBERD instruction is incomplete"); @@ -3605,13 +3652,13 @@ break; } case OpCode::Id::BRA: { - ASSERT_MSG(instr.bra.constant_buffer == 0, - "BRA with constant buffers are not implemented"); - const Tegra::Shader::ControlCode cc = instr.flow_control_code; - if (cc != Tegra::Shader::ControlCode::T) { - LOG_CRITICAL(HW_GPU, "BRA Control Code used: {}", static_cast<u32>(cc)); - UNREACHABLE(); - } + UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0, + "BRA with constant buffers are not implemented"); + + const Tegra::Shader::ConditionCode cc = instr.flow_condition_code; + UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, + "BRA condition code used: {}", static_cast<u32>(cc)); + const u32 target = offset + instr.bra.GetBranchTarget(); shader.AddLine("{ jmp_to = " + std::to_string(target) + "u; break; }"); break; } @@ -3634,7 +3681,8 @@
private: // The SSY opcode tells the GPU where to re-converge divergent execution paths, it // sets the target of the jump that the SYNC instruction will make. The SSY opcode // has a similar structure to the BRA opcode. - ASSERT_MSG(instr.bra.constant_buffer == 0, "Constant buffer flow is not supported"); + UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0, + "Constant buffer flow is not supported"); const u32 target = offset + instr.bra.GetBranchTarget(); EmitPushToFlowStack(target); @@ -3644,29 +3692,28 @@ private: // PBK pushes to a stack the address where BRK will jump to. This shares stack with // SSY but using SYNC on a PBK address will kill the shader execution. We don't // emulate this because it's very unlikely a driver will emit such invalid shader. - ASSERT_MSG(instr.bra.constant_buffer == 0, "Constant buffer PBK is not supported"); + UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0, + "Constant buffer PBK is not supported"); const u32 target = offset + instr.bra.GetBranchTarget(); EmitPushToFlowStack(target); break; } case OpCode::Id::SYNC: { + const Tegra::Shader::ConditionCode cc = instr.flow_condition_code; + UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, + "SYNC condition code used: {}", static_cast<u32>(cc)); + // The SYNC opcode jumps to the address previously set by the SSY opcode - const Tegra::Shader::ControlCode cc = instr.flow_control_code; - if (cc != Tegra::Shader::ControlCode::T) { - LOG_CRITICAL(HW_GPU, "SYNC Control Code used: {}", static_cast<u32>(cc)); - UNREACHABLE(); - } EmitPopFromFlowStack(); break; } case OpCode::Id::BRK: { // The BRK opcode jumps to the address previously set by the PBK opcode - const Tegra::Shader::ControlCode cc = instr.flow_control_code; - if (cc != Tegra::Shader::ControlCode::T) { - LOG_CRITICAL(HW_GPU, "BRK Control Code used: {}", static_cast<u32>(cc)); - UNREACHABLE(); - } + const Tegra::Shader::ConditionCode cc = instr.flow_condition_code; + UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, + "BRK condition code used: {}", static_cast<u32>(cc)); + EmitPopFromFlowStack(); break; } @@ -3677,6 +3724,9 @@ private: break; } case OpCode::Id::VMAD: { + UNIMPLEMENTED_IF_MSG(instr.generates_cc, + "Condition codes generation in VMAD is not implemented"); + const bool result_signed = instr.video.signed_a == 1 || instr.video.signed_b == 1; const std::string op_a = GetVideoOperandA(instr); const std::string op_b = GetVideoOperandB(instr); @@ -3696,11 +3746,6 @@ private: regs.SetRegisterToInteger(instr.gpr0, result_signed, 1, result, 1, 1, instr.vmad.saturate == 1, 0, Register::Size::Word, instr.vmad.cc); - if (instr.generates_cc) { - LOG_CRITICAL(HW_GPU, "VMAD Generates an unhandled Control Code"); - UNREACHABLE(); - } - break; } case OpCode::Id::VSETP: { @@ -3727,10 +3772,7 @@ private: } break; } - default: { - LOG_CRITICAL(HW_GPU, "Unhandled instruction: {}", opcode->get().GetName()); - UNREACHABLE(); - } + default: { UNIMPLEMENTED_MSG("Unhandled instruction: {}", opcode->get().GetName()); } } break; @@ -3855,6 +3897,7 @@ private: Maxwell3D::Regs::ShaderStage stage; const std::string& suffix; u64 local_memory_size; + std::size_t shader_length; ShaderWriter shader; ShaderWriter declarations; @@ -3873,9 +3916,10 @@ std::optional<ProgramResult> DecompileProgram(const ProgramCode& program_code, u Maxwell3D::Regs::ShaderStage stage, const std::string& suffix) { try { - const auto subroutines = - ControlFlowAnalyzer(program_code, main_offset, suffix).GetSubroutines(); - GLSLGenerator generator(subroutines, 
program_code, main_offset, stage, suffix); + ControlFlowAnalyzer analyzer(program_code, main_offset, suffix); + const auto subroutines = analyzer.GetSubroutines(); + GLSLGenerator generator(subroutines, program_code, main_offset, stage, suffix, + analyzer.GetShaderLength()); return ProgramResult{generator.GetShaderCode(), generator.GetEntries()}; } catch (const DecompileFail& exception) { LOG_ERROR(HW_GPU, "Shader decompilation failed: {}", exception.what()); diff --git a/src/video_core/renderer_opengl/gl_shader_gen.h b/src/video_core/renderer_opengl/gl_shader_gen.h index 520b9d4e3..b425d98ae 100644 --- a/src/video_core/renderer_opengl/gl_shader_gen.h +++ b/src/video_core/renderer_opengl/gl_shader_gen.h @@ -163,6 +163,7 @@ private: struct ShaderEntries { std::vector<ConstBufferEntry> const_buffer_entries; std::vector<SamplerEntry> texture_samplers; + std::size_t shader_length; }; using ProgramResult = std::pair<std::string, ShaderEntries>; diff --git a/src/video_core/renderer_opengl/gl_state.cpp b/src/video_core/renderer_opengl/gl_state.cpp index d9910c6e8..934f4db78 100644 --- a/src/video_core/renderer_opengl/gl_state.cpp +++ b/src/video_core/renderer_opengl/gl_state.cpp @@ -233,6 +233,28 @@ void OpenGLState::ApplyStencilTest() const { config_stencil(GL_BACK, stencil.back, cur_state.stencil.back); } } +// Viewport does not affect glClearBuffer, so emulate the viewport using the scissor test +void OpenGLState::EmulateViewportWithScissor() { + auto& current = viewports[0]; + if (current.scissor.enabled) { + const GLint left = std::max(current.x, current.scissor.x); + const GLint right = + std::max(current.x + current.width, current.scissor.x + current.scissor.width); + const GLint bottom = std::max(current.y, current.scissor.y); + const GLint top = + std::max(current.y + current.height, current.scissor.y + current.scissor.height); + current.scissor.x = std::max(left, 0); + current.scissor.y = std::max(bottom, 0); + current.scissor.width = std::max(right - left, 0); + current.scissor.height = std::max(top - bottom, 0); + } else { + current.scissor.enabled = true; + current.scissor.x = current.x; + current.scissor.y = current.y; + current.scissor.width = current.width; + current.scissor.height = current.height; + } +} void OpenGLState::ApplyViewport() const { if (GLAD_GL_ARB_viewport_array && geometry_shaders.enabled) { @@ -242,7 +264,9 @@ void OpenGLState::ApplyViewport() const { const auto& updated = viewports[i]; if (updated.x != current.x || updated.y != current.y || updated.width != current.width || updated.height != current.height) { - glViewportIndexedf(i, updated.x, updated.y, updated.width, updated.height); + glViewportIndexedf( + i, static_cast<GLfloat>(updated.x), static_cast<GLfloat>(updated.y), + static_cast<GLfloat>(updated.width), static_cast<GLfloat>(updated.height)); } if (updated.depth_range_near != current.depth_range_near || updated.depth_range_far != current.depth_range_far) { @@ -270,8 +294,7 @@ void OpenGLState::ApplyViewport() const { const auto& updated = viewports[0]; if (updated.x != current.x || updated.y != current.y || updated.width != current.width || updated.height != current.height) { - glViewport(static_cast<GLint>(updated.x), static_cast<GLint>(updated.y), - static_cast<GLsizei>(updated.width), static_cast<GLsizei>(updated.height)); + glViewport(updated.x, updated.y, updated.width, updated.height); } if (updated.depth_range_near != current.depth_range_near || updated.depth_range_far != current.depth_range_far) { @@ -339,14 +362,14 @@ void
OpenGLState::ApplyTargetBlending(std::size_t target, bool force) const { if (blend_changed || updated.src_rgb_func != current.src_rgb_func || updated.dst_rgb_func != current.dst_rgb_func || updated.src_a_func != current.src_a_func || updated.dst_a_func != current.dst_a_func) { - glBlendFuncSeparateiARB(static_cast<GLuint>(target), updated.src_rgb_func, - updated.dst_rgb_func, updated.src_a_func, updated.dst_a_func); + glBlendFuncSeparatei(static_cast<GLuint>(target), updated.src_rgb_func, + updated.dst_rgb_func, updated.src_a_func, updated.dst_a_func); } if (blend_changed || updated.rgb_equation != current.rgb_equation || updated.a_equation != current.a_equation) { - glBlendEquationSeparateiARB(static_cast<GLuint>(target), updated.rgb_equation, - updated.a_equation); + glBlendEquationSeparatei(static_cast<GLuint>(target), updated.rgb_equation, + updated.a_equation); } } diff --git a/src/video_core/renderer_opengl/gl_state.h b/src/video_core/renderer_opengl/gl_state.h index bdc743b0f..032fc43f0 100644 --- a/src/video_core/renderer_opengl/gl_state.h +++ b/src/video_core/renderer_opengl/gl_state.h @@ -156,10 +156,10 @@ public: } draw; struct viewport { - GLfloat x; - GLfloat y; - GLfloat width; - GLfloat height; + GLint x; + GLint y; + GLint width; + GLint height; GLfloat depth_range_near; // GL_DEPTH_RANGE GLfloat depth_range_far; // GL_DEPTH_RANGE struct { @@ -206,6 +206,7 @@ public: OpenGLState& ResetBuffer(GLuint handle); OpenGLState& ResetVertexArray(GLuint handle); OpenGLState& ResetFramebuffer(GLuint handle); + void EmulateViewportWithScissor(); private: static OpenGLState cur_state; diff --git a/src/video_core/renderer_opengl/maxwell_to_gl.h b/src/video_core/renderer_opengl/maxwell_to_gl.h index 065b3929c..a8833c06e 100644 --- a/src/video_core/renderer_opengl/maxwell_to_gl.h +++ b/src/video_core/renderer_opengl/maxwell_to_gl.h @@ -218,14 +218,19 @@ inline GLenum DepthCompareFunc(Tegra::Texture::DepthCompareFunc func) { inline GLenum BlendEquation(Maxwell::Blend::Equation equation) { switch (equation) { case Maxwell::Blend::Equation::Add: + case Maxwell::Blend::Equation::AddGL: return GL_FUNC_ADD; case Maxwell::Blend::Equation::Subtract: + case Maxwell::Blend::Equation::SubtractGL: return GL_FUNC_SUBTRACT; case Maxwell::Blend::Equation::ReverseSubtract: + case Maxwell::Blend::Equation::ReverseSubtractGL: return GL_FUNC_REVERSE_SUBTRACT; case Maxwell::Blend::Equation::Min: + case Maxwell::Blend::Equation::MinGL: return GL_MIN; case Maxwell::Blend::Equation::Max: + case Maxwell::Blend::Equation::MaxGL: return GL_MAX; } LOG_ERROR(Render_OpenGL, "Unimplemented blend equation={}", static_cast<u32>(equation)); diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp index ea38da932..4fd0d66c5 100644 --- a/src/video_core/renderer_opengl/renderer_opengl.cpp +++ b/src/video_core/renderer_opengl/renderer_opengl.cpp @@ -19,9 +19,9 @@ #include "core/settings.h" #include "core/telemetry_session.h" #include "core/tracer/recorder.h" +#include "video_core/morton.h" #include "video_core/renderer_opengl/gl_rasterizer.h" #include "video_core/renderer_opengl/renderer_opengl.h" -#include "video_core/utils.h" namespace OpenGL { @@ -304,6 +304,12 @@ void RendererOpenGL::ConfigureFramebufferTexture(TextureInfo& texture, gl_framebuffer_data.resize(texture.width * texture.height * 4); break; default: + internal_format = GL_RGBA; + texture.gl_format = GL_RGBA; + texture.gl_type = GL_UNSIGNED_INT_8_8_8_8_REV; + gl_framebuffer_data.resize(texture.width * 
texture.height * 4); + LOG_CRITICAL(Render_OpenGL, "Unknown framebuffer pixel format: {}", + static_cast<u32>(framebuffer.pixel_format)); UNREACHABLE(); } @@ -484,7 +490,7 @@ bool RendererOpenGL::Init() { Core::Telemetry().AddField(Telemetry::FieldType::UserSystem, "GPU_Model", gpu_model); Core::Telemetry().AddField(Telemetry::FieldType::UserSystem, "GPU_OpenGL_Version", gl_version); - if (!GLAD_GL_VERSION_3_3) { + if (!GLAD_GL_VERSION_4_3) { return false; } diff --git a/src/video_core/utils.h b/src/video_core/utils.h deleted file mode 100644 index e0a14d48f..000000000 --- a/src/video_core/utils.h +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2014 Citra Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. - -#pragma once - -#include "common/common_types.h" - -namespace VideoCore { - -// 8x8 Z-Order coordinate from 2D coordinates -static inline u32 MortonInterleave(u32 x, u32 y) { - static const u32 xlut[] = {0x00, 0x01, 0x04, 0x05, 0x10, 0x11, 0x14, 0x15}; - static const u32 ylut[] = {0x00, 0x02, 0x08, 0x0a, 0x20, 0x22, 0x28, 0x2a}; - return xlut[x % 8] + ylut[y % 8]; -} - -/** - * Calculates the offset of the position of the pixel in Morton order - */ -static inline u32 GetMortonOffset(u32 x, u32 y, u32 bytes_per_pixel) { - // Images are split into 8x8 tiles. Each tile is composed of four 4x4 subtiles each - // of which is composed of four 2x2 subtiles each of which is composed of four texels. - // Each structure is embedded into the next-bigger one in a diagonal pattern, e.g. - // texels are laid out in a 2x2 subtile like this: - // 2 3 - // 0 1 - // - // The full 8x8 tile has the texels arranged like this: - // - // 42 43 46 47 58 59 62 63 - // 40 41 44 45 56 57 60 61 - // 34 35 38 39 50 51 54 55 - // 32 33 36 37 48 49 52 53 - // 10 11 14 15 26 27 30 31 - // 08 09 12 13 24 25 28 29 - // 02 03 06 07 18 19 22 23 - // 00 01 04 05 16 17 20 21 - // - // This pattern is what's called Z-order curve, or Morton order. 
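The 8x8 xlut/ylut tables above are just a precomputed three-bit interleave of the x and y coordinates. A small, self-contained sketch (illustrative only, not part of the patch or of the replacement helpers in video_core/morton.h) that computes the same tile index directly and can be used to cross-check the tables against the diagram above:

#include <cstdint>

// Interleaves the low three bits of x and y (x in the even bit positions, y in the
// odd ones), yielding the texel index inside an 8x8 Z-order tile. For example,
// x = 3 (0b011) and y = 5 (0b101) give 0b100111 = 0x27 = 39, which matches both
// xlut[3] + ylut[5] = 0x05 + 0x22 and the 39 in the diagram at x = 3 on the row
// for y = 5 (counting the bottom row as y = 0).
static std::uint32_t MortonInterleave8x8(std::uint32_t x, std::uint32_t y) {
    std::uint32_t result = 0;
    for (std::uint32_t bit = 0; bit < 3; ++bit) {
        result |= ((x >> bit) & 1u) << (2 * bit);
        result |= ((y >> bit) & 1u) << (2 * bit + 1);
    }
    return result;
}

The split into per-axis tables works because the interleave assigns x and y disjoint bit positions, so MortonInterleave(x, y) == xlut[x % 8] + ylut[y % 8] can be evaluated as two independent lookups.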
- - const unsigned int block_height = 8; - const unsigned int coarse_x = x & ~7; - - u32 i = VideoCore::MortonInterleave(x, y); - - const unsigned int offset = coarse_x * block_height; - - return (i + offset) * bytes_per_pixel; -} - -static inline u32 MortonInterleave128(u32 x, u32 y) { - // 128x128 Z-Order coordinate from 2D coordinates - static constexpr u32 xlut[] = { - 0x0000, 0x0001, 0x0002, 0x0003, 0x0008, 0x0009, 0x000a, 0x000b, 0x0040, 0x0041, 0x0042, - 0x0043, 0x0048, 0x0049, 0x004a, 0x004b, 0x0800, 0x0801, 0x0802, 0x0803, 0x0808, 0x0809, - 0x080a, 0x080b, 0x0840, 0x0841, 0x0842, 0x0843, 0x0848, 0x0849, 0x084a, 0x084b, 0x1000, - 0x1001, 0x1002, 0x1003, 0x1008, 0x1009, 0x100a, 0x100b, 0x1040, 0x1041, 0x1042, 0x1043, - 0x1048, 0x1049, 0x104a, 0x104b, 0x1800, 0x1801, 0x1802, 0x1803, 0x1808, 0x1809, 0x180a, - 0x180b, 0x1840, 0x1841, 0x1842, 0x1843, 0x1848, 0x1849, 0x184a, 0x184b, 0x2000, 0x2001, - 0x2002, 0x2003, 0x2008, 0x2009, 0x200a, 0x200b, 0x2040, 0x2041, 0x2042, 0x2043, 0x2048, - 0x2049, 0x204a, 0x204b, 0x2800, 0x2801, 0x2802, 0x2803, 0x2808, 0x2809, 0x280a, 0x280b, - 0x2840, 0x2841, 0x2842, 0x2843, 0x2848, 0x2849, 0x284a, 0x284b, 0x3000, 0x3001, 0x3002, - 0x3003, 0x3008, 0x3009, 0x300a, 0x300b, 0x3040, 0x3041, 0x3042, 0x3043, 0x3048, 0x3049, - 0x304a, 0x304b, 0x3800, 0x3801, 0x3802, 0x3803, 0x3808, 0x3809, 0x380a, 0x380b, 0x3840, - 0x3841, 0x3842, 0x3843, 0x3848, 0x3849, 0x384a, 0x384b, 0x0000, 0x0001, 0x0002, 0x0003, - 0x0008, 0x0009, 0x000a, 0x000b, 0x0040, 0x0041, 0x0042, 0x0043, 0x0048, 0x0049, 0x004a, - 0x004b, 0x0800, 0x0801, 0x0802, 0x0803, 0x0808, 0x0809, 0x080a, 0x080b, 0x0840, 0x0841, - 0x0842, 0x0843, 0x0848, 0x0849, 0x084a, 0x084b, 0x1000, 0x1001, 0x1002, 0x1003, 0x1008, - 0x1009, 0x100a, 0x100b, 0x1040, 0x1041, 0x1042, 0x1043, 0x1048, 0x1049, 0x104a, 0x104b, - 0x1800, 0x1801, 0x1802, 0x1803, 0x1808, 0x1809, 0x180a, 0x180b, 0x1840, 0x1841, 0x1842, - 0x1843, 0x1848, 0x1849, 0x184a, 0x184b, 0x2000, 0x2001, 0x2002, 0x2003, 0x2008, 0x2009, - 0x200a, 0x200b, 0x2040, 0x2041, 0x2042, 0x2043, 0x2048, 0x2049, 0x204a, 0x204b, 0x2800, - 0x2801, 0x2802, 0x2803, 0x2808, 0x2809, 0x280a, 0x280b, 0x2840, 0x2841, 0x2842, 0x2843, - 0x2848, 0x2849, 0x284a, 0x284b, 0x3000, 0x3001, 0x3002, 0x3003, 0x3008, 0x3009, 0x300a, - 0x300b, 0x3040, 0x3041, 0x3042, 0x3043, 0x3048, 0x3049, 0x304a, 0x304b, 0x3800, 0x3801, - 0x3802, 0x3803, 0x3808, 0x3809, 0x380a, 0x380b, 0x3840, 0x3841, 0x3842, 0x3843, 0x3848, - 0x3849, 0x384a, 0x384b, 0x0000, 0x0001, 0x0002, 0x0003, 0x0008, 0x0009, 0x000a, 0x000b, - 0x0040, 0x0041, 0x0042, 0x0043, 0x0048, 0x0049, 0x004a, 0x004b, 0x0800, 0x0801, 0x0802, - 0x0803, 0x0808, 0x0809, 0x080a, 0x080b, 0x0840, 0x0841, 0x0842, 0x0843, 0x0848, 0x0849, - 0x084a, 0x084b, 0x1000, 0x1001, 0x1002, 0x1003, 0x1008, 0x1009, 0x100a, 0x100b, 0x1040, - 0x1041, 0x1042, 0x1043, 0x1048, 0x1049, 0x104a, 0x104b, 0x1800, 0x1801, 0x1802, 0x1803, - 0x1808, 0x1809, 0x180a, 0x180b, 0x1840, 0x1841, 0x1842, 0x1843, 0x1848, 0x1849, 0x184a, - 0x184b, 0x2000, 0x2001, 0x2002, 0x2003, 0x2008, 0x2009, 0x200a, 0x200b, 0x2040, 0x2041, - 0x2042, 0x2043, 0x2048, 0x2049, 0x204a, 0x204b, 0x2800, 0x2801, 0x2802, 0x2803, 0x2808, - 0x2809, 0x280a, 0x280b, 0x2840, 0x2841, 0x2842, 0x2843, 0x2848, 0x2849, 0x284a, 0x284b, - 0x3000, 0x3001, 0x3002, 0x3003, 0x3008, 0x3009, 0x300a, 0x300b, 0x3040, 0x3041, 0x3042, - 0x3043, 0x3048, 0x3049, 0x304a, 0x304b, 0x3800, 0x3801, 0x3802, 0x3803, 0x3808, 0x3809, - 0x380a, 0x380b, 0x3840, 0x3841, 0x3842, 0x3843, 0x3848, 0x3849, 0x384a, 0x384b, - }; - static constexpr u32 ylut[] = { 
- 0x0000, 0x0004, 0x0010, 0x0014, 0x0020, 0x0024, 0x0030, 0x0034, 0x0080, 0x0084, 0x0090, - 0x0094, 0x00a0, 0x00a4, 0x00b0, 0x00b4, 0x0100, 0x0104, 0x0110, 0x0114, 0x0120, 0x0124, - 0x0130, 0x0134, 0x0180, 0x0184, 0x0190, 0x0194, 0x01a0, 0x01a4, 0x01b0, 0x01b4, 0x0200, - 0x0204, 0x0210, 0x0214, 0x0220, 0x0224, 0x0230, 0x0234, 0x0280, 0x0284, 0x0290, 0x0294, - 0x02a0, 0x02a4, 0x02b0, 0x02b4, 0x0300, 0x0304, 0x0310, 0x0314, 0x0320, 0x0324, 0x0330, - 0x0334, 0x0380, 0x0384, 0x0390, 0x0394, 0x03a0, 0x03a4, 0x03b0, 0x03b4, 0x0400, 0x0404, - 0x0410, 0x0414, 0x0420, 0x0424, 0x0430, 0x0434, 0x0480, 0x0484, 0x0490, 0x0494, 0x04a0, - 0x04a4, 0x04b0, 0x04b4, 0x0500, 0x0504, 0x0510, 0x0514, 0x0520, 0x0524, 0x0530, 0x0534, - 0x0580, 0x0584, 0x0590, 0x0594, 0x05a0, 0x05a4, 0x05b0, 0x05b4, 0x0600, 0x0604, 0x0610, - 0x0614, 0x0620, 0x0624, 0x0630, 0x0634, 0x0680, 0x0684, 0x0690, 0x0694, 0x06a0, 0x06a4, - 0x06b0, 0x06b4, 0x0700, 0x0704, 0x0710, 0x0714, 0x0720, 0x0724, 0x0730, 0x0734, 0x0780, - 0x0784, 0x0790, 0x0794, 0x07a0, 0x07a4, 0x07b0, 0x07b4, 0x0000, 0x0004, 0x0010, 0x0014, - 0x0020, 0x0024, 0x0030, 0x0034, 0x0080, 0x0084, 0x0090, 0x0094, 0x00a0, 0x00a4, 0x00b0, - 0x00b4, 0x0100, 0x0104, 0x0110, 0x0114, 0x0120, 0x0124, 0x0130, 0x0134, 0x0180, 0x0184, - 0x0190, 0x0194, 0x01a0, 0x01a4, 0x01b0, 0x01b4, 0x0200, 0x0204, 0x0210, 0x0214, 0x0220, - 0x0224, 0x0230, 0x0234, 0x0280, 0x0284, 0x0290, 0x0294, 0x02a0, 0x02a4, 0x02b0, 0x02b4, - 0x0300, 0x0304, 0x0310, 0x0314, 0x0320, 0x0324, 0x0330, 0x0334, 0x0380, 0x0384, 0x0390, - 0x0394, 0x03a0, 0x03a4, 0x03b0, 0x03b4, 0x0400, 0x0404, 0x0410, 0x0414, 0x0420, 0x0424, - 0x0430, 0x0434, 0x0480, 0x0484, 0x0490, 0x0494, 0x04a0, 0x04a4, 0x04b0, 0x04b4, 0x0500, - 0x0504, 0x0510, 0x0514, 0x0520, 0x0524, 0x0530, 0x0534, 0x0580, 0x0584, 0x0590, 0x0594, - 0x05a0, 0x05a4, 0x05b0, 0x05b4, 0x0600, 0x0604, 0x0610, 0x0614, 0x0620, 0x0624, 0x0630, - 0x0634, 0x0680, 0x0684, 0x0690, 0x0694, 0x06a0, 0x06a4, 0x06b0, 0x06b4, 0x0700, 0x0704, - 0x0710, 0x0714, 0x0720, 0x0724, 0x0730, 0x0734, 0x0780, 0x0784, 0x0790, 0x0794, 0x07a0, - 0x07a4, 0x07b0, 0x07b4, 0x0000, 0x0004, 0x0010, 0x0014, 0x0020, 0x0024, 0x0030, 0x0034, - 0x0080, 0x0084, 0x0090, 0x0094, 0x00a0, 0x00a4, 0x00b0, 0x00b4, 0x0100, 0x0104, 0x0110, - 0x0114, 0x0120, 0x0124, 0x0130, 0x0134, 0x0180, 0x0184, 0x0190, 0x0194, 0x01a0, 0x01a4, - 0x01b0, 0x01b4, 0x0200, 0x0204, 0x0210, 0x0214, 0x0220, 0x0224, 0x0230, 0x0234, 0x0280, - 0x0284, 0x0290, 0x0294, 0x02a0, 0x02a4, 0x02b0, 0x02b4, 0x0300, 0x0304, 0x0310, 0x0314, - 0x0320, 0x0324, 0x0330, 0x0334, 0x0380, 0x0384, 0x0390, 0x0394, 0x03a0, 0x03a4, 0x03b0, - 0x03b4, 0x0400, 0x0404, 0x0410, 0x0414, 0x0420, 0x0424, 0x0430, 0x0434, 0x0480, 0x0484, - 0x0490, 0x0494, 0x04a0, 0x04a4, 0x04b0, 0x04b4, 0x0500, 0x0504, 0x0510, 0x0514, 0x0520, - 0x0524, 0x0530, 0x0534, 0x0580, 0x0584, 0x0590, 0x0594, 0x05a0, 0x05a4, 0x05b0, 0x05b4, - 0x0600, 0x0604, 0x0610, 0x0614, 0x0620, 0x0624, 0x0630, 0x0634, 0x0680, 0x0684, 0x0690, - 0x0694, 0x06a0, 0x06a4, 0x06b0, 0x06b4, 0x0700, 0x0704, 0x0710, 0x0714, 0x0720, 0x0724, - 0x0730, 0x0734, 0x0780, 0x0784, 0x0790, 0x0794, 0x07a0, 0x07a4, 0x07b0, 0x07b4, - }; - return xlut[x % 128] + ylut[y % 128]; -} - -static inline u32 GetMortonOffset128(u32 x, u32 y, u32 bytes_per_pixel) { - // Calculates the offset of the position of the pixel in Morton order - // Framebuffer images are split into 128x128 tiles. 
- - const unsigned int block_height = 128; - const unsigned int coarse_x = x & ~127; - - u32 i = MortonInterleave128(x, y); - - const unsigned int offset = coarse_x * block_height; - - return (i + offset) * bytes_per_pixel; -} - -static inline void MortonCopyPixels128(u32 width, u32 height, u32 bytes_per_pixel, - u32 gl_bytes_per_pixel, u8* morton_data, u8* gl_data, - bool morton_to_gl) { - u8* data_ptrs[2]; - for (unsigned y = 0; y < height; ++y) { - for (unsigned x = 0; x < width; ++x) { - const u32 coarse_y = y & ~127; - u32 morton_offset = - GetMortonOffset128(x, y, bytes_per_pixel) + coarse_y * width * bytes_per_pixel; - u32 gl_pixel_index = (x + y * width) * gl_bytes_per_pixel; - - data_ptrs[morton_to_gl] = morton_data + morton_offset; - data_ptrs[!morton_to_gl] = &gl_data[gl_pixel_index]; - - memcpy(data_ptrs[0], data_ptrs[1], bytes_per_pixel); - } - } -} - -} // namespace VideoCore
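With utils.h removed, the 128x128 Morton helpers above are superseded by video_core/morton.h, which renderer_opengl.cpp now includes instead. For reference, a hedged usage sketch of the deleted MortonCopyPixels128; the dimensions and buffer names are illustrative only and assume the declaration above is still visible:

#include <cstdint>
#include <vector>

// Illustrative only: de-swizzle a 1280x720, 4-byte-per-pixel frame from the
// 128x128-tiled (Morton) layout into a row-major buffer suitable for glTexImage2D.
void DeswizzleFramebufferExample(std::uint8_t* morton_src) {
    constexpr std::uint32_t width = 1280;
    constexpr std::uint32_t height = 720;
    constexpr std::uint32_t bytes_per_pixel = 4;

    std::vector<std::uint8_t> linear(width * height * bytes_per_pixel);
    // morton_to_gl = true copies from the tiled source into the linear destination.
    VideoCore::MortonCopyPixels128(width, height, bytes_per_pixel, bytes_per_pixel,
                                   morton_src, linear.data(), true);
    // Pixel (x, y) now starts at (x + y * width) * bytes_per_pixel in 'linear'.
}

Calling it with morton_to_gl = false performs the opposite copy, tiling a linear buffer back into Morton order.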