Diffstat (limited to 'src/video_core')
39 files changed, 624 insertions, 400 deletions
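Most of this diff threads a new multi-word fast path through the command processor: DmaPusher::Step now forwards runs of data words addressed to a non-incrementing method through GPU::CallMultiMethod and into the bound engine, instead of issuing one CallMethod per word. The sketch below distills just that batching step as a standalone, compilable example; the names mirror the hunks that follow, and everything around them (headers, engine plumbing) is elided or invented for illustration.

```cpp
// Minimal sketch of the batched dispatch DmaPusher::Step gains in this diff.
// Assumptions: CallMultiMethod is a stand-in for the engine-side handler, and
// the command-word vector plays the role of the fetched command_headers.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

using u32 = std::uint32_t;

// Stand-in for GPU::CallMultiMethod: receives `amount` consecutive words.
static void CallMultiMethod(u32 method, const u32* base_start, u32 amount,
                            u32 methods_pending) {
    std::printf("method 0x%X: %u word(s), %u pending\n",
                static_cast<unsigned>(method), static_cast<unsigned>(amount),
                static_cast<unsigned>(methods_pending));
}

int main() {
    const std::vector<u32> command_words{10, 20, 30, 40, 50};
    std::size_t index = 0;   // Cursor into the fetched command buffer.
    u32 method_count = 8;    // Words the command header promised.
    const u32 method = 0x6C; // Arbitrary register index for the example.

    while (index < command_words.size() && method_count != 0) {
        // Forward as many pending words as this fetch actually contains, in
        // one call, then skip past them -- this is the core of the change.
        const u32 max_write = static_cast<u32>(
            std::min<std::size_t>(index + method_count, command_words.size()) -
            index);
        CallMultiMethod(method, &command_words[index], max_write, method_count);
        method_count -= max_write;
        index += max_write;
    }
}
```

On the receiving side, Maxwell3D fast-paths the two hot cases (macro parameters and CB_DATA const-buffer uploads), while the other engines simply loop CallMethod over the span, as the hunks below show.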
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt index 8ede4ba9b..ff53282c9 100644 --- a/src/video_core/CMakeLists.txt +++ b/src/video_core/CMakeLists.txt @@ -124,6 +124,8 @@ add_library(video_core STATIC shader/decode.cpp shader/expr.cpp shader/expr.h + shader/memory_util.cpp + shader/memory_util.h shader/node_helper.cpp shader/node_helper.h shader/node.h diff --git a/src/video_core/dma_pusher.cpp b/src/video_core/dma_pusher.cpp index 324dafdcd..16311f05e 100644 --- a/src/video_core/dma_pusher.cpp +++ b/src/video_core/dma_pusher.cpp @@ -71,16 +71,22 @@ bool DmaPusher::Step() { gpu.MemoryManager().ReadBlockUnsafe(dma_get, command_headers.data(), command_list_header.size * sizeof(u32)); - for (const CommandHeader& command_header : command_headers) { - - // now, see if we're in the middle of a command - if (dma_state.length_pending) { - // Second word of long non-inc methods command - method count - dma_state.length_pending = 0; - dma_state.method_count = command_header.method_count_; - } else if (dma_state.method_count) { + for (std::size_t index = 0; index < command_headers.size();) { + const CommandHeader& command_header = command_headers[index]; + + if (dma_state.method_count) { // Data word of methods command - CallMethod(command_header.argument); + if (dma_state.non_incrementing) { + const u32 max_write = static_cast<u32>( + std::min<std::size_t>(index + dma_state.method_count, command_headers.size()) - + index); + CallMultiMethod(&command_header.argument, max_write); + dma_state.method_count -= max_write; + index += max_write; + continue; + } else { + CallMethod(command_header.argument); + } if (!dma_state.non_incrementing) { dma_state.method++; @@ -120,6 +126,7 @@ bool DmaPusher::Step() { break; } } + index++; } if (!non_main) { @@ -140,4 +147,9 @@ void DmaPusher::CallMethod(u32 argument) const { gpu.CallMethod({dma_state.method, argument, dma_state.subchannel, dma_state.method_count}); } +void DmaPusher::CallMultiMethod(const u32* base_start, u32 num_methods) const { + gpu.CallMultiMethod(dma_state.method, dma_state.subchannel, base_start, num_methods, + dma_state.method_count); +} + } // namespace Tegra diff --git a/src/video_core/dma_pusher.h b/src/video_core/dma_pusher.h index d6188614a..6cef71306 100644 --- a/src/video_core/dma_pusher.h +++ b/src/video_core/dma_pusher.h @@ -75,6 +75,7 @@ private: void SetState(const CommandHeader& command_header); void CallMethod(u32 argument) const; + void CallMultiMethod(const u32* base_start, u32 num_methods) const; std::vector<CommandHeader> command_headers; ///< Buffer for list of commands fetched at once diff --git a/src/video_core/engines/fermi_2d.cpp b/src/video_core/engines/fermi_2d.cpp index bace6affb..8a47614d2 100644 --- a/src/video_core/engines/fermi_2d.cpp +++ b/src/video_core/engines/fermi_2d.cpp @@ -28,6 +28,12 @@ void Fermi2D::CallMethod(const GPU::MethodCall& method_call) { } } +void Fermi2D::CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending) { + for (std::size_t i = 0; i < amount; i++) { + CallMethod({method, base_start[i], 0, methods_pending - static_cast<u32>(i)}); + } +} + static std::pair<u32, u32> DelimitLine(u32 src_1, u32 src_2, u32 dst_1, u32 dst_2, u32 src_line) { const u32 line_a = src_2 - src_1; const u32 line_b = dst_2 - dst_1; diff --git a/src/video_core/engines/fermi_2d.h b/src/video_core/engines/fermi_2d.h index dba342c70..939a5966d 100644 --- a/src/video_core/engines/fermi_2d.h +++ b/src/video_core/engines/fermi_2d.h @@ -39,6 +39,9 @@ public: /// Write 
the value to the register identified by method. void CallMethod(const GPU::MethodCall& method_call); + /// Write multiple values to the register identified by method. + void CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending); + enum class Origin : u32 { Center = 0, Corner = 1, diff --git a/src/video_core/engines/kepler_compute.cpp b/src/video_core/engines/kepler_compute.cpp index 368c75a66..00a12175f 100644 --- a/src/video_core/engines/kepler_compute.cpp +++ b/src/video_core/engines/kepler_compute.cpp @@ -51,6 +51,13 @@ void KeplerCompute::CallMethod(const GPU::MethodCall& method_call) { } } +void KeplerCompute::CallMultiMethod(u32 method, const u32* base_start, u32 amount, + u32 methods_pending) { + for (std::size_t i = 0; i < amount; i++) { + CallMethod({method, base_start[i], 0, methods_pending - static_cast<u32>(i)}); + } +} + Texture::FullTextureInfo KeplerCompute::GetTexture(std::size_t offset) const { const std::bitset<8> cbuf_mask = launch_description.const_buffer_enable_mask.Value(); ASSERT(cbuf_mask[regs.tex_cb_index]); diff --git a/src/video_core/engines/kepler_compute.h b/src/video_core/engines/kepler_compute.h index eeb79c56f..fe55fdfd0 100644 --- a/src/video_core/engines/kepler_compute.h +++ b/src/video_core/engines/kepler_compute.h @@ -202,6 +202,9 @@ public: /// Write the value to the register identified by method. void CallMethod(const GPU::MethodCall& method_call); + /// Write multiple values to the register identified by method. + void CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending); + Texture::FullTextureInfo GetTexture(std::size_t offset) const; /// Given a texture handle, returns the TSC and TIC entries. diff --git a/src/video_core/engines/kepler_memory.cpp b/src/video_core/engines/kepler_memory.cpp index 597872e43..586ff15dc 100644 --- a/src/video_core/engines/kepler_memory.cpp +++ b/src/video_core/engines/kepler_memory.cpp @@ -41,4 +41,11 @@ void KeplerMemory::CallMethod(const GPU::MethodCall& method_call) { } } +void KeplerMemory::CallMultiMethod(u32 method, const u32* base_start, u32 amount, + u32 methods_pending) { + for (std::size_t i = 0; i < amount; i++) { + CallMethod({method, base_start[i], 0, methods_pending - static_cast<u32>(i)}); + } +} + } // namespace Tegra::Engines diff --git a/src/video_core/engines/kepler_memory.h b/src/video_core/engines/kepler_memory.h index 396fb6e86..bb26fb030 100644 --- a/src/video_core/engines/kepler_memory.h +++ b/src/video_core/engines/kepler_memory.h @@ -40,6 +40,9 @@ public: /// Write the value to the register identified by method. void CallMethod(const GPU::MethodCall& method_call); + /// Write multiple values to the register identified by method. + void CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending); + struct Regs { static constexpr size_t NUM_REGS = 0x7F; diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp index 2824ed707..39e3b66a2 100644 --- a/src/video_core/engines/maxwell_3d.cpp +++ b/src/video_core/engines/maxwell_3d.cpp @@ -280,6 +280,58 @@ void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) { } } +void Maxwell3D::CallMultiMethod(u32 method, const u32* base_start, u32 amount, + u32 methods_pending) { + // Methods after 0xE00 are special, they're actually triggers for some microcode that was + // uploaded to the GPU during initialization. 
+ if (method >= MacroRegistersStart) { + // We're trying to execute a macro + if (executing_macro == 0) { + // A macro call must begin by writing the macro method's register, not its argument. + ASSERT_MSG((method % 2) == 0, + "Can't start macro execution by writing to the ARGS register"); + executing_macro = method; + } + + for (std::size_t i = 0; i < amount; i++) { + macro_params.push_back(base_start[i]); + } + + // Call the macro when there are no more parameters in the command buffer + if (amount == methods_pending) { + CallMacroMethod(executing_macro, macro_params.size(), macro_params.data()); + macro_params.clear(); + } + return; + } + switch (method) { + case MAXWELL3D_REG_INDEX(const_buffer.cb_data[0]): + case MAXWELL3D_REG_INDEX(const_buffer.cb_data[1]): + case MAXWELL3D_REG_INDEX(const_buffer.cb_data[2]): + case MAXWELL3D_REG_INDEX(const_buffer.cb_data[3]): + case MAXWELL3D_REG_INDEX(const_buffer.cb_data[4]): + case MAXWELL3D_REG_INDEX(const_buffer.cb_data[5]): + case MAXWELL3D_REG_INDEX(const_buffer.cb_data[6]): + case MAXWELL3D_REG_INDEX(const_buffer.cb_data[7]): + case MAXWELL3D_REG_INDEX(const_buffer.cb_data[8]): + case MAXWELL3D_REG_INDEX(const_buffer.cb_data[9]): + case MAXWELL3D_REG_INDEX(const_buffer.cb_data[10]): + case MAXWELL3D_REG_INDEX(const_buffer.cb_data[11]): + case MAXWELL3D_REG_INDEX(const_buffer.cb_data[12]): + case MAXWELL3D_REG_INDEX(const_buffer.cb_data[13]): + case MAXWELL3D_REG_INDEX(const_buffer.cb_data[14]): + case MAXWELL3D_REG_INDEX(const_buffer.cb_data[15]): { + ProcessCBMultiData(method, base_start, amount); + break; + } + default: { + for (std::size_t i = 0; i < amount; i++) { + CallMethod({method, base_start[i], 0, methods_pending - static_cast<u32>(i)}); + } + } + } +} + void Maxwell3D::StepInstance(const MMEDrawMode expected_mode, const u32 count) { if (mme_draw.current_mode == MMEDrawMode::Undefined) { if (mme_draw.gl_begin_consume) { @@ -570,6 +622,28 @@ void Maxwell3D::StartCBData(u32 method) { ProcessCBData(regs.const_buffer.cb_data[cb_data_state.id]); } +void Maxwell3D::ProcessCBMultiData(u32 method, const u32* start_base, u32 amount) { + if (cb_data_state.current != method) { + if (cb_data_state.current != null_cb_data) { + FinishCBData(); + } + constexpr u32 first_cb_data = MAXWELL3D_REG_INDEX(const_buffer.cb_data[0]); + cb_data_state.start_pos = regs.const_buffer.cb_pos; + cb_data_state.id = method - first_cb_data; + cb_data_state.current = method; + cb_data_state.counter = 0; + } + const std::size_t id = cb_data_state.id; + const std::size_t size = amount; + std::size_t i = 0; + for (; i < size; i++) { + cb_data_state.buffer[id][cb_data_state.counter] = start_base[i]; + cb_data_state.counter++; + } + // Increment the current buffer position. + regs.const_buffer.cb_pos = regs.const_buffer.cb_pos + 4 * amount; +} + void Maxwell3D::FinishCBData() { // Write the input value to the current const buffer at the current position. const GPUVAddr buffer_address = regs.const_buffer.BufferAddress(); diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h index 59d5752d2..3dfba8197 100644 --- a/src/video_core/engines/maxwell_3d.h +++ b/src/video_core/engines/maxwell_3d.h @@ -1259,7 +1259,8 @@ public: GPUVAddr LimitAddress() const { return static_cast<GPUVAddr>((static_cast<GPUVAddr>(limit_high) << 32) | - limit_low); + limit_low) + + 1; } } vertex_array_limit[NumVertexArrays]; @@ -1358,6 +1359,9 @@ public: /// Write the value to the register identified by method. 
void CallMethod(const GPU::MethodCall& method_call); + /// Write multiple values to the register identified by method. + void CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending); + /// Write the value to the register identified by method. void CallMethodFromMME(const GPU::MethodCall& method_call); @@ -1511,6 +1515,7 @@ private: /// Handles a write to the CB_DATA[i] register. void StartCBData(u32 method); void ProcessCBData(u32 value); + void ProcessCBMultiData(u32 method, const u32* start_base, u32 amount); void FinishCBData(); /// Handles a write to the CB_BIND register. diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp index 3bfed6ab8..6630005b0 100644 --- a/src/video_core/engines/maxwell_dma.cpp +++ b/src/video_core/engines/maxwell_dma.cpp @@ -36,6 +36,13 @@ void MaxwellDMA::CallMethod(const GPU::MethodCall& method_call) { #undef MAXWELLDMA_REG_INDEX } +void MaxwellDMA::CallMultiMethod(u32 method, const u32* base_start, u32 amount, + u32 methods_pending) { + for (std::size_t i = 0; i < amount; i++) { + CallMethod({method, base_start[i], 0, methods_pending - static_cast<u32>(i)}); + } +} + void MaxwellDMA::HandleCopy() { LOG_TRACE(HW_GPU, "Requested a DMA copy"); diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h index 4f40d1d1f..c43ed8194 100644 --- a/src/video_core/engines/maxwell_dma.h +++ b/src/video_core/engines/maxwell_dma.h @@ -35,6 +35,9 @@ public: /// Write the value to the register identified by method. void CallMethod(const GPU::MethodCall& method_call); + /// Write multiple values to the register identified by method. + void CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending); + struct Regs { static constexpr std::size_t NUM_REGS = 0x1D6; diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp index 3b7572d61..b87fd873d 100644 --- a/src/video_core/gpu.cpp +++ b/src/video_core/gpu.cpp @@ -9,6 +9,7 @@ #include "core/core_timing_util.h" #include "core/frontend/emu_window.h" #include "core/memory.h" +#include "core/settings.h" #include "video_core/engines/fermi_2d.h" #include "video_core/engines/kepler_compute.h" #include "video_core/engines/kepler_memory.h" @@ -154,7 +155,10 @@ u64 GPU::GetTicks() const { constexpr u64 gpu_ticks_den = 625; const u64 cpu_ticks = system.CoreTiming().GetTicks(); - const u64 nanoseconds = Core::Timing::CyclesToNs(cpu_ticks).count(); + u64 nanoseconds = Core::Timing::CyclesToNs(cpu_ticks).count(); + if (Settings::values.use_fast_gpu_time) { + nanoseconds /= 256; + } const u64 nanoseconds_num = nanoseconds / gpu_ticks_den; const u64 nanoseconds_rem = nanoseconds % gpu_ticks_den; return nanoseconds_num * gpu_ticks_num + (nanoseconds_rem * gpu_ticks_num) / gpu_ticks_den; @@ -209,16 +213,32 @@ void GPU::CallMethod(const MethodCall& method_call) { ASSERT(method_call.subchannel < bound_engines.size()); - if (ExecuteMethodOnEngine(method_call)) { + if (ExecuteMethodOnEngine(method_call.method)) { CallEngineMethod(method_call); } else { CallPullerMethod(method_call); } } -bool GPU::ExecuteMethodOnEngine(const MethodCall& method_call) { - const auto method = static_cast<BufferMethods>(method_call.method); - return method >= BufferMethods::NonPullerMethods; +void GPU::CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount, + u32 methods_pending) { + LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method, subchannel); + + ASSERT(subchannel < bound_engines.size()); + + if 
(ExecuteMethodOnEngine(method)) { + CallEngineMultiMethod(method, subchannel, base_start, amount, methods_pending); + } else { + for (std::size_t i = 0; i < amount; i++) { + CallPullerMethod( + {method, base_start[i], subchannel, methods_pending - static_cast<u32>(i)}); + } + } +} + +bool GPU::ExecuteMethodOnEngine(u32 method) { + const auto buffer_method = static_cast<BufferMethods>(method); + return buffer_method >= BufferMethods::NonPullerMethods; } void GPU::CallPullerMethod(const MethodCall& method_call) { @@ -298,6 +318,31 @@ void GPU::CallEngineMethod(const MethodCall& method_call) { } } +void GPU::CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount, + u32 methods_pending) { + const EngineID engine = bound_engines[subchannel]; + + switch (engine) { + case EngineID::FERMI_TWOD_A: + fermi_2d->CallMultiMethod(method, base_start, amount, methods_pending); + break; + case EngineID::MAXWELL_B: + maxwell_3d->CallMultiMethod(method, base_start, amount, methods_pending); + break; + case EngineID::KEPLER_COMPUTE_B: + kepler_compute->CallMultiMethod(method, base_start, amount, methods_pending); + break; + case EngineID::MAXWELL_DMA_COPY_A: + maxwell_dma->CallMultiMethod(method, base_start, amount, methods_pending); + break; + case EngineID::KEPLER_INLINE_TO_MEMORY_B: + kepler_memory->CallMultiMethod(method, base_start, amount, methods_pending); + break; + default: + UNIMPLEMENTED_MSG("Unimplemented engine"); + } +} + void GPU::ProcessBindMethod(const MethodCall& method_call) { // Bind the current subchannel to the desired engine id. LOG_DEBUG(HW_GPU, "Binding subchannel {} to engine {}", method_call.subchannel, diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h index 5e3eb94e9..dd51c95b7 100644 --- a/src/video_core/gpu.h +++ b/src/video_core/gpu.h @@ -155,6 +155,10 @@ public: /// Calls a GPU method. void CallMethod(const MethodCall& method_call); + /// Calls a GPU multivalue method. + void CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount, + u32 methods_pending); + /// Flush all current written commands into the host GPU for execution. void FlushCommands(); /// Synchronizes CPU writes with Host GPU memory. @@ -309,8 +313,12 @@ private: /// Calls a GPU engine method. void CallEngineMethod(const MethodCall& method_call); + /// Calls a GPU engine multivalue method. + void CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount, + u32 methods_pending); + /// Determines where the method should be executed. 
- bool ExecuteMethodOnEngine(const MethodCall& method_call); + bool ExecuteMethodOnEngine(u32 method); protected: std::unique_ptr<Tegra::DmaPusher> dma_pusher; diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index 4c16c89d2..69a74449c 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp @@ -186,8 +186,12 @@ void RasterizerOpenGL::SetupVertexBuffer() { const GPUVAddr start = vertex_array.StartAddress(); const GPUVAddr end = regs.vertex_array_limit[index].LimitAddress(); - ASSERT(end > start); - const u64 size = end - start + 1; + ASSERT(end >= start); + const u64 size = end - start; + if (size == 0) { + glBindVertexBuffer(static_cast<GLuint>(index), 0, 0, vertex_array.stride); + continue; + } const auto [vertex_buffer, vertex_buffer_offset] = buffer_cache.UploadMemory(start, size); glBindVertexBuffer(static_cast<GLuint>(index), vertex_buffer, vertex_buffer_offset, vertex_array.stride); @@ -311,8 +315,8 @@ std::size_t RasterizerOpenGL::CalculateVertexArraysSize() const { const GPUVAddr start = regs.vertex_array[index].StartAddress(); const GPUVAddr end = regs.vertex_array_limit[index].LimitAddress(); - ASSERT(end > start); - size += end - start + 1; + size += end - start; + ASSERT(end >= start); } return size; @@ -344,7 +348,7 @@ void RasterizerOpenGL::ConfigureFramebuffers() { texture_cache.GuardRenderTargets(true); - View depth_surface = texture_cache.GetDepthBufferSurface(); + View depth_surface = texture_cache.GetDepthBufferSurface(true); const auto& regs = gpu.regs; UNIMPLEMENTED_IF(regs.rt_separate_frag_data == 0); @@ -353,7 +357,7 @@ void RasterizerOpenGL::ConfigureFramebuffers() { FramebufferCacheKey key; const auto colors_count = static_cast<std::size_t>(regs.rt_control.count); for (std::size_t index = 0; index < colors_count; ++index) { - View color_surface{texture_cache.GetColorBufferSurface(index)}; + View color_surface{texture_cache.GetColorBufferSurface(index, true)}; if (!color_surface) { continue; } @@ -377,28 +381,52 @@ void RasterizerOpenGL::ConfigureFramebuffers() { glBindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer_cache.GetFramebuffer(key)); } -void RasterizerOpenGL::ConfigureClearFramebuffer(bool using_color_fb, bool using_depth_fb, - bool using_stencil_fb) { +void RasterizerOpenGL::ConfigureClearFramebuffer(bool using_color, bool using_depth_stencil) { auto& gpu = system.GPU().Maxwell3D(); const auto& regs = gpu.regs; texture_cache.GuardRenderTargets(true); View color_surface; - if (using_color_fb) { + + if (using_color) { + // Determine if we have to preserve the contents. + // First we have to make sure all clear masks are enabled. + bool preserve_contents = !regs.clear_buffers.R || !regs.clear_buffers.G || + !regs.clear_buffers.B || !regs.clear_buffers.A; const std::size_t index = regs.clear_buffers.RT; - color_surface = texture_cache.GetColorBufferSurface(index); + if (regs.clear_flags.scissor) { + // Then we have to confirm scissor testing clears the whole image. 
+ const auto& scissor = regs.scissor_test[0]; + preserve_contents |= scissor.min_x > 0; + preserve_contents |= scissor.min_y > 0; + preserve_contents |= scissor.max_x < regs.rt[index].width; + preserve_contents |= scissor.max_y < regs.rt[index].height; + } + + color_surface = texture_cache.GetColorBufferSurface(index, preserve_contents); texture_cache.MarkColorBufferInUse(index); } + View depth_surface; - if (using_depth_fb || using_stencil_fb) { - depth_surface = texture_cache.GetDepthBufferSurface(); + if (using_depth_stencil) { + bool preserve_contents = false; + if (regs.clear_flags.scissor) { + // For depth stencil clears we only have to confirm scissor test covers the whole image. + const auto& scissor = regs.scissor_test[0]; + preserve_contents |= scissor.min_x > 0; + preserve_contents |= scissor.min_y > 0; + preserve_contents |= scissor.max_x < regs.zeta_width; + preserve_contents |= scissor.max_y < regs.zeta_height; + } + + depth_surface = texture_cache.GetDepthBufferSurface(preserve_contents); texture_cache.MarkDepthBufferInUse(); } texture_cache.GuardRenderTargets(false); FramebufferCacheKey key; - key.colors[0] = color_surface; - key.zeta = depth_surface; + key.colors[0] = std::move(color_surface); + key.zeta = std::move(depth_surface); state_tracker.NotifyFramebuffer(); glBindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer_cache.GetFramebuffer(key)); @@ -418,8 +446,7 @@ void RasterizerOpenGL::Clear() { if (regs.clear_buffers.R || regs.clear_buffers.G || regs.clear_buffers.B || regs.clear_buffers.A) { use_color = true; - } - if (use_color) { + state_tracker.NotifyColorMask0(); glColorMaski(0, regs.clear_buffers.R != 0, regs.clear_buffers.G != 0, regs.clear_buffers.B != 0, regs.clear_buffers.A != 0); @@ -457,7 +484,7 @@ void RasterizerOpenGL::Clear() { UNIMPLEMENTED_IF(regs.clear_flags.viewport); - ConfigureClearFramebuffer(use_color, use_depth, use_stencil); + ConfigureClearFramebuffer(use_color, use_depth || use_stencil); if (use_color) { glClearBufferfv(GL_COLOR, 0, regs.clear_color); diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h index ebd2173eb..87249fb6f 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.h +++ b/src/video_core/renderer_opengl/gl_rasterizer.h @@ -95,7 +95,8 @@ private: /// Configures the color and depth framebuffer states. void ConfigureFramebuffers(); - void ConfigureClearFramebuffer(bool using_color_fb, bool using_depth_fb, bool using_stencil_fb); + /// Configures the color and depth framebuffer for clearing. + void ConfigureClearFramebuffer(bool using_color, bool using_depth_stencil); /// Configures the current constbuffers to use for the draw command. 
void SetupDrawConstBuffers(std::size_t stage_index, const Shader& shader); diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp index f63156b8d..9759a7078 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.cpp +++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp @@ -10,8 +10,6 @@ #include <thread> #include <unordered_set> -#include <boost/functional/hash.hpp> - #include "common/alignment.h" #include "common/assert.h" #include "common/logging/log.h" @@ -28,76 +26,26 @@ #include "video_core/renderer_opengl/gl_shader_disk_cache.h" #include "video_core/renderer_opengl/gl_state_tracker.h" #include "video_core/renderer_opengl/utils.h" +#include "video_core/shader/memory_util.h" #include "video_core/shader/registry.h" #include "video_core/shader/shader_ir.h" namespace OpenGL { using Tegra::Engines::ShaderType; +using VideoCommon::Shader::GetShaderAddress; +using VideoCommon::Shader::GetShaderCode; +using VideoCommon::Shader::GetUniqueIdentifier; +using VideoCommon::Shader::KERNEL_MAIN_OFFSET; using VideoCommon::Shader::ProgramCode; using VideoCommon::Shader::Registry; using VideoCommon::Shader::ShaderIR; +using VideoCommon::Shader::STAGE_MAIN_OFFSET; namespace { -constexpr u32 STAGE_MAIN_OFFSET = 10; -constexpr u32 KERNEL_MAIN_OFFSET = 0; - constexpr VideoCommon::Shader::CompilerSettings COMPILER_SETTINGS{}; -/// Gets the address for the specified shader stage program -GPUVAddr GetShaderAddress(Core::System& system, Maxwell::ShaderProgram program) { - const auto& gpu{system.GPU().Maxwell3D()}; - const auto& shader_config{gpu.regs.shader_config[static_cast<std::size_t>(program)]}; - return gpu.regs.code_address.CodeAddress() + shader_config.offset; -} - -/// Gets if the current instruction offset is a scheduler instruction -constexpr bool IsSchedInstruction(std::size_t offset, std::size_t main_offset) { - // Sched instructions appear once every 4 instructions. - constexpr std::size_t SchedPeriod = 4; - const std::size_t absolute_offset = offset - main_offset; - return (absolute_offset % SchedPeriod) == 0; -} - -/// Calculates the size of a program stream -std::size_t CalculateProgramSize(const ProgramCode& program) { - constexpr std::size_t start_offset = 10; - // This is the encoded version of BRA that jumps to itself. All Nvidia - // shaders end with one. 
- constexpr u64 self_jumping_branch = 0xE2400FFFFF07000FULL; - constexpr u64 mask = 0xFFFFFFFFFF7FFFFFULL; - std::size_t offset = start_offset; - while (offset < program.size()) { - const u64 instruction = program[offset]; - if (!IsSchedInstruction(offset, start_offset)) { - if ((instruction & mask) == self_jumping_branch) { - // End on Maxwell's "nop" instruction - break; - } - if (instruction == 0) { - break; - } - } - offset++; - } - // The last instruction is included in the program size - return std::min(offset + 1, program.size()); -} - -/// Gets the shader program code from memory for the specified address -ProgramCode GetShaderCode(Tegra::MemoryManager& memory_manager, const GPUVAddr gpu_addr, - const u8* host_ptr) { - ProgramCode code(VideoCommon::Shader::MAX_PROGRAM_LENGTH); - ASSERT_OR_EXECUTE(host_ptr != nullptr, { - std::fill(code.begin(), code.end(), 0); - return code; - }); - memory_manager.ReadBlockUnsafe(gpu_addr, code.data(), code.size() * sizeof(u64)); - code.resize(CalculateProgramSize(code)); - return code; -} - /// Gets the shader type from a Maxwell program type constexpr GLenum GetGLShaderType(ShaderType shader_type) { switch (shader_type) { @@ -114,17 +62,6 @@ constexpr GLenum GetGLShaderType(ShaderType shader_type) { } } -/// Hashes one (or two) program streams -u64 GetUniqueIdentifier(ShaderType shader_type, bool is_a, const ProgramCode& code, - const ProgramCode& code_b = {}) { - u64 unique_identifier = boost::hash_value(code); - if (is_a) { - // VertexA programs include two programs - boost::hash_combine(unique_identifier, boost::hash_value(code_b)); - } - return unique_identifier; -} - constexpr const char* GetShaderTypeName(ShaderType shader_type) { switch (shader_type) { case ShaderType::Vertex: @@ -456,11 +393,12 @@ Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) { const auto host_ptr{memory_manager.GetPointer(address)}; // No shader found - create a new one - ProgramCode code{GetShaderCode(memory_manager, address, host_ptr)}; + ProgramCode code{GetShaderCode(memory_manager, address, host_ptr, false)}; ProgramCode code_b; if (program == Maxwell::ShaderProgram::VertexA) { const GPUVAddr address_b{GetShaderAddress(system, Maxwell::ShaderProgram::VertexB)}; - code_b = GetShaderCode(memory_manager, address_b, memory_manager.GetPointer(address_b)); + const u8* host_ptr_b = memory_manager.GetPointer(address_b); + code_b = GetShaderCode(memory_manager, address_b, host_ptr_b, false); } const auto unique_identifier = GetUniqueIdentifier( @@ -498,7 +436,7 @@ Shader ShaderCacheOpenGL::GetComputeKernel(GPUVAddr code_addr) { const auto host_ptr{memory_manager.GetPointer(code_addr)}; // No kernel found, create a new one - auto code{GetShaderCode(memory_manager, code_addr, host_ptr)}; + auto code{GetShaderCode(memory_manager, code_addr, host_ptr, true)}; const auto unique_identifier{GetUniqueIdentifier(ShaderType::Compute, false, code)}; const ShaderParameters params{system, disk_cache, device, diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp index be1c31978..a7f256ff9 100644 --- a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp +++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp @@ -140,6 +140,12 @@ void FixedPipelineState::BlendingAttachment::Fill(const Maxwell& regs, std::size enable.Assign(1); } +void FixedPipelineState::Fill(const Maxwell& regs) { + rasterizer.Fill(regs); + depth_stencil.Fill(regs); + color_blending.Fill(regs); +} + std::size_t 
FixedPipelineState::Hash() const noexcept { const u64 hash = Common::CityHash64(reinterpret_cast<const char*>(this), sizeof *this); return static_cast<std::size_t>(hash); @@ -149,15 +155,6 @@ bool FixedPipelineState::operator==(const FixedPipelineState& rhs) const noexcep return std::memcmp(this, &rhs, sizeof *this) == 0; } -FixedPipelineState GetFixedPipelineState(const Maxwell& regs) { - FixedPipelineState fixed_state; - fixed_state.rasterizer.Fill(regs); - fixed_state.depth_stencil.Fill(regs); - fixed_state.color_blending.Fill(regs); - fixed_state.padding = {}; - return fixed_state; -} - u32 FixedPipelineState::PackComparisonOp(Maxwell::ComparisonOp op) noexcept { // OpenGL enums go from 0x200 to 0x207 and the others from 1 to 8 // If we substract 0x200 to OpenGL enums and 1 to the others we get a 0-7 range. diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.h b/src/video_core/renderer_vulkan/fixed_pipeline_state.h index 9a950f4de..77188b862 100644 --- a/src/video_core/renderer_vulkan/fixed_pipeline_state.h +++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.h @@ -17,7 +17,7 @@ namespace Vulkan { using Maxwell = Tegra::Engines::Maxwell3D::Regs; -struct alignas(32) FixedPipelineState { +struct FixedPipelineState { static u32 PackComparisonOp(Maxwell::ComparisonOp op) noexcept; static Maxwell::ComparisonOp UnpackComparisonOp(u32 packed) noexcept; @@ -237,7 +237,8 @@ struct alignas(32) FixedPipelineState { Rasterizer rasterizer; DepthStencil depth_stencil; ColorBlending color_blending; - std::array<u8, 20> padding; + + void Fill(const Maxwell& regs); std::size_t Hash() const noexcept; @@ -250,9 +251,6 @@ struct alignas(32) FixedPipelineState { static_assert(std::has_unique_object_representations_v<FixedPipelineState>); static_assert(std::is_trivially_copyable_v<FixedPipelineState>); static_assert(std::is_trivially_constructible_v<FixedPipelineState>); -static_assert(sizeof(FixedPipelineState) % 32 == 0, "Size is not aligned"); - -FixedPipelineState GetFixedPipelineState(const Maxwell& regs); } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_device.h b/src/video_core/renderer_vulkan/vk_device.h index a4d841e26..c8640762d 100644 --- a/src/video_core/renderer_vulkan/vk_device.h +++ b/src/video_core/renderer_vulkan/vk_device.h @@ -82,11 +82,6 @@ public: return present_family; } - /// Returns true if the device is integrated with the host CPU. - bool IsIntegrated() const { - return properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU; - } - /// Returns the current Vulkan API version provided in Vulkan-formatted version numbers. 
u32 GetApiVersion() const { return properties.apiVersion; diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp index 8332b42aa..45bd1fc6c 100644 --- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp +++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp @@ -288,7 +288,7 @@ vk::Pipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& renderpa depth_stencil_ci.maxDepthBounds = 0.0f; std::array<VkPipelineColorBlendAttachmentState, Maxwell::NumRenderTargets> cb_attachments; - const std::size_t num_attachments = renderpass_params.color_attachments.size(); + const auto num_attachments = static_cast<std::size_t>(renderpass_params.num_color_attachments); for (std::size_t index = 0; index < num_attachments; ++index) { static constexpr std::array COMPONENT_TABLE = { VK_COLOR_COMPONENT_R_BIT, VK_COLOR_COMPONENT_G_BIT, VK_COLOR_COMPONENT_B_BIT, diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.cpp b/src/video_core/renderer_vulkan/vk_memory_manager.cpp index 6a9e658bf..b4c650a63 100644 --- a/src/video_core/renderer_vulkan/vk_memory_manager.cpp +++ b/src/video_core/renderer_vulkan/vk_memory_manager.cpp @@ -118,8 +118,7 @@ private: }; VKMemoryManager::VKMemoryManager(const VKDevice& device) - : device{device}, properties{device.GetPhysical().GetMemoryProperties()}, - is_memory_unified{GetMemoryUnified(properties)} {} + : device{device}, properties{device.GetPhysical().GetMemoryProperties()} {} VKMemoryManager::~VKMemoryManager() = default; @@ -209,16 +208,6 @@ VKMemoryCommit VKMemoryManager::TryAllocCommit(const VkMemoryRequirements& requi return {}; } -bool VKMemoryManager::GetMemoryUnified(const VkPhysicalDeviceMemoryProperties& properties) { - for (u32 heap_index = 0; heap_index < properties.memoryHeapCount; ++heap_index) { - if (!(properties.memoryHeaps[heap_index].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT)) { - // Memory is considered unified when heaps are device local only. - return false; - } - } - return true; -} - VKMemoryCommitImpl::VKMemoryCommitImpl(const VKDevice& device, VKMemoryAllocation* allocation, const vk::DeviceMemory& memory, u64 begin, u64 end) : device{device}, memory{memory}, interval{begin, end}, allocation{allocation} {} diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.h b/src/video_core/renderer_vulkan/vk_memory_manager.h index 5b6858e9b..1af88e3d4 100644 --- a/src/video_core/renderer_vulkan/vk_memory_manager.h +++ b/src/video_core/renderer_vulkan/vk_memory_manager.h @@ -40,11 +40,6 @@ public: /// Commits memory required by the image and binds it. VKMemoryCommit Commit(const vk::Image& image, bool host_visible); - /// Returns true if the memory allocations are done always in host visible and coherent memory. - bool IsMemoryUnified() const { - return is_memory_unified; - } - private: /// Allocates a chunk of memory. bool AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 type_mask, u64 size); @@ -53,12 +48,8 @@ private: VKMemoryCommit TryAllocCommit(const VkMemoryRequirements& requirements, VkMemoryPropertyFlags wanted_properties); - /// Returns true if the device uses an unified memory model. - static bool GetMemoryUnified(const VkPhysicalDeviceMemoryProperties& properties); - - const VKDevice& device; ///< Device handler. - const VkPhysicalDeviceMemoryProperties properties; ///< Physical device properties. - const bool is_memory_unified; ///< True if memory model is unified. + const VKDevice& device; ///< Device handler. 
+ const VkPhysicalDeviceMemoryProperties properties; ///< Physical device properties. std::vector<std::unique_ptr<VKMemoryAllocation>> allocations; ///< Current allocations. }; diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp index 91b1b16a5..9b703a2f0 100644 --- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp @@ -27,12 +27,18 @@ #include "video_core/renderer_vulkan/vk_update_descriptor.h" #include "video_core/renderer_vulkan/wrapper.h" #include "video_core/shader/compiler_settings.h" +#include "video_core/shader/memory_util.h" namespace Vulkan { MICROPROFILE_DECLARE(Vulkan_PipelineCache); using Tegra::Engines::ShaderType; +using VideoCommon::Shader::GetShaderAddress; +using VideoCommon::Shader::GetShaderCode; +using VideoCommon::Shader::KERNEL_MAIN_OFFSET; +using VideoCommon::Shader::ProgramCode; +using VideoCommon::Shader::STAGE_MAIN_OFFSET; namespace { @@ -45,60 +51,6 @@ constexpr VkDescriptorType STORAGE_IMAGE = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE; constexpr VideoCommon::Shader::CompilerSettings compiler_settings{ VideoCommon::Shader::CompileDepth::FullDecompile}; -/// Gets the address for the specified shader stage program -GPUVAddr GetShaderAddress(Core::System& system, Maxwell::ShaderProgram program) { - const auto& gpu{system.GPU().Maxwell3D()}; - const auto& shader_config{gpu.regs.shader_config[static_cast<std::size_t>(program)]}; - return gpu.regs.code_address.CodeAddress() + shader_config.offset; -} - -/// Gets if the current instruction offset is a scheduler instruction -constexpr bool IsSchedInstruction(std::size_t offset, std::size_t main_offset) { - // Sched instructions appear once every 4 instructions. - constexpr std::size_t SchedPeriod = 4; - const std::size_t absolute_offset = offset - main_offset; - return (absolute_offset % SchedPeriod) == 0; -} - -/// Calculates the size of a program stream -std::size_t CalculateProgramSize(const ProgramCode& program, bool is_compute) { - const std::size_t start_offset = is_compute ? 0 : 10; - // This is the encoded version of BRA that jumps to itself. All Nvidia - // shaders end with one. - constexpr u64 self_jumping_branch = 0xE2400FFFFF07000FULL; - constexpr u64 mask = 0xFFFFFFFFFF7FFFFFULL; - std::size_t offset = start_offset; - while (offset < program.size()) { - const u64 instruction = program[offset]; - if (!IsSchedInstruction(offset, start_offset)) { - if ((instruction & mask) == self_jumping_branch) { - // End on Maxwell's "nop" instruction - break; - } - if (instruction == 0) { - break; - } - } - ++offset; - } - // The last instruction is included in the program size - return std::min(offset + 1, program.size()); -} - -/// Gets the shader program code from memory for the specified address -ProgramCode GetShaderCode(Tegra::MemoryManager& memory_manager, const GPUVAddr gpu_addr, - const u8* host_ptr, bool is_compute) { - ProgramCode program_code(VideoCommon::Shader::MAX_PROGRAM_LENGTH); - ASSERT_OR_EXECUTE(host_ptr != nullptr, { - std::fill(program_code.begin(), program_code.end(), 0); - return program_code; - }); - memory_manager.ReadBlockUnsafe(gpu_addr, program_code.data(), - program_code.size() * sizeof(u64)); - program_code.resize(CalculateProgramSize(program_code, is_compute)); - return program_code; -} - constexpr std::size_t GetStageFromProgram(std::size_t program) { return program == 0 ? 
0 : program - 1; } @@ -161,6 +113,24 @@ u32 FillDescriptorLayout(const ShaderEntries& entries, } // Anonymous namespace +std::size_t GraphicsPipelineCacheKey::Hash() const noexcept { + const u64 hash = Common::CityHash64(reinterpret_cast<const char*>(this), sizeof *this); + return static_cast<std::size_t>(hash); +} + +bool GraphicsPipelineCacheKey::operator==(const GraphicsPipelineCacheKey& rhs) const noexcept { + return std::memcmp(&rhs, this, sizeof *this) == 0; +} + +std::size_t ComputePipelineCacheKey::Hash() const noexcept { + const u64 hash = Common::CityHash64(reinterpret_cast<const char*>(this), sizeof *this); + return static_cast<std::size_t>(hash); +} + +bool ComputePipelineCacheKey::operator==(const ComputePipelineCacheKey& rhs) const noexcept { + return std::memcmp(&rhs, this, sizeof *this) == 0; +} + CachedShader::CachedShader(Core::System& system, Tegra::Engines::ShaderType stage, GPUVAddr gpu_addr, VAddr cpu_addr, ProgramCode program_code, u32 main_offset) @@ -212,9 +182,9 @@ std::array<Shader, Maxwell::MaxShaderProgram> VKPipelineCache::GetShaders() { const auto host_ptr{memory_manager.GetPointer(program_addr)}; // No shader found - create a new one - constexpr u32 stage_offset = 10; + constexpr u32 stage_offset = STAGE_MAIN_OFFSET; const auto stage = static_cast<Tegra::Engines::ShaderType>(index == 0 ? 0 : index - 1); - auto code = GetShaderCode(memory_manager, program_addr, host_ptr, false); + ProgramCode code = GetShaderCode(memory_manager, program_addr, host_ptr, false); shader = std::make_shared<CachedShader>(system, stage, program_addr, *cpu_addr, std::move(code), stage_offset); @@ -270,11 +240,10 @@ VKComputePipeline& VKPipelineCache::GetComputePipeline(const ComputePipelineCach // No shader found - create a new one const auto host_ptr = memory_manager.GetPointer(program_addr); - auto code = GetShaderCode(memory_manager, program_addr, host_ptr, true); - constexpr u32 kernel_main_offset = 0; + ProgramCode code = GetShaderCode(memory_manager, program_addr, host_ptr, true); shader = std::make_shared<CachedShader>(system, Tegra::Engines::ShaderType::Compute, program_addr, *cpu_addr, std::move(code), - kernel_main_offset); + KERNEL_MAIN_OFFSET); if (cpu_addr) { Register(shader); } else { diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.h b/src/video_core/renderer_vulkan/vk_pipeline_cache.h index 602a0a340..ebddafb73 100644 --- a/src/video_core/renderer_vulkan/vk_pipeline_cache.h +++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.h @@ -7,7 +7,6 @@ #include <array> #include <cstddef> #include <memory> -#include <tuple> #include <type_traits> #include <unordered_map> #include <utility> @@ -25,6 +24,7 @@ #include "video_core/renderer_vulkan/vk_resource_manager.h" #include "video_core/renderer_vulkan/vk_shader_decompiler.h" #include "video_core/renderer_vulkan/wrapper.h" +#include "video_core/shader/memory_util.h" #include "video_core/shader/registry.h" #include "video_core/shader/shader_ir.h" #include "video_core/surface.h" @@ -47,46 +47,40 @@ class CachedShader; using Shader = std::shared_ptr<CachedShader>; using Maxwell = Tegra::Engines::Maxwell3D::Regs; -using ProgramCode = std::vector<u64>; - struct GraphicsPipelineCacheKey { FixedPipelineState fixed_state; - std::array<GPUVAddr, Maxwell::MaxShaderProgram> shaders; RenderPassParams renderpass_params; + std::array<GPUVAddr, Maxwell::MaxShaderProgram> shaders; + u64 padding; // This is necessary for unique object representations - std::size_t Hash() const noexcept { - std::size_t hash = fixed_state.Hash(); 
- for (const auto& shader : shaders) { - boost::hash_combine(hash, shader); - } - boost::hash_combine(hash, renderpass_params.Hash()); - return hash; - } + std::size_t Hash() const noexcept; - bool operator==(const GraphicsPipelineCacheKey& rhs) const noexcept { - return std::tie(fixed_state, shaders, renderpass_params) == - std::tie(rhs.fixed_state, rhs.shaders, rhs.renderpass_params); + bool operator==(const GraphicsPipelineCacheKey& rhs) const noexcept; + + bool operator!=(const GraphicsPipelineCacheKey& rhs) const noexcept { + return !operator==(rhs); } }; +static_assert(std::has_unique_object_representations_v<GraphicsPipelineCacheKey>); +static_assert(std::is_trivially_copyable_v<GraphicsPipelineCacheKey>); +static_assert(std::is_trivially_constructible_v<GraphicsPipelineCacheKey>); struct ComputePipelineCacheKey { - GPUVAddr shader{}; - u32 shared_memory_size{}; - std::array<u32, 3> workgroup_size{}; - - std::size_t Hash() const noexcept { - return static_cast<std::size_t>(shader) ^ - ((static_cast<std::size_t>(shared_memory_size) >> 7) << 40) ^ - static_cast<std::size_t>(workgroup_size[0]) ^ - (static_cast<std::size_t>(workgroup_size[1]) << 16) ^ - (static_cast<std::size_t>(workgroup_size[2]) << 24); - } + GPUVAddr shader; + u32 shared_memory_size; + std::array<u32, 3> workgroup_size; + + std::size_t Hash() const noexcept; + + bool operator==(const ComputePipelineCacheKey& rhs) const noexcept; - bool operator==(const ComputePipelineCacheKey& rhs) const noexcept { - return std::tie(shader, shared_memory_size, workgroup_size) == - std::tie(rhs.shader, rhs.shared_memory_size, rhs.workgroup_size); + bool operator!=(const ComputePipelineCacheKey& rhs) const noexcept { + return !operator==(rhs); } }; +static_assert(std::has_unique_object_representations_v<ComputePipelineCacheKey>); +static_assert(std::is_trivially_copyable_v<ComputePipelineCacheKey>); +static_assert(std::is_trivially_constructible_v<ComputePipelineCacheKey>); } // namespace Vulkan @@ -113,7 +107,8 @@ namespace Vulkan { class CachedShader final : public RasterizerCacheObject { public: explicit CachedShader(Core::System& system, Tegra::Engines::ShaderType stage, GPUVAddr gpu_addr, - VAddr cpu_addr, ProgramCode program_code, u32 main_offset); + VAddr cpu_addr, VideoCommon::Shader::ProgramCode program_code, + u32 main_offset); ~CachedShader(); GPUVAddr GetGpuAddr() const { @@ -145,7 +140,7 @@ private: Tegra::Engines::ShaderType stage); GPUVAddr gpu_addr{}; - ProgramCode program_code; + VideoCommon::Shader::ProgramCode program_code; VideoCommon::Shader::Registry registry; VideoCommon::Shader::ShaderIR shader_ir; ShaderEntries entries; diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp index 8a1f57891..776053de5 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp +++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp @@ -316,7 +316,8 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) { query_cache.UpdateCounters(); const auto& gpu = system.GPU().Maxwell3D(); - GraphicsPipelineCacheKey key{GetFixedPipelineState(gpu.regs)}; + GraphicsPipelineCacheKey key; + key.fixed_state.Fill(gpu.regs); buffer_cache.Map(CalculateGraphicsStreamBufferSize(is_indexed)); @@ -334,10 +335,11 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) { buffer_cache.Unmap(); - const auto texceptions = UpdateAttachments(); + const Texceptions texceptions = UpdateAttachments(); SetupImageTransitions(texceptions, color_attachments, zeta_attachment); 
key.renderpass_params = GetRenderPassParams(texceptions); + key.padding = 0; auto& pipeline = pipeline_cache.GetGraphicsPipeline(key); scheduler.BindGraphicsPipeline(pipeline.GetHandle()); @@ -453,10 +455,12 @@ void RasterizerVulkan::DispatchCompute(GPUVAddr code_addr) { query_cache.UpdateCounters(); const auto& launch_desc = system.GPU().KeplerCompute().launch_description; - const ComputePipelineCacheKey key{ - code_addr, - launch_desc.shared_alloc, - {launch_desc.block_dim_x, launch_desc.block_dim_y, launch_desc.block_dim_z}}; + ComputePipelineCacheKey key; + key.shader = code_addr; + key.shared_memory_size = launch_desc.shared_alloc; + key.workgroup_size = {launch_desc.block_dim_x, launch_desc.block_dim_y, + launch_desc.block_dim_z}; + auto& pipeline = pipeline_cache.GetComputePipeline(key); // Compute dispatches can't be executed inside a renderpass @@ -652,7 +656,7 @@ RasterizerVulkan::Texceptions RasterizerVulkan::UpdateAttachments() { Texceptions texceptions; for (std::size_t rt = 0; rt < Maxwell::NumRenderTargets; ++rt) { if (update_rendertargets) { - color_attachments[rt] = texture_cache.GetColorBufferSurface(rt); + color_attachments[rt] = texture_cache.GetColorBufferSurface(rt, true); } if (color_attachments[rt] && WalkAttachmentOverlaps(*color_attachments[rt])) { texceptions[rt] = true; @@ -660,7 +664,7 @@ RasterizerVulkan::Texceptions RasterizerVulkan::UpdateAttachments() { } if (update_rendertargets) { - zeta_attachment = texture_cache.GetDepthBufferSurface(); + zeta_attachment = texture_cache.GetDepthBufferSurface(true); } if (zeta_attachment && WalkAttachmentOverlaps(*zeta_attachment)) { texceptions[ZETA_TEXCEPTION_INDEX] = true; @@ -688,7 +692,7 @@ std::tuple<VkFramebuffer, VkExtent2D> RasterizerVulkan::ConfigureFramebuffers( FramebufferCacheKey key{renderpass, std::numeric_limits<u32>::max(), std::numeric_limits<u32>::max(), std::numeric_limits<u32>::max()}; - const auto try_push = [&](const View& view) { + const auto try_push = [&key](const View& view) { if (!view) { return false; } @@ -699,7 +703,9 @@ std::tuple<VkFramebuffer, VkExtent2D> RasterizerVulkan::ConfigureFramebuffers( return true; }; - for (std::size_t index = 0; index < std::size(color_attachments); ++index) { + const auto& regs = system.GPU().Maxwell3D().regs; + const std::size_t num_attachments = static_cast<std::size_t>(regs.rt_control.count); + for (std::size_t index = 0; index < num_attachments; ++index) { if (try_push(color_attachments[index])) { texture_cache.MarkColorBufferInUse(index); } @@ -877,8 +883,12 @@ void RasterizerVulkan::SetupVertexArrays(FixedPipelineState::VertexInput& vertex const GPUVAddr start{vertex_array.StartAddress()}; const GPUVAddr end{regs.vertex_array_limit[index].LimitAddress()}; - ASSERT(end > start); - const std::size_t size{end - start + 1}; + ASSERT(end >= start); + const std::size_t size{end - start}; + if (size == 0) { + buffer_bindings.AddVertexBinding(DefaultBuffer(), 0); + continue; + } const auto [buffer, offset] = buffer_cache.UploadMemory(start, size); buffer_bindings.AddVertexBinding(buffer, offset); } @@ -1033,8 +1043,7 @@ void RasterizerVulkan::SetupConstBuffer(const ConstBufferEntry& entry, const Tegra::Engines::ConstBufferInfo& buffer) { if (!buffer.enabled) { // Set values to zero to unbind buffers - update_descriptor_queue.AddBuffer(buffer_cache.GetEmptyBuffer(sizeof(float)), 0, - sizeof(float)); + update_descriptor_queue.AddBuffer(DefaultBuffer(), 0, DEFAULT_BUFFER_SIZE); return; } @@ -1057,7 +1066,9 @@ void RasterizerVulkan::SetupGlobalBuffer(const 
GlobalBufferEntry& entry, GPUVAdd if (size == 0) { // Sometimes global memory pointers don't have a proper size. Upload a dummy entry // because Vulkan doesn't like empty buffers. - constexpr std::size_t dummy_size = 4; + // Note: Do *not* use DefaultBuffer() here, storage buffers can be written breaking the + // default buffer. + static constexpr std::size_t dummy_size = 4; const auto buffer = buffer_cache.GetEmptyBuffer(dummy_size); update_descriptor_queue.AddBuffer(buffer, 0, dummy_size); return; @@ -1222,7 +1233,7 @@ std::size_t RasterizerVulkan::CalculateVertexArraysSize() const { const GPUVAddr end{regs.vertex_array_limit[index].LimitAddress()}; DEBUG_ASSERT(end >= start); - size += (end - start + 1) * regs.vertex_array[index].enable; + size += (end - start) * regs.vertex_array[index].enable; } return size; } @@ -1245,28 +1256,54 @@ std::size_t RasterizerVulkan::CalculateConstBufferSize( } RenderPassParams RasterizerVulkan::GetRenderPassParams(Texceptions texceptions) const { - using namespace VideoCore::Surface; - const auto& regs = system.GPU().Maxwell3D().regs; - RenderPassParams renderpass_params; + const std::size_t num_attachments = static_cast<std::size_t>(regs.rt_control.count); + + RenderPassParams params; + params.color_formats = {}; + std::size_t color_texceptions = 0; - for (std::size_t rt = 0; rt < static_cast<std::size_t>(regs.rt_control.count); ++rt) { + std::size_t index = 0; + for (std::size_t rt = 0; rt < num_attachments; ++rt) { const auto& rendertarget = regs.rt[rt]; if (rendertarget.Address() == 0 || rendertarget.format == Tegra::RenderTargetFormat::NONE) { continue; } - renderpass_params.color_attachments.push_back(RenderPassParams::ColorAttachment{ - static_cast<u32>(rt), PixelFormatFromRenderTargetFormat(rendertarget.format), - texceptions[rt]}); + params.color_formats[index] = static_cast<u8>(rendertarget.format); + color_texceptions |= (texceptions[rt] ? 1ULL : 0ULL) << index; + ++index; } + params.num_color_attachments = static_cast<u8>(index); + params.texceptions = static_cast<u8>(color_texceptions); - renderpass_params.has_zeta = regs.zeta_enable; - if (renderpass_params.has_zeta) { - renderpass_params.zeta_pixel_format = PixelFormatFromDepthFormat(regs.zeta.format); - renderpass_params.zeta_texception = texceptions[ZETA_TEXCEPTION_INDEX]; + params.zeta_format = regs.zeta_enable ? 
static_cast<u8>(regs.zeta.format) : 0; + params.zeta_texception = texceptions[ZETA_TEXCEPTION_INDEX]; + return params; +} + +VkBuffer RasterizerVulkan::DefaultBuffer() { + if (default_buffer) { + return *default_buffer; } - return renderpass_params; + VkBufferCreateInfo ci; + ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; + ci.pNext = nullptr; + ci.flags = 0; + ci.size = DEFAULT_BUFFER_SIZE; + ci.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | + VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; + ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; + ci.queueFamilyIndexCount = 0; + ci.pQueueFamilyIndices = nullptr; + default_buffer = device.GetLogical().CreateBuffer(ci); + default_buffer_commit = memory_manager.Commit(default_buffer, false); + + scheduler.RequestOutsideRenderPassOperationContext(); + scheduler.Record([buffer = *default_buffer](vk::CommandBuffer cmdbuf) { + cmdbuf.FillBuffer(buffer, 0, DEFAULT_BUFFER_SIZE, 0); + }); + return *default_buffer; } } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h index 2fa46b0cc..d41a7929e 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.h +++ b/src/video_core/renderer_vulkan/vk_rasterizer.h @@ -155,6 +155,7 @@ private: using Texceptions = std::bitset<Maxwell::NumRenderTargets + 1>; static constexpr std::size_t ZETA_TEXCEPTION_INDEX = 8; + static constexpr VkDeviceSize DEFAULT_BUFFER_SIZE = 4 * sizeof(float); void FlushWork(); @@ -247,6 +248,8 @@ private: RenderPassParams GetRenderPassParams(Texceptions texceptions) const; + VkBuffer DefaultBuffer(); + Core::System& system; Core::Frontend::EmuWindow& render_window; VKScreenInfo& screen_info; @@ -271,6 +274,9 @@ private: VKFenceManager fence_manager; VKQueryCache query_cache; + vk::Buffer default_buffer; + VKMemoryCommit default_buffer_commit; + std::array<View, Maxwell::NumRenderTargets> color_attachments; View zeta_attachment; diff --git a/src/video_core/renderer_vulkan/vk_renderpass_cache.cpp b/src/video_core/renderer_vulkan/vk_renderpass_cache.cpp index 4e5286a69..3f71d005e 100644 --- a/src/video_core/renderer_vulkan/vk_renderpass_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_renderpass_cache.cpp @@ -2,9 +2,11 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. 
+#include <cstring> #include <memory> #include <vector> +#include "common/cityhash.h" #include "video_core/engines/maxwell_3d.h" #include "video_core/renderer_vulkan/maxwell_to_vk.h" #include "video_core/renderer_vulkan/vk_device.h" @@ -13,6 +15,15 @@ namespace Vulkan { +std::size_t RenderPassParams::Hash() const noexcept { + const u64 hash = Common::CityHash64(reinterpret_cast<const char*>(this), sizeof *this); + return static_cast<std::size_t>(hash); +} + +bool RenderPassParams::operator==(const RenderPassParams& rhs) const noexcept { + return std::memcmp(&rhs, this, sizeof *this) == 0; +} + VKRenderPassCache::VKRenderPassCache(const VKDevice& device) : device{device} {} VKRenderPassCache::~VKRenderPassCache() = default; @@ -27,20 +38,22 @@ VkRenderPass VKRenderPassCache::GetRenderPass(const RenderPassParams& params) { } vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& params) const { + using namespace VideoCore::Surface; std::vector<VkAttachmentDescription> descriptors; std::vector<VkAttachmentReference> color_references; - for (std::size_t rt = 0; rt < params.color_attachments.size(); ++rt) { - const auto attachment = params.color_attachments[rt]; - const auto format = - MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, attachment.pixel_format); + const std::size_t num_attachments = static_cast<std::size_t>(params.num_color_attachments); + for (std::size_t rt = 0; rt < num_attachments; ++rt) { + const auto guest_format = static_cast<Tegra::RenderTargetFormat>(params.color_formats[rt]); + const PixelFormat pixel_format = PixelFormatFromRenderTargetFormat(guest_format); + const auto format = MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, pixel_format); ASSERT_MSG(format.attachable, "Trying to attach a non-attachable format with format={}", - static_cast<u32>(attachment.pixel_format)); + static_cast<int>(pixel_format)); - // TODO(Rodrigo): Add eMayAlias when it's needed. - const auto color_layout = attachment.is_texception - ? VK_IMAGE_LAYOUT_GENERAL - : VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; + // TODO(Rodrigo): Add MAY_ALIAS_BIT when it's needed. + const VkImageLayout color_layout = ((params.texceptions >> rt) & 1) != 0 + ? VK_IMAGE_LAYOUT_GENERAL + : VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; VkAttachmentDescription& descriptor = descriptors.emplace_back(); descriptor.flags = VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT; descriptor.format = format.format; @@ -58,15 +71,17 @@ vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& param } VkAttachmentReference zeta_attachment_ref; - if (params.has_zeta) { - const auto format = - MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, params.zeta_pixel_format); + const bool has_zeta = params.zeta_format != 0; + if (has_zeta) { + const auto guest_format = static_cast<Tegra::DepthFormat>(params.zeta_format); + const PixelFormat pixel_format = PixelFormatFromDepthFormat(guest_format); + const auto format = MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, pixel_format); ASSERT_MSG(format.attachable, "Trying to attach a non-attachable format with format={}", - static_cast<u32>(params.zeta_pixel_format)); + static_cast<int>(pixel_format)); - const auto zeta_layout = params.zeta_texception - ? VK_IMAGE_LAYOUT_GENERAL - : VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; + const VkImageLayout zeta_layout = params.zeta_texception != 0 + ? 
VK_IMAGE_LAYOUT_GENERAL + : VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; VkAttachmentDescription& descriptor = descriptors.emplace_back(); descriptor.flags = 0; descriptor.format = format.format; @@ -78,7 +93,7 @@ vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& param descriptor.initialLayout = zeta_layout; descriptor.finalLayout = zeta_layout; - zeta_attachment_ref.attachment = static_cast<u32>(params.color_attachments.size()); + zeta_attachment_ref.attachment = static_cast<u32>(num_attachments); zeta_attachment_ref.layout = zeta_layout; } @@ -90,7 +105,7 @@ vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& param subpass_description.colorAttachmentCount = static_cast<u32>(color_references.size()); subpass_description.pColorAttachments = color_references.data(); subpass_description.pResolveAttachments = nullptr; - subpass_description.pDepthStencilAttachment = params.has_zeta ? &zeta_attachment_ref : nullptr; + subpass_description.pDepthStencilAttachment = has_zeta ? &zeta_attachment_ref : nullptr; subpass_description.preserveAttachmentCount = 0; subpass_description.pPreserveAttachments = nullptr; @@ -101,7 +116,7 @@ vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& param stage |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; } - if (params.has_zeta) { + if (has_zeta) { access |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; stage |= VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT; diff --git a/src/video_core/renderer_vulkan/vk_renderpass_cache.h b/src/video_core/renderer_vulkan/vk_renderpass_cache.h index 921b6efb5..8b0fec720 100644 --- a/src/video_core/renderer_vulkan/vk_renderpass_cache.h +++ b/src/video_core/renderer_vulkan/vk_renderpass_cache.h @@ -4,8 +4,7 @@ #pragma once -#include <memory> -#include <tuple> +#include <type_traits> #include <unordered_map> #include <boost/container/static_vector.hpp> @@ -19,51 +18,25 @@ namespace Vulkan { class VKDevice; -// TODO(Rodrigo): Optimize this structure for faster hashing - struct RenderPassParams { - struct ColorAttachment { - u32 index = 0; - VideoCore::Surface::PixelFormat pixel_format = VideoCore::Surface::PixelFormat::Invalid; - bool is_texception = false; - - std::size_t Hash() const noexcept { - return static_cast<std::size_t>(pixel_format) | - static_cast<std::size_t>(is_texception) << 6 | - static_cast<std::size_t>(index) << 7; - } - - bool operator==(const ColorAttachment& rhs) const noexcept { - return std::tie(index, pixel_format, is_texception) == - std::tie(rhs.index, rhs.pixel_format, rhs.is_texception); - } - }; - - boost::container::static_vector<ColorAttachment, - Tegra::Engines::Maxwell3D::Regs::NumRenderTargets> - color_attachments{}; - // TODO(Rodrigo): Unify has_zeta into zeta_pixel_format and zeta_component_type. 
diff --git a/src/video_core/renderer_vulkan/vk_renderpass_cache.h b/src/video_core/renderer_vulkan/vk_renderpass_cache.h
index 921b6efb5..8b0fec720 100644
--- a/src/video_core/renderer_vulkan/vk_renderpass_cache.h
+++ b/src/video_core/renderer_vulkan/vk_renderpass_cache.h
@@ -4,8 +4,7 @@
 
 #pragma once
 
-#include <memory>
-#include <tuple>
+#include <type_traits>
 #include <unordered_map>
 
 #include <boost/container/static_vector.hpp>
@@ -19,51 +18,25 @@ namespace Vulkan {
 
 class VKDevice;
 
-// TODO(Rodrigo): Optimize this structure for faster hashing
-
 struct RenderPassParams {
-    struct ColorAttachment {
-        u32 index = 0;
-        VideoCore::Surface::PixelFormat pixel_format = VideoCore::Surface::PixelFormat::Invalid;
-        bool is_texception = false;
-
-        std::size_t Hash() const noexcept {
-            return static_cast<std::size_t>(pixel_format) |
-                   static_cast<std::size_t>(is_texception) << 6 |
-                   static_cast<std::size_t>(index) << 7;
-        }
-
-        bool operator==(const ColorAttachment& rhs) const noexcept {
-            return std::tie(index, pixel_format, is_texception) ==
-                   std::tie(rhs.index, rhs.pixel_format, rhs.is_texception);
-        }
-    };
-
-    boost::container::static_vector<ColorAttachment,
-                                    Tegra::Engines::Maxwell3D::Regs::NumRenderTargets>
-        color_attachments{};
-    // TODO(Rodrigo): Unify has_zeta into zeta_pixel_format and zeta_component_type.
-    VideoCore::Surface::PixelFormat zeta_pixel_format = VideoCore::Surface::PixelFormat::Invalid;
-    bool has_zeta = false;
-    bool zeta_texception = false;
-
-    std::size_t Hash() const noexcept {
-        std::size_t hash = 0;
-        for (const auto& rt : color_attachments) {
-            boost::hash_combine(hash, rt.Hash());
-        }
-        boost::hash_combine(hash, zeta_pixel_format);
-        boost::hash_combine(hash, has_zeta);
-        boost::hash_combine(hash, zeta_texception);
-        return hash;
-    }
+    std::array<u8, Tegra::Engines::Maxwell3D::Regs::NumRenderTargets> color_formats;
+    u8 num_color_attachments;
+    u8 texceptions;
+
+    u8 zeta_format;
+    u8 zeta_texception;
+
+    std::size_t Hash() const noexcept;
+
+    bool operator==(const RenderPassParams& rhs) const noexcept;
 
-    bool operator==(const RenderPassParams& rhs) const {
-        return std::tie(color_attachments, zeta_pixel_format, has_zeta, zeta_texception) ==
-               std::tie(rhs.color_attachments, rhs.zeta_pixel_format, rhs.has_zeta,
-                        rhs.zeta_texception);
+    bool operator!=(const RenderPassParams& rhs) const noexcept {
+        return !operator==(rhs);
     }
 };
+static_assert(std::has_unique_object_representations_v<RenderPassParams>);
+static_assert(std::is_trivially_copyable_v<RenderPassParams>);
+static_assert(std::is_trivially_constructible_v<RenderPassParams>);
 
 } // namespace Vulkan
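
The three static_asserts are what make the byte-wise `Hash` and `operator==` in the .cpp sound: `std::has_unique_object_representations_v` guarantees the struct has no padding bytes, so every object byte contributes to the value and memcmp/CityHash64 over `this` cannot be confused by indeterminate padding. A small sketch of what the trait accepts and rejects, assuming a typical ABI:

    #include <cstdint>
    #include <type_traits>

    // No padding: every byte participates, so memcmp and byte-hashing see exactly the value.
    struct Packed {
        std::uint8_t a;
        std::uint8_t b;
    };
    static_assert(std::has_unique_object_representations_v<Packed>);

    // On common ABIs, three padding bytes follow `a`; their contents are indeterminate,
    // so two equal values could still differ under memcmp. The trait rejects this layout.
    struct Padded {
        std::uint8_t a;
        std::uint32_t b;
    };
    static_assert(!std::has_unique_object_representations_v<Padded>);
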
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
index 94d954d7a..45c180221 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
@@ -39,8 +39,7 @@ VKStagingBufferPool::StagingBuffer& VKStagingBufferPool::StagingBuffer::operator
 
 VKStagingBufferPool::VKStagingBufferPool(const VKDevice& device, VKMemoryManager& memory_manager,
                                          VKScheduler& scheduler)
-    : device{device}, memory_manager{memory_manager}, scheduler{scheduler},
-      is_device_integrated{device.IsIntegrated()} {}
+    : device{device}, memory_manager{memory_manager}, scheduler{scheduler} {}
 
 VKStagingBufferPool::~VKStagingBufferPool() = default;
 
@@ -56,9 +55,7 @@ void VKStagingBufferPool::TickFrame() {
     current_delete_level = (current_delete_level + 1) % NumLevels;
 
     ReleaseCache(true);
-    if (!is_device_integrated) {
-        ReleaseCache(false);
-    }
+    ReleaseCache(false);
 }
 
 VKBuffer* VKStagingBufferPool::TryGetReservedBuffer(std::size_t size, bool host_visible) {
@@ -81,7 +78,7 @@ VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_v
     ci.size = 1ULL << log2;
     ci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
                VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
-               VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
+               VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
     ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
     ci.queueFamilyIndexCount = 0;
     ci.pQueueFamilyIndices = nullptr;
@@ -95,7 +92,7 @@ VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_v
 }
 
 VKStagingBufferPool::StagingBuffersCache& VKStagingBufferPool::GetCache(bool host_visible) {
-    return is_device_integrated || host_visible ? host_staging_buffers : device_staging_buffers;
+    return host_visible ? host_staging_buffers : device_staging_buffers;
 }
 
 void VKStagingBufferPool::ReleaseCache(bool host_visible) {
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
index a0840ff8c..faf6418fd 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
@@ -71,7 +71,6 @@ private:
     const VKDevice& device;
     VKMemoryManager& memory_manager;
    VKScheduler& scheduler;
-    const bool is_device_integrated;
 
     StagingBuffersCache host_staging_buffers;
     StagingBuffersCache device_staging_buffers;
diff --git a/src/video_core/shader/control_flow.cpp b/src/video_core/shader/control_flow.cpp
index e00a3fb70..8d86020f6 100644
--- a/src/video_core/shader/control_flow.cpp
+++ b/src/video_core/shader/control_flow.cpp
@@ -13,6 +13,7 @@
 #include "common/common_types.h"
 #include "video_core/shader/ast.h"
 #include "video_core/shader/control_flow.h"
+#include "video_core/shader/memory_util.h"
 #include "video_core/shader/registry.h"
 #include "video_core/shader/shader_ir.h"
 
@@ -115,17 +116,6 @@ Pred GetPredicate(u32 index, bool negated) {
     return static_cast<Pred>(static_cast<u64>(index) + (negated ? 8ULL : 0ULL));
 }
 
-/**
- * Returns whether the instruction at the specified offset is a 'sched' instruction.
- * Sched instructions always appear before a sequence of 3 instructions.
- */
-constexpr bool IsSchedInstruction(u32 offset, u32 main_offset) {
-    constexpr u32 SchedPeriod = 4;
-    u32 absolute_offset = offset - main_offset;
-
-    return (absolute_offset % SchedPeriod) == 0;
-}
-
 enum class ParseResult : u32 {
     ControlCaught,
     BlockEnd,
diff --git a/src/video_core/shader/decode.cpp b/src/video_core/shader/decode.cpp
index 87ac9ac6c..1167ff4ec 100644
--- a/src/video_core/shader/decode.cpp
+++ b/src/video_core/shader/decode.cpp
@@ -13,6 +13,7 @@
 #include "video_core/engines/shader_bytecode.h"
 #include "video_core/engines/shader_header.h"
 #include "video_core/shader/control_flow.h"
+#include "video_core/shader/memory_util.h"
 #include "video_core/shader/node_helper.h"
 #include "video_core/shader/shader_ir.h"
 
@@ -23,17 +24,6 @@ using Tegra::Shader::OpCode;
 
 namespace {
 
-/**
- * Returns whether the instruction at the specified offset is a 'sched' instruction.
- * Sched instructions always appear before a sequence of 3 instructions.
- */
-constexpr bool IsSchedInstruction(u32 offset, u32 main_offset) {
-    constexpr u32 SchedPeriod = 4;
-    u32 absolute_offset = offset - main_offset;
-
-    return (absolute_offset % SchedPeriod) == 0;
-}
-
 void DeduceTextureHandlerSize(VideoCore::GuestDriverProfile& gpu_driver,
                               const std::list<Sampler>& used_samplers) {
     if (gpu_driver.IsTextureHandlerSizeKnown() || used_samplers.size() <= 1) {
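
Both duplicated copies of IsSchedInstruction removed above move into the new shader/memory_util unit. The encoding detail behind it: starting at the program's main offset, every fourth 64-bit slot is a scheduling bundle rather than an executable instruction. A standalone sketch of that layout, using an illustrative helper that is not the patch's API:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Starting at main_offset, every fourth 64-bit slot is a sched bundle carrying
    // scheduling metadata for the following three instructions.
    bool IsSchedSlot(std::size_t offset, std::size_t main_offset) {
        constexpr std::size_t SchedPeriod = 4;
        return (offset - main_offset) % SchedPeriod == 0;
    }

    // Counts slots that hold executable instructions, skipping the sched bundles.
    std::size_t CountExecutableSlots(const std::vector<std::uint64_t>& code,
                                     std::size_t main_offset) {
        std::size_t count = 0;
        for (std::size_t offset = main_offset; offset < code.size(); ++offset) {
            if (!IsSchedSlot(offset, main_offset)) {
                ++count; // 3 out of every 4 slots are real instructions
            }
        }
        return count;
    }
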
diff --git a/src/video_core/shader/memory_util.cpp b/src/video_core/shader/memory_util.cpp
new file mode 100644
index 000000000..074f21691
--- /dev/null
+++ b/src/video_core/shader/memory_util.cpp
@@ -0,0 +1,77 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <algorithm>
+#include <cstddef>
+
+#include <boost/container_hash/hash.hpp>
+
+#include "common/common_types.h"
+#include "core/core.h"
+#include "video_core/engines/maxwell_3d.h"
+#include "video_core/memory_manager.h"
+#include "video_core/shader/memory_util.h"
+#include "video_core/shader/shader_ir.h"
+
+namespace VideoCommon::Shader {
+
+GPUVAddr GetShaderAddress(Core::System& system,
+                          Tegra::Engines::Maxwell3D::Regs::ShaderProgram program) {
+    const auto& gpu{system.GPU().Maxwell3D()};
+    const auto& shader_config{gpu.regs.shader_config[static_cast<std::size_t>(program)]};
+    return gpu.regs.code_address.CodeAddress() + shader_config.offset;
+}
+
+bool IsSchedInstruction(std::size_t offset, std::size_t main_offset) {
+    // Sched instructions appear once every 4 instructions.
+    constexpr std::size_t SchedPeriod = 4;
+    const std::size_t absolute_offset = offset - main_offset;
+    return (absolute_offset % SchedPeriod) == 0;
+}
+
+std::size_t CalculateProgramSize(const ProgramCode& program, bool is_compute) {
+    // This is the encoded version of BRA that jumps to itself. All Nvidia
+    // shaders end with one.
+    static constexpr u64 SELF_JUMPING_BRANCH = 0xE2400FFFFF07000FULL;
+    static constexpr u64 MASK = 0xFFFFFFFFFF7FFFFFULL;
+
+    const std::size_t start_offset = is_compute ? KERNEL_MAIN_OFFSET : STAGE_MAIN_OFFSET;
+    std::size_t offset = start_offset;
+    while (offset < program.size()) {
+        const u64 instruction = program[offset];
+        if (!IsSchedInstruction(offset, start_offset)) {
+            if ((instruction & MASK) == SELF_JUMPING_BRANCH) {
+                // End on Maxwell's "nop" instruction
+                break;
+            }
+            if (instruction == 0) {
+                break;
+            }
+        }
+        ++offset;
+    }
+    // The last instruction is included in the program size
+    return std::min(offset + 1, program.size());
+}
+
+ProgramCode GetShaderCode(Tegra::MemoryManager& memory_manager, GPUVAddr gpu_addr,
+                          const u8* host_ptr, bool is_compute) {
+    ProgramCode code(VideoCommon::Shader::MAX_PROGRAM_LENGTH);
+    ASSERT_OR_EXECUTE(host_ptr != nullptr, { return code; });
+    memory_manager.ReadBlockUnsafe(gpu_addr, code.data(), code.size() * sizeof(u64));
+    code.resize(CalculateProgramSize(code, is_compute));
+    return code;
+}
+
+u64 GetUniqueIdentifier(Tegra::Engines::ShaderType shader_type, bool is_a, const ProgramCode& code,
+                        const ProgramCode& code_b) {
+    u64 unique_identifier = boost::hash_value(code);
+    if (is_a) {
+        // VertexA programs include two programs
+        boost::hash_combine(unique_identifier, boost::hash_value(code_b));
+    }
+    return unique_identifier;
+}
+
+} // namespace VideoCommon::Shader
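
A hypothetical call site showing how these helpers compose into a shader cache key; `memory_manager`, `gpu_addr`, and `host_ptr` are assumed to come from the surrounding cache code, and the stage choice is illustrative rather than taken from this patch:

    #include "video_core/shader/memory_util.h"

    // Hypothetical helper, not part of the patch: fetch a stage's code and derive
    // the identifier a pipeline/disk cache would key on.
    u64 MakeShaderCacheKey(Tegra::MemoryManager& memory_manager, GPUVAddr gpu_addr,
                           const u8* host_ptr) {
        using namespace VideoCommon::Shader;
        const ProgramCode code = GetShaderCode(memory_manager, gpu_addr, host_ptr,
                                               /*is_compute=*/false);
        // A VertexA+VertexB pair would pass is_a=true plus the second stream; a
        // single stage hashes one stream, and code_b defaults to empty.
        return GetUniqueIdentifier(Tegra::Engines::ShaderType::Vertex, /*is_a=*/false, code);
    }
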
diff --git a/src/video_core/shader/memory_util.h b/src/video_core/shader/memory_util.h
new file mode 100644
index 000000000..be90d24fd
--- /dev/null
+++ b/src/video_core/shader/memory_util.h
@@ -0,0 +1,47 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <cstddef>
+#include <vector>
+
+#include "common/common_types.h"
+#include "video_core/engines/maxwell_3d.h"
+#include "video_core/engines/shader_type.h"
+
+namespace Core {
+class System;
+}
+
+namespace Tegra {
+class MemoryManager;
+}
+
+namespace VideoCommon::Shader {
+
+using ProgramCode = std::vector<u64>;
+
+constexpr u32 STAGE_MAIN_OFFSET = 10;
+constexpr u32 KERNEL_MAIN_OFFSET = 0;
+
+/// Gets the address for the specified shader stage program
+GPUVAddr GetShaderAddress(Core::System& system,
+                          Tegra::Engines::Maxwell3D::Regs::ShaderProgram program);
+
+/// Returns whether the instruction at the given offset is a scheduler instruction
+bool IsSchedInstruction(std::size_t offset, std::size_t main_offset);
+
+/// Calculates the size of a program stream
+std::size_t CalculateProgramSize(const ProgramCode& program, bool is_compute);
+
+/// Gets the shader program code from memory for the specified address
+ProgramCode GetShaderCode(Tegra::MemoryManager& memory_manager, GPUVAddr gpu_addr,
+                          const u8* host_ptr, bool is_compute);
+
+/// Hashes one (or two) program streams
+u64 GetUniqueIdentifier(Tegra::Engines::ShaderType shader_type, bool is_a, const ProgramCode& code,
+                        const ProgramCode& code_b = {});
+
+} // namespace VideoCommon::Shader
diff --git a/src/video_core/shader/shader_ir.h b/src/video_core/shader/shader_ir.h
index c6e7bdf50..69de5e68b 100644
--- a/src/video_core/shader/shader_ir.h
+++ b/src/video_core/shader/shader_ir.h
@@ -18,6 +18,7 @@
 #include "video_core/engines/shader_header.h"
 #include "video_core/shader/ast.h"
 #include "video_core/shader/compiler_settings.h"
+#include "video_core/shader/memory_util.h"
 #include "video_core/shader/node.h"
 #include "video_core/shader/registry.h"
 
@@ -25,8 +26,6 @@ namespace VideoCommon::Shader {
 
 struct ShaderBlock;
 
-using ProgramCode = std::vector<u64>;
-
 constexpr u32 MAX_PROGRAM_LENGTH = 0x1000;
 
 class ConstBuffer {
diff --git a/src/video_core/shader/track.cpp b/src/video_core/shader/track.cpp
index 513e9bf49..eb97bfd41 100644
--- a/src/video_core/shader/track.cpp
+++ b/src/video_core/shader/track.cpp
@@ -153,21 +153,13 @@ std::tuple<Node, u32, u32> ShaderIR::TrackCbuf(Node tracked, const NodeBlock& co
         if (gpr->GetIndex() == Tegra::Shader::Register::ZeroIndex) {
             return {};
         }
-        s64 current_cursor = cursor;
-        while (current_cursor > 0) {
-            // Reduce the cursor in one to avoid infinite loops when the instruction sets the same
-            // register that it uses as operand
-            const auto [source, new_cursor] = TrackRegister(gpr, code, current_cursor - 1);
-            current_cursor = new_cursor;
-            if (!source) {
-                continue;
-            }
-            const auto [base_address, index, offset] = TrackCbuf(source, code, current_cursor);
-            if (base_address != nullptr) {
-                return {base_address, index, offset};
-            }
+        // Reduce the cursor by one to avoid infinite loops when the instruction sets the same
+        // register that it uses as an operand
+        const auto [source, new_cursor] = TrackRegister(gpr, code, cursor - 1);
+        if (!source) {
+            return {};
         }
-        return {};
+        return TrackCbuf(source, code, new_cursor);
     }
     if (const auto operation = std::get_if<OperationNode>(&*tracked)) {
         for (std::size_t i = operation->GetOperandsCount(); i > 0; --i) {
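
The TrackCbuf rewrite above replaces a retry loop with a single-step-and-recurse shape: look one slot behind the cursor for the defining write, then recurse from that definition, so each chain is walked exactly once instead of rescanning ever-earlier cursors after a failure. A simplified standalone sketch of that shape, using a toy data model rather than the ShaderIR node types:

    #include <cstddef>
    #include <optional>
    #include <vector>

    // Toy model of a def chain: inst[i] writes register `dst` by copying register
    // `src` (src < 0 means the value comes from a constant buffer, so tracking ends).
    struct Inst {
        int dst;
        int src;
    };

    // Mirror of the rewritten shape: find the latest write, then recurse on its
    // source instead of retrying every older cursor position.
    std::optional<std::size_t> TrackCbufOrigin(const std::vector<Inst>& code, int reg,
                                               std::size_t cursor) {
        // The scan starts one slot behind `cursor`, so an instruction that reads and
        // writes the same register cannot match itself (the guard TrackCbuf keeps).
        for (std::size_t i = cursor; i-- > 0;) {
            if (code[i].dst != reg) {
                continue;
            }
            if (code[i].src < 0) {
                return i; // reached a constant buffer read: tracking succeeded
            }
            return TrackCbufOrigin(code, code[i].src, i); // follow the single chain
        }
        return std::nullopt; // no definition found: fail once, with no retry loop
    }
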
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index cf6bd005a..d2d2846e6 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -143,7 +143,7 @@ public:
         }
         const auto params{SurfaceParams::CreateForTexture(format_lookup_table, tic, entry)};
-        const auto [surface, view] = GetSurface(gpu_addr, *cpu_addr, params, false);
+        const auto [surface, view] = GetSurface(gpu_addr, *cpu_addr, params, true, false);
         if (guard_samplers) {
             sampled_textures.push_back(surface);
         }
@@ -163,7 +163,7 @@ public:
             return GetNullSurface(SurfaceParams::ExpectedTarget(entry));
         }
         const auto params{SurfaceParams::CreateForImage(format_lookup_table, tic, entry)};
-        const auto [surface, view] = GetSurface(gpu_addr, *cpu_addr, params, false);
+        const auto [surface, view] = GetSurface(gpu_addr, *cpu_addr, params, true, false);
         if (guard_samplers) {
             sampled_textures.push_back(surface);
         }
@@ -178,7 +178,7 @@ public:
         return any_rt;
     }
 
-    TView GetDepthBufferSurface() {
+    TView GetDepthBufferSurface(bool preserve_contents) {
         std::lock_guard lock{mutex};
         auto& maxwell3d = system.GPU().Maxwell3D();
         if (!maxwell3d.dirty.flags[VideoCommon::Dirty::ZetaBuffer]) {
@@ -199,7 +199,7 @@ public:
             return {};
         }
         const auto depth_params{SurfaceParams::CreateForDepthBuffer(system)};
-        auto surface_view = GetSurface(gpu_addr, *cpu_addr, depth_params, true);
+        auto surface_view = GetSurface(gpu_addr, *cpu_addr, depth_params, preserve_contents, true);
         if (depth_buffer.target)
             depth_buffer.target->MarkAsRenderTarget(false, NO_RT);
         depth_buffer.target = surface_view.first;
@@ -209,7 +209,7 @@ public:
         return surface_view.second;
     }
 
-    TView GetColorBufferSurface(std::size_t index) {
+    TView GetColorBufferSurface(std::size_t index, bool preserve_contents) {
         std::lock_guard lock{mutex};
         ASSERT(index < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets);
         auto& maxwell3d = system.GPU().Maxwell3D();
@@ -239,8 +239,9 @@ public:
             return {};
         }
 
-        auto surface_view = GetSurface(gpu_addr, *cpu_addr,
-                                       SurfaceParams::CreateForFramebuffer(system, index), true);
+        auto surface_view =
+            GetSurface(gpu_addr, *cpu_addr, SurfaceParams::CreateForFramebuffer(system, index),
+                       preserve_contents, true);
         if (render_targets[index].target) {
             auto& surface = render_targets[index].target;
             surface->MarkAsRenderTarget(false, NO_RT);
@@ -300,9 +301,9 @@ public:
         const std::optional<VAddr> src_cpu_addr =
             system.GPU().MemoryManager().GpuToCpuAddress(src_gpu_addr);
         std::pair<TSurface, TView> dst_surface =
-            GetSurface(dst_gpu_addr, *dst_cpu_addr, dst_params, false);
+            GetSurface(dst_gpu_addr, *dst_cpu_addr, dst_params, true, false);
         std::pair<TSurface, TView> src_surface =
-            GetSurface(src_gpu_addr, *src_cpu_addr, src_params, false);
+            GetSurface(src_gpu_addr, *src_cpu_addr, src_params, true, false);
         ImageBlit(src_surface.second, dst_surface.second, copy_config);
         dst_surface.first->MarkAsModified(true, Tick());
     }
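
The new `preserve_contents` flag lets render-target callers opt out of loading stale guest memory when they know every pixel is about to be overwritten, for example by a full clear. A hypothetical caller sketch; the template parameter stands in for any texture cache exposing the new two-argument getter, and the names are assumptions, not code from this patch:

    #include <cstddef>

    // `full_clear` would come from the rasterizer's knowledge of the pending
    // clear or draw covering the whole surface.
    template <typename TextureCache>
    auto BindColorTarget(TextureCache& texture_cache, std::size_t index, bool full_clear) {
        // Everything will be overwritten: skip loading stale guest data.
        const bool preserve_contents = !full_clear;
        return texture_cache.GetColorBufferSurface(index, preserve_contents);
    }
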
@@ -532,18 +533,22 @@ private:
      * @param overlaps The overlapping surfaces registered in the cache.
      * @param params The parameters for the new surface.
      * @param gpu_addr The starting address of the new surface.
+     * @param preserve_contents Indicates whether the new surface should be loaded from memory or
+     * left blank.
      * @param untopological Indicates to the recycler that the texture has no way to match the
      * overlaps due to topological reasons.
      **/
     std::pair<TSurface, TView> RecycleSurface(std::vector<TSurface>& overlaps,
                                               const SurfaceParams& params, const GPUVAddr gpu_addr,
+                                              const bool preserve_contents,
                                               const MatchTopologyResult untopological) {
+        const bool do_load = preserve_contents && Settings::IsGPULevelExtreme();
         for (auto& surface : overlaps) {
             Unregister(surface);
         }
         switch (PickStrategy(overlaps, params, gpu_addr, untopological)) {
         case RecycleStrategy::Ignore: {
-            return InitializeSurface(gpu_addr, params, Settings::IsGPULevelExtreme());
+            return InitializeSurface(gpu_addr, params, do_load);
         }
         case RecycleStrategy::Flush: {
             std::sort(overlaps.begin(), overlaps.end(),
@@ -553,7 +558,7 @@ private:
             for (auto& surface : overlaps) {
                 FlushSurface(surface);
             }
-            return InitializeSurface(gpu_addr, params);
+            return InitializeSurface(gpu_addr, params, preserve_contents);
         }
         case RecycleStrategy::BufferCopy: {
             auto new_surface = GetUncachedSurface(gpu_addr, params);
@@ -562,7 +567,7 @@ private:
         }
         default: {
             UNIMPLEMENTED_MSG("Unimplemented Texture Cache Recycling Strategy!");
-            return InitializeSurface(gpu_addr, params);
+            return InitializeSurface(gpu_addr, params, do_load);
         }
         }
     }
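
Note how RecycleSurface gates the actual reload twice: the caller must ask for preservation and the GPU accuracy setting must be at its most accurate (and slowest) level before recycled overlaps are read back from guest memory. A compile-time sketch of that gate:

    // Both conditions must hold before a recycled surface is reloaded from guest memory.
    constexpr bool ShouldLoadOnRecycle(bool preserve_contents, bool gpu_level_extreme) {
        return preserve_contents && gpu_level_extreme;
    }
    static_assert(!ShouldLoadOnRecycle(false, true)); // contents are about to be replaced
    static_assert(!ShouldLoadOnRecycle(true, false)); // lower accuracy levels skip the cost
    static_assert(ShouldLoadOnRecycle(true, true));   // only the "extreme" level loads
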
@@ -700,11 +705,14 @@ private:
      * @param params The parameters on the new surface.
      * @param gpu_addr The starting address of the new surface.
      * @param cpu_addr The starting address of the new surface on physical memory.
+     * @param preserve_contents Indicates whether the new surface should be loaded from memory or
+     * left blank.
      */
     std::optional<std::pair<TSurface, TView>> Manage3DSurfaces(std::vector<TSurface>& overlaps,
                                                                const SurfaceParams& params,
                                                                const GPUVAddr gpu_addr,
-                                                               const VAddr cpu_addr) {
+                                                               const VAddr cpu_addr,
+                                                               bool preserve_contents) {
         if (params.target == SurfaceTarget::Texture3D) {
             bool failed = false;
             if (params.num_levels > 1) {
@@ -754,7 +762,7 @@ private:
                     return std::nullopt;
                 }
                 Unregister(surface);
-                return InitializeSurface(gpu_addr, params);
+                return InitializeSurface(gpu_addr, params, preserve_contents);
             }
             return std::nullopt;
         }
@@ -765,7 +773,7 @@ private:
                     return {{surface, surface->GetMainView()}};
                 }
             }
-            return InitializeSurface(gpu_addr, params);
+            return InitializeSurface(gpu_addr, params, preserve_contents);
         }
     }
 
@@ -788,10 +796,13 @@ private:
      *
      * @param gpu_addr The starting address of the candidate surface.
      * @param params The parameters on the candidate surface.
+     * @param preserve_contents Indicates whether the new surface should be loaded from memory or
+     * left blank.
      * @param is_render Whether or not the surface is a render target.
      **/
     std::pair<TSurface, TView> GetSurface(const GPUVAddr gpu_addr, const VAddr cpu_addr,
-                                          const SurfaceParams& params, bool is_render) {
+                                          const SurfaceParams& params, bool preserve_contents,
+                                          bool is_render) {
         // Step 1
         // Check Level 1 Cache for a fast structural match. If candidate surface
         // matches at certain level we are pretty much done.
@@ -800,7 +811,8 @@ private:
             const auto topological_result = current_surface->MatchesTopology(params);
             if (topological_result != MatchTopologyResult::FullMatch) {
                 std::vector<TSurface> overlaps{current_surface};
-                return RecycleSurface(overlaps, params, gpu_addr, topological_result);
+                return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+                                      topological_result);
             }
             const auto struct_result = current_surface->MatchesStructure(params);
@@ -825,7 +837,7 @@ private:
         // If none are found, we are done. we just load the surface and create it.
         if (overlaps.empty()) {
-            return InitializeSurface(gpu_addr, params);
+            return InitializeSurface(gpu_addr, params, preserve_contents);
         }
 
         // Step 3
@@ -835,13 +847,15 @@ private:
         for (const auto& surface : overlaps) {
             const auto topological_result = surface->MatchesTopology(params);
             if (topological_result != MatchTopologyResult::FullMatch) {
-                return RecycleSurface(overlaps, params, gpu_addr, topological_result);
+                return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+                                      topological_result);
             }
         }
 
         // Check if it's a 3D texture
         if (params.block_depth > 0) {
-            auto surface = Manage3DSurfaces(overlaps, params, gpu_addr, cpu_addr);
+            auto surface =
+                Manage3DSurfaces(overlaps, params, gpu_addr, cpu_addr, preserve_contents);
             if (surface) {
                 return *surface;
             }
@@ -861,7 +875,8 @@ private:
                     return *view;
                 }
             }
-            return RecycleSurface(overlaps, params, gpu_addr, MatchTopologyResult::FullMatch);
+            return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+                                  MatchTopologyResult::FullMatch);
         }
         // Now we check if the candidate is a mipmap/layer of the overlap
         std::optional<TView> view =
@@ -885,7 +900,7 @@ private:
                 pair.first->EmplaceView(params, gpu_addr, candidate_size);
                 if (mirage_view)
                     return {pair.first, *mirage_view};
-                return RecycleSurface(overlaps, params, gpu_addr,
+                return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
                                       MatchTopologyResult::FullMatch);
             }
             return {current_surface, *view};
@@ -901,7 +916,8 @@ private:
             }
         }
         // We failed all the tests, recycle the overlaps into a new texture.
-        return RecycleSurface(overlaps, params, gpu_addr, MatchTopologyResult::FullMatch);
+        return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+                              MatchTopologyResult::FullMatch);
     }
 
     /**
@@ -1059,10 +1075,10 @@ private:
     }
 
     std::pair<TSurface, TView> InitializeSurface(GPUVAddr gpu_addr, const SurfaceParams& params,
-                                                 bool do_load = true) {
+                                                 bool preserve_contents) {
         auto new_surface{GetUncachedSurface(gpu_addr, params)};
         Register(new_surface);
-        if (do_load) {
+        if (preserve_contents) {
             LoadSurface(new_surface);
         }
         return {new_surface, new_surface->GetMainView()};