Diffstat (limited to 'src/video_core')
-rw-r--r--  src/video_core/dma_pusher.cpp                              |  30
-rw-r--r--  src/video_core/dma_pusher.h                                |   1
-rw-r--r--  src/video_core/engines/fermi_2d.cpp                        |   6
-rw-r--r--  src/video_core/engines/fermi_2d.h                          |   3
-rw-r--r--  src/video_core/engines/kepler_compute.cpp                  |   7
-rw-r--r--  src/video_core/engines/kepler_compute.h                    |   3
-rw-r--r--  src/video_core/engines/kepler_memory.cpp                   |   7
-rw-r--r--  src/video_core/engines/kepler_memory.h                     |   3
-rw-r--r--  src/video_core/engines/maxwell_3d.cpp                      |  74
-rw-r--r--  src/video_core/engines/maxwell_3d.h                        |   7
-rw-r--r--  src/video_core/engines/maxwell_dma.cpp                     |   7
-rw-r--r--  src/video_core/engines/maxwell_dma.h                       |   3
-rw-r--r--  src/video_core/engines/shader_bytecode.h                   |   3
-rw-r--r--  src/video_core/gpu.cpp                                     |  55
-rw-r--r--  src/video_core/gpu.h                                       |  10
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.cpp           |  12
-rw-r--r--  src/video_core/renderer_vulkan/fixed_pipeline_state.cpp    |  15
-rw-r--r--  src/video_core/renderer_vulkan/fixed_pipeline_state.h      |  10
-rw-r--r--  src/video_core/renderer_vulkan/vk_device.h                 |   5
-rw-r--r--  src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp    |   2
-rw-r--r--  src/video_core/renderer_vulkan/vk_memory_manager.cpp       |  13
-rw-r--r--  src/video_core/renderer_vulkan/vk_memory_manager.h         |  13
-rw-r--r--  src/video_core/renderer_vulkan/vk_pipeline_cache.cpp       |  18
-rw-r--r--  src/video_core/renderer_vulkan/vk_pipeline_cache.h         |  49
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.cpp           |  89
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.h             |   6
-rw-r--r--  src/video_core/renderer_vulkan/vk_renderpass_cache.cpp     |  53
-rw-r--r--  src/video_core/renderer_vulkan/vk_renderpass_cache.h       |  59
-rw-r--r--  src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp  |  11
-rw-r--r--  src/video_core/renderer_vulkan/vk_staging_buffer_pool.h    |   1
-rw-r--r--  src/video_core/renderer_vulkan/wrapper.cpp                 |  21
-rw-r--r--  src/video_core/shader/decode/arithmetic_half.cpp           |  51
-rw-r--r--  src/video_core/shader/decode/arithmetic_integer.cpp        |   4
33 files changed, 445 insertions(+), 206 deletions(-)
diff --git a/src/video_core/dma_pusher.cpp b/src/video_core/dma_pusher.cpp
index 324dafdcd..16311f05e 100644
--- a/src/video_core/dma_pusher.cpp
+++ b/src/video_core/dma_pusher.cpp
@@ -71,16 +71,22 @@ bool DmaPusher::Step() {
gpu.MemoryManager().ReadBlockUnsafe(dma_get, command_headers.data(),
command_list_header.size * sizeof(u32));
- for (const CommandHeader& command_header : command_headers) {
-
- // now, see if we're in the middle of a command
- if (dma_state.length_pending) {
- // Second word of long non-inc methods command - method count
- dma_state.length_pending = 0;
- dma_state.method_count = command_header.method_count_;
- } else if (dma_state.method_count) {
+ for (std::size_t index = 0; index < command_headers.size();) {
+ const CommandHeader& command_header = command_headers[index];
+
+ if (dma_state.method_count) {
// Data word of methods command
- CallMethod(command_header.argument);
+ if (dma_state.non_incrementing) {
+ const u32 max_write = static_cast<u32>(
+ std::min<std::size_t>(index + dma_state.method_count, command_headers.size()) -
+ index);
+ CallMultiMethod(&command_header.argument, max_write);
+ dma_state.method_count -= max_write;
+ index += max_write;
+ continue;
+ } else {
+ CallMethod(command_header.argument);
+ }
if (!dma_state.non_incrementing) {
dma_state.method++;
@@ -120,6 +126,7 @@ bool DmaPusher::Step() {
break;
}
}
+ index++;
}
if (!non_main) {
@@ -140,4 +147,9 @@ void DmaPusher::CallMethod(u32 argument) const {
gpu.CallMethod({dma_state.method, argument, dma_state.subchannel, dma_state.method_count});
}
+void DmaPusher::CallMultiMethod(const u32* base_start, u32 num_methods) const {
+ gpu.CallMultiMethod(dma_state.method, dma_state.subchannel, base_start, num_methods,
+ dma_state.method_count);
+}
+
} // namespace Tegra
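The hunk above replaces the per-word loop with batched dispatch: when a non-incrementing method still has data words pending, all consecutive words available in the command buffer are forwarded in a single CallMultiMethod call. A minimal standalone sketch of that batching, using stand-in types rather than yuzu's real classes:

// Batching sketch: collapse a run of data words for one non-incrementing
// method into a single multi-word call (stand-in types, not yuzu's API).
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

using u32 = std::uint32_t;

void CallMultiMethod(u32 method, const u32* args, u32 count) {
    std::printf("method 0x%X: %u argument(s) in one call\n", method, count);
}

int main() {
    const std::vector<u32> command_words{1, 2, 3, 4, 5, 6, 7};
    u32 method = 0x100;   // hypothetical non-incrementing method register
    u32 method_count = 5; // data words still pending for this method

    for (std::size_t index = 0; index < command_words.size();) {
        if (method_count > 0) {
            // Forward as many pending words as the buffer still holds.
            const u32 max_write = static_cast<u32>(
                std::min<std::size_t>(index + method_count, command_words.size()) - index);
            CallMultiMethod(method, &command_words[index], max_write);
            method_count -= max_write;
            index += max_write;
            continue;
        }
        ++index; // header parsing and state transitions elided
    }
}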
diff --git a/src/video_core/dma_pusher.h b/src/video_core/dma_pusher.h
index d6188614a..6cef71306 100644
--- a/src/video_core/dma_pusher.h
+++ b/src/video_core/dma_pusher.h
@@ -75,6 +75,7 @@ private:
void SetState(const CommandHeader& command_header);
void CallMethod(u32 argument) const;
+ void CallMultiMethod(const u32* base_start, u32 num_methods) const;
std::vector<CommandHeader> command_headers; ///< Buffer for list of commands fetched at once
diff --git a/src/video_core/engines/fermi_2d.cpp b/src/video_core/engines/fermi_2d.cpp
index bace6affb..8a47614d2 100644
--- a/src/video_core/engines/fermi_2d.cpp
+++ b/src/video_core/engines/fermi_2d.cpp
@@ -28,6 +28,12 @@ void Fermi2D::CallMethod(const GPU::MethodCall& method_call) {
}
}
+void Fermi2D::CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending) {
+ for (std::size_t i = 0; i < amount; i++) {
+ CallMethod({method, base_start[i], 0, methods_pending - static_cast<u32>(i)});
+ }
+}
+
static std::pair<u32, u32> DelimitLine(u32 src_1, u32 src_2, u32 dst_1, u32 dst_2, u32 src_line) {
const u32 line_a = src_2 - src_1;
const u32 line_b = dst_2 - dst_1;
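Fermi2D and the other fixed-function engines below (KeplerCompute, KeplerMemory, MaxwellDMA) share the same fallback implementation: each word is forwarded through the existing single-method path, with methods_pending counting down so every forwarded call still reports how many arguments remain after it. A self-contained sketch of that countdown, with a stand-in MethodCall type:

// Shared fallback sketch: forward each word individually, decrementing the
// pending count (stand-in types, not yuzu's GPU::MethodCall).
#include <cstdint>
#include <cstdio>

using u32 = std::uint32_t;

struct MethodCall {
    u32 method, argument, subchannel, method_count;
};

void CallMethod(const MethodCall& call) {
    std::printf("method=%u arg=%u pending=%u\n", call.method, call.argument,
                call.method_count);
}

void CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending) {
    for (std::size_t i = 0; i < amount; i++) {
        CallMethod({method, base_start[i], 0, methods_pending - static_cast<u32>(i)});
    }
}

int main() {
    const u32 args[3]{10, 20, 30};
    CallMultiMethod(0x42, args, 3, 3); // pending counts down 3, 2, 1
}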
diff --git a/src/video_core/engines/fermi_2d.h b/src/video_core/engines/fermi_2d.h
index dba342c70..939a5966d 100644
--- a/src/video_core/engines/fermi_2d.h
+++ b/src/video_core/engines/fermi_2d.h
@@ -39,6 +39,9 @@ public:
/// Write the value to the register identified by method.
void CallMethod(const GPU::MethodCall& method_call);
+ /// Write multiple values to the register identified by method.
+ void CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending);
+
enum class Origin : u32 {
Center = 0,
Corner = 1,
diff --git a/src/video_core/engines/kepler_compute.cpp b/src/video_core/engines/kepler_compute.cpp
index 368c75a66..00a12175f 100644
--- a/src/video_core/engines/kepler_compute.cpp
+++ b/src/video_core/engines/kepler_compute.cpp
@@ -51,6 +51,13 @@ void KeplerCompute::CallMethod(const GPU::MethodCall& method_call) {
}
}
+void KeplerCompute::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
+ u32 methods_pending) {
+ for (std::size_t i = 0; i < amount; i++) {
+ CallMethod({method, base_start[i], 0, methods_pending - static_cast<u32>(i)});
+ }
+}
+
Texture::FullTextureInfo KeplerCompute::GetTexture(std::size_t offset) const {
const std::bitset<8> cbuf_mask = launch_description.const_buffer_enable_mask.Value();
ASSERT(cbuf_mask[regs.tex_cb_index]);
diff --git a/src/video_core/engines/kepler_compute.h b/src/video_core/engines/kepler_compute.h
index eeb79c56f..fe55fdfd0 100644
--- a/src/video_core/engines/kepler_compute.h
+++ b/src/video_core/engines/kepler_compute.h
@@ -202,6 +202,9 @@ public:
/// Write the value to the register identified by method.
void CallMethod(const GPU::MethodCall& method_call);
+ /// Write multiple values to the register identified by method.
+ void CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending);
+
Texture::FullTextureInfo GetTexture(std::size_t offset) const;
/// Given a texture handle, returns the TSC and TIC entries.
diff --git a/src/video_core/engines/kepler_memory.cpp b/src/video_core/engines/kepler_memory.cpp
index 597872e43..586ff15dc 100644
--- a/src/video_core/engines/kepler_memory.cpp
+++ b/src/video_core/engines/kepler_memory.cpp
@@ -41,4 +41,11 @@ void KeplerMemory::CallMethod(const GPU::MethodCall& method_call) {
}
}
+void KeplerMemory::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
+ u32 methods_pending) {
+ for (std::size_t i = 0; i < amount; i++) {
+ CallMethod({method, base_start[i], 0, methods_pending - static_cast<u32>(i)});
+ }
+}
+
} // namespace Tegra::Engines
diff --git a/src/video_core/engines/kepler_memory.h b/src/video_core/engines/kepler_memory.h
index 396fb6e86..bb26fb030 100644
--- a/src/video_core/engines/kepler_memory.h
+++ b/src/video_core/engines/kepler_memory.h
@@ -40,6 +40,9 @@ public:
/// Write the value to the register identified by method.
void CallMethod(const GPU::MethodCall& method_call);
+ /// Write multiple values to the register identified by method.
+ void CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending);
+
struct Regs {
static constexpr size_t NUM_REGS = 0x7F;
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp
index 2824ed707..39e3b66a2 100644
--- a/src/video_core/engines/maxwell_3d.cpp
+++ b/src/video_core/engines/maxwell_3d.cpp
@@ -280,6 +280,58 @@ void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) {
}
}
+void Maxwell3D::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
+ u32 methods_pending) {
+ // Methods after 0xE00 are special; they are triggers for microcode that was
+ // uploaded to the GPU during initialization.
+ if (method >= MacroRegistersStart) {
+ // We're trying to execute a macro
+ if (executing_macro == 0) {
+ // A macro call must begin by writing the macro method's register, not its argument.
+ ASSERT_MSG((method % 2) == 0,
+ "Can't start macro execution by writing to the ARGS register");
+ executing_macro = method;
+ }
+
+ for (std::size_t i = 0; i < amount; i++) {
+ macro_params.push_back(base_start[i]);
+ }
+
+ // Call the macro when there are no more parameters in the command buffer
+ if (amount == methods_pending) {
+ CallMacroMethod(executing_macro, macro_params.size(), macro_params.data());
+ macro_params.clear();
+ }
+ return;
+ }
+ switch (method) {
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[0]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[1]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[2]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[3]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[4]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[5]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[6]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[7]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[8]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[9]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[10]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[11]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[12]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[13]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[14]):
+ case MAXWELL3D_REG_INDEX(const_buffer.cb_data[15]): {
+ ProcessCBMultiData(method, base_start, amount);
+ break;
+ }
+ default: {
+ for (std::size_t i = 0; i < amount; i++) {
+ CallMethod({method, base_start[i], 0, methods_pending - static_cast<u32>(i)});
+ }
+ }
+ }
+}
+
void Maxwell3D::StepInstance(const MMEDrawMode expected_mode, const u32 count) {
if (mme_draw.current_mode == MMEDrawMode::Undefined) {
if (mme_draw.gl_begin_consume) {
@@ -570,6 +622,28 @@ void Maxwell3D::StartCBData(u32 method) {
ProcessCBData(regs.const_buffer.cb_data[cb_data_state.id]);
}
+void Maxwell3D::ProcessCBMultiData(u32 method, const u32* start_base, u32 amount) {
+ if (cb_data_state.current != method) {
+ if (cb_data_state.current != null_cb_data) {
+ FinishCBData();
+ }
+ constexpr u32 first_cb_data = MAXWELL3D_REG_INDEX(const_buffer.cb_data[0]);
+ cb_data_state.start_pos = regs.const_buffer.cb_pos;
+ cb_data_state.id = method - first_cb_data;
+ cb_data_state.current = method;
+ cb_data_state.counter = 0;
+ }
+ const std::size_t id = cb_data_state.id;
+ for (std::size_t i = 0; i < amount; ++i) {
+ cb_data_state.buffer[id][cb_data_state.counter++] = start_base[i];
+ }
+ // Increment the current buffer position.
+ regs.const_buffer.cb_pos = regs.const_buffer.cb_pos + 4 * amount;
+}
+
void Maxwell3D::FinishCBData() {
// Write the input value to the current const buffer at the current position.
const GPUVAddr buffer_address = regs.const_buffer.BufferAddress();
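Unlike the other engines, Maxwell3D gets real fast paths: macro parameters are appended in bulk, and CB_DATA writes go through ProcessCBMultiData, which copies the whole run into the staging buffer and advances cb_pos by four bytes per word. A reduced sketch of just that copy step (simplified stand-in state, not the real Maxwell3D class, and omitting the target-switch handling above it):

// Const-buffer fast-path sketch: copy a run of CB_DATA words and advance the
// byte position (stand-in state, not yuzu's Maxwell3D).
#include <array>
#include <cstdint>

using u32 = std::uint32_t;

struct CBState {
    std::array<u32, 0x100> buffer{};
    u32 counter = 0; // words written so far
};

void ProcessCBMultiData(CBState& cb, u32& cb_pos, const u32* start_base, u32 amount) {
    for (u32 i = 0; i < amount; ++i) {
        cb.buffer[cb.counter++] = start_base[i];
    }
    cb_pos += 4 * amount; // cb_pos is in bytes, one u32 per word
}

int main() {
    CBState cb;
    u32 cb_pos = 0;
    const u32 words[4]{1, 2, 3, 4};
    ProcessCBMultiData(cb, cb_pos, words, 4); // cb_pos is now 16
}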
diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h
index 59d5752d2..3dfba8197 100644
--- a/src/video_core/engines/maxwell_3d.h
+++ b/src/video_core/engines/maxwell_3d.h
@@ -1259,7 +1259,8 @@ public:
GPUVAddr LimitAddress() const {
return static_cast<GPUVAddr>((static_cast<GPUVAddr>(limit_high) << 32) |
- limit_low);
+ limit_low) +
+ 1;
}
} vertex_array_limit[NumVertexArrays];
@@ -1358,6 +1359,9 @@ public:
/// Write the value to the register identified by method.
void CallMethod(const GPU::MethodCall& method_call);
+ /// Write multiple values to the register identified by method.
+ void CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending);
+
/// Write the value to the register identified by method.
void CallMethodFromMME(const GPU::MethodCall& method_call);
@@ -1511,6 +1515,7 @@ private:
/// Handles a write to the CB_DATA[i] register.
void StartCBData(u32 method);
void ProcessCBData(u32 value);
+ void ProcessCBMultiData(u32 method, const u32* start_base, u32 amount);
void FinishCBData();
/// Handles a write to the CB_BIND register.
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp
index 3bfed6ab8..6630005b0 100644
--- a/src/video_core/engines/maxwell_dma.cpp
+++ b/src/video_core/engines/maxwell_dma.cpp
@@ -36,6 +36,13 @@ void MaxwellDMA::CallMethod(const GPU::MethodCall& method_call) {
#undef MAXWELLDMA_REG_INDEX
}
+void MaxwellDMA::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
+ u32 methods_pending) {
+ for (std::size_t i = 0; i < amount; i++) {
+ CallMethod({method, base_start[i], 0, methods_pending - static_cast<u32>(i)});
+ }
+}
+
void MaxwellDMA::HandleCopy() {
LOG_TRACE(HW_GPU, "Requested a DMA copy");
diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h
index 4f40d1d1f..c43ed8194 100644
--- a/src/video_core/engines/maxwell_dma.h
+++ b/src/video_core/engines/maxwell_dma.h
@@ -35,6 +35,9 @@ public:
/// Write the value to the register identified by method.
void CallMethod(const GPU::MethodCall& method_call);
+ /// Write multiple values to the register identified by method.
+ void CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending);
+
struct Regs {
static constexpr std::size_t NUM_REGS = 0x1D6;
diff --git a/src/video_core/engines/shader_bytecode.h b/src/video_core/engines/shader_bytecode.h
index 7231597d4..cde3a26b9 100644
--- a/src/video_core/engines/shader_bytecode.h
+++ b/src/video_core/engines/shader_bytecode.h
@@ -655,6 +655,7 @@ union Instruction {
}
constexpr Instruction(u64 value) : value{value} {}
+ constexpr Instruction(const Instruction& instr) : value(instr.value) {}
BitField<0, 8, Register> gpr0;
BitField<8, 8, Register> gpr8;
@@ -817,11 +818,9 @@ union Instruction {
BitField<32, 1, u64> saturate;
BitField<49, 2, HalfMerge> merge;
- BitField<43, 1, u64> negate_a;
BitField<44, 1, u64> abs_a;
BitField<47, 2, HalfType> type_a;
- BitField<31, 1, u64> negate_b;
BitField<30, 1, u64> abs_b;
BitField<28, 2, HalfType> type_b;
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index 3b7572d61..b87fd873d 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -9,6 +9,7 @@
#include "core/core_timing_util.h"
#include "core/frontend/emu_window.h"
#include "core/memory.h"
+#include "core/settings.h"
#include "video_core/engines/fermi_2d.h"
#include "video_core/engines/kepler_compute.h"
#include "video_core/engines/kepler_memory.h"
@@ -154,7 +155,10 @@ u64 GPU::GetTicks() const {
constexpr u64 gpu_ticks_den = 625;
const u64 cpu_ticks = system.CoreTiming().GetTicks();
- const u64 nanoseconds = Core::Timing::CyclesToNs(cpu_ticks).count();
+ u64 nanoseconds = Core::Timing::CyclesToNs(cpu_ticks).count();
+ if (Settings::values.use_fast_gpu_time) {
+ nanoseconds /= 256;
+ }
const u64 nanoseconds_num = nanoseconds / gpu_ticks_den;
const u64 nanoseconds_rem = nanoseconds % gpu_ticks_den;
return nanoseconds_num * gpu_ticks_num + (nanoseconds_rem * gpu_ticks_num) / gpu_ticks_den;
@@ -209,16 +213,32 @@ void GPU::CallMethod(const MethodCall& method_call) {
ASSERT(method_call.subchannel < bound_engines.size());
- if (ExecuteMethodOnEngine(method_call)) {
+ if (ExecuteMethodOnEngine(method_call.method)) {
CallEngineMethod(method_call);
} else {
CallPullerMethod(method_call);
}
}
-bool GPU::ExecuteMethodOnEngine(const MethodCall& method_call) {
- const auto method = static_cast<BufferMethods>(method_call.method);
- return method >= BufferMethods::NonPullerMethods;
+void GPU::CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
+ u32 methods_pending) {
+ LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method, subchannel);
+
+ ASSERT(subchannel < bound_engines.size());
+
+ if (ExecuteMethodOnEngine(method)) {
+ CallEngineMultiMethod(method, subchannel, base_start, amount, methods_pending);
+ } else {
+ for (std::size_t i = 0; i < amount; i++) {
+ CallPullerMethod(
+ {method, base_start[i], subchannel, methods_pending - static_cast<u32>(i)});
+ }
+ }
+}
+
+bool GPU::ExecuteMethodOnEngine(u32 method) {
+ const auto buffer_method = static_cast<BufferMethods>(method);
+ return buffer_method >= BufferMethods::NonPullerMethods;
}
void GPU::CallPullerMethod(const MethodCall& method_call) {
@@ -298,6 +318,31 @@ void GPU::CallEngineMethod(const MethodCall& method_call) {
}
}
+void GPU::CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
+ u32 methods_pending) {
+ const EngineID engine = bound_engines[subchannel];
+
+ switch (engine) {
+ case EngineID::FERMI_TWOD_A:
+ fermi_2d->CallMultiMethod(method, base_start, amount, methods_pending);
+ break;
+ case EngineID::MAXWELL_B:
+ maxwell_3d->CallMultiMethod(method, base_start, amount, methods_pending);
+ break;
+ case EngineID::KEPLER_COMPUTE_B:
+ kepler_compute->CallMultiMethod(method, base_start, amount, methods_pending);
+ break;
+ case EngineID::MAXWELL_DMA_COPY_A:
+ maxwell_dma->CallMultiMethod(method, base_start, amount, methods_pending);
+ break;
+ case EngineID::KEPLER_INLINE_TO_MEMORY_B:
+ kepler_memory->CallMultiMethod(method, base_start, amount, methods_pending);
+ break;
+ default:
+ UNIMPLEMENTED_MSG("Unimplemented engine");
+ }
+}
+
void GPU::ProcessBindMethod(const MethodCall& method_call) {
// Bind the current subchannel to the desired engine id.
LOG_DEBUG(HW_GPU, "Binding subchannel {} to engine {}", method_call.subchannel,
diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h
index 5e3eb94e9..dd51c95b7 100644
--- a/src/video_core/gpu.h
+++ b/src/video_core/gpu.h
@@ -155,6 +155,10 @@ public:
/// Calls a GPU method.
void CallMethod(const MethodCall& method_call);
+ /// Calls a GPU multivalue method.
+ void CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
+ u32 methods_pending);
+
/// Flush all current written commands into the host GPU for execution.
void FlushCommands();
/// Synchronizes CPU writes with Host GPU memory.
@@ -309,8 +313,12 @@ private:
/// Calls a GPU engine method.
void CallEngineMethod(const MethodCall& method_call);
+ /// Calls a GPU engine multivalue method.
+ void CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
+ u32 methods_pending);
+
/// Determines where the method should be executed.
- bool ExecuteMethodOnEngine(const MethodCall& method_call);
+ bool ExecuteMethodOnEngine(u32 method);
protected:
std::unique_ptr<Tegra::DmaPusher> dma_pusher;
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 4c16c89d2..6fe155bcc 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -186,8 +186,12 @@ void RasterizerOpenGL::SetupVertexBuffer() {
const GPUVAddr start = vertex_array.StartAddress();
const GPUVAddr end = regs.vertex_array_limit[index].LimitAddress();
- ASSERT(end > start);
- const u64 size = end - start + 1;
+ ASSERT(end >= start);
+ const u64 size = end - start;
+ if (size == 0) {
+ glBindVertexBuffer(static_cast<GLuint>(index), 0, 0, vertex_array.stride);
+ continue;
+ }
const auto [vertex_buffer, vertex_buffer_offset] = buffer_cache.UploadMemory(start, size);
glBindVertexBuffer(static_cast<GLuint>(index), vertex_buffer, vertex_buffer_offset,
vertex_array.stride);
@@ -311,8 +315,8 @@ std::size_t RasterizerOpenGL::CalculateVertexArraysSize() const {
const GPUVAddr start = regs.vertex_array[index].StartAddress();
const GPUVAddr end = regs.vertex_array_limit[index].LimitAddress();
- ASSERT(end > start);
- size += end - start + 1;
+ size += end - start;
+ ASSERT(end >= start);
}
return size;
diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
index be1c31978..a7f256ff9 100644
--- a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
+++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
@@ -140,6 +140,12 @@ void FixedPipelineState::BlendingAttachment::Fill(const Maxwell& regs, std::size
enable.Assign(1);
}
+void FixedPipelineState::Fill(const Maxwell& regs) {
+ rasterizer.Fill(regs);
+ depth_stencil.Fill(regs);
+ color_blending.Fill(regs);
+}
+
std::size_t FixedPipelineState::Hash() const noexcept {
const u64 hash = Common::CityHash64(reinterpret_cast<const char*>(this), sizeof *this);
return static_cast<std::size_t>(hash);
@@ -149,15 +155,6 @@ bool FixedPipelineState::operator==(const FixedPipelineState& rhs) const noexcep
return std::memcmp(this, &rhs, sizeof *this) == 0;
}
-FixedPipelineState GetFixedPipelineState(const Maxwell& regs) {
- FixedPipelineState fixed_state;
- fixed_state.rasterizer.Fill(regs);
- fixed_state.depth_stencil.Fill(regs);
- fixed_state.color_blending.Fill(regs);
- fixed_state.padding = {};
- return fixed_state;
-}
-
u32 FixedPipelineState::PackComparisonOp(Maxwell::ComparisonOp op) noexcept {
// OpenGL enums go from 0x200 to 0x207 and the others from 1 to 8
    // If we subtract 0x200 from the OpenGL enums and 1 from the others, we get a 0-7 range.
diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.h b/src/video_core/renderer_vulkan/fixed_pipeline_state.h
index 9fe6bdbf9..77188b862 100644
--- a/src/video_core/renderer_vulkan/fixed_pipeline_state.h
+++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.h
@@ -17,7 +17,7 @@ namespace Vulkan {
using Maxwell = Tegra::Engines::Maxwell3D::Regs;
-struct alignas(32) FixedPipelineState {
+struct FixedPipelineState {
static u32 PackComparisonOp(Maxwell::ComparisonOp op) noexcept;
static Maxwell::ComparisonOp UnpackComparisonOp(u32 packed) noexcept;
@@ -129,7 +129,7 @@ struct alignas(32) FixedPipelineState {
auto& binding = bindings[index];
binding.raw = 0;
binding.enabled.Assign(enabled ? 1 : 0);
- binding.stride.Assign(stride);
+ binding.stride.Assign(static_cast<u16>(stride));
binding_divisors[index] = divisor;
}
@@ -237,7 +237,8 @@ struct alignas(32) FixedPipelineState {
Rasterizer rasterizer;
DepthStencil depth_stencil;
ColorBlending color_blending;
- std::array<u8, 20> padding;
+
+ void Fill(const Maxwell& regs);
std::size_t Hash() const noexcept;
@@ -250,9 +251,6 @@ struct alignas(32) FixedPipelineState {
static_assert(std::has_unique_object_representations_v<FixedPipelineState>);
static_assert(std::is_trivially_copyable_v<FixedPipelineState>);
static_assert(std::is_trivially_constructible_v<FixedPipelineState>);
-static_assert(sizeof(FixedPipelineState) % 32 == 0, "Size is not aligned");
-
-FixedPipelineState GetFixedPipelineState(const Maxwell& regs);
} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_device.h b/src/video_core/renderer_vulkan/vk_device.h
index a4d841e26..c8640762d 100644
--- a/src/video_core/renderer_vulkan/vk_device.h
+++ b/src/video_core/renderer_vulkan/vk_device.h
@@ -82,11 +82,6 @@ public:
return present_family;
}
- /// Returns true if the device is integrated with the host CPU.
- bool IsIntegrated() const {
- return properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
- }
-
/// Returns the current Vulkan API version provided in Vulkan-formatted version numbers.
u32 GetApiVersion() const {
return properties.apiVersion;
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
index 8332b42aa..45bd1fc6c 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
@@ -288,7 +288,7 @@ vk::Pipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& renderpa
depth_stencil_ci.maxDepthBounds = 0.0f;
std::array<VkPipelineColorBlendAttachmentState, Maxwell::NumRenderTargets> cb_attachments;
- const std::size_t num_attachments = renderpass_params.color_attachments.size();
+ const auto num_attachments = static_cast<std::size_t>(renderpass_params.num_color_attachments);
for (std::size_t index = 0; index < num_attachments; ++index) {
static constexpr std::array COMPONENT_TABLE = {
VK_COLOR_COMPONENT_R_BIT, VK_COLOR_COMPONENT_G_BIT, VK_COLOR_COMPONENT_B_BIT,
diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.cpp b/src/video_core/renderer_vulkan/vk_memory_manager.cpp
index 6a9e658bf..b4c650a63 100644
--- a/src/video_core/renderer_vulkan/vk_memory_manager.cpp
+++ b/src/video_core/renderer_vulkan/vk_memory_manager.cpp
@@ -118,8 +118,7 @@ private:
};
VKMemoryManager::VKMemoryManager(const VKDevice& device)
- : device{device}, properties{device.GetPhysical().GetMemoryProperties()},
- is_memory_unified{GetMemoryUnified(properties)} {}
+ : device{device}, properties{device.GetPhysical().GetMemoryProperties()} {}
VKMemoryManager::~VKMemoryManager() = default;
@@ -209,16 +208,6 @@ VKMemoryCommit VKMemoryManager::TryAllocCommit(const VkMemoryRequirements& requi
return {};
}
-bool VKMemoryManager::GetMemoryUnified(const VkPhysicalDeviceMemoryProperties& properties) {
- for (u32 heap_index = 0; heap_index < properties.memoryHeapCount; ++heap_index) {
- if (!(properties.memoryHeaps[heap_index].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT)) {
- // Memory is considered unified when heaps are device local only.
- return false;
- }
- }
- return true;
-}
-
VKMemoryCommitImpl::VKMemoryCommitImpl(const VKDevice& device, VKMemoryAllocation* allocation,
const vk::DeviceMemory& memory, u64 begin, u64 end)
: device{device}, memory{memory}, interval{begin, end}, allocation{allocation} {}
diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.h b/src/video_core/renderer_vulkan/vk_memory_manager.h
index 5b6858e9b..1af88e3d4 100644
--- a/src/video_core/renderer_vulkan/vk_memory_manager.h
+++ b/src/video_core/renderer_vulkan/vk_memory_manager.h
@@ -40,11 +40,6 @@ public:
/// Commits memory required by the image and binds it.
VKMemoryCommit Commit(const vk::Image& image, bool host_visible);
- /// Returns true if the memory allocations are done always in host visible and coherent memory.
- bool IsMemoryUnified() const {
- return is_memory_unified;
- }
-
private:
/// Allocates a chunk of memory.
bool AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 type_mask, u64 size);
@@ -53,12 +48,8 @@ private:
VKMemoryCommit TryAllocCommit(const VkMemoryRequirements& requirements,
VkMemoryPropertyFlags wanted_properties);
- /// Returns true if the device uses an unified memory model.
- static bool GetMemoryUnified(const VkPhysicalDeviceMemoryProperties& properties);
-
- const VKDevice& device; ///< Device handler.
- const VkPhysicalDeviceMemoryProperties properties; ///< Physical device properties.
- const bool is_memory_unified; ///< True if memory model is unified.
+ const VKDevice& device; ///< Device handler.
+ const VkPhysicalDeviceMemoryProperties properties; ///< Physical device properties.
std::vector<std::unique_ptr<VKMemoryAllocation>> allocations; ///< Current allocations.
};
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index 8cedeaeba..9b703a2f0 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -113,6 +113,24 @@ u32 FillDescriptorLayout(const ShaderEntries& entries,
} // Anonymous namespace
+std::size_t GraphicsPipelineCacheKey::Hash() const noexcept {
+ const u64 hash = Common::CityHash64(reinterpret_cast<const char*>(this), sizeof *this);
+ return static_cast<std::size_t>(hash);
+}
+
+bool GraphicsPipelineCacheKey::operator==(const GraphicsPipelineCacheKey& rhs) const noexcept {
+ return std::memcmp(&rhs, this, sizeof *this) == 0;
+}
+
+std::size_t ComputePipelineCacheKey::Hash() const noexcept {
+ const u64 hash = Common::CityHash64(reinterpret_cast<const char*>(this), sizeof *this);
+ return static_cast<std::size_t>(hash);
+}
+
+bool ComputePipelineCacheKey::operator==(const ComputePipelineCacheKey& rhs) const noexcept {
+ return std::memcmp(&rhs, this, sizeof *this) == 0;
+}
+
CachedShader::CachedShader(Core::System& system, Tegra::Engines::ShaderType stage,
GPUVAddr gpu_addr, VAddr cpu_addr, ProgramCode program_code,
u32 main_offset)
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.h b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
index 2cce53162..ebddafb73 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.h
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
@@ -7,7 +7,6 @@
#include <array>
#include <cstddef>
#include <memory>
-#include <tuple>
#include <type_traits>
#include <unordered_map>
#include <utility>
@@ -50,42 +49,38 @@ using Maxwell = Tegra::Engines::Maxwell3D::Regs;
struct GraphicsPipelineCacheKey {
FixedPipelineState fixed_state;
- std::array<GPUVAddr, Maxwell::MaxShaderProgram> shaders;
RenderPassParams renderpass_params;
+ std::array<GPUVAddr, Maxwell::MaxShaderProgram> shaders;
+ u64 padding; // This is necessary for unique object representations
- std::size_t Hash() const noexcept {
- std::size_t hash = fixed_state.Hash();
- for (const auto& shader : shaders) {
- boost::hash_combine(hash, shader);
- }
- boost::hash_combine(hash, renderpass_params.Hash());
- return hash;
- }
+ std::size_t Hash() const noexcept;
+
+ bool operator==(const GraphicsPipelineCacheKey& rhs) const noexcept;
- bool operator==(const GraphicsPipelineCacheKey& rhs) const noexcept {
- return std::tie(fixed_state, shaders, renderpass_params) ==
- std::tie(rhs.fixed_state, rhs.shaders, rhs.renderpass_params);
+ bool operator!=(const GraphicsPipelineCacheKey& rhs) const noexcept {
+ return !operator==(rhs);
}
};
+static_assert(std::has_unique_object_representations_v<GraphicsPipelineCacheKey>);
+static_assert(std::is_trivially_copyable_v<GraphicsPipelineCacheKey>);
+static_assert(std::is_trivially_constructible_v<GraphicsPipelineCacheKey>);
struct ComputePipelineCacheKey {
- GPUVAddr shader{};
- u32 shared_memory_size{};
- std::array<u32, 3> workgroup_size{};
-
- std::size_t Hash() const noexcept {
- return static_cast<std::size_t>(shader) ^
- ((static_cast<std::size_t>(shared_memory_size) >> 7) << 40) ^
- static_cast<std::size_t>(workgroup_size[0]) ^
- (static_cast<std::size_t>(workgroup_size[1]) << 16) ^
- (static_cast<std::size_t>(workgroup_size[2]) << 24);
- }
+ GPUVAddr shader;
+ u32 shared_memory_size;
+ std::array<u32, 3> workgroup_size;
+
+ std::size_t Hash() const noexcept;
+
+ bool operator==(const ComputePipelineCacheKey& rhs) const noexcept;
- bool operator==(const ComputePipelineCacheKey& rhs) const noexcept {
- return std::tie(shader, shared_memory_size, workgroup_size) ==
- std::tie(rhs.shader, rhs.shared_memory_size, rhs.workgroup_size);
+ bool operator!=(const ComputePipelineCacheKey& rhs) const noexcept {
+ return !operator==(rhs);
}
};
+static_assert(std::has_unique_object_representations_v<ComputePipelineCacheKey>);
+static_assert(std::is_trivially_copyable_v<ComputePipelineCacheKey>);
+static_assert(std::is_trivially_constructible_v<ComputePipelineCacheKey>);
} // namespace Vulkan
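Both cache keys are now hashed with CityHash64 over their raw bytes and compared with memcmp, which is only sound when the type has no padding holes — hence the unique-object-representations asserts and the explicit padding member that the caller zeroes. A minimal sketch of the pattern (FNV-1a stands in for Common::CityHash64 here):

// Byte-wise key sketch: hashing and memcmp over raw bytes requires a layout
// with no padding holes, which the static_assert enforces.
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <type_traits>

struct Key {
    std::uint64_t shader;
    std::uint32_t shared_memory_size;
    std::uint32_t workgroup_x; // explicit member keeps the layout hole-free

    bool operator==(const Key& rhs) const noexcept {
        return std::memcmp(this, &rhs, sizeof *this) == 0;
    }
};
static_assert(std::has_unique_object_representations_v<Key>);

std::uint64_t HashKey(const Key& key) noexcept {
    // Stand-in for Common::CityHash64 over the key's bytes (FNV-1a for the sketch).
    const auto* bytes = reinterpret_cast<const unsigned char*>(&key);
    std::uint64_t hash = 14695981039346656037ull;
    for (std::size_t i = 0; i < sizeof key; ++i) {
        hash = (hash ^ bytes[i]) * 1099511628211ull;
    }
    return hash;
}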
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 8a1f57891..c821b1229 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -316,7 +316,8 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
query_cache.UpdateCounters();
const auto& gpu = system.GPU().Maxwell3D();
- GraphicsPipelineCacheKey key{GetFixedPipelineState(gpu.regs)};
+ GraphicsPipelineCacheKey key;
+ key.fixed_state.Fill(gpu.regs);
buffer_cache.Map(CalculateGraphicsStreamBufferSize(is_indexed));
@@ -334,10 +335,11 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
buffer_cache.Unmap();
- const auto texceptions = UpdateAttachments();
+ const Texceptions texceptions = UpdateAttachments();
SetupImageTransitions(texceptions, color_attachments, zeta_attachment);
key.renderpass_params = GetRenderPassParams(texceptions);
+ key.padding = 0;
auto& pipeline = pipeline_cache.GetGraphicsPipeline(key);
scheduler.BindGraphicsPipeline(pipeline.GetHandle());
@@ -453,10 +455,12 @@ void RasterizerVulkan::DispatchCompute(GPUVAddr code_addr) {
query_cache.UpdateCounters();
const auto& launch_desc = system.GPU().KeplerCompute().launch_description;
- const ComputePipelineCacheKey key{
- code_addr,
- launch_desc.shared_alloc,
- {launch_desc.block_dim_x, launch_desc.block_dim_y, launch_desc.block_dim_z}};
+ ComputePipelineCacheKey key;
+ key.shader = code_addr;
+ key.shared_memory_size = launch_desc.shared_alloc;
+ key.workgroup_size = {launch_desc.block_dim_x, launch_desc.block_dim_y,
+ launch_desc.block_dim_z};
+
auto& pipeline = pipeline_cache.GetComputePipeline(key);
// Compute dispatches can't be executed inside a renderpass
@@ -688,7 +692,7 @@ std::tuple<VkFramebuffer, VkExtent2D> RasterizerVulkan::ConfigureFramebuffers(
FramebufferCacheKey key{renderpass, std::numeric_limits<u32>::max(),
std::numeric_limits<u32>::max(), std::numeric_limits<u32>::max()};
- const auto try_push = [&](const View& view) {
+ const auto try_push = [&key](const View& view) {
if (!view) {
return false;
}
@@ -699,7 +703,9 @@ std::tuple<VkFramebuffer, VkExtent2D> RasterizerVulkan::ConfigureFramebuffers(
return true;
};
- for (std::size_t index = 0; index < std::size(color_attachments); ++index) {
+ const auto& regs = system.GPU().Maxwell3D().regs;
+ const std::size_t num_attachments = static_cast<std::size_t>(regs.rt_control.count);
+ for (std::size_t index = 0; index < num_attachments; ++index) {
if (try_push(color_attachments[index])) {
texture_cache.MarkColorBufferInUse(index);
}
@@ -877,8 +883,12 @@ void RasterizerVulkan::SetupVertexArrays(FixedPipelineState::VertexInput& vertex
const GPUVAddr start{vertex_array.StartAddress()};
const GPUVAddr end{regs.vertex_array_limit[index].LimitAddress()};
- ASSERT(end > start);
- const std::size_t size{end - start + 1};
+ ASSERT(end >= start);
+ const std::size_t size{end - start};
+ if (size == 0) {
+ buffer_bindings.AddVertexBinding(DefaultBuffer(), 0);
+ continue;
+ }
const auto [buffer, offset] = buffer_cache.UploadMemory(start, size);
buffer_bindings.AddVertexBinding(buffer, offset);
}
@@ -1033,8 +1043,7 @@ void RasterizerVulkan::SetupConstBuffer(const ConstBufferEntry& entry,
const Tegra::Engines::ConstBufferInfo& buffer) {
if (!buffer.enabled) {
// Set values to zero to unbind buffers
- update_descriptor_queue.AddBuffer(buffer_cache.GetEmptyBuffer(sizeof(float)), 0,
- sizeof(float));
+ update_descriptor_queue.AddBuffer(DefaultBuffer(), 0, DEFAULT_BUFFER_SIZE);
return;
}
@@ -1057,7 +1066,9 @@ void RasterizerVulkan::SetupGlobalBuffer(const GlobalBufferEntry& entry, GPUVAdd
if (size == 0) {
// Sometimes global memory pointers don't have a proper size. Upload a dummy entry
// because Vulkan doesn't like empty buffers.
- constexpr std::size_t dummy_size = 4;
+ // Note: Do *not* use DefaultBuffer() here; storage buffers can be written to,
+ // which would break the default buffer.
+ static constexpr std::size_t dummy_size = 4;
const auto buffer = buffer_cache.GetEmptyBuffer(dummy_size);
update_descriptor_queue.AddBuffer(buffer, 0, dummy_size);
return;
@@ -1222,7 +1233,7 @@ std::size_t RasterizerVulkan::CalculateVertexArraysSize() const {
const GPUVAddr end{regs.vertex_array_limit[index].LimitAddress()};
DEBUG_ASSERT(end >= start);
- size += (end - start + 1) * regs.vertex_array[index].enable;
+ size += (end - start) * regs.vertex_array[index].enable;
}
return size;
}
@@ -1245,28 +1256,54 @@ std::size_t RasterizerVulkan::CalculateConstBufferSize(
}
RenderPassParams RasterizerVulkan::GetRenderPassParams(Texceptions texceptions) const {
- using namespace VideoCore::Surface;
-
const auto& regs = system.GPU().Maxwell3D().regs;
- RenderPassParams renderpass_params;
+ const std::size_t num_attachments = static_cast<std::size_t>(regs.rt_control.count);
+
+ RenderPassParams params;
+ params.color_formats = {};
+ std::size_t color_texceptions = 0;
- for (std::size_t rt = 0; rt < static_cast<std::size_t>(regs.rt_control.count); ++rt) {
+ std::size_t index = 0;
+ for (std::size_t rt = 0; rt < num_attachments; ++rt) {
const auto& rendertarget = regs.rt[rt];
if (rendertarget.Address() == 0 || rendertarget.format == Tegra::RenderTargetFormat::NONE) {
continue;
}
- renderpass_params.color_attachments.push_back(RenderPassParams::ColorAttachment{
- static_cast<u32>(rt), PixelFormatFromRenderTargetFormat(rendertarget.format),
- texceptions[rt]});
+ params.color_formats[index] = static_cast<u8>(rendertarget.format);
+ color_texceptions |= (texceptions[rt] ? 1ULL : 0ULL) << index;
+ ++index;
}
+ params.num_color_attachments = static_cast<u8>(index);
+ params.texceptions = static_cast<u8>(color_texceptions);
- renderpass_params.has_zeta = regs.zeta_enable;
- if (renderpass_params.has_zeta) {
- renderpass_params.zeta_pixel_format = PixelFormatFromDepthFormat(regs.zeta.format);
- renderpass_params.zeta_texception = texceptions[ZETA_TEXCEPTION_INDEX];
+ params.zeta_format = regs.zeta_enable ? static_cast<u8>(regs.zeta.format) : 0;
+ params.zeta_texception = texceptions[ZETA_TEXCEPTION_INDEX];
+ return params;
+}
+
+VkBuffer RasterizerVulkan::DefaultBuffer() {
+ if (default_buffer) {
+ return *default_buffer;
}
- return renderpass_params;
+ VkBufferCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ ci.size = DEFAULT_BUFFER_SIZE;
+ ci.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
+ VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
+ ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ ci.queueFamilyIndexCount = 0;
+ ci.pQueueFamilyIndices = nullptr;
+ default_buffer = device.GetLogical().CreateBuffer(ci);
+ default_buffer_commit = memory_manager.Commit(default_buffer, false);
+
+ scheduler.RequestOutsideRenderPassOperationContext();
+ scheduler.Record([buffer = *default_buffer](vk::CommandBuffer cmdbuf) {
+ cmdbuf.FillBuffer(buffer, 0, DEFAULT_BUFFER_SIZE, 0);
+ });
+ return *default_buffer;
}
} // namespace Vulkan
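DefaultBuffer() above follows a lazy-initialization pattern: the small zero-filled buffer is created on first use and the same handle is then reused for empty vertex bindings and disabled const buffers. A reduced sketch of the pattern with a stand-in resource type (the real code also records a FillBuffer command to zero the memory):

// Lazy-init sketch: create the shared default resource on first request,
// then hand out the same handle (stand-in Buffer type, not Vulkan).
#include <memory>
#include <vector>

struct Buffer {
    std::vector<unsigned char> bytes;
};

class Rasterizer {
public:
    Buffer* DefaultBuffer() {
        if (default_buffer) {
            return default_buffer.get();
        }
        default_buffer = std::make_unique<Buffer>();
        default_buffer->bytes.assign(16, 0); // 4 * sizeof(float), zero-filled
        return default_buffer.get();
    }

private:
    std::unique_ptr<Buffer> default_buffer;
};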
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index 2fa46b0cc..d41a7929e 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -155,6 +155,7 @@ private:
using Texceptions = std::bitset<Maxwell::NumRenderTargets + 1>;
static constexpr std::size_t ZETA_TEXCEPTION_INDEX = 8;
+ static constexpr VkDeviceSize DEFAULT_BUFFER_SIZE = 4 * sizeof(float);
void FlushWork();
@@ -247,6 +248,8 @@ private:
RenderPassParams GetRenderPassParams(Texceptions texceptions) const;
+ VkBuffer DefaultBuffer();
+
Core::System& system;
Core::Frontend::EmuWindow& render_window;
VKScreenInfo& screen_info;
@@ -271,6 +274,9 @@ private:
VKFenceManager fence_manager;
VKQueryCache query_cache;
+ vk::Buffer default_buffer;
+ VKMemoryCommit default_buffer_commit;
+
std::array<View, Maxwell::NumRenderTargets> color_attachments;
View zeta_attachment;
diff --git a/src/video_core/renderer_vulkan/vk_renderpass_cache.cpp b/src/video_core/renderer_vulkan/vk_renderpass_cache.cpp
index 4e5286a69..3f71d005e 100644
--- a/src/video_core/renderer_vulkan/vk_renderpass_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_renderpass_cache.cpp
@@ -2,9 +2,11 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include <cstring>
#include <memory>
#include <vector>
+#include "common/cityhash.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/renderer_vulkan/maxwell_to_vk.h"
#include "video_core/renderer_vulkan/vk_device.h"
@@ -13,6 +15,15 @@
namespace Vulkan {
+std::size_t RenderPassParams::Hash() const noexcept {
+ const u64 hash = Common::CityHash64(reinterpret_cast<const char*>(this), sizeof *this);
+ return static_cast<std::size_t>(hash);
+}
+
+bool RenderPassParams::operator==(const RenderPassParams& rhs) const noexcept {
+ return std::memcmp(&rhs, this, sizeof *this) == 0;
+}
+
VKRenderPassCache::VKRenderPassCache(const VKDevice& device) : device{device} {}
VKRenderPassCache::~VKRenderPassCache() = default;
@@ -27,20 +38,22 @@ VkRenderPass VKRenderPassCache::GetRenderPass(const RenderPassParams& params) {
}
vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& params) const {
+ using namespace VideoCore::Surface;
std::vector<VkAttachmentDescription> descriptors;
std::vector<VkAttachmentReference> color_references;
- for (std::size_t rt = 0; rt < params.color_attachments.size(); ++rt) {
- const auto attachment = params.color_attachments[rt];
- const auto format =
- MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, attachment.pixel_format);
+ const std::size_t num_attachments = static_cast<std::size_t>(params.num_color_attachments);
+ for (std::size_t rt = 0; rt < num_attachments; ++rt) {
+ const auto guest_format = static_cast<Tegra::RenderTargetFormat>(params.color_formats[rt]);
+ const PixelFormat pixel_format = PixelFormatFromRenderTargetFormat(guest_format);
+ const auto format = MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, pixel_format);
ASSERT_MSG(format.attachable, "Trying to attach a non-attachable format with format={}",
- static_cast<u32>(attachment.pixel_format));
+ static_cast<int>(pixel_format));
- // TODO(Rodrigo): Add eMayAlias when it's needed.
- const auto color_layout = attachment.is_texception
- ? VK_IMAGE_LAYOUT_GENERAL
- : VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ // TODO(Rodrigo): Add MAY_ALIAS_BIT when it's needed.
+ const VkImageLayout color_layout = ((params.texceptions >> rt) & 1) != 0
+ ? VK_IMAGE_LAYOUT_GENERAL
+ : VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
VkAttachmentDescription& descriptor = descriptors.emplace_back();
descriptor.flags = VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT;
descriptor.format = format.format;
@@ -58,15 +71,17 @@ vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& param
}
VkAttachmentReference zeta_attachment_ref;
- if (params.has_zeta) {
- const auto format =
- MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, params.zeta_pixel_format);
+ const bool has_zeta = params.zeta_format != 0;
+ if (has_zeta) {
+ const auto guest_format = static_cast<Tegra::DepthFormat>(params.zeta_format);
+ const PixelFormat pixel_format = PixelFormatFromDepthFormat(guest_format);
+ const auto format = MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, pixel_format);
ASSERT_MSG(format.attachable, "Trying to attach a non-attachable format with format={}",
- static_cast<u32>(params.zeta_pixel_format));
+ static_cast<int>(pixel_format));
- const auto zeta_layout = params.zeta_texception
- ? VK_IMAGE_LAYOUT_GENERAL
- : VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+ const VkImageLayout zeta_layout = params.zeta_texception != 0
+ ? VK_IMAGE_LAYOUT_GENERAL
+ : VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
VkAttachmentDescription& descriptor = descriptors.emplace_back();
descriptor.flags = 0;
descriptor.format = format.format;
@@ -78,7 +93,7 @@ vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& param
descriptor.initialLayout = zeta_layout;
descriptor.finalLayout = zeta_layout;
- zeta_attachment_ref.attachment = static_cast<u32>(params.color_attachments.size());
+ zeta_attachment_ref.attachment = static_cast<u32>(num_attachments);
zeta_attachment_ref.layout = zeta_layout;
}
@@ -90,7 +105,7 @@ vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& param
subpass_description.colorAttachmentCount = static_cast<u32>(color_references.size());
subpass_description.pColorAttachments = color_references.data();
subpass_description.pResolveAttachments = nullptr;
- subpass_description.pDepthStencilAttachment = params.has_zeta ? &zeta_attachment_ref : nullptr;
+ subpass_description.pDepthStencilAttachment = has_zeta ? &zeta_attachment_ref : nullptr;
subpass_description.preserveAttachmentCount = 0;
subpass_description.pPreserveAttachments = nullptr;
@@ -101,7 +116,7 @@ vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& param
stage |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
}
- if (params.has_zeta) {
+ if (has_zeta) {
access |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
stage |= VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
diff --git a/src/video_core/renderer_vulkan/vk_renderpass_cache.h b/src/video_core/renderer_vulkan/vk_renderpass_cache.h
index 921b6efb5..8b0fec720 100644
--- a/src/video_core/renderer_vulkan/vk_renderpass_cache.h
+++ b/src/video_core/renderer_vulkan/vk_renderpass_cache.h
@@ -4,8 +4,7 @@
#pragma once
-#include <memory>
-#include <tuple>
+#include <type_traits>
#include <unordered_map>
#include <boost/container/static_vector.hpp>
@@ -19,51 +18,25 @@ namespace Vulkan {
class VKDevice;
-// TODO(Rodrigo): Optimize this structure for faster hashing
-
struct RenderPassParams {
- struct ColorAttachment {
- u32 index = 0;
- VideoCore::Surface::PixelFormat pixel_format = VideoCore::Surface::PixelFormat::Invalid;
- bool is_texception = false;
-
- std::size_t Hash() const noexcept {
- return static_cast<std::size_t>(pixel_format) |
- static_cast<std::size_t>(is_texception) << 6 |
- static_cast<std::size_t>(index) << 7;
- }
-
- bool operator==(const ColorAttachment& rhs) const noexcept {
- return std::tie(index, pixel_format, is_texception) ==
- std::tie(rhs.index, rhs.pixel_format, rhs.is_texception);
- }
- };
-
- boost::container::static_vector<ColorAttachment,
- Tegra::Engines::Maxwell3D::Regs::NumRenderTargets>
- color_attachments{};
- // TODO(Rodrigo): Unify has_zeta into zeta_pixel_format and zeta_component_type.
- VideoCore::Surface::PixelFormat zeta_pixel_format = VideoCore::Surface::PixelFormat::Invalid;
- bool has_zeta = false;
- bool zeta_texception = false;
-
- std::size_t Hash() const noexcept {
- std::size_t hash = 0;
- for (const auto& rt : color_attachments) {
- boost::hash_combine(hash, rt.Hash());
- }
- boost::hash_combine(hash, zeta_pixel_format);
- boost::hash_combine(hash, has_zeta);
- boost::hash_combine(hash, zeta_texception);
- return hash;
- }
+ std::array<u8, Tegra::Engines::Maxwell3D::Regs::NumRenderTargets> color_formats;
+ u8 num_color_attachments;
+ u8 texceptions;
+
+ u8 zeta_format;
+ u8 zeta_texception;
+
+ std::size_t Hash() const noexcept;
+
+ bool operator==(const RenderPassParams& rhs) const noexcept;
- bool operator==(const RenderPassParams& rhs) const {
- return std::tie(color_attachments, zeta_pixel_format, has_zeta, zeta_texception) ==
- std::tie(rhs.color_attachments, rhs.zeta_pixel_format, rhs.has_zeta,
- rhs.zeta_texception);
+ bool operator!=(const RenderPassParams& rhs) const noexcept {
+ return !operator==(rhs);
}
};
+static_assert(std::has_unique_object_representations_v<RenderPassParams>);
+static_assert(std::is_trivially_copyable_v<RenderPassParams>);
+static_assert(std::is_trivially_constructible_v<RenderPassParams>);
} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
index 94d954d7a..45c180221 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
@@ -39,8 +39,7 @@ VKStagingBufferPool::StagingBuffer& VKStagingBufferPool::StagingBuffer::operator
VKStagingBufferPool::VKStagingBufferPool(const VKDevice& device, VKMemoryManager& memory_manager,
VKScheduler& scheduler)
- : device{device}, memory_manager{memory_manager}, scheduler{scheduler},
- is_device_integrated{device.IsIntegrated()} {}
+ : device{device}, memory_manager{memory_manager}, scheduler{scheduler} {}
VKStagingBufferPool::~VKStagingBufferPool() = default;
@@ -56,9 +55,7 @@ void VKStagingBufferPool::TickFrame() {
current_delete_level = (current_delete_level + 1) % NumLevels;
ReleaseCache(true);
- if (!is_device_integrated) {
- ReleaseCache(false);
- }
+ ReleaseCache(false);
}
VKBuffer* VKStagingBufferPool::TryGetReservedBuffer(std::size_t size, bool host_visible) {
@@ -81,7 +78,7 @@ VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_v
ci.size = 1ULL << log2;
ci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
- VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
+ VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
ci.queueFamilyIndexCount = 0;
ci.pQueueFamilyIndices = nullptr;
@@ -95,7 +92,7 @@ VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_v
}
VKStagingBufferPool::StagingBuffersCache& VKStagingBufferPool::GetCache(bool host_visible) {
- return is_device_integrated || host_visible ? host_staging_buffers : device_staging_buffers;
+ return host_visible ? host_staging_buffers : device_staging_buffers;
}
void VKStagingBufferPool::ReleaseCache(bool host_visible) {
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
index a0840ff8c..faf6418fd 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
@@ -71,7 +71,6 @@ private:
const VKDevice& device;
VKMemoryManager& memory_manager;
VKScheduler& scheduler;
- const bool is_device_integrated;
StagingBuffersCache host_staging_buffers;
StagingBuffersCache device_staging_buffers;
diff --git a/src/video_core/renderer_vulkan/wrapper.cpp b/src/video_core/renderer_vulkan/wrapper.cpp
index 539f3c974..7f5bc1404 100644
--- a/src/video_core/renderer_vulkan/wrapper.cpp
+++ b/src/video_core/renderer_vulkan/wrapper.cpp
@@ -2,6 +2,7 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include <algorithm>
#include <exception>
#include <memory>
#include <optional>
@@ -16,6 +17,23 @@ namespace Vulkan::vk {
namespace {
+void SortPhysicalDevices(std::vector<VkPhysicalDevice>& devices, const InstanceDispatch& dld) {
+ // Prefer discrete GPUs first, then Nvidia over AMD, AMD over Intel, Intel over the rest.
+ // A numeric rank keeps the comparison a strict weak ordering (which std::stable_sort
+ // requires); chained pairwise "is preferred" checks are not transitive.
+ const auto rank = [&](VkPhysicalDevice device) {
+ // This will call Vulkan more than needed, but these calls are cheap.
+ const auto properties = vk::PhysicalDevice(device, dld).GetProperties();
+ const int vendor_rank = [vendor = properties.vendorID] {
+ switch (vendor) {
+ case 0x10DE: // Nvidia
+ return 0;
+ case 0x1002: // AMD
+ return 1;
+ case 0x8086: // Intel
+ return 2;
+ default:
+ return 3;
+ }
+ }();
+ const int type_rank = properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU ? 0 : 1;
+ return type_rank * 4 + vendor_rank;
+ };
+ std::stable_sort(devices.begin(), devices.end(),
+ [&](auto lhs, auto rhs) { return rank(lhs) < rank(rhs); });
+}
+
template <typename T>
bool Proc(T& result, const InstanceDispatch& dld, const char* proc_name,
VkInstance instance = nullptr) noexcept {
@@ -389,7 +407,8 @@ std::optional<std::vector<VkPhysicalDevice>> Instance::EnumeratePhysicalDevices(
if (dld->vkEnumeratePhysicalDevices(handle, &num, physical_devices.data()) != VK_SUCCESS) {
return std::nullopt;
}
- return physical_devices;
+ SortPhysicalDevices(physical_devices, *dld);
+ return std::make_optional(std::move(physical_devices));
}
DebugCallback Instance::TryCreateDebugCallback(
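std::stable_sort requires a strict weak ordering, and it preserves the driver's original order among devices that compare equivalent. One way to express the "discrete first, then Nvidia, AMD, Intel" preference as such an ordering is a numeric rank, sketched here standalone with plain structs standing in for VkPhysicalDeviceProperties:

// Rank-based device sort sketch (stand-in structs, not real Vulkan types).
// stable_sort keeps the driver's order among devices with equal rank.
#include <algorithm>
#include <cstdint>
#include <vector>

struct Props {
    bool discrete;
    std::uint32_t vendor_id;
};

int Rank(const Props& p) {
    const int vendor_rank = p.vendor_id == 0x10DE ? 0   // Nvidia
                          : p.vendor_id == 0x1002 ? 1   // AMD
                          : p.vendor_id == 0x8086 ? 2   // Intel
                                                  : 3;  // anything else
    return (p.discrete ? 0 : 1) * 4 + vendor_rank;
}

int main() {
    std::vector<Props> devices{
        {false, 0x8086}, // Intel iGPU, listed first by the driver
        {true, 0x10DE},  // Nvidia dGPU
    };
    std::stable_sort(devices.begin(), devices.end(),
                     [](const Props& lhs, const Props& rhs) { return Rank(lhs) < Rank(rhs); });
    // devices[0] is now the Nvidia discrete GPU.
}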
diff --git a/src/video_core/shader/decode/arithmetic_half.cpp b/src/video_core/shader/decode/arithmetic_half.cpp
index ee7d9a29d..a276aee44 100644
--- a/src/video_core/shader/decode/arithmetic_half.cpp
+++ b/src/video_core/shader/decode/arithmetic_half.cpp
@@ -19,22 +19,46 @@ u32 ShaderIR::DecodeArithmeticHalf(NodeBlock& bb, u32 pc) {
const Instruction instr = {program_code[pc]};
const auto opcode = OpCode::Decode(instr);
- if (opcode->get().GetId() == OpCode::Id::HADD2_C ||
- opcode->get().GetId() == OpCode::Id::HADD2_R) {
+ bool negate_a = false;
+ bool negate_b = false;
+ bool absolute_a = false;
+ bool absolute_b = false;
+
+ switch (opcode->get().GetId()) {
+ case OpCode::Id::HADD2_R:
if (instr.alu_half.ftz == 0) {
LOG_DEBUG(HW_GPU, "{} without FTZ is not implemented", opcode->get().GetName());
}
+ negate_a = ((instr.value >> 43) & 1) != 0;
+ negate_b = ((instr.value >> 31) & 1) != 0;
+ absolute_a = ((instr.value >> 44) & 1) != 0;
+ absolute_b = ((instr.value >> 30) & 1) != 0;
+ break;
+ case OpCode::Id::HADD2_C:
+ if (instr.alu_half.ftz == 0) {
+ LOG_DEBUG(HW_GPU, "{} without FTZ is not implemented", opcode->get().GetName());
+ }
+ negate_a = ((instr.value >> 43) & 1) != 0;
+ negate_b = ((instr.value >> 56) & 1) != 0;
+ absolute_a = ((instr.value >> 44) & 1) != 0;
+ absolute_b = ((instr.value >> 54) & 1) != 0;
+ break;
+ case OpCode::Id::HMUL2_R:
+ negate_a = ((instr.value >> 43) & 1) != 0;
+ absolute_a = ((instr.value >> 44) & 1) != 0;
+ absolute_b = ((instr.value >> 30) & 1) != 0;
+ break;
+ case OpCode::Id::HMUL2_C:
+ negate_b = ((instr.value >> 31) & 1) != 0;
+ absolute_a = ((instr.value >> 44) & 1) != 0;
+ absolute_b = ((instr.value >> 54) & 1) != 0;
+ break;
}
- const bool negate_a =
- opcode->get().GetId() != OpCode::Id::HMUL2_R && instr.alu_half.negate_a != 0;
- const bool negate_b =
- opcode->get().GetId() != OpCode::Id::HMUL2_C && instr.alu_half.negate_b != 0;
-
Node op_a = UnpackHalfFloat(GetRegister(instr.gpr8), instr.alu_half.type_a);
- op_a = GetOperandAbsNegHalf(op_a, instr.alu_half.abs_a, negate_a);
+ op_a = GetOperandAbsNegHalf(op_a, absolute_a, negate_a);
- auto [type_b, op_b] = [&]() -> std::tuple<HalfType, Node> {
+ auto [type_b, op_b] = [this, instr, opcode]() -> std::pair<HalfType, Node> {
switch (opcode->get().GetId()) {
case OpCode::Id::HADD2_C:
case OpCode::Id::HMUL2_C:
@@ -48,17 +72,16 @@ u32 ShaderIR::DecodeArithmeticHalf(NodeBlock& bb, u32 pc) {
}
}();
op_b = UnpackHalfFloat(op_b, type_b);
- // redeclaration to avoid a bug in clang with reusing local bindings in lambdas
- Node op_b_alt = GetOperandAbsNegHalf(op_b, instr.alu_half.abs_b, negate_b);
+ op_b = GetOperandAbsNegHalf(op_b, absolute_b, negate_b);
- Node value = [&]() {
+ Node value = [this, opcode, op_a, op_b = op_b] {
switch (opcode->get().GetId()) {
case OpCode::Id::HADD2_C:
case OpCode::Id::HADD2_R:
- return Operation(OperationCode::HAdd, PRECISE, op_a, op_b_alt);
+ return Operation(OperationCode::HAdd, PRECISE, op_a, op_b);
case OpCode::Id::HMUL2_C:
case OpCode::Id::HMUL2_R:
- return Operation(OperationCode::HMul, PRECISE, op_a, op_b_alt);
+ return Operation(OperationCode::HMul, PRECISE, op_a, op_b);
default:
UNIMPLEMENTED_MSG("Unhandled half float instruction: {}", opcode->get().GetName());
return Immediate(0);
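The explicit shifts replace the removed negate_a/negate_b BitFields because the bit positions differ per opcode (HADD2_R reads negate_b at bit 31, HADD2_C at bit 56), so a single shared field could not serve both encodings. The pattern is equivalent to reading a one-bit field of the 64-bit instruction word:

// One-bit field extraction: ((value >> N) & 1) reads the bit a
// BitField<N, 1, u64> member would.
#include <cassert>
#include <cstdint>

using u64 = std::uint64_t;

constexpr bool Bit(u64 value, unsigned position) {
    return ((value >> position) & 1) != 0;
}

int main() {
    const u64 instr = (u64{1} << 43) | (u64{1} << 56);
    assert(Bit(instr, 43));  // negate_a for HADD2_R/HADD2_C
    assert(Bit(instr, 56));  // negate_b for HADD2_C
    assert(!Bit(instr, 31)); // negate_b for HADD2_R is clear here
}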
diff --git a/src/video_core/shader/decode/arithmetic_integer.cpp b/src/video_core/shader/decode/arithmetic_integer.cpp
index 0f4c3103a..9af8c606d 100644
--- a/src/video_core/shader/decode/arithmetic_integer.cpp
+++ b/src/video_core/shader/decode/arithmetic_integer.cpp
@@ -249,8 +249,8 @@ u32 ShaderIR::DecodeArithmeticInteger(NodeBlock& bb, u32 pc) {
}
case OpCode::Id::LEA_IMM: {
const bool neg = instr.lea.imm.neg != 0;
- return {Immediate(static_cast<u32>(instr.lea.imm.entry_a)),
- GetOperandAbsNegInteger(GetRegister(instr.gpr8), false, neg, true),
+ return {GetOperandAbsNegInteger(GetRegister(instr.gpr8), false, neg, true),
+ Immediate(static_cast<u32>(instr.lea.imm.entry_a)),
Immediate(static_cast<u32>(instr.lea.imm.entry_b))};
}
case OpCode::Id::LEA_RZ: {