Diffstat (limited to 'src/video_core')
-rw-r--r--  src/video_core/buffer_cache/word_manager.h             |  2
-rw-r--r--  src/video_core/fence_manager.h                         |  2
-rw-r--r--  src/video_core/rasterizer_accelerated.cpp              | 99
-rw-r--r--  src/video_core/rasterizer_accelerated.h                | 29
-rw-r--r--  src/video_core/rasterizer_interface.h                  |  2
-rw-r--r--  src/video_core/renderer_opengl/gl_buffer_cache.cpp     | 45
-rw-r--r--  src/video_core/renderer_opengl/gl_buffer_cache.h       |  1
-rw-r--r--  src/video_core/renderer_opengl/gl_device.cpp           |  1
-rw-r--r--  src/video_core/renderer_opengl/gl_device.h             |  5
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.cpp       | 14
-rw-r--r--  src/video_core/renderer_opengl/renderer_opengl.cpp     | 16
-rw-r--r--  src/video_core/renderer_vulkan/vk_present_manager.cpp  |  6
-rw-r--r--  src/video_core/renderer_vulkan/vk_update_descriptor.h  |  2
-rw-r--r--  src/video_core/shader_cache.cpp                        |  4
-rw-r--r--  src/video_core/texture_cache/texture_cache.h           | 10
-rw-r--r--  src/video_core/texture_cache/texture_cache_base.h      |  2
16 files changed, 150 insertions, 90 deletions
diff --git a/src/video_core/buffer_cache/word_manager.h b/src/video_core/buffer_cache/word_manager.h
index 95b752055..a336bde41 100644
--- a/src/video_core/buffer_cache/word_manager.h
+++ b/src/video_core/buffer_cache/word_manager.h
@@ -473,7 +473,7 @@ private:
VAddr addr = cpu_addr + word_index * BYTES_PER_WORD;
IteratePages(changed_bits, [&](size_t offset, size_t size) {
rasterizer->UpdatePagesCachedCount(addr + offset * BYTES_PER_PAGE,
- size * BYTES_PER_PAGE, add_to_rasterizer);
+ size * BYTES_PER_PAGE, add_to_rasterizer ? 1 : -1);
});
}
diff --git a/src/video_core/fence_manager.h b/src/video_core/fence_manager.h
index 805a89900..c3eda6893 100644
--- a/src/video_core/fence_manager.h
+++ b/src/video_core/fence_manager.h
@@ -270,7 +270,7 @@ private:
std::jthread fence_thread;
- DelayedDestructionRing<TFence, 6> delayed_destruction_ring;
+ DelayedDestructionRing<TFence, 8> delayed_destruction_ring;
};
} // namespace VideoCommon
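Note (not part of the patch): the ring depth here goes from 6 to 8 so it stays in step with the FRAMES_IN_FLIGHT and TICKS_TO_DESTROY bumps later in this diff. A minimal sketch of the delayed-destruction idea, assuming a Push/Tick interface like VideoCommon::DelayedDestructionRing; names and details are illustrative, not the exact yuzu implementation:

#include <array>
#include <utility>
#include <vector>

// Holds objects for TICKS ticks before destroying them, so the GPU has time
// to stop using them. Tick() advances to the next slot and clears it, which
// frees whatever was pushed TICKS ticks earlier.
template <typename T, size_t TICKS>
class DelayedDestructionRing {
public:
    void Tick() {
        index = (index + 1) % TICKS;
        ring[index].clear();
    }

    void Push(T&& object) {
        ring[index].push_back(std::move(object));
    }

private:
    size_t index = 0;
    std::array<std::vector<T>, TICKS> ring;
};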
diff --git a/src/video_core/rasterizer_accelerated.cpp b/src/video_core/rasterizer_accelerated.cpp
index 3c9477f6e..f200a650f 100644
--- a/src/video_core/rasterizer_accelerated.cpp
+++ b/src/video_core/rasterizer_accelerated.cpp
@@ -3,7 +3,6 @@
#include <atomic>
-#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "common/div_ceil.h"
@@ -12,65 +11,61 @@
namespace VideoCore {
-static constexpr u16 IdentityValue = 1;
-
using namespace Core::Memory;
-RasterizerAccelerated::RasterizerAccelerated(Memory& cpu_memory_) : map{}, cpu_memory{cpu_memory_} {
- // We are tracking CPU memory, which cannot map more than 39 bits.
- const VAddr start_address = 0;
- const VAddr end_address = (1ULL << 39);
- const IntervalType address_space_interval(start_address, end_address);
- const auto value = std::make_pair(address_space_interval, IdentityValue);
-
- map.add(value);
-}
+RasterizerAccelerated::RasterizerAccelerated(Memory& cpu_memory_)
+ : cached_pages(std::make_unique<CachedPages>()), cpu_memory{cpu_memory_} {}
RasterizerAccelerated::~RasterizerAccelerated() = default;
-void RasterizerAccelerated::UpdatePagesCachedCount(VAddr addr, u64 size, bool cache) {
- std::scoped_lock lk{map_lock};
-
- // Align sizes.
- addr = Common::AlignDown(addr, YUZU_PAGESIZE);
- size = Common::AlignUp(size, YUZU_PAGESIZE);
-
- // Declare the overall interval we are going to operate on.
- const VAddr start_address = addr;
- const VAddr end_address = addr + size;
- const IntervalType modification_range(start_address, end_address);
-
- // Find the boundaries of where to iterate.
- const auto lower = map.lower_bound(modification_range);
- const auto upper = map.upper_bound(modification_range);
-
- // Iterate over the contained intervals.
- for (auto it = lower; it != upper; it++) {
- // Intersect interval range with modification range.
- const auto current_range = modification_range & it->first;
-
- // Calculate the address and size to operate over.
- const auto current_addr = current_range.lower();
- const auto current_size = current_range.upper() - current_addr;
-
- // Get the current value of the range.
- const auto value = it->second;
+void RasterizerAccelerated::UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {
+ u64 uncache_begin = 0;
+ u64 cache_begin = 0;
+ u64 uncache_bytes = 0;
+ u64 cache_bytes = 0;
+
+ std::atomic_thread_fence(std::memory_order_acquire);
+ const u64 page_end = Common::DivCeil(addr + size, YUZU_PAGESIZE);
+ for (u64 page = addr >> YUZU_PAGEBITS; page != page_end; ++page) {
+ std::atomic_uint16_t& count = cached_pages->at(page >> 2).Count(page);
+
+ if (delta > 0) {
+ ASSERT_MSG(count.load(std::memory_order::relaxed) < UINT16_MAX, "Count may overflow!");
+ } else if (delta < 0) {
+ ASSERT_MSG(count.load(std::memory_order::relaxed) > 0, "Count may underflow!");
+ } else {
+ ASSERT_MSG(false, "Delta must be non-zero!");
+ }
- if (cache && value == IdentityValue) {
- // If we are going to cache, and the value is not yet referenced, then cache this range.
- cpu_memory.RasterizerMarkRegionCached(current_addr, current_size, true);
- } else if (!cache && value == IdentityValue + 1) {
- // If we are going to uncache, and this is the last reference, then uncache this range.
- cpu_memory.RasterizerMarkRegionCached(current_addr, current_size, false);
+ // Adds or subtracts 1, as count is an unsigned 16-bit value
+ count.fetch_add(static_cast<u16>(delta), std::memory_order_release);
+
+ // Assume delta is either -1 or 1
+ if (count.load(std::memory_order::relaxed) == 0) {
+ if (uncache_bytes == 0) {
+ uncache_begin = page;
+ }
+ uncache_bytes += YUZU_PAGESIZE;
+ } else if (uncache_bytes > 0) {
+ cpu_memory.RasterizerMarkRegionCached(uncache_begin << YUZU_PAGEBITS, uncache_bytes,
+ false);
+ uncache_bytes = 0;
+ }
+ if (count.load(std::memory_order::relaxed) == 1 && delta > 0) {
+ if (cache_bytes == 0) {
+ cache_begin = page;
+ }
+ cache_bytes += YUZU_PAGESIZE;
+ } else if (cache_bytes > 0) {
+ cpu_memory.RasterizerMarkRegionCached(cache_begin << YUZU_PAGEBITS, cache_bytes, true);
+ cache_bytes = 0;
}
}
-
- // Update the set.
- const auto value = std::make_pair(modification_range, IdentityValue);
- if (cache) {
- map.add(value);
- } else {
- map.subtract(value);
+ if (uncache_bytes > 0) {
+ cpu_memory.RasterizerMarkRegionCached(uncache_begin << YUZU_PAGEBITS, uncache_bytes, false);
+ }
+ if (cache_bytes > 0) {
+ cpu_memory.RasterizerMarkRegionCached(cache_begin << YUZU_PAGEBITS, cache_bytes, true);
}
}
diff --git a/src/video_core/rasterizer_accelerated.h b/src/video_core/rasterizer_accelerated.h
index f1968f186..e6c0ea87a 100644
--- a/src/video_core/rasterizer_accelerated.h
+++ b/src/video_core/rasterizer_accelerated.h
@@ -3,8 +3,8 @@
#pragma once
-#include <mutex>
-#include <boost/icl/interval_map.hpp>
+#include <array>
+#include <atomic>
#include "common/common_types.h"
#include "video_core/rasterizer_interface.h"
@@ -21,17 +21,28 @@ public:
explicit RasterizerAccelerated(Core::Memory::Memory& cpu_memory_);
~RasterizerAccelerated() override;
- void UpdatePagesCachedCount(VAddr addr, u64 size, bool cache) override;
+ void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) override;
private:
- using PageIndex = VAddr;
- using PageReferenceCount = u16;
+ class CacheEntry final {
+ public:
+ CacheEntry() = default;
- using IntervalMap = boost::icl::interval_map<PageIndex, PageReferenceCount>;
- using IntervalType = IntervalMap::interval_type;
+ std::atomic_uint16_t& Count(std::size_t page) {
+ return values[page & 3];
+ }
- IntervalMap map;
- std::mutex map_lock;
+ const std::atomic_uint16_t& Count(std::size_t page) const {
+ return values[page & 3];
+ }
+
+ private:
+ std::array<std::atomic_uint16_t, 4> values{};
+ };
+ static_assert(sizeof(CacheEntry) == 8, "CacheEntry should be 8 bytes!");
+
+ using CachedPages = std::array<CacheEntry, 0x2000000>;
+ std::unique_ptr<CachedPages> cached_pages;
Core::Memory::Memory& cpu_memory;
};
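Note (not part of the patch): the boost::icl interval_map is replaced with a flat array of atomic 16-bit reference counters, one per 4 KiB page, grouped four to a cache-line-friendly CacheEntry, and contiguous pages that cross the cached/uncached threshold are coalesced into batched region updates. A self-contained sketch of that counting and coalescing logic, with simplified names and a plain callback standing in for Memory::RasterizerMarkRegionCached (illustrative only):

#include <atomic>
#include <cstdint>
#include <cstdio>
#include <vector>

constexpr std::uint64_t PAGE_BITS = 12; // 4 KiB pages
constexpr std::uint64_t PAGE_SIZE = 1ULL << PAGE_BITS;

// One atomic reference count per page; a real build sizes this for the whole
// 39-bit address space (0x2000000 CacheEntry groups of four counters).
std::vector<std::atomic<std::uint16_t>> counts(1024);

// Stand-in for RasterizerMarkRegionCached.
void MarkRegion(std::uint64_t addr, std::uint64_t size, bool cache) {
    std::printf("%scache [0x%llx, +0x%llx)\n", cache ? "" : "un",
                static_cast<unsigned long long>(addr),
                static_cast<unsigned long long>(size));
}

void UpdatePagesCachedCount(std::uint64_t addr, std::uint64_t size, int delta) {
    std::uint64_t cache_begin = 0, cache_bytes = 0;
    std::uint64_t uncache_begin = 0, uncache_bytes = 0;
    const std::uint64_t page_end = (addr + size + PAGE_SIZE - 1) >> PAGE_BITS;
    for (std::uint64_t page = addr >> PAGE_BITS; page != page_end; ++page) {
        auto& count = counts[page];
        // Adding the delta as an unsigned value also handles delta == -1.
        count.fetch_add(static_cast<std::uint16_t>(delta), std::memory_order_release);
        const std::uint16_t now = count.load(std::memory_order_relaxed);
        if (now == 0) { // last reference gone: batch this page into an uncache run
            if (uncache_bytes == 0) uncache_begin = page;
            uncache_bytes += PAGE_SIZE;
        } else if (uncache_bytes > 0) {
            MarkRegion(uncache_begin << PAGE_BITS, uncache_bytes, false);
            uncache_bytes = 0;
        }
        if (now == 1 && delta > 0) { // first reference: batch this page into a cache run
            if (cache_bytes == 0) cache_begin = page;
            cache_bytes += PAGE_SIZE;
        } else if (cache_bytes > 0) {
            MarkRegion(cache_begin << PAGE_BITS, cache_bytes, true);
            cache_bytes = 0;
        }
    }
    if (uncache_bytes > 0) MarkRegion(uncache_begin << PAGE_BITS, uncache_bytes, false);
    if (cache_bytes > 0) MarkRegion(cache_begin << PAGE_BITS, cache_bytes, true);
}

int main() {
    UpdatePagesCachedCount(0x1000, 0x3000, 1);  // cache three pages in one call
    UpdatePagesCachedCount(0x1000, 0x3000, -1); // drop them again in one call
}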
diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h
index fd42d26b5..af1469147 100644
--- a/src/video_core/rasterizer_interface.h
+++ b/src/video_core/rasterizer_interface.h
@@ -162,7 +162,7 @@ public:
}
/// Increase/decrease the number of objects in pages touching the specified region
- virtual void UpdatePagesCachedCount(VAddr addr, u64 size, bool cache) {}
+ virtual void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {}
/// Initialize disk cached resources for the game being emulated
virtual void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
index a71866b75..b787b6994 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
@@ -58,6 +58,9 @@ Buffer::Buffer(BufferCacheRuntime& runtime, VideoCore::RasterizerInterface& rast
glObjectLabel(GL_BUFFER, buffer.handle, static_cast<GLsizei>(name.size()), name.data());
}
glNamedBufferData(buffer.handle, SizeBytes(), nullptr, GL_DYNAMIC_DRAW);
+ if (runtime.has_unified_vertex_buffers) {
+ glGetNamedBufferParameterui64vNV(buffer.handle, GL_BUFFER_GPU_ADDRESS_NV, &address);
+ }
}
void Buffer::ImmediateUpload(size_t offset, std::span<const u8> data) noexcept {
@@ -109,6 +112,7 @@ BufferCacheRuntime::BufferCacheRuntime(const Device& device_,
: device{device_}, staging_buffer_pool{staging_buffer_pool_},
has_fast_buffer_sub_data{device.HasFastBufferSubData()},
use_assembly_shaders{device.UseAssemblyShaders()},
+ has_unified_vertex_buffers{device.HasVertexBufferUnifiedMemory()},
stream_buffer{has_fast_buffer_sub_data ? std::nullopt : std::make_optional<StreamBuffer>()} {
GLint gl_max_attributes;
glGetIntegerv(GL_MAX_VERTEX_ATTRIBS, &gl_max_attributes);
@@ -210,8 +214,14 @@ void BufferCacheRuntime::ClearBuffer(Buffer& dest_buffer, u32 offset, size_t siz
}
void BufferCacheRuntime::BindIndexBuffer(Buffer& buffer, u32 offset, u32 size) {
- glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, buffer.Handle());
- index_buffer_offset = offset;
+ if (has_unified_vertex_buffers) {
+ buffer.MakeResident(GL_READ_ONLY);
+ glBufferAddressRangeNV(GL_ELEMENT_ARRAY_ADDRESS_NV, 0, buffer.HostGpuAddr() + offset,
+ static_cast<GLsizeiptr>(Common::AlignUp(size, 4)));
+ } else {
+ glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, buffer.Handle());
+ index_buffer_offset = offset;
+ }
}
void BufferCacheRuntime::BindVertexBuffer(u32 index, Buffer& buffer, u32 offset, u32 size,
@@ -219,8 +229,15 @@ void BufferCacheRuntime::BindVertexBuffer(u32 index, Buffer& buffer, u32 offset,
if (index >= max_attributes) {
return;
}
- glBindVertexBuffer(index, buffer.Handle(), static_cast<GLintptr>(offset),
- static_cast<GLsizei>(stride));
+ if (has_unified_vertex_buffers) {
+ buffer.MakeResident(GL_READ_ONLY);
+ glBindVertexBuffer(index, 0, 0, static_cast<GLsizei>(stride));
+ glBufferAddressRangeNV(GL_VERTEX_ATTRIB_ARRAY_ADDRESS_NV, index,
+ buffer.HostGpuAddr() + offset, static_cast<GLsizeiptr>(size));
+ } else {
+ glBindVertexBuffer(index, buffer.Handle(), static_cast<GLintptr>(offset),
+ static_cast<GLsizei>(stride));
+ }
}
void BufferCacheRuntime::BindVertexBuffers(VideoCommon::HostBindings<Buffer>& bindings) {
@@ -233,9 +250,23 @@ void BufferCacheRuntime::BindVertexBuffers(VideoCommon::HostBindings<Buffer>& bi
[](u64 stride) { return static_cast<GLsizei>(stride); });
const u32 count =
std::min(static_cast<u32>(bindings.buffers.size()), max_attributes - bindings.min_index);
- glBindVertexBuffers(bindings.min_index, static_cast<GLsizei>(count), buffer_handles.data(),
- reinterpret_cast<const GLintptr*>(bindings.offsets.data()),
- buffer_strides.data());
+ if (has_unified_vertex_buffers) {
+ for (u32 index = 0; index < count; ++index) {
+ Buffer& buffer = *bindings.buffers[index];
+ buffer.MakeResident(GL_READ_ONLY);
+ glBufferAddressRangeNV(GL_VERTEX_ATTRIB_ARRAY_ADDRESS_NV, bindings.min_index + index,
+ buffer.HostGpuAddr() + bindings.offsets[index],
+ static_cast<GLsizeiptr>(bindings.sizes[index]));
+ }
+ static constexpr std::array<size_t, 32> ZEROS{};
+ glBindVertexBuffers(bindings.min_index, static_cast<GLsizei>(count),
+ reinterpret_cast<const GLuint*>(ZEROS.data()),
+ reinterpret_cast<const GLintptr*>(ZEROS.data()), buffer_strides.data());
+ } else {
+ glBindVertexBuffers(bindings.min_index, static_cast<GLsizei>(count), buffer_handles.data(),
+ reinterpret_cast<const GLintptr*>(bindings.offsets.data()),
+ buffer_strides.data());
+ }
}
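Note (not part of the patch): the new binding paths use GL_NV_vertex_buffer_unified_memory, where a buffer is made resident once, its GPU address is queried, and draws then bind raw address ranges instead of buffer handles. A sketch of that call sequence using the NV entry points that appear in this diff; it assumes an active GL 4.5 context (e.g. via glad) that reports the extension, and parameter names are placeholders:

// Sketch only: error checking and buffer lifetime management omitted.
void BindWithUnifiedMemory(GLsizeiptr size_bytes, GLsizei stride,
                           GLintptr vertex_offset, GLsizeiptr vertex_size,
                           GLintptr index_offset, GLsizeiptr index_size) {
    GLuint buffer = 0;
    GLuint64EXT gpu_address = 0;
    glCreateBuffers(1, &buffer);
    glNamedBufferData(buffer, size_bytes, nullptr, GL_DYNAMIC_DRAW);

    // Make the buffer resident and query the address the GPU will fetch from.
    glMakeNamedBufferResidentNV(buffer, GL_READ_ONLY);
    glGetNamedBufferParameterui64vNV(buffer, GL_BUFFER_GPU_ADDRESS_NV, &gpu_address);

    // Once per context: switch vertex and index fetch to address-based mode.
    glEnableClientState(GL_VERTEX_ATTRIB_ARRAY_UNIFIED_NV);
    glEnableClientState(GL_ELEMENT_ARRAY_UNIFIED_NV);

    // Per draw: bind raw address ranges; binding handle 0 keeps only the stride.
    glBindVertexBuffer(0, 0, 0, stride);
    glBufferAddressRangeNV(GL_VERTEX_ATTRIB_ARRAY_ADDRESS_NV, 0,
                           gpu_address + vertex_offset, vertex_size);
    glBufferAddressRangeNV(GL_ELEMENT_ARRAY_ADDRESS_NV, 0,
                           gpu_address + index_offset, index_size);
}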
void BufferCacheRuntime::BindUniformBuffer(size_t stage, u32 binding_index, Buffer& buffer,
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.h b/src/video_core/renderer_opengl/gl_buffer_cache.h
index 71cd45d35..1e8708f59 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.h
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.h
@@ -209,6 +209,7 @@ private:
bool has_fast_buffer_sub_data = false;
bool use_assembly_shaders = false;
+ bool has_unified_vertex_buffers = false;
bool use_storage_buffers = false;
diff --git a/src/video_core/renderer_opengl/gl_device.cpp b/src/video_core/renderer_opengl/gl_device.cpp
index a6c93068f..993438a27 100644
--- a/src/video_core/renderer_opengl/gl_device.cpp
+++ b/src/video_core/renderer_opengl/gl_device.cpp
@@ -200,6 +200,7 @@ Device::Device(Core::Frontend::EmuWindow& emu_window) {
has_broken_texture_view_formats = is_amd || (!is_linux && is_intel);
has_nv_viewport_array2 = GLAD_GL_NV_viewport_array2;
has_derivative_control = GLAD_GL_ARB_derivative_control;
+ has_vertex_buffer_unified_memory = GLAD_GL_NV_vertex_buffer_unified_memory;
has_debugging_tool_attached = IsDebugToolAttached(extensions);
has_depth_buffer_float = HasExtension(extensions, "GL_NV_depth_buffer_float");
has_geometry_shader_passthrough = GLAD_GL_NV_geometry_shader_passthrough;
diff --git a/src/video_core/renderer_opengl/gl_device.h b/src/video_core/renderer_opengl/gl_device.h
index 96034ea4a..a5a6bbbba 100644
--- a/src/video_core/renderer_opengl/gl_device.h
+++ b/src/video_core/renderer_opengl/gl_device.h
@@ -72,6 +72,10 @@ public:
return has_texture_shadow_lod;
}
+ bool HasVertexBufferUnifiedMemory() const {
+ return has_vertex_buffer_unified_memory;
+ }
+
bool HasASTC() const {
return has_astc;
}
@@ -211,6 +215,7 @@ private:
bool has_vertex_viewport_layer{};
bool has_image_load_formatted{};
bool has_texture_shadow_lod{};
+ bool has_vertex_buffer_unified_memory{};
bool has_astc{};
bool has_variable_aoffi{};
bool has_component_indexing_bug{};
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 279e5a4e0..4832c03c5 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -162,14 +162,18 @@ void RasterizerOpenGL::Clear(u32 layer_count) {
SyncFramebufferSRGB();
}
if (regs.clear_surface.Z) {
- ASSERT_MSG(regs.zeta_enable != 0, "Tried to clear Z but buffer is not enabled!");
+ if (regs.zeta_enable == 0) {
+ LOG_DEBUG(Render_OpenGL, "Tried to clear Z but buffer is not enabled!");
+ }
use_depth = true;
state_tracker.NotifyDepthMask();
glDepthMask(GL_TRUE);
}
if (regs.clear_surface.S) {
- ASSERT_MSG(regs.zeta_enable, "Tried to clear stencil but buffer is not enabled!");
+ if (!regs.zeta_enable) {
+ LOG_DEBUG(Render_OpenGL, "Tried to clear stencil but buffer is not enabled!");
+ }
use_stencil = true;
}
@@ -1294,15 +1298,13 @@ void RasterizerOpenGL::BeginTransformFeedback(GraphicsPipeline* program, GLenum
program->ConfigureTransformFeedback();
UNIMPLEMENTED_IF(regs.IsShaderConfigEnabled(Maxwell::ShaderType::TessellationInit) ||
- regs.IsShaderConfigEnabled(Maxwell::ShaderType::Tessellation) ||
- regs.IsShaderConfigEnabled(Maxwell::ShaderType::Geometry));
- UNIMPLEMENTED_IF(primitive_mode != GL_POINTS);
+ regs.IsShaderConfigEnabled(Maxwell::ShaderType::Tessellation));
// We may have to call BeginTransformFeedbackNV here since they seem to call different
// implementations on Nvidia's driver (the pointer is different) but we are using
// ARB_transform_feedback3 features with NV_transform_feedback interactions and the ARB
// extension doesn't define BeginTransformFeedback (without NV) interactions. It just works.
- glBeginTransformFeedback(GL_POINTS);
+ glBeginTransformFeedback(primitive_mode);
}
void RasterizerOpenGL::EndTransformFeedback() {
diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp
index 7a4f0c5c1..2933718b6 100644
--- a/src/video_core/renderer_opengl/renderer_opengl.cpp
+++ b/src/video_core/renderer_opengl/renderer_opengl.cpp
@@ -168,6 +168,14 @@ RendererOpenGL::RendererOpenGL(Core::TelemetrySession& telemetry_session_,
if (!GLAD_GL_ARB_seamless_cubemap_per_texture && !GLAD_GL_AMD_seamless_cubemap_per_texture) {
glEnable(GL_TEXTURE_CUBE_MAP_SEAMLESS);
}
+ // Enable unified vertex attributes and query vertex buffer address when the driver supports it
+ if (device.HasVertexBufferUnifiedMemory()) {
+ glEnableClientState(GL_VERTEX_ATTRIB_ARRAY_UNIFIED_NV);
+ glEnableClientState(GL_ELEMENT_ARRAY_UNIFIED_NV);
+ glMakeNamedBufferResidentNV(vertex_buffer.handle, GL_READ_ONLY);
+ glGetNamedBufferParameterui64vNV(vertex_buffer.handle, GL_BUFFER_GPU_ADDRESS_NV,
+ &vertex_buffer_address);
+ }
}
RendererOpenGL::~RendererOpenGL() = default;
@@ -667,7 +675,13 @@ void RendererOpenGL::DrawScreen(const Layout::FramebufferLayout& layout) {
offsetof(ScreenRectVertex, tex_coord));
glVertexAttribBinding(PositionLocation, 0);
glVertexAttribBinding(TexCoordLocation, 0);
- glBindVertexBuffer(0, vertex_buffer.handle, 0, sizeof(ScreenRectVertex));
+ if (device.HasVertexBufferUnifiedMemory()) {
+ glBindVertexBuffer(0, 0, 0, sizeof(ScreenRectVertex));
+ glBufferAddressRangeNV(GL_VERTEX_ATTRIB_ARRAY_ADDRESS_NV, 0, vertex_buffer_address,
+ sizeof(vertices));
+ } else {
+ glBindVertexBuffer(0, vertex_buffer.handle, 0, sizeof(ScreenRectVertex));
+ }
if (Settings::values.scaling_filter.GetValue() != Settings::ScalingFilter::NearestNeighbor) {
glBindSampler(0, present_sampler.handle);
diff --git a/src/video_core/renderer_vulkan/vk_present_manager.cpp b/src/video_core/renderer_vulkan/vk_present_manager.cpp
index a59e2d2d1..5e7518d96 100644
--- a/src/video_core/renderer_vulkan/vk_present_manager.cpp
+++ b/src/video_core/renderer_vulkan/vk_present_manager.cpp
@@ -293,10 +293,10 @@ void PresentManager::RecreateSwapchain(Frame* frame) {
}
void PresentManager::SetImageCount() {
- // We cannot have more than 5 images in flight at any given time.
- // FRAMES_IN_FLIGHT is 7, and the cache TICKS_TO_DESTROY is 6.
+ // We cannot have more than 7 images in flight at any given time.
+ // FRAMES_IN_FLIGHT is 8, and the cache TICKS_TO_DESTROY is 8.
// Mali drivers will give us 6.
- image_count = std::min<size_t>(swapchain.GetImageCount(), 5);
+ image_count = std::min<size_t>(swapchain.GetImageCount(), 7);
}
void PresentManager::CopyToSwapchain(Frame* frame) {
diff --git a/src/video_core/renderer_vulkan/vk_update_descriptor.h b/src/video_core/renderer_vulkan/vk_update_descriptor.h
index e77b576ec..82fce298d 100644
--- a/src/video_core/renderer_vulkan/vk_update_descriptor.h
+++ b/src/video_core/renderer_vulkan/vk_update_descriptor.h
@@ -31,7 +31,7 @@ struct DescriptorUpdateEntry {
class UpdateDescriptorQueue final {
// This should be plenty for the vast majority of cases. Most desktop platforms only
// provide up to 3 swapchain images.
- static constexpr size_t FRAMES_IN_FLIGHT = 7;
+ static constexpr size_t FRAMES_IN_FLIGHT = 8;
static constexpr size_t FRAME_PAYLOAD_SIZE = 0x20000;
static constexpr size_t PAYLOAD_SIZE = FRAME_PAYLOAD_SIZE * FRAMES_IN_FLIGHT;
diff --git a/src/video_core/shader_cache.cpp b/src/video_core/shader_cache.cpp
index a109f9cbe..e81cd031b 100644
--- a/src/video_core/shader_cache.cpp
+++ b/src/video_core/shader_cache.cpp
@@ -132,7 +132,7 @@ void ShaderCache::Register(std::unique_ptr<ShaderInfo> data, VAddr addr, size_t
storage.push_back(std::move(data));
- rasterizer.UpdatePagesCachedCount(addr, size, true);
+ rasterizer.UpdatePagesCachedCount(addr, size, 1);
}
void ShaderCache::InvalidatePagesInRegion(VAddr addr, size_t size) {
@@ -209,7 +209,7 @@ void ShaderCache::UnmarkMemory(Entry* entry) {
const VAddr addr = entry->addr_start;
const size_t size = entry->addr_end - addr;
- rasterizer.UpdatePagesCachedCount(addr, size, false);
+ rasterizer.UpdatePagesCachedCount(addr, size, -1);
}
void ShaderCache::RemoveShadersFromStorage(std::span<ShaderInfo*> removed_shaders) {
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index d7941f6a4..0d5a1709f 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -2080,7 +2080,7 @@ void TextureCache<P>::TrackImage(ImageBase& image, ImageId image_id) {
ASSERT(False(image.flags & ImageFlagBits::Tracked));
image.flags |= ImageFlagBits::Tracked;
if (False(image.flags & ImageFlagBits::Sparse)) {
- rasterizer.UpdatePagesCachedCount(image.cpu_addr, image.guest_size_bytes, true);
+ rasterizer.UpdatePagesCachedCount(image.cpu_addr, image.guest_size_bytes, 1);
return;
}
if (True(image.flags & ImageFlagBits::Registered)) {
@@ -2091,13 +2091,13 @@ void TextureCache<P>::TrackImage(ImageBase& image, ImageId image_id) {
const auto& map = slot_map_views[map_view_id];
const VAddr cpu_addr = map.cpu_addr;
const std::size_t size = map.size;
- rasterizer.UpdatePagesCachedCount(cpu_addr, size, true);
+ rasterizer.UpdatePagesCachedCount(cpu_addr, size, 1);
}
return;
}
ForEachSparseSegment(image,
[this]([[maybe_unused]] GPUVAddr gpu_addr, VAddr cpu_addr, size_t size) {
- rasterizer.UpdatePagesCachedCount(cpu_addr, size, true);
+ rasterizer.UpdatePagesCachedCount(cpu_addr, size, 1);
});
}
@@ -2106,7 +2106,7 @@ void TextureCache<P>::UntrackImage(ImageBase& image, ImageId image_id) {
ASSERT(True(image.flags & ImageFlagBits::Tracked));
image.flags &= ~ImageFlagBits::Tracked;
if (False(image.flags & ImageFlagBits::Sparse)) {
- rasterizer.UpdatePagesCachedCount(image.cpu_addr, image.guest_size_bytes, false);
+ rasterizer.UpdatePagesCachedCount(image.cpu_addr, image.guest_size_bytes, -1);
return;
}
ASSERT(True(image.flags & ImageFlagBits::Registered));
@@ -2117,7 +2117,7 @@ void TextureCache<P>::UntrackImage(ImageBase& image, ImageId image_id) {
const auto& map = slot_map_views[map_view_id];
const VAddr cpu_addr = map.cpu_addr;
const std::size_t size = map.size;
- rasterizer.UpdatePagesCachedCount(cpu_addr, size, false);
+ rasterizer.UpdatePagesCachedCount(cpu_addr, size, -1);
}
}
diff --git a/src/video_core/texture_cache/texture_cache_base.h b/src/video_core/texture_cache/texture_cache_base.h
index cbe56e166..6caf75b46 100644
--- a/src/video_core/texture_cache/texture_cache_base.h
+++ b/src/video_core/texture_cache/texture_cache_base.h
@@ -474,7 +474,7 @@ private:
};
Common::LeastRecentlyUsedCache<LRUItemParams> lru_cache;
- static constexpr size_t TICKS_TO_DESTROY = 6;
+ static constexpr size_t TICKS_TO_DESTROY = 8;
DelayedDestructionRing<Image, TICKS_TO_DESTROY> sentenced_images;
DelayedDestructionRing<ImageView, TICKS_TO_DESTROY> sentenced_image_view;
DelayedDestructionRing<Framebuffer, TICKS_TO_DESTROY> sentenced_framebuffers;