author    ameerj <52414509+ameerj@users.noreply.github.com>  2023-03-07 03:16:17 +0100
committer ameerj <52414509+ameerj@users.noreply.github.com>  2023-03-07 04:57:45 +0100
commit    6b9cc0ed23b15a1b96b322b03feff2153e44a4a9
tree      6949cedbe90b6d34e2092c8eba57b243defb21dc
parent    Merge pull request #9890 from Kelebek1/reverb_fix
-rw-r--r--  src/video_core/renderer_opengl/gl_texture_cache.cpp |  24
-rw-r--r--  src/video_core/renderer_opengl/gl_texture_cache.h   |   6
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.cpp    | 248
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.h      |   5
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.cpp |  22
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.h   |   6
-rw-r--r--  src/video_core/texture_cache/texture_cache.h        |  86
-rw-r--r--  src/video_core/texture_cache/texture_cache_base.h   |  10
8 files changed, 156 insertions(+), 251 deletions(-)
diff --git a/src/video_core/renderer_opengl/gl_texture_cache.cpp b/src/video_core/renderer_opengl/gl_texture_cache.cpp
index b047e7b3d..4b13e807d 100644
--- a/src/video_core/renderer_opengl/gl_texture_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_texture_cache.cpp
@@ -762,14 +762,14 @@ Image::Image(const VideoCommon::NullImageParams& params) : VideoCommon::ImageBas
Image::~Image() = default;
-void Image::UploadMemory(const ImageBufferMap& map,
+void Image::UploadMemory(GLuint buffer_handle, size_t buffer_offset,
std::span<const VideoCommon::BufferImageCopy> copies) {
const bool is_rescaled = True(flags & ImageFlagBits::Rescaled);
if (is_rescaled) {
ScaleDown(true);
}
- glBindBuffer(GL_PIXEL_UNPACK_BUFFER, map.buffer);
- glFlushMappedBufferRange(GL_PIXEL_UNPACK_BUFFER, map.offset, unswizzled_size_bytes);
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER, buffer_handle);
+ glFlushMappedBufferRange(GL_PIXEL_UNPACK_BUFFER, buffer_offset, unswizzled_size_bytes);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
@@ -788,21 +788,26 @@ void Image::UploadMemory(const ImageBufferMap& map,
current_image_height = copy.buffer_image_height;
glPixelStorei(GL_UNPACK_IMAGE_HEIGHT, current_image_height);
}
- CopyBufferToImage(copy, map.offset);
+ CopyBufferToImage(copy, buffer_offset);
}
if (is_rescaled) {
ScaleUp();
}
}
-void Image::DownloadMemory(ImageBufferMap& map,
+void Image::UploadMemory(const ImageBufferMap& map,
+ std::span<const VideoCommon::BufferImageCopy> copies) {
+ UploadMemory(map.buffer, map.offset, copies);
+}
+
+void Image::DownloadMemory(GLuint buffer_handle, size_t buffer_offset,
std::span<const VideoCommon::BufferImageCopy> copies) {
const bool is_rescaled = True(flags & ImageFlagBits::Rescaled);
if (is_rescaled) {
ScaleDown();
}
glMemoryBarrier(GL_PIXEL_BUFFER_BARRIER_BIT); // TODO: Move this to its own API
- glBindBuffer(GL_PIXEL_PACK_BUFFER, map.buffer);
+ glBindBuffer(GL_PIXEL_PACK_BUFFER, buffer_handle);
glPixelStorei(GL_PACK_ALIGNMENT, 1);
u32 current_row_length = std::numeric_limits<u32>::max();
@@ -820,13 +825,18 @@ void Image::DownloadMemory(ImageBufferMap& map,
current_image_height = copy.buffer_image_height;
glPixelStorei(GL_PACK_IMAGE_HEIGHT, current_image_height);
}
- CopyImageToBuffer(copy, map.offset);
+ CopyImageToBuffer(copy, buffer_offset);
}
if (is_rescaled) {
ScaleUp(true);
}
}
+void Image::DownloadMemory(ImageBufferMap& map,
+ std::span<const VideoCommon::BufferImageCopy> copies) {
+ DownloadMemory(map.buffer, map.offset, copies);
+}
+
GLuint Image::StorageHandle() noexcept {
switch (info.format) {
case PixelFormat::A8B8G8R8_SRGB:
diff --git a/src/video_core/renderer_opengl/gl_texture_cache.h b/src/video_core/renderer_opengl/gl_texture_cache.h
index e30875496..911e4607a 100644
--- a/src/video_core/renderer_opengl/gl_texture_cache.h
+++ b/src/video_core/renderer_opengl/gl_texture_cache.h
@@ -206,9 +206,15 @@ public:
Image(Image&&) = default;
Image& operator=(Image&&) = default;
+ void UploadMemory(GLuint buffer_handle, size_t buffer_offset,
+ std::span<const VideoCommon::BufferImageCopy> copies);
+
void UploadMemory(const ImageBufferMap& map,
std::span<const VideoCommon::BufferImageCopy> copies);
+ void DownloadMemory(GLuint buffer_handle, size_t buffer_offset,
+ std::span<const VideoCommon::BufferImageCopy> copies);
+
void DownloadMemory(ImageBufferMap& map, std::span<const VideoCommon::BufferImageCopy> copies);
GLuint StorageHandle() noexcept;
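
Note: the raw-handle overloads declared above are what the accelerated DMA path calls into; the existing ImageBufferMap overloads now simply forward to them. A minimal sketch of the intended call pattern, assuming an OpenGL Image `image`, a pixel-buffer object handle `pbo`, a byte `offset` into it, and a prepared `copies` span (those three names are illustrative, not from the patch):

    // Upload: binds GL_PIXEL_UNPACK_BUFFER internally, as the .cpp hunk above shows.
    image.UploadMemory(pbo, offset, copies);
    // Download: same shape, packing into the bound GL_PIXEL_PACK_BUFFER.
    image.DownloadMemory(pbo, offset, copies);
    // The map-based overloads reduce to:
    image.UploadMemory(map.buffer, map.offset, copies);
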
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index f085d53a1..a00cf1569 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -770,232 +770,44 @@ bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64
return buffer_cache.DMACopy(src_address, dest_address, amount);
}
-bool AccelerateDMA::ImageToBuffer(const Tegra::DMA::ImageCopy& copy_info,
- const Tegra::DMA::ImageOperand& src,
- const Tegra::DMA::BufferOperand& dst) {
+template <bool IS_IMAGE_UPLOAD>
+bool AccelerateDMA::DmaBufferImageCopy(const Tegra::DMA::ImageCopy& copy_info,
+ const Tegra::DMA::BufferOperand& buffer_operand,
+ const Tegra::DMA::ImageOperand& image_operand) {
std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
- auto query_image = texture_cache.ObtainImage(src, false);
- if (!query_image) {
+ const auto image_id = texture_cache.DmaImageId(image_operand);
+ if (image_id == VideoCommon::NULL_IMAGE_ID) {
return false;
}
- auto* image = query_image->first;
- auto [level, base] = query_image->second;
- const u32 buffer_size = static_cast<u32>(dst.pitch * dst.height);
- const auto [buffer, offset] = buffer_cache.ObtainBuffer(
- dst.address, buffer_size, VideoCommon::ObtainBufferSynchronize::FullSynchronize,
- VideoCommon::ObtainBufferOperation::MarkAsWritten);
-
- const bool is_rescaled = image->IsRescaled();
- if (is_rescaled) {
- image->ScaleDown();
- }
- VkImageSubresourceLayers subresources{
- .aspectMask = image->AspectMask(),
- .mipLevel = level,
- .baseArrayLayer = base,
- .layerCount = 1,
- };
- const u32 bpp = VideoCore::Surface::BytesPerBlock(image->info.format);
- const auto convert = [old_bpp = src.bytes_per_pixel, bpp](u32 value) {
- return (old_bpp * value) / bpp;
- };
- const u32 base_x = convert(src.params.origin.x.Value());
- const u32 base_y = src.params.origin.y.Value();
- const u32 length_x = convert(copy_info.length_x);
- const u32 length_y = copy_info.length_y;
- VkOffset3D image_offset{
- .x = static_cast<s32>(base_x),
- .y = static_cast<s32>(base_y),
- .z = 0,
- };
- VkExtent3D image_extent{
- .width = length_x,
- .height = length_y,
- .depth = 1,
- };
- auto buff_info(dst);
- buff_info.pitch = convert(dst.pitch);
- scheduler.RequestOutsideRenderPassOperationContext();
- scheduler.Record([src_image = image->Handle(), dst_buffer = buffer->Handle(),
- buffer_offset = offset, subresources, image_offset, image_extent,
- buff_info](vk::CommandBuffer cmdbuf) {
- const std::array buffer_copy_info{
- VkBufferImageCopy{
- .bufferOffset = buffer_offset,
- .bufferRowLength = buff_info.pitch,
- .bufferImageHeight = buff_info.height,
- .imageSubresource = subresources,
- .imageOffset = image_offset,
- .imageExtent = image_extent,
- },
- };
- const VkImageSubresourceRange range{
- .aspectMask = subresources.aspectMask,
- .baseMipLevel = subresources.mipLevel,
- .levelCount = 1,
- .baseArrayLayer = subresources.baseArrayLayer,
- .layerCount = 1,
- };
- static constexpr VkMemoryBarrier WRITE_BARRIER{
- .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
- .pNext = nullptr,
- .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
- .dstAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT,
- };
- const std::array pre_barriers{
- VkImageMemoryBarrier{
- .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
- .pNext = nullptr,
- .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
- VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
- VK_ACCESS_TRANSFER_WRITE_BIT,
- .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
- .oldLayout = VK_IMAGE_LAYOUT_GENERAL,
- .newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
- .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
- .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
- .image = src_image,
- .subresourceRange = range,
- },
- };
- const std::array post_barriers{
- VkImageMemoryBarrier{
- .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
- .pNext = nullptr,
- .srcAccessMask = 0,
- .dstAccessMask = 0,
- .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
- .newLayout = VK_IMAGE_LAYOUT_GENERAL,
- .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
- .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
- .image = src_image,
- .subresourceRange = range,
- },
- };
- cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
- 0, {}, {}, pre_barriers);
- cmdbuf.CopyImageToBuffer(src_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dst_buffer,
- buffer_copy_info);
- cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
- 0, WRITE_BARRIER, nullptr, post_barriers);
- });
- if (is_rescaled) {
- image->ScaleUp(true);
+ const u32 buffer_size = static_cast<u32>(buffer_operand.pitch * buffer_operand.height);
+ static constexpr auto sync_info = VideoCommon::ObtainBufferSynchronize::FullSynchronize;
+ const auto post_op = IS_IMAGE_UPLOAD ? VideoCommon::ObtainBufferOperation::DoNothing
+ : VideoCommon::ObtainBufferOperation::MarkAsWritten;
+ const auto [buffer, offset] =
+ buffer_cache.ObtainBuffer(buffer_operand.address, buffer_size, sync_info, post_op);
+
+ const auto [image, copy] = texture_cache.DmaBufferImageCopy(
+ copy_info, buffer_operand, image_operand, image_id, IS_IMAGE_UPLOAD);
+ const std::span copy_span{&copy, 1};
+
+ if constexpr (IS_IMAGE_UPLOAD) {
+ image->UploadMemory(buffer->Handle(), offset, copy_span);
+ } else {
+ image->DownloadMemory(buffer->Handle(), offset, copy_span);
}
return true;
}
+bool AccelerateDMA::ImageToBuffer(const Tegra::DMA::ImageCopy& copy_info,
+ const Tegra::DMA::ImageOperand& image_operand,
+ const Tegra::DMA::BufferOperand& buffer_operand) {
+ return DmaBufferImageCopy<false>(copy_info, buffer_operand, image_operand);
+}
+
bool AccelerateDMA::BufferToImage(const Tegra::DMA::ImageCopy& copy_info,
- const Tegra::DMA::BufferOperand& src,
- const Tegra::DMA::ImageOperand& dst) {
- std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
- auto query_image = texture_cache.ObtainImage(dst, true);
- if (!query_image) {
- return false;
- }
- auto* image = query_image->first;
- auto [level, base] = query_image->second;
- const u32 buffer_size = static_cast<u32>(src.pitch * src.height);
- const auto [buffer, offset] = buffer_cache.ObtainBuffer(
- src.address, buffer_size, VideoCommon::ObtainBufferSynchronize::FullSynchronize,
- VideoCommon::ObtainBufferOperation::DoNothing);
- const bool is_rescaled = image->IsRescaled();
- if (is_rescaled) {
- image->ScaleDown(true);
- }
- VkImageSubresourceLayers subresources{
- .aspectMask = image->AspectMask(),
- .mipLevel = level,
- .baseArrayLayer = base,
- .layerCount = 1,
- };
- const u32 bpp = VideoCore::Surface::BytesPerBlock(image->info.format);
- const auto convert = [old_bpp = dst.bytes_per_pixel, bpp](u32 value) {
- return (old_bpp * value) / bpp;
- };
- const u32 base_x = convert(dst.params.origin.x.Value());
- const u32 base_y = dst.params.origin.y.Value();
- const u32 length_x = convert(copy_info.length_x);
- const u32 length_y = copy_info.length_y;
- VkOffset3D image_offset{
- .x = static_cast<s32>(base_x),
- .y = static_cast<s32>(base_y),
- .z = 0,
- };
- VkExtent3D image_extent{
- .width = length_x,
- .height = length_y,
- .depth = 1,
- };
- auto buff_info(src);
- buff_info.pitch = convert(src.pitch);
- scheduler.RequestOutsideRenderPassOperationContext();
- scheduler.Record([dst_image = image->Handle(), src_buffer = buffer->Handle(),
- buffer_offset = offset, subresources, image_offset, image_extent,
- buff_info](vk::CommandBuffer cmdbuf) {
- const std::array buffer_copy_info{
- VkBufferImageCopy{
- .bufferOffset = buffer_offset,
- .bufferRowLength = buff_info.pitch,
- .bufferImageHeight = buff_info.height,
- .imageSubresource = subresources,
- .imageOffset = image_offset,
- .imageExtent = image_extent,
- },
- };
- const VkImageSubresourceRange range{
- .aspectMask = subresources.aspectMask,
- .baseMipLevel = subresources.mipLevel,
- .levelCount = 1,
- .baseArrayLayer = subresources.baseArrayLayer,
- .layerCount = 1,
- };
- static constexpr VkMemoryBarrier READ_BARRIER{
- .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
- .pNext = nullptr,
- .srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT,
- .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT,
- };
- const std::array pre_barriers{
- VkImageMemoryBarrier{
- .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
- .pNext = nullptr,
- .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
- VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
- VK_ACCESS_TRANSFER_WRITE_BIT,
- .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
- .oldLayout = VK_IMAGE_LAYOUT_GENERAL,
- .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
- .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
- .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
- .image = dst_image,
- .subresourceRange = range,
- },
- };
- const std::array post_barriers{
- VkImageMemoryBarrier{
- .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
- .pNext = nullptr,
- .srcAccessMask = 0,
- .dstAccessMask = 0,
- .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
- .newLayout = VK_IMAGE_LAYOUT_GENERAL,
- .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
- .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
- .image = dst_image,
- .subresourceRange = range,
- },
- };
- cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
- 0, READ_BARRIER, {}, pre_barriers);
- cmdbuf.CopyBufferToImage(src_buffer, dst_image, VK_IMAGE_LAYOUT_GENERAL, buffer_copy_info);
- cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
- 0, nullptr, nullptr, post_barriers);
- });
- if (is_rescaled) {
- image->ScaleUp();
- }
- return true;
+ const Tegra::DMA::BufferOperand& buffer_operand,
+ const Tegra::DMA::ImageOperand& image_operand) {
+ return DmaBufferImageCopy<true>(copy_info, buffer_operand, image_operand);
}
void RasterizerVulkan::UpdateDynamicStates() {
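
Note: ImageToBuffer and BufferToImage now funnel into the single templated helper above; the direction is a compile-time flag, and the only per-direction differences are the ObtainBufferOperation (MarkAsWritten when the buffer is the copy destination, DoNothing when it is the source) and which Image transfer overload is invoked. A small stand-alone illustration of the `if constexpr` dispatch pattern (not yuzu code; `Transfer` stands in for DmaBufferImageCopy):

    #include <cstdio>

    template <bool IS_IMAGE_UPLOAD>
    void Transfer() {
        if constexpr (IS_IMAGE_UPLOAD) {
            std::puts("buffer -> image (BufferToImage)");  // image is written
        } else {
            std::puts("image -> buffer (ImageToBuffer)");  // buffer is written
        }
    }

    int main() {
        Transfer<true>();   // mirrors DmaBufferImageCopy<true>
        Transfer<false>();  // mirrors DmaBufferImageCopy<false>
    }
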
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index 7746c5434..1659fbc13 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -59,6 +59,11 @@ public:
const Tegra::DMA::ImageOperand& dst) override;
private:
+ template <bool IS_IMAGE_UPLOAD>
+ bool DmaBufferImageCopy(const Tegra::DMA::ImageCopy& copy_info,
+ const Tegra::DMA::BufferOperand& src,
+ const Tegra::DMA::ImageOperand& dst);
+
BufferCache& buffer_cache;
TextureCache& texture_cache;
Scheduler& scheduler;
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index 8a204f93f..bf6389ff1 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -1312,15 +1312,16 @@ Image::Image(const VideoCommon::NullImageParams& params) : VideoCommon::ImageBas
Image::~Image() = default;
-void Image::UploadMemory(const StagingBufferRef& map, std::span<const BufferImageCopy> copies) {
+void Image::UploadMemory(VkBuffer buffer, VkDeviceSize offset,
+ std::span<const VideoCommon::BufferImageCopy> copies) {
// TODO: Move this to another API
const bool is_rescaled = True(flags & ImageFlagBits::Rescaled);
if (is_rescaled) {
ScaleDown(true);
}
scheduler->RequestOutsideRenderPassOperationContext();
- std::vector vk_copies = TransformBufferImageCopies(copies, map.offset, aspect_mask);
- const VkBuffer src_buffer = map.buffer;
+ std::vector vk_copies = TransformBufferImageCopies(copies, offset, aspect_mask);
+ const VkBuffer src_buffer = buffer;
const VkImage vk_image = *original_image;
const VkImageAspectFlags vk_aspect_mask = aspect_mask;
const bool is_initialized = std::exchange(initialized, true);
@@ -1333,14 +1334,19 @@ void Image::UploadMemory(const StagingBufferRef& map, std::span<const BufferImag
}
}
-void Image::DownloadMemory(const StagingBufferRef& map, std::span<const BufferImageCopy> copies) {
+void Image::UploadMemory(const StagingBufferRef& map, std::span<const BufferImageCopy> copies) {
+ UploadMemory(map.buffer, map.offset, copies);
+}
+
+void Image::DownloadMemory(VkBuffer buffer, VkDeviceSize offset,
+ std::span<const VideoCommon::BufferImageCopy> copies) {
const bool is_rescaled = True(flags & ImageFlagBits::Rescaled);
if (is_rescaled) {
ScaleDown();
}
- std::vector vk_copies = TransformBufferImageCopies(copies, map.offset, aspect_mask);
+ std::vector vk_copies = TransformBufferImageCopies(copies, offset, aspect_mask);
scheduler->RequestOutsideRenderPassOperationContext();
- scheduler->Record([buffer = map.buffer, image = *original_image, aspect_mask = aspect_mask,
+ scheduler->Record([buffer, image = *original_image, aspect_mask = aspect_mask,
vk_copies](vk::CommandBuffer cmdbuf) {
const VkImageMemoryBarrier read_barrier{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
@@ -1395,6 +1401,10 @@ void Image::DownloadMemory(const StagingBufferRef& map, std::span<const BufferIm
}
}
+void Image::DownloadMemory(const StagingBufferRef& map, std::span<const BufferImageCopy> copies) {
+ DownloadMemory(map.buffer, map.offset, copies);
+}
+
bool Image::IsRescaled() const noexcept {
return True(flags & ImageFlagBits::Rescaled);
}
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h
index 0ce39616f..d5ee23f8d 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.h
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.h
@@ -132,9 +132,15 @@ public:
Image(Image&&) = default;
Image& operator=(Image&&) = default;
+ void UploadMemory(VkBuffer buffer, VkDeviceSize offset,
+ std::span<const VideoCommon::BufferImageCopy> copies);
+
void UploadMemory(const StagingBufferRef& map,
std::span<const VideoCommon::BufferImageCopy> copies);
+ void DownloadMemory(VkBuffer buffer, VkDeviceSize offset,
+ std::span<const VideoCommon::BufferImageCopy> copies);
+
void DownloadMemory(const StagingBufferRef& map,
std::span<const VideoCommon::BufferImageCopy> copies);
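
Note: these declarations mirror the OpenGL header change; the StagingBufferRef overloads forward to the new VkBuffer/VkDeviceSize pair. A minimal sketch of how the DMA path in vk_rasterizer.cpp above drives them (variable names taken from that hunk; error handling and locking omitted):

    const auto [buffer, offset] =
        buffer_cache.ObtainBuffer(buffer_operand.address, buffer_size, sync_info, post_op);
    const std::span copy_span{&copy, 1};
    image->UploadMemory(buffer->Handle(), offset, copy_span);    // buffer -> image
    image->DownloadMemory(buffer->Handle(), offset, copy_span);  // image -> buffer
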
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 335338434..8e8b9a5e6 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -745,6 +745,25 @@ void TextureCache<P>::PopAsyncFlushes() {
}
template <class P>
+ImageId TextureCache<P>::DmaImageId(const Tegra::DMA::ImageOperand& operand) {
+ const ImageInfo dst_info(operand);
+ const ImageId dst_id = FindDMAImage(dst_info, operand.address);
+ if (!dst_id) {
+ return NULL_IMAGE_ID;
+ }
+ const auto& image = slot_images[dst_id];
+ if (False(image.flags & ImageFlagBits::GpuModified)) {
+ // No need to waste time on an image that's synced with guest
+ return NULL_IMAGE_ID;
+ }
+ const auto base = image.TryFindBase(operand.address);
+ if (!base) {
+ return NULL_IMAGE_ID;
+ }
+ return dst_id;
+}
+
+template <class P>
bool TextureCache<P>::IsRescaling() const noexcept {
return is_rescaling;
}
@@ -772,6 +791,49 @@ bool TextureCache<P>::IsRegionGpuModified(VAddr addr, size_t size) {
}
template <class P>
+std::pair<typename TextureCache<P>::Image*, BufferImageCopy> TextureCache<P>::DmaBufferImageCopy(
+ const Tegra::DMA::ImageCopy& copy_info, const Tegra::DMA::BufferOperand& buffer_operand,
+ const Tegra::DMA::ImageOperand& image_operand, ImageId image_id, bool modifies_image) {
+ const auto [level, base] = PrepareDmaImage(image_id, image_operand.address, modifies_image);
+ auto* image = &slot_images[image_id];
+ const u32 buffer_size = static_cast<u32>(buffer_operand.pitch * buffer_operand.height);
+ const u32 bpp = VideoCore::Surface::BytesPerBlock(image->info.format);
+ const auto convert = [old_bpp = image_operand.bytes_per_pixel, bpp](u32 value) {
+ return (old_bpp * value) / bpp;
+ };
+ const u32 base_x = convert(image_operand.params.origin.x.Value());
+ const u32 base_y = image_operand.params.origin.y.Value();
+ const u32 length_x = convert(copy_info.length_x);
+ const u32 length_y = copy_info.length_y;
+
+ const BufferImageCopy copy{
+ .buffer_offset = 0,
+ .buffer_size = buffer_size,
+ .buffer_row_length = convert(buffer_operand.pitch),
+ .buffer_image_height = buffer_operand.height,
+ .image_subresource =
+ {
+ .base_level = static_cast<s32>(level),
+ .base_layer = static_cast<s32>(base),
+ .num_layers = 1,
+ },
+ .image_offset =
+ {
+ .x = static_cast<s32>(base_x),
+ .y = static_cast<s32>(base_y),
+ .z = 0,
+ },
+ .image_extent =
+ {
+ .width = length_x,
+ .height = length_y,
+ .depth = 1,
+ },
+ };
+ return {image, copy};
+}
+
+template <class P>
void TextureCache<P>::RefreshContents(Image& image, ImageId image_id) {
if (False(image.flags & ImageFlagBits::CpuModified)) {
// Only upload modified images
@@ -1405,26 +1467,14 @@ ImageId TextureCache<P>::FindDMAImage(const ImageInfo& info, GPUVAddr gpu_addr)
}
template <class P>
-std::optional<std::pair<typename TextureCache<P>::Image*, std::pair<u32, u32>>>
-TextureCache<P>::ObtainImage(const Tegra::DMA::ImageOperand& operand, bool mark_as_modified) {
- ImageInfo dst_info(operand);
- ImageId dst_id = FindDMAImage(dst_info, operand.address);
- if (!dst_id) {
- return std::nullopt;
- }
- auto& image = slot_images[dst_id];
- auto base = image.TryFindBase(operand.address);
- if (!base) {
- return std::nullopt;
- }
- if (False(image.flags & ImageFlagBits::GpuModified)) {
- // No need to waste time on an image that's synced with guest
- return std::nullopt;
- }
+std::pair<u32, u32> TextureCache<P>::PrepareDmaImage(ImageId dst_id, GPUVAddr base_addr,
+ bool mark_as_modified) {
+ const auto& image = slot_images[dst_id];
+ const auto base = image.TryFindBase(base_addr);
PrepareImage(dst_id, mark_as_modified, false);
- auto& new_image = slot_images[dst_id];
+ const auto& new_image = slot_images[dst_id];
lru_cache.Touch(new_image.lru_index, frame_tick);
- return std::make_pair(&new_image, std::make_pair(base->level, base->layer));
+ return std::make_pair(base->level, base->layer);
}
template <class P>
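
Note: the old ObtainImage helper is split into DmaImageId (image lookup plus the GpuModified early-out), DmaBufferImageCopy (builds a backend-neutral VideoCommon::BufferImageCopy), and the private PrepareDmaImage. A condensed sketch of the new call sequence as used by AccelerateDMA above (not a literal excerpt; `is_image_upload` names the boolean argument for clarity):

    const ImageId image_id = texture_cache.DmaImageId(image_operand);
    if (image_id == VideoCommon::NULL_IMAGE_ID) {
        return false;  // image untracked, or already in sync with the guest
    }
    const auto [image, copy] = texture_cache.DmaBufferImageCopy(
        copy_info, buffer_operand, image_operand, image_id, /*modifies_image=*/is_image_upload);
    // 'copy' is API-agnostic; each backend turns it into its own buffer<->image transfer.
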
diff --git a/src/video_core/texture_cache/texture_cache_base.h b/src/video_core/texture_cache/texture_cache_base.h
index 848a5d9ea..5a5b4179c 100644
--- a/src/video_core/texture_cache/texture_cache_base.h
+++ b/src/video_core/texture_cache/texture_cache_base.h
@@ -209,8 +209,11 @@ public:
/// Pop asynchronous downloads
void PopAsyncFlushes();
- [[nodiscard]] std::optional<std::pair<Image*, std::pair<u32, u32>>> ObtainImage(
- const Tegra::DMA::ImageOperand& operand, bool mark_as_modified);
+ [[nodiscard]] ImageId DmaImageId(const Tegra::DMA::ImageOperand& operand);
+
+ [[nodiscard]] std::pair<Image*, BufferImageCopy> DmaBufferImageCopy(
+ const Tegra::DMA::ImageCopy& copy_info, const Tegra::DMA::BufferOperand& buffer_operand,
+ const Tegra::DMA::ImageOperand& image_operand, ImageId image_id, bool modifies_image);
/// Return true when a CPU region is modified from the GPU
[[nodiscard]] bool IsRegionGpuModified(VAddr addr, size_t size);
@@ -386,6 +389,9 @@ private:
/// Returns true if the current clear parameters clear the whole image of a given image view
[[nodiscard]] bool IsFullClear(ImageViewId id);
+ [[nodiscard]] std::pair<u32, u32> PrepareDmaImage(ImageId dst_id, GPUVAddr base_addr,
+ bool mark_as_modified);
+
bool ImageCanRescale(ImageBase& image);
void InvalidateScale(Image& image);
bool ScaleUp(Image& image);