Diffstat (limited to 'src/video_core/texture_cache/util.cpp')
-rw-r--r--  src/video_core/texture_cache/util.cpp | 72
1 file changed, 42 insertions(+), 30 deletions(-)
diff --git a/src/video_core/texture_cache/util.cpp b/src/video_core/texture_cache/util.cpp
index 95a5b47d8..9a618a57a 100644
--- a/src/video_core/texture_cache/util.cpp
+++ b/src/video_core/texture_cache/util.cpp
@@ -24,7 +24,7 @@
#include "video_core/engines/maxwell_3d.h"
#include "video_core/memory_manager.h"
#include "video_core/surface.h"
-#include "video_core/texture_cache/decode_bc4.h"
+#include "video_core/texture_cache/decode_bc.h"
#include "video_core/texture_cache/format_lookup_table.h"
#include "video_core/texture_cache/formatter.h"
#include "video_core/texture_cache/samples_helper.h"
@@ -61,8 +61,6 @@ using VideoCore::Surface::PixelFormatFromDepthFormat;
using VideoCore::Surface::PixelFormatFromRenderTargetFormat;
using VideoCore::Surface::SurfaceType;
-constexpr u32 CONVERTED_BYTES_PER_BLOCK = BytesPerBlock(PixelFormat::A8B8G8R8_UNORM);
-
struct LevelInfo {
Extent3D size;
Extent3D block;
@@ -329,13 +327,13 @@ template <u32 GOB_EXTENT>
[[nodiscard]] std::optional<SubresourceExtent> ResolveOverlapRightAddress3D(
const ImageInfo& new_info, GPUVAddr gpu_addr, const ImageBase& overlap, bool strict_size) {
- const std::vector<u32> slice_offsets = CalculateSliceOffsets(new_info);
+ const auto slice_offsets = CalculateSliceOffsets(new_info);
const u32 diff = static_cast<u32>(overlap.gpu_addr - gpu_addr);
const auto it = std::ranges::find(slice_offsets, diff);
if (it == slice_offsets.end()) {
return std::nullopt;
}
- const std::vector subresources = CalculateSliceSubresources(new_info);
+ const auto subresources = CalculateSliceSubresources(new_info);
const SubresourceBase base = subresources[std::distance(slice_offsets.begin(), it)];
const ImageInfo& info = overlap.info;
if (!IsBlockLinearSizeCompatible(new_info, info, base.level, 0, strict_size)) {
@@ -612,7 +610,8 @@ u32 CalculateConvertedSizeBytes(const ImageInfo& info) noexcept {
}
return output_size;
}
- return NumBlocksPerLayer(info, TILE_SIZE) * info.resources.layers * CONVERTED_BYTES_PER_BLOCK;
+ return NumBlocksPerLayer(info, TILE_SIZE) * info.resources.layers *
+ ConvertedBytesPerBlock(info.format);
}
u32 CalculateLayerStride(const ImageInfo& info) noexcept {
@@ -655,9 +654,9 @@ LevelArray CalculateMipLevelSizes(const ImageInfo& info) noexcept {
return sizes;
}
-std::vector<u32> CalculateSliceOffsets(const ImageInfo& info) {
+boost::container::small_vector<u32, 16> CalculateSliceOffsets(const ImageInfo& info) {
ASSERT(info.type == ImageType::e3D);
- std::vector<u32> offsets;
+ boost::container::small_vector<u32, 16> offsets;
offsets.reserve(NumSlices(info));
const LevelInfo level_info = MakeLevelInfo(info);
@@ -679,9 +678,10 @@ std::vector<u32> CalculateSliceOffsets(const ImageInfo& info) {
return offsets;
}
-std::vector<SubresourceBase> CalculateSliceSubresources(const ImageInfo& info) {
+boost::container::small_vector<SubresourceBase, 16> CalculateSliceSubresources(
+ const ImageInfo& info) {
ASSERT(info.type == ImageType::e3D);
- std::vector<SubresourceBase> subresources;
+ boost::container::small_vector<SubresourceBase, 16> subresources;
subresources.reserve(NumSlices(info));
for (s32 level = 0; level < info.resources.levels; ++level) {
const s32 depth = AdjustMipSize(info.size.depth, level);
@@ -723,8 +723,10 @@ ImageViewType RenderTargetImageViewType(const ImageInfo& info) noexcept {
}
}
-std::vector<ImageCopy> MakeShrinkImageCopies(const ImageInfo& dst, const ImageInfo& src,
- SubresourceBase base, u32 up_scale, u32 down_shift) {
+boost::container::small_vector<ImageCopy, 16> MakeShrinkImageCopies(const ImageInfo& dst,
+ const ImageInfo& src,
+ SubresourceBase base,
+ u32 up_scale, u32 down_shift) {
ASSERT(dst.resources.levels >= src.resources.levels);
const bool is_dst_3d = dst.type == ImageType::e3D;
@@ -733,7 +735,7 @@ std::vector<ImageCopy> MakeShrinkImageCopies(const ImageInfo& dst, const ImageIn
ASSERT(src.resources.levels == 1);
}
const bool both_2d{src.type == ImageType::e2D && dst.type == ImageType::e2D};
- std::vector<ImageCopy> copies;
+ boost::container::small_vector<ImageCopy, 16> copies;
copies.reserve(src.resources.levels);
for (s32 level = 0; level < src.resources.levels; ++level) {
ImageCopy& copy = copies.emplace_back();
@@ -770,9 +772,10 @@ std::vector<ImageCopy> MakeShrinkImageCopies(const ImageInfo& dst, const ImageIn
return copies;
}
-std::vector<ImageCopy> MakeReinterpretImageCopies(const ImageInfo& src, u32 up_scale,
- u32 down_shift) {
- std::vector<ImageCopy> copies;
+boost::container::small_vector<ImageCopy, 16> MakeReinterpretImageCopies(const ImageInfo& src,
+ u32 up_scale,
+ u32 down_shift) {
+ boost::container::small_vector<ImageCopy, 16> copies;
copies.reserve(src.resources.levels);
const bool is_3d = src.type == ImageType::e3D;
for (s32 level = 0; level < src.resources.levels; ++level) {
@@ -824,9 +827,11 @@ bool IsValidEntry(const Tegra::MemoryManager& gpu_memory, const TICEntry& config
return gpu_memory.GpuToCpuAddress(address, guest_size_bytes).has_value();
}
-std::vector<BufferImageCopy> UnswizzleImage(Tegra::MemoryManager& gpu_memory, GPUVAddr gpu_addr,
- const ImageInfo& info, std::span<const u8> input,
- std::span<u8> output) {
+boost::container::small_vector<BufferImageCopy, 16> UnswizzleImage(Tegra::MemoryManager& gpu_memory,
+ GPUVAddr gpu_addr,
+ const ImageInfo& info,
+ std::span<const u8> input,
+ std::span<u8> output) {
const size_t guest_size_bytes = input.size_bytes();
const u32 bpp_log2 = BytesPerBlockLog2(info.format);
const Extent3D size = info.size;
@@ -861,7 +866,7 @@ std::vector<BufferImageCopy> UnswizzleImage(Tegra::MemoryManager& gpu_memory, GP
info.tile_width_spacing);
size_t guest_offset = 0;
u32 host_offset = 0;
- std::vector<BufferImageCopy> copies(num_levels);
+ boost::container::small_vector<BufferImageCopy, 16> copies(num_levels);
for (s32 level = 0; level < num_levels; ++level) {
const Extent3D level_size = AdjustMipSize(size, level);
@@ -939,7 +944,8 @@ void ConvertImage(std::span<const u8> input, const ImageInfo& info, std::span<u8
tile_size.height, output.subspan(output_offset));
output_offset += copy.image_extent.width * copy.image_extent.height *
- copy.image_subresource.num_layers * CONVERTED_BYTES_PER_BLOCK;
+ copy.image_subresource.num_layers *
+ BytesPerBlock(PixelFormat::A8B8G8R8_UNORM);
} else if (astc) {
// BC1 uses 0.5 bytes per texel
// BC3 uses 1 byte per texel
@@ -950,7 +956,8 @@ void ConvertImage(std::span<const u8> input, const ImageInfo& info, std::span<u8
const u32 plane_dim = copy.image_extent.width * copy.image_extent.height;
const u32 level_size = plane_dim * copy.image_extent.depth *
- copy.image_subresource.num_layers * CONVERTED_BYTES_PER_BLOCK;
+ copy.image_subresource.num_layers *
+ BytesPerBlock(PixelFormat::A8B8G8R8_UNORM);
decode_scratch.resize_destructive(level_size);
Tegra::Texture::ASTC::Decompress(
@@ -970,15 +977,20 @@ void ConvertImage(std::span<const u8> input, const ImageInfo& info, std::span<u8
bpp_div;
output_offset += static_cast<u32>(copy.buffer_size);
} else {
- DecompressBC4(input_offset, copy.image_extent, output.subspan(output_offset));
-
+ const Extent3D image_extent{
+ .width = copy.image_extent.width,
+ .height = copy.image_extent.height * copy.image_subresource.num_layers,
+ .depth = copy.image_extent.depth,
+ };
+ DecompressBCn(input_offset, output.subspan(output_offset), image_extent, info.format);
output_offset += copy.image_extent.width * copy.image_extent.height *
- copy.image_subresource.num_layers * CONVERTED_BYTES_PER_BLOCK;
+ copy.image_subresource.num_layers *
+ ConvertedBytesPerBlock(info.format);
}
}
}
-std::vector<BufferImageCopy> FullDownloadCopies(const ImageInfo& info) {
+boost::container::small_vector<BufferImageCopy, 16> FullDownloadCopies(const ImageInfo& info) {
const Extent3D size = info.size;
const u32 bytes_per_block = BytesPerBlock(info.format);
if (info.type == ImageType::Linear) {
@@ -1006,7 +1018,7 @@ std::vector<BufferImageCopy> FullDownloadCopies(const ImageInfo& info) {
u32 host_offset = 0;
- std::vector<BufferImageCopy> copies(num_levels);
+ boost::container::small_vector<BufferImageCopy, 16> copies(num_levels);
for (s32 level = 0; level < num_levels; ++level) {
const Extent3D level_size = AdjustMipSize(size, level);
const u32 num_blocks_per_layer = NumBlocks(level_size, tile_size);
@@ -1042,10 +1054,10 @@ Extent3D MipBlockSize(const ImageInfo& info, u32 level) {
return AdjustMipBlockSize(num_tiles, level_info.block, level);
}
-std::vector<SwizzleParameters> FullUploadSwizzles(const ImageInfo& info) {
+boost::container::small_vector<SwizzleParameters, 16> FullUploadSwizzles(const ImageInfo& info) {
const Extent2D tile_size = DefaultBlockSize(info.format);
if (info.type == ImageType::Linear) {
- return std::vector{SwizzleParameters{
+ return {SwizzleParameters{
.num_tiles = AdjustTileSize(info.size, tile_size),
.block = {},
.buffer_offset = 0,
@@ -1057,7 +1069,7 @@ std::vector<SwizzleParameters> FullUploadSwizzles(const ImageInfo& info) {
const s32 num_levels = info.resources.levels;
u32 guest_offset = 0;
- std::vector<SwizzleParameters> params(num_levels);
+ boost::container::small_vector<SwizzleParameters, 16> params(num_levels);
for (s32 level = 0; level < num_levels; ++level) {
const Extent3D level_size = AdjustMipSize(size, level);
const Extent3D num_tiles = AdjustTileSize(level_size, tile_size);
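
A minimal standalone sketch of the container swap applied throughout this diff (std::vector replaced by boost::container::small_vector<T, 16>), assuming only the Boost.Container header; the Offsets alias, MakeOffsets helper, and element values are illustrative and not taken from util.cpp:

// Sketch only: shows the small_vector behaviour the diff relies on.
// Up to 16 elements live in storage embedded in the object itself, so
// typical mip-level/slice counts avoid a heap allocation; larger counts
// transparently spill to the heap while keeping std::vector-like
// semantics (reserve, push_back, iteration, operator[]).
#include <cstdint>
#include <cstdio>

#include <boost/container/small_vector.hpp>

using u32 = std::uint32_t;
using Offsets = boost::container::small_vector<u32, 16>;

static Offsets MakeOffsets(u32 count) {
    Offsets offsets;
    offsets.reserve(count); // no allocation while count <= 16
    for (u32 i = 0; i < count; ++i) {
        offsets.push_back(i * 0x1000); // illustrative values only
    }
    return offsets;
}

int main() {
    const Offsets small = MakeOffsets(8);  // stays in inline storage
    const Offsets large = MakeOffsets(64); // spills to the heap
    std::printf("small: size=%zu capacity=%zu\n", small.size(), small.capacity());
    std::printf("large: size=%zu capacity=%zu\n", large.size(), large.capacity());
    return 0;
}

For the helpers touched above, the element counts are bounded by the number of mip levels or 3D slices, which is typically well under 16, so the inline storage removes a per-call heap allocation without changing any call sites.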