path: root/src/video_core/renderer_vulkan/vk_texture_cache.cpp
author    ReinUsesLisp <reinuseslisp@airmail.cc>  2020-03-27 05:33:21 +0100
committer ReinUsesLisp <reinuseslisp@airmail.cc>  2020-04-11 03:49:02 +0200
commit    2905142f47163d0f409c31910cdc234e8797286c (patch)
tree      7d5bf00d6b68f58b868e64a0683afd8c09e7f971 /src/video_core/renderer_vulkan/vk_texture_cache.cpp
parent    Merge pull request #3594 from ReinUsesLisp/vk-instance (diff)
Diffstat (limited to 'src/video_core/renderer_vulkan/vk_texture_cache.cpp')
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.cpp | 362
1 file changed, 210 insertions(+), 152 deletions(-)
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index 5b9b39670..de4c23120 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -17,7 +17,6 @@
#include "core/memory.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/morton.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/maxwell_to_vk.h"
#include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_memory_manager.h"
@@ -25,6 +24,7 @@
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
#include "video_core/renderer_vulkan/vk_texture_cache.h"
+#include "video_core/renderer_vulkan/wrapper.h"
#include "video_core/surface.h"
#include "video_core/textures/convert.h"
@@ -39,18 +39,18 @@ using VideoCore::Surface::SurfaceTarget;
namespace {
-vk::ImageType SurfaceTargetToImage(SurfaceTarget target) {
+VkImageType SurfaceTargetToImage(SurfaceTarget target) {
switch (target) {
case SurfaceTarget::Texture1D:
case SurfaceTarget::Texture1DArray:
- return vk::ImageType::e1D;
+ return VK_IMAGE_TYPE_1D;
case SurfaceTarget::Texture2D:
case SurfaceTarget::Texture2DArray:
case SurfaceTarget::TextureCubemap:
case SurfaceTarget::TextureCubeArray:
- return vk::ImageType::e2D;
+ return VK_IMAGE_TYPE_2D;
case SurfaceTarget::Texture3D:
- return vk::ImageType::e3D;
+ return VK_IMAGE_TYPE_3D;
case SurfaceTarget::TextureBuffer:
UNREACHABLE();
return {};
@@ -59,35 +59,35 @@ vk::ImageType SurfaceTargetToImage(SurfaceTarget target) {
return {};
}
-vk::ImageAspectFlags PixelFormatToImageAspect(PixelFormat pixel_format) {
+VkImageAspectFlags PixelFormatToImageAspect(PixelFormat pixel_format) {
if (pixel_format < PixelFormat::MaxColorFormat) {
- return vk::ImageAspectFlagBits::eColor;
+ return VK_IMAGE_ASPECT_COLOR_BIT;
} else if (pixel_format < PixelFormat::MaxDepthFormat) {
- return vk::ImageAspectFlagBits::eDepth;
+ return VK_IMAGE_ASPECT_DEPTH_BIT;
} else if (pixel_format < PixelFormat::MaxDepthStencilFormat) {
- return vk::ImageAspectFlagBits::eDepth | vk::ImageAspectFlagBits::eStencil;
+ return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
} else {
- UNREACHABLE_MSG("Invalid pixel format={}", static_cast<u32>(pixel_format));
- return vk::ImageAspectFlagBits::eColor;
+ UNREACHABLE_MSG("Invalid pixel format={}", static_cast<int>(pixel_format));
+ return VK_IMAGE_ASPECT_COLOR_BIT;
}
}
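// Note: the range checks above rely on the PixelFormat enum ordering color
// formats before depth formats, and depth formats before depth-stencil formats
// (the MaxColorFormat/MaxDepthFormat sentinels exist for exactly this). Under
// that ordering, a combined format yields both aspect bits:
//
//   PixelFormatToImageAspect(PixelFormat::Z24S8)
//       == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)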
-vk::ImageViewType GetImageViewType(SurfaceTarget target) {
+VkImageViewType GetImageViewType(SurfaceTarget target) {
switch (target) {
case SurfaceTarget::Texture1D:
- return vk::ImageViewType::e1D;
+ return VK_IMAGE_VIEW_TYPE_1D;
case SurfaceTarget::Texture2D:
- return vk::ImageViewType::e2D;
+ return VK_IMAGE_VIEW_TYPE_2D;
case SurfaceTarget::Texture3D:
- return vk::ImageViewType::e3D;
+ return VK_IMAGE_VIEW_TYPE_3D;
case SurfaceTarget::Texture1DArray:
- return vk::ImageViewType::e1DArray;
+ return VK_IMAGE_VIEW_TYPE_1D_ARRAY;
case SurfaceTarget::Texture2DArray:
- return vk::ImageViewType::e2DArray;
+ return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
case SurfaceTarget::TextureCubemap:
- return vk::ImageViewType::eCube;
+ return VK_IMAGE_VIEW_TYPE_CUBE;
case SurfaceTarget::TextureCubeArray:
- return vk::ImageViewType::eCubeArray;
+ return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
case SurfaceTarget::TextureBuffer:
break;
}
@@ -95,73 +95,88 @@ vk::ImageViewType GetImageViewType(SurfaceTarget target) {
return {};
}
-UniqueBuffer CreateBuffer(const VKDevice& device, const SurfaceParams& params,
- std::size_t host_memory_size) {
+vk::Buffer CreateBuffer(const VKDevice& device, const SurfaceParams& params,
+ std::size_t host_memory_size) {
// TODO(Rodrigo): Move texture buffer creation to the buffer cache
- const vk::BufferCreateInfo buffer_ci({}, host_memory_size,
- vk::BufferUsageFlagBits::eUniformTexelBuffer |
- vk::BufferUsageFlagBits::eTransferSrc |
- vk::BufferUsageFlagBits::eTransferDst,
- vk::SharingMode::eExclusive, 0, nullptr);
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
- return dev.createBufferUnique(buffer_ci, nullptr, dld);
+ VkBufferCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ ci.size = static_cast<VkDeviceSize>(host_memory_size);
+ ci.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
+ VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+ ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ ci.queueFamilyIndexCount = 0;
+ ci.pQueueFamilyIndices = nullptr;
+ return device.GetLogical().CreateBuffer(ci);
}
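// A minimal sketch of what the wrapper call above presumably reduces to with
// the raw C API (hypothetical; the actual RAII plumbing lives in wrapper.h):
//
//   VkBuffer handle = VK_NULL_HANDLE;
//   const VkResult result = vkCreateBuffer(raw_device, &ci, nullptr, &handle);
//   // on success, `handle` is owned by the returned vk::Buffer and destroyed
//   // with vkDestroyBuffer when it goes out of scope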
-vk::BufferViewCreateInfo GenerateBufferViewCreateInfo(const VKDevice& device,
- const SurfaceParams& params,
- vk::Buffer buffer,
- std::size_t host_memory_size) {
+VkBufferViewCreateInfo GenerateBufferViewCreateInfo(const VKDevice& device,
+ const SurfaceParams& params, VkBuffer buffer,
+ std::size_t host_memory_size) {
ASSERT(params.IsBuffer());
- const auto format =
- MaxwellToVK::SurfaceFormat(device, FormatType::Buffer, params.pixel_format).format;
- return vk::BufferViewCreateInfo({}, buffer, format, 0, host_memory_size);
+ VkBufferViewCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ ci.buffer = buffer;
+ ci.format = MaxwellToVK::SurfaceFormat(device, FormatType::Buffer, params.pixel_format).format;
+ ci.offset = 0;
+ ci.range = static_cast<VkDeviceSize>(host_memory_size);
+ return ci;
}
-vk::ImageCreateInfo GenerateImageCreateInfo(const VKDevice& device, const SurfaceParams& params) {
- constexpr auto sample_count = vk::SampleCountFlagBits::e1;
- constexpr auto tiling = vk::ImageTiling::eOptimal;
-
+VkImageCreateInfo GenerateImageCreateInfo(const VKDevice& device, const SurfaceParams& params) {
ASSERT(!params.IsBuffer());
const auto [format, attachable, storage] =
MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, params.pixel_format);
- auto image_usage = vk::ImageUsageFlagBits::eSampled | vk::ImageUsageFlagBits::eTransferDst |
- vk::ImageUsageFlagBits::eTransferSrc;
+ VkImageCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ ci.imageType = SurfaceTargetToImage(params.target);
+ ci.format = format;
+ ci.mipLevels = params.num_levels;
+ ci.arrayLayers = static_cast<u32>(params.GetNumLayers());
+ ci.samples = VK_SAMPLE_COUNT_1_BIT;
+ ci.tiling = VK_IMAGE_TILING_OPTIMAL;
+ ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ ci.queueFamilyIndexCount = 0;
+ ci.pQueueFamilyIndices = nullptr;
+ ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+
+ ci.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT |
+ VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
if (attachable) {
- image_usage |= params.IsPixelFormatZeta() ? vk::ImageUsageFlagBits::eDepthStencilAttachment
- : vk::ImageUsageFlagBits::eColorAttachment;
+ ci.usage |= params.IsPixelFormatZeta() ? VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT
+ : VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
}
if (storage) {
- image_usage |= vk::ImageUsageFlagBits::eStorage;
+ ci.usage |= VK_IMAGE_USAGE_STORAGE_BIT;
}
- vk::ImageCreateFlags flags;
- vk::Extent3D extent;
switch (params.target) {
case SurfaceTarget::TextureCubemap:
case SurfaceTarget::TextureCubeArray:
- flags |= vk::ImageCreateFlagBits::eCubeCompatible;
+ ci.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
[[fallthrough]];
case SurfaceTarget::Texture1D:
case SurfaceTarget::Texture1DArray:
case SurfaceTarget::Texture2D:
case SurfaceTarget::Texture2DArray:
- extent = vk::Extent3D(params.width, params.height, 1);
+ ci.extent = {params.width, params.height, 1};
break;
case SurfaceTarget::Texture3D:
- extent = vk::Extent3D(params.width, params.height, params.depth);
+ ci.extent = {params.width, params.height, params.depth};
break;
case SurfaceTarget::TextureBuffer:
UNREACHABLE();
}
- return vk::ImageCreateInfo(flags, SurfaceTargetToImage(params.target), format, extent,
- params.num_levels, static_cast<u32>(params.GetNumLayers()),
- sample_count, tiling, image_usage, vk::SharingMode::eExclusive, 0,
- nullptr, vk::ImageLayout::eUndefined);
+ return ci;
}
} // Anonymous namespace
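// Hedged usage sketch: the create info built above feeds an image-creation
// call; with the raw C API (the `raw_device` name is illustrative) this is:
//
//   const VkImageCreateInfo ci = GenerateImageCreateInfo(device, params);
//   VkImage raw_image = VK_NULL_HANDLE;
//   vkCreateImage(raw_device, &ci, nullptr, &raw_image);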
@@ -175,15 +190,13 @@ CachedSurface::CachedSurface(Core::System& system, const VKDevice& device,
memory_manager{memory_manager}, scheduler{scheduler}, staging_pool{staging_pool} {
if (params.IsBuffer()) {
buffer = CreateBuffer(device, params, host_memory_size);
- commit = memory_manager.Commit(*buffer, false);
+ commit = memory_manager.Commit(buffer, false);
const auto buffer_view_ci =
GenerateBufferViewCreateInfo(device, params, *buffer, host_memory_size);
format = buffer_view_ci.format;
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
- buffer_view = dev.createBufferViewUnique(buffer_view_ci, nullptr, dld);
+ buffer_view = device.GetLogical().CreateBufferView(buffer_view_ci);
} else {
const auto image_ci = GenerateImageCreateInfo(device, params);
format = image_ci.format;
@@ -221,16 +234,15 @@ void CachedSurface::DownloadTexture(std::vector<u8>& staging_buffer) {
// We can't copy images to buffers inside a renderpass
scheduler.RequestOutsideRenderPassOperationContext();
- FullTransition(vk::PipelineStageFlagBits::eTransfer, vk::AccessFlagBits::eTransferRead,
- vk::ImageLayout::eTransferSrcOptimal);
+ FullTransition(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_READ_BIT,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
const auto& buffer = staging_pool.GetUnusedBuffer(host_memory_size, true);
// TODO(Rodrigo): Do this in a single copy
for (u32 level = 0; level < params.num_levels; ++level) {
- scheduler.Record([image = image->GetHandle(), buffer = *buffer.handle,
- copy = GetBufferImageCopy(level)](auto cmdbuf, auto& dld) {
- cmdbuf.copyImageToBuffer(image, vk::ImageLayout::eTransferSrcOptimal, buffer, {copy},
- dld);
+ scheduler.Record([image = *image->GetHandle(), buffer = *buffer.handle,
+ copy = GetBufferImageCopy(level)](vk::CommandBuffer cmdbuf) {
+ cmdbuf.CopyImageToBuffer(image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer, copy);
});
}
scheduler.Finish();
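// The raw equivalent of the recorded copy (for reference; `raw_cmdbuf` is
// illustrative) is:
//
//   vkCmdCopyImageToBuffer(raw_cmdbuf, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
//                          buffer, 1, &copy);
//
// scheduler.Finish() then blocks until the GPU has executed the copies, so the
// staging buffer can safely be read back on the CPU.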
@@ -257,15 +269,25 @@ void CachedSurface::UploadBuffer(const std::vector<u8>& staging_buffer) {
std::memcpy(src_buffer.commit->Map(host_memory_size), staging_buffer.data(), host_memory_size);
scheduler.Record([src_buffer = *src_buffer.handle, dst_buffer = *buffer,
- size = host_memory_size](auto cmdbuf, auto& dld) {
- const vk::BufferCopy copy(0, 0, size);
- cmdbuf.copyBuffer(src_buffer, dst_buffer, {copy}, dld);
-
- cmdbuf.pipelineBarrier(
- vk::PipelineStageFlagBits::eTransfer, vk::PipelineStageFlagBits::eVertexShader, {}, {},
- {vk::BufferMemoryBarrier(vk::AccessFlagBits::eTransferWrite,
- vk::AccessFlagBits::eShaderRead, 0, 0, dst_buffer, 0, size)},
- {}, dld);
+ size = host_memory_size](vk::CommandBuffer cmdbuf) {
+ VkBufferCopy copy;
+ copy.srcOffset = 0;
+ copy.dstOffset = 0;
+ copy.size = size;
+ cmdbuf.CopyBuffer(src_buffer, dst_buffer, copy);
+
+ VkBufferMemoryBarrier barrier;
+ barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
+ barrier.pNext = nullptr;
+ barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+ barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+ barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barrier.buffer = dst_buffer;
+ barrier.offset = 0;
+ barrier.size = size;
+ cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
+ 0, {}, barrier, {});
});
}
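// Note on the barrier above: the two access masks take VK_ACCESS_* bits, the
// stage arguments to PipelineBarrier take VK_PIPELINE_STAGE_* bits, and
// VK_QUEUE_FAMILY_IGNORED opts out of a queue family ownership transfer. The
// raw C equivalent (with an illustrative `raw_cmdbuf`) is:
//
//   vkCmdPipelineBarrier(raw_cmdbuf, VK_PIPELINE_STAGE_TRANSFER_BIT,
//                        VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0,
//                        0, nullptr, 1, &barrier, 0, nullptr);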
@@ -273,43 +297,49 @@ void CachedSurface::UploadImage(const std::vector<u8>& staging_buffer) {
const auto& src_buffer = staging_pool.GetUnusedBuffer(host_memory_size, true);
std::memcpy(src_buffer.commit->Map(host_memory_size), staging_buffer.data(), host_memory_size);
- FullTransition(vk::PipelineStageFlagBits::eTransfer, vk::AccessFlagBits::eTransferWrite,
- vk::ImageLayout::eTransferDstOptimal);
+ FullTransition(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
for (u32 level = 0; level < params.num_levels; ++level) {
- vk::BufferImageCopy copy = GetBufferImageCopy(level);
- if (image->GetAspectMask() ==
- (vk::ImageAspectFlagBits::eDepth | vk::ImageAspectFlagBits::eStencil)) {
- vk::BufferImageCopy depth = copy;
- vk::BufferImageCopy stencil = copy;
- depth.imageSubresource.aspectMask = vk::ImageAspectFlagBits::eDepth;
- stencil.imageSubresource.aspectMask = vk::ImageAspectFlagBits::eStencil;
- scheduler.Record([buffer = *src_buffer.handle, image = image->GetHandle(), depth,
- stencil](auto cmdbuf, auto& dld) {
- cmdbuf.copyBufferToImage(buffer, image, vk::ImageLayout::eTransferDstOptimal,
- {depth, stencil}, dld);
+ const VkBufferImageCopy copy = GetBufferImageCopy(level);
+ if (image->GetAspectMask() == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
+ scheduler.Record([buffer = *src_buffer.handle, image = *image->GetHandle(),
+ copy](vk::CommandBuffer cmdbuf) {
+ std::array<VkBufferImageCopy, 2> copies = {copy, copy};
+ copies[0].imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
+ copies[1].imageSubresource.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
+ cmdbuf.CopyBufferToImage(buffer, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ copies);
});
} else {
- scheduler.Record([buffer = *src_buffer.handle, image = image->GetHandle(),
- copy](auto cmdbuf, auto& dld) {
- cmdbuf.copyBufferToImage(buffer, image, vk::ImageLayout::eTransferDstOptimal,
- {copy}, dld);
+ scheduler.Record([buffer = *src_buffer.handle, image = *image->GetHandle(),
+ copy](vk::CommandBuffer cmdbuf) {
+ cmdbuf.CopyBufferToImage(buffer, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, copy);
});
}
}
}
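// The depth-stencil split above is required by the Vulkan spec: the aspectMask
// of a VkBufferImageCopy must have exactly one bit set, so a combined
// depth-stencil image is uploaded as two single-aspect copies:
//
//   copies[0].imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
//   copies[1].imageSubresource.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;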
-vk::BufferImageCopy CachedSurface::GetBufferImageCopy(u32 level) const {
- const u32 vk_depth = params.target == SurfaceTarget::Texture3D ? params.GetMipDepth(level) : 1;
- const std::size_t mip_offset = params.GetHostMipmapLevelOffset(level, is_converted);
-
- return vk::BufferImageCopy(
- mip_offset, 0, 0,
- {image->GetAspectMask(), level, 0, static_cast<u32>(params.GetNumLayers())}, {0, 0, 0},
- {params.GetMipWidth(level), params.GetMipHeight(level), vk_depth});
+VkBufferImageCopy CachedSurface::GetBufferImageCopy(u32 level) const {
+ VkBufferImageCopy copy;
+ copy.bufferOffset = params.GetHostMipmapLevelOffset(level, is_converted);
+ copy.bufferRowLength = 0;
+ copy.bufferImageHeight = 0;
+ copy.imageSubresource.aspectMask = image->GetAspectMask();
+ copy.imageSubresource.mipLevel = level;
+ copy.imageSubresource.baseArrayLayer = 0;
+ copy.imageSubresource.layerCount = static_cast<u32>(params.GetNumLayers());
+ copy.imageOffset.x = 0;
+ copy.imageOffset.y = 0;
+ copy.imageOffset.z = 0;
+ copy.imageExtent.width = params.GetMipWidth(level);
+ copy.imageExtent.height = params.GetMipHeight(level);
+ copy.imageExtent.depth =
+ params.target == SurfaceTarget::Texture3D ? params.GetMipDepth(level) : 1;
+ return copy;
}
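// Note: bufferRowLength = 0 and bufferImageHeight = 0 tell Vulkan the buffer
// data is tightly packed, with strides derived from imageExtent. An upload
// from a row-padded buffer would instead set something like:
//
//   copy.bufferRowLength = padded_row_texels;  // hypothetical stride in texels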
-vk::ImageSubresourceRange CachedSurface::GetImageSubresourceRange() const {
+VkImageSubresourceRange CachedSurface::GetImageSubresourceRange() const {
return {image->GetAspectMask(), 0, params.num_levels, 0,
static_cast<u32>(params.GetNumLayers())};
}
@@ -321,12 +351,12 @@ CachedSurfaceView::CachedSurfaceView(const VKDevice& device, CachedSurface& surf
aspect_mask{surface.GetAspectMask()}, device{device}, surface{surface},
base_layer{params.base_layer}, num_layers{params.num_layers}, base_level{params.base_level},
num_levels{params.num_levels}, image_view_type{image ? GetImageViewType(params.target)
- : vk::ImageViewType{}} {}
+ : VK_IMAGE_VIEW_TYPE_1D} {}
CachedSurfaceView::~CachedSurfaceView() = default;
-vk::ImageView CachedSurfaceView::GetHandle(SwizzleSource x_source, SwizzleSource y_source,
- SwizzleSource z_source, SwizzleSource w_source) {
+VkImageView CachedSurfaceView::GetHandle(SwizzleSource x_source, SwizzleSource y_source,
+ SwizzleSource z_source, SwizzleSource w_source) {
const u32 swizzle = EncodeSwizzle(x_source, y_source, z_source, w_source);
if (last_image_view && last_swizzle == swizzle) {
return last_image_view;
@@ -351,37 +381,45 @@ vk::ImageView CachedSurfaceView::GetHandle(SwizzleSource x_source, SwizzleSource
// Games can sample depth or stencil values on textures. This is decided by the swizzle value on
// hardware. To emulate this on Vulkan we specify it in the aspect.
- vk::ImageAspectFlags aspect = aspect_mask;
- if (aspect == (vk::ImageAspectFlagBits::eDepth | vk::ImageAspectFlagBits::eStencil)) {
+ VkImageAspectFlags aspect = aspect_mask;
+ if (aspect == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
UNIMPLEMENTED_IF(x_source != SwizzleSource::R && x_source != SwizzleSource::G);
const bool is_first = x_source == SwizzleSource::R;
switch (params.pixel_format) {
case VideoCore::Surface::PixelFormat::Z24S8:
case VideoCore::Surface::PixelFormat::Z32FS8:
- aspect = is_first ? vk::ImageAspectFlagBits::eDepth : vk::ImageAspectFlagBits::eStencil;
+ aspect = is_first ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_STENCIL_BIT;
break;
case VideoCore::Surface::PixelFormat::S8Z24:
- aspect = is_first ? vk::ImageAspectFlagBits::eStencil : vk::ImageAspectFlagBits::eDepth;
+ aspect = is_first ? VK_IMAGE_ASPECT_STENCIL_BIT : VK_IMAGE_ASPECT_DEPTH_BIT;
break;
default:
- aspect = vk::ImageAspectFlagBits::eDepth;
+ aspect = VK_IMAGE_ASPECT_DEPTH_BIT;
UNIMPLEMENTED();
}
// Vulkan doesn't seem to understand swizzling of a depth stencil image, use identity
- swizzle_x = vk::ComponentSwizzle::eR;
- swizzle_y = vk::ComponentSwizzle::eG;
- swizzle_z = vk::ComponentSwizzle::eB;
- swizzle_w = vk::ComponentSwizzle::eA;
+ swizzle_x = VK_COMPONENT_SWIZZLE_R;
+ swizzle_y = VK_COMPONENT_SWIZZLE_G;
+ swizzle_z = VK_COMPONENT_SWIZZLE_B;
+ swizzle_w = VK_COMPONENT_SWIZZLE_A;
}
- const vk::ImageViewCreateInfo image_view_ci(
- {}, surface.GetImageHandle(), image_view_type, surface.GetImage().GetFormat(),
- {swizzle_x, swizzle_y, swizzle_z, swizzle_w},
- {aspect, base_level, num_levels, base_layer, num_layers});
+ VkImageViewCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ ci.image = surface.GetImageHandle();
+ ci.viewType = image_view_type;
+ ci.format = surface.GetImage().GetFormat();
+ ci.components = {swizzle_x, swizzle_y, swizzle_z, swizzle_w};
+ ci.subresourceRange.aspectMask = aspect;
+ ci.subresourceRange.baseMipLevel = base_level;
+ ci.subresourceRange.levelCount = num_levels;
+ ci.subresourceRange.baseArrayLayer = base_layer;
+ ci.subresourceRange.layerCount = num_layers;
+ image_view = device.GetLogical().CreateImageView(ci);
- const auto dev = device.GetLogical();
- image_view = dev.createImageViewUnique(image_view_ci, nullptr, device.GetDispatchLoader());
return last_image_view = *image_view;
}
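// GetHandle memoizes the most recently created view: last_swizzle and
// last_image_view short-circuit the rebuild when the requested swizzle
// matches. EncodeSwizzle (declared elsewhere in this cache) presumably packs
// the four sources into one u32 key, along the lines of:
//
//   (u32(x) << 24) | (u32(y) << 16) | (u32(z) << 8) | u32(w)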
@@ -418,25 +456,36 @@ void VKTextureCache::ImageCopy(Surface& src_surface, Surface& dst_surface,
scheduler.RequestOutsideRenderPassOperationContext();
src_surface->Transition(copy_params.source_z, copy_params.depth, copy_params.source_level, 1,
- vk::PipelineStageFlagBits::eTransfer, vk::AccessFlagBits::eTransferRead,
- vk::ImageLayout::eTransferSrcOptimal);
- dst_surface->Transition(
- dst_base_layer, num_layers, copy_params.dest_level, 1, vk::PipelineStageFlagBits::eTransfer,
- vk::AccessFlagBits::eTransferWrite, vk::ImageLayout::eTransferDstOptimal);
-
- const vk::ImageSubresourceLayers src_subresource(
- src_surface->GetAspectMask(), copy_params.source_level, copy_params.source_z, num_layers);
- const vk::ImageSubresourceLayers dst_subresource(
- dst_surface->GetAspectMask(), copy_params.dest_level, dst_base_layer, num_layers);
- const vk::Offset3D src_offset(copy_params.source_x, copy_params.source_y, 0);
- const vk::Offset3D dst_offset(copy_params.dest_x, copy_params.dest_y, dst_offset_z);
- const vk::Extent3D extent(copy_params.width, copy_params.height, extent_z);
- const vk::ImageCopy copy(src_subresource, src_offset, dst_subresource, dst_offset, extent);
- const vk::Image src_image = src_surface->GetImageHandle();
- const vk::Image dst_image = dst_surface->GetImageHandle();
- scheduler.Record([src_image, dst_image, copy](auto cmdbuf, auto& dld) {
- cmdbuf.copyImage(src_image, vk::ImageLayout::eTransferSrcOptimal, dst_image,
- vk::ImageLayout::eTransferDstOptimal, {copy}, dld);
+ VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_READ_BIT,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
+ dst_surface->Transition(dst_base_layer, num_layers, copy_params.dest_level, 1,
+ VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+
+ VkImageCopy copy;
+ copy.srcSubresource.aspectMask = src_surface->GetAspectMask();
+ copy.srcSubresource.mipLevel = copy_params.source_level;
+ copy.srcSubresource.baseArrayLayer = copy_params.source_z;
+ copy.srcSubresource.layerCount = num_layers;
+ copy.srcOffset.x = copy_params.source_x;
+ copy.srcOffset.y = copy_params.source_y;
+ copy.srcOffset.z = 0;
+ copy.dstSubresource.aspectMask = dst_surface->GetAspectMask();
+ copy.dstSubresource.mipLevel = copy_params.dest_level;
+ copy.dstSubresource.baseArrayLayer = dst_base_layer;
+ copy.dstSubresource.layerCount = num_layers;
+ copy.dstOffset.x = copy_params.dest_x;
+ copy.dstOffset.y = copy_params.dest_y;
+ copy.dstOffset.z = dst_offset_z;
+ copy.extent.width = copy_params.width;
+ copy.extent.height = copy_params.height;
+ copy.extent.depth = extent_z;
+
+ const VkImage src_image = src_surface->GetImageHandle();
+ const VkImage dst_image = dst_surface->GetImageHandle();
+ scheduler.Record([src_image, dst_image, copy](vk::CommandBuffer cmdbuf) {
+ cmdbuf.CopyImage(src_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dst_image,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, copy);
});
}
@@ -445,25 +494,34 @@ void VKTextureCache::ImageBlit(View& src_view, View& dst_view,
// We can't blit inside a renderpass
scheduler.RequestOutsideRenderPassOperationContext();
- src_view->Transition(vk::ImageLayout::eTransferSrcOptimal, vk::PipelineStageFlagBits::eTransfer,
- vk::AccessFlagBits::eTransferRead);
- dst_view->Transition(vk::ImageLayout::eTransferDstOptimal, vk::PipelineStageFlagBits::eTransfer,
- vk::AccessFlagBits::eTransferWrite);
-
- const auto& cfg = copy_config;
- const auto src_top_left = vk::Offset3D(cfg.src_rect.left, cfg.src_rect.top, 0);
- const auto src_bot_right = vk::Offset3D(cfg.src_rect.right, cfg.src_rect.bottom, 1);
- const auto dst_top_left = vk::Offset3D(cfg.dst_rect.left, cfg.dst_rect.top, 0);
- const auto dst_bot_right = vk::Offset3D(cfg.dst_rect.right, cfg.dst_rect.bottom, 1);
- const vk::ImageBlit blit(src_view->GetImageSubresourceLayers(), {src_top_left, src_bot_right},
- dst_view->GetImageSubresourceLayers(), {dst_top_left, dst_bot_right});
+ src_view->Transition(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, VK_PIPELINE_STAGE_TRANSFER_BIT,
+ VK_ACCESS_TRANSFER_READ_BIT);
+ dst_view->Transition(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_PIPELINE_STAGE_TRANSFER_BIT,
+ VK_ACCESS_TRANSFER_WRITE_BIT);
+
+ VkImageBlit blit;
+ blit.srcSubresource = src_view->GetImageSubresourceLayers();
+ blit.srcOffsets[0].x = copy_config.src_rect.left;
+ blit.srcOffsets[0].y = copy_config.src_rect.top;
+ blit.srcOffsets[0].z = 0;
+ blit.srcOffsets[1].x = copy_config.src_rect.right;
+ blit.srcOffsets[1].y = copy_config.src_rect.bottom;
+ blit.srcOffsets[1].z = 1;
+ blit.dstSubresource = dst_view->GetImageSubresourceLayers();
+ blit.dstOffsets[0].x = copy_config.dst_rect.left;
+ blit.dstOffsets[0].y = copy_config.dst_rect.top;
+ blit.dstOffsets[0].z = 0;
+ blit.dstOffsets[1].x = copy_config.dst_rect.right;
+ blit.dstOffsets[1].y = copy_config.dst_rect.bottom;
+ blit.dstOffsets[1].z = 1;
+
const bool is_linear = copy_config.filter == Tegra::Engines::Fermi2D::Filter::Linear;
scheduler.Record([src_image = src_view->GetImage(), dst_image = dst_view->GetImage(), blit,
- is_linear](auto cmdbuf, auto& dld) {
- cmdbuf.blitImage(src_image, vk::ImageLayout::eTransferSrcOptimal, dst_image,
- vk::ImageLayout::eTransferDstOptimal, {blit},
- is_linear ? vk::Filter::eLinear : vk::Filter::eNearest, dld);
+ is_linear](vk::CommandBuffer cmdbuf) {
+ cmdbuf.BlitImage(src_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dst_image,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, blit,
+ is_linear ? VK_FILTER_LINEAR : VK_FILTER_NEAREST);
});
}
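// Note: each srcOffsets/dstOffsets pair defines opposite corners of a blit
// rectangle; when the two extents differ the blit scales, and `is_linear`
// selects the filter. For example, a 2x downscale would record:
//
//   blit.srcOffsets[1] = {width, height, 1};
//   blit.dstOffsets[1] = {width / 2, height / 2, 1};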