From 86f6b6b7b2d930e8203114332b04a5c49a780b06 Mon Sep 17 00:00:00 2001
From: Liam
Date: Thu, 10 Aug 2023 21:34:43 -0400
Subject: vfs: expand support for NCA reading

---
 .../fssystem/fssystem_bucket_tree_template_impl.h  | 170 +++++++++++++++++++++
 1 file changed, 170 insertions(+)
 create mode 100644 src/core/file_sys/fssystem/fssystem_bucket_tree_template_impl.h

diff --git a/src/core/file_sys/fssystem/fssystem_bucket_tree_template_impl.h b/src/core/file_sys/fssystem/fssystem_bucket_tree_template_impl.h
new file mode 100644
index 000000000..030b2916b
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_bucket_tree_template_impl.h
@@ -0,0 +1,170 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "core/file_sys/errors.h"
+#include "core/file_sys/fssystem/fssystem_bucket_tree.h"
+#include "core/file_sys/fssystem/fssystem_bucket_tree_utils.h"
+#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
+
+namespace FileSys {
+
+template <typename EntryType>
+Result BucketTree::ScanContinuousReading(ContinuousReadingInfo* out_info,
+                                         const ContinuousReadingParam<EntryType>& param) const {
+    static_assert(std::is_trivial_v<ContinuousReadingParam<EntryType>>);
+
+    // Validate our preconditions.
+    ASSERT(this->IsInitialized());
+    ASSERT(out_info != nullptr);
+    ASSERT(m_entry_size == sizeof(EntryType));
+
+    // Reset the output.
+    out_info->Reset();
+
+    // If there's nothing to read, we're done.
+    R_SUCCEED_IF(param.size == 0);
+
+    // If we're reading a fragment, we're done.
+    R_SUCCEED_IF(param.entry.IsFragment());
+
+    // Validate the first entry.
+    auto entry = param.entry;
+    auto cur_offset = param.offset;
+    R_UNLESS(entry.GetVirtualOffset() <= cur_offset, ResultOutOfRange);
+
+    // Create a pooled buffer for our scan.
+    PooledBuffer pool(m_node_size, 1);
+    char* buffer = nullptr;
+
+    s64 entry_storage_size = m_entry_storage->GetSize();
+
+    // Read the node.
+    if (m_node_size <= pool.GetSize()) {
+        buffer = pool.GetBuffer();
+        const auto ofs = param.entry_set.index * static_cast<s64>(m_node_size);
+        R_UNLESS(m_node_size + ofs <= static_cast<size_t>(entry_storage_size),
+                 ResultInvalidBucketTreeNodeEntryCount);
+
+        m_entry_storage->Read(reinterpret_cast<u8*>(buffer), m_node_size, ofs);
+    }
+
+    // Calculate extents.
+    const auto end_offset = cur_offset + static_cast<s64>(param.size);
+    s64 phys_offset = entry.GetPhysicalOffset();
+
+    // Start merge tracking.
+    s64 merge_size = 0;
+    s64 readable_size = 0;
+    bool merged = false;
+
+    // Iterate.
+    auto entry_index = param.entry_index;
+    for (const auto entry_count = param.entry_set.count; entry_index < entry_count; ++entry_index) {
+        // If we're past the end, we're done.
+        if (end_offset <= cur_offset) {
+            break;
+        }
+
+        // Validate the entry offset.
+        const auto entry_offset = entry.GetVirtualOffset();
+        R_UNLESS(entry_offset <= cur_offset, ResultInvalidIndirectEntryOffset);
+
+        // Get the next entry.
+        EntryType next_entry = {};
+        s64 next_entry_offset;
+
+        if (entry_index + 1 < entry_count) {
+            if (buffer != nullptr) {
+                const auto ofs = impl::GetBucketTreeEntryOffset(0, m_entry_size, entry_index + 1);
+                std::memcpy(std::addressof(next_entry), buffer + ofs, m_entry_size);
+            } else {
+                const auto ofs = impl::GetBucketTreeEntryOffset(param.entry_set.index, m_node_size,
+                                                                m_entry_size, entry_index + 1);
+                m_entry_storage->ReadObject(std::addressof(next_entry), ofs);
+            }
+
+            next_entry_offset = next_entry.GetVirtualOffset();
+            R_UNLESS(param.offsets.IsInclude(next_entry_offset), ResultInvalidIndirectEntryOffset);
+        } else {
+            next_entry_offset = param.entry_set.offset;
+        }
+
+        // Validate the next entry offset.
+        R_UNLESS(cur_offset < next_entry_offset, ResultInvalidIndirectEntryOffset);
+
+        // Determine how much data there is.
+        const auto data_size = next_entry_offset - cur_offset;
+        ASSERT(data_size > 0);
+
+        // Determine how much data we should read.
+        const auto remaining_size = end_offset - cur_offset;
+        const size_t read_size = static_cast<size_t>(std::min(data_size, remaining_size));
+        ASSERT(read_size <= param.size);
+
+        // Update our merge tracking.
+        if (entry.IsFragment()) {
+            // If we can't merge, stop looping.
+            if (EntryType::FragmentSizeMax <= read_size || remaining_size <= data_size) {
+                break;
+            }
+
+            // Otherwise, add the current size to the merge size.
+            merge_size += read_size;
+        } else {
+            // If we can't merge, stop looping.
+            if (phys_offset != entry.GetPhysicalOffset()) {
+                break;
+            }
+
+            // Add the size to the readable amount.
+            readable_size += merge_size + read_size;
+            ASSERT(readable_size <= static_cast<s64>(param.size));
+
+            // Update whether we've merged.
+            merged |= merge_size > 0;
+            merge_size = 0;
+        }
+
+        // Advance.
+        cur_offset += read_size;
+        ASSERT(cur_offset <= end_offset);
+
+        phys_offset += next_entry_offset - entry_offset;
+        entry = next_entry;
+    }
+
+    // If we merged, set our readable size.
+    if (merged) {
+        out_info->SetReadSize(static_cast<size_t>(readable_size));
+    }
+    out_info->SetSkipCount(entry_index - param.entry_index);
+
+    R_SUCCEED();
+}
+
+template <typename EntryType>
+Result BucketTree::Visitor::ScanContinuousReading(ContinuousReadingInfo* out_info, s64 offset,
+                                                  size_t size) const {
+    static_assert(std::is_trivial_v<EntryType>);
+    ASSERT(this->IsValid());
+
+    // Create our parameters.
+    ContinuousReadingParam<EntryType> param = {
+        .offset = offset,
+        .size = size,
+        .entry_set = m_entry_set.header,
+        .entry_index = m_entry_index,
+        .offsets{},
+        .entry{},
+    };
+    std::memcpy(std::addressof(param.offsets), std::addressof(m_offsets),
+                sizeof(BucketTree::Offsets));
+    std::memcpy(std::addressof(param.entry), m_entry, sizeof(EntryType));
+
+    // Scan.
+    R_RETURN(m_tree->ScanContinuousReading(out_info, param));
+}
+
+} // namespace FileSys
--
cgit v1.2.3
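
For review context, a minimal caller sketch of the new Visitor::ScanContinuousReading entry point follows. It is not part of this patch: the IndirectStorage::Entry type, its header path, and the CanDo()/GetReadSize()/GetSkipCount() accessors on ContinuousReadingInfo are assumptions taken from the Atmosphère fs code this port mirrors, so treat the snippet as illustrative only.

    // Hypothetical caller, for illustration only; not from this commit.
    // IndirectStorage::Entry and the ContinuousReadingInfo accessors are assumed.
    #include <memory>

    #include "core/file_sys/fssystem/fssystem_bucket_tree.h"
    #include "core/file_sys/fssystem/fssystem_indirect_storage.h" // assumed header

    namespace FileSys {

    Result ScanExample(const BucketTree::Visitor& visitor, s64 offset, size_t size) {
        // Ask the tree whether [offset, offset + size) maps onto one physically
        // continuous run that could be serviced by a single merged read.
        BucketTree::ContinuousReadingInfo info;
        R_TRY(visitor.ScanContinuousReading<IndirectStorage::Entry>(std::addressof(info),
                                                                    offset, size));

        if (info.CanDo()) {
            // info.GetReadSize() bytes are physically continuous, and
            // info.GetSkipCount() entries were already covered by the scan.
        }

        R_SUCCEED();
    }

    } // namespace FileSys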