From 2931101e6f5aa755566ef40f6e6dc71909fd3e92 Mon Sep 17 00:00:00 2001
From: Fernando Sahmkow
Date: Sun, 30 Jan 2022 22:26:01 +0100
Subject: NVDRV: Refactor Host1x

---
 src/core/hle/service/nvdrv/core/container.cpp      |  8 ++---
 src/core/hle/service/nvdrv/core/container.h        | 10 ++++--
 src/core/hle/service/nvdrv/core/nvmap.cpp          | 33 ++++++++-----------
 src/core/hle/service/nvdrv/core/nvmap.h            | 18 ++++---
 .../hle/service/nvdrv/core/syncpoint_manager.cpp   |  6 ++--
 .../hle/service/nvdrv/core/syncpoint_manager.h     | 12 ++++---
 .../service/nvdrv/devices/nvhost_nvdec_common.cpp  | 37 +++++++---------------
 src/core/hle/service/nvdrv/nvdrv.cpp               |  2 +-
 8 files changed, 62 insertions(+), 64 deletions(-)

(limited to 'src/core/hle/service')

diff --git a/src/core/hle/service/nvdrv/core/container.cpp b/src/core/hle/service/nvdrv/core/container.cpp
index 97b5b2c86..fbd66f001 100644
--- a/src/core/hle/service/nvdrv/core/container.cpp
+++ b/src/core/hle/service/nvdrv/core/container.cpp
@@ -6,18 +6,18 @@
 #include "core/hle/service/nvdrv/core/container.h"
 #include "core/hle/service/nvdrv/core/nvmap.h"
 #include "core/hle/service/nvdrv/core/syncpoint_manager.h"
-#include "video_core/gpu.h"
+#include "video_core/host1x/host1x.h"
 
 namespace Service::Nvidia::NvCore {
 
 struct ContainerImpl {
-    ContainerImpl(Tegra::GPU& gpu_) : file{}, manager{gpu_} {}
+    ContainerImpl(Tegra::Host1x::Host1x& host1x_) : file{host1x_}, manager{host1x_} {}
     NvMap file;
     SyncpointManager manager;
 };
 
-Container::Container(Tegra::GPU& gpu_) {
-    impl = std::make_unique<ContainerImpl>(gpu_);
+Container::Container(Tegra::Host1x::Host1x& host1x_) {
+    impl = std::make_unique<ContainerImpl>(host1x_);
 }
 
 Container::~Container() = default;
diff --git a/src/core/hle/service/nvdrv/core/container.h b/src/core/hle/service/nvdrv/core/container.h
index 91ac2305a..da75d74ff 100644
--- a/src/core/hle/service/nvdrv/core/container.h
+++ b/src/core/hle/service/nvdrv/core/container.h
@@ -8,8 +8,12 @@
 #include <memory>
 
 namespace Tegra {
-class GPU;
-}
+
+namespace Host1x {
+class Host1x;
+} // namespace Host1x
+
+} // namespace Tegra
 
 namespace Service::Nvidia::NvCore {
 
@@ -20,7 +24,7 @@ struct ContainerImpl;
 
 class Container {
 public:
-    Container(Tegra::GPU& gpu_);
+    Container(Tegra::Host1x::Host1x& host1x);
     ~Container();
 
     NvMap& GetNvMapFile();
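Note: after this change, NvCore's only external dependency is Host1x, which now owns the SMMU allocator, SMMU memory manager, and syncpoint state that previously sat behind Tegra::GPU. A minimal wiring sketch of the new dependency follows; Container::GetSyncpointManager() does not appear in the hunks above and is an assumption made for illustration.

    // Sketch only: standing up NvCore state from a Host1x reference.
    // GetNvMapFile() appears in the header above; GetSyncpointManager()
    // is an assumed accessor shown for illustration.
    #include "core/hle/service/nvdrv/core/container.h"
    #include "core/hle/service/nvdrv/core/nvmap.h"
    #include "core/hle/service/nvdrv/core/syncpoint_manager.h"
    #include "video_core/host1x/host1x.h"

    void WireNvCore(Tegra::Host1x::Host1x& host1x) {
        Service::Nvidia::NvCore::Container container{host1x};
        auto& nvmap = container.GetNvMapFile();             // handle bookkeeping
        auto& syncpoints = container.GetSyncpointManager(); // guest fence state
        // Both members now reach Host1x directly; no Tegra::GPU is involved.
        (void)nvmap;
        (void)syncpoints;
    }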
diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp
index 1126daeb5..9acec7ba6 100644
--- a/src/core/hle/service/nvdrv/core/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/core/nvmap.cpp
@@ -7,6 +7,7 @@
 #include "common/logging/log.h"
 #include "core/hle/service/nvdrv/core/nvmap.h"
 #include "core/memory.h"
+#include "video_core/host1x/host1x.h"
 
 using Core::Memory::YUZU_PAGESIZE;
 
@@ -61,7 +62,7 @@ NvResult NvMap::Handle::Duplicate(bool internal_session) {
     return NvResult::Success;
 }
 
-NvMap::NvMap() = default;
+NvMap::NvMap(Tegra::Host1x::Host1x& host1x_) : host1x{host1x_} {}
 
 void NvMap::AddHandle(std::shared_ptr<Handle> handle_description) {
     std::scoped_lock lock(handles_lock);
@@ -77,12 +78,11 @@ void NvMap::UnmapHandle(Handle& handle_description) {
     }
 
     // Free and unmap the handle from the SMMU
-    /*
-    state.soc->smmu.Unmap(handle_description.pin_virt_address,
-                          static_cast<u32>(handle_description.aligned_size));
-    smmuAllocator.Free(handle_description.pin_virt_address,
-                       static_cast<u32>(handle_description.aligned_size));
-    handle_description.pin_virt_address = 0;
-    */
+    host1x.MemoryManager().Unmap(static_cast<GPUVAddr>(handle_description.pin_virt_address),
+                                 handle_description.aligned_size);
+    host1x.Allocator().Free(handle_description.pin_virt_address,
+                            static_cast<u32>(handle_description.aligned_size));
+    handle_description.pin_virt_address = 0;
 }
 
 bool NvMap::TryRemoveHandle(const Handle& handle_description) {
@@ -131,12 +131,9 @@ VAddr NvMap::GetHandleAddress(Handle::Id handle) {
 }
 
 u32 NvMap::PinHandle(NvMap::Handle::Id handle) {
-    UNIMPLEMENTED_MSG("pinning");
-    return 0;
-    /*
     auto handle_description{GetHandle(handle)};
-    if (!handle_description)
-        [[unlikely]] return 0;
+    if (!handle_description) [[unlikely]]
+        return 0;
 
     std::scoped_lock lock(handle_description->mutex);
     if (!handle_description->pins) {
@@ -157,8 +154,10 @@ u32 NvMap::PinHandle(NvMap::Handle::Id handle) {
 
         // If not then allocate some space and map it
         u32 address{};
+        auto& smmu_allocator = host1x.Allocator();
+        auto& smmu_memory_manager = host1x.MemoryManager();
         while (!(address =
-                     smmuAllocator.Allocate(static_cast<u32>(handle_description->aligned_size)))) {
+                     smmu_allocator.Allocate(static_cast<u32>(handle_description->aligned_size)))) {
             // Free handles until the allocation succeeds
             std::scoped_lock queueLock(unmap_queue_lock);
             if (auto freeHandleDesc{unmap_queue.front()}) {
@@ -172,19 +171,16 @@ u32 NvMap::PinHandle(NvMap::Handle::Id handle) {
             }
         }
 
-        state.soc->smmu.Map(address, handle_description->GetPointer(),
-                            static_cast<u32>(handle_description->aligned_size));
+        smmu_memory_manager.Map(static_cast<GPUVAddr>(address), handle_description->address,
+                                handle_description->aligned_size);
         handle_description->pin_virt_address = address;
     }
 
     handle_description->pins++;
     return handle_description->pin_virt_address;
-    */
 }
 
 void NvMap::UnpinHandle(Handle::Id handle) {
-    UNIMPLEMENTED_MSG("Unpinning");
-    /*
     auto handle_description{GetHandle(handle)};
     if (!handle_description)
         return;
@@ -199,7 +195,6 @@ void NvMap::UnpinHandle(Handle::Id handle) {
         unmap_queue.push_back(handle_description);
         handle_description->unmap_queue_entry = std::prev(unmap_queue.end());
     }
-    */
 }
 
 std::optional<NvMap::FreeInfo> NvMap::FreeHandle(Handle::Id handle, bool internal_session) {
diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h
index 5e6c73589..5acdc961e 100644
--- a/src/core/hle/service/nvdrv/core/nvmap.h
+++ b/src/core/hle/service/nvdrv/core/nvmap.h
@@ -15,6 +15,14 @@
 #include "common/common_types.h"
 #include "core/hle/service/nvdrv/nvdata.h"
 
+namespace Tegra {
+
+namespace Host1x {
+class Host1x;
+} // namespace Host1x
+
+} // namespace Tegra
+
 namespace Service::Nvidia::NvCore {
 /**
  * @brief The nvmap core class holds the global state for nvmap and provides methods to manage
@@ -90,15 +98,17 @@ public:
     };
 
 private:
-    std::list<std::shared_ptr<Handle>> unmap_queue;
-    std::mutex unmap_queue_lock; //!< Protects access to `unmap_queue`
+    std::list<std::shared_ptr<Handle>> unmap_queue{};
+    std::mutex unmap_queue_lock{}; //!< Protects access to `unmap_queue`
 
-    std::unordered_map<Handle::Id, std::shared_ptr<Handle>> handles; //!< Main owning map of handles
+    std::unordered_map<Handle::Id, std::shared_ptr<Handle>>
+        handles{}; //!< Main owning map of handles
     std::mutex handles_lock; //!< Protects access to `handles`
 
     static constexpr u32 HandleIdIncrement{
         4}; //!< Each new handle ID is an increment of 4 from the previous
     std::atomic<u32> next_handle_id{HandleIdIncrement};
+    Tegra::Host1x::Host1x& host1x;
 
     void AddHandle(std::shared_ptr<Handle> handle);
 
@@ -125,7 +135,7 @@ public:
         bool was_uncached; //!< If the handle was allocated as uncached
     };
 
-    NvMap();
+    NvMap(Tegra::Host1x::Host1x& host1x);
 
     /**
     * @brief Creates an unallocated handle of the given size
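Note: the PinHandle/UnpinHandle pair above implements deferred unmapping. An unpinned handle keeps its SMMU mapping and joins unmap_queue; PinHandle either rescues a still-mapped handle from that queue or allocates fresh space, evicting queued handles only when the allocator fails. A self-contained sketch of that policy follows, with stand-in types and no locking (the real code guards the queue and each handle with mutexes; none of these names are yuzu's real classes):

    #include <cstdint>
    #include <list>
    #include <memory>

    // Illustrative stand-in for an nvmap handle.
    struct FakeHandle {
        uint32_t pins{};
        uint32_t pin_virt_address{};
        std::list<std::shared_ptr<FakeHandle>>::iterator unmap_queue_entry{};
    };

    class PinCache {
    public:
        uint32_t Pin(const std::shared_ptr<FakeHandle>& handle) {
            if (handle->pins == 0) {
                if (handle->pin_virt_address != 0) {
                    // Still mapped from an earlier pin: rescue it from the
                    // eviction queue instead of remapping.
                    unmap_queue.erase(handle->unmap_queue_entry);
                } else {
                    handle->pin_virt_address = Allocate();
                }
            }
            ++handle->pins;
            return handle->pin_virt_address;
        }

        void Unpin(const std::shared_ptr<FakeHandle>& handle) {
            if (--handle->pins == 0) {
                // Keep the mapping alive; only queue it for eventual eviction.
                unmap_queue.push_back(handle);
                handle->unmap_queue_entry = std::prev(unmap_queue.end());
            }
        }

    private:
        uint32_t Allocate() {
            // Stand-in for host1x.Allocator(): pretend space is scarce and
            // evict the oldest unpinned handle before handing out an address.
            if (!unmap_queue.empty()) {
                auto victim = unmap_queue.front();
                victim->pin_virt_address = 0; // real code also unmaps the SMMU
                unmap_queue.pop_front();
            }
            return next_address++;
        }

        std::list<std::shared_ptr<FakeHandle>> unmap_queue;
        uint32_t next_address{0x1000};
    };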
diff --git a/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp
index ff6cbb37e..61e00448c 100644
--- a/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp
+++ b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp
@@ -3,16 +3,16 @@
 
 #include "common/assert.h"
 #include "core/hle/service/nvdrv/core/syncpoint_manager.h"
-#include "video_core/gpu.h"
+#include "video_core/host1x/host1x.h"
 
 namespace Service::Nvidia::NvCore {
 
-SyncpointManager::SyncpointManager(Tegra::GPU& gpu_) : gpu{gpu_} {}
+SyncpointManager::SyncpointManager(Tegra::Host1x::Host1x& host1x_) : host1x{host1x_} {}
 
 SyncpointManager::~SyncpointManager() = default;
 
 u32 SyncpointManager::RefreshSyncpoint(u32 syncpoint_id) {
-    syncpoints[syncpoint_id].min = gpu.GetSyncpointValue(syncpoint_id);
+    syncpoints[syncpoint_id].min = host1x.GetSyncpointManager().GetHostSyncpointValue(syncpoint_id);
     return GetSyncpointMin(syncpoint_id);
 }
 
diff --git a/src/core/hle/service/nvdrv/core/syncpoint_manager.h b/src/core/hle/service/nvdrv/core/syncpoint_manager.h
index cf7f0b4be..f332edc6e 100644
--- a/src/core/hle/service/nvdrv/core/syncpoint_manager.h
+++ b/src/core/hle/service/nvdrv/core/syncpoint_manager.h
@@ -10,14 +10,18 @@
 #include "core/hle/service/nvdrv/nvdata.h"
 
 namespace Tegra {
-class GPU;
-}
+
+namespace Host1x {
+class Host1x;
+} // namespace Host1x
+
+} // namespace Tegra
 
 namespace Service::Nvidia::NvCore {
 
 class SyncpointManager final {
 public:
-    explicit SyncpointManager(Tegra::GPU& gpu_);
+    explicit SyncpointManager(Tegra::Host1x::Host1x& host1x);
     ~SyncpointManager();
 
     /**
@@ -78,7 +82,7 @@ private:
 
     std::array<SyncpointInfo, MaxSyncPoints> syncpoints{};
 
-    Tegra::GPU& gpu;
+    Tegra::Host1x::Host1x& host1x;
 };
 
 } // namespace Service::Nvidia::NvCore
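Note: RefreshSyncpoint now samples the emulated host1x counter rather than asking the GPU. The manager tracks two values per syncpoint: max, the highest value promised to guest waiters, and min, the last counter value read back from the host side. A simplified sketch of that bookkeeping, assuming the conventional wraparound-safe expiry comparison used by nvhost fences (the min/max field names follow the patch; the class itself is illustrative):

    #include <array>
    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    struct SyncpointInfo {
        std::atomic<uint32_t> min{0}; // last value observed from host1x
        std::atomic<uint32_t> max{0}; // highest value promised to the guest
    };

    class GuestSyncpoints {
    public:
        // Hand out the next value a guest fence will wait on.
        uint32_t Increment(uint32_t id) { return ++points[id].max; }

        // Mirror the host-side counter (stand-in for
        // host1x.GetSyncpointManager().GetHostSyncpointValue(id)).
        uint32_t Refresh(uint32_t id, uint32_t host_value) {
            points[id].min.store(host_value);
            return host_value;
        }

        // Wraparound-safe test: the fence at 'threshold' has expired once the
        // counter has advanced past it, even across u32 overflow.
        bool IsExpired(uint32_t id, uint32_t threshold) const {
            const uint32_t min = points[id].min.load();
            const uint32_t max = points[id].max.load();
            return (max - threshold) >= (max - min);
        }

    private:
        static constexpr std::size_t MaxSyncPoints = 192; // assumed limit
        std::array<SyncpointInfo, MaxSyncPoints> points{};
    };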
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
index 77e6a1cd6..b17589aa3 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
@@ -13,6 +13,7 @@
 #include "core/hle/service/nvdrv/core/syncpoint_manager.h"
 #include "core/hle/service/nvdrv/devices/nvhost_nvdec_common.h"
 #include "core/memory.h"
+#include "video_core/host1x/host1x.h"
 #include "video_core/memory_manager.h"
 #include "video_core/renderer_base.h"
 
@@ -140,29 +141,8 @@ NvResult nvhost_nvdec_common::MapBuffer(const std::vector<u8>& input, std::vecto
 
     SliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer));
 
-    auto& gpu = system.GPU();
-
     for (auto& cmd_buffer : cmd_buffer_handles) {
-        auto object{nvmap.GetHandle(cmd_buffer.map_handle)};
-        if (!object) {
-            LOG_ERROR(Service_NVDRV, "invalid cmd_buffer nvmap_handle={:X}", cmd_buffer.map_handle);
-            std::memcpy(output.data(), &params, output.size());
-            return NvResult::InvalidState;
-        }
-        if (object->dma_map_addr == 0) {
-            // NVDEC and VIC memory is in the 32-bit address space
-            // MapAllocate32 will attempt to map a lower 32-bit value in the shared gpu memory space
-            const GPUVAddr low_addr =
-                gpu.MemoryManager().MapAllocate32(object->address, object->size);
-            object->dma_map_addr = static_cast<u32>(low_addr);
-            // Ensure that the dma_map_addr is indeed in the lower 32-bit address space.
-            ASSERT(object->dma_map_addr == low_addr);
-        }
-        if (!object->dma_map_addr) {
-            LOG_ERROR(Service_NVDRV, "failed to map size={}", object->size);
-        } else {
-            cmd_buffer.map_address = static_cast<u32_le>(object->dma_map_addr);
-        }
+        cmd_buffer.map_address = nvmap.PinHandle(cmd_buffer.map_handle);
     }
     std::memcpy(output.data(), &params, sizeof(IoctlMapBuffer));
     std::memcpy(output.data() + sizeof(IoctlMapBuffer), cmd_buffer_handles.data(),
@@ -172,11 +152,16 @@
 }
 
 NvResult nvhost_nvdec_common::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output) {
-    // This is intntionally stubbed.
-    // Skip unmapping buffers here, as to not break the continuity of the VP9 reference frame
-    // addresses, and risk invalidating data before the async GPU thread is done with it
+    IoctlMapBuffer params{};
+    std::memcpy(&params, input.data(), sizeof(IoctlMapBuffer));
+    std::vector<MapBufferEntry> cmd_buffer_handles(params.num_entries);
+
+    SliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer));
+    for (auto& cmd_buffer : cmd_buffer_handles) {
+        nvmap.UnpinHandle(cmd_buffer.map_handle);
+    }
+
     std::memset(output.data(), 0, output.size());
-    LOG_DEBUG(Service_NVDRV, "(STUBBED) called");
     return NvResult::Success;
 }
 
diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp
index b39a4c6db..8a9f3c717 100644
--- a/src/core/hle/service/nvdrv/nvdrv.cpp
+++ b/src/core/hle/service/nvdrv/nvdrv.cpp
@@ -71,7 +71,7 @@ void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger
 }
 
 Module::Module(Core::System& system)
-    : service_context{system, "nvdrv"}, events_interface{*this}, container{system.GPU()} {
+    : service_context{system, "nvdrv"}, events_interface{*this}, container{system.Host1x()} {
     builders["/dev/nvhost-as-gpu"] = [this, &system](DeviceFD fd) {
         std::shared_ptr<nvhost_as_gpu> device =
             std::make_shared<nvhost_as_gpu>(system, *this, container);
-- 
cgit v1.2.3
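Closing note on the nvdec change: MAP_CMD_BUFFER and UNMAP_CMD_BUFFER are now a symmetric pin/unpin pair over the same payload, a small header followed by num_entries handle/address pairs. The old stub's concern about VP9 reference-frame continuity is presumably covered by the deferred unmap queue, since an unpinned handle keeps its SMMU address until the allocator actually needs the space. A sketch of the payload layout both ioctls parse; only num_entries, map_handle, and map_address appear in the hunks above, and the remaining fields plus the ParseEntries helper are assumptions standing in for SliceVectors:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    struct IoctlMapBuffer {
        uint32_t num_entries;        // count of trailing MapBufferEntry records
        uint32_t data_address;       // assumed field, unused by this path
        uint32_t attach_host_ch_das; // assumed field
    };

    struct MapBufferEntry {
        uint32_t map_handle;  // nvmap handle to pin or unpin
        uint32_t map_address; // filled with the 32-bit SMMU address on map
    };

    // Stand-in for SliceVectors: copy the entry array that follows the header.
    // Sketch only; no size validation is performed on the raw input.
    static std::vector<MapBufferEntry> ParseEntries(const std::vector<uint8_t>& input) {
        IoctlMapBuffer params{};
        std::memcpy(&params, input.data(), sizeof(params));
        std::vector<MapBufferEntry> entries(params.num_entries);
        std::memcpy(entries.data(), input.data() + sizeof(params),
                    entries.size() * sizeof(MapBufferEntry));
        return entries;
    }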