author    Fernando Sahmkow <fsahmkow27@gmail.com>   2023-04-23 21:55:16 +0200
committer Fernando Sahmkow <fsahmkow27@gmail.com>   2023-04-23 22:04:14 +0200
commit    e29ced29fa54bf36a06545c53d1d523f3a31f883 (patch)
tree      d3c33325727bdd080d95d7f95018ae710f1dbf2f /src/video_core
parent    Accuracy Normal: reduce accuracy further for perf improvements in Project Lime (diff)
Diffstat (limited to 'src/video_core')
-rw-r--r--   src/video_core/query_cache.h                        122
-rw-r--r--   src/video_core/renderer_opengl/gl_query_cache.cpp    12
-rw-r--r--   src/video_core/renderer_opengl/gl_query_cache.h       6
-rw-r--r--   src/video_core/renderer_opengl/gl_rasterizer.cpp      2
-rw-r--r--   src/video_core/renderer_vulkan/vk_query_cache.cpp    14
-rw-r--r--   src/video_core/renderer_vulkan/vk_query_cache.h       5
-rw-r--r--   src/video_core/renderer_vulkan/vk_rasterizer.cpp      2
7 files changed, 118 insertions, 45 deletions
diff --git a/src/video_core/query_cache.h b/src/video_core/query_cache.h
index cd339b99d..2a14cc36a 100644
--- a/src/video_core/query_cache.h
+++ b/src/video_core/query_cache.h
@@ -17,13 +17,19 @@
#include "common/assert.h"
#include "common/settings.h"
+#include "core/memory.h"
#include "video_core/control/channel_state_cache.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/memory_manager.h"
#include "video_core/rasterizer_interface.h"
+#include "video_core/texture_cache/slot_vector.h"
namespace VideoCommon {
+using AsyncJobId = SlotId;
+
+static constexpr AsyncJobId NULL_ASYNC_JOB_ID{0};
+
template <class QueryCache, class HostCounter>
class CounterStreamBase {
public:
@@ -93,9 +99,13 @@ private:
template <class QueryCache, class CachedQuery, class CounterStream, class HostCounter>
class QueryCacheBase : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
public:
- explicit QueryCacheBase(VideoCore::RasterizerInterface& rasterizer_)
- : rasterizer{rasterizer_}, streams{{CounterStream{static_cast<QueryCache&>(*this),
- VideoCore::QueryType::SamplesPassed}}} {}
+ explicit QueryCacheBase(VideoCore::RasterizerInterface& rasterizer_,
+ Core::Memory::Memory& cpu_memory_)
+ : rasterizer{rasterizer_}, cpu_memory{cpu_memory_}, streams{
+ {CounterStream{static_cast<QueryCache&>(*this),
+ VideoCore::QueryType::SamplesPassed}}} {
+ (void) slot_async_jobs.insert(); // Null value
+ }
void InvalidateRegion(VAddr addr, std::size_t size) {
std::unique_lock lock{mutex};
@@ -126,10 +136,15 @@ public:
query = Register(type, *cpu_addr, host_ptr, timestamp.has_value());
}
- query->BindCounter(Stream(type).Current(), timestamp);
- if (Settings::values.use_asynchronous_gpu_emulation.GetValue()) {
- AsyncFlushQuery(*cpu_addr);
+ auto result = query->BindCounter(Stream(type).Current());
+ if (result) {
+ auto async_job_id = query->GetAsyncJob();
+ auto& async_job = slot_async_jobs[async_job_id];
+ async_job.collected = true;
+ async_job.value = *result;
+ query->SetAsyncJob(NULL_ASYNC_JOB_ID);
}
+ AsyncFlushQuery(query, timestamp, lock);
}
/// Updates counters from GPU state. Expected to be called once per draw, clear or dispatch.
@@ -201,15 +216,25 @@ public:
committed_flushes.pop_front();
return;
}
- for (VAddr query_address : *flush_list) {
- FlushAndRemoveRegion(query_address, 4);
+ for (AsyncJobId async_job_id : *flush_list) {
+ AsyncJob& async_job = slot_async_jobs[async_job_id];
+ if (!async_job.collected) {
+ FlushAndRemoveRegion(async_job.query_location, 2, true);
+ }
}
committed_flushes.pop_front();
}
private:
+ struct AsyncJob {
+ bool collected = false;
+ u64 value = 0;
+ VAddr query_location = 0;
+ std::optional<u64> timestamp{};
+ };
+
/// Flushes a memory range to guest memory and removes it from the cache.
- void FlushAndRemoveRegion(VAddr addr, std::size_t size) {
+ void FlushAndRemoveRegion(VAddr addr, std::size_t size, bool async = false) {
const u64 addr_begin = addr;
const u64 addr_end = addr_begin + size;
const auto in_range = [addr_begin, addr_end](const CachedQuery& query) {
@@ -230,7 +255,16 @@ private:
continue;
}
rasterizer.UpdatePagesCachedCount(query.GetCpuAddr(), query.SizeInBytes(), -1);
- query.Flush();
+ AsyncJobId async_job_id = query.GetAsyncJob();
+ auto flush_result = query.Flush(async);
+ if (async_job_id == NULL_ASYNC_JOB_ID) {
+ ASSERT_MSG(false, "This should not be reachable at all");
+ continue;
+ }
+ AsyncJob& async_job = slot_async_jobs[async_job_id];
+ async_job.collected = true;
+ async_job.value = flush_result;
+ query.SetAsyncJob(NULL_ASYNC_JOB_ID);
}
std::erase_if(contents, in_range);
}
@@ -257,17 +291,43 @@ private:
return found != std::end(contents) ? &*found : nullptr;
}
- void AsyncFlushQuery(VAddr addr) {
+ void AsyncFlushQuery(CachedQuery* query, std::optional<u64> timestamp,
+ std::unique_lock<std::recursive_mutex>& lock) {
+ const AsyncJobId new_async_job_id = slot_async_jobs.insert();
+ AsyncJob& async_job = slot_async_jobs[new_async_job_id];
+ query->SetAsyncJob(new_async_job_id);
+ async_job.query_location = query->GetCpuAddr();
+ async_job.collected = false;
+
if (!uncommitted_flushes) {
- uncommitted_flushes = std::make_shared<std::vector<VAddr>>();
+ uncommitted_flushes = std::make_shared<std::vector<AsyncJobId>>();
}
- uncommitted_flushes->push_back(addr);
+ uncommitted_flushes->push_back(new_async_job_id);
+ lock.unlock();
+ std::function<void()> operation([this, new_async_job_id, timestamp] {
+ std::unique_lock local_lock{mutex};
+ AsyncJob& async_job = slot_async_jobs[new_async_job_id];
+ if (timestamp) {
+ u64 timestamp_value = *timestamp;
+ cpu_memory.WriteBlockUnsafe(async_job.query_location + sizeof(u64),
+ &timestamp_value, sizeof(u64));
+ cpu_memory.WriteBlockUnsafe(async_job.query_location, &async_job.value, sizeof(u64));
+ } else {
+ u32 small_value = static_cast<u32>(async_job.value);
+ cpu_memory.WriteBlockUnsafe(async_job.query_location, &small_value, sizeof(u32));
+ }
+ slot_async_jobs.erase(new_async_job_id);
+ });
+ rasterizer.SyncOperation(std::move(operation));
}
static constexpr std::uintptr_t YUZU_PAGESIZE = 4096;
static constexpr unsigned YUZU_PAGEBITS = 12;
+ SlotVector<AsyncJob> slot_async_jobs;
+
VideoCore::RasterizerInterface& rasterizer;
+ Core::Memory::Memory& cpu_memory;
mutable std::recursive_mutex mutex;
@@ -275,8 +335,8 @@ private:
std::array<CounterStream, VideoCore::NumQueryTypes> streams;
- std::shared_ptr<std::vector<VAddr>> uncommitted_flushes{};
- std::list<std::shared_ptr<std::vector<VAddr>>> committed_flushes;
+ std::shared_ptr<std::vector<AsyncJobId>> uncommitted_flushes{};
+ std::list<std::shared_ptr<std::vector<AsyncJobId>>> committed_flushes;
};
template <class QueryCache, class HostCounter>
@@ -295,12 +355,12 @@ public:
virtual ~HostCounterBase() = default;
/// Returns the current value of the query.
- u64 Query() {
+ u64 Query(bool async = false) {
if (result) {
return *result;
}
- u64 value = BlockingQuery() + base_result;
+ u64 value = BlockingQuery(async) + base_result;
if (dependency) {
value += dependency->Query();
dependency = nullptr;
@@ -321,7 +381,7 @@ public:
protected:
/// Returns the value of query from the backend API blocking as needed.
- virtual u64 BlockingQuery() const = 0;
+ virtual u64 BlockingQuery(bool async = false) const = 0;
private:
std::shared_ptr<HostCounter> dependency; ///< Counter to add to this value.
@@ -344,26 +404,23 @@ public:
CachedQueryBase& operator=(const CachedQueryBase&) = delete;
/// Flushes the query to guest memory.
- virtual void Flush() {
+ virtual u64 Flush(bool async = false) {
// When counter is nullptr it means that it's just been reset. We are supposed to write a
// zero in these cases.
- const u64 value = counter ? counter->Query() : 0;
- std::memcpy(host_ptr, &value, sizeof(u64));
-
- if (timestamp) {
- std::memcpy(host_ptr + TIMESTAMP_OFFSET, &*timestamp, sizeof(u64));
- }
+ const u64 value = counter ? counter->Query(async) : 0;
+ return value;
}
/// Binds a counter to this query.
- void BindCounter(std::shared_ptr<HostCounter> counter_, std::optional<u64> timestamp_) {
+ std::optional<u64> BindCounter(std::shared_ptr<HostCounter> counter_) {
+ std::optional<u64> result{};
if (counter) {
// If there's an old counter set it means the query is being rewritten by the game.
// To avoid losing the data forever, flush here.
- Flush();
+ result = std::make_optional(Flush());
}
counter = std::move(counter_);
- timestamp = timestamp_;
+ return result;
}
VAddr GetCpuAddr() const noexcept {
@@ -378,6 +435,14 @@ public:
return with_timestamp ? LARGE_QUERY_SIZE : SMALL_QUERY_SIZE;
}
+ void SetAsyncJob(AsyncJobId assigned_async_job_) {
+ assigned_async_job = assigned_async_job_;
+ }
+
+ AsyncJobId GetAsyncJob() const {
+ return assigned_async_job;
+ }
+
protected:
/// Returns true when querying the counter may potentially block.
bool WaitPending() const noexcept {
@@ -393,6 +458,7 @@ private:
u8* host_ptr; ///< Writable host pointer.
std::shared_ptr<HostCounter> counter; ///< Host counter to query, owns the dependency tree.
std::optional<u64> timestamp; ///< Timestamp to flush to guest memory.
+ AsyncJobId assigned_async_job;
};
} // namespace VideoCommon
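In short, query_cache.h now defers the guest-memory write-back: flushing a query only produces a value, which is parked in an AsyncJob slot and written out later through rasterizer.SyncOperation. Below is a minimal, self-contained sketch of that deferred write-back pattern; it uses plain std containers in place of SlotVector<AsyncJob> and Core::Memory::Memory, and all names (the jobs vector, guest_memory, sync_operations) are illustrative rather than yuzu API.

// Minimal sketch (not yuzu code) of the deferred query write-back pattern.
#include <cstdint>
#include <cstring>
#include <deque>
#include <functional>
#include <vector>

using u32 = std::uint32_t;
using u64 = std::uint64_t;
using AsyncJobId = std::size_t;

struct AsyncJob {
    bool collected = false;
    u64 value = 0;
    std::size_t query_location = 0; // offset into the stand-in guest memory
};

int main() {
    std::vector<AsyncJob> jobs(1);                      // slot 0 plays the role of NULL_ASYNC_JOB_ID
    std::vector<std::uint8_t> guest_memory(64, 0);      // stand-in for Core::Memory::Memory
    std::deque<std::function<void()>> sync_operations;  // stand-in for rasterizer.SyncOperation

    // 1. An async flush is requested: allocate a job instead of writing memory right away.
    jobs.push_back(AsyncJob{.collected = false, .value = 0, .query_location = 16});
    const AsyncJobId id = jobs.size() - 1;

    // 2. Later, FlushAndRemoveRegion (or a rebound counter) collects the real value.
    jobs[id].value = 1234;
    jobs[id].collected = true;

    // 3. The queued operation finally writes the result into guest memory.
    sync_operations.emplace_back([&jobs, &guest_memory, id] {
        const u32 small_value = static_cast<u32>(jobs[id].value); // small query: 32-bit result
        std::memcpy(guest_memory.data() + jobs[id].query_location, &small_value,
                    sizeof(small_value));
    });

    for (auto& operation : sync_operations) {
        operation();
    }
    return 0;
}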
diff --git a/src/video_core/renderer_opengl/gl_query_cache.cpp b/src/video_core/renderer_opengl/gl_query_cache.cpp
index 5070db441..99d7347f5 100644
--- a/src/video_core/renderer_opengl/gl_query_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_query_cache.cpp
@@ -26,8 +26,8 @@ constexpr GLenum GetTarget(VideoCore::QueryType type) {
} // Anonymous namespace
-QueryCache::QueryCache(RasterizerOpenGL& rasterizer_)
- : QueryCacheBase(rasterizer_), gl_rasterizer{rasterizer_} {}
+QueryCache::QueryCache(RasterizerOpenGL& rasterizer_, Core::Memory::Memory& cpu_memory_)
+ : QueryCacheBase(rasterizer_, cpu_memory_), gl_rasterizer{rasterizer_} {}
QueryCache::~QueryCache() = default;
@@ -74,7 +74,7 @@ void HostCounter::EndQuery() {
glEndQuery(GetTarget(type));
}
-u64 HostCounter::BlockingQuery() const {
+u64 HostCounter::BlockingQuery([[maybe_unused]] bool async) const {
GLint64 value;
glGetQueryObjecti64v(query.handle, GL_QUERY_RESULT, &value);
return static_cast<u64>(value);
@@ -96,7 +96,7 @@ CachedQuery& CachedQuery::operator=(CachedQuery&& rhs) noexcept {
return *this;
}
-void CachedQuery::Flush() {
+u64 CachedQuery::Flush([[maybe_unused]] bool async) {
// Waiting for a query while another query of the same target is enabled locks Nvidia's driver.
// To avoid this disable and re-enable keeping the dependency stream.
// But we only have to do this if we have pending waits to be done.
@@ -106,11 +106,13 @@ void CachedQuery::Flush() {
stream.Update(false);
}
- VideoCommon::CachedQueryBase<HostCounter>::Flush();
+ auto result = VideoCommon::CachedQueryBase<HostCounter>::Flush();
if (slice_counter) {
stream.Update(true);
}
+
+ return result;
}
} // namespace OpenGL
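The OpenGL CachedQuery::Flush keeps its Nvidia driver workaround (disable the active counter stream, read, re-enable) but now forwards the base-class result instead of letting the base write guest memory. A reduced sketch of that wrap-and-return shape follows, with stub classes standing in for the real CounterStream and CachedQueryBase types.

// Reduced sketch (stub types, not the yuzu classes) of Flush returning a value.
#include <cstdint>
#include <iostream>

using u64 = std::uint64_t;

struct StreamStub {
    void Update(bool enable) { enabled = enable; } // mirrors CounterStream::Update(bool)
    bool enabled = true;
};

class QueryBaseStub {
public:
    virtual ~QueryBaseStub() = default;
    virtual u64 Flush(bool /*async*/ = false) { return 42; } // pretend counter value
};

class GlQueryStub : public QueryBaseStub {
public:
    explicit GlQueryStub(StreamStub& stream_) : stream{stream_} {}

    u64 Flush(bool async = false) override {
        stream.Update(false);                           // avoid waiting with the stream enabled
        const u64 result = QueryBaseStub::Flush(async); // base class reads the counter
        stream.Update(true);
        return result;                                  // caller (the cache) stores it in an AsyncJob
    }

private:
    StreamStub& stream;
};

int main() {
    StreamStub stream;
    GlQueryStub query{stream};
    std::cout << query.Flush() << '\n'; // prints 42
}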
diff --git a/src/video_core/renderer_opengl/gl_query_cache.h b/src/video_core/renderer_opengl/gl_query_cache.h
index 14ce59990..872513f22 100644
--- a/src/video_core/renderer_opengl/gl_query_cache.h
+++ b/src/video_core/renderer_opengl/gl_query_cache.h
@@ -28,7 +28,7 @@ using CounterStream = VideoCommon::CounterStreamBase<QueryCache, HostCounter>;
class QueryCache final
: public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter> {
public:
- explicit QueryCache(RasterizerOpenGL& rasterizer_);
+ explicit QueryCache(RasterizerOpenGL& rasterizer_, Core::Memory::Memory& cpu_memory_);
~QueryCache();
OGLQuery AllocateQuery(VideoCore::QueryType type);
@@ -51,7 +51,7 @@ public:
void EndQuery();
private:
- u64 BlockingQuery() const override;
+ u64 BlockingQuery(bool async = false) const override;
QueryCache& cache;
const VideoCore::QueryType type;
@@ -70,7 +70,7 @@ public:
CachedQuery(const CachedQuery&) = delete;
CachedQuery& operator=(const CachedQuery&) = delete;
- void Flush() override;
+ u64 Flush(bool async = false) override;
private:
QueryCache* cache;
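One C++ detail worth keeping in mind with the new signature: "bool async = false" is repeated on the overrides because default arguments are resolved against the static type at the call site, not through the vtable. A small standalone example of that rule (names are illustrative):

// Default arguments on virtual functions bind to the static type of the call.
#include <iostream>

struct Base {
    virtual ~Base() = default;
    virtual int Query(bool async = false) const { return async ? 1 : 0; }
};

struct Derived : Base {
    // If the default were omitted here, "Derived d; d.Query();" would not compile,
    // even though calling through a Base reference would still use Base's default.
    int Query(bool async = false) const override { return async ? 11 : 10; }
};

int main() {
    Derived d;
    const Base& b = d;
    // Both calls dispatch to Derived::Query; each picks up the default of its static type.
    std::cout << b.Query() << ' ' << d.Query() << '\n'; // prints "10 10"
}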
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 90e35e307..967aa4306 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -63,7 +63,7 @@ RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra
buffer_cache(*this, cpu_memory_, buffer_cache_runtime),
shader_cache(*this, emu_window_, device, texture_cache, buffer_cache, program_manager,
state_tracker, gpu.ShaderNotify()),
- query_cache(*this), accelerate_dma(buffer_cache, texture_cache),
+ query_cache(*this, cpu_memory_), accelerate_dma(buffer_cache, texture_cache),
fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache),
blit_image(program_manager_) {}
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp
index 0701e572b..d67490449 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp
@@ -66,9 +66,10 @@ void QueryPool::Reserve(std::pair<VkQueryPool, u32> query) {
}
}
-QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_, const Device& device_,
+QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_,
+ Core::Memory::Memory& cpu_memory_, const Device& device_,
Scheduler& scheduler_)
- : QueryCacheBase{rasterizer_}, device{device_}, scheduler{scheduler_},
+ : QueryCacheBase{rasterizer_, cpu_memory_}, device{device_}, scheduler{scheduler_},
query_pools{
QueryPool{device_, scheduler_, QueryType::SamplesPassed},
} {}
@@ -100,7 +101,8 @@ HostCounter::HostCounter(QueryCache& cache_, std::shared_ptr<HostCounter> depend
cache.GetScheduler().Record([logical, query = query](vk::CommandBuffer cmdbuf) {
const bool use_precise = Settings::IsGPULevelHigh();
logical->ResetQueryPool(query.first, query.second, 1);
- cmdbuf.BeginQuery(query.first, query.second, use_precise ? VK_QUERY_CONTROL_PRECISE_BIT : 0);
+ cmdbuf.BeginQuery(query.first, query.second,
+ use_precise ? VK_QUERY_CONTROL_PRECISE_BIT : 0);
});
}
@@ -113,8 +115,10 @@ void HostCounter::EndQuery() {
[query = query](vk::CommandBuffer cmdbuf) { cmdbuf.EndQuery(query.first, query.second); });
}
-u64 HostCounter::BlockingQuery() const {
- cache.GetScheduler().Wait(tick);
+u64 HostCounter::BlockingQuery(bool async) const {
+ if (!async) {
+ cache.GetScheduler().Wait(tick);
+ }
u64 data;
const VkResult query_result = cache.GetDevice().GetLogical().GetQueryResults(
query.first, query.second, 1, sizeof(data), &data, sizeof(data),
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.h b/src/video_core/renderer_vulkan/vk_query_cache.h
index 26762ee09..c1b9552eb 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.h
+++ b/src/video_core/renderer_vulkan/vk_query_cache.h
@@ -52,7 +52,8 @@ private:
class QueryCache final
: public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter> {
public:
- explicit QueryCache(VideoCore::RasterizerInterface& rasterizer_, const Device& device_,
+ explicit QueryCache(VideoCore::RasterizerInterface& rasterizer_,
+ Core::Memory::Memory& cpu_memory_, const Device& device_,
Scheduler& scheduler_);
~QueryCache();
@@ -83,7 +84,7 @@ public:
void EndQuery();
private:
- u64 BlockingQuery() const override;
+ u64 BlockingQuery(bool async = false) const override;
QueryCache& cache;
const VideoCore::QueryType type;
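On the Vulkan side the async path simply skips Scheduler::Wait(tick) before reading the query pool; the cache only trusts the value once FlushAndRemoveRegion has marked the job collected. A stripped-down sketch of that conditional wait is below; SchedulerStub and ReadCounterStub are placeholders, not the yuzu or Vulkan APIs.

// Stripped-down sketch of the conditional wait in the Vulkan BlockingQuery path.
#include <cstdint>
#include <iostream>

using u64 = std::uint64_t;

struct SchedulerStub {
    u64 waited_tick = 0;
    void Wait(u64 tick) { waited_tick = tick; } // real code blocks until the GPU reaches tick
};

u64 ReadCounterStub() {
    return 7; // pretend this came back from the query pool
}

u64 BlockingQuery(SchedulerStub& scheduler, u64 tick, bool async) {
    if (!async) {
        scheduler.Wait(tick); // synchronous path: wait for the command buffer that ended the query
    }
    // Asynchronous path reads immediately; the async-flush machinery collects the
    // value once it is actually valid.
    return ReadCounterStub();
}

int main() {
    SchedulerStub scheduler;
    std::cout << BlockingQuery(scheduler, 5, false) << ' '
              << BlockingQuery(scheduler, 5, true) << '\n';
}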
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index f366fdd2a..2d865729a 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -172,7 +172,7 @@ RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra
buffer_cache(*this, cpu_memory_, buffer_cache_runtime),
pipeline_cache(*this, device, scheduler, descriptor_pool, update_descriptor_queue,
render_pass_cache, buffer_cache, texture_cache, gpu.ShaderNotify()),
- query_cache{*this, device, scheduler}, accelerate_dma(buffer_cache, texture_cache, scheduler),
+ query_cache{*this, cpu_memory_, device, scheduler}, accelerate_dma(buffer_cache, texture_cache, scheduler),
fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache, device, scheduler),
wfi_event(device.GetLogical().CreateEvent()) {
scheduler.SetQueryCache(query_cache);
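Finally, the async job ids ride the same two-phase commit/pop lists that previously carried raw addresses: jobs queue into uncommitted_flushes, CommitAsyncFlushes seals the batch, and PopAsyncFlushes flushes whatever was not collected in the meantime. A condensed, self-contained sketch of that flow (the struct and method names are illustrative; only the list handling mirrors the diff):

// Condensed sketch of the commit/pop handling of pending async job ids.
#include <cstdint>
#include <iostream>
#include <list>
#include <memory>
#include <vector>

using AsyncJobId = std::size_t;

struct AsyncFlushLists {
    std::shared_ptr<std::vector<AsyncJobId>> uncommitted;
    std::list<std::shared_ptr<std::vector<AsyncJobId>>> committed;

    void Queue(AsyncJobId id) {
        if (!uncommitted) {
            uncommitted = std::make_shared<std::vector<AsyncJobId>>();
        }
        uncommitted->push_back(id);
    }

    // CommitAsyncFlushes: seal the current batch, even if it is empty.
    void Commit() {
        committed.push_back(uncommitted);
        uncommitted.reset();
    }

    // PopAsyncFlushes: handle the oldest batch; uncollected jobs would be flushed here.
    void Pop() {
        if (committed.empty()) {
            return;
        }
        if (auto batch = committed.front()) {
            for (const AsyncJobId id : *batch) {
                std::cout << "flush job " << id << '\n';
            }
        }
        committed.pop_front();
    }
};

int main() {
    AsyncFlushLists lists;
    lists.Queue(1);
    lists.Queue(2);
    lists.Commit();
    lists.Pop(); // prints "flush job 1" and "flush job 2"
}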