summary | refs | log | tree | commit | diff | stats
path: root/src/video_core/shader
diff options
context:
space:
mode:
Diffstat (limited to 'src/video_core/shader')
-rw-r--r--  src/video_core/shader/async_shaders.cpp | 14
-rw-r--r--  src/video_core/shader/async_shaders.h   | 10
2 files changed, 15 insertions(+), 9 deletions(-)
diff --git a/src/video_core/shader/async_shaders.cpp b/src/video_core/shader/async_shaders.cpp
index fb94ac2e7..84d86c32f 100644
--- a/src/video_core/shader/async_shaders.cpp
+++ b/src/video_core/shader/async_shaders.cpp
@@ -59,7 +59,6 @@ void AsyncShaders::KillWorkers() {
}
bool AsyncShaders::HasWorkQueued() {
- std::shared_lock lock(queue_mutex);
return !pending_queue.empty();
}
@@ -118,26 +117,31 @@ void AsyncShaders::QueueOpenGLShader(const OpenGL::Device& device,
cpu_addr};
std::unique_lock lock(queue_mutex);
pending_queue.push_back(std::move(params));
+ cv.notify_one();
}
void AsyncShaders::ShaderCompilerThread(Core::Frontend::GraphicsContext* context) {
using namespace std::chrono_literals;
while (!is_thread_exiting.load(std::memory_order_relaxed)) {
+ std::unique_lock<std::mutex> lock(queue_mutex);
+ cv.wait(lock, [&] { return HasWorkQueued() || is_thread_exiting; });
+ if (is_thread_exiting) {
+ return;
+ }
+
// Partial lock to allow all threads to read at the same time
if (!HasWorkQueued()) {
continue;
}
- // Complete lock for pulling workload
- queue_mutex.lock();
// Another thread beat us, just unlock and wait for the next load
if (pending_queue.empty()) {
- queue_mutex.unlock();
continue;
}
// Pull work from queue
WorkerParams work = std::move(pending_queue.front());
pending_queue.pop_front();
- queue_mutex.unlock();
+
+ lock.unlock();
if (work.backend == AsyncShaders::Backend::OpenGL ||
work.backend == AsyncShaders::Backend::GLASM) {
diff --git a/src/video_core/shader/async_shaders.h b/src/video_core/shader/async_shaders.h
index 26bc38326..2f5ee94ad 100644
--- a/src/video_core/shader/async_shaders.h
+++ b/src/video_core/shader/async_shaders.h
@@ -4,6 +4,7 @@
#pragma once
+#include <condition_variable>
#include <deque>
#include <memory>
#include <shared_mutex>
@@ -59,9 +60,6 @@ public:
// Force end all threads
void KillWorkers();
- /// Check our worker queue to see if we have any work queued already
- bool HasWorkQueued();
-
/// Check to see if any shaders have actually been compiled
bool HasCompletedWork();
@@ -81,6 +79,9 @@ public:
private:
void ShaderCompilerThread(Core::Frontend::GraphicsContext* context);
+ /// Check our worker queue to see if we have any work queued already
+ bool HasWorkQueued();
+
struct WorkerParams {
AsyncShaders::Backend backend;
OpenGL::Device device;
@@ -94,7 +95,8 @@ private:
VAddr cpu_address;
};
- std::shared_mutex queue_mutex;
+ std::condition_variable cv;
+ std::mutex queue_mutex;
std::shared_mutex completed_mutex;
std::atomic<bool> is_thread_exiting{};
std::vector<std::unique_ptr<Core::Frontend::GraphicsContext>> context_list;