path: root/src/audio_core/sink/sink_stream.cpp
Diffstat (limited to '')
-rw-r--r--  src/audio_core/sink/sink_stream.cpp |  87
1 file changed, 42 insertions(+), 45 deletions(-)
diff --git a/src/audio_core/sink/sink_stream.cpp b/src/audio_core/sink/sink_stream.cpp
index 06c2a876e..404dcd0e9 100644
--- a/src/audio_core/sink/sink_stream.cpp
+++ b/src/audio_core/sink/sink_stream.cpp
@@ -14,10 +14,11 @@
#include "common/fixed_point.h"
#include "common/settings.h"
#include "core/core.h"
+#include "core/core_timing.h"
namespace AudioCore::Sink {
-void SinkStream::AppendBuffer(SinkBuffer& buffer, std::vector<s16>& samples) {
+void SinkStream::AppendBuffer(SinkBuffer& buffer, std::span<s16> samples) {
if (type == StreamType::In) {
queue.enqueue(buffer);
queued_buffers++;
@@ -35,7 +36,7 @@ void SinkStream::AppendBuffer(SinkBuffer& buffer, std::vector<s16>& samples) {
if (system_channels == 6 && device_channels == 2) {
// We're given 6 channels, but our device only outputs 2, so downmix.
- constexpr std::array<f32, 4> down_mix_coeff{1.0f, 0.707f, 0.251f, 0.707f};
+ static constexpr std::array<f32, 4> down_mix_coeff{1.0f, 0.707f, 0.251f, 0.707f};
for (u32 read_index = 0, write_index = 0; read_index < samples.size();
read_index += system_channels, write_index += device_channels) {
@@ -65,15 +66,16 @@ void SinkStream::AppendBuffer(SinkBuffer& buffer, std::vector<s16>& samples) {
static_cast<s16>(std::clamp(right_sample, min, max));
}
- samples.resize(samples.size() / system_channels * device_channels);
+ samples = samples.subspan(0, samples.size() / system_channels * device_channels);
} else if (system_channels == 2 && device_channels == 6) {
// We need moar samples! Not all games will provide 6 channel audio.
// TODO: Implement some upmixing here. Currently just passthrough, with other
// channels left as silence.
- std::vector<s16> new_samples(samples.size() / system_channels * device_channels, 0);
+ auto new_size = samples.size() / system_channels * device_channels;
+ tmp_samples.resize_destructive(new_size);
- for (u32 read_index = 0, write_index = 0; read_index < samples.size();
+ for (u32 read_index = 0, write_index = 0; read_index < new_size;
read_index += system_channels, write_index += device_channels) {
const auto left_sample{static_cast<s16>(std::clamp(
static_cast<s32>(
@@ -81,7 +83,7 @@ void SinkStream::AppendBuffer(SinkBuffer& buffer, std::vector<s16>& samples) {
volume),
min, max))};
- new_samples[write_index + static_cast<u32>(Channels::FrontLeft)] = left_sample;
+ tmp_samples[write_index + static_cast<u32>(Channels::FrontLeft)] = left_sample;
const auto right_sample{static_cast<s16>(std::clamp(
static_cast<s32>(
@@ -89,9 +91,9 @@ void SinkStream::AppendBuffer(SinkBuffer& buffer, std::vector<s16>& samples) {
volume),
min, max))};
- new_samples[write_index + static_cast<u32>(Channels::FrontRight)] = right_sample;
+ tmp_samples[write_index + static_cast<u32>(Channels::FrontRight)] = right_sample;
}
- samples = std::move(new_samples);
+ samples = std::span<s16>(tmp_samples);
} else if (volume != 1.0f) {
for (u32 i = 0; i < samples.size(); i++) {
@@ -149,10 +151,6 @@ void SinkStream::ProcessAudioIn(std::span<const s16> input_buffer, std::size_t n
return;
}
- if (queued_buffers > max_queue_size) {
- Stall();
- }
-
while (frames_written < num_frames) {
// If the playing buffer has been consumed or has no frames, we need a new one
if (playing_buffer.consumed || playing_buffer.frames == 0) {
@@ -187,10 +185,6 @@ void SinkStream::ProcessAudioIn(std::span<const s16> input_buffer, std::size_t n
}
std::memcpy(&last_frame[0], &input_buffer[(frames_written - 1) * frame_size], frame_size_bytes);
-
- if (queued_buffers <= max_queue_size) {
- Unstall();
- }
}
void SinkStream::ProcessAudioOutAndRender(std::span<s16> output_buffer, std::size_t num_frames) {
@@ -198,31 +192,22 @@ void SinkStream::ProcessAudioOutAndRender(std::span<s16> output_buffer, std::siz
const std::size_t frame_size = num_channels;
const std::size_t frame_size_bytes = frame_size * sizeof(s16);
size_t frames_written{0};
+ size_t actual_frames_written{0};
// If we're paused or going to shut down, we don't want to consume buffers as coretiming is
// paused and we'll desync, so just play silence.
if (system.IsPaused() || system.IsShuttingDown()) {
- constexpr std::array<s16, 6> silence{};
+ if (system.IsShuttingDown()) {
+ release_cv.notify_one();
+ }
+
+ static constexpr std::array<s16, 6> silence{};
for (size_t i = frames_written; i < num_frames; i++) {
std::memcpy(&output_buffer[i * frame_size], &silence[0], frame_size_bytes);
}
return;
}
- // Due to many frames being queued up with nvdec (5 frames or so?), a lot of buffers also get
- // queued up (30+) but not all at once, which causes constant stalling here, so just let the
- // video play out without attempting to stall.
- // Can hopefully remove this later with a more complete NVDEC implementation.
- const auto nvdec_active{system.AudioCore().IsNVDECActive()};
-
- // Core timing cannot be paused in single-core mode, so Stall ends up being called over and over
- // and never recovers to a normal state, so just skip attempting to sync things on single-core.
- if (system.IsMulticore() && !nvdec_active && queued_buffers > max_queue_size) {
- Stall();
- } else if (system.IsMulticore() && queued_buffers <= max_queue_size) {
- Unstall();
- }
-
while (frames_written < num_frames) {
// If the playing buffer has been consumed or has no frames, we need a new one
if (playing_buffer.consumed || playing_buffer.frames == 0) {
@@ -237,6 +222,10 @@ void SinkStream::ProcessAudioOutAndRender(std::span<s16> output_buffer, std::siz
}
// Successfully dequeued a new buffer.
queued_buffers--;
+
+ { std::unique_lock lk{release_mutex}; }
+
+ release_cv.notify_one();
}
// Get the minimum frames available between the currently playing buffer, and the
@@ -248,6 +237,7 @@ void SinkStream::ProcessAudioOutAndRender(std::span<s16> output_buffer, std::siz
frames_available * frame_size);
frames_written += frames_available;
+ actual_frames_written += frames_available;
playing_buffer.frames_played += frames_available;
// If that's all the frames in the current buffer, add its samples and mark it as
@@ -260,26 +250,33 @@ void SinkStream::ProcessAudioOutAndRender(std::span<s16> output_buffer, std::siz
std::memcpy(&last_frame[0], &output_buffer[(frames_written - 1) * frame_size],
frame_size_bytes);
- if (system.IsMulticore() && queued_buffers <= max_queue_size) {
- Unstall();
+ {
+ std::scoped_lock lk{sample_count_lock};
+ last_sample_count_update_time = system.CoreTiming().GetGlobalTimeNs();
+ min_played_sample_count = max_played_sample_count;
+ max_played_sample_count += actual_frames_written;
}
}
-void SinkStream::Stall() {
- std::scoped_lock lk{stall_guard};
- if (stalled_lock) {
- return;
- }
- stalled_lock = system.StallProcesses();
+u64 SinkStream::GetExpectedPlayedSampleCount() {
+ std::scoped_lock lk{sample_count_lock};
+ auto cur_time{system.CoreTiming().GetGlobalTimeNs()};
+ auto time_delta{cur_time - last_sample_count_update_time};
+ auto exp_played_sample_count{min_played_sample_count +
+ (TargetSampleRate * time_delta) / std::chrono::seconds{1}};
+
+ // Add 15ms of latency in sample reporting to allow for some leeway in scheduler timings
+ return std::min<u64>(exp_played_sample_count, max_played_sample_count) + TargetSampleCount * 3;
}
-void SinkStream::Unstall() {
- std::scoped_lock lk{stall_guard};
- if (!stalled_lock) {
- return;
+void SinkStream::WaitFreeSpace(std::stop_token stop_token) {
+ std::unique_lock lk{release_mutex};
+ release_cv.wait_for(lk, std::chrono::milliseconds(5),
+ [this]() { return queued_buffers < max_queue_size; });
+ if (queued_buffers > max_queue_size + 3) {
+ Common::CondvarWait(release_cv, lk, stop_token,
+ [this] { return queued_buffers < max_queue_size; });
}
- system.UnstallProcesses();
- stalled_lock.unlock();
}
} // namespace AudioCore::Sink
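
For context on the downmix hunk above: the patch keeps the existing 6-to-2 downmix and only marks the coefficient table static constexpr and switches the sample container to std::span. A rough standalone sketch of the same mixing math follows, written in plain float rather than the fixed-point arithmetic the file uses (common/fixed_point.h); the function name DownmixFrame and the FL/FR/C/LFE/BL/BR channel order are assumptions for illustration, not part of the patch.

#include <algorithm>
#include <array>
#include <cstdint>
#include <limits>

// Sketch only: illustrates the 5.1 -> stereo downmix coefficients used above.
// Coefficients: 1.0 (front), 0.707 (center/back), 0.251 (LFE).
static constexpr std::array<float, 4> down_mix_coeff{1.0f, 0.707f, 0.251f, 0.707f};

static std::array<int16_t, 2> DownmixFrame(const std::array<int16_t, 6>& in, float volume) {
    constexpr int32_t min = std::numeric_limits<int16_t>::min();
    constexpr int32_t max = std::numeric_limits<int16_t>::max();
    // Left  = FL*1.0 + C*0.707 + LFE*0.251 + BL*0.707, scaled by volume.
    const float left = (in[0] * down_mix_coeff[0] + in[2] * down_mix_coeff[1] +
                        in[3] * down_mix_coeff[2] + in[4] * down_mix_coeff[3]) * volume;
    // Right = FR*1.0 + C*0.707 + LFE*0.251 + BR*0.707, scaled by volume.
    const float right = (in[1] * down_mix_coeff[0] + in[2] * down_mix_coeff[1] +
                         in[3] * down_mix_coeff[2] + in[5] * down_mix_coeff[3]) * volume;
    // Clamp back into the signed 16-bit sample range.
    return {static_cast<int16_t>(std::clamp(static_cast<int32_t>(left), min, max)),
            static_cast<int16_t>(std::clamp(static_cast<int32_t>(right), min, max))};
}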
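
The new GetExpectedPlayedSampleCount interpolates between the last two known played-sample counts using core timing, clamps to the known upper bound, and adds three audio frames of leeway (about 15 ms at 48 kHz, assuming the audio core's 240-sample frames). Below is a simplified sketch of that bookkeeping with standard-library types standing in for the engine's CoreTiming clock; the struct and member names are illustrative, not the patch's.

#include <algorithm>
#include <chrono>
#include <cstdint>
#include <mutex>

// Illustrative-only sketch of the played-sample-count interpolation above.
struct PlayedSampleTracker {
    static constexpr uint64_t TargetSampleRate = 48'000; // samples per second (assumed)
    static constexpr uint64_t TargetSampleCount = 240;   // samples per audio frame (assumed)

    std::mutex sample_count_lock;
    std::chrono::nanoseconds last_update_time{};
    uint64_t min_played_sample_count{};
    uint64_t max_played_sample_count{};

    // Called by the audio backend after writing `frames` frames, at time `now`.
    void OnFramesWritten(std::chrono::nanoseconds now, uint64_t frames) {
        std::scoped_lock lk{sample_count_lock};
        last_update_time = now;
        min_played_sample_count = max_played_sample_count;
        max_played_sample_count += frames;
    }

    // Interpolates from the last update using elapsed time, clamps to the upper
    // bound, then adds ~15 ms (3 frames) of leeway, as in the diff.
    uint64_t GetExpectedPlayedSampleCount(std::chrono::nanoseconds now) {
        std::scoped_lock lk{sample_count_lock};
        const auto delta = now - last_update_time;
        const uint64_t interpolated =
            min_played_sample_count +
            (TargetSampleRate * static_cast<uint64_t>(delta.count())) / 1'000'000'000ULL;
        return std::min(interpolated, max_played_sample_count) + TargetSampleCount * 3;
    }
};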
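
The removed Stall/Unstall path paused guest processes whenever queued_buffers exceeded max_queue_size, which the deleted comments note misbehaved with NVDEC playback and in single-core mode. The replacement is plain backpressure: ProcessAudioOutAndRender notifies release_cv after dequeuing a buffer (and on shutdown), and the producer calls WaitFreeSpace, which waits briefly and only blocks further if the queue is well past the limit. A minimal sketch of that two-stage wait follows, without the std::stop_token / Common::CondvarWait shutdown handling the real code uses; the QueueGate wrapper and its max_queue_size value are illustrative assumptions.

#include <atomic>
#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <mutex>

// Sketch only: simplified producer/consumer backpressure modeled on the diff.
struct QueueGate {
    std::mutex release_mutex;
    std::condition_variable release_cv;
    std::atomic<std::uint32_t> queued_buffers{0};
    std::uint32_t max_queue_size{5}; // illustrative bound, not taken from the patch

    // Producer side: called before queuing another buffer.
    void WaitFreeSpace() {
        std::unique_lock lk{release_mutex};
        // Short bounded wait first, so a slightly full queue only costs ~5 ms.
        release_cv.wait_for(lk, std::chrono::milliseconds(5),
                            [this] { return queued_buffers < max_queue_size; });
        // Only block without a timeout if the queue is far over the limit.
        if (queued_buffers > max_queue_size + 3) {
            release_cv.wait(lk, [this] { return queued_buffers < max_queue_size; });
        }
    }

    // Consumer side: called after a buffer has been dequeued and played.
    void NotifyBufferReleased() {
        --queued_buffers;
        { std::lock_guard lk{release_mutex}; } // pairs with the waiter's critical section
        release_cv.notify_one();
    }
};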