Optimize Audio by using Circular Buffers, Handle Device Disconnection and Fix Some Bugs

This optimizes a large portion of the audio pipeline by using a circular buffer rather than queues. It also handles device disconnection using Oboe callbacks and fixes bugs related to audio saturation.
PixelyIon 2020-04-18 03:05:31 +05:30 committed by PixelyIon
parent 4e4ed5aac0
commit fb1a158e8f
8 changed files with 270 additions and 97 deletions
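
For context on the saturation fix: the mixing callback now clamps the running sum of track samples to the i16 range instead of letting the addition wrap. Below is a minimal, self-contained sketch of that idea; it reuses the Saturate helper introduced by this commit, but the surrounding driver code is illustrative rather than the emulator's actual callback.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <limits>

using i16 = int16_t;
using i32 = int32_t;

// Clamp a wider intermediate value to the numeric limits of the output type,
// mirroring the Saturate<Out, Intermediate> helper added by this commit
template<typename Out, typename Intermediate, typename In>
inline Out Saturate(In value) {
    return static_cast<Out>(std::clamp(static_cast<Intermediate>(value),
                                       static_cast<Intermediate>(std::numeric_limits<Out>::min()),
                                       static_cast<Intermediate>(std::numeric_limits<Out>::max())));
}

int main() {
    i16 destination[4]{30000, -30000, 100, -100}; // samples already written by a previous track
    i16 source[4]{10000, -10000, 200, -200};      // samples read from the next track's buffer

    // Mix the second track on top of the first: 30000 + 10000 would wrap as an
    // i16, so the sum is clamped to 32767 (and -40000 to -32768) instead
    for (int i = 0; i < 4; i++)
        destination[i] = Saturate<i16, i32>(static_cast<i32>(destination[i]) + source[i]);

    for (auto sample : destination)
        printf("%d\n", sample); // prints 32767, -32768, 300, -300
}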


@@ -5,67 +5,68 @@
 namespace skyline::audio {
     Audio::Audio(const DeviceState &state) : state(state), oboe::AudioStreamCallback() {
-        oboe::AudioStreamBuilder builder;
-
-        builder.setChannelCount(constant::ChannelCount)
-            ->setSampleRate(constant::SampleRate)
-            ->setFormat(constant::PcmFormat)
-            ->setCallback(this)
-            ->openManagedStream(outputStream);
+        builder.setChannelCount(constant::ChannelCount);
+        builder.setSampleRate(constant::SampleRate);
+        builder.setFormat(constant::PcmFormat);
+        builder.setFramesPerCallback(constant::MixBufferSize);
+        builder.setUsage(oboe::Usage::Game);
+        builder.setCallback(this);
+
+        builder.openManagedStream(outputStream);
         outputStream->requestStart();
     }

-    Audio::~Audio() {
-        outputStream->close();
-    }
-
     std::shared_ptr<AudioTrack> Audio::OpenTrack(const int channelCount, const int sampleRate, const std::function<void()> &releaseCallback) {
-        std::shared_ptr<AudioTrack> track = std::make_shared<AudioTrack>(channelCount, sampleRate, releaseCallback);
+        std::lock_guard trackGuard(trackMutex);
+
+        auto track = std::make_shared<AudioTrack>(channelCount, sampleRate, releaseCallback);
         audioTracks.push_back(track);
+
         return track;
     }

     void Audio::CloseTrack(std::shared_ptr<AudioTrack> &track) {
+        std::lock_guard trackGuard(trackMutex);
+
         audioTracks.erase(std::remove(audioTracks.begin(), audioTracks.end(), track), audioTracks.end());
         track.reset();
     }

     oboe::DataCallbackResult Audio::onAudioReady(oboe::AudioStream *audioStream, void *audioData, int32_t numFrames) {
         i16 *destBuffer = static_cast<i16 *>(audioData);
-        uint setIndex = 0;
-        size_t sampleI16Size = static_cast<size_t>(numFrames) * audioStream->getChannelCount();
+        size_t streamSamples = static_cast<size_t>(numFrames) * audioStream->getChannelCount();
+        size_t writtenSamples = 0;
+
+        std::unique_lock trackLock(trackMutex);

         for (auto &track : audioTracks) {
             if (track->playbackState == AudioOutState::Stopped)
                 continue;

-            track->bufferLock.lock();
-
-            std::queue<i16> &srcBuffer = track->sampleQueue;
-            size_t amount = std::min(srcBuffer.size(), sampleI16Size);
-
-            for (size_t i = 0; i < amount; i++) {
-                if (setIndex == i) {
-                    destBuffer[i] = srcBuffer.front();
-                    setIndex++;
-                } else {
-                    destBuffer[i] += srcBuffer.front();
-                }
-                srcBuffer.pop();
-            }
-
-            track->sampleCounter += amount;
+            std::lock_guard bufferGuard(track->bufferLock);
+
+            auto trackSamples = track->samples.Read(destBuffer, streamSamples, [](i16 *source, i16 *destination) {
+                *destination = Saturate<i16, i32>(static_cast<u32>(*destination) + static_cast<u32>(*source));
+            }, writtenSamples);
+
+            writtenSamples = std::max(trackSamples, writtenSamples);
+
+            track->sampleCounter += trackSamples;
             track->CheckReleasedBuffers();
-            track->bufferLock.unlock();
         }

-        if (sampleI16Size > setIndex)
-            memset(destBuffer, 0, (sampleI16Size - setIndex) * 2);
+        trackLock.unlock();
+
+        if (streamSamples > writtenSamples)
+            memset(destBuffer + writtenSamples, 0, (streamSamples - writtenSamples) * sizeof(i16));

         return oboe::DataCallbackResult::Continue;
     }
+
+    void Audio::onErrorAfterClose(oboe::AudioStream *audioStream, oboe::Result error) {
+        if (error == oboe::Result::ErrorDisconnected) {
+            builder.openManagedStream(outputStream);
+            outputStream->requestStart();
+        }
+    }
 }


@@ -17,17 +17,14 @@ namespace skyline::audio {
     class Audio : public oboe::AudioStreamCallback {
       private:
         const DeviceState &state; //!< The state of the device
+        oboe::AudioStreamBuilder builder; //!< The audio stream builder, used to open the output stream
         oboe::ManagedStream outputStream; //!< The output oboe audio stream
-        std::vector<std::shared_ptr<audio::AudioTrack>> audioTracks; //!< Vector containing a pointer of every open audio track
+        std::vector<std::shared_ptr<audio::AudioTrack>> audioTracks; //!< A vector of shared_ptr to every open audio track
+        Mutex trackMutex; //!< This mutex is used to ensure that audioTracks isn't modified while it is being used

       public:
         Audio(const DeviceState &state);

-        /**
-         * @brief The destructor for the audio class
-         */
-        ~Audio();
-
         /**
          * @brief Opens a new track that can be used to play sound
          * @param channelCount The amount channels that are present in the track
@@ -50,5 +47,12 @@ namespace skyline::audio {
          * @param numFrames The amount of frames the sample data needs to contain
          */
        oboe::DataCallbackResult onAudioReady(oboe::AudioStream *audioStream, void *audioData, int32_t numFrames);
+
+        /**
+         * @brief The callback oboe uses to notify the application about stream closure
+         * @param audioStream The audio stream we are being called by
+         * @param error The error due to which the stream is being closed
+         */
+        void onErrorAfterClose(oboe::AudioStream *audioStream, oboe::Result error);
     };
 }


@@ -5,12 +5,14 @@
 #include <oboe/Oboe.h>
 #include <common.h>
+#include <array>

 namespace skyline {
     namespace constant {
         constexpr auto SampleRate = 48000; //!< The common sampling rate to use for audio output
         constexpr auto ChannelCount = 2; //!< The common amount of channels to use for audio output
         constexpr auto PcmFormat = oboe::AudioFormat::I16; //!< The common PCM data format to use for audio output
+        constexpr size_t MixBufferSize = 960; //!< The size of the mix buffer by default
     };

     namespace audio {
@@ -44,5 +46,161 @@ namespace skyline {
             u64 finalSample; //!< The final sample this buffer will be played in
             bool released; //!< If the buffer has been released
         };
+
+        /**
+         * @brief This saturates the specified value according to the numeric limits of Out
+         * @tparam Out The return value type and the numeric limit clamp
+         * @tparam Intermediate The intermediate type that is converted to from In before clamping
+         * @tparam In The input value type
+         * @param value The value to saturate
+         * @return The saturated value
+         */
+        template<typename Out, typename Intermediate, typename In>
+        inline Out Saturate(In value) {
+            return static_cast<Out>(std::clamp(static_cast<Intermediate>(value), static_cast<Intermediate>(std::numeric_limits<Out>::min()), static_cast<Intermediate>(std::numeric_limits<Out>::max())));
+        }
+
+        /**
+         * @brief This class is used to abstract an array into a circular buffer
+         * @tparam Type The type of elements stored in the buffer
+         * @tparam Size The maximum size of the circular buffer
+         */
+        template<typename Type, size_t Size>
+        class CircularBuffer {
+          private:
+            std::array<Type, Size> array{}; //!< The internal array holding the circular buffer
+            Type *start{array.begin()}; //!< The start/oldest element of the internal array
+            Type *end{array.begin()}; //!< The end/newest element of the internal array
+            bool empty{true}; //!< This boolean is used to differentiate between the buffer being full or empty
+            Mutex mtx; //!< The mutex ensures that the buffer operations don't overlap
+
+          public:
+            /**
+             * @brief This reads data from this buffer into the specified buffer
+             * @param address The address to write buffer data into
+             * @param maxSize The maximum amount of data to write in units of Type
+             * @param function If this is specified, then this is called rather than memcpy
+             * @return The amount of data written into the input buffer in units of Type
+             */
+            inline size_t Read(Type *address, ssize_t maxSize, void function(Type *, Type *) = {}, ssize_t copyOffset = -1) {
+                std::lock_guard guard(mtx);
+
+                if (empty)
+                    return 0;
+
+                ssize_t size{}, sizeBegin{}, sizeEnd{};
+
+                if (start < end) {
+                    sizeEnd = std::min(end - start, maxSize);
+                    size = sizeEnd;
+                } else {
+                    sizeEnd = std::min(array.end() - start, maxSize);
+                    sizeBegin = std::min(end - array.begin(), maxSize - sizeEnd);
+                    size = sizeBegin + sizeEnd;
+                }
+
+                if (function && copyOffset) {
+                    auto sourceEnd = start + ((copyOffset != -1) ? copyOffset : sizeEnd);
+
+                    for (auto source = start, destination = address; source < sourceEnd; source++, destination++)
+                        function(source, destination);
+
+                    if (copyOffset != -1) {
+                        std::memcpy(address + copyOffset, start + copyOffset, (sizeEnd - copyOffset) * sizeof(Type));
+                        copyOffset -= sizeEnd;
+                    }
+                } else {
+                    std::memcpy(address, start, sizeEnd * sizeof(Type));
+                }
+
+                address += sizeEnd;
+
+                if (sizeBegin) {
+                    if (function && copyOffset) {
+                        auto sourceEnd = array.begin() + ((copyOffset != -1) ? copyOffset : sizeBegin);
+
+                        for (auto source = array.begin(), destination = address; source < sourceEnd; source++, destination++)
+                            function(source, destination);
+
+                        if (copyOffset != -1)
+                            std::memcpy(array.begin() + copyOffset, address + copyOffset, (sizeBegin - copyOffset) * sizeof(Type));
+                    } else {
+                        std::memcpy(address, array.begin(), sizeBegin * sizeof(Type));
+                    }
+
+                    start = array.begin() + sizeBegin;
+                } else {
+                    start += sizeEnd;
+                }
+
+                if (start == end)
+                    empty = true;
+
+                return static_cast<size_t>(size);
+            }
+
+            /**
+             * @brief This appends data from the specified buffer into this buffer
+             * @param address The address of the buffer
+             * @param size The size of the buffer in units of Type
+             */
+            inline void Append(Type *address, ssize_t size) {
+                std::lock_guard guard(mtx);
+
+                while (size) {
+                    if (start <= end && end != array.end()) {
+                        auto sizeEnd = std::min(array.end() - end, size);
+
+                        std::memcpy(end, address, sizeEnd * sizeof(Type));
+
+                        address += sizeEnd;
+                        size -= sizeEnd;
+                        end += sizeEnd;
+                    } else {
+                        auto sizePreStart = (end == array.end()) ? std::min(start - array.begin(), size) : std::min(start - end, size);
+                        auto sizePostStart = std::min(array.end() - start, size - sizePreStart);
+
+                        if (sizePreStart)
+                            std::memcpy((end == array.end()) ? array.begin() : end, address, sizePreStart * sizeof(Type));
+
+                        if (end == array.end())
+                            end = array.begin() + sizePreStart;
+                        else
+                            end += sizePreStart;
+
+                        address += sizePreStart;
+                        size -= sizePreStart;
+
+                        if (sizePostStart)
+                            std::memcpy(end, address, sizePostStart * sizeof(Type));
+
+                        if (start == array.end())
+                            start = array.begin() + sizePostStart;
+                        else
+                            start += sizePostStart;
+
+                        if (end == array.end())
+                            end = array.begin() + sizePostStart;
+                        else
+                            end += sizePostStart;
+
+                        address += sizePostStart;
+                        size -= sizePostStart;
+                    }
+
+                    empty = false;
+                }
+            }
+
+            /**
+             * @brief This appends data from a vector to the buffer
+             * @param sampleData A reference to a vector containing the data to be appended
+             */
+            inline void Append(const std::vector<Type> &data) {
+                Append(const_cast<Type *>(data.data()), data.size());
+            }
+        };
     }
 }
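
To make the wrap-around behaviour above easier to follow, here is a deliberately simplified, index-based model of a circular buffer. It is not the CircularBuffer class from this commit (no locking, no mixing-function overload, and it tracks indices rather than start/end pointers); it only illustrates how appends and reads wrap around a fixed-size array.

#include <algorithm>
#include <array>
#include <cstdio>

template<typename Type, size_t Size>
class TinyRing {
  private:
    std::array<Type, Size> storage{};
    size_t head{};  // index of the oldest element
    size_t count{}; // number of valid elements

  public:
    void Append(const Type *data, size_t size) {
        for (size_t i = 0; i < size; i++) {
            storage[(head + count) % Size] = data[i];
            if (count < Size)
                count++;
            else
                head = (head + 1) % Size; // overwrite the oldest element when full
        }
    }

    // Copies up to maxSize elements into address and returns how many were read
    size_t Read(Type *address, size_t maxSize) {
        size_t read = std::min(count, maxSize);
        for (size_t i = 0; i < read; i++)
            address[i] = storage[(head + i) % Size];
        head = (head + read) % Size;
        count -= read;
        return read;
    }
};

int main() {
    TinyRing<int, 4> ring;
    int in[3]{1, 2, 3}, out[4]{};

    ring.Append(in, 3);
    ring.Append(in, 3);             // wraps and overwrites the oldest samples
    size_t got = ring.Read(out, 4); // drains the remaining elements, oldest first

    for (size_t i = 0; i < got; i++)
        printf("%d ", out[i]);      // prints: 3 1 2 3
}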


@@ -43,24 +43,21 @@ namespace skyline::audio {
         return bufferIds;
     }

-    void AudioTrack::AppendBuffer(const std::vector<i16> &sampleData, u64 tag) {
+    void AudioTrack::AppendBuffer(u64 tag, const i16* address, u64 size) {
         BufferIdentifier identifier;
+
         identifier.released = false;
         identifier.tag = tag;

-        if (identifierQueue.empty())
-            identifier.finalSample = sampleData.size();
+        if (identifiers.empty())
+            identifier.finalSample = size;
         else
-            identifier.finalSample = sampleData.size() + identifierQueue.front().finalSample;
+            identifier.finalSample = size + identifiers.front().finalSample;

-        bufferLock.lock();
-        identifierQueue.push_front(identifier);
+        std::lock_guard guard(bufferLock);
+        identifiers.push_front(identifier);

-        for (const auto &sample : sampleData)
-            sampleQueue.push(sample);
-        bufferLock.unlock();
+        samples.Append(const_cast<i16 *>(address), size);
     }

     void AudioTrack::CheckReleasedBuffers() {


@@ -22,7 +22,7 @@ namespace skyline::audio {
         const u32 sampleRate; //!< The sample rate of the track

       public:
-        std::queue<i16> sampleQueue; //!< Queue of all appended buffer data
+        CircularBuffer<i16, constant::SampleRate * constant::ChannelCount * 10> samples; //!< A buffer of all appended audio samples
         Mutex bufferLock; //!< This mutex ensures that appending to buffers doesn't overlap

         AudioOutState playbackState{AudioOutState::Stopped}; //!< The current state of playback
@@ -63,10 +63,20 @@ namespace skyline::audio {
         /**
          * @brief Appends audio samples to the output buffer
-         * @param sampleData Reference to a vector containing I16 format pcm data
          * @param tag The tag of the buffer
+         * @param address The address of the audio buffer
+         * @param size The size of the audio buffer in i16 units
          */
-        void AppendBuffer(const std::vector<i16> &sampleData, u64 tag);
+        void AppendBuffer(u64 tag, const i16* address, u64 size);
+
+        /**
+         * @brief Appends audio samples to the output buffer
+         * @param tag The tag of the buffer
+         * @param sampleData A reference to a vector containing I16 format PCM data
+         */
+        void AppendBuffer(u64 tag, const std::vector<i16> &sampleData = {}) {
+            AppendBuffer(tag, sampleData.data(), sampleData.size());
+        }

         /**
          * @brief Checks if any buffers have been released and calls the appropriate callback for them


@@ -47,10 +47,15 @@ namespace skyline::service::audio {
         state.logger->Debug("IAudioOut: Appending buffer with address: 0x{:X}, size: 0x{:X}", data.sampleBufferPtr, data.sampleSize);

-        tmpSampleBuffer.resize(data.sampleSize / sizeof(i16));
-        state.process->ReadMemory(tmpSampleBuffer.data(), data.sampleBufferPtr, data.sampleSize);
-        resampler.ResampleBuffer(tmpSampleBuffer, static_cast<double>(sampleRate) / constant::SampleRate, channelCount);
-
-        track->AppendBuffer(tmpSampleBuffer, tag);
+        if(sampleRate != constant::SampleRate) {
+            tmpSampleBuffer.resize(data.sampleSize / sizeof(i16));
+            state.process->ReadMemory(tmpSampleBuffer.data(), data.sampleBufferPtr, data.sampleSize);
+            resampler.ResampleBuffer(tmpSampleBuffer, static_cast<double>(sampleRate) / constant::SampleRate, channelCount);
+
+            track->AppendBuffer(tag, tmpSampleBuffer);
+        } else {
+            track->AppendBuffer(tag, state.process->GetPointer<i16>(data.sampleBufferPtr), data.sampleSize);
+        }
     }

     void IAudioOut::RegisterBufferEvent(type::KSession &session, ipc::IpcRequest &request, ipc::IpcResponse &response) {
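
The IAudioOut change above only resamples when the track's sample rate differs from the fixed 48 kHz output rate; otherwise the guest buffer is appended directly. A tiny illustrative sketch of that decision and of the ratio handed to the resampler follows; the names and sample rates here are made up for the example.

#include <cstdio>

int main() {
    constexpr int outputRate = 48000;      // constant::SampleRate in the emulator
    const int trackRates[2]{48000, 32000}; // example track sample rates

    for (int trackRate : trackRates) {
        if (trackRate != outputRate)       // mirrors: if(sampleRate != constant::SampleRate)
            printf("%d Hz track: resample with ratio %.3f\n", trackRate, static_cast<double>(trackRate) / outputRate);
        else
            printf("%d Hz track: append the guest buffer directly\n", trackRate);
    }
}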


@@ -5,8 +5,8 @@
 #include "IAudioRenderer.h"

 namespace skyline::service::audio::IAudioRenderer {
-    IAudioRenderer::IAudioRenderer(const DeviceState &state, ServiceManager &manager, AudioRendererParams &params)
-        : releaseEvent(std::make_shared<type::KEvent>(state)), rendererParams(params), memoryPoolCount(params.effectCount + params.voiceCount * 4), samplesPerBuffer(state.settings->GetInt("audren_buffer_size")), BaseService(state, manager, Service::audio_IAudioRenderer, "audio:IAudioRenderer", {
+    IAudioRenderer::IAudioRenderer(const DeviceState &state, ServiceManager &manager, AudioRendererParameters &parameters)
+        : releaseEvent(std::make_shared<type::KEvent>(state)), parameters(parameters), BaseService(state, manager, Service::audio_IAudioRenderer, "audio:IAudioRenderer", {
         {0x0, SFUNC(IAudioRenderer::GetSampleRate)},
         {0x1, SFUNC(IAudioRenderer::GetSampleCount)},
         {0x2, SFUNC(IAudioRenderer::GetMixBufferCount)},
@@ -16,17 +16,17 @@ namespace skyline::service::audio::IAudioRenderer {
         {0x6, SFUNC(IAudioRenderer::Stop)},
         {0x7, SFUNC(IAudioRenderer::QuerySystemEvent)},
     }) {
-        track = state.audio->OpenTrack(constant::ChannelCount, params.sampleRate, [this]() { this->releaseEvent->Signal(); });
+        track = state.audio->OpenTrack(constant::ChannelCount, parameters.sampleRate, [this]() { releaseEvent->Signal(); });
         track->Start();

-        memoryPools.resize(memoryPoolCount);
-        effects.resize(rendererParams.effectCount);
-        voices.resize(rendererParams.voiceCount, Voice(state));
+        memoryPools.resize(parameters.effectCount + parameters.voiceCount * 4);
+        effects.resize(parameters.effectCount);
+        voices.resize(parameters.voiceCount, Voice(state));

         // Fill track with empty samples that we will triple buffer
-        track->AppendBuffer(std::vector<i16>(), 0);
-        track->AppendBuffer(std::vector<i16>(), 1);
-        track->AppendBuffer(std::vector<i16>(), 2);
+        track->AppendBuffer(0);
+        track->AppendBuffer(1);
+        track->AppendBuffer(2);
     }

     IAudioRenderer::~IAudioRenderer() {
@@ -34,15 +34,15 @@ namespace skyline::service::audio::IAudioRenderer {
     }

     void IAudioRenderer::GetSampleRate(type::KSession &session, ipc::IpcRequest &request, ipc::IpcResponse &response) {
-        response.Push<u32>(rendererParams.sampleRate);
+        response.Push<u32>(parameters.sampleRate);
     }

     void IAudioRenderer::GetSampleCount(type::KSession &session, ipc::IpcRequest &request, ipc::IpcResponse &response) {
-        response.Push<u32>(rendererParams.sampleCount);
+        response.Push<u32>(parameters.sampleCount);
     }

     void IAudioRenderer::GetMixBufferCount(type::KSession &session, ipc::IpcRequest &request, ipc::IpcResponse &response) {
-        response.Push<u32>(rendererParams.subMixCount);
+        response.Push<u32>(parameters.subMixCount);
     }

     void IAudioRenderer::GetState(type::KSession &session, ipc::IpcRequest &request, ipc::IpcResponse &response) {
@@ -57,6 +57,7 @@ namespace skyline::service::audio::IAudioRenderer {
         inputAddress += sizeof(UpdateDataHeader);
         inputAddress += inputHeader.behaviorSize; // Unused

+        auto memoryPoolCount = memoryPools.size();
         std::vector<MemoryPoolIn> memoryPoolsIn(memoryPoolCount);
         state.process->ReadMemory(memoryPoolsIn.data(), inputAddress, memoryPoolCount * sizeof(MemoryPoolIn));
         inputAddress += inputHeader.memoryPoolSize;
@@ -65,15 +66,15 @@ namespace skyline::service::audio::IAudioRenderer {
             memoryPools[i].ProcessInput(memoryPoolsIn[i]);

         inputAddress += inputHeader.voiceResourceSize;
-        std::vector<VoiceIn> voicesIn(rendererParams.voiceCount);
-        state.process->ReadMemory(voicesIn.data(), inputAddress, rendererParams.voiceCount * sizeof(VoiceIn));
+        std::vector<VoiceIn> voicesIn(parameters.voiceCount);
+        state.process->ReadMemory(voicesIn.data(), inputAddress, parameters.voiceCount * sizeof(VoiceIn));
         inputAddress += inputHeader.voiceSize;

         for (auto i = 0; i < voicesIn.size(); i++)
             voices[i].ProcessInput(voicesIn[i]);

-        std::vector<EffectIn> effectsIn(rendererParams.effectCount);
-        state.process->ReadMemory(effectsIn.data(), inputAddress, rendererParams.effectCount * sizeof(EffectIn));
+        std::vector<EffectIn> effectsIn(parameters.effectCount);
+        state.process->ReadMemory(effectsIn.data(), inputAddress, parameters.effectCount * sizeof(EffectIn));

         for (auto i = 0; i < effectsIn.size(); i++)
             effects[i].ProcessInput(effectsIn[i]);
@@ -83,10 +84,10 @@ namespace skyline::service::audio::IAudioRenderer {
         UpdateDataHeader outputHeader{
             .revision = constant::RevMagic,
             .behaviorSize = 0xb0,
-            .memoryPoolSize = (rendererParams.effectCount + rendererParams.voiceCount * 4) * static_cast<u32>(sizeof(MemoryPoolOut)),
-            .voiceSize = rendererParams.voiceCount * static_cast<u32>(sizeof(VoiceOut)),
-            .effectSize = rendererParams.effectCount * static_cast<u32>(sizeof(EffectOut)),
-            .sinkSize = rendererParams.sinkCount * 0x20,
+            .memoryPoolSize = (parameters.effectCount + parameters.voiceCount * 4) * static_cast<u32>(sizeof(MemoryPoolOut)),
+            .voiceSize = parameters.voiceCount * static_cast<u32>(sizeof(VoiceOut)),
+            .effectSize = parameters.effectCount * static_cast<u32>(sizeof(EffectOut)),
+            .sinkSize = parameters.sinkCount * 0x20,
             .performanceManagerSize = 0x10,
             .elapsedFrameCountInfoSize = 0x0
         };
@@ -129,38 +130,37 @@ namespace skyline::service::audio::IAudioRenderer {
         for (auto &tag : released) {
             MixFinalBuffer();
-            track->AppendBuffer(sampleBuffer, tag);
+            track->AppendBuffer(tag, sampleBuffer.data(), sampleBuffer.size());
         }
     }

     void IAudioRenderer::MixFinalBuffer() {
-        int setIndex = 0;
-        sampleBuffer.resize(static_cast<size_t>(samplesPerBuffer * constant::ChannelCount));
+        u32 writtenSamples = 0;

         for (auto &voice : voices) {
             if (!voice.Playable())
                 continue;

-            int bufferOffset = 0;
-            int pendingSamples = samplesPerBuffer;
+            u32 bufferOffset{};
+            u32 pendingSamples = constant::MixBufferSize;

             while (pendingSamples > 0) {
-                int voiceBufferSize = 0;
-                int voiceBufferOffset = 0;
-                std::vector<i16> &voiceSamples = voice.GetBufferData(pendingSamples, voiceBufferOffset, voiceBufferSize);
+                u32 voiceBufferOffset{};
+                u32 voiceBufferSize{};
+                auto &voiceSamples = voice.GetBufferData(pendingSamples, voiceBufferOffset, voiceBufferSize);

                 if (voiceBufferSize == 0)
                     break;

                 pendingSamples -= voiceBufferSize / constant::ChannelCount;

-                for (int i = voiceBufferOffset; i < voiceBufferOffset + voiceBufferSize; i++) {
-                    if (setIndex == bufferOffset) {
-                        sampleBuffer[bufferOffset] = static_cast<i16>(std::clamp(static_cast<int>(static_cast<float>(voiceSamples[i]) * voice.volume), static_cast<int>(std::numeric_limits<i16>::min()), static_cast<int>(std::numeric_limits<i16>::max())));
-                        setIndex++;
+                for (auto index = voiceBufferOffset; index < voiceBufferOffset + voiceBufferSize; index++) {
+                    if (writtenSamples == bufferOffset) {
+                        sampleBuffer[bufferOffset] = skyline::audio::Saturate<i16, i32>(voiceSamples[index] * voice.volume);
+                        writtenSamples++;
                     } else {
-                        sampleBuffer[bufferOffset] += static_cast<i16>(std::clamp(static_cast<int>(sampleBuffer[voiceSamples[i]]) + static_cast<int>(static_cast<float>(voiceSamples[i]) * voice.volume), static_cast<int>(std::numeric_limits<i16>::min()), static_cast<int>(std::numeric_limits<i16>::max())));
+                        sampleBuffer[bufferOffset] = skyline::audio::Saturate<i16, i32>(sampleBuffer[bufferOffset] + (voiceSamples[index] * voice.volume));
                     }
                     bufferOffset++;


@@ -21,7 +21,7 @@ namespace skyline {
         /**
          * @brief The parameters used to configure an IAudioRenderer
         */
-        struct AudioRendererParams {
+        struct AudioRendererParameters {
             u32 sampleRate; //!< The sample rate to use for the renderer
             u32 sampleCount; //!< The buffer sample count
             u32 mixBufferCount; //!< The amount of mix buffers to use
@@ -36,7 +36,7 @@ namespace skyline {
             u32 _unk0_;
             u32 revision; //!< The revision of audren to use
         };
-        static_assert(sizeof(AudioRendererParams) == 0x34);
+        static_assert(sizeof(AudioRendererParameters) == 0x34);

         /**
          * @brief Header containing information about the software side audren implementation
@@ -63,21 +63,19 @@ namespace skyline {
          */
         class IAudioRenderer : public BaseService {
           private:
-            AudioRendererParams rendererParams; //!< The parameters to use for the renderer
+            AudioRendererParameters parameters; //!< The parameters to use for the renderer
             RevisionInfo revisionInfo{}; //!< Stores info about supported features for the audren revision used
             std::shared_ptr<skyline::audio::AudioTrack> track; //!< The audio track associated with the audio renderer
             std::shared_ptr<type::KEvent> releaseEvent; //!< The KEvent that is signalled when a buffer has been released
             std::vector<MemoryPool> memoryPools; //!< An vector of all memory pools that the guest may need
             std::vector<Effect> effects; //!< An vector of all effects that the guest may need
             std::vector<Voice> voices; //!< An vector of all voices that the guest may need
-            std::vector<i16> sampleBuffer; //!< The final output data that is appended to the stream
+            std::array<i16, constant::MixBufferSize * constant::ChannelCount> sampleBuffer; //!< The final output data that is appended to the stream

             skyline::audio::AudioOutState playbackState{skyline::audio::AudioOutState::Stopped}; //!< The current state of playback
-            const size_t memoryPoolCount; //!< The amount of memory pools the guest may need
-            const int samplesPerBuffer; //!< The amount of samples each appended buffer should contain

             /**
              * @brief Obtains new sample data from voices and mixes it together into the sample buffer
-             * @return The amount of samples present in the buffer
              */
             void MixFinalBuffer();
@@ -88,9 +86,9 @@ namespace skyline {
           public:
             /**
-             * @param params The parameters to use for rendering
+             * @param parameters The parameters to use for rendering
              */
-            IAudioRenderer(const DeviceState &state, ServiceManager &manager, AudioRendererParams &params);
+            IAudioRenderer(const DeviceState &state, ServiceManager &manager, AudioRendererParameters &parameters);

             /**
              * @brief Closes the audio track