3 Commits

Author SHA1 Message Date
Ray
11e3e6e0b9 REVIEWED: Formatting, tested sound examples 2026-02-21 09:09:43 +01:00
0c91f230fd Audio: Fix a glitch at the end of a sound. (#5578)
This is happening because the processing function keeps reading audio
data from the AudioBuffer even after it has been marked as stopped.

There is also an error in ReadAudioBufferFramesInInternalFormat(): if it is
called on a stopped sound, it still returns audio frames.
This has also been addressed in this commit.
2026-02-21 08:04:32 +01:00
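For context, the glitch described above comes down to the mixer pulling frames from buffers that are no longer playing. A minimal sketch of the idea, using the playing/paused fields shown in the diff below but a hypothetical mixing helper (MixIntoOutput), not raylib's actual mixing loop:

// Sketch: skip buffers that are not actively playing before asking them for frames
static void MixAudioBuffers(AudioBuffer *first, float *output, ma_uint32 frameCount)
{
    for (AudioBuffer *buffer = first; buffer != NULL; buffer = buffer->next)
    {
        // Without this check a stopped buffer keeps feeding stale frames into
        // the mix, which is what produced the end-of-sound glitch
        if (!buffer->playing || buffer->paused) continue;

        MixIntoOutput(buffer, output, frameCount);   // hypothetical mixing helper
    }
}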
005ff74eb0 Audio: Improvements to device configuration (#5577)
* Audio: Stop setting capture config options.

Since the device is being configured as a playback device, all capture
config options are unused and therefore should not be set.

* Audio: Stop pre-silencing the miniaudio output buffer.

raylib already manually silences the output buffer prior to mixing so
there is no reason to have miniaudio also do it. It can therefore be
disabled via the device config to make data processing slightly more
efficient.

* Audio: Stop forcing fixed sized processing callbacks.

There is no requirement for raylib to have guaranteed fixed-size audio
processing. Disabling it makes audio processing more efficient because the
data no longer has to pass through an internal intermediary buffer.

* Audio: Make the period size (latency) configurable.

The default period size is 10ms, but this is inappropriate for certain
platforms, so it is useful to allow those platforms to configure the period
size as required (see the config sketch after this commit entry).

* Audio: Fix documentation for pan.

The pan is -1..1, not 0..1.
2026-02-21 08:02:40 +01:00
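Taken together, the device-configuration changes listed above map onto miniaudio's device config roughly as follows. This is a minimal standalone sketch against miniaudio's public API (miniaudio 0.11 field names); the callback body is a placeholder, not raylib's mixer:

#include <string.h>
#include "miniaudio.h"

static void DataCallback(ma_device *device, void *output, const void *input, ma_uint32 frameCount)
{
    // Placeholder: raylib would mix all playing buffers into 'output' here.
    // With noPreSilencedOutputBuffer set, the callback is responsible for
    // writing (or silencing) every frame it is handed.
    memset(output, 0, frameCount*ma_get_bytes_per_frame(device->playback.format, device->playback.channels));
    (void)input;
}

int main(void)
{
    ma_device_config config = ma_device_config_init(ma_device_type_playback);   // playback only: capture.* left untouched
    config.playback.format = ma_format_f32;
    config.playback.channels = 2;
    config.sampleRate = 0;                       // 0 = use the device's native rate
    config.periodSizeInFrames = 0;               // 0 = miniaudio's ~10 ms default; adjust per platform
    config.dataCallback = DataCallback;
    config.noPreSilencedOutputBuffer = MA_TRUE;  // the callback silences/mixes the whole buffer itself
    config.noFixedSizedCallback = MA_TRUE;       // allow variable frameCount, skipping an intermediary buffer

    ma_device device;
    if (ma_device_init(NULL, &config, &device) != MA_SUCCESS) return 1;
    ma_device_start(&device);
    /* ... run the application ... */
    ma_device_uninit(&device);
    return 0;
}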
2 changed files with 20 additions and 18 deletions

View File

@@ -303,6 +303,7 @@
 #define AUDIO_DEVICE_FORMAT ma_format_f32 // Device output format (miniaudio: float-32bit)
 #define AUDIO_DEVICE_CHANNELS 2 // Device output channels: stereo
 #define AUDIO_DEVICE_SAMPLE_RATE 0 // Device sample rate (device default)
+#define AUDIO_DEVICE_PERIOD_SIZE_IN_FRAMES 0 // Device period size (controls latency, 0 defaults to 10ms)
 #define MAX_AUDIO_BUFFER_POOL_CHANNELS 16 // Maximum number of audio pool channels

View File

@@ -290,6 +290,9 @@ typedef struct tagBITMAPINFOHEADER {
 #ifndef AUDIO_DEVICE_SAMPLE_RATE
 #define AUDIO_DEVICE_SAMPLE_RATE 0 // Device output sample rate
 #endif
+#ifndef AUDIO_DEVICE_PERIOD_SIZE_IN_FRAMES
+#define AUDIO_DEVICE_PERIOD_SIZE_IN_FRAMES 0 // Device latency. 0 defaults to 10ms
+#endif
 #ifndef MAX_AUDIO_BUFFER_POOL_CHANNELS
 #define MAX_AUDIO_BUFFER_POOL_CHANNELS 16 // Audio pool channels
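Because the define above is wrapped in #ifndef, a platform can override it when building raylib from source. A hypothetical example (the value 240 is chosen purely for illustration):

// In src/config.h, or as a compiler define when building raylib:
//     cc -c raudio.c -DAUDIO_DEVICE_PERIOD_SIZE_IN_FRAMES=240 ...
#define AUDIO_DEVICE_PERIOD_SIZE_IN_FRAMES 240   // ~5 ms periods at 48 kHz instead of the ~10 ms default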
@@ -349,7 +352,7 @@ struct rAudioBuffer {
 float volume; // Audio buffer volume
 float pitch; // Audio buffer pitch
-float pan; // Audio buffer pan (0.0f to 1.0f)
+float pan; // Audio buffer pan (-1.0f to 1.0f)
 bool playing; // Audio buffer state: AUDIO_PLAYING
 bool paused; // Audio buffer state: AUDIO_PAUSED
@@ -477,12 +480,12 @@ void InitAudioDevice(void)
 config.playback.pDeviceID = NULL; // NULL for the default playback AUDIO.System.device
 config.playback.format = AUDIO_DEVICE_FORMAT;
 config.playback.channels = AUDIO_DEVICE_CHANNELS;
-config.capture.pDeviceID = NULL; // NULL for the default capture AUDIO.System.device
-config.capture.format = ma_format_s16;
-config.capture.channels = 1;
 config.sampleRate = AUDIO_DEVICE_SAMPLE_RATE;
+config.periodSizeInFrames = AUDIO_DEVICE_PERIOD_SIZE_IN_FRAMES;
 config.dataCallback = OnSendAudioDataToDevice;
 config.pUserData = NULL;
+config.noPreSilencedOutputBuffer = true; // raylib pre-silences the output buffer manually
+config.noFixedSizedCallback = true; // raylib does not require fixed sized callback guarantees. This bypasses an internal intermediary buffer

 result = ma_device_init(&AUDIO.System.context, &config, &AUDIO.System.device);
 if (result != MA_SUCCESS)
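As a quick sanity check on "period size controls latency": the per-period latency follows directly from the frame count and the sample rate. A small helper for the arithmetic (not part of raylib):

// Per-period latency in milliseconds for a given period size and sample rate
// e.g. 480 frames at 48000 Hz -> 10 ms, 240 frames -> 5 ms
static float PeriodLatencyMs(unsigned int periodSizeInFrames, unsigned int sampleRate)
{
    return 1000.0f*(float)periodSizeInFrames/(float)sampleRate;
}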
@@ -2374,6 +2377,9 @@ static void OnLog(void *pUserData, ma_uint32 level, const char *pMessage)
 // Reads audio data from an AudioBuffer object in internal format
 static ma_uint32 ReadAudioBufferFramesInInternalFormat(AudioBuffer *audioBuffer, void *framesOut, ma_uint32 frameCount)
 {
+    // Don't read anything if the sound is not playing
+    if (!audioBuffer->playing) return 0;
+
     // Using audio buffer callback
     if (audioBuffer->callback)
     {
@@ -2510,27 +2516,22 @@ static ma_uint32 ReadAudioBufferFramesInMixingFormat(AudioBuffer *audioBuffer, f
 // When the guess is overestimated, that's when it gets more complicated. In this case, any overflow
 // needs to be stored in a buffer for later processing by the next read.
 ma_uint32 estimatedInputFrameCount = (ma_uint32)(((float)audioBuffer->converter.resampler.sampleRateIn / audioBuffer->converter.resampler.sampleRateOut) * outputFramesToProcessThisIteration);
-if (estimatedInputFrameCount == 0)
-{
-    estimatedInputFrameCount = 1; // Make sure at least one input frame is read.
-}
-
-if (estimatedInputFrameCount > inputBufferFrameCap)
-{
-    estimatedInputFrameCount = inputBufferFrameCap;
-}
+if (estimatedInputFrameCount == 0) estimatedInputFrameCount = 1; // Make sure at least one input frame is read.
+if (estimatedInputFrameCount > inputBufferFrameCap) estimatedInputFrameCount = inputBufferFrameCap;

-estimatedInputFrameCount = ReadAudioBufferFramesInInternalFormat(audioBuffer, inputBuffer, estimatedInputFrameCount);
+ma_uint32 inputFramesInInternalFormatCount = ReadAudioBufferFramesInInternalFormat(audioBuffer, inputBuffer, estimatedInputFrameCount);

-ma_uint64 inputFramesProcessedThisIteration = estimatedInputFrameCount;
+ma_uint64 inputFramesProcessedThisIteration = inputFramesInInternalFormatCount;
 ma_uint64 outputFramesProcessedThisIteration = outputFramesToProcessThisIteration;
 ma_data_converter_process_pcm_frames(&audioBuffer->converter, inputBuffer, &inputFramesProcessedThisIteration, runningFramesOut, &outputFramesProcessedThisIteration);

+totalOutputFramesProcessed += (ma_uint32)outputFramesProcessedThisIteration;

-if (estimatedInputFrameCount > inputFramesProcessedThisIteration)
+if (inputFramesInInternalFormatCount > inputFramesProcessedThisIteration)
 {
     // Getting here means the estimated input frame count was overestimated. The residual needs
     // be stored for later use.
-    ma_uint64 residualFrameCount = estimatedInputFrameCount - inputFramesProcessedThisIteration;
+    ma_uint64 residualFrameCount = inputFramesInInternalFormatCount - inputFramesProcessedThisIteration;

     // A safety check to make sure the capacity of the residual cache is not exceeded.
     if (residualFrameCount > AUDIO_BUFFER_RESIDUAL_CAPACITY)
@@ -2542,7 +2543,7 @@ static ma_uint32 ReadAudioBufferFramesInMixingFormat(AudioBuffer *audioBuffer, f
     audioBuffer->converterResidualCount = residualFrameCount;
 }

-totalOutputFramesProcessed += (ma_uint32)outputFramesProcessedThisIteration;
+if (inputFramesInInternalFormatCount < estimatedInputFrameCount) break; // Reached the end of the sound
 }
 }
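A worked example of the estimate/residual logic in the two hunks above, with purely illustrative numbers (a 22050 Hz sound played on a 48000 Hz device, 512 output frames requested this iteration):

// estimatedInputFrameCount = (ma_uint32)((22050.0f/48000.0f)*512) = (ma_uint32)235.2 = 235
// - ReadAudioBufferFramesInInternalFormat() is asked for 235 input frames
// - if it returns 235 but the converter only consumes 233 of them, the 2 leftover
//   frames go into the residual cache and are processed by the next read
// - if it returns fewer than the 235 requested (e.g. 100), the sound has reached its
//   end, so the loop breaks after converting the frames that were actually read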