renamed everything so it is obvious what it does

commit 76ff4d220e (parent 86fbf4fd8f)
3 changed files with 190 additions and 220 deletions
src/audio.c (380 changed lines)
@@ -77,10 +77,10 @@
 // Types and Structures Definition
 //----------------------------------------------------------------------------------

-// Audio Context, used to create custom audio streams that are not bound to a sound file. There can be
-// no more than 4 concurrent audio contexts in use. This is due to each active context being tied to
-// a dedicated mix channel. All audio is 32bit floating point in stereo.
-typedef struct AudioContext_t {
+// Used to create custom audio streams that are not bound to a specific file. There can be
+// no more than 4 concurrent mixchannels in use. This is due to each active mixc being tied to
+// a dedicated mix channel.
+typedef struct MixChannel_t {
     unsigned short sampleRate;              // default is 48000
     unsigned char channels;                 // 1=mono,2=stereo
     unsigned char mixChannel;               // 0-3 or mixA-mixD, each mix channel can receive up to one dedicated audio stream
@@ -89,14 +89,14 @@ typedef struct AudioContext_t {
     ALenum alFormat;                        // openAL format specifier
     ALuint alSource;                        // openAL source
     ALuint alBuffer[MAX_STREAM_BUFFERS];    // openAL sample buffer
-} AudioContext_t;
+} MixChannel_t;

 // Music type (file streaming from memory)
-// NOTE: Anything longer than ~10 seconds should be streamed...
+// NOTE: Anything longer than ~10 seconds should be streamed into a mix channel...
 typedef struct Music {
     stb_vorbis *stream;
-    jar_xm_context_t *chipctx;              // Stores jar_xm context
-    AudioContext_t *ctx;                    // audio context
+    jar_xm_context_t *chipctx;              // Stores jar_xm mixc
+    MixChannel_t *mixc;                     // mix channel

     int totalSamplesLeft;
     float totalLengthSeconds;
@@ -111,9 +111,9 @@ typedef enum { INFO = 0, ERROR, WARNING, DEBUG, OTHER } TraceLogType;
 //----------------------------------------------------------------------------------
 // Global Variables Definition
 //----------------------------------------------------------------------------------
-static AudioContext_t* mixChannelsActive_g[MAX_AUDIO_CONTEXTS];     // What mix channels are currently active
+static MixChannel_t* mixChannelsActive_g[MAX_AUDIO_CONTEXTS];       // What mix channels are currently active
 static bool musicEnabled_g = false;
 static Music currentMusic[MAX_MUSIC_STREAMS];                       // Current music loaded, up to two can play at the same time

 //----------------------------------------------------------------------------------
 // Module specific Functions Declaration
@@ -122,13 +122,17 @@ static Wave LoadWAV(const char *fileName);          // Load WAV file
 static Wave LoadOGG(char *fileName);                // Load OGG file
 static void UnloadWave(Wave wave);                  // Unload wave data

-static bool BufferMusicStream(int index);           // Fill music buffers with data
+static bool BufferMusicStream(int index, int numBuffers);   // Fill music buffers with data
 static void EmptyMusicStream(int index);            // Empty music buffers

-static unsigned short FillAlBufferWithSilence(AudioContext_t *context, ALuint buffer);  // fill buffer with zeros, returns number processed
-static void ResampleShortToFloat(short *shorts, float *floats, unsigned short len);     // pass two arrays of the same legnth in
-static void ResampleByteToFloat(char *chars, float *floats, unsigned short len);        // pass two arrays of same length in
-static bool isMusicStreamReady(int index);          // Checks if music buffer is ready to be refilled
+static MixChannel_t* InitMixChannel(unsigned short sampleRate, unsigned char mixChannel, unsigned char channels, bool floatingPoint); // For streaming into mix channels.
+static void CloseMixChannel(MixChannel_t* mixc);    // Frees mix channel
+static unsigned short BufferMixChannel(MixChannel_t* mixc, void *data, int numberElements); // Pushes more audio data into mixc mix channel, if NULL is passed it pauses
+static unsigned short FillAlBufferWithSilence(MixChannel_t *mixc, ALuint buffer);       // Fill buffer with zeros, returns number processed
+static void ResampleShortToFloat(short *shorts, float *floats, unsigned short len);     // Pass two arrays of the same legnth in
+static void ResampleByteToFloat(char *chars, float *floats, unsigned short len);        // Pass two arrays of same length in
+static int IsMusicStreamReadyForBuffering(int index); // Checks if music buffer is ready to be refilled

 #if defined(AUDIO_STANDALONE)
 const char *GetExtension(const char *fileName);     // Get the extension for a filename
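The declarations above only hint at how the pieces are meant to be combined, so here is a minimal sketch (not part of the commit) of converting decoder output and feeding it to a mix channel from inside audio.c. The buffer names, the length of 512, and the assumption that mixc is a valid floating point MixChannel_t are all made up for illustration:

    // Illustrative only: convert 16-bit decoder output to floats, then queue it on a mix channel
    short pcm16[512];                         // 16-bit samples from some decoder (hypothetical)
    float pcmf[512];                          // float destination, same length as the input
    // ... fill pcm16 ...
    ResampleShortToFloat(pcm16, pcmf, 512);   // declared above: both arrays must have the same length
    BufferMixChannel(mixc, pcmf, 512);        // push one buffer to a float mix channel (mixc assumed valid)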
@@ -139,7 +143,7 @@ void TraceLog(int msgType, const char *text, ...); // Outputs a trace log messa
 // Module Functions Definition - Audio Device initialization and Closing
 //----------------------------------------------------------------------------------

-// Initialize audio device and context
+// Initialize audio device and mixc
 void InitAudioDevice(void)
 {
     // Open and initialize a device with default settings
@@ -155,7 +159,7 @@ void InitAudioDevice(void)

         alcCloseDevice(device);

-        TraceLog(ERROR, "Could not setup audio context");
+        TraceLog(ERROR, "Could not setup mix channel");
     }

     TraceLog(INFO, "Audio device and context initialized successfully: %s", alcGetString(device, ALC_DEVICE_SPECIFIER));
@@ -171,14 +175,14 @@ void CloseAudioDevice(void)
 {
     for(int index=0; index<MAX_MUSIC_STREAMS; index++)
     {
-        if(currentMusic[index].ctx) StopMusicStream(index);      // Stop music streaming and close current stream
+        if(currentMusic[index].mixc) StopMusicStream(index);     // Stop music streaming and close current stream
     }


     ALCdevice *device;
     ALCcontext *context = alcGetCurrentContext();

-    if (context == NULL) TraceLog(WARNING, "Could not get current audio context for closing");
+    if (context == NULL) TraceLog(WARNING, "Could not get current mix channel for closing");

     device = alcGetContextsDevice(context);

@@ -203,186 +207,141 @@ bool IsAudioDeviceReady(void)
 // Module Functions Definition - Custom audio output
 //----------------------------------------------------------------------------------

-// Audio contexts are for outputing custom audio waveforms, This will shut down any other sound sources currently playing
-// The mixChannel is what mix channel you want to operate on, 0-3 are the ones available. Each mix channel can only be used one at a time.
-// exmple usage is InitAudioContext(48000, 0, 2, true); // mixchannel 1, 48khz, stereo, floating point
-AudioContext InitAudioContext(unsigned short sampleRate, unsigned char mixChannel, unsigned char channels, bool floatingPoint)
+// For streaming into mix channels.
+// The mixChannel is what audio muxing channel you want to operate on, 0-3 are the ones available. Each mix channel can only be used one at a time.
+// exmple usage is InitMixChannel(48000, 0, 2, true); // mixchannel 1, 48khz, stereo, floating point
+static MixChannel_t* InitMixChannel(unsigned short sampleRate, unsigned char mixChannel, unsigned char channels, bool floatingPoint)
 {
     if(mixChannel >= MAX_AUDIO_CONTEXTS) return NULL;
     if(!IsAudioDeviceReady()) InitAudioDevice();

     if(!mixChannelsActive_g[mixChannel]){
-        AudioContext_t *ac = (AudioContext_t*)malloc(sizeof(AudioContext_t));
-        ac->sampleRate = sampleRate;
-        ac->channels = channels;
-        ac->mixChannel = mixChannel;
-        ac->floatingPoint = floatingPoint;
-        mixChannelsActive_g[mixChannel] = ac;
+        MixChannel_t *mixc = (MixChannel_t*)malloc(sizeof(MixChannel_t));
+        mixc->sampleRate = sampleRate;
+        mixc->channels = channels;
+        mixc->mixChannel = mixChannel;
+        mixc->floatingPoint = floatingPoint;
+        mixChannelsActive_g[mixChannel] = mixc;

         // setup openAL format
         if(channels == 1)
         {
             if(floatingPoint)
-                ac->alFormat = AL_FORMAT_MONO_FLOAT32;
+                mixc->alFormat = AL_FORMAT_MONO_FLOAT32;
             else
-                ac->alFormat = AL_FORMAT_MONO16;
+                mixc->alFormat = AL_FORMAT_MONO16;
         }
         else if(channels == 2)
         {
             if(floatingPoint)
-                ac->alFormat = AL_FORMAT_STEREO_FLOAT32;
+                mixc->alFormat = AL_FORMAT_STEREO_FLOAT32;
             else
-                ac->alFormat = AL_FORMAT_STEREO16;
+                mixc->alFormat = AL_FORMAT_STEREO16;
         }

         // Create an audio source
-        alGenSources(1, &ac->alSource);
-        alSourcef(ac->alSource, AL_PITCH, 1);
-        alSourcef(ac->alSource, AL_GAIN, 1);
-        alSource3f(ac->alSource, AL_POSITION, 0, 0, 0);
-        alSource3f(ac->alSource, AL_VELOCITY, 0, 0, 0);
+        alGenSources(1, &mixc->alSource);
+        alSourcef(mixc->alSource, AL_PITCH, 1);
+        alSourcef(mixc->alSource, AL_GAIN, 1);
+        alSource3f(mixc->alSource, AL_POSITION, 0, 0, 0);
+        alSource3f(mixc->alSource, AL_VELOCITY, 0, 0, 0);

         // Create Buffer
-        alGenBuffers(MAX_STREAM_BUFFERS, ac->alBuffer);
+        alGenBuffers(MAX_STREAM_BUFFERS, mixc->alBuffer);

         //fill buffers
         int x;
         for(x=0;x<MAX_STREAM_BUFFERS;x++)
-            FillAlBufferWithSilence(ac, ac->alBuffer[x]);
+            FillAlBufferWithSilence(mixc, mixc->alBuffer[x]);

-        alSourceQueueBuffers(ac->alSource, MAX_STREAM_BUFFERS, ac->alBuffer);
-        alSourcePlay(ac->alSource);
-        ac->playing = true;
+        alSourceQueueBuffers(mixc->alSource, MAX_STREAM_BUFFERS, mixc->alBuffer);
+        mixc->playing = true;
+        alSourcePlay(mixc->alSource);

-        return ac;
+        return mixc;
     }
     return NULL;
 }

-// Frees buffer in audio context
-void CloseAudioContext(AudioContext ctx)
+// Frees buffer in mix channel
+static void CloseMixChannel(MixChannel_t* mixc)
 {
-    AudioContext_t *context = (AudioContext_t*)ctx;
-    if(context){
-        alSourceStop(context->alSource);
-        context->playing = false;
+    if(mixc){
+        alSourceStop(mixc->alSource);
+        mixc->playing = false;

         //flush out all queued buffers
         ALuint buffer = 0;
         int queued = 0;
-        alGetSourcei(context->alSource, AL_BUFFERS_QUEUED, &queued);
+        alGetSourcei(mixc->alSource, AL_BUFFERS_QUEUED, &queued);
         while (queued > 0)
         {
-            alSourceUnqueueBuffers(context->alSource, 1, &buffer);
+            alSourceUnqueueBuffers(mixc->alSource, 1, &buffer);
             queued--;
         }

         //delete source and buffers
-        alDeleteSources(1, &context->alSource);
-        alDeleteBuffers(MAX_STREAM_BUFFERS, context->alBuffer);
-        mixChannelsActive_g[context->mixChannel] = NULL;
-        free(context);
-        ctx = NULL;
+        alDeleteSources(1, &mixc->alSource);
+        alDeleteBuffers(MAX_STREAM_BUFFERS, mixc->alBuffer);
+        mixChannelsActive_g[mixc->mixChannel] = NULL;
+        free(mixc);
+        mixc = NULL;
     }
 }

-// Pushes more audio data into context mix channel, if none are ever pushed then zeros are fed in.
-// Call "UpdateAudioContext(ctx, NULL, 0)" if you want to pause the audio.
+// Pushes more audio data into mixc mix channel, only one buffer per call
+// Call "BufferMixChannel(mixc, NULL, 0)" if you want to pause the audio.
 // @Returns number of samples that where processed.
-unsigned short UpdateAudioContext(AudioContext ctx, void *data, unsigned short numberElements)
+static unsigned short BufferMixChannel(MixChannel_t* mixc, void *data, int numberElements)
 {
-    AudioContext_t *context = (AudioContext_t*)ctx;
+    if(!mixc || mixChannelsActive_g[mixc->mixChannel] != mixc) return 0; // when there is two channels there must be an even number of samples

-    if(!context || (context->channels == 2 && numberElements % 2 != 0)) return 0; // when there is two channels there must be an even number of samples

     if (!data || !numberElements)
     { // pauses audio until data is given
-        alSourcePause(context->alSource);
-        context->playing = false;
+        if(mixc->playing){
+            alSourcePause(mixc->alSource);
+            mixc->playing = false;
+        }
         return 0;
     }
-    else
+    else if(!mixc->playing)
     { // restart audio otherwise
-        ALint state;
-        alGetSourcei(context->alSource, AL_SOURCE_STATE, &state);
-        if (state != AL_PLAYING){
-            alSourcePlay(context->alSource);
-            context->playing = true;
-        }
+        alSourcePlay(mixc->alSource);
+        mixc->playing = true;
     }

-    if (context && context->playing && mixChannelsActive_g[context->mixChannel] == context)
+    ALuint buffer = 0;
+
+    alSourceUnqueueBuffers(mixc->alSource, 1, &buffer);
+    if(!buffer) return 0;
+    if(mixc->floatingPoint) // process float buffers
     {
-        ALint processed = 0;
-        ALuint buffer = 0;
-        unsigned short numberProcessed = 0;
-        unsigned short numberRemaining = numberElements;
-
-        alGetSourcei(context->alSource, AL_BUFFERS_PROCESSED, &processed); // Get the number of already processed buffers (if any)
-        if(!processed) return 0; // nothing to process, queue is still full
-
-        while (processed > 0)
-        {
-            if(context->floatingPoint) // process float buffers
-            {
-                float *ptr = (float*)data;
-                alSourceUnqueueBuffers(context->alSource, 1, &buffer);
-                if(numberRemaining >= MUSIC_BUFFER_SIZE_FLOAT)
-                {
-                    alBufferData(buffer, context->alFormat, &ptr[numberProcessed], MUSIC_BUFFER_SIZE_FLOAT*sizeof(float), context->sampleRate);
-                    numberProcessed+=MUSIC_BUFFER_SIZE_FLOAT;
-                    numberRemaining-=MUSIC_BUFFER_SIZE_FLOAT;
-                }
-                else
-                {
-                    alBufferData(buffer, context->alFormat, &ptr[numberProcessed], numberRemaining*sizeof(float), context->sampleRate);
-                    numberProcessed+=numberRemaining;
-                    numberRemaining=0;
-                }
-                alSourceQueueBuffers(context->alSource, 1, &buffer);
-                processed--;
-            }
-            else if(!context->floatingPoint) // process short buffers
-            {
-                short *ptr = (short*)data;
-                alSourceUnqueueBuffers(context->alSource, 1, &buffer);
-                if(numberRemaining >= MUSIC_BUFFER_SIZE_SHORT)
-                {
-                    alBufferData(buffer, context->alFormat, &ptr[numberProcessed], MUSIC_BUFFER_SIZE_FLOAT*sizeof(short), context->sampleRate);
-                    numberProcessed+=MUSIC_BUFFER_SIZE_SHORT;
-                    numberRemaining-=MUSIC_BUFFER_SIZE_SHORT;
-                }
-                else
-                {
-                    alBufferData(buffer, context->alFormat, &ptr[numberProcessed], numberRemaining*sizeof(short), context->sampleRate);
-                    numberProcessed+=numberRemaining;
-                    numberRemaining=0;
-                }
-                alSourceQueueBuffers(context->alSource, 1, &buffer);
-                processed--;
-            }
-            else
-                break;
-        }
-        return numberProcessed;
+        float *ptr = (float*)data;
+        alBufferData(buffer, mixc->alFormat, ptr, numberElements*sizeof(float), mixc->sampleRate);
     }
-    return 0;
+    else // process short buffers
+    {
+        short *ptr = (short*)data;
+        alBufferData(buffer, mixc->alFormat, ptr, numberElements*sizeof(short), mixc->sampleRate);
+    }
+    alSourceQueueBuffers(mixc->alSource, 1, &buffer);
+
+    return numberElements;
 }

 // fill buffer with zeros, returns number processed
-static unsigned short FillAlBufferWithSilence(AudioContext_t *context, ALuint buffer)
+static unsigned short FillAlBufferWithSilence(MixChannel_t *mixc, ALuint buffer)
 {
-    if(context->floatingPoint){
+    if(mixc->floatingPoint){
         float pcm[MUSIC_BUFFER_SIZE_FLOAT] = {0.f};
-        alBufferData(buffer, context->alFormat, pcm, MUSIC_BUFFER_SIZE_FLOAT*sizeof(float), context->sampleRate);
+        alBufferData(buffer, mixc->alFormat, pcm, MUSIC_BUFFER_SIZE_FLOAT*sizeof(float), mixc->sampleRate);
         return MUSIC_BUFFER_SIZE_FLOAT;
     }
     else
     {
         short pcm[MUSIC_BUFFER_SIZE_SHORT] = {0};
-        alBufferData(buffer, context->alFormat, pcm, MUSIC_BUFFER_SIZE_SHORT*sizeof(short), context->sampleRate);
+        alBufferData(buffer, mixc->alFormat, pcm, MUSIC_BUFFER_SIZE_SHORT*sizeof(short), mixc->sampleRate);
         return MUSIC_BUFFER_SIZE_SHORT;
     }
 }
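Taken together, the static functions above define the whole life cycle of a mix channel. A compact sketch of that flow, only usable inside audio.c where these statics are visible; the silent sample buffer is purely illustrative:

    // Sketch: claim mix channel 0 at 48 kHz, stereo, floating point
    MixChannel_t *mixc = InitMixChannel(48000, 0, 2, true);
    if (mixc != NULL)
    {
        float samples[MUSIC_BUFFER_SIZE_FLOAT] = { 0.0f };          // interleaved L/R samples to play
        BufferMixChannel(mixc, samples, MUSIC_BUFFER_SIZE_FLOAT);   // queues at most one processed buffer per call
        BufferMixChannel(mixc, NULL, 0);                            // NULL pauses playback until more data arrives
        CloseMixChannel(mixc);                                      // stop, unqueue all buffers, free the slot
    }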
@@ -417,6 +376,28 @@ static void ResampleByteToFloat(char *chars, float *floats, unsigned short len)
     }
 }

+// used to output raw audio streams, returns negative numbers on error
+RawAudioContext InitRawAudioContext(int sampleRate, int channels, bool floatingPoint)
+{
+    int mixIndex;
+    for(mixIndex = 0; mixIndex < MAX_AUDIO_CONTEXTS; mixIndex++) // find empty mix channel slot
+    {
+        if(mixChannelsActive_g[mixIndex] == NULL) break;
+        else if(mixIndex = MAX_AUDIO_CONTEXTS - 1) return -1; // error
+    }
+
+    if(InitMixChannel(sampleRate, mixIndex, channels, floatingPoint))
+        return mixIndex;
+    else
+        return -2; // error
+}
+
+void CloseRawAudioContext(RawAudioContext ctx)
+{
+    if(mixChannelsActive_g[ctx])
+        CloseMixChannel(mixChannelsActive_g[ctx]);
+}
+
+
 //----------------------------------------------------------------------------------
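The new public wrapper returns the index of the mix channel it claimed, so a caller can treat any negative value as failure (-1 when no slot is free, -2 when the mix channel could not be initialized). A hedged usage sketch, with the warning message and error handling invented for illustration:

    RawAudioContext ctx = InitRawAudioContext(48000, 2, true);      // 48 kHz, stereo, float samples
    if (ctx < 0)
    {
        TraceLog(WARNING, "No raw audio output available");         // hypothetical handling, message not from the commit
    }
    else
    {
        // ... feed samples here; BufferMixChannel() stays private in this diff, so a public
        // call for pushing data into a raw context is not part of this commit ...
        CloseRawAudioContext(ctx);                                   // releases the underlying mix channel
    }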
@@ -807,14 +788,14 @@ int PlayMusicStream(int musicIndex, char *fileName)
             currentMusic[musicIndex].totalLengthSeconds = stb_vorbis_stream_length_in_seconds(currentMusic[musicIndex].stream);

             if (info.channels == 2){
-                currentMusic[musicIndex].ctx = InitAudioContext(info.sample_rate, mixIndex, 2, false);
-                currentMusic[musicIndex].ctx->playing = true;
+                currentMusic[musicIndex].mixc = InitMixChannel(info.sample_rate, mixIndex, 2, false);
+                currentMusic[musicIndex].mixc->playing = true;
             }
             else{
-                currentMusic[musicIndex].ctx = InitAudioContext(info.sample_rate, mixIndex, 1, false);
-                currentMusic[musicIndex].ctx->playing = true;
+                currentMusic[musicIndex].mixc = InitMixChannel(info.sample_rate, mixIndex, 1, false);
+                currentMusic[musicIndex].mixc->playing = true;
             }
-            if(!currentMusic[musicIndex].ctx) return 4; // error
+            if(!currentMusic[musicIndex].mixc) return 4; // error
         }
     }
     else if (strcmp(GetExtension(fileName),"xm") == 0)
@@ -832,9 +813,9 @@ int PlayMusicStream(int musicIndex, char *fileName)
         TraceLog(INFO, "[%s] XM number of samples: %i", fileName, currentMusic[musicIndex].totalSamplesLeft);
         TraceLog(INFO, "[%s] XM track length: %11.6f sec", fileName, currentMusic[musicIndex].totalLengthSeconds);

-        currentMusic[musicIndex].ctx = InitAudioContext(48000, mixIndex, 2, true);
-        if(!currentMusic[musicIndex].ctx) return 5; // error
-        currentMusic[musicIndex].ctx->playing = true;
+        currentMusic[musicIndex].mixc = InitMixChannel(48000, mixIndex, 2, false);
+        if(!currentMusic[musicIndex].mixc) return 5; // error
+        currentMusic[musicIndex].mixc->playing = true;
     }
     else
     {
@@ -853,9 +834,9 @@ int PlayMusicStream(int musicIndex, char *fileName)
 // Stop music playing for individual music index of currentMusic array (close stream)
 void StopMusicStream(int index)
 {
-    if (index < MAX_MUSIC_STREAMS && currentMusic[index].ctx)
+    if (index < MAX_MUSIC_STREAMS && currentMusic[index].mixc)
     {
-        CloseAudioContext(currentMusic[index].ctx);
+        CloseMixChannel(currentMusic[index].mixc);

         if (currentMusic[index].chipTune)
         {
|
@ -889,11 +870,11 @@ int getMusicStreamCount(void)
|
||||||
void PauseMusicStream(int index)
|
void PauseMusicStream(int index)
|
||||||
{
|
{
|
||||||
// Pause music stream if music available!
|
// Pause music stream if music available!
|
||||||
if (index < MAX_MUSIC_STREAMS && currentMusic[index].ctx && musicEnabled_g)
|
if (index < MAX_MUSIC_STREAMS && currentMusic[index].mixc && musicEnabled_g)
|
||||||
{
|
{
|
||||||
TraceLog(INFO, "Pausing music stream");
|
TraceLog(INFO, "Pausing music stream");
|
||||||
alSourcePause(currentMusic[index].ctx->alSource);
|
alSourcePause(currentMusic[index].mixc->alSource);
|
||||||
currentMusic[index].ctx->playing = false;
|
currentMusic[index].mixc->playing = false;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -902,13 +883,13 @@ void ResumeMusicStream(int index)
|
||||||
{
|
{
|
||||||
// Resume music playing... if music available!
|
// Resume music playing... if music available!
|
||||||
ALenum state;
|
ALenum state;
|
||||||
if(index < MAX_MUSIC_STREAMS && currentMusic[index].ctx){
|
if(index < MAX_MUSIC_STREAMS && currentMusic[index].mixc){
|
||||||
alGetSourcei(currentMusic[index].ctx->alSource, AL_SOURCE_STATE, &state);
|
alGetSourcei(currentMusic[index].mixc->alSource, AL_SOURCE_STATE, &state);
|
||||||
if (state == AL_PAUSED)
|
if (state == AL_PAUSED)
|
||||||
{
|
{
|
||||||
TraceLog(INFO, "Resuming music stream");
|
TraceLog(INFO, "Resuming music stream");
|
||||||
alSourcePlay(currentMusic[index].ctx->alSource);
|
alSourcePlay(currentMusic[index].mixc->alSource);
|
||||||
currentMusic[index].ctx->playing = true;
|
currentMusic[index].mixc->playing = true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -919,8 +900,8 @@ bool IsMusicPlaying(int index)
|
||||||
bool playing = false;
|
bool playing = false;
|
||||||
ALint state;
|
ALint state;
|
||||||
|
|
||||||
if(index < MAX_MUSIC_STREAMS && currentMusic[index].ctx){
|
if(index < MAX_MUSIC_STREAMS && currentMusic[index].mixc){
|
||||||
alGetSourcei(currentMusic[index].ctx->alSource, AL_SOURCE_STATE, &state);
|
alGetSourcei(currentMusic[index].mixc->alSource, AL_SOURCE_STATE, &state);
|
||||||
if (state == AL_PLAYING) playing = true;
|
if (state == AL_PLAYING) playing = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -930,15 +911,15 @@ bool IsMusicPlaying(int index)
|
||||||
// Set volume for music
|
// Set volume for music
|
||||||
void SetMusicVolume(int index, float volume)
|
void SetMusicVolume(int index, float volume)
|
||||||
{
|
{
|
||||||
if(index < MAX_MUSIC_STREAMS && currentMusic[index].ctx){
|
if(index < MAX_MUSIC_STREAMS && currentMusic[index].mixc){
|
||||||
alSourcef(currentMusic[index].ctx->alSource, AL_GAIN, volume);
|
alSourcef(currentMusic[index].mixc->alSource, AL_GAIN, volume);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void SetMusicPitch(int index, float pitch)
|
void SetMusicPitch(int index, float pitch)
|
||||||
{
|
{
|
||||||
if(index < MAX_MUSIC_STREAMS && currentMusic[index].ctx){
|
if(index < MAX_MUSIC_STREAMS && currentMusic[index].mixc){
|
||||||
alSourcef(currentMusic[index].ctx->alSource, AL_PITCH, pitch);
|
alSourcef(currentMusic[index].mixc->alSource, AL_PITCH, pitch);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -962,19 +943,19 @@ float GetMusicTimeLength(int index)
|
||||||
float GetMusicTimePlayed(int index)
|
float GetMusicTimePlayed(int index)
|
||||||
{
|
{
|
||||||
float secondsPlayed;
|
float secondsPlayed;
|
||||||
if(index < MAX_MUSIC_STREAMS && currentMusic[index].ctx)
|
if(index < MAX_MUSIC_STREAMS && currentMusic[index].mixc)
|
||||||
{
|
{
|
||||||
if (currentMusic[index].chipTune)
|
if (currentMusic[index].chipTune)
|
||||||
{
|
{
|
||||||
uint64_t samples;
|
uint64_t samples;
|
||||||
jar_xm_get_position(currentMusic[index].chipctx, NULL, NULL, NULL, &samples);
|
jar_xm_get_position(currentMusic[index].chipctx, NULL, NULL, NULL, &samples);
|
||||||
secondsPlayed = (float)samples / (48000 * currentMusic[index].ctx->channels); // Not sure if this is the correct value
|
secondsPlayed = (float)samples / (48000 * currentMusic[index].mixc->channels); // Not sure if this is the correct value
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
int totalSamples = stb_vorbis_stream_length_in_samples(currentMusic[index].stream) * currentMusic[index].ctx->channels;
|
int totalSamples = stb_vorbis_stream_length_in_samples(currentMusic[index].stream) * currentMusic[index].mixc->channels;
|
||||||
int samplesPlayed = totalSamples - currentMusic[index].totalSamplesLeft;
|
int samplesPlayed = totalSamples - currentMusic[index].totalSamplesLeft;
|
||||||
secondsPlayed = (float)samplesPlayed / (currentMusic[index].ctx->sampleRate * currentMusic[index].ctx->channels);
|
secondsPlayed = (float)samplesPlayed / (currentMusic[index].mixc->sampleRate * currentMusic[index].mixc->channels);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -987,32 +968,32 @@ float GetMusicTimePlayed(int index)
 //----------------------------------------------------------------------------------

 // Fill music buffers with new data from music stream
-static bool BufferMusicStream(int index)
+static bool BufferMusicStream(int index, int numBuffers)
 {
     short pcm[MUSIC_BUFFER_SIZE_SHORT];
     float pcmf[MUSIC_BUFFER_SIZE_FLOAT];

-    int size = 0;              // Total size of data steamed in L+R samples
+    int size = 0;              // Total size of data steamed in L+R samples for xm floats, individual L or R for ogg shorts
     bool active = true;        // We can get more data from stream (not finished)

-    if (!currentMusic[index].ctx->playing && currentMusic[index].totalSamplesLeft > 0)
-    {
-        UpdateAudioContext(currentMusic[index].ctx, NULL, 0);
-        return true; // it is still active but it is paused
-    }
-
     if (currentMusic[index].chipTune) // There is no end of stream for xmfiles, once the end is reached zeros are generated for non looped chiptunes.
     {
-        if(currentMusic[index].totalSamplesLeft >= MUSIC_BUFFER_SIZE_FLOAT / 2)
-            size = MUSIC_BUFFER_SIZE_FLOAT / 2;
+        if(currentMusic[index].totalSamplesLeft >= MUSIC_BUFFER_SIZE_SHORT)
+            size = MUSIC_BUFFER_SIZE_SHORT / 2;
         else
             size = currentMusic[index].totalSamplesLeft / 2;

-        jar_xm_generate_samples(currentMusic[index].chipctx, pcmf, size); // reads 2*readlen shorts and moves them to buffer+size memory location
-        UpdateAudioContext(currentMusic[index].ctx, pcmf, size * 2);
-        currentMusic[index].totalSamplesLeft -= size * 2;
+        for(int x=0; x<numBuffers; x++)
+        {
+            jar_xm_generate_samples_16bit(currentMusic[index].chipctx, pcm, size); // reads 2*readlen shorts and moves them to buffer+size memory location
+            BufferMixChannel(currentMusic[index].mixc, pcm, size * 2);
+            currentMusic[index].totalSamplesLeft -= size * 2;
+            if(currentMusic[index].totalSamplesLeft <= 0)
+            {
+                active = false;
+                break;
+            }
+        }
     }
     else
     {
@@ -1021,13 +1002,18 @@ static bool BufferMusicStream(int index)
         else
             size = currentMusic[index].totalSamplesLeft;

-        int streamedBytes = stb_vorbis_get_samples_short_interleaved(currentMusic[index].stream, currentMusic[index].ctx->channels, pcm, size);
-        UpdateAudioContext(currentMusic[index].ctx, pcm, streamedBytes * currentMusic[index].ctx->channels);
-        currentMusic[index].totalSamplesLeft -= streamedBytes * currentMusic[index].ctx->channels;
+        for(int x=0; x<numBuffers; x++)
+        {
+            int streamedBytes = stb_vorbis_get_samples_short_interleaved(currentMusic[index].stream, currentMusic[index].mixc->channels, pcm, size);
+            BufferMixChannel(currentMusic[index].mixc, pcm, streamedBytes * currentMusic[index].mixc->channels);
+            currentMusic[index].totalSamplesLeft -= streamedBytes * currentMusic[index].mixc->channels;
+            if(currentMusic[index].totalSamplesLeft <= 0)
+            {
+                active = false;
+                break;
+            }
+        }
     }

-    TraceLog(DEBUG, "Buffering index:%i, chiptune:%i", index, (int)currentMusic[index].chipTune);
-    if(currentMusic[index].totalSamplesLeft <= 0) active = false;
-
     return active;
 }
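One pass through the chiptune branch above is easiest to follow with numbers, taking MUSIC_BUFFER_SIZE_SHORT = 4096 purely for illustration (the real constant is defined earlier in audio.c and may differ):

    size = 4096 / 2 = 2048 sample frames generated by jar_xm_generate_samples_16bit()
    elements pushed = size * 2 = 4096 interleaved shorts per BufferMixChannel() call
    totalSamplesLeft -= 4096 per queued buffer, until it reaches 0 and active becomes false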
@@ -1038,25 +1024,22 @@ static void EmptyMusicStream(int index)
     ALuint buffer = 0;
     int queued = 0;

-    alGetSourcei(currentMusic[index].ctx->alSource, AL_BUFFERS_QUEUED, &queued);
+    alGetSourcei(currentMusic[index].mixc->alSource, AL_BUFFERS_QUEUED, &queued);

     while (queued > 0)
     {
-        alSourceUnqueueBuffers(currentMusic[index].ctx->alSource, 1, &buffer);
+        alSourceUnqueueBuffers(currentMusic[index].mixc->alSource, 1, &buffer);

         queued--;
     }
 }

 //determine if a music stream is ready to be written to
-static bool isMusicStreamReady(int index)
+static int IsMusicStreamReadyForBuffering(int index)
 {
     ALint processed = 0;
-    alGetSourcei(currentMusic[index].ctx->alSource, AL_BUFFERS_PROCESSED, &processed);
-
-    if(processed) return true;
-
-    return false;
+    alGetSourcei(currentMusic[index].mixc->alSource, AL_BUFFERS_PROCESSED, &processed);
+    return processed;
 }

 // Update (re-fill) music buffers if data already processed
@@ -1064,21 +1047,22 @@ void UpdateMusicStream(int index)
 {
     ALenum state;
     bool active = true;
+    int numBuffers = IsMusicStreamReadyForBuffering(index);

-    if (index < MAX_MUSIC_STREAMS && musicEnabled_g && currentMusic[index].ctx && isMusicStreamReady(index))
+    if (currentMusic[index].mixc->playing && index < MAX_MUSIC_STREAMS && musicEnabled_g && currentMusic[index].mixc && numBuffers)
     {
-        active = BufferMusicStream(index);
+        active = BufferMusicStream(index, numBuffers);

-        if (!active && currentMusic[index].loop && currentMusic[index].ctx->playing)
+        if (!active && currentMusic[index].loop)
         {
             if (currentMusic[index].chipTune)
             {
-                currentMusic[index].totalSamplesLeft = currentMusic[index].totalLengthSeconds * currentMusic[index].ctx->sampleRate;
+                currentMusic[index].totalSamplesLeft = currentMusic[index].totalLengthSeconds * 48000;
             }
             else
             {
                 stb_vorbis_seek_start(currentMusic[index].stream);
-                currentMusic[index].totalSamplesLeft = stb_vorbis_stream_length_in_samples(currentMusic[index].stream) * currentMusic[index].ctx->channels;
+                currentMusic[index].totalSamplesLeft = stb_vorbis_stream_length_in_samples(currentMusic[index].stream) * currentMusic[index].mixc->channels;
             }
             active = true;
         }
@@ -1086,9 +1070,9 @@ void UpdateMusicStream(int index)

         if (alGetError() != AL_NO_ERROR) TraceLog(WARNING, "Error buffering data...");

-        alGetSourcei(currentMusic[index].ctx->alSource, AL_SOURCE_STATE, &state);
+        alGetSourcei(currentMusic[index].mixc->alSource, AL_SOURCE_STATE, &state);

-        if (state != AL_PLAYING && active && currentMusic[index].ctx->playing) alSourcePlay(currentMusic[index].ctx->alSource);
+        if (state != AL_PLAYING && active) alSourcePlay(currentMusic[index].mixc->alSource);

         if (!active) StopMusicStream(index);

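With that, the per-frame refill path in audio.c now reads roughly like this; a paraphrase of UpdateMusicStream above (the looping reset is omitted here), not new API:

    int numBuffers = IsMusicStreamReadyForBuffering(index);        // how many OpenAL buffers have been processed
    if (numBuffers > 0)
    {
        bool active = BufferMusicStream(index, numBuffers);        // decode and BufferMixChannel() that many blocks
        if (!active) StopMusicStream(index);                       // stream ended and is not set to loop
    }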
src/audio.h (15 changed lines)
@@ -61,10 +61,7 @@ typedef struct Wave {
     short channels;
 } Wave;

-// Audio Context, used to create custom audio streams that are not bound to a sound file. There can be
-// no more than 4 concurrent audio contexts in use. This is due to each active context being tied to
-// a dedicated mix channel.
-typedef void* AudioContext;
+typedef int RawAudioContext;

 #ifdef __cplusplus
 extern "C" {            // Prevents name mangling of functions
@@ -82,13 +79,6 @@ void InitAudioDevice(void);                                      // Initialize au
 void CloseAudioDevice(void);                                     // Close the audio device and context (and music stream)
 bool IsAudioDeviceReady(void);                                   // True if call to InitAudioDevice() was successful and CloseAudioDevice() has not been called yet

-// Audio contexts are for outputing custom audio waveforms, This will shut down any other sound sources currently playing
-// The mixChannel is what mix channel you want to operate on, 0-3 are the ones available. Each mix channel can only be used one at a time.
-// exmple usage is InitAudioContext(48000, 0, 2, true); // mixchannel 1, 48khz, stereo, floating point
-AudioContext InitAudioContext(unsigned short sampleRate, unsigned char mixChannel, unsigned char channels, bool floatingPoint);
-void CloseAudioContext(AudioContext ctx);                        // Frees audio context
-unsigned short UpdateAudioContext(AudioContext ctx, void *data, unsigned short numberElements); // Pushes more audio data into context mix channel, if NULL is passed to data then zeros are played
-
 Sound LoadSound(char *fileName);                                 // Load sound to memory
 Sound LoadSoundFromWave(Wave wave);                              // Load sound to memory from wave data
 Sound LoadSoundFromRES(const char *rresName, int resId);         // Load sound to memory from rRES file (raylib Resource)
@@ -112,6 +102,9 @@ float GetMusicTimePlayed(int index);                             // Get current m
 int getMusicStreamCount(void);
 void SetMusicPitch(int index, float pitch);

+RawAudioContext InitRawAudioContext(int sampleRate, int channels, bool floatingPoint);
+void CloseRawAudioContext(RawAudioContext ctx);
+
 #ifdef __cplusplus
 }
 #endif
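For callers of the public header, the replacement surface is just the RawAudioContext typedef plus these two functions. A minimal, hedged example of how a user program might drive them; the include choice and the empty body between init and close are assumptions, not part of the commit:

    #include "raylib.h"      // or "audio.h" when the module is built standalone

    int main(void)
    {
        InitAudioDevice();                                          // open the device and OpenAL context

        RawAudioContext ctx = InitRawAudioContext(48000, 2, true);  // grab a free mix channel
        if (ctx >= 0)
        {
            // ... push samples here once a public buffering call is exposed ...
            CloseRawAudioContext(ctx);                              // hand the mix channel back
        }

        CloseAudioDevice();
        return 0;
    }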
src/raylib.h (15 changed lines)
@@ -451,10 +451,7 @@ typedef struct Wave {
     short channels;
 } Wave;

-// Audio Context, used to create custom audio streams that are not bound to a sound file. There can be
-// no more than 4 concurrent audio contexts in use. This is due to each active context being tied to
-// a dedicated mix channel.
-typedef void* AudioContext;
+typedef int RawAudioContext;

 // Texture formats
 // NOTE: Support depends on OpenGL version and platform
@@ -876,13 +873,6 @@ void InitAudioDevice(void);                                      // Initialize au
 void CloseAudioDevice(void);                                     // Close the audio device and context (and music stream)
 bool IsAudioDeviceReady(void);                                   // True if call to InitAudioDevice() was successful and CloseAudioDevice() has not been called yet

-// Audio contexts are for outputing custom audio waveforms, This will shut down any other sound sources currently playing
-// The mixChannel is what mix channel you want to operate on, 0-3 are the ones available. Each mix channel can only be used one at a time.
-// exmple usage is InitAudioContext(48000, 0, 2, true); // mixchannel 1, 48khz, stereo, floating point
-AudioContext InitAudioContext(unsigned short sampleRate, unsigned char mixChannel, unsigned char channels, bool floatingPoint);
-void CloseAudioContext(AudioContext ctx);                        // Frees audio context
-unsigned short UpdateAudioContext(AudioContext ctx, void *data, unsigned short numberElements); // Pushes more audio data into context mix channel, if NULL is passed to data then zeros are played
-
 Sound LoadSound(char *fileName);                                 // Load sound to memory
 Sound LoadSoundFromWave(Wave wave);                              // Load sound to memory from wave data
 Sound LoadSoundFromRES(const char *rresName, int resId);         // Load sound to memory from rRES file (raylib Resource)
@@ -906,6 +896,9 @@ float GetMusicTimePlayed(int index);                             // Get current m
 int getMusicStreamCount(void);
 void SetMusicPitch(int index, float pitch);

+RawAudioContext InitRawAudioContext(int sampleRate, int channels, bool floatingPoint); // used to output raw audio streams, returns negative numbers on error
+void CloseRawAudioContext(RawAudioContext ctx);
+
 #ifdef __cplusplus
 }
 #endif