#include <Wmcodecdsp.h>
#pragma comment( lib, "mfplat.lib" )
+#pragma comment( lib, "mfuuid.lib" )
#pragma comment( lib, "wmcodecdspuuid" )
//=============================================================================
float renderSrRatio = 0.0f;
WasapiBuffer captureBuffer;
WasapiBuffer renderBuffer;
+ WasapiResampler* captureResampler = NULL;
+ WasapiResampler* renderResampler = NULL;
// declare local stream variables
RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
unsigned int bufferFrameCount = 0;
unsigned int numFramesPadding = 0;
unsigned int convBufferSize = 0;
- bool callbackPushed = false;
+ bool callbackPushed = true;
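+ // callbackPushed now starts out true so that the first pass through the
+ // render logic below takes the convert-and-push path ( see the
+ // callbackPushed guard further down )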
bool callbackPulled = false;
bool callbackStopped = false;
int callbackResult = 0;
goto Exit;
}
+ // init captureResampler
+ captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
+ formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
+ captureFormat->nSamplesPerSec, stream_.sampleRate );
+
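+ // The WasapiResampler constructor signature assumed by this call ( the class
+ // is defined elsewhere in this patch ) appears to be:
+ //   WasapiResampler( bool isFloat, unsigned int bitsPerSample,
+ //                    unsigned int channelCount,
+ //                    unsigned int inSampleRate, unsigned int outSampleRate );
+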
captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
// initialize capture stream according to the desired buffer size
}
// scale outBufferSize according to stream->user sample rate ratio
- unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
+ unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
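+ // ( ceilf instead of truncation: rounding the device frame count up keeps
+ //   the buffer sizing below from coming up one frame short of what a full
+ //   user buffer requires )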
inBufferSize *= stream_.nDeviceChannels[INPUT];
// set captureBuffer size
goto Exit;
}
+ // init renderResampler
+ renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
+ formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
+ stream_.sampleRate, renderFormat->nSamplesPerSec );
+
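+ // note the reversed rate arguments relative to the capture resampler above:
+ // for render, the user rate ( stream_.sampleRate ) is the input and the
+ // device rate ( renderFormat->nSamplesPerSec ) is the output
+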
renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
// initialize render stream according to the desired buffer size
}
// scale inBufferSize according to user->stream sample rate ratio
- unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
+ unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
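+ // ( rounded up with ceilf for the same reason as the capture side above )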
outBufferSize *= stream_.nDeviceChannels[OUTPUT];
// set renderBuffer size
}
}
- if ( stream_.mode == INPUT ) {
- using namespace std; // for roundf
- convBuffSize = ( size_t ) roundf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
+ // malloc buffer memory
+ if ( stream_.mode == INPUT )
+ {
+ using namespace std; // for ceilf
+ convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
}
- else if ( stream_.mode == OUTPUT ) {
- convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
+ else if ( stream_.mode == OUTPUT )
+ {
+ convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
}
- else if ( stream_.mode == DUPLEX ) {
- convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
- ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
+ else if ( stream_.mode == DUPLEX )
+ {
+ convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
+ ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
}
+ convBuffSize *= 2; // allow overflow for *SrRatio remainders
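+ // e.g. with a 44100 Hz user stream on a 48000 Hz device and bufferSize = 512,
+ // the nominal size is ceilf( 512 * ( 48000.f / 44100 ) ) = 558 frames;
+ // doubling that presumably leaves headroom for the fractional frames the
+ // resampler carries over between callbacks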
convBuffer = ( char* ) malloc( convBuffSize );
stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
if ( !convBuffer || !stream_.deviceBuffer ) {
// 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
//                          Convert callback buffer to user format
- if ( captureAudioClient ) {
- // Pull callback buffer from inputBuffer
- callbackPulled = captureBuffer.pullBuffer( convBuffer,
- ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
- stream_.deviceFormat[INPUT] );
+ if ( captureAudioClient )
+ {
+ int samplesToPull = ( int ) floorf( stream_.bufferSize * captureSrRatio );
+ if ( captureSrRatio != 1 )
+ {
+ // account for remainders
+ samplesToPull--;
+ }
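+ // ( presumably: stream_.deviceBuffer holds exactly stream_.bufferSize output
+ //   frames, so pulling one device frame less keeps the first Convert() call
+ //   below from overshooting it; the loop then tops up one frame at a time )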
+
+ convBufferSize = 0;
+ while ( convBufferSize < stream_.bufferSize )
+ {
+ // Pull callback buffer from inputBuffer
+ callbackPulled = captureBuffer.pullBuffer( convBuffer,
+ samplesToPull * stream_.nDeviceChannels[INPUT],
+ stream_.deviceFormat[INPUT] );
+
+ if ( !callbackPulled )
+ {
+ break;
+ }
- if ( callbackPulled ) {
// Convert callback buffer to user sample rate
- convertBufferWasapi( stream_.deviceBuffer,
- convBuffer,
- stream_.nDeviceChannels[INPUT],
- captureFormat->nSamplesPerSec,
- stream_.sampleRate,
- ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
- convBufferSize,
- stream_.deviceFormat[INPUT] );
+ unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
+ unsigned int convSamples = 0;
+
+ captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
+ convBuffer,
+ samplesToPull,
+ convSamples );
+
+ convBufferSize += convSamples;
+ samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
+ }
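+ // The WasapiResampler::Convert() signature assumed by the call above appears
+ // to be:
+ //   void Convert( char* outBuffer, const char* inBuffer,
+ //                 unsigned int inSampleCount, unsigned int& outSampleCount );
+ // where outSampleCount receives the number of frames actually produced.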
+ if ( callbackPulled )
+ {
if ( stream_.doConvertBuffer[INPUT] ) {
// Convert callback buffer to user format
convertBuffer( stream_.userBuffer[INPUT],
// 2. Convert callback buffer to stream sample rate and channel count
// 3. Push callback buffer into outputBuffer
- if ( renderAudioClient && callbackPulled ) {
- if ( stream_.doConvertBuffer[OUTPUT] ) {
- // Convert callback buffer to stream format
- convertBuffer( stream_.deviceBuffer,
- stream_.userBuffer[OUTPUT],
- stream_.convertInfo[OUTPUT] );
+ if ( renderAudioClient && callbackPulled )
+ {
+ // if the last call to renderBuffer.pushBuffer() was successful,
+ // or nothing has been converted yet
+ if ( callbackPushed || convBufferSize == 0 )
+ {
+ if ( stream_.doConvertBuffer[OUTPUT] )
+ {
+ // Convert callback buffer to stream format
+ convertBuffer( stream_.deviceBuffer,
+ stream_.userBuffer[OUTPUT],
+ stream_.convertInfo[OUTPUT] );
- }
+ }
- // Convert callback buffer to stream sample rate
- convertBufferWasapi( convBuffer,
- stream_.deviceBuffer,
- stream_.nDeviceChannels[OUTPUT],
- stream_.sampleRate,
- renderFormat->nSamplesPerSec,
- stream_.bufferSize,
- convBufferSize,
- stream_.deviceFormat[OUTPUT] );
+ // Convert callback buffer to stream sample rate
+ renderResampler->Convert( convBuffer,
+ stream_.deviceBuffer,
+ stream_.bufferSize,
+ convBufferSize );
+ }
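+ // if the push below fails ( outputBuffer full ), callbackPushed stays false
+ // and the guard above skips re-conversion on the next pass; the same
+ // converted data in convBuffer is simply pushed again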
// Push callback buffer into outputBuffer
callbackPushed = renderBuffer.pushBuffer( convBuffer,
// if the callback buffer was pushed into renderBuffer, reset the callbackPulled flag
if ( callbackPushed ) {
+ // unsetting the callbackPulled flag lets the stream know that
+ // the audio device is ready for another callback output buffer.
callbackPulled = false;
+
// tick stream time
RtApi::tickStreamTime();
}
CoTaskMemFree( renderFormat );
free ( convBuffer );
+ delete renderResampler;
+ delete captureResampler;
CoUninitialize();