#include <avrt.h>
#include <mmdeviceapi.h>
#include <functiondiscoverykeys_devpkey.h>
+#include <sstream>
//=============================================================================
//-----------------------------------------------------------------------------
-// In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
-// between HW and the user. The convertBufferWasapi function is used to perform this conversion
-// between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
-// This sample rate converter works best with conversions between one rate and its multiple.
-void convertBufferWasapi( char* outBuffer,
- const char* inBuffer,
- const unsigned int& channelCount,
- const unsigned int& inSampleRate,
- const unsigned int& outSampleRate,
- const unsigned int& inSampleCount,
- unsigned int& outSampleCount,
- const RtAudioFormat& format )
-{
- // calculate the new outSampleCount and relative sampleStep
- float sampleRatio = ( float ) outSampleRate / inSampleRate;
- float sampleRatioInv = ( float ) 1 / sampleRatio;
- float sampleStep = 1.0f / sampleRatio;
- float inSampleFraction = 0.0f;
-
- outSampleCount = ( unsigned int ) std::roundf( inSampleCount * sampleRatio );
-
- // if inSampleRate is a multiple of outSampleRate (or vice versa) there's no need to interpolate
- if ( floor( sampleRatio ) == sampleRatio || floor( sampleRatioInv ) == sampleRatioInv )
- {
- // frame-by-frame, copy each relative input sample into it's corresponding output sample
- for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
- {
- unsigned int inSample = ( unsigned int ) inSampleFraction;
-
- switch ( format )
- {
- case RTAUDIO_SINT8:
- memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
- break;
- case RTAUDIO_SINT16:
- memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
- break;
- case RTAUDIO_SINT24:
- memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
- break;
- case RTAUDIO_SINT32:
- memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
- break;
- case RTAUDIO_FLOAT32:
- memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
- break;
- case RTAUDIO_FLOAT64:
- memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
- break;
- }
-
- // jump to next in sample
- inSampleFraction += sampleStep;
- }
- }
- else // else interpolate
- {
- // frame-by-frame, copy each relative input sample into it's corresponding output sample
- for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
- {
- unsigned int inSample = ( unsigned int ) inSampleFraction;
- float inSampleDec = inSampleFraction - inSample;
- unsigned int frameInSample = inSample * channelCount;
- unsigned int frameOutSample = outSample * channelCount;
-
- switch ( format )
- {
- case RTAUDIO_SINT8:
- {
- for ( unsigned int channel = 0; channel < channelCount; channel++ )
- {
- char fromSample = ( ( char* ) inBuffer )[ frameInSample + channel ];
- char toSample = ( ( char* ) inBuffer )[ frameInSample + channelCount + channel ];
- char sampleDiff = ( char ) ( ( toSample - fromSample ) * inSampleDec );
- ( ( char* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
- }
- break;
- }
- case RTAUDIO_SINT16:
- {
- for ( unsigned int channel = 0; channel < channelCount; channel++ )
- {
- short fromSample = ( ( short* ) inBuffer )[ frameInSample + channel ];
- short toSample = ( ( short* ) inBuffer )[ frameInSample + channelCount + channel ];
- short sampleDiff = ( short ) ( ( toSample - fromSample ) * inSampleDec );
- ( ( short* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
- }
- break;
- }
- case RTAUDIO_SINT24:
- {
- for ( unsigned int channel = 0; channel < channelCount; channel++ )
- {
- int fromSample = ( ( S24* ) inBuffer )[ frameInSample + channel ].asInt();
- int toSample = ( ( S24* ) inBuffer )[ frameInSample + channelCount + channel ].asInt();
- int sampleDiff = ( int ) ( ( toSample - fromSample ) * inSampleDec );
- ( ( S24* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
- }
- break;
- }
- case RTAUDIO_SINT32:
- {
- for ( unsigned int channel = 0; channel < channelCount; channel++ )
- {
- int fromSample = ( ( int* ) inBuffer )[ frameInSample + channel ];
- int toSample = ( ( int* ) inBuffer )[ frameInSample + channelCount + channel ];
- int sampleDiff = ( int ) ( ( toSample - fromSample ) * inSampleDec );
- ( ( int* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
- }
- break;
- }
- case RTAUDIO_FLOAT32:
- {
- for ( unsigned int channel = 0; channel < channelCount; channel++ )
- {
- float fromSample = ( ( float* ) inBuffer )[ frameInSample + channel ];
- float toSample = ( ( float* ) inBuffer )[ frameInSample + channelCount + channel ];
- float sampleDiff = ( toSample - fromSample ) * inSampleDec;
- ( ( float* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
- }
- break;
- }
- case RTAUDIO_FLOAT64:
- {
- for ( unsigned int channel = 0; channel < channelCount; channel++ )
- {
- double fromSample = ( ( double* ) inBuffer )[ frameInSample + channel ];
- double toSample = ( ( double* ) inBuffer )[ frameInSample + channelCount + channel ];
- double sampleDiff = ( toSample - fromSample ) * inSampleDec;
- ( ( double* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
- }
- break;
- }
- }
-
- // jump to next in sample
- inSampleFraction += sampleStep;
- }
- }
-}
-
-//-----------------------------------------------------------------------------
-
// A structure to hold various information related to the WASAPI implementation.
struct WasapiHandle
{
info.duplexChannels = 0;
}
- // sample rates
- info.sampleRates.clear();
-
- // allow support for all sample rates as we have a built-in sample rate converter
- for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
- info.sampleRates.push_back( SAMPLE_RATES[i] );
- }
+ // sample rates (WASAPI supports only the device's native sample rate)
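+ // ( deviceFormat here is the shared-mode mix format, as returned by IAudioClient::GetMixFormat() )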
info.preferredSampleRate = deviceFormat->nSamplesPerSec;
+ info.sampleRates.clear();
+ info.sampleRates.push_back( deviceFormat->nSamplesPerSec );
+
// native format
info.nativeFormats = 0;
WAVEFORMATEX* deviceFormat = NULL;
unsigned int bufferBytes;
stream_.state = STREAM_STOPPED;
+ RtAudio::DeviceInfo deviceInfo;
// create API Handle if not already created
if ( !stream_.apiHandle )
goto Exit;
}
+ deviceInfo = getDeviceInfo( device );
+
+ // validate sample rate
+ if ( sampleRate != deviceInfo.preferredSampleRate )
+ {
+ errorType = RtAudioError::INVALID_USE;
+ std::stringstream ss;
+ ss << "RtApiWasapi::probeDeviceOpen: " << sampleRate
+ << "Hz sample rate not supported. This device only supports "
+ << deviceInfo.preferredSampleRate << "Hz.";
+ errorText_ = ss.str();
+ goto Exit;
+ }
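+ // A minimal caller sketch under this new constraint ( deviceId and myCallback
+ // are placeholder names ): query the device's preferred rate first, then open
+ // the stream at exactly that rate:
+ //
+ // RtAudio audio( RtAudio::WINDOWS_WASAPI );
+ // RtAudio::DeviceInfo info = audio.getDeviceInfo( deviceId );
+ // RtAudio::StreamParameters params;
+ // params.deviceId = deviceId;
+ // params.nChannels = info.outputChannels;
+ // unsigned int bufferFrames = 512;
+ // audio.openStream( &params, NULL, RTAUDIO_FLOAT32,
+ //                   info.preferredSampleRate, &bufferFrames, &myCallback );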
+
// determine whether index falls within capture or render devices
if ( device >= renderDeviceCount ) {
if ( mode != INPUT ) {
stream_.nUserChannels[mode] = channels;
stream_.channelOffset[mode] = firstChannel;
stream_.userFormat = format;
- stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
+ stream_.deviceFormat[mode] = deviceInfo.nativeFormats;
if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
stream_.userInterleaved = false;
WAVEFORMATEX* captureFormat = NULL;
WAVEFORMATEX* renderFormat = NULL;
- float captureSrRatio = 0.0f;
- float renderSrRatio = 0.0f;
WasapiBuffer captureBuffer;
WasapiBuffer renderBuffer;
unsigned long captureFlags = 0;
unsigned int bufferFrameCount = 0;
unsigned int numFramesPadding = 0;
- unsigned int convBufferSize = 0;
bool callbackPushed = false;
bool callbackPulled = false;
bool callbackStopped = false;
int callbackResult = 0;
- // convBuffer is used to store converted buffers between WASAPI and the user
- char* convBuffer = NULL;
- unsigned int convBuffSize = 0;
unsigned int deviceBuffSize = 0;
errorText_.clear();
goto Exit;
}
- captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
-
- // initialize capture stream according to desire buffer size
- float desiredBufferSize = stream_.bufferSize * captureSrRatio;
- REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
+ // initialize capture stream according to desired buffer size
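+ // ( a REFERENCE_TIME tick is 100 nanoseconds, hence the 10000000 factor below )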
+ REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / captureFormat->nSamplesPerSec );
if ( !captureClient ) {
hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
}
- // scale outBufferSize according to stream->user sample rate ratio
+ // size outBufferSize to one user buffer of device samples ( bufferSize frames * device channels )
- unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
+ unsigned int outBufferSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT];
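+ // ( e.g. bufferSize = 512 frames with 2 device channels gives outBufferSize = 1024 samples )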
inBufferSize *= stream_.nDeviceChannels[INPUT];
// set captureBuffer size
goto Exit;
}
- renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
-
- // initialize render stream according to desire buffer size
- float desiredBufferSize = stream_.bufferSize * renderSrRatio;
- REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
+ // initialize render stream according to desired buffer size
+ REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / renderFormat->nSamplesPerSec );
if ( !renderClient ) {
hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
}
- // scale inBufferSize according to user->stream sample rate ratio
+ // size inBufferSize to one user buffer of device samples ( bufferSize frames * device channels )
- unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
+ unsigned int inBufferSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT];
outBufferSize *= stream_.nDeviceChannels[OUTPUT];
// set renderBuffer size
}
if ( stream_.mode == INPUT ) {
- convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
}
else if ( stream_.mode == OUTPUT ) {
- convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
}
else if ( stream_.mode == DUPLEX ) {
- convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
- ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
}
- convBuffer = ( char* ) malloc( convBuffSize );
stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
- if ( !convBuffer || !stream_.deviceBuffer ) {
+ if ( !stream_.deviceBuffer ) {
errorType = RtAudioError::MEMORY_ERROR;
errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
goto Exit;
// Callback Input
// ==============
// 1. Pull callback buffer from inputBuffer
- // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
- // Convert callback buffer to user format
+ // 2. If 1. was successful: Convert callback buffer to user format
if ( captureAudioClient ) {
// Pull callback buffer from inputBuffer
- callbackPulled = captureBuffer.pullBuffer( convBuffer,
- ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
+ callbackPulled = captureBuffer.pullBuffer( stream_.deviceBuffer,
+ stream_.bufferSize * stream_.nDeviceChannels[INPUT],
stream_.deviceFormat[INPUT] );
if ( callbackPulled ) {
- // Convert callback buffer to user sample rate
- convertBufferWasapi( stream_.deviceBuffer,
- convBuffer,
- stream_.nDeviceChannels[INPUT],
- captureFormat->nSamplesPerSec,
- stream_.sampleRate,
- ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
- convBufferSize,
- stream_.deviceFormat[INPUT] );
-
if ( stream_.doConvertBuffer[INPUT] ) {
// Convert callback buffer to user format
convertBuffer( stream_.userBuffer[INPUT],
// Callback Output
// ===============
// 1. Convert callback buffer to stream format
- // 2. Convert callback buffer to stream sample rate and channel count
- // 3. Push callback buffer into outputBuffer
+ // 2. Push callback buffer into outputBuffer
if ( renderAudioClient && callbackPulled ) {
if ( stream_.doConvertBuffer[OUTPUT] ) {
}
- // Convert callback buffer to stream sample rate
- convertBufferWasapi( convBuffer,
- stream_.deviceBuffer,
- stream_.nDeviceChannels[OUTPUT],
- stream_.sampleRate,
- renderFormat->nSamplesPerSec,
- stream_.bufferSize,
- convBufferSize,
- stream_.deviceFormat[OUTPUT] );
-
// Push callback buffer into outputBuffer
- callbackPushed = renderBuffer.pushBuffer( convBuffer,
- convBufferSize * stream_.nDeviceChannels[OUTPUT],
+ callbackPushed = renderBuffer.pushBuffer( stream_.deviceBuffer,
+ stream_.bufferSize * stream_.nDeviceChannels[OUTPUT],
stream_.deviceFormat[OUTPUT] );
}
else {
CoTaskMemFree( captureFormat );
CoTaskMemFree( renderFormat );
- free ( convBuffer );
-
CoUninitialize();
// update stream state