From: Marcus Tomlinson
Date: Sat, 23 Dec 2017 08:45:10 +0000 (+0200)
Subject: Revert strict sample rate selection in WASAPI API
X-Git-Url: https://git.carlh.net/gitweb/?p=rtaudio-cdist.git;a=commitdiff_plain;h=7a179f6a59380f40578d353c0f77f4d47627b603

Revert strict sample rate selection in WASAPI API
---

diff --git a/RtAudio.cpp b/RtAudio.cpp
index ea0c202..db7ccd8 100644
--- a/RtAudio.cpp
+++ b/RtAudio.cpp
@@ -3693,7 +3693,6 @@ static const char* getAsioErrorString( ASIOError result )
 #include
 #include
 #include
-#include <sstream>

 //=============================================================================

@@ -3867,6 +3866,152 @@ private:

 //-----------------------------------------------------------------------------

+// In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
+// between HW and the user. The convertBufferWasapi function is used to perform this conversion
+// between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
+// This sample rate converter works best with conversions between one rate and an integer multiple of it.
+void convertBufferWasapi( char* outBuffer,
+                          const char* inBuffer,
+                          const unsigned int& channelCount,
+                          const unsigned int& inSampleRate,
+                          const unsigned int& outSampleRate,
+                          const unsigned int& inSampleCount,
+                          unsigned int& outSampleCount,
+                          const RtAudioFormat& format )
+{
+  // calculate the new outSampleCount and relative sampleStep
+  float sampleRatio = ( float ) outSampleRate / inSampleRate;
+  float sampleRatioInv = ( float ) 1 / sampleRatio;
+  float sampleStep = 1.0f / sampleRatio;
+  float inSampleFraction = 0.0f;
+
+  // for cmath functions
+  using namespace std;
+
+  outSampleCount = ( unsigned int ) roundf( inSampleCount * sampleRatio );
+
+  // if inSampleRate is a multiple of outSampleRate (or vice versa) there's no need to interpolate
+  if ( floor( sampleRatio ) == sampleRatio || floor( sampleRatioInv ) == sampleRatioInv )
+  {
+    // frame-by-frame, copy each relative input sample into its corresponding output sample
+    for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
+    {
+      unsigned int inSample = ( unsigned int ) inSampleFraction;
+
+      switch ( format )
+      {
+        case RTAUDIO_SINT8:
+          memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
+          break;
+        case RTAUDIO_SINT16:
+          memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
+          break;
+        case RTAUDIO_SINT24:
+          memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
+          break;
+        case RTAUDIO_SINT32:
+          memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
+          break;
+        case RTAUDIO_FLOAT32:
+          memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
+          break;
+        case RTAUDIO_FLOAT64:
+          memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
+          break;
+      }
+
+      // jump to the next input sample
+      inSampleFraction += sampleStep;
+    }
+  }
+  else // interpolate
+  {
+    // frame-by-frame, copy each relative input sample into its corresponding output sample
+    for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
+    {
+      unsigned int inSample = ( unsigned int ) inSampleFraction;
+      float inSampleDec = inSampleFraction - inSample;
+      unsigned int frameInSample = inSample * channelCount;
+      unsigned int frameOutSample = outSample * channelCount;
+
+      switch ( format )
+      {
+        case RTAUDIO_SINT8:
+        {
+          for ( unsigned int channel = 0; channel < channelCount; channel++ )
+          {
+            char fromSample = ( ( char* ) inBuffer )[ frameInSample + channel ];
+            char toSample = ( ( char* ) inBuffer )[ frameInSample + channelCount + channel ];
+            char sampleDiff = ( char ) ( ( toSample - fromSample ) * inSampleDec );
+            ( ( char* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
+          }
+          break;
+        }
+        case RTAUDIO_SINT16:
+        {
+          for ( unsigned int channel = 0; channel < channelCount; channel++ )
+          {
+            short fromSample = ( ( short* ) inBuffer )[ frameInSample + channel ];
+            short toSample = ( ( short* ) inBuffer )[ frameInSample + channelCount + channel ];
+            short sampleDiff = ( short ) ( ( toSample - fromSample ) * inSampleDec );
+            ( ( short* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
+          }
+          break;
+        }
+        case RTAUDIO_SINT24:
+        {
+          for ( unsigned int channel = 0; channel < channelCount; channel++ )
+          {
+            int fromSample = ( ( S24* ) inBuffer )[ frameInSample + channel ].asInt();
+            int toSample = ( ( S24* ) inBuffer )[ frameInSample + channelCount + channel ].asInt();
+            int sampleDiff = ( int ) ( ( toSample - fromSample ) * inSampleDec );
+            ( ( S24* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
+          }
+          break;
+        }
+        case RTAUDIO_SINT32:
+        {
+          for ( unsigned int channel = 0; channel < channelCount; channel++ )
+          {
+            int fromSample = ( ( int* ) inBuffer )[ frameInSample + channel ];
+            int toSample = ( ( int* ) inBuffer )[ frameInSample + channelCount + channel ];
+            int sampleDiff = ( int ) ( ( toSample - fromSample ) * inSampleDec );
+            ( ( int* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
+          }
+          break;
+        }
+        case RTAUDIO_FLOAT32:
+        {
+          for ( unsigned int channel = 0; channel < channelCount; channel++ )
+          {
+            float fromSample = ( ( float* ) inBuffer )[ frameInSample + channel ];
+            float toSample = ( ( float* ) inBuffer )[ frameInSample + channelCount + channel ];
+            float sampleDiff = ( toSample - fromSample ) * inSampleDec;
+            ( ( float* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
+          }
+          break;
+        }
+        case RTAUDIO_FLOAT64:
+        {
+          for ( unsigned int channel = 0; channel < channelCount; channel++ )
+          {
+            double fromSample = ( ( double* ) inBuffer )[ frameInSample + channel ];
+            double toSample = ( ( double* ) inBuffer )[ frameInSample + channelCount + channel ];
+            double sampleDiff = ( toSample - fromSample ) * inSampleDec;
+            ( ( double* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
+          }
+          break;
+        }
+      }
+
+      // jump to the next input sample
+      inSampleFraction += sampleStep;
+    }
+  }
+}
+
+//-----------------------------------------------------------------------------
+
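For readers unfamiliar with the scheme above: convertBufferWasapi walks through the input at a fixed fractional step and, when the two rates are not integer-related, linearly blends each pair of adjacent input frames. A minimal mono, float-only sketch of the same idea (illustrative only; resampleLinear and its shape are not part of RtAudio or this patch):

#include <cmath>
#include <vector>

std::vector<float> resampleLinear( const std::vector<float>& in,
                                   unsigned int inRate, unsigned int outRate )
{
  if ( in.empty() ) return std::vector<float>();

  float step = ( float ) inRate / outRate; // input frames advanced per output frame
  unsigned int outCount = ( unsigned int ) std::roundf( in.size() * ( float ) outRate / inRate );
  std::vector<float> out( outCount );

  float pos = 0.0f;
  for ( unsigned int n = 0; n < outCount; n++, pos += step ) {
    unsigned int i = ( unsigned int ) pos;                   // integer part: the "from" frame
    if ( i >= in.size() ) i = ( unsigned int ) in.size() - 1; // guard against float drift
    float frac = pos - i;                                    // fractional part toward the next frame
    float from = in[ i ];
    // clamp at the final frame instead of reading one past the end
    float to = ( i + 1 < in.size() ) ? in[ i + 1 ] : from;
    out[ n ] = from + ( to - from ) * frac;                  // two-point linear interpolation
  }
  return out;
}

The one deliberate difference: this sketch clamps at the last input frame, whereas the interpolating branch above indexes frameInSample + channelCount unconditionally, even for the final input frame.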
 // A structure to hold various information related to the WASAPI implementation.
 struct WasapiHandle
 {
@@ -4132,11 +4277,14 @@ RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
     info.duplexChannels = 0;
   }

-  // sample rates (WASAPI only supports the one native sample rate)
-  info.preferredSampleRate = deviceFormat->nSamplesPerSec;
-
+  // sample rates
   info.sampleRates.clear();
-  info.sampleRates.push_back( deviceFormat->nSamplesPerSec );
+
+  // allow support for all sample rates as we have a built-in sample rate converter
+  for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
+    info.sampleRates.push_back( SAMPLE_RATES[i] );
+  }
+  info.preferredSampleRate = deviceFormat->nSamplesPerSec;

   // native format
   info.nativeFormats = 0;
@@ -4413,7 +4561,6 @@ bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigne
   WAVEFORMATEX* deviceFormat = NULL;
   unsigned int bufferBytes;
   stream_.state = STREAM_STOPPED;
-  RtAudio::DeviceInfo deviceInfo;

   // create API Handle if not already created
   if ( !stream_.apiHandle )
@@ -4454,20 +4601,6 @@ bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigne
     goto Exit;
   }

-  deviceInfo = getDeviceInfo( device );
-
-  // validate sample rate
-  if ( sampleRate != deviceInfo.preferredSampleRate )
-  {
-    errorType = RtAudioError::INVALID_USE;
-    std::stringstream ss;
-    ss << "RtApiWasapi::probeDeviceOpen: " << sampleRate
-       << "Hz sample rate not supported. This device only supports "
-       << deviceInfo.preferredSampleRate << "Hz.";
-    errorText_ = ss.str();
-    goto Exit;
-  }
-
   // determine whether index falls within capture or render devices
   if ( device >= renderDeviceCount ) {
     if ( mode != INPUT ) {
@@ -4551,7 +4684,7 @@ bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigne
   stream_.nUserChannels[mode] = channels;
   stream_.channelOffset[mode] = firstChannel;
   stream_.userFormat = format;
-  stream_.deviceFormat[mode] = deviceInfo.nativeFormats;
+  stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;

   if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
@@ -4651,6 +4784,8 @@ void RtApiWasapi::wasapiThread()

   WAVEFORMATEX* captureFormat = NULL;
   WAVEFORMATEX* renderFormat = NULL;
+  float captureSrRatio = 0.0f;
+  float renderSrRatio = 0.0f;
   WasapiBuffer captureBuffer;
   WasapiBuffer renderBuffer;
@@ -4660,11 +4795,15 @@ void RtApiWasapi::wasapiThread()
   unsigned long captureFlags = 0;
   unsigned int bufferFrameCount = 0;
   unsigned int numFramesPadding = 0;
+  unsigned int convBufferSize = 0;
   bool callbackPushed = false;
   bool callbackPulled = false;
   bool callbackStopped = false;
   int callbackResult = 0;

+  // convBuffer is used to store converted buffers between WASAPI and the user
+  char* convBuffer = NULL;
+  unsigned int convBuffSize = 0;
   unsigned int deviceBuffSize = 0;

   errorText_.clear();
@@ -4687,8 +4826,11 @@ void RtApiWasapi::wasapiThread()
     goto Exit;
   }

+  captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
+
   // initialize capture stream according to desired buffer size
-  REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / captureFormat->nSamplesPerSec );
+  float desiredBufferSize = stream_.bufferSize * captureSrRatio;
+  REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );

   if ( !captureClient ) {
     hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
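The REFERENCE_TIME passed to IAudioClient::Initialize counts 100-nanosecond units, so the expression frames * 10000000 / rate turns a frame count at a given rate into a duration. A standalone computation with assumed numbers (mine, not from the patch) shows why bufferSize is scaled by captureSrRatio first — the device-side buffer then spans the same wall-clock time as the user's buffer:

#include <cstdio>

int main()
{
  // Assumed: 512-frame user buffer, 44100 Hz user stream, 48000 Hz capture device.
  float captureSrRatio = 48000.0f / 44100.0f;      // device frames per user frame (~1.088)
  float desiredBufferSize = 512 * captureSrRatio;  // ~557.3 device frames
  // REFERENCE_TIME counts 100 ns units; long long stands in for it here.
  long long desiredBufferPeriod = ( long long ) ( desiredBufferSize * 10000000 / 48000 );
  printf( "%lld\n", desiredBufferPeriod );         // ~116099, i.e. ~11.6 ms
  return 0;
}

512 frames at 44100 Hz and ~557 frames at 48000 Hz both last about 11.6 ms, so the two sides exchange equal-duration buffers.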
@@ -4735,7 +4877,7 @@
   }

   // scale outBufferSize according to stream->user sample rate ratio
-  unsigned int outBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT];
+  unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
   inBufferSize *= stream_.nDeviceChannels[INPUT];

   // set captureBuffer size
@@ -4764,8 +4906,11 @@
     goto Exit;
   }

+  renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
+
   // initialize render stream according to desired buffer size
-  REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / renderFormat->nSamplesPerSec );
+  float desiredBufferSize = stream_.bufferSize * renderSrRatio;
+  REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );

   if ( !renderClient ) {
     hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
@@ -4812,7 +4957,7 @@
   }

   // scale inBufferSize according to user->stream sample rate ratio
-  unsigned int inBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[OUTPUT];
+  unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
   outBufferSize *= stream_.nDeviceChannels[OUTPUT];

   // set renderBuffer size
@@ -4835,18 +4980,23 @@
   if ( stream_.mode == INPUT ) {
     using namespace std; // for roundf
+    convBuffSize = ( size_t ) roundf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
     deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
   }
   else if ( stream_.mode == OUTPUT ) {
+    convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
     deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
   }
   else if ( stream_.mode == DUPLEX ) {
+    convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
+                             ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
     deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
                                stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
   }

+  convBuffer = ( char* ) malloc( convBuffSize );
   stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
-  if ( !stream_.deviceBuffer ) {
+  if ( !convBuffer || !stream_.deviceBuffer ) {
     errorType = RtAudioError::MEMORY_ERROR;
     errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
     goto Exit;
   }
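convBuffer holds audio at the device rate while stream_.deviceBuffer holds the same interval at the user rate, so the two allocations differ by the sample-rate ratio. A small standalone computation with assumed numbers (stereo FLOAT32, 512-frame user buffer, 44100 Hz user rate, 48000 Hz capture device — all illustrative, not from the patch):

#include <cmath>
#include <cstddef>
#include <cstdio>

int main()
{
  unsigned int bufferSize = 512, channels = 2, bytesPerSample = 4; // FLOAT32
  float captureSrRatio = 48000.0f / 44100.0f;
  // conversion buffer holds device-rate frames; device buffer holds user-rate frames
  size_t convBuffSize = ( size_t ) std::roundf( bufferSize * captureSrRatio ) * channels * bytesPerSample;
  size_t deviceBuffSize = ( size_t ) bufferSize * channels * bytesPerSample;
  printf( "conv=%zu device=%zu\n", convBuffSize, deviceBuffSize ); // conv=4456 device=4096
  return 0;
}

In DUPLEX mode the code above takes the larger of the capture and render figures, since one convBuffer is shared by both directions.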
@@ -4858,15 +5008,26 @@
   // Callback Input
   // ==============
   // 1. Pull callback buffer from inputBuffer
-  // 2. If 1. was successful: Convert callback buffer to user format
+  // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
+  //                          Convert callback buffer to user format

   if ( captureAudioClient ) {
     // Pull callback buffer from inputBuffer
-    callbackPulled = captureBuffer.pullBuffer( stream_.deviceBuffer,
-                                               ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT],
+    callbackPulled = captureBuffer.pullBuffer( convBuffer,
+                                               ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
                                                stream_.deviceFormat[INPUT] );

     if ( callbackPulled ) {
+      // Convert callback buffer to user sample rate
+      convertBufferWasapi( stream_.deviceBuffer,
+                           convBuffer,
+                           stream_.nDeviceChannels[INPUT],
+                           captureFormat->nSamplesPerSec,
+                           stream_.sampleRate,
+                           ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
+                           convBufferSize,
+                           stream_.deviceFormat[INPUT] );
+
       if ( stream_.doConvertBuffer[INPUT] ) {
         // Convert callback buffer to user format
         convertBuffer( stream_.userBuffer[INPUT],
@@ -4940,7 +5101,8 @@
   // Callback Output
   // ===============
   // 1. Convert callback buffer to stream format
-  // 2. Push callback buffer into outputBuffer
+  // 2. Convert callback buffer to stream sample rate and channel count
+  // 3. Push callback buffer into outputBuffer

   if ( renderAudioClient && callbackPulled ) {
     if ( stream_.doConvertBuffer[OUTPUT] ) {
@@ -4951,9 +5113,19 @@
     }

+    // Convert callback buffer to stream sample rate
+    convertBufferWasapi( convBuffer,
+                         stream_.deviceBuffer,
+                         stream_.nDeviceChannels[OUTPUT],
+                         stream_.sampleRate,
+                         renderFormat->nSamplesPerSec,
+                         stream_.bufferSize,
+                         convBufferSize,
+                         stream_.deviceFormat[OUTPUT] );
+
     // Push callback buffer into outputBuffer
-    callbackPushed = renderBuffer.pushBuffer( stream_.deviceBuffer,
-                                              stream_.bufferSize * stream_.nDeviceChannels[OUTPUT],
+    callbackPushed = renderBuffer.pushBuffer( convBuffer,
+                                              convBufferSize * stream_.nDeviceChannels[OUTPUT],
                                               stream_.deviceFormat[OUTPUT] );
   }
   else {
@@ -5099,6 +5271,8 @@ Exit:
   CoTaskMemFree( captureFormat );
   CoTaskMemFree( renderFormat );

+  free ( convBuffer );
+
   CoUninitialize();

   // update stream state
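With the strict-rate check reverted, getDeviceInfo() again advertises every entry of SAMPLE_RATES and probeDeviceOpen() accepts a rate that differs from the device's mix format; the converter above bridges the gap in the callback loop. A minimal usage sketch against RtAudio's public API (the callback body and the numbers are illustrative, not from this patch):

#include "RtAudio.h"
#include <cstring>

// Trivial render callback: emits 2 channels of float silence.
int render( void* outputBuffer, void* /*inputBuffer*/, unsigned int nFrames,
            double /*streamTime*/, RtAudioStreamStatus /*status*/, void* /*userData*/ )
{
  memset( outputBuffer, 0, nFrames * 2 * sizeof( float ) );
  return 0;
}

int main()
{
  RtAudio dac( RtAudio::WINDOWS_WASAPI );
  RtAudio::StreamParameters params;
  params.deviceId = dac.getDefaultOutputDevice();
  params.nChannels = 2;
  unsigned int bufferFrames = 512;
  // 44100 no longer has to match the device's native mix rate; the WASAPI
  // backend resamples between the user rate and the device rate internally.
  dac.openStream( &params, NULL, RTAUDIO_FLOAT32, 44100, &bufferFrames, &render );
  dac.startStream();
  // ... stream runs; stopStream()/closeStream() omitted for brevity
  return 0;
}

In this era of the API, openStream() throws an RtAudioError on failure, so before this revert the same call would fail outright whenever 44100 differed from the device's native rate.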