diff options
| author | Marcus Tomlinson <themarcustomlinson@gmail.com> | 2014-04-26 11:01:40 +0200 |
|---|---|---|
| committer | Marcus Tomlinson <themarcustomlinson@gmail.com> | 2014-04-26 11:01:40 +0200 |
| commit | 4411d7c79eaf3994ac7a26f291db68ef43c3f429 (patch) | |
| tree | ed2a2d7423da21f7783be7cfe73379f4a00d4376 /RtAudio.cpp | |
| parent | 1a14e053f746c3b783e853f21659f36bdbeac826 (diff) | |
Let convertBuffer() take care of all channel count conversion, while convertBufferWasapi() does only sample rate conversion.
Diffstat (limited to 'RtAudio.cpp')
| -rw-r--r-- | RtAudio.cpp | 76 |
1 file changed, 31 insertions, 45 deletions
diff --git a/RtAudio.cpp b/RtAudio.cpp index 3af8029..6ab1615 100644 --- a/RtAudio.cpp +++ b/RtAudio.cpp @@ -3585,7 +3585,7 @@ static const char* getAsioErrorString( ASIOError result ) // - Introduces support for the Windows WASAPI API
// - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
// - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
-// - Includes automatic internal conversion of sample rate, buffer size and channel count
+// - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
#ifndef INITGUID
#define INITGUID
@@ -3767,15 +3767,14 @@ private: //-----------------------------------------------------------------------------
-// In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate and
-// channel counts between HW and the user. The convertBufferWasapi function is used to perform
-// these conversions between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
+// In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
+// between HW and the user. The convertBufferWasapi function is used to perform this conversion
+// between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
// This sample rate converter favors speed over quality, and works best with conversions between
// one rate and its multiple.
void convertBufferWasapi( char* outBuffer,
const char* inBuffer,
- const unsigned int& inChannelCount,
- const unsigned int& outChannelCount,
+ const unsigned int& channelCount,
const unsigned int& inSampleRate,
const unsigned int& outSampleRate,
const unsigned int& inSampleCount,
@@ -3786,7 +3785,6 @@ void convertBufferWasapi( char* outBuffer, float sampleRatio = ( float ) outSampleRate / inSampleRate;
float sampleStep = 1.0f / sampleRatio;
float inSampleFraction = 0.0f;
- unsigned int commonChannelCount = std::min( inChannelCount, outChannelCount );
outSampleCount = ( unsigned int ) ( inSampleCount * sampleRatio );
@@ -3798,22 +3796,22 @@ void convertBufferWasapi( char* outBuffer, switch ( format )
{
case RTAUDIO_SINT8:
- memcpy( &( ( char* ) outBuffer )[ outSample * outChannelCount ], &( ( char* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( char ) );
+ memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
break;
case RTAUDIO_SINT16:
- memcpy( &( ( short* ) outBuffer )[ outSample * outChannelCount ], &( ( short* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( short ) );
+ memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
break;
case RTAUDIO_SINT24:
- memcpy( &( ( S24* ) outBuffer )[ outSample * outChannelCount ], &( ( S24* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( S24 ) );
+ memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
break;
case RTAUDIO_SINT32:
- memcpy( &( ( int* ) outBuffer )[ outSample * outChannelCount ], &( ( int* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( int ) );
+ memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
break;
case RTAUDIO_FLOAT32:
- memcpy( &( ( float* ) outBuffer )[ outSample * outChannelCount ], &( ( float* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( float ) );
+ memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
break;
case RTAUDIO_FLOAT64:
- memcpy( &( ( double* ) outBuffer )[ outSample * outChannelCount ], &( ( double* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( double ) );
+ memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
break;
}
@@ -4511,10 +4509,11 @@ bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigne // Set flags for buffer conversion.
stream_.doConvertBuffer[mode] = false;
- if ( stream_.userFormat != stream_.deviceFormat[mode] )
+ if ( stream_.userFormat != stream_.deviceFormat[mode] ||
+ stream_.nUserChannels != stream_.nDeviceChannels )
stream_.doConvertBuffer[mode] = true;
- if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
- stream_.nUserChannels[mode] > 1 )
+ else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
+ stream_.nUserChannels[mode] > 1 )
stream_.doConvertBuffer[mode] = true;
if ( stream_.doConvertBuffer[mode] )
@@ -4833,11 +4832,10 @@ void RtApiWasapi::wasapiThread() stream_.deviceFormat[INPUT] );
if ( callbackPulled ) {
- // Convert callback buffer to user sample rate and channel count
+ // Convert callback buffer to user sample rate
convertBufferWasapi( stream_.deviceBuffer,
convBuffer,
stream_.nDeviceChannels[INPUT],
- stream_.nUserChannels[INPUT],
captureFormat->nSamplesPerSec,
stream_.sampleRate,
( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
@@ -4851,7 +4849,7 @@ void RtApiWasapi::wasapiThread() stream_.convertInfo[INPUT] );
}
else {
- // no conversion, simple copy deviceBuffer to userBuffer
+ // no further conversion, simple copy deviceBuffer to userBuffer
memcpy( stream_.userBuffer[INPUT],
stream_.deviceBuffer,
stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
@@ -4927,30 +4925,18 @@ void RtApiWasapi::wasapiThread() stream_.userBuffer[OUTPUT],
stream_.convertInfo[OUTPUT] );
- // Convert callback buffer to stream sample rate and channel count
- convertBufferWasapi( convBuffer,
- stream_.deviceBuffer,
- stream_.nUserChannels[OUTPUT],
- stream_.nDeviceChannels[OUTPUT],
- stream_.sampleRate,
- renderFormat->nSamplesPerSec,
- stream_.bufferSize,
- convBufferSize,
- stream_.deviceFormat[OUTPUT] );
- }
- else {
- // Convert callback buffer to stream sample rate and channel count
- convertBufferWasapi( convBuffer,
- stream_.userBuffer[OUTPUT],
- stream_.nUserChannels[OUTPUT],
- stream_.nDeviceChannels[OUTPUT],
- stream_.sampleRate,
- renderFormat->nSamplesPerSec,
- stream_.bufferSize,
- convBufferSize,
- stream_.deviceFormat[OUTPUT] );
}
+ // Convert callback buffer to stream sample rate
+ convertBufferWasapi( convBuffer,
+ stream_.deviceBuffer,
+ stream_.nDeviceChannels[OUTPUT],
+ stream_.sampleRate,
+ renderFormat->nSamplesPerSec,
+ stream_.bufferSize,
+ convBufferSize,
+ stream_.deviceFormat[OUTPUT] );
+
// Push callback buffer into outputBuffer
callbackPushed = renderBuffer.pushBuffer( convBuffer,
convBufferSize * stream_.nDeviceChannels[OUTPUT],
@@ -4985,8 +4971,8 @@ void RtApiWasapi::wasapiThread() if ( bufferFrameCount != 0 ) {
// Push capture buffer into inputBuffer
if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
- bufferFrameCount * stream_.nDeviceChannels[INPUT],
- stream_.deviceFormat[INPUT] ) )
+ bufferFrameCount * stream_.nDeviceChannels[INPUT],
+ stream_.deviceFormat[INPUT] ) )
{
// Release capture buffer
hr = captureClient->ReleaseBuffer( bufferFrameCount );
@@ -5054,8 +5040,8 @@ void RtApiWasapi::wasapiThread() // Pull next buffer from outputBuffer
// Fill render buffer with next buffer
if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
- bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
- stream_.deviceFormat[OUTPUT] ) )
+ bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
+ stream_.deviceFormat[OUTPUT] ) )
{
// Release render buffer
hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
|
