-/************************************************************************/
+/************************************************************************/
/*! \class RtAudio
\brief Realtime audio i/o C++ classes.
const char **ports;
std::string port, previousPort;
unsigned int nChannels = 0, nDevices = 0;
- ports = jack_get_ports( client, NULL, NULL, 0 );
+ ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
if ( ports ) {
// Parse the port names up to the first colon (:).
size_t iColon = 0;
const char **ports;
std::string port, previousPort;
unsigned int nPorts = 0, nDevices = 0;
- ports = jack_get_ports( client, NULL, NULL, 0 );
+ ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
if ( ports ) {
// Parse the port names up to the first colon (:).
size_t iColon = 0;
// Count the available ports containing the client name as device
// channels. Jack "input ports" equal RtAudio output channels.
unsigned int nChannels = 0;
- ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
+ ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
if ( ports ) {
while ( ports[ nChannels ] ) nChannels++;
free( ports );
// Jack "output ports" equal RtAudio input channels.
nChannels = 0;
- ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
+ ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
if ( ports ) {
while ( ports[ nChannels ] ) nChannels++;
free( ports );
const char **ports;
std::string port, previousPort, deviceName;
unsigned int nPorts = 0, nDevices = 0;
- ports = jack_get_ports( client, NULL, NULL, 0 );
+ ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
if ( ports ) {
// Parse the port names up to the first colon (:).
size_t iColon = 0;
return FAILURE;
}
- // Count the available ports containing the client name as device
- // channels. Jack "input ports" equal RtAudio output channels.
- unsigned int nChannels = 0;
unsigned long flag = JackPortIsInput;
if ( mode == INPUT ) flag = JackPortIsOutput;
- ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
- if ( ports ) {
- while ( ports[ nChannels ] ) nChannels++;
- free( ports );
- }
- // Compare the jack ports for specified client to the requested number of channels.
- if ( nChannels < (channels + firstChannel) ) {
- errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
- errorText_ = errorStream_.str();
- return FAILURE;
+ if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
+ // Count the available ports containing the client name as device
+ // channels. Jack "input ports" equal RtAudio output channels.
+ unsigned int nChannels = 0;
+ ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
+ if ( ports ) {
+ while ( ports[ nChannels ] ) nChannels++;
+ free( ports );
+ }
+ // Compare the jack ports for specified client to the requested number of channels.
+ if ( nChannels < (channels + firstChannel) ) {
+ errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
+ errorText_ = errorStream_.str();
+ return FAILURE;
+ }
}
// Check the jack server sample rate.
stream_.sampleRate = jackRate;
// Get the latency of the JACK port.
- ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
+ ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
if ( ports[ firstChannel ] ) {
// Added by Ge Wang
jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
// Get the list of available ports.
if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
result = 1;
- ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
+ ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
if ( ports == NULL) {
errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
goto unlock;
if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
result = 1;
- ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
+ ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
if ( ports == NULL) {
errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
goto unlock;
#ifndef INITGUID
#define INITGUID
#endif
+
+#include <mfapi.h>
+#include <mferror.h>
+#include <mfplay.h>
+#include <mftransform.h>
+#include <wmcodecdsp.h>
+
#include <audioclient.h>
#include <avrt.h>
#include <mmdeviceapi.h>
#include <functiondiscoverykeys_devpkey.h>
-#include <sstream>
+
+#ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
+ #define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
+#endif
+
+#ifndef MFSTARTUP_NOSOCKET
+ #define MFSTARTUP_NOSOCKET 0x1
+#endif
+
+#ifdef _MSC_VER
+ #pragma comment( lib, "ksuser" )
+ #pragma comment( lib, "mfplat.lib" )
+ #pragma comment( lib, "mfuuid.lib" )
+ #pragma comment( lib, "wmcodecdspuuid" )
+#endif
//=============================================================================
//-----------------------------------------------------------------------------
+// In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
+// between HW and the user. The WasapiResampler class is used to perform this conversion between
+// HwIn->UserIn and UserOut->HwOut during the stream callback loop.
+class WasapiResampler
+{
+public:
+ WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
+ unsigned int inSampleRate, unsigned int outSampleRate )
+ : _bytesPerSample( bitsPerSample / 8 )
+ , _channelCount( channelCount )
+ , _sampleRatio( ( float ) outSampleRate / inSampleRate )
+ , _transformUnk( NULL )
+ , _transform( NULL )
+ , _mediaType( NULL )
+ , _inputMediaType( NULL )
+ , _outputMediaType( NULL )
+
+ #ifdef __IWMResamplerProps_FWD_DEFINED__
+ , _resamplerProps( NULL )
+ #endif
+ {
+ // 1. Initialization
+
+ MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );
+
+ // 2. Create Resampler Transform Object
+
+ CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
+ IID_IUnknown, ( void** ) &_transformUnk );
+
+ _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );
+
+ #ifdef __IWMResamplerProps_FWD_DEFINED__
+ _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
+ _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
+ #endif
+
+ // 3. Specify input / output format
+
+ MFCreateMediaType( &_mediaType );
+ _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
+ _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
+ _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
+ _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
+ _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
+ _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
+ _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
+ _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
+
+ MFCreateMediaType( &_inputMediaType );
+ _mediaType->CopyAllItems( _inputMediaType );
+
+ _transform->SetInputType( 0, _inputMediaType, 0 );
+
+ MFCreateMediaType( &_outputMediaType );
+ _mediaType->CopyAllItems( _outputMediaType );
+
+ _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
+ _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );
+
+ _transform->SetOutputType( 0, _outputMediaType, 0 );
+
+ // 4. Send stream start messages to Resampler
+
+ _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
+ _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
+ _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
+ }
+
+ ~WasapiResampler()
+ {
+ // 8. Send stream stop messages to Resampler
+
+ _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
+ _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );
+
+ // 9. Cleanup
+
+ MFShutdown();
+
+ SAFE_RELEASE( _transformUnk );
+ SAFE_RELEASE( _transform );
+ SAFE_RELEASE( _mediaType );
+ SAFE_RELEASE( _inputMediaType );
+ SAFE_RELEASE( _outputMediaType );
+
+ #ifdef __IWMResamplerProps_FWD_DEFINED__
+ SAFE_RELEASE( _resamplerProps );
+ #endif
+ }
+
+ void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount )
+ {
+ unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
+ if ( _sampleRatio == 1 )
+ {
+ // no sample rate conversion required
+ memcpy( outBuffer, inBuffer, inputBufferSize );
+ outSampleCount = inSampleCount;
+ return;
+ }
+
+ unsigned int outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
+
+ IMFMediaBuffer* rInBuffer;
+ IMFSample* rInSample;
+ BYTE* rInByteBuffer = NULL;
+
+ // 5. Create Sample object from input data
+
+ MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );
+
+ rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
+ memcpy( rInByteBuffer, inBuffer, inputBufferSize );
+ rInBuffer->Unlock();
+ rInByteBuffer = NULL;
+
+ rInBuffer->SetCurrentLength( inputBufferSize );
+
+ MFCreateSample( &rInSample );
+ rInSample->AddBuffer( rInBuffer );
+
+ // 6. Pass input data to Resampler
+
+ _transform->ProcessInput( 0, rInSample, 0 );
+
+ SAFE_RELEASE( rInBuffer );
+ SAFE_RELEASE( rInSample );
+
+ // 7. Perform sample rate conversion
+
+ IMFMediaBuffer* rOutBuffer = NULL;
+ BYTE* rOutByteBuffer = NULL;
+
+ MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
+ DWORD rStatus;
+ DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput
+
+ // 7.1 Create Sample object for output data
+
+ memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
+ MFCreateSample( &( rOutDataBuffer.pSample ) );
+ MFCreateMemoryBuffer( rBytes, &rOutBuffer );
+ rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
+ rOutDataBuffer.dwStreamID = 0;
+ rOutDataBuffer.dwStatus = 0;
+ rOutDataBuffer.pEvents = NULL;
+
+ // 7.2 Get output data from Resampler
+
+ if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
+ {
+ outSampleCount = 0;
+ SAFE_RELEASE( rOutBuffer );
+ SAFE_RELEASE( rOutDataBuffer.pSample );
+ return;
+ }
+
+ // 7.3 Write output data to outBuffer
+
+ SAFE_RELEASE( rOutBuffer );
+ rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
+ rOutBuffer->GetCurrentLength( &rBytes );
+
+ rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
+ memcpy( outBuffer, rOutByteBuffer, rBytes );
+ rOutBuffer->Unlock();
+ rOutByteBuffer = NULL;
+
+ outSampleCount = rBytes / _bytesPerSample / _channelCount;
+ SAFE_RELEASE( rOutBuffer );
+ SAFE_RELEASE( rOutDataBuffer.pSample );
+ }
+
+private:
+ unsigned int _bytesPerSample;
+ unsigned int _channelCount;
+ float _sampleRatio;
+
+ IUnknown* _transformUnk;
+ IMFTransform* _transform;
+ IMFMediaType* _mediaType;
+ IMFMediaType* _inputMediaType;
+ IMFMediaType* _outputMediaType;
+
+ #ifdef __IWMResamplerProps_FWD_DEFINED__
+ IWMResamplerProps* _resamplerProps;
+ #endif
+};
+
+//-----------------------------------------------------------------------------
+
// A structure to hold various information related to the WASAPI implementation.
struct WasapiHandle
{
CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
( void** ) &deviceEnumerator_ );
- if ( FAILED( hr ) ) {
- errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
- error( RtAudioError::DRIVER_ERROR );
- }
+ // If this runs on an old Windows, it will fail. Ignore and proceed.
+ if ( FAILED( hr ) )
+ deviceEnumerator_ = NULL;
}
//-----------------------------------------------------------------------------
IMMDeviceCollection* captureDevices = NULL;
IMMDeviceCollection* renderDevices = NULL;
+ if ( !deviceEnumerator_ )
+ return 0;
+
// Count capture devices
errorText_.clear();
HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
info.duplexChannels = 0;
}
- // sample rates (WASAPI only supports the one native sample rate)
- info.preferredSampleRate = deviceFormat->nSamplesPerSec;
-
+ // sample rates
info.sampleRates.clear();
- info.sampleRates.push_back( deviceFormat->nSamplesPerSec );
+
+ // allow support for all sample rates as we have a built-in sample rate converter
+ for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
+ info.sampleRates.push_back( SAMPLE_RATES[i] );
+ }
+ info.preferredSampleRate = deviceFormat->nSamplesPerSec;
// native format
info.nativeFormats = 0;
WAVEFORMATEX* deviceFormat = NULL;
unsigned int bufferBytes;
stream_.state = STREAM_STOPPED;
- RtAudio::DeviceInfo deviceInfo;
// create API Handle if not already created
if ( !stream_.apiHandle )
goto Exit;
}
- deviceInfo = getDeviceInfo( device );
-
- // validate sample rate
- if ( sampleRate != deviceInfo.preferredSampleRate )
- {
- errorType = RtAudioError::INVALID_USE;
- std::stringstream ss;
- ss << "RtApiWasapi::probeDeviceOpen: " << sampleRate
- << "Hz sample rate not supported. This device only supports "
- << deviceInfo.preferredSampleRate << "Hz.";
- errorText_ = ss.str();
- goto Exit;
- }
-
// determine whether index falls within capture or render devices
if ( device >= renderDeviceCount ) {
if ( mode != INPUT ) {
stream_.nUserChannels[mode] = channels;
stream_.channelOffset[mode] = firstChannel;
stream_.userFormat = format;
- stream_.deviceFormat[mode] = deviceInfo.nativeFormats;
+ stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
stream_.userInterleaved = false;
// Set flags for buffer conversion.
stream_.doConvertBuffer[mode] = false;
if ( stream_.userFormat != stream_.deviceFormat[mode] ||
- stream_.nUserChannels != stream_.nDeviceChannels )
+ stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
+ stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
stream_.doConvertBuffer[mode] = true;
else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
stream_.nUserChannels[mode] > 1 )
WAVEFORMATEX* captureFormat = NULL;
WAVEFORMATEX* renderFormat = NULL;
+ float captureSrRatio = 0.0f;
+ float renderSrRatio = 0.0f;
WasapiBuffer captureBuffer;
WasapiBuffer renderBuffer;
+ WasapiResampler* captureResampler = NULL;
+ WasapiResampler* renderResampler = NULL;
// declare local stream variables
RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
unsigned long captureFlags = 0;
unsigned int bufferFrameCount = 0;
unsigned int numFramesPadding = 0;
- bool callbackPushed = false;
+ unsigned int convBufferSize = 0;
+ bool callbackPushed = true;
bool callbackPulled = false;
bool callbackStopped = false;
int callbackResult = 0;
+ // convBuffer is used to store converted buffers between WASAPI and the user
+ char* convBuffer = NULL;
+ unsigned int convBuffSize = 0;
unsigned int deviceBuffSize = 0;
errorText_.clear();
goto Exit;
}
+ // init captureResampler
+ captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
+ formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
+ captureFormat->nSamplesPerSec, stream_.sampleRate );
+
+ captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
+
// initialize capture stream according to desire buffer size
- REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / captureFormat->nSamplesPerSec );
+ float desiredBufferSize = stream_.bufferSize * captureSrRatio;
+ REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
if ( !captureClient ) {
hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
}
// scale outBufferSize according to stream->user sample rate ratio
- unsigned int outBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT];
+ unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
inBufferSize *= stream_.nDeviceChannels[INPUT];
// set captureBuffer size
goto Exit;
}
+ // init renderResampler
+ renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
+ formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
+ stream_.sampleRate, renderFormat->nSamplesPerSec );
+
+ renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
+
// initialize render stream according to desire buffer size
- REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / renderFormat->nSamplesPerSec );
+ float desiredBufferSize = stream_.bufferSize * renderSrRatio;
+ REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
if ( !renderClient ) {
hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
}
// scale inBufferSize according to user->stream sample rate ratio
- unsigned int inBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[OUTPUT];
+ unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
outBufferSize *= stream_.nDeviceChannels[OUTPUT];
// set renderBuffer size
}
}
- if ( stream_.mode == INPUT ) {
- using namespace std; // for roundf
+ // malloc buffer memory
+ if ( stream_.mode == INPUT )
+ {
+ using namespace std; // for ceilf
+ convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
}
- else if ( stream_.mode == OUTPUT ) {
+ else if ( stream_.mode == OUTPUT )
+ {
+ convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
}
- else if ( stream_.mode == DUPLEX ) {
+ else if ( stream_.mode == DUPLEX )
+ {
+ convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
+ ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
}
+ convBuffSize *= 2; // allow overflow for *SrRatio remainders
+ convBuffer = ( char* ) malloc( convBuffSize );
stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
- if ( !stream_.deviceBuffer ) {
+ if ( !convBuffer || !stream_.deviceBuffer ) {
errorType = RtAudioError::MEMORY_ERROR;
errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
goto Exit;
// Callback Input
// ==============
// 1. Pull callback buffer from inputBuffer
- // 2. If 1. was successful: Convert callback buffer to user format
+ // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
+ // Convert callback buffer to user format
- if ( captureAudioClient ) {
- // Pull callback buffer from inputBuffer
- callbackPulled = captureBuffer.pullBuffer( stream_.deviceBuffer,
- ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT],
- stream_.deviceFormat[INPUT] );
+ if ( captureAudioClient )
+ {
+ int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
+ if ( captureSrRatio != 1 )
+ {
+ // account for remainders
+ samplesToPull--;
+ }
- if ( callbackPulled ) {
+ convBufferSize = 0;
+ while ( convBufferSize < stream_.bufferSize )
+ {
+ // Pull callback buffer from inputBuffer
+ callbackPulled = captureBuffer.pullBuffer( convBuffer,
+ samplesToPull * stream_.nDeviceChannels[INPUT],
+ stream_.deviceFormat[INPUT] );
+
+ if ( !callbackPulled )
+ {
+ break;
+ }
+
+ // Convert callback buffer to user sample rate
+ unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
+ unsigned int convSamples = 0;
+
+ captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
+ convBuffer,
+ samplesToPull,
+ convSamples );
+
+ convBufferSize += convSamples;
+ samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
+ }
+
+ if ( callbackPulled )
+ {
if ( stream_.doConvertBuffer[INPUT] ) {
// Convert callback buffer to user format
convertBuffer( stream_.userBuffer[INPUT],
// Callback Output
// ===============
// 1. Convert callback buffer to stream format
- // 2. Push callback buffer into outputBuffer
+ // 2. Convert callback buffer to stream sample rate and channel count
+ // 3. Push callback buffer into outputBuffer
- if ( renderAudioClient && callbackPulled ) {
- if ( stream_.doConvertBuffer[OUTPUT] ) {
- // Convert callback buffer to stream format
- convertBuffer( stream_.deviceBuffer,
- stream_.userBuffer[OUTPUT],
- stream_.convertInfo[OUTPUT] );
+ if ( renderAudioClient && callbackPulled )
+ {
+ // if the last call to renderBuffer.PushBuffer() was successful
+ if ( callbackPushed || convBufferSize == 0 )
+ {
+ if ( stream_.doConvertBuffer[OUTPUT] )
+ {
+ // Convert callback buffer to stream format
+ convertBuffer( stream_.deviceBuffer,
+ stream_.userBuffer[OUTPUT],
+ stream_.convertInfo[OUTPUT] );
+ }
+
+ // Convert callback buffer to stream sample rate
+ renderResampler->Convert( convBuffer,
+ stream_.deviceBuffer,
+ stream_.bufferSize,
+ convBufferSize );
}
// Push callback buffer into outputBuffer
- callbackPushed = renderBuffer.pushBuffer( stream_.deviceBuffer,
- stream_.bufferSize * stream_.nDeviceChannels[OUTPUT],
+ callbackPushed = renderBuffer.pushBuffer( convBuffer,
+ convBufferSize * stream_.nDeviceChannels[OUTPUT],
stream_.deviceFormat[OUTPUT] );
}
else {
// if the callback buffer was pushed renderBuffer reset callbackPulled flag
if ( callbackPushed ) {
+ // unsetting the callbackPulled flag lets the stream know that
+ // the audio device is ready for another callback output buffer.
callbackPulled = false;
+
// tick stream time
RtApi::tickStreamTime();
}
CoTaskMemFree( captureFormat );
CoTaskMemFree( renderFormat );
+ free ( convBuffer );
+ delete renderResampler;
+ delete captureResampler;
+
CoUninitialize();
+ if ( !errorText_.empty() )
+ error( errorType );
+
// update stream state
stream_.state = STREAM_STOPPED;
-
- if ( errorText_.empty() )
- return;
- else
- error( errorType );
}
//******************** End of __WINDOWS_WASAPI__ *********************//
# configure flags
AC_ARG_ENABLE(debug, [AS_HELP_STRING([--enable-debug],[enable various debug output])])
-AC_ARG_WITH(jack, [AS_HELP_STRING([--with-jack], [choose JACK server support (mac and linux only)])])
+AC_ARG_WITH(jack, [AS_HELP_STRING([--with-jack], [choose JACK server support])])
AC_ARG_WITH(alsa, [AS_HELP_STRING([--with-alsa], [choose native ALSA API support (linux only)])])
-AC_ARG_WITH(pulse, [AS_HELP_STRING([--with-pulse], [choose PulseAudio API support (linux only)])])
+AC_ARG_WITH(pulse, [AS_HELP_STRING([--with-pulse], [choose PulseAudio API support (unixes)])])
AC_ARG_WITH(oss, [AS_HELP_STRING([--with-oss], [choose OSS API support (unixes)])])
AC_ARG_WITH(core, [AS_HELP_STRING([--with-core], [choose CoreAudio API support (mac only)])])
AC_ARG_WITH(asio, [AS_HELP_STRING([--with-asio], [choose ASIO API support (win32 only)])])
AS_IF([test "x${GXX}" = "xyes" ], [
CXXFLAGS="-Wall -Wextra ${CXXFLAGS}"
AS_IF([ test "x${enable_debug}" = "xyes" ], [
- # Add -Werror in debug mode
- CXXFLAGS="-Werror ${CXXFLAGS}"
+ # Add -Werror in debug mode (except unused-function)
+ CXXFLAGS="-Werror -Wno-error=unused-function ${CXXFLAGS}"
], [
# hide private symbols in non-debug mode
visibility="-fvisibility=hidden"
# Checks for package options and external software
AC_CANONICAL_HOST
-AC_MSG_CHECKING([for audio API])
+# Aggregate options into a single string.
+AS_IF([test "x$with_jack" = "xyes"], [systems="$systems jack"])
+AS_IF([test "x$with_alsa" = "xyes"], [systems="$systems alsa"])
+AS_IF([test "x$with_pulse" = "xyes"], [systems="$systems pulse"])
+AS_IF([test "x$with_oss" = "xyes"], [systems="$systems oss"])
+AS_IF([test "x$with_core" = "xyes"], [systems="$systems core"])
+AS_IF([test "x$with_asio" = "xyes"], [systems="$systems asio"])
+AS_IF([test "x$with_dsound" = "xyes"], [systems="$systems ds"])
+AS_IF([test "x$with_wasapi" = "xyes"], [systems="$systems wasapi"])
+required=" $systems "
+
+# If none, assign defaults if any are known for this OS.
+# User must specified with-* options for any unknown OS.
+AS_IF([test "x$systems" = "x"],
+ AS_CASE([$host],
+ [*-*-netbsd*], [systems="oss"],
+ [*-*-freebsd*], [systems="oss"],
+ [*-*-linux*], [systems="alsa pulse jack oss"],
+ [*-apple*], [systems="core jack"],
+ [*-mingw32*], [systems="asio ds wasapi jack"]
+ ))
+
+# If any were specifically requested disabled, do it.
+AS_IF([test "x$with_jack" = "xno"], [systems=`echo $systems|tr ' ' \\\\n|grep -v jack`])
+AS_IF([test "x$with_alsa" = "xno"], [systems=`echo $systems|tr ' ' \\\\n|grep -v alsa`])
+AS_IF([test "x$with_pulse" = "xno"], [systems=`echo $systems|tr ' ' \\\\n|grep -v pulse`])
+AS_IF([test "x$with_oss" = "xno"], [systems=`echo $systems|tr ' ' \\\\n|grep -v oss`])
+AS_IF([test "x$with_core" = "xno"], [systems=`echo $systems|tr ' ' \\\\n|grep -v core`])
+AS_IF([test "x$with_asio" = "xno"], [systems=`echo $systems|tr ' ' \\\\n|grep -v asio`])
+AS_IF([test "x$with_dsound" = "xno"], [systems=`echo $systems|tr ' ' \\\\n|grep -v ds`])
+AS_IF([test "x$with_wasapi" = "xno"], [systems=`echo $systems|tr ' ' \\\\n|grep -v wasapi`])
+systems=" `echo $systems|tr \\\\n ' '` "
-AS_IF([test "x$with_jack" = "xyes"], [
- AC_MSG_RESULT([using JACK])
- AC_CHECK_LIB(jack, jack_client_open, , AC_MSG_ERROR([JACK support requires the jack library!]))
- api="$api -D__UNIX_JACK__"
- req="$req jack"
+# For each audio system, check if it is selected and found.
+# Note: Order specified above is not necessarily respected. However,
+# *actual* priority is set at run-time, see RtAudio::openRtApi.
+# One AS_CASE per system, since they are not mutually-exclusive.
+
+AS_CASE(["$systems"], [*" alsa "*], [
+ AC_CHECK_LIB(asound, snd_pcm_open,
+ [api="$api -D__LINUX_ALSA__"
+ req="$req alsa"
+ need_pthread=yes
+ found="$found ALSA"
+ LIBS="-lasound $LIBS"],
+ AS_CASE(["$required"], [*" alsa "*],
+ AC_MSG_ERROR([ALSA support requires the asound library!])))
])
+AS_CASE(["$systems"], [*" pulse "*], [
+ AC_CHECK_LIB(pulse-simple, pa_simple_flush,
+ [api="$api -D__LINUX_PULSE__"
+ req="$req libpulse-simple"
+ need_pthread=yes
+ found="$found PulseAudio"
+ LIBS="-lpulse-simple $LIBS"],
+ AS_CASE(["$required"], [*" pulse "*],
+ AC_MSG_ERROR([PulseAudio support requires the pulse-simple library!])))
+])
-AS_CASE([$host],
- [*-*-netbsd*],
- AS_IF([test "x$api" = "x"], [
- AC_MSG_RESULT([using OSS])
- api="$api -D__LINUX_OSS__"
- AC_CHECK_LIB(ossaudio, main, , AC_MSG_ERROR([RtAudio requires the ossaudio library]))
- AC_CHECK_LIB(pthread, pthread_create, , AC_MSG_ERROR([RtAudio requires the pthread library!]))
- ]),
- [*-*-freebsd*],
- AS_IF([test "x$api" = "x"], [
- AC_MSG_RESULT([using OSS])
- api="$api -D__LINUX_OSS__"
- AC_CHECK_LIB(ossaudio, main, , AC_MSG_ERROR([RtAudio requires the ossaudio library]))
- AC_CHECK_LIB(pthread, pthread_create, , AC_MSG_ERROR([RtAudio requires the pthread library!]))
- ]),
- [*-*-linux*], [
- # Look for ALSA flag
- AS_IF([test "x$with_alsa" = "xyes"], [
- AC_MSG_RESULT([using ALSA])
- api="$api -D__LINUX_ALSA__"
- req="$req alsa"
- AC_CHECK_LIB(asound, snd_pcm_open, , AC_MSG_ERROR([ALSA support requires the asound library!]))
- ])
- # Look for PULSE flag
- AS_IF([test "x$with_pulse" = "xyes"], [
- AC_MSG_RESULT([using PulseAudio])
- api="$api -D__LINUX_PULSE__"
- req="$req libpulse-simple"
- AC_CHECK_LIB(pulse-simple, pa_simple_flush, , AC_MSG_ERROR([PulseAudio support requires the pulse-simple library!]))
- ])
+AS_CASE(["$systems"], [*" oss "*], [
+ # libossaudio not required on some platforms (e.g. linux) so we
+ # don't break things if it's not found, but issue a warning when we
+ # are not sure (i.e. not on linux)
+ AS_CASE([$host], [*-*-linux*], [], [*], [need_ossaudio=yes])
+ AC_CHECK_LIB(ossaudio, main, [have_ossaudio=true],
+ AS_CASE(["$required"], [*" oss "*],
+ AS_IF([test "x$need_ossaudio" = xyes],
+ AC_MSG_WARN([RtAudio may require the ossaudio library]))))
- # Look for OSS flag
- AS_IF([test "x$with_oss" = "xyes"], [
- AC_MSG_RESULT([using OSS])
- api="$api -D__LINUX_OSS__"
- ])
+ # linux systems may have soundcard.h but *not* have OSS4 installed,
+ # we have to actually check if it exports OSS4 symbols
+ AC_CHECK_DECL(SNDCTL_SYSINFO,
+ [api="$api -D__LINUX_OSS__"
+ need_pthread=yes
+ found="$found OSS"],
+ AS_CASE(["$required"], [*" oss "*],
+ AC_MSG_ERROR([sys/soundcard.h not found]))
+ [],
+ [#include <sys/soundcard.h>])
+])
- # If no audio api flags specified, use ALSA
- AS_IF([test "x$api" = "x" ], [
- AC_MSG_RESULT([using ALSA])
- api="${api} -D__LINUX_ALSA__"
- req="${req} alsa"
- AC_CHECK_LIB(asound, snd_pcm_open, , AC_MSG_ERROR([ALSA support requires the asound library!]))
- ])
- AC_CHECK_LIB(pthread, pthread_create, , AC_MSG_ERROR([RtAudio requires the pthread library!]))
- ],
- [*-apple*],[
- # Look for Core flag
- AS_IF([test "x$with_core" = "xyes"], [
- AC_MSG_RESULT([using CoreAudio])
- api="$api -D__MACOSX_CORE__"
- AC_CHECK_HEADER(CoreAudio/CoreAudio.h, [], [AC_MSG_ERROR([CoreAudio header files not found!])] )
- LIBS="$LIBS -framework CoreAudio -framework CoreFoundation"
- ])
- # If no audio api flags specified, use CoreAudio
- AS_IF([test "x$api" = "x" ], [
- AC_MSG_RESULT([using CoreAudio])
- api="${api} -D__MACOSX_CORE__"
- AC_CHECK_HEADER(CoreAudio/CoreAudio.h,
- [],
- [AC_MSG_ERROR([CoreAudio header files not found!])] )
- LIBS="LIBS -framework CoreAudio -framework CoreFoundation"
- ])
- AC_CHECK_LIB(pthread, pthread_create, , AC_MSG_ERROR([RtAudio requires the pthread library!]))
- ],
- [*-mingw32*],[
- AS_IF([test "x$with_asio" = "xyes" ], [
- AC_MSG_RESULT([using ASIO])
- api="$api -D__WINDOWS_ASIO__"
- use_asio=yes
- CPPFLAGS="-I$srcdir/include $CPPFLAGS"
- ])
- # Look for DirectSound flag
- AS_IF([test "x$with_ds" = "xyes" ], [
- AC_MSG_RESULT([using DirectSound])
- api="$api -D__WINDOWS_DS__"
- LIBS="-ldsound -lwinmm $LIBS"
- ])
- # Look for WASAPI flag
- AS_IF([test "x$with_wasapi" = "xyes"], [
- AC_MSG_RESULT([using WASAPI])
- api="$api -D__WINDOWS_WASAPI__"
- LIBS="-lwinmm -luuid -lksuser $LIBS"
- CPPFLAGS="-I$srcdir/include $CPPFLAGS"
- ])
- # If no audio api flags specified, use DS
- AS_IF([test "x$api" = "x" ], [
- AC_MSG_RESULT([using DirectSound])
- api="$api -D__WINDOWS_DS__"
- LIBS="-ldsound -lwinmm $LIBS"
- ])
- LIBS="-lole32 $LIBS"
- ],[
+AS_CASE(["$systems"], [*" jack "*], [
+ AC_CHECK_LIB(jack, jack_client_open,
+ [api="$api -D__UNIX_JACK__"
+ req="$req jack"
+ need_pthread=yes
+ found="$found JACK"
+ LIBS="-ljack $LIBS"],
+ AS_CASE(["$required"], [*" jack "*],
+ AC_MSG_ERROR([JACK support requires the jack library!])))
+])
+
+AS_CASE(["$systems"], [*" core "*], [
+ AC_CHECK_HEADER(CoreAudio/CoreAudio.h,
+ [api="$api -D__MACOSX_CORE__"
+ need_pthread=yes
+ found="$found CoreAudio"
+ LIBS="$LIBS -framework CoreAudio -framework CoreFoundation"],
+ AS_CASE(["$required"], [*" core "*],
+ AC_MSG_ERROR([CoreAudio header files not found!])))
+])
+
+AS_CASE(["$systems"], [*" asio "*], [
+ api="$api -D__WINDOWS_ASIO__"
+ use_asio=yes
+ CPPFLAGS="-I$srcdir/include $CPPFLAGS"
+ need_ole32=yes
+ found="$found ASIO"
+])
+
+AS_CASE(["$systems"], [*" ds "*], [
+ AC_CHECK_HEADERS(mmsystem.h mmreg.h dsound.h,
+ [api="$api -D__WINDOWS_DS__"
+ need_ole32=yes
+ found="$found DirectSound"
+ LIBS="-ldsound -lwinmm $LIBS"])
+])
+
+AS_CASE(["$systems"], [*" wasapi "*], [
+ AC_CHECK_HEADERS(windows.h audioclient.h avrt.h mmdeviceapi.h,
+ [api="$api -D__WINDOWS_WASAPI__"
+ CPPFLAGS="-I$srcdir/include $CPPFLAGS"
+ need_ole32=yes
+ found="$found WASAPI"
+ LIBS="-lwinmm -lksuser -lmfplat -lmfuuid -lwmcodecdspuuid $LIBS"])
+])
+
+AS_IF([test -n "$need_ole32"], [LIBS="-lole32 $LIBS"])
+
+AS_IF([test -n "$need_pthread"],[
+ AC_MSG_CHECKING([for pthread])
+ AC_CHECK_LIB(pthread, pthread_create, ,
+ AC_MSG_ERROR([RtAudio requires the pthread library!]))])
+
+AC_MSG_CHECKING([for audio API])
+
+# Error case: no known realtime systems found.
+AS_IF([test x"$api" = "x"], [
AC_MSG_RESULT([none])
- # Default case for unknown realtime systems.
- AC_MSG_ERROR([Unknown system type for realtime support!])
- ]
-)
+ AC_MSG_ERROR([No known system type found for realtime support!])
+], [
+ AC_MSG_RESULT([$found])
+])
AM_CONDITIONAL( ASIO, [test "x${use_asio}" = "xyes" ])