-/************************************************************************/\r
+/************************************************************************/\r
/*! \class RtAudio\r
\brief Realtime audio i/o C++ classes.\r
\r
RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/\r
\r
RtAudio: realtime audio i/o C++ classes\r
- Copyright (c) 2001-2014 Gary P. Scavone\r
+ Copyright (c) 2001-2016 Gary P. Scavone\r
\r
Permission is hereby granted, free of charge, to any person\r
obtaining a copy of this software and associated documentation files\r
*/\r
/************************************************************************/\r
\r
-// RtAudio: Version 4.1.1pre\r
+// RtAudio: Version 4.1.2\r
\r
#include "RtAudio.h"\r
#include <iostream>\r
#include <cstdlib>\r
#include <cstring>\r
#include <climits>\r
+#include <algorithm>\r
\r
// Static variable definitions.\r
const unsigned int RtApi::MAX_SAMPLE_RATES = 14;\r
#define MUTEX_DESTROY(A) DeleteCriticalSection(A)\r
#define MUTEX_LOCK(A) EnterCriticalSection(A)\r
#define MUTEX_UNLOCK(A) LeaveCriticalSection(A)\r
+\r
+ #include "tchar.h"\r
+\r
+ static std::string convertCharPointerToStdString(const char *text)\r
+ {\r
+ return std::string(text);\r
+ }\r
+\r
+ static std::string convertCharPointerToStdString(const wchar_t *text)\r
+ {\r
+ int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);\r
+ std::string s( length-1, '\0' );\r
+ WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);\r
+ return s;\r
+ }\r
+\r
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)\r
// pthread API\r
#define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)\r
getCompiledApi( apis );\r
for ( unsigned int i=0; i<apis.size(); i++ ) {\r
openRtApi( apis[i] );\r
- if ( rtapi_->getDeviceCount() ) break;\r
+ if ( rtapi_ && rtapi_->getDeviceCount() ) break;\r
}\r
\r
if ( rtapi_ ) return;\r
#endif\r
}\r
\r
+void RtApi :: setStreamTime( double time )\r
+{\r
+ verifyStream();\r
+\r
+ if ( time >= 0.0 )\r
+ stream_.streamTime = time;\r
+}\r
+\r
unsigned int RtApi :: getStreamSampleRate( void )\r
{\r
verifyStream();\r
bool haveValueRange = false;\r
info.sampleRates.clear();\r
for ( UInt32 i=0; i<nRanges; i++ ) {\r
- if ( rangeList[i].mMinimum == rangeList[i].mMaximum )\r
- info.sampleRates.push_back( (unsigned int) rangeList[i].mMinimum );\r
- else {\r
+ if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {\r
+ unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;\r
+ info.sampleRates.push_back( tmpSr );\r
+\r
+ if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )\r
+ info.preferredSampleRate = tmpSr;\r
+\r
+ } else {\r
haveValueRange = true;\r
if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;\r
if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;\r
\r
if ( haveValueRange ) {\r
for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {\r
- if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )\r
+ if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {\r
info.sampleRates.push_back( SAMPLE_RATES[k] );\r
+\r
+ if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )\r
+ info.preferredSampleRate = SAMPLE_RATES[k];\r
+ }\r
}\r
}\r
\r
\r
CoreHandle *handle = (CoreHandle *) stream_.apiHandle;\r
if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {\r
+ if (handle) {\r
+ AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,\r
+ kAudioObjectPropertyScopeGlobal,\r
+ kAudioObjectPropertyElementMaster };\r
+\r
+ property.mSelector = kAudioDeviceProcessorOverload;\r
+ property.mScope = kAudioObjectPropertyScopeGlobal;\r
+ if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {\r
+ errorText_ = "RtApiCore::closeStream(): error removing property listener!";\r
+ error( RtAudioError::WARNING );\r
+ }\r
+ }\r
if ( stream_.state == STREAM_RUNNING )\r
AudioDeviceStop( handle->id[0], callbackHandler );\r
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )\r
}\r
\r
if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {\r
+ if (handle) {\r
+ AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,\r
+ kAudioObjectPropertyScopeGlobal,\r
+ kAudioObjectPropertyElementMaster };\r
+\r
+ property.mSelector = kAudioDeviceProcessorOverload;\r
+ property.mScope = kAudioObjectPropertyScopeGlobal;\r
+ if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {\r
+ errorText_ = "RtApiCore::closeStream(): error removing property listener!";\r
+ error( RtAudioError::WARNING );\r
+ }\r
+ }\r
if ( stream_.state == STREAM_RUNNING )\r
AudioDeviceStop( handle->id[1], callbackHandler );\r
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )\r
}\r
}\r
}\r
+ }\r
\r
- if ( handle->drainCounter ) {\r
- handle->drainCounter++;\r
- goto unlock;\r
- }\r
+ // Don't bother draining input\r
+ if ( handle->drainCounter ) {\r
+ handle->drainCounter++;\r
+ goto unlock;\r
}\r
\r
AudioDeviceID inputDevice;\r
\r
// Get the current jack server sample rate.\r
info.sampleRates.clear();\r
- info.sampleRates.push_back( jack_get_sample_rate( client ) );\r
+\r
+ info.preferredSampleRate = jack_get_sample_rate( client );\r
+ info.sampleRates.push_back( info.preferredSampleRate );\r
\r
// Count the available ports containing the client name as device\r
// channels. Jack "input ports" equal RtAudio output channels.\r
memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );\r
}\r
}\r
+ }\r
\r
- if ( handle->drainCounter ) {\r
- handle->drainCounter++;\r
- goto unlock;\r
- }\r
+ // Don't bother draining input\r
+ if ( handle->drainCounter ) {\r
+ handle->drainCounter++;\r
+ goto unlock;\r
}\r
\r
if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {\r
info.sampleRates.clear();\r
for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {\r
result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );\r
- if ( result == ASE_OK )\r
+ if ( result == ASE_OK ) {\r
info.sampleRates.push_back( SAMPLE_RATES[i] );\r
+\r
+ if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )\r
+ info.preferredSampleRate = SAMPLE_RATES[i];\r
+ }\r
}\r
\r
// Determine supported data types ... just check first channel and assume rest are the same.\r
unsigned int firstChannel, unsigned int sampleRate,\r
RtAudioFormat format, unsigned int *bufferSize,\r
RtAudio::StreamOptions *options )\r
-{\r
+{\r
+\r
+ bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;\r
+\r
// For ASIO, a duplex stream MUST use the same driver.\r
- if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {\r
+ if ( isDuplexInput && stream_.device[0] != device ) {\r
errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";\r
return FAILURE;\r
}\r
}\r
\r
// Only load the driver once for duplex stream.\r
- if ( mode != INPUT || stream_.mode != OUTPUT ) {\r
+ if ( !isDuplexInput ) {\r
// The getDeviceInfo() function will not work when a stream is open\r
// because ASIO does not allow multiple devices to run at the same\r
// time. Thus, we'll probe the system before opening a stream and\r
}\r
}\r
\r
+  // Keep these declarations before any "goto error": they are needed for error cleanup,\r
+  // and a goto may not jump over their initialization (C++ crossing-initialization rule).\r
+ bool buffersAllocated = false;\r
+ AsioHandle *handle = (AsioHandle *) stream_.apiHandle;\r
+ unsigned int nChannels;\r
+\r
+\r
// Check the device channel count.\r
long inputChannels, outputChannels;\r
result = ASIOGetChannels( &inputChannels, &outputChannels );\r
if ( result != ASE_OK ) {\r
- drivers.removeCurrentDriver();\r
errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";\r
errorText_ = errorStream_.str();\r
- return FAILURE;\r
+ goto error;\r
}\r
\r
if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||\r
( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {\r
- drivers.removeCurrentDriver();\r
errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";\r
errorText_ = errorStream_.str();\r
- return FAILURE;\r
+ goto error;\r
}\r
stream_.nDeviceChannels[mode] = channels;\r
stream_.nUserChannels[mode] = channels;\r
// Verify the sample rate is supported.\r
result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );\r
if ( result != ASE_OK ) {\r
- drivers.removeCurrentDriver();\r
errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";\r
errorText_ = errorStream_.str();\r
- return FAILURE;\r
+ goto error;\r
}\r
\r
// Get the current sample rate\r
ASIOSampleRate currentRate;\r
result = ASIOGetSampleRate( ¤tRate );\r
if ( result != ASE_OK ) {\r
- drivers.removeCurrentDriver();\r
errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";\r
errorText_ = errorStream_.str();\r
- return FAILURE;\r
+ goto error;\r
}\r
\r
// Set the sample rate only if necessary\r
if ( currentRate != sampleRate ) {\r
result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );\r
if ( result != ASE_OK ) {\r
- drivers.removeCurrentDriver();\r
errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";\r
errorText_ = errorStream_.str();\r
- return FAILURE;\r
+ goto error;\r
}\r
}\r
\r
else channelInfo.isInput = true;\r
result = ASIOGetChannelInfo( &channelInfo );\r
if ( result != ASE_OK ) {\r
- drivers.removeCurrentDriver();\r
errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";\r
errorText_ = errorStream_.str();\r
- return FAILURE;\r
+ goto error;\r
}\r
\r
// Assuming WINDOWS host is always little-endian.\r
}\r
\r
if ( stream_.deviceFormat[mode] == 0 ) {\r
- drivers.removeCurrentDriver();\r
errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";\r
errorText_ = errorStream_.str();\r
- return FAILURE;\r
+ goto error;\r
}\r
\r
// Set the buffer size. For a duplex stream, this will end up\r
long minSize, maxSize, preferSize, granularity;\r
result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );\r
if ( result != ASE_OK ) {\r
- drivers.removeCurrentDriver();\r
errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";\r
errorText_ = errorStream_.str();\r
- return FAILURE;\r
+ goto error;\r
}\r
\r
- if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;\r
- else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;\r
- else if ( granularity == -1 ) {\r
- // Make sure bufferSize is a power of two.\r
- int log2_of_min_size = 0;\r
- int log2_of_max_size = 0;\r
+ if ( isDuplexInput ) {\r
+ // When this is the duplex input (output was opened before), then we have to use the same\r
+ // buffersize as the output, because it might use the preferred buffer size, which most\r
+    // likely wasn't passed as input to this. The buffer sizes have to be identical anyway,\r
+    // so instead of throwing an error, make them equal. The caller uses the reference\r
+ // to the "bufferSize" param as usual to set up processing buffers.\r
\r
- for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {\r
- if ( minSize & ((long)1 << i) ) log2_of_min_size = i;\r
- if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;\r
- }\r
+ *bufferSize = stream_.bufferSize;\r
\r
- long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );\r
- int min_delta_num = log2_of_min_size;\r
+ } else {\r
+ if ( *bufferSize == 0 ) *bufferSize = preferSize;\r
+ else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;\r
+ else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;\r
+ else if ( granularity == -1 ) {\r
+ // Make sure bufferSize is a power of two.\r
+ int log2_of_min_size = 0;\r
+ int log2_of_max_size = 0;\r
\r
- for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {\r
- long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );\r
- if (current_delta < min_delta) {\r
- min_delta = current_delta;\r
- min_delta_num = i;\r
+ for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {\r
+ if ( minSize & ((long)1 << i) ) log2_of_min_size = i;\r
+ if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;\r
}\r
- }\r
\r
- *bufferSize = ( (unsigned int)1 << min_delta_num );\r
- if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;\r
- else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;\r
- }\r
- else if ( granularity != 0 ) {\r
- // Set to an even multiple of granularity, rounding up.\r
- *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;\r
+ long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );\r
+ int min_delta_num = log2_of_min_size;\r
+\r
+ for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {\r
+ long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );\r
+ if (current_delta < min_delta) {\r
+ min_delta = current_delta;\r
+ min_delta_num = i;\r
+ }\r
+ }\r
+\r
+ *bufferSize = ( (unsigned int)1 << min_delta_num );\r
+ if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;\r
+ else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;\r
+ }\r
+ else if ( granularity != 0 ) {\r
+ // Set to an even multiple of granularity, rounding up.\r
+ *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;\r
+ }\r
}\r
\r
- if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {\r
- drivers.removeCurrentDriver();\r
+ /*\r
+  // we don't use this check anymore, see above!\r
+  // Left here just in case it is ever needed again...\r
+ if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {\r
errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";\r
- return FAILURE;\r
+ goto error;\r
}\r
+ */\r
\r
stream_.bufferSize = *bufferSize;\r
stream_.nBuffers = 2;\r
stream_.deviceInterleaved[mode] = false;\r
\r
// Allocate, if necessary, our AsioHandle structure for the stream.\r
- AsioHandle *handle = (AsioHandle *) stream_.apiHandle;\r
if ( handle == 0 ) {\r
try {\r
handle = new AsioHandle;\r
}\r
catch ( std::bad_alloc& ) {\r
- //if ( handle == NULL ) { \r
- drivers.removeCurrentDriver();\r
errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";\r
- return FAILURE;\r
+ goto error;\r
}\r
handle->bufferInfos = 0;\r
\r
// Create the ASIO internal buffers. Since RtAudio sets up input\r
// and output separately, we'll have to dispose of previously\r
// created output buffers for a duplex stream.\r
- long inputLatency, outputLatency;\r
if ( mode == INPUT && stream_.mode == OUTPUT ) {\r
ASIODisposeBuffers();\r
if ( handle->bufferInfos ) free( handle->bufferInfos );\r
}\r
\r
// Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.\r
- bool buffersAllocated = false;\r
- unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];\r
+ unsigned int i;\r
+ nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];\r
handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );\r
if ( handle->bufferInfos == NULL ) {\r
errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";\r
infos->buffers[0] = infos->buffers[1] = 0;\r
}\r
\r
+ // prepare for callbacks\r
+ stream_.sampleRate = sampleRate;\r
+ stream_.device[mode] = device;\r
+ stream_.mode = isDuplexInput ? DUPLEX : mode;\r
+\r
+ // store this class instance before registering callbacks, that are going to use it\r
+ asioCallbackInfo = &stream_.callbackInfo;\r
+ stream_.callbackInfo.object = (void *) this;\r
+\r
// Set up the ASIO callback structure and create the ASIO data buffers.\r
asioCallbacks.bufferSwitch = &bufferSwitch;\r
asioCallbacks.sampleRateDidChange = &sampleRateChanged;\r
asioCallbacks.asioMessage = &asioMessages;\r
asioCallbacks.bufferSwitchTimeInfo = NULL;\r
result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );\r
+ if ( result != ASE_OK ) {\r
+ // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges\r
+    // but only accept the preferred buffer size as parameter for ASIOCreateBuffers (e.g. Creative's ASIO driver).\r
+    // In that case, let's be naive and retry with the driver's preferred buffer size instead.\r
+ *bufferSize = preferSize;\r
+ stream_.bufferSize = *bufferSize;\r
+ result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );\r
+ }\r
+\r
if ( result != ASE_OK ) {\r
errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";\r
errorText_ = errorStream_.str();\r
goto error;\r
}\r
- buffersAllocated = true;\r
+ buffersAllocated = true; \r
+ stream_.state = STREAM_STOPPED;\r
\r
// Set flags for buffer conversion.\r
stream_.doConvertBuffer[mode] = false;\r
\r
bool makeBuffer = true;\r
bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );\r
- if ( mode == INPUT ) {\r
- if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {\r
- unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );\r
- if ( bufferBytes <= bytesOut ) makeBuffer = false;\r
- }\r
+ if ( isDuplexInput && stream_.deviceBuffer ) {\r
+ unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );\r
+ if ( bufferBytes <= bytesOut ) makeBuffer = false;\r
}\r
\r
if ( makeBuffer ) {\r
}\r
}\r
\r
- stream_.sampleRate = sampleRate;\r
- stream_.device[mode] = device;\r
- stream_.state = STREAM_STOPPED;\r
- asioCallbackInfo = &stream_.callbackInfo;\r
- stream_.callbackInfo.object = (void *) this;\r
- if ( stream_.mode == OUTPUT && mode == INPUT )\r
- // We had already set up an output stream.\r
- stream_.mode = DUPLEX;\r
- else\r
- stream_.mode = mode;\r
-\r
// Determine device latencies\r
+ long inputLatency, outputLatency;\r
result = ASIOGetLatencies( &inputLatency, &outputLatency );\r
if ( result != ASE_OK ) {\r
errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";\r
return SUCCESS;\r
\r
error:\r
- if ( buffersAllocated )\r
- ASIODisposeBuffers();\r
- drivers.removeCurrentDriver();\r
+ if ( !isDuplexInput ) {\r
+ // the cleanup for error in the duplex input, is done by RtApi::openStream\r
+ // So we clean up for single channel only\r
\r
- if ( handle ) {\r
- CloseHandle( handle->condition );\r
- if ( handle->bufferInfos )\r
- free( handle->bufferInfos );\r
- delete handle;\r
- stream_.apiHandle = 0;\r
- }\r
+ if ( buffersAllocated )\r
+ ASIODisposeBuffers();\r
\r
- for ( int i=0; i<2; i++ ) {\r
- if ( stream_.userBuffer[i] ) {\r
- free( stream_.userBuffer[i] );\r
- stream_.userBuffer[i] = 0;\r
+ drivers.removeCurrentDriver();\r
+\r
+ if ( handle ) {\r
+ CloseHandle( handle->condition );\r
+ if ( handle->bufferInfos )\r
+ free( handle->bufferInfos );\r
+\r
+ delete handle;\r
+ stream_.apiHandle = 0;\r
}\r
- }\r
\r
- if ( stream_.deviceBuffer ) {\r
- free( stream_.deviceBuffer );\r
- stream_.deviceBuffer = 0;\r
+\r
+ if ( stream_.userBuffer[mode] ) {\r
+ free( stream_.userBuffer[mode] );\r
+ stream_.userBuffer[mode] = 0;\r
+ }\r
+\r
+ if ( stream_.deviceBuffer ) {\r
+ free( stream_.deviceBuffer );\r
+ stream_.deviceBuffer = 0;\r
+ }\r
}\r
\r
return FAILURE;\r
-}\r
+}\r
\r
void RtApiAsio :: closeStream()\r
{\r
}\r
\r
}\r
+ }\r
\r
- if ( handle->drainCounter ) {\r
- handle->drainCounter++;\r
- goto unlock;\r
- }\r
+ // Don't bother draining input\r
+ if ( handle->drainCounter ) {\r
+ handle->drainCounter++;\r
+ goto unlock;\r
}\r
\r
if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {\r
\r
#if defined(__WINDOWS_WASAPI__) // Windows WASAPI API\r
\r
+// Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014\r
+// - Introduces support for the Windows WASAPI API\r
+// - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required\r
+// - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface\r
+// - Includes automatic internal conversion of sample rate and buffer size between hardware and the user\r
+\r
#ifndef INITGUID\r
#define INITGUID\r
#endif\r
outIndex_( 0 ) {}\r
\r
~WasapiBuffer() {\r
- delete buffer_;\r
+ free( buffer_ );\r
}\r
\r
// sets the length of the internal ring buffer\r
void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {\r
- delete buffer_;\r
+ free( buffer_ );\r
\r
buffer_ = ( char* ) calloc( bufferSize, formatBytes );\r
\r
\r
//-----------------------------------------------------------------------------\r
\r
-// In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate and\r
-// channel counts between HW and the user. The convertBufferWasapi function is used to perform\r
-// these conversions between HwIn->UserIn and UserOut->HwOut during the stream callback loop.\r
-// This sample rate converter favors speed over quality, and works best with conversions between\r
-// one rate and its multiple. RtApiWasapi will not populate a device's sample rate list with rates\r
-// that may cause artifacts via this conversion.\r
+// In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate\r
+// between HW and the user. The convertBufferWasapi function is used to perform this conversion\r
+// between HwIn->UserIn and UserOut->HwOut during the stream callback loop.\r
+// This sample rate converter works best with conversions between one rate and its multiple.\r
void convertBufferWasapi( char* outBuffer,\r
const char* inBuffer,\r
- const unsigned int& inChannelCount,\r
- const unsigned int& outChannelCount,\r
+ const unsigned int& channelCount,\r
const unsigned int& inSampleRate,\r
const unsigned int& outSampleRate,\r
const unsigned int& inSampleCount,\r
{\r
// calculate the new outSampleCount and relative sampleStep\r
float sampleRatio = ( float ) outSampleRate / inSampleRate;\r
+ float sampleRatioInv = ( float ) 1 / sampleRatio;\r
float sampleStep = 1.0f / sampleRatio;\r
float inSampleFraction = 0.0f;\r
- unsigned int commonChannelCount = std::min( inChannelCount, outChannelCount );\r
\r
- outSampleCount = ( unsigned int ) ( inSampleCount * sampleRatio );\r
+ outSampleCount = ( unsigned int ) roundf( inSampleCount * sampleRatio );\r
\r
- // frame-by-frame, copy each relative input sample into it's corresponding output sample\r
- for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )\r
+ // if inSampleRate is a multiple of outSampleRate (or vice versa) there's no need to interpolate\r
+ if ( floor( sampleRatio ) == sampleRatio || floor( sampleRatioInv ) == sampleRatioInv )\r
{\r
- unsigned int inSample = ( unsigned int ) inSampleFraction;\r
-\r
- switch ( format )\r
+    // frame-by-frame, copy each relative input sample into its corresponding output sample\r
+ for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )\r
{\r
- case RTAUDIO_SINT8:\r
- memcpy( &( ( char* ) outBuffer )[ outSample * outChannelCount ], &( ( char* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( char ) );\r
- break;\r
- case RTAUDIO_SINT16:\r
- memcpy( &( ( short* ) outBuffer )[ outSample * outChannelCount ], &( ( short* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( short ) );\r
- break;\r
- case RTAUDIO_SINT24:\r
- memcpy( &( ( S24* ) outBuffer )[ outSample * outChannelCount ], &( ( S24* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( S24 ) );\r
- break;\r
- case RTAUDIO_SINT32:\r
- memcpy( &( ( int* ) outBuffer )[ outSample * outChannelCount ], &( ( int* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( int ) );\r
- break;\r
- case RTAUDIO_FLOAT32:\r
- memcpy( &( ( float* ) outBuffer )[ outSample * outChannelCount ], &( ( float* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( float ) );\r
- break;\r
- case RTAUDIO_FLOAT64:\r
- memcpy( &( ( double* ) outBuffer )[ outSample * outChannelCount ], &( ( double* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( double ) );\r
- break;\r
+ unsigned int inSample = ( unsigned int ) inSampleFraction;\r
+\r
+ switch ( format )\r
+ {\r
+ case RTAUDIO_SINT8:\r
+ memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );\r
+ break;\r
+ case RTAUDIO_SINT16:\r
+ memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );\r
+ break;\r
+ case RTAUDIO_SINT24:\r
+ memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );\r
+ break;\r
+ case RTAUDIO_SINT32:\r
+ memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );\r
+ break;\r
+ case RTAUDIO_FLOAT32:\r
+ memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );\r
+ break;\r
+ case RTAUDIO_FLOAT64:\r
+ memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );\r
+ break;\r
+ }\r
+\r
+ // jump to next in sample\r
+ inSampleFraction += sampleStep;\r
}\r
+ }\r
+ else // else interpolate\r
+ {\r
+    // frame-by-frame, copy each relative input sample into its corresponding output sample\r
+ for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )\r
+ {\r
+ unsigned int inSample = ( unsigned int ) inSampleFraction;\r
\r
- // jump to next in sample\r
- inSampleFraction += sampleStep;\r
+ switch ( format )\r
+ {\r
+ case RTAUDIO_SINT8:\r
+ {\r
+ for ( unsigned int channel = 0; channel < channelCount; channel++ )\r
+ {\r
+ char fromSample = ( ( char* ) inBuffer )[ ( inSample * channelCount ) + channel ];\r
+ char toSample = ( ( char* ) inBuffer )[ ( ( inSample + 1 ) * channelCount ) + channel ];\r
+ float sampleDiff = ( toSample - fromSample ) * ( inSampleFraction - floor( inSampleFraction ) );\r
+ ( ( char* ) outBuffer )[ ( outSample * channelCount ) + channel ] = fromSample + ( char ) sampleDiff;\r
+ }\r
+ break;\r
+ }\r
+ case RTAUDIO_SINT16:\r
+ {\r
+ for ( unsigned int channel = 0; channel < channelCount; channel++ )\r
+ {\r
+ short fromSample = ( ( short* ) inBuffer )[ ( inSample * channelCount ) + channel ];\r
+ short toSample = ( ( short* ) inBuffer )[ ( ( inSample + 1 ) * channelCount ) + channel ];\r
+ float sampleDiff = ( toSample - fromSample ) * ( inSampleFraction - floor( inSampleFraction ) );\r
+ ( ( short* ) outBuffer )[ ( outSample * channelCount ) + channel ] = fromSample + ( short ) sampleDiff;\r
+ }\r
+ break;\r
+ }\r
+ case RTAUDIO_SINT24:\r
+ {\r
+ for ( unsigned int channel = 0; channel < channelCount; channel++ )\r
+ {\r
+ int fromSample = ( ( S24* ) inBuffer )[ ( inSample * channelCount ) + channel ].asInt();\r
+ int toSample = ( ( S24* ) inBuffer )[ ( ( inSample + 1 ) * channelCount ) + channel ].asInt();\r
+ float sampleDiff = ( toSample - fromSample ) * ( inSampleFraction - floor( inSampleFraction ) );\r
+ ( ( S24* ) outBuffer )[ ( outSample * channelCount ) + channel ] = fromSample + ( int ) sampleDiff;\r
+ }\r
+ break;\r
+ }\r
+ case RTAUDIO_SINT32:\r
+ {\r
+ for ( unsigned int channel = 0; channel < channelCount; channel++ )\r
+ {\r
+ int fromSample = ( ( int* ) inBuffer )[ ( inSample * channelCount ) + channel ];\r
+ int toSample = ( ( int* ) inBuffer )[ ( ( inSample + 1 ) * channelCount ) + channel ];\r
+ float sampleDiff = ( toSample - fromSample ) * ( inSampleFraction - floor( inSampleFraction ) );\r
+ ( ( int* ) outBuffer )[ ( outSample * channelCount ) + channel ] = fromSample + ( int ) sampleDiff;\r
+ }\r
+ break;\r
+ }\r
+ case RTAUDIO_FLOAT32:\r
+ {\r
+ for ( unsigned int channel = 0; channel < channelCount; channel++ )\r
+ {\r
+ float fromSample = ( ( float* ) inBuffer )[ ( inSample * channelCount ) + channel ];\r
+ float toSample = ( ( float* ) inBuffer )[ ( ( inSample + 1 ) * channelCount ) + channel ];\r
+ float sampleDiff = ( toSample - fromSample ) * ( inSampleFraction - floor( inSampleFraction ) );\r
+ ( ( float* ) outBuffer )[ ( outSample * channelCount ) + channel ] = fromSample + sampleDiff;\r
+ }\r
+ break;\r
+ }\r
+ case RTAUDIO_FLOAT64:\r
+ {\r
+ for ( unsigned int channel = 0; channel < channelCount; channel++ )\r
+ {\r
+ double fromSample = ( ( double* ) inBuffer )[ ( inSample * channelCount ) + channel ];\r
+ double toSample = ( ( double* ) inBuffer )[ ( ( inSample + 1 ) * channelCount ) + channel ];\r
+ double sampleDiff = ( toSample - fromSample ) * ( inSampleFraction - floor( inSampleFraction ) );\r
+ ( ( double* ) outBuffer )[ ( outSample * channelCount ) + channel ] = fromSample + sampleDiff;\r
+ }\r
+ break;\r
+ }\r
+ }\r
+\r
+ // jump to next in sample\r
+ inSampleFraction += sampleStep;\r
+ }\r
}\r
}\r
\r
{\r
// WASAPI can run either apartment or multi-threaded\r
HRESULT hr = CoInitialize( NULL );\r
-\r
if ( !FAILED( hr ) )\r
coInitialized_ = true;\r
\r
\r
RtApiWasapi::~RtApiWasapi()\r
{\r
- // if this object previously called CoInitialize()\r
- if ( coInitialized_ ) {\r
- CoUninitialize();\r
- }\r
-\r
- if ( stream_.state != STREAM_CLOSED ) {\r
+ if ( stream_.state != STREAM_CLOSED )\r
closeStream();\r
- }\r
\r
SAFE_RELEASE( deviceEnumerator_ );\r
+\r
+ // If this object previously called CoInitialize()\r
+ if ( coInitialized_ )\r
+ CoUninitialize();\r
}\r
\r
//=============================================================================\r
RtAudio::DeviceInfo info;\r
unsigned int captureDeviceCount = 0;\r
unsigned int renderDeviceCount = 0;\r
- std::wstring deviceName;\r
std::string defaultDeviceName;\r
bool isCaptureDevice = false;\r
\r
goto Exit;\r
}\r
\r
- deviceName = defaultDeviceNameProp.pwszVal;\r
- defaultDeviceName = std::string( deviceName.begin(), deviceName.end() );\r
+ defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);\r
\r
// name\r
hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );\r
goto Exit;\r
}\r
\r
- deviceName = deviceNameProp.pwszVal;\r
- info.name = std::string( deviceName.begin(), deviceName.end() );\r
+ info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);\r
\r
// is default\r
if ( isCaptureDevice ) {\r
// sample rates\r
info.sampleRates.clear();\r
\r
- // allow support for sample rates that are multiples of the base rate\r
+ // allow support for all sample rates as we have a built-in sample rate converter\r
for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {\r
- if ( SAMPLE_RATES[i] < deviceFormat->nSamplesPerSec ) {\r
- if ( deviceFormat->nSamplesPerSec % SAMPLE_RATES[i] == 0 ) {\r
- info.sampleRates.push_back( SAMPLE_RATES[i] );\r
- }\r
- }\r
- else {\r
- if ( SAMPLE_RATES[i] % deviceFormat->nSamplesPerSec == 0 ) {\r
- info.sampleRates.push_back( SAMPLE_RATES[i] );\r
- }\r
- }\r
+ info.sampleRates.push_back( SAMPLE_RATES[i] );\r
}\r
+ info.preferredSampleRate = deviceFormat->nSamplesPerSec;\r
\r
// native format\r
info.nativeFormats = 0;\r
\r
// Set flags for buffer conversion.\r
stream_.doConvertBuffer[mode] = false;\r
- if ( stream_.userFormat != stream_.deviceFormat[mode] )\r
+ if ( stream_.userFormat != stream_.deviceFormat[mode] ||\r
+ stream_.nUserChannels != stream_.nDeviceChannels )\r
stream_.doConvertBuffer[mode] = true;\r
- if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&\r
- stream_.nUserChannels[mode] > 1 )\r
+ else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&\r
+ stream_.nUserChannels[mode] > 1 )\r
stream_.doConvertBuffer[mode] = true;\r
\r
if ( stream_.doConvertBuffer[mode] )\r
\r
// convBuffer is used to store converted buffers between WASAPI and the user\r
char* convBuffer = NULL;\r
- unsigned int deviceBufferSize = 0;\r
+ unsigned int convBuffSize = 0;\r
+ unsigned int deviceBuffSize = 0;\r
\r
errorText_.clear();\r
RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;\r
\r
// Attempt to assign "Pro Audio" characteristic to thread\r
- HMODULE AvrtDll = LoadLibrary( "AVRT.dll" );\r
+ HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );\r
if ( AvrtDll ) {\r
DWORD taskIndex = 0;\r
TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );\r
}\r
\r
if ( stream_.mode == INPUT ) {\r
- deviceBufferSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );\r
+ convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );\r
+ deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );\r
}\r
else if ( stream_.mode == OUTPUT ) {\r
- deviceBufferSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );\r
+ convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );\r
+ deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );\r
}\r
else if ( stream_.mode == DUPLEX ) {\r
- deviceBufferSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),\r
- ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );\r
+ convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),\r
+ ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );\r
+ deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),\r
+ stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );\r
}\r
\r
- convBuffer = ( char* ) malloc( deviceBufferSize );\r
- stream_.deviceBuffer = ( char* ) malloc( deviceBufferSize );\r
+ convBuffer = ( char* ) malloc( convBuffSize );\r
+ stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );\r
if ( !convBuffer || !stream_.deviceBuffer ) {\r
errorType = RtAudioError::MEMORY_ERROR;\r
errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";\r
stream_.deviceFormat[INPUT] );\r
\r
if ( callbackPulled ) {\r
- // Convert callback buffer to user sample rate and channel count\r
+ // Convert callback buffer to user sample rate\r
convertBufferWasapi( stream_.deviceBuffer,\r
convBuffer,\r
stream_.nDeviceChannels[INPUT],\r
- stream_.nUserChannels[INPUT],\r
captureFormat->nSamplesPerSec,\r
stream_.sampleRate,\r
( unsigned int ) ( stream_.bufferSize * captureSrRatio ),\r
stream_.convertInfo[INPUT] );\r
}\r
else {\r
- // no conversion, simple copy deviceBuffer to userBuffer\r
+ // no further conversion, simple copy deviceBuffer to userBuffer\r
memcpy( stream_.userBuffer[INPUT],\r
stream_.deviceBuffer,\r
stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );\r
stream_.userBuffer[OUTPUT],\r
stream_.convertInfo[OUTPUT] );\r
\r
- // Convert callback buffer to stream sample rate and channel count\r
- convertBufferWasapi( convBuffer,\r
- stream_.deviceBuffer,\r
- stream_.nUserChannels[OUTPUT],\r
- stream_.nDeviceChannels[OUTPUT],\r
- stream_.sampleRate,\r
- renderFormat->nSamplesPerSec,\r
- stream_.bufferSize,\r
- convBufferSize,\r
- stream_.deviceFormat[OUTPUT] );\r
- }\r
- else {\r
- // Convert callback buffer to stream sample rate and channel count\r
- convertBufferWasapi( convBuffer,\r
- stream_.userBuffer[OUTPUT],\r
- stream_.nUserChannels[OUTPUT],\r
- stream_.nDeviceChannels[OUTPUT],\r
- stream_.sampleRate,\r
- renderFormat->nSamplesPerSec,\r
- stream_.bufferSize,\r
- convBufferSize,\r
- stream_.deviceFormat[OUTPUT] );\r
}\r
\r
+ // Convert callback buffer to stream sample rate\r
+ convertBufferWasapi( convBuffer,\r
+ stream_.deviceBuffer,\r
+ stream_.nDeviceChannels[OUTPUT],\r
+ stream_.sampleRate,\r
+ renderFormat->nSamplesPerSec,\r
+ stream_.bufferSize,\r
+ convBufferSize,\r
+ stream_.deviceFormat[OUTPUT] );\r
+\r
// Push callback buffer into outputBuffer\r
callbackPushed = renderBuffer.pushBuffer( convBuffer,\r
convBufferSize * stream_.nDeviceChannels[OUTPUT],\r
stream_.deviceFormat[OUTPUT] );\r
}\r
+ else {\r
+ // if there is no render stream, set callbackPushed flag\r
+ callbackPushed = true;\r
+ }\r
\r
// Stream Capture\r
// ==============\r
if ( bufferFrameCount != 0 ) {\r
// Push capture buffer into inputBuffer\r
if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,\r
- bufferFrameCount * stream_.nDeviceChannels[INPUT],\r
- stream_.deviceFormat[INPUT] ) )\r
+ bufferFrameCount * stream_.nDeviceChannels[INPUT],\r
+ stream_.deviceFormat[INPUT] ) )\r
{\r
// Release capture buffer\r
hr = captureClient->ReleaseBuffer( bufferFrameCount );\r
// Pull next buffer from outputBuffer\r
// Fill render buffer with next buffer\r
if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,\r
- bufferFrameCount * stream_.nDeviceChannels[OUTPUT],\r
- stream_.deviceFormat[OUTPUT] ) )\r
+ bufferFrameCount * stream_.nDeviceChannels[OUTPUT],\r
+ stream_.deviceFormat[OUTPUT] ) )\r
{\r
// Release render buffer\r
hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );\r
// if the callback buffer was pushed renderBuffer reset callbackPulled flag\r
if ( callbackPushed ) {\r
callbackPulled = false;\r
+ // tick stream time\r
+ RtApi::tickStreamTime();\r
}\r
\r
- // tick stream time\r
- RtApi::tickStreamTime();\r
}\r
\r
Exit:\r
CoTaskMemFree( captureFormat );\r
CoTaskMemFree( renderFormat );\r
\r
- //delete convBuffer;\r
free ( convBuffer );\r
\r
CoUninitialize();\r
error( RtAudioError::WARNING );\r
}\r
\r
- // Clean out any devices that may have disappeared.\r
- std::vector< int > indices;\r
- for ( unsigned int i=0; i<dsDevices.size(); i++ )\r
- if ( dsDevices[i].found == false ) indices.push_back( i );\r
- //unsigned int nErased = 0;\r
- for ( unsigned int i=0; i<indices.size(); i++ )\r
- dsDevices.erase( dsDevices.begin()+indices[i] );\r
- //dsDevices.erase( dsDevices.begin()-nErased++ );\r
+ // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).\r
+ for ( unsigned int i=0; i<dsDevices.size(); ) {\r
+ if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );\r
+ else i++;\r
+ }\r
\r
return static_cast<unsigned int>(dsDevices.size());\r
}\r
info.sampleRates.clear();\r
for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {\r
if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&\r
- SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )\r
+ SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {\r
info.sampleRates.push_back( SAMPLE_RATES[k] );\r
+\r
+ if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )\r
+ info.preferredSampleRate = SAMPLE_RATES[k];\r
+ }\r
}\r
\r
// Get format information.\r
\r
stream_.state = STREAM_STOPPED;\r
\r
+ MUTEX_LOCK( &stream_.mutex );\r
+\r
// Stop the buffer and clear memory\r
LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];\r
result = buffer->Stop();\r
\r
stream_.state = STREAM_STOPPED;\r
\r
+ if ( stream_.mode != DUPLEX )\r
+ MUTEX_LOCK( &stream_.mutex );\r
+\r
result = buffer->Stop();\r
if ( FAILED( result ) ) {\r
errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";\r
\r
unlock:\r
timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.\r
+ MUTEX_UNLOCK( &stream_.mutex );\r
+\r
if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );\r
}\r
\r
char *buffer;\r
long bufferBytes;\r
\r
+ MUTEX_LOCK( &stream_.mutex );\r
+ if ( stream_.state == STREAM_STOPPED ) {\r
+ MUTEX_UNLOCK( &stream_.mutex );\r
+ return;\r
+ }\r
+\r
if ( buffersRolling == false ) {\r
if ( stream_.mode == DUPLEX ) {\r
//assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );\r
if ( FAILED( result ) ) {\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";\r
errorText_ = errorStream_.str();\r
+ MUTEX_UNLOCK( &stream_.mutex );\r
error( RtAudioError::SYSTEM_ERROR );\r
return;\r
}\r
if ( FAILED( result ) ) {\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";\r
errorText_ = errorStream_.str();\r
+ MUTEX_UNLOCK( &stream_.mutex );\r
error( RtAudioError::SYSTEM_ERROR );\r
return;\r
}\r
if ( FAILED( result ) ) {\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";\r
errorText_ = errorStream_.str();\r
+ MUTEX_UNLOCK( &stream_.mutex );\r
error( RtAudioError::SYSTEM_ERROR );\r
return;\r
}\r
if ( FAILED( result ) ) {\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";\r
errorText_ = errorStream_.str();\r
+ MUTEX_UNLOCK( &stream_.mutex );\r
error( RtAudioError::SYSTEM_ERROR );\r
return;\r
}\r
if ( FAILED( result ) ) {\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";\r
errorText_ = errorStream_.str();\r
+ MUTEX_UNLOCK( &stream_.mutex );\r
error( RtAudioError::SYSTEM_ERROR );\r
return;\r
}\r
if ( FAILED( result ) ) {\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";\r
errorText_ = errorStream_.str();\r
+ MUTEX_UNLOCK( &stream_.mutex );\r
error( RtAudioError::SYSTEM_ERROR );\r
return;\r
}\r
if ( FAILED( result ) ) {\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";\r
errorText_ = errorStream_.str();\r
+ MUTEX_UNLOCK( &stream_.mutex );\r
error( RtAudioError::SYSTEM_ERROR );\r
return;\r
}\r
if ( FAILED( result ) ) {\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";\r
errorText_ = errorStream_.str();\r
+ MUTEX_UNLOCK( &stream_.mutex );\r
error( RtAudioError::SYSTEM_ERROR );\r
return;\r
}\r
nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;\r
handle->bufferPointer[0] = nextWritePointer;\r
+ }\r
\r
- if ( handle->drainCounter ) {\r
- handle->drainCounter++;\r
- goto unlock;\r
- }\r
+ // Don't bother draining input\r
+ if ( handle->drainCounter ) {\r
+ handle->drainCounter++;\r
+ goto unlock;\r
}\r
\r
if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {\r
if ( FAILED( result ) ) {\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";\r
errorText_ = errorStream_.str();\r
+ MUTEX_UNLOCK( &stream_.mutex );\r
error( RtAudioError::SYSTEM_ERROR );\r
return;\r
}\r
if ( FAILED( result ) ) {\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";\r
errorText_ = errorStream_.str();\r
+ MUTEX_UNLOCK( &stream_.mutex );\r
error( RtAudioError::SYSTEM_ERROR );\r
return;\r
}\r
if ( FAILED( result ) ) {\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";\r
errorText_ = errorStream_.str();\r
+ MUTEX_UNLOCK( &stream_.mutex );\r
error( RtAudioError::SYSTEM_ERROR );\r
return;\r
}\r
if ( FAILED( result ) ) {\r
errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";\r
errorText_ = errorStream_.str();\r
+ MUTEX_UNLOCK( &stream_.mutex );\r
error( RtAudioError::SYSTEM_ERROR );\r
return;\r
}\r
}\r
\r
unlock:\r
+ MUTEX_UNLOCK( &stream_.mutex );\r
RtApi::tickStreamTime();\r
}\r
\r
return 0;\r
}\r
\r
-#include "tchar.h"\r
-\r
-static std::string convertTChar( LPCTSTR name )\r
-{\r
-#if defined( UNICODE ) || defined( _UNICODE )\r
- int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);\r
- std::string s( length-1, '\0' );\r
- WideCharToMultiByte(CP_UTF8, 0, name, -1, &s[0], length, NULL, NULL);\r
-#else\r
- std::string s( name );\r
-#endif\r
-\r
- return s;\r
-}\r
-\r
static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,\r
LPCTSTR description,\r
LPCTSTR /*module*/,\r
}\r
\r
// If good device, then save its name and guid.\r
- std::string name = convertTChar( description );\r
+ std::string name = convertCharPointerToStdString( description );\r
//if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )\r
if ( lpguid == NULL )\r
name = "Default Device";\r
\r
// Count cards and devices\r
card = -1;\r
+ subdevice = -1;\r
snd_card_next( &card );\r
while ( card >= 0 ) {\r
sprintf( name, "hw:%d", card );\r
// Test our discrete set of sample rate values.\r
info.sampleRates.clear();\r
for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {\r
- if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )\r
+ if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {\r
info.sampleRates.push_back( SAMPLE_RATES[i] );\r
+\r
+ if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )\r
+ info.preferredSampleRate = SAMPLE_RATES[i];\r
+ }\r
}\r
if ( info.sampleRates.size() == 0 ) {\r
snd_pcm_close( phandle );\r
errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";\r
errorText_ = errorStream_.str();\r
}\r
+ else\r
+ errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";\r
}\r
else {\r
errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";\r
bool *isRunning = &info->isRunning;\r
\r
#ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)\r
- if ( &info->doRealtime ) {\r
+ if ( info->doRealtime ) {\r
pthread_t tID = pthread_self(); // ID of this thread\r
sched_param prio = { info->priority }; // scheduling priority of thread\r
pthread_setschedparam( tID, SCHED_RR, &prio );\r
for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )\r
info.sampleRates.push_back( *sr );\r
\r
+ info.preferredSampleRate = 48000;\r
info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;\r
\r
return info;\r
pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );\r
\r
int error;\r
- if ( !options->streamName.empty() ) streamName = options->streamName;\r
+ if ( options && !options->streamName.empty() ) streamName = options->streamName;\r
switch ( mode ) {\r
case INPUT:\r
pa_buffer_attr buffer_attr;\r
}\r
break;\r
case OUTPUT:\r
- pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );\r
+ pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );\r
if ( !pah->s_play ) {\r
errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";\r
goto error;\r
for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {\r
if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {\r
info.sampleRates.push_back( SAMPLE_RATES[k] );\r
+\r
+ if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )\r
+ info.preferredSampleRate = SAMPLE_RATES[k];\r
+\r
break;\r
}\r
}\r
else {\r
// Check min and max rate values;\r
for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {\r
- if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )\r
+ if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {\r
info.sampleRates.push_back( SAMPLE_RATES[k] );\r
+\r
+ if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )\r
+ info.preferredSampleRate = SAMPLE_RATES[k];\r
+ }\r
}\r
}\r
\r
\r
void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )\r
{\r
- register char val;\r
- register char *ptr;\r
+ char val;\r
+ char *ptr;\r
\r
ptr = buffer;\r
if ( format == RTAUDIO_SINT16 ) {\r