+ PropVariantInit( &deviceNameProp );\r
+\r
+ hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device property: PKEY_Device_FriendlyName" );\r
+\r
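+ // convert the wide-character device name to a narrow string\r
+ // (note: this byte-wise copy is lossy for non-ASCII names)\r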
+ deviceName = deviceNameProp.pwszVal;\r
+ info.name = std::string( deviceName.begin(), deviceName.end() );\r
+\r
+ // is default\r
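+ // (matched by friendly name against the default endpoint's name retrieved above)\r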
+ if ( isCaptureDevice ) {\r
+ info.isDefaultInput = info.name == defaultDeviceName;\r
+ info.isDefaultOutput = false;\r
+ }\r
+ else {\r
+ info.isDefaultInput = false;\r
+ info.isDefaultOutput = info.name == defaultDeviceName;\r
+ }\r
+\r
+ // channel count\r
+ hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device audio client" );\r
+\r
+ hr = audioClient->GetMixFormat( &deviceFormat );\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device mix format" );\r
+\r
+ if ( isCaptureDevice ) {\r
+ info.inputChannels = deviceFormat->nChannels;\r
+ info.outputChannels = 0;\r
+ info.duplexChannels = 0;\r
+ }\r
+ else {\r
+ info.inputChannels = 0;\r
+ info.outputChannels = deviceFormat->nChannels;\r
+ info.duplexChannels = 0;\r
+ }\r
+\r
+ // sample rates\r
+ info.sampleRates.clear();\r
+\r
+ // allow support for sample rates that are multiples of the base rate\r
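+ // e.g. a 48000 Hz mix format reports 96000 and 192000 Hz (integer multiples)\r
+ // as well as integer divisors such as 8000 and 16000 Hz\r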
+ for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {\r
+ if ( SAMPLE_RATES[i] < deviceFormat->nSamplesPerSec ) {\r
+ if ( deviceFormat->nSamplesPerSec % SAMPLE_RATES[i] == 0 ) {\r
+ info.sampleRates.push_back( SAMPLE_RATES[i] );\r
+ }\r
+ }\r
+ else {\r
+ if ( SAMPLE_RATES[i] % deviceFormat->nSamplesPerSec == 0 ) {\r
+ info.sampleRates.push_back( SAMPLE_RATES[i] );\r
+ }\r
+ }\r
+ }\r
+\r
+ // native format\r
+ info.nativeFormats = 0;\r
+\r
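+ // WAVE_FORMAT_EXTENSIBLE stores the actual sample type in the SubFormat GUID of WAVEFORMATEXTENSIBLE\r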
+ if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||\r
+ ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&\r
+ ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )\r
+ {\r
+ if ( deviceFormat->wBitsPerSample == 32 ) {\r
+ info.nativeFormats |= RTAUDIO_FLOAT32;\r
+ }\r
+ else if ( deviceFormat->wBitsPerSample == 64 ) {\r
+ info.nativeFormats |= RTAUDIO_FLOAT64;\r
+ }\r
+ }\r
+ else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||\r
+ ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&\r
+ ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )\r
+ {\r
+ if ( deviceFormat->wBitsPerSample == 8 ) {\r
+ info.nativeFormats |= RTAUDIO_SINT8;\r
+ }\r
+ else if ( deviceFormat->wBitsPerSample == 16 ) {\r
+ info.nativeFormats |= RTAUDIO_SINT16;\r
+ }\r
+ else if ( deviceFormat->wBitsPerSample == 24 ) {\r
+ info.nativeFormats |= RTAUDIO_SINT24;\r
+ }\r
+ else if ( deviceFormat->wBitsPerSample == 32 ) {\r
+ info.nativeFormats |= RTAUDIO_SINT32;\r
+ }\r
+ }\r
+\r
+ // probed\r
+ info.probed = true;\r
+\r
+Exit:\r
+ // release all references\r
+ PropVariantClear( &deviceNameProp );\r
+ PropVariantClear( &defaultDeviceNameProp );\r
+\r
+ SAFE_RELEASE( captureDevices );\r
+ SAFE_RELEASE( renderDevices );\r
+ SAFE_RELEASE( devicePtr );\r
+ SAFE_RELEASE( defaultDevicePtr );\r
+ SAFE_RELEASE( audioClient );\r
+ SAFE_RELEASE( devicePropStore );\r
+ SAFE_RELEASE( defaultDevicePropStore );\r
+\r
+ CoTaskMemFree( deviceFormat );\r
+ CoTaskMemFree( closestMatchFormat );\r
+\r
+ return info;\r
+}\r
+\r
+//-----------------------------------------------------------------------------\r
+\r
+unsigned int RtApiWasapi::getDefaultOutputDevice( void )\r
+{\r
+ for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {\r
+ if ( getDeviceInfo( i ).isDefaultOutput ) {\r
+ return i;\r
+ }\r
+ }\r
+\r
+ return 0;\r
+}\r
+\r
+//-----------------------------------------------------------------------------\r
+\r
+unsigned int RtApiWasapi::getDefaultInputDevice( void )\r
+{\r
+ for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {\r
+ if ( getDeviceInfo( i ).isDefaultInput ) {\r
+ return i;\r
+ }\r
+ }\r
+\r
+ return 0;\r
+}\r
+\r
+//-----------------------------------------------------------------------------\r
+\r
+void RtApiWasapi::closeStream( void )\r
+{\r
+ if ( stream_.state == STREAM_CLOSED ) {\r
+ errorText_ = "RtApiWasapi::closeStream: No open stream to close";\r
+ error( RtAudioError::WARNING );\r
+ return;\r
+ }\r
+\r
+ if ( stream_.state != STREAM_STOPPED )\r
+ stopStream();\r
+\r
+ // clean up stream memory\r
+ SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )\r
+ SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )\r
+\r
+ SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )\r
+ SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )\r
+\r
+ if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )\r
+ CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );\r
+\r
+ if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )\r
+ CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );\r
+\r
+ delete stream_.apiHandle;\r
+ stream_.apiHandle = NULL;\r
+\r
+ for ( int i = 0; i < 2; i++ ) {\r
+ if ( stream_.userBuffer[i] ) {\r
+ free( stream_.userBuffer[i] );\r
+ stream_.userBuffer[i] = 0;\r
+ }\r
+ }\r
+\r
+ if ( stream_.deviceBuffer ) {\r
+ free( stream_.deviceBuffer );\r
+ stream_.deviceBuffer = 0;\r
+ }\r
+\r
+ // update stream state\r
+ stream_.state = STREAM_CLOSED;\r
+}\r
+\r
+//-----------------------------------------------------------------------------\r
+\r
+void RtApiWasapi::startStream( void )\r
+{\r
+ verifyStream();\r
+\r
+ if ( stream_.state == STREAM_RUNNING ) {\r
+ errorText_ = "RtApiWasapi::startStream: The stream is already running";\r
+ error( RtAudioError::WARNING );\r
+ return;\r
+ }\r
+\r
+ // update stream state\r
+ stream_.state = STREAM_RUNNING;\r
+\r
+ // create WASAPI stream thread\r
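+ // (created suspended so its priority can be set before it begins running)\r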
+ stream_.callbackInfo.thread = ( unsigned int ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );\r
+\r
+ if ( !stream_.callbackInfo.thread ) {\r
+ errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread";\r
+ error( RtAudioError::THREAD_ERROR );\r
+ }\r
+ else {\r
+ SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );\r
+ ResumeThread( ( void* ) stream_.callbackInfo.thread );\r
+ }\r
+}\r
+\r
+//-----------------------------------------------------------------------------\r
+\r
+void RtApiWasapi::stopStream( void )\r
+{\r
+ verifyStream();\r
+\r
+ if ( stream_.state == STREAM_STOPPED ) {\r
+ errorText_ = "RtApiWasapi::stopStream: The stream is already stopped";\r
+ error( RtAudioError::WARNING );\r
+ return;\r
+ }\r
+\r
+ // inform stream thread by setting stream state to STREAM_STOPPING\r
+ stream_.state = STREAM_STOPPING;\r
+\r
+ // wait until stream thread is stopped\r
+ while ( stream_.state != STREAM_STOPPED ) {\r
+ Sleep( 1 );\r
+ }\r
+\r
+ // Wait for the last buffer to play before stopping.\r
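+ // (bufferSize frames divided by sampleRate frames per second, scaled to milliseconds)\r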
+ Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );\r
+\r
+ // stop capture client if applicable\r
+ if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {\r
+ HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream";\r
+ error( RtAudioError::DRIVER_ERROR );\r
+ }\r
+ }\r
+\r
+ // stop render client if applicable\r
+ if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {\r
+ HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream";\r
+ error( RtAudioError::DRIVER_ERROR );\r
+ }\r
+ }\r
+\r
+ // close thread handle\r
+ if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {\r
+ errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread";\r
+ error( RtAudioError::THREAD_ERROR );\r
+ }\r
+\r
+ stream_.callbackInfo.thread = NULL;\r
+}\r
+\r
+//-----------------------------------------------------------------------------\r
+\r
+void RtApiWasapi::abortStream( void )\r
+{\r
+ verifyStream();\r
+\r
+ if ( stream_.state == STREAM_STOPPED ) {\r
+ errorText_ = "RtApiWasapi::abortStream: The stream is already stopped";\r
+ error( RtAudioError::WARNING );\r
+ return;\r
+ }\r
+\r
+ // inform stream thread by setting stream state to STREAM_STOPPING\r
+ stream_.state = STREAM_STOPPING;\r
+\r
+ // wait until stream thread is stopped\r
+ while ( stream_.state != STREAM_STOPPED ) {\r
+ Sleep( 1 );\r
+ }\r
+\r
+ // stop capture client if applicable\r
+ if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {\r
+ HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream";\r
+ error( RtAudioError::DRIVER_ERROR );\r
+ }\r
+ }\r
+\r
+ // stop render client if applicable\r
+ if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {\r
+ HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();\r
+ if ( FAILED( hr ) ) {\r
+ errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream";\r
+ error( RtAudioError::DRIVER_ERROR );\r
+ }\r
+ }\r
+\r
+ // close thread handle\r
+ if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {\r
+ errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread";\r
+ error( RtAudioError::THREAD_ERROR );\r
+ }\r
+\r
+ stream_.callbackInfo.thread = NULL;\r
+}\r
+\r
+//-----------------------------------------------------------------------------\r
+\r
+bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,\r
+ unsigned int firstChannel, unsigned int sampleRate,\r
+ RtAudioFormat format, unsigned int* bufferSize,\r
+ RtAudio::StreamOptions* options )\r
+{\r
+ bool methodResult = FAILURE;\r
+ unsigned int captureDeviceCount = 0;\r
+ unsigned int renderDeviceCount = 0;\r
+\r
+ IMMDeviceCollection* captureDevices = NULL;\r
+ IMMDeviceCollection* renderDevices = NULL;\r
+ IMMDevice* devicePtr = NULL;\r
+ WAVEFORMATEX* deviceFormat = NULL;\r
+\r
+ // create API Handle if not already created\r
+ if ( !stream_.apiHandle )\r
+ stream_.apiHandle = ( void* ) new WasapiHandle();\r
+\r
+ // count capture devices\r
+ HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture device collection" );\r
+\r
+ hr = captureDevices->GetCount( &captureDeviceCount );\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture device count" );\r
+\r
+ // count render devices\r
+ hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render device collection" );\r
+\r
+ hr = renderDevices->GetCount( &renderDeviceCount );\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render device count" );\r
+\r
+ // validate device index\r
+ if ( device >= captureDeviceCount + renderDeviceCount )\r
+ EXIT_ON_ERROR( -1, RtAudioError::INVALID_USE, "Invalid device index" );\r
+\r
+ // determine whether index falls within capture or render devices\r
+ if ( device < captureDeviceCount ) {\r
+ if ( mode != INPUT )\r
+ EXIT_ON_ERROR( -1, RtAudioError::INVALID_USE, "Capture device selected as output device" );\r
+\r
+ // retrieve captureAudioClient from devicePtr\r
+ IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;\r
+\r
+ hr = captureDevices->Item( device, &devicePtr );\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture device handle" );\r
+\r
+ hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,\r
+ NULL, ( void** ) &captureAudioClient );\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device audio client" );\r
+\r
+ hr = captureAudioClient->GetMixFormat( &deviceFormat );\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device mix format" );\r
+\r
+ stream_.nDeviceChannels[mode] = deviceFormat->nChannels;\r
+ captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );\r
+ }\r
+ else {\r
+ if ( mode != OUTPUT )\r
+ EXIT_ON_ERROR( -1, RtAudioError::INVALID_USE, "Render device selected as input device" );\r
+\r
+ // retrieve renderAudioClient from devicePtr\r
+ IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;\r
+\r
+ hr = renderDevices->Item( device - captureDeviceCount, &devicePtr );\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render device handle" );\r
+\r
+ hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,\r
+ NULL, ( void** ) &renderAudioClient );\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device audio client" );\r
+\r
+ hr = renderAudioClient->GetMixFormat( &deviceFormat );\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device mix format" );\r
+\r
+ stream_.nDeviceChannels[mode] = deviceFormat->nChannels;\r
+ renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );\r
+ }\r
+\r
+ // fill stream data\r
+ if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||\r
+ ( stream_.mode == INPUT && mode == OUTPUT ) ) {\r
+ stream_.mode = DUPLEX;\r
+ }\r
+ else {\r
+ stream_.mode = mode;\r
+ }\r
+\r
+ stream_.device[mode] = device;\r
+ stream_.state = STREAM_STOPPED;\r
+ stream_.doByteSwap[mode] = false;\r
+ stream_.sampleRate = sampleRate;\r
+ stream_.bufferSize = *bufferSize;\r
+ stream_.nBuffers = 1;\r
+ stream_.nUserChannels[mode] = channels;\r
+ stream_.channelOffset[mode] = firstChannel;\r
+ stream_.userFormat = format;\r
+ stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;\r
+\r
+ if ( options && options->flags & RTAUDIO_NONINTERLEAVED )\r
+ stream_.userInterleaved = false;\r
+ else\r
+ stream_.userInterleaved = true;\r
+ stream_.deviceInterleaved[mode] = true;\r
+\r
+ // Set flags for buffer conversion.\r
+ stream_.doConvertBuffer[mode] = false;\r
+ if ( stream_.userFormat != stream_.deviceFormat[mode] )\r
+ stream_.doConvertBuffer[mode] = true;\r
+ if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&\r
+ stream_.nUserChannels[mode] > 1 )\r
+ stream_.doConvertBuffer[mode] = true;\r
+\r
+ if ( stream_.doConvertBuffer[mode] )\r
+ setConvertInfo( mode, 0 );\r
+\r
+ // Allocate necessary internal buffers\r
+ unsigned int bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );\r
+\r
+ stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );\r
+ if ( !stream_.userBuffer[mode] )\r
+ EXIT_ON_ERROR( -1, RtAudioError::MEMORY_ERROR, "Error allocating user buffer memory" );\r
+\r
+ if ( stream_.doConvertBuffer[mode] && !stream_.deviceBuffer ) {\r
+ unsigned int deviceBufferSize = max( stream_.nUserChannels[INPUT] * stream_.bufferSize * formatBytes( stream_.userFormat ),\r
+ stream_.nUserChannels[OUTPUT] * stream_.bufferSize * formatBytes( stream_.userFormat ) );\r
+\r
+ stream_.deviceBuffer = ( char* ) calloc( deviceBufferSize, 1 );\r
+ if ( !stream_.deviceBuffer )\r
+ EXIT_ON_ERROR( -1, RtAudioError::MEMORY_ERROR, "Error allocating device buffer memory" );\r
+ }\r
+\r
+ if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )\r
+ stream_.callbackInfo.priority = 15;\r
+ else\r
+ stream_.callbackInfo.priority = 0;\r
+\r
+ ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback\r
+ ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode\r
+\r
+ methodResult = SUCCESS;\r
+\r
+Exit:\r
+ //clean up\r
+\r
+ SAFE_RELEASE( captureDevices );\r
+ SAFE_RELEASE( renderDevices );\r
+ SAFE_RELEASE( devicePtr );\r
+\r
+ CoTaskMemFree( deviceFormat );\r
+\r
+ // if method failed, close the stream\r
+ if ( methodResult == FAILURE )\r
+ closeStream();\r
+\r
+ return methodResult;\r
+}\r
+\r
+//=============================================================================\r
+\r
+DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )\r
+{\r
+ if ( wasapiPtr )\r
+ ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();\r
+\r
+ return 0;\r
+}\r
+\r
+DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )\r
+{\r
+ if ( wasapiPtr )\r
+ ( ( RtApiWasapi* ) wasapiPtr )->stopStream();\r
+\r
+ return 0;\r
+}\r
+\r
+DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )\r
+{\r
+ if ( wasapiPtr )\r
+ ( ( RtApiWasapi* ) wasapiPtr )->abortStream();\r
+\r
+ return 0;\r
+}\r
+\r
+//-----------------------------------------------------------------------------\r
+\r
+void RtApiWasapi::wasapiThread()\r
+{\r
+ // as this is a new thread, we must CoInitialize it\r
+ CoInitialize( NULL );\r
+\r
+ HRESULT hr;\r
+\r
+ IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;\r
+ IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;\r
+ IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;\r
+ IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;\r
+ HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;\r
+ HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;\r
+\r
+ WAVEFORMATEX* captureFormat = NULL;\r
+ WAVEFORMATEX* renderFormat = NULL;\r
+ float captureSrRatio = 0.0f;\r
+ float renderSrRatio = 0.0f;\r
+ WasapiBuffer captureBuffer;\r
+ WasapiBuffer renderBuffer;\r
+\r
+ // Attempt to assign "Pro Audio" characteristic to thread\r
+ HMODULE AvrtDll = LoadLibrary( "AVRT.dll" );\r
+ if ( AvrtDll ) {\r
+ DWORD taskIndex = 0;\r
+ TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );\r
+ AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );\r
+ FreeLibrary( AvrtDll );\r
+ }\r
+\r
+ // start capture stream if applicable\r
+ if ( captureAudioClient ) {\r
+ hr = captureAudioClient->GetMixFormat( &captureFormat );\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device mix format" );\r
+\r
+ captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );\r
+\r
+ // initialize capture stream according to desired buffer size\r
+ float desiredBufferSize = stream_.bufferSize * captureSrRatio;\r
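+ // REFERENCE_TIME is measured in 100-nanosecond units, hence the factor of 10,000,000\r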
+ REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );\r
+\r
+ if ( !captureClient ) {\r
+ hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,\r
+ AUDCLNT_STREAMFLAGS_EVENTCALLBACK,\r
+ desiredBufferPeriod,\r
+ desiredBufferPeriod,\r
+ captureFormat,\r
+ NULL );\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to initialize capture audio client" );\r
+\r
+ hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),\r
+ ( void** ) &captureClient );\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture client handle" );\r
+\r
+ // configure captureEvent to trigger on every available capture buffer\r
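+ // (auto-reset event, initially nonsignaled)\r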
+ captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );\r
+ if ( !captureEvent )\r
+ EXIT_ON_ERROR( -1, RtAudioError::SYSTEM_ERROR, "Unable to create capture event" );\r
+\r
+ hr = captureAudioClient->SetEventHandle( captureEvent );\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to set capture event handle" );\r
+\r
+ ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;\r
+ ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;\r
+ }\r
+\r
+ unsigned int inBufferSize = 0;\r
+ hr = captureAudioClient->GetBufferSize( &inBufferSize );\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to get capture buffer size" );\r
+\r
+ // scale outBufferSize according to stream->user sample rate ratio\r
+ // (outBufferSize must be a multiple of the input channel count)\r
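+ // rounding up to a whole number of sample frames prevents splitting a frame across buffer operations\r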
+ unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * captureSrRatio );\r
+ if ( outBufferSize % stream_.nDeviceChannels[INPUT] )\r
+ outBufferSize += stream_.nDeviceChannels[INPUT] - ( outBufferSize % stream_.nDeviceChannels[INPUT] );\r
+\r
+ inBufferSize *= stream_.nDeviceChannels[INPUT];\r
+\r
+ // set captureBuffer size\r
+ captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );\r
+\r
+ // reset the capture stream\r
+ hr = captureAudioClient->Reset();\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to reset capture stream" );\r
+\r
+ // start the capture stream\r
+ hr = captureAudioClient->Start();\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to start capture stream" );\r
+ }\r
+\r
+ // start render stream if applicable\r
+ if ( renderAudioClient ) {\r
+ hr = renderAudioClient->GetMixFormat( &renderFormat );\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device mix format" );\r
+\r
+ renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );\r
+\r
+ // initialize render stream according to desired buffer size\r
+ float desiredBufferSize = stream_.bufferSize * renderSrRatio;\r
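+ // as above, convert the desired frame count to 100-ns REFERENCE_TIME units\r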
+ REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );\r
+\r
+ if ( !renderClient ) {\r
+ hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,\r
+ AUDCLNT_STREAMFLAGS_EVENTCALLBACK,\r
+ desiredBufferPeriod,\r
+ desiredBufferPeriod,\r
+ renderFormat,\r
+ NULL );\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to initialize render audio client" );\r
+\r
+ hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),\r
+ ( void** ) &renderClient );\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render client handle" );\r
+\r
+ // configure renderEvent to trigger on every available render buffer\r
+ renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );\r
+ if ( !renderEvent )\r
+ EXIT_ON_ERROR( -1, RtAudioError::SYSTEM_ERROR, "Unable to create render event" );\r
+\r
+ hr = renderAudioClient->SetEventHandle( renderEvent );\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to set render event handle" );\r
+\r
+ ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;\r
+ ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;\r
+ }\r
+\r
+ unsigned int outBufferSize = 0;\r
+ hr = renderAudioClient->GetBufferSize( &outBufferSize );\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to get render buffer size" );\r
+\r
+ // scale inBufferSize according to user->stream sample rate ratio\r
+ // (inBufferSize must be a multiple of the output channel count)\r
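+ // as with capture, round up so the buffer holds only whole sample frames\r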
+ unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * renderSrRatio );\r
+ if ( inBufferSize % stream_.nDeviceChannels[OUTPUT] ) {\r
+ inBufferSize += stream_.nDeviceChannels[OUTPUT] - ( inBufferSize % stream_.nDeviceChannels[OUTPUT] );\r
+ }\r
+\r
+ outBufferSize *= stream_.nDeviceChannels[OUTPUT];\r
+\r
+ // set renderBuffer size\r
+ renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );\r
+\r
+ // reset the render stream\r
+ hr = renderAudioClient->Reset();\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to reset render stream" );\r
+\r
+ // start the render stream\r
+ hr = renderAudioClient->Start();\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to start render stream" );\r
+ }\r
+\r
+ // declare local stream variables\r
+ RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;\r
+\r
+ BYTE* streamBuffer = NULL;\r
+ unsigned long captureFlags = 0;\r
+\r
+ unsigned int bufferFrameCount = 0;\r
+ unsigned int numFramesPadding = 0;\r
+ unsigned int convBufferSize = 0;\r
+\r
+ bool callbackPushed = false;\r
+ bool callbackPulled = false;\r
+ bool callbackStopped = false;\r
+\r
+ int callbackResult = 0;\r
+\r
+ // convBuffer is used to store converted buffers between WASAPI and the user\r
+ char* convBuffer = NULL;\r
+\r
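+ // convBuffer must hold bufferSize frames scaled by the device-to-stream sample rate ratio\r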
+ if ( stream_.mode == INPUT ) {\r
+ convBuffer = ( char* ) malloc( ( size_t ) ( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * captureSrRatio * formatBytes( stream_.deviceFormat[INPUT] ) ) );\r
+ }\r
+ else if ( stream_.mode == OUTPUT ) {\r
+ convBuffer = ( char* ) malloc( ( size_t ) ( stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * renderSrRatio * formatBytes( stream_.deviceFormat[OUTPUT] ) ) );\r
+ }\r
+ else if ( stream_.mode == DUPLEX ) {\r
+ convBuffer = ( char* ) malloc( max( ( size_t ) ( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * captureSrRatio * formatBytes( stream_.deviceFormat[INPUT] ) ),\r
+ ( size_t ) ( stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * renderSrRatio * formatBytes( stream_.deviceFormat[OUTPUT] ) ) ) );\r
+ }\r
+\r
+ // stream process loop\r
+ while ( stream_.state != STREAM_STOPPING ) {\r
+ if ( !callbackPulled ) {\r
+ // Callback Input\r
+ // ==============\r
+ // 1. Pull callback buffer from inputBuffer\r
+ // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count\r
+ // Convert callback buffer to user format\r
+\r
+ if ( captureAudioClient ) {\r
+ // Pull callback buffer from inputBuffer\r
+ callbackPulled = captureBuffer.pullBuffer( convBuffer,\r
+ ( unsigned int ) ( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * captureSrRatio ),\r
+ stream_.deviceFormat[INPUT] );\r
+\r
+ if ( callbackPulled ) {\r
+ // Convert callback buffer to user sample rate and channel count\r
+ convertBufferWasapi( stream_.deviceBuffer,\r
+ convBuffer,\r
+ stream_.nDeviceChannels[INPUT],\r
+ stream_.nUserChannels[INPUT],\r
+ captureFormat->nSamplesPerSec,\r
+ stream_.sampleRate,\r
+ ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),\r
+ convBufferSize,\r
+ stream_.deviceFormat[INPUT] );\r
+\r
+ if ( stream_.doConvertBuffer[INPUT] ) {\r
+ // Convert callback buffer to user format\r
+ convertBuffer( stream_.userBuffer[INPUT],\r
+ stream_.deviceBuffer,\r
+ stream_.convertInfo[INPUT] );\r
+ }\r
+ else {\r
+ // no conversion, simple copy deviceBuffer to userBuffer\r
+ memcpy( stream_.userBuffer[INPUT],\r
+ stream_.deviceBuffer,\r
+ stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );\r
+ }\r
+ }\r
+ }\r
+ else {\r
+ // if there is no capture stream, set callbackPulled flag\r
+ callbackPulled = true;\r
+ }\r
+\r
+ // Execute Callback\r
+ // ================\r
+ // 1. Execute user callback method\r
+ // 2. Handle return value from callback\r
+\r
+ // if callback has not requested the stream to stop\r
+ if ( callbackPulled && !callbackStopped ) {\r
+ // Execute user callback method\r
+ callbackResult = callback( stream_.userBuffer[OUTPUT],\r
+ stream_.userBuffer[INPUT],\r
+ stream_.bufferSize,\r
+ getStreamTime(),\r
+ captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,\r
+ stream_.callbackInfo.userData );\r
+\r
+ // Handle return value from callback\r
+ if ( callbackResult == 1 ) {\r
+ // instantiate a thread to stop this thread\r
+ HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );\r
+\r
+ if ( !threadHandle ) {\r
+ EXIT_ON_ERROR( -1, RtAudioError::THREAD_ERROR, "Unable to instantiate stream stop thread" );\r
+ }\r
+ else if ( !CloseHandle( threadHandle ) ) {\r
+ EXIT_ON_ERROR( -1, RtAudioError::THREAD_ERROR, "Unable to close stream stop thread handle" );\r
+ }\r
+\r
+ callbackStopped = true;\r
+ }\r
+ else if ( callbackResult == 2 ) {\r
+ // instantiate a thread to stop this thread\r
+ HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );\r
+\r
+ if ( !threadHandle ) {\r
+ EXIT_ON_ERROR( -1, RtAudioError::THREAD_ERROR, "Unable to instantiate stream abort thread" );\r
+ }\r
+ else if ( !CloseHandle( threadHandle ) ) {\r
+ EXIT_ON_ERROR( -1, RtAudioError::THREAD_ERROR, "Unable to close stream abort thread handle" );\r
+ }\r
+\r
+ callbackStopped = true;\r
+ }\r
+ }\r
+ }\r
+\r
+ // Callback Output\r
+ // ===============\r
+ // 1. Convert callback buffer to stream format\r
+ // 2. Convert callback buffer to stream sample rate and channel count\r
+ // 3. Push callback buffer into outputBuffer\r
+\r
+ if ( renderAudioClient && callbackPulled ) {\r
+ if ( stream_.doConvertBuffer[OUTPUT] ) {\r
+ // Convert callback buffer to stream format\r
+ convertBuffer( stream_.deviceBuffer,\r
+ stream_.userBuffer[OUTPUT],\r
+ stream_.convertInfo[OUTPUT] );\r
+\r
+ // Convert callback buffer to stream sample rate and channel count\r
+ convertBufferWasapi( convBuffer,\r
+ stream_.deviceBuffer,\r
+ stream_.nUserChannels[OUTPUT],\r
+ stream_.nDeviceChannels[OUTPUT],\r
+ stream_.sampleRate,\r
+ renderFormat->nSamplesPerSec,\r
+ stream_.bufferSize,\r
+ convBufferSize,\r
+ stream_.deviceFormat[OUTPUT] );\r
+ }\r
+ else {\r
+ // Convert callback buffer to stream sample rate and channel count\r
+ convertBufferWasapi( convBuffer,\r
+ stream_.userBuffer[OUTPUT],\r
+ stream_.nUserChannels[OUTPUT],\r
+ stream_.nDeviceChannels[OUTPUT],\r
+ stream_.sampleRate,\r
+ renderFormat->nSamplesPerSec,\r
+ stream_.bufferSize,\r
+ convBufferSize,\r
+ stream_.deviceFormat[OUTPUT] );\r
+ }\r
+\r
+ // Push callback buffer into outputBuffer\r
+ callbackPushed = renderBuffer.pushBuffer( convBuffer,\r
+ convBufferSize * stream_.nDeviceChannels[OUTPUT],\r
+ stream_.deviceFormat[OUTPUT] );\r
+ }\r
+\r
+ // Stream Capture\r
+ // ==============\r
+ // 1. Get capture buffer from stream\r
+ // 2. Push capture buffer into inputBuffer\r
+ // 3. If 2. was successful: Release capture buffer\r
+\r
+ if ( captureAudioClient ) {\r
+ // if the callback input buffer was not pulled from captureBuffer, wait for next capture event\r
+ if ( !callbackPulled ) {\r
+ WaitForSingleObject( captureEvent, INFINITE );\r
+ }\r
+\r
+ // Get capture buffer from stream\r
+ hr = captureClient->GetBuffer( &streamBuffer,\r
+ &bufferFrameCount,\r
+ &captureFlags, NULL, NULL );\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture buffer" );\r
+\r
+ if ( bufferFrameCount != 0 ) {\r
+ // Push capture buffer into inputBuffer\r
+ if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,\r
+ bufferFrameCount * stream_.nDeviceChannels[INPUT],\r
+ stream_.deviceFormat[INPUT] ) )\r
+ {\r
+ // Release capture buffer\r
+ hr = captureClient->ReleaseBuffer( bufferFrameCount );\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to release capture buffer" );\r
+ }\r
+ else\r
+ {\r
+ // Inform WASAPI that capture was unsuccessful\r
+ hr = captureClient->ReleaseBuffer( 0 );\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to release capture buffer" );\r
+ }\r
+ }\r
+ else\r
+ {\r
+ // Inform WASAPI that capture was unsuccessful\r
+ hr = captureClient->ReleaseBuffer( 0 );\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to release capture buffer" );\r
+ }\r
+ }\r
+\r
+ // Stream Render\r
+ // =============\r
+ // 1. Get render buffer from stream\r
+ // 2. Pull next buffer from outputBuffer\r
+ // 3. If 2. was successful: Fill render buffer with next buffer\r
+ // Release render buffer\r
+\r
+ if ( renderAudioClient ) {\r
+ // if the callback output buffer was not pushed to renderBuffer, wait for next render event\r
+ if ( callbackPulled && !callbackPushed ) {\r
+ WaitForSingleObject( renderEvent, INFINITE );\r
+ }\r
+\r
+ // Get render buffer from stream\r
+ hr = renderAudioClient->GetBufferSize( &bufferFrameCount );\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render buffer size" );\r
+\r
+ hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render buffer padding" );\r
+\r
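+ // frames still queued for playback (the padding) must not be overwritten\r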
+ bufferFrameCount -= numFramesPadding;\r
+\r
+ if ( bufferFrameCount != 0 ) {\r
+ hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render buffer" );\r
+\r
+ // Pull next buffer from outputBuffer\r
+ // Fill render buffer with next buffer\r
+ if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,\r
+ bufferFrameCount * stream_.nDeviceChannels[OUTPUT],\r
+ stream_.deviceFormat[OUTPUT] ) )\r
+ {\r
+ // Release render buffer\r
+ hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to release render buffer" );\r
+ }\r
+ else\r
+ {\r
+ // Inform WASAPI that render was unsuccessful\r
+ hr = renderClient->ReleaseBuffer( 0, 0 );\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to release render buffer" );\r
+ }\r
+ }\r
+ else\r
+ {\r
+ // Inform WASAPI that render was unsuccessful\r
+ hr = renderClient->ReleaseBuffer( 0, 0 );\r
+ EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to release render buffer" );\r
+ }\r
+ }\r
+\r
+ // if the callback buffer was pushed into renderBuffer, reset the callbackPulled flag\r
+ if ( callbackPushed ) {\r
+ callbackPulled = false;\r
+ }\r
+\r
+ // tick stream time\r
+ RtApi::tickStreamTime();\r
+ }\r
+\r
+Exit:\r
+ // clean up\r
+ CoTaskMemFree( captureFormat );\r
+ CoTaskMemFree( renderFormat );\r
+\r
+ free( convBuffer ); // allocated with malloc, so free (not delete)\r
+\r
+ CoUninitialize();\r
+\r
+ // update stream state\r
+ stream_.state = STREAM_STOPPED;\r
+}\r
+\r
+//******************** End of __WINDOWS_WASAPI__ *********************//\r
+#endif\r
+\r
+\r
+#if defined(__WINDOWS_DS__) // Windows DirectSound API\r
+\r
+// Modified by Robin Davies, October 2005\r
+// - Improvements to DirectX pointer chasing. \r
+// - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.\r
+// - Auto-call CoInitialize for DSOUND and ASIO platforms.\r
+// Various revisions for RtAudio 4.0 by Gary Scavone, April 2007\r
+// Changed device query structure for RtAudio 4.0.7, January 2010\r
+\r
+#include <dsound.h>\r
+#include <assert.h>\r
+#include <algorithm>\r
+\r
+#if defined(__MINGW32__)\r
+ // missing from latest mingw winapi\r
+#define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */\r
+#define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */\r
+#define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */\r
+#define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */\r
+#endif\r
+\r
+#define MINIMUM_DEVICE_BUFFER_SIZE 32768\r
+\r
+#ifdef _MSC_VER // if Microsoft Visual C++\r
+#pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.\r
+#endif\r
+\r
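+// Determine whether 'pointer' lies within the circular-buffer span\r
+// [earlierPointer, laterPointer), accounting for wraparound.\r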
+static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )\r
+{\r
+ if ( pointer > bufferSize ) pointer -= bufferSize;\r
+ if ( laterPointer < earlierPointer ) laterPointer += bufferSize;\r
+ if ( pointer < earlierPointer ) pointer += bufferSize;\r
+ return pointer >= earlierPointer && pointer < laterPointer;\r
+}\r
+\r
+// A structure to hold various information related to the DirectSound\r
+// API implementation.\r
+struct DsHandle {\r
+ unsigned int drainCounter; // Tracks callback counts when draining\r
+ bool internalDrain; // Indicates if stop is initiated from callback or not.\r
+ void *id[2];\r
+ void *buffer[2];\r
+ bool xrun[2];\r
+ UINT bufferPointer[2]; \r
+ DWORD dsBufferSize[2];\r
+ DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.\r
+ HANDLE condition;\r
+\r
+ DsHandle()\r
+ :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }\r
+};\r
+\r
+// Declarations for utility functions, callbacks, and structures\r
+// specific to the DirectSound implementation.\r
+static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,\r
+ LPCTSTR description,\r
+ LPCTSTR module,\r
+ LPVOID lpContext );\r
+\r
+static const char* getErrorString( int code );\r
+\r
+static unsigned __stdcall callbackHandler( void *ptr );\r
+\r
+struct DsDevice {\r
+ LPGUID id[2];\r
+ bool validId[2];\r
+ bool found;\r
+ std::string name;\r
+\r
+ DsDevice()\r
+ : found(false) { validId[0] = false; validId[1] = false; }\r
+};\r
+\r
+struct DsProbeData {\r
+ bool isInput;\r
+ std::vector<struct DsDevice>* dsDevices;\r
+};\r
+\r
+RtApiDs :: RtApiDs()\r
+{\r
+ // DirectSound will work with either threading model. If CoInitialize fails,\r
+ // just accept whatever the mainline chose for a threading model.\r
+ coInitialized_ = false;\r
+ HRESULT hr = CoInitialize( NULL );\r
+ if ( SUCCEEDED( hr ) ) coInitialized_ = true;\r
+}\r
+\r
+RtApiDs :: ~RtApiDs()\r
+{\r
+ if ( coInitialized_ ) CoUninitialize(); // balanced call.\r
+ if ( stream_.state != STREAM_CLOSED ) closeStream();\r
+}\r
+\r
+// The DirectSound default output is always the first device.\r
+unsigned int RtApiDs :: getDefaultOutputDevice( void )\r
+{\r
+ return 0;\r
+}\r
+\r
+// The DirectSound default input is always the first input device,\r
+// which is the first capture device enumerated.\r
+unsigned int RtApiDs :: getDefaultInputDevice( void )\r
+{\r
+ return 0;\r
+}\r
+\r
+unsigned int RtApiDs :: getDeviceCount( void )\r
+{\r
+ // Set query flag for previously found devices to false, so that we\r
+ // can check for any devices that have disappeared.\r
+ for ( unsigned int i=0; i<dsDevices.size(); i++ )\r
+ dsDevices[i].found = false;\r
+\r
+ // Query DirectSound devices.\r
+ struct DsProbeData probeInfo;\r
+ probeInfo.isInput = false;\r
+ probeInfo.dsDevices = &dsDevices;\r
+ HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );\r
+ if ( FAILED( result ) ) {\r
+ errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";\r
+ errorText_ = errorStream_.str();\r
+ error( RtAudioError::WARNING );\r
+ }\r
+\r
+ // Query DirectSoundCapture devices.\r
+ probeInfo.isInput = true;\r
+ result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );\r
+ if ( FAILED( result ) ) {\r
+ errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";\r
+ errorText_ = errorStream_.str();\r
+ error( RtAudioError::WARNING );\r
+ }\r
+\r
+ // Clean out any devices that may have disappeared.\r
+ std::vector< int > indices;\r
+ for ( unsigned int i=0; i<dsDevices.size(); i++ )\r
+ if ( dsDevices[i].found == false ) indices.push_back( i );\r
+ unsigned int nErased = 0;\r
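+ // each erase shifts the remaining elements down, so offset the saved indices by the number already erased\r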
+ for ( unsigned int i=0; i<indices.size(); i++ )\r
+ dsDevices.erase( dsDevices.begin() + indices[i] - nErased++ );\r
+\r
+ return static_cast<unsigned int>(dsDevices.size());\r
+}\r
+\r
+RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )\r
+{\r
+ RtAudio::DeviceInfo info;\r
+ info.probed = false;\r
+\r
+ if ( dsDevices.size() == 0 ) {\r
+ // Force a query of all devices\r
+ getDeviceCount();\r
+ if ( dsDevices.size() == 0 ) {\r
+ errorText_ = "RtApiDs::getDeviceInfo: no devices found!";\r
+ error( RtAudioError::INVALID_USE );\r
+ return info;\r
+ }\r
+ }\r
+\r
+ if ( device >= dsDevices.size() ) {\r
+ errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";\r
+ error( RtAudioError::INVALID_USE );\r
+ return info;\r
+ }\r
+\r
+ HRESULT result;\r
+ if ( dsDevices[ device ].validId[0] == false ) goto probeInput;\r
+\r
+ LPDIRECTSOUND output;\r
+ DSCAPS outCaps;\r
+ result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );\r
+ if ( FAILED( result ) ) {\r
+ errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";\r
+ errorText_ = errorStream_.str();\r
+ error( RtAudioError::WARNING );\r
+ goto probeInput;\r
+ }\r
+\r
+ outCaps.dwSize = sizeof( outCaps );\r
+ result = output->GetCaps( &outCaps );\r
+ if ( FAILED( result ) ) {\r
+ output->Release();\r
+ errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";\r
+ errorText_ = errorStream_.str();\r
+ error( RtAudioError::WARNING );\r
+ goto probeInput;\r
+ }\r
+\r
+ // Get output channel information.\r
+ info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;\r
+\r
+ // Get sample rate information.\r
+ info.sampleRates.clear();\r
+ for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {\r
+ if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&\r
+ SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )\r
+ info.sampleRates.push_back( SAMPLE_RATES[k] );\r
+ }\r
+\r
+ // Get format information.\r
+ if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;\r
+ if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;\r
+\r
+ output->Release();\r
+\r
+ if ( getDefaultOutputDevice() == device )\r
+ info.isDefaultOutput = true;\r
+\r
+ if ( dsDevices[ device ].validId[1] == false ) {\r
+ info.name = dsDevices[ device ].name;\r
+ info.probed = true;\r
+ return info;\r
+ }\r
+\r
+ probeInput:\r
+\r
+ LPDIRECTSOUNDCAPTURE input;\r
+ result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );\r