1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 /************************************************************************/
41 // RtAudio: Version 5.0.0
51 // Static variable definitions.
52 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
53 const unsigned int RtApi::SAMPLE_RATES[] = {
54 4000, 5512, 8000, 9600, 11025, 16000, 22050,
55 32000, 44100, 48000, 88200, 96000, 176400, 192000
// Platform mutex abstraction: on Windows builds (DirectSound, ASIO or
// WASAPI) the stream mutex is a Win32 CRITICAL_SECTION.
58 #if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
59 #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
60 #define MUTEX_DESTROY(A) DeleteCriticalSection(A)
61 #define MUTEX_LOCK(A) EnterCriticalSection(A)
62 #define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
// Convert a narrow C string to a std::string.
// A NULL pointer yields an empty string; passing NULL straight to the
// std::string constructor would be undefined behavior.
static std::string convertCharPointerToStdString( const char *text )
{
  if ( text == NULL ) return std::string();
  return std::string( text );
}
71 static std::string convertCharPointerToStdString(const wchar_t *text)
73 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
74 std::string s( length-1, '\0' );
75 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
// Platform mutex abstraction: pthread mutexes on the ALSA / Pulse /
// Jack / OSS / CoreAudio builds.
79 #elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
81 #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
82 #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
83 #define MUTEX_LOCK(A) pthread_mutex_lock(A)
84 #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
// No audio API selected (dummy build): the mutex macros are no-ops that
// merely touch their argument to avoid unused-variable warnings.
86 #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
87 #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
90 // *************************************************** //
92 // RtAudio definitions.
94 // *************************************************** //
96 std::string RtAudio :: getVersion( void )
98 return RTAUDIO_VERSION;
// Define API names and display names.
// Must be in same order as API enum (UNSPECIFIED, LINUX_ALSA,
// LINUX_PULSE, LINUX_OSS, UNIX_JACK, MACOSX_CORE, WINDOWS_WASAPI,
// WINDOWS_ASIO, WINDOWS_DS, RTAUDIO_DUMMY) -- the static assertion
// below checks the count against RtAudio::NUM_APIS.
const char* rtaudio_api_names[][2] = {
  { "unspecified" , "Unknown" },
  { "alsa" , "ALSA" },
  { "pulse" , "Pulse" },
  { "oss" , "OpenSoundSystem" },
  { "jack" , "Jack" },
  { "core" , "CoreAudio" },
  { "wasapi" , "WASAPI" },
  { "asio" , "ASIO" },
  { "ds" , "DirectSound" },
  { "dummy" , "Dummy" },
};
const unsigned int rtaudio_num_api_names =
  sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);
119 // The order here will control the order of RtAudio's API search in
// the constructor: the first compiled API with at least one device wins.
// The list is terminated by an UNSPECIFIED sentinel entry.
121 extern "C" const RtAudio::Api rtaudio_compiled_apis[] = {
122 #if defined(__UNIX_JACK__)
125 #if defined(__LINUX_PULSE__)
126 RtAudio::LINUX_PULSE,
128 #if defined(__LINUX_ALSA__)
131 #if defined(__LINUX_OSS__)
134 #if defined(__WINDOWS_ASIO__)
135 RtAudio::WINDOWS_ASIO,
137 #if defined(__WINDOWS_WASAPI__)
138 RtAudio::WINDOWS_WASAPI,
140 #if defined(__WINDOWS_DS__)
143 #if defined(__MACOSX_CORE__)
144 RtAudio::MACOSX_CORE,
146 #if defined(__RTAUDIO_DUMMY__)
147 RtAudio::RTAUDIO_DUMMY,
149 RtAudio::UNSPECIFIED,
// Subtract one so the UNSPECIFIED sentinel is not counted as an API.
151 extern "C" const unsigned int rtaudio_num_compiled_apis =
152 sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
155 // This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
156 // If the build breaks here, check that they match.
// Pre-C++11 static-assert idiom: only the StaticAssert<true>
// specialization has a public default constructor, so instantiating
// StaticAssert<false> fails to compile.
157 template<bool b> class StaticAssert { private: StaticAssert() {} };
158 template<> class StaticAssert<true>{ public: StaticAssert() {} };
// Never instantiated at run time; exists solely to host the check.
159 class StaticAssertions { StaticAssertions() {
160 StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
163 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
165 apis = std::vector<RtAudio::Api>(rtaudio_compiled_apis,
166 rtaudio_compiled_apis + rtaudio_num_compiled_apis);
169 std::string RtAudio :: getApiName( RtAudio::Api api )
171 if (api < 0 || api >= RtAudio::NUM_APIS)
173 return rtaudio_api_names[api][0];
176 std::string RtAudio :: getApiDisplayName( RtAudio::Api api )
178 if (api < 0 || api >= RtAudio::NUM_APIS)
180 return rtaudio_api_names[api][1];
183 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
186 for (i = 0; i < rtaudio_num_compiled_apis; ++i)
187 if (name == rtaudio_api_names[rtaudio_compiled_apis[i]][0])
188 return rtaudio_compiled_apis[i];
189 return RtAudio::UNSPECIFIED;
192 void RtAudio :: openRtApi( RtAudio::Api api )
// Instantiate the RtApi subclass matching 'api'.  Each candidate is
// guarded by its compile-time definition, so at most one branch is
// active; rtapi_ is left unset when 'api' was not compiled in.
198 #if defined(__UNIX_JACK__)
199 if ( api == UNIX_JACK )
200 rtapi_ = new RtApiJack();
202 #if defined(__LINUX_ALSA__)
203 if ( api == LINUX_ALSA )
204 rtapi_ = new RtApiAlsa();
206 #if defined(__LINUX_PULSE__)
207 if ( api == LINUX_PULSE )
208 rtapi_ = new RtApiPulse();
210 #if defined(__LINUX_OSS__)
211 if ( api == LINUX_OSS )
212 rtapi_ = new RtApiOss();
214 #if defined(__WINDOWS_ASIO__)
215 if ( api == WINDOWS_ASIO )
216 rtapi_ = new RtApiAsio();
218 #if defined(__WINDOWS_WASAPI__)
219 if ( api == WINDOWS_WASAPI )
220 rtapi_ = new RtApiWasapi();
222 #if defined(__WINDOWS_DS__)
223 if ( api == WINDOWS_DS )
224 rtapi_ = new RtApiDs();
226 #if defined(__MACOSX_CORE__)
227 if ( api == MACOSX_CORE )
228 rtapi_ = new RtApiCore();
230 #if defined(__RTAUDIO_DUMMY__)
231 if ( api == RTAUDIO_DUMMY )
232 rtapi_ = new RtApiDummy();
// Construct an RtAudio instance: try the requested API first, then fall
// back to scanning the compiled-in APIs for one that reports devices.
236 RtAudio :: RtAudio( RtAudio::Api api )
240 if ( api != UNSPECIFIED ) {
241 // Attempt to open the specified API.
243 if ( rtapi_ ) return;
245 // No compiled support for specified API value. Issue a debug
246 // warning and continue as if no API was specified.
247 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
250 // Iterate through the compiled APIs and return as soon as we find
251 // one with at least one device or we reach the end of the list.
252 std::vector< RtAudio::Api > apis;
253 getCompiledApi( apis );
254 for ( unsigned int i=0; i<apis.size(); i++ ) {
255 openRtApi( apis[i] );
256 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
259 if ( rtapi_ ) return;
261 // It should not be possible to get here because the preprocessor
262 // definition __RTAUDIO_DUMMY__ is automatically defined if no
263 // API-specific definitions are passed to the compiler. But just in
264 // case something weird happens, we'll throw an error.
265 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
266 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
// Destructor: releases the API-specific RtApi instance.
269 RtAudio :: ~RtAudio()
275 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
276 RtAudio::StreamParameters *inputParameters,
277 RtAudioFormat format, unsigned int sampleRate,
278 unsigned int *bufferFrames,
279 RtAudioCallback callback, void *userData,
280 RtAudio::StreamOptions *options,
281 RtAudioErrorCallback errorCallback )
// Thin forwarding wrapper: all validation and the actual device setup
// happen in the API-specific RtApi instance.
283 return rtapi_->openStream( outputParameters, inputParameters, format,
284 sampleRate, bufferFrames, callback,
285 userData, options, errorCallback );
288 // *************************************************** //
290 // Public RtApi definitions (see end of file for
291 // private or protected utility functions).
293 // *************************************************** //
// Body of the RtApi constructor (signature precedes this excerpt):
// default-initialize the shared stream bookkeeping.
297 stream_.state = STREAM_CLOSED;
298 stream_.mode = UNINITIALIZED;
299 stream_.apiHandle = 0;
// No conversion buffers allocated yet for either direction.
300 stream_.userBuffer[0] = 0;
301 stream_.userBuffer[1] = 0;
302 MUTEX_INITIALIZE( &stream_.mutex );
303 showWarnings_ = true;
304 firstErrorOccurred_ = false;
// Body of the RtApi destructor: release the stream mutex created in
// the constructor.
309 MUTEX_DESTROY( &stream_.mutex );
// Validate all user-supplied stream parameters, then probe/open the
// output and/or input halves via the subclass probeDeviceOpen().
312 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
313 RtAudio::StreamParameters *iParams,
314 RtAudioFormat format, unsigned int sampleRate,
315 unsigned int *bufferFrames,
316 RtAudioCallback callback, void *userData,
317 RtAudio::StreamOptions *options,
318 RtAudioErrorCallback errorCallback )
// Only one stream per RtApi instance is allowed.
320 if ( stream_.state != STREAM_CLOSED ) {
321 errorText_ = "RtApi::openStream: a stream is already open!";
322 error( RtAudioError::INVALID_USE );
326 // Clear stream information potentially left from a previously open stream.
// A non-NULL parameter structure must request at least one channel.
329 if ( oParams && oParams->nChannels < 1 ) {
330 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
331 error( RtAudioError::INVALID_USE );
335 if ( iParams && iParams->nChannels < 1 ) {
336 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
337 error( RtAudioError::INVALID_USE );
341 if ( oParams == NULL && iParams == NULL ) {
342 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
343 error( RtAudioError::INVALID_USE );
// formatBytes() returns 0 for unknown format flags.
347 if ( formatBytes(format) == 0 ) {
348 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
349 error( RtAudioError::INVALID_USE );
// Range-check the requested device IDs against the device count.
353 unsigned int nDevices = getDeviceCount();
354 unsigned int oChannels = 0;
356 oChannels = oParams->nChannels;
357 if ( oParams->deviceId >= nDevices ) {
358 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
359 error( RtAudioError::INVALID_USE );
364 unsigned int iChannels = 0;
366 iChannels = iParams->nChannels;
367 if ( iParams->deviceId >= nDevices ) {
368 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
369 error( RtAudioError::INVALID_USE );
// Open the output half first, then the input half.
376 if ( oChannels > 0 ) {
378 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
379 sampleRate, format, bufferFrames, options );
380 if ( result == false ) {
381 error( RtAudioError::SYSTEM_ERROR );
386 if ( iChannels > 0 ) {
388 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
389 sampleRate, format, bufferFrames, options );
390 if ( result == false ) {
// Undo the already-opened output half before reporting failure.
391 if ( oChannels > 0 ) closeStream();
392 error( RtAudioError::SYSTEM_ERROR );
// Success: record the callback information and mark the stream stopped.
397 stream_.callbackInfo.callback = (void *) callback;
398 stream_.callbackInfo.userData = userData;
399 stream_.callbackInfo.errorCallback = (void *) errorCallback;
// Report back the buffer count actually negotiated by the back end.
401 if ( options ) options->numberOfBuffers = stream_.nBuffers;
402 stream_.state = STREAM_STOPPED;
405 unsigned int RtApi :: getDefaultInputDevice( void )
407 // Should be implemented in subclasses if possible.
411 unsigned int RtApi :: getDefaultOutputDevice( void )
413 // Should be implemented in subclasses if possible.
// Base-class stub; every concrete back end overrides this.
417 void RtApi :: closeStream( void )
419 // MUST be implemented in subclasses!
// Base-class stub; every concrete back end overrides this to open the
// given device for the given mode/channels/rate/format.
423 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
424 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
425 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
426 RtAudio::StreamOptions * /*options*/ )
428 // MUST be implemented in subclasses!
432 void RtApi :: tickStreamTime( void )
434 // Subclasses that do not provide their own implementation of
435 // getStreamTime should call this function once per buffer I/O to
436 // provide basic stream time support.
// Advance the running stream time by one buffer's duration (seconds).
438 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
// Remember when this tick happened so getStreamTime() can interpolate.
440 #if defined( HAVE_GETTIMEOFDAY )
441 gettimeofday( &stream_.lastTickTimestamp, NULL );
// Total stream latency in frames: output latency plus input latency,
// depending on the stream mode.
445 long RtApi :: getStreamLatency( void )
449 long totalLatency = 0;
450 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
451 totalLatency = stream_.latency[0];
452 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
453 totalLatency += stream_.latency[1];
458 double RtApi :: getStreamTime( void )
462 #if defined( HAVE_GETTIMEOFDAY )
463 // Return a very accurate estimate of the stream time by
464 // adding in the elapsed time since the last tick.
// If the stream isn't running (or hasn't ticked yet) there is nothing
// to interpolate; report the stored time directly.
468 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
469 return stream_.streamTime;
471 gettimeofday( &now, NULL );
472 then = stream_.lastTickTimestamp;
// Stored tick time plus wall-clock seconds elapsed since that tick.
473 return stream_.streamTime +
474 ((now.tv_sec + 0.000001 * now.tv_usec) -
475 (then.tv_sec + 0.000001 * then.tv_usec));
// Without gettimeofday(), granularity is one buffer period.
477 return stream_.streamTime;
// Reset the stream time to a user-supplied value (seconds).
481 void RtApi :: setStreamTime( double time )
486 stream_.streamTime = time;
// Re-anchor the interpolation timestamp used by getStreamTime().
487 #if defined( HAVE_GETTIMEOFDAY )
488 gettimeofday( &stream_.lastTickTimestamp, NULL );
// Return the sample rate the open stream is actually running at.
492 unsigned int RtApi :: getStreamSampleRate( void )
496 return stream_.sampleRate;
500 // *************************************************** //
502 // OS/API-specific methods.
504 // *************************************************** //
506 #if defined(__MACOSX_CORE__)
508 // The OS X CoreAudio API is designed to use a separate callback
509 // procedure for each of its audio devices. A single RtAudio duplex
510 // stream using two different devices is supported here, though it
511 // cannot be guaranteed to always behave correctly because we cannot
512 // synchronize these two callbacks.
514 // A property listener is installed for over/underrun information.
515 // However, no functionality is currently provided to allow property
516 // listeners to trigger user handlers because it is unclear what could
517 // be done if a critical stream parameter (buffer size, sample rate,
518 // device disconnect) notification arrived. The listeners entail
519 // quite a bit of extra code and most likely, a user program wouldn't
520 // be prepared for the result anyway. However, we do provide a flag
521 // to the client callback function to inform of an over/underrun.
523 // A structure to hold various information related to the CoreAudio API
// implementation; stored in stream_.apiHandle.
526 AudioDeviceID id[2]; // device ids
527 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
528 AudioDeviceIOProcID procId[2];
530 UInt32 iStream[2]; // device stream index (or first if using multiple)
531 UInt32 nStreams[2]; // number of streams to use
534 pthread_cond_t condition;
535 int drainCounter; // Tracks callback counts when draining
536 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Constructor initializer: one stream per direction, no device ids,
// no xruns recorded yet.
539 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
542 RtApiCore:: RtApiCore()
544 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
545 // This is a largely undocumented but absolutely necessary
546 // requirement starting with OS-X 10.6. If not called, queries and
547 // updates to various audio device properties are not handled
// Setting a NULL run loop tells CoreAudio to use its own thread for
// property notifications.
549 CFRunLoopRef theRunLoop = NULL;
550 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
551 kAudioObjectPropertyScopeGlobal,
552 kAudioObjectPropertyElementMaster };
553 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
// Non-fatal: emit a warning and continue if the property can't be set.
554 if ( result != noErr ) {
555 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
556 error( RtAudioError::WARNING );
561 RtApiCore :: ~RtApiCore()
563 // The subclass destructor gets called before the base class
564 // destructor, so close an existing stream before deallocating
565 // apiDeviceId memory.
566 if ( stream_.state != STREAM_CLOSED ) closeStream();
569 unsigned int RtApiCore :: getDeviceCount( void )
571 // Find out how many audio devices there are, if any.
573 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
574 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
575 if ( result != noErr ) {
576 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
577 error( RtAudioError::WARNING );
// The property data is an array of AudioDeviceIDs, so the byte size
// divided by the element size yields the device count.
581 return dataSize / sizeof( AudioDeviceID );
584 unsigned int RtApiCore :: getDefaultInputDevice( void )
586 unsigned int nDevices = getDeviceCount();
// With zero or one device the answer is trivially index 0.
587 if ( nDevices <= 1 ) return 0;
// Query the system's default input AudioDeviceID.
590 UInt32 dataSize = sizeof( AudioDeviceID );
591 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
592 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
593 if ( result != noErr ) {
594 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
595 error( RtAudioError::WARNING );
// Fetch the full device list and map the default id to its index.
599 dataSize *= nDevices;
600 AudioDeviceID deviceList[ nDevices ];
601 property.mSelector = kAudioHardwarePropertyDevices;
602 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
603 if ( result != noErr ) {
604 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
605 error( RtAudioError::WARNING );
609 for ( unsigned int i=0; i<nDevices; i++ )
610 if ( id == deviceList[i] ) return i;
// The default id was not found in the enumerated list.
612 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
613 error( RtAudioError::WARNING );
617 unsigned int RtApiCore :: getDefaultOutputDevice( void )
619 unsigned int nDevices = getDeviceCount();
// With zero or one device the answer is trivially index 0.
620 if ( nDevices <= 1 ) return 0;
// Query the system's default output AudioDeviceID.
623 UInt32 dataSize = sizeof( AudioDeviceID );
624 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
625 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
626 if ( result != noErr ) {
627 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
628 error( RtAudioError::WARNING );
// Fetch the full device list and map the default id to its index.
632 dataSize = sizeof( AudioDeviceID ) * nDevices;
633 AudioDeviceID deviceList[ nDevices ];
634 property.mSelector = kAudioHardwarePropertyDevices;
635 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
636 if ( result != noErr ) {
637 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
638 error( RtAudioError::WARNING );
642 for ( unsigned int i=0; i<nDevices; i++ )
643 if ( id == deviceList[i] ) return i;
// The default id was not found in the enumerated list.
645 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
646 error( RtAudioError::WARNING );
// Build a DeviceInfo record (name, channel counts, sample rates,
// formats, default flags) for the CoreAudio device at index 'device'.
650 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
652 RtAudio::DeviceInfo info;
656 unsigned int nDevices = getDeviceCount();
657 if ( nDevices == 0 ) {
658 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
659 error( RtAudioError::INVALID_USE );
663 if ( device >= nDevices ) {
664 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
665 error( RtAudioError::INVALID_USE );
// Translate the RtAudio device index into a CoreAudio AudioDeviceID.
669 AudioDeviceID deviceList[ nDevices ];
670 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
671 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
672 kAudioObjectPropertyScopeGlobal,
673 kAudioObjectPropertyElementMaster };
674 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
675 0, NULL, &dataSize, (void *) &deviceList );
676 if ( result != noErr ) {
677 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
678 error( RtAudioError::WARNING );
682 AudioDeviceID id = deviceList[ device ];
684 // Get the device name.
// Device name is "<manufacturer>: <name>".  Fetch the manufacturer
// string first.
687 dataSize = sizeof( CFStringRef );
688 property.mSelector = kAudioObjectPropertyManufacturer;
689 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
690 if ( result != noErr ) {
691 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
692 errorText_ = errorStream_.str();
693 error( RtAudioError::WARNING );
697 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
// Worst-case UTF-8 expansion assumed here is 3 bytes per character.
698 int length = CFStringGetLength(cfname);
699 char *mname = (char *)malloc(length * 3 + 1);
700 #if defined( UNICODE ) || defined( _UNICODE )
701 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
703 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
705 info.name.append( (const char *)mname, strlen(mname) );
706 info.name.append( ": " );
// Now append the device's own name.
710 property.mSelector = kAudioObjectPropertyName;
711 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
712 if ( result != noErr ) {
713 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
714 errorText_ = errorStream_.str();
715 error( RtAudioError::WARNING );
719 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
720 length = CFStringGetLength(cfname);
721 char *name = (char *)malloc(length * 3 + 1);
722 #if defined( UNICODE ) || defined( _UNICODE )
723 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
725 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
727 info.name.append( (const char *)name, strlen(name) );
731 // Get the output stream "configuration".
732 AudioBufferList *bufferList = nil;
733 property.mSelector = kAudioDevicePropertyStreamConfiguration;
734 property.mScope = kAudioDevicePropertyScopeOutput;
735 // property.mElement = kAudioObjectPropertyElementWildcard;
737 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
738 if ( result != noErr || dataSize == 0 ) {
739 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
740 errorText_ = errorStream_.str();
741 error( RtAudioError::WARNING );
745 // Allocate the AudioBufferList.
746 bufferList = (AudioBufferList *) malloc( dataSize );
747 if ( bufferList == NULL ) {
748 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
749 error( RtAudioError::WARNING );
753 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
754 if ( result != noErr || dataSize == 0 ) {
756 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
757 errorText_ = errorStream_.str();
758 error( RtAudioError::WARNING );
762 // Get output channel information.
// Output channels are summed across all of the device's streams.
763 unsigned int i, nStreams = bufferList->mNumberBuffers;
764 for ( i=0; i<nStreams; i++ )
765 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
768 // Get the input stream "configuration".
769 property.mScope = kAudioDevicePropertyScopeInput;
770 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
771 if ( result != noErr || dataSize == 0 ) {
772 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
773 errorText_ = errorStream_.str();
774 error( RtAudioError::WARNING );
778 // Allocate the AudioBufferList.
779 bufferList = (AudioBufferList *) malloc( dataSize );
780 if ( bufferList == NULL ) {
781 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
782 error( RtAudioError::WARNING );
786 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
787 if (result != noErr || dataSize == 0) {
789 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
790 errorText_ = errorStream_.str();
791 error( RtAudioError::WARNING );
795 // Get input channel information.
796 nStreams = bufferList->mNumberBuffers;
797 for ( i=0; i<nStreams; i++ )
798 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
801 // If device opens for both playback and capture, we determine the channels.
802 if ( info.outputChannels > 0 && info.inputChannels > 0 )
803 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
805 // Probe the device sample rates.
806 bool isInput = false;
807 if ( info.outputChannels == 0 ) isInput = true;
809 // Determine the supported sample rates.
810 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
811 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
812 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
813 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
814 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
815 errorText_ = errorStream_.str();
816 error( RtAudioError::WARNING );
820 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
821 AudioValueRange rangeList[ nRanges ];
822 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
823 if ( result != kAudioHardwareNoError ) {
824 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
825 errorText_ = errorStream_.str();
826 error( RtAudioError::WARNING );
830 // The sample rate reporting mechanism is a bit of a mystery. It
831 // seems that it can either return individual rates or a range of
832 // rates. I assume that if the min / max range values are the same,
833 // then that represents a single supported rate and if the min / max
834 // range values are different, the device supports an arbitrary
835 // range of values (though there might be multiple ranges, so we'll
836 // use the most conservative range).
837 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
838 bool haveValueRange = false;
839 info.sampleRates.clear();
840 for ( UInt32 i=0; i<nRanges; i++ ) {
// Equal min/max: a single discrete supported rate.
841 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
842 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
843 info.sampleRates.push_back( tmpSr );
// Preferred rate: the highest supported rate not exceeding 48 kHz.
845 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
846 info.preferredSampleRate = tmpSr;
// Differing min/max: narrow the conservative [minimumRate, maximumRate]
// window to the intersection of all reported ranges.
849 haveValueRange = true;
850 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
851 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
// For a continuous range, advertise the standard rates that fall inside.
855 if ( haveValueRange ) {
856 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
857 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
858 info.sampleRates.push_back( SAMPLE_RATES[k] );
860 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
861 info.preferredSampleRate = SAMPLE_RATES[k];
866 // Sort and remove any redundant values
867 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
868 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
870 if ( info.sampleRates.size() == 0 ) {
871 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
872 errorText_ = errorStream_.str();
873 error( RtAudioError::WARNING );
877 // CoreAudio always uses 32-bit floating point data for PCM streams.
878 // Thus, any other "physical" formats supported by the device are of
879 // no interest to the client.
880 info.nativeFormats = RTAUDIO_FLOAT32;
// Flag whether this device is the system default for either direction.
882 if ( info.outputChannels > 0 )
883 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
884 if ( info.inputChannels > 0 )
885 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// CoreAudio IOProc trampoline: unpack the CallbackInfo and dispatch to
// the owning RtApiCore instance's callbackEvent().
891 static OSStatus callbackHandler( AudioDeviceID inDevice,
892 const AudioTimeStamp* /*inNow*/,
893 const AudioBufferList* inInputData,
894 const AudioTimeStamp* /*inInputTime*/,
895 AudioBufferList* outOutputData,
896 const AudioTimeStamp* /*inOutputTime*/,
899 CallbackInfo *info = (CallbackInfo *) infoPointer;
901 RtApiCore *object = (RtApiCore *) info->object;
// A false return from callbackEvent signals a processing failure.
902 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
903 return kAudioHardwareUnspecifiedError;
905 return kAudioHardwareNoError;
// Property listener: records processor-overload (xrun) notifications in
// the stream's CoreHandle ([0] = playback, [1] = capture).
908 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
910 const AudioObjectPropertyAddress properties[],
911 void* handlePointer )
913 CoreHandle *handle = (CoreHandle *) handlePointer;
914 for ( UInt32 i=0; i<nAddresses; i++ ) {
915 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
916 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
917 handle->xrun[1] = true;
919 handle->xrun[0] = true;
923 return kAudioHardwareNoError;
// Property listener: reads the device's current nominal sample rate
// into the Float64 pointed to by 'ratePointer' whenever it changes.
926 static OSStatus rateListener( AudioObjectID inDevice,
927 UInt32 /*nAddresses*/,
928 const AudioObjectPropertyAddress /*properties*/[],
931 Float64 *rate = (Float64 *) ratePointer;
932 UInt32 dataSize = sizeof( Float64 );
933 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
934 kAudioObjectPropertyScopeGlobal,
935 kAudioObjectPropertyElementMaster };
936 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
937 return kAudioHardwareNoError;
// Open and configure a CoreAudio device for one direction (OUTPUT or INPUT)
// of an RtAudio stream: validate the device index, locate the CoreAudio
// stream(s) covering the requested channels, negotiate buffer size, sample
// rate, virtual and physical formats, then allocate the CoreHandle and the
// user/device conversion buffers and register the IOProc callback.
// On failure it sets errorText_/errorStream_ (the early-return lines are not
// visible in this extract) and falls through to the cleanup code at the end.
940 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
941 unsigned int firstChannel, unsigned int sampleRate,
942 RtAudioFormat format, unsigned int *bufferSize,
943 RtAudio::StreamOptions *options )
946 unsigned int nDevices = getDeviceCount();
947 if ( nDevices == 0 ) {
948 // This should not happen because a check is made before this function is called.
949 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
953 if ( device >= nDevices ) {
954 // This should not happen because a check is made before this function is called.
955 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
// NOTE(review): variable-length array is a GCC/Clang extension, not standard
// C++ — a std::vector<AudioDeviceID> would be portable.
959 AudioDeviceID deviceList[ nDevices ];
960 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
961 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
962 kAudioObjectPropertyScopeGlobal,
963 kAudioObjectPropertyElementMaster };
964 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
965 0, NULL, &dataSize, (void *) &deviceList );
966 if ( result != noErr ) {
967 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
971 AudioDeviceID id = deviceList[ device ];
973 // Setup for stream mode.
974 bool isInput = false;
975 if ( mode == INPUT ) {
977 property.mScope = kAudioDevicePropertyScopeInput;
980 property.mScope = kAudioDevicePropertyScopeOutput;
982 // Get the stream "configuration".
983 AudioBufferList *bufferList = nil;
985 property.mSelector = kAudioDevicePropertyStreamConfiguration;
986 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
987 if ( result != noErr || dataSize == 0 ) {
988 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
989 errorText_ = errorStream_.str();
993 // Allocate the AudioBufferList.
994 bufferList = (AudioBufferList *) malloc( dataSize );
995 if ( bufferList == NULL ) {
996 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
1000 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1001 if (result != noErr || dataSize == 0) {
1003 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1004 errorText_ = errorStream_.str();
1008 // Search for one or more streams that contain the desired number of
1009 // channels. CoreAudio devices can have an arbitrary number of
1010 // streams and each stream can have an arbitrary number of channels.
1011 // For each stream, a single buffer of interleaved samples is
1012 // provided. RtAudio prefers the use of one stream of interleaved
1013 // data or multiple consecutive single-channel streams. However, we
1014 // now support multiple consecutive multi-channel streams of
1015 // interleaved data as well.
1016 UInt32 iStream, offsetCounter = firstChannel;
1017 UInt32 nStreams = bufferList->mNumberBuffers;
1018 bool monoMode = false;
1019 bool foundStream = false;
1021 // First check that the device supports the requested number of
1023 UInt32 deviceChannels = 0;
1024 for ( iStream=0; iStream<nStreams; iStream++ )
1025 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1027 if ( deviceChannels < ( channels + firstChannel ) ) {
1029 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1030 errorText_ = errorStream_.str();
1034 // Look for a single stream meeting our needs.
1035 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
1036 for ( iStream=0; iStream<nStreams; iStream++ ) {
1037 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1038 if ( streamChannels >= channels + offsetCounter ) {
1039 firstStream = iStream;
1040 channelOffset = offsetCounter;
// A stream that partially covers the offset terminates the single-stream
// search; otherwise skip whole streams worth of channels.
1044 if ( streamChannels > offsetCounter ) break;
1045 offsetCounter -= streamChannels;
1048 // If we didn't find a single stream above, then we should be able
1049 // to meet the channel specification with multiple streams.
1050 if ( foundStream == false ) {
1052 offsetCounter = firstChannel;
1053 for ( iStream=0; iStream<nStreams; iStream++ ) {
1054 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1055 if ( streamChannels > offsetCounter ) break;
1056 offsetCounter -= streamChannels;
1059 firstStream = iStream;
1060 channelOffset = offsetCounter;
1061 Int32 channelCounter = channels + offsetCounter - streamChannels;
// monoMode is cleared as soon as any spanned stream carries >1 channel.
1063 if ( streamChannels > 1 ) monoMode = false;
1064 while ( channelCounter > 0 ) {
1065 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1066 if ( streamChannels > 1 ) monoMode = false;
1067 channelCounter -= streamChannels;
1074 // Determine the buffer size.
1075 AudioValueRange bufferRange;
1076 dataSize = sizeof( AudioValueRange );
1077 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1078 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1080 if ( result != noErr ) {
1081 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1082 errorText_ = errorStream_.str();
// Clamp the requested buffer size into the device-supported range;
// RTAUDIO_MINIMIZE_LATENCY forces the minimum.
1086 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1087 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1088 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1090 // Set the buffer size. For multiple streams, I'm assuming we only
1091 // need to make this setting for the master channel.
1092 UInt32 theSize = (UInt32) *bufferSize;
1093 dataSize = sizeof( UInt32 );
1094 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1095 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1097 if ( result != noErr ) {
1098 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1099 errorText_ = errorStream_.str();
1103 // If attempting to setup a duplex stream, the bufferSize parameter
1104 // MUST be the same in both directions!
1105 *bufferSize = theSize;
1106 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1107 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1108 errorText_ = errorStream_.str();
1112 stream_.bufferSize = *bufferSize;
1113 stream_.nBuffers = 1;
1115 // Try to set "hog" mode ... it's not clear to me this is working.
1116 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1118 dataSize = sizeof( hog_pid );
1119 property.mSelector = kAudioDevicePropertyHogMode;
1120 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1121 if ( result != noErr ) {
1122 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1123 errorText_ = errorStream_.str();
// Only take exclusive ownership if some other process currently holds it.
1127 if ( hog_pid != getpid() ) {
1129 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1130 if ( result != noErr ) {
1131 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1132 errorText_ = errorStream_.str();
1138 // Check and if necessary, change the sample rate for the device.
1139 Float64 nominalRate;
1140 dataSize = sizeof( Float64 );
1141 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1142 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1143 if ( result != noErr ) {
1144 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1145 errorText_ = errorStream_.str();
1149 // Only change the sample rate if off by more than 1 Hz.
1150 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1152 // Set a property listener for the sample rate change
1153 Float64 reportedRate = 0.0;
1154 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
// The listener writes the new rate into reportedRate asynchronously; the
// polling loop below waits for it to match the value we set.
1155 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1156 if ( result != noErr ) {
1157 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1158 errorText_ = errorStream_.str();
1162 nominalRate = (Float64) sampleRate;
1163 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1164 if ( result != noErr ) {
1165 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1166 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1167 errorText_ = errorStream_.str();
1171 // Now wait until the reported nominal rate is what we just set.
// Poll with a ~5 second cap (5000 us steps; the sleep call itself is in an
// elided line of this extract).
1172 UInt32 microCounter = 0;
1173 while ( reportedRate != nominalRate ) {
1174 microCounter += 5000;
1175 if ( microCounter > 5000000 ) break;
1179 // Remove the property listener.
1180 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1182 if ( microCounter > 5000000 ) {
1183 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1184 errorText_ = errorStream_.str();
1189 // Now set the stream format for all streams. Also, check the
1190 // physical format of the device and change that if necessary.
1191 AudioStreamBasicDescription description;
1192 dataSize = sizeof( AudioStreamBasicDescription );
1193 property.mSelector = kAudioStreamPropertyVirtualFormat;
1194 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1195 if ( result != noErr ) {
1196 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1197 errorText_ = errorStream_.str();
1201 // Set the sample rate and data format id. However, only make the
1202 // change if the sample rate is not within 1.0 of the desired
1203 // rate and the format is not linear pcm.
1204 bool updateFormat = false;
1205 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1206 description.mSampleRate = (Float64) sampleRate;
1207 updateFormat = true;
1210 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1211 description.mFormatID = kAudioFormatLinearPCM;
1212 updateFormat = true;
1215 if ( updateFormat ) {
1216 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1217 if ( result != noErr ) {
1218 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1219 errorText_ = errorStream_.str();
1224 // Now check the physical format.
1225 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1226 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1227 if ( result != noErr ) {
1228 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1229 errorText_ = errorStream_.str();
1233 //std::cout << "Current physical stream format:" << std::endl;
1234 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1235 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1236 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1237 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
1239 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1240 description.mFormatID = kAudioFormatLinearPCM;
1241 //description.mSampleRate = (Float64) sampleRate;
1242 AudioStreamBasicDescription testDescription = description;
1245 // We'll try higher bit rates first and then work our way down.
// NOTE(review): element type pair<UInt32, UInt32> truncates the Float32
// discriminators 24.2/24.4 pushed below to 24, collapsing the two
// "24-bit in 4 bytes" entries into duplicates of the packed 24-bit entry.
// Upstream RtAudio declares this as std::pair<Float32, UInt32> — confirm.
1246 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1247 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1248 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1249 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1250 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1251 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1252 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1253 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1254 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1255 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1256 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1257 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1258 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1260 bool setPhysicalFormat = false;
1261 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1262 testDescription = description;
1263 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1264 testDescription.mFormatFlags = physicalFormats[i].second;
// NOTE(review): '~' here is bitwise NOT, so this operand is nonzero (truthy)
// for almost every flag value — logical '!' was surely intended to test
// "24-bit and NOT packed". As written, every 24-bit entry takes the
// 4-bytes-per-sample branch.
1265 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1266 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1268 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1269 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1270 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1271 if ( result == noErr ) {
1272 setPhysicalFormat = true;
1273 //std::cout << "Updated physical stream format:" << std::endl;
1274 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1275 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1276 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1277 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1282 if ( !setPhysicalFormat ) {
1283 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1284 errorText_ = errorStream_.str();
1287 } // done setting virtual/physical formats.
1289 // Get the stream / device latency.
1291 dataSize = sizeof( UInt32 );
1292 property.mSelector = kAudioDevicePropertyLatency;
// Latency is optional on some devices; failure to read it is only a WARNING.
1293 if ( AudioObjectHasProperty( id, &property ) == true ) {
1294 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1295 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1297 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1298 errorText_ = errorStream_.str();
1299 error( RtAudioError::WARNING );
1303 // Byte-swapping: According to AudioHardware.h, the stream data will
1304 // always be presented in native-endian format, so we should never
1305 // need to byte swap.
1306 stream_.doByteSwap[mode] = false;
1308 // From the CoreAudio documentation, PCM data must be supplied as
1310 stream_.userFormat = format;
1311 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1313 if ( streamCount == 1 )
1314 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1315 else // multiple streams
1316 stream_.nDeviceChannels[mode] = channels;
1317 stream_.nUserChannels[mode] = channels;
1318 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1319 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1320 else stream_.userInterleaved = true;
1321 stream_.deviceInterleaved[mode] = true;
1322 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1324 // Set flags for buffer conversion.
1325 stream_.doConvertBuffer[mode] = false;
1326 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1327 stream_.doConvertBuffer[mode] = true;
1328 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1329 stream_.doConvertBuffer[mode] = true;
1330 if ( streamCount == 1 ) {
1331 if ( stream_.nUserChannels[mode] > 1 &&
1332 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1333 stream_.doConvertBuffer[mode] = true;
1335 else if ( monoMode && stream_.userInterleaved )
1336 stream_.doConvertBuffer[mode] = true;
1338 // Allocate our CoreHandle structure for the stream.
// First direction opened allocates the handle; a second (duplex) direction
// reuses the existing stream_.apiHandle.
1339 CoreHandle *handle = 0;
1340 if ( stream_.apiHandle == 0 ) {
1342 handle = new CoreHandle;
1344 catch ( std::bad_alloc& ) {
1345 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1349 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1350 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1353 stream_.apiHandle = (void *) handle;
1356 handle = (CoreHandle *) stream_.apiHandle;
1357 handle->iStream[mode] = firstStream;
1358 handle->nStreams[mode] = streamCount;
1359 handle->id[mode] = id;
1361 // Allocate necessary internal buffers.
1362 unsigned long bufferBytes;
1363 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1364 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1365 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
// NOTE(review): this memset dereferences the malloc result BEFORE the NULL
// check on the next line — on allocation failure it crashes instead of
// reporting the error. The check should precede the memset (or use calloc,
// as the commented-out line above did).
1366 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1367 if ( stream_.userBuffer[mode] == NULL ) {
1368 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1372 // If possible, we will make use of the CoreAudio stream buffers as
1373 // "device buffers". However, we can't do this if using multiple
1375 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1377 bool makeBuffer = true;
1378 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
// For duplex streams, reuse the output-side device buffer if it is already
// at least as large as the input side needs.
1379 if ( mode == INPUT ) {
1380 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1381 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1382 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1387 bufferBytes *= *bufferSize;
1388 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1389 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1390 if ( stream_.deviceBuffer == NULL ) {
1391 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1397 stream_.sampleRate = sampleRate;
1398 stream_.device[mode] = device;
1399 stream_.state = STREAM_STOPPED;
1400 stream_.callbackInfo.object = (void *) this;
1402 // Setup the buffer conversion information structure.
1403 if ( stream_.doConvertBuffer[mode] ) {
1404 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1405 else setConvertInfo( mode, channelOffset );
1408 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1409 // Only one callback procedure per device.
1410 stream_.mode = DUPLEX;
1412 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1413 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1415 // deprecated in favor of AudioDeviceCreateIOProcID()
1416 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1418 if ( result != noErr ) {
1419 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1420 errorText_ = errorStream_.str();
1423 if ( stream_.mode == OUTPUT && mode == INPUT )
1424 stream_.mode = DUPLEX;
1426 stream_.mode = mode;
1429 // Setup the device property listener for over/underload.
1430 property.mSelector = kAudioDeviceProcessorOverload;
1431 property.mScope = kAudioObjectPropertyScopeGlobal;
1432 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
// Error-cleanup path: tear down the condition variable, handle, and any
// allocated buffers, then mark the stream closed.
1438 pthread_cond_destroy( &handle->condition );
1440 stream_.apiHandle = 0;
1443 for ( int i=0; i<2; i++ ) {
1444 if ( stream_.userBuffer[i] ) {
1445 free( stream_.userBuffer[i] );
1446 stream_.userBuffer[i] = 0;
1450 if ( stream_.deviceBuffer ) {
1451 free( stream_.deviceBuffer );
1452 stream_.deviceBuffer = 0;
1455 stream_.state = STREAM_CLOSED;
// Close the open stream: remove the xrun (processor-overload) property
// listeners, stop the device(s) if running, destroy/remove the IOProc for
// each direction, free user/device buffers, destroy the condition variable
// and CoreHandle, and reset the stream state to CLOSED.
// A CLOSED stream only triggers a WARNING, not an error.
1459 void RtApiCore :: closeStream( void )
1461 if ( stream_.state == STREAM_CLOSED ) {
1462 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1463 error( RtAudioError::WARNING );
// NOTE(review): handle is dereferenced below without a null check —
// presumably apiHandle is always non-null once a stream is open; confirm.
1467 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1468 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1470 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1471 kAudioObjectPropertyScopeGlobal,
1472 kAudioObjectPropertyElementMaster };
1474 property.mSelector = kAudioDeviceProcessorOverload;
1475 property.mScope = kAudioObjectPropertyScopeGlobal;
1476 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1477 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1478 error( RtAudioError::WARNING );
1481 if ( stream_.state == STREAM_RUNNING )
1482 AudioDeviceStop( handle->id[0], callbackHandler );
1483 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1484 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1486 // deprecated in favor of AudioDeviceDestroyIOProcID()
1487 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Input side is torn down separately only when it is a distinct device
// (pure INPUT, or DUPLEX across two different devices).
1491 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1493 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1494 kAudioObjectPropertyScopeGlobal,
1495 kAudioObjectPropertyElementMaster };
1497 property.mSelector = kAudioDeviceProcessorOverload;
1498 property.mScope = kAudioObjectPropertyScopeGlobal;
1499 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1500 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1501 error( RtAudioError::WARNING );
1504 if ( stream_.state == STREAM_RUNNING )
1505 AudioDeviceStop( handle->id[1], callbackHandler );
1506 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1507 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1509 // deprecated in favor of AudioDeviceDestroyIOProcID()
1510 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Release both user buffers (index 0 = output, 1 = input) and the shared
// device buffer.
1514 for ( int i=0; i<2; i++ ) {
1515 if ( stream_.userBuffer[i] ) {
1516 free( stream_.userBuffer[i] );
1517 stream_.userBuffer[i] = 0;
1521 if ( stream_.deviceBuffer ) {
1522 free( stream_.deviceBuffer );
1523 stream_.deviceBuffer = 0;
1526 // Destroy pthread condition variable.
1527 pthread_cond_destroy( &handle->condition );
1529 stream_.apiHandle = 0;
1531 stream_.mode = UNINITIALIZED;
1532 stream_.state = STREAM_CLOSED;
// Start the audio IOProc(s) for the open stream via AudioDeviceStart.
// Already-running streams produce only a WARNING. The input device is
// started separately only when it is distinct from the output device.
// On any OS error, falls through to error( SYSTEM_ERROR ) at the end.
1535 void RtApiCore :: startStream( void )
1538 if ( stream_.state == STREAM_RUNNING ) {
1539 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1540 error( RtAudioError::WARNING );
1544 OSStatus result = noErr;
1545 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1546 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1548 result = AudioDeviceStart( handle->id[0], callbackHandler );
1549 if ( result != noErr ) {
1550 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1551 errorText_ = errorStream_.str();
1556 if ( stream_.mode == INPUT ||
1557 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1559 result = AudioDeviceStart( handle->id[1], callbackHandler );
1560 if ( result != noErr ) {
1561 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1562 errorText_ = errorStream_.str();
// Reset drain bookkeeping so a fresh stop/abort cycle works, then mark
// the stream RUNNING.
1567 handle->drainCounter = 0;
1568 handle->internalDrain = false;
1569 stream_.state = STREAM_RUNNING;
1572 if ( result == noErr ) return;
1573 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream, draining output first: for OUTPUT/DUPLEX the method sets
// drainCounter and blocks on the handle's condition variable until the
// callback signals the drain is complete, then stops the device(s) with
// AudioDeviceStop. Already-stopped streams produce only a WARNING.
1576 void RtApiCore :: stopStream( void )
1579 if ( stream_.state == STREAM_STOPPED ) {
1580 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1581 error( RtAudioError::WARNING );
1585 OSStatus result = noErr;
1586 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1587 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain in progress: request one (value 2 =
// externally-initiated) and wait for callbackEvent to signal completion.
// pthread_cond_wait presumably runs with stream_.mutex held by the caller
// path (the lock/unlock lines are not visible in this extract) — confirm.
1589 if ( handle->drainCounter == 0 ) {
1590 handle->drainCounter = 2;
1591 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1594 result = AudioDeviceStop( handle->id[0], callbackHandler );
1595 if ( result != noErr ) {
1596 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1597 errorText_ = errorStream_.str();
1602 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1604 result = AudioDeviceStop( handle->id[1], callbackHandler );
1605 if ( result != noErr ) {
1606 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1607 errorText_ = errorStream_.str();
1612 stream_.state = STREAM_STOPPED;
1615 if ( result == noErr ) return;
1616 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream without draining queued output: setting drainCounter to 2
// makes callbackEvent write zeros instead of invoking the user callback.
// (The remainder of this function is not visible in this extract.)
1619 void RtApiCore :: abortStream( void )
1622 if ( stream_.state == STREAM_STOPPED ) {
1623 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1624 error( RtAudioError::WARNING );
1628 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1629 handle->drainCounter = 2;
1634 // This function will be called by a spawned thread when the user
1635 // callback function signals that the stream should be stopped or
1636 // aborted. It is better to handle it this way because the
1637 // callbackEvent() function probably should return before the AudioDeviceStop()
1638 // function is called.
// Thread entry point: ptr is the stream's CallbackInfo, whose object field
// holds the owning RtApiCore instance (set in probeDeviceOpen).
1639 static void *coreStopStream( void *ptr )
1641 CallbackInfo *info = (CallbackInfo *) ptr;
1642 RtApiCore *object = (RtApiCore *) info->object;
1644 object->stopStream();
1645 pthread_exit( NULL );
// Per-buffer CoreAudio IOProc worker, invoked from callbackHandler for one
// device. Drives the user callback (once per duplex pair, on the output
// device), copies/converts output data into outBufferList and input data out
// of inBufferList, handling four layouts per direction: single stream,
// multiple mono streams (non-interleaved "monoMode"), and multiple
// multi-channel interleaved streams. Also manages the drain-counter protocol
// used by stopStream()/abortStream(). Returns SUCCESS (via elided returns)
// or the stopped/closed early-outs below.
1648 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1649 const AudioBufferList *inBufferList,
1650 const AudioBufferList *outBufferList )
1652 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1653 if ( stream_.state == STREAM_CLOSED ) {
1654 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1655 error( RtAudioError::WARNING );
1659 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1660 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1662 // Check if we were draining the stream and signal is finished.
// drainCounter > 3 means the zero-fill passes are done: either spawn a
// thread to call stopStream() (internal drain requested by the user
// callback) or wake the external stopStream() blocked on the condition.
1663 if ( handle->drainCounter > 3 ) {
1664 ThreadHandle threadId;
1666 stream_.state = STREAM_STOPPING;
1667 if ( handle->internalDrain == true )
1668 pthread_create( &threadId, NULL, coreStopStream, info );
1669 else // external call to stopStream()
1670 pthread_cond_signal( &handle->condition );
1674 AudioDeviceID outputDevice = handle->id[0];
1676 // Invoke user callback to get fresh output data UNLESS we are
1677 // draining stream or duplex mode AND the input/output devices are
1678 // different AND this function is called for the input device.
1679 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1680 RtAudioCallback callback = (RtAudioCallback) info->callback;
1681 double streamTime = getStreamTime();
1682 RtAudioStreamStatus status = 0;
// Report and clear any xrun flags set asynchronously by xrunListener.
1683 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1684 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1685 handle->xrun[0] = false;
1687 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1688 status |= RTAUDIO_INPUT_OVERFLOW;
1689 handle->xrun[1] = false;
// Callback return codes: 2 = abort (drain immediately), 1 = stop after
// draining queued output.
1692 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1693 stream_.bufferSize, streamTime, status, info->userData );
1694 if ( cbReturnValue == 2 ) {
1695 stream_.state = STREAM_STOPPING;
1696 handle->drainCounter = 2;
1700 else if ( cbReturnValue == 1 ) {
1701 handle->drainCounter = 1;
1702 handle->internalDrain = true;
1706 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1708 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1710 if ( handle->nStreams[0] == 1 ) {
1711 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1713 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1715 else { // fill multiple streams with zeros
1716 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1717 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1719 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1723 else if ( handle->nStreams[0] == 1 ) {
1724 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1725 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1726 stream_.userBuffer[0], stream_.convertInfo[0] );
1728 else { // copy from user buffer
1729 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1730 stream_.userBuffer[0],
1731 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1734 else { // fill multiple streams
1735 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1736 if ( stream_.doConvertBuffer[0] ) {
1737 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1738 inBuffer = (Float32 *) stream_.deviceBuffer;
1741 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1742 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1743 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1744 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1745 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1748 else { // fill multiple multi-channel streams with interleaved data
1749 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1752 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1753 UInt32 inChannels = stream_.nUserChannels[0];
1754 if ( stream_.doConvertBuffer[0] ) {
1755 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1756 inChannels = stream_.nDeviceChannels[0];
// inOffset is the per-channel stride within the source buffer: 1 when
// interleaved (channels adjacent), bufferSize when planar.
1759 if ( inInterleaved ) inOffset = 1;
1760 else inOffset = stream_.bufferSize;
1762 channelsLeft = inChannels;
1763 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1765 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1766 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1769 // Account for possible channel offset in first stream
1770 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1771 streamChannels -= stream_.channelOffset[0];
1772 outJump = stream_.channelOffset[0];
1776 // Account for possible unfilled channels at end of the last stream
1777 if ( streamChannels > channelsLeft ) {
1778 outJump = streamChannels - channelsLeft;
1779 streamChannels = channelsLeft;
1782 // Determine input buffer offsets and skips
1783 if ( inInterleaved ) {
1784 inJump = inChannels;
1785 in += inChannels - channelsLeft;
1789 in += (inChannels - channelsLeft) * inOffset;
// NOTE(review): this inner frame loop declares a new 'i', shadowing the
// outer per-stream index 'i' — it works (the inner loop owns its counter)
// but is fragile; a distinct name would be clearer.
1792 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1793 for ( unsigned int j=0; j<streamChannels; j++ ) {
1794 *out++ = in[j*inOffset];
1799 channelsLeft -= streamChannels;
1805 // Don't bother draining input
1806 if ( handle->drainCounter ) {
1807 handle->drainCounter++;
1811 AudioDeviceID inputDevice;
1812 inputDevice = handle->id[1];
1813 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1815 if ( handle->nStreams[1] == 1 ) {
1816 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1817 convertBuffer( stream_.userBuffer[1],
1818 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1819 stream_.convertInfo[1] );
1821 else { // copy to user buffer
1822 memcpy( stream_.userBuffer[1],
1823 inBufferList->mBuffers[handle->iStream[1]].mData,
1824 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1827 else { // read from multiple streams
1828 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1829 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1831 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1832 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1833 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1834 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1835 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1838 else { // read from multiple multi-channel streams
// Mirror image of the output de-interleave logic above, gathering from
// several CoreAudio input streams into one user/device buffer.
1839 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1842 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1843 UInt32 outChannels = stream_.nUserChannels[1];
1844 if ( stream_.doConvertBuffer[1] ) {
1845 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1846 outChannels = stream_.nDeviceChannels[1];
1849 if ( outInterleaved ) outOffset = 1;
1850 else outOffset = stream_.bufferSize;
1852 channelsLeft = outChannels;
1853 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1855 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1856 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1859 // Account for possible channel offset in first stream
1860 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1861 streamChannels -= stream_.channelOffset[1];
1862 inJump = stream_.channelOffset[1];
1866 // Account for possible unread channels at end of the last stream
1867 if ( streamChannels > channelsLeft ) {
1868 inJump = streamChannels - channelsLeft;
1869 streamChannels = channelsLeft;
1872 // Determine output buffer offsets and skips
1873 if ( outInterleaved ) {
1874 outJump = outChannels;
1875 out += outChannels - channelsLeft;
1879 out += (outChannels - channelsLeft) * outOffset;
// NOTE(review): inner frame loop again shadows the outer stream index 'i'
// (same pattern as the output path).
1882 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1883 for ( unsigned int j=0; j<streamChannels; j++ ) {
1884 out[j*outOffset] = *in++;
1889 channelsLeft -= streamChannels;
1893 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1894 convertBuffer( stream_.userBuffer[1],
1895 stream_.deviceBuffer,
1896 stream_.convertInfo[1] );
1902 //MUTEX_UNLOCK( &stream_.mutex );
1904 RtApi::tickStreamTime();
// Map a CoreAudio OSStatus error code to the name of its constant,
// for human-readable error reporting. Unrecognized codes fall through
// to a generic string.
// NOTE(review): the opening switch statement and closing braces fall on
// listing lines absent from this numbered excerpt.
1908 const char* RtApiCore :: getErrorCode( OSStatus code )
1912 case kAudioHardwareNotRunningError:
1913 return "kAudioHardwareNotRunningError";
1915 case kAudioHardwareUnspecifiedError:
1916 return "kAudioHardwareUnspecifiedError";
1918 case kAudioHardwareUnknownPropertyError:
1919 return "kAudioHardwareUnknownPropertyError";
1921 case kAudioHardwareBadPropertySizeError:
1922 return "kAudioHardwareBadPropertySizeError";
1924 case kAudioHardwareIllegalOperationError:
1925 return "kAudioHardwareIllegalOperationError";
1927 case kAudioHardwareBadObjectError:
1928 return "kAudioHardwareBadObjectError";
1930 case kAudioHardwareBadDeviceError:
1931 return "kAudioHardwareBadDeviceError";
1933 case kAudioHardwareBadStreamError:
1934 return "kAudioHardwareBadStreamError";
1936 case kAudioHardwareUnsupportedOperationError:
1937 return "kAudioHardwareUnsupportedOperationError";
1939 case kAudioDeviceUnsupportedFormatError:
1940 return "kAudioDeviceUnsupportedFormatError";
1942 case kAudioDevicePermissionsError:
1943 return "kAudioDevicePermissionsError";
// Default case: code not in the table above.
1946 return "CoreAudio unknown error";
1950 //******************** End of __MACOSX_CORE__ *********************//
1953 #if defined(__UNIX_JACK__)
1955 // JACK is a low-latency audio server, originally written for the
1956 // GNU/Linux operating system and now also ported to OS-X. It can
1957 // connect a number of different applications to an audio device, as
1958 // well as allowing them to share audio between themselves.
1960 // When using JACK with RtAudio, "devices" refer to JACK clients that
1961 // have ports connected to the server. The JACK server is typically
1962 // started in a terminal as follows:
1964 //   jackd -d alsa -d hw:0
1966 // or through an interface program such as qjackctl. Many of the
1967 // parameters normally set for a stream are fixed by the JACK server
1968 // and can be specified when the JACK server is started. In
1971 //   jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1973 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1974 // frames, and number of buffers = 4. Once the server is running, it
1975 // is not possible to override these values. If the values are not
1976 // specified in the command-line, the JACK server uses default values.
1978 // The JACK server does not have to be running when an instance of
1979 // RtApiJack is created, though the function getDeviceCount() will
1980 // report 0 devices found until JACK has been started. When no
1981 // devices are available (i.e., the JACK server is not running), a
1982 // stream cannot be opened.
1984 #include <jack/jack.h>
// Per-stream state for the JACK backend: the JACK client handle, the
// registered ports and client/device names for playback [0] and
// capture [1], a condition variable used to block stopStream() until
// output draining completes, and drain bookkeeping driven by the
// process callback.
// NOTE(review): the struct header and the xrun[2] member fall on
// listing lines absent from this numbered excerpt.
1988 // A structure to hold various information related to the Jack API
1991 jack_client_t *client;
1992 jack_port_t **ports[2];
1993 std::string deviceName[2];
1995 pthread_cond_t condition;
1996 int drainCounter; // Tracks callback counts when draining
1997 bool internalDrain; // Indicates if stop is initiated from callback or not.
2000 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
// No-op JACK error handler, installed in non-debug builds to suppress
// JACK's internal error printing (see the constructor below).
2003 #if !defined(__RTAUDIO_DEBUG__)
2004 static void jackSilentError( const char * ) {};
// Constructor: ports autoconnect by default; JACK's own error output
// is muted unless built with __RTAUDIO_DEBUG__.
2007 RtApiJack :: RtApiJack()
2008 :shouldAutoconnect_(true) {
2009 // Nothing to do here.
2010 #if !defined(__RTAUDIO_DEBUG__)
2011 // Turn off Jack's internal error reporting.
2012 jack_set_error_function( &jackSilentError );
// Destructor: ensure any open stream is closed (deactivates and closes
// the JACK client, frees stream buffers).
2016 RtApiJack :: ~RtApiJack()
2018 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count JACK "devices" by opening a throwaway client and walking the
// full port list: each distinct client-name prefix (text before the
// first ':') counts as one device. Returns 0 when no server is running
// (JackNoStartServer prevents auto-spawning one).
2021 unsigned int RtApiJack :: getDeviceCount( void )
2023 // See if we can become a jack client.
2024 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2025 jack_status_t *status = NULL;
2026 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2027 if ( client == 0 ) return 0;
2030 std::string port, previousPort;
2031 unsigned int nChannels = 0, nDevices = 0;
2032 ports = jack_get_ports( client, NULL, NULL, 0 );
2034 // Parse the port names up to the first colon (:).
2037 port = (char *) ports[ nChannels ];
2038 iColon = port.find(":");
2039 if ( iColon != std::string::npos ) {
// Keep the colon here so the comparison below distinguishes prefixes.
2040 port = port.substr( 0, iColon + 1 );
2041 if ( port != previousPort ) {
2043 previousPort = port;
2046 } while ( ports[++nChannels] );
// Temporary probe client is no longer needed.
2050 jack_client_close( client );
// Probe device (JACK client) number `device`: resolve its name from
// the port list, report the server sample rate as the only supported
// rate, and count its input/output ports as channels. JACK data is
// always 32-bit float; device 0 is reported as the default.
2054 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2056 RtAudio::DeviceInfo info;
2057 info.probed = false;
2059 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2060 jack_status_t *status = NULL;
2061 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2062 if ( client == 0 ) {
2063 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2064 error( RtAudioError::WARNING );
2069 std::string port, previousPort;
2070 unsigned int nPorts = 0, nDevices = 0;
2071 ports = jack_get_ports( client, NULL, NULL, 0 );
2073 // Parse the port names up to the first colon (:).
2076 port = (char *) ports[ nPorts ];
2077 iColon = port.find(":");
2078 if ( iColon != std::string::npos ) {
2079 port = port.substr( 0, iColon );
2080 if ( port != previousPort ) {
2081 if ( nDevices == device ) info.name = port;
2083 previousPort = port;
2086 } while ( ports[++nPorts] );
2090 if ( device >= nDevices ) {
2091 jack_client_close( client );
2092 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2093 error( RtAudioError::INVALID_USE );
// JACK fixes the sample rate server-wide; report only that rate.
2097 // Get the current jack server sample rate.
2098 info.sampleRates.clear();
2100 info.preferredSampleRate = jack_get_sample_rate( client );
2101 info.sampleRates.push_back( info.preferredSampleRate );
2103 // Count the available ports containing the client name as device
2104 // channels. Jack "input ports" equal RtAudio output channels.
2105 unsigned int nChannels = 0;
2106 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
2108 while ( ports[ nChannels ] ) nChannels++;
2110 info.outputChannels = nChannels;
2113 // Jack "output ports" equal RtAudio input channels.
2115 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
2117 while ( ports[ nChannels ] ) nChannels++;
2119 info.inputChannels = nChannels;
2122 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2123 jack_client_close(client);
2124 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2125 error( RtAudioError::WARNING );
2129 // If device opens for both playback and capture, we determine the channels.
2130 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2131 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2133 // Jack always uses 32-bit floats.
2134 info.nativeFormats = RTAUDIO_FLOAT32;
2136 // Jack doesn't provide default devices so we'll use the first available one.
2137 if ( device == 0 && info.outputChannels > 0 )
2138 info.isDefaultOutput = true;
2139 if ( device == 0 && info.inputChannels > 0 )
2140 info.isDefaultInput = true;
2142 jack_client_close(client);
// JACK process callback: forward each period to the stream's
// callbackEvent(). Returning nonzero tells JACK processing failed.
2147 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2149 CallbackInfo *info = (CallbackInfo *) infoPointer;
2151 RtApiJack *object = (RtApiJack *) info->object;
2152 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2157 // This function will be called by a spawned thread when the Jack
2158 // server signals that it is shutting down. It is necessary to handle
2159 // it this way because the jackShutdown() function must return before
2160 // the jack_deactivate() function (in closeStream()) will return.
2161 static void *jackCloseStream( void *ptr )
2163 CallbackInfo *info = (CallbackInfo *) ptr;
2164 RtApiJack *object = (RtApiJack *) info->object;
// Runs on its own thread; simply close the stream and exit.
2166 object->closeStream();
2168 pthread_exit( NULL );
// JACK shutdown callback: if the stream is still running (i.e. this is
// not the echo of our own deactivate), close it from a helper thread.
2170 static void jackShutdown( void *infoPointer )
2172 CallbackInfo *info = (CallbackInfo *) infoPointer;
2173 RtApiJack *object = (RtApiJack *) info->object;
2175 // Check current stream state. If stopped, then we'll assume this
2176 // was called as a result of a call to RtApiJack::stopStream (the
2177 // deactivation of a client handle causes this function to be called).
2178 // If not, we'll assume the Jack server is shutting down or some
2179 // other problem occurred and we should close the stream.
2180 if ( object->isStreamRunning() == false ) return;
2182 ThreadHandle threadId;
2183 pthread_create( &threadId, NULL, jackCloseStream, info );
2184 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
// JACK xrun callback: flag over/underflow on whichever directions have
// registered ports, so the next callbackEvent() can report it.
2187 static int jackXrun( void *infoPointer )
2189 JackHandle *handle = *((JackHandle **) infoPointer);
2191 if ( handle->ports[0] ) handle->xrun[0] = true;
2192 if ( handle->ports[1] ) handle->xrun[1] = true;
// Open one direction (OUTPUT or INPUT) of a JACK stream: connect to
// the server (once per stream), resolve the device name from the port
// list, validate channel count and sample rate against the server,
// allocate the JackHandle plus user/device buffers, register our
// ports, and install the process/xrun/shutdown callbacks.
// Returns FAILURE on any error (cleanup at the error label below).
// NOTE(review): several listing lines (else clauses, returns, closing
// braces) are absent from this numbered excerpt.
2197 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2198 unsigned int firstChannel, unsigned int sampleRate,
2199 RtAudioFormat format, unsigned int *bufferSize,
2200 RtAudio::StreamOptions *options )
2202 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2204 // Look for jack server and try to become a client (only do once per stream).
2205 jack_client_t *client = 0;
2206 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2207 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2208 jack_status_t *status = NULL;
2209 if ( options && !options->streamName.empty() )
2210 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2212 client = jack_client_open( "RtApiJack", jackoptions, status );
2213 if ( client == 0 ) {
2214 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2215 error( RtAudioError::WARNING );
2220 // The handle must have been created on an earlier pass.
2221 client = handle->client;
// Resolve the device index to a JACK client-name prefix.
2225 std::string port, previousPort, deviceName;
2226 unsigned int nPorts = 0, nDevices = 0;
2227 ports = jack_get_ports( client, NULL, NULL, 0 );
2229 // Parse the port names up to the first colon (:).
2232 port = (char *) ports[ nPorts ];
2233 iColon = port.find(":");
2234 if ( iColon != std::string::npos ) {
2235 port = port.substr( 0, iColon );
2236 if ( port != previousPort ) {
2237 if ( nDevices == device ) deviceName = port;
2239 previousPort = port;
2242 } while ( ports[++nPorts] );
// NOTE(review): unlike the sample-rate failure below, this path does
// not appear to close the freshly-opened client — possible leak;
// confirm against the lines missing from this excerpt.
2246 if ( device >= nDevices ) {
2247 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
2251 // Count the available ports containing the client name as device
2252 // channels. Jack "input ports" equal RtAudio output channels.
2253 unsigned int nChannels = 0;
2254 unsigned long flag = JackPortIsInput;
2255 if ( mode == INPUT ) flag = JackPortIsOutput;
2256 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2258 while ( ports[ nChannels ] ) nChannels++;
2262 // Compare the jack ports for specified client to the requested number of channels.
2263 if ( nChannels < (channels + firstChannel) ) {
2264 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2265 errorText_ = errorStream_.str();
2269 // Check the jack server sample rate.
2270 unsigned int jackRate = jack_get_sample_rate( client );
2271 if ( sampleRate != jackRate ) {
2272 jack_client_close( client );
2273 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2274 errorText_ = errorStream_.str();
2277 stream_.sampleRate = jackRate;
2279 // Get the latency of the JACK port.
2280 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2281 if ( ports[ firstChannel ] ) {
2283 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2284 // the range (usually the min and max are equal)
2285 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2286 // get the latency range
2287 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2288 // be optimistic, use the min!
2289 stream_.latency[mode] = latrange.min;
2290 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2294 // The jack server always uses 32-bit floating-point data.
2295 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2296 stream_.userFormat = format;
2298 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2299 else stream_.userInterleaved = true;
2301 // Jack always uses non-interleaved buffers.
2302 stream_.deviceInterleaved[mode] = false;
2304 // Jack always provides host byte-ordered data.
2305 stream_.doByteSwap[mode] = false;
2307 // Get the buffer size. The buffer size and number of buffers
2308 // (periods) is set when the jack server is started.
2309 stream_.bufferSize = (int) jack_get_buffer_size( client );
2310 *bufferSize = stream_.bufferSize;
2312 stream_.nDeviceChannels[mode] = channels;
2313 stream_.nUserChannels[mode] = channels;
2315 // Set flags for buffer conversion.
2316 stream_.doConvertBuffer[mode] = false;
2317 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2318 stream_.doConvertBuffer[mode] = true;
2319 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2320 stream_.nUserChannels[mode] > 1 )
2321 stream_.doConvertBuffer[mode] = true;
2323 // Allocate our JackHandle structure for the stream.
2324 if ( handle == 0 ) {
2326 handle = new JackHandle;
2328 catch ( std::bad_alloc& ) {
2329 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2333 if ( pthread_cond_init(&handle->condition, NULL) ) {
2334 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2337 stream_.apiHandle = (void *) handle;
2338 handle->client = client;
2340 handle->deviceName[mode] = deviceName;
2342 // Allocate necessary internal buffers.
2343 unsigned long bufferBytes;
2344 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2345 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2346 if ( stream_.userBuffer[mode] == NULL ) {
2347 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2351 if ( stream_.doConvertBuffer[mode] ) {
// In DUPLEX mode a single device buffer is shared by both
// directions; only (re)allocate if the new direction needs more.
2353 bool makeBuffer = true;
2354 if ( mode == OUTPUT )
2355 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2356 else { // mode == INPUT
2357 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2358 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2359 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2360 if ( bufferBytes < bytesOut ) makeBuffer = false;
2365 bufferBytes *= *bufferSize;
2366 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2367 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2368 if ( stream_.deviceBuffer == NULL ) {
2369 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2375 // Allocate memory for the Jack ports (channels) identifiers.
2376 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2377 if ( handle->ports[mode] == NULL ) {
2378 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2382 stream_.device[mode] = device;
2383 stream_.channelOffset[mode] = firstChannel;
2384 stream_.state = STREAM_STOPPED;
2385 stream_.callbackInfo.object = (void *) this;
2387 if ( stream_.mode == OUTPUT && mode == INPUT )
2388 // We had already set up the stream for output.
2389 stream_.mode = DUPLEX;
2391 stream_.mode = mode;
// Install JACK callbacks (process, xrun, server shutdown).
2392 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2393 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2394 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2397 // Register our ports.
2399 if ( mode == OUTPUT ) {
2400 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2401 snprintf( label, 64, "outport %d", i );
2402 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2403 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2407 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2408 snprintf( label, 64, "inport %d", i );
2409 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2410 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2414 // Setup the buffer conversion information structure. We don't use
2415 // buffers to do channel offsets, so we override that parameter
2417 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2419 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
// Error path: tear down everything allocated above.
2425 pthread_cond_destroy( &handle->condition );
2426 jack_client_close( handle->client );
2428 if ( handle->ports[0] ) free( handle->ports[0] );
2429 if ( handle->ports[1] ) free( handle->ports[1] );
2432 stream_.apiHandle = 0;
2435 for ( int i=0; i<2; i++ ) {
2436 if ( stream_.userBuffer[i] ) {
2437 free( stream_.userBuffer[i] );
2438 stream_.userBuffer[i] = 0;
2442 if ( stream_.deviceBuffer ) {
2443 free( stream_.deviceBuffer );
2444 stream_.deviceBuffer = 0;
// Close the stream: deactivate (if running) and close the JACK client,
// destroy the condition variable, free port arrays and user/device
// buffers, and reset stream state to CLOSED.
2450 void RtApiJack :: closeStream( void )
2452 if ( stream_.state == STREAM_CLOSED ) {
2453 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2454 error( RtAudioError::WARNING );
2458 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2461 if ( stream_.state == STREAM_RUNNING )
2462 jack_deactivate( handle->client );
// Closing the client also unregisters its ports server-side.
2464 jack_client_close( handle->client );
2468 if ( handle->ports[0] ) free( handle->ports[0] );
2469 if ( handle->ports[1] ) free( handle->ports[1] );
2470 pthread_cond_destroy( &handle->condition );
2472 stream_.apiHandle = 0;
2475 for ( int i=0; i<2; i++ ) {
2476 if ( stream_.userBuffer[i] ) {
2477 free( stream_.userBuffer[i] );
2478 stream_.userBuffer[i] = 0;
2482 if ( stream_.deviceBuffer ) {
2483 free( stream_.deviceBuffer );
2484 stream_.deviceBuffer = 0;
2487 stream_.mode = UNINITIALIZED;
2488 stream_.state = STREAM_CLOSED;
// Start the stream: activate the JACK client, then (unless the user
// disabled autoconnect) wire our registered ports to the target
// device's ports, honoring the channel offsets chosen at open time.
2491 void RtApiJack :: startStream( void )
2494 if ( stream_.state == STREAM_RUNNING ) {
2495 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2496 error( RtAudioError::WARNING );
2500 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2501 int result = jack_activate( handle->client );
2503 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2509 // Get the list of available ports.
2510 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
2512 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
2513 if ( ports == NULL) {
2514 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2518 // Now make the port connections. Since RtAudio wasn't designed to
2519 // allow the user to select particular channels of a device, we'll
2520 // just open the first "nChannels" ports with offset.
2521 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2523 if ( ports[ stream_.channelOffset[0] + i ] )
2524 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2527 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
// Capture side: connect the device's output ports to our input ports.
2534 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
2536 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
2537 if ( ports == NULL) {
2538 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2542 // Now make the port connections. See note above.
2543 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2545 if ( ports[ stream_.channelOffset[1] + i ] )
2546 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2549 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
2556 handle->drainCounter = 0;
2557 handle->internalDrain = false;
2558 stream_.state = STREAM_RUNNING;
2561 if ( result == 0 ) return;
2562 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream: for output streams, first wait for the process
// callback to drain/zero the output (drainCounter handshake via the
// condition variable), then deactivate the JACK client.
// NOTE(review): mutex acquisition for pthread_cond_wait is not visible
// in this numbered excerpt — confirm stream_.mutex is held here.
2565 void RtApiJack :: stopStream( void )
2568 if ( stream_.state == STREAM_STOPPED ) {
2569 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2570 error( RtAudioError::WARNING );
2574 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2575 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2577 if ( handle->drainCounter == 0 ) {
2578 handle->drainCounter = 2;
2579 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2583 jack_deactivate( handle->client );
2584 stream_.state = STREAM_STOPPED;
// Abort the stream: skip the output drain (set drainCounter past the
// "still draining" value) and stop immediately.
2587 void RtApiJack :: abortStream( void )
2590 if ( stream_.state == STREAM_STOPPED ) {
2591 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2592 error( RtAudioError::WARNING );
2596 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2597 handle->drainCounter = 2;
2602 // This function will be called by a spawned thread when the user
2603 // callback function signals that the stream should be stopped or
2604 // aborted. It is necessary to handle it this way because the
2605 // callbackEvent() function must return before the jack_deactivate()
2606 // function will return.
2607 static void *jackStopStream( void *ptr )
2609 CallbackInfo *info = (CallbackInfo *) ptr;
2610 RtApiJack *object = (RtApiJack *) info->object;
// Runs on its own thread; stop the stream and exit.
2612 object->stopStream();
2613 pthread_exit( NULL );
// Process one JACK period: run the user callback for fresh data, move
// audio between the JACK port buffers and the user/device buffers
// (with format conversion when needed), and manage the drain handshake
// used by stopStream()/abortStream().
// FIX: the two error messages below previously read
// "RtApiCore::callbackEvent()" — a copy/paste slip from the CoreAudio
// backend; corrected to name this JACK implementation.
2616 bool RtApiJack :: callbackEvent( unsigned long nframes )
2618 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2619 if ( stream_.state == STREAM_CLOSED ) {
2620 errorText_ = "RtApiJack::callbackEvent(): the stream is closed ... this shouldn't happen!";
2621 error( RtAudioError::WARNING );
2624 if ( stream_.bufferSize != nframes ) {
2625 errorText_ = "RtApiJack::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2626 error( RtAudioError::WARNING );
2630 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2631 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2633 // Check if we were draining the stream and signal is finished.
2634 if ( handle->drainCounter > 3 ) {
2635 ThreadHandle threadId;
2637 stream_.state = STREAM_STOPPING;
// Internal drain (callback requested stop): spawn jackStopStream;
// otherwise wake the blocked stopStream() caller.
2638 if ( handle->internalDrain == true )
2639 pthread_create( &threadId, NULL, jackStopStream, info );
2641 pthread_cond_signal( &handle->condition );
2645 // Invoke user callback first, to get fresh output data.
2646 if ( handle->drainCounter == 0 ) {
2647 RtAudioCallback callback = (RtAudioCallback) info->callback;
2648 double streamTime = getStreamTime();
2649 RtAudioStreamStatus status = 0;
// Report any xruns flagged by jackXrun() since the last period.
2650 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2651 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2652 handle->xrun[0] = false;
2654 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2655 status |= RTAUDIO_INPUT_OVERFLOW;
2656 handle->xrun[1] = false;
2658 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2659 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort immediately; 1 = drain output then stop.
2660 if ( cbReturnValue == 2 ) {
2661 stream_.state = STREAM_STOPPING;
2662 handle->drainCounter = 2;
2664 pthread_create( &id, NULL, jackStopStream, info );
2667 else if ( cbReturnValue == 1 ) {
2668 handle->drainCounter = 1;
2669 handle->internalDrain = true;
2673 jack_default_audio_sample_t *jackbuffer;
2674 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2675 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2677 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2679 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2680 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2681 memset( jackbuffer, 0, bufferBytes );
2685 else if ( stream_.doConvertBuffer[0] ) {
2687 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2689 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2690 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2691 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2694 else { // no buffer conversion
2695 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2696 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2697 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2702 // Don't bother draining input
2703 if ( handle->drainCounter ) {
2704 handle->drainCounter++;
2708 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2710 if ( stream_.doConvertBuffer[1] ) {
2711 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2712 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2713 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2715 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2717 else { // no buffer conversion
2718 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2719 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2720 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2726 RtApi::tickStreamTime();
2729 //******************** End of __UNIX_JACK__ *********************//
2732 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2734 // The ASIO API is designed around a callback scheme, so this
2735 // implementation is similar to that used for OS-X CoreAudio and Linux
2736 // Jack. The primary constraint with ASIO is that it only allows
2737 // access to a single driver at a time. Thus, it is not possible to
2738 // have more than one simultaneous RtAudio stream.
2740 // This implementation also requires a number of external ASIO files
2741 // and a few global variables. The ASIO callback scheme does not
2742 // allow for the passing of user data, so we must create a global
2743 // pointer to our callbackInfo structure.
2745 // On unix systems, we make use of a pthread condition variable.
2746 // Since there is no equivalent in Windows, I hacked something based
2747 // on information found in
2748 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2750 #include "asiosys.h"
2752 #include "iasiothiscallresolver.h"
2753 #include "asiodrivers.h"
// File-scope ASIO state: the ASIO callback scheme passes no user-data
// pointer, so driver objects and the active CallbackInfo must live in
// globals (only one ASIO stream can exist at a time).
2756 static AsioDrivers drivers;
2757 static ASIOCallbacks asioCallbacks;
2758 static ASIODriverInfo driverInfo;
2759 static CallbackInfo *asioCallbackInfo;
2760 static bool asioXRun;
// Per-stream ASIO handle fields (struct header falls on listing lines
// absent from this numbered excerpt).
2763 int drainCounter; // Tracks callback counts when draining
2764 bool internalDrain; // Indicates if stop is initiated from callback or not.
2765 ASIOBufferInfo *bufferInfos;
2769 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2772 // Function declarations (definitions at end of section)
2773 static const char* getAsioErrorString( ASIOError result );
2774 static void sampleRateChanged( ASIOSampleRate sRate );
2775 static long asioMessages( long selector, long value, void* message, double* opt );
// Constructor: initialize COM for single-threaded apartment use (ASIO
// requires STA), reset the driver list, and set up the ASIODriverInfo
// used by later ASIOInit() calls.
2777 RtApiAsio :: RtApiAsio()
2779 // ASIO cannot run on a multi-threaded appartment. You can call
2780 // CoInitialize beforehand, but it must be for appartment threading
2781 // (in which case, CoInitilialize will return S_FALSE here).
2782 coInitialized_ = false;
2783 HRESULT hr = CoInitialize( NULL );
2785 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2786 error( RtAudioError::WARNING );
// Remember we initialized COM so the destructor can balance it.
2788 coInitialized_ = true;
2790 drivers.removeCurrentDriver();
2791 driverInfo.asioVersion = 2;
2793 // See note in DirectSound implementation about GetDesktopWindow().
2794 driverInfo.sysRef = GetForegroundWindow();
// Destructor: close any open stream and balance the CoInitialize from
// the constructor.
2797 RtApiAsio :: ~RtApiAsio()
2799 if ( stream_.state != STREAM_CLOSED ) closeStream();
2800 if ( coInitialized_ ) CoUninitialize();
// Report the number of installed ASIO drivers (one driver == one device).
2803 unsigned int RtApiAsio :: getDeviceCount( void )
2805 return (unsigned int) drivers.asioGetNumDev();
// Probe a single ASIO driver (zero-based index) and fill an
// RtAudio::DeviceInfo: channel counts, supported/preferred sample rates,
// native sample format, and default-device flags.  Loading a driver is
// exclusive in ASIO, so while a stream is open this returns cached results
// from devices_ instead of probing.
// NOTE(review): several "return info;" statements and closing braces present
// in upstream RtAudio 5.0.0 are missing from this excerpt — confirm against
// the canonical source before building.
2808 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2810 RtAudio::DeviceInfo info;
2811 info.probed = false;
// Validate the requested index against the current driver count.
2814 unsigned int nDevices = getDeviceCount();
2815 if ( nDevices == 0 ) {
2816 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2817 error( RtAudioError::INVALID_USE );
2821 if ( device >= nDevices ) {
2822 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2823 error( RtAudioError::INVALID_USE );
2827 // If a stream is already open, we cannot probe other devices.  Thus, use the saved results.
2828 if ( stream_.state != STREAM_CLOSED ) {
2829 if ( device >= devices_.size() ) {
2830 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2831 error( RtAudioError::WARNING );
2834 return devices_[ device ];
// Look up the driver's display name, then load and initialize it so the
// ASIO* query functions below target this driver.
2837 char driverName[32];
2838 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2839 if ( result != ASE_OK ) {
2840 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2841 errorText_ = errorStream_.str();
2842 error( RtAudioError::WARNING );
2846 info.name = driverName;
2848 if ( !drivers.loadDriver( driverName ) ) {
2849 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2850 errorText_ = errorStream_.str();
2851 error( RtAudioError::WARNING );
2855 result = ASIOInit( &driverInfo );
2856 if ( result != ASE_OK ) {
2857 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2858 errorText_ = errorStream_.str();
2859 error( RtAudioError::WARNING );
2863 // Determine the device channel information.
2864 long inputChannels, outputChannels;
2865 result = ASIOGetChannels( &inputChannels, &outputChannels );
2866 if ( result != ASE_OK ) {
2867 drivers.removeCurrentDriver();
2868 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2869 errorText_ = errorStream_.str();
2870 error( RtAudioError::WARNING );
2874 info.outputChannels = outputChannels;
2875 info.inputChannels = inputChannels;
// Duplex capability is the smaller of the two directions' channel counts.
2876 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2877 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2879 // Determine the supported sample rates.
2880 info.sampleRates.clear();
2881 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2882 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2883 if ( result == ASE_OK ) {
2884 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Preferred rate: the highest supported rate that does not exceed 48 kHz
// (falling back to the first supported rate found when none qualify).
2886 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2887 info.preferredSampleRate = SAMPLE_RATES[i];
2891 // Determine supported data types ... just check first channel and assume rest are the same.
2892 ASIOChannelInfo channelInfo;
2893 channelInfo.channel = 0;
2894 channelInfo.isInput = true;
2895 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2896 result = ASIOGetChannelInfo( &channelInfo );
2897 if ( result != ASE_OK ) {
2898 drivers.removeCurrentDriver();
2899 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2900 errorText_ = errorStream_.str();
2901 error( RtAudioError::WARNING );
// Map the ASIO sample type to the matching RtAudio format flag.  MSB
// variants are the same format with opposite endianness.
2905 info.nativeFormats = 0;
2906 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2907 info.nativeFormats |= RTAUDIO_SINT16;
2908 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2909 info.nativeFormats |= RTAUDIO_SINT32;
2910 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2911 info.nativeFormats |= RTAUDIO_FLOAT32;
2912 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2913 info.nativeFormats |= RTAUDIO_FLOAT64;
2914 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2915 info.nativeFormats |= RTAUDIO_SINT24;
2917 if ( info.outputChannels > 0 )
2918 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2919 if ( info.inputChannels > 0 )
2920 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// Unload the driver before returning so other drivers can be probed/opened.
2923 drivers.removeCurrentDriver();
2927 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2929 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2930 object->callbackEvent( index );
2933 void RtApiAsio :: saveDeviceInfo( void )
2937 unsigned int nDevices = getDeviceCount();
2938 devices_.resize( nDevices );
2939 for ( unsigned int i=0; i<nDevices; i++ )
2940 devices_[i] = getDeviceInfo( i );
// Open (or, for the input half of a duplex stream, extend) an ASIO stream on
// the given driver index: loads/initializes the driver, validates channel
// count and sample rate, negotiates a buffer size, creates the ASIO buffers,
// and allocates RtAudio's user/device conversion buffers.  Returns SUCCESS
// or FAILURE.
// NOTE(review): upstream RtAudio 5.0.0 uses "goto error" cleanup throughout
// this function; most of those goto lines, a number of closing braces, and
// the "error:" label region are missing from this excerpt — confirm against
// the canonical source before building.
2943 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2944 unsigned int firstChannel, unsigned int sampleRate,
2945 RtAudioFormat format, unsigned int *bufferSize,
2946 RtAudio::StreamOptions *options )
2947 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// True when we are opening the input half on top of an already-open output
// stream (ASIO duplex): the driver is already loaded in that case.
2949 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2951 // For ASIO, a duplex stream MUST use the same driver.
2952 if ( isDuplexInput && stream_.device[0] != device ) {
2953 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2957 char driverName[32];
2958 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2959 if ( result != ASE_OK ) {
2960 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2961 errorText_ = errorStream_.str();
2965 // Only load the driver once for duplex stream.
2966 if ( !isDuplexInput ) {
2967 // The getDeviceInfo() function will not work when a stream is open
2968 // because ASIO does not allow multiple devices to run at the same
2969 // time.  Thus, we'll probe the system before opening a stream and
2970 // save the results for use by getDeviceInfo().
2971 this->saveDeviceInfo();
2973 if ( !drivers.loadDriver( driverName ) ) {
2974 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2975 errorText_ = errorStream_.str();
2979 result = ASIOInit( &driverInfo );
2980 if ( result != ASE_OK ) {
2981 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2982 errorText_ = errorStream_.str();
2987 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
2988 bool buffersAllocated = false;
2989 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
2990 unsigned int nChannels;
2993 // Check the device channel count.
2994 long inputChannels, outputChannels;
2995 result = ASIOGetChannels( &inputChannels, &outputChannels );
2996 if ( result != ASE_OK ) {
2997 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2998 errorText_ = errorStream_.str();
3002 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
3003 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3004 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3005 errorText_ = errorStream_.str();
3008 stream_.nDeviceChannels[mode] = channels;
3009 stream_.nUserChannels[mode] = channels;
3010 stream_.channelOffset[mode] = firstChannel;
3012 // Verify the sample rate is supported.
3013 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3014 if ( result != ASE_OK ) {
3015 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3016 errorText_ = errorStream_.str();
3020 // Get the current sample rate
3021 ASIOSampleRate currentRate;
// NOTE(review): "¤tRate" below is mojibake — "&curr" was eaten by an
// HTML entity during extraction; upstream reads "ASIOGetSampleRate(
// &currentRate );".  This line cannot compile as-is; restore the
// address-of expression.
3022 result = ASIOGetSampleRate( ¤tRate );
3023 if ( result != ASE_OK ) {
3024 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3025 errorText_ = errorStream_.str();
3029 // Set the sample rate only if necessary
3030 if ( currentRate != sampleRate ) {
3031 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3032 if ( result != ASE_OK ) {
3033 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3034 errorText_ = errorStream_.str();
3039 // Determine the driver data type.
3040 ASIOChannelInfo channelInfo;
3041 channelInfo.channel = 0;
3042 if ( mode == OUTPUT ) channelInfo.isInput = false;
3043 else channelInfo.isInput = true;
3044 result = ASIOGetChannelInfo( &channelInfo );
3045 if ( result != ASE_OK ) {
3046 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3047 errorText_ = errorStream_.str();
3051 // Assuming WINDOWS host is always little-endian.
3052 stream_.doByteSwap[mode] = false;
3053 stream_.userFormat = format;
3054 stream_.deviceFormat[mode] = 0;
// Map the ASIO sample type to RtAudio's device format; MSB variants need a
// byte swap on this (little-endian) host.
3055 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3056 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3057 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3059 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3060 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3061 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3063 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3064 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3065 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3067 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3068 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3069 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3071 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3072 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3073 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3076 if ( stream_.deviceFormat[mode] == 0 ) {
3077 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3078 errorText_ = errorStream_.str();
3082 // Set the buffer size.  For a duplex stream, this will end up
3083 // setting the buffer size based on the input constraints, which
3085 long minSize, maxSize, preferSize, granularity;
3086 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3087 if ( result != ASE_OK ) {
3088 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3089 errorText_ = errorStream_.str();
3093 if ( isDuplexInput ) {
3094 // When this is the duplex input (output was opened before), then we have to use the same
3095 // buffersize as the output, because it might use the preferred buffer size, which most
3096 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3097 // So instead of throwing an error, make them equal. The caller uses the reference
3098 // to the "bufferSize" param as usual to set up processing buffers.
3100 *bufferSize = stream_.bufferSize;
// Clamp the requested size to [minSize, maxSize]; when granularity is -1
// the driver requires a power-of-two size, otherwise round up to the next
// multiple of granularity.
3103 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3104 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3105 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3106 else if ( granularity == -1 ) {
3107 // Make sure bufferSize is a power of two.
3108 int log2_of_min_size = 0;
3109 int log2_of_max_size = 0;
3111 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3112 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3113 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3116 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3117 int min_delta_num = log2_of_min_size;
3119 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3120 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3121 if (current_delta < min_delta) {
3122 min_delta = current_delta;
3127 *bufferSize = ( (unsigned int)1 << min_delta_num );
3128 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3129 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3131 else if ( granularity != 0 ) {
3132 // Set to an even multiple of granularity, rounding up.
3133 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3138 // we don't use it anymore, see above!
3139 // Just left it here for the case...
3140 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3141 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3146 stream_.bufferSize = *bufferSize;
3147 stream_.nBuffers = 2;
3149 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3150 else stream_.userInterleaved = true;
3152 // ASIO always uses non-interleaved buffers.
3153 stream_.deviceInterleaved[mode] = false;
3155 // Allocate, if necessary, our AsioHandle structure for the stream.
3156 if ( handle == 0 ) {
3158 handle = new AsioHandle;
3160 catch ( std::bad_alloc& ) {
3161 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3164 handle->bufferInfos = 0;
3166 // Create a manual-reset event.
3167 handle->condition = CreateEvent( NULL, // no security
3168 TRUE, // manual-reset
3169 FALSE, // non-signaled initially
3171 stream_.apiHandle = (void *) handle;
3174 // Create the ASIO internal buffers.  Since RtAudio sets up input
3175 // and output separately, we'll have to dispose of previously
3176 // created output buffers for a duplex stream.
3177 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3178 ASIODisposeBuffers();
3179 if ( handle->bufferInfos ) free( handle->bufferInfos );
3182 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3184 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3185 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3186 if ( handle->bufferInfos == NULL ) {
3187 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3188 errorText_ = errorStream_.str();
// Fill bufferInfos: output channels first (index slot 0), then inputs
// (slot 1), applying the caller's channel offset.
3192 ASIOBufferInfo *infos;
3193 infos = handle->bufferInfos;
3194 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3195 infos->isInput = ASIOFalse;
3196 infos->channelNum = i + stream_.channelOffset[0];
3197 infos->buffers[0] = infos->buffers[1] = 0;
3199 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3200 infos->isInput = ASIOTrue;
3201 infos->channelNum = i + stream_.channelOffset[1];
3202 infos->buffers[0] = infos->buffers[1] = 0;
3205 // prepare for callbacks
3206 stream_.sampleRate = sampleRate;
3207 stream_.device[mode] = device;
3208 stream_.mode = isDuplexInput ? DUPLEX : mode;
3210 // store this class instance before registering callbacks, that are going to use it
3211 asioCallbackInfo = &stream_.callbackInfo;
3212 stream_.callbackInfo.object = (void *) this;
3214 // Set up the ASIO callback structure and create the ASIO data buffers.
3215 asioCallbacks.bufferSwitch = &bufferSwitch;
3216 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3217 asioCallbacks.asioMessage = &asioMessages;
3218 asioCallbacks.bufferSwitchTimeInfo = NULL;
3219 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3220 if ( result != ASE_OK ) {
3221 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3222 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
3223 // in that case, let's be naïve and try that instead
3224 *bufferSize = preferSize;
3225 stream_.bufferSize = *bufferSize;
3226 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3229 if ( result != ASE_OK ) {
3230 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3231 errorText_ = errorStream_.str();
3234 buffersAllocated = true;
3235 stream_.state = STREAM_STOPPED;
3237 // Set flags for buffer conversion.
3238 stream_.doConvertBuffer[mode] = false;
3239 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3240 stream_.doConvertBuffer[mode] = true;
3241 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3242 stream_.nUserChannels[mode] > 1 )
3243 stream_.doConvertBuffer[mode] = true;
3245 // Allocate necessary internal buffers
3246 unsigned long bufferBytes;
3247 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3248 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3249 if ( stream_.userBuffer[mode] == NULL ) {
3250 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3254 if ( stream_.doConvertBuffer[mode] ) {
// Reuse an existing (larger) device buffer from the output half of a
// duplex stream when possible instead of reallocating.
3256 bool makeBuffer = true;
3257 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3258 if ( isDuplexInput && stream_.deviceBuffer ) {
3259 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3260 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3264 bufferBytes *= *bufferSize;
3265 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3266 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3267 if ( stream_.deviceBuffer == NULL ) {
3268 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3274 // Determine device latencies
3275 long inputLatency, outputLatency;
3276 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3277 if ( result != ASE_OK ) {
3278 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3279 errorText_ = errorStream_.str();
3280 error( RtAudioError::WARNING); // warn but don't fail
3283 stream_.latency[0] = outputLatency;
3284 stream_.latency[1] = inputLatency;
3287 // Setup the buffer conversion information structure.  We don't use
3288 // buffers to do channel offsets, so we override that parameter
3290 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
// Error-cleanup path (upstream label "error:"): reached via "goto error"
// from the failure branches above.
3295 if ( !isDuplexInput ) {
3296 // the cleanup for error in the duplex input, is done by RtApi::openStream
3297 // So we clean up for single channel only
3299 if ( buffersAllocated )
3300 ASIODisposeBuffers();
3302 drivers.removeCurrentDriver();
3305 CloseHandle( handle->condition );
3306 if ( handle->bufferInfos )
3307 free( handle->bufferInfos );
3310 stream_.apiHandle = 0;
3314 if ( stream_.userBuffer[mode] ) {
3315 free( stream_.userBuffer[mode] );
3316 stream_.userBuffer[mode] = 0;
3319 if ( stream_.deviceBuffer ) {
3320 free( stream_.deviceBuffer );
3321 stream_.deviceBuffer = 0;
3326 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3328 void RtApiAsio :: closeStream()
3330 if ( stream_.state == STREAM_CLOSED ) {
3331 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3332 error( RtAudioError::WARNING );
3336 if ( stream_.state == STREAM_RUNNING ) {
3337 stream_.state = STREAM_STOPPED;
3340 ASIODisposeBuffers();
3341 drivers.removeCurrentDriver();
3343 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3345 CloseHandle( handle->condition );
3346 if ( handle->bufferInfos )
3347 free( handle->bufferInfos );
3349 stream_.apiHandle = 0;
3352 for ( int i=0; i<2; i++ ) {
3353 if ( stream_.userBuffer[i] ) {
3354 free( stream_.userBuffer[i] );
3355 stream_.userBuffer[i] = 0;
3359 if ( stream_.deviceBuffer ) {
3360 free( stream_.deviceBuffer );
3361 stream_.deviceBuffer = 0;
3364 stream_.mode = UNINITIALIZED;
3365 stream_.state = STREAM_CLOSED;
// File-scope flag cleared in startStream(); presumably set once a
// stop-stream helper thread has been spawned so that only one such thread
// is created per stop — the setting site is not visible in this excerpt,
// so confirm against the full source.
3368 bool stopThreadCalled = false;
3370 void RtApiAsio :: startStream()
3373 if ( stream_.state == STREAM_RUNNING ) {
3374 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3375 error( RtAudioError::WARNING );
3379 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3380 ASIOError result = ASIOStart();
3381 if ( result != ASE_OK ) {
3382 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3383 errorText_ = errorStream_.str();
3387 handle->drainCounter = 0;
3388 handle->internalDrain = false;
3389 ResetEvent( handle->condition );
3390 stream_.state = STREAM_RUNNING;
3394 stopThreadCalled = false;
3396 if ( result == ASE_OK ) return;
3397 error( RtAudioError::SYSTEM_ERROR );
3400 void RtApiAsio :: stopStream()
3403 if ( stream_.state == STREAM_STOPPED ) {
3404 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3405 error( RtAudioError::WARNING );
3409 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3410 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3411 if ( handle->drainCounter == 0 ) {
3412 handle->drainCounter = 2;
3413 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3417 stream_.state = STREAM_STOPPED;
3419 ASIOError result = ASIOStop();
3420 if ( result != ASE_OK ) {
3421 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3422 errorText_ = errorStream_.str();
3425 if ( result == ASE_OK ) return;
3426 error( RtAudioError::SYSTEM_ERROR );
3429 void RtApiAsio :: abortStream()
3432 if ( stream_.state == STREAM_STOPPED ) {
3433 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3434 error( RtAudioError::WARNING );
3438 // The following lines were commented-out because some behavior was
3439 // noted where the device buffers need to be zeroed to avoid
3440 // continuing sound, even when the device buffers are completely
3441 // disposed. So now, calling abort is the same as calling stop.
3442 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3443 // handle->drainCounter = 2;
3447 // This function will be called by a spawned thread when the user
3448 // callback function signals that the stream should be stopped or
3449 // aborted. It is necessary to handle it this way because the
3450 // callbackEvent() function must return before the ASIOStop()
3451 // function will return.
3452 static unsigned __stdcall asioStopStream( void *ptr )
3454 CallbackInfo *info = (CallbackInfo *) ptr;
3455 RtApiAsio *object = (RtApiAsio *) info->object;
3457 object->stopStream();
// Per-buffer ASIO processing, called from bufferSwitch() with the index
// (0 or 1) of the half-buffer to service.  Handles drain/stop signaling,
// invokes the user callback, performs format conversion and byte swapping,
// and copies data between RtAudio's interleaved user/device buffers and the
// driver's per-channel non-interleaved buffers.
// NOTE(review): several lines from upstream RtAudio 5.0.0 (e.g. the
// "unsigned threadId;" declarations, "return SUCCESS;" statements, closing
// braces) are missing from this excerpt — confirm against the canonical
// source before building.
3462 bool RtApiAsio :: callbackEvent( long bufferIndex )
3464 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3465 if ( stream_.state == STREAM_CLOSED ) {
3466 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3467 error( RtAudioError::WARNING );
3471 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3472 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3474 // Check if we were draining the stream and signal if finished.
3475 if ( handle->drainCounter > 3 ) {
3477 stream_.state = STREAM_STOPPING;
// External drain (stopStream() is blocked on the event): wake it.
// Internal drain (callback returned 1): spawn asioStopStream() because
// ASIOStop() cannot be called from within this driver callback.
3478 if ( handle->internalDrain == false )
3479 SetEvent( handle->condition );
3480 else { // spawn a thread to stop the stream
3482 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3483 &stream_.callbackInfo, 0, &threadId );
3488 // Invoke user callback to get fresh output data UNLESS we are
3490 if ( handle->drainCounter == 0 ) {
3491 RtAudioCallback callback = (RtAudioCallback) info->callback;
3492 double streamTime = getStreamTime();
3493 RtAudioStreamStatus status = 0;
3494 if ( stream_.mode != INPUT && asioXRun == true ) {
3495 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3498 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3499 status |= RTAUDIO_INPUT_OVERFLOW;
3502 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3503 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort immediately; 1 = drain output then stop.
3504 if ( cbReturnValue == 2 ) {
3505 stream_.state = STREAM_STOPPING;
3506 handle->drainCounter = 2;
3508 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3509 &stream_.callbackInfo, 0, &threadId );
3512 else if ( cbReturnValue == 1 ) {
3513 handle->drainCounter = 1;
3514 handle->internalDrain = true;
3518 unsigned int nChannels, bufferBytes, i, j;
3519 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3520 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3522 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3524 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3526 for ( i=0, j=0; i<nChannels; i++ ) {
3527 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3528 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
3532 else if ( stream_.doConvertBuffer[0] ) {
// Convert user format/interleaving into deviceBuffer, then scatter one
// channel's worth per output bufferInfo.
3534 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3535 if ( stream_.doByteSwap[0] )
3536 byteSwapBuffer( stream_.deviceBuffer,
3537 stream_.bufferSize * stream_.nDeviceChannels[0],
3538 stream_.deviceFormat[0] );
3540 for ( i=0, j=0; i<nChannels; i++ ) {
3541 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3542 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3543 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
// No conversion needed: copy straight from the user buffer.
3549 if ( stream_.doByteSwap[0] )
3550 byteSwapBuffer( stream_.userBuffer[0],
3551 stream_.bufferSize * stream_.nUserChannels[0],
3552 stream_.userFormat );
3554 for ( i=0, j=0; i<nChannels; i++ ) {
3555 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3556 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3557 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3563 // Don't bother draining input
3564 if ( handle->drainCounter ) {
3565 handle->drainCounter++;
3569 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3571 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3573 if (stream_.doConvertBuffer[1]) {
3575 // Always interleave ASIO input data.
3576 for ( i=0, j=0; i<nChannels; i++ ) {
3577 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3578 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3579 handle->bufferInfos[i].buffers[bufferIndex],
3583 if ( stream_.doByteSwap[1] )
3584 byteSwapBuffer( stream_.deviceBuffer,
3585 stream_.bufferSize * stream_.nDeviceChannels[1],
3586 stream_.deviceFormat[1] );
3587 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
3591 for ( i=0, j=0; i<nChannels; i++ ) {
3592 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3593 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3594 handle->bufferInfos[i].buffers[bufferIndex],
3599 if ( stream_.doByteSwap[1] )
3600 byteSwapBuffer( stream_.userBuffer[1],
3601 stream_.bufferSize * stream_.nUserChannels[1],
3602 stream_.userFormat );
3607 // The following call was suggested by Malte Clasen.  While the API
3608 // documentation indicates it should not be required, some device
3609 // drivers apparently do not function correctly without it.
3612 RtApi::tickStreamTime();
3616 static void sampleRateChanged( ASIOSampleRate sRate )
3618 // The ASIO documentation says that this usually only happens during
3619 // external sync. Audio processing is not stopped by the driver,
3620 // actual sample rate might not have even changed, maybe only the
3621 // sample rate status of an AES/EBU or S/PDIF digital input at the
3624 RtApi *object = (RtApi *) asioCallbackInfo->object;
3626 object->stopStream();
3628 catch ( RtAudioError &exception ) {
3629 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3633 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
// Host message callback handed to the driver via asioCallbacks.asioMessage.
// The driver calls it to query host capabilities and to report events; a
// nonzero return acknowledges/accepts the selector.
// NOTE(review): the "long ret = 0;" declaration, the per-case "ret = ...;"
// and "break;" statements, and the final "return ret;" from upstream
// RtAudio 5.0.0 are not visible in this excerpt — confirm against the
// canonical source before building.
3636 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3640 switch( selector ) {
3641 case kAsioSelectorSupported:
3642 if ( value == kAsioResetRequest
3643 || value == kAsioEngineVersion
3644 || value == kAsioResyncRequest
3645 || value == kAsioLatenciesChanged
3646 // The following three were added for ASIO 2.0, you don't
3647 // necessarily have to support them.
3648 || value == kAsioSupportsTimeInfo
3649 || value == kAsioSupportsTimeCode
3650 || value == kAsioSupportsInputMonitor)
3653 case kAsioResetRequest:
3654 // Defer the task and perform the reset of the driver during the
3655 // next "safe" situation.  You cannot reset the driver right now,
3656 // as this code is called from the driver.  Reset the driver is
3657 // done by completely destruct is. I.e. ASIOStop(),
3658 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3660 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3663 case kAsioResyncRequest:
3664 // This informs the application that the driver encountered some
3665 // non-fatal data loss.  It is used for synchronization purposes
3666 // of different media.  Added mainly to work around the Win16Mutex
3667 // problems in Windows 95/98 with the Windows Multimedia system,
3668 // which could lose data because the Mutex was held too long by
3669 // another thread.  However a driver can issue it in other
3671 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3675 case kAsioLatenciesChanged:
3676 // This will inform the host application that the drivers were
3677 // latencies changed.  Beware, it this does not mean that the
3678 // buffer sizes have changed!  You might need to update internal
3680 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3683 case kAsioEngineVersion:
3684 // Return the supported ASIO version of the host application.  If
3685 // a host application does not implement this selector, ASIO 1.0
3686 // is assumed by the driver.
3689 case kAsioSupportsTimeInfo:
3690 // Informs the driver whether the
3691 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3692 // For compatibility with ASIO 1.0 drivers the host application
3693 // should always support the "old" bufferSwitch method, too.
3696 case kAsioSupportsTimeCode:
3697 // Informs the driver whether application is interested in time
3698 // code info.  If an application does not need to know about time
3699 // code, the driver has less work to do.
3706 static const char* getAsioErrorString( ASIOError result )
3714 static const Messages m[] =
3716 { ASE_NotPresent, "Hardware input or output is not present or available." },
3717 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3718 { ASE_InvalidParameter, "Invalid input parameter." },
3719 { ASE_InvalidMode, "Invalid mode." },
3720 { ASE_SPNotAdvancing, "Sample position not advancing." },
3721 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3722 { ASE_NoMemory, "Not enough memory to complete the request." }
3725 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3726 if ( m[i].value == result ) return m[i].message;
3728 return "Unknown error.";
3731 //******************** End of __WINDOWS_ASIO__ *********************//
3735 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3737 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3738 // - Introduces support for the Windows WASAPI API
3739 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3740 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3741 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3746 #include <audioclient.h>
3748 #include <mmdeviceapi.h>
3749 #include <functiondiscoverykeys_devpkey.h>
3752 //=============================================================================
// Release a COM interface pointer if non-null and null it afterwards, so
// repeated SAFE_RELEASE calls (and later null checks) are safe.
// NOTE: the null-check and "objectPtr = NULL;" lines were missing from this
// excerpt and have been restored to the conventional SAFE_RELEASE form.
#define SAFE_RELEASE( objectPtr )\
if ( objectPtr )\
{\
  objectPtr->Release();\
  objectPtr = NULL;\
}
3761 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3763 //-----------------------------------------------------------------------------
3765 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3766 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3767 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3768 // provide intermediate storage for read / write synchronization.
3782 // sets the length of the internal ring buffer
3783 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3786 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3788 bufferSize_ = bufferSize;
3793 // attempt to push a buffer into the ring buffer at the current "in" index
// Returns false (copying nothing) when the input pointer is NULL, the count
// is zero or exceeds the ring capacity, or there is not enough free space
// ahead of the "out" index; otherwise copies bufferSize samples and advances
// inIndex_ with wrap-around.
// NOTE: bufferSize and the indices are in samples of `format`, not bytes —
// each memcpy below scales by that format's sample width via the cast.
3794 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3796 if ( !buffer || // incoming buffer is NULL
3797 bufferSize == 0 || // incoming buffer has no data
3798 bufferSize > bufferSize_ ) // incoming buffer too large
// Free-space test: if the write would wrap past the end while "out" sits
// before "in", compare against "out" shifted up by one full ring length so
// a single linear interval test works across the wrap boundary.
3803 unsigned int relOutIndex = outIndex_;
3804 unsigned int inIndexEnd = inIndex_ + bufferSize;
3805 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3806 relOutIndex += bufferSize_;
3809 // "in" index can end on the "out" index but cannot begin at it
3810 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
3811 return false; // not enough space between "in" index and "out" index
3814 // copy buffer from external to internal
// Split the copy in two when it wraps: fromInSize samples are written at
// inIndex_, the remaining fromZeroSize samples restart at ring offset 0.
3815 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3816 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3817 int fromInSize = bufferSize - fromZeroSize;
3822 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3823 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3825 case RTAUDIO_SINT16:
3826 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3827 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3829 case RTAUDIO_SINT24:
3830 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3831 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3833 case RTAUDIO_SINT32:
3834 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3835 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3837 case RTAUDIO_FLOAT32:
3838 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3839 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3841 case RTAUDIO_FLOAT64:
3842 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3843 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3847 // update "in" index
3848 inIndex_ += bufferSize;
3849 inIndex_ %= bufferSize_;
3854 // attempt to pull a buffer from the ring buffer from the current "out" index
// Mirror image of pushBuffer(): returns false (copying nothing) when the
// output pointer is NULL, the count is zero or exceeds the ring capacity,
// or fewer than bufferSize samples are available between "out" and "in";
// otherwise copies bufferSize samples out and advances outIndex_ with wrap.
// As above, bufferSize and the indices are sample counts, not byte counts.
3855 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3857 if ( !buffer || // incoming buffer is NULL
3858 bufferSize == 0 || // incoming buffer has no data
3859 bufferSize > bufferSize_ ) // incoming buffer too large
// Availability test across the wrap: shift "in" up by one ring length when
// the read interval would wrap past the end while "in" sits before "out".
3864 unsigned int relInIndex = inIndex_;
3865 unsigned int outIndexEnd = outIndex_ + bufferSize;
3866 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3867 relInIndex += bufferSize_;
3870 // "out" index can begin at and end on the "in" index
3871 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
3872 return false; // not enough space between "out" index and "in" index
3875 // copy buffer from internal to external
// Split the copy when it wraps: fromOutSize samples are read from outIndex_,
// the remaining fromZeroSize samples continue from ring offset 0.
3876 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3877 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3878 int fromOutSize = bufferSize - fromZeroSize;
3883 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3884 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3886 case RTAUDIO_SINT16:
3887 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3888 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3890 case RTAUDIO_SINT24:
3891 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3892 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3894 case RTAUDIO_SINT32:
3895 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3896 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3898 case RTAUDIO_FLOAT32:
3899 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3900 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3902 case RTAUDIO_FLOAT64:
3903 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3904 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3908 // update "out" index
3909 outIndex_ += bufferSize;
3910 outIndex_ %= bufferSize_;
// total ring capacity in samples (set by setBufferSize())
3917 unsigned int bufferSize_;
// next write position, in samples (advanced by pushBuffer())
3918 unsigned int inIndex_;
// next read position, in samples (advanced by pullBuffer())
3919 unsigned int outIndex_;
3922 //-----------------------------------------------------------------------------
3924 // A structure to hold various information related to the WASAPI implementation.
// Stored behind stream_.apiHandle (as a void*) and cast back by the stream
// methods below. All members are non-owning in the RAII sense: they are
// explicitly released / closed in RtApiWasapi::closeStream().
3927 IAudioClient* captureAudioClient;
3928 IAudioClient* renderAudioClient;
3929 IAudioCaptureClient* captureClient;
3930 IAudioRenderClient* renderClient;
// event handles signalled by WASAPI when a buffer becomes available
3931 HANDLE captureEvent;
// default-construct everything NULL so closeStream() can safely test each
3935 : captureAudioClient( NULL ),
3936 renderAudioClient( NULL ),
3937 captureClient( NULL ),
3938 renderClient( NULL ),
3939 captureEvent( NULL ),
3940 renderEvent( NULL ) {}
3943 //=============================================================================
// Constructor: initializes COM on this thread and creates the MMDevice
// enumerator used by all subsequent device queries. coInitialized_ records
// whether this object's CoInitialize() succeeded, so the destructor knows
// whether it owes a matching CoUninitialize().
3945 RtApiWasapi::RtApiWasapi()
3946 : coInitialized_( false ), deviceEnumerator_( NULL )
3948 // WASAPI can run either apartment or multi-threaded
3949 HRESULT hr = CoInitialize( NULL );
3950 if ( !FAILED( hr ) )
3951 coInitialized_ = true;
3953 // Instantiate device enumerator
3954 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
3955 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
3956 ( void** ) &deviceEnumerator_ );
// Without an enumerator no device can ever be opened, so this is reported
// as a driver error rather than silently deferred.
3958 if ( FAILED( hr ) ) {
3959 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
3960 error( RtAudioError::DRIVER_ERROR );
3964 //-----------------------------------------------------------------------------
// Destructor: closes any open stream, releases the device enumerator, and
// balances the constructor's CoInitialize() when it succeeded. (The
// closeStream() call and the CoUninitialize() call are on lines elided
// from this excerpt — TODO confirm against the complete source.)
3966 RtApiWasapi::~RtApiWasapi()
3968 if ( stream_.state != STREAM_CLOSED )
3971 SAFE_RELEASE( deviceEnumerator_ );
3973 // If this object previously called CoInitialize()
3974 if ( coInitialized_ )
3978 //=============================================================================
// Returns the total number of ACTIVE WASAPI endpoints: capture count plus
// render count. On any enumeration failure it reports a DRIVER_ERROR via
// error(); the early-exit/cleanup control flow between the failure checks
// is elided from this excerpt.
3980 unsigned int RtApiWasapi::getDeviceCount( void )
3982 unsigned int captureDeviceCount = 0;
3983 unsigned int renderDeviceCount = 0;
3985 IMMDeviceCollection* captureDevices = NULL;
3986 IMMDeviceCollection* renderDevices = NULL;
3988 // Count capture devices
3990 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
3991 if ( FAILED( hr ) ) {
3992 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
3996 hr = captureDevices->GetCount( &captureDeviceCount );
3997 if ( FAILED( hr ) ) {
3998 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4002 // Count render devices
4003 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4004 if ( FAILED( hr ) ) {
4005 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4009 hr = renderDevices->GetCount( &renderDeviceCount );
4010 if ( FAILED( hr ) ) {
4011 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4016 // release all references
4017 SAFE_RELEASE( captureDevices );
4018 SAFE_RELEASE( renderDevices );
// Success and failure share this cleanup path; an empty errorText_ is the
// success indicator here.
4020 if ( errorText_.empty() )
4021 return captureDeviceCount + renderDeviceCount;
4023 error( RtAudioError::DRIVER_ERROR );
4027 //-----------------------------------------------------------------------------
// Probes one endpoint and fills an RtAudio::DeviceInfo for it.
// Device index convention (see the dispatch below): indices
// [0, renderDeviceCount) are render devices; indices at or above
// renderDeviceCount map to capture device (device - renderDeviceCount).
// Error handling follows the elided goto-cleanup pattern: each FAILED()
// check sets errorText_ and jumps to the shared release block at the end.
4029 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4031 RtAudio::DeviceInfo info;
4032 unsigned int captureDeviceCount = 0;
4033 unsigned int renderDeviceCount = 0;
4034 std::string defaultDeviceName;
4035 bool isCaptureDevice = false;
4037 PROPVARIANT deviceNameProp;
4038 PROPVARIANT defaultDeviceNameProp;
4040 IMMDeviceCollection* captureDevices = NULL;
4041 IMMDeviceCollection* renderDevices = NULL;
4042 IMMDevice* devicePtr = NULL;
4043 IMMDevice* defaultDevicePtr = NULL;
4044 IAudioClient* audioClient = NULL;
4045 IPropertyStore* devicePropStore = NULL;
4046 IPropertyStore* defaultDevicePropStore = NULL;
4048 WAVEFORMATEX* deviceFormat = NULL;
4049 WAVEFORMATEX* closestMatchFormat = NULL;
// pessimistic default; set true only if the full probe completes
4052 info.probed = false;
4054 // Count capture devices
4056 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4057 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4058 if ( FAILED( hr ) ) {
4059 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4063 hr = captureDevices->GetCount( &captureDeviceCount );
4064 if ( FAILED( hr ) ) {
4065 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4069 // Count render devices
4070 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4071 if ( FAILED( hr ) ) {
4072 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4076 hr = renderDevices->GetCount( &renderDeviceCount );
4077 if ( FAILED( hr ) ) {
4078 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4082 // validate device index
4083 if ( device >= captureDeviceCount + renderDeviceCount ) {
4084 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4085 errorType = RtAudioError::INVALID_USE;
4089 // determine whether index falls within capture or render devices
4090 if ( device >= renderDeviceCount ) {
4091 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4092 if ( FAILED( hr ) ) {
4093 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4096 isCaptureDevice = true;
4099 hr = renderDevices->Item( device, &devicePtr );
4100 if ( FAILED( hr ) ) {
4101 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4104 isCaptureDevice = false;
4107 // get default device name
4108 if ( isCaptureDevice ) {
4109 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4110 if ( FAILED( hr ) ) {
4111 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4116 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4117 if ( FAILED( hr ) ) {
4118 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4123 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4124 if ( FAILED( hr ) ) {
4125 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
// NOTE(review): the PROPVARIANTs are only initialized from this point on,
// yet the shared cleanup block below calls PropVariantClear() on both
// unconditionally — if an earlier failure jumps straight to cleanup they
// would be cleared uninitialized. Confirm against the elided goto targets.
4128 PropVariantInit( &defaultDeviceNameProp );
4130 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4131 if ( FAILED( hr ) ) {
4132 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4136 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4139 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4140 if ( FAILED( hr ) ) {
4141 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4145 PropVariantInit( &deviceNameProp );
4147 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4148 if ( FAILED( hr ) ) {
4149 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4153 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
// default-device status is decided by friendly-name comparison, which can
// misfire if two endpoints share the same friendly name
4156 if ( isCaptureDevice ) {
4157 info.isDefaultInput = info.name == defaultDeviceName;
4158 info.isDefaultOutput = false;
4161 info.isDefaultInput = false;
4162 info.isDefaultOutput = info.name == defaultDeviceName;
4166 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4167 if ( FAILED( hr ) ) {
4168 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
// the shared-mode mix format determines channel count, preferred rate and
// native sample format below
4172 hr = audioClient->GetMixFormat( &deviceFormat );
4173 if ( FAILED( hr ) ) {
4174 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4178 if ( isCaptureDevice ) {
4179 info.inputChannels = deviceFormat->nChannels;
4180 info.outputChannels = 0;
4181 info.duplexChannels = 0;
4184 info.inputChannels = 0;
4185 info.outputChannels = deviceFormat->nChannels;
4186 info.duplexChannels = 0;
4189 // sample rates (WASAPI only supports the one native sample rate)
4190 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4192 info.sampleRates.clear();
4193 info.sampleRates.push_back( deviceFormat->nSamplesPerSec );
// map the mix format's tag + bit depth onto RtAudio's format flags;
// WAVE_FORMAT_EXTENSIBLE is unwrapped via its SubFormat GUID
4196 info.nativeFormats = 0;
4198 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4199 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4200 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4202 if ( deviceFormat->wBitsPerSample == 32 ) {
4203 info.nativeFormats |= RTAUDIO_FLOAT32;
4205 else if ( deviceFormat->wBitsPerSample == 64 ) {
4206 info.nativeFormats |= RTAUDIO_FLOAT64;
4209 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4210 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4211 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4213 if ( deviceFormat->wBitsPerSample == 8 ) {
4214 info.nativeFormats |= RTAUDIO_SINT8;
4216 else if ( deviceFormat->wBitsPerSample == 16 ) {
4217 info.nativeFormats |= RTAUDIO_SINT16;
4219 else if ( deviceFormat->wBitsPerSample == 24 ) {
4220 info.nativeFormats |= RTAUDIO_SINT24;
4222 else if ( deviceFormat->wBitsPerSample == 32 ) {
4223 info.nativeFormats |= RTAUDIO_SINT32;
4231 // release all references
4232 PropVariantClear( &deviceNameProp );
4233 PropVariantClear( &defaultDeviceNameProp );
4235 SAFE_RELEASE( captureDevices );
4236 SAFE_RELEASE( renderDevices );
4237 SAFE_RELEASE( devicePtr );
4238 SAFE_RELEASE( defaultDevicePtr );
4239 SAFE_RELEASE( audioClient );
4240 SAFE_RELEASE( devicePropStore );
4241 SAFE_RELEASE( defaultDevicePropStore );
// mix formats are allocated by GetMixFormat() with CoTaskMemAlloc, so they
// must be freed with CoTaskMemFree (NULL is a safe no-op)
4243 CoTaskMemFree( deviceFormat );
4244 CoTaskMemFree( closestMatchFormat );
4246 if ( !errorText_.empty() )
4251 //-----------------------------------------------------------------------------
// Returns the index of the first device whose probed info reports
// isDefaultOutput. NOTE(review): getDeviceCount() is re-evaluated every
// iteration and getDeviceInfo() performs a full COM probe per device, so
// this is O(n^2) COM round-trips — harmless for small device counts but
// worth caching if it shows up in profiles.
4253 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4255 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4256 if ( getDeviceInfo( i ).isDefaultOutput ) {
4264 //-----------------------------------------------------------------------------
// Mirror of getDefaultOutputDevice() for the capture side: returns the
// index of the first device whose probed info reports isDefaultInput.
// Same per-iteration probe cost applies as for the output variant.
4266 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4268 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4269 if ( getDeviceInfo( i ).isDefaultInput ) {
4277 //-----------------------------------------------------------------------------
// Tears down the open stream: stops it if still running, releases all COM
// interfaces and event handles held in the WasapiHandle, frees the user
// and device buffers, and marks the stream CLOSED. Calling with no open
// stream is only a WARNING, matching the other RtApi backends.
4279 void RtApiWasapi::closeStream( void )
4281 if ( stream_.state == STREAM_CLOSED ) {
4282 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4283 error( RtAudioError::WARNING );
4287 if ( stream_.state != STREAM_STOPPED )
4290 // clean up stream memory
4291 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4292 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4294 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4295 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
// event handles come from CreateEvent(), so they are closed with
// CloseHandle rather than Release
4297 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4298 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4300 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4301 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4303 delete ( WasapiHandle* ) stream_.apiHandle;
4304 stream_.apiHandle = NULL;
// user buffers: [0] = OUTPUT, [1] = INPUT
4306 for ( int i = 0; i < 2; i++ ) {
4307 if ( stream_.userBuffer[i] ) {
4308 free( stream_.userBuffer[i] );
4309 stream_.userBuffer[i] = 0;
4313 if ( stream_.deviceBuffer ) {
4314 free( stream_.deviceBuffer );
4315 stream_.deviceBuffer = 0;
4318 // update stream state
4319 stream_.state = STREAM_CLOSED;
4322 //-----------------------------------------------------------------------------
// Starts the stream by spawning the wasapiThread() worker. The state is
// flipped to RUNNING *before* the thread exists so the thread's own state
// checks see a running stream as soon as it is resumed.
4324 void RtApiWasapi::startStream( void )
4328 if ( stream_.state == STREAM_RUNNING ) {
4329 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4330 error( RtAudioError::WARNING );
4334 // update stream state
4335 stream_.state = STREAM_RUNNING;
// create WASAPI stream thread; CREATE_SUSPENDED lets us set its priority
// before any audio work begins
4338 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4340 if ( !stream_.callbackInfo.thread ) {
4341 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4342 error( RtAudioError::THREAD_ERROR );
4345 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4346 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4350 //-----------------------------------------------------------------------------
// Graceful stop: signals the worker thread via the STOPPING state, waits
// for it to acknowledge, lets the final buffer drain, then stops the WASAPI
// clients and closes the thread handle.
4352 void RtApiWasapi::stopStream( void )
4356 if ( stream_.state == STREAM_STOPPED ) {
4357 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4358 error( RtAudioError::WARNING );
4362 // inform stream thread by setting stream state to STREAM_STOPPING
4363 stream_.state = STREAM_STOPPING;
// poll until wasapiThread() sets STREAM_STOPPED (the loop body is elided
// here; presumably it sleeps between checks — TODO confirm)
4365 // wait until stream thread is stopped
4366 while( stream_.state != STREAM_STOPPED ) {
4370 // Wait for the last buffer to play before stopping.
// integer millisecond estimate of one buffer's duration
4371 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4373 // stop capture client if applicable
4374 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4375 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4376 if ( FAILED( hr ) ) {
4377 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
4378 error( RtAudioError::DRIVER_ERROR );
4383 // stop render client if applicable
4384 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4385 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4386 if ( FAILED( hr ) ) {
4387 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
4388 error( RtAudioError::DRIVER_ERROR );
4393 // close thread handle
4394 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4395 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4396 error( RtAudioError::THREAD_ERROR );
4400 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4403 //-----------------------------------------------------------------------------
// Immediate stop: identical to stopStream() except it does NOT sleep to let
// the last buffer drain — audio in flight is discarded.
4405 void RtApiWasapi::abortStream( void )
4409 if ( stream_.state == STREAM_STOPPED ) {
4410 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4411 error( RtAudioError::WARNING );
4415 // inform stream thread by setting stream state to STREAM_STOPPING
4416 stream_.state = STREAM_STOPPING;
4418 // wait until stream thread is stopped
4419 while ( stream_.state != STREAM_STOPPED ) {
4423 // stop capture client if applicable
4424 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4425 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4426 if ( FAILED( hr ) ) {
4427 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
4428 error( RtAudioError::DRIVER_ERROR );
4433 // stop render client if applicable
4434 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4435 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4436 if ( FAILED( hr ) ) {
4437 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
4438 error( RtAudioError::DRIVER_ERROR );
4443 // close thread handle
4444 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4445 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4446 error( RtAudioError::THREAD_ERROR );
4450 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4453 //-----------------------------------------------------------------------------
// Opens one direction (INPUT or OUTPUT) of a stream on the given device.
// Validates the index and sample rate (shared-mode WASAPI only accepts the
// device's native rate), activates an IAudioClient for the endpoint, and
// fills in the stream_ bookkeeping. Returns SUCCESS/FAILURE; on failure the
// (elided) cleanup path closes the partially-opened stream.
4455 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4456 unsigned int firstChannel, unsigned int sampleRate,
4457 RtAudioFormat format, unsigned int* bufferSize,
4458 RtAudio::StreamOptions* options )
4460 bool methodResult = FAILURE;
4461 unsigned int captureDeviceCount = 0;
4462 unsigned int renderDeviceCount = 0;
4464 IMMDeviceCollection* captureDevices = NULL;
4465 IMMDeviceCollection* renderDevices = NULL;
4466 IMMDevice* devicePtr = NULL;
4467 WAVEFORMATEX* deviceFormat = NULL;
4468 unsigned int bufferBytes;
4469 stream_.state = STREAM_STOPPED;
4470 RtAudio::DeviceInfo deviceInfo;
4472 // create API Handle if not already created
// a duplex open calls this twice; the second call reuses the handle
4473 if ( !stream_.apiHandle )
4474 stream_.apiHandle = ( void* ) new WasapiHandle();
4476 // Count capture devices
4478 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4479 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4480 if ( FAILED( hr ) ) {
4481 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4485 hr = captureDevices->GetCount( &captureDeviceCount );
4486 if ( FAILED( hr ) ) {
4487 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4491 // Count render devices
4492 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4493 if ( FAILED( hr ) ) {
4494 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4498 hr = renderDevices->GetCount( &renderDeviceCount );
4499 if ( FAILED( hr ) ) {
4500 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4504 // validate device index
4505 if ( device >= captureDeviceCount + renderDeviceCount ) {
4506 errorType = RtAudioError::INVALID_USE;
4507 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4511 deviceInfo = getDeviceInfo( device );
4513 // validate sample rate
// shared-mode streams must run at the device's mix-format rate; this
// backend reports the mismatch instead of resampling at open time
4514 if ( sampleRate != deviceInfo.preferredSampleRate )
4516 errorType = RtAudioError::INVALID_USE;
4517 std::stringstream ss;
4518 ss << "RtApiWasapi::probeDeviceOpen: " << sampleRate
4519 << "Hz sample rate not supported. This device only supports "
4520 << deviceInfo.preferredSampleRate << "Hz.";
4521 errorText_ = ss.str();
4525 // determine whether index falls within capture or render devices
// same index convention as getDeviceInfo(): render devices first, then
// capture devices at (device - renderDeviceCount)
4526 if ( device >= renderDeviceCount ) {
4527 if ( mode != INPUT ) {
4528 errorType = RtAudioError::INVALID_USE;
4529 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4533 // retrieve captureAudioClient from devicePtr
// reference into the shared WasapiHandle so the activated client is
// stored directly where wasapiThread() will look for it
4534 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4536 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4537 if ( FAILED( hr ) ) {
4538 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4542 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4543 NULL, ( void** ) &captureAudioClient );
4544 if ( FAILED( hr ) ) {
4545 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4549 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4550 if ( FAILED( hr ) ) {
4551 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4555 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4556 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4559 if ( mode != OUTPUT ) {
4560 errorType = RtAudioError::INVALID_USE;
4561 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
4565 // retrieve renderAudioClient from devicePtr
4566 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4568 hr = renderDevices->Item( device, &devicePtr );
4569 if ( FAILED( hr ) ) {
4570 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4574 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4575 NULL, ( void** ) &renderAudioClient );
4576 if ( FAILED( hr ) ) {
4577 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4581 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4582 if ( FAILED( hr ) ) {
4583 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4587 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4588 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
// second call of a duplex open upgrades the mode to DUPLEX
4592 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4593 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4594 stream_.mode = DUPLEX;
4597 stream_.mode = mode;
4600 stream_.device[mode] = device;
4601 stream_.doByteSwap[mode] = false;
4602 stream_.sampleRate = sampleRate;
4603 stream_.bufferSize = *bufferSize;
4604 stream_.nBuffers = 1;
4605 stream_.nUserChannels[mode] = channels;
4606 stream_.channelOffset[mode] = firstChannel;
4607 stream_.userFormat = format;
4608 stream_.deviceFormat[mode] = deviceInfo.nativeFormats;
4610 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4611 stream_.userInterleaved = false;
4613 stream_.userInterleaved = true;
4614 stream_.deviceInterleaved[mode] = true;
4616 // Set flags for buffer conversion.
4617 stream_.doConvertBuffer[mode] = false;
// NOTE(review): if nUserChannels / nDeviceChannels are arrays (as their
// [mode] usage elsewhere suggests), the comparison on the next line
// compares array addresses — always unequal — rather than the per-mode
// counts, which would force doConvertBuffer on unconditionally. Likely
// intended: stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode].
// Confirm against the member declarations before changing.
4618 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4619 stream_.nUserChannels != stream_.nDeviceChannels )
4620 stream_.doConvertBuffer[mode] = true;
4621 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4622 stream_.nUserChannels[mode] > 1 )
4623 stream_.doConvertBuffer[mode] = true;
4625 if ( stream_.doConvertBuffer[mode] )
4626 setConvertInfo( mode, 0 );
4628 // Allocate necessary internal buffers
4629 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4631 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4632 if ( !stream_.userBuffer[mode] ) {
4633 errorType = RtAudioError::MEMORY_ERROR;
4634 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
// priority 15 here corresponds to THREAD_PRIORITY_TIME_CRITICAL when
// passed to SetThreadPriority in startStream()
4638 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4639 stream_.callbackInfo.priority = 15;
4641 stream_.callbackInfo.priority = 0;
4643 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4644 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4646 methodResult = SUCCESS;
4650 SAFE_RELEASE( captureDevices );
4651 SAFE_RELEASE( renderDevices );
4652 SAFE_RELEASE( devicePtr );
4653 CoTaskMemFree( deviceFormat );
4655 // if method failed, close the stream
4656 if ( methodResult == FAILURE )
4659 if ( !errorText_.empty() )
4661 return methodResult;
4664 //=============================================================================
// Static thunk passed to CreateThread(): unwraps the RtApiWasapi* and runs
// the member-function audio loop on the new thread.
4666 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4669 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
// Static thunk that performs a graceful stopStream() on behalf of the
// worker thread (e.g. when the user callback requests a stop).
4674 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4677 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
// Static thunk that performs an immediate abortStream() (no drain) on
// behalf of the worker thread.
4682 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4685 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4690 //-----------------------------------------------------------------------------
4692 void RtApiWasapi::wasapiThread()
4694 // as this is a new thread, we must CoInitialize it
4695 CoInitialize( NULL );
4699 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4700 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4701 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4702 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4703 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4704 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4706 WAVEFORMATEX* captureFormat = NULL;
4707 WAVEFORMATEX* renderFormat = NULL;
4708 WasapiBuffer captureBuffer;
4709 WasapiBuffer renderBuffer;
4711 // declare local stream variables
4712 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4713 BYTE* streamBuffer = NULL;
4714 unsigned long captureFlags = 0;
4715 unsigned int bufferFrameCount = 0;
4716 unsigned int numFramesPadding = 0;
4717 bool callbackPushed = false;
4718 bool callbackPulled = false;
4719 bool callbackStopped = false;
4720 int callbackResult = 0;
4722 unsigned int deviceBuffSize = 0;
4725 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4727 // Attempt to assign "Pro Audio" characteristic to thread
4728 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4730 DWORD taskIndex = 0;
4731 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4732 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4733 FreeLibrary( AvrtDll );
4736 // start capture stream if applicable
4737 if ( captureAudioClient ) {
4738 hr = captureAudioClient->GetMixFormat( &captureFormat );
4739 if ( FAILED( hr ) ) {
4740 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4744 // initialize capture stream according to desired buffer size
4745 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / captureFormat->nSamplesPerSec );
4747 if ( !captureClient ) {
4748 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4749 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4750 desiredBufferPeriod,
4751 desiredBufferPeriod,
4754 if ( FAILED( hr ) ) {
4755 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4759 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4760 ( void** ) &captureClient );
4761 if ( FAILED( hr ) ) {
4762 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
4766 // configure captureEvent to trigger on every available capture buffer
4767 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4768 if ( !captureEvent ) {
4769 errorType = RtAudioError::SYSTEM_ERROR;
4770 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
4774 hr = captureAudioClient->SetEventHandle( captureEvent );
4775 if ( FAILED( hr ) ) {
4776 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
4780 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
4781 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
4784 unsigned int inBufferSize = 0;
4785 hr = captureAudioClient->GetBufferSize( &inBufferSize );
4786 if ( FAILED( hr ) ) {
4787 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
4791 // scale outBufferSize according to stream->user sample rate ratio
4792 unsigned int outBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT];
4793 inBufferSize *= stream_.nDeviceChannels[INPUT];
4795 // set captureBuffer size
4796 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
4798 // reset the capture stream
4799 hr = captureAudioClient->Reset();
4800 if ( FAILED( hr ) ) {
4801 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
4805 // start the capture stream
4806 hr = captureAudioClient->Start();
4807 if ( FAILED( hr ) ) {
4808 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
4813 // start render stream if applicable
4814 if ( renderAudioClient ) {
4815 hr = renderAudioClient->GetMixFormat( &renderFormat );
4816 if ( FAILED( hr ) ) {
4817 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4821 // initialize render stream according to desired buffer size
4822 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / renderFormat->nSamplesPerSec );
4824 if ( !renderClient ) {
4825 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4826 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4827 desiredBufferPeriod,
4828 desiredBufferPeriod,
4831 if ( FAILED( hr ) ) {
4832 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
4836 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
4837 ( void** ) &renderClient );
4838 if ( FAILED( hr ) ) {
4839 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
4843 // configure renderEvent to trigger on every available render buffer
4844 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4845 if ( !renderEvent ) {
4846 errorType = RtAudioError::SYSTEM_ERROR;
4847 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
4851 hr = renderAudioClient->SetEventHandle( renderEvent );
4852 if ( FAILED( hr ) ) {
4853 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
4857 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
4858 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
4861 unsigned int outBufferSize = 0;
4862 hr = renderAudioClient->GetBufferSize( &outBufferSize );
4863 if ( FAILED( hr ) ) {
4864 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
4868 // scale inBufferSize according to user->stream sample rate ratio
4869 unsigned int inBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[OUTPUT];
4870 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
4872 // set renderBuffer size
4873 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
4875 // reset the render stream
4876 hr = renderAudioClient->Reset();
4877 if ( FAILED( hr ) ) {
4878 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
4882 // start the render stream
4883 hr = renderAudioClient->Start();
4884 if ( FAILED( hr ) ) {
4885 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
4890 if ( stream_.mode == INPUT ) {
4891 using namespace std; // for roundf
4892 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4894 else if ( stream_.mode == OUTPUT ) {
4895 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4897 else if ( stream_.mode == DUPLEX ) {
4898 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4899 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
4902 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
4903 if ( !stream_.deviceBuffer ) {
4904 errorType = RtAudioError::MEMORY_ERROR;
4905 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
4909 // stream process loop
4910 while ( stream_.state != STREAM_STOPPING ) {
4911 if ( !callbackPulled ) {
4914 // 1. Pull callback buffer from inputBuffer
4915 // 2. If 1. was successful: Convert callback buffer to user format
4917 if ( captureAudioClient ) {
4918 // Pull callback buffer from inputBuffer
4919 callbackPulled = captureBuffer.pullBuffer( stream_.deviceBuffer,
4920 ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT],
4921 stream_.deviceFormat[INPUT] );
4923 if ( callbackPulled ) {
4924 if ( stream_.doConvertBuffer[INPUT] ) {
4925 // Convert callback buffer to user format
4926 convertBuffer( stream_.userBuffer[INPUT],
4927 stream_.deviceBuffer,
4928 stream_.convertInfo[INPUT] );
4931 // no further conversion, simple copy deviceBuffer to userBuffer
4932 memcpy( stream_.userBuffer[INPUT],
4933 stream_.deviceBuffer,
4934 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
4939 // if there is no capture stream, set callbackPulled flag
4940 callbackPulled = true;
4945 // 1. Execute user callback method
4946 // 2. Handle return value from callback
4948 // if callback has not requested the stream to stop
4949 if ( callbackPulled && !callbackStopped ) {
4950 // Execute user callback method
4951 callbackResult = callback( stream_.userBuffer[OUTPUT],
4952 stream_.userBuffer[INPUT],
4955 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
4956 stream_.callbackInfo.userData );
4958 // Handle return value from callback
4959 if ( callbackResult == 1 ) {
4960 // instantiate a thread to stop this thread
4961 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
4962 if ( !threadHandle ) {
4963 errorType = RtAudioError::THREAD_ERROR;
4964 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
4967 else if ( !CloseHandle( threadHandle ) ) {
4968 errorType = RtAudioError::THREAD_ERROR;
4969 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
4973 callbackStopped = true;
4975 else if ( callbackResult == 2 ) {
4976 // instantiate a thread to stop this thread
4977 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
4978 if ( !threadHandle ) {
4979 errorType = RtAudioError::THREAD_ERROR;
4980 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
4983 else if ( !CloseHandle( threadHandle ) ) {
4984 errorType = RtAudioError::THREAD_ERROR;
4985 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
4989 callbackStopped = true;
4996 // 1. Convert callback buffer to stream format
4997 // 2. Push callback buffer into outputBuffer
4999 if ( renderAudioClient && callbackPulled ) {
5000 if ( stream_.doConvertBuffer[OUTPUT] ) {
5001 // Convert callback buffer to stream format
5002 convertBuffer( stream_.deviceBuffer,
5003 stream_.userBuffer[OUTPUT],
5004 stream_.convertInfo[OUTPUT] );
5008 // Push callback buffer into outputBuffer
5009 callbackPushed = renderBuffer.pushBuffer( stream_.deviceBuffer,
5010 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT],
5011 stream_.deviceFormat[OUTPUT] );
5014 // if there is no render stream, set callbackPushed flag
5015 callbackPushed = true;
5020 // 1. Get capture buffer from stream
5021 // 2. Push capture buffer into inputBuffer
5022 // 3. If 2. was successful: Release capture buffer
5024 if ( captureAudioClient ) {
5025 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5026 if ( !callbackPulled ) {
5027 WaitForSingleObject( captureEvent, INFINITE );
5030 // Get capture buffer from stream
5031 hr = captureClient->GetBuffer( &streamBuffer,
5033 &captureFlags, NULL, NULL );
5034 if ( FAILED( hr ) ) {
5035 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5039 if ( bufferFrameCount != 0 ) {
5040 // Push capture buffer into inputBuffer
5041 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5042 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5043 stream_.deviceFormat[INPUT] ) )
5045 // Release capture buffer
5046 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5047 if ( FAILED( hr ) ) {
5048 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5054 // Inform WASAPI that capture was unsuccessful
5055 hr = captureClient->ReleaseBuffer( 0 );
5056 if ( FAILED( hr ) ) {
5057 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5064 // Inform WASAPI that capture was unsuccessful
5065 hr = captureClient->ReleaseBuffer( 0 );
5066 if ( FAILED( hr ) ) {
5067 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5075 // 1. Get render buffer from stream
5076 // 2. Pull next buffer from outputBuffer
5077 // 3. If 2. was successful: Fill render buffer with next buffer
5078 // Release render buffer
5080 if ( renderAudioClient ) {
5081 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5082 if ( callbackPulled && !callbackPushed ) {
5083 WaitForSingleObject( renderEvent, INFINITE );
5086 // Get render buffer from stream
5087 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5088 if ( FAILED( hr ) ) {
5089 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5093 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5094 if ( FAILED( hr ) ) {
5095 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
5099 bufferFrameCount -= numFramesPadding;
5101 if ( bufferFrameCount != 0 ) {
5102 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5103 if ( FAILED( hr ) ) {
5104 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5108 // Pull next buffer from outputBuffer
5109 // Fill render buffer with next buffer
5110 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5111 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5112 stream_.deviceFormat[OUTPUT] ) )
5114 // Release render buffer
5115 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5116 if ( FAILED( hr ) ) {
5117 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5123 // Inform WASAPI that render was unsuccessful
5124 hr = renderClient->ReleaseBuffer( 0, 0 );
5125 if ( FAILED( hr ) ) {
5126 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5133 // Inform WASAPI that render was unsuccessful
5134 hr = renderClient->ReleaseBuffer( 0, 0 );
5135 if ( FAILED( hr ) ) {
5136 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5142 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5143 if ( callbackPushed ) {
5144 callbackPulled = false;
5146 RtApi::tickStreamTime();
5153 CoTaskMemFree( captureFormat );
5154 CoTaskMemFree( renderFormat );
5158 // update stream state
5159 stream_.state = STREAM_STOPPED;
5161 if ( errorText_.empty() )
5167 //******************** End of __WINDOWS_WASAPI__ *********************//
5171 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5173 // Modified by Robin Davies, October 2005
5174 // - Improvements to DirectX pointer chasing.
5175 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5176 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5177 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5178 // Changed device query structure for RtAudio 4.0.7, January 2010
#include <windows.h>
#include <process.h>
#include <mmsystem.h>
#include <mmreg.h>
#include <dsound.h>
#include <assert.h>
#include <algorithm>
#if defined(__MINGW32__)
  // missing from latest mingw winapi
#define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
#define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
#define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
#define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
#endif

// Lower bound for the DirectSound device (ring) buffer size, in bytes.
#define MINIMUM_DEVICE_BUFFER_SIZE 32768

#ifdef _MSC_VER // if Microsoft Visual C++
#pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
#endif
5202 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5204 if ( pointer > bufferSize ) pointer -= bufferSize;
5205 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5206 if ( pointer < earlierPointer ) pointer += bufferSize;
5207 return pointer >= earlierPointer && pointer < laterPointer;
5210 // A structure to hold various information related to the DirectSound
5211 // API implementation.
5213 unsigned int drainCounter; // Tracks callback counts when draining
5214 bool internalDrain; // Indicates if stop is initiated from callback or not.
5218 UINT bufferPointer[2];
5219 DWORD dsBufferSize[2];
5220 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
5224 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5227 // Declarations for utility functions, callbacks, and structures
5228 // specific to the DirectSound implementation.
5229 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5230 LPCTSTR description,
5234 static const char* getErrorString( int code );
5236 static unsigned __stdcall callbackHandler( void *ptr );
5245 : found(false) { validId[0] = false; validId[1] = false; }
// Context passed to deviceQueryCallback during device enumeration.
struct DsProbeData {
  bool isInput;                             // true when enumerating capture devices
  std::vector<struct DsDevice>* dsDevices;  // list to fill in / update
};
5253 RtApiDs :: RtApiDs()
5255 // Dsound will run both-threaded. If CoInitialize fails, then just
5256 // accept whatever the mainline chose for a threading model.
5257 coInitialized_ = false;
5258 HRESULT hr = CoInitialize( NULL );
5259 if ( !FAILED( hr ) ) coInitialized_ = true;
5262 RtApiDs :: ~RtApiDs()
5264 if ( stream_.state != STREAM_CLOSED ) closeStream();
5265 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5268 // The DirectSound default output is always the first device.
5269 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5274 // The DirectSound default input is always the first input device,
5275 // which is the first capture device enumerated.
5276 unsigned int RtApiDs :: getDefaultInputDevice( void )
5281 unsigned int RtApiDs :: getDeviceCount( void )
5283 // Set query flag for previously found devices to false, so that we
5284 // can check for any devices that have disappeared.
5285 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5286 dsDevices[i].found = false;
5288 // Query DirectSound devices.
5289 struct DsProbeData probeInfo;
5290 probeInfo.isInput = false;
5291 probeInfo.dsDevices = &dsDevices;
5292 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5293 if ( FAILED( result ) ) {
5294 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5295 errorText_ = errorStream_.str();
5296 error( RtAudioError::WARNING );
5299 // Query DirectSoundCapture devices.
5300 probeInfo.isInput = true;
5301 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5302 if ( FAILED( result ) ) {
5303 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5304 errorText_ = errorStream_.str();
5305 error( RtAudioError::WARNING );
5308 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5309 for ( unsigned int i=0; i<dsDevices.size(); ) {
5310 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5314 return static_cast<unsigned int>(dsDevices.size());
5317 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5319 RtAudio::DeviceInfo info;
5320 info.probed = false;
5322 if ( dsDevices.size() == 0 ) {
5323 // Force a query of all devices
5325 if ( dsDevices.size() == 0 ) {
5326 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5327 error( RtAudioError::INVALID_USE );
5332 if ( device >= dsDevices.size() ) {
5333 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5334 error( RtAudioError::INVALID_USE );
5339 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5341 LPDIRECTSOUND output;
5343 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5344 if ( FAILED( result ) ) {
5345 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5346 errorText_ = errorStream_.str();
5347 error( RtAudioError::WARNING );
5351 outCaps.dwSize = sizeof( outCaps );
5352 result = output->GetCaps( &outCaps );
5353 if ( FAILED( result ) ) {
5355 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5356 errorText_ = errorStream_.str();
5357 error( RtAudioError::WARNING );
5361 // Get output channel information.
5362 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5364 // Get sample rate information.
5365 info.sampleRates.clear();
5366 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5367 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5368 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5369 info.sampleRates.push_back( SAMPLE_RATES[k] );
5371 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5372 info.preferredSampleRate = SAMPLE_RATES[k];
5376 // Get format information.
5377 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5378 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5382 if ( getDefaultOutputDevice() == device )
5383 info.isDefaultOutput = true;
5385 if ( dsDevices[ device ].validId[1] == false ) {
5386 info.name = dsDevices[ device ].name;
5393 LPDIRECTSOUNDCAPTURE input;
5394 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5395 if ( FAILED( result ) ) {
5396 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5397 errorText_ = errorStream_.str();
5398 error( RtAudioError::WARNING );
5403 inCaps.dwSize = sizeof( inCaps );
5404 result = input->GetCaps( &inCaps );
5405 if ( FAILED( result ) ) {
5407 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5408 errorText_ = errorStream_.str();
5409 error( RtAudioError::WARNING );
5413 // Get input channel information.
5414 info.inputChannels = inCaps.dwChannels;
5416 // Get sample rate and format information.
5417 std::vector<unsigned int> rates;
5418 if ( inCaps.dwChannels >= 2 ) {
5419 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5420 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5421 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5422 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5423 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5424 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5425 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5426 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5428 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5429 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5430 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5431 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5432 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5434 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5435 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5436 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5437 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5438 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5441 else if ( inCaps.dwChannels == 1 ) {
5442 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5443 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5444 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5445 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5446 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5447 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5448 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5449 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5451 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5452 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5453 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5454 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5455 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5457 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5458 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5459 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5460 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5461 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5464 else info.inputChannels = 0; // technically, this would be an error
5468 if ( info.inputChannels == 0 ) return info;
5470 // Copy the supported rates to the info structure but avoid duplication.
5472 for ( unsigned int i=0; i<rates.size(); i++ ) {
5474 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5475 if ( rates[i] == info.sampleRates[j] ) {
5480 if ( found == false ) info.sampleRates.push_back( rates[i] );
5482 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5484 // If device opens for both playback and capture, we determine the channels.
5485 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5486 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
5488 if ( device == 0 ) info.isDefaultInput = true;
5490 // Copy name and return.
5491 info.name = dsDevices[ device ].name;
5496 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5497 unsigned int firstChannel, unsigned int sampleRate,
5498 RtAudioFormat format, unsigned int *bufferSize,
5499 RtAudio::StreamOptions *options )
5501 if ( channels + firstChannel > 2 ) {
5502 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5506 size_t nDevices = dsDevices.size();
5507 if ( nDevices == 0 ) {
5508 // This should not happen because a check is made before this function is called.
5509 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5513 if ( device >= nDevices ) {
5514 // This should not happen because a check is made before this function is called.
5515 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5519 if ( mode == OUTPUT ) {
5520 if ( dsDevices[ device ].validId[0] == false ) {
5521 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5522 errorText_ = errorStream_.str();
5526 else { // mode == INPUT
5527 if ( dsDevices[ device ].validId[1] == false ) {
5528 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5529 errorText_ = errorStream_.str();
5534 // According to a note in PortAudio, using GetDesktopWindow()
5535 // instead of GetForegroundWindow() is supposed to avoid problems
5536 // that occur when the application's window is not the foreground
5537 // window. Also, if the application window closes before the
5538 // DirectSound buffer, DirectSound can crash. In the past, I had
5539 // problems when using GetDesktopWindow() but it seems fine now
5540 // (January 2010). I'll leave it commented here.
5541 // HWND hWnd = GetForegroundWindow();
5542 HWND hWnd = GetDesktopWindow();
5544 // Check the numberOfBuffers parameter and limit the lowest value to
5545 // two. This is a judgement call and a value of two is probably too
5546 // low for capture, but it should work for playback.
5548 if ( options ) nBuffers = options->numberOfBuffers;
5549 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5550 if ( nBuffers < 2 ) nBuffers = 3;
5552 // Check the lower range of the user-specified buffer size and set
5553 // (arbitrarily) to a lower bound of 32.
5554 if ( *bufferSize < 32 ) *bufferSize = 32;
5556 // Create the wave format structure. The data format setting will
5557 // be determined later.
5558 WAVEFORMATEX waveFormat;
5559 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5560 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5561 waveFormat.nChannels = channels + firstChannel;
5562 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5564 // Determine the device buffer size. By default, we'll use the value
5565 // defined above (32K), but we will grow it to make allowances for
5566 // very large software buffer sizes.
5567 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5568 DWORD dsPointerLeadTime = 0;
5570 void *ohandle = 0, *bhandle = 0;
5572 if ( mode == OUTPUT ) {
5574 LPDIRECTSOUND output;
5575 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5576 if ( FAILED( result ) ) {
5577 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5578 errorText_ = errorStream_.str();
5583 outCaps.dwSize = sizeof( outCaps );
5584 result = output->GetCaps( &outCaps );
5585 if ( FAILED( result ) ) {
5587 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5588 errorText_ = errorStream_.str();
5592 // Check channel information.
5593 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5594 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5595 errorText_ = errorStream_.str();
5599 // Check format information. Use 16-bit format unless not
5600 // supported or user requests 8-bit.
5601 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5602 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5603 waveFormat.wBitsPerSample = 16;
5604 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5607 waveFormat.wBitsPerSample = 8;
5608 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5610 stream_.userFormat = format;
5612 // Update wave format structure and buffer information.
5613 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5614 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5615 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5617 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5618 while ( dsPointerLeadTime * 2U > dsBufferSize )
5621 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5622 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5623 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5624 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5625 if ( FAILED( result ) ) {
5627 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5628 errorText_ = errorStream_.str();
5632 // Even though we will write to the secondary buffer, we need to
5633 // access the primary buffer to set the correct output format
5634 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5635 // buffer description.
5636 DSBUFFERDESC bufferDescription;
5637 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5638 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5639 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5641 // Obtain the primary buffer
5642 LPDIRECTSOUNDBUFFER buffer;
5643 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5644 if ( FAILED( result ) ) {
5646 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5647 errorText_ = errorStream_.str();
5651 // Set the primary DS buffer sound format.
5652 result = buffer->SetFormat( &waveFormat );
5653 if ( FAILED( result ) ) {
5655 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5656 errorText_ = errorStream_.str();
5660 // Setup the secondary DS buffer description.
5661 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5662 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5663 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5664 DSBCAPS_GLOBALFOCUS |
5665 DSBCAPS_GETCURRENTPOSITION2 |
5666 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5667 bufferDescription.dwBufferBytes = dsBufferSize;
5668 bufferDescription.lpwfxFormat = &waveFormat;
5670 // Try to create the secondary DS buffer. If that doesn't work,
5671 // try to use software mixing. Otherwise, there's a problem.
5672 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5673 if ( FAILED( result ) ) {
5674 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5675 DSBCAPS_GLOBALFOCUS |
5676 DSBCAPS_GETCURRENTPOSITION2 |
5677 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5678 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5679 if ( FAILED( result ) ) {
5681 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5682 errorText_ = errorStream_.str();
5687 // Get the buffer size ... might be different from what we specified.
5689 dsbcaps.dwSize = sizeof( DSBCAPS );
5690 result = buffer->GetCaps( &dsbcaps );
5691 if ( FAILED( result ) ) {
5694 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5695 errorText_ = errorStream_.str();
5699 dsBufferSize = dsbcaps.dwBufferBytes;
5701 // Lock the DS buffer
5704 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5705 if ( FAILED( result ) ) {
5708 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
5709 errorText_ = errorStream_.str();
5713 // Zero the DS buffer
5714 ZeroMemory( audioPtr, dataLen );
5716 // Unlock the DS buffer
5717 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5718 if ( FAILED( result ) ) {
5721 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
5722 errorText_ = errorStream_.str();
5726 ohandle = (void *) output;
5727 bhandle = (void *) buffer;
5730 if ( mode == INPUT ) {
5732 LPDIRECTSOUNDCAPTURE input;
5733 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5734 if ( FAILED( result ) ) {
5735 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5736 errorText_ = errorStream_.str();
5741 inCaps.dwSize = sizeof( inCaps );
5742 result = input->GetCaps( &inCaps );
5743 if ( FAILED( result ) ) {
5745 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
5746 errorText_ = errorStream_.str();
5750 // Check channel information.
5751 if ( inCaps.dwChannels < channels + firstChannel ) {
5752 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
5756 // Check format information. Use 16-bit format unless user
5758 DWORD deviceFormats;
5759 if ( channels + firstChannel == 2 ) {
5760 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
5761 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5762 waveFormat.wBitsPerSample = 8;
5763 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5765 else { // assume 16-bit is supported
5766 waveFormat.wBitsPerSample = 16;
5767 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5770 else { // channel == 1
5771 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
5772 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5773 waveFormat.wBitsPerSample = 8;
5774 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5776 else { // assume 16-bit is supported
5777 waveFormat.wBitsPerSample = 16;
5778 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5781 stream_.userFormat = format;
5783 // Update wave format structure and buffer information.
5784 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5785 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5786 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5788 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5789 while ( dsPointerLeadTime * 2U > dsBufferSize )
5792 // Setup the secondary DS buffer description.
5793 DSCBUFFERDESC bufferDescription;
5794 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
5795 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
5796 bufferDescription.dwFlags = 0;
5797 bufferDescription.dwReserved = 0;
5798 bufferDescription.dwBufferBytes = dsBufferSize;
5799 bufferDescription.lpwfxFormat = &waveFormat;
5801 // Create the capture buffer.
5802 LPDIRECTSOUNDCAPTUREBUFFER buffer;
5803 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
5804 if ( FAILED( result ) ) {
5806 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
5807 errorText_ = errorStream_.str();
5811 // Get the buffer size ... might be different from what we specified.
5813 dscbcaps.dwSize = sizeof( DSCBCAPS );
5814 result = buffer->GetCaps( &dscbcaps );
5815 if ( FAILED( result ) ) {
5818 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5819 errorText_ = errorStream_.str();
5823 dsBufferSize = dscbcaps.dwBufferBytes;
5825 // NOTE: We could have a problem here if this is a duplex stream
5826 // and the play and capture hardware buffer sizes are different
5827 // (I'm actually not sure if that is a problem or not).
5828 // Currently, we are not verifying that.
5830 // Lock the capture buffer
5833 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5834 if ( FAILED( result ) ) {
5837 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
5838 errorText_ = errorStream_.str();
5843 ZeroMemory( audioPtr, dataLen );
5845 // Unlock the buffer
5846 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5847 if ( FAILED( result ) ) {
5850 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
5851 errorText_ = errorStream_.str();
5855 ohandle = (void *) input;
5856 bhandle = (void *) buffer;
5859 // Set various stream parameters
5860 DsHandle *handle = 0;
5861 stream_.nDeviceChannels[mode] = channels + firstChannel;
5862 stream_.nUserChannels[mode] = channels;
5863 stream_.bufferSize = *bufferSize;
5864 stream_.channelOffset[mode] = firstChannel;
5865 stream_.deviceInterleaved[mode] = true;
5866 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
5867 else stream_.userInterleaved = true;
5869 // Set flag for buffer conversion
5870 stream_.doConvertBuffer[mode] = false;
5871 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
5872 stream_.doConvertBuffer[mode] = true;
5873 if (stream_.userFormat != stream_.deviceFormat[mode])
5874 stream_.doConvertBuffer[mode] = true;
5875 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
5876 stream_.nUserChannels[mode] > 1 )
5877 stream_.doConvertBuffer[mode] = true;
5879 // Allocate necessary internal buffers
5880 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
5881 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
5882 if ( stream_.userBuffer[mode] == NULL ) {
5883 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
5887 if ( stream_.doConvertBuffer[mode] ) {
5889 bool makeBuffer = true;
5890 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
5891 if ( mode == INPUT ) {
5892 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
5893 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
5894 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
5899 bufferBytes *= *bufferSize;
5900 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
5901 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
5902 if ( stream_.deviceBuffer == NULL ) {
5903 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
5909 // Allocate our DsHandle structures for the stream.
5910 if ( stream_.apiHandle == 0 ) {
5912 handle = new DsHandle;
5914 catch ( std::bad_alloc& ) {
5915 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
5919 // Create a manual-reset event.
5920 handle->condition = CreateEvent( NULL, // no security
5921 TRUE, // manual-reset
5922 FALSE, // non-signaled initially
5924 stream_.apiHandle = (void *) handle;
5927 handle = (DsHandle *) stream_.apiHandle;
5928 handle->id[mode] = ohandle;
5929 handle->buffer[mode] = bhandle;
5930 handle->dsBufferSize[mode] = dsBufferSize;
5931 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
5933 stream_.device[mode] = device;
5934 stream_.state = STREAM_STOPPED;
5935 if ( stream_.mode == OUTPUT && mode == INPUT )
5936 // We had already set up an output stream.
5937 stream_.mode = DUPLEX;
5939 stream_.mode = mode;
5940 stream_.nBuffers = nBuffers;
5941 stream_.sampleRate = sampleRate;
5943 // Setup the buffer conversion information structure.
5944 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
5946 // Setup the callback thread.
5947 if ( stream_.callbackInfo.isRunning == false ) {
5949 stream_.callbackInfo.isRunning = true;
5950 stream_.callbackInfo.object = (void *) this;
5951 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
5952 &stream_.callbackInfo, 0, &threadId );
5953 if ( stream_.callbackInfo.thread == 0 ) {
5954 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
5958 // Boost DS thread priority
5959 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
5965 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
5966 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
5967 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
5968 if ( buffer ) buffer->Release();
5971 if ( handle->buffer[1] ) {
5972 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
5973 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
5974 if ( buffer ) buffer->Release();
5977 CloseHandle( handle->condition );
5979 stream_.apiHandle = 0;
5982 for ( int i=0; i<2; i++ ) {
5983 if ( stream_.userBuffer[i] ) {
5984 free( stream_.userBuffer[i] );
5985 stream_.userBuffer[i] = 0;
5989 if ( stream_.deviceBuffer ) {
5990 free( stream_.deviceBuffer );
5991 stream_.deviceBuffer = 0;
5994 stream_.state = STREAM_CLOSED;
// Close an open stream: signal the callback thread to exit and join it,
// release the DirectSound playback/capture buffers and device objects,
// destroy the signaling event, and free all internal buffers.
// NOTE(review): this extract is elided — the original lines between 6015/6022
// and 6024/6031 (buffer->Stop()/Release() and object->Release() calls, plus
// closing braces) are missing here; restore from the canonical source.
5998 void RtApiDs :: closeStream()
6000 if ( stream_.state == STREAM_CLOSED ) {
6001 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6002 error( RtAudioError::WARNING );
// Stop the callback thread first so it cannot touch the buffers we
// are about to release; WaitForSingleObject joins the worker thread.
6006 // Stop the callback thread.
6007 stream_.callbackInfo.isRunning = false;
6008 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6009 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6011 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Index 0 = playback objects, index 1 = capture objects.
6013 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6014 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6015 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6022 if ( handle->buffer[1] ) {
6023 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6024 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
// Destroy the manual-reset event created in probeDeviceOpen.
6031 CloseHandle( handle->condition );
6033 stream_.apiHandle = 0;
// Free the user-side buffers for both directions.
6036 for ( int i=0; i<2; i++ ) {
6037 if ( stream_.userBuffer[i] ) {
6038 free( stream_.userBuffer[i] );
6039 stream_.userBuffer[i] = 0;
6043 if ( stream_.deviceBuffer ) {
6044 free( stream_.deviceBuffer );
6045 stream_.deviceBuffer = 0;
// Mark the stream object as fully torn down.
6048 stream_.mode = UNINITIALIZED;
6049 stream_.state = STREAM_CLOSED;
// Start a stopped stream: bump the multimedia timer resolution, reset the
// duplex synchronization state, start the DirectSound playback and/or
// capture buffers in looping mode, and mark the stream RUNNING.
// NOTE(review): error-path control flow (goto/return after setting errorText_)
// is elided from this extract; see the canonical source.
6052 void RtApiDs :: startStream()
6055 if ( stream_.state == STREAM_RUNNING ) {
6056 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6057 error( RtAudioError::WARNING );
6061 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6063 // Increase scheduler frequency on lesser windows (a side-effect of
6064 // increasing timer accuracy). On greater windows (Win2K or later),
6065 // this is already in effect.
6066 timeBeginPeriod( 1 );
// Force callbackEvent() to re-synchronize the buffer pointers on the
// first pass after (re)start.
6068 buffersRolling = false;
6069 duplexPrerollBytes = 0;
6071 if ( stream_.mode == DUPLEX ) {
6072 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6073 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Start playback (index 0) looping over the whole device buffer.
6077 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6079 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6080 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6081 if ( FAILED( result ) ) {
6082 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6083 errorText_ = errorStream_.str();
// Start capture (index 1) looping.
6088 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6090 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6091 result = buffer->Start( DSCBSTART_LOOPING );
6092 if ( FAILED( result ) ) {
6093 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6094 errorText_ = errorStream_.str();
// Reset drain bookkeeping and the stop-signaling event before going live.
6099 handle->drainCounter = 0;
6100 handle->internalDrain = false;
6101 ResetEvent( handle->condition );
6102 stream_.state = STREAM_RUNNING;
6105 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop a running stream. For output, optionally wait (via the handle's
// event) until queued data has drained, then stop each DirectSound buffer
// and zero its memory so a subsequent start does not replay stale audio.
// Buffer pointers are reset to 0 for the next start.
// NOTE(review): several closing braces and error-path jumps are elided from
// this extract; the mutex lock/unlock pairing below should be re-verified
// against the canonical source.
6108 void RtApiDs :: stopStream()
6111 if ( stream_.state == STREAM_STOPPED ) {
6112 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6113 error( RtAudioError::WARNING );
6120 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6121 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means a normal stop: request a drain (2) and block
// until callbackEvent() signals the condition event.
6122 if ( handle->drainCounter == 0 ) {
6123 handle->drainCounter = 2;
6124 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6127 stream_.state = STREAM_STOPPED;
6129 MUTEX_LOCK( &stream_.mutex );
6131 // Stop the buffer and clear memory
6132 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6133 result = buffer->Stop();
6134 if ( FAILED( result ) ) {
6135 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6136 errorText_ = errorStream_.str();
6140 // Lock the buffer and clear it so that if we start to play again,
6141 // we won't have old data playing.
6142 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6143 if ( FAILED( result ) ) {
6144 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6145 errorText_ = errorStream_.str();
6149 // Zero the DS buffer
6150 ZeroMemory( audioPtr, dataLen );
6152 // Unlock the DS buffer
6153 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6154 if ( FAILED( result ) ) {
6155 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6156 errorText_ = errorStream_.str();
6160 // If we start playing again, we must begin at beginning of buffer.
6161 handle->bufferPointer[0] = 0;
6164 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6165 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6169 stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken in the output branch above;
// only lock here for a pure INPUT stream.
6171 if ( stream_.mode != DUPLEX )
6172 MUTEX_LOCK( &stream_.mutex );
6174 result = buffer->Stop();
6175 if ( FAILED( result ) ) {
6176 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6177 errorText_ = errorStream_.str();
6181 // Lock the buffer and clear it so that if we start to play again,
6182 // we won't have old data playing.
6183 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6184 if ( FAILED( result ) ) {
6185 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6186 errorText_ = errorStream_.str();
6190 // Zero the DS buffer
6191 ZeroMemory( audioPtr, dataLen );
6193 // Unlock the DS buffer
6194 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6195 if ( FAILED( result ) ) {
6196 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6197 errorText_ = errorStream_.str();
6201 // If we start recording again, we must begin at beginning of buffer.
6202 handle->bufferPointer[1] = 0;
6206 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6207 MUTEX_UNLOCK( &stream_.mutex );
6209 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Abort a running stream: skip the drain-and-wait of stopStream() by
// setting drainCounter to 2 up front, so no blocking wait occurs.
// NOTE(review): the call to stopStream() that follows in the canonical
// source (original line ~6224) is elided from this extract.
6212 void RtApiDs :: abortStream()
6215 if ( stream_.state == STREAM_STOPPED ) {
6216 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6217 error( RtAudioError::WARNING );
6221 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6222 handle->drainCounter = 2;
// Per-iteration worker for the callback thread (see callbackHandler):
//  1. invoke the user callback to produce/consume one buffer of audio
//     (unless draining), 2. on first pass, wait for the hardware pointers
//     to start rolling and align our read/write offsets, 3. copy output
//     data into the playback ring buffer behind the safe-write pointer,
//     and 4. copy capture data out of the capture ring buffer, handling
//     the 0.5 s duplex pre-roll and xrun resynchronization.
// FIXES applied in this revision:
//  - "¤tWritePointer"/"¤tReadPointer" were mojibake for
//    "&currentWritePointer"/"&currentReadPointer" (HTML entity &curren;
//    corruption) — restored so the GetCurrentPosition calls compile.
//  - Both dsBuffer->Unlock(...) results are now assigned to `result` so the
//    FAILED( result ) checks that follow test the Unlock call instead of
//    the stale (already-successful) Lock result.
6227 void RtApiDs :: callbackEvent()
6229 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6230 Sleep( 50 ); // sleep 50 milliseconds
6234 if ( stream_.state == STREAM_CLOSED ) {
6235 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6236 error( RtAudioError::WARNING );
6240 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6241 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6243 // Check if we were draining the stream and signal is finished.
6244 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6246 stream_.state = STREAM_STOPPING;
6247 if ( handle->internalDrain == false )
6248 SetEvent( handle->condition );
6254 // Invoke user callback to get fresh output data UNLESS we are
6256 if ( handle->drainCounter == 0 ) {
6257 RtAudioCallback callback = (RtAudioCallback) info->callback;
6258 double streamTime = getStreamTime();
6259 RtAudioStreamStatus status = 0;
6260 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6261 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6262 handle->xrun[0] = false;
6264 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6265 status |= RTAUDIO_INPUT_OVERFLOW;
6266 handle->xrun[1] = false;
6268 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6269 stream_.bufferSize, streamTime, status, info->userData );
6270 if ( cbReturnValue == 2 ) {
6271 stream_.state = STREAM_STOPPING;
6272 handle->drainCounter = 2;
6276 else if ( cbReturnValue == 1 ) {
6277 handle->drainCounter = 1;
6278 handle->internalDrain = true;
6283 DWORD currentWritePointer, safeWritePointer;
6284 DWORD currentReadPointer, safeReadPointer;
6285 UINT nextWritePointer;
6287 LPVOID buffer1 = NULL;
6288 LPVOID buffer2 = NULL;
6289 DWORD bufferSize1 = 0;
6290 DWORD bufferSize2 = 0;
6295 MUTEX_LOCK( &stream_.mutex );
6296 if ( stream_.state == STREAM_STOPPED ) {
6297 MUTEX_UNLOCK( &stream_.mutex );
6301 if ( buffersRolling == false ) {
6302 if ( stream_.mode == DUPLEX ) {
6303 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6305 // It takes a while for the devices to get rolling. As a result,
6306 // there's no guarantee that the capture and write device pointers
6307 // will move in lockstep. Wait here for both devices to start
6308 // rolling, and then set our buffer pointers accordingly.
6309 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6310 // bytes later than the write buffer.
6312 // Stub: a serious risk of having a pre-emptive scheduling round
6313 // take place between the two GetCurrentPosition calls... but I'm
6314 // really not sure how to solve the problem. Temporarily boost to
6315 // Realtime priority, maybe; but I'm not sure what priority the
6316 // DirectSound service threads run at. We *should* be roughly
6317 // within a ms or so of correct.
6319 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6320 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6322 DWORD startSafeWritePointer, startSafeReadPointer;
6324 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6325 if ( FAILED( result ) ) {
6326 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6327 errorText_ = errorStream_.str();
6328 MUTEX_UNLOCK( &stream_.mutex );
6329 error( RtAudioError::SYSTEM_ERROR );
6332 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6333 if ( FAILED( result ) ) {
6334 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6335 errorText_ = errorStream_.str();
6336 MUTEX_UNLOCK( &stream_.mutex );
6337 error( RtAudioError::SYSTEM_ERROR );
6341 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6342 if ( FAILED( result ) ) {
6343 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6344 errorText_ = errorStream_.str();
6345 MUTEX_UNLOCK( &stream_.mutex );
6346 error( RtAudioError::SYSTEM_ERROR );
6349 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6350 if ( FAILED( result ) ) {
6351 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6352 errorText_ = errorStream_.str();
6353 MUTEX_UNLOCK( &stream_.mutex );
6354 error( RtAudioError::SYSTEM_ERROR );
6357 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6361 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6363 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6364 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6365 handle->bufferPointer[1] = safeReadPointer;
6367 else if ( stream_.mode == OUTPUT ) {
6369 // Set the proper nextWritePosition after initial startup.
6370 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// FIX: was "¤tWritePointer" (mojibake for "&currentWritePointer").
6371 result = dsWriteBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6372 if ( FAILED( result ) ) {
6373 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6374 errorText_ = errorStream_.str();
6375 MUTEX_UNLOCK( &stream_.mutex );
6376 error( RtAudioError::SYSTEM_ERROR );
6379 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6380 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6383 buffersRolling = true;
6386 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6388 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6390 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6391 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6392 bufferBytes *= formatBytes( stream_.userFormat );
6393 memset( stream_.userBuffer[0], 0, bufferBytes );
6396 // Setup parameters and do buffer conversion if necessary.
6397 if ( stream_.doConvertBuffer[0] ) {
6398 buffer = stream_.deviceBuffer;
6399 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6400 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6401 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6404 buffer = stream_.userBuffer[0];
6405 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6406 bufferBytes *= formatBytes( stream_.userFormat );
6409 // No byte swapping necessary in DirectSound implementation.
6411 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6412 // unsigned. So, we need to convert our signed 8-bit data here to
6414 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6415 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6417 DWORD dsBufferSize = handle->dsBufferSize[0];
6418 nextWritePointer = handle->bufferPointer[0];
6420 DWORD endWrite, leadPointer;
6422 // Find out where the read and "safe write" pointers are.
// FIX: was "¤tWritePointer" (mojibake for "&currentWritePointer").
6423 result = dsBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6424 if ( FAILED( result ) ) {
6425 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6426 errorText_ = errorStream_.str();
6427 MUTEX_UNLOCK( &stream_.mutex );
6428 error( RtAudioError::SYSTEM_ERROR );
6432 // We will copy our output buffer into the region between
6433 // safeWritePointer and leadPointer. If leadPointer is not
6434 // beyond the next endWrite position, wait until it is.
6435 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6436 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6437 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6438 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6439 endWrite = nextWritePointer + bufferBytes;
6441 // Check whether the entire write region is behind the play pointer.
6442 if ( leadPointer >= endWrite ) break;
6444 // If we are here, then we must wait until the leadPointer advances
6445 // beyond the end of our next write region. We use the
6446 // Sleep() function to suspend operation until that happens.
6447 double millis = ( endWrite - leadPointer ) * 1000.0;
6448 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6449 if ( millis < 1.0 ) millis = 1.0;
6450 Sleep( (DWORD) millis );
6453 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6454 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6455 // We've strayed into the forbidden zone ... resync the read pointer.
6456 handle->xrun[0] = true;
6457 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6458 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6459 handle->bufferPointer[0] = nextWritePointer;
6460 endWrite = nextWritePointer + bufferBytes;
6463 // Lock free space in the buffer
6464 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6465 &bufferSize1, &buffer2, &bufferSize2, 0 );
6466 if ( FAILED( result ) ) {
6467 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6468 errorText_ = errorStream_.str();
6469 MUTEX_UNLOCK( &stream_.mutex );
6470 error( RtAudioError::SYSTEM_ERROR );
6474 // Copy our buffer into the DS buffer
6475 CopyMemory( buffer1, buffer, bufferSize1 );
6476 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6478 // Update our buffer offset and unlock sound buffer
// FIX: capture the Unlock status so the FAILED() check below tests this
// call rather than the earlier (already successful) Lock result.
6479 result = dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6480 if ( FAILED( result ) ) {
6481 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6482 errorText_ = errorStream_.str();
6483 MUTEX_UNLOCK( &stream_.mutex );
6484 error( RtAudioError::SYSTEM_ERROR );
6487 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6488 handle->bufferPointer[0] = nextWritePointer;
6491 // Don't bother draining input
6492 if ( handle->drainCounter ) {
6493 handle->drainCounter++;
6497 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6499 // Setup parameters.
6500 if ( stream_.doConvertBuffer[1] ) {
6501 buffer = stream_.deviceBuffer;
6502 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6503 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6506 buffer = stream_.userBuffer[1];
6507 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6508 bufferBytes *= formatBytes( stream_.userFormat );
6511 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6512 long nextReadPointer = handle->bufferPointer[1];
6513 DWORD dsBufferSize = handle->dsBufferSize[1];
6515 // Find out where the write and "safe read" pointers are.
// FIX: was "¤tReadPointer" (mojibake for "&currentReadPointer").
6516 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6517 if ( FAILED( result ) ) {
6518 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6519 errorText_ = errorStream_.str();
6520 MUTEX_UNLOCK( &stream_.mutex );
6521 error( RtAudioError::SYSTEM_ERROR );
6525 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6526 DWORD endRead = nextReadPointer + bufferBytes;
6528 // Handling depends on whether we are INPUT or DUPLEX.
6529 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6530 // then a wait here will drag the write pointers into the forbidden zone.
6532 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6533 // it's in a safe position. This causes dropouts, but it seems to be the only
6534 // practical way to sync up the read and write pointers reliably, given the
6535 // the very complex relationship between phase and increment of the read and write
6538 // In order to minimize audible dropouts in DUPLEX mode, we will
6539 // provide a pre-roll period of 0.5 seconds in which we return
6540 // zeros from the read buffer while the pointers sync up.
6542 if ( stream_.mode == DUPLEX ) {
6543 if ( safeReadPointer < endRead ) {
6544 if ( duplexPrerollBytes <= 0 ) {
6545 // Pre-roll time over. Be more agressive.
6546 int adjustment = endRead-safeReadPointer;
6548 handle->xrun[1] = true;
6550 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6551 // and perform fine adjustments later.
6552 // - small adjustments: back off by twice as much.
6553 if ( adjustment >= 2*bufferBytes )
6554 nextReadPointer = safeReadPointer-2*bufferBytes;
6556 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6558 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6562 // In pre-roll time. Just do it.
6563 nextReadPointer = safeReadPointer - bufferBytes;
6564 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6566 endRead = nextReadPointer + bufferBytes;
6569 else { // mode == INPUT
6570 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6571 // See comments for playback.
6572 double millis = (endRead - safeReadPointer) * 1000.0;
6573 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6574 if ( millis < 1.0 ) millis = 1.0;
6575 Sleep( (DWORD) millis );
6577 // Wake up and find out where we are now.
// FIX: was "¤tReadPointer" (mojibake for "&currentReadPointer").
6578 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6579 if ( FAILED( result ) ) {
6580 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6581 errorText_ = errorStream_.str();
6582 MUTEX_UNLOCK( &stream_.mutex );
6583 error( RtAudioError::SYSTEM_ERROR );
6587 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6591 // Lock free space in the buffer
6592 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6593 &bufferSize1, &buffer2, &bufferSize2, 0 );
6594 if ( FAILED( result ) ) {
6595 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6596 errorText_ = errorStream_.str();
6597 MUTEX_UNLOCK( &stream_.mutex );
6598 error( RtAudioError::SYSTEM_ERROR );
6602 if ( duplexPrerollBytes <= 0 ) {
6603 // Copy our buffer into the DS buffer
6604 CopyMemory( buffer, buffer1, bufferSize1 );
6605 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6608 memset( buffer, 0, bufferSize1 );
6609 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6610 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6613 // Update our buffer offset and unlock sound buffer
6614 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
// FIX: capture the Unlock status so the FAILED() check below tests this
// call rather than the earlier (already successful) Lock result.
6615 result = dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6616 if ( FAILED( result ) ) {
6617 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6618 errorText_ = errorStream_.str();
6619 MUTEX_UNLOCK( &stream_.mutex );
6620 error( RtAudioError::SYSTEM_ERROR );
6623 handle->bufferPointer[1] = nextReadPointer;
6625 // No byte swapping necessary in DirectSound implementation.
6627 // If necessary, convert 8-bit data from unsigned to signed.
6628 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6629 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6631 // Do buffer conversion if necessary.
6632 if ( stream_.doConvertBuffer[1] )
6633 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6637 MUTEX_UNLOCK( &stream_.mutex );
6638 RtApi::tickStreamTime();
6641 // Definitions for utility functions and callbacks
6642 // specific to the DirectSound implementation.
// Entry point for the stream's worker thread (created via _beginthreadex in
// probeDeviceOpen). Repeatedly invokes RtApiDs::callbackEvent() until
// closeStream() clears info->isRunning.
// NOTE(review): the loop's closing brace and thread exit (original lines
// ~6652-6656) are elided from this extract.
6644 static unsigned __stdcall callbackHandler( void *ptr )
6646 CallbackInfo *info = (CallbackInfo *) ptr;
6647 RtApiDs *object = (RtApiDs *) info->object;
// Pointer, not a copy: observes the flag closeStream() flips.
6648 bool* isRunning = &info->isRunning;
6650 while ( *isRunning == true ) {
6651 object->callbackEvent();
// DirectSound device-enumeration callback. Validates each reported device
// by opening it and checking its capabilities, then records (or updates)
// an entry in the shared dsDevices vector with its name, GUID, and whether
// it is valid for input (id[1]) or output (id[0]).
// Returning TRUE continues enumeration.
// NOTE(review): parts of the parameter list and several braces/returns are
// elided from this extract (original line numbers jump).
6658 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6659 LPCTSTR description,
6663 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6664 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6667 bool validDevice = false;
6668 if ( probeInfo.isInput == true ) {
// Capture side: the device is valid if it can be opened and reports at
// least one channel and one supported format.
6670 LPDIRECTSOUNDCAPTURE object;
6672 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
6673 if ( hr != DS_OK ) return TRUE;
6675 caps.dwSize = sizeof(caps);
6676 hr = object->GetCaps( &caps );
6677 if ( hr == DS_OK ) {
6678 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
// Playback side: valid if the primary buffer supports mono or stereo.
6685 LPDIRECTSOUND object;
6686 hr = DirectSoundCreate( lpguid, &object, NULL );
6687 if ( hr != DS_OK ) return TRUE;
6689 caps.dwSize = sizeof(caps);
6690 hr = object->GetCaps( &caps );
6691 if ( hr == DS_OK ) {
6692 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
6698 // If good device, then save its name and guid.
6699 std::string name = convertCharPointerToStdString( description );
6700 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
// A NULL GUID denotes the system default device.
6701 if ( lpguid == NULL )
6702 name = "Default Device";
6703 if ( validDevice ) {
// Merge with an existing entry of the same name so one DsDevice record
// can carry both an input GUID (id[1]) and an output GUID (id[0]).
6704 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
6705 if ( dsDevices[i].name == name ) {
6706 dsDevices[i].found = true;
6707 if ( probeInfo.isInput ) {
6708 dsDevices[i].id[1] = lpguid;
6709 dsDevices[i].validId[1] = true;
6712 dsDevices[i].id[0] = lpguid;
6713 dsDevices[i].validId[0] = true;
// No existing entry matched: create a new record for this device.
6721 device.found = true;
6722 if ( probeInfo.isInput ) {
6723 device.id[1] = lpguid;
6724 device.validId[1] = true;
6727 device.id[0] = lpguid;
6728 device.validId[0] = true;
6730 dsDevices.push_back( device );
// Map a DirectSound HRESULT error code to a short human-readable string,
// used when composing errorStream_ messages throughout this API.
// NOTE(review): this extract elides the opening "switch ( code ) {", the
// DSERR_GENERIC and DSERR_NODRIVER case labels/returns, the "default:"
// label, and the closing braces (original line numbers jump).
6736 static const char* getErrorString( int code )
6740 case DSERR_ALLOCATED:
6741 return "Already allocated";
6743 case DSERR_CONTROLUNAVAIL:
6744 return "Control unavailable";
6746 case DSERR_INVALIDPARAM:
6747 return "Invalid parameter";
6749 case DSERR_INVALIDCALL:
6750 return "Invalid call";
6753 return "Generic error";
6755 case DSERR_PRIOLEVELNEEDED:
6756 return "Priority level needed";
6758 case DSERR_OUTOFMEMORY:
6759 return "Out of memory";
6761 case DSERR_BADFORMAT:
6762 return "The sample rate or the channel format is not supported";
6764 case DSERR_UNSUPPORTED:
6765 return "Not supported";
6767 case DSERR_NODRIVER:
6770 case DSERR_ALREADYINITIALIZED:
6771 return "Already initialized";
6773 case DSERR_NOAGGREGATION:
6774 return "No aggregation";
6776 case DSERR_BUFFERLOST:
6777 return "Buffer lost";
6779 case DSERR_OTHERAPPHASPRIO:
6780 return "Another application already has priority";
6782 case DSERR_UNINITIALIZED:
6783 return "Uninitialized";
// Fallback for unrecognized codes.
6786 return "DirectSound unknown error";
6789 //******************** End of __WINDOWS_DS__ *********************//
6793 #if defined(__LINUX_ALSA__)
6795 #include <alsa/asoundlib.h>
6798 // A structure to hold various information related to the ALSA API
// implementation; one instance is stored per stream in
// stream_.apiHandle (see probeDeviceOpen / closeStream below).
// NOTE(review): the struct declaration line and several members are
// elided from this view -- the constructor below initializes
// synchronized, runnable and xrun[2] members that are not visible.
// Index convention: [0] = playback/output, [1] = capture/input
// (established by the mode checks in closeStream and startStream).
6801 snd_pcm_t *handles[2];
6804 pthread_cond_t runnable_cv; // signaled to wake/stop the callback thread
// Constructor: stream starts unlinked (not synchronized), not runnable,
// with no xruns recorded for either direction.
6808 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }

// Forward declaration of the static entry point for the ALSA callback
// thread (passed to pthread_create in probeDeviceOpen).
6811 static void *alsaCallbackHandler( void * ptr );
// Default constructor -- no ALSA-specific initialization is needed
// until a stream is opened via probeDeviceOpen().
6813 RtApiAlsa :: RtApiAlsa()
6815 // Nothing to do here.
// Destructor: if a stream is still open, close it so pcm handles,
// buffers and the callback thread are released (see closeStream).
6818 RtApiAlsa :: ~RtApiAlsa()
6820 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count ALSA pcm devices by walking every sound card with
// snd_card_next() and enumerating each card's pcm subdevices, then
// additionally checking whether a "default" device can be opened.
// Failures to open a card's control interface are reported as
// RtAudioError::WARNING and the scan continues with the next card.
// NOTE(review): this view is elided -- the declarations of `name` and
// `handle`, the error-branch conditionals, the nDevices increments,
// the inner loop braces and the final return of nDevices are not
// visible here.
6823 unsigned int RtApiAlsa :: getDeviceCount( void )
6825 unsigned nDevices = 0;
6826 int result, subdevice, card;
6830 // Count cards and devices
// Prime `card` with the first card index (-1 when none).
6832 snd_card_next( &card );
6833 while ( card >= 0 ) {
6834 sprintf( name, "hw:%d", card );
6835 result = snd_ctl_open( &handle, name, 0 );
// (warning branch; the `if ( result < 0 )` guard is elided)
6837 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6838 errorText_ = errorStream_.str();
6839 error( RtAudioError::WARNING );
// Enumerate pcm subdevices on this card; -1 signals end of list.
6844 result = snd_ctl_pcm_next_device( handle, &subdevice );
6846 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6847 errorText_ = errorStream_.str();
6848 error( RtAudioError::WARNING );
6851 if ( subdevice < 0 )
6856 snd_ctl_close( handle );
6857 snd_card_next( &card );
// Finally, count the "default" device if it can be opened.
6860 result = snd_ctl_open( &handle, "default", 0 );
6863 snd_ctl_close( handle );
// Probe and return capability information (channel counts, supported
// sample rates, native data formats, default-device flags) for the
// ALSA device with index `device`.  Indices follow the same
// enumeration order as getDeviceCount(): every pcm subdevice of every
// card, then a trailing "default" device.
// NOTE(review): this view is elided -- local declarations (`name`,
// `chandle`, `phandle`, `value`, `cardname`), many if/else braces,
// the nDevices increments, the `probeParameters:` label and the final
// "probed = true / return info" lines are not visible here.
6869 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
6871 RtAudio::DeviceInfo info;
6872 info.probed = false; // presumably set true on successful probe (elided) -- TODO confirm
6874 unsigned nDevices = 0;
6875 int result, subdevice, card;
6879 // Count cards and devices
// Walk all cards/subdevices until the requested index is found; on a
// match, `name` is set to the "hw:card,subdevice" identifier.
6882 snd_card_next( &card );
6883 while ( card >= 0 ) {
6884 sprintf( name, "hw:%d", card );
6885 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
6887 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6888 errorText_ = errorStream_.str();
6889 error( RtAudioError::WARNING );
6894 result = snd_ctl_pcm_next_device( chandle, &subdevice );
6896 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6897 errorText_ = errorStream_.str();
6898 error( RtAudioError::WARNING );
6901 if ( subdevice < 0 ) break;
6902 if ( nDevices == device ) {
6903 sprintf( name, "hw:%d,%d", card, subdevice );
6909 snd_ctl_close( chandle );
6910 snd_card_next( &card );
// The trailing "default" device maps to the last index.
6913 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
6914 if ( result == 0 ) {
6915 if ( nDevices == device ) {
6916 strcpy( name, "default" );
// Validate the requested index against what was found.
6922 if ( nDevices == 0 ) {
6923 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
6924 error( RtAudioError::INVALID_USE );
6928 if ( device >= nDevices ) {
6929 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
6930 error( RtAudioError::INVALID_USE );
6936 // If a stream is already open, we cannot probe the stream devices.
6937 // Thus, use the saved results.
6938 if ( stream_.state != STREAM_CLOSED &&
6939 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
6940 snd_ctl_close( chandle );
6941 if ( device >= devices_.size() ) {
6942 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
6943 error( RtAudioError::WARNING );
// Return the information cached by saveDeviceInfo().
6946 return devices_[ device ];
// Prepare pcm info / hw-params structures for the actual probe.
6949 int openMode = SND_PCM_ASYNC;
6950 snd_pcm_stream_t stream;
6951 snd_pcm_info_t *pcminfo;
6952 snd_pcm_info_alloca( &pcminfo );
6954 snd_pcm_hw_params_t *params;
// NOTE(review): "¶ms" below is mojibake for "&params" (an HTML
// "&para;" entity corruption) -- fix in the canonical source.
6955 snd_pcm_hw_params_alloca( ¶ms );
6957 // First try for playback unless default device (which has subdev -1)
6958 stream = SND_PCM_STREAM_PLAYBACK;
6959 snd_pcm_info_set_stream( pcminfo, stream );
6960 if ( subdevice != -1 ) {
6961 snd_pcm_info_set_device( pcminfo, subdevice );
6962 snd_pcm_info_set_subdevice( pcminfo, 0 );
6964 result = snd_ctl_pcm_info( chandle, pcminfo );
6966 // Device probably doesn't support playback.
// Open the device nonblocking so the probe cannot hang.
6971 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
6973 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
6974 errorText_ = errorStream_.str();
6975 error( RtAudioError::WARNING );
6979 // The device is open ... fill the parameter structure.
6980 result = snd_pcm_hw_params_any( phandle, params );
6982 snd_pcm_close( phandle );
6983 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
6984 errorText_ = errorStream_.str();
6985 error( RtAudioError::WARNING );
6989 // Get output channel information.
6991 result = snd_pcm_hw_params_get_channels_max( params, &value );
6993 snd_pcm_close( phandle );
6994 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
6995 errorText_ = errorStream_.str();
6996 error( RtAudioError::WARNING );
6999 info.outputChannels = value;
7000 snd_pcm_close( phandle );
// Repeat the probe for the capture direction.
7003 stream = SND_PCM_STREAM_CAPTURE;
7004 snd_pcm_info_set_stream( pcminfo, stream );
7006 // Now try for capture unless default device (with subdev = -1)
7007 if ( subdevice != -1 ) {
7008 result = snd_ctl_pcm_info( chandle, pcminfo );
7009 snd_ctl_close( chandle );
7011 // Device probably doesn't support capture.
7012 if ( info.outputChannels == 0 ) return info;
// probeParameters label is defined further below (elided from view).
7013 goto probeParameters;
7017 snd_ctl_close( chandle );
7019 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7021 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7022 errorText_ = errorStream_.str();
7023 error( RtAudioError::WARNING );
7024 if ( info.outputChannels == 0 ) return info;
7025 goto probeParameters;
7028 // The device is open ... fill the parameter structure.
7029 result = snd_pcm_hw_params_any( phandle, params );
7031 snd_pcm_close( phandle );
7032 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7033 errorText_ = errorStream_.str();
7034 error( RtAudioError::WARNING );
7035 if ( info.outputChannels == 0 ) return info;
7036 goto probeParameters;
7039 result = snd_pcm_hw_params_get_channels_max( params, &value );
7041 snd_pcm_close( phandle );
7042 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7043 errorText_ = errorStream_.str();
7044 error( RtAudioError::WARNING );
7045 if ( info.outputChannels == 0 ) return info;
7046 goto probeParameters;
7048 info.inputChannels = value;
7049 snd_pcm_close( phandle );
7051 // If device opens for both playback and capture, we determine the channels.
7052 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7053 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7055 // ALSA doesn't provide default devices so we'll use the first available one.
7056 if ( device == 0 && info.outputChannels > 0 )
7057 info.isDefaultOutput = true;
7058 if ( device == 0 && info.inputChannels > 0 )
7059 info.isDefaultInput = true;
7062 // At this point, we just need to figure out the supported data
7063 // formats and sample rates. We'll proceed by opening the device in
7064 // the direction with the maximum number of channels, or playback if
7065 // they are equal. This might limit our sample rate options, but so
7068 if ( info.outputChannels >= info.inputChannels )
7069 stream = SND_PCM_STREAM_PLAYBACK;
7071 stream = SND_PCM_STREAM_CAPTURE;
7072 snd_pcm_info_set_stream( pcminfo, stream );
7074 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7076 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7077 errorText_ = errorStream_.str();
7078 error( RtAudioError::WARNING );
7082 // The device is open ... fill the parameter structure.
7083 result = snd_pcm_hw_params_any( phandle, params );
7085 snd_pcm_close( phandle );
7086 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7087 errorText_ = errorStream_.str();
7088 error( RtAudioError::WARNING );
7092 // Test our discrete set of sample rate values.
7093 info.sampleRates.clear();
7094 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7095 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7096 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Prefer the highest supported rate not exceeding 48 kHz.
7098 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7099 info.preferredSampleRate = SAMPLE_RATES[i];
7102 if ( info.sampleRates.size() == 0 ) {
7103 snd_pcm_close( phandle );
7104 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7105 errorText_ = errorStream_.str();
7106 error( RtAudioError::WARNING );
7110 // Probe the supported data formats ... we don't care about endian-ness just yet
7111 snd_pcm_format_t format;
7112 info.nativeFormats = 0;
7113 format = SND_PCM_FORMAT_S8;
7114 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7115 info.nativeFormats |= RTAUDIO_SINT8;
7116 format = SND_PCM_FORMAT_S16;
7117 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7118 info.nativeFormats |= RTAUDIO_SINT16;
7119 format = SND_PCM_FORMAT_S24;
7120 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7121 info.nativeFormats |= RTAUDIO_SINT24;
7122 format = SND_PCM_FORMAT_S32;
7123 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7124 info.nativeFormats |= RTAUDIO_SINT32;
7125 format = SND_PCM_FORMAT_FLOAT;
7126 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7127 info.nativeFormats |= RTAUDIO_FLOAT32;
7128 format = SND_PCM_FORMAT_FLOAT64;
7129 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7130 info.nativeFormats |= RTAUDIO_FLOAT64;
7132 // Check that we have at least one supported format
7133 if ( info.nativeFormats == 0 ) {
7134 snd_pcm_close( phandle );
7135 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7136 errorText_ = errorStream_.str();
7137 error( RtAudioError::WARNING );
7141 // Get the device name
7143 result = snd_card_get_name( card, &cardname );
7144 if ( result >= 0 ) {
// Reuses `name` to hold a human-readable "hw:cardname,subdevice" id.
7145 sprintf( name, "hw:%s,%d", cardname, subdevice );
7150 // That's all ... close the device and return
7151 snd_pcm_close( phandle );
// Snapshot DeviceInfo for every device into devices_.  Called before
// opening a stream so getDeviceInfo() can return cached results for
// devices that cannot be re-probed while the stream is open.
7156 void RtApiAlsa :: saveDeviceInfo( void )
7160   unsigned int nDevices = getDeviceCount();
7161   devices_.resize( nDevices );
7162   for ( unsigned int i=0; i<nDevices; i++ )
7163     devices_[i] = getDeviceInfo( i );
// Open and configure the ALSA pcm device `device` for one direction
// (mode == OUTPUT or INPUT) with the requested channels, firstChannel
// offset, sampleRate, sample format and buffer size.  Allocates the
// per-stream AlsaHandle and user/device buffers, links playback and
// capture handles for duplex operation when possible, and spawns the
// callback thread.  *bufferSize is updated in place with the period
// size actually granted by the device.
// NOTE(review): this view is elided -- local declarations (`name`,
// `chandle`, `phandle`, `value`, `dir`, `out`), many braces/else
// branches, the error-unwind label and the success/failure return
// statements are not visible here.
7166 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7167                                    unsigned int firstChannel, unsigned int sampleRate,
7168                                    RtAudioFormat format, unsigned int *bufferSize,
7169                                    RtAudio::StreamOptions *options )
7172 #if defined(__RTAUDIO_DEBUG__)
7174   snd_output_stdio_attach(&out, stderr, 0);
7177   // I'm not using the "plug" interface ... too much inconsistent behavior.
7179   unsigned nDevices = 0;
7180   int result, subdevice, card;
// Honor an explicit user request for the ALSA "default" device.
7184   if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7185     snprintf(name, sizeof(name), "%s", "default");
7187   // Count cards and devices
// Resolve the device index to a "hw:card,subdevice" name, mirroring
// the enumeration order used by getDeviceCount()/getDeviceInfo().
7189   snd_card_next( &card );
7190   while ( card >= 0 ) {
7191     sprintf( name, "hw:%d", card );
7192     result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7194       errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7195       errorText_ = errorStream_.str();
7200       result = snd_ctl_pcm_next_device( chandle, &subdevice );
7201       if ( result < 0 ) break;
7202       if ( subdevice < 0 ) break;
7203       if ( nDevices == device ) {
7204         sprintf( name, "hw:%d,%d", card, subdevice );
7205         snd_ctl_close( chandle );
7210     snd_ctl_close( chandle );
7211     snd_card_next( &card );
// Trailing "default" device, as in the other enumeration routines.
7214   result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7215   if ( result == 0 ) {
7216     if ( nDevices == device ) {
7217       strcpy( name, "default" );
7223   if ( nDevices == 0 ) {
7224     // This should not happen because a check is made before this function is called.
7225     errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7229   if ( device >= nDevices ) {
7230     // This should not happen because a check is made before this function is called.
7231     errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7238   // The getDeviceInfo() function will not work for a device that is
7239   // already open. Thus, we'll probe the system before opening a
7240   // stream and save the results for use by getDeviceInfo().
7241   if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7242     this->saveDeviceInfo();
7244   snd_pcm_stream_t stream;
7245   if ( mode == OUTPUT )
7246     stream = SND_PCM_STREAM_PLAYBACK;
7248     stream = SND_PCM_STREAM_CAPTURE;
// Open the pcm device (blocking open here, unlike the probe paths).
7251   int openMode = SND_PCM_ASYNC;
7252   result = snd_pcm_open( &phandle, name, stream, openMode );
7254     if ( mode == OUTPUT )
7255       errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7257       errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7258     errorText_ = errorStream_.str();
7262   // Fill the parameter structure.
7263   snd_pcm_hw_params_t *hw_params;
7264   snd_pcm_hw_params_alloca( &hw_params );
7265   result = snd_pcm_hw_params_any( phandle, hw_params );
7267     snd_pcm_close( phandle );
7268     errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7269     errorText_ = errorStream_.str();
7273 #if defined(__RTAUDIO_DEBUG__)
7274   fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7275   snd_pcm_hw_params_dump( hw_params, out );
7278   // Set access ... check user preference.
// Try the user's preferred interleaving first; fall back to the other
// access mode and record the device's actual layout so buffer
// conversion can compensate.
7279   if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7280     stream_.userInterleaved = false;
7281     result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7283       result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7284       stream_.deviceInterleaved[mode] =  true;
7287       stream_.deviceInterleaved[mode] = false;
7290     stream_.userInterleaved = true;
7291     result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7293       result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7294       stream_.deviceInterleaved[mode] =  false;
7297       stream_.deviceInterleaved[mode] =  true;
7301     snd_pcm_close( phandle );
7302     errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7303     errorText_ = errorStream_.str();
7307   // Determine how to set the device format.
7308   stream_.userFormat = format;
7309   snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7311   if ( format == RTAUDIO_SINT8 )
7312     deviceFormat = SND_PCM_FORMAT_S8;
7313   else if ( format == RTAUDIO_SINT16 )
7314     deviceFormat = SND_PCM_FORMAT_S16;
7315   else if ( format == RTAUDIO_SINT24 )
7316     deviceFormat = SND_PCM_FORMAT_S24;
7317   else if ( format == RTAUDIO_SINT32 )
7318     deviceFormat = SND_PCM_FORMAT_S32;
7319   else if ( format == RTAUDIO_FLOAT32 )
7320     deviceFormat = SND_PCM_FORMAT_FLOAT;
7321   else if ( format == RTAUDIO_FLOAT64 )
7322     deviceFormat = SND_PCM_FORMAT_FLOAT64;
7324   if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7325     stream_.deviceFormat[mode] = format;
7329   // The user requested format is not natively supported by the device.
// Fall back through formats from widest to narrowest; RtAudio's
// buffer conversion will translate to/from the user format.
7330   deviceFormat = SND_PCM_FORMAT_FLOAT64;
7331   if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7332     stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7336   deviceFormat = SND_PCM_FORMAT_FLOAT;
7337   if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7338     stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7342   deviceFormat = SND_PCM_FORMAT_S32;
7343   if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7344     stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7348   deviceFormat = SND_PCM_FORMAT_S24;
7349   if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7350     stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7354   deviceFormat = SND_PCM_FORMAT_S16;
7355   if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7356     stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7360   deviceFormat = SND_PCM_FORMAT_S8;
7361   if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7362     stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7366   // If we get here, no supported format was found.
7367   snd_pcm_close( phandle );
7368   errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7369   errorText_ = errorStream_.str();
7373   result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7375     snd_pcm_close( phandle );
7376     errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7377     errorText_ = errorStream_.str();
7381   // Determine whether byte-swaping is necessary.
7382   stream_.doByteSwap[mode] = false;
7383   if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7384     result = snd_pcm_format_cpu_endian( deviceFormat );
7386       stream_.doByteSwap[mode] = true;
7388       snd_pcm_close( phandle );
7389       errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7390       errorText_ = errorStream_.str();
7395   // Set the sample rate.
// set_rate_near may adjust sampleRate to the closest supported value.
7396   result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7398     snd_pcm_close( phandle );
7399     errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7400     errorText_ = errorStream_.str();
7404   // Determine the number of channels for this device. We support a possible
7405   // minimum device channel number > than the value requested by the user.
7406   stream_.nUserChannels[mode] = channels;
7408   result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7409   unsigned int deviceChannels = value;
7410   if ( result < 0 || deviceChannels < channels + firstChannel ) {
7411     snd_pcm_close( phandle );
7412     errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7413     errorText_ = errorStream_.str();
7417   result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7419     snd_pcm_close( phandle );
7420     errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7421     errorText_ = errorStream_.str();
// Open at least enough channels to cover channels + firstChannel.
7424   deviceChannels = value;
7425   if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7426   stream_.nDeviceChannels[mode] = deviceChannels;
7428   // Set the device channels.
7429   result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7431     snd_pcm_close( phandle );
7432     errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7433     errorText_ = errorStream_.str();
7437   // Set the buffer (or period) size.
// NOTE(review): the declaration of `dir` is elided from this view.
7439   snd_pcm_uframes_t periodSize = *bufferSize;
7440   result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7442     snd_pcm_close( phandle );
7443     errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7444     errorText_ = errorStream_.str();
// Report the granted period size back to the caller.
7447   *bufferSize = periodSize;
7449   // Set the buffer number, which in ALSA is referred to as the "period".
7450   unsigned int periods = 0;
7451   if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7452   if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7453   if ( periods < 2 ) periods = 4; // a fairly safe default value
7454   result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7456     snd_pcm_close( phandle );
7457     errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7458     errorText_ = errorStream_.str();
7462   // If attempting to setup a duplex stream, the bufferSize parameter
7463   // MUST be the same in both directions!
7464   if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7465     snd_pcm_close( phandle );
7466     errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7467     errorText_ = errorStream_.str();
7471   stream_.bufferSize = *bufferSize;
7473   // Install the hardware configuration
7474   result = snd_pcm_hw_params( phandle, hw_params );
7476     snd_pcm_close( phandle );
7477     errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7478     errorText_ = errorStream_.str();
7482 #if defined(__RTAUDIO_DEBUG__)
7483   fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7484   snd_pcm_hw_params_dump( hw_params, out );
7487   // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7488   snd_pcm_sw_params_t *sw_params = NULL;
7489   snd_pcm_sw_params_alloca( &sw_params );
7490   snd_pcm_sw_params_current( phandle, sw_params );
7491   snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7492   snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7493   snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7495   // The following two settings were suggested by Theo Veenker
7496   //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7497   //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7499   // here are two options for a fix
7500   //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
// Use the ring-buffer boundary as the silence size (the chosen fix).
7501   snd_pcm_uframes_t val;
7502   snd_pcm_sw_params_get_boundary( sw_params, &val );
7503   snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7505   result = snd_pcm_sw_params( phandle, sw_params );
7507     snd_pcm_close( phandle );
7508     errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7509     errorText_ = errorStream_.str();
7513 #if defined(__RTAUDIO_DEBUG__)
7514   fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7515   snd_pcm_sw_params_dump( sw_params, out );
7518   // Set flags for buffer conversion
7519   stream_.doConvertBuffer[mode] = false;
7520   if ( stream_.userFormat != stream_.deviceFormat[mode] )
7521     stream_.doConvertBuffer[mode] = true;
7522   if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7523     stream_.doConvertBuffer[mode] = true;
7524   if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7525        stream_.nUserChannels[mode] > 1 )
7526     stream_.doConvertBuffer[mode] = true;
7528   // Allocate the ApiHandle if necessary and then save.
7529   AlsaHandle *apiInfo = 0;
7530   if ( stream_.apiHandle == 0 ) {
7532       apiInfo = (AlsaHandle *) new AlsaHandle;
7534     catch ( std::bad_alloc& ) {
7535       errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7539     if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7540       errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7544     stream_.apiHandle = (void *) apiInfo;
7545     apiInfo->handles[0] = 0;
7546     apiInfo->handles[1] = 0;
7549     apiInfo = (AlsaHandle *) stream_.apiHandle;
7551   apiInfo->handles[mode] = phandle;
7554   // Allocate necessary internal buffers.
7555   unsigned long bufferBytes;
7556   bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7557   stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7558   if ( stream_.userBuffer[mode] == NULL ) {
7559     errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7563   if ( stream_.doConvertBuffer[mode] ) {
// Reuse an existing device buffer if the output direction already
// allocated one large enough for this direction too.
7565     bool makeBuffer = true;
7566     bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7567     if ( mode == INPUT ) {
7568       if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7569         unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7570         if ( bufferBytes <= bytesOut ) makeBuffer = false;
7575       bufferBytes *= *bufferSize;
7576       if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7577       stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7578       if ( stream_.deviceBuffer == NULL ) {
7579         errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
// Record the final stream parameters.
7585   stream_.sampleRate = sampleRate;
7586   stream_.nBuffers = periods;
7587   stream_.device[mode] = device;
7588   stream_.state = STREAM_STOPPED;
7590   // Setup the buffer conversion information structure.
7591   if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7593   // Setup thread if necessary.
7594   if ( stream_.mode == OUTPUT && mode == INPUT ) {
7595     // We had already set up an output stream.
7596     stream_.mode = DUPLEX;
7597     // Link the streams if possible.
7598     apiInfo->synchronized = false;
7599     if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7600       apiInfo->synchronized = true;
7602       errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7603       error( RtAudioError::WARNING );
7607     stream_.mode = mode;
7609     // Setup callback thread.
7610     stream_.callbackInfo.object = (void *) this;
7612     // Set the thread attributes for joinable and realtime scheduling
7613     // priority (optional). The higher priority will only take affect
7614     // if the program is run as root or suid. Note, under Linux
7615     // processes with CAP_SYS_NICE privilege, a user can change
7616     // scheduling policy and priority (thus need not be root). See
7617     // POSIX "capabilities".
7618     pthread_attr_t attr;
7619     pthread_attr_init( &attr );
7620     pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7621 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
7622     if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7623       stream_.callbackInfo.doRealtime = true;
7624       struct sched_param param;
7625       int priority = options->priority;
7626       int min = sched_get_priority_min( SCHED_RR );
7627       int max = sched_get_priority_max( SCHED_RR );
7628       if ( priority < min ) priority = min;
7629       else if ( priority > max ) priority = max;
7630       param.sched_priority = priority;
7632       // Set the policy BEFORE the priority. Otherwise it fails.
7633       pthread_attr_setschedpolicy(&attr, SCHED_RR);
7634       pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7635       // This is definitely required. Otherwise it fails.
7636       pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
// NOTE(review): "¶m" below is mojibake for "&param" (an HTML
// "&para;" entity corruption) -- fix in the canonical source.
7637       pthread_attr_setschedparam(&attr, ¶m);
7640       pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7642     pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7645     stream_.callbackInfo.isRunning = true;
7646     result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7647     pthread_attr_destroy( &attr );
7649       // Failed. Try instead with default attributes.
7650       result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
7652         stream_.callbackInfo.isRunning = false;
7653         errorText_ = "RtApiAlsa::error creating callback thread!";
// Error-unwind path: release everything allocated above.
// NOTE(review): the label introducing this cleanup section is elided.
7663     pthread_cond_destroy( &apiInfo->runnable_cv );
7664     if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7665     if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7667     stream_.apiHandle = 0;
7670   if ( phandle) snd_pcm_close( phandle );
7672   for ( int i=0; i<2; i++ ) {
7673     if ( stream_.userBuffer[i] ) {
7674       free( stream_.userBuffer[i] );
7675       stream_.userBuffer[i] = 0;
7679   if ( stream_.deviceBuffer ) {
7680     free( stream_.deviceBuffer );
7681     stream_.deviceBuffer = 0;
7684   stream_.state = STREAM_CLOSED;
// Close the open stream: stop the callback thread (waking it first if
// the stream is merely stopped), drop any in-flight pcm data, close
// both pcm handles, destroy the AlsaHandle, free user/device buffers
// and reset the stream state to CLOSED.
// NOTE(review): several braces/else branches and the deletion of
// apiInfo are elided from this view.
7688 void RtApiAlsa :: closeStream()
7690   if ( stream_.state == STREAM_CLOSED ) {
7691     errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
7692     error( RtAudioError::WARNING );
7696   AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// Signal the callback thread to exit, waking it if it is blocked on
// the runnable condition variable (stream stopped).
7697   stream_.callbackInfo.isRunning = false;
7698   MUTEX_LOCK( &stream_.mutex );
7699   if ( stream_.state == STREAM_STOPPED ) {
7700     apiInfo->runnable = true;
7701     pthread_cond_signal( &apiInfo->runnable_cv );
7703   MUTEX_UNLOCK( &stream_.mutex );
7704   pthread_join( stream_.callbackInfo.thread, NULL );
7706   if ( stream_.state == STREAM_RUNNING ) {
7707     stream_.state = STREAM_STOPPED;
7708     if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
7709       snd_pcm_drop( apiInfo->handles[0] );
7710     if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
7711       snd_pcm_drop( apiInfo->handles[1] );
// Tear down the per-stream ALSA state.
7715     pthread_cond_destroy( &apiInfo->runnable_cv );
7716     if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7717     if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7719     stream_.apiHandle = 0;
7722   for ( int i=0; i<2; i++ ) {
7723     if ( stream_.userBuffer[i] ) {
7724       free( stream_.userBuffer[i] );
7725       stream_.userBuffer[i] = 0;
7729   if ( stream_.deviceBuffer ) {
7730     free( stream_.deviceBuffer );
7731     stream_.deviceBuffer = 0;
7734   stream_.mode = UNINITIALIZED;
7735   stream_.state = STREAM_CLOSED;
// Start the stream: prepare each unprepared pcm handle, mark the
// stream RUNNING and wake the callback thread via the runnable
// condition variable.
// NOTE(review): the declaration/initialization of `result`, several
// braces and a likely unlock/goto on the error paths are elided from
// this view.
7738 void RtApiAlsa :: startStream()
7740   // This method calls snd_pcm_prepare if the device isn't already in that state.
7743   if ( stream_.state == STREAM_RUNNING ) {
7744     errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
7745     error( RtAudioError::WARNING );
7749   MUTEX_LOCK( &stream_.mutex );
7752   snd_pcm_state_t state;
7753   AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7754   snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7755   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7756     state = snd_pcm_state( handle[0] );
7757     if ( state != SND_PCM_STATE_PREPARED ) {
7758       result = snd_pcm_prepare( handle[0] );
7760         errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
7761         errorText_ = errorStream_.str();
// For linked (synchronized) duplex handles, preparing the output side
// covers the input side, so the input path is skipped.
7767   if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7768     result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
7769     state = snd_pcm_state( handle[1] );
7770     if ( state != SND_PCM_STATE_PREPARED ) {
7771       result = snd_pcm_prepare( handle[1] );
7773         errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
7774         errorText_ = errorStream_.str();
7780   stream_.state = STREAM_RUNNING;
// Wake the callback thread waiting on runnable_cv.
7783   apiInfo->runnable = true;
7784   pthread_cond_signal( &apiInfo->runnable_cv );
7785   MUTEX_UNLOCK( &stream_.mutex );
7787   if ( result >= 0 ) return;
7788   error( RtAudioError::SYSTEM_ERROR );
// Stop the stream, letting queued output play out: drains the output
// device when the handles are linked, otherwise drops, and drops
// pending input.  The stream is marked STOPPED before taking the
// mutex so the callback thread stops processing.
// NOTE(review): the declaration of `result`, several braces/else
// branches and a likely unlock/goto on error paths are elided.
7791 void RtApiAlsa :: stopStream()
7794   if ( stream_.state == STREAM_STOPPED ) {
7795     errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
7796     error( RtAudioError::WARNING );
7800   stream_.state = STREAM_STOPPED;
7801   MUTEX_LOCK( &stream_.mutex );
7804   AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7805   snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7806   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Linked handles: drop immediately; unlinked: drain pending output.
7807     if ( apiInfo->synchronized )
7808       result = snd_pcm_drop( handle[0] );
7810       result = snd_pcm_drain( handle[0] );
7812       errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
7813       errorText_ = errorStream_.str();
7818   if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7819     result = snd_pcm_drop( handle[1] );
7821       errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
7822       errorText_ = errorStream_.str();
7828   apiInfo->runnable = false; // fixes high CPU usage when stopped
7829   MUTEX_UNLOCK( &stream_.mutex );
7831   if ( result >= 0 ) return;
7832   error( RtAudioError::SYSTEM_ERROR );
// Abort the ALSA stream immediately: snd_pcm_drop() discards pending samples
// on both directions (unlike stopStream, which drains playback).
// NOTE(review): excerpt omits some lines (braces, error guards).
7835 void RtApiAlsa :: abortStream()
7838 if ( stream_.state == STREAM_STOPPED ) {
7839 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
7840 error( RtAudioError::WARNING );
7844 stream_.state = STREAM_STOPPED;
7845 MUTEX_LOCK( &stream_.mutex );
7848 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7849 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7850 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7851 result = snd_pcm_drop( handle[0] );
7853 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
7854 errorText_ = errorStream_.str();
7859 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7860 result = snd_pcm_drop( handle[1] );
7862 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
7863 errorText_ = errorStream_.str();
7869 apiInfo->runnable = false; // fixes high CPU usage when stopped
7870 MUTEX_UNLOCK( &stream_.mutex );
7872 if ( result >= 0 ) return;
7873 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the ALSA callback loop: wait while stopped, invoke the
// user callback with xrun status flags, then read from the capture PCM and/or
// write to the playback PCM (with optional byte-swap / format conversion),
// recovering from -EPIPE (xrun) via snd_pcm_prepare.
// NOTE(review): excerpt omits some lines (braces, `else` branches, `unlock:`
// label target, local declarations such as `buffer`/`channels`/`result`).
7876 void RtApiAlsa :: callbackEvent()
7878 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7879 if ( stream_.state == STREAM_STOPPED ) {
// Block here until startStream() sets runnable and signals the cv.
7880 MUTEX_LOCK( &stream_.mutex );
7881 while ( !apiInfo->runnable )
7882 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
7884 if ( stream_.state != STREAM_RUNNING ) {
7885 MUTEX_UNLOCK( &stream_.mutex );
7888 MUTEX_UNLOCK( &stream_.mutex );
7891 if ( stream_.state == STREAM_CLOSED ) {
7892 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
7893 error( RtAudioError::WARNING );
7897 int doStopStream = 0;
7898 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
7899 double streamTime = getStreamTime();
7900 RtAudioStreamStatus status = 0;
// Report and clear any xrun flags set by earlier read/write failures.
7901 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
7902 status |= RTAUDIO_OUTPUT_UNDERFLOW;
7903 apiInfo->xrun[0] = false;
7905 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
7906 status |= RTAUDIO_INPUT_OVERFLOW;
7907 apiInfo->xrun[1] = false;
7909 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
7910 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// A return of 2 requests an immediate abort; 1 (handled at the end)
// requests a drain-style stop.
7912 if ( doStopStream == 2 ) {
7917 MUTEX_LOCK( &stream_.mutex );
7919 // The state might change while waiting on a mutex.
7920 if ( stream_.state == STREAM_STOPPED ) goto unlock;
7926 snd_pcm_sframes_t frames;
7927 RtAudioFormat format;
7928 handle = (snd_pcm_t **) apiInfo->handles;
// ---- Capture side ----
7930 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
7932 // Setup parameters.
7933 if ( stream_.doConvertBuffer[1] ) {
7934 buffer = stream_.deviceBuffer;
7935 channels = stream_.nDeviceChannels[1];
7936 format = stream_.deviceFormat[1];
7939 buffer = stream_.userBuffer[1];
7940 channels = stream_.nUserChannels[1];
7941 format = stream_.userFormat;
7944 // Read samples from device in interleaved/non-interleaved format.
7945 if ( stream_.deviceInterleaved[1] )
7946 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
7948 void *bufs[channels];
7949 size_t offset = stream_.bufferSize * formatBytes( format );
7950 for ( int i=0; i<channels; i++ )
7951 bufs[i] = (void *) (buffer + (i * offset));
7952 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
7955 if ( result < (int) stream_.bufferSize ) {
7956 // Either an error or overrun occurred.
7957 if ( result == -EPIPE ) {
7958 snd_pcm_state_t state = snd_pcm_state( handle[1] );
7959 if ( state == SND_PCM_STATE_XRUN ) {
// Overrun: flag it for the next callback and re-prepare the device.
7960 apiInfo->xrun[1] = true;
7961 result = snd_pcm_prepare( handle[1] );
7963 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
7964 errorText_ = errorStream_.str();
7968 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
7969 errorText_ = errorStream_.str();
7973 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
7974 errorText_ = errorStream_.str();
7976 error( RtAudioError::WARNING );
7980 // Do byte swapping if necessary.
7981 if ( stream_.doByteSwap[1] )
7982 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
7984 // Do buffer conversion if necessary.
7985 if ( stream_.doConvertBuffer[1] )
7986 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
7988 // Check stream latency
7989 result = snd_pcm_delay( handle[1], &frames );
7990 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---- Playback side ----
7995 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7997 // Setup parameters and do buffer conversion if necessary.
7998 if ( stream_.doConvertBuffer[0] ) {
7999 buffer = stream_.deviceBuffer;
8000 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8001 channels = stream_.nDeviceChannels[0];
8002 format = stream_.deviceFormat[0];
8005 buffer = stream_.userBuffer[0];
8006 channels = stream_.nUserChannels[0];
8007 format = stream_.userFormat;
8010 // Do byte swapping if necessary.
8011 if ( stream_.doByteSwap[0] )
8012 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8014 // Write samples to device in interleaved/non-interleaved format.
8015 if ( stream_.deviceInterleaved[0] )
8016 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
8018 void *bufs[channels];
8019 size_t offset = stream_.bufferSize * formatBytes( format );
8020 for ( int i=0; i<channels; i++ )
8021 bufs[i] = (void *) (buffer + (i * offset));
8022 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
8025 if ( result < (int) stream_.bufferSize ) {
8026 // Either an error or underrun occurred.
8027 if ( result == -EPIPE ) {
8028 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8029 if ( state == SND_PCM_STATE_XRUN ) {
8030 apiInfo->xrun[0] = true;
8031 result = snd_pcm_prepare( handle[0] );
8033 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8034 errorText_ = errorStream_.str();
8037 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8040 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8041 errorText_ = errorStream_.str();
8045 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8046 errorText_ = errorStream_.str();
8048 error( RtAudioError::WARNING );
8052 // Check stream latency
8053 result = snd_pcm_delay( handle[0], &frames );
8054 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8058 MUTEX_UNLOCK( &stream_.mutex );
8060 RtApi::tickStreamTime();
8061 if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the ALSA callback thread: loops calling
// RtApiAlsa::callbackEvent() until CallbackInfo::isRunning is cleared.
// `ptr` is the stream's CallbackInfo, passed at pthread_create time.
8064 static void *alsaCallbackHandler( void *ptr )
8066 CallbackInfo *info = (CallbackInfo *) ptr;
8067 RtApiAlsa *object = (RtApiAlsa *) info->object;
8068 bool *isRunning = &info->isRunning;
8070 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: report whether realtime (SCHED_RR) scheduling actually
// took effect for this thread.
8071 if ( info->doRealtime ) {
8072 std::cerr << "RtAudio alsa: " <<
8073 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8074 "running realtime scheduling" << std::endl;
8078 while ( *isRunning == true ) {
// Allow pthread_cancel to interrupt the loop between iterations.
8079 pthread_testcancel();
8080 object->callbackEvent();
8083 pthread_exit( NULL );
8086 //******************** End of __LINUX_ALSA__ *********************//
8089 #if defined(__LINUX_PULSE__)
8091 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8092 // and Tristan Matthews.
8094 #include <pulse/error.h>
8095 #include <pulse/simple.h>
// Sample rates the PulseAudio backend advertises (zero-terminated list).
8098 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8099 44100, 48000, 96000, 0};
// Maps RtAudio sample formats to PulseAudio sample formats.
8101 struct rtaudio_pa_format_mapping_t {
8102 RtAudioFormat rtaudio_format;
8103 pa_sample_format_t pa_format;
// Table terminated by {0, PA_SAMPLE_INVALID}; only little-endian
// 16/32-bit int and 32-bit float are supported natively.
8106 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8107 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8108 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8109 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8110 {0, PA_SAMPLE_INVALID}};
// Per-stream state for the PulseAudio backend.
// NOTE(review): excerpt omits some members (the pa_simple handles s_play /
// s_rec and the thread id are referenced elsewhere in this file).
8112 struct PulseAudioHandle {
8116 pthread_cond_t runnable_cv;
8118 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: close the stream if it is still open.
// NOTE(review): the closeStream() call line is not visible in this excerpt.
8121 RtApiPulse::~RtApiPulse()
8123 if ( stream_.state != STREAM_CLOSED )
// Device count for the PulseAudio backend (body not visible in this excerpt;
// getDeviceInfo below suggests a single virtual "PulseAudio" device).
8132 unsigned int RtApiPulse::getDeviceCount( void )
// Describe the single virtual PulseAudio device: stereo in/out, the fixed
// SUPPORTED_SAMPLERATES list, and S16/S32/F32 native formats. The `device`
// index is ignored.
8134 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8136 RtAudio::DeviceInfo info;
8138 info.name = "PulseAudio";
8139 info.outputChannels = 2;
8140 info.inputChannels = 2;
8141 info.duplexChannels = 2;
8142 info.isDefaultOutput = true;
8143 info.isDefaultInput = true;
8145 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8146 info.sampleRates.push_back( *sr );
8148 info.preferredSampleRate = 48000;
8149 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
// Thread entry point for the PulseAudio callback thread: loops calling
// RtApiPulse::callbackEvent() until CallbackInfo::isRunning is cleared.
8154 static void *pulseaudio_callback( void * user )
8156 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8157 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8158 volatile bool *isRunning = &cbi->isRunning;
8160 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: report whether SCHED_RR scheduling took effect.
8161 if (cbi->doRealtime) {
8162 std::cerr << "RtAudio pulse: " <<
8163 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8164 "running realtime scheduling" << std::endl;
8168 while ( *isRunning ) {
8169 pthread_testcancel();
8170 context->callbackEvent();
8173 pthread_exit( NULL );
// Tear down the PulseAudio stream: stop the callback thread (waking it first
// if it is parked on the condition variable), join it, free the pa_simple
// handles and the user buffers, and reset stream state to CLOSED.
// NOTE(review): excerpt omits some lines (braces, the s_rec null-check,
// handle deletion, deviceBuffer cleanup).
8176 void RtApiPulse::closeStream( void )
8178 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8180 stream_.callbackInfo.isRunning = false;
8182 MUTEX_LOCK( &stream_.mutex );
// If the callback thread is blocked waiting to run, release it so that
// pthread_join below cannot deadlock.
8183 if ( stream_.state == STREAM_STOPPED ) {
8184 pah->runnable = true;
8185 pthread_cond_signal( &pah->runnable_cv );
8187 MUTEX_UNLOCK( &stream_.mutex );
8189 pthread_join( pah->thread, 0 );
8190 if ( pah->s_play ) {
8191 pa_simple_flush( pah->s_play, NULL );
8192 pa_simple_free( pah->s_play );
8195 pa_simple_free( pah->s_rec );
8197 pthread_cond_destroy( &pah->runnable_cv );
8199 stream_.apiHandle = 0;
8202 if ( stream_.userBuffer[0] ) {
8203 free( stream_.userBuffer[0] );
8204 stream_.userBuffer[0] = 0;
8206 if ( stream_.userBuffer[1] ) {
8207 free( stream_.userBuffer[1] );
8208 stream_.userBuffer[1] = 0;
8211 stream_.state = STREAM_CLOSED;
8212 stream_.mode = UNINITIALIZED;
8213 void RtApiPulse::callbackEvent( void )
8215 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8217 if ( stream_.state == STREAM_STOPPED ) {
8218 MUTEX_LOCK( &stream_.mutex );
8219 while ( !pah->runnable )
8220 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8222 if ( stream_.state != STREAM_RUNNING ) {
8223 MUTEX_UNLOCK( &stream_.mutex );
8226 MUTEX_UNLOCK( &stream_.mutex );
8229 if ( stream_.state == STREAM_CLOSED ) {
8230 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8231 "this shouldn't happen!";
8232 error( RtAudioError::WARNING );
8236 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8237 double streamTime = getStreamTime();
8238 RtAudioStreamStatus status = 0;
8239 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8240 stream_.bufferSize, streamTime, status,
8241 stream_.callbackInfo.userData );
8243 if ( doStopStream == 2 ) {
8248 MUTEX_LOCK( &stream_.mutex );
8249 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8250 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8252 if ( stream_.state != STREAM_RUNNING )
8257 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8258 if ( stream_.doConvertBuffer[OUTPUT] ) {
8259 convertBuffer( stream_.deviceBuffer,
8260 stream_.userBuffer[OUTPUT],
8261 stream_.convertInfo[OUTPUT] );
8262 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8263 formatBytes( stream_.deviceFormat[OUTPUT] );
8265 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8266 formatBytes( stream_.userFormat );
8268 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8269 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8270 pa_strerror( pa_error ) << ".";
8271 errorText_ = errorStream_.str();
8272 error( RtAudioError::WARNING );
8276 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8277 if ( stream_.doConvertBuffer[INPUT] )
8278 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8279 formatBytes( stream_.deviceFormat[INPUT] );
8281 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8282 formatBytes( stream_.userFormat );
8284 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8285 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8286 pa_strerror( pa_error ) << ".";
8287 errorText_ = errorStream_.str();
8288 error( RtAudioError::WARNING );
8290 if ( stream_.doConvertBuffer[INPUT] ) {
8291 convertBuffer( stream_.userBuffer[INPUT],
8292 stream_.deviceBuffer,
8293 stream_.convertInfo[INPUT] );
8298 MUTEX_UNLOCK( &stream_.mutex );
8299 RtApi::tickStreamTime();
8301 if ( doStopStream == 1 )
// Start the PulseAudio stream: mark it RUNNING and wake the parked callback
// thread via the runnable condition variable. Warns/errors on bad state.
8307 void RtApiPulse::startStream( void )
8309 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8311 if ( stream_.state == STREAM_CLOSED ) {
8312 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8313 error( RtAudioError::INVALID_USE );
8316 if ( stream_.state == STREAM_RUNNING ) {
8317 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8318 error( RtAudioError::WARNING );
8322 MUTEX_LOCK( &stream_.mutex );
8324 stream_.state = STREAM_RUNNING;
8326 pah->runnable = true;
8327 pthread_cond_signal( &pah->runnable_cv );
8328 MUTEX_UNLOCK( &stream_.mutex );
8329 void RtApiPulse::stopStream( void )
8331 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8333 if ( stream_.state == STREAM_CLOSED ) {
8334 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8335 error( RtAudioError::INVALID_USE );
8338 if ( stream_.state == STREAM_STOPPED ) {
8339 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8340 error( RtAudioError::WARNING );
8344 stream_.state = STREAM_STOPPED;
8345 MUTEX_LOCK( &stream_.mutex );
8347 if ( pah && pah->s_play ) {
8349 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8350 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8351 pa_strerror( pa_error ) << ".";
8352 errorText_ = errorStream_.str();
8353 MUTEX_UNLOCK( &stream_.mutex );
8354 error( RtAudioError::SYSTEM_ERROR );
8359 stream_.state = STREAM_STOPPED;
8360 MUTEX_UNLOCK( &stream_.mutex );
8363 void RtApiPulse::abortStream( void )
8365 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8367 if ( stream_.state == STREAM_CLOSED ) {
8368 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8369 error( RtAudioError::INVALID_USE );
8372 if ( stream_.state == STREAM_STOPPED ) {
8373 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8374 error( RtAudioError::WARNING );
8378 stream_.state = STREAM_STOPPED;
8379 MUTEX_LOCK( &stream_.mutex );
8381 if ( pah && pah->s_play ) {
8383 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8384 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8385 pa_strerror( pa_error ) << ".";
8386 errorText_ = errorStream_.str();
8387 MUTEX_UNLOCK( &stream_.mutex );
8388 error( RtAudioError::SYSTEM_ERROR );
8393 stream_.state = STREAM_STOPPED;
8394 MUTEX_UNLOCK( &stream_.mutex );
// Open one direction (INPUT or OUTPUT) of the PulseAudio stream:
//  - validate device (only index 0), channel count (1 or 2), sample rate
//    (must be in SUPPORTED_SAMPLERATES) and format (falling back to an
//    internal conversion to FLOAT32LE if unsupported natively);
//  - allocate user/device buffers and the PulseAudioHandle;
//  - connect to the server with pa_simple_new (record and/or playback);
//  - spawn the callback thread (optionally with SCHED_RR realtime priority).
// Returns true on success; the visible `error:` cleanup path at the bottom
// releases everything on failure.
// NOTE(review): excerpt omits many lines (pa_sample_spec `ss` declaration,
// `sf_found`, `error`/`goto` statements, braces) — comments describe only
// the visible code.
8397 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8398 unsigned int channels, unsigned int firstChannel,
8399 unsigned int sampleRate, RtAudioFormat format,
8400 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8402 PulseAudioHandle *pah = 0;
8403 unsigned long bufferBytes = 0;
// Only the single virtual Pulse device, plain INPUT/OUTPUT, 1-2 channels,
// and firstChannel == 0 are supported.
8406 if ( device != 0 ) return false;
8407 if ( mode != INPUT && mode != OUTPUT ) return false;
8408 if ( channels != 1 && channels != 2 ) {
8409 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8412 ss.channels = channels;
8414 if ( firstChannel != 0 ) return false;
8416 bool sr_found = false;
8417 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8418 if ( sampleRate == *sr ) {
8420 stream_.sampleRate = sampleRate;
8421 ss.rate = sampleRate;
8426 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
// Find a native Pulse sample format matching the requested RtAudio format.
8431 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8432 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8433 if ( format == sf->rtaudio_format ) {
8435 stream_.userFormat = sf->rtaudio_format;
8436 stream_.deviceFormat[mode] = stream_.userFormat;
8437 ss.format = sf->pa_format;
8441 if ( !sf_found ) { // Use internal data format conversion.
8442 stream_.userFormat = format;
8443 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8444 ss.format = PA_SAMPLE_FLOAT32LE;
8447 // Set other stream parameters.
8448 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8449 else stream_.userInterleaved = true;
8450 stream_.deviceInterleaved[mode] = true;
8451 stream_.nBuffers = 1;
8452 stream_.doByteSwap[mode] = false;
8453 stream_.nUserChannels[mode] = channels;
8454 stream_.nDeviceChannels[mode] = channels + firstChannel;
8455 stream_.channelOffset[mode] = 0;
8456 std::string streamName = "RtAudio";
8458 // Set flags for buffer conversion.
8459 stream_.doConvertBuffer[mode] = false;
8460 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8461 stream_.doConvertBuffer[mode] = true;
8462 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8463 stream_.doConvertBuffer[mode] = true;
8465 // Allocate necessary internal buffers.
8466 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8467 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8468 if ( stream_.userBuffer[mode] == NULL ) {
8469 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8472 stream_.bufferSize = *bufferSize;
8474 if ( stream_.doConvertBuffer[mode] ) {
8476 bool makeBuffer = true;
8477 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8478 if ( mode == INPUT ) {
// Reuse the existing output deviceBuffer for duplex if it is big enough.
8479 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8480 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8481 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8486 bufferBytes *= *bufferSize;
8487 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8488 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8489 if ( stream_.deviceBuffer == NULL ) {
8490 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8496 stream_.device[mode] = device;
8498 // Setup the buffer conversion information structure.
8499 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
// Create the shared api handle on first open (second open of a duplex
// stream reuses it).
8501 if ( !stream_.apiHandle ) {
8502 PulseAudioHandle *pah = new PulseAudioHandle;
8504 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8508 stream_.apiHandle = pah;
8509 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8510 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8514 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8517 if ( options && !options->streamName.empty() ) streamName = options->streamName;
8520 pa_buffer_attr buffer_attr;
8521 buffer_attr.fragsize = bufferBytes;
8522 buffer_attr.maxlength = -1;
// Connect the record and/or playback pa_simple streams to the server.
8524 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8525 if ( !pah->s_rec ) {
8526 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8531 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8532 if ( !pah->s_play ) {
8533 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
8541 if ( stream_.mode == UNINITIALIZED )
8542 stream_.mode = mode;
8543 else if ( stream_.mode == mode )
8546 stream_.mode = DUPLEX;
// Spawn the callback thread on first open only.
8548 if ( !stream_.callbackInfo.isRunning ) {
8549 stream_.callbackInfo.object = this;
8551 stream_.state = STREAM_STOPPED;
8552 // Set the thread attributes for joinable and realtime scheduling
8553 // priority (optional). The higher priority will only take affect
8554 // if the program is run as root or suid. Note, under Linux
8555 // processes with CAP_SYS_NICE privilege, a user can change
8556 // scheduling policy and priority (thus need not be root). See
8557 // POSIX "capabilities".
8558 pthread_attr_t attr;
8559 pthread_attr_init( &attr );
8560 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8561 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8562 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8563 stream_.callbackInfo.doRealtime = true;
8564 struct sched_param param;
8565 int priority = options->priority;
// Clamp the requested priority into the valid SCHED_RR range.
8566 int min = sched_get_priority_min( SCHED_RR );
8567 int max = sched_get_priority_max( SCHED_RR );
8568 if ( priority < min ) priority = min;
8569 else if ( priority > max ) priority = max;
8570 param.sched_priority = priority;
8572 // Set the policy BEFORE the priority. Otherwise it fails.
8573 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8574 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8575 // This is definitely required. Otherwise it fails.
8576 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8577 pthread_attr_setschedparam(&attr, &param);
8580 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8582 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8585 stream_.callbackInfo.isRunning = true;
8586 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8587 pthread_attr_destroy(&attr);
8589 // Failed. Try instead with default attributes.
8590 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8592 stream_.callbackInfo.isRunning = false;
8593 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
// Error/cleanup path: release the handle, buffers, and reset state.
8602 if ( pah && stream_.callbackInfo.isRunning ) {
8603 pthread_cond_destroy( &pah->runnable_cv );
8605 stream_.apiHandle = 0;
8608 for ( int i=0; i<2; i++ ) {
8609 if ( stream_.userBuffer[i] ) {
8610 free( stream_.userBuffer[i] );
8611 stream_.userBuffer[i] = 0;
8615 if ( stream_.deviceBuffer ) {
8616 free( stream_.deviceBuffer );
8617 stream_.deviceBuffer = 0;
8620 stream_.state = STREAM_CLOSED;
8627 #if defined(__LINUX_OSS__)
8630 #include <sys/ioctl.h>
8633 #include <sys/soundcard.h>
8637 static void *ossCallbackHandler(void * ptr);
8639 // A structure to hold various information related to the OSS API
8642 int id[2]; // device ids
8645 pthread_cond_t runnable
// Constructor: no devices open, no xruns, not yet triggered.
// NOTE(review): excerpt omits the struct header line and other members
// (`triggered`, `xrun[2]`) that the initializer below references.
8648 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Default constructor — all initialization happens in probeDeviceOpen.
8651 RtApiOss :: RtApiOss()
8653 // Nothing to do here.
// Destructor: close the stream if it is still open.
8656 RtApiOss :: ~RtApiOss()
8658 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count OSS audio devices by querying SNDCTL_SYSINFO through /dev/mixer.
// Returns sysinfo.numaudios; warns (and presumably returns 0 — the early
// return lines are not visible in this excerpt) if the mixer can't be
// opened or the ioctl fails (requires OSS >= 4.0).
8661 unsigned int RtApiOss :: getDeviceCount( void )
8663 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8664 if ( mixerfd == -1 ) {
8665 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
8666 error( RtAudioError::WARNING );
8670 oss_sysinfo sysinfo;
8671 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
8673 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
8674 error( RtAudioError::WARNING );
8679 return sysinfo.numaudios;
// Probe one OSS audio device via SNDCTL_AUDIOINFO: channel capabilities,
// native data formats (from the input-format mask), and supported sample
// rates (either the device's explicit rate list or its min/max range matched
// against the global SAMPLE_RATES table).
// NOTE(review): excerpt omits some lines (mixer close() calls, `return info`,
// the ainfo.dev assignment, braces) — comments describe only the visible code.
8682 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
8684 RtAudio::DeviceInfo info;
8685 info.probed = false;
8687 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8688 if ( mixerfd == -1 ) {
8689 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
8690 error( RtAudioError::WARNING );
8694 oss_sysinfo sysinfo;
8695 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8696 if ( result == -1 ) {
8698 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
8699 error( RtAudioError::WARNING );
8703 unsigned nDevices = sysinfo.numaudios;
8704 if ( nDevices == 0 ) {
8706 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
8707 error( RtAudioError::INVALID_USE );
8711 if ( device >= nDevices ) {
8713 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
8714 error( RtAudioError::INVALID_USE );
8718 oss_audioinfo ainfo;
8720 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8722 if ( result == -1 ) {
8723 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8724 errorText_ = errorStream_.str();
8725 error( RtAudioError::WARNING );
// Channel capabilities from the device caps bits.
8730 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
8731 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
8732 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
8733 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
8734 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
8737 // Probe data formats ... do for input
8738 unsigned long mask = ainfo.iformats;
8739 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
8740 info.nativeFormats |= RTAUDIO_SINT16;
8741 if ( mask & AFMT_S8 )
8742 info.nativeFormats |= RTAUDIO_SINT8;
8743 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
8744 info.nativeFormats |= RTAUDIO_SINT32;
8746 if ( mask & AFMT_FLOAT )
8747 info.nativeFormats |= RTAUDIO_FLOAT32;
8749 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
8750 info.nativeFormats |= RTAUDIO_SINT24;
8752 // Check that we have at least one supported format
8753 if ( info.nativeFormats == 0 ) {
8754 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
8755 errorText_ = errorStream_.str();
8756 error( RtAudioError::WARNING );
8760 // Probe the supported sample rates.
8761 info.sampleRates.clear();
8762 if ( ainfo.nrates ) {
// Device reports an explicit rate list: keep those that appear in the
// global SAMPLE_RATES table.
8763 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
8764 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8765 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
8766 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Prefer the highest supported rate not exceeding 48 kHz.
8768 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8769 info.preferredSampleRate = SAMPLE_RATES[k];
8777 // Check min and max rate values;
8778 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8779 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
8780 info.sampleRates.push_back( SAMPLE_RATES[k] );
8782 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8783 info.preferredSampleRate = SAMPLE_RATES[k];
8788 if ( info.sampleRates.size() == 0 ) {
8789 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
8790 errorText_ = errorStream_.str();
8791 error( RtAudioError::WARNING );
8795 info.name = ainfo.name;
8802 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
8803 unsigned int firstChannel, unsigned int sampleRate,
8804 RtAudioFormat format, unsigned int *bufferSize,
8805 RtAudio::StreamOptions *options )
8807 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8808 if ( mixerfd == -1 ) {
8809 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
8813 oss_sysinfo sysinfo;
8814 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8815 if ( result == -1 ) {
8817 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
8821 unsigned nDevices = sysinfo.numaudios;
8822 if ( nDevices == 0 ) {
8823 // This should not happen because a check is made before this function is called.
8825 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
8829 if ( device >= nDevices ) {
8830 // This should not happen because a check is made before this function is called.
8832 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
8836 oss_audioinfo ainfo;
8838 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8840 if ( result == -1 ) {
8841 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8842 errorText_ = errorStream_.str();
8846 // Check if device supports input or output
8847 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
8848 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
8849 if ( mode == OUTPUT )
8850 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
8852 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
8853 errorText_ = errorStream_.str();
8858 OssHandle *handle = (OssHandle *) stream_.apiHandle;
8859 if ( mode == OUTPUT )
8861 else { // mode == INPUT
8862 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
8863 // We just set the same device for playback ... close and reopen for duplex (OSS only).
8864 close( handle->id[0] );
8866 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
8867 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
8868 errorText_ = errorStream_.str();
8871 // Check that the number previously set channels is the same.
8872 if ( stream_.nUserChannels[0] != channels ) {
8873 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
8874 errorText_ = errorStream_.str();
8883 // Set exclusive access if specified.
8884 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
8886 // Try to open the device.
8888 fd = open( ainfo.devnode, flags, 0 );
8890 if ( errno == EBUSY )
8891 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
8893 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
8894 errorText_ = errorStream_.str();
8898 // For duplex operation, specifically set this mode (this doesn't seem to work).
8900 if ( flags | O_RDWR ) {
8901 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
8902 if ( result == -1) {
8903 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
8904 errorText_ = errorStream_.str();
8910 // Check the device channel support.
8911 stream_.nUserChannels[mode] = channels;
8912 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
8914 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
8915 errorText_ = errorStream_.str();
8919 // Set the number of channels.
8920 int deviceChannels = channels + firstChannel;
8921 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
8922 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
8924 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
8925 errorText_ = errorStream_.str();
8928 stream_.nDeviceChannels[mode] = deviceChannels;
8930 // Get the data format mask
8932 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
8933 if ( result == -1 ) {
8935 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
8936 errorText_ = errorStream_.str();
8940 // Determine how to set the device format.
8941 stream_.userFormat = format;
8942 int deviceFormat = -1;
8943 stream_.doByteSwap[mode] = false;
8944 if ( format == RTAUDIO_SINT8 ) {
8945 if ( mask & AFMT_S8 ) {
8946 deviceFormat = AFMT_S8;
8947 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
8950 else if ( format == RTAUDIO_SINT16 ) {
8951 if ( mask & AFMT_S16_NE ) {
8952 deviceFormat = AFMT_S16_NE;
8953 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8955 else if ( mask & AFMT_S16_OE ) {
8956 deviceFormat = AFMT_S16_OE;
8957 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8958 stream_.doByteSwap[mode] = true;
8961 else if ( format == RTAUDIO_SINT24 ) {
8962 if ( mask & AFMT_S24_NE ) {
8963 deviceFormat = AFMT_S24_NE;
8964 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8966 else if ( mask & AFMT_S24_OE ) {
8967 deviceFormat = AFMT_S24_OE;
8968 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8969 stream_.doByteSwap[mode] = true;
8972 else if ( format == RTAUDIO_SINT32 ) {
8973 if ( mask & AFMT_S32_NE ) {
8974 deviceFormat = AFMT_S32_NE;
8975 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8977 else if ( mask & AFMT_S32_OE ) {
8978 deviceFormat = AFMT_S32_OE;
8979 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8980 stream_.doByteSwap[mode] = true;
8984 if ( deviceFormat == -1 ) {
8985 // The user requested format is not natively supported by the device.
8986 if ( mask & AFMT_S16_NE ) {
8987 deviceFormat = AFMT_S16_NE;
8988 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8990 else if ( mask & AFMT_S32_NE ) {
8991 deviceFormat = AFMT_S32_NE;
8992 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8994 else if ( mask & AFMT_S24_NE ) {
8995 deviceFormat = AFMT_S24_NE;
8996 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8998 else if ( mask & AFMT_S16_OE ) {
8999 deviceFormat = AFMT_S16_OE;
9000 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9001 stream_.doByteSwap[mode] = true;
9003 else if ( mask & AFMT_S32_OE ) {
9004 deviceFormat = AFMT_S32_OE;
9005 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9006 stream_.doByteSwap[mode] = true;
9008 else if ( mask & AFMT_S24_OE ) {
9009 deviceFormat = AFMT_S24_OE;
9010 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9011 stream_.doByteSwap[mode] = true;
9013 else if ( mask & AFMT_S8) {
9014 deviceFormat = AFMT_S8;
9015 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9019 if ( stream_.deviceFormat[mode] == 0 ) {
9020 // This really shouldn't happen ...
9022 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9023 errorText_ = errorStream_.str();
9027 // Set the data format.
9028 int temp = deviceFormat;
9029 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9030 if ( result == -1 || deviceFormat != temp ) {
9032 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9033 errorText_ = errorStream_.str();
9037 // Attempt to set the buffer size. According to OSS, the minimum
9038 // number of buffers is two. The supposed minimum buffer size is 16
9039 // bytes, so that will be our lower bound. The argument to this
9040 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9041 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9042 // We'll check the actual value used near the end of the setup
9044 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9045 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9047 if ( options ) buffers = options->numberOfBuffers;
9048 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9049 if ( buffers < 2 ) buffers = 3;
9050 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9051 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9052 if ( result == -1 ) {
9054 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9055 errorText_ = errorStream_.str();
9058 stream_.nBuffers = buffers;
9060 // Save buffer size (in sample frames).
9061 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9062 stream_.bufferSize = *bufferSize;
9064 // Set the sample rate.
9065 int srate = sampleRate;
9066 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9067 if ( result == -1 ) {
9069 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9070 errorText_ = errorStream_.str();
9074 // Verify the sample rate setup worked.
9075 if ( abs( srate - (int)sampleRate ) > 100 ) {
9077 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9078 errorText_ = errorStream_.str();
9081 stream_.sampleRate = sampleRate;
9083 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9084 // We're doing duplex setup here.
9085 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9086 stream_.nDeviceChannels[0] = deviceChannels;
9089 // Set interleaving parameters.
9090 stream_.userInterleaved = true;
9091 stream_.deviceInterleaved[mode] = true;
9092 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9093 stream_.userInterleaved = false;
9095 // Set flags for buffer conversion
9096 stream_.doConvertBuffer[mode] = false;
9097 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9098 stream_.doConvertBuffer[mode] = true;
9099 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9100 stream_.doConvertBuffer[mode] = true;
9101 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9102 stream_.nUserChannels[mode] > 1 )
9103 stream_.doConvertBuffer[mode] = true;
9105 // Allocate the stream handles if necessary and then save.
9106 if ( stream_.apiHandle == 0 ) {
9108 handle = new OssHandle;
9110 catch ( std::bad_alloc& ) {
9111 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9115 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9116 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9120 stream_.apiHandle = (void *) handle;
9123 handle = (OssHandle *) stream_.apiHandle;
9125 handle->id[mode] = fd;
9127 // Allocate necessary internal buffers.
9128 unsigned long bufferBytes;
9129 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9130 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9131 if ( stream_.userBuffer[mode] == NULL ) {
9132 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9136 if ( stream_.doConvertBuffer[mode] ) {
9138 bool makeBuffer = true;
9139 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9140 if ( mode == INPUT ) {
9141 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9142 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9143 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9148 bufferBytes *= *bufferSize;
9149 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9150 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9151 if ( stream_.deviceBuffer == NULL ) {
9152 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9158 stream_.device[mode] = device;
9159 stream_.state = STREAM_STOPPED;
9161 // Setup the buffer conversion information structure.
9162 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9164 // Setup thread if necessary.
9165 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9166 // We had already set up an output stream.
9167 stream_.mode = DUPLEX;
9168 if ( stream_.device[0] == device ) handle->id[0] = fd;
9171 stream_.mode = mode;
9173 // Setup callback thread.
9174 stream_.callbackInfo.object = (void *) this;
9176 // Set the thread attributes for joinable and realtime scheduling
9177 // priority. The higher priority will only take affect if the
9178 // program is run as root or suid.
9179 pthread_attr_t attr;
9180 pthread_attr_init( &attr );
9181 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9182 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9183 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9184 stream_.callbackInfo.doRealtime = true;
9185 struct sched_param param;
9186 int priority = options->priority;
9187 int min = sched_get_priority_min( SCHED_RR );
9188 int max = sched_get_priority_max( SCHED_RR );
9189 if ( priority < min ) priority = min;
9190 else if ( priority > max ) priority = max;
9191 param.sched_priority = priority;
9193 // Set the policy BEFORE the priority. Otherwise it fails.
9194 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9195 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9196 // This is definitely required. Otherwise it fails.
9197 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
9198 pthread_attr_setschedparam(&attr, ¶m);
9201 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9203 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9206 stream_.callbackInfo.isRunning = true;
9207 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9208 pthread_attr_destroy( &attr );
9210 // Failed. Try instead with default attributes.
9211 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9213 stream_.callbackInfo.isRunning = false;
9214 errorText_ = "RtApiOss::error creating callback thread!";
9224 pthread_cond_destroy( &handle->runnable );
9225 if ( handle->id[0] ) close( handle->id[0] );
9226 if ( handle->id[1] ) close( handle->id[1] );
9228 stream_.apiHandle = 0;
9231 for ( int i=0; i<2; i++ ) {
9232 if ( stream_.userBuffer[i] ) {
9233 free( stream_.userBuffer[i] );
9234 stream_.userBuffer[i] = 0;
9238 if ( stream_.deviceBuffer ) {
9239 free( stream_.deviceBuffer );
9240 stream_.deviceBuffer = 0;
9243 stream_.state = STREAM_CLOSED;
// Close an open stream: wake and join the callback thread, halt any
// in-flight device transfers, close the OSS file descriptor(s), and
// release the handle and all internal buffers.
9247 void RtApiOss :: closeStream()
9249 if ( stream_.state == STREAM_CLOSED ) {
9250 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9251 error( RtAudioError::WARNING );
// The callback thread may be blocked on the condition variable when the
// stream is stopped, so signal it before joining.
9255 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9256 stream_.callbackInfo.isRunning = false;
9257 MUTEX_LOCK( &stream_.mutex );
9258 if ( stream_.state == STREAM_STOPPED )
9259 pthread_cond_signal( &handle->runnable );
9260 MUTEX_UNLOCK( &stream_.mutex );
9261 pthread_join( stream_.callbackInfo.thread, NULL );
// If the stream is still running, halt the output and/or input device
// before closing.
9263 if ( stream_.state == STREAM_RUNNING ) {
9264 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9265 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9267 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9268 stream_.state = STREAM_STOPPED;
// Tear down the handle: destroy the condition variable and close any
// open device descriptors (id[0] = output, id[1] = input).
9272 pthread_cond_destroy( &handle->runnable );
9273 if ( handle->id[0] ) close( handle->id[0] );
9274 if ( handle->id[1] ) close( handle->id[1] );
9276 stream_.apiHandle = 0;
// Free the per-mode user buffers and the shared device buffer.
9279 for ( int i=0; i<2; i++ ) {
9280 if ( stream_.userBuffer[i] ) {
9281 free( stream_.userBuffer[i] );
9282 stream_.userBuffer[i] = 0;
9286 if ( stream_.deviceBuffer ) {
9287 free( stream_.deviceBuffer );
9288 stream_.deviceBuffer = 0;
// Mark the stream object as fully closed.
9291 stream_.mode = UNINITIALIZED;
9292 stream_.state = STREAM_CLOSED;
// Start a stopped stream. For OSS there is no explicit "start" ioctl
// needed here; the device begins transferring once samples are
// written/read, so this only flips the state and wakes the callback
// thread waiting in callbackEvent().
9295 void RtApiOss :: startStream()
9298 if ( stream_.state == STREAM_RUNNING ) {
9299 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9300 error( RtAudioError::WARNING );
9304 MUTEX_LOCK( &stream_.mutex );
9306 stream_.state = STREAM_RUNNING;
9308 // No need to do anything else here ... OSS automatically starts
9309 // when fed samples.
9311 MUTEX_UNLOCK( &stream_.mutex );
// Wake the callback thread, which blocks on this condition variable
// while the stream is stopped.
9313 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9314 pthread_cond_signal( &handle->runnable );
// Stop a running stream gracefully: drain the output by writing a few
// buffers of silence (so pending audio plays out without a click),
// then halt the device transfer(s) with SNDCTL_DSP_HALT.
9317 void RtApiOss :: stopStream()
9320 if ( stream_.state == STREAM_STOPPED ) {
9321 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9322 error( RtAudioError::WARNING );
9326 MUTEX_LOCK( &stream_.mutex );
9328 // The state might change while waiting on a mutex.
9329 if ( stream_.state == STREAM_STOPPED ) {
9330 MUTEX_UNLOCK( &stream_.mutex );
9335 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9336 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9338 // Flush the output with zeros a few times.
9341 RtAudioFormat format;
// Pick whichever buffer actually feeds the device: the conversion
// buffer (device format/channels) or the user buffer directly.
9343 if ( stream_.doConvertBuffer[0] ) {
9344 buffer = stream_.deviceBuffer;
9345 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9346 format = stream_.deviceFormat[0];
9349 buffer = stream_.userBuffer[0];
9350 samples = stream_.bufferSize * stream_.nUserChannels[0];
9351 format = stream_.userFormat;
// Write nBuffers+1 buffers of silence to let queued audio drain.
9354 memset( buffer, 0, samples * formatBytes(format) );
9355 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9356 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9357 if ( result == -1 ) {
9358 errorText_ = "RtApiOss::stopStream: audio write error.";
9359 error( RtAudioError::WARNING );
// Halt the output transfer and reset the duplex trigger flag.
9363 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9364 if ( result == -1 ) {
9365 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9366 errorText_ = errorStream_.str();
9369 handle->triggered = false;
// Halt the input side too, unless duplex on a single shared descriptor
// (in which case the halt above already stopped it).
9372 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9373 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9374 if ( result == -1 ) {
9375 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9376 errorText_ = errorStream_.str();
9382 stream_.state = STREAM_STOPPED;
9383 MUTEX_UNLOCK( &stream_.mutex );
// Report accumulated errors (if any ioctl failed) outside the lock.
9385 if ( result != -1 ) return;
9386 error( RtAudioError::SYSTEM_ERROR );
// Abort a running stream immediately: like stopStream() but without
// draining the output with silence first — any queued audio is dropped.
9389 void RtApiOss :: abortStream()
9392 if ( stream_.state == STREAM_STOPPED ) {
9393 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9394 error( RtAudioError::WARNING );
9398 MUTEX_LOCK( &stream_.mutex );
9400 // The state might change while waiting on a mutex.
9401 if ( stream_.state == STREAM_STOPPED ) {
9402 MUTEX_UNLOCK( &stream_.mutex );
// Halt the output transfer and reset the duplex trigger flag.
9407 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9408 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9409 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9410 if ( result == -1 ) {
9411 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9412 errorText_ = errorStream_.str();
9415 handle->triggered = false;
// Halt the input side too, unless duplex on a single shared descriptor.
9418 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9419 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9420 if ( result == -1 ) {
9421 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9422 errorText_ = errorStream_.str();
9428 stream_.state = STREAM_STOPPED;
9429 MUTEX_UNLOCK( &stream_.mutex );
// Report accumulated errors (if any ioctl failed) outside the lock.
9431 if ( result != -1 ) return;
9432 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the audio callback loop (run on the callback thread
// by ossCallbackHandler): wait while stopped, invoke the user callback
// to produce/consume a buffer, then write to and/or read from the OSS
// device with any required format conversion and byte swapping.
9435 void RtApiOss :: callbackEvent()
9437 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// While stopped, block on the condition variable until startStream()
// or closeStream() signals it; bail out if not transitioned to RUNNING.
9438 if ( stream_.state == STREAM_STOPPED ) {
9439 MUTEX_LOCK( &stream_.mutex );
9440 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9441 if ( stream_.state != STREAM_RUNNING ) {
9442 MUTEX_UNLOCK( &stream_.mutex );
9445 MUTEX_UNLOCK( &stream_.mutex );
9448 if ( stream_.state == STREAM_CLOSED ) {
9449 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9450 error( RtAudioError::WARNING );
9454 // Invoke user callback to get fresh output data.
9455 int doStopStream = 0;
9456 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9457 double streamTime = getStreamTime();
9458 RtAudioStreamStatus status = 0;
// Report and clear any under/overflow flags set by previous I/O errors.
9459 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9460 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9461 handle->xrun[0] = false;
9463 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9464 status |= RTAUDIO_INPUT_OVERFLOW;
9465 handle->xrun[1] = false;
9467 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9468 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// A return value of 2 requests an immediate abort (no drain).
9469 if ( doStopStream == 2 ) {
9470 this->abortStream();
9474 MUTEX_LOCK( &stream_.mutex );
9476 // The state might change while waiting on a mutex.
9477 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9482 RtAudioFormat format;
// ---- Output (playback) section ----
9484 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9486 // Setup parameters and do buffer conversion if necessary.
9487 if ( stream_.doConvertBuffer[0] ) {
9488 buffer = stream_.deviceBuffer;
9489 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9490 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9491 format = stream_.deviceFormat[0];
9494 buffer = stream_.userBuffer[0];
9495 samples = stream_.bufferSize * stream_.nUserChannels[0];
9496 format = stream_.userFormat;
9499 // Do byte swapping if necessary.
9500 if ( stream_.doByteSwap[0] )
9501 byteSwapBuffer( buffer, samples, format );
// First duplex pass: prime the output while triggers are disabled,
// then enable input+output simultaneously so both sides start in sync.
9503 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9505 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9506 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9507 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9508 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9509 handle->triggered = true;
9512 // Write samples to device.
9513 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9515 if ( result == -1 ) {
9516 // We'll assume this is an underrun, though there isn't a
9517 // specific means for determining that.
9518 handle->xrun[0] = true;
9519 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9520 error( RtAudioError::WARNING );
9521 // Continue on to input section.
// ---- Input (capture) section ----
9525 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9527 // Setup parameters.
9528 if ( stream_.doConvertBuffer[1] ) {
9529 buffer = stream_.deviceBuffer;
9530 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9531 format = stream_.deviceFormat[1];
9534 buffer = stream_.userBuffer[1];
9535 samples = stream_.bufferSize * stream_.nUserChannels[1];
9536 format = stream_.userFormat;
9539 // Read samples from device.
9540 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9542 if ( result == -1 ) {
9543 // We'll assume this is an overrun, though there isn't a
9544 // specific means for determining that.
9545 handle->xrun[1] = true;
9546 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9547 error( RtAudioError::WARNING );
9551 // Do byte swapping if necessary.
9552 if ( stream_.doByteSwap[1] )
9553 byteSwapBuffer( buffer, samples, format );
9555 // Do buffer conversion if necessary.
9556 if ( stream_.doConvertBuffer[1] )
9557 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9561 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; a callback return of 1 requests a graceful stop.
9563 RtApi::tickStreamTime();
9564 if ( doStopStream == 1 ) this->stopStream();
// Entry point for the OSS callback thread created in probeDeviceOpen().
// Loops on callbackEvent() until CallbackInfo::isRunning is cleared
// (e.g. by closeStream() or by error handling).
9567 static void *ossCallbackHandler( void *ptr )
9569 CallbackInfo *info = (CallbackInfo *) ptr;
9570 RtApiOss *object = (RtApiOss *) info->object;
9571 bool *isRunning = &info->isRunning;
9573 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: report whether realtime (SCHED_RR) scheduling
// actually took effect for this thread.
9574 if (info->doRealtime) {
9575 std::cerr << "RtAudio oss: " <<
9576 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9577 "running realtime scheduling" << std::endl;
9581 while ( *isRunning == true ) {
9582 pthread_testcancel();
9583 object->callbackEvent();
9586 pthread_exit( NULL );
9589 //******************** End of __LINUX_OSS__ *********************//
9593 // *************************************************** //
9595 // Protected common (OS-independent) RtAudio methods.
9597 // *************************************************** //
9599 // This method can be modified to control the behavior of error
9600 // message printing.
// Central error reporting: route the current errorText_ to a
// user-registered error callback when one exists; otherwise print
// warnings to stderr (if enabled) or throw an RtAudioError.
9601 void RtApi :: error( RtAudioError::Type type )
9603 errorStream_.str(""); // clear the ostringstream
9605 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9606 if ( errorCallback ) {
9607 // abortStream() can generate new error messages. Ignore them. Just keep original one.
// firstErrorOccurred_ guards against re-entrant error() calls (e.g.
// triggered while tearing down the stream below).
9609 if ( firstErrorOccurred_ )
9612 firstErrorOccurred_ = true;
9613 const std::string errorMessage = errorText_;
// Non-warning errors on an active stream also shut the callback
// thread down before notifying the user.
9615 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9616 stream_.callbackInfo.isRunning = false; // exit from the thread
9620 errorCallback( type, errorMessage );
9621 firstErrorOccurred_ = false;
// No callback registered: warnings go to stderr, real errors throw.
9625 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9626 std::cerr << '\n' << errorText_ << "\n\n";
9627 else if ( type != RtAudioError::WARNING )
9628 throw( RtAudioError( errorText_, type ) );
// Guard used by public stream methods: raise INVALID_USE if no stream
// is currently open.
9631 void RtApi :: verifyStream()
9633 if ( stream_.state == STREAM_CLOSED ) {
9634 errorText_ = "RtApi:: a stream is not open!";
9635 error( RtAudioError::INVALID_USE );
// Reset every field of the ApiStream structure to its "no stream"
// default. Called before opening and after closing a stream so stale
// values from a previous stream cannot leak into the next one.
9639 void RtApi :: clearStreamInfo()
9641 stream_.mode = UNINITIALIZED;
9642 stream_.state = STREAM_CLOSED;
9643 stream_.sampleRate = 0;
9644 stream_.bufferSize = 0;
9645 stream_.nBuffers = 0;
9646 stream_.userFormat = 0;
9647 stream_.userInterleaved = true;
9648 stream_.streamTime = 0.0;
9649 stream_.apiHandle = 0;
9650 stream_.deviceBuffer = 0;
9651 stream_.callbackInfo.callback = 0;
9652 stream_.callbackInfo.userData = 0;
9653 stream_.callbackInfo.isRunning = false;
9654 stream_.callbackInfo.errorCallback = 0;
// Per-direction fields: index 0 = output, index 1 = input.
9655 for ( int i=0; i<2; i++ ) {
9656 stream_.device[i] = 11111; // sentinel meaning "no device selected"
9657 stream_.doConvertBuffer[i] = false;
9658 stream_.deviceInterleaved[i] = true;
9659 stream_.doByteSwap[i] = false;
9660 stream_.nUserChannels[i] = 0;
9661 stream_.nDeviceChannels[i] = 0;
9662 stream_.channelOffset[i] = 0;
9663 stream_.deviceFormat[i] = 0;
9664 stream_.latency[i] = 0;
9665 stream_.userBuffer[i] = 0;
9666 stream_.convertInfo[i].channels = 0;
9667 stream_.convertInfo[i].inJump = 0;
9668 stream_.convertInfo[i].outJump = 0;
9669 stream_.convertInfo[i].inFormat = 0;
9670 stream_.convertInfo[i].outFormat = 0;
9671 stream_.convertInfo[i].inOffset.clear();
9672 stream_.convertInfo[i].outOffset.clear();
// Return the size in bytes of a single sample of the given RtAudioFormat.
// An unrecognized format produces a WARNING via error().
9676 unsigned int RtApi :: formatBytes( RtAudioFormat format )
9678 if ( format == RTAUDIO_SINT16 )
9680 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
9682 else if ( format == RTAUDIO_FLOAT64 )
9684 else if ( format == RTAUDIO_SINT24 )
9686 else if ( format == RTAUDIO_SINT8 )
9689 errorText_ = "RtApi::formatBytes: undefined format.";
9690 error( RtAudioError::WARNING );
// Populate stream_.convertInfo[mode] for convertBuffer(): source/target
// formats, per-frame jumps, channel count, and the per-channel offset
// tables used for interleaving/deinterleaving and channel offsetting.
9695 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
9697 if ( mode == INPUT ) { // convert device to user buffer
9698 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
9699 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
9700 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
9701 stream_.convertInfo[mode].outFormat = stream_.userFormat;
9703 else { // convert user to device buffer
9704 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
9705 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
9706 stream_.convertInfo[mode].inFormat = stream_.userFormat;
9707 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
// Convert only as many channels as the narrower side provides.
9710 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
9711 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
9713 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
9715 // Set up the interleave/deinterleave offsets.
// Non-interleaved data stores each channel as a contiguous plane of
// bufferSize samples, hence the k * bufferSize offsets and jump of 1.
9716 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
9717 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
9718 ( mode == INPUT && stream_.userInterleaved ) ) {
9719 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9720 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9721 stream_.convertInfo[mode].outOffset.push_back( k );
9722 stream_.convertInfo[mode].inJump = 1;
9726 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9727 stream_.convertInfo[mode].inOffset.push_back( k );
9728 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9729 stream_.convertInfo[mode].outJump = 1;
9733 else { // no (de)interleaving
9734 if ( stream_.userInterleaved ) {
9735 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9736 stream_.convertInfo[mode].inOffset.push_back( k );
9737 stream_.convertInfo[mode].outOffset.push_back( k );
9741 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9742 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9743 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9744 stream_.convertInfo[mode].inJump = 1;
9745 stream_.convertInfo[mode].outJump = 1;
9750 // Add channel offset.
// Shift the device-side offsets when the stream starts at a non-zero
// device channel: by firstChannel samples when interleaved, by whole
// channel planes (firstChannel * bufferSize) when non-interleaved.
9751 if ( firstChannel > 0 ) {
9752 if ( stream_.deviceInterleaved[mode] ) {
9753 if ( mode == OUTPUT ) {
9754 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9755 stream_.convertInfo[mode].outOffset[k] += firstChannel;
9758 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9759 stream_.convertInfo[mode].inOffset[k] += firstChannel;
9763 if ( mode == OUTPUT ) {
9764 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9765 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
9768 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9769 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
9775 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
9777 // This function does format conversion, input/output channel compensation, and
9778 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
9779 // the lower three bytes of a 32-bit integer.
9781 // Clear our device buffer when in/out duplex device channels are different
9782 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
9783 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
9784 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
9787 if (info.outFormat == RTAUDIO_FLOAT64) {
9789 Float64 *out = (Float64 *)outBuffer;
9791 if (info.inFormat == RTAUDIO_SINT8) {
9792 signed char *in = (signed char *)inBuffer;
9793 scale = 1.0 / 127.5;
9794 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9795 for (j=0; j<info.channels; j++) {
9796 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9797 out[info.outOffset[j]] += 0.5;
9798 out[info.outOffset[j]] *= scale;
9801 out += info.outJump;
9804 else if (info.inFormat == RTAUDIO_SINT16) {
9805 Int16 *in = (Int16 *)inBuffer;
9806 scale = 1.0 / 32767.5;
9807 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9808 for (j=0; j<info.channels; j++) {
9809 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9810 out[info.outOffset[j]] += 0.5;
9811 out[info.outOffset[j]] *= scale;
9814 out += info.outJump;
9817 else if (info.inFormat == RTAUDIO_SINT24) {
9818 Int24 *in = (Int24 *)inBuffer;
9819 scale = 1.0 / 8388607.5;
9820 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9821 for (j=0; j<info.channels; j++) {
9822 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
9823 out[info.outOffset[j]] += 0.5;
9824 out[info.outOffset[j]] *= scale;
9827 out += info.outJump;
9830 else if (info.inFormat == RTAUDIO_SINT32) {
9831 Int32 *in = (Int32 *)inBuffer;
9832 scale = 1.0 / 2147483647.5;
9833 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9834 for (j=0; j<info.channels; j++) {
9835 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9836 out[info.outOffset[j]] += 0.5;
9837 out[info.outOffset[j]] *= scale;
9840 out += info.outJump;
9843 else if (info.inFormat == RTAUDIO_FLOAT32) {
9844 Float32 *in = (Float32 *)inBuffer;
9845 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9846 for (j=0; j<info.channels; j++) {
9847 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9850 out += info.outJump;
9853 else if (info.inFormat == RTAUDIO_FLOAT64) {
9854 // Channel compensation and/or (de)interleaving only.
9855 Float64 *in = (Float64 *)inBuffer;
9856 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9857 for (j=0; j<info.channels; j++) {
9858 out[info.outOffset[j]] = in[info.inOffset[j]];
9861 out += info.outJump;
9865 else if (info.outFormat == RTAUDIO_FLOAT32) {
9867 Float32 *out = (Float32 *)outBuffer;
9869 if (info.inFormat == RTAUDIO_SINT8) {
9870 signed char *in = (signed char *)inBuffer;
9871 scale = (Float32) ( 1.0 / 127.5 );
9872 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9873 for (j=0; j<info.channels; j++) {
9874 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9875 out[info.outOffset[j]] += 0.5;
9876 out[info.outOffset[j]] *= scale;
9879 out += info.outJump;
9882 else if (info.inFormat == RTAUDIO_SINT16) {
9883 Int16 *in = (Int16 *)inBuffer;
9884 scale = (Float32) ( 1.0 / 32767.5 );
9885 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9886 for (j=0; j<info.channels; j++) {
9887 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9888 out[info.outOffset[j]] += 0.5;
9889 out[info.outOffset[j]] *= scale;
9892 out += info.outJump;
9895 else if (info.inFormat == RTAUDIO_SINT24) {
9896 Int24 *in = (Int24 *)inBuffer;
9897 scale = (Float32) ( 1.0 / 8388607.5 );
9898 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9899 for (j=0; j<info.channels; j++) {
9900 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
9901 out[info.outOffset[j]] += 0.5;
9902 out[info.outOffset[j]] *= scale;
9905 out += info.outJump;
9908 else if (info.inFormat == RTAUDIO_SINT32) {
9909 Int32 *in = (Int32 *)inBuffer;
9910 scale = (Float32) ( 1.0 / 2147483647.5 );
9911 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9912 for (j=0; j<info.channels; j++) {
9913 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9914 out[info.outOffset[j]] += 0.5;
9915 out[info.outOffset[j]] *= scale;
9918 out += info.outJump;
9921 else if (info.inFormat == RTAUDIO_FLOAT32) {
9922 // Channel compensation and/or (de)interleaving only.
9923 Float32 *in = (Float32 *)inBuffer;
9924 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9925 for (j=0; j<info.channels; j++) {
9926 out[info.outOffset[j]] = in[info.inOffset[j]];
9929 out += info.outJump;
9932 else if (info.inFormat == RTAUDIO_FLOAT64) {
9933 Float64 *in = (Float64 *)inBuffer;
9934 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9935 for (j=0; j<info.channels; j++) {
9936 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9939 out += info.outJump;
9943 else if (info.outFormat == RTAUDIO_SINT32) {
9944 Int32 *out = (Int32 *)outBuffer;
9945 if (info.inFormat == RTAUDIO_SINT8) {
9946 signed char *in = (signed char *)inBuffer;
9947 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9948 for (j=0; j<info.channels; j++) {
9949 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9950 out[info.outOffset[j]] <<= 24;
9953 out += info.outJump;
9956 else if (info.inFormat == RTAUDIO_SINT16) {
9957 Int16 *in = (Int16 *)inBuffer;
9958 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9959 for (j=0; j<info.channels; j++) {
9960 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9961 out[info.outOffset[j]] <<= 16;
9964 out += info.outJump;
9967 else if (info.inFormat == RTAUDIO_SINT24) {
9968 Int24 *in = (Int24 *)inBuffer;
9969 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9970 for (j=0; j<info.channels; j++) {
9971 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
9972 out[info.outOffset[j]] <<= 8;
9975 out += info.outJump;
9978 else if (info.inFormat == RTAUDIO_SINT32) {
9979 // Channel compensation and/or (de)interleaving only.
9980 Int32 *in = (Int32 *)inBuffer;
9981 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9982 for (j=0; j<info.channels; j++) {
9983 out[info.outOffset[j]] = in[info.inOffset[j]];
9986 out += info.outJump;
9989 else if (info.inFormat == RTAUDIO_FLOAT32) {
9990 Float32 *in = (Float32 *)inBuffer;
9991 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9992 for (j=0; j<info.channels; j++) {
9993 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
9996 out += info.outJump;
9999 else if (info.inFormat == RTAUDIO_FLOAT64) {
10000 Float64 *in = (Float64 *)inBuffer;
10001 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10002 for (j=0; j<info.channels; j++) {
10003 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10006 out += info.outJump;
10010 else if (info.outFormat == RTAUDIO_SINT24) {
10011 Int24 *out = (Int24 *)outBuffer;
10012 if (info.inFormat == RTAUDIO_SINT8) {
10013 signed char *in = (signed char *)inBuffer;
10014 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10015 for (j=0; j<info.channels; j++) {
10016 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10017 //out[info.outOffset[j]] <<= 16;
10020 out += info.outJump;
10023 else if (info.inFormat == RTAUDIO_SINT16) {
10024 Int16 *in = (Int16 *)inBuffer;
10025 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10026 for (j=0; j<info.channels; j++) {
10027 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10028 //out[info.outOffset[j]] <<= 8;
10031 out += info.outJump;
10034 else if (info.inFormat == RTAUDIO_SINT24) {
10035 // Channel compensation and/or (de)interleaving only.
10036 Int24 *in = (Int24 *)inBuffer;
10037 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10038 for (j=0; j<info.channels; j++) {
10039 out[info.outOffset[j]] = in[info.inOffset[j]];
10042 out += info.outJump;
10045 else if (info.inFormat == RTAUDIO_SINT32) {
10046 Int32 *in = (Int32 *)inBuffer;
10047 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10048 for (j=0; j<info.channels; j++) {
10049 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10050 //out[info.outOffset[j]] >>= 8;
10053 out += info.outJump;
10056 else if (info.inFormat == RTAUDIO_FLOAT32) {
10057 Float32 *in = (Float32 *)inBuffer;
10058 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10059 for (j=0; j<info.channels; j++) {
10060 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10063 out += info.outJump;
10066 else if (info.inFormat == RTAUDIO_FLOAT64) {
10067 Float64 *in = (Float64 *)inBuffer;
10068 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10069 for (j=0; j<info.channels; j++) {
10070 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10073 out += info.outJump;
10077 else if (info.outFormat == RTAUDIO_SINT16) {
10078 Int16 *out = (Int16 *)outBuffer;
10079 if (info.inFormat == RTAUDIO_SINT8) {
10080 signed char *in = (signed char *)inBuffer;
10081 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10082 for (j=0; j<info.channels; j++) {
10083 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10084 out[info.outOffset[j]] <<= 8;
10087 out += info.outJump;
10090 else if (info.inFormat == RTAUDIO_SINT16) {
10091 // Channel compensation and/or (de)interleaving only.
10092 Int16 *in = (Int16 *)inBuffer;
10093 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10094 for (j=0; j<info.channels; j++) {
10095 out[info.outOffset[j]] = in[info.inOffset[j]];
10098 out += info.outJump;
10101 else if (info.inFormat == RTAUDIO_SINT24) {
10102 Int24 *in = (Int24 *)inBuffer;
10103 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10104 for (j=0; j<info.channels; j++) {
10105 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10108 out += info.outJump;
10111 else if (info.inFormat == RTAUDIO_SINT32) {
10112 Int32 *in = (Int32 *)inBuffer;
10113 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10114 for (j=0; j<info.channels; j++) {
10115 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10118 out += info.outJump;
10121 else if (info.inFormat == RTAUDIO_FLOAT32) {
10122 Float32 *in = (Float32 *)inBuffer;
10123 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10124 for (j=0; j<info.channels; j++) {
10125 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10128 out += info.outJump;
10131 else if (info.inFormat == RTAUDIO_FLOAT64) {
10132 Float64 *in = (Float64 *)inBuffer;
10133 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10134 for (j=0; j<info.channels; j++) {
10135 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10138 out += info.outJump;
10142 else if (info.outFormat == RTAUDIO_SINT8) {
10143 signed char *out = (signed char *)outBuffer;
10144 if (info.inFormat == RTAUDIO_SINT8) {
10145 // Channel compensation and/or (de)interleaving only.
10146 signed char *in = (signed char *)inBuffer;
10147 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10148 for (j=0; j<info.channels; j++) {
10149 out[info.outOffset[j]] = in[info.inOffset[j]];
10152 out += info.outJump;
10155 if (info.inFormat == RTAUDIO_SINT16) {
10156 Int16 *in = (Int16 *)inBuffer;
10157 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10158 for (j=0; j<info.channels; j++) {
10159 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10162 out += info.outJump;
10165 else if (info.inFormat == RTAUDIO_SINT24) {
10166 Int24 *in = (Int24 *)inBuffer;
10167 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10168 for (j=0; j<info.channels; j++) {
10169 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10172 out += info.outJump;
10175 else if (info.inFormat == RTAUDIO_SINT32) {
10176 Int32 *in = (Int32 *)inBuffer;
10177 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10178 for (j=0; j<info.channels; j++) {
10179 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10182 out += info.outJump;
10185 else if (info.inFormat == RTAUDIO_FLOAT32) {
10186 Float32 *in = (Float32 *)inBuffer;
10187 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10188 for (j=0; j<info.channels; j++) {
10189 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10192 out += info.outJump;
10195 else if (info.inFormat == RTAUDIO_FLOAT64) {
10196 Float64 *in = (Float64 *)inBuffer;
10197 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10198 for (j=0; j<info.channels; j++) {
10199 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10202 out += info.outJump;
10208 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10209 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10210 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10212 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10218 if ( format == RTAUDIO_SINT16 ) {
10219 for ( unsigned int i=0; i<samples; i++ ) {
10220 // Swap 1st and 2nd bytes.
10225 // Increment 2 bytes.
10229 else if ( format == RTAUDIO_SINT32 ||
10230 format == RTAUDIO_FLOAT32 ) {
10231 for ( unsigned int i=0; i<samples; i++ ) {
10232 // Swap 1st and 4th bytes.
10237 // Swap 2nd and 3rd bytes.
10243 // Increment 3 more bytes.
10247 else if ( format == RTAUDIO_SINT24 ) {
10248 for ( unsigned int i=0; i<samples; i++ ) {
10249 // Swap 1st and 3rd bytes.
10254 // Increment 2 more bytes.
10258 else if ( format == RTAUDIO_FLOAT64 ) {
10259 for ( unsigned int i=0; i<samples; i++ ) {
10260 // Swap 1st and 8th bytes
10265 // Swap 2nd and 7th bytes
10271 // Swap 3rd and 6th bytes
10277 // Swap 4th and 5th bytes
10283 // Increment 5 more bytes.
10289 // Indentation settings for Vim and Emacs
10291 // Local Variables:
10292 // c-basic-offset: 2
10293 // indent-tabs-mode: nil
10296 // vim: et sts=2 sw=2