1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio GitHub site: https://github.com/thestk/rtaudio
11 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
13 RtAudio: realtime audio i/o C++ classes
14 Copyright (c) 2001-2019 Gary P. Scavone
16 Permission is hereby granted, free of charge, to any person
17 obtaining a copy of this software and associated documentation files
18 (the "Software"), to deal in the Software without restriction,
19 including without limitation the rights to use, copy, modify, merge,
20 publish, distribute, sublicense, and/or sell copies of the Software,
21 and to permit persons to whom the Software is furnished to do so,
22 subject to the following conditions:
24 The above copyright notice and this permission notice shall be
25 included in all copies or substantial portions of the Software.
27 Any person wishing to distribute modifications to the Software is
28 asked to send the modifications to the original developer so that
29 they can be incorporated into the canonical version. This is,
30 however, not a binding provision of this license.
32 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
33 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
34 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
35 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
36 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
37 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
38 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40 /************************************************************************/
42 // RtAudio: Version 6.0.0beta1
52 // Static variable definitions.
53 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
54 const unsigned int RtApi::SAMPLE_RATES[] = {
55 4000, 5512, 8000, 9600, 11025, 16000, 22050,
56 32000, 44100, 48000, 88200, 96000, 176400, 192000
59 #if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
60 #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
61 #define MUTEX_DESTROY(A) DeleteCriticalSection(A)
62 #define MUTEX_LOCK(A) EnterCriticalSection(A)
63 #define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
// Build a std::string from a null-terminated narrow (char) buffer.
// Overloaded with a wchar_t* variant so device-name code can call it
// uniformly regardless of the platform's native character width.
static std::string convertCharPointerToStdString(const char *text)
{
  std::string converted( text );
  return converted;
}
72 static std::string convertCharPointerToStdString(const wchar_t *text)
74 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
75 std::string s( length-1, '\0' );
76 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
80 #elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
82 #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
83 #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
84 #define MUTEX_LOCK(A) pthread_mutex_lock(A)
85 #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
87 #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
88 #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
91 // *************************************************** //
93 // RtAudio definitions.
95 // *************************************************** //
97 std::string RtAudio :: getVersion( void )
99 return RTAUDIO_VERSION;
102 // Define API names and display names.
103 // Must be in same order as API enum.
105 const char* rtaudio_api_names[][2] = {
106 { "unspecified" , "Unknown" },
108 { "pulse" , "Pulse" },
109 { "oss" , "OpenSoundSystem" },
111 { "core" , "CoreAudio" },
112 { "wasapi" , "WASAPI" },
114 { "ds" , "DirectSound" },
115 { "dummy" , "Dummy" },
118 const unsigned int rtaudio_num_api_names =
119 sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);
121 // The order here will control the order of RtAudio's API search in
123 extern "C" const RtAudio::Api rtaudio_compiled_apis[] = {
124 #if defined(__UNIX_JACK__)
127 #if defined(__LINUX_PULSE__)
128 RtAudio::LINUX_PULSE,
130 #if defined(__LINUX_ALSA__)
133 #if defined(__LINUX_OSS__)
136 #if defined(__WINDOWS_ASIO__)
137 RtAudio::WINDOWS_ASIO,
139 #if defined(__WINDOWS_WASAPI__)
140 RtAudio::WINDOWS_WASAPI,
142 #if defined(__WINDOWS_DS__)
145 #if defined(__MACOSX_CORE__)
146 RtAudio::MACOSX_CORE,
148 #if defined(__RTAUDIO_DUMMY__)
149 RtAudio::RTAUDIO_DUMMY,
151 RtAudio::UNSPECIFIED,
154 extern "C" const unsigned int rtaudio_num_compiled_apis =
155 sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
158 // This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
159 // If the build breaks here, check that they match.
160 template<bool b> class StaticAssert { private: StaticAssert() {} };
161 template<> class StaticAssert<true>{ public: StaticAssert() {} };
162 class StaticAssertions { StaticAssertions() {
163 StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
166 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
168 apis = std::vector<RtAudio::Api>(rtaudio_compiled_apis,
169 rtaudio_compiled_apis + rtaudio_num_compiled_apis);
172 std::string RtAudio :: getApiName( RtAudio::Api api )
174 if (api < 0 || api >= RtAudio::NUM_APIS)
176 return rtaudio_api_names[api][0];
179 std::string RtAudio :: getApiDisplayName( RtAudio::Api api )
181 if (api < 0 || api >= RtAudio::NUM_APIS)
183 return rtaudio_api_names[api][1];
186 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
189 for (i = 0; i < rtaudio_num_compiled_apis; ++i)
190 if (name == rtaudio_api_names[rtaudio_compiled_apis[i]][0])
191 return rtaudio_compiled_apis[i];
192 return RtAudio::UNSPECIFIED;
195 void RtAudio :: openRtApi( RtAudio::Api api )
201 #if defined(__UNIX_JACK__)
202 if ( api == UNIX_JACK )
203 rtapi_ = new RtApiJack();
205 #if defined(__LINUX_ALSA__)
206 if ( api == LINUX_ALSA )
207 rtapi_ = new RtApiAlsa();
209 #if defined(__LINUX_PULSE__)
210 if ( api == LINUX_PULSE )
211 rtapi_ = new RtApiPulse();
213 #if defined(__LINUX_OSS__)
214 if ( api == LINUX_OSS )
215 rtapi_ = new RtApiOss();
217 #if defined(__WINDOWS_ASIO__)
218 if ( api == WINDOWS_ASIO )
219 rtapi_ = new RtApiAsio();
221 #if defined(__WINDOWS_WASAPI__)
222 if ( api == WINDOWS_WASAPI )
223 rtapi_ = new RtApiWasapi();
225 #if defined(__WINDOWS_DS__)
226 if ( api == WINDOWS_DS )
227 rtapi_ = new RtApiDs();
229 #if defined(__MACOSX_CORE__)
230 if ( api == MACOSX_CORE )
231 rtapi_ = new RtApiCore();
233 #if defined(__RTAUDIO_DUMMY__)
234 if ( api == RTAUDIO_DUMMY )
235 rtapi_ = new RtApiDummy();
239 RtAudio :: RtAudio( RtAudio::Api api, RtAudioErrorCallback errorCallback )
243 std::string errorMessage;
244 if ( api != UNSPECIFIED ) {
245 // Attempt to open the specified API.
249 if ( errorCallback ) rtapi_->setErrorCallback( errorCallback );
253 // No compiled support for specified API value. Issue a warning
254 // and continue as if no API was specified.
255 errorMessage = "RtAudio: no compiled support for specified API argument!";
257 errorCallback( RTAUDIO_INVALID_USE, errorMessage );
259 std::cerr << '\n' << errorMessage << '\n' << std::endl;
262 // Iterate through the compiled APIs and return as soon as we find
263 // one with at least one device or we reach the end of the list.
264 std::vector< RtAudio::Api > apis;
265 getCompiledApi( apis );
266 for ( unsigned int i=0; i<apis.size(); i++ ) {
267 openRtApi( apis[i] );
268 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
272 if ( errorCallback ) rtapi_->setErrorCallback( errorCallback );
276 // It should not be possible to get here because the preprocessor
277 // definition __RTAUDIO_DUMMY__ is automatically defined in RtAudio.h
278 // if no API-specific definitions are passed to the compiler. But just
279 // in case something weird happens, issue an error message and abort.
280 errorMessage = "RtAudio: no compiled API support found ... critical error!";
282 errorCallback( RTAUDIO_INVALID_USE, errorMessage );
284 std::cerr << '\n' << errorMessage << '\n' << std::endl;
288 RtAudio :: ~RtAudio()
294 RtAudioErrorType RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
295 RtAudio::StreamParameters *inputParameters,
296 RtAudioFormat format, unsigned int sampleRate,
297 unsigned int *bufferFrames,
298 RtAudioCallback callback, void *userData,
299 RtAudio::StreamOptions *options )
301 return rtapi_->openStream( outputParameters, inputParameters, format,
302 sampleRate, bufferFrames, callback,
306 // *************************************************** //
308 // Public RtApi definitions (see end of file for
309 // private or protected utility functions).
311 // *************************************************** //
316 MUTEX_INITIALIZE( &stream_.mutex );
318 showWarnings_ = true;
323 MUTEX_DESTROY( &stream_.mutex );
326 RtAudioErrorType RtApi :: openStream( RtAudio::StreamParameters *oParams,
327 RtAudio::StreamParameters *iParams,
328 RtAudioFormat format, unsigned int sampleRate,
329 unsigned int *bufferFrames,
330 RtAudioCallback callback, void *userData,
331 RtAudio::StreamOptions *options )
333 if ( stream_.state != STREAM_CLOSED ) {
334 errorText_ = "RtApi::openStream: a stream is already open!";
335 return error( RTAUDIO_INVALID_USE );
338 // Clear stream information potentially left from a previously open stream.
341 if ( oParams && oParams->nChannels < 1 ) {
342 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
343 return error( RTAUDIO_INVALID_USE );
346 if ( iParams && iParams->nChannels < 1 ) {
347 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
348 return error( RTAUDIO_INVALID_USE );
351 if ( oParams == NULL && iParams == NULL ) {
352 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
353 return error( RTAUDIO_INVALID_USE );
356 if ( formatBytes(format) == 0 ) {
357 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
358 return error( RTAUDIO_INVALID_USE );
361 unsigned int nDevices = getDeviceCount();
362 unsigned int oChannels = 0;
364 oChannels = oParams->nChannels;
365 if ( oParams->deviceId >= nDevices ) {
366 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
367 return error( RTAUDIO_INVALID_USE );
371 unsigned int iChannels = 0;
373 iChannels = iParams->nChannels;
374 if ( iParams->deviceId >= nDevices ) {
375 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
376 return error( RTAUDIO_INVALID_USE );
382 if ( oChannels > 0 ) {
384 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
385 sampleRate, format, bufferFrames, options );
386 if ( result == false ) {
387 return error( RTAUDIO_SYSTEM_ERROR );
391 if ( iChannels > 0 ) {
393 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
394 sampleRate, format, bufferFrames, options );
395 if ( result == false ) {
396 return error( RTAUDIO_SYSTEM_ERROR );
400 stream_.callbackInfo.callback = (void *) callback;
401 stream_.callbackInfo.userData = userData;
403 if ( options ) options->numberOfBuffers = stream_.nBuffers;
404 stream_.state = STREAM_STOPPED;
405 return RTAUDIO_NO_ERROR;
408 unsigned int RtApi :: getDefaultInputDevice( void )
410 // Should be implemented in subclasses if possible.
414 unsigned int RtApi :: getDefaultOutputDevice( void )
416 // Should be implemented in subclasses if possible.
420 void RtApi :: closeStream( void )
422 // MUST be implemented in subclasses!
426 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
427 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
428 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
429 RtAudio::StreamOptions * /*options*/ )
431 // MUST be implemented in subclasses!
435 void RtApi :: tickStreamTime( void )
437 // Subclasses that do not provide their own implementation of
438 // getStreamTime should call this function once per buffer I/O to
439 // provide basic stream time support.
441 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
444 #if defined( HAVE_GETTIMEOFDAY )
445 gettimeofday( &stream_.lastTickTimestamp, NULL );
450 long RtApi :: getStreamLatency( void )
452 long totalLatency = 0;
453 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
454 totalLatency = stream_.latency[0];
455 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
456 totalLatency += stream_.latency[1];
462 double RtApi :: getStreamTime( void )
464 #if defined( HAVE_GETTIMEOFDAY )
465 // Return a very accurate estimate of the stream time by
466 // adding in the elapsed time since the last tick.
470 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
471 return stream_.streamTime;
473 gettimeofday( &now, NULL );
474 then = stream_.lastTickTimestamp;
475 return stream_.streamTime +
476 ((now.tv_sec + 0.000001 * now.tv_usec) -
477 (then.tv_sec + 0.000001 * then.tv_usec));
479 return stream_.streamTime;
484 void RtApi :: setStreamTime( double time )
487 stream_.streamTime = time;
489 #if defined( HAVE_GETTIMEOFDAY )
490 gettimeofday( &stream_.lastTickTimestamp, NULL );
495 unsigned int RtApi :: getStreamSampleRate( void )
497 if ( isStreamOpen() ) return stream_.sampleRate;
502 // *************************************************** //
504 // OS/API-specific methods.
506 // *************************************************** //
508 #if defined(__MACOSX_CORE__)
510 // The OS X CoreAudio API is designed to use a separate callback
511 // procedure for each of its audio devices. A single RtAudio duplex
512 // stream using two different devices is supported here, though it
513 // cannot be guaranteed to always behave correctly because we cannot
514 // synchronize these two callbacks.
516 // A property listener is installed for over/underrun information.
517 // However, no functionality is currently provided to allow property
518 // listeners to trigger user handlers because it is unclear what could
519 // be done if a critical stream parameter (buffer size, sample rate,
520 // device disconnect) notification arrived. The listeners entail
521 // quite a bit of extra code and most likely, a user program wouldn't
522 // be prepared for the result anyway. However, we do provide a flag
523 // to the client callback function to inform of an over/underrun.
525 // A structure to hold various information related to the CoreAudio API
528 AudioDeviceID id[2]; // device ids
529 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
530 AudioDeviceIOProcID procId[2];
532 UInt32 iStream[2]; // device stream index (or first if using multiple)
533 UInt32 nStreams[2]; // number of streams to use
536 pthread_cond_t condition;
537 int drainCounter; // Tracks callback counts when draining
538 bool internalDrain; // Indicates if stop is initiated from callback or not.
539 bool xrunListenerAdded[2];
540 bool disconnectListenerAdded[2];
543 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; procId[0] = 0; procId[1] = 0; xrun[0] = false; xrun[1] = false; xrunListenerAdded[0] = false; xrunListenerAdded[1] = false; disconnectListenerAdded[0] = false; disconnectListenerAdded[1] = false; }
546 RtApiCore:: RtApiCore()
548 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
549 // This is a largely undocumented but absolutely necessary
550 // requirement starting with OS-X 10.6. If not called, queries and
551 // updates to various audio device properties are not handled
553 CFRunLoopRef theRunLoop = NULL;
554 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
555 kAudioObjectPropertyScopeGlobal,
556 kAudioObjectPropertyElementMaster };
557 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
558 if ( result != noErr ) {
559 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
560 error( RTAUDIO_SYSTEM_ERROR );
565 RtApiCore :: ~RtApiCore()
567 // The subclass destructor gets called before the base class
568 // destructor, so close an existing stream before deallocating
569 // apiDeviceId memory.
570 if ( stream_.state != STREAM_CLOSED ) closeStream();
573 unsigned int RtApiCore :: getDeviceCount( void )
575 // Find out how many audio devices there are, if any.
577 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
578 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
579 if ( result != noErr ) {
580 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
581 error( RTAUDIO_SYSTEM_ERROR );
585 return dataSize / sizeof( AudioDeviceID );
588 unsigned int RtApiCore :: getDefaultInputDevice( void )
590 unsigned int nDevices = getDeviceCount();
591 if ( nDevices <= 1 ) return 0;
594 UInt32 dataSize = sizeof( AudioDeviceID );
595 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
596 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
597 if ( result != noErr ) {
598 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
599 error( RTAUDIO_SYSTEM_ERROR );
603 dataSize *= nDevices;
604 AudioDeviceID deviceList[ nDevices ];
605 property.mSelector = kAudioHardwarePropertyDevices;
606 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
607 if ( result != noErr ) {
608 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
609 error( RTAUDIO_SYSTEM_ERROR );
613 for ( unsigned int i=0; i<nDevices; i++ )
614 if ( id == deviceList[i] ) return i;
616 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
617 error( RTAUDIO_WARNING );
621 unsigned int RtApiCore :: getDefaultOutputDevice( void )
623 unsigned int nDevices = getDeviceCount();
624 if ( nDevices <= 1 ) return 0;
627 UInt32 dataSize = sizeof( AudioDeviceID );
628 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
629 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
630 if ( result != noErr ) {
631 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
632 error( RTAUDIO_SYSTEM_ERROR );
636 dataSize = sizeof( AudioDeviceID ) * nDevices;
637 AudioDeviceID deviceList[ nDevices ];
638 property.mSelector = kAudioHardwarePropertyDevices;
639 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
640 if ( result != noErr ) {
641 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
642 error( RTAUDIO_SYSTEM_ERROR );
646 for ( unsigned int i=0; i<nDevices; i++ )
647 if ( id == deviceList[i] ) return i;
649 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
650 error( RTAUDIO_WARNING );
654 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
656 RtAudio::DeviceInfo info;
660 unsigned int nDevices = getDeviceCount();
661 if ( nDevices == 0 ) {
662 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
663 error( RTAUDIO_INVALID_USE );
667 if ( device >= nDevices ) {
668 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
669 error( RTAUDIO_INVALID_USE );
673 AudioDeviceID deviceList[ nDevices ];
674 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
675 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
676 kAudioObjectPropertyScopeGlobal,
677 kAudioObjectPropertyElementMaster };
678 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
679 0, NULL, &dataSize, (void *) &deviceList );
680 if ( result != noErr ) {
681 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
682 error( RTAUDIO_WARNING );
686 AudioDeviceID id = deviceList[ device ];
688 // Get the device name.
691 dataSize = sizeof( CFStringRef );
692 property.mSelector = kAudioObjectPropertyManufacturer;
693 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
694 if ( result != noErr ) {
695 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
696 errorText_ = errorStream_.str();
697 error( RTAUDIO_WARNING );
701 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
702 long length = CFStringGetLength(cfname);
703 char *mname = (char *)malloc(length * 3 + 1);
704 #if defined( UNICODE ) || defined( _UNICODE )
705 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
707 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
709 info.name.append( (const char *)mname, strlen(mname) );
710 info.name.append( ": " );
714 property.mSelector = kAudioObjectPropertyName;
715 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
716 if ( result != noErr ) {
717 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
718 errorText_ = errorStream_.str();
719 error( RTAUDIO_WARNING );
723 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
724 length = CFStringGetLength(cfname);
725 char *name = (char *)malloc(length * 3 + 1);
726 #if defined( UNICODE ) || defined( _UNICODE )
727 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
729 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
731 info.name.append( (const char *)name, strlen(name) );
735 // Get the output stream "configuration".
736 AudioBufferList *bufferList = nil;
737 property.mSelector = kAudioDevicePropertyStreamConfiguration;
738 property.mScope = kAudioDevicePropertyScopeOutput;
739 // property.mElement = kAudioObjectPropertyElementWildcard;
741 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
742 if ( result != noErr || dataSize == 0 ) {
743 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
744 errorText_ = errorStream_.str();
745 error( RTAUDIO_WARNING );
749 // Allocate the AudioBufferList.
750 bufferList = (AudioBufferList *) malloc( dataSize );
751 if ( bufferList == NULL ) {
752 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
753 error( RTAUDIO_WARNING );
757 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
758 if ( result != noErr || dataSize == 0 ) {
760 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
761 errorText_ = errorStream_.str();
762 error( RTAUDIO_WARNING );
766 // Get output channel information.
767 unsigned int i, nStreams = bufferList->mNumberBuffers;
768 for ( i=0; i<nStreams; i++ )
769 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
772 // Get the input stream "configuration".
773 property.mScope = kAudioDevicePropertyScopeInput;
774 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
775 if ( result != noErr || dataSize == 0 ) {
776 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
777 errorText_ = errorStream_.str();
778 error( RTAUDIO_WARNING );
782 // Allocate the AudioBufferList.
783 bufferList = (AudioBufferList *) malloc( dataSize );
784 if ( bufferList == NULL ) {
785 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
786 error( RTAUDIO_WARNING );
790 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
791 if (result != noErr || dataSize == 0) {
793 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
794 errorText_ = errorStream_.str();
795 error( RTAUDIO_WARNING );
799 // Get input channel information.
800 nStreams = bufferList->mNumberBuffers;
801 for ( i=0; i<nStreams; i++ )
802 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
805 // If device opens for both playback and capture, we determine the channels.
806 if ( info.outputChannels > 0 && info.inputChannels > 0 )
807 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
809 // Probe the device sample rates.
810 bool isInput = false;
811 if ( info.outputChannels == 0 ) isInput = true;
813 // Determine the supported sample rates.
814 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
815 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
816 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
817 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
818 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
819 errorText_ = errorStream_.str();
820 error( RTAUDIO_WARNING );
824 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
825 AudioValueRange rangeList[ nRanges ];
826 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
827 if ( result != kAudioHardwareNoError ) {
828 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
829 errorText_ = errorStream_.str();
830 error( RTAUDIO_WARNING );
834 // The sample rate reporting mechanism is a bit of a mystery. It
835 // seems that it can either return individual rates or a range of
836 // rates. I assume that if the min / max range values are the same,
837 // then that represents a single supported rate and if the min / max
838 // range values are different, the device supports an arbitrary
839 // range of values (though there might be multiple ranges, so we'll
840 // use the most conservative range).
841 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
842 bool haveValueRange = false;
843 info.sampleRates.clear();
844 for ( UInt32 i=0; i<nRanges; i++ ) {
845 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
846 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
847 info.sampleRates.push_back( tmpSr );
849 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
850 info.preferredSampleRate = tmpSr;
853 haveValueRange = true;
854 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
855 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
859 if ( haveValueRange ) {
860 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
861 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
862 info.sampleRates.push_back( SAMPLE_RATES[k] );
864 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
865 info.preferredSampleRate = SAMPLE_RATES[k];
870 // Sort and remove any redundant values
871 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
872 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
874 if ( info.sampleRates.size() == 0 ) {
875 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
876 errorText_ = errorStream_.str();
877 error( RTAUDIO_WARNING );
881 // Probe the currently configured sample rate
883 dataSize = sizeof( Float64 );
884 property.mSelector = kAudioDevicePropertyNominalSampleRate;
885 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
886 if ( result == noErr ) info.currentSampleRate = (unsigned int) nominalRate;
888 // CoreAudio always uses 32-bit floating point data for PCM streams.
889 // Thus, any other "physical" formats supported by the device are of
890 // no interest to the client.
891 info.nativeFormats = RTAUDIO_FLOAT32;
893 if ( info.outputChannels > 0 )
894 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
895 if ( info.inputChannels > 0 )
896 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
902 static OSStatus callbackHandler( AudioDeviceID inDevice,
903 const AudioTimeStamp* /*inNow*/,
904 const AudioBufferList* inInputData,
905 const AudioTimeStamp* /*inInputTime*/,
906 AudioBufferList* outOutputData,
907 const AudioTimeStamp* /*inOutputTime*/,
910 CallbackInfo *info = (CallbackInfo *) infoPointer;
912 RtApiCore *object = (RtApiCore *) info->object;
913 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
914 return kAudioHardwareUnspecifiedError;
916 return kAudioHardwareNoError;
919 static OSStatus disconnectListener( AudioObjectID /*inDevice*/,
921 const AudioObjectPropertyAddress properties[],
924 for ( UInt32 i=0; i<nAddresses; i++ ) {
925 if ( properties[i].mSelector == kAudioDevicePropertyDeviceIsAlive ) {
926 CallbackInfo *info = (CallbackInfo *) infoPointer;
927 RtApiCore *object = (RtApiCore *) info->object;
928 info->deviceDisconnected = true;
929 object->closeStream();
930 return kAudioHardwareUnspecifiedError;
934 return kAudioHardwareNoError;
937 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
939 const AudioObjectPropertyAddress properties[],
940 void* handlePointer )
942 CoreHandle *handle = (CoreHandle *) handlePointer;
943 for ( UInt32 i=0; i<nAddresses; i++ ) {
944 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
945 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
946 handle->xrun[1] = true;
948 handle->xrun[0] = true;
952 return kAudioHardwareNoError;
// Probe and open the given CoreAudio device for the requested mode
// (OUTPUT or INPUT), channel count, channel offset, sample rate, user
// format and buffer size, populating stream_ and the per-stream
// CoreHandle. Returns true (SUCCESS) on success; on failure sets
// errorText_ and returns FAILURE.
// NOTE(review): this source is a gappy numbered listing — closing
// braces and several statements (e.g. the declarations of hog_pid,
// formatFlags and latency, and most 'return FAILURE;' lines) fall on
// original lines not present here. Only comments and the two fixes
// flagged below differ from the original bytes.
955 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
956 unsigned int firstChannel, unsigned int sampleRate,
957 RtAudioFormat format, unsigned int *bufferSize,
958 RtAudio::StreamOptions *options )
961 unsigned int nDevices = getDeviceCount();
962 if ( nDevices == 0 ) {
963 // This should not happen because a check is made before this function is called.
964 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
968 if ( device >= nDevices ) {
969 // This should not happen because a check is made before this function is called.
970 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
// NOTE(review): variable-length array is a compiler extension (gcc/clang),
// not standard C++.
974 AudioDeviceID deviceList[ nDevices ];
975 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
976 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
977 kAudioObjectPropertyScopeGlobal,
978 kAudioObjectPropertyElementMaster };
979 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
980 0, NULL, &dataSize, (void *) &deviceList );
981 if ( result != noErr ) {
982 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
986 AudioDeviceID id = deviceList[ device ];
988 // Setup for stream mode.
989 bool isInput = false;
990 if ( mode == INPUT ) {
992 property.mScope = kAudioDevicePropertyScopeInput;
995 property.mScope = kAudioDevicePropertyScopeOutput;
997 // Get the stream "configuration".
998 AudioBufferList *bufferList = nil;
1000 property.mSelector = kAudioDevicePropertyStreamConfiguration;
1001 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
1002 if ( result != noErr || dataSize == 0 ) {
1003 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
1004 errorText_ = errorStream_.str();
1008 // Allocate the AudioBufferList.
1009 bufferList = (AudioBufferList *) malloc( dataSize );
1010 if ( bufferList == NULL ) {
1011 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
1015 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1016 if (result != noErr || dataSize == 0) {
1018 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1019 errorText_ = errorStream_.str();
1023 // Search for one or more streams that contain the desired number of
1024 // channels. CoreAudio devices can have an arbitrary number of
1025 // streams and each stream can have an arbitrary number of channels.
1026 // For each stream, a single buffer of interleaved samples is
1027 // provided. RtAudio prefers the use of one stream of interleaved
1028 // data or multiple consecutive single-channel streams. However, we
1029 // now support multiple consecutive multi-channel streams of
1030 // interleaved data as well.
1031 UInt32 iStream, offsetCounter = firstChannel;
1032 UInt32 nStreams = bufferList->mNumberBuffers;
1033 bool monoMode = false;
1034 bool foundStream = false;
1036 // First check that the device supports the requested number of
1038 UInt32 deviceChannels = 0;
1039 for ( iStream=0; iStream<nStreams; iStream++ )
1040 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1042 if ( deviceChannels < ( channels + firstChannel ) ) {
1044 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1045 errorText_ = errorStream_.str();
1049 // Look for a single stream meeting our needs.
1050 UInt32 firstStream = 0, streamCount = 1, streamChannels = 0, channelOffset = 0;
1051 for ( iStream=0; iStream<nStreams; iStream++ ) {
1052 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1053 if ( streamChannels >= channels + offsetCounter ) {
1054 firstStream = iStream;
1055 channelOffset = offsetCounter;
1059 if ( streamChannels > offsetCounter ) break;
1060 offsetCounter -= streamChannels;
1063 // If we didn't find a single stream above, then we should be able
1064 // to meet the channel specification with multiple streams.
1065 if ( foundStream == false ) {
1067 offsetCounter = firstChannel;
1068 for ( iStream=0; iStream<nStreams; iStream++ ) {
1069 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1070 if ( streamChannels > offsetCounter ) break;
1071 offsetCounter -= streamChannels;
1074 firstStream = iStream;
1075 channelOffset = offsetCounter;
1076 Int32 channelCounter = channels + offsetCounter - streamChannels;
1078 if ( streamChannels > 1 ) monoMode = false;
1079 while ( channelCounter > 0 ) {
1080 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1081 if ( streamChannels > 1 ) monoMode = false;
1082 channelCounter -= streamChannels;
1089 // Determine the buffer size.
1090 AudioValueRange bufferRange;
1091 dataSize = sizeof( AudioValueRange );
1092 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1093 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1095 if ( result != noErr ) {
1096 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1097 errorText_ = errorStream_.str();
// Clamp the requested size to the device's supported range.
1101 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned int) bufferRange.mMinimum;
1102 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned int) bufferRange.mMaximum;
1103 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned int) bufferRange.mMinimum;
1105 // Set the buffer size. For multiple streams, I'm assuming we only
1106 // need to make this setting for the master channel.
1107 UInt32 theSize = (UInt32) *bufferSize;
1108 dataSize = sizeof( UInt32 );
1109 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1110 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1112 if ( result != noErr ) {
1113 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1114 errorText_ = errorStream_.str();
1118 // If attempting to setup a duplex stream, the bufferSize parameter
1119 // MUST be the same in both directions!
1120 *bufferSize = theSize;
1121 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1122 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1123 errorText_ = errorStream_.str();
1127 stream_.bufferSize = *bufferSize;
1128 stream_.nBuffers = 1;
1130 // Try to set "hog" mode ... it's not clear to me this is working.
1131 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1133 dataSize = sizeof( hog_pid );
1134 property.mSelector = kAudioDevicePropertyHogMode;
1135 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1136 if ( result != noErr ) {
1137 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1138 errorText_ = errorStream_.str();
// Only claim the device if another process currently hogs it.
1142 if ( hog_pid != getpid() ) {
1144 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1145 if ( result != noErr ) {
1146 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1147 errorText_ = errorStream_.str();
1153 // Check and if necessary, change the sample rate for the device.
1154 Float64 nominalRate;
1155 dataSize = sizeof( Float64 );
1156 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1157 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1158 if ( result != noErr ) {
1159 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1160 errorText_ = errorStream_.str();
1164 // Only try to change the sample rate if off by more than 1 Hz.
1165 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1167 nominalRate = (Float64) sampleRate;
1168 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1169 if ( result != noErr ) {
1170 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1171 errorText_ = errorStream_.str();
1175 // Now wait until the reported nominal rate is what we just set.
// Polls for up to ~2 seconds; the 5000 step suggests 5 ms sleeps
// (the sleep call itself is on a line absent from this excerpt).
1176 UInt32 microCounter = 0;
1177 Float64 reportedRate = 0.0;
1178 while ( reportedRate != nominalRate ) {
1179 microCounter += 5000;
1180 if ( microCounter > 2000000 ) break;
1182 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &reportedRate );
1185 if ( microCounter > 2000000 ) {
1186 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1187 errorText_ = errorStream_.str();
1192 // Now set the stream format for all streams. Also, check the
1193 // physical format of the device and change that if necessary.
1194 AudioStreamBasicDescription description;
1195 dataSize = sizeof( AudioStreamBasicDescription );
1196 property.mSelector = kAudioStreamPropertyVirtualFormat;
1197 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1198 if ( result != noErr ) {
1199 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1200 errorText_ = errorStream_.str();
1204 // Set the sample rate and data format id. However, only make the
1205 // change if the sample rate is not within 1.0 of the desired
1206 // rate and the format is not linear pcm.
1207 bool updateFormat = false;
1208 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1209 description.mSampleRate = (Float64) sampleRate;
1210 updateFormat = true;
1213 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1214 description.mFormatID = kAudioFormatLinearPCM;
1215 updateFormat = true;
1218 if ( updateFormat ) {
1219 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1220 if ( result != noErr ) {
1221 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1222 errorText_ = errorStream_.str();
1227 // Now check the physical format.
1228 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1229 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1230 if ( result != noErr ) {
1231 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1232 errorText_ = errorStream_.str();
1236 //std::cout << "Current physical stream format:" << std::endl;
1237 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1238 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1239 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1240 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
1242 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1243 description.mFormatID = kAudioFormatLinearPCM;
1244 //description.mSampleRate = (Float64) sampleRate;
1245 AudioStreamBasicDescription testDescription = description;
1248 // We'll try higher bit rates first and then work our way down.
// FIX(review): element type was std::pair<UInt32, UInt32>, which
// truncated the fractional ids (24.2 / 24.4) pushed below and did not
// match the std::pair<Float32, UInt32> values actually stored.
1249 std::vector< std::pair<Float32, UInt32> > physicalFormats;
1250 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1251 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1252 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1253 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1254 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1255 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1256 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1257 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1258 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1259 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1260 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1261 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1263 bool setPhysicalFormat = false;
1264 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1265 testDescription = description;
1266 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1267 testDescription.mFormatFlags = physicalFormats[i].second;
// FIX(review): original used bitwise '~' on the masked flag; since
// both ~0 and ~kAudioFormatFlagIsPacked are non-zero, the condition
// was always true, giving packed 24-bit formats 4 bytes/frame instead
// of 3. Logical '!' selects only the unpacked 4-byte layouts.
1268 if ( (24 == (UInt32)physicalFormats[i].first) && !( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1269 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1271 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1272 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1273 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1274 if ( result == noErr ) {
1275 setPhysicalFormat = true;
1276 //std::cout << "Updated physical stream format:" << std::endl;
1277 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1278 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1279 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1280 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1285 if ( !setPhysicalFormat ) {
1286 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1287 errorText_ = errorStream_.str();
1290 } // done setting virtual/physical formats.
1292 // Get the stream / device latency.
1294 dataSize = sizeof( UInt32 );
1295 property.mSelector = kAudioDevicePropertyLatency;
1296 if ( AudioObjectHasProperty( id, &property ) == true ) {
1297 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1298 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1300 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1301 errorText_ = errorStream_.str();
1302 error( RTAUDIO_WARNING );
1306 // Byte-swapping: According to AudioHardware.h, the stream data will
1307 // always be presented in native-endian format, so we should never
1308 // need to byte swap.
1309 stream_.doByteSwap[mode] = false;
1311 // From the CoreAudio documentation, PCM data must be supplied as
1313 stream_.userFormat = format;
1314 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1316 if ( streamCount == 1 )
1317 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1318 else // multiple streams
1319 stream_.nDeviceChannels[mode] = channels;
1320 stream_.nUserChannels[mode] = channels;
1321 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1322 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1323 else stream_.userInterleaved = true;
1324 stream_.deviceInterleaved[mode] = true;
1325 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1327 // Set flags for buffer conversion.
1328 stream_.doConvertBuffer[mode] = false;
1329 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1330 stream_.doConvertBuffer[mode] = true;
1331 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1332 stream_.doConvertBuffer[mode] = true;
1333 if ( streamCount == 1 ) {
1334 if ( stream_.nUserChannels[mode] > 1 &&
1335 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1336 stream_.doConvertBuffer[mode] = true;
1338 else if ( monoMode && stream_.userInterleaved )
1339 stream_.doConvertBuffer[mode] = true;
1341 // Allocate our CoreHandle structure for the stream.
1342 CoreHandle *handle = 0;
1343 if ( stream_.apiHandle == 0 ) {
1345 handle = new CoreHandle;
1347 catch ( std::bad_alloc& ) {
1348 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1352 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1353 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1356 stream_.apiHandle = (void *) handle;
1359 handle = (CoreHandle *) stream_.apiHandle;
1360 handle->iStream[mode] = firstStream;
1361 handle->nStreams[mode] = streamCount;
1362 handle->id[mode] = id;
1364 // Allocate necessary internal buffers.
1365 unsigned long bufferBytes;
1366 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1367 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1368 if ( stream_.userBuffer[mode] == NULL ) {
1369 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1373 // If possible, we will make use of the CoreAudio stream buffers as
1374 // "device buffers". However, we can't do this if using multiple
1376 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1378 bool makeBuffer = true;
1379 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1380 if ( mode == INPUT ) {
// Reuse an existing (output) device buffer when it is big enough.
1381 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1382 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1383 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1388 bufferBytes *= *bufferSize;
1389 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1390 stream_.deviceBuffer = (char *) calloc( bufferBytes, sizeof(char) );
1391 if ( stream_.deviceBuffer == NULL ) {
1392 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1398 stream_.sampleRate = sampleRate;
1399 stream_.device[mode] = device;
1400 stream_.state = STREAM_STOPPED;
1401 stream_.callbackInfo.object = (void *) this;
1403 // Setup the buffer conversion information structure.
1404 if ( stream_.doConvertBuffer[mode] ) {
1405 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1406 else setConvertInfo( mode, channelOffset );
1409 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1410 // Only one callback procedure and property listener per device.
1411 stream_.mode = DUPLEX;
1413 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1414 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1416 // deprecated in favor of AudioDeviceCreateIOProcID()
1417 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1419 if ( result != noErr ) {
1420 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1421 errorText_ = errorStream_.str();
1424 if ( stream_.mode == OUTPUT && mode == INPUT )
1425 stream_.mode = DUPLEX;
1427 stream_.mode = mode;
1429 // Setup the device property listener for over/underload.
1430 property.mSelector = kAudioDeviceProcessorOverload;
1431 property.mScope = kAudioObjectPropertyScopeGlobal;
1432 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1433 if ( result != noErr ) {
1434 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting xrun listener for device (" << device << ").";
1435 errorText_ = errorStream_.str();
1438 handle->xrunListenerAdded[mode] = true;
1440 // Setup a listener to detect a possible device disconnect.
1441 property.mSelector = kAudioDevicePropertyDeviceIsAlive;
1442 result = AudioObjectAddPropertyListener( id , &property, disconnectListener, (void *) &stream_.callbackInfo );
1443 if ( result != noErr ) {
// Undo the xrun listener registered above before bailing out.
1444 AudioObjectRemovePropertyListener( id, &property, xrunListener, (void *) handle );
1445 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting disconnect listener for device (" << device << ").";
1446 errorText_ = errorStream_.str();
1449 handle->disconnectListenerAdded[mode] = true;
1455 closeStream(); // this should safely clear out procedures, listeners and memory, even for duplex stream
// Close the open stream: remove the xrun and disconnect property
// listeners, stop and destroy the IOProc(s) on the output device and
// (if distinct) the input device, free the user/device buffers, and
// tear down the CoreHandle. Emits RTAUDIO_DEVICE_DISCONNECT if the
// stream was closed because its device disappeared.
// NOTE(review): several closing-brace / return lines are absent from
// this numbered excerpt; only comments and the one fix flagged below
// differ from the original bytes.
1459 void RtApiCore :: closeStream( void )
1461 if ( stream_.state == STREAM_CLOSED ) {
1462 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1463 error( RTAUDIO_WARNING );
1467 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1468 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1470 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1471 kAudioObjectPropertyScopeGlobal,
1472 kAudioObjectPropertyElementMaster };
1474 if ( handle->xrunListenerAdded[0] ) {
1475 property.mSelector = kAudioDeviceProcessorOverload;
1476 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1477 errorText_ = "RtApiCore::closeStream(): error removing xrun property listener!";
1478 error( RTAUDIO_WARNING );
1481 if ( handle->disconnectListenerAdded[0] ) {
1482 property.mSelector = kAudioDevicePropertyDeviceIsAlive;
1483 if (AudioObjectRemovePropertyListener( handle->id[0], &property, disconnectListener, (void *) &stream_.callbackInfo ) != noErr) {
1484 errorText_ = "RtApiCore::closeStream(): error removing disconnect property listener!";
1485 error( RTAUDIO_WARNING );
1489 if ( stream_.state == STREAM_RUNNING )
1490 AudioDeviceStop( handle->id[0], callbackHandler );
1492 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1493 if ( handle->procId[0] )
1494 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1496 // deprecated in favor of AudioDeviceDestroyIOProcID()
1497 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Input side: only when the input device differs from the output one.
1502 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1504 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1505 kAudioObjectPropertyScopeGlobal,
1506 kAudioObjectPropertyElementMaster };
1508 if ( handle->xrunListenerAdded[1] ) {
1509 property.mSelector = kAudioDeviceProcessorOverload;
1510 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1511 errorText_ = "RtApiCore::closeStream(): error removing xrun property listener!";
1512 error( RTAUDIO_WARNING );
// FIX(review): was disconnectListenerAdded[0] — a copy-paste from the
// output section above; the input branch must test the input-side
// flag (index 1), matching the id[1] listener being removed below.
1516 if ( handle->disconnectListenerAdded[1] ) {
1517 property.mSelector = kAudioDevicePropertyDeviceIsAlive;
1518 if (AudioObjectRemovePropertyListener( handle->id[1], &property, disconnectListener, (void *) &stream_.callbackInfo ) != noErr) {
1519 errorText_ = "RtApiCore::closeStream(): error removing disconnect property listener!";
1520 error( RTAUDIO_WARNING );
1525 if ( stream_.state == STREAM_RUNNING )
1526 AudioDeviceStop( handle->id[1], callbackHandler );
1527 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1528 if ( handle->procId[1] )
1529 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1531 // deprecated in favor of AudioDeviceDestroyIOProcID()
1532 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Release per-direction user buffers and the shared device buffer.
1536 for ( int i=0; i<2; i++ ) {
1537 if ( stream_.userBuffer[i] ) {
1538 free( stream_.userBuffer[i] );
1539 stream_.userBuffer[i] = 0;
1543 if ( stream_.deviceBuffer ) {
1544 free( stream_.deviceBuffer );
1545 stream_.deviceBuffer = 0;
1548 // Destroy pthread condition variable.
1549 pthread_cond_signal( &handle->condition ); // signal condition variable in case stopStream is blocked
1550 pthread_cond_destroy( &handle->condition );
1552 stream_.apiHandle = 0;
1554 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1555 if ( info->deviceDisconnected ) {
1556 errorText_ = "RtApiCore: the stream device was disconnected (and closed)!";
1557 error( RTAUDIO_DEVICE_DISCONNECT );
1561 //stream_.mode = UNINITIALIZED;
1562 //stream_.state = STREAM_CLOSED;
// Start audio i/o on the open stream: kicks off the CoreAudio IOProc
// on the output device and, for duplex streams using two distinct
// devices, on the input device as well. Returns RTAUDIO_NO_ERROR on
// success, RTAUDIO_WARNING if the stream is not in a startable state,
// or RTAUDIO_SYSTEM_ERROR if AudioDeviceStart() fails.
1565 RtAudioErrorType RtApiCore :: startStream( void )
1567 if ( stream_.state != STREAM_STOPPED ) {
1568 if ( stream_.state == STREAM_RUNNING )
1569 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1570 else if ( stream_.state == STREAM_STOPPING || stream_.state == STREAM_CLOSED )
1571 errorText_ = "RtApiCore::startStream(): the stream is stopping or closed!";
1572 return error( RTAUDIO_WARNING );
// Record the start time — presumably consumed by getStreamTime() for
// sub-buffer timing; confirm against the full source.
1576 #if defined( HAVE_GETTIMEOFDAY )
1577 gettimeofday( &stream_.lastTickTimestamp, NULL );
1581 OSStatus result = noErr;
1582 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1583 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1585 result = AudioDeviceStart( handle->id[0], callbackHandler );
1586 if ( result != noErr ) {
1587 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1588 errorText_ = errorStream_.str();
// Input device is started separately only when it differs from the
// output device (a single IOProc serves same-device duplex).
1593 if ( stream_.mode == INPUT ||
1594 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1596 // Clear user input buffer
1597 unsigned long bufferBytes;
1598 bufferBytes = stream_.nUserChannels[1] * stream_.bufferSize * formatBytes( stream_.userFormat );
1599 memset( stream_.userBuffer[1], 0, bufferBytes * sizeof(char) );
1601 result = AudioDeviceStart( handle->id[1], callbackHandler );
1602 if ( result != noErr ) {
1603 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1604 errorText_ = errorStream_.str();
// Reset drain bookkeeping and mark the stream running.
1609 handle->drainCounter = 0;
1610 handle->internalDrain = false;
1611 stream_.state = STREAM_RUNNING;
1614 if ( result == noErr ) return RTAUDIO_NO_ERROR;
1615 return error( RTAUDIO_SYSTEM_ERROR );
// Stop the stream. For output, first request a drain (drainCounter = 2)
// and wait for callbackEvent() to signal that pending output has been
// played, then stop the output IOProc; a distinct input device's IOProc
// is stopped as well. Returns RTAUDIO_NO_ERROR, RTAUDIO_WARNING (bad
// state) or RTAUDIO_SYSTEM_ERROR (AudioDeviceStop failure).
1618 RtAudioErrorType RtApiCore :: stopStream( void )
1620 if ( stream_.state != STREAM_RUNNING && stream_.state != STREAM_STOPPING ) {
1621 if ( stream_.state == STREAM_STOPPED )
1622 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1623 else if ( stream_.state == STREAM_CLOSED )
1624 errorText_ = "RtApiCore::stopStream(): the stream is closed!";
1625 return error( RTAUDIO_WARNING );
1628 OSStatus result = noErr;
1629 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1630 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain was requested yet (an abort sets
// it to 2 beforehand, skipping this wait).
1632 if ( handle->drainCounter == 0 ) {
1633 handle->drainCounter = 2;
// NOTE(review): POSIX requires the mutex passed to pthread_cond_wait
// to be locked by the calling thread; no lock of stream_.mutex is
// visible in this excerpt — confirm against the full source.
1634 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1637 result = AudioDeviceStop( handle->id[0], callbackHandler );
1638 if ( result != noErr ) {
1639 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1640 errorText_ = errorStream_.str();
// Stop the input IOProc only when it runs on a different device.
1645 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1647 result = AudioDeviceStop( handle->id[1], callbackHandler );
1648 if ( result != noErr ) {
1649 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1650 errorText_ = errorStream_.str();
1655 stream_.state = STREAM_STOPPED;
1658 if ( result == noErr ) return RTAUDIO_NO_ERROR;
1659 return error( RTAUDIO_SYSTEM_ERROR );
// Stop the stream immediately without draining pending output.
// Setting drainCounter = 2 up front makes callbackEvent() write zeros
// to the output and lets stopStream() skip its drain wait.
1662 RtAudioErrorType RtApiCore :: abortStream( void )
1664 if ( stream_.state != STREAM_RUNNING ) {
1665 if ( stream_.state == STREAM_STOPPED )
1666 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1667 else if ( stream_.state == STREAM_STOPPING || stream_.state == STREAM_CLOSED )
1668 errorText_ = "RtApiCore::abortStream(): the stream is stopping or closed!";
1669 return error( RTAUDIO_WARNING );
1673 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1674 handle->drainCounter = 2;
// Transition to STOPPING so stopStream()'s state check passes.
1676 stream_.state = STREAM_STOPPING;
1677 return stopStream();
1680 // This function will be called by a spawned thread when the user
1681 // callback function signals that the stream should be stopped or
1682 // aborted. It is better to handle it this way because the
1683 // callbackEvent() function probably should return before the AudioDeviceStop()
1684 // function is called.
// Thread entry point spawned by callbackEvent() when the user callback
// requests an internal stop: invokes stopStream() from outside the
// CoreAudio callback thread (so the IOProc can return first), then
// terminates the thread.
1685 static void *coreStopStream( void *ptr )
1687 CallbackInfo *info = (CallbackInfo *) ptr;
1688 RtApiCore *object = (RtApiCore *) info->object;
1690 object->stopStream();
1691 pthread_exit( NULL );
1694 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1695 const AudioBufferList *inBufferList,
1696 const AudioBufferList *outBufferList )
1698 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1699 if ( stream_.state == STREAM_CLOSED ) {
1700 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1701 error( RTAUDIO_WARNING );
1705 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1706 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1708 // Check if we were draining the stream and signal is finished.
1709 if ( handle->drainCounter > 3 ) {
1710 ThreadHandle threadId;
1712 stream_.state = STREAM_STOPPING;
1713 if ( handle->internalDrain == true )
1714 pthread_create( &threadId, NULL, coreStopStream, info );
1715 else // external call to stopStream()
1716 pthread_cond_signal( &handle->condition );
1720 AudioDeviceID outputDevice = handle->id[0];
1722 // Invoke user callback to get fresh output data UNLESS we are
1723 // draining stream or duplex mode AND the input/output devices are
1724 // different AND this function is called for the input device.
1725 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1726 RtAudioCallback callback = (RtAudioCallback) info->callback;
1727 double streamTime = getStreamTime();
1728 RtAudioStreamStatus status = 0;
1729 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1730 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1731 handle->xrun[0] = false;
1733 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1734 status |= RTAUDIO_INPUT_OVERFLOW;
1735 handle->xrun[1] = false;
1738 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1739 stream_.bufferSize, streamTime, status, info->userData );
1740 if ( cbReturnValue == 2 ) {
1744 else if ( cbReturnValue == 1 ) {
1745 handle->drainCounter = 1;
1746 handle->internalDrain = true;
1750 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1752 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1754 if ( handle->nStreams[0] == 1 ) {
1755 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1757 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1759 else { // fill multiple streams with zeros
1760 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1761 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1763 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1767 else if ( handle->nStreams[0] == 1 ) {
1768 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1769 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1770 stream_.userBuffer[0], stream_.convertInfo[0] );
1772 else { // copy from user buffer
1773 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1774 stream_.userBuffer[0],
1775 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1778 else { // fill multiple streams
1779 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1780 if ( stream_.doConvertBuffer[0] ) {
1781 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1782 inBuffer = (Float32 *) stream_.deviceBuffer;
1785 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1786 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1787 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1788 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1789 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1792 else { // fill multiple multi-channel streams with interleaved data
1793 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1796 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1797 UInt32 inChannels = stream_.nUserChannels[0];
1798 if ( stream_.doConvertBuffer[0] ) {
1799 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1800 inChannels = stream_.nDeviceChannels[0];
1803 if ( inInterleaved ) inOffset = 1;
1804 else inOffset = stream_.bufferSize;
1806 channelsLeft = inChannels;
1807 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1809 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1810 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1813 // Account for possible channel offset in first stream
1814 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1815 streamChannels -= stream_.channelOffset[0];
1816 outJump = stream_.channelOffset[0];
1820 // Account for possible unfilled channels at end of the last stream
1821 if ( streamChannels > channelsLeft ) {
1822 outJump = streamChannels - channelsLeft;
1823 streamChannels = channelsLeft;
1826 // Determine input buffer offsets and skips
1827 if ( inInterleaved ) {
1828 inJump = inChannels;
1829 in += inChannels - channelsLeft;
1833 in += (inChannels - channelsLeft) * inOffset;
1836 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1837 for ( unsigned int j=0; j<streamChannels; j++ ) {
1838 *out++ = in[j*inOffset];
1843 channelsLeft -= streamChannels;
1849 // Don't bother draining input
1850 if ( handle->drainCounter ) {
1851 handle->drainCounter++;
1855 AudioDeviceID inputDevice;
1856 inputDevice = handle->id[1];
1857 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1859 if ( handle->nStreams[1] == 1 ) {
1860 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1861 convertBuffer( stream_.userBuffer[1],
1862 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1863 stream_.convertInfo[1] );
1865 else { // copy to user buffer
1866 memcpy( stream_.userBuffer[1],
1867 inBufferList->mBuffers[handle->iStream[1]].mData,
1868 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1871 else { // read from multiple streams
1872 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1873 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1875 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1876 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1877 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1878 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1879 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1882 else { // read from multiple multi-channel streams
1883 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1886 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1887 UInt32 outChannels = stream_.nUserChannels[1];
1888 if ( stream_.doConvertBuffer[1] ) {
1889 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1890 outChannels = stream_.nDeviceChannels[1];
1893 if ( outInterleaved ) outOffset = 1;
1894 else outOffset = stream_.bufferSize;
1896 channelsLeft = outChannels;
1897 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1899 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1900 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1903 // Account for possible channel offset in first stream
1904 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1905 streamChannels -= stream_.channelOffset[1];
1906 inJump = stream_.channelOffset[1];
1910 // Account for possible unread channels at end of the last stream
1911 if ( streamChannels > channelsLeft ) {
1912 inJump = streamChannels - channelsLeft;
1913 streamChannels = channelsLeft;
1916 // Determine output buffer offsets and skips
1917 if ( outInterleaved ) {
1918 outJump = outChannels;
1919 out += outChannels - channelsLeft;
1923 out += (outChannels - channelsLeft) * outOffset;
1926 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1927 for ( unsigned int j=0; j<streamChannels; j++ ) {
1928 out[j*outOffset] = *in++;
1933 channelsLeft -= streamChannels;
1937 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1938 convertBuffer( stream_.userBuffer[1],
1939 stream_.deviceBuffer,
1940 stream_.convertInfo[1] );
1947 // Make sure to only tick duplex stream time once if using two devices
1948 if ( stream_.mode == DUPLEX ) {
1949 if ( handle->id[0] == handle->id[1] ) // same device, only one callback
1950 RtApi::tickStreamTime();
1951 else if ( deviceId == handle->id[0] )
1952 RtApi::tickStreamTime(); // two devices, only tick on the output callback
1954 RtApi::tickStreamTime(); // input or output stream only
1959 const char* RtApiCore :: getErrorCode( OSStatus code )
1963 case kAudioHardwareNotRunningError:
1964 return "kAudioHardwareNotRunningError";
1966 case kAudioHardwareUnspecifiedError:
1967 return "kAudioHardwareUnspecifiedError";
1969 case kAudioHardwareUnknownPropertyError:
1970 return "kAudioHardwareUnknownPropertyError";
1972 case kAudioHardwareBadPropertySizeError:
1973 return "kAudioHardwareBadPropertySizeError";
1975 case kAudioHardwareIllegalOperationError:
1976 return "kAudioHardwareIllegalOperationError";
1978 case kAudioHardwareBadObjectError:
1979 return "kAudioHardwareBadObjectError";
1981 case kAudioHardwareBadDeviceError:
1982 return "kAudioHardwareBadDeviceError";
1984 case kAudioHardwareBadStreamError:
1985 return "kAudioHardwareBadStreamError";
1987 case kAudioHardwareUnsupportedOperationError:
1988 return "kAudioHardwareUnsupportedOperationError";
1990 case kAudioDeviceUnsupportedFormatError:
1991 return "kAudioDeviceUnsupportedFormatError";
1993 case kAudioDevicePermissionsError:
1994 return "kAudioDevicePermissionsError";
1997 return "CoreAudio unknown error";
2001 //******************** End of __MACOSX_CORE__ *********************//
2004 #if defined(__UNIX_JACK__)
2006 // JACK is a low-latency audio server, originally written for the
2007 // GNU/Linux operating system and now also ported to OS-X and
2008 // Windows. It can connect a number of different applications to an
2009 // audio device, as well as allowing them to share audio between
2012 // When using JACK with RtAudio, "devices" refer to JACK clients that
2013 // have ports connected to the server. The JACK server is typically
2014 // started in a terminal as follows:
2016 // .jackd -d alsa -d hw:0
2018 // or through an interface program such as qjackctl. Many of the
2019 // parameters normally set for a stream are fixed by the JACK server
2020 // and can be specified when the JACK server is started. In
2023 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
2025 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
2026 // frames, and number of buffers = 4. Once the server is running, it
2027 // is not possible to override these values. If the values are not
2028 // specified in the command-line, the JACK server uses default values.
2030 // The JACK server does not have to be running when an instance of
2031 // RtApiJack is created, though the function getDeviceCount() will
2032 // report 0 devices found until JACK has been started. When no
2033 // devices are available (i.e., the JACK server is not running), a
2034 // stream cannot be opened.
2036 #include <jack/jack.h>
2040 // A structure to hold various information related to the Jack API
// Connection to the JACK server used by this stream.
jack_client_t *client;
// Registered JACK ports per direction: [0] = output (playback), [1] = input (capture).
jack_port_t **ports[2];
// JACK client (device) name per direction, used for auto-connection.
std::string deviceName[2];
// Signaled by the process callback when an externally requested drain completes.
pthread_cond_t condition;
int drainCounter; // Tracks callback counts when draining
bool internalDrain; // Indicates if stop is initiated from callback or not.
// Default state: no client, no ports, not draining, xrun flags cleared.
:client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
2055 #if !defined(__RTAUDIO_DEBUG__)
2056 static void jackSilentError( const char * ) {};
// Construct the JACK API instance. Streams auto-connect their ports to
// the device ports by default (disabled via RTAUDIO_JACK_DONT_CONNECT).
RtApiJack :: RtApiJack()
  :shouldAutoconnect_(true) {
  // Nothing to do here.
#if !defined(__RTAUDIO_DEBUG__)
  // Turn off Jack's internal error reporting.
  jack_set_error_function( &jackSilentError );
// Destructor: close any stream that is still open before teardown.
RtApiJack :: ~RtApiJack()
  if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count JACK "devices": each unique port-name prefix (text up to and
// including the first colon) reported by the server counts as one device.
// Returns 0 when no JACK server is reachable.
unsigned int RtApiJack :: getDeviceCount( void )
  // See if we can become a jack client.
  jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
  jack_status_t *status = NULL;
  jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
  if ( client == 0 ) return 0; // no server running -> no devices
  std::string port, previousPort;
  unsigned int nChannels = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
  // Parse the port names up to the first colon (:).
  port = (char *) ports[ nChannels ];
  iColon = port.find(":");
  if ( iColon != std::string::npos ) {
    port = port.substr( 0, iColon + 1 );
    // A previously unseen prefix marks another device.
    if ( port != previousPort ) {
      previousPort = port;
  } while ( ports[++nChannels] );
  // Release the temporary probe client.
  jack_client_close( client );
// Probe the JACK device at index 'device' and fill a DeviceInfo:
// name (port prefix), channel counts, sample rate and native format.
// Issues RTAUDIO_WARNING / RTAUDIO_INVALID_USE on failure paths.
RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
  RtAudio::DeviceInfo info;
  info.probed = false; // pessimistic until the probe completes
  jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
  jack_status_t *status = NULL;
  jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
  if ( client == 0 ) {
    errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
    error( RTAUDIO_WARNING );
  std::string port, previousPort;
  unsigned int nPorts = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
  // Parse the port names up to the first colon (:).
  port = (char *) ports[ nPorts ];
  iColon = port.find(":");
  if ( iColon != std::string::npos ) {
    port = port.substr( 0, iColon );
    if ( port != previousPort ) {
      // The n-th unique prefix is the requested device's name.
      if ( nDevices == device ) info.name = port;
      previousPort = port;
  } while ( ports[++nPorts] );
  if ( device >= nDevices ) {
    jack_client_close( client );
    errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
    error( RTAUDIO_INVALID_USE );
  // Get the current jack server sample rate.
  info.sampleRates.clear();
  info.preferredSampleRate = jack_get_sample_rate( client );
  info.sampleRates.push_back( info.preferredSampleRate );
  // Count the available ports containing the client name as device
  // channels. Jack "input ports" equal RtAudio output channels.
  unsigned int nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
  while ( ports[ nChannels ] ) nChannels++;
  info.outputChannels = nChannels;
  // Jack "output ports" equal RtAudio input channels.
  ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
  while ( ports[ nChannels ] ) nChannels++;
  info.inputChannels = nChannels;
  if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
    jack_client_close(client);
    errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
    error( RTAUDIO_WARNING );
  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
  // Jack always uses 32-bit floats.
  info.nativeFormats = RTAUDIO_FLOAT32;
  // Jack doesn't provide default devices so we'll use the first available one.
  if ( device == 0 && info.outputChannels > 0 )
    info.isDefaultOutput = true;
  if ( device == 0 && info.inputChannels > 0 )
    info.isDefaultInput = true;
  jack_client_close(client);
// JACK process callback: forward each nframes block to the RtApiJack
// object held in CallbackInfo. A false return from callbackEvent()
// becomes a nonzero (error) return to the JACK server.
static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
  CallbackInfo *info = (CallbackInfo *) infoPointer;
  RtApiJack *object = (RtApiJack *) info->object;
  if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2209 // This function will be called by a spawned thread when the Jack
2210 // server signals that it is shutting down. It is necessary to handle
2211 // it this way because the jackShutdown() function must return before
2212 // the jack_deactivate() function (in closeStream()) will return.
// Thread entry point spawned by jackShutdown(): marks the device as
// disconnected (so closeStream() reports RTAUDIO_DEVICE_DISCONNECT)
// and closes the stream from outside the JACK callback context.
static void *jackCloseStream( void *ptr )
  CallbackInfo *info = (CallbackInfo *) ptr;
  RtApiJack *object = (RtApiJack *) info->object;
  info->deviceDisconnected = true;
  object->closeStream();
  pthread_exit( NULL );
// Called by JACK when the server shuts this client down.
static void jackShutdown( void *infoPointer )
  CallbackInfo *info = (CallbackInfo *) infoPointer;
  RtApiJack *object = (RtApiJack *) info->object;
  // Check current stream state. If stopped, then we'll assume this
  // was called as a result of a call to RtApiJack::stopStream (the
  // deactivation of a client handle causes this function to be called).
  // If not, we'll assume the Jack server is shutting down or some
  // other problem occurred and we should close the stream.
  if ( object->isStreamRunning() == false ) return;
  // Close the stream from a separate thread (see jackCloseStream above).
  ThreadHandle threadId;
  pthread_create( &threadId, NULL, jackCloseStream, info );
// JACK xrun callback: latch an over/underrun flag for each direction
// that has registered ports. callbackEvent() reports these to the user
// callback as RTAUDIO_OUTPUT_UNDERFLOW / RTAUDIO_INPUT_OVERFLOW.
static int jackXrun( void *infoPointer )
  JackHandle *handle = *((JackHandle **) infoPointer);
  if ( handle->ports[0] ) handle->xrun[0] = true;
  if ( handle->ports[1] ) handle->xrun[1] = true;
// Open one direction (OUTPUT or INPUT) of a JACK stream:
//  - become a JACK client (first pass only; reused for the second
//    direction of a duplex stream),
//  - resolve the device index to a port-name prefix,
//  - validate channel count and sample rate against the server,
//  - allocate user/device buffers and the JackHandle,
//  - install process/xrun/shutdown callbacks and register ports.
// Returns true (SUCCESS) on success; the trailing free/close sequence
// appears to be the shared error-cleanup path — confirm against the
// full source, as intermediate lines are not visible here.
bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int sampleRate,
                                   RtAudioFormat format, unsigned int *bufferSize,
                                   RtAudio::StreamOptions *options )
  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  // Look for jack server and try to become a client (only do once per stream).
  jack_client_t *client = 0;
  if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
    jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
    jack_status_t *status = NULL;
    // Use the caller-supplied stream name when one was given.
    if ( options && !options->streamName.empty() )
      client = jack_client_open( options->streamName.c_str(), jackoptions, status );
      client = jack_client_open( "RtApiJack", jackoptions, status );
    if ( client == 0 ) {
      errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
      error( RTAUDIO_WARNING );
    // The handle must have been created on an earlier pass.
    client = handle->client;
  std::string port, previousPort, deviceName;
  unsigned int nPorts = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
  // Parse the port names up to the first colon (:).
  port = (char *) ports[ nPorts ];
  iColon = port.find(":");
  if ( iColon != std::string::npos ) {
    port = port.substr( 0, iColon );
    if ( port != previousPort ) {
      // Device index -> client-name prefix.
      if ( nDevices == device ) deviceName = port;
      previousPort = port;
  } while ( ports[++nPorts] );
  if ( device >= nDevices ) {
    errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
  // JACK "input" ports correspond to RtAudio output channels, and vice versa.
  unsigned long flag = JackPortIsInput;
  if ( mode == INPUT ) flag = JackPortIsOutput;
  if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
    // Count the available ports containing the client name as device
    // channels. Jack "input ports" equal RtAudio output channels.
    unsigned int nChannels = 0;
    ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
    while ( ports[ nChannels ] ) nChannels++;
    // Compare the jack ports for specified client to the requested number of channels.
    if ( nChannels < (channels + firstChannel) ) {
      errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
      errorText_ = errorStream_.str();
  // Check the jack server sample rate.
  unsigned int jackRate = jack_get_sample_rate( client );
  if ( sampleRate != jackRate ) {
    jack_client_close( client );
    errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
    errorText_ = errorStream_.str();
  stream_.sampleRate = jackRate;
  // Get the latency of the JACK port.
  ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
  if ( ports[ firstChannel ] ) {
    jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
    // the range (usually the min and max are equal)
    jack_latency_range_t latrange; latrange.min = latrange.max = 0;
    // get the latency range
    jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
    // be optimistic, use the min!
    stream_.latency[mode] = latrange.min;
    //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
  // The jack server always uses 32-bit floating-point data.
  stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
  stream_.userFormat = format;
  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
  else stream_.userInterleaved = true;
  // Jack always uses non-interleaved buffers.
  stream_.deviceInterleaved[mode] = false;
  // Jack always provides host byte-ordered data.
  stream_.doByteSwap[mode] = false;
  // Get the buffer size. The buffer size and number of buffers
  // (periods) is set when the jack server is started.
  stream_.bufferSize = (int) jack_get_buffer_size( client );
  *bufferSize = stream_.bufferSize;
  stream_.nDeviceChannels[mode] = channels;
  stream_.nUserChannels[mode] = channels;
  // Set flags for buffer conversion.
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
       stream_.nUserChannels[mode] > 1 )
    stream_.doConvertBuffer[mode] = true;
  // Allocate our JackHandle structure for the stream.
  if ( handle == 0 ) {
      handle = new JackHandle;
    catch ( std::bad_alloc& ) {
      errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
    if ( pthread_cond_init(&handle->condition, NULL) ) {
      errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
    stream_.apiHandle = (void *) handle;
    handle->client = client;
  handle->deviceName[mode] = deviceName;
  // Allocate necessary internal buffers.
  unsigned long bufferBytes;
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
  if ( stream_.doConvertBuffer[mode] ) {
    bool makeBuffer = true;
    if ( mode == OUTPUT )
      bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
    else { // mode == INPUT
      bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
      // In duplex mode, reuse the existing device buffer when it is
      // already large enough for both directions.
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
        if ( bufferBytes < bytesOut ) makeBuffer = false;
    bufferBytes *= *bufferSize;
    if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
    stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
    if ( stream_.deviceBuffer == NULL ) {
      errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
  // Allocate memory for the Jack ports (channels) identifiers.
  handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
  if ( handle->ports[mode] == NULL ) {
    errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
  stream_.device[mode] = device;
  stream_.channelOffset[mode] = firstChannel;
  stream_.state = STREAM_STOPPED;
  stream_.callbackInfo.object = (void *) this;
  if ( stream_.mode == OUTPUT && mode == INPUT )
    // We had already set up the stream for output.
    stream_.mode = DUPLEX;
    stream_.mode = mode;
    // Install the JACK callbacks (first pass only).
    jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
    jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
    jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
  // Register our ports.
  if ( mode == OUTPUT ) {
    for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
      snprintf( label, 64, "outport %d", i );
      handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
                                                JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
    for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
      snprintf( label, 64, "inport %d", i );
      handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
                                                JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
  // Setup the buffer conversion information structure. We don't use
  // buffers to do channel offsets, so we override that parameter
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
  if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
  // Error-cleanup path: release synchronization primitive, client,
  // port arrays, handle and all allocated buffers.
  pthread_cond_destroy( &handle->condition );
  jack_client_close( handle->client );
  if ( handle->ports[0] ) free( handle->ports[0] );
  if ( handle->ports[1] ) free( handle->ports[1] );
  stream_.apiHandle = 0;
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
// Close the stream: deactivate/close the JACK client, free the handle,
// report a disconnect (if one triggered this close), and release buffers.
void RtApiJack :: closeStream( void )
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiJack::closeStream(): no open stream to close!";
    error( RTAUDIO_WARNING );
  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  if ( stream_.state == STREAM_RUNNING )
    jack_deactivate( handle->client );
  jack_client_close( handle->client );
  if ( handle->ports[0] ) free( handle->ports[0] );
  if ( handle->ports[1] ) free( handle->ports[1] );
  pthread_cond_destroy( &handle->condition );
  stream_.apiHandle = 0;
  // Surface a server-initiated disconnect (set by jackCloseStream).
  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  if ( info->deviceDisconnected ) {
    errorText_ = "RtApiJack: the Jack server is shutting down this client ... stream stopped and closed!";
    error( RTAUDIO_DEVICE_DISCONNECT );
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  //stream_.mode = UNINITIALIZED;
  //stream_.state = STREAM_CLOSED;
// Activate the JACK client and (unless auto-connect is disabled)
// connect our registered ports to the device's ports, honoring the
// channel offset chosen at open time.
RtAudioErrorType RtApiJack :: startStream( void )
  if ( stream_.state != STREAM_STOPPED ) {
    if ( stream_.state == STREAM_RUNNING )
      errorText_ = "RtApiJack::startStream(): the stream is already running!";
    else if ( stream_.state == STREAM_STOPPING || stream_.state == STREAM_CLOSED )
      errorText_ = "RtApiJack::startStream(): the stream is stopping or closed!";
    return error( RTAUDIO_WARNING );
#if defined( HAVE_GETTIMEOFDAY )
  // Reset the stream-time reference for tickStreamTime().
  gettimeofday( &stream_.lastTickTimestamp, NULL );
  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  int result = jack_activate( handle->client );
    errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
  // Get the list of available ports.
  if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
    ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
    // Now make the port connections. Since RtAudio wasn't designed to
    // allow the user to select particular channels of a device, we'll
    // just open the first "nChannels" ports with offset.
    for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
      if ( ports[ stream_.channelOffset[0] + i ] )
        result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
        errorText_ = "RtApiJack::startStream(): error connecting output ports!";
  if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
    ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
    // Now make the port connections. See note above.
    for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
      if ( ports[ stream_.channelOffset[1] + i ] )
        result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
        errorText_ = "RtApiJack::startStream(): error connecting input ports!";
  // Reset drain state and mark the stream running.
  handle->drainCounter = 0;
  handle->internalDrain = false;
  stream_.state = STREAM_RUNNING;
  if ( result == 0 ) return RTAUDIO_NO_ERROR;
  return error( RTAUDIO_SYSTEM_ERROR );
// Stop the stream, first letting pending output drain: set the drain
// counter and block on the condition variable until the process
// callback signals completion, then deactivate the JACK client.
RtAudioErrorType RtApiJack :: stopStream( void )
  if ( stream_.state != STREAM_RUNNING && stream_.state != STREAM_STOPPING ) {
    if ( stream_.state == STREAM_STOPPED )
      errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
    else if ( stream_.state == STREAM_CLOSED )
      errorText_ = "RtApiJack::stopStream(): the stream is closed!";
    return error( RTAUDIO_WARNING );
  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
  jack_deactivate( handle->client );
  stream_.state = STREAM_STOPPED;
  return RTAUDIO_NO_ERROR;
// Abort the stream immediately: pre-set the drain counter so that
// stopStream() does not wait for output to drain.
RtAudioErrorType RtApiJack :: abortStream( void )
  if ( stream_.state != STREAM_RUNNING ) {
    if ( stream_.state == STREAM_STOPPED )
      errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
    else if ( stream_.state == STREAM_STOPPING || stream_.state == STREAM_CLOSED )
      errorText_ = "RtApiJack::abortStream(): the stream is stopping or closed!";
    return error( RTAUDIO_WARNING );
  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  handle->drainCounter = 2;
  return stopStream();
2672 // This function will be called by a spawned thread when the user
2673 // callback function signals that the stream should be stopped or
2674 // aborted. It is necessary to handle it this way because the
2675 // callbackEvent() function must return before the jack_deactivate()
2676 // function will return.
// Thread entry point used to call stopStream() from outside the JACK
// process callback (callbackEvent must return before jack_deactivate).
static void *jackStopStream( void *ptr )
  CallbackInfo *info = (CallbackInfo *) ptr;
  RtApiJack *object = (RtApiJack *) info->object;
  object->stopStream();
  pthread_exit( NULL );
// Per-block JACK processing: run the user callback (unless draining),
// move output data into the JACK port buffers (zero-filling while
// draining) and copy input port buffers to the user/device buffers,
// converting formats/interleaving as configured at open time.
bool RtApiJack :: callbackEvent( unsigned long nframes )
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiJack::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RTAUDIO_WARNING );
  // JACK can change its buffer size at runtime; we only support the
  // size captured when the stream was opened.
  if ( stream_.bufferSize != nframes ) {
    errorText_ = "RtApiJack::callbackEvent(): the JACK buffer size has changed ... cannot process!";
    error( RTAUDIO_WARNING );
  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  // Check if we were draining the stream and signal is finished.
  if ( handle->drainCounter > 3 ) {
    ThreadHandle threadId;
    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == true )
      pthread_create( &threadId, NULL, jackStopStream, info );
    else // external call to stopStream()
      pthread_cond_signal( &handle->condition );
  // Invoke user callback first, to get fresh output data.
  if ( handle->drainCounter == 0 ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    // Report and clear any xrun flags latched by jackXrun().
    if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      handle->xrun[0] = false;
    if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      handle->xrun[1] = false;
    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    // Callback return 2 = abort now; 1 = stop after draining output.
    if ( cbReturnValue == 2 ) {
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      pthread_create( &id, NULL, jackStopStream, info );
    else if ( cbReturnValue == 1 ) {
      handle->drainCounter = 1;
      handle->internalDrain = true;
  jack_default_audio_sample_t *jackbuffer;
  unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( handle->drainCounter > 1 ) { // write zeros to the output stream
      for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
        memset( jackbuffer, 0, bufferBytes );
    else if ( stream_.doConvertBuffer[0] ) {
      // Convert user data into the (non-interleaved) device buffer,
      // then copy one channel per JACK port.
      convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
        memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
    else { // no buffer conversion
      for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
        memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;
  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
    if ( stream_.doConvertBuffer[1] ) {
      // Gather all ports into the device buffer, then convert to the
      // user's format/interleaving.
      for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
        memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
    else { // no buffer conversion
      for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
        memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
  RtApi::tickStreamTime();
2799 //******************** End of __UNIX_JACK__ *********************//
2802 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2804 // The ASIO API is designed around a callback scheme, so this
2805 // implementation is similar to that used for OS-X CoreAudio and Linux
2806 // Jack. The primary constraint with ASIO is that it only allows
2807 // access to a single driver at a time. Thus, it is not possible to
2808 // have more than one simultaneous RtAudio stream.
2810 // This implementation also requires a number of external ASIO files
2811 // and a few global variables. The ASIO callback scheme does not
2812 // allow for the passing of user data, so we must create a global
2813 // pointer to our callbackInfo structure.
2815 // On unix systems, we make use of a pthread condition variable.
2816 // Since there is no equivalent in Windows, I hacked something based
2817 // on information found in
2818 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2820 #include "asiosys.h"
2822 #include "iasiothiscallresolver.h"
2823 #include "asiodrivers.h"
// File-scope ASIO state.  The ASIO callback scheme provides no user-data
// pointer (see comment above), so the driver list, callback table, driver
// info, and a pointer to the open stream's CallbackInfo must live here.
static AsioDrivers drivers;
static ASIOCallbacks asioCallbacks;
static ASIODriverInfo driverInfo;
static CallbackInfo *asioCallbackInfo;
// Read in callbackEvent() to report RTAUDIO_OUTPUT_UNDERFLOW /
// RTAUDIO_INPUT_OVERFLOW; presumably set from an ASIO driver message
// (the setter is elided in this chunk) -- TODO confirm.
static bool asioXRun;

  // AsioHandle members (the struct declaration line is elided in this chunk).
  int drainCounter;       // Tracks callback counts when draining
  bool internalDrain;     // Indicates if stop is initiated from callback or not.
  ASIOBufferInfo *bufferInfos;  // Per-channel buffer descriptors, malloc'd in probeDeviceOpen().

    // Default-construct with no drain in progress and no buffers allocated.
    :drainCounter(0), internalDrain(false), bufferInfos(0) {}

// Function declarations (definitions at end of section)
static const char* getAsioErrorString( ASIOError result );
static void sampleRateChanged( ASIOSampleRate sRate );
static long asioMessages( long selector, long value, void* message, double* opt );
// Constructor: initializes COM (ASIO requires single-threaded apartment),
// clears any loaded driver, and prepares the ASIODriverInfo used by ASIOInit().
RtApiAsio :: RtApiAsio()
  // ASIO cannot run on a multi-threaded apartment. You can call
  // CoInitialize beforehand, but it must be for apartment threading
  // (in which case, CoInitialize will return S_FALSE here).
  coInitialized_ = false;
  HRESULT hr = CoInitialize( NULL );
    // Failure branch: warn but keep going (the hr check itself is elided here).
    errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
    error( RtAudioError::WARNING );
    // Success branch: remember we must balance with CoUninitialize() in the dtor.
    coInitialized_ = true;

  drivers.removeCurrentDriver();
  driverInfo.asioVersion = 2;

  // See note in DirectSound implementation about GetDesktopWindow().
  driverInfo.sysRef = GetForegroundWindow();
// Destructor: close any open stream, then balance the constructor's
// CoInitialize() call.
RtApiAsio :: ~RtApiAsio()
  if ( stream_.state != STREAM_CLOSED ) closeStream();
  if ( coInitialized_ ) CoUninitialize();
// Returns the number of ASIO drivers registered on the system; each
// registered driver is treated as one "device".
unsigned int RtApiAsio :: getDeviceCount( void )
  return (unsigned int) drivers.asioGetNumDev();
// Probes an ASIO driver for its capabilities (channels, sample rates,
// native data formats).  Loads/initializes the driver, queries it, then
// unloads it again.  While a stream is open, ASIO cannot probe other
// drivers, so previously cached results (devices_) are returned instead.
RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
  RtAudio::DeviceInfo info;
  info.probed = false;  // remains false on any early failure path

  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );

  if ( device >= nDevices ) {
    errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );

  // If a stream is already open, we cannot probe other devices.  Thus, use the saved results.
  if ( stream_.state != STREAM_CLOSED ) {
    if ( device >= devices_.size() ) {
      errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
      error( RtAudioError::WARNING );
    // Cached by saveDeviceInfo() just before the stream was opened.
    return devices_[ device ];

  char driverName[32];
  ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  info.name = driverName;

  if ( !drivers.loadDriver( driverName ) ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  result = ASIOInit( &driverInfo );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Determine the device channel information.
  long inputChannels, outputChannels;
  result = ASIOGetChannels( &inputChannels, &outputChannels );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  info.outputChannels = outputChannels;
  info.inputChannels = inputChannels;
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Determine the supported sample rates.
  info.sampleRates.clear();
  for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
    result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
    if ( result == ASE_OK ) {
      info.sampleRates.push_back( SAMPLE_RATES[i] );

      // Preferred rate: the highest supported rate not exceeding 48 kHz.
      if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
        info.preferredSampleRate = SAMPLE_RATES[i];

  // Determine supported data types ... just check first channel and assume rest are the same.
  ASIOChannelInfo channelInfo;
  channelInfo.channel = 0;
  channelInfo.isInput = true;
  if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
  result = ASIOGetChannelInfo( &channelInfo );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Map the ASIO sample type to the corresponding RtAudio format flag
  // (both LSB and MSB variants map to the same RtAudio format).
  info.nativeFormats = 0;
  if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
    info.nativeFormats |= RTAUDIO_SINT16;
  else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
    info.nativeFormats |= RTAUDIO_SINT32;
  else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
    info.nativeFormats |= RTAUDIO_FLOAT32;
  else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
    info.nativeFormats |= RTAUDIO_FLOAT64;
  else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
    info.nativeFormats |= RTAUDIO_SINT24;

  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;

  // Done probing; unload the driver so other devices can be probed/opened.
  drivers.removeCurrentDriver();
// ASIO buffer-switch callback: ASIO passes no user data, so the target
// RtApiAsio instance is recovered from the file-scope asioCallbackInfo
// pointer and the work is delegated to its callbackEvent().
static void bufferSwitch( long index, ASIOBool /*processNow*/ )
  RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
  object->callbackEvent( index );
// Probes and caches info for every device.  Called before opening a stream
// because getDeviceInfo() cannot probe while a stream is open (ASIO allows
// only one loaded driver at a time); getDeviceInfo() then serves devices_.
void RtApiAsio :: saveDeviceInfo( void )
  unsigned int nDevices = getDeviceCount();
  devices_.resize( nDevices );
  for ( unsigned int i=0; i<nDevices; i++ )
    devices_[i] = getDeviceInfo( i );
3013 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
3014 unsigned int firstChannel, unsigned int sampleRate,
3015 RtAudioFormat format, unsigned int *bufferSize,
3016 RtAudio::StreamOptions *options )
3017 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3019 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
3021 // For ASIO, a duplex stream MUST use the same driver.
3022 if ( isDuplexInput && stream_.device[0] != device ) {
3023 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
3027 char driverName[32];
3028 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
3029 if ( result != ASE_OK ) {
3030 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
3031 errorText_ = errorStream_.str();
3035 // Only load the driver once for duplex stream.
3036 if ( !isDuplexInput ) {
3037 // The getDeviceInfo() function will not work when a stream is open
3038 // because ASIO does not allow multiple devices to run at the same
3039 // time. Thus, we'll probe the system before opening a stream and
3040 // save the results for use by getDeviceInfo().
3041 this->saveDeviceInfo();
3043 if ( !drivers.loadDriver( driverName ) ) {
3044 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
3045 errorText_ = errorStream_.str();
3049 result = ASIOInit( &driverInfo );
3050 if ( result != ASE_OK ) {
3051 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
3052 errorText_ = errorStream_.str();
3057 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
3058 bool buffersAllocated = false;
3059 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3060 unsigned int nChannels;
3063 // Check the device channel count.
3064 long inputChannels, outputChannels;
3065 result = ASIOGetChannels( &inputChannels, &outputChannels );
3066 if ( result != ASE_OK ) {
3067 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
3068 errorText_ = errorStream_.str();
3072 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
3073 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3074 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3075 errorText_ = errorStream_.str();
3078 stream_.nDeviceChannels[mode] = channels;
3079 stream_.nUserChannels[mode] = channels;
3080 stream_.channelOffset[mode] = firstChannel;
3082 // Verify the sample rate is supported.
3083 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3084 if ( result != ASE_OK ) {
3085 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3086 errorText_ = errorStream_.str();
3090 // Get the current sample rate
3091 ASIOSampleRate currentRate;
3092 result = ASIOGetSampleRate( ¤tRate );
3093 if ( result != ASE_OK ) {
3094 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3095 errorText_ = errorStream_.str();
3099 // Set the sample rate only if necessary
3100 if ( currentRate != sampleRate ) {
3101 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3102 if ( result != ASE_OK ) {
3103 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3104 errorText_ = errorStream_.str();
3109 // Determine the driver data type.
3110 ASIOChannelInfo channelInfo;
3111 channelInfo.channel = 0;
3112 if ( mode == OUTPUT ) channelInfo.isInput = false;
3113 else channelInfo.isInput = true;
3114 result = ASIOGetChannelInfo( &channelInfo );
3115 if ( result != ASE_OK ) {
3116 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3117 errorText_ = errorStream_.str();
3121 // Assuming WINDOWS host is always little-endian.
3122 stream_.doByteSwap[mode] = false;
3123 stream_.userFormat = format;
3124 stream_.deviceFormat[mode] = 0;
3125 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3126 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3127 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3129 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3130 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3131 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3133 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3134 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3135 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3137 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3138 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3139 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3141 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3142 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3143 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3146 if ( stream_.deviceFormat[mode] == 0 ) {
3147 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3148 errorText_ = errorStream_.str();
3152 // Set the buffer size. For a duplex stream, this will end up
3153 // setting the buffer size based on the input constraints, which
3155 long minSize, maxSize, preferSize, granularity;
3156 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3157 if ( result != ASE_OK ) {
3158 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3159 errorText_ = errorStream_.str();
3163 if ( isDuplexInput ) {
3164 // When this is the duplex input (output was opened before), then we have to use the same
3165 // buffersize as the output, because it might use the preferred buffer size, which most
3166 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3167 // So instead of throwing an error, make them equal. The caller uses the reference
3168 // to the "bufferSize" param as usual to set up processing buffers.
3170 *bufferSize = stream_.bufferSize;
3173 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3174 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3175 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3176 else if ( granularity == -1 ) {
3177 // Make sure bufferSize is a power of two.
3178 int log2_of_min_size = 0;
3179 int log2_of_max_size = 0;
3181 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3182 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3183 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3186 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3187 int min_delta_num = log2_of_min_size;
3189 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3190 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3191 if (current_delta < min_delta) {
3192 min_delta = current_delta;
3197 *bufferSize = ( (unsigned int)1 << min_delta_num );
3198 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3199 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3201 else if ( granularity != 0 ) {
3202 // Set to an even multiple of granularity, rounding up.
3203 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3208 // we don't use it anymore, see above!
3209 // Just left it here for the case...
3210 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3211 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3216 stream_.bufferSize = *bufferSize;
3217 stream_.nBuffers = 2;
3219 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3220 else stream_.userInterleaved = true;
3222 // ASIO always uses non-interleaved buffers.
3223 stream_.deviceInterleaved[mode] = false;
3225 // Allocate, if necessary, our AsioHandle structure for the stream.
3226 if ( handle == 0 ) {
3228 handle = new AsioHandle;
3230 catch ( std::bad_alloc& ) {
3231 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3234 handle->bufferInfos = 0;
3236 // Create a manual-reset event.
3237 handle->condition = CreateEvent( NULL, // no security
3238 TRUE, // manual-reset
3239 FALSE, // non-signaled initially
3241 stream_.apiHandle = (void *) handle;
3244 // Create the ASIO internal buffers. Since RtAudio sets up input
3245 // and output separately, we'll have to dispose of previously
3246 // created output buffers for a duplex stream.
3247 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3248 ASIODisposeBuffers();
3249 if ( handle->bufferInfos ) free( handle->bufferInfos );
3252 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3254 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3255 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3256 if ( handle->bufferInfos == NULL ) {
3257 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3258 errorText_ = errorStream_.str();
3262 ASIOBufferInfo *infos;
3263 infos = handle->bufferInfos;
3264 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3265 infos->isInput = ASIOFalse;
3266 infos->channelNum = i + stream_.channelOffset[0];
3267 infos->buffers[0] = infos->buffers[1] = 0;
3269 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3270 infos->isInput = ASIOTrue;
3271 infos->channelNum = i + stream_.channelOffset[1];
3272 infos->buffers[0] = infos->buffers[1] = 0;
3275 // prepare for callbacks
3276 stream_.sampleRate = sampleRate;
3277 stream_.device[mode] = device;
3278 stream_.mode = isDuplexInput ? DUPLEX : mode;
3280 // store this class instance before registering callbacks, that are going to use it
3281 asioCallbackInfo = &stream_.callbackInfo;
3282 stream_.callbackInfo.object = (void *) this;
3284 // Set up the ASIO callback structure and create the ASIO data buffers.
3285 asioCallbacks.bufferSwitch = &bufferSwitch;
3286 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3287 asioCallbacks.asioMessage = &asioMessages;
3288 asioCallbacks.bufferSwitchTimeInfo = NULL;
3289 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3290 if ( result != ASE_OK ) {
3291 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3292 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers (e.g. Creative's ASIO driver).
3293 // In that case, let's be naïve and try that instead.
3294 *bufferSize = preferSize;
3295 stream_.bufferSize = *bufferSize;
3296 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3299 if ( result != ASE_OK ) {
3300 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3301 errorText_ = errorStream_.str();
3304 buffersAllocated = true;
3305 stream_.state = STREAM_STOPPED;
3307 // Set flags for buffer conversion.
3308 stream_.doConvertBuffer[mode] = false;
3309 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3310 stream_.doConvertBuffer[mode] = true;
3311 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3312 stream_.nUserChannels[mode] > 1 )
3313 stream_.doConvertBuffer[mode] = true;
3315 // Allocate necessary internal buffers
3316 unsigned long bufferBytes;
3317 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3318 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3319 if ( stream_.userBuffer[mode] == NULL ) {
3320 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3324 if ( stream_.doConvertBuffer[mode] ) {
3326 bool makeBuffer = true;
3327 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3328 if ( isDuplexInput && stream_.deviceBuffer ) {
3329 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3330 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3334 bufferBytes *= *bufferSize;
3335 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3336 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3337 if ( stream_.deviceBuffer == NULL ) {
3338 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3344 // Determine device latencies
3345 long inputLatency, outputLatency;
3346 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3347 if ( result != ASE_OK ) {
3348 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3349 errorText_ = errorStream_.str();
3350 error( RtAudioError::WARNING); // warn but don't fail
3353 stream_.latency[0] = outputLatency;
3354 stream_.latency[1] = inputLatency;
3357 // Setup the buffer conversion information structure. We don't use
3358 // buffers to do channel offsets, so we override that parameter
3360 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3365 if ( !isDuplexInput ) {
3366 // the cleanup for error in the duplex input, is done by RtApi::openStream
3367 // So we clean up for single channel only
3369 if ( buffersAllocated )
3370 ASIODisposeBuffers();
3372 drivers.removeCurrentDriver();
3375 CloseHandle( handle->condition );
3376 if ( handle->bufferInfos )
3377 free( handle->bufferInfos );
3380 stream_.apiHandle = 0;
3384 if ( stream_.userBuffer[mode] ) {
3385 free( stream_.userBuffer[mode] );
3386 stream_.userBuffer[mode] = 0;
3389 if ( stream_.deviceBuffer ) {
3390 free( stream_.deviceBuffer );
3391 stream_.deviceBuffer = 0;
3396 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Closes the open stream: stops it if running, disposes the ASIO buffers,
// unloads the driver, and frees the AsioHandle and all internal buffers.
void RtApiAsio :: closeStream()
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );

  if ( stream_.state == STREAM_RUNNING ) {
    stream_.state = STREAM_STOPPED;

  ASIODisposeBuffers();
  drivers.removeCurrentDriver();

  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
    // Release the condition event and the bufferInfos array
    // (the "if ( handle )" guard line is elided in this chunk).
    CloseHandle( handle->condition );
    if ( handle->bufferInfos )
      free( handle->bufferInfos );

    stream_.apiHandle = 0;

  // Free user buffers for both directions (0 = output, 1 = input).
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
// File-scope flag reset by startStream(); presumably set when the spawned
// asioStopStream() thread initiates a stop (setter elided in this chunk) --
// TODO confirm against the full source.
bool stopThreadCalled = false;
// Starts the open stream via ASIOStart() and resets the drain/condition
// state used by stopStream()/callbackEvent().
void RtApiAsio :: startStream()
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiAsio::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );

  // Record the stream start time for getStreamTime() bookkeeping.
#if defined( HAVE_GETTIMEOFDAY )
  gettimeofday( &stream_.lastTickTimestamp, NULL );

  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  ASIOError result = ASIOStart();
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
    errorText_ = errorStream_.str();

  handle->drainCounter = 0;
  handle->internalDrain = false;
  ResetEvent( handle->condition );  // manual-reset event used by stopStream() to wait for drain
  stream_.state = STREAM_RUNNING;

  stopThreadCalled = false;

  if ( result == ASE_OK ) return;
  error( RtAudioError::SYSTEM_ERROR );
// Stops the stream, first letting output drain: drainCounter = 2 tells
// callbackEvent() to write zeros and signal handle->condition when done.
void RtApiAsio :: stopStream()
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );

  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      WaitForSingleObject( handle->condition, INFINITE );  // block until signaled

  stream_.state = STREAM_STOPPED;

  ASIOError result = ASIOStop();
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
    errorText_ = errorStream_.str();

  if ( result == ASE_OK ) return;
  error( RtAudioError::SYSTEM_ERROR );
// Aborts the stream.  Deliberately identical to stopStream() (see the
// retained commented-out code below for why the fast path was removed).
void RtApiAsio :: abortStream()
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );

  // The following lines were commented-out because some behavior was
  // noted where the device buffers need to be zeroed to avoid
  // continuing sound, even when the device buffers are completely
  // disposed.  So now, calling abort is the same as calling stop.
  // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  // handle->drainCounter = 2;
// This function will be called by a spawned thread when the user
// callback function signals that the stream should be stopped or
// aborted.  It is necessary to handle it this way because the
// callbackEvent() function must return before the ASIOStop()
// function will return.
static unsigned __stdcall asioStopStream( void *ptr )
  // Recover the RtApiAsio instance from the CallbackInfo passed at thread creation.
  CallbackInfo *info = (CallbackInfo *) ptr;
  RtApiAsio *object = (RtApiAsio *) info->object;

  object->stopStream();
// Per-buffer-switch processing: invokes the user callback, then moves data
// between the user buffers and the per-channel ASIO buffers (with format
// conversion and byte swapping as flagged in probeDeviceOpen()).
// bufferIndex selects which of ASIO's two half-buffers to fill/read.
bool RtApiAsio :: callbackEvent( long bufferIndex )
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal if finished.
  if ( handle->drainCounter > 3 ) {
    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == false )
      // stopStream() is blocked on this event; wake it.
      SetEvent( handle->condition );
    else { // spawn a thread to stop the stream
      // (the threadId declaration is elided in this chunk)
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );

  // Invoke user callback to get fresh output data UNLESS we are
  if ( handle->drainCounter == 0 ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    if ( stream_.mode != INPUT && asioXRun == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
    if ( stream_.mode != OUTPUT && asioXRun == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    // cbReturnValue == 2: abort immediately via the stop thread.
    if ( cbReturnValue == 2 ) {
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );
    // cbReturnValue == 1: stop after draining remaining output.
    else if ( cbReturnValue == 1 ) {
      handle->drainCounter = 1;
      handle->internalDrain = true;

  unsigned int nChannels, bufferBytes, i, j;
  nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );

    else if ( stream_.doConvertBuffer[0] ) {

      // Convert user format/interleaving to the device format, then
      // de-interleave into the per-channel ASIO output buffers.
      convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[0],
                        stream_.deviceFormat[0] );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );

      // No conversion needed: copy user channels straight to the device.
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.userBuffer[0],
                        stream_.bufferSize * stream_.nUserChannels[0],
                        stream_.userFormat );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );

  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);

    if (stream_.doConvertBuffer[1]) {

      // Always interleave ASIO input data.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue )
          memcpy( &stream_.deviceBuffer[j++*bufferBytes],
                  handle->bufferInfos[i].buffers[bufferIndex],

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[1],
                        stream_.deviceFormat[1] );
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

      // No conversion: copy device channels straight into the user buffer.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
          memcpy( &stream_.userBuffer[1][bufferBytes*j++],
                  handle->bufferInfos[i].buffers[bufferIndex],

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.userBuffer[1],
                        stream_.bufferSize * stream_.nUserChannels[1],
                        stream_.userFormat );

  // The following call was suggested by Malte Clasen.  While the API
  // documentation indicates it should not be required, some device
  // drivers apparently do not function correctly without it.

  RtApi::tickStreamTime();
// ASIO callback: the driver reports an externally-triggered sample rate
// change.  RtAudio responds by stopping the stream (it cannot follow the
// new rate) and logging the event to stderr.
static void sampleRateChanged( ASIOSampleRate sRate )
  // The ASIO documentation says that this usually only happens during
  // external sync.  Audio processing is not stopped by the driver,
  // actual sample rate might not have even changed, maybe only the
  // sample rate status of an AES/EBU or S/PDIF digital input at the

  RtApi *object = (RtApi *) asioCallbackInfo->object;
    object->stopStream();
  catch ( RtAudioError &exception ) {
    std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;

  std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
// ASIO "message" callback: the driver queries host capabilities and
// notifies the host of reset/resync/latency events.  Returns a selector-
// dependent value (the ret assignments/return are elided in this chunk).
static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
  switch( selector ) {
  case kAsioSelectorSupported:
    // Report which of the selectors below this host implements.
    if ( value == kAsioResetRequest
         || value == kAsioEngineVersion
         || value == kAsioResyncRequest
         || value == kAsioLatenciesChanged
         // The following three were added for ASIO 2.0, you don't
         // necessarily have to support them.
         || value == kAsioSupportsTimeInfo
         || value == kAsioSupportsTimeCode
         || value == kAsioSupportsInputMonitor)
  case kAsioResetRequest:
    // Defer the task and perform the reset of the driver during the
    // next "safe" situation.  You cannot reset the driver right now,
    // as this code is called from the driver.  Reset the driver is
    // done by completely destruct is. I.e. ASIOStop(),
    // ASIODisposeBuffers(), Destruction Afterwards you initialize the
    std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
  case kAsioResyncRequest:
    // This informs the application that the driver encountered some
    // non-fatal data loss.  It is used for synchronization purposes
    // of different media.  Added mainly to work around the Win16Mutex
    // problems in Windows 95/98 with the Windows Multimedia system,
    // which could lose data because the Mutex was held too long by
    // another thread.  However a driver can issue it in other
    // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
  case kAsioLatenciesChanged:
    // This will inform the host application that the drivers were
    // latencies changed.  Beware, it this does not mean that the
    // buffer sizes have changed!  You might need to update internal
    std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
  case kAsioEngineVersion:
    // Return the supported ASIO version of the host application.  If
    // a host application does not implement this selector, ASIO 1.0
    // is assumed by the driver.
  case kAsioSupportsTimeInfo:
    // Informs the driver whether the
    // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
    // For compatibility with ASIO 1.0 drivers the host application
    // should always support the "old" bufferSwitch method, too.
  case kAsioSupportsTimeCode:
    // Informs the driver whether application is interested in time
    // code info.  If an application does not need to know about time
    // code, the driver has less work to do.
// Translate an ASIOError code into a human-readable message via a linear
// scan of a static lookup table; falls back to "Unknown error." when the
// code is not found.
// NOTE(review): the declaration of the local `Messages` struct (a
// value/message pair, judging from the member accesses below) is among
// the lines not visible in this chunk.
3780 static const char* getAsioErrorString( ASIOError result )
3788 static const Messages m[] =
3790 { ASE_NotPresent, "Hardware input or output is not present or available." },
3791 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3792 { ASE_InvalidParameter, "Invalid input parameter." },
3793 { ASE_InvalidMode, "Invalid mode." },
3794 { ASE_SPNotAdvancing, "Sample position not advancing." },
3795 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3796 { ASE_NoMemory, "Not enough memory to complete the request." }
3799 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3800 if ( m[i].value == result ) return m[i].message;
3802 return "Unknown error.";
3805 //******************** End of __WINDOWS_ASIO__ *********************//
3809 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3811 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3812 // - Introduces support for the Windows WASAPI API
3813 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3814 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3815 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3822 #include <mferror.h>
3824 #include <mftransform.h>
3825 #include <wmcodecdsp.h>
3827 #include <audioclient.h>
3829 #include <mmdeviceapi.h>
3830 #include <functiondiscoverykeys_devpkey.h>
3832 #ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
3833 #define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
3836 #ifndef MFSTARTUP_NOSOCKET
3837 #define MFSTARTUP_NOSOCKET 0x1
3841 #pragma comment( lib, "ksuser" )
3842 #pragma comment( lib, "mfplat.lib" )
3843 #pragma comment( lib, "mfuuid.lib" )
3844 #pragma comment( lib, "wmcodecdspuuid" )
3847 //=============================================================================
3849 #define SAFE_RELEASE( objectPtr )\
3852 objectPtr->Release();\
3856 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3858 //-----------------------------------------------------------------------------
3860 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3861 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3862 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3863 // provide intermediate storage for read / write synchronization.
3877 // sets the length of the internal ring buffer
// Allocate the ring buffer to hold `bufferSize` samples of
// `formatBytes`-byte width; calloc() zero-fills the new storage.
// NOTE(review): the lines that free any previously allocated buffer
// (and reset the in/out indices, if present) are not visible in this chunk.
3878 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3881 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3883 bufferSize_ = bufferSize;
3888 // attempt to push a buffer into the ring buffer at the current "in" index
// Attempt to copy `bufferSize` samples of the given format from `buffer`
// into the ring buffer at the current "in" index.  Returns false on a
// null/empty/oversized input or when there is not enough free space
// ahead of the "out" index.  The copy is split into two memcpy calls
// when it wraps past the end of the ring (fromInSize samples up to the
// end, fromZeroSize samples from the start).
// NOTE(review): the `switch ( format )` header, the per-case break
// statements and the final `return true` / closing brace are among the
// lines not visible in this chunk.
3889 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3891 if ( !buffer || // incoming buffer is NULL
3892 bufferSize == 0 || // incoming buffer has no data
3893 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "out" index so the overlap test below works across the
// ring's wrap-around point.
3898 unsigned int relOutIndex = outIndex_;
3899 unsigned int inIndexEnd = inIndex_ + bufferSize;
3900 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3901 relOutIndex += bufferSize_;
3904 // the "IN" index CAN BEGIN at the "OUT" index
3905 // the "IN" index CANNOT END at the "OUT" index
3906 if ( inIndex_ < relOutIndex && inIndexEnd >= relOutIndex ) {
3907 return false; // not enough space between "in" index and "out" index
3910 // copy buffer from external to internal
3911 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3912 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3913 int fromInSize = bufferSize - fromZeroSize;
3918 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3919 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3921 case RTAUDIO_SINT16:
3922 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3923 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3925 case RTAUDIO_SINT24:
3926 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3927 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3929 case RTAUDIO_SINT32:
3930 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3931 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3933 case RTAUDIO_FLOAT32:
3934 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3935 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3937 case RTAUDIO_FLOAT64:
3938 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3939 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3943 // update "in" index
3944 inIndex_ += bufferSize;
3945 inIndex_ %= bufferSize_;
3950 // attempt to pull a buffer from the ring buffer from the current "out" index
// Attempt to copy `bufferSize` samples of the given format out of the
// ring buffer, starting at the current "out" index, into `buffer`.
// Returns false on a null/empty/oversized request or when fewer than
// `bufferSize` samples are available between "out" and "in".  Mirror of
// pushBuffer(): the copy splits in two when it wraps past the ring's end.
// NOTE(review): the `switch ( format )` header, per-case breaks and the
// final `return true` / closing brace are not visible in this chunk.
3951 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3953 if ( !buffer || // incoming buffer is NULL
3954 bufferSize == 0 || // incoming buffer has no data
3955 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "in" index so the availability test below works across
// the ring's wrap-around point.
3960 unsigned int relInIndex = inIndex_;
3961 unsigned int outIndexEnd = outIndex_ + bufferSize;
3962 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3963 relInIndex += bufferSize_;
3966 // the "OUT" index CANNOT BEGIN at the "IN" index
3967 // the "OUT" index CAN END at the "IN" index
3968 if ( outIndex_ <= relInIndex && outIndexEnd > relInIndex ) {
3969 return false; // not enough space between "out" index and "in" index
3972 // copy buffer from internal to external
3973 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3974 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3975 int fromOutSize = bufferSize - fromZeroSize;
3980 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3981 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3983 case RTAUDIO_SINT16:
3984 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3985 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3987 case RTAUDIO_SINT24:
3988 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3989 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3991 case RTAUDIO_SINT32:
3992 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3993 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3995 case RTAUDIO_FLOAT32:
3996 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3997 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3999 case RTAUDIO_FLOAT64:
4000 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
4001 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
4005 // update "out" index
4006 outIndex_ += bufferSize;
4007 outIndex_ %= bufferSize_;
4014 unsigned int bufferSize_;
4015 unsigned int inIndex_;
4016 unsigned int outIndex_;
4019 //-----------------------------------------------------------------------------
4021 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
4022 // between HW and the user. The WasapiResampler class is used to perform this conversion between
4023 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
4024 class WasapiResampler
// Construct a Media Foundation resampler that converts audio of the given
// sample format (float/PCM, bitsPerSample, channelCount) from inSampleRate
// to outSampleRate.  Steps 1-4: start up MF, instantiate the
// CResamplerMediaObject transform, set input/output media types that
// differ only in sample rate and average byte rate, then send the
// stream-start messages.
// NOTE(review): the HRESULTs of these MF/COM calls are not checked here,
// and the constructor's closing brace is not visible in this chunk.
4027 WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
4028 unsigned int inSampleRate, unsigned int outSampleRate )
4029 : _bytesPerSample( bitsPerSample / 8 )
4030 , _channelCount( channelCount )
4031 , _sampleRatio( ( float ) outSampleRate / inSampleRate )
4032 , _transformUnk( NULL )
4033 , _transform( NULL )
4034 , _mediaType( NULL )
4035 , _inputMediaType( NULL )
4036 , _outputMediaType( NULL )
4038 #ifdef __IWMResamplerProps_FWD_DEFINED__
4039 , _resamplerProps( NULL )
4042 // 1. Initialization
4044 MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );
4046 // 2. Create Resampler Transform Object
4048 CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
4049 IID_IUnknown, ( void** ) &_transformUnk );
4051 _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );
4053 #ifdef __IWMResamplerProps_FWD_DEFINED__
4054 _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
4055 _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
4058 // 3. Specify input / output format
4060 MFCreateMediaType( &_mediaType );
4061 _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
4062 _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
4063 _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
4064 _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
4065 _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
4066 _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
4067 _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
4068 _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
// Input type is an exact copy of the template media type.
4070 MFCreateMediaType( &_inputMediaType );
4071 _mediaType->CopyAllItems( _inputMediaType );
4073 _transform->SetInputType( 0, _inputMediaType, 0 );
// Output type is the same, except for the target sample rate.
4075 MFCreateMediaType( &_outputMediaType );
4076 _mediaType->CopyAllItems( _outputMediaType );
4078 _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
4079 _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );
4081 _transform->SetOutputType( 0, _outputMediaType, 0 );
4083 // 4. Send stream start messages to Resampler
4085 _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
4086 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
4087 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
4092 // 8. Send stream stop messages to Resampler
4094 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
4095 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );
4101 SAFE_RELEASE( _transformUnk );
4102 SAFE_RELEASE( _transform );
4103 SAFE_RELEASE( _mediaType );
4104 SAFE_RELEASE( _inputMediaType );
4105 SAFE_RELEASE( _outputMediaType );
4107 #ifdef __IWMResamplerProps_FWD_DEFINED__
4108 SAFE_RELEASE( _resamplerProps );
// Resample `inSampleCount` frames from inBuffer into outBuffer, reporting
// the number of frames actually produced through outSampleCount.  When no
// rate conversion is needed (_sampleRatio == 1) the data is memcpy'd
// straight through.  Otherwise the input is wrapped in an IMFSample,
// pushed into the transform, and the (contiguous) output buffer is copied
// out.  `outBuffer` must therefore be large enough for
// ceil(inputBytes * _sampleRatio) plus one frame of slack.
// NOTE(review): the declaration of `rStatus` (used in ProcessOutput) and
// the function's early-return / closing lines are not visible in this
// chunk.
4112 void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount )
4114 unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
4115 if ( _sampleRatio == 1 )
4117 // no sample rate conversion required
4118 memcpy( outBuffer, inBuffer, inputBufferSize );
4119 outSampleCount = inSampleCount;
// Worst-case output size: scaled input plus one frame of rounding slack.
4123 unsigned int outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
4125 IMFMediaBuffer* rInBuffer;
4126 IMFSample* rInSample;
4127 BYTE* rInByteBuffer = NULL;
4129 // 5. Create Sample object from input data
4131 MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );
4133 rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
4134 memcpy( rInByteBuffer, inBuffer, inputBufferSize );
4135 rInBuffer->Unlock();
4136 rInByteBuffer = NULL;
4138 rInBuffer->SetCurrentLength( inputBufferSize );
4140 MFCreateSample( &rInSample );
4141 rInSample->AddBuffer( rInBuffer );
4143 // 6. Pass input data to Resampler
4145 _transform->ProcessInput( 0, rInSample, 0 );
4147 SAFE_RELEASE( rInBuffer );
4148 SAFE_RELEASE( rInSample );
4150 // 7. Perform sample rate conversion
4152 IMFMediaBuffer* rOutBuffer = NULL;
4153 BYTE* rOutByteBuffer = NULL;
4155 MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
4157 DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput
4159 // 7.1 Create Sample object for output data
4161 memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
4162 MFCreateSample( &( rOutDataBuffer.pSample ) );
4163 MFCreateMemoryBuffer( rBytes, &rOutBuffer );
4164 rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
4165 rOutDataBuffer.dwStreamID = 0;
4166 rOutDataBuffer.dwStatus = 0;
4167 rOutDataBuffer.pEvents = NULL;
4169 // 7.2 Get output data from Resampler
// MF_E_TRANSFORM_NEED_MORE_INPUT means the transform produced nothing
// for this call; release the output objects and bail out.
4171 if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
4174 SAFE_RELEASE( rOutBuffer );
4175 SAFE_RELEASE( rOutDataBuffer.pSample );
4179 // 7.3 Write output data to outBuffer
4181 SAFE_RELEASE( rOutBuffer );
4182 rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
4183 rOutBuffer->GetCurrentLength( &rBytes );
4185 rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
4186 memcpy( outBuffer, rOutByteBuffer, rBytes );
4187 rOutBuffer->Unlock();
4188 rOutByteBuffer = NULL;
4190 outSampleCount = rBytes / _bytesPerSample / _channelCount;
4191 SAFE_RELEASE( rOutBuffer );
4192 SAFE_RELEASE( rOutDataBuffer.pSample );
4196 unsigned int _bytesPerSample;
4197 unsigned int _channelCount;
4200 IUnknown* _transformUnk;
4201 IMFTransform* _transform;
4202 IMFMediaType* _mediaType;
4203 IMFMediaType* _inputMediaType;
4204 IMFMediaType* _outputMediaType;
4206 #ifdef __IWMResamplerProps_FWD_DEFINED__
4207 IWMResamplerProps* _resamplerProps;
4211 //-----------------------------------------------------------------------------
4213 // A structure to hold various information related to the WASAPI implementation.
4216 IAudioClient* captureAudioClient;
4217 IAudioClient* renderAudioClient;
4218 IAudioCaptureClient* captureClient;
4219 IAudioRenderClient* renderClient;
4220 HANDLE captureEvent;
4224 : captureAudioClient( NULL ),
4225 renderAudioClient( NULL ),
4226 captureClient( NULL ),
4227 renderClient( NULL ),
4228 captureEvent( NULL ),
4229 renderEvent( NULL ) {}
4232 //=============================================================================
// Initialize COM for this thread (WASAPI works with either apartment or
// multi-threaded models; a pre-existing initialization makes CoInitialize
// fail harmlessly, hence coInitialized_ tracks whether *we* own it) and
// create the MMDeviceEnumerator used by all subsequent device queries.
// On old Windows versions the enumerator class is unavailable; the
// failure is swallowed and deviceEnumerator_ stays NULL.
4234 RtApiWasapi::RtApiWasapi()
4235 : coInitialized_( false ), deviceEnumerator_( NULL )
4237 // WASAPI can run either apartment or multi-threaded
4238 HRESULT hr = CoInitialize( NULL );
4239 if ( !FAILED( hr ) )
4240 coInitialized_ = true;
4242 // Instantiate device enumerator
4243 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4244 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4245 ( void** ) &deviceEnumerator_ );
4247 // If this runs on an old Windows, it will fail. Ignore and proceed.
4249 deviceEnumerator_ = NULL;
4252 //-----------------------------------------------------------------------------
// Close any stream still open, release the device enumerator, and
// balance the CoInitialize() done in the constructor — but only if that
// call actually succeeded (coInitialized_).
4254 RtApiWasapi::~RtApiWasapi()
4256 if ( stream_.state != STREAM_CLOSED )
4259 SAFE_RELEASE( deviceEnumerator_ );
4261 // If this object previously called CoInitialize()
4262 if ( coInitialized_ )
4266 //=============================================================================
// Return the total number of active WASAPI endpoints (capture + render).
// Returns early when the enumerator was never created (old Windows); on
// any endpoint-query failure errorText_ is set and error() is invoked
// after the common cleanup.
// NOTE(review): the early-exit jumps after each FAILED(hr) check and the
// final return are not visible in this chunk — presumably a goto to the
// cleanup label; confirm against the full file.
4268 unsigned int RtApiWasapi::getDeviceCount( void )
4270 unsigned int captureDeviceCount = 0;
4271 unsigned int renderDeviceCount = 0;
4273 IMMDeviceCollection* captureDevices = NULL;
4274 IMMDeviceCollection* renderDevices = NULL;
4276 if ( !deviceEnumerator_ )
4279 // Count capture devices
4281 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4282 if ( FAILED( hr ) ) {
4283 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4287 hr = captureDevices->GetCount( &captureDeviceCount );
4288 if ( FAILED( hr ) ) {
4289 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4293 // Count render devices
4294 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4295 if ( FAILED( hr ) ) {
4296 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4300 hr = renderDevices->GetCount( &renderDeviceCount );
4301 if ( FAILED( hr ) ) {
4302 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4307 // release all references
4308 SAFE_RELEASE( captureDevices );
4309 SAFE_RELEASE( renderDevices );
4311 if ( errorText_.empty() )
4312 return captureDeviceCount + renderDeviceCount;
4314 error( RtAudioError::DRIVER_ERROR );
4318 //-----------------------------------------------------------------------------
// Probe one WASAPI device and fill an RtAudio::DeviceInfo: friendly name,
// default-input/output flags, channel counts, supported sample rates
// (all of SAMPLE_RATES, since a built-in resampler handles conversion)
// and native formats derived from the device's mix format.  Device
// indices enumerate render devices first [0, renderCount), then capture
// devices [renderCount, renderCount + captureCount).
// NOTE(review): the early-exit jumps taken after each FAILED(hr) check
// are not visible in this chunk — presumably jumps to the common cleanup
// at the bottom; confirm against the full file.
4320 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4322 RtAudio::DeviceInfo info;
4323 unsigned int captureDeviceCount = 0;
4324 unsigned int renderDeviceCount = 0;
4325 std::string defaultDeviceName;
4326 bool isCaptureDevice = false;
4328 PROPVARIANT deviceNameProp;
4329 PROPVARIANT defaultDeviceNameProp;
4331 IMMDeviceCollection* captureDevices = NULL;
4332 IMMDeviceCollection* renderDevices = NULL;
4333 IMMDevice* devicePtr = NULL;
4334 IMMDevice* defaultDevicePtr = NULL;
4335 IAudioClient* audioClient = NULL;
4336 IPropertyStore* devicePropStore = NULL;
4337 IPropertyStore* defaultDevicePropStore = NULL;
4339 WAVEFORMATEX* deviceFormat = NULL;
4340 WAVEFORMATEX* closestMatchFormat = NULL;
// Pessimistically mark the probe as failed until it completes.
4343 info.probed = false;
4345 // Count capture devices
4347 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4348 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4349 if ( FAILED( hr ) ) {
4350 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4354 hr = captureDevices->GetCount( &captureDeviceCount );
4355 if ( FAILED( hr ) ) {
4356 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4360 // Count render devices
4361 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4362 if ( FAILED( hr ) ) {
4363 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4367 hr = renderDevices->GetCount( &renderDeviceCount );
4368 if ( FAILED( hr ) ) {
4369 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4373 // validate device index
4374 if ( device >= captureDeviceCount + renderDeviceCount ) {
4375 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4376 errorType = RtAudioError::INVALID_USE;
4380 // determine whether index falls within capture or render devices
4381 if ( device >= renderDeviceCount ) {
4382 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4383 if ( FAILED( hr ) ) {
4384 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4387 isCaptureDevice = true;
4390 hr = renderDevices->Item( device, &devicePtr );
4391 if ( FAILED( hr ) ) {
4392 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4395 isCaptureDevice = false;
4398 // get default device name
4399 if ( isCaptureDevice ) {
4400 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4401 if ( FAILED( hr ) ) {
4402 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4407 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4408 if ( FAILED( hr ) ) {
4409 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4414 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4415 if ( FAILED( hr ) ) {
4416 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4419 PropVariantInit( &defaultDeviceNameProp );
4421 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4422 if ( FAILED( hr ) ) {
4423 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4427 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4430 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4431 if ( FAILED( hr ) ) {
4432 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4436 PropVariantInit( &deviceNameProp );
4438 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4439 if ( FAILED( hr ) ) {
4440 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4444 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
// Default-device detection compares friendly names, not endpoint IDs.
4447 if ( isCaptureDevice ) {
4448 info.isDefaultInput = info.name == defaultDeviceName;
4449 info.isDefaultOutput = false;
4452 info.isDefaultInput = false;
4453 info.isDefaultOutput = info.name == defaultDeviceName;
4457 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4458 if ( FAILED( hr ) ) {
4459 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4463 hr = audioClient->GetMixFormat( &deviceFormat );
4464 if ( FAILED( hr ) ) {
4465 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4469 if ( isCaptureDevice ) {
4470 info.inputChannels = deviceFormat->nChannels;
4471 info.outputChannels = 0;
4472 info.duplexChannels = 0;
4475 info.inputChannels = 0;
4476 info.outputChannels = deviceFormat->nChannels;
4477 info.duplexChannels = 0;
4481 info.sampleRates.clear();
4483 // allow support for all sample rates as we have a built-in sample rate converter
4484 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4485 info.sampleRates.push_back( SAMPLE_RATES[i] );
4487 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
// Derive RtAudio native-format flags from the mix format's tag (or,
// for WAVE_FORMAT_EXTENSIBLE, its SubFormat) and bit depth.
4490 info.nativeFormats = 0;
4492 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4493 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4494 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4496 if ( deviceFormat->wBitsPerSample == 32 ) {
4497 info.nativeFormats |= RTAUDIO_FLOAT32;
4499 else if ( deviceFormat->wBitsPerSample == 64 ) {
4500 info.nativeFormats |= RTAUDIO_FLOAT64;
4503 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4504 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4505 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4507 if ( deviceFormat->wBitsPerSample == 8 ) {
4508 info.nativeFormats |= RTAUDIO_SINT8;
4510 else if ( deviceFormat->wBitsPerSample == 16 ) {
4511 info.nativeFormats |= RTAUDIO_SINT16;
4513 else if ( deviceFormat->wBitsPerSample == 24 ) {
4514 info.nativeFormats |= RTAUDIO_SINT24;
4516 else if ( deviceFormat->wBitsPerSample == 32 ) {
4517 info.nativeFormats |= RTAUDIO_SINT32;
4525 // release all references
4526 PropVariantClear( &deviceNameProp );
4527 PropVariantClear( &defaultDeviceNameProp );
4529 SAFE_RELEASE( captureDevices );
4530 SAFE_RELEASE( renderDevices );
4531 SAFE_RELEASE( devicePtr );
4532 SAFE_RELEASE( defaultDevicePtr );
4533 SAFE_RELEASE( audioClient );
4534 SAFE_RELEASE( devicePropStore );
4535 SAFE_RELEASE( defaultDevicePropStore );
4537 CoTaskMemFree( deviceFormat );
4538 CoTaskMemFree( closestMatchFormat );
4540 if ( !errorText_.empty() )
4545 //-----------------------------------------------------------------------------
// Linear search over all probed devices for the one flagged as the
// system default output.
// NOTE(review): the return statements (index i on a match, and the
// fallback value) are not visible in this chunk.
4547 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4549 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4550 if ( getDeviceInfo( i ).isDefaultOutput ) {
4558 //-----------------------------------------------------------------------------
// Linear search over all probed devices for the one flagged as the
// system default input.
// NOTE(review): the return statements (index i on a match, and the
// fallback value) are not visible in this chunk.
4560 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4562 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4563 if ( getDeviceInfo( i ).isDefaultInput ) {
4571 //-----------------------------------------------------------------------------
// Tear down an open stream: release all WASAPI COM interfaces and Win32
// event handles held in the WasapiHandle, delete the handle itself, free
// the user/device buffers, and mark the stream closed.  Emits a warning
// (and presumably returns early — that line is not visible here) when no
// stream is open; stops the stream first if it is still running.
4573 void RtApiWasapi::closeStream( void )
4575 if ( stream_.state == STREAM_CLOSED ) {
4576 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4577 error( RtAudioError::WARNING );
4581 if ( stream_.state != STREAM_STOPPED )
4584 // clean up stream memory
4585 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4586 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4588 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4589 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4591 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4592 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4594 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4595 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4597 delete ( WasapiHandle* ) stream_.apiHandle;
4598 stream_.apiHandle = NULL;
4600 for ( int i = 0; i < 2; i++ ) {
4601 if ( stream_.userBuffer[i] ) {
4602 free( stream_.userBuffer[i] );
4603 stream_.userBuffer[i] = 0;
4607 if ( stream_.deviceBuffer ) {
4608 free( stream_.deviceBuffer );
4609 stream_.deviceBuffer = 0;
4612 // update stream state
4613 stream_.state = STREAM_CLOSED;
4616 //-----------------------------------------------------------------------------
// Start the stream: record the start timestamp (when gettimeofday is
// available), flip the state to RUNNING, then spawn the WASAPI processing
// thread.  The thread is created suspended so its priority can be set
// before it is resumed.  Warns when the stream is already running.
4618 void RtApiWasapi::startStream( void )
4622 if ( stream_.state == STREAM_RUNNING ) {
4623 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4624 error( RtAudioError::WARNING );
4628 #if defined( HAVE_GETTIMEOFDAY )
4629 gettimeofday( &stream_.lastTickTimestamp, NULL );
4632 // update stream state
4633 stream_.state = STREAM_RUNNING;
4635 // create WASAPI stream thread
4636 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4638 if ( !stream_.callbackInfo.thread ) {
4639 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4640 error( RtAudioError::THREAD_ERROR );
4643 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4644 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4648 //-----------------------------------------------------------------------------
// Graceful stop: set STREAM_STOPPING as a signal to the stream thread,
// busy-wait until the thread reports STREAM_STOPPED, sleep long enough
// for the final buffer to play out, then close the thread handle.
// Warns when the stream is already stopped.
4650 void RtApiWasapi::stopStream( void )
4654 if ( stream_.state == STREAM_STOPPED ) {
4655 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4656 error( RtAudioError::WARNING );
4660 // inform stream thread by setting stream state to STREAM_STOPPING
4661 stream_.state = STREAM_STOPPING;
4663 // wait until stream thread is stopped
4664 while( stream_.state != STREAM_STOPPED ) {
4668 // Wait for the last buffer to play before stopping.
4669 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4671 // close thread handle
4672 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4673 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4674 error( RtAudioError::THREAD_ERROR );
4678 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4681 //-----------------------------------------------------------------------------
// Abort the stream: identical to stopStream() except it does not wait
// for the final buffer to play out before closing the thread handle.
4683 void RtApiWasapi::abortStream( void )
4687 if ( stream_.state == STREAM_STOPPED ) {
4688 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4689 error( RtAudioError::WARNING );
4693 // inform stream thread by setting stream state to STREAM_STOPPING
4694 stream_.state = STREAM_STOPPING;
4696 // wait until stream thread is stopped
4697 while ( stream_.state != STREAM_STOPPED ) {
4701 // close thread handle
4702 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4703 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4704 error( RtAudioError::THREAD_ERROR );
4708 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4711 //-----------------------------------------------------------------------------
4713 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4714 unsigned int firstChannel, unsigned int sampleRate,
4715 RtAudioFormat format, unsigned int* bufferSize,
4716 RtAudio::StreamOptions* options )
4718 bool methodResult = FAILURE;
4719 unsigned int captureDeviceCount = 0;
4720 unsigned int renderDeviceCount = 0;
4722 IMMDeviceCollection* captureDevices = NULL;
4723 IMMDeviceCollection* renderDevices = NULL;
4724 IMMDevice* devicePtr = NULL;
4725 WAVEFORMATEX* deviceFormat = NULL;
4726 unsigned int bufferBytes;
4727 stream_.state = STREAM_STOPPED;
4729 // create API Handle if not already created
4730 if ( !stream_.apiHandle )
4731 stream_.apiHandle = ( void* ) new WasapiHandle();
4733 // Count capture devices
4735 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4736 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4737 if ( FAILED( hr ) ) {
4738 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4742 hr = captureDevices->GetCount( &captureDeviceCount );
4743 if ( FAILED( hr ) ) {
4744 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4748 // Count render devices
4749 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4750 if ( FAILED( hr ) ) {
4751 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4755 hr = renderDevices->GetCount( &renderDeviceCount );
4756 if ( FAILED( hr ) ) {
4757 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4761 // validate device index
4762 if ( device >= captureDeviceCount + renderDeviceCount ) {
4763 errorType = RtAudioError::INVALID_USE;
4764 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4768 // if device index falls within capture devices
4769 if ( device >= renderDeviceCount ) {
4770 if ( mode != INPUT ) {
4771 errorType = RtAudioError::INVALID_USE;
4772 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4776 // retrieve captureAudioClient from devicePtr
4777 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4779 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4780 if ( FAILED( hr ) ) {
4781 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4785 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4786 NULL, ( void** ) &captureAudioClient );
4787 if ( FAILED( hr ) ) {
4788 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device audio client.";
4792 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4793 if ( FAILED( hr ) ) {
4794 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device mix format.";
4798 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4799 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4802 // if device index falls within render devices and is configured for loopback
4803 if ( device < renderDeviceCount && mode == INPUT )
4805 // if renderAudioClient is not initialised, initialise it now
4806 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4807 if ( !renderAudioClient )
4809 probeDeviceOpen( device, OUTPUT, channels, firstChannel, sampleRate, format, bufferSize, options );
4812 // retrieve captureAudioClient from devicePtr
4813 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4815 hr = renderDevices->Item( device, &devicePtr );
4816 if ( FAILED( hr ) ) {
4817 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4821 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4822 NULL, ( void** ) &captureAudioClient );
4823 if ( FAILED( hr ) ) {
4824 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4828 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4829 if ( FAILED( hr ) ) {
4830 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4834 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4835 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4838 // if device index falls within render devices and is configured for output
4839 if ( device < renderDeviceCount && mode == OUTPUT )
4841 // if renderAudioClient is already initialised, don't initialise it again
4842 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4843 if ( renderAudioClient )
4845 methodResult = SUCCESS;
4849 hr = renderDevices->Item( device, &devicePtr );
4850 if ( FAILED( hr ) ) {
4851 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4855 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4856 NULL, ( void** ) &renderAudioClient );
4857 if ( FAILED( hr ) ) {
4858 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4862 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4863 if ( FAILED( hr ) ) {
4864 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4868 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4869 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4873 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4874 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4875 stream_.mode = DUPLEX;
4878 stream_.mode = mode;
4881 stream_.device[mode] = device;
4882 stream_.doByteSwap[mode] = false;
4883 stream_.sampleRate = sampleRate;
4884 stream_.bufferSize = *bufferSize;
4885 stream_.nBuffers = 1;
4886 stream_.nUserChannels[mode] = channels;
4887 stream_.channelOffset[mode] = firstChannel;
4888 stream_.userFormat = format;
4889 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4891 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4892 stream_.userInterleaved = false;
4894 stream_.userInterleaved = true;
4895 stream_.deviceInterleaved[mode] = true;
4897 // Set flags for buffer conversion.
4898 stream_.doConvertBuffer[mode] = false;
4899 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4900 stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
4901 stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
4902 stream_.doConvertBuffer[mode] = true;
4903 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4904 stream_.nUserChannels[mode] > 1 )
4905 stream_.doConvertBuffer[mode] = true;
4907 if ( stream_.doConvertBuffer[mode] )
4908 setConvertInfo( mode, 0 );
4910 // Allocate necessary internal buffers
4911 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4913 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4914 if ( !stream_.userBuffer[mode] ) {
4915 errorType = RtAudioError::MEMORY_ERROR;
4916 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4920 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4921 stream_.callbackInfo.priority = 15;
4923 stream_.callbackInfo.priority = 0;
4925 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4926 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4928 methodResult = SUCCESS;
4932 SAFE_RELEASE( captureDevices );
4933 SAFE_RELEASE( renderDevices );
4934 SAFE_RELEASE( devicePtr );
4935 CoTaskMemFree( deviceFormat );
4937 // if method failed, close the stream
4938 if ( methodResult == FAILURE )
4941 if ( !errorText_.empty() )
4943 return methodResult;
4946 //=============================================================================
4948 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4951 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4956 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4959 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4964 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4967 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4972 //-----------------------------------------------------------------------------
4974 void RtApiWasapi::wasapiThread()
4976 // as this is a new thread, we must CoInitialize it
4977 CoInitialize( NULL );
4981 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4982 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4983 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4984 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4985 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4986 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4988 WAVEFORMATEX* captureFormat = NULL;
4989 WAVEFORMATEX* renderFormat = NULL;
4990 float captureSrRatio = 0.0f;
4991 float renderSrRatio = 0.0f;
4992 WasapiBuffer captureBuffer;
4993 WasapiBuffer renderBuffer;
4994 WasapiResampler* captureResampler = NULL;
4995 WasapiResampler* renderResampler = NULL;
4997 // declare local stream variables
4998 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4999 BYTE* streamBuffer = NULL;
5000 unsigned long captureFlags = 0;
5001 unsigned int bufferFrameCount = 0;
5002 unsigned int numFramesPadding = 0;
5003 unsigned int convBufferSize = 0;
5004 bool loopbackEnabled = stream_.device[INPUT] == stream_.device[OUTPUT];
5005 bool callbackPushed = true;
5006 bool callbackPulled = false;
5007 bool callbackStopped = false;
5008 int callbackResult = 0;
5010 // convBuffer is used to store converted buffers between WASAPI and the user
5011 char* convBuffer = NULL;
5012 unsigned int convBuffSize = 0;
5013 unsigned int deviceBuffSize = 0;
5015 std::string errorText;
5016 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
5018 // Attempt to assign "Pro Audio" characteristic to thread
5019 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
5021 DWORD taskIndex = 0;
5022 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr =
5023 ( TAvSetMmThreadCharacteristicsPtr ) (void(*)()) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
5024 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
5025 FreeLibrary( AvrtDll );
5028 // start capture stream if applicable
5029 if ( captureAudioClient ) {
5030 hr = captureAudioClient->GetMixFormat( &captureFormat );
5031 if ( FAILED( hr ) ) {
5032 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5036 // init captureResampler
5037 captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
5038 formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
5039 captureFormat->nSamplesPerSec, stream_.sampleRate );
5041 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
5043 if ( !captureClient ) {
5044 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5045 loopbackEnabled ? AUDCLNT_STREAMFLAGS_LOOPBACK : AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5050 if ( FAILED( hr ) ) {
5051 errorText = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
5055 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
5056 ( void** ) &captureClient );
5057 if ( FAILED( hr ) ) {
5058 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
5062 // don't configure captureEvent if in loopback mode
5063 if ( !loopbackEnabled )
5065 // configure captureEvent to trigger on every available capture buffer
5066 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5067 if ( !captureEvent ) {
5068 errorType = RtAudioError::SYSTEM_ERROR;
5069 errorText = "RtApiWasapi::wasapiThread: Unable to create capture event.";
5073 hr = captureAudioClient->SetEventHandle( captureEvent );
5074 if ( FAILED( hr ) ) {
5075 errorText = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
5079 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
5082 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
5084 // reset the capture stream
5085 hr = captureAudioClient->Reset();
5086 if ( FAILED( hr ) ) {
5087 errorText = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
5091 // start the capture stream
5092 hr = captureAudioClient->Start();
5093 if ( FAILED( hr ) ) {
5094 errorText = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
5099 unsigned int inBufferSize = 0;
5100 hr = captureAudioClient->GetBufferSize( &inBufferSize );
5101 if ( FAILED( hr ) ) {
5102 errorText = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
5106 // scale outBufferSize according to stream->user sample rate ratio
5107 unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
5108 inBufferSize *= stream_.nDeviceChannels[INPUT];
5110 // set captureBuffer size
5111 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
5114 // start render stream if applicable
5115 if ( renderAudioClient ) {
5116 hr = renderAudioClient->GetMixFormat( &renderFormat );
5117 if ( FAILED( hr ) ) {
5118 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5122 // init renderResampler
5123 renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
5124 formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
5125 stream_.sampleRate, renderFormat->nSamplesPerSec );
5127 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
5129 if ( !renderClient ) {
5130 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5131 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5136 if ( FAILED( hr ) ) {
5137 errorText = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5141 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
5142 ( void** ) &renderClient );
5143 if ( FAILED( hr ) ) {
5144 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5148 // configure renderEvent to trigger on every available render buffer
5149 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5150 if ( !renderEvent ) {
5151 errorType = RtAudioError::SYSTEM_ERROR;
5152 errorText = "RtApiWasapi::wasapiThread: Unable to create render event.";
5156 hr = renderAudioClient->SetEventHandle( renderEvent );
5157 if ( FAILED( hr ) ) {
5158 errorText = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
5162 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5163 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5165 // reset the render stream
5166 hr = renderAudioClient->Reset();
5167 if ( FAILED( hr ) ) {
5168 errorText = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5172 // start the render stream
5173 hr = renderAudioClient->Start();
5174 if ( FAILED( hr ) ) {
5175 errorText = "RtApiWasapi::wasapiThread: Unable to start render stream.";
5180 unsigned int outBufferSize = 0;
5181 hr = renderAudioClient->GetBufferSize( &outBufferSize );
5182 if ( FAILED( hr ) ) {
5183 errorText = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5187 // scale inBufferSize according to user->stream sample rate ratio
5188 unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5189 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5191 // set renderBuffer size
5192 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5195 // malloc buffer memory
5196 if ( stream_.mode == INPUT )
5198 using namespace std; // for ceilf
5199 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5200 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5202 else if ( stream_.mode == OUTPUT )
5204 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5205 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5207 else if ( stream_.mode == DUPLEX )
5209 convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5210 ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5211 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5212 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5215 convBuffSize *= 2; // allow overflow for *SrRatio remainders
5216 convBuffer = ( char* ) calloc( convBuffSize, 1 );
5217 stream_.deviceBuffer = ( char* ) calloc( deviceBuffSize, 1 );
5218 if ( !convBuffer || !stream_.deviceBuffer ) {
5219 errorType = RtAudioError::MEMORY_ERROR;
5220 errorText = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5224 // stream process loop
5225 while ( stream_.state != STREAM_STOPPING ) {
5226 if ( !callbackPulled ) {
5229 // 1. Pull callback buffer from inputBuffer
5230 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5231 // Convert callback buffer to user format
5233 if ( captureAudioClient )
5235 int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5236 if ( captureSrRatio != 1 )
5238 // account for remainders
5243 while ( convBufferSize < stream_.bufferSize )
5245 // Pull callback buffer from inputBuffer
5246 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5247 samplesToPull * stream_.nDeviceChannels[INPUT],
5248 stream_.deviceFormat[INPUT] );
5250 if ( !callbackPulled )
5255 // Convert callback buffer to user sample rate
5256 unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5257 unsigned int convSamples = 0;
5259 captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5264 convBufferSize += convSamples;
5265 samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5268 if ( callbackPulled )
5270 if ( stream_.doConvertBuffer[INPUT] ) {
5271 // Convert callback buffer to user format
5272 convertBuffer( stream_.userBuffer[INPUT],
5273 stream_.deviceBuffer,
5274 stream_.convertInfo[INPUT] );
5277 // no further conversion, simple copy deviceBuffer to userBuffer
5278 memcpy( stream_.userBuffer[INPUT],
5279 stream_.deviceBuffer,
5280 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5285 // if there is no capture stream, set callbackPulled flag
5286 callbackPulled = true;
5291 // 1. Execute user callback method
5292 // 2. Handle return value from callback
5294 // if callback has not requested the stream to stop
5295 if ( callbackPulled && !callbackStopped ) {
5296 // Execute user callback method
5297 callbackResult = callback( stream_.userBuffer[OUTPUT],
5298 stream_.userBuffer[INPUT],
5301 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5302 stream_.callbackInfo.userData );
5305 RtApi::tickStreamTime();
5307 // Handle return value from callback
5308 if ( callbackResult == 1 ) {
5309 // instantiate a thread to stop this thread
5310 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5311 if ( !threadHandle ) {
5312 errorType = RtAudioError::THREAD_ERROR;
5313 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5316 else if ( !CloseHandle( threadHandle ) ) {
5317 errorType = RtAudioError::THREAD_ERROR;
5318 errorText = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5322 callbackStopped = true;
5324 else if ( callbackResult == 2 ) {
5325 // instantiate a thread to stop this thread
5326 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5327 if ( !threadHandle ) {
5328 errorType = RtAudioError::THREAD_ERROR;
5329 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5332 else if ( !CloseHandle( threadHandle ) ) {
5333 errorType = RtAudioError::THREAD_ERROR;
5334 errorText = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5338 callbackStopped = true;
5345 // 1. Convert callback buffer to stream format
5346 // 2. Convert callback buffer to stream sample rate and channel count
5347 // 3. Push callback buffer into outputBuffer
5349 if ( renderAudioClient && callbackPulled )
5351 // if the last call to renderBuffer.PushBuffer() was successful
5352 if ( callbackPushed || convBufferSize == 0 )
5354 if ( stream_.doConvertBuffer[OUTPUT] )
5356 // Convert callback buffer to stream format
5357 convertBuffer( stream_.deviceBuffer,
5358 stream_.userBuffer[OUTPUT],
5359 stream_.convertInfo[OUTPUT] );
5363 // no further conversion, simple copy userBuffer to deviceBuffer
5364 memcpy( stream_.deviceBuffer,
5365 stream_.userBuffer[OUTPUT],
5366 stream_.bufferSize * stream_.nUserChannels[OUTPUT] * formatBytes( stream_.userFormat ) );
5369 // Convert callback buffer to stream sample rate
5370 renderResampler->Convert( convBuffer,
5371 stream_.deviceBuffer,
5376 // Push callback buffer into outputBuffer
5377 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5378 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5379 stream_.deviceFormat[OUTPUT] );
5382 // if there is no render stream, set callbackPushed flag
5383 callbackPushed = true;
5388 // 1. Get capture buffer from stream
5389 // 2. Push capture buffer into inputBuffer
5390 // 3. If 2. was successful: Release capture buffer
5392 if ( captureAudioClient ) {
5393 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5394 if ( !callbackPulled ) {
5395 WaitForSingleObject( loopbackEnabled ? renderEvent : captureEvent, INFINITE );
5398 // Get capture buffer from stream
5399 hr = captureClient->GetBuffer( &streamBuffer,
5401 &captureFlags, NULL, NULL );
5402 if ( FAILED( hr ) ) {
5403 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5407 if ( bufferFrameCount != 0 ) {
5408 // Push capture buffer into inputBuffer
5409 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5410 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5411 stream_.deviceFormat[INPUT] ) )
5413 // Release capture buffer
5414 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5415 if ( FAILED( hr ) ) {
5416 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5422 // Inform WASAPI that capture was unsuccessful
5423 hr = captureClient->ReleaseBuffer( 0 );
5424 if ( FAILED( hr ) ) {
5425 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5432 // Inform WASAPI that capture was unsuccessful
5433 hr = captureClient->ReleaseBuffer( 0 );
5434 if ( FAILED( hr ) ) {
5435 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5443 // 1. Get render buffer from stream
5444 // 2. Pull next buffer from outputBuffer
5445 // 3. If 2. was successful: Fill render buffer with next buffer
5446 // Release render buffer
5448 if ( renderAudioClient ) {
5449 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5450 if ( callbackPulled && !callbackPushed ) {
5451 WaitForSingleObject( renderEvent, INFINITE );
5454 // Get render buffer from stream
5455 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5456 if ( FAILED( hr ) ) {
5457 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5461 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5462 if ( FAILED( hr ) ) {
5463 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
5467 bufferFrameCount -= numFramesPadding;
5469 if ( bufferFrameCount != 0 ) {
5470 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5471 if ( FAILED( hr ) ) {
5472 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5476 // Pull next buffer from outputBuffer
5477 // Fill render buffer with next buffer
5478 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5479 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5480 stream_.deviceFormat[OUTPUT] ) )
5482 // Release render buffer
5483 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5484 if ( FAILED( hr ) ) {
5485 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5491 // Inform WASAPI that render was unsuccessful
5492 hr = renderClient->ReleaseBuffer( 0, 0 );
5493 if ( FAILED( hr ) ) {
5494 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5501 // Inform WASAPI that render was unsuccessful
5502 hr = renderClient->ReleaseBuffer( 0, 0 );
5503 if ( FAILED( hr ) ) {
5504 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5510 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5511 if ( callbackPushed ) {
5512 // unsetting the callbackPulled flag lets the stream know that
5513 // the audio device is ready for another callback output buffer.
5514 callbackPulled = false;
5521 CoTaskMemFree( captureFormat );
5522 CoTaskMemFree( renderFormat );
5524 free ( convBuffer );
5525 delete renderResampler;
5526 delete captureResampler;
5530 // update stream state
5531 stream_.state = STREAM_STOPPED;
5533 if ( !errorText.empty() )
5535 errorText_ = errorText;
5540 //******************** End of __WINDOWS_WASAPI__ *********************//
5544 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5546 // Modified by Robin Davies, October 2005
5547 // - Improvements to DirectX pointer chasing.
5548 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5549 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5550 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5551 // Changed device query structure for RtAudio 4.0.7, January 2010
5553 #include <windows.h>
5554 #include <process.h>
5555 #include <mmsystem.h>
5559 #include <algorithm>
5561 #if defined(__MINGW32__)
5562 // missing from latest mingw winapi
5563 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5564 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5565 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5566 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5569 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5571 #ifdef _MSC_VER // if Microsoft Visual C++
5572 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5575 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5577 if ( pointer > bufferSize ) pointer -= bufferSize;
5578 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5579 if ( pointer < earlierPointer ) pointer += bufferSize;
5580 return pointer >= earlierPointer && pointer < laterPointer;
5583 // A structure to hold various information related to the DirectSound
5584 // API implementation.
5586 unsigned int drainCounter; // Tracks callback counts when draining
5587 bool internalDrain; // Indicates if stop is initiated from callback or not.
5591 UINT bufferPointer[2];
5592 DWORD dsBufferSize[2];
5593 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
5597 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5600 // Declarations for utility functions, callbacks, and structures
5601 // specific to the DirectSound implementation.
5602 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5603 LPCTSTR description,
5607 static const char* getErrorString( int code );
5609 static unsigned __stdcall callbackHandler( void *ptr );
5618 : found(false) { validId[0] = false; validId[1] = false; }
5621 struct DsProbeData {
5623 std::vector<struct DsDevice>* dsDevices;
5626 RtApiDs :: RtApiDs()
5628 // Dsound will run both-threaded. If CoInitialize fails, then just
5629 // accept whatever the mainline chose for a threading model.
5630 coInitialized_ = false;
5631 HRESULT hr = CoInitialize( NULL );
5632 if ( !FAILED( hr ) ) coInitialized_ = true;
5635 RtApiDs :: ~RtApiDs()
5637 if ( stream_.state != STREAM_CLOSED ) closeStream();
5638 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5641 // The DirectSound default output is always the first device.
5642 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5647 // The DirectSound default input is always the first input device,
5648 // which is the first capture device enumerated.
5649 unsigned int RtApiDs :: getDefaultInputDevice( void )
5654 unsigned int RtApiDs :: getDeviceCount( void )
5656 // Set query flag for previously found devices to false, so that we
5657 // can check for any devices that have disappeared.
5658 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5659 dsDevices[i].found = false;
5661 // Query DirectSound devices.
5662 struct DsProbeData probeInfo;
5663 probeInfo.isInput = false;
5664 probeInfo.dsDevices = &dsDevices;
5665 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5666 if ( FAILED( result ) ) {
5667 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5668 errorText_ = errorStream_.str();
5669 error( RtAudioError::WARNING );
5672 // Query DirectSoundCapture devices.
5673 probeInfo.isInput = true;
5674 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5675 if ( FAILED( result ) ) {
5676 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5677 errorText_ = errorStream_.str();
5678 error( RtAudioError::WARNING );
5681 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5682 for ( unsigned int i=0; i<dsDevices.size(); ) {
5683 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5687 return static_cast<unsigned int>(dsDevices.size());
5690 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5692 RtAudio::DeviceInfo info;
5693 info.probed = false;
5695 if ( dsDevices.size() == 0 ) {
5696 // Force a query of all devices
5698 if ( dsDevices.size() == 0 ) {
5699 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5700 error( RtAudioError::INVALID_USE );
5705 if ( device >= dsDevices.size() ) {
5706 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5707 error( RtAudioError::INVALID_USE );
5712 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5714 LPDIRECTSOUND output;
5716 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5717 if ( FAILED( result ) ) {
5718 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5719 errorText_ = errorStream_.str();
5720 error( RtAudioError::WARNING );
5724 outCaps.dwSize = sizeof( outCaps );
5725 result = output->GetCaps( &outCaps );
5726 if ( FAILED( result ) ) {
5728 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5729 errorText_ = errorStream_.str();
5730 error( RtAudioError::WARNING );
5734 // Get output channel information.
5735 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5737 // Get sample rate information.
5738 info.sampleRates.clear();
5739 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5740 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5741 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5742 info.sampleRates.push_back( SAMPLE_RATES[k] );
5744 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5745 info.preferredSampleRate = SAMPLE_RATES[k];
5749 // Get format information.
5750 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5751 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5755 if ( getDefaultOutputDevice() == device )
5756 info.isDefaultOutput = true;
5758 if ( dsDevices[ device ].validId[1] == false ) {
5759 info.name = dsDevices[ device ].name;
5766 LPDIRECTSOUNDCAPTURE input;
5767 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5768 if ( FAILED( result ) ) {
5769 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5770 errorText_ = errorStream_.str();
5771 error( RtAudioError::WARNING );
5776 inCaps.dwSize = sizeof( inCaps );
5777 result = input->GetCaps( &inCaps );
5778 if ( FAILED( result ) ) {
5780 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5781 errorText_ = errorStream_.str();
5782 error( RtAudioError::WARNING );
5786 // Get input channel information.
5787 info.inputChannels = inCaps.dwChannels;
5789 // Get sample rate and format information.
5790 std::vector<unsigned int> rates;
5791 if ( inCaps.dwChannels >= 2 ) {
5792 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5793 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5794 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5795 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5796 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5797 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5798 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5799 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5801 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5802 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5803 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5804 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5805 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5807 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5808 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5809 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5810 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5811 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5814 else if ( inCaps.dwChannels == 1 ) {
5815 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5816 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5817 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5818 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5819 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5820 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5821 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5822 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5824 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5825 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5826 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5827 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5828 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5830 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5831 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5832 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5833 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5834 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5837 else info.inputChannels = 0; // technically, this would be an error
5841 if ( info.inputChannels == 0 ) return info;
5843 // Copy the supported rates to the info structure but avoid duplication.
5845 for ( unsigned int i=0; i<rates.size(); i++ ) {
5847 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5848 if ( rates[i] == info.sampleRates[j] ) {
5853 if ( found == false ) info.sampleRates.push_back( rates[i] );
5855 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5857 // If device opens for both playback and capture, we determine the channels.
5858 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5859 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
5861 if ( device == 0 ) info.isDefaultInput = true;
5863 // Copy name and return.
5864 info.name = dsDevices[ device ].name;
5869 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5870 unsigned int firstChannel, unsigned int sampleRate,
5871 RtAudioFormat format, unsigned int *bufferSize,
5872 RtAudio::StreamOptions *options )
5874 if ( channels + firstChannel > 2 ) {
5875 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5879 size_t nDevices = dsDevices.size();
5880 if ( nDevices == 0 ) {
5881 // This should not happen because a check is made before this function is called.
5882 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5886 if ( device >= nDevices ) {
5887 // This should not happen because a check is made before this function is called.
5888 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5892 if ( mode == OUTPUT ) {
5893 if ( dsDevices[ device ].validId[0] == false ) {
5894 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5895 errorText_ = errorStream_.str();
5899 else { // mode == INPUT
5900 if ( dsDevices[ device ].validId[1] == false ) {
5901 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5902 errorText_ = errorStream_.str();
5907 // According to a note in PortAudio, using GetDesktopWindow()
5908 // instead of GetForegroundWindow() is supposed to avoid problems
5909 // that occur when the application's window is not the foreground
5910 // window. Also, if the application window closes before the
5911 // DirectSound buffer, DirectSound can crash. In the past, I had
5912 // problems when using GetDesktopWindow() but it seems fine now
5913 // (January 2010). I'll leave it commented here.
5914 // HWND hWnd = GetForegroundWindow();
5915 HWND hWnd = GetDesktopWindow();
5917 // Check the numberOfBuffers parameter and limit the lowest value to
5918 // two. This is a judgement call and a value of two is probably too
5919 // low for capture, but it should work for playback.
5921 if ( options ) nBuffers = options->numberOfBuffers;
5922 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5923 if ( nBuffers < 2 ) nBuffers = 3;
5925 // Check the lower range of the user-specified buffer size and set
5926 // (arbitrarily) to a lower bound of 32.
5927 if ( *bufferSize < 32 ) *bufferSize = 32;
5929 // Create the wave format structure. The data format setting will
5930 // be determined later.
5931 WAVEFORMATEX waveFormat;
5932 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5933 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5934 waveFormat.nChannels = channels + firstChannel;
5935 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5937 // Determine the device buffer size. By default, we'll use the value
5938 // defined above (32K), but we will grow it to make allowances for
5939 // very large software buffer sizes.
5940 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5941 DWORD dsPointerLeadTime = 0;
5943 void *ohandle = 0, *bhandle = 0;
5945 if ( mode == OUTPUT ) {
5947 LPDIRECTSOUND output;
5948 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5949 if ( FAILED( result ) ) {
5950 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5951 errorText_ = errorStream_.str();
5956 outCaps.dwSize = sizeof( outCaps );
5957 result = output->GetCaps( &outCaps );
5958 if ( FAILED( result ) ) {
5960 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5961 errorText_ = errorStream_.str();
5965 // Check channel information.
5966 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5967 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5968 errorText_ = errorStream_.str();
5972 // Check format information. Use 16-bit format unless not
5973 // supported or user requests 8-bit.
5974 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5975 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5976 waveFormat.wBitsPerSample = 16;
5977 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5980 waveFormat.wBitsPerSample = 8;
5981 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5983 stream_.userFormat = format;
5985 // Update wave format structure and buffer information.
5986 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5987 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5988 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5990 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5991 while ( dsPointerLeadTime * 2U > dsBufferSize )
5994 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5995 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5996 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5997 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5998 if ( FAILED( result ) ) {
6000 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
6001 errorText_ = errorStream_.str();
6005 // Even though we will write to the secondary buffer, we need to
6006 // access the primary buffer to set the correct output format
6007 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
6008 // buffer description.
6009 DSBUFFERDESC bufferDescription;
6010 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
6011 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
6012 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
6014 // Obtain the primary buffer
6015 LPDIRECTSOUNDBUFFER buffer;
6016 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6017 if ( FAILED( result ) ) {
6019 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
6020 errorText_ = errorStream_.str();
6024 // Set the primary DS buffer sound format.
6025 result = buffer->SetFormat( &waveFormat );
6026 if ( FAILED( result ) ) {
6028 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
6029 errorText_ = errorStream_.str();
6033 // Setup the secondary DS buffer description.
6034 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
6035 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
6036 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6037 DSBCAPS_GLOBALFOCUS |
6038 DSBCAPS_GETCURRENTPOSITION2 |
6039 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
6040 bufferDescription.dwBufferBytes = dsBufferSize;
6041 bufferDescription.lpwfxFormat = &waveFormat;
6043 // Try to create the secondary DS buffer. If that doesn't work,
6044 // try to use software mixing. Otherwise, there's a problem.
6045 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6046 if ( FAILED( result ) ) {
6047 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6048 DSBCAPS_GLOBALFOCUS |
6049 DSBCAPS_GETCURRENTPOSITION2 |
6050 DSBCAPS_LOCSOFTWARE ); // Force software mixing
6051 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6052 if ( FAILED( result ) ) {
6054 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
6055 errorText_ = errorStream_.str();
6060 // Get the buffer size ... might be different from what we specified.
6062 dsbcaps.dwSize = sizeof( DSBCAPS );
6063 result = buffer->GetCaps( &dsbcaps );
6064 if ( FAILED( result ) ) {
6067 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6068 errorText_ = errorStream_.str();
6072 dsBufferSize = dsbcaps.dwBufferBytes;
6074 // Lock the DS buffer
6077 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6078 if ( FAILED( result ) ) {
6081 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
6082 errorText_ = errorStream_.str();
6086 // Zero the DS buffer
6087 ZeroMemory( audioPtr, dataLen );
6089 // Unlock the DS buffer
6090 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6091 if ( FAILED( result ) ) {
6094 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
6095 errorText_ = errorStream_.str();
6099 ohandle = (void *) output;
6100 bhandle = (void *) buffer;
6103 if ( mode == INPUT ) {
6105 LPDIRECTSOUNDCAPTURE input;
6106 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
6107 if ( FAILED( result ) ) {
6108 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
6109 errorText_ = errorStream_.str();
6114 inCaps.dwSize = sizeof( inCaps );
6115 result = input->GetCaps( &inCaps );
6116 if ( FAILED( result ) ) {
6118 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
6119 errorText_ = errorStream_.str();
6123 // Check channel information.
6124 if ( inCaps.dwChannels < channels + firstChannel ) {
6125 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
6129 // Check format information. Use 16-bit format unless user
6131 DWORD deviceFormats;
6132 if ( channels + firstChannel == 2 ) {
6133 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
6134 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6135 waveFormat.wBitsPerSample = 8;
6136 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6138 else { // assume 16-bit is supported
6139 waveFormat.wBitsPerSample = 16;
6140 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6143 else { // channel == 1
6144 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
6145 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6146 waveFormat.wBitsPerSample = 8;
6147 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6149 else { // assume 16-bit is supported
6150 waveFormat.wBitsPerSample = 16;
6151 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6154 stream_.userFormat = format;
6156 // Update wave format structure and buffer information.
6157 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6158 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6159 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6161 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6162 while ( dsPointerLeadTime * 2U > dsBufferSize )
6165 // Setup the secondary DS buffer description.
6166 DSCBUFFERDESC bufferDescription;
6167 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6168 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6169 bufferDescription.dwFlags = 0;
6170 bufferDescription.dwReserved = 0;
6171 bufferDescription.dwBufferBytes = dsBufferSize;
6172 bufferDescription.lpwfxFormat = &waveFormat;
6174 // Create the capture buffer.
6175 LPDIRECTSOUNDCAPTUREBUFFER buffer;
6176 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6177 if ( FAILED( result ) ) {
6179 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6180 errorText_ = errorStream_.str();
6184 // Get the buffer size ... might be different from what we specified.
6186 dscbcaps.dwSize = sizeof( DSCBCAPS );
6187 result = buffer->GetCaps( &dscbcaps );
6188 if ( FAILED( result ) ) {
6191 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6192 errorText_ = errorStream_.str();
6196 dsBufferSize = dscbcaps.dwBufferBytes;
6198 // NOTE: We could have a problem here if this is a duplex stream
6199 // and the play and capture hardware buffer sizes are different
6200 // (I'm actually not sure if that is a problem or not).
6201 // Currently, we are not verifying that.
6203 // Lock the capture buffer
6206 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6207 if ( FAILED( result ) ) {
6210 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6211 errorText_ = errorStream_.str();
6216 ZeroMemory( audioPtr, dataLen );
6218 // Unlock the buffer
6219 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6220 if ( FAILED( result ) ) {
6223 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6224 errorText_ = errorStream_.str();
6228 ohandle = (void *) input;
6229 bhandle = (void *) buffer;
6232 // Set various stream parameters
6233 DsHandle *handle = 0;
6234 stream_.nDeviceChannels[mode] = channels + firstChannel;
6235 stream_.nUserChannels[mode] = channels;
6236 stream_.bufferSize = *bufferSize;
6237 stream_.channelOffset[mode] = firstChannel;
6238 stream_.deviceInterleaved[mode] = true;
6239 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6240 else stream_.userInterleaved = true;
6242 // Set flag for buffer conversion
6243 stream_.doConvertBuffer[mode] = false;
6244 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6245 stream_.doConvertBuffer[mode] = true;
6246 if (stream_.userFormat != stream_.deviceFormat[mode])
6247 stream_.doConvertBuffer[mode] = true;
6248 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6249 stream_.nUserChannels[mode] > 1 )
6250 stream_.doConvertBuffer[mode] = true;
6252 // Allocate necessary internal buffers
6253 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6254 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6255 if ( stream_.userBuffer[mode] == NULL ) {
6256 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6260 if ( stream_.doConvertBuffer[mode] ) {
6262 bool makeBuffer = true;
6263 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6264 if ( mode == INPUT ) {
6265 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6266 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6267 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6272 bufferBytes *= *bufferSize;
6273 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6274 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6275 if ( stream_.deviceBuffer == NULL ) {
6276 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6282 // Allocate our DsHandle structures for the stream.
6283 if ( stream_.apiHandle == 0 ) {
6285 handle = new DsHandle;
6287 catch ( std::bad_alloc& ) {
6288 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6292 // Create a manual-reset event.
6293 handle->condition = CreateEvent( NULL, // no security
6294 TRUE, // manual-reset
6295 FALSE, // non-signaled initially
6297 stream_.apiHandle = (void *) handle;
6300 handle = (DsHandle *) stream_.apiHandle;
6301 handle->id[mode] = ohandle;
6302 handle->buffer[mode] = bhandle;
6303 handle->dsBufferSize[mode] = dsBufferSize;
6304 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6306 stream_.device[mode] = device;
6307 stream_.state = STREAM_STOPPED;
6308 if ( stream_.mode == OUTPUT && mode == INPUT )
6309 // We had already set up an output stream.
6310 stream_.mode = DUPLEX;
6312 stream_.mode = mode;
6313 stream_.nBuffers = nBuffers;
6314 stream_.sampleRate = sampleRate;
6316 // Setup the buffer conversion information structure.
6317 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6319 // Setup the callback thread.
6320 if ( stream_.callbackInfo.isRunning == false ) {
6322 stream_.callbackInfo.isRunning = true;
6323 stream_.callbackInfo.object = (void *) this;
6324 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6325 &stream_.callbackInfo, 0, &threadId );
6326 if ( stream_.callbackInfo.thread == 0 ) {
6327 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6331 // Boost DS thread priority
6332 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
6338 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6339 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6340 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6341 if ( buffer ) buffer->Release();
6344 if ( handle->buffer[1] ) {
6345 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6346 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6347 if ( buffer ) buffer->Release();
6350 CloseHandle( handle->condition );
6352 stream_.apiHandle = 0;
6355 for ( int i=0; i<2; i++ ) {
6356 if ( stream_.userBuffer[i] ) {
6357 free( stream_.userBuffer[i] );
6358 stream_.userBuffer[i] = 0;
6362 if ( stream_.deviceBuffer ) {
6363 free( stream_.deviceBuffer );
6364 stream_.deviceBuffer = 0;
6367 stream_.state = STREAM_CLOSED;
// Close the open stream: stop and join the callback thread, release the
// DirectSound playback/capture objects and buffers, free the internal
// user/device buffers, and reset stream_ to UNINITIALIZED/STREAM_CLOSED.
// Issues a WARNING (not an exception) if no stream is open.
// NOTE(review): gaps in the embedded line numbering (e.g. 6389-6394,
// 6398-6403) indicate that Stop()/Release() calls were dropped from this
// excerpt -- verify against the canonical RtAudio source.
6371 void RtApiDs :: closeStream()
6373 if ( stream_.state == STREAM_CLOSED ) {
6374 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6375 error( RtAudioError::WARNING );
6379 // Stop the callback thread.
// isRunning=false tells the handler loop to exit; then wait for and reap it.
6380 stream_.callbackInfo.isRunning = false;
6381 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6382 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6384 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Release playback-side DirectSound objects, if any were created.
6386 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6387 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6388 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// Release capture-side DirectSound objects, if any were created.
6395 if ( handle->buffer[1] ) {
6396 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6397 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
// Destroy the drain-signal event created in probeDeviceOpen().
6404 CloseHandle( handle->condition );
6406 stream_.apiHandle = 0;
// Free the per-direction user buffers and the shared device buffer.
6409 for ( int i=0; i<2; i++ ) {
6410 if ( stream_.userBuffer[i] ) {
6411 free( stream_.userBuffer[i] );
6412 stream_.userBuffer[i] = 0;
6416 if ( stream_.deviceBuffer ) {
6417 free( stream_.deviceBuffer );
6418 stream_.deviceBuffer = 0;
6421 stream_.mode = UNINITIALIZED;
6422 stream_.state = STREAM_CLOSED;
// Start the open stream: raise timer resolution, reset the rolling/preroll
// bookkeeping, start the DS playback buffer (Play) and/or capture buffer
// (Start) in looping mode, clear drain state, and mark STREAM_RUNNING.
// Issues a WARNING if the stream is already running; raises SYSTEM_ERROR if
// a DS call failed.
// NOTE(review): gaps in the embedded numbering suggest early-return and
// goto-unlock lines were dropped from this excerpt.
6425 void RtApiDs :: startStream()
6428 if ( stream_.state == STREAM_RUNNING ) {
6429 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6430 error( RtAudioError::WARNING );
// Record the start time used as the base for stream-time computation.
6434 #if defined( HAVE_GETTIMEOFDAY )
6435 gettimeofday( &stream_.lastTickTimestamp, NULL );
6438 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6440 // Increase scheduler frequency on lesser windows (a side-effect of
6441 // increasing timer accuracy). On greater windows (Win2K or later),
6442 // this is already in effect.
6443 timeBeginPeriod( 1 );
// buffersRolling/duplexPrerollBytes are re-read by callbackEvent() to
// synchronize the playback and capture pointers after startup.
6445 buffersRolling = false;
6446 duplexPrerollBytes = 0;
6448 if ( stream_.mode == DUPLEX ) {
6449 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6450 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Kick off the looping playback buffer.
6454 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6456 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6457 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6458 if ( FAILED( result ) ) {
6459 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6460 errorText_ = errorStream_.str();
// Kick off the looping capture buffer.
6465 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6467 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6468 result = buffer->Start( DSCBSTART_LOOPING );
6469 if ( FAILED( result ) ) {
6470 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6471 errorText_ = errorStream_.str();
// Reset drain state and the manual-reset event stopStream() waits on.
6476 handle->drainCounter = 0;
6477 handle->internalDrain = false;
6478 ResetEvent( handle->condition );
6479 stream_.state = STREAM_RUNNING;
6482 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop the running stream. For output, waits (via the handle->condition
// event) for the callback to finish draining pending audio, then stops the
// DS buffer and zeros it so a restart does not replay stale data; for input,
// stops and zeros the capture buffer likewise. Restores normal timer
// resolution and raises SYSTEM_ERROR if any DS call failed.
// NOTE(review): gaps in the embedded numbering indicate that audioPtr/dataLen
// declarations and several goto/closing-brace lines were dropped from this
// excerpt -- verify against the canonical RtAudio source.
6485 void RtApiDs :: stopStream()
6488 if ( stream_.state == STREAM_STOPPED ) {
6489 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6490 error( RtAudioError::WARNING );
6497 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6498 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Ask the callback thread to drain (write zeros) and block until it signals
// the condition event that draining is complete.
6499 if ( handle->drainCounter == 0 ) {
6500 handle->drainCounter = 2;
6501 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6504 stream_.state = STREAM_STOPPED;
// Serialize against the callback thread before touching the DS buffer.
6506 MUTEX_LOCK( &stream_.mutex );
6508 // Stop the buffer and clear memory
6509 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6510 result = buffer->Stop();
6511 if ( FAILED( result ) ) {
6512 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6513 errorText_ = errorStream_.str();
6517 // Lock the buffer and clear it so that if we start to play again,
6518 // we won't have old data playing.
6519 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6520 if ( FAILED( result ) ) {
6521 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6522 errorText_ = errorStream_.str();
6526 // Zero the DS buffer
6527 ZeroMemory( audioPtr, dataLen );
6529 // Unlock the DS buffer
6530 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6531 if ( FAILED( result ) ) {
6532 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6533 errorText_ = errorStream_.str();
6537 // If we start playing again, we must begin at beginning of buffer.
6538 handle->bufferPointer[0] = 0;
6541 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6542 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6546 stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken in the output branch above;
// only lock here for an input-only stream.
6548 if ( stream_.mode != DUPLEX )
6549 MUTEX_LOCK( &stream_.mutex );
6551 result = buffer->Stop();
6552 if ( FAILED( result ) ) {
6553 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6554 errorText_ = errorStream_.str();
6558 // Lock the buffer and clear it so that if we start to play again,
6559 // we won't have old data playing.
6560 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6561 if ( FAILED( result ) ) {
6562 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6563 errorText_ = errorStream_.str();
6567 // Zero the DS buffer
6568 ZeroMemory( audioPtr, dataLen );
6570 // Unlock the DS buffer
6571 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6572 if ( FAILED( result ) ) {
6573 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6574 errorText_ = errorStream_.str();
6578 // If we start recording again, we must begin at beginning of buffer.
6579 handle->bufferPointer[1] = 0;
6583 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6584 MUTEX_UNLOCK( &stream_.mutex );
6586 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Abort the running stream without draining: setting drainCounter to 2 makes
// the callback thread output zeros instead of fresh user data.
// Issues a WARNING if the stream is already stopped.
// NOTE(review): the tail of this function (apparently including a call to
// stopStream()) was dropped from this excerpt -- verify against the
// canonical RtAudio source.
6589 void RtApiDs :: abortStream()
6592 if ( stream_.state == STREAM_STOPPED ) {
6593 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6594 error( RtAudioError::WARNING );
6598 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Skip the drain phase: callbackEvent() writes silence when drainCounter > 1.
6599 handle->drainCounter = 2;
6604 void RtApiDs :: callbackEvent()
6606 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6607 Sleep( 50 ); // sleep 50 milliseconds
6611 if ( stream_.state == STREAM_CLOSED ) {
6612 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6613 error( RtAudioError::WARNING );
6617 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6618 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6620 // Check if we were draining the stream and signal is finished.
6621 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6623 stream_.state = STREAM_STOPPING;
6624 if ( handle->internalDrain == false )
6625 SetEvent( handle->condition );
6631 // Invoke user callback to get fresh output data UNLESS we are
6633 if ( handle->drainCounter == 0 ) {
6634 RtAudioCallback callback = (RtAudioCallback) info->callback;
6635 double streamTime = getStreamTime();
6636 RtAudioStreamStatus status = 0;
6637 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6638 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6639 handle->xrun[0] = false;
6641 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6642 status |= RTAUDIO_INPUT_OVERFLOW;
6643 handle->xrun[1] = false;
6645 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6646 stream_.bufferSize, streamTime, status, info->userData );
6647 if ( cbReturnValue == 2 ) {
6648 stream_.state = STREAM_STOPPING;
6649 handle->drainCounter = 2;
6653 else if ( cbReturnValue == 1 ) {
6654 handle->drainCounter = 1;
6655 handle->internalDrain = true;
6660 DWORD currentWritePointer, safeWritePointer;
6661 DWORD currentReadPointer, safeReadPointer;
6662 UINT nextWritePointer;
6664 LPVOID buffer1 = NULL;
6665 LPVOID buffer2 = NULL;
6666 DWORD bufferSize1 = 0;
6667 DWORD bufferSize2 = 0;
6672 MUTEX_LOCK( &stream_.mutex );
6673 if ( stream_.state == STREAM_STOPPED ) {
6674 MUTEX_UNLOCK( &stream_.mutex );
6678 if ( buffersRolling == false ) {
6679 if ( stream_.mode == DUPLEX ) {
6680 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6682 // It takes a while for the devices to get rolling. As a result,
6683 // there's no guarantee that the capture and write device pointers
6684 // will move in lockstep. Wait here for both devices to start
6685 // rolling, and then set our buffer pointers accordingly.
6686 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6687 // bytes later than the write buffer.
6689 // Stub: a serious risk of having a pre-emptive scheduling round
6690 // take place between the two GetCurrentPosition calls... but I'm
6691 // really not sure how to solve the problem. Temporarily boost to
6692 // Realtime priority, maybe; but I'm not sure what priority the
6693 // DirectSound service threads run at. We *should* be roughly
6694 // within a ms or so of correct.
6696 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6697 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6699 DWORD startSafeWritePointer, startSafeReadPointer;
6701 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6702 if ( FAILED( result ) ) {
6703 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6704 errorText_ = errorStream_.str();
6705 MUTEX_UNLOCK( &stream_.mutex );
6706 error( RtAudioError::SYSTEM_ERROR );
6709 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6710 if ( FAILED( result ) ) {
6711 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6712 errorText_ = errorStream_.str();
6713 MUTEX_UNLOCK( &stream_.mutex );
6714 error( RtAudioError::SYSTEM_ERROR );
6718 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6719 if ( FAILED( result ) ) {
6720 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6721 errorText_ = errorStream_.str();
6722 MUTEX_UNLOCK( &stream_.mutex );
6723 error( RtAudioError::SYSTEM_ERROR );
6726 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6727 if ( FAILED( result ) ) {
6728 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6729 errorText_ = errorStream_.str();
6730 MUTEX_UNLOCK( &stream_.mutex );
6731 error( RtAudioError::SYSTEM_ERROR );
6734 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6738 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6740 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6741 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6742 handle->bufferPointer[1] = safeReadPointer;
6744 else if ( stream_.mode == OUTPUT ) {
6746 // Set the proper nextWritePosition after initial startup.
6747 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6748 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6749 if ( FAILED( result ) ) {
6750 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6751 errorText_ = errorStream_.str();
6752 MUTEX_UNLOCK( &stream_.mutex );
6753 error( RtAudioError::SYSTEM_ERROR );
6756 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6757 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6760 buffersRolling = true;
6763 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6765 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6767 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6768 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6769 bufferBytes *= formatBytes( stream_.userFormat );
6770 memset( stream_.userBuffer[0], 0, bufferBytes );
6773 // Setup parameters and do buffer conversion if necessary.
6774 if ( stream_.doConvertBuffer[0] ) {
6775 buffer = stream_.deviceBuffer;
6776 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6777 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6778 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6781 buffer = stream_.userBuffer[0];
6782 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6783 bufferBytes *= formatBytes( stream_.userFormat );
6786 // No byte swapping necessary in DirectSound implementation.
6788 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6789 // unsigned. So, we need to convert our signed 8-bit data here to
6791 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6792 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6794 DWORD dsBufferSize = handle->dsBufferSize[0];
6795 nextWritePointer = handle->bufferPointer[0];
6797 DWORD endWrite, leadPointer;
6799 // Find out where the read and "safe write" pointers are.
6800 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6801 if ( FAILED( result ) ) {
6802 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6803 errorText_ = errorStream_.str();
6804 MUTEX_UNLOCK( &stream_.mutex );
6805 error( RtAudioError::SYSTEM_ERROR );
6809 // We will copy our output buffer into the region between
6810 // safeWritePointer and leadPointer. If leadPointer is not
6811 // beyond the next endWrite position, wait until it is.
6812 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6813 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6814 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6815 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6816 endWrite = nextWritePointer + bufferBytes;
6818 // Check whether the entire write region is behind the play pointer.
6819 if ( leadPointer >= endWrite ) break;
6821 // If we are here, then we must wait until the leadPointer advances
6822 // beyond the end of our next write region. We use the
6823 // Sleep() function to suspend operation until that happens.
6824 double millis = ( endWrite - leadPointer ) * 1000.0;
6825 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6826 if ( millis < 1.0 ) millis = 1.0;
6827 Sleep( (DWORD) millis );
6830 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6831 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6832 // We've strayed into the forbidden zone ... resync the read pointer.
6833 handle->xrun[0] = true;
6834 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6835 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6836 handle->bufferPointer[0] = nextWritePointer;
6837 endWrite = nextWritePointer + bufferBytes;
6840 // Lock free space in the buffer
6841 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6842 &bufferSize1, &buffer2, &bufferSize2, 0 );
6843 if ( FAILED( result ) ) {
6844 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6845 errorText_ = errorStream_.str();
6846 MUTEX_UNLOCK( &stream_.mutex );
6847 error( RtAudioError::SYSTEM_ERROR );
6851 // Copy our buffer into the DS buffer
6852 CopyMemory( buffer1, buffer, bufferSize1 );
6853 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6855 // Update our buffer offset and unlock sound buffer
6856 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6857 if ( FAILED( result ) ) {
6858 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6859 errorText_ = errorStream_.str();
6860 MUTEX_UNLOCK( &stream_.mutex );
6861 error( RtAudioError::SYSTEM_ERROR );
6864 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6865 handle->bufferPointer[0] = nextWritePointer;
6868 // Don't bother draining input
6869 if ( handle->drainCounter ) {
6870 handle->drainCounter++;
6874 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6876 // Setup parameters.
6877 if ( stream_.doConvertBuffer[1] ) {
6878 buffer = stream_.deviceBuffer;
6879 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6880 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6883 buffer = stream_.userBuffer[1];
6884 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6885 bufferBytes *= formatBytes( stream_.userFormat );
6888 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6889 long nextReadPointer = handle->bufferPointer[1];
6890 DWORD dsBufferSize = handle->dsBufferSize[1];
6892 // Find out where the write and "safe read" pointers are.
6893 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6894 if ( FAILED( result ) ) {
6895 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6896 errorText_ = errorStream_.str();
6897 MUTEX_UNLOCK( &stream_.mutex );
6898 error( RtAudioError::SYSTEM_ERROR );
6902 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6903 DWORD endRead = nextReadPointer + bufferBytes;
6905 // Handling depends on whether we are INPUT or DUPLEX.
6906 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6907 // then a wait here will drag the write pointers into the forbidden zone.
6909 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6910 // it's in a safe position. This causes dropouts, but it seems to be the only
6911 // practical way to sync up the read and write pointers reliably, given the
6912 // the very complex relationship between phase and increment of the read and write
6915 // In order to minimize audible dropouts in DUPLEX mode, we will
6916 // provide a pre-roll period of 0.5 seconds in which we return
6917 // zeros from the read buffer while the pointers sync up.
6919 if ( stream_.mode == DUPLEX ) {
6920 if ( safeReadPointer < endRead ) {
6921 if ( duplexPrerollBytes <= 0 ) {
6922 // Pre-roll time over. Be more agressive.
6923 int adjustment = endRead-safeReadPointer;
6925 handle->xrun[1] = true;
6927 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6928 // and perform fine adjustments later.
6929 // - small adjustments: back off by twice as much.
6930 if ( adjustment >= 2*bufferBytes )
6931 nextReadPointer = safeReadPointer-2*bufferBytes;
6933 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6935 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6939 // In pre=roll time. Just do it.
6940 nextReadPointer = safeReadPointer - bufferBytes;
6941 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6943 endRead = nextReadPointer + bufferBytes;
6946 else { // mode == INPUT
6947 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6948 // See comments for playback.
6949 double millis = (endRead - safeReadPointer) * 1000.0;
6950 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6951 if ( millis < 1.0 ) millis = 1.0;
6952 Sleep( (DWORD) millis );
6954 // Wake up and find out where we are now.
6955 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6956 if ( FAILED( result ) ) {
6957 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6958 errorText_ = errorStream_.str();
6959 MUTEX_UNLOCK( &stream_.mutex );
6960 error( RtAudioError::SYSTEM_ERROR );
6964 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6968 // Lock free space in the buffer
6969 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6970 &bufferSize1, &buffer2, &bufferSize2, 0 );
6971 if ( FAILED( result ) ) {
6972 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6973 errorText_ = errorStream_.str();
6974 MUTEX_UNLOCK( &stream_.mutex );
6975 error( RtAudioError::SYSTEM_ERROR );
6979 if ( duplexPrerollBytes <= 0 ) {
6980 // Copy our buffer into the DS buffer
6981 CopyMemory( buffer, buffer1, bufferSize1 );
6982 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6985 memset( buffer, 0, bufferSize1 );
6986 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6987 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6990 // Update our buffer offset and unlock sound buffer
6991 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6992 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6993 if ( FAILED( result ) ) {
6994 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6995 errorText_ = errorStream_.str();
6996 MUTEX_UNLOCK( &stream_.mutex );
6997 error( RtAudioError::SYSTEM_ERROR );
7000 handle->bufferPointer[1] = nextReadPointer;
7002 // No byte swapping necessary in DirectSound implementation.
7004 // If necessary, convert 8-bit data from unsigned to signed.
7005 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
7006 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
7008 // Do buffer conversion if necessary.
7009 if ( stream_.doConvertBuffer[1] )
7010 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
7014 MUTEX_UNLOCK( &stream_.mutex );
7015 RtApi::tickStreamTime();
7018 // Definitions for utility functions and callbacks
7019 // specific to the DirectSound implementation.
7021 static unsigned __stdcall callbackHandler( void *ptr )
7023 CallbackInfo *info = (CallbackInfo *) ptr;
7024 RtApiDs *object = (RtApiDs *) info->object;
7025 bool* isRunning = &info->isRunning;
7027 while ( *isRunning == true ) {
7028 object->callbackEvent();
7035 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
7036 LPCTSTR description,
7040 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
7041 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
7044 bool validDevice = false;
7045 if ( probeInfo.isInput == true ) {
7047 LPDIRECTSOUNDCAPTURE object;
7049 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
7050 if ( hr != DS_OK ) return TRUE;
7052 caps.dwSize = sizeof(caps);
7053 hr = object->GetCaps( &caps );
7054 if ( hr == DS_OK ) {
7055 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
7062 LPDIRECTSOUND object;
7063 hr = DirectSoundCreate( lpguid, &object, NULL );
7064 if ( hr != DS_OK ) return TRUE;
7066 caps.dwSize = sizeof(caps);
7067 hr = object->GetCaps( &caps );
7068 if ( hr == DS_OK ) {
7069 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
7075 // If good device, then save its name and guid.
7076 std::string name = convertCharPointerToStdString( description );
7077 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
7078 if ( lpguid == NULL )
7079 name = "Default Device";
7080 if ( validDevice ) {
7081 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
7082 if ( dsDevices[i].name == name ) {
7083 dsDevices[i].found = true;
7084 if ( probeInfo.isInput ) {
7085 dsDevices[i].id[1] = lpguid;
7086 dsDevices[i].validId[1] = true;
7089 dsDevices[i].id[0] = lpguid;
7090 dsDevices[i].validId[0] = true;
7098 device.found = true;
7099 if ( probeInfo.isInput ) {
7100 device.id[1] = lpguid;
7101 device.validId[1] = true;
7104 device.id[0] = lpguid;
7105 device.validId[0] = true;
7107 dsDevices.push_back( device );
7113 static const char* getErrorString( int code )
7117 case DSERR_ALLOCATED:
7118 return "Already allocated";
7120 case DSERR_CONTROLUNAVAIL:
7121 return "Control unavailable";
7123 case DSERR_INVALIDPARAM:
7124 return "Invalid parameter";
7126 case DSERR_INVALIDCALL:
7127 return "Invalid call";
7130 return "Generic error";
7132 case DSERR_PRIOLEVELNEEDED:
7133 return "Priority level needed";
7135 case DSERR_OUTOFMEMORY:
7136 return "Out of memory";
7138 case DSERR_BADFORMAT:
7139 return "The sample rate or the channel format is not supported";
7141 case DSERR_UNSUPPORTED:
7142 return "Not supported";
7144 case DSERR_NODRIVER:
7147 case DSERR_ALREADYINITIALIZED:
7148 return "Already initialized";
7150 case DSERR_NOAGGREGATION:
7151 return "No aggregation";
7153 case DSERR_BUFFERLOST:
7154 return "Buffer lost";
7156 case DSERR_OTHERAPPHASPRIO:
7157 return "Another application already has priority";
7159 case DSERR_UNINITIALIZED:
7160 return "Uninitialized";
7163 return "DirectSound unknown error";
7166 //******************** End of __WINDOWS_DS__ *********************//
7170 #if defined(__LINUX_ALSA__)
7172 #include <alsa/asoundlib.h>
7175 // A structure to hold various information related to the ALSA API
7178 snd_pcm_t *handles[2];
7181 pthread_cond_t runnable_cv;
7185 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
7188 static void *alsaCallbackHandler( void * ptr );
7190 RtApiAlsa :: RtApiAlsa()
7192 // Nothing to do here.
7195 RtApiAlsa :: ~RtApiAlsa()
7197 if ( stream_.state != STREAM_CLOSED ) closeStream();
7200 unsigned int RtApiAlsa :: getDeviceCount( void )
7202 unsigned nDevices = 0;
7203 int result, subdevice, card;
7205 snd_ctl_t *handle = 0;
7207 // Count cards and devices
7209 snd_card_next( &card );
7210 while ( card >= 0 ) {
7211 sprintf( name, "hw:%d", card );
7212 result = snd_ctl_open( &handle, name, 0 );
7215 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7216 errorText_ = errorStream_.str();
7217 error( RtAudioError::WARNING );
7222 result = snd_ctl_pcm_next_device( handle, &subdevice );
7224 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7225 errorText_ = errorStream_.str();
7226 error( RtAudioError::WARNING );
7229 if ( subdevice < 0 )
7235 snd_ctl_close( handle );
7236 snd_card_next( &card );
7239 result = snd_ctl_open( &handle, "default", 0 );
7242 snd_ctl_close( handle );
7248 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
7250 RtAudio::DeviceInfo info;
7251 info.probed = false;
7253 unsigned nDevices = 0;
7254 int result, subdevice, card;
7256 snd_ctl_t *chandle = 0;
7258 // Count cards and devices
7261 snd_card_next( &card );
7262 while ( card >= 0 ) {
7263 sprintf( name, "hw:%d", card );
7264 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7267 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7268 errorText_ = errorStream_.str();
7269 error( RtAudioError::WARNING );
7274 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7276 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7277 errorText_ = errorStream_.str();
7278 error( RtAudioError::WARNING );
7281 if ( subdevice < 0 ) break;
7282 if ( nDevices == device ) {
7283 sprintf( name, "hw:%d,%d", card, subdevice );
7290 snd_ctl_close( chandle );
7291 snd_card_next( &card );
7294 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7295 if ( result == 0 ) {
7296 if ( nDevices == device ) {
7297 strcpy( name, "default" );
7303 if ( nDevices == 0 ) {
7304 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7305 error( RtAudioError::INVALID_USE );
7309 if ( device >= nDevices ) {
7310 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7311 error( RtAudioError::INVALID_USE );
7317 // If a stream is already open, we cannot probe the stream devices.
7318 // Thus, use the saved results.
7319 if ( stream_.state != STREAM_CLOSED &&
7320 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7321 snd_ctl_close( chandle );
7322 if ( device >= devices_.size() ) {
7323 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7324 error( RtAudioError::WARNING );
7327 return devices_[ device ];
7330 int openMode = SND_PCM_ASYNC;
7331 snd_pcm_stream_t stream;
7332 snd_pcm_info_t *pcminfo;
7333 snd_pcm_info_alloca( &pcminfo );
7335 snd_pcm_hw_params_t *params;
7336 snd_pcm_hw_params_alloca( ¶ms );
7338 // First try for playback unless default device (which has subdev -1)
7339 stream = SND_PCM_STREAM_PLAYBACK;
7340 snd_pcm_info_set_stream( pcminfo, stream );
7341 if ( subdevice != -1 ) {
7342 snd_pcm_info_set_device( pcminfo, subdevice );
7343 snd_pcm_info_set_subdevice( pcminfo, 0 );
7345 result = snd_ctl_pcm_info( chandle, pcminfo );
7347 // Device probably doesn't support playback.
7352 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7354 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7355 errorText_ = errorStream_.str();
7356 error( RtAudioError::WARNING );
7360 // The device is open ... fill the parameter structure.
7361 result = snd_pcm_hw_params_any( phandle, params );
7363 snd_pcm_close( phandle );
7364 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7365 errorText_ = errorStream_.str();
7366 error( RtAudioError::WARNING );
7370 // Get output channel information.
7372 result = snd_pcm_hw_params_get_channels_max( params, &value );
7374 snd_pcm_close( phandle );
7375 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7376 errorText_ = errorStream_.str();
7377 error( RtAudioError::WARNING );
7380 info.outputChannels = value;
7381 snd_pcm_close( phandle );
7384 stream = SND_PCM_STREAM_CAPTURE;
7385 snd_pcm_info_set_stream( pcminfo, stream );
7387 // Now try for capture unless default device (with subdev = -1)
7388 if ( subdevice != -1 ) {
7389 result = snd_ctl_pcm_info( chandle, pcminfo );
7390 snd_ctl_close( chandle );
7392 // Device probably doesn't support capture.
7393 if ( info.outputChannels == 0 ) return info;
7394 goto probeParameters;
7398 snd_ctl_close( chandle );
7400 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7402 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7403 errorText_ = errorStream_.str();
7404 error( RtAudioError::WARNING );
7405 if ( info.outputChannels == 0 ) return info;
7406 goto probeParameters;
7409 // The device is open ... fill the parameter structure.
7410 result = snd_pcm_hw_params_any( phandle, params );
7412 snd_pcm_close( phandle );
7413 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7414 errorText_ = errorStream_.str();
7415 error( RtAudioError::WARNING );
7416 if ( info.outputChannels == 0 ) return info;
7417 goto probeParameters;
7420 result = snd_pcm_hw_params_get_channels_max( params, &value );
7422 snd_pcm_close( phandle );
7423 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7424 errorText_ = errorStream_.str();
7425 error( RtAudioError::WARNING );
7426 if ( info.outputChannels == 0 ) return info;
7427 goto probeParameters;
7429 info.inputChannels = value;
7430 snd_pcm_close( phandle );
7432 // If device opens for both playback and capture, we determine the channels.
7433 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7434 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7436 // ALSA doesn't provide default devices so we'll use the first available one.
7437 if ( device == 0 && info.outputChannels > 0 )
7438 info.isDefaultOutput = true;
7439 if ( device == 0 && info.inputChannels > 0 )
7440 info.isDefaultInput = true;
7443 // At this point, we just need to figure out the supported data
7444 // formats and sample rates. We'll proceed by opening the device in
7445 // the direction with the maximum number of channels, or playback if
7446 // they are equal. This might limit our sample rate options, but so
7449 if ( info.outputChannels >= info.inputChannels )
7450 stream = SND_PCM_STREAM_PLAYBACK;
7452 stream = SND_PCM_STREAM_CAPTURE;
7453 snd_pcm_info_set_stream( pcminfo, stream );
7455 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7457 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7458 errorText_ = errorStream_.str();
7459 error( RtAudioError::WARNING );
7463 // The device is open ... fill the parameter structure.
7464 result = snd_pcm_hw_params_any( phandle, params );
7466 snd_pcm_close( phandle );
7467 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7468 errorText_ = errorStream_.str();
7469 error( RtAudioError::WARNING );
7473 // Test our discrete set of sample rate values.
7474 info.sampleRates.clear();
7475 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7476 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7477 info.sampleRates.push_back( SAMPLE_RATES[i] );
7479 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7480 info.preferredSampleRate = SAMPLE_RATES[i];
7483 if ( info.sampleRates.size() == 0 ) {
7484 snd_pcm_close( phandle );
7485 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7486 errorText_ = errorStream_.str();
7487 error( RtAudioError::WARNING );
7491 // Probe the supported data formats ... we don't care about endian-ness just yet
7492 snd_pcm_format_t format;
7493 info.nativeFormats = 0;
7494 format = SND_PCM_FORMAT_S8;
7495 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7496 info.nativeFormats |= RTAUDIO_SINT8;
7497 format = SND_PCM_FORMAT_S16;
7498 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7499 info.nativeFormats |= RTAUDIO_SINT16;
7500 format = SND_PCM_FORMAT_S24;
7501 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7502 info.nativeFormats |= RTAUDIO_SINT24;
7503 format = SND_PCM_FORMAT_S32;
7504 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7505 info.nativeFormats |= RTAUDIO_SINT32;
7506 format = SND_PCM_FORMAT_FLOAT;
7507 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7508 info.nativeFormats |= RTAUDIO_FLOAT32;
7509 format = SND_PCM_FORMAT_FLOAT64;
7510 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7511 info.nativeFormats |= RTAUDIO_FLOAT64;
7513 // Check that we have at least one supported format
7514 if ( info.nativeFormats == 0 ) {
7515 snd_pcm_close( phandle );
7516 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7517 errorText_ = errorStream_.str();
7518 error( RtAudioError::WARNING );
7522 // Get the device name
7524 result = snd_card_get_name( card, &cardname );
7525 if ( result >= 0 ) {
7526 sprintf( name, "hw:%s,%d", cardname, subdevice );
7531 // That's all ... close the device and return
7532 snd_pcm_close( phandle );
7537 void RtApiAlsa :: saveDeviceInfo( void )
7541 unsigned int nDevices = getDeviceCount();
7542 devices_.resize( nDevices );
7543 for ( unsigned int i=0; i<nDevices; i++ )
7544 devices_[i] = getDeviceInfo( i );
7547 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7548 unsigned int firstChannel, unsigned int sampleRate,
7549 RtAudioFormat format, unsigned int *bufferSize,
7550 RtAudio::StreamOptions *options )
7553 #if defined(__RTAUDIO_DEBUG__)
7555 snd_output_stdio_attach(&out, stderr, 0);
7558 // I'm not using the "plug" interface ... too much inconsistent behavior.
7560 unsigned nDevices = 0;
7561 int result, subdevice, card;
7565 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7566 snprintf(name, sizeof(name), "%s", "default");
7568 // Count cards and devices
7570 snd_card_next( &card );
7571 while ( card >= 0 ) {
7572 sprintf( name, "hw:%d", card );
7573 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7575 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7576 errorText_ = errorStream_.str();
7581 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7582 if ( result < 0 ) break;
7583 if ( subdevice < 0 ) break;
7584 if ( nDevices == device ) {
7585 sprintf( name, "hw:%d,%d", card, subdevice );
7586 snd_ctl_close( chandle );
7591 snd_ctl_close( chandle );
7592 snd_card_next( &card );
7595 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7596 if ( result == 0 ) {
7597 if ( nDevices == device ) {
7598 strcpy( name, "default" );
7599 snd_ctl_close( chandle );
7604 snd_ctl_close( chandle );
7606 if ( nDevices == 0 ) {
7607 // This should not happen because a check is made before this function is called.
7608 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7612 if ( device >= nDevices ) {
7613 // This should not happen because a check is made before this function is called.
7614 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7621 // The getDeviceInfo() function will not work for a device that is
7622 // already open. Thus, we'll probe the system before opening a
7623 // stream and save the results for use by getDeviceInfo().
7624 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7625 this->saveDeviceInfo();
7627 snd_pcm_stream_t stream;
7628 if ( mode == OUTPUT )
7629 stream = SND_PCM_STREAM_PLAYBACK;
7631 stream = SND_PCM_STREAM_CAPTURE;
7634 int openMode = SND_PCM_ASYNC;
7635 result = snd_pcm_open( &phandle, name, stream, openMode );
7637 if ( mode == OUTPUT )
7638 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7640 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7641 errorText_ = errorStream_.str();
7645 // Fill the parameter structure.
7646 snd_pcm_hw_params_t *hw_params;
7647 snd_pcm_hw_params_alloca( &hw_params );
7648 result = snd_pcm_hw_params_any( phandle, hw_params );
7650 snd_pcm_close( phandle );
7651 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7652 errorText_ = errorStream_.str();
7656 #if defined(__RTAUDIO_DEBUG__)
7657 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7658 snd_pcm_hw_params_dump( hw_params, out );
7661 // Set access ... check user preference.
7662 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7663 stream_.userInterleaved = false;
7664 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7666 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7667 stream_.deviceInterleaved[mode] = true;
7670 stream_.deviceInterleaved[mode] = false;
7673 stream_.userInterleaved = true;
7674 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7676 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7677 stream_.deviceInterleaved[mode] = false;
7680 stream_.deviceInterleaved[mode] = true;
7684 snd_pcm_close( phandle );
7685 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7686 errorText_ = errorStream_.str();
7690 // Determine how to set the device format.
7691 stream_.userFormat = format;
7692 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7694 if ( format == RTAUDIO_SINT8 )
7695 deviceFormat = SND_PCM_FORMAT_S8;
7696 else if ( format == RTAUDIO_SINT16 )
7697 deviceFormat = SND_PCM_FORMAT_S16;
7698 else if ( format == RTAUDIO_SINT24 )
7699 deviceFormat = SND_PCM_FORMAT_S24;
7700 else if ( format == RTAUDIO_SINT32 )
7701 deviceFormat = SND_PCM_FORMAT_S32;
7702 else if ( format == RTAUDIO_FLOAT32 )
7703 deviceFormat = SND_PCM_FORMAT_FLOAT;
7704 else if ( format == RTAUDIO_FLOAT64 )
7705 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7707 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7708 stream_.deviceFormat[mode] = format;
7712 // The user requested format is not natively supported by the device.
7713 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7714 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7715 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7719 deviceFormat = SND_PCM_FORMAT_FLOAT;
7720 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7721 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7725 deviceFormat = SND_PCM_FORMAT_S32;
7726 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7727 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7731 deviceFormat = SND_PCM_FORMAT_S24;
7732 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7733 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7737 deviceFormat = SND_PCM_FORMAT_S16;
7738 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7739 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7743 deviceFormat = SND_PCM_FORMAT_S8;
7744 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7745 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7749 // If we get here, no supported format was found.
7750 snd_pcm_close( phandle );
7751 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7752 errorText_ = errorStream_.str();
7756 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7758 snd_pcm_close( phandle );
7759 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7760 errorText_ = errorStream_.str();
7764 // Determine whether byte-swaping is necessary.
7765 stream_.doByteSwap[mode] = false;
7766 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7767 result = snd_pcm_format_cpu_endian( deviceFormat );
7769 stream_.doByteSwap[mode] = true;
7770 else if (result < 0) {
7771 snd_pcm_close( phandle );
7772 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7773 errorText_ = errorStream_.str();
7778 // Set the sample rate.
7779 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7781 snd_pcm_close( phandle );
7782 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7783 errorText_ = errorStream_.str();
7787 // Determine the number of channels for this device. We support a possible
7788 // minimum device channel number > than the value requested by the user.
7789 stream_.nUserChannels[mode] = channels;
7791 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7792 unsigned int deviceChannels = value;
7793 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7794 snd_pcm_close( phandle );
7795 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7796 errorText_ = errorStream_.str();
7800 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7802 snd_pcm_close( phandle );
7803 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7804 errorText_ = errorStream_.str();
7807 deviceChannels = value;
7808 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7809 stream_.nDeviceChannels[mode] = deviceChannels;
7811 // Set the device channels.
7812 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7814 snd_pcm_close( phandle );
7815 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7816 errorText_ = errorStream_.str();
7820 // Set the buffer (or period) size.
7822 snd_pcm_uframes_t periodSize = *bufferSize;
7823 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7825 snd_pcm_close( phandle );
7826 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7827 errorText_ = errorStream_.str();
7830 *bufferSize = periodSize;
7832 // Set the buffer number, which in ALSA is referred to as the "period".
7833 unsigned int periods = 0;
7834 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7835 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7836 if ( periods < 2 ) periods = 4; // a fairly safe default value
7837 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7839 snd_pcm_close( phandle );
7840 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7841 errorText_ = errorStream_.str();
7845 // If attempting to setup a duplex stream, the bufferSize parameter
7846 // MUST be the same in both directions!
7847 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7848 snd_pcm_close( phandle );
7849 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7850 errorText_ = errorStream_.str();
7854 stream_.bufferSize = *bufferSize;
7856 // Install the hardware configuration
7857 result = snd_pcm_hw_params( phandle, hw_params );
7859 snd_pcm_close( phandle );
7860 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7861 errorText_ = errorStream_.str();
7865 #if defined(__RTAUDIO_DEBUG__)
7866 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7867 snd_pcm_hw_params_dump( hw_params, out );
7870 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7871 snd_pcm_sw_params_t *sw_params = NULL;
7872 snd_pcm_sw_params_alloca( &sw_params );
7873 snd_pcm_sw_params_current( phandle, sw_params );
7874 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7875 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7876 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7878 // The following two settings were suggested by Theo Veenker
7879 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7880 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7882 // here are two options for a fix
7883 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7884 snd_pcm_uframes_t val;
7885 snd_pcm_sw_params_get_boundary( sw_params, &val );
7886 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7888 result = snd_pcm_sw_params( phandle, sw_params );
7890 snd_pcm_close( phandle );
7891 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7892 errorText_ = errorStream_.str();
7896 #if defined(__RTAUDIO_DEBUG__)
7897 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7898 snd_pcm_sw_params_dump( sw_params, out );
7901 // Set flags for buffer conversion
7902 stream_.doConvertBuffer[mode] = false;
7903 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7904 stream_.doConvertBuffer[mode] = true;
7905 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7906 stream_.doConvertBuffer[mode] = true;
7907 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7908 stream_.nUserChannels[mode] > 1 )
7909 stream_.doConvertBuffer[mode] = true;
7911 // Allocate the ApiHandle if necessary and then save.
7912 AlsaHandle *apiInfo = 0;
7913 if ( stream_.apiHandle == 0 ) {
7915 apiInfo = (AlsaHandle *) new AlsaHandle;
7917 catch ( std::bad_alloc& ) {
7918 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7922 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7923 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7927 stream_.apiHandle = (void *) apiInfo;
7928 apiInfo->handles[0] = 0;
7929 apiInfo->handles[1] = 0;
7932 apiInfo = (AlsaHandle *) stream_.apiHandle;
7934 apiInfo->handles[mode] = phandle;
7937 // Allocate necessary internal buffers.
7938 unsigned long bufferBytes;
7939 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7940 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7941 if ( stream_.userBuffer[mode] == NULL ) {
7942 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7946 if ( stream_.doConvertBuffer[mode] ) {
7948 bool makeBuffer = true;
7949 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7950 if ( mode == INPUT ) {
7951 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7952 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7953 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7958 bufferBytes *= *bufferSize;
7959 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7960 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7961 if ( stream_.deviceBuffer == NULL ) {
7962 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7968 stream_.sampleRate = sampleRate;
7969 stream_.nBuffers = periods;
7970 stream_.device[mode] = device;
7971 stream_.state = STREAM_STOPPED;
7973 // Setup the buffer conversion information structure.
7974 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7976 // Setup thread if necessary.
7977 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7978 // We had already set up an output stream.
7979 stream_.mode = DUPLEX;
7980 // Link the streams if possible.
7981 apiInfo->synchronized = false;
7982 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7983 apiInfo->synchronized = true;
7985 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7986 error( RtAudioError::WARNING );
7990 stream_.mode = mode;
7992 // Setup callback thread.
7993 stream_.callbackInfo.object = (void *) this;
7995 // Set the thread attributes for joinable and realtime scheduling
7996 // priority (optional). The higher priority will only take affect
7997 // if the program is run as root or suid. Note, under Linux
7998 // processes with CAP_SYS_NICE privilege, a user can change
7999 // scheduling policy and priority (thus need not be root). See
8000 // POSIX "capabilities".
8001 pthread_attr_t attr;
8002 pthread_attr_init( &attr );
8003 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8004 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8005 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8006 stream_.callbackInfo.doRealtime = true;
8007 struct sched_param param;
8008 int priority = options->priority;
8009 int min = sched_get_priority_min( SCHED_RR );
8010 int max = sched_get_priority_max( SCHED_RR );
8011 if ( priority < min ) priority = min;
8012 else if ( priority > max ) priority = max;
8013 param.sched_priority = priority;
8015 // Set the policy BEFORE the priority. Otherwise it fails.
8016 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8017 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8018 // This is definitely required. Otherwise it fails.
8019 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8020 pthread_attr_setschedparam(&attr, ¶m);
8023 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8025 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8028 stream_.callbackInfo.isRunning = true;
8029 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
8030 pthread_attr_destroy( &attr );
8032 // Failed. Try instead with default attributes.
8033 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
8035 stream_.callbackInfo.isRunning = false;
8036 errorText_ = "RtApiAlsa::error creating callback thread!";
8046 pthread_cond_destroy( &apiInfo->runnable_cv );
8047 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8048 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8050 stream_.apiHandle = 0;
8053 if ( phandle) snd_pcm_close( phandle );
8055 for ( int i=0; i<2; i++ ) {
8056 if ( stream_.userBuffer[i] ) {
8057 free( stream_.userBuffer[i] );
8058 stream_.userBuffer[i] = 0;
8062 if ( stream_.deviceBuffer ) {
8063 free( stream_.deviceBuffer );
8064 stream_.deviceBuffer = 0;
8067 stream_.state = STREAM_CLOSED;
8071 void RtApiAlsa :: closeStream()
8073 if ( stream_.state == STREAM_CLOSED ) {
8074 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
8075 error( RtAudioError::WARNING );
8079 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8080 stream_.callbackInfo.isRunning = false;
8081 MUTEX_LOCK( &stream_.mutex );
8082 if ( stream_.state == STREAM_STOPPED ) {
8083 apiInfo->runnable = true;
8084 pthread_cond_signal( &apiInfo->runnable_cv );
8086 MUTEX_UNLOCK( &stream_.mutex );
8087 pthread_join( stream_.callbackInfo.thread, NULL );
8089 if ( stream_.state == STREAM_RUNNING ) {
8090 stream_.state = STREAM_STOPPED;
8091 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
8092 snd_pcm_drop( apiInfo->handles[0] );
8093 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
8094 snd_pcm_drop( apiInfo->handles[1] );
8098 pthread_cond_destroy( &apiInfo->runnable_cv );
8099 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8100 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8102 stream_.apiHandle = 0;
8105 for ( int i=0; i<2; i++ ) {
8106 if ( stream_.userBuffer[i] ) {
8107 free( stream_.userBuffer[i] );
8108 stream_.userBuffer[i] = 0;
8112 if ( stream_.deviceBuffer ) {
8113 free( stream_.deviceBuffer );
8114 stream_.deviceBuffer = 0;
8117 stream_.mode = UNINITIALIZED;
8118 stream_.state = STREAM_CLOSED;
8121 void RtApiAlsa :: startStream()
8123 // This method calls snd_pcm_prepare if the device isn't already in that state.
8126 if ( stream_.state == STREAM_RUNNING ) {
8127 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
8128 error( RtAudioError::WARNING );
8132 MUTEX_LOCK( &stream_.mutex );
8134 #if defined( HAVE_GETTIMEOFDAY )
8135 gettimeofday( &stream_.lastTickTimestamp, NULL );
8139 snd_pcm_state_t state;
8140 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8141 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8142 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8143 state = snd_pcm_state( handle[0] );
8144 if ( state != SND_PCM_STATE_PREPARED ) {
8145 result = snd_pcm_prepare( handle[0] );
8147 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
8148 errorText_ = errorStream_.str();
8154 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8155 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
8156 state = snd_pcm_state( handle[1] );
8157 if ( state != SND_PCM_STATE_PREPARED ) {
8158 result = snd_pcm_prepare( handle[1] );
8160 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
8161 errorText_ = errorStream_.str();
8167 stream_.state = STREAM_RUNNING;
8170 apiInfo->runnable = true;
8171 pthread_cond_signal( &apiInfo->runnable_cv );
8172 MUTEX_UNLOCK( &stream_.mutex );
8174 if ( result >= 0 ) return;
8175 error( RtAudioError::SYSTEM_ERROR );
8178 void RtApiAlsa :: stopStream()
8181 if ( stream_.state == STREAM_STOPPED ) {
8182 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8183 error( RtAudioError::WARNING );
8187 stream_.state = STREAM_STOPPED;
8188 MUTEX_LOCK( &stream_.mutex );
8191 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8192 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8193 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8194 if ( apiInfo->synchronized )
8195 result = snd_pcm_drop( handle[0] );
8197 result = snd_pcm_drain( handle[0] );
8199 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8200 errorText_ = errorStream_.str();
8205 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8206 result = snd_pcm_drop( handle[1] );
8208 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8209 errorText_ = errorStream_.str();
8215 apiInfo->runnable = false; // fixes high CPU usage when stopped
8216 MUTEX_UNLOCK( &stream_.mutex );
8218 if ( result >= 0 ) return;
8219 error( RtAudioError::SYSTEM_ERROR );
8222 void RtApiAlsa :: abortStream()
8225 if ( stream_.state == STREAM_STOPPED ) {
8226 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8227 error( RtAudioError::WARNING );
8231 stream_.state = STREAM_STOPPED;
8232 MUTEX_LOCK( &stream_.mutex );
8235 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8236 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8237 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8238 result = snd_pcm_drop( handle[0] );
8240 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8241 errorText_ = errorStream_.str();
8246 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8247 result = snd_pcm_drop( handle[1] );
8249 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8250 errorText_ = errorStream_.str();
8256 apiInfo->runnable = false; // fixes high CPU usage when stopped
8257 MUTEX_UNLOCK( &stream_.mutex );
8259 if ( result >= 0 ) return;
8260 error( RtAudioError::SYSTEM_ERROR );
8263 void RtApiAlsa :: callbackEvent()
8265 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8266 if ( stream_.state == STREAM_STOPPED ) {
8267 MUTEX_LOCK( &stream_.mutex );
8268 while ( !apiInfo->runnable )
8269 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
8271 if ( stream_.state != STREAM_RUNNING ) {
8272 MUTEX_UNLOCK( &stream_.mutex );
8275 MUTEX_UNLOCK( &stream_.mutex );
8278 if ( stream_.state == STREAM_CLOSED ) {
8279 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8280 error( RtAudioError::WARNING );
8284 int doStopStream = 0;
8285 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8286 double streamTime = getStreamTime();
8287 RtAudioStreamStatus status = 0;
8288 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8289 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8290 apiInfo->xrun[0] = false;
8292 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8293 status |= RTAUDIO_INPUT_OVERFLOW;
8294 apiInfo->xrun[1] = false;
8296 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8297 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
8299 if ( doStopStream == 2 ) {
8304 MUTEX_LOCK( &stream_.mutex );
8306 // The state might change while waiting on a mutex.
8307 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8313 snd_pcm_sframes_t frames;
8314 RtAudioFormat format;
8315 handle = (snd_pcm_t **) apiInfo->handles;
8317 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8319 // Setup parameters.
8320 if ( stream_.doConvertBuffer[1] ) {
8321 buffer = stream_.deviceBuffer;
8322 channels = stream_.nDeviceChannels[1];
8323 format = stream_.deviceFormat[1];
8326 buffer = stream_.userBuffer[1];
8327 channels = stream_.nUserChannels[1];
8328 format = stream_.userFormat;
8331 // Read samples from device in interleaved/non-interleaved format.
8332 if ( stream_.deviceInterleaved[1] )
8333 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
8335 void *bufs[channels];
8336 size_t offset = stream_.bufferSize * formatBytes( format );
8337 for ( int i=0; i<channels; i++ )
8338 bufs[i] = (void *) (buffer + (i * offset));
8339 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
8342 if ( result < (int) stream_.bufferSize ) {
8343 // Either an error or overrun occured.
8344 if ( result == -EPIPE ) {
8345 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8346 if ( state == SND_PCM_STATE_XRUN ) {
8347 apiInfo->xrun[1] = true;
8348 result = snd_pcm_prepare( handle[1] );
8350 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8351 errorText_ = errorStream_.str();
8355 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8356 errorText_ = errorStream_.str();
8360 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8361 errorText_ = errorStream_.str();
8363 error( RtAudioError::WARNING );
8367 // Do byte swapping if necessary.
8368 if ( stream_.doByteSwap[1] )
8369 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8371 // Do buffer conversion if necessary.
8372 if ( stream_.doConvertBuffer[1] )
8373 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8375 // Check stream latency
8376 result = snd_pcm_delay( handle[1], &frames );
8377 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
8382 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8384 // Setup parameters and do buffer conversion if necessary.
8385 if ( stream_.doConvertBuffer[0] ) {
8386 buffer = stream_.deviceBuffer;
8387 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8388 channels = stream_.nDeviceChannels[0];
8389 format = stream_.deviceFormat[0];
8392 buffer = stream_.userBuffer[0];
8393 channels = stream_.nUserChannels[0];
8394 format = stream_.userFormat;
8397 // Do byte swapping if necessary.
8398 if ( stream_.doByteSwap[0] )
8399 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8401 // Write samples to device in interleaved/non-interleaved format.
8402 if ( stream_.deviceInterleaved[0] )
8403 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
8405 void *bufs[channels];
8406 size_t offset = stream_.bufferSize * formatBytes( format );
8407 for ( int i=0; i<channels; i++ )
8408 bufs[i] = (void *) (buffer + (i * offset));
8409 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
8412 if ( result < (int) stream_.bufferSize ) {
8413 // Either an error or underrun occured.
8414 if ( result == -EPIPE ) {
8415 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8416 if ( state == SND_PCM_STATE_XRUN ) {
8417 apiInfo->xrun[0] = true;
8418 result = snd_pcm_prepare( handle[0] );
8420 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8421 errorText_ = errorStream_.str();
8424 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8427 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8428 errorText_ = errorStream_.str();
8432 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8433 errorText_ = errorStream_.str();
8435 error( RtAudioError::WARNING );
8439 // Check stream latency
8440 result = snd_pcm_delay( handle[0], &frames );
8441 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8445 MUTEX_UNLOCK( &stream_.mutex );
8447 RtApi::tickStreamTime();
8448 if ( doStopStream == 1 ) this->stopStream();
8451 static void *alsaCallbackHandler( void *ptr )
8453 CallbackInfo *info = (CallbackInfo *) ptr;
8454 RtApiAlsa *object = (RtApiAlsa *) info->object;
8455 bool *isRunning = &info->isRunning;
8457 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8458 if ( info->doRealtime ) {
8459 std::cerr << "RtAudio alsa: " <<
8460 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8461 "running realtime scheduling" << std::endl;
8465 while ( *isRunning == true ) {
8466 pthread_testcancel();
8467 object->callbackEvent();
8470 pthread_exit( NULL );
8473 //******************** End of __LINUX_ALSA__ *********************//
8476 #if defined(__LINUX_PULSE__)
8478 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8479 // and Tristan Matthews.
8481 #include <pulse/error.h>
8482 #include <pulse/simple.h>
8485 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8486 44100, 48000, 96000, 0};
8488 struct rtaudio_pa_format_mapping_t {
8489 RtAudioFormat rtaudio_format;
8490 pa_sample_format_t pa_format;
8493 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8494 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8495 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8496 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8497 {0, PA_SAMPLE_INVALID}};
8499 struct PulseAudioHandle {
8503 pthread_cond_t runnable_cv;
8505 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
8508 RtApiPulse::~RtApiPulse()
8510 if ( stream_.state != STREAM_CLOSED )
8514 unsigned int RtApiPulse::getDeviceCount( void )
8519 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8521 RtAudio::DeviceInfo info;
8523 info.name = "PulseAudio";
8524 info.outputChannels = 2;
8525 info.inputChannels = 2;
8526 info.duplexChannels = 2;
8527 info.isDefaultOutput = true;
8528 info.isDefaultInput = true;
8530 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8531 info.sampleRates.push_back( *sr );
8533 info.preferredSampleRate = 48000;
8534 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
8539 static void *pulseaudio_callback( void * user )
8541 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8542 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8543 volatile bool *isRunning = &cbi->isRunning;
8545 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8546 if (cbi->doRealtime) {
8547 std::cerr << "RtAudio pulse: " <<
8548 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8549 "running realtime scheduling" << std::endl;
8553 while ( *isRunning ) {
8554 pthread_testcancel();
8555 context->callbackEvent();
8558 pthread_exit( NULL );
8561 void RtApiPulse::closeStream( void )
8563 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8565 stream_.callbackInfo.isRunning = false;
8567 MUTEX_LOCK( &stream_.mutex );
8568 if ( stream_.state == STREAM_STOPPED ) {
8569 pah->runnable = true;
8570 pthread_cond_signal( &pah->runnable_cv );
8572 MUTEX_UNLOCK( &stream_.mutex );
8574 pthread_join( pah->thread, 0 );
8575 if ( pah->s_play ) {
8576 pa_simple_flush( pah->s_play, NULL );
8577 pa_simple_free( pah->s_play );
8580 pa_simple_free( pah->s_rec );
8582 pthread_cond_destroy( &pah->runnable_cv );
8584 stream_.apiHandle = 0;
8587 if ( stream_.userBuffer[0] ) {
8588 free( stream_.userBuffer[0] );
8589 stream_.userBuffer[0] = 0;
8591 if ( stream_.userBuffer[1] ) {
8592 free( stream_.userBuffer[1] );
8593 stream_.userBuffer[1] = 0;
8596 stream_.state = STREAM_CLOSED;
8597 stream_.mode = UNINITIALIZED;
8600 void RtApiPulse::callbackEvent( void )
8602 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8604 if ( stream_.state == STREAM_STOPPED ) {
8605 MUTEX_LOCK( &stream_.mutex );
8606 while ( !pah->runnable )
8607 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8609 if ( stream_.state != STREAM_RUNNING ) {
8610 MUTEX_UNLOCK( &stream_.mutex );
8613 MUTEX_UNLOCK( &stream_.mutex );
8616 if ( stream_.state == STREAM_CLOSED ) {
8617 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8618 "this shouldn't happen!";
8619 error( RtAudioError::WARNING );
8623 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8624 double streamTime = getStreamTime();
8625 RtAudioStreamStatus status = 0;
8626 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8627 stream_.bufferSize, streamTime, status,
8628 stream_.callbackInfo.userData );
8630 if ( doStopStream == 2 ) {
8635 MUTEX_LOCK( &stream_.mutex );
8636 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8637 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8639 if ( stream_.state != STREAM_RUNNING )
8644 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8645 if ( stream_.doConvertBuffer[OUTPUT] ) {
8646 convertBuffer( stream_.deviceBuffer,
8647 stream_.userBuffer[OUTPUT],
8648 stream_.convertInfo[OUTPUT] );
8649 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8650 formatBytes( stream_.deviceFormat[OUTPUT] );
8652 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8653 formatBytes( stream_.userFormat );
8655 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8656 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8657 pa_strerror( pa_error ) << ".";
8658 errorText_ = errorStream_.str();
8659 error( RtAudioError::WARNING );
8663 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8664 if ( stream_.doConvertBuffer[INPUT] )
8665 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8666 formatBytes( stream_.deviceFormat[INPUT] );
8668 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8669 formatBytes( stream_.userFormat );
8671 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8672 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8673 pa_strerror( pa_error ) << ".";
8674 errorText_ = errorStream_.str();
8675 error( RtAudioError::WARNING );
8677 if ( stream_.doConvertBuffer[INPUT] ) {
8678 convertBuffer( stream_.userBuffer[INPUT],
8679 stream_.deviceBuffer,
8680 stream_.convertInfo[INPUT] );
8685 MUTEX_UNLOCK( &stream_.mutex );
8686 RtApi::tickStreamTime();
8688 if ( doStopStream == 1 )
8692 void RtApiPulse::startStream( void )
8694 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8696 if ( stream_.state == STREAM_CLOSED ) {
8697 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8698 error( RtAudioError::INVALID_USE );
8701 if ( stream_.state == STREAM_RUNNING ) {
8702 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8703 error( RtAudioError::WARNING );
8707 MUTEX_LOCK( &stream_.mutex );
8709 #if defined( HAVE_GETTIMEOFDAY )
8710 gettimeofday( &stream_.lastTickTimestamp, NULL );
8713 stream_.state = STREAM_RUNNING;
8715 pah->runnable = true;
8716 pthread_cond_signal( &pah->runnable_cv );
8717 MUTEX_UNLOCK( &stream_.mutex );
8720 void RtApiPulse::stopStream( void )
8722 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8724 if ( stream_.state == STREAM_CLOSED ) {
8725 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8726 error( RtAudioError::INVALID_USE );
8729 if ( stream_.state == STREAM_STOPPED ) {
8730 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8731 error( RtAudioError::WARNING );
8735 stream_.state = STREAM_STOPPED;
8736 MUTEX_LOCK( &stream_.mutex );
8738 if ( pah && pah->s_play ) {
8740 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8741 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8742 pa_strerror( pa_error ) << ".";
8743 errorText_ = errorStream_.str();
8744 MUTEX_UNLOCK( &stream_.mutex );
8745 error( RtAudioError::SYSTEM_ERROR );
8750 stream_.state = STREAM_STOPPED;
8751 MUTEX_UNLOCK( &stream_.mutex );
8754 void RtApiPulse::abortStream( void )
8756 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8758 if ( stream_.state == STREAM_CLOSED ) {
8759 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8760 error( RtAudioError::INVALID_USE );
8763 if ( stream_.state == STREAM_STOPPED ) {
8764 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8765 error( RtAudioError::WARNING );
8769 stream_.state = STREAM_STOPPED;
8770 MUTEX_LOCK( &stream_.mutex );
8772 if ( pah && pah->s_play ) {
8774 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8775 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8776 pa_strerror( pa_error ) << ".";
8777 errorText_ = errorStream_.str();
8778 MUTEX_UNLOCK( &stream_.mutex );
8779 error( RtAudioError::SYSTEM_ERROR );
8784 stream_.state = STREAM_STOPPED;
8785 MUTEX_UNLOCK( &stream_.mutex );
// Open and configure the PulseAudio (simple API) endpoint for one direction
// (INPUT or OUTPUT) of the stream: validates device/channels/rate/format,
// sets up RtApi's conversion flags and buffers, connects to the Pulse server
// via pa_simple_new(), and spawns the callback thread on first open.
// Returns true on success, false on any failure (after cleanup at the end).
// NOTE(review): the numbering gaps in this listing (e.g. 8791 -> 8793) show
// that source lines are elided from this excerpt; several braces/returns are
// not visible here. Fixes below are limited to in-place textual defects.
8788 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8789 unsigned int channels, unsigned int firstChannel,
8790 unsigned int sampleRate, RtAudioFormat format,
8791 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8793 PulseAudioHandle *pah = 0;
8794 unsigned long bufferBytes = 0;
// PulseAudio backend exposes exactly one device (id 0) and only mono/stereo.
8797 if ( device != 0 ) return false;
8798 if ( mode != INPUT && mode != OUTPUT ) return false;
8799 if ( channels != 1 && channels != 2 ) {
8800 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8803 ss.channels = channels;
8805 if ( firstChannel != 0 ) return false;
// Sample rate must be one of the entries in the SUPPORTED_SAMPLERATES table
// (zero-terminated array).
8807 bool sr_found = false;
8808 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8809 if ( sampleRate == *sr ) {
8811 stream_.sampleRate = sampleRate;
8812 ss.rate = sampleRate;
8817 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
// Map the requested RtAudio format onto a native Pulse sample format.
8822 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8823 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8824 if ( format == sf->rtaudio_format ) {
8826 stream_.userFormat = sf->rtaudio_format;
8827 stream_.deviceFormat[mode] = stream_.userFormat;
8828 ss.format = sf->pa_format;
8832 if ( !sf_found ) { // Use internal data format conversion.
8833 stream_.userFormat = format;
8834 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8835 ss.format = PA_SAMPLE_FLOAT32LE;
8838 // Set other stream parameters.
8839 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8840 else stream_.userInterleaved = true;
8841 stream_.deviceInterleaved[mode] = true;
8842 stream_.nBuffers = 1;
8843 stream_.doByteSwap[mode] = false;
8844 stream_.nUserChannels[mode] = channels;
8845 stream_.nDeviceChannels[mode] = channels + firstChannel;
8846 stream_.channelOffset[mode] = 0;
8847 std::string streamName = "RtAudio";
8849 // Set flags for buffer conversion.
8850 stream_.doConvertBuffer[mode] = false;
8851 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8852 stream_.doConvertBuffer[mode] = true;
8853 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8854 stream_.doConvertBuffer[mode] = true;
8856 // Allocate necessary internal buffers.
8857 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8858 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8859 if ( stream_.userBuffer[mode] == NULL ) {
8860 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8863 stream_.bufferSize = *bufferSize;
8865 if ( stream_.doConvertBuffer[mode] ) {
8867 bool makeBuffer = true;
8868 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8869 if ( mode == INPUT ) {
// Reuse the output-side device buffer for input when it is already big enough.
8870 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8871 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8872 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8877 bufferBytes *= *bufferSize;
8878 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8879 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8880 if ( stream_.deviceBuffer == NULL ) {
8881 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8887 stream_.device[mode] = device;
8889 // Setup the buffer conversion information structure.
8890 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
// Lazily allocate the per-stream Pulse handle (shared by both directions).
// NOTE(review): the inner declaration shadows the outer `pah`; it is
// re-fetched from stream_.apiHandle afterwards (line 8905).
8892 if ( !stream_.apiHandle ) {
8893 PulseAudioHandle *pah = new PulseAudioHandle;
8895 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8899 stream_.apiHandle = pah;
8900 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8901 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8905 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8908 if ( options && !options->streamName.empty() ) streamName = options->streamName;
8911 pa_buffer_attr buffer_attr;
8912 buffer_attr.fragsize = bufferBytes;
// maxlength is unsigned; -1 wraps to the maximum, which Pulse treats as
// "use the server default".
8913 buffer_attr.maxlength = -1;
8915 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8916 if ( !pah->s_rec ) {
8917 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8922 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8923 if ( !pah->s_play ) {
8924 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
// Track overall stream mode: first open sets it; opening the opposite
// direction afterwards promotes the stream to DUPLEX.
8932 if ( stream_.mode == UNINITIALIZED )
8933 stream_.mode = mode;
8934 else if ( stream_.mode == mode )
8937 stream_.mode = DUPLEX;
8939 if ( !stream_.callbackInfo.isRunning ) {
8940 stream_.callbackInfo.object = this;
8942 stream_.state = STREAM_STOPPED;
8943 // Set the thread attributes for joinable and realtime scheduling
8944 // priority (optional). The higher priority will only take affect
8945 // if the program is run as root or suid. Note, under Linux
8946 // processes with CAP_SYS_NICE privilege, a user can change
8947 // scheduling policy and priority (thus need not be root). See
8948 // POSIX "capabilities".
8949 pthread_attr_t attr;
8950 pthread_attr_init( &attr );
8951 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8952 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8953 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8954 stream_.callbackInfo.doRealtime = true;
8955 struct sched_param param;
8956 int priority = options->priority;
// Clamp the requested priority into the valid SCHED_RR range.
8957 int min = sched_get_priority_min( SCHED_RR );
8958 int max = sched_get_priority_max( SCHED_RR );
8959 if ( priority < min ) priority = min;
8960 else if ( priority > max ) priority = max;
8961 param.sched_priority = priority;
8963 // Set the policy BEFORE the priority. Otherwise it fails.
8964 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8965 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8966 // This is definitely required. Otherwise it fails.
8967 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
// FIX(review): was the mojibake "¶m" (corrupted "&param").
8968 pthread_attr_setschedparam(&attr, &param);
8971 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8973 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8976 stream_.callbackInfo.isRunning = true;
8977 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8978 pthread_attr_destroy(&attr);
8980 // Failed. Try instead with default attributes.
8981 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8983 stream_.callbackInfo.isRunning = false;
8984 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
// Error path: tear down everything allocated above.
8993 if ( pah && stream_.callbackInfo.isRunning ) {
8994 pthread_cond_destroy( &pah->runnable_cv );
8996 stream_.apiHandle = 0;
8999 for ( int i=0; i<2; i++ ) {
9000 if ( stream_.userBuffer[i] ) {
9001 free( stream_.userBuffer[i] );
9002 stream_.userBuffer[i] = 0;
9006 if ( stream_.deviceBuffer ) {
9007 free( stream_.deviceBuffer );
9008 stream_.deviceBuffer = 0;
9011 stream_.state = STREAM_CLOSED;
9015 //******************** End of __LINUX_PULSE__ *********************//
9018 #if defined(__LINUX_OSS__)
9021 #include <sys/ioctl.h>
9024 #include <sys/soundcard.h>
9028 static void *ossCallbackHandler(void * ptr);
9030 // A structure to hold various information related to the OSS API
9033 int id[2]; // device ids
9036 pthread_cond_t runnable;
9039 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Default constructor — no backend state to initialize; members are set up
// lazily in probeDeviceOpen().
9042 RtApiOss :: RtApiOss()
9044 // Nothing to do here.
// Destructor — ensure any open stream is shut down and its resources
// (threads, fds, buffers) released before the object dies.
9047 RtApiOss :: ~RtApiOss()
9049 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Return the number of OSS audio devices, queried via the mixer's
// SNDCTL_SYSINFO ioctl (requires OSS v4+). Failures are reported as
// warnings; the elided branches (see numbering gaps) presumably return 0 —
// TODO confirm against the full source.
9052 unsigned int RtApiOss :: getDeviceCount( void )
9054 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9055 if ( mixerfd == -1 ) {
9056 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
9057 error( RtAudioError::WARNING );
9061 oss_sysinfo sysinfo;
9062 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
9064 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
9065 error( RtAudioError::WARNING );
9070 return sysinfo.numaudios;
// Probe one OSS device via /dev/mixer (SNDCTL_SYSINFO / SNDCTL_AUDIOINFO)
// and fill an RtAudio::DeviceInfo: channel counts, native formats, and
// supported sample rates. Errors are raised as warnings (or INVALID_USE for
// bad device ids); info.probed starts false and is presumably flipped true
// on the elided success path — TODO confirm against the full source.
9073 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
9075 RtAudio::DeviceInfo info;
9076 info.probed = false;
9078 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9079 if ( mixerfd == -1 ) {
9080 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
9081 error( RtAudioError::WARNING );
9085 oss_sysinfo sysinfo;
9086 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9087 if ( result == -1 ) {
9089 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
9090 error( RtAudioError::WARNING );
9094 unsigned nDevices = sysinfo.numaudios;
9095 if ( nDevices == 0 ) {
9097 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
9098 error( RtAudioError::INVALID_USE );
9102 if ( device >= nDevices ) {
9104 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
9105 error( RtAudioError::INVALID_USE );
9109 oss_audioinfo ainfo;
9111 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9113 if ( result == -1 ) {
9114 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9115 errorText_ = errorStream_.str();
9116 error( RtAudioError::WARNING );
// Capability bits -> channel counts. Duplex channels = min(in, out).
// NOTE(review): the inner PCM_CAP_DUPLEX test is redundant — the enclosing
// `if` already established it.
9121 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
9122 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
9123 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
9124 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
9125 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
9128 // Probe data formats ... do for input
9129 unsigned long mask = ainfo.iformats;
9130 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
9131 info.nativeFormats |= RTAUDIO_SINT16;
9132 if ( mask & AFMT_S8 )
9133 info.nativeFormats |= RTAUDIO_SINT8;
9134 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
9135 info.nativeFormats |= RTAUDIO_SINT32;
9137 if ( mask & AFMT_FLOAT )
9138 info.nativeFormats |= RTAUDIO_FLOAT32;
9140 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
9141 info.nativeFormats |= RTAUDIO_SINT24;
9143 // Check that we have at least one supported format
9144 if ( info.nativeFormats == 0 ) {
9145 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
9146 errorText_ = errorStream_.str();
9147 error( RtAudioError::WARNING );
9151 // Probe the supported sample rates.
9152 info.sampleRates.clear();
// If the device reports an explicit rate list, intersect it with RtAudio's
// SAMPLE_RATES table; otherwise (else-branch below) accept every table rate
// inside the device's [min_rate, max_rate] range. Preferred rate: the
// highest supported rate <= 48 kHz (or the first found, if none <= 48 kHz).
9153 if ( ainfo.nrates ) {
9154 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
9155 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9156 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
9157 info.sampleRates.push_back( SAMPLE_RATES[k] );
9159 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9160 info.preferredSampleRate = SAMPLE_RATES[k];
9168 // Check min and max rate values;
9169 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9170 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
9171 info.sampleRates.push_back( SAMPLE_RATES[k] );
9173 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9174 info.preferredSampleRate = SAMPLE_RATES[k];
9179 if ( info.sampleRates.size() == 0 ) {
9180 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
9181 errorText_ = errorStream_.str();
9182 error( RtAudioError::WARNING );
9186 info.name = ainfo.name;
// Open and configure one direction of an OSS stream: locate the device via
// /dev/mixer, open its devnode, negotiate channels/format/fragment-size/
// sample-rate via ioctls, set up RtApi conversion buffers, and spawn the
// callback thread. Duplex on the same device reuses a single O_RDWR fd.
// Returns true on success; the error paths fall through to the cleanup
// section at the end. NOTE(review): numbering gaps show elided source lines
// (closing braces, returns); fixes here are limited to in-place defects.
9193 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9194 unsigned int firstChannel, unsigned int sampleRate,
9195 RtAudioFormat format, unsigned int *bufferSize,
9196 RtAudio::StreamOptions *options )
9198 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9199 if ( mixerfd == -1 ) {
9200 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9204 oss_sysinfo sysinfo;
9205 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9206 if ( result == -1 ) {
9208 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9212 unsigned nDevices = sysinfo.numaudios;
9213 if ( nDevices == 0 ) {
9214 // This should not happen because a check is made before this function is called.
9216 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9220 if ( device >= nDevices ) {
9221 // This should not happen because a check is made before this function is called.
9223 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9227 oss_audioinfo ainfo;
9229 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9231 if ( result == -1 ) {
// FIX(review): error text previously said "RtApiOss::getDeviceInfo:" —
// wrong function name (copy/paste from getDeviceInfo).
9232 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") info.";
9233 errorText_ = errorStream_.str();
9237 // Check if device supports input or output
9238 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9239 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9240 if ( mode == OUTPUT )
9241 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9243 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9244 errorText_ = errorStream_.str();
9249 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9250 if ( mode == OUTPUT )
9252 else { // mode == INPUT
9253 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9254 // We just set the same device for playback ... close and reopen for duplex (OSS only).
9255 close( handle->id[0] );
9257 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9258 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9259 errorText_ = errorStream_.str();
9262 // Check that the number previously set channels is the same.
9263 if ( stream_.nUserChannels[0] != channels ) {
9264 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9265 errorText_ = errorStream_.str();
9274 // Set exclusive access if specified.
9275 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9277 // Try to open the device.
9279 fd = open( ainfo.devnode, flags, 0 );
9281 if ( errno == EBUSY )
9282 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9284 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9285 errorText_ = errorStream_.str();
9289 // For duplex operation, specifically set this mode (this doesn't seem to work).
// NOTE(review): `flags | O_RDWR` is always non-zero, so SETDUPLEX is
// attempted on every open; `flags & O_RDWR` was almost certainly intended.
// Left as-is here because the elided error handling below may depend on the
// current behavior — flag for upstream confirmation.
9291 if ( flags | O_RDWR ) {
9292 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9293 if ( result == -1) {
9294 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9295 errorText_ = errorStream_.str();
9301 // Check the device channel support.
9302 stream_.nUserChannels[mode] = channels;
9303 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9305 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9306 errorText_ = errorStream_.str();
9310 // Set the number of channels.
9311 int deviceChannels = channels + firstChannel;
9312 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9313 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9315 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9316 errorText_ = errorStream_.str();
9319 stream_.nDeviceChannels[mode] = deviceChannels;
9321 // Get the data format mask
9323 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9324 if ( result == -1 ) {
9326 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9327 errorText_ = errorStream_.str();
9331 // Determine how to set the device format.
// Prefer the native-endian variant of the requested format; fall back to the
// opposite-endian (_OE) variant with byte-swapping enabled.
9332 stream_.userFormat = format;
9333 int deviceFormat = -1;
9334 stream_.doByteSwap[mode] = false;
9335 if ( format == RTAUDIO_SINT8 ) {
9336 if ( mask & AFMT_S8 ) {
9337 deviceFormat = AFMT_S8;
9338 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9341 else if ( format == RTAUDIO_SINT16 ) {
9342 if ( mask & AFMT_S16_NE ) {
9343 deviceFormat = AFMT_S16_NE;
9344 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9346 else if ( mask & AFMT_S16_OE ) {
9347 deviceFormat = AFMT_S16_OE;
9348 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9349 stream_.doByteSwap[mode] = true;
9352 else if ( format == RTAUDIO_SINT24 ) {
9353 if ( mask & AFMT_S24_NE ) {
9354 deviceFormat = AFMT_S24_NE;
9355 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9357 else if ( mask & AFMT_S24_OE ) {
9358 deviceFormat = AFMT_S24_OE;
9359 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9360 stream_.doByteSwap[mode] = true;
9363 else if ( format == RTAUDIO_SINT32 ) {
9364 if ( mask & AFMT_S32_NE ) {
9365 deviceFormat = AFMT_S32_NE;
9366 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9368 else if ( mask & AFMT_S32_OE ) {
9369 deviceFormat = AFMT_S32_OE;
9370 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9371 stream_.doByteSwap[mode] = true;
9375 if ( deviceFormat == -1 ) {
9376 // The user requested format is not natively supported by the device.
9377 if ( mask & AFMT_S16_NE ) {
9378 deviceFormat = AFMT_S16_NE;
9379 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9381 else if ( mask & AFMT_S32_NE ) {
9382 deviceFormat = AFMT_S32_NE;
9383 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9385 else if ( mask & AFMT_S24_NE ) {
9386 deviceFormat = AFMT_S24_NE;
9387 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9389 else if ( mask & AFMT_S16_OE ) {
9390 deviceFormat = AFMT_S16_OE;
9391 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9392 stream_.doByteSwap[mode] = true;
9394 else if ( mask & AFMT_S32_OE ) {
9395 deviceFormat = AFMT_S32_OE;
9396 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9397 stream_.doByteSwap[mode] = true;
9399 else if ( mask & AFMT_S24_OE ) {
9400 deviceFormat = AFMT_S24_OE;
9401 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9402 stream_.doByteSwap[mode] = true;
9404 else if ( mask & AFMT_S8) {
9405 deviceFormat = AFMT_S8;
9406 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9410 if ( stream_.deviceFormat[mode] == 0 ) {
9411 // This really shouldn't happen ...
9413 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9414 errorText_ = errorStream_.str();
9418 // Set the data format.
9419 int temp = deviceFormat;
9420 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9421 if ( result == -1 || deviceFormat != temp ) {
9423 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9424 errorText_ = errorStream_.str();
9428 // Attempt to set the buffer size. According to OSS, the minimum
9429 // number of buffers is two. The supposed minimum buffer size is 16
9430 // bytes, so that will be our lower bound. The argument to this
9431 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9432 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9433 // We'll check the actual value used near the end of the setup
9435 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9436 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9438 if ( options ) buffers = options->numberOfBuffers;
9439 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9440 if ( buffers < 2 ) buffers = 3;
// log10(x)/log10(2) == log2(ossBufferBytes), truncated to the SSSS field.
9441 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9442 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9443 if ( result == -1 ) {
9445 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9446 errorText_ = errorStream_.str();
9449 stream_.nBuffers = buffers;
9451 // Save buffer size (in sample frames).
9452 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9453 stream_.bufferSize = *bufferSize;
9455 // Set the sample rate.
9456 int srate = sampleRate;
9457 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9458 if ( result == -1 ) {
9460 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9461 errorText_ = errorStream_.str();
9465 // Verify the sample rate setup worked.
// A drift of up to 100 Hz from the requested rate is tolerated.
9466 if ( abs( srate - (int)sampleRate ) > 100 ) {
9468 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9469 errorText_ = errorStream_.str();
9472 stream_.sampleRate = sampleRate;
9474 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9475 // We're doing duplex setup here.
9476 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9477 stream_.nDeviceChannels[0] = deviceChannels;
9480 // Set interleaving parameters.
9481 stream_.userInterleaved = true;
9482 stream_.deviceInterleaved[mode] = true;
9483 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9484 stream_.userInterleaved = false;
9486 // Set flags for buffer conversion
9487 stream_.doConvertBuffer[mode] = false;
9488 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9489 stream_.doConvertBuffer[mode] = true;
9490 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9491 stream_.doConvertBuffer[mode] = true;
9492 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9493 stream_.nUserChannels[mode] > 1 )
9494 stream_.doConvertBuffer[mode] = true;
9496 // Allocate the stream handles if necessary and then save.
9497 if ( stream_.apiHandle == 0 ) {
9499 handle = new OssHandle;
9501 catch ( std::bad_alloc& ) {
9502 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9506 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9507 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9511 stream_.apiHandle = (void *) handle;
9514 handle = (OssHandle *) stream_.apiHandle;
9516 handle->id[mode] = fd;
9518 // Allocate necessary internal buffers.
9519 unsigned long bufferBytes;
9520 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9521 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9522 if ( stream_.userBuffer[mode] == NULL ) {
9523 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9527 if ( stream_.doConvertBuffer[mode] ) {
9529 bool makeBuffer = true;
9530 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9531 if ( mode == INPUT ) {
// Reuse the output-side device buffer for input when it is big enough.
9532 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9533 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9534 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9539 bufferBytes *= *bufferSize;
9540 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9541 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9542 if ( stream_.deviceBuffer == NULL ) {
9543 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9549 stream_.device[mode] = device;
9550 stream_.state = STREAM_STOPPED;
9552 // Setup the buffer conversion information structure.
9553 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9555 // Setup thread if necessary.
9556 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9557 // We had already set up an output stream.
9558 stream_.mode = DUPLEX;
9559 if ( stream_.device[0] == device ) handle->id[0] = fd;
9562 stream_.mode = mode;
9564 // Setup callback thread.
9565 stream_.callbackInfo.object = (void *) this;
9567 // Set the thread attributes for joinable and realtime scheduling
9568 // priority. The higher priority will only take affect if the
9569 // program is run as root or suid.
9570 pthread_attr_t attr;
9571 pthread_attr_init( &attr );
9572 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9573 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9574 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9575 stream_.callbackInfo.doRealtime = true;
9576 struct sched_param param;
9577 int priority = options->priority;
// Clamp the requested priority into the valid SCHED_RR range.
9578 int min = sched_get_priority_min( SCHED_RR );
9579 int max = sched_get_priority_max( SCHED_RR );
9580 if ( priority < min ) priority = min;
9581 else if ( priority > max ) priority = max;
9582 param.sched_priority = priority;
9584 // Set the policy BEFORE the priority. Otherwise it fails.
9585 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9586 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9587 // This is definitely required. Otherwise it fails.
9588 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
// FIX(review): was the mojibake "¶m" (corrupted "&param").
9589 pthread_attr_setschedparam(&attr, &param);
9592 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9594 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9597 stream_.callbackInfo.isRunning = true;
9598 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9599 pthread_attr_destroy( &attr );
9601 // Failed. Try instead with default attributes.
9602 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9604 stream_.callbackInfo.isRunning = false;
9605 errorText_ = "RtApiOss::error creating callback thread!";
// Error path: release condition variable, fds, handle, and buffers.
9615 pthread_cond_destroy( &handle->runnable );
9616 if ( handle->id[0] ) close( handle->id[0] );
9617 if ( handle->id[1] ) close( handle->id[1] );
9619 stream_.apiHandle = 0;
9622 for ( int i=0; i<2; i++ ) {
9623 if ( stream_.userBuffer[i] ) {
9624 free( stream_.userBuffer[i] );
9625 stream_.userBuffer[i] = 0;
9629 if ( stream_.deviceBuffer ) {
9630 free( stream_.deviceBuffer );
9631 stream_.deviceBuffer = 0;
9634 stream_.state = STREAM_CLOSED;
// Shut down the stream completely: stop the callback thread (waking it if it
// is blocked on the runnable condvar), halt any running device I/O, close
// the fds, and free the handle and all internal buffers.
9638 void RtApiOss :: closeStream()
9640 if ( stream_.state == STREAM_CLOSED ) {
9641 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9642 error( RtAudioError::WARNING );
9646 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Tell the callback loop to exit, wake it if parked, then join it.
9647 stream_.callbackInfo.isRunning = false;
9648 MUTEX_LOCK( &stream_.mutex );
9649 if ( stream_.state == STREAM_STOPPED )
9650 pthread_cond_signal( &handle->runnable );
9651 MUTEX_UNLOCK( &stream_.mutex );
9652 pthread_join( stream_.callbackInfo.thread, NULL );
9654 if ( stream_.state == STREAM_RUNNING ) {
9655 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9656 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9658 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9659 stream_.state = STREAM_STOPPED;
9663 pthread_cond_destroy( &handle->runnable );
9664 if ( handle->id[0] ) close( handle->id[0] );
9665 if ( handle->id[1] ) close( handle->id[1] );
9667 stream_.apiHandle = 0;
9670 for ( int i=0; i<2; i++ ) {
9671 if ( stream_.userBuffer[i] ) {
9672 free( stream_.userBuffer[i] );
9673 stream_.userBuffer[i] = 0;
9677 if ( stream_.deviceBuffer ) {
9678 free( stream_.deviceBuffer );
9679 stream_.deviceBuffer = 0;
9682 stream_.mode = UNINITIALIZED;
9683 stream_.state = STREAM_CLOSED;
// Mark the stream RUNNING and wake the parked callback thread. OSS itself
// starts as soon as samples are written, so no device ioctl is needed here.
9686 void RtApiOss :: startStream()
9689 if ( stream_.state == STREAM_RUNNING ) {
9690 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9691 error( RtAudioError::WARNING );
9695 MUTEX_LOCK( &stream_.mutex );
9697 #if defined( HAVE_GETTIMEOFDAY )
// Reset the stream-time reference so getStreamTime() starts from now.
9698 gettimeofday( &stream_.lastTickTimestamp, NULL );
9701 stream_.state = STREAM_RUNNING;
9703 // No need to do anything else here ... OSS automatically starts
9704 // when fed samples.
9706 MUTEX_UNLOCK( &stream_.mutex );
// Wake the callback thread waiting on the runnable condition variable.
9708 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9709 pthread_cond_signal( &handle->runnable );
// Gracefully stop the stream: drain the output by writing a few buffers of
// silence (so queued audio plays out), then halt the device(s) and mark the
// stream STOPPED. Raises SYSTEM_ERROR if any halt ioctl failed.
9712 void RtApiOss :: stopStream()
9715 if ( stream_.state == STREAM_STOPPED ) {
9716 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9717 error( RtAudioError::WARNING );
9721 MUTEX_LOCK( &stream_.mutex );
9723 // The state might change while waiting on a mutex.
9724 if ( stream_.state == STREAM_STOPPED ) {
9725 MUTEX_UNLOCK( &stream_.mutex );
9730 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9731 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9733 // Flush the output with zeros a few times.
9736 RtAudioFormat format;
// Pick whichever buffer actually feeds the device (converted or user).
9738 if ( stream_.doConvertBuffer[0] ) {
9739 buffer = stream_.deviceBuffer;
9740 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9741 format = stream_.deviceFormat[0];
9744 buffer = stream_.userBuffer[0];
9745 samples = stream_.bufferSize * stream_.nUserChannels[0];
9746 format = stream_.userFormat;
9749 memset( buffer, 0, samples * formatBytes(format) );
9750 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9751 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9752 if ( result == -1 ) {
9753 errorText_ = "RtApiOss::stopStream: audio write error.";
9754 error( RtAudioError::WARNING );
9758 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9759 if ( result == -1 ) {
9760 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9761 errorText_ = errorStream_.str();
9764 handle->triggered = false;
// Halt the input fd too, unless duplex shares a single fd already halted.
9767 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9768 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9769 if ( result == -1 ) {
// NOTE(review): this input-branch message reports stream_.device[0];
// stream_.device[1] looks intended — confirm upstream.
9770 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9771 errorText_ = errorStream_.str();
9777 stream_.state = STREAM_STOPPED;
9778 MUTEX_UNLOCK( &stream_.mutex );
// Report a system error only if one of the halt/write calls failed.
9780 if ( result != -1 ) return;
9781 error( RtAudioError::SYSTEM_ERROR );
// Immediately stop the stream: halt the device(s) without draining queued
// output (contrast stopStream(), which flushes silence first).
9784 void RtApiOss :: abortStream()
9787 if ( stream_.state == STREAM_STOPPED ) {
9788 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9789 error( RtAudioError::WARNING );
9793 MUTEX_LOCK( &stream_.mutex );
9795 // The state might change while waiting on a mutex.
9796 if ( stream_.state == STREAM_STOPPED ) {
9797 MUTEX_UNLOCK( &stream_.mutex );
9802 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9803 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9804 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9805 if ( result == -1 ) {
9806 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9807 errorText_ = errorStream_.str();
9810 handle->triggered = false;
// Halt the input fd too, unless duplex shares a single fd already halted.
9813 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9814 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9815 if ( result == -1 ) {
9816 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9817 errorText_ = errorStream_.str();
9823 stream_.state = STREAM_STOPPED;
9824 MUTEX_UNLOCK( &stream_.mutex );
// Report a system error only if one of the halt ioctls failed.
9826 if ( result != -1 ) return;
9827 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the OSS callback loop: park on the condvar while stopped,
// invoke the user callback with xrun status, then perform the actual device
// write (output) and read (input), with format conversion and byte swapping
// as configured in probeDeviceOpen().
9830 void RtApiOss :: callbackEvent()
9832 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Park until startStream()/closeStream() signals the runnable condvar.
9833 if ( stream_.state == STREAM_STOPPED ) {
9834 MUTEX_LOCK( &stream_.mutex );
9835 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9836 if ( stream_.state != STREAM_RUNNING ) {
9837 MUTEX_UNLOCK( &stream_.mutex );
9840 MUTEX_UNLOCK( &stream_.mutex );
9843 if ( stream_.state == STREAM_CLOSED ) {
9844 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9845 error( RtAudioError::WARNING );
9849 // Invoke user callback to get fresh output data.
9850 int doStopStream = 0;
9851 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9852 double streamTime = getStreamTime();
9853 RtAudioStreamStatus status = 0;
// Report (and clear) any under/overflow flags recorded by earlier I/O errors.
9854 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9855 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9856 handle->xrun[0] = false;
9858 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9859 status |= RTAUDIO_INPUT_OVERFLOW;
9860 handle->xrun[1] = false;
9862 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9863 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// Callback return of 2 requests an immediate abort; 1 (handled at the end)
// requests a drained stop.
9864 if ( doStopStream == 2 ) {
9865 this->abortStream();
9869 MUTEX_LOCK( &stream_.mutex );
9871 // The state might change while waiting on a mutex.
9872 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9877 RtAudioFormat format;
9879 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9881 // Setup parameters and do buffer conversion if necessary.
9882 if ( stream_.doConvertBuffer[0] ) {
9883 buffer = stream_.deviceBuffer;
9884 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9885 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9886 format = stream_.deviceFormat[0];
9889 buffer = stream_.userBuffer[0];
9890 samples = stream_.bufferSize * stream_.nUserChannels[0];
9891 format = stream_.userFormat;
9894 // Do byte swapping if necessary.
9895 if ( stream_.doByteSwap[0] )
9896 byteSwapBuffer( buffer, samples, format );
// First duplex pass: prime the output with one write while triggers are
// off, then enable input+output triggers together so both start in sync.
9898 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9900 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9901 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9902 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9903 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9904 handle->triggered = true;
9907 // Write samples to device.
9908 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9910 if ( result == -1 ) {
9911 // We'll assume this is an underrun, though there isn't a
9912 // specific means for determining that.
9913 handle->xrun[0] = true;
9914 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9915 error( RtAudioError::WARNING );
9916 // Continue on to input section.
9920 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9922 // Setup parameters.
9923 if ( stream_.doConvertBuffer[1] ) {
9924 buffer = stream_.deviceBuffer;
9925 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9926 format = stream_.deviceFormat[1];
9929 buffer = stream_.userBuffer[1];
9930 samples = stream_.bufferSize * stream_.nUserChannels[1];
9931 format = stream_.userFormat;
9934 // Read samples from device.
9935 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9937 if ( result == -1 ) {
9938 // We'll assume this is an overrun, though there isn't a
9939 // specific means for determining that.
9940 handle->xrun[1] = true;
9941 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9942 error( RtAudioError::WARNING );
9946 // Do byte swapping if necessary.
9947 if ( stream_.doByteSwap[1] )
9948 byteSwapBuffer( buffer, samples, format );
9950 // Do buffer conversion if necessary.
9951 if ( stream_.doConvertBuffer[1] )
9952 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9956 MUTEX_UNLOCK( &stream_.mutex );
9958 RtApi::tickStreamTime();
9959 if ( doStopStream == 1 ) this->stopStream();
9962 static void *ossCallbackHandler( void *ptr )
9964 CallbackInfo *info = (CallbackInfo *) ptr;
9965 RtApiOss *object = (RtApiOss *) info->object;
9966 bool *isRunning = &info->isRunning;
9968 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9969 if (info->doRealtime) {
9970 std::cerr << "RtAudio oss: " <<
9971 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9972 "running realtime scheduling" << std::endl;
9976 while ( *isRunning == true ) {
9977 pthread_testcancel();
9978 object->callbackEvent();
9981 pthread_exit( NULL );
9984 //******************** End of __LINUX_OSS__ *********************//
9988 // *************************************************** //
9990 // Protected common (OS-independent) RtAudio methods.
9992 // *************************************************** //
9994 // This method can be modified to control the behavior of error
9995 // message printing.
9996 RtAudioErrorType RtApi :: error( RtAudioErrorType type )
9998 errorStream_.str(""); // clear the ostringstream to avoid repeated messages
10000 // Don't output warnings if showWarnings_ is false
10001 if ( type == RTAUDIO_WARNING && showWarnings_ == false ) return type;
10003 if ( errorCallback_ ) {
10004 const std::string errorMessage = errorText_;
10005 errorCallback_( type, errorMessage );
10008 std::cerr << '\n' << errorText_ << "\n\n";
10013 void RtApi :: verifyStream()
10015 if ( stream_.state == STREAM_CLOSED ) {
10016 errorText_ = "RtApi:: a stream is not open!";
10017 error( RtAudioError::INVALID_USE );
10022 void RtApi :: clearStreamInfo()
10024 stream_.mode = UNINITIALIZED;
10025 stream_.state = STREAM_CLOSED;
10026 stream_.sampleRate = 0;
10027 stream_.bufferSize = 0;
10028 stream_.nBuffers = 0;
10029 stream_.userFormat = 0;
10030 stream_.userInterleaved = true;
10031 stream_.streamTime = 0.0;
10032 stream_.apiHandle = 0;
10033 stream_.deviceBuffer = 0;
10034 stream_.callbackInfo.callback = 0;
10035 stream_.callbackInfo.userData = 0;
10036 stream_.callbackInfo.isRunning = false;
10037 stream_.callbackInfo.deviceDisconnected = false;
10038 for ( int i=0; i<2; i++ ) {
10039 stream_.device[i] = 11111;
10040 stream_.doConvertBuffer[i] = false;
10041 stream_.deviceInterleaved[i] = true;
10042 stream_.doByteSwap[i] = false;
10043 stream_.nUserChannels[i] = 0;
10044 stream_.nDeviceChannels[i] = 0;
10045 stream_.channelOffset[i] = 0;
10046 stream_.deviceFormat[i] = 0;
10047 stream_.latency[i] = 0;
10048 stream_.userBuffer[i] = 0;
10049 stream_.convertInfo[i].channels = 0;
10050 stream_.convertInfo[i].inJump = 0;
10051 stream_.convertInfo[i].outJump = 0;
10052 stream_.convertInfo[i].inFormat = 0;
10053 stream_.convertInfo[i].outFormat = 0;
10054 stream_.convertInfo[i].inOffset.clear();
10055 stream_.convertInfo[i].outOffset.clear();
10059 unsigned int RtApi :: formatBytes( RtAudioFormat format )
10061 if ( format == RTAUDIO_SINT16 )
10063 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
10065 else if ( format == RTAUDIO_FLOAT64 )
10067 else if ( format == RTAUDIO_SINT24 )
10069 else if ( format == RTAUDIO_SINT8 )
10072 errorText_ = "RtApi::formatBytes: undefined format.";
10073 error( RTAUDIO_WARNING );
10078 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
10080 if ( mode == INPUT ) { // convert device to user buffer
10081 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
10082 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
10083 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
10084 stream_.convertInfo[mode].outFormat = stream_.userFormat;
10086 else { // convert user to device buffer
10087 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
10088 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
10089 stream_.convertInfo[mode].inFormat = stream_.userFormat;
10090 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
10093 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
10094 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
10096 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
10098 // Set up the interleave/deinterleave offsets.
10099 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
10100 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
10101 ( mode == INPUT && stream_.userInterleaved ) ) {
10102 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10103 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10104 stream_.convertInfo[mode].outOffset.push_back( k );
10105 stream_.convertInfo[mode].inJump = 1;
10109 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10110 stream_.convertInfo[mode].inOffset.push_back( k );
10111 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10112 stream_.convertInfo[mode].outJump = 1;
10116 else { // no (de)interleaving
10117 if ( stream_.userInterleaved ) {
10118 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10119 stream_.convertInfo[mode].inOffset.push_back( k );
10120 stream_.convertInfo[mode].outOffset.push_back( k );
10124 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10125 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10126 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10127 stream_.convertInfo[mode].inJump = 1;
10128 stream_.convertInfo[mode].outJump = 1;
10133 // Add channel offset.
10134 if ( firstChannel > 0 ) {
10135 if ( stream_.deviceInterleaved[mode] ) {
10136 if ( mode == OUTPUT ) {
10137 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10138 stream_.convertInfo[mode].outOffset[k] += firstChannel;
10141 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10142 stream_.convertInfo[mode].inOffset[k] += firstChannel;
10146 if ( mode == OUTPUT ) {
10147 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10148 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
10151 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10152 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
10158 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
10160 // This function does format conversion, input/output channel compensation, and
10161 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
10162 // the lower three bytes of a 32-bit integer.
10164 // Clear our device buffer when in/out duplex device channels are different
10165 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
10166 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
10167 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
10170 if (info.outFormat == RTAUDIO_FLOAT64) {
10172 Float64 *out = (Float64 *)outBuffer;
10174 if (info.inFormat == RTAUDIO_SINT8) {
10175 signed char *in = (signed char *)inBuffer;
10176 scale = 1.0 / 127.5;
10177 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10178 for (j=0; j<info.channels; j++) {
10179 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10180 out[info.outOffset[j]] += 0.5;
10181 out[info.outOffset[j]] *= scale;
10184 out += info.outJump;
10187 else if (info.inFormat == RTAUDIO_SINT16) {
10188 Int16 *in = (Int16 *)inBuffer;
10189 scale = 1.0 / 32767.5;
10190 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10191 for (j=0; j<info.channels; j++) {
10192 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10193 out[info.outOffset[j]] += 0.5;
10194 out[info.outOffset[j]] *= scale;
10197 out += info.outJump;
10200 else if (info.inFormat == RTAUDIO_SINT24) {
10201 Int24 *in = (Int24 *)inBuffer;
10202 scale = 1.0 / 8388607.5;
10203 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10204 for (j=0; j<info.channels; j++) {
10205 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
10206 out[info.outOffset[j]] += 0.5;
10207 out[info.outOffset[j]] *= scale;
10210 out += info.outJump;
10213 else if (info.inFormat == RTAUDIO_SINT32) {
10214 Int32 *in = (Int32 *)inBuffer;
10215 scale = 1.0 / 2147483647.5;
10216 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10217 for (j=0; j<info.channels; j++) {
10218 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10219 out[info.outOffset[j]] += 0.5;
10220 out[info.outOffset[j]] *= scale;
10223 out += info.outJump;
10226 else if (info.inFormat == RTAUDIO_FLOAT32) {
10227 Float32 *in = (Float32 *)inBuffer;
10228 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10229 for (j=0; j<info.channels; j++) {
10230 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10233 out += info.outJump;
10236 else if (info.inFormat == RTAUDIO_FLOAT64) {
10237 // Channel compensation and/or (de)interleaving only.
10238 Float64 *in = (Float64 *)inBuffer;
10239 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10240 for (j=0; j<info.channels; j++) {
10241 out[info.outOffset[j]] = in[info.inOffset[j]];
10244 out += info.outJump;
10248 else if (info.outFormat == RTAUDIO_FLOAT32) {
10250 Float32 *out = (Float32 *)outBuffer;
10252 if (info.inFormat == RTAUDIO_SINT8) {
10253 signed char *in = (signed char *)inBuffer;
10254 scale = (Float32) ( 1.0 / 127.5 );
10255 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10256 for (j=0; j<info.channels; j++) {
10257 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10258 out[info.outOffset[j]] += 0.5;
10259 out[info.outOffset[j]] *= scale;
10262 out += info.outJump;
10265 else if (info.inFormat == RTAUDIO_SINT16) {
10266 Int16 *in = (Int16 *)inBuffer;
10267 scale = (Float32) ( 1.0 / 32767.5 );
10268 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10269 for (j=0; j<info.channels; j++) {
10270 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10271 out[info.outOffset[j]] += 0.5;
10272 out[info.outOffset[j]] *= scale;
10275 out += info.outJump;
10278 else if (info.inFormat == RTAUDIO_SINT24) {
10279 Int24 *in = (Int24 *)inBuffer;
10280 scale = (Float32) ( 1.0 / 8388607.5 );
10281 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10282 for (j=0; j<info.channels; j++) {
10283 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
10284 out[info.outOffset[j]] += 0.5;
10285 out[info.outOffset[j]] *= scale;
10288 out += info.outJump;
10291 else if (info.inFormat == RTAUDIO_SINT32) {
10292 Int32 *in = (Int32 *)inBuffer;
10293 scale = (Float32) ( 1.0 / 2147483647.5 );
10294 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10295 for (j=0; j<info.channels; j++) {
10296 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10297 out[info.outOffset[j]] += 0.5;
10298 out[info.outOffset[j]] *= scale;
10301 out += info.outJump;
10304 else if (info.inFormat == RTAUDIO_FLOAT32) {
10305 // Channel compensation and/or (de)interleaving only.
10306 Float32 *in = (Float32 *)inBuffer;
10307 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10308 for (j=0; j<info.channels; j++) {
10309 out[info.outOffset[j]] = in[info.inOffset[j]];
10312 out += info.outJump;
10315 else if (info.inFormat == RTAUDIO_FLOAT64) {
10316 Float64 *in = (Float64 *)inBuffer;
10317 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10318 for (j=0; j<info.channels; j++) {
10319 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10322 out += info.outJump;
10326 else if (info.outFormat == RTAUDIO_SINT32) {
10327 Int32 *out = (Int32 *)outBuffer;
10328 if (info.inFormat == RTAUDIO_SINT8) {
10329 signed char *in = (signed char *)inBuffer;
10330 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10331 for (j=0; j<info.channels; j++) {
10332 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10333 out[info.outOffset[j]] <<= 24;
10336 out += info.outJump;
10339 else if (info.inFormat == RTAUDIO_SINT16) {
10340 Int16 *in = (Int16 *)inBuffer;
10341 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10342 for (j=0; j<info.channels; j++) {
10343 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10344 out[info.outOffset[j]] <<= 16;
10347 out += info.outJump;
10350 else if (info.inFormat == RTAUDIO_SINT24) {
10351 Int24 *in = (Int24 *)inBuffer;
10352 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10353 for (j=0; j<info.channels; j++) {
10354 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10355 out[info.outOffset[j]] <<= 8;
10358 out += info.outJump;
10361 else if (info.inFormat == RTAUDIO_SINT32) {
10362 // Channel compensation and/or (de)interleaving only.
10363 Int32 *in = (Int32 *)inBuffer;
10364 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10365 for (j=0; j<info.channels; j++) {
10366 out[info.outOffset[j]] = in[info.inOffset[j]];
10369 out += info.outJump;
10372 else if (info.inFormat == RTAUDIO_FLOAT32) {
10373 Float32 *in = (Float32 *)inBuffer;
10374 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10375 for (j=0; j<info.channels; j++) {
10376 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10379 out += info.outJump;
10382 else if (info.inFormat == RTAUDIO_FLOAT64) {
10383 Float64 *in = (Float64 *)inBuffer;
10384 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10385 for (j=0; j<info.channels; j++) {
10386 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10389 out += info.outJump;
10393 else if (info.outFormat == RTAUDIO_SINT24) {
10394 Int24 *out = (Int24 *)outBuffer;
10395 if (info.inFormat == RTAUDIO_SINT8) {
10396 signed char *in = (signed char *)inBuffer;
10397 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10398 for (j=0; j<info.channels; j++) {
10399 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10400 //out[info.outOffset[j]] <<= 16;
10403 out += info.outJump;
10406 else if (info.inFormat == RTAUDIO_SINT16) {
10407 Int16 *in = (Int16 *)inBuffer;
10408 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10409 for (j=0; j<info.channels; j++) {
10410 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10411 //out[info.outOffset[j]] <<= 8;
10414 out += info.outJump;
10417 else if (info.inFormat == RTAUDIO_SINT24) {
10418 // Channel compensation and/or (de)interleaving only.
10419 Int24 *in = (Int24 *)inBuffer;
10420 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10421 for (j=0; j<info.channels; j++) {
10422 out[info.outOffset[j]] = in[info.inOffset[j]];
10425 out += info.outJump;
10428 else if (info.inFormat == RTAUDIO_SINT32) {
10429 Int32 *in = (Int32 *)inBuffer;
10430 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10431 for (j=0; j<info.channels; j++) {
10432 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10433 //out[info.outOffset[j]] >>= 8;
10436 out += info.outJump;
10439 else if (info.inFormat == RTAUDIO_FLOAT32) {
10440 Float32 *in = (Float32 *)inBuffer;
10441 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10442 for (j=0; j<info.channels; j++) {
10443 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10446 out += info.outJump;
10449 else if (info.inFormat == RTAUDIO_FLOAT64) {
10450 Float64 *in = (Float64 *)inBuffer;
10451 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10452 for (j=0; j<info.channels; j++) {
10453 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10456 out += info.outJump;
10460 else if (info.outFormat == RTAUDIO_SINT16) {
10461 Int16 *out = (Int16 *)outBuffer;
10462 if (info.inFormat == RTAUDIO_SINT8) {
10463 signed char *in = (signed char *)inBuffer;
10464 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10465 for (j=0; j<info.channels; j++) {
10466 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10467 out[info.outOffset[j]] <<= 8;
10470 out += info.outJump;
10473 else if (info.inFormat == RTAUDIO_SINT16) {
10474 // Channel compensation and/or (de)interleaving only.
10475 Int16 *in = (Int16 *)inBuffer;
10476 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10477 for (j=0; j<info.channels; j++) {
10478 out[info.outOffset[j]] = in[info.inOffset[j]];
10481 out += info.outJump;
10484 else if (info.inFormat == RTAUDIO_SINT24) {
10485 Int24 *in = (Int24 *)inBuffer;
10486 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10487 for (j=0; j<info.channels; j++) {
10488 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10491 out += info.outJump;
10494 else if (info.inFormat == RTAUDIO_SINT32) {
10495 Int32 *in = (Int32 *)inBuffer;
10496 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10497 for (j=0; j<info.channels; j++) {
10498 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10501 out += info.outJump;
10504 else if (info.inFormat == RTAUDIO_FLOAT32) {
10505 Float32 *in = (Float32 *)inBuffer;
10506 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10507 for (j=0; j<info.channels; j++) {
10508 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10511 out += info.outJump;
10514 else if (info.inFormat == RTAUDIO_FLOAT64) {
10515 Float64 *in = (Float64 *)inBuffer;
10516 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10517 for (j=0; j<info.channels; j++) {
10518 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10521 out += info.outJump;
10525 else if (info.outFormat == RTAUDIO_SINT8) {
10526 signed char *out = (signed char *)outBuffer;
10527 if (info.inFormat == RTAUDIO_SINT8) {
10528 // Channel compensation and/or (de)interleaving only.
10529 signed char *in = (signed char *)inBuffer;
10530 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10531 for (j=0; j<info.channels; j++) {
10532 out[info.outOffset[j]] = in[info.inOffset[j]];
10535 out += info.outJump;
10538 if (info.inFormat == RTAUDIO_SINT16) {
10539 Int16 *in = (Int16 *)inBuffer;
10540 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10541 for (j=0; j<info.channels; j++) {
10542 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10545 out += info.outJump;
10548 else if (info.inFormat == RTAUDIO_SINT24) {
10549 Int24 *in = (Int24 *)inBuffer;
10550 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10551 for (j=0; j<info.channels; j++) {
10552 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10555 out += info.outJump;
10558 else if (info.inFormat == RTAUDIO_SINT32) {
10559 Int32 *in = (Int32 *)inBuffer;
10560 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10561 for (j=0; j<info.channels; j++) {
10562 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10565 out += info.outJump;
10568 else if (info.inFormat == RTAUDIO_FLOAT32) {
10569 Float32 *in = (Float32 *)inBuffer;
10570 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10571 for (j=0; j<info.channels; j++) {
10572 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10575 out += info.outJump;
10578 else if (info.inFormat == RTAUDIO_FLOAT64) {
10579 Float64 *in = (Float64 *)inBuffer;
10580 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10581 for (j=0; j<info.channels; j++) {
10582 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10585 out += info.outJump;
10591 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10592 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10593 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10595 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10601 if ( format == RTAUDIO_SINT16 ) {
10602 for ( unsigned int i=0; i<samples; i++ ) {
10603 // Swap 1st and 2nd bytes.
10608 // Increment 2 bytes.
10612 else if ( format == RTAUDIO_SINT32 ||
10613 format == RTAUDIO_FLOAT32 ) {
10614 for ( unsigned int i=0; i<samples; i++ ) {
10615 // Swap 1st and 4th bytes.
10620 // Swap 2nd and 3rd bytes.
10626 // Increment 3 more bytes.
10630 else if ( format == RTAUDIO_SINT24 ) {
10631 for ( unsigned int i=0; i<samples; i++ ) {
10632 // Swap 1st and 3rd bytes.
10637 // Increment 2 more bytes.
10641 else if ( format == RTAUDIO_FLOAT64 ) {
10642 for ( unsigned int i=0; i<samples; i++ ) {
10643 // Swap 1st and 8th bytes
10648 // Swap 2nd and 7th bytes
10654 // Swap 3rd and 6th bytes
10660 // Swap 4th and 5th bytes
10666 // Increment 5 more bytes.
10672 // Indentation settings for Vim and Emacs
10674 // Local Variables:
10675 // c-basic-offset: 2
10676 // indent-tabs-mode: nil
10679 // vim: et sts=2 sw=2