/************************************************************************/
/*! \brief Realtime audio i/o C++ classes.

    RtAudio provides a common API (Application Programming Interface)
    for realtime audio input/output across Linux (native ALSA, Jack,
    and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
    (DirectSound, ASIO and WASAPI) operating systems.

    RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/

    RtAudio: realtime audio i/o C++ classes
    Copyright (c) 2001-2017 Gary P. Scavone

    Permission is hereby granted, free of charge, to any person
    obtaining a copy of this software and associated documentation files
    (the "Software"), to deal in the Software without restriction,
    including without limitation the rights to use, copy, modify, merge,
    publish, distribute, sublicense, and/or sell copies of the Software,
    and to permit persons to whom the Software is furnished to do so,
    subject to the following conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    Any person wishing to distribute modifications to the Software is
    asked to send the modifications to the original developer so that
    they can be incorporated into the canonical version.  This is,
    however, not a binding provision of this license.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
    IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
    ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
    CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/************************************************************************/

// RtAudio: Version 5.0.0
51 // Static variable definitions.
52 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
53 const unsigned int RtApi::SAMPLE_RATES[] = {
54 4000, 5512, 8000, 9600, 11025, 16000, 22050,
55 32000, 44100, 48000, 88200, 96000, 176400, 192000
// Platform mutex abstraction: Windows critical sections, pthread
// mutexes elsewhere, and no-op dummies when no audio API is compiled.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)

  // Narrow-character overload: pass-through construction.
  static std::string convertCharPointerToStdString(const char *text)
  {
    return std::string(text);
  }

  // Wide-character overload: convert a NUL-terminated wide string to a
  // UTF-8 std::string.  The first WideCharToMultiByte call sizes the
  // result (including the terminating NUL, hence length-1 for the
  // string itself); the second performs the conversion in place.
  static std::string convertCharPointerToStdString(const wchar_t *text)
  {
    int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
    std::string s( length-1, '\0' );
    WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
    return s;
  }

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
// *************************************************** //
//
// RtAudio definitions.
//
// *************************************************** //
96 std::string RtAudio :: getVersion( void )
98 return RTAUDIO_VERSION;
101 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
105 // The order here will control the order of RtAudio's API search in
107 #if defined(__UNIX_JACK__)
108 apis.push_back( UNIX_JACK );
110 #if defined(__LINUX_PULSE__)
111 apis.push_back( LINUX_PULSE );
113 #if defined(__LINUX_ALSA__)
114 apis.push_back( LINUX_ALSA );
116 #if defined(__LINUX_OSS__)
117 apis.push_back( LINUX_OSS );
119 #if defined(__WINDOWS_ASIO__)
120 apis.push_back( WINDOWS_ASIO );
122 #if defined(__WINDOWS_WASAPI__)
123 apis.push_back( WINDOWS_WASAPI );
125 #if defined(__WINDOWS_DS__)
126 apis.push_back( WINDOWS_DS );
128 #if defined(__MACOSX_CORE__)
129 apis.push_back( MACOSX_CORE );
131 #if defined(__RTAUDIO_DUMMY__)
132 apis.push_back( RTAUDIO_DUMMY );
136 const std::string &RtAudio :: getCompiledApiName( RtAudio::Api api )
138 #if defined(__UNIX_JACK__)
139 if ( api == UNIX_JACK ) {
140 static std::string name( "jack" );
144 #if defined(__LINUX_PULSE__)
145 if ( api == LINUX_PULSE ) {
146 static std::string name( "pulse" );
150 #if defined(__LINUX_ALSA__)
151 if ( api == LINUX_ALSA ) {
152 static std::string name( "alsa" );
156 #if defined(__LINUX_OSS__)
157 if ( api == LINUX_OSS ) {
158 static std::string name( "oss" );
162 #if defined(__WINDOWS_ASIO__)
163 if ( api == WINDOWS_ASIO ) {
164 static std::string name( "asio" );
168 #if defined(__WINDOWS_WASAPI__)
169 if ( api == WINDOWS_WASAPI ) {
170 static std::string name( "wasapi" );
174 #if defined(__WINDOWS_DS__)
175 if ( api == WINDOWS_DS ) {
176 static std::string name( "ds" );
180 #if defined(__MACOSX_CORE__)
181 if ( api == MACOSX_CORE ) {
182 static std::string name( "core" );
186 #if defined(__RTAUDIO_DUMMY__)
187 if ( api == RTAUDIO_DUMMY ) {
188 static std::string name( "dummy" );
192 static std::string name;
196 const std::string &RtAudio :: getCompiledApiDisplayName( RtAudio::Api api )
198 #if defined(__UNIX_JACK__)
199 if ( api == UNIX_JACK ) {
200 static std::string name( "JACK" );
204 #if defined(__LINUX_PULSE__)
205 if ( api == LINUX_PULSE ) {
206 static std::string name( "PulseAudio" );
210 #if defined(__LINUX_ALSA__)
211 if ( api == LINUX_ALSA ) {
212 static std::string name( "ALSA" );
216 #if defined(__LINUX_OSS__)
217 if ( api == LINUX_OSS ) {
218 static std::string name( "OSS" );
222 #if defined(__WINDOWS_ASIO__)
223 if ( api == WINDOWS_ASIO ) {
224 static std::string name( "ASIO" );
228 #if defined(__WINDOWS_WASAPI__)
229 if ( api == WINDOWS_WASAPI ) {
230 static std::string name( "WASAPI" );
234 #if defined(__WINDOWS_DS__)
235 if ( api == WINDOWS_DS ) {
236 static std::string name( "DirectSound" );
240 #if defined(__MACOSX_CORE__)
241 if ( api == MACOSX_CORE ) {
242 static std::string name( "Core Audio" );
246 #if defined(__RTAUDIO_DUMMY__)
247 if ( api == RTAUDIO_DUMMY ) {
248 static std::string name( "RtAudio Dummy" );
252 static std::string name;
256 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
258 unsigned int api_number = RtAudio::UNSPECIFIED;
259 size_t nameLength = name.size();
261 if ( nameLength == 0 )
262 return RtAudio::UNSPECIFIED;
264 while ( api_number <= RtAudio::RTAUDIO_DUMMY ) {
265 const std::string &otherName =
266 getCompiledApiName((RtAudio::Api)api_number);
268 if ( name == otherName )
269 return (RtAudio::Api)api_number;
274 return RtAudio::UNSPECIFIED;
277 void RtAudio :: openRtApi( RtAudio::Api api )
283 #if defined(__UNIX_JACK__)
284 if ( api == UNIX_JACK )
285 rtapi_ = new RtApiJack();
287 #if defined(__LINUX_ALSA__)
288 if ( api == LINUX_ALSA )
289 rtapi_ = new RtApiAlsa();
291 #if defined(__LINUX_PULSE__)
292 if ( api == LINUX_PULSE )
293 rtapi_ = new RtApiPulse();
295 #if defined(__LINUX_OSS__)
296 if ( api == LINUX_OSS )
297 rtapi_ = new RtApiOss();
299 #if defined(__WINDOWS_ASIO__)
300 if ( api == WINDOWS_ASIO )
301 rtapi_ = new RtApiAsio();
303 #if defined(__WINDOWS_WASAPI__)
304 if ( api == WINDOWS_WASAPI )
305 rtapi_ = new RtApiWasapi();
307 #if defined(__WINDOWS_DS__)
308 if ( api == WINDOWS_DS )
309 rtapi_ = new RtApiDs();
311 #if defined(__MACOSX_CORE__)
312 if ( api == MACOSX_CORE )
313 rtapi_ = new RtApiCore();
315 #if defined(__RTAUDIO_DUMMY__)
316 if ( api == RTAUDIO_DUMMY )
317 rtapi_ = new RtApiDummy();
321 RtAudio :: RtAudio( RtAudio::Api api )
325 if ( api != UNSPECIFIED ) {
326 // Attempt to open the specified API.
328 if ( rtapi_ ) return;
330 // No compiled support for specified API value. Issue a debug
331 // warning and continue as if no API was specified.
332 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
335 // Iterate through the compiled APIs and return as soon as we find
336 // one with at least one device or we reach the end of the list.
337 std::vector< RtAudio::Api > apis;
338 getCompiledApi( apis );
339 for ( unsigned int i=0; i<apis.size(); i++ ) {
340 openRtApi( apis[i] );
341 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
344 if ( rtapi_ ) return;
346 // It should not be possible to get here because the preprocessor
347 // definition __RTAUDIO_DUMMY__ is automatically defined if no
348 // API-specific definitions are passed to the compiler. But just in
349 // case something weird happens, we'll thow an error.
350 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
351 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
354 RtAudio :: ~RtAudio()
360 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
361 RtAudio::StreamParameters *inputParameters,
362 RtAudioFormat format, unsigned int sampleRate,
363 unsigned int *bufferFrames,
364 RtAudioCallback callback, void *userData,
365 RtAudio::StreamOptions *options,
366 RtAudioErrorCallback errorCallback )
368 return rtapi_->openStream( outputParameters, inputParameters, format,
369 sampleRate, bufferFrames, callback,
370 userData, options, errorCallback );
// *************************************************** //
//
// Public RtApi definitions (see end of file for
// private or protected utility functions).
//
// *************************************************** //
382 stream_.state = STREAM_CLOSED;
383 stream_.mode = UNINITIALIZED;
384 stream_.apiHandle = 0;
385 stream_.userBuffer[0] = 0;
386 stream_.userBuffer[1] = 0;
387 MUTEX_INITIALIZE( &stream_.mutex );
388 showWarnings_ = true;
389 firstErrorOccurred_ = false;
394 MUTEX_DESTROY( &stream_.mutex );
397 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
398 RtAudio::StreamParameters *iParams,
399 RtAudioFormat format, unsigned int sampleRate,
400 unsigned int *bufferFrames,
401 RtAudioCallback callback, void *userData,
402 RtAudio::StreamOptions *options,
403 RtAudioErrorCallback errorCallback )
405 if ( stream_.state != STREAM_CLOSED ) {
406 errorText_ = "RtApi::openStream: a stream is already open!";
407 error( RtAudioError::INVALID_USE );
411 // Clear stream information potentially left from a previously open stream.
414 if ( oParams && oParams->nChannels < 1 ) {
415 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
416 error( RtAudioError::INVALID_USE );
420 if ( iParams && iParams->nChannels < 1 ) {
421 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
422 error( RtAudioError::INVALID_USE );
426 if ( oParams == NULL && iParams == NULL ) {
427 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
428 error( RtAudioError::INVALID_USE );
432 if ( formatBytes(format) == 0 ) {
433 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
434 error( RtAudioError::INVALID_USE );
438 unsigned int nDevices = getDeviceCount();
439 unsigned int oChannels = 0;
441 oChannels = oParams->nChannels;
442 if ( oParams->deviceId >= nDevices ) {
443 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
444 error( RtAudioError::INVALID_USE );
449 unsigned int iChannels = 0;
451 iChannels = iParams->nChannels;
452 if ( iParams->deviceId >= nDevices ) {
453 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
454 error( RtAudioError::INVALID_USE );
461 if ( oChannels > 0 ) {
463 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
464 sampleRate, format, bufferFrames, options );
465 if ( result == false ) {
466 error( RtAudioError::SYSTEM_ERROR );
471 if ( iChannels > 0 ) {
473 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
474 sampleRate, format, bufferFrames, options );
475 if ( result == false ) {
476 if ( oChannels > 0 ) closeStream();
477 error( RtAudioError::SYSTEM_ERROR );
482 stream_.callbackInfo.callback = (void *) callback;
483 stream_.callbackInfo.userData = userData;
484 stream_.callbackInfo.errorCallback = (void *) errorCallback;
486 if ( options ) options->numberOfBuffers = stream_.nBuffers;
487 stream_.state = STREAM_STOPPED;
490 unsigned int RtApi :: getDefaultInputDevice( void )
492 // Should be implemented in subclasses if possible.
496 unsigned int RtApi :: getDefaultOutputDevice( void )
498 // Should be implemented in subclasses if possible.
502 void RtApi :: closeStream( void )
504 // MUST be implemented in subclasses!
508 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
509 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
510 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
511 RtAudio::StreamOptions * /*options*/ )
513 // MUST be implemented in subclasses!
517 void RtApi :: tickStreamTime( void )
519 // Subclasses that do not provide their own implementation of
520 // getStreamTime should call this function once per buffer I/O to
521 // provide basic stream time support.
523 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
525 #if defined( HAVE_GETTIMEOFDAY )
526 gettimeofday( &stream_.lastTickTimestamp, NULL );
530 long RtApi :: getStreamLatency( void )
534 long totalLatency = 0;
535 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
536 totalLatency = stream_.latency[0];
537 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
538 totalLatency += stream_.latency[1];
543 double RtApi :: getStreamTime( void )
547 #if defined( HAVE_GETTIMEOFDAY )
548 // Return a very accurate estimate of the stream time by
549 // adding in the elapsed time since the last tick.
553 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
554 return stream_.streamTime;
556 gettimeofday( &now, NULL );
557 then = stream_.lastTickTimestamp;
558 return stream_.streamTime +
559 ((now.tv_sec + 0.000001 * now.tv_usec) -
560 (then.tv_sec + 0.000001 * then.tv_usec));
562 return stream_.streamTime;
566 void RtApi :: setStreamTime( double time )
571 stream_.streamTime = time;
572 #if defined( HAVE_GETTIMEOFDAY )
573 gettimeofday( &stream_.lastTickTimestamp, NULL );
577 unsigned int RtApi :: getStreamSampleRate( void )
581 return stream_.sampleRate;
// *************************************************** //
//
// OS/API-specific methods.
//
// *************************************************** //

#if defined(__MACOSX_CORE__)

// The OS X CoreAudio API is designed to use a separate callback
// procedure for each of its audio devices.  A single RtAudio duplex
// stream using two different devices is supported here, though it
// cannot be guaranteed to always behave correctly because we cannot
// synchronize these two callbacks.
//
// A property listener is installed for over/underrun information.
// However, no functionality is currently provided to allow property
// listeners to trigger user handlers because it is unclear what could
// be done if a critical stream parameter (buffer size, sample rate,
// device disconnect) notification arrived.  The listeners entail
// quite a bit of extra code and most likely, a user program wouldn't
// be prepared for the result anyway.  However, we do provide a flag
// to the client callback function to inform of an over/underrun.
608 // A structure to hold various information related to the CoreAudio API
611 AudioDeviceID id[2]; // device ids
612 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
613 AudioDeviceIOProcID procId[2];
615 UInt32 iStream[2]; // device stream index (or first if using multiple)
616 UInt32 nStreams[2]; // number of streams to use
619 pthread_cond_t condition;
620 int drainCounter; // Tracks callback counts when draining
621 bool internalDrain; // Indicates if stop is initiated from callback or not.
624 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
627 RtApiCore:: RtApiCore()
629 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
630 // This is a largely undocumented but absolutely necessary
631 // requirement starting with OS-X 10.6. If not called, queries and
632 // updates to various audio device properties are not handled
634 CFRunLoopRef theRunLoop = NULL;
635 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
636 kAudioObjectPropertyScopeGlobal,
637 kAudioObjectPropertyElementMaster };
638 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
639 if ( result != noErr ) {
640 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
641 error( RtAudioError::WARNING );
646 RtApiCore :: ~RtApiCore()
648 // The subclass destructor gets called before the base class
649 // destructor, so close an existing stream before deallocating
650 // apiDeviceId memory.
651 if ( stream_.state != STREAM_CLOSED ) closeStream();
654 unsigned int RtApiCore :: getDeviceCount( void )
656 // Find out how many audio devices there are, if any.
658 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
659 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
660 if ( result != noErr ) {
661 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
662 error( RtAudioError::WARNING );
666 return dataSize / sizeof( AudioDeviceID );
669 unsigned int RtApiCore :: getDefaultInputDevice( void )
671 unsigned int nDevices = getDeviceCount();
672 if ( nDevices <= 1 ) return 0;
675 UInt32 dataSize = sizeof( AudioDeviceID );
676 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
677 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
678 if ( result != noErr ) {
679 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
680 error( RtAudioError::WARNING );
684 dataSize *= nDevices;
685 AudioDeviceID deviceList[ nDevices ];
686 property.mSelector = kAudioHardwarePropertyDevices;
687 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
688 if ( result != noErr ) {
689 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
690 error( RtAudioError::WARNING );
694 for ( unsigned int i=0; i<nDevices; i++ )
695 if ( id == deviceList[i] ) return i;
697 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
698 error( RtAudioError::WARNING );
702 unsigned int RtApiCore :: getDefaultOutputDevice( void )
704 unsigned int nDevices = getDeviceCount();
705 if ( nDevices <= 1 ) return 0;
708 UInt32 dataSize = sizeof( AudioDeviceID );
709 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
710 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
711 if ( result != noErr ) {
712 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
713 error( RtAudioError::WARNING );
717 dataSize = sizeof( AudioDeviceID ) * nDevices;
718 AudioDeviceID deviceList[ nDevices ];
719 property.mSelector = kAudioHardwarePropertyDevices;
720 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
721 if ( result != noErr ) {
722 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
723 error( RtAudioError::WARNING );
727 for ( unsigned int i=0; i<nDevices; i++ )
728 if ( id == deviceList[i] ) return i;
730 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
731 error( RtAudioError::WARNING );
735 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
737 RtAudio::DeviceInfo info;
741 unsigned int nDevices = getDeviceCount();
742 if ( nDevices == 0 ) {
743 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
744 error( RtAudioError::INVALID_USE );
748 if ( device >= nDevices ) {
749 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
750 error( RtAudioError::INVALID_USE );
754 AudioDeviceID deviceList[ nDevices ];
755 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
756 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
757 kAudioObjectPropertyScopeGlobal,
758 kAudioObjectPropertyElementMaster };
759 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
760 0, NULL, &dataSize, (void *) &deviceList );
761 if ( result != noErr ) {
762 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
763 error( RtAudioError::WARNING );
767 AudioDeviceID id = deviceList[ device ];
769 // Get the device name.
772 dataSize = sizeof( CFStringRef );
773 property.mSelector = kAudioObjectPropertyManufacturer;
774 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
775 if ( result != noErr ) {
776 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
777 errorText_ = errorStream_.str();
778 error( RtAudioError::WARNING );
782 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
783 int length = CFStringGetLength(cfname);
784 char *mname = (char *)malloc(length * 3 + 1);
785 #if defined( UNICODE ) || defined( _UNICODE )
786 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
788 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
790 info.name.append( (const char *)mname, strlen(mname) );
791 info.name.append( ": " );
795 property.mSelector = kAudioObjectPropertyName;
796 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
797 if ( result != noErr ) {
798 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
799 errorText_ = errorStream_.str();
800 error( RtAudioError::WARNING );
804 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
805 length = CFStringGetLength(cfname);
806 char *name = (char *)malloc(length * 3 + 1);
807 #if defined( UNICODE ) || defined( _UNICODE )
808 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
810 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
812 info.name.append( (const char *)name, strlen(name) );
816 // Get the output stream "configuration".
817 AudioBufferList *bufferList = nil;
818 property.mSelector = kAudioDevicePropertyStreamConfiguration;
819 property.mScope = kAudioDevicePropertyScopeOutput;
820 // property.mElement = kAudioObjectPropertyElementWildcard;
822 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
823 if ( result != noErr || dataSize == 0 ) {
824 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
825 errorText_ = errorStream_.str();
826 error( RtAudioError::WARNING );
830 // Allocate the AudioBufferList.
831 bufferList = (AudioBufferList *) malloc( dataSize );
832 if ( bufferList == NULL ) {
833 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
834 error( RtAudioError::WARNING );
838 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
839 if ( result != noErr || dataSize == 0 ) {
841 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
842 errorText_ = errorStream_.str();
843 error( RtAudioError::WARNING );
847 // Get output channel information.
848 unsigned int i, nStreams = bufferList->mNumberBuffers;
849 for ( i=0; i<nStreams; i++ )
850 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
853 // Get the input stream "configuration".
854 property.mScope = kAudioDevicePropertyScopeInput;
855 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
856 if ( result != noErr || dataSize == 0 ) {
857 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
858 errorText_ = errorStream_.str();
859 error( RtAudioError::WARNING );
863 // Allocate the AudioBufferList.
864 bufferList = (AudioBufferList *) malloc( dataSize );
865 if ( bufferList == NULL ) {
866 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
867 error( RtAudioError::WARNING );
871 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
872 if (result != noErr || dataSize == 0) {
874 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
875 errorText_ = errorStream_.str();
876 error( RtAudioError::WARNING );
880 // Get input channel information.
881 nStreams = bufferList->mNumberBuffers;
882 for ( i=0; i<nStreams; i++ )
883 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
886 // If device opens for both playback and capture, we determine the channels.
887 if ( info.outputChannels > 0 && info.inputChannels > 0 )
888 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
890 // Probe the device sample rates.
891 bool isInput = false;
892 if ( info.outputChannels == 0 ) isInput = true;
894 // Determine the supported sample rates.
895 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
896 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
897 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
898 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
899 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
900 errorText_ = errorStream_.str();
901 error( RtAudioError::WARNING );
905 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
906 AudioValueRange rangeList[ nRanges ];
907 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
908 if ( result != kAudioHardwareNoError ) {
909 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
910 errorText_ = errorStream_.str();
911 error( RtAudioError::WARNING );
915 // The sample rate reporting mechanism is a bit of a mystery. It
916 // seems that it can either return individual rates or a range of
917 // rates. I assume that if the min / max range values are the same,
918 // then that represents a single supported rate and if the min / max
919 // range values are different, the device supports an arbitrary
920 // range of values (though there might be multiple ranges, so we'll
921 // use the most conservative range).
922 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
923 bool haveValueRange = false;
924 info.sampleRates.clear();
925 for ( UInt32 i=0; i<nRanges; i++ ) {
926 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
927 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
928 info.sampleRates.push_back( tmpSr );
930 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
931 info.preferredSampleRate = tmpSr;
934 haveValueRange = true;
935 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
936 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
940 if ( haveValueRange ) {
941 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
942 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
943 info.sampleRates.push_back( SAMPLE_RATES[k] );
945 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
946 info.preferredSampleRate = SAMPLE_RATES[k];
951 // Sort and remove any redundant values
952 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
953 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
955 if ( info.sampleRates.size() == 0 ) {
956 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
957 errorText_ = errorStream_.str();
958 error( RtAudioError::WARNING );
962 // CoreAudio always uses 32-bit floating point data for PCM streams.
963 // Thus, any other "physical" formats supported by the device are of
964 // no interest to the client.
965 info.nativeFormats = RTAUDIO_FLOAT32;
967 if ( info.outputChannels > 0 )
968 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
969 if ( info.inputChannels > 0 )
970 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
976 static OSStatus callbackHandler( AudioDeviceID inDevice,
977 const AudioTimeStamp* /*inNow*/,
978 const AudioBufferList* inInputData,
979 const AudioTimeStamp* /*inInputTime*/,
980 AudioBufferList* outOutputData,
981 const AudioTimeStamp* /*inOutputTime*/,
984 CallbackInfo *info = (CallbackInfo *) infoPointer;
986 RtApiCore *object = (RtApiCore *) info->object;
987 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
988 return kAudioHardwareUnspecifiedError;
990 return kAudioHardwareNoError;
993 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
995 const AudioObjectPropertyAddress properties[],
996 void* handlePointer )
998 CoreHandle *handle = (CoreHandle *) handlePointer;
999 for ( UInt32 i=0; i<nAddresses; i++ ) {
1000 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
1001 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
1002 handle->xrun[1] = true;
1004 handle->xrun[0] = true;
1008 return kAudioHardwareNoError;
1011 static OSStatus rateListener( AudioObjectID inDevice,
1012 UInt32 /*nAddresses*/,
1013 const AudioObjectPropertyAddress /*properties*/[],
1016 Float64 *rate = (Float64 *) ratePointer;
1017 UInt32 dataSize = sizeof( Float64 );
1018 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
1019 kAudioObjectPropertyScopeGlobal,
1020 kAudioObjectPropertyElementMaster };
1021 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
1022 return kAudioHardwareNoError;
// Open and configure one direction (OUTPUT or INPUT) of a CoreAudio stream.
//   device       - index into the system device list (validated below).
//   mode         - OUTPUT or INPUT; determines which stream_ slot is set up.
//   channels     - number of user channels requested.
//   firstChannel - channel offset on the device at which to start.
//   sampleRate   - requested rate in Hz; device is reconfigured if needed.
//   format       - user sample format; device side is always FLOAT32 here.
//   bufferSize   - in/out: requested frames, clamped to the device range.
//   options      - optional flags (RTAUDIO_MINIMIZE_LATENCY, RTAUDIO_HOG_DEVICE,
//                  RTAUDIO_NONINTERLEAVED) — may be NULL.
// Returns SUCCESS/FAILURE (error paths set errorText_; the return statements
// and some cleanup lines are not visible in this listing).
1025 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
1026 unsigned int firstChannel, unsigned int sampleRate,
1027 RtAudioFormat format, unsigned int *bufferSize,
1028 RtAudio::StreamOptions *options )
1031 unsigned int nDevices = getDeviceCount();
1032 if ( nDevices == 0 ) {
1033 // This should not happen because a check is made before this function is called.
1034 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
1038 if ( device >= nDevices ) {
1039 // This should not happen because a check is made before this function is called.
1040 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
// Translate our device index into a CoreAudio AudioDeviceID via the
// system object's device list.
// NOTE(review): variable-length array is a compiler extension in C++
// (works in gcc/clang); a std::vector would be portable.
1044 AudioDeviceID deviceList[ nDevices ];
1045 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
1046 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1047 kAudioObjectPropertyScopeGlobal,
1048 kAudioObjectPropertyElementMaster };
1049 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
1050 0, NULL, &dataSize, (void *) &deviceList );
1051 if ( result != noErr ) {
1052 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
1056 AudioDeviceID id = deviceList[ device ];
1058 // Setup for stream mode.
1059 bool isInput = false;
1060 if ( mode == INPUT ) {
1062 property.mScope = kAudioDevicePropertyScopeInput;
1065 property.mScope = kAudioDevicePropertyScopeOutput;
1067 // Get the stream "configuration".
1068 AudioBufferList *bufferList = nil;
1070 property.mSelector = kAudioDevicePropertyStreamConfiguration;
1071 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
1072 if ( result != noErr || dataSize == 0 ) {
1073 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
1074 errorText_ = errorStream_.str();
1078 // Allocate the AudioBufferList.
// NOTE(review): bufferList is malloc'd here; the matching free() calls are
// not visible in this listing — confirm every subsequent error path (and
// the success path) releases it.
1079 bufferList = (AudioBufferList *) malloc( dataSize );
1080 if ( bufferList == NULL ) {
1081 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
1085 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1086 if (result != noErr || dataSize == 0) {
1088 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1089 errorText_ = errorStream_.str();
1093 // Search for one or more streams that contain the desired number of
1094 // channels. CoreAudio devices can have an arbitrary number of
1095 // streams and each stream can have an arbitrary number of channels.
1096 // For each stream, a single buffer of interleaved samples is
1097 // provided. RtAudio prefers the use of one stream of interleaved
1098 // data or multiple consecutive single-channel streams. However, we
1099 // now support multiple consecutive multi-channel streams of
1100 // interleaved data as well.
1101 UInt32 iStream, offsetCounter = firstChannel;
1102 UInt32 nStreams = bufferList->mNumberBuffers;
1103 bool monoMode = false;
1104 bool foundStream = false;
1106 // First check that the device supports the requested number of
1108 UInt32 deviceChannels = 0;
1109 for ( iStream=0; iStream<nStreams; iStream++ )
1110 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1112 if ( deviceChannels < ( channels + firstChannel ) ) {
1114 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1115 errorText_ = errorStream_.str();
1119 // Look for a single stream meeting our needs.
1120 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
1121 for ( iStream=0; iStream<nStreams; iStream++ ) {
1122 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
// A single stream can satisfy the request if it holds the requested
// channels beyond the remaining channel offset.
1123 if ( streamChannels >= channels + offsetCounter ) {
1124 firstStream = iStream;
1125 channelOffset = offsetCounter;
1129 if ( streamChannels > offsetCounter ) break;
1130 offsetCounter -= streamChannels;
1133 // If we didn't find a single stream above, then we should be able
1134 // to meet the channel specification with multiple streams.
1135 if ( foundStream == false ) {
1137 offsetCounter = firstChannel;
// Skip whole streams consumed entirely by the channel offset.
1138 for ( iStream=0; iStream<nStreams; iStream++ ) {
1139 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1140 if ( streamChannels > offsetCounter ) break;
1141 offsetCounter -= streamChannels;
1144 firstStream = iStream;
1145 channelOffset = offsetCounter;
1146 Int32 channelCounter = channels + offsetCounter - streamChannels;
// monoMode stays set only if every stream spanned has one channel each.
1148 if ( streamChannels > 1 ) monoMode = false;
1149 while ( channelCounter > 0 ) {
1150 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1151 if ( streamChannels > 1 ) monoMode = false;
1152 channelCounter -= streamChannels;
1159 // Determine the buffer size.
1160 AudioValueRange bufferRange;
1161 dataSize = sizeof( AudioValueRange );
1162 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1163 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1165 if ( result != noErr ) {
1166 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1167 errorText_ = errorStream_.str();
// Clamp the requested buffer size to the device-supported range;
// RTAUDIO_MINIMIZE_LATENCY forces the minimum.
1171 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1172 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1173 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1175 // Set the buffer size. For multiple streams, I'm assuming we only
1176 // need to make this setting for the master channel.
1177 UInt32 theSize = (UInt32) *bufferSize;
1178 dataSize = sizeof( UInt32 );
1179 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1180 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1182 if ( result != noErr ) {
1183 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1184 errorText_ = errorStream_.str();
1188 // If attempting to setup a duplex stream, the bufferSize parameter
1189 // MUST be the same in both directions!
1190 *bufferSize = theSize;
1191 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1192 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1193 errorText_ = errorStream_.str();
1197 stream_.bufferSize = *bufferSize;
1198 stream_.nBuffers = 1;
1200 // Try to set "hog" mode ... it's not clear to me this is working.
1201 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1203 dataSize = sizeof( hog_pid );
1204 property.mSelector = kAudioDevicePropertyHogMode;
1205 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1206 if ( result != noErr ) {
1207 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1208 errorText_ = errorStream_.str();
// Only claim exclusive access if some other process currently owns it
// (or nobody does) — i.e. the hog pid is not already ours.
1212 if ( hog_pid != getpid() ) {
1214 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1215 if ( result != noErr ) {
1216 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1217 errorText_ = errorStream_.str();
1223 // Check and if necessary, change the sample rate for the device.
1224 Float64 nominalRate;
1225 dataSize = sizeof( Float64 );
1226 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1227 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1228 if ( result != noErr ) {
1229 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1230 errorText_ = errorStream_.str();
1234 // Only change the sample rate if off by more than 1 Hz.
1235 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1237 // Set a property listener for the sample rate change
1238 Float64 reportedRate = 0.0;
1239 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1240 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1241 if ( result != noErr ) {
1242 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1243 errorText_ = errorStream_.str();
1247 nominalRate = (Float64) sampleRate;
1248 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1249 if ( result != noErr ) {
1250 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1251 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1252 errorText_ = errorStream_.str();
1256 // Now wait until the reported nominal rate is what we just set.
// Poll in 5 ms steps (rateListener updates reportedRate) with a
// 5 second timeout. The sleep call itself is not visible in this
// listing — confirm the loop actually yields between iterations.
1257 UInt32 microCounter = 0;
1258 while ( reportedRate != nominalRate ) {
1259 microCounter += 5000;
1260 if ( microCounter > 5000000 ) break;
1264 // Remove the property listener.
1265 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1267 if ( microCounter > 5000000 ) {
1268 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1269 errorText_ = errorStream_.str();
1274 // Now set the stream format for all streams. Also, check the
1275 // physical format of the device and change that if necessary.
1276 AudioStreamBasicDescription description;
1277 dataSize = sizeof( AudioStreamBasicDescription );
1278 property.mSelector = kAudioStreamPropertyVirtualFormat;
1279 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1280 if ( result != noErr ) {
1281 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1282 errorText_ = errorStream_.str();
1286 // Set the sample rate and data format id. However, only make the
1287 // change if the sample rate is not within 1.0 of the desired
1288 // rate and the format is not linear pcm.
1289 bool updateFormat = false;
1290 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1291 description.mSampleRate = (Float64) sampleRate;
1292 updateFormat = true;
1295 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1296 description.mFormatID = kAudioFormatLinearPCM;
1297 updateFormat = true;
1300 if ( updateFormat ) {
1301 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1302 if ( result != noErr ) {
1303 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1304 errorText_ = errorStream_.str();
1309 // Now check the physical format.
1310 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1311 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1312 if ( result != noErr ) {
1313 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1314 errorText_ = errorStream_.str();
1318 //std::cout << "Current physical stream format:" << std::endl;
1319 //std::cout << "   mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1320 //std::cout << "   aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1321 //std::cout << "   bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1322 //std::cout << "   sample rate = " << description.mSampleRate << std::endl;
// If the physical format is not at least 16-bit linear PCM, try a list of
// candidate formats from highest quality downward until one sticks.
1324 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1325 description.mFormatID = kAudioFormatLinearPCM;
1326 //description.mSampleRate = (Float64) sampleRate;
1327 AudioStreamBasicDescription testDescription = description;
1330 // We'll try higher bit rates first and then work our way down.
// NOTE(review): the vector's element type is pair<UInt32,UInt32> but the
// push_backs below construct pair<Float32,UInt32> with fractional keys
// (24.2, 24.4) used to distinguish 24-bit packings; converting Float32 to
// UInt32 truncates both to 24 — verify against the canonical source,
// which uses pair<Float32, UInt32> as the element type.
1331 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1332 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1333 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1334 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1335 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1336 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) );   // 24-bit packed
1337 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1338 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1339 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1340 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1341 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1342 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1343 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1345 bool setPhysicalFormat = false;
1346 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1347 testDescription = description;
1348 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1349 testDescription.mFormatFlags = physicalFormats[i].second;
// NOTE(review): bitwise '~' on the flag test below is nonzero for almost
// every operand, so this condition is effectively "bits == 24" regardless
// of packing — a logical '!' looks intended. Confirm before changing;
// the fallthrough branch below computes the packed frame size.
1350 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1351 testDescription.mBytesPerFrame =  4 * testDescription.mChannelsPerFrame;
1353 testDescription.mBytesPerFrame =  testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1354 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1355 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1356 if ( result == noErr ) {
1357 setPhysicalFormat = true;
1358 //std::cout << "Updated physical stream format:" << std::endl;
1359 //std::cout << "   mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1360 //std::cout << "   aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1361 //std::cout << "   bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1362 //std::cout << "   sample rate = " << testDescription.mSampleRate << std::endl;
1367 if ( !setPhysicalFormat ) {
1368 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1369 errorText_ = errorStream_.str();
1372 } // done setting virtual/physical formats.
1374 // Get the stream / device latency.
1376 dataSize = sizeof( UInt32 );
1377 property.mSelector = kAudioDevicePropertyLatency;
1378 if ( AudioObjectHasProperty( id, &property ) == true ) {
1379 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1380 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
// Failure to read latency is only a warning, not fatal.
1382 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1383 errorText_ = errorStream_.str();
1384 error( RtAudioError::WARNING );
1388 // Byte-swapping: According to AudioHardware.h, the stream data will
1389 // always be presented in native-endian format, so we should never
1390 // need to byte swap.
1391 stream_.doByteSwap[mode] = false;
1393 // From the CoreAudio documentation, PCM data must be supplied as
1395 stream_.userFormat = format;
1396 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
// Record channel layout: a single stream reports the device's own channel
// count; multiple streams are presented to the user as 'channels'.
1398 if ( streamCount == 1 )
1399 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1400 else // multiple streams
1401 stream_.nDeviceChannels[mode] = channels;
1402 stream_.nUserChannels[mode] = channels;
1403 stream_.channelOffset[mode] = channelOffset;  // offset within a CoreAudio stream
1404 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1405 else stream_.userInterleaved = true;
1406 stream_.deviceInterleaved[mode] = true;
1407 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1409 // Set flags for buffer conversion.
1410 stream_.doConvertBuffer[mode] = false;
1411 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1412 stream_.doConvertBuffer[mode] = true;
1413 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1414 stream_.doConvertBuffer[mode] = true;
1415 if ( streamCount == 1 ) {
1416 if ( stream_.nUserChannels[mode] > 1 &&
1417 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1418 stream_.doConvertBuffer[mode] = true;
1420 else if ( monoMode && stream_.userInterleaved )
1421 stream_.doConvertBuffer[mode] = true;
1423 // Allocate our CoreHandle structure for the stream.
1424 CoreHandle *handle = 0;
1425 if ( stream_.apiHandle == 0 ) {
1427 handle = new CoreHandle;
1429 catch ( std::bad_alloc& ) {
1430 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1434 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1435 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1438 stream_.apiHandle = (void *) handle;
1441 handle = (CoreHandle *) stream_.apiHandle;
1442 handle->iStream[mode] = firstStream;
1443 handle->nStreams[mode] = streamCount;
1444 handle->id[mode] = id;
1446 // Allocate necessary internal buffers.
1447 unsigned long bufferBytes;
1448 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1449 //  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
// NOTE(review): memset runs before the NULL check below — a failed malloc
// dereferences NULL here. Either check first or restore the calloc above.
1450 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1451 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1452 if ( stream_.userBuffer[mode] == NULL ) {
1453 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1457 // If possible, we will make use of the CoreAudio stream buffers as
1458 // "device buffers".  However, we can't do this if using multiple
// Only allocate a separate device buffer when conversion is needed AND
// the data spans multiple CoreAudio streams.
1460 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1462 bool makeBuffer = true;
1463 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1464 if ( mode == INPUT ) {
// For duplex, reuse the output-side device buffer if it is large enough.
1465 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1466 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1467 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1472 bufferBytes *= *bufferSize;
1473 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1474 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1475 if ( stream_.deviceBuffer == NULL ) {
1476 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1482 stream_.sampleRate = sampleRate;
1483 stream_.device[mode] = device;
1484 stream_.state = STREAM_STOPPED;
1485 stream_.callbackInfo.object = (void *) this;
1487 // Setup the buffer conversion information structure.
1488 if ( stream_.doConvertBuffer[mode] ) {
1489 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1490 else setConvertInfo( mode, channelOffset );
// Opening the second direction on the same device: share the existing
// callback procedure rather than registering a second one.
1493 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1494 // Only one callback procedure per device.
1495 stream_.mode = DUPLEX;
1497 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1498 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1500 // deprecated in favor of AudioDeviceCreateIOProcID()
1501 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1503 if ( result != noErr ) {
1504 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1505 errorText_ = errorStream_.str();
1508 if ( stream_.mode == OUTPUT && mode == INPUT )
1509 stream_.mode = DUPLEX;
1511 stream_.mode = mode;
1514 // Setup the device property listener for over/underload.
1515 property.mSelector = kAudioDeviceProcessorOverload;
1516 property.mScope = kAudioObjectPropertyScopeGlobal;
1517 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
// Error-path cleanup: tear down the condition variable, buffers and
// handle, then mark the stream closed. (The goto/label lines that route
// here are not visible in this listing.)
1523 pthread_cond_destroy( &handle->condition );
1525 stream_.apiHandle = 0;
1528 for ( int i=0; i<2; i++ ) {
1529 if ( stream_.userBuffer[i] ) {
1530 free( stream_.userBuffer[i] );
1531 stream_.userBuffer[i] = 0;
1535 if ( stream_.deviceBuffer ) {
1536 free( stream_.deviceBuffer );
1537 stream_.deviceBuffer = 0;
1540 stream_.state = STREAM_CLOSED;
// Close the open stream: remove the xrun property listeners, stop the
// device(s) if running, unregister the IOProc(s), free user/device buffers,
// destroy the condition variable and reset the stream state.
// A warning (not an error) is raised if no stream is open.
1544 void RtApiCore :: closeStream( void )
1546 if ( stream_.state == STREAM_CLOSED ) {
1547 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1548 error( RtAudioError::WARNING );
1552 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Tear down the output side (slot 0).
1553 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1555 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1556 kAudioObjectPropertyScopeGlobal,
1557 kAudioObjectPropertyElementMaster };
1559 property.mSelector = kAudioDeviceProcessorOverload;
1560 property.mScope = kAudioObjectPropertyScopeGlobal;
1561 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1562 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1563 error( RtAudioError::WARNING );
1566 if ( stream_.state == STREAM_RUNNING )
1567 AudioDeviceStop( handle->id[0], callbackHandler );
1568 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1569 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1571 // deprecated in favor of AudioDeviceDestroyIOProcID()
1572 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Tear down the input side (slot 1) — skipped for duplex on a single
// device, which shares one IOProc.
1576 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1578 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1579 kAudioObjectPropertyScopeGlobal,
1580 kAudioObjectPropertyElementMaster };
1582 property.mSelector = kAudioDeviceProcessorOverload;
1583 property.mScope = kAudioObjectPropertyScopeGlobal;
1584 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1585 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1586 error( RtAudioError::WARNING );
1589 if ( stream_.state == STREAM_RUNNING )
1590 AudioDeviceStop( handle->id[1], callbackHandler );
1591 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1592 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1594 // deprecated in favor of AudioDeviceDestroyIOProcID()
1595 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Release user buffers for both directions.
1599 for ( int i=0; i<2; i++ ) {
1600 if ( stream_.userBuffer[i] ) {
1601 free( stream_.userBuffer[i] );
1602 stream_.userBuffer[i] = 0;
1606 if ( stream_.deviceBuffer ) {
1607 free( stream_.deviceBuffer );
1608 stream_.deviceBuffer = 0;
1611 // Destroy pthread condition variable.
// NOTE(review): the 'delete handle;' that should accompany this reset is
// not visible in this listing — confirm the CoreHandle is freed here.
1612 pthread_cond_destroy( &handle->condition );
1614 stream_.apiHandle = 0;
1616 stream_.mode = UNINITIALIZED;
1617 stream_.state = STREAM_CLOSED;
// Start the stream: kick off the CoreAudio IOProc on the output device
// (slot 0) and, for input or split-device duplex, on the input device
// (slot 1). Resets the drain state and marks the stream RUNNING.
// Warns (no-op) if already running; raises SYSTEM_ERROR on any failure.
1620 void RtApiCore :: startStream( void )
1623 if ( stream_.state == STREAM_RUNNING ) {
1624 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1625 error( RtAudioError::WARNING );
1629 OSStatus result = noErr;
1630 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1631 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1633 result = AudioDeviceStart( handle->id[0], callbackHandler );
1634 if ( result != noErr ) {
1635 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1636 errorText_ = errorStream_.str();
// Single-device duplex shares the IOProc started above, so only start the
// input device separately when it differs from the output device.
1641 if ( stream_.mode == INPUT ||
1642 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1644 result = AudioDeviceStart( handle->id[1], callbackHandler );
1645 if ( result != noErr ) {
1646 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1647 errorText_ = errorStream_.str();
1652 handle->drainCounter = 0;
1653 handle->internalDrain = false;
1654 stream_.state = STREAM_RUNNING;
1657 if ( result == noErr ) return;
1658 error( RtAudioError::SYSTEM_ERROR );
1661 void RtApiCore :: stopStream( void )
1664 if ( stream_.state == STREAM_STOPPED ) {
1665 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1666 error( RtAudioError::WARNING );
1670 OSStatus result = noErr;
1671 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1672 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1674 if ( handle->drainCounter == 0 ) {
1675 handle->drainCounter = 2;
1676 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1679 result = AudioDeviceStop( handle->id[0], callbackHandler );
1680 if ( result != noErr ) {
1681 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1682 errorText_ = errorStream_.str();
1687 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1689 result = AudioDeviceStop( handle->id[1], callbackHandler );
1690 if ( result != noErr ) {
1691 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1692 errorText_ = errorStream_.str();
1697 stream_.state = STREAM_STOPPED;
1700 if ( result == noErr ) return;
1701 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream immediately: skip the output drain by setting
// drainCounter past the "request" stage, so the stop path does not wait
// for buffered audio to play out. (The delegation to stopStream() that
// follows is not visible in this listing.) Warns if already stopped.
1704 void RtApiCore :: abortStream( void )
1707 if ( stream_.state == STREAM_STOPPED ) {
1708 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1709 error( RtAudioError::WARNING );
1713 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1714 handle->drainCounter = 2;
1719 // This function will be called by a spawned thread when the user
1720 // callback function signals that the stream should be stopped or
1721 // aborted. It is better to handle it this way because the
1722 // callbackEvent() function probably should return before the AudioDeviceStop()
1723 // function is called.
// Thread entry point spawned from callbackEvent() when the user callback
// requests a stop: calls stopStream() outside the audio callback so that
// AudioDeviceStop() is not invoked from within its own IOProc.
// ptr: the stream's CallbackInfo, whose 'object' is the RtApiCore instance.
1724 static void *coreStopStream( void *ptr )
1726 CallbackInfo *info = (CallbackInfo *) ptr;
1727 RtApiCore *object = (RtApiCore *) info->object;
1729 object->stopStream();
1730 pthread_exit( NULL );
1733 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1734 const AudioBufferList *inBufferList,
1735 const AudioBufferList *outBufferList )
1737 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1738 if ( stream_.state == STREAM_CLOSED ) {
1739 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1740 error( RtAudioError::WARNING );
1744 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1745 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1747 // Check if we were draining the stream and signal is finished.
1748 if ( handle->drainCounter > 3 ) {
1749 ThreadHandle threadId;
1751 stream_.state = STREAM_STOPPING;
1752 if ( handle->internalDrain == true )
1753 pthread_create( &threadId, NULL, coreStopStream, info );
1754 else // external call to stopStream()
1755 pthread_cond_signal( &handle->condition );
1759 AudioDeviceID outputDevice = handle->id[0];
1761 // Invoke user callback to get fresh output data UNLESS we are
1762 // draining stream or duplex mode AND the input/output devices are
1763 // different AND this function is called for the input device.
1764 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1765 RtAudioCallback callback = (RtAudioCallback) info->callback;
1766 double streamTime = getStreamTime();
1767 RtAudioStreamStatus status = 0;
1768 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1769 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1770 handle->xrun[0] = false;
1772 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1773 status |= RTAUDIO_INPUT_OVERFLOW;
1774 handle->xrun[1] = false;
1777 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1778 stream_.bufferSize, streamTime, status, info->userData );
1779 if ( cbReturnValue == 2 ) {
1780 stream_.state = STREAM_STOPPING;
1781 handle->drainCounter = 2;
1785 else if ( cbReturnValue == 1 ) {
1786 handle->drainCounter = 1;
1787 handle->internalDrain = true;
1791 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1793 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1795 if ( handle->nStreams[0] == 1 ) {
1796 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1798 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1800 else { // fill multiple streams with zeros
1801 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1802 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1804 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1808 else if ( handle->nStreams[0] == 1 ) {
1809 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1810 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1811 stream_.userBuffer[0], stream_.convertInfo[0] );
1813 else { // copy from user buffer
1814 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1815 stream_.userBuffer[0],
1816 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1819 else { // fill multiple streams
1820 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1821 if ( stream_.doConvertBuffer[0] ) {
1822 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1823 inBuffer = (Float32 *) stream_.deviceBuffer;
1826 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1827 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1828 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1829 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1830 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1833 else { // fill multiple multi-channel streams with interleaved data
1834 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1837 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1838 UInt32 inChannels = stream_.nUserChannels[0];
1839 if ( stream_.doConvertBuffer[0] ) {
1840 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1841 inChannels = stream_.nDeviceChannels[0];
1844 if ( inInterleaved ) inOffset = 1;
1845 else inOffset = stream_.bufferSize;
1847 channelsLeft = inChannels;
1848 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1850 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1851 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1854 // Account for possible channel offset in first stream
1855 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1856 streamChannels -= stream_.channelOffset[0];
1857 outJump = stream_.channelOffset[0];
1861 // Account for possible unfilled channels at end of the last stream
1862 if ( streamChannels > channelsLeft ) {
1863 outJump = streamChannels - channelsLeft;
1864 streamChannels = channelsLeft;
1867 // Determine input buffer offsets and skips
1868 if ( inInterleaved ) {
1869 inJump = inChannels;
1870 in += inChannels - channelsLeft;
1874 in += (inChannels - channelsLeft) * inOffset;
1877 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1878 for ( unsigned int j=0; j<streamChannels; j++ ) {
1879 *out++ = in[j*inOffset];
1884 channelsLeft -= streamChannels;
1890 // Don't bother draining input
1891 if ( handle->drainCounter ) {
1892 handle->drainCounter++;
1896 AudioDeviceID inputDevice;
1897 inputDevice = handle->id[1];
1898 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1900 if ( handle->nStreams[1] == 1 ) {
1901 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1902 convertBuffer( stream_.userBuffer[1],
1903 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1904 stream_.convertInfo[1] );
1906 else { // copy to user buffer
1907 memcpy( stream_.userBuffer[1],
1908 inBufferList->mBuffers[handle->iStream[1]].mData,
1909 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1912 else { // read from multiple streams
1913 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1914 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1916 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1917 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1918 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1919 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1920 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1923 else { // read from multiple multi-channel streams
1924 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1927 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1928 UInt32 outChannels = stream_.nUserChannels[1];
1929 if ( stream_.doConvertBuffer[1] ) {
1930 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1931 outChannels = stream_.nDeviceChannels[1];
1934 if ( outInterleaved ) outOffset = 1;
1935 else outOffset = stream_.bufferSize;
1937 channelsLeft = outChannels;
1938 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1940 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1941 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1944 // Account for possible channel offset in first stream
1945 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1946 streamChannels -= stream_.channelOffset[1];
1947 inJump = stream_.channelOffset[1];
1951 // Account for possible unread channels at end of the last stream
1952 if ( streamChannels > channelsLeft ) {
1953 inJump = streamChannels - channelsLeft;
1954 streamChannels = channelsLeft;
1957 // Determine output buffer offsets and skips
1958 if ( outInterleaved ) {
1959 outJump = outChannels;
1960 out += outChannels - channelsLeft;
1964 out += (outChannels - channelsLeft) * outOffset;
1967 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1968 for ( unsigned int j=0; j<streamChannels; j++ ) {
1969 out[j*outOffset] = *in++;
1974 channelsLeft -= streamChannels;
1978 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1979 convertBuffer( stream_.userBuffer[1],
1980 stream_.deviceBuffer,
1981 stream_.convertInfo[1] );
1987 //MUTEX_UNLOCK( &stream_.mutex );
1989 RtApi::tickStreamTime();
1993 const char* RtApiCore :: getErrorCode( OSStatus code )
1997 case kAudioHardwareNotRunningError:
1998 return "kAudioHardwareNotRunningError";
2000 case kAudioHardwareUnspecifiedError:
2001 return "kAudioHardwareUnspecifiedError";
2003 case kAudioHardwareUnknownPropertyError:
2004 return "kAudioHardwareUnknownPropertyError";
2006 case kAudioHardwareBadPropertySizeError:
2007 return "kAudioHardwareBadPropertySizeError";
2009 case kAudioHardwareIllegalOperationError:
2010 return "kAudioHardwareIllegalOperationError";
2012 case kAudioHardwareBadObjectError:
2013 return "kAudioHardwareBadObjectError";
2015 case kAudioHardwareBadDeviceError:
2016 return "kAudioHardwareBadDeviceError";
2018 case kAudioHardwareBadStreamError:
2019 return "kAudioHardwareBadStreamError";
2021 case kAudioHardwareUnsupportedOperationError:
2022 return "kAudioHardwareUnsupportedOperationError";
2024 case kAudioDeviceUnsupportedFormatError:
2025 return "kAudioDeviceUnsupportedFormatError";
2027 case kAudioDevicePermissionsError:
2028 return "kAudioDevicePermissionsError";
2031 return "CoreAudio unknown error";
2035 //******************** End of __MACOSX_CORE__ *********************//
2038 #if defined(__UNIX_JACK__)
2040 // JACK is a low-latency audio server, originally written for the
2041 // GNU/Linux operating system and now also ported to OS-X. It can
2042 // connect a number of different applications to an audio device, as
2043 // well as allowing them to share audio between themselves.
2045 // When using JACK with RtAudio, "devices" refer to JACK clients that
2046 // have ports connected to the server. The JACK server is typically
2047 // started in a terminal as follows:
2049 // .jackd -d alsa -d hw:0
2051 // or through an interface program such as qjackctl. Many of the
2052 // parameters normally set for a stream are fixed by the JACK server
2053 // and can be specified when the JACK server is started. In
2056 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
2058 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
2059 // frames, and number of buffers = 4. Once the server is running, it
2060 // is not possible to override these values. If the values are not
2061 // specified in the command-line, the JACK server uses default values.
2063 // The JACK server does not have to be running when an instance of
2064 // RtApiJack is created, though the function getDeviceCount() will
2065 // report 0 devices found until JACK has been started. When no
2066 // devices are available (i.e., the JACK server is not running), a
2067 // stream cannot be opened.
2069 #include <jack/jack.h>
2073 // A structure to hold various information related to the Jack API
2076 jack_client_t *client;
2077 jack_port_t **ports[2];
2078 std::string deviceName[2];
2080 pthread_cond_t condition;
2081 int drainCounter; // Tracks callback counts when draining
2082 bool internalDrain; // Indicates if stop is initiated from callback or not.
2085 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
#if !defined(__RTAUDIO_DEBUG__)
// No-op error handler installed into JACK to silence its internal
// error printing when RtAudio is not built in debug mode.
static void jackSilentError( const char * ) {}
#endif
2092 RtApiJack :: RtApiJack()
2093 :shouldAutoconnect_(true) {
2094 // Nothing to do here.
2095 #if !defined(__RTAUDIO_DEBUG__)
2096 // Turn off Jack's internal error reporting.
2097 jack_set_error_function( &jackSilentError );
2101 RtApiJack :: ~RtApiJack()
2103 if ( stream_.state != STREAM_CLOSED ) closeStream();
2106 unsigned int RtApiJack :: getDeviceCount( void )
2108 // See if we can become a jack client.
2109 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2110 jack_status_t *status = NULL;
2111 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2112 if ( client == 0 ) return 0;
2115 std::string port, previousPort;
2116 unsigned int nChannels = 0, nDevices = 0;
2117 ports = jack_get_ports( client, NULL, NULL, 0 );
2119 // Parse the port names up to the first colon (:).
2122 port = (char *) ports[ nChannels ];
2123 iColon = port.find(":");
2124 if ( iColon != std::string::npos ) {
2125 port = port.substr( 0, iColon + 1 );
2126 if ( port != previousPort ) {
2128 previousPort = port;
2131 } while ( ports[++nChannels] );
2135 jack_client_close( client );
2139 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2141 RtAudio::DeviceInfo info;
2142 info.probed = false;
2144 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2145 jack_status_t *status = NULL;
2146 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2147 if ( client == 0 ) {
2148 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2149 error( RtAudioError::WARNING );
2154 std::string port, previousPort;
2155 unsigned int nPorts = 0, nDevices = 0;
2156 ports = jack_get_ports( client, NULL, NULL, 0 );
2158 // Parse the port names up to the first colon (:).
2161 port = (char *) ports[ nPorts ];
2162 iColon = port.find(":");
2163 if ( iColon != std::string::npos ) {
2164 port = port.substr( 0, iColon );
2165 if ( port != previousPort ) {
2166 if ( nDevices == device ) info.name = port;
2168 previousPort = port;
2171 } while ( ports[++nPorts] );
2175 if ( device >= nDevices ) {
2176 jack_client_close( client );
2177 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2178 error( RtAudioError::INVALID_USE );
2182 // Get the current jack server sample rate.
2183 info.sampleRates.clear();
2185 info.preferredSampleRate = jack_get_sample_rate( client );
2186 info.sampleRates.push_back( info.preferredSampleRate );
2188 // Count the available ports containing the client name as device
2189 // channels. Jack "input ports" equal RtAudio output channels.
2190 unsigned int nChannels = 0;
2191 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
2193 while ( ports[ nChannels ] ) nChannels++;
2195 info.outputChannels = nChannels;
2198 // Jack "output ports" equal RtAudio input channels.
2200 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
2202 while ( ports[ nChannels ] ) nChannels++;
2204 info.inputChannels = nChannels;
2207 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2208 jack_client_close(client);
2209 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2210 error( RtAudioError::WARNING );
2214 // If device opens for both playback and capture, we determine the channels.
2215 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2216 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2218 // Jack always uses 32-bit floats.
2219 info.nativeFormats = RTAUDIO_FLOAT32;
2221 // Jack doesn't provide default devices so we'll use the first available one.
2222 if ( device == 0 && info.outputChannels > 0 )
2223 info.isDefaultOutput = true;
2224 if ( device == 0 && info.inputChannels > 0 )
2225 info.isDefaultInput = true;
2227 jack_client_close(client);
2232 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2234 CallbackInfo *info = (CallbackInfo *) infoPointer;
2236 RtApiJack *object = (RtApiJack *) info->object;
2237 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2242 // This function will be called by a spawned thread when the Jack
2243 // server signals that it is shutting down. It is necessary to handle
2244 // it this way because the jackShutdown() function must return before
2245 // the jack_deactivate() function (in closeStream()) will return.
2246 static void *jackCloseStream( void *ptr )
2248 CallbackInfo *info = (CallbackInfo *) ptr;
2249 RtApiJack *object = (RtApiJack *) info->object;
2251 object->closeStream();
2253 pthread_exit( NULL );
2255 static void jackShutdown( void *infoPointer )
2257 CallbackInfo *info = (CallbackInfo *) infoPointer;
2258 RtApiJack *object = (RtApiJack *) info->object;
2260 // Check current stream state. If stopped, then we'll assume this
2261 // was called as a result of a call to RtApiJack::stopStream (the
2262 // deactivation of a client handle causes this function to be called).
2263 // If not, we'll assume the Jack server is shutting down or some
2264 // other problem occurred and we should close the stream.
2265 if ( object->isStreamRunning() == false ) return;
2267 ThreadHandle threadId;
2268 pthread_create( &threadId, NULL, jackCloseStream, info );
2269 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2272 static int jackXrun( void *infoPointer )
2274 JackHandle *handle = *((JackHandle **) infoPointer);
2276 if ( handle->ports[0] ) handle->xrun[0] = true;
2277 if ( handle->ports[1] ) handle->xrun[1] = true;
2282 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2283 unsigned int firstChannel, unsigned int sampleRate,
2284 RtAudioFormat format, unsigned int *bufferSize,
2285 RtAudio::StreamOptions *options )
2287 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2289 // Look for jack server and try to become a client (only do once per stream).
2290 jack_client_t *client = 0;
2291 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2292 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2293 jack_status_t *status = NULL;
2294 if ( options && !options->streamName.empty() )
2295 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2297 client = jack_client_open( "RtApiJack", jackoptions, status );
2298 if ( client == 0 ) {
2299 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2300 error( RtAudioError::WARNING );
2305 // The handle must have been created on an earlier pass.
2306 client = handle->client;
2310 std::string port, previousPort, deviceName;
2311 unsigned int nPorts = 0, nDevices = 0;
2312 ports = jack_get_ports( client, NULL, NULL, 0 );
2314 // Parse the port names up to the first colon (:).
2317 port = (char *) ports[ nPorts ];
2318 iColon = port.find(":");
2319 if ( iColon != std::string::npos ) {
2320 port = port.substr( 0, iColon );
2321 if ( port != previousPort ) {
2322 if ( nDevices == device ) deviceName = port;
2324 previousPort = port;
2327 } while ( ports[++nPorts] );
2331 if ( device >= nDevices ) {
2332 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
2336 // Count the available ports containing the client name as device
2337 // channels. Jack "input ports" equal RtAudio output channels.
2338 unsigned int nChannels = 0;
2339 unsigned long flag = JackPortIsInput;
2340 if ( mode == INPUT ) flag = JackPortIsOutput;
2341 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2343 while ( ports[ nChannels ] ) nChannels++;
2347 // Compare the jack ports for specified client to the requested number of channels.
2348 if ( nChannels < (channels + firstChannel) ) {
2349 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2350 errorText_ = errorStream_.str();
2354 // Check the jack server sample rate.
2355 unsigned int jackRate = jack_get_sample_rate( client );
2356 if ( sampleRate != jackRate ) {
2357 jack_client_close( client );
2358 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2359 errorText_ = errorStream_.str();
2362 stream_.sampleRate = jackRate;
2364 // Get the latency of the JACK port.
2365 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2366 if ( ports[ firstChannel ] ) {
2368 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2369 // the range (usually the min and max are equal)
2370 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2371 // get the latency range
2372 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2373 // be optimistic, use the min!
2374 stream_.latency[mode] = latrange.min;
2375 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2379 // The jack server always uses 32-bit floating-point data.
2380 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2381 stream_.userFormat = format;
2383 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2384 else stream_.userInterleaved = true;
2386 // Jack always uses non-interleaved buffers.
2387 stream_.deviceInterleaved[mode] = false;
2389 // Jack always provides host byte-ordered data.
2390 stream_.doByteSwap[mode] = false;
2392 // Get the buffer size. The buffer size and number of buffers
2393 // (periods) is set when the jack server is started.
2394 stream_.bufferSize = (int) jack_get_buffer_size( client );
2395 *bufferSize = stream_.bufferSize;
2397 stream_.nDeviceChannels[mode] = channels;
2398 stream_.nUserChannels[mode] = channels;
2400 // Set flags for buffer conversion.
2401 stream_.doConvertBuffer[mode] = false;
2402 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2403 stream_.doConvertBuffer[mode] = true;
2404 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2405 stream_.nUserChannels[mode] > 1 )
2406 stream_.doConvertBuffer[mode] = true;
2408 // Allocate our JackHandle structure for the stream.
2409 if ( handle == 0 ) {
2411 handle = new JackHandle;
2413 catch ( std::bad_alloc& ) {
2414 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2418 if ( pthread_cond_init(&handle->condition, NULL) ) {
2419 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2422 stream_.apiHandle = (void *) handle;
2423 handle->client = client;
2425 handle->deviceName[mode] = deviceName;
2427 // Allocate necessary internal buffers.
2428 unsigned long bufferBytes;
2429 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2430 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2431 if ( stream_.userBuffer[mode] == NULL ) {
2432 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2436 if ( stream_.doConvertBuffer[mode] ) {
2438 bool makeBuffer = true;
2439 if ( mode == OUTPUT )
2440 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2441 else { // mode == INPUT
2442 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2443 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2444 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2445 if ( bufferBytes < bytesOut ) makeBuffer = false;
2450 bufferBytes *= *bufferSize;
2451 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2452 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2453 if ( stream_.deviceBuffer == NULL ) {
2454 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2460 // Allocate memory for the Jack ports (channels) identifiers.
2461 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2462 if ( handle->ports[mode] == NULL ) {
2463 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2467 stream_.device[mode] = device;
2468 stream_.channelOffset[mode] = firstChannel;
2469 stream_.state = STREAM_STOPPED;
2470 stream_.callbackInfo.object = (void *) this;
2472 if ( stream_.mode == OUTPUT && mode == INPUT )
2473 // We had already set up the stream for output.
2474 stream_.mode = DUPLEX;
2476 stream_.mode = mode;
2477 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2478 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2479 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2482 // Register our ports.
2484 if ( mode == OUTPUT ) {
2485 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2486 snprintf( label, 64, "outport %d", i );
2487 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2488 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2492 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2493 snprintf( label, 64, "inport %d", i );
2494 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2495 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2499 // Setup the buffer conversion information structure. We don't use
2500 // buffers to do channel offsets, so we override that parameter
2502 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2504 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
2510 pthread_cond_destroy( &handle->condition );
2511 jack_client_close( handle->client );
2513 if ( handle->ports[0] ) free( handle->ports[0] );
2514 if ( handle->ports[1] ) free( handle->ports[1] );
2517 stream_.apiHandle = 0;
2520 for ( int i=0; i<2; i++ ) {
2521 if ( stream_.userBuffer[i] ) {
2522 free( stream_.userBuffer[i] );
2523 stream_.userBuffer[i] = 0;
2527 if ( stream_.deviceBuffer ) {
2528 free( stream_.deviceBuffer );
2529 stream_.deviceBuffer = 0;
2535 void RtApiJack :: closeStream( void )
2537 if ( stream_.state == STREAM_CLOSED ) {
2538 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2539 error( RtAudioError::WARNING );
2543 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2546 if ( stream_.state == STREAM_RUNNING )
2547 jack_deactivate( handle->client );
2549 jack_client_close( handle->client );
2553 if ( handle->ports[0] ) free( handle->ports[0] );
2554 if ( handle->ports[1] ) free( handle->ports[1] );
2555 pthread_cond_destroy( &handle->condition );
2557 stream_.apiHandle = 0;
2560 for ( int i=0; i<2; i++ ) {
2561 if ( stream_.userBuffer[i] ) {
2562 free( stream_.userBuffer[i] );
2563 stream_.userBuffer[i] = 0;
2567 if ( stream_.deviceBuffer ) {
2568 free( stream_.deviceBuffer );
2569 stream_.deviceBuffer = 0;
2572 stream_.mode = UNINITIALIZED;
2573 stream_.state = STREAM_CLOSED;
2576 void RtApiJack :: startStream( void )
2579 if ( stream_.state == STREAM_RUNNING ) {
2580 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2581 error( RtAudioError::WARNING );
2585 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2586 int result = jack_activate( handle->client );
2588 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2594 // Get the list of available ports.
2595 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
2597 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
2598 if ( ports == NULL) {
2599 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2603 // Now make the port connections. Since RtAudio wasn't designed to
2604 // allow the user to select particular channels of a device, we'll
2605 // just open the first "nChannels" ports with offset.
2606 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2608 if ( ports[ stream_.channelOffset[0] + i ] )
2609 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2612 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
2619 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
2621 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
2622 if ( ports == NULL) {
2623 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2627 // Now make the port connections. See note above.
2628 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2630 if ( ports[ stream_.channelOffset[1] + i ] )
2631 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2634 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
2641 handle->drainCounter = 0;
2642 handle->internalDrain = false;
2643 stream_.state = STREAM_RUNNING;
2646 if ( result == 0 ) return;
2647 error( RtAudioError::SYSTEM_ERROR );
2650 void RtApiJack :: stopStream( void )
2653 if ( stream_.state == STREAM_STOPPED ) {
2654 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2655 error( RtAudioError::WARNING );
2659 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2660 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2662 if ( handle->drainCounter == 0 ) {
2663 handle->drainCounter = 2;
2664 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2668 jack_deactivate( handle->client );
2669 stream_.state = STREAM_STOPPED;
2672 void RtApiJack :: abortStream( void )
2675 if ( stream_.state == STREAM_STOPPED ) {
2676 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2677 error( RtAudioError::WARNING );
2681 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2682 handle->drainCounter = 2;
2687 // This function will be called by a spawned thread when the user
2688 // callback function signals that the stream should be stopped or
2689 // aborted. It is necessary to handle it this way because the
2690 // callbackEvent() function must return before the jack_deactivate()
2691 // function will return.
2692 static void *jackStopStream( void *ptr )
2694 CallbackInfo *info = (CallbackInfo *) ptr;
2695 RtApiJack *object = (RtApiJack *) info->object;
2697 object->stopStream();
2698 pthread_exit( NULL );
2701 bool RtApiJack :: callbackEvent( unsigned long nframes )
2703 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2704 if ( stream_.state == STREAM_CLOSED ) {
2705 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2706 error( RtAudioError::WARNING );
2709 if ( stream_.bufferSize != nframes ) {
2710 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2711 error( RtAudioError::WARNING );
2715 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2716 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2718 // Check if we were draining the stream and signal is finished.
2719 if ( handle->drainCounter > 3 ) {
2720 ThreadHandle threadId;
2722 stream_.state = STREAM_STOPPING;
2723 if ( handle->internalDrain == true )
2724 pthread_create( &threadId, NULL, jackStopStream, info );
2726 pthread_cond_signal( &handle->condition );
2730 // Invoke user callback first, to get fresh output data.
2731 if ( handle->drainCounter == 0 ) {
2732 RtAudioCallback callback = (RtAudioCallback) info->callback;
2733 double streamTime = getStreamTime();
2734 RtAudioStreamStatus status = 0;
2735 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2736 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2737 handle->xrun[0] = false;
2739 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2740 status |= RTAUDIO_INPUT_OVERFLOW;
2741 handle->xrun[1] = false;
2743 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2744 stream_.bufferSize, streamTime, status, info->userData );
2745 if ( cbReturnValue == 2 ) {
2746 stream_.state = STREAM_STOPPING;
2747 handle->drainCounter = 2;
2749 pthread_create( &id, NULL, jackStopStream, info );
2752 else if ( cbReturnValue == 1 ) {
2753 handle->drainCounter = 1;
2754 handle->internalDrain = true;
2758 jack_default_audio_sample_t *jackbuffer;
2759 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2760 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2762 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2764 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2765 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2766 memset( jackbuffer, 0, bufferBytes );
2770 else if ( stream_.doConvertBuffer[0] ) {
2772 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2774 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2775 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2776 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2779 else { // no buffer conversion
2780 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2781 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2782 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2787 // Don't bother draining input
2788 if ( handle->drainCounter ) {
2789 handle->drainCounter++;
2793 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2795 if ( stream_.doConvertBuffer[1] ) {
2796 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2797 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2798 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2800 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2802 else { // no buffer conversion
2803 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2804 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2805 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2811 RtApi::tickStreamTime();
2814 //******************** End of __UNIX_JACK__ *********************//
2817 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2819 // The ASIO API is designed around a callback scheme, so this
2820 // implementation is similar to that used for OS-X CoreAudio and Linux
2821 // Jack. The primary constraint with ASIO is that it only allows
2822 // access to a single driver at a time. Thus, it is not possible to
2823 // have more than one simultaneous RtAudio stream.
2825 // This implementation also requires a number of external ASIO files
2826 // and a few global variables. The ASIO callback scheme does not
2827 // allow for the passing of user data, so we must create a global
2828 // pointer to our callbackInfo structure.
2830 // On unix systems, we make use of a pthread condition variable.
2831 // Since there is no equivalent in Windows, I hacked something based
2832 // on information found in
2833 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2835 #include "asiosys.h"
2837 #include "iasiothiscallresolver.h"
2838 #include "asiodrivers.h"
2841 static AsioDrivers drivers;
2842 static ASIOCallbacks asioCallbacks;
2843 static ASIODriverInfo driverInfo;
2844 static CallbackInfo *asioCallbackInfo;
2845 static bool asioXRun;
2848 int drainCounter; // Tracks callback counts when draining
2849 bool internalDrain; // Indicates if stop is initiated from callback or not.
2850 ASIOBufferInfo *bufferInfos;
2854 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2857 // Function declarations (definitions at end of section)
2858 static const char* getAsioErrorString( ASIOError result );
2859 static void sampleRateChanged( ASIOSampleRate sRate );
2860 static long asioMessages( long selector, long value, void* message, double* opt );
2862 RtApiAsio :: RtApiAsio()
2864 // ASIO cannot run on a multi-threaded appartment. You can call
2865 // CoInitialize beforehand, but it must be for appartment threading
2866 // (in which case, CoInitilialize will return S_FALSE here).
2867 coInitialized_ = false;
2868 HRESULT hr = CoInitialize( NULL );
2870 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2871 error( RtAudioError::WARNING );
2873 coInitialized_ = true;
2875 drivers.removeCurrentDriver();
2876 driverInfo.asioVersion = 2;
2878 // See note in DirectSound implementation about GetDesktopWindow().
2879 driverInfo.sysRef = GetForegroundWindow();
2882 RtApiAsio :: ~RtApiAsio()
2884 if ( stream_.state != STREAM_CLOSED ) closeStream();
2885 if ( coInitialized_ ) CoUninitialize();
2888 unsigned int RtApiAsio :: getDeviceCount( void )
2890 return (unsigned int) drivers.asioGetNumDev();
RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
  // Probe one ASIO driver ("device") for channel counts, supported
  // sample rates, and native data formats.  Loads the driver for the
  // probe and unloads it at the end, so live probing is impossible
  // while a stream is open (see the saved-results path below).
  RtAudio::DeviceInfo info;
  info.probed = false;

  // Validate the requested device index.
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );

  if ( device >= nDevices ) {
    errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );

  // If a stream is already open, we cannot probe other devices.  Thus, use the saved results.
  if ( stream_.state != STREAM_CLOSED ) {
    if ( device >= devices_.size() ) {
      errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
      error( RtAudioError::WARNING );
    return devices_[ device ];

  // Look up the driver's name by index.
  char driverName[32];
  ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  info.name = driverName;

  // Load and initialize the driver so we can query it directly.
  if ( !drivers.loadDriver( driverName ) ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  result = ASIOInit( &driverInfo );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Determine the device channel information.
  long inputChannels, outputChannels;
  result = ASIOGetChannels( &inputChannels, &outputChannels );
  if ( result != ASE_OK ) {
    // Unload the driver before bailing out so it isn't left loaded.
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  info.outputChannels = outputChannels;
  info.inputChannels = inputChannels;
  // Duplex capability is limited by the smaller of the two directions.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Determine the supported sample rates by asking the driver about
  // each entry in our static candidate table.
  info.sampleRates.clear();
  for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
    result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
    if ( result == ASE_OK ) {
      info.sampleRates.push_back( SAMPLE_RATES[i] );

      // Preferred rate: the highest supported rate not exceeding 48 kHz.
      if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
        info.preferredSampleRate = SAMPLE_RATES[i];

  // Determine supported data types ... just check first channel and assume rest are the same.
  ASIOChannelInfo channelInfo;
  channelInfo.channel = 0;
  channelInfo.isInput = true;
  if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
  result = ASIOGetChannelInfo( &channelInfo );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Map the ASIO sample type to the corresponding RtAudio format flag.
  // MSB variants imply a byte-swapped (big-endian) layout on Windows.
  info.nativeFormats = 0;
  if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
    info.nativeFormats |= RTAUDIO_SINT16;
  else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
    info.nativeFormats |= RTAUDIO_SINT32;
  else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
    info.nativeFormats |= RTAUDIO_FLOAT32;
  else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
    info.nativeFormats |= RTAUDIO_FLOAT64;
  else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
    info.nativeFormats |= RTAUDIO_SINT24;

  // Flag default devices.
  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;

  // Done probing; unload the driver.
  drivers.removeCurrentDriver();
3012 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
3014 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
3015 object->callbackEvent( index );
3018 void RtApiAsio :: saveDeviceInfo( void )
3022 unsigned int nDevices = getDeviceCount();
3023 devices_.resize( nDevices );
3024 for ( unsigned int i=0; i<nDevices; i++ )
3025 devices_[i] = getDeviceInfo( i );
3028 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
3029 unsigned int firstChannel, unsigned int sampleRate,
3030 RtAudioFormat format, unsigned int *bufferSize,
3031 RtAudio::StreamOptions *options )
3032 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3034 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
3036 // For ASIO, a duplex stream MUST use the same driver.
3037 if ( isDuplexInput && stream_.device[0] != device ) {
3038 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
3042 char driverName[32];
3043 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
3044 if ( result != ASE_OK ) {
3045 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
3046 errorText_ = errorStream_.str();
3050 // Only load the driver once for duplex stream.
3051 if ( !isDuplexInput ) {
3052 // The getDeviceInfo() function will not work when a stream is open
3053 // because ASIO does not allow multiple devices to run at the same
3054 // time. Thus, we'll probe the system before opening a stream and
3055 // save the results for use by getDeviceInfo().
3056 this->saveDeviceInfo();
3058 if ( !drivers.loadDriver( driverName ) ) {
3059 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
3060 errorText_ = errorStream_.str();
3064 result = ASIOInit( &driverInfo );
3065 if ( result != ASE_OK ) {
3066 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
3067 errorText_ = errorStream_.str();
3072 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
3073 bool buffersAllocated = false;
3074 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3075 unsigned int nChannels;
3078 // Check the device channel count.
3079 long inputChannels, outputChannels;
3080 result = ASIOGetChannels( &inputChannels, &outputChannels );
3081 if ( result != ASE_OK ) {
3082 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
3083 errorText_ = errorStream_.str();
3087 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
3088 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3089 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3090 errorText_ = errorStream_.str();
3093 stream_.nDeviceChannels[mode] = channels;
3094 stream_.nUserChannels[mode] = channels;
3095 stream_.channelOffset[mode] = firstChannel;
3097 // Verify the sample rate is supported.
3098 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3099 if ( result != ASE_OK ) {
3100 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3101 errorText_ = errorStream_.str();
3105 // Get the current sample rate
3106 ASIOSampleRate currentRate;
3107 result = ASIOGetSampleRate( ¤tRate );
3108 if ( result != ASE_OK ) {
3109 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3110 errorText_ = errorStream_.str();
3114 // Set the sample rate only if necessary
3115 if ( currentRate != sampleRate ) {
3116 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3117 if ( result != ASE_OK ) {
3118 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3119 errorText_ = errorStream_.str();
3124 // Determine the driver data type.
3125 ASIOChannelInfo channelInfo;
3126 channelInfo.channel = 0;
3127 if ( mode == OUTPUT ) channelInfo.isInput = false;
3128 else channelInfo.isInput = true;
3129 result = ASIOGetChannelInfo( &channelInfo );
3130 if ( result != ASE_OK ) {
3131 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3132 errorText_ = errorStream_.str();
3136 // Assuming WINDOWS host is always little-endian.
3137 stream_.doByteSwap[mode] = false;
3138 stream_.userFormat = format;
3139 stream_.deviceFormat[mode] = 0;
3140 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3141 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3142 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3144 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3145 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3146 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3148 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3149 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3150 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3152 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3153 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3154 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3156 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3157 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3158 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3161 if ( stream_.deviceFormat[mode] == 0 ) {
3162 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3163 errorText_ = errorStream_.str();
3167 // Set the buffer size. For a duplex stream, this will end up
3168 // setting the buffer size based on the input constraints, which
3170 long minSize, maxSize, preferSize, granularity;
3171 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3172 if ( result != ASE_OK ) {
3173 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3174 errorText_ = errorStream_.str();
3178 if ( isDuplexInput ) {
3179 // When this is the duplex input (output was opened before), then we have to use the same
3180 // buffersize as the output, because it might use the preferred buffer size, which most
3181 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3182 // So instead of throwing an error, make them equal. The caller uses the reference
3183 // to the "bufferSize" param as usual to set up processing buffers.
3185 *bufferSize = stream_.bufferSize;
3188 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3189 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3190 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3191 else if ( granularity == -1 ) {
3192 // Make sure bufferSize is a power of two.
3193 int log2_of_min_size = 0;
3194 int log2_of_max_size = 0;
3196 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3197 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3198 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3201 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3202 int min_delta_num = log2_of_min_size;
3204 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3205 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3206 if (current_delta < min_delta) {
3207 min_delta = current_delta;
3212 *bufferSize = ( (unsigned int)1 << min_delta_num );
3213 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3214 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3216 else if ( granularity != 0 ) {
3217 // Set to an even multiple of granularity, rounding up.
3218 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3223 // we don't use it anymore, see above!
3224 // Just left it here for the case...
3225 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3226 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3231 stream_.bufferSize = *bufferSize;
3232 stream_.nBuffers = 2;
3234 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3235 else stream_.userInterleaved = true;
3237 // ASIO always uses non-interleaved buffers.
3238 stream_.deviceInterleaved[mode] = false;
3240 // Allocate, if necessary, our AsioHandle structure for the stream.
3241 if ( handle == 0 ) {
3243 handle = new AsioHandle;
3245 catch ( std::bad_alloc& ) {
3246 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3249 handle->bufferInfos = 0;
3251 // Create a manual-reset event.
3252 handle->condition = CreateEvent( NULL, // no security
3253 TRUE, // manual-reset
3254 FALSE, // non-signaled initially
3256 stream_.apiHandle = (void *) handle;
3259 // Create the ASIO internal buffers. Since RtAudio sets up input
3260 // and output separately, we'll have to dispose of previously
3261 // created output buffers for a duplex stream.
3262 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3263 ASIODisposeBuffers();
3264 if ( handle->bufferInfos ) free( handle->bufferInfos );
3267 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3269 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3270 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3271 if ( handle->bufferInfos == NULL ) {
3272 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3273 errorText_ = errorStream_.str();
3277 ASIOBufferInfo *infos;
3278 infos = handle->bufferInfos;
3279 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3280 infos->isInput = ASIOFalse;
3281 infos->channelNum = i + stream_.channelOffset[0];
3282 infos->buffers[0] = infos->buffers[1] = 0;
3284 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3285 infos->isInput = ASIOTrue;
3286 infos->channelNum = i + stream_.channelOffset[1];
3287 infos->buffers[0] = infos->buffers[1] = 0;
3290 // prepare for callbacks
3291 stream_.sampleRate = sampleRate;
3292 stream_.device[mode] = device;
3293 stream_.mode = isDuplexInput ? DUPLEX : mode;
3295 // store this class instance before registering callbacks, that are going to use it
3296 asioCallbackInfo = &stream_.callbackInfo;
3297 stream_.callbackInfo.object = (void *) this;
3299 // Set up the ASIO callback structure and create the ASIO data buffers.
3300 asioCallbacks.bufferSwitch = &bufferSwitch;
3301 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3302 asioCallbacks.asioMessage = &asioMessages;
3303 asioCallbacks.bufferSwitchTimeInfo = NULL;
3304 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3305 if ( result != ASE_OK ) {
3306 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3307 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
3308 // in that case, let's be naïve and try that instead
3309 *bufferSize = preferSize;
3310 stream_.bufferSize = *bufferSize;
3311 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3314 if ( result != ASE_OK ) {
3315 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3316 errorText_ = errorStream_.str();
3319 buffersAllocated = true;
3320 stream_.state = STREAM_STOPPED;
3322 // Set flags for buffer conversion.
3323 stream_.doConvertBuffer[mode] = false;
3324 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3325 stream_.doConvertBuffer[mode] = true;
3326 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3327 stream_.nUserChannels[mode] > 1 )
3328 stream_.doConvertBuffer[mode] = true;
3330 // Allocate necessary internal buffers
3331 unsigned long bufferBytes;
3332 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3333 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3334 if ( stream_.userBuffer[mode] == NULL ) {
3335 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3339 if ( stream_.doConvertBuffer[mode] ) {
3341 bool makeBuffer = true;
3342 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3343 if ( isDuplexInput && stream_.deviceBuffer ) {
3344 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3345 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3349 bufferBytes *= *bufferSize;
3350 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3351 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3352 if ( stream_.deviceBuffer == NULL ) {
3353 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3359 // Determine device latencies
3360 long inputLatency, outputLatency;
3361 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3362 if ( result != ASE_OK ) {
3363 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3364 errorText_ = errorStream_.str();
3365 error( RtAudioError::WARNING); // warn but don't fail
3368 stream_.latency[0] = outputLatency;
3369 stream_.latency[1] = inputLatency;
3372 // Setup the buffer conversion information structure. We don't use
3373 // buffers to do channel offsets, so we override that parameter
3375 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3380 if ( !isDuplexInput ) {
3381 // the cleanup for error in the duplex input, is done by RtApi::openStream
3382 // So we clean up for single channel only
3384 if ( buffersAllocated )
3385 ASIODisposeBuffers();
3387 drivers.removeCurrentDriver();
3390 CloseHandle( handle->condition );
3391 if ( handle->bufferInfos )
3392 free( handle->bufferInfos );
3395 stream_.apiHandle = 0;
3399 if ( stream_.userBuffer[mode] ) {
3400 free( stream_.userBuffer[mode] );
3401 stream_.userBuffer[mode] = 0;
3404 if ( stream_.deviceBuffer ) {
3405 free( stream_.deviceBuffer );
3406 stream_.deviceBuffer = 0;
3411 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void RtApiAsio :: closeStream()
  // Close the stream: stop it if running, release the driver's buffers,
  // unload the driver, and free all handle/user/device memory.
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );

  if ( stream_.state == STREAM_RUNNING ) {
    stream_.state = STREAM_STOPPED;

  // Release driver-side buffers and unload the driver.
  ASIODisposeBuffers();
  drivers.removeCurrentDriver();

  // Free our per-stream handle (event + bufferInfo array).
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
    CloseHandle( handle->condition );
    if ( handle->bufferInfos )
      free( handle->bufferInfos );
    stream_.apiHandle = 0;

  // Free the user buffers for both directions (index 0 = output, 1 = input).
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;

  // Mark the stream object as fully closed.
  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
// Reset to false by startStream(); presumably set when a stop is initiated
// from the callback/stop-thread path — TODO confirm against the full source.
bool stopThreadCalled = false;
void RtApiAsio :: startStream()
  // Begin audio i/o on the currently open stream.
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiAsio::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );

  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  ASIOError result = ASIOStart();
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
    errorText_ = errorStream_.str();

  // Reset drain bookkeeping and the stop-signal event before going live.
  handle->drainCounter = 0;
  handle->internalDrain = false;
  ResetEvent( handle->condition );
  stream_.state = STREAM_RUNNING;

  stopThreadCalled = false;

  if ( result == ASE_OK ) return;
  error( RtAudioError::SYSTEM_ERROR );
void RtApiAsio :: stopStream()
  // Stop the stream, first letting any pending output drain.
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );

  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( handle->drainCounter == 0 ) {
      // Ask the callback to play out queued data, then wait for its signal.
      handle->drainCounter = 2;
      WaitForSingleObject( handle->condition, INFINITE ); // block until signaled

  stream_.state = STREAM_STOPPED;

  ASIOError result = ASIOStop();
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
    errorText_ = errorStream_.str();

  if ( result == ASE_OK ) return;
  error( RtAudioError::SYSTEM_ERROR );
void RtApiAsio :: abortStream()
  // Abort the stream.  In practice this behaves like stopStream() — see
  // the note below on why the fast-drain path was disabled.
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );

  // The following lines were commented-out because some behavior was
  // noted where the device buffers need to be zeroed to avoid
  // continuing sound, even when the device buffers are completely
  // disposed.  So now, calling abort is the same as calling stop.
  // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  // handle->drainCounter = 2;
3532 // This function will be called by a spawned thread when the user
3533 // callback function signals that the stream should be stopped or
3534 // aborted. It is necessary to handle it this way because the
3535 // callbackEvent() function must return before the ASIOStop()
3536 // function will return.
static unsigned __stdcall asioStopStream( void *ptr )
  // Thread entry point: recover the RtApiAsio instance from the
  // CallbackInfo and stop the stream outside the driver's callback
  // context (callbackEvent() must return before ASIOStop() can).
  CallbackInfo *info = (CallbackInfo *) ptr;
  RtApiAsio *object = (RtApiAsio *) info->object;

  object->stopStream();
bool RtApiAsio :: callbackEvent( long bufferIndex )
  // Per-buffer driver callback: run the user callback, then move audio
  // between the user buffers and the driver's half-buffer "bufferIndex".
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal if finished.
  if ( handle->drainCounter > 3 ) {

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == false )
      SetEvent( handle->condition );
    else { // spawn a thread to stop the stream
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );

  // Invoke user callback to get fresh output data UNLESS we are
  // draining the stream.
  if ( handle->drainCounter == 0 ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    // Report over/underflow flags the driver signalled via asioXRun.
    if ( stream_.mode != INPUT && asioXRun == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
    if ( stream_.mode != OUTPUT && asioXRun == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    // Return value 2 = abort now; 1 = drain remaining output then stop.
    if ( cbReturnValue == 2 ) {
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );
    else if ( cbReturnValue == 1 ) {
      handle->drainCounter = 1;
      handle->internalDrain = true;

  unsigned int nChannels, bufferBytes, i, j;
  nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );

    else if ( stream_.doConvertBuffer[0] ) {

      // Convert (and optionally byte-swap) into deviceBuffer, then
      // scatter one channel per non-interleaved driver buffer.
      convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[0],
                        stream_.deviceFormat[0] );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );

      // No conversion needed: copy straight from the user buffer.
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.userBuffer[0],
                        stream_.bufferSize * stream_.nUserChannels[0],
                        stream_.userFormat );
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );

  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);

    if (stream_.doConvertBuffer[1]) {

      // Always interleave ASIO input data.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue )
          memcpy( &stream_.deviceBuffer[j++*bufferBytes],
                  handle->bufferInfos[i].buffers[bufferIndex],

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[1],
                        stream_.deviceFormat[1] );
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

      // No conversion: gather each input channel into the user buffer.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
          memcpy( &stream_.userBuffer[1][bufferBytes*j++],
                  handle->bufferInfos[i].buffers[bufferIndex],

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.userBuffer[1],
                        stream_.bufferSize * stream_.nUserChannels[1],
                        stream_.userFormat );

  // The following call was suggested by Malte Clasen.  While the API
  // documentation indicates it should not be required, some device
  // drivers apparently do not function correctly without it.

  RtApi::tickStreamTime();
static void sampleRateChanged( ASIOSampleRate sRate )
  // Driver notification that the hardware sample rate changed; we
  // respond by stopping the stream.
  // The ASIO documentation says that this usually only happens during
  // external sync.  Audio processing is not stopped by the driver,
  // actual sample rate might not have even changed, maybe only the
  // sample rate status of an AES/EBU or S/PDIF digital input at the
  // audio device.

  RtApi *object = (RtApi *) asioCallbackInfo->object;
    object->stopStream();
  catch ( RtAudioError &exception ) {
    std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;

  std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
  // Host message handler registered with the driver; answers capability
  // queries and reacts to driver notifications.  Returns 0L for
  // unsupported selectors, nonzero for supported/acknowledged ones.
  switch( selector ) {
  case kAsioSelectorSupported:
    if ( value == kAsioResetRequest
         || value == kAsioEngineVersion
         || value == kAsioResyncRequest
         || value == kAsioLatenciesChanged
         // The following three were added for ASIO 2.0, you don't
         // necessarily have to support them.
         || value == kAsioSupportsTimeInfo
         || value == kAsioSupportsTimeCode
         || value == kAsioSupportsInputMonitor)
  case kAsioResetRequest:
    // Defer the task and perform the reset of the driver during the
    // next "safe" situation.  You cannot reset the driver right now,
    // as this code is called from the driver.  Reset the driver is
    // done by completely destruct is. I.e. ASIOStop(),
    // ASIODisposeBuffers(), Destruction Afterwards you initialize the
    // driver again.
    std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
  case kAsioResyncRequest:
    // This informs the application that the driver encountered some
    // non-fatal data loss.  It is used for synchronization purposes
    // of different media.  Added mainly to work around the Win16Mutex
    // problems in Windows 95/98 with the Windows Multimedia system,
    // which could lose data because the Mutex was held too long by
    // another thread.  However a driver can issue it in other
    // situations, too.
    // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
  case kAsioLatenciesChanged:
    // This will inform the host application that the drivers were
    // latencies changed.  Beware, it this does not mean that the
    // buffer sizes have changed!  You might need to update internal
    // delay data.
    std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
  case kAsioEngineVersion:
    // Return the supported ASIO version of the host application.  If
    // a host application does not implement this selector, ASIO 1.0
    // is assumed by the driver.
  case kAsioSupportsTimeInfo:
    // Informs the driver whether the
    // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
    // For compatibility with ASIO 1.0 drivers the host application
    // should always support the "old" bufferSwitch method, too.
  case kAsioSupportsTimeCode:
    // Informs the driver whether application is interested in time
    // code info.  If an application does not need to know about time
    // code, the driver has less work to do.
3791 static const char* getAsioErrorString( ASIOError result )
3799 static const Messages m[] =
3801 { ASE_NotPresent, "Hardware input or output is not present or available." },
3802 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3803 { ASE_InvalidParameter, "Invalid input parameter." },
3804 { ASE_InvalidMode, "Invalid mode." },
3805 { ASE_SPNotAdvancing, "Sample position not advancing." },
3806 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3807 { ASE_NoMemory, "Not enough memory to complete the request." }
3810 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3811 if ( m[i].value == result ) return m[i].message;
3813 return "Unknown error.";
3816 //******************** End of __WINDOWS_ASIO__ *********************//
3820 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3822 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3823 // - Introduces support for the Windows WASAPI API
3824 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3825 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3826 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3831 #include <audioclient.h>
3833 #include <mmdeviceapi.h>
3834 #include <functiondiscoverykeys_devpkey.h>
3837 //=============================================================================
3839 #define SAFE_RELEASE( objectPtr )\
3842 objectPtr->Release();\
3846 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3848 //-----------------------------------------------------------------------------
3850 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3851 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3852 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3853 // provide intermediate storage for read / write synchronization.
3867 // sets the length of the internal ring buffer
// Allocates a zeroed ring buffer of bufferSize frames * formatBytes bytes per
// frame and records the new logical size. NOTE(review): the release of any
// previously-allocated buffer_ and the index resets are not visible in this
// extract -- confirm against the canonical source.
3868 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3871 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3873 bufferSize_ = bufferSize;
3878 // attempt to push a buffer into the ring buffer at the current "in" index
// Returns false (without copying) if the input is invalid or if there is not
// enough free space between the "in" and "out" indices; true on success.
// NOTE(review): the switch( format ) header, break statements, and closing
// braces are not visible in this extract (original line-number gaps).
3879 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3881 if ( !buffer || // incoming buffer is NULL
3882 bufferSize == 0 || // incoming buffer has no data
3883 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "out" index so the overlap test below works across the
// ring-buffer wrap-around boundary.
3888 unsigned int relOutIndex = outIndex_;
3889 unsigned int inIndexEnd = inIndex_ + bufferSize;
3890 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3891 relOutIndex += bufferSize_;
3894 // "in" index can end on the "out" index but cannot begin at it
3895 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
3896 return false; // not enough space between "in" index and "out" index
3899 // copy buffer from external to internal
// fromZeroSize = number of sample frames that wrap past the end of the ring;
// fromInSize = the portion that fits before the end.
3900 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3901 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3902 int fromInSize = bufferSize - fromZeroSize;
// Per-format copies: indices are in samples, so each case casts buffer_ to
// the sample type and scales the byte count by sizeof(type).
3907 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3908 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3910 case RTAUDIO_SINT16:
3911 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3912 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3914 case RTAUDIO_SINT24:
3915 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3916 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3918 case RTAUDIO_SINT32:
3919 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3920 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3922 case RTAUDIO_FLOAT32:
3923 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3924 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3926 case RTAUDIO_FLOAT64:
3927 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3928 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3932 // update "in" index
// Advance and wrap the write cursor only after a successful copy.
3933 inIndex_ += bufferSize;
3934 inIndex_ %= bufferSize_;
3939 // attempt to pull a buffer from the ring buffer from the current "out" index
// Mirror image of pushBuffer: returns false if the input is invalid or if
// fewer than bufferSize samples are available between "out" and "in".
// NOTE(review): the switch( format ) header, break statements, and closing
// braces are not visible in this extract (original line-number gaps).
3940 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3942 if ( !buffer || // incoming buffer is NULL
3943 bufferSize == 0 || // incoming buffer has no data
3944 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "in" index so the availability test works across wrap-around.
3949 unsigned int relInIndex = inIndex_;
3950 unsigned int outIndexEnd = outIndex_ + bufferSize;
3951 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3952 relInIndex += bufferSize_;
3955 // "out" index can begin at and end on the "in" index
3956 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
3957 return false; // not enough space between "out" index and "in" index
3960 // copy buffer from internal to external
// fromZeroSize = samples that wrap past the end of the ring; fromOutSize =
// the contiguous portion read before the end.
3961 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3962 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3963 int fromOutSize = bufferSize - fromZeroSize;
// Per-format copies; indices are in samples, byte counts scaled by sizeof.
3968 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3969 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3971 case RTAUDIO_SINT16:
3972 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3973 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3975 case RTAUDIO_SINT24:
3976 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3977 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3979 case RTAUDIO_SINT32:
3980 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3981 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3983 case RTAUDIO_FLOAT32:
3984 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3985 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3987 case RTAUDIO_FLOAT64:
3988 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3989 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3993 // update "out" index
// Advance and wrap the read cursor only after a successful copy.
3994 outIndex_ += bufferSize;
3995 outIndex_ %= bufferSize_;
// Ring-buffer state (sizes/indices are in sample frames of the stored format).
4002 unsigned int bufferSize_;
// inIndex_: next write position (producer side, advanced by pushBuffer).
4003 unsigned int inIndex_;
// outIndex_: next read position (consumer side, advanced by pullBuffer).
4004 unsigned int outIndex_;
4007 //-----------------------------------------------------------------------------
4009 // A structure to hold various information related to the WASAPI implementation.
// Bundles the per-stream COM interfaces and event handles stored in
// stream_.apiHandle. NOTE(review): the "struct WasapiHandle {" line and the
// renderEvent member declaration are not visible in this extract -- confirm
// against the canonical source.
4012 IAudioClient* captureAudioClient;
4013 IAudioClient* renderAudioClient;
4014 IAudioCaptureClient* captureClient;
4015 IAudioRenderClient* renderClient;
4016 HANDLE captureEvent;
// Default constructor: all interfaces/handles start NULL; they are created
// lazily in probeDeviceOpen() / wasapiThread().
4020 : captureAudioClient( NULL ),
4021 renderAudioClient( NULL ),
4022 captureClient( NULL ),
4023 renderClient( NULL ),
4024 captureEvent( NULL ),
4025 renderEvent( NULL ) {}
4028 //=============================================================================
// Constructor: initializes COM for this thread (remembering whether we did,
// so the destructor only balances our own CoInitialize call) and creates the
// MMDevice enumerator used by all device queries.
4030 RtApiWasapi::RtApiWasapi()
4031 : coInitialized_( false ), deviceEnumerator_( NULL )
4033 // WASAPI can run either apartment or multi-threaded
4034 HRESULT hr = CoInitialize( NULL );
// Only record success; RPC_E_CHANGED_MODE etc. leave coInitialized_ false so
// we will not call CoUninitialize() on someone else's initialization.
4035 if ( !FAILED( hr ) )
4036 coInitialized_ = true;
4038 // Instantiate device enumerator
4039 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4040 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4041 ( void** ) &deviceEnumerator_ );
4043 if ( FAILED( hr ) ) {
4044 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
4045 error( RtAudioError::DRIVER_ERROR );
4049 //-----------------------------------------------------------------------------
// Destructor: closes any open stream, releases the device enumerator, and
// balances the constructor's CoInitialize. NOTE(review): the closeStream()
// and CoUninitialize() call lines are not visible in this extract.
4051 RtApiWasapi::~RtApiWasapi()
4053 if ( stream_.state != STREAM_CLOSED )
4056 SAFE_RELEASE( deviceEnumerator_ );
4058 // If this object previously called CoInitialize()
4059 if ( coInitialized_ )
4063 //=============================================================================
// Returns the total number of active WASAPI endpoints: capture count plus
// render count. On any enumeration failure errorText_ is set and a
// DRIVER_ERROR is raised instead of returning a count.
// NOTE(review): the goto/Exit error-path lines and the Exit label are not
// visible in this extract (original line-number gaps).
4065 unsigned int RtApiWasapi::getDeviceCount( void )
4067 unsigned int captureDeviceCount = 0;
4068 unsigned int renderDeviceCount = 0;
4070 IMMDeviceCollection* captureDevices = NULL;
4071 IMMDeviceCollection* renderDevices = NULL;
4073 // Count capture devices
4075 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4076 if ( FAILED( hr ) ) {
4077 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4081 hr = captureDevices->GetCount( &captureDeviceCount );
4082 if ( FAILED( hr ) ) {
4083 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4087 // Count render devices
4088 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4089 if ( FAILED( hr ) ) {
4090 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4094 hr = renderDevices->GetCount( &renderDeviceCount );
4095 if ( FAILED( hr ) ) {
4096 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4101 // release all references
4102 SAFE_RELEASE( captureDevices );
4103 SAFE_RELEASE( renderDevices );
// Success is signalled by an empty errorText_; otherwise report the error.
4105 if ( errorText_.empty() )
4106 return captureDeviceCount + renderDeviceCount;
4108 error( RtAudioError::DRIVER_ERROR );
4112 //-----------------------------------------------------------------------------
// Probes one WASAPI endpoint and fills an RtAudio::DeviceInfo: name, default
// input/output status, channel counts, the single supported (mix-format)
// sample rate, and the native sample format. Device indexing convention:
// render devices come first [0, renderCount), then capture devices.
// NOTE(review): goto/Exit error-path lines, the Exit label, and some closing
// braces are not visible in this extract (original line-number gaps).
4114 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4116 RtAudio::DeviceInfo info;
4117 unsigned int captureDeviceCount = 0;
4118 unsigned int renderDeviceCount = 0;
4119 std::string defaultDeviceName;
4120 bool isCaptureDevice = false;
4122 PROPVARIANT deviceNameProp;
4123 PROPVARIANT defaultDeviceNameProp;
4125 IMMDeviceCollection* captureDevices = NULL;
4126 IMMDeviceCollection* renderDevices = NULL;
4127 IMMDevice* devicePtr = NULL;
4128 IMMDevice* defaultDevicePtr = NULL;
4129 IAudioClient* audioClient = NULL;
4130 IPropertyStore* devicePropStore = NULL;
4131 IPropertyStore* defaultDevicePropStore = NULL;
4133 WAVEFORMATEX* deviceFormat = NULL;
4134 WAVEFORMATEX* closestMatchFormat = NULL;
// probed stays false unless the whole probe sequence below succeeds.
4137 info.probed = false;
4139 // Count capture devices
4141 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4142 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4143 if ( FAILED( hr ) ) {
4144 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4148 hr = captureDevices->GetCount( &captureDeviceCount );
4149 if ( FAILED( hr ) ) {
4150 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4154 // Count render devices
4155 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4156 if ( FAILED( hr ) ) {
4157 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4161 hr = renderDevices->GetCount( &renderDeviceCount );
4162 if ( FAILED( hr ) ) {
4163 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4167 // validate device index
4168 if ( device >= captureDeviceCount + renderDeviceCount ) {
4169 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4170 errorType = RtAudioError::INVALID_USE;
4174 // determine whether index falls within capture or render devices
// Indices >= renderDeviceCount address the capture list (offset by the
// render count); lower indices address the render list directly.
4175 if ( device >= renderDeviceCount ) {
4176 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4177 if ( FAILED( hr ) ) {
4178 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4181 isCaptureDevice = true;
4184 hr = renderDevices->Item( device, &devicePtr );
4185 if ( FAILED( hr ) ) {
4186 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4189 isCaptureDevice = false;
4192 // get default device name
// The system default endpoint (eConsole role) is fetched so the probed
// device's name can be compared against it below.
4193 if ( isCaptureDevice ) {
4194 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4195 if ( FAILED( hr ) ) {
4196 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4201 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4202 if ( FAILED( hr ) ) {
4203 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4208 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4209 if ( FAILED( hr ) ) {
4210 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4213 PropVariantInit( &defaultDeviceNameProp );
4215 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4216 if ( FAILED( hr ) ) {
4217 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4221 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4224 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4225 if ( FAILED( hr ) ) {
4226 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4230 PropVariantInit( &deviceNameProp );
4232 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4233 if ( FAILED( hr ) ) {
4234 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4238 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
// Default-device status is decided by friendly-name comparison against the
// default endpoint's name fetched above.
4241 if ( isCaptureDevice ) {
4242 info.isDefaultInput = info.name == defaultDeviceName;
4243 info.isDefaultOutput = false;
4246 info.isDefaultInput = false;
4247 info.isDefaultOutput = info.name == defaultDeviceName;
4251 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4252 if ( FAILED( hr ) ) {
4253 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4257 hr = audioClient->GetMixFormat( &deviceFormat );
4258 if ( FAILED( hr ) ) {
4259 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
// A WASAPI endpoint is one-directional: only the probed direction reports
// channels, and duplex is never offered by a single endpoint.
4263 if ( isCaptureDevice ) {
4264 info.inputChannels = deviceFormat->nChannels;
4265 info.outputChannels = 0;
4266 info.duplexChannels = 0;
4269 info.inputChannels = 0;
4270 info.outputChannels = deviceFormat->nChannels;
4271 info.duplexChannels = 0;
4274 // sample rates (WASAPI only supports the one native sample rate)
4275 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4277 info.sampleRates.clear();
4278 info.sampleRates.push_back( deviceFormat->nSamplesPerSec );
// Map the mix format (plain tag or WAVE_FORMAT_EXTENSIBLE SubFormat) plus
// bit depth onto RtAudio's native-format flags.
4281 info.nativeFormats = 0;
4283 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4284 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4285 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4287 if ( deviceFormat->wBitsPerSample == 32 ) {
4288 info.nativeFormats |= RTAUDIO_FLOAT32;
4290 else if ( deviceFormat->wBitsPerSample == 64 ) {
4291 info.nativeFormats |= RTAUDIO_FLOAT64;
4294 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4295 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4296 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4298 if ( deviceFormat->wBitsPerSample == 8 ) {
4299 info.nativeFormats |= RTAUDIO_SINT8;
4301 else if ( deviceFormat->wBitsPerSample == 16 ) {
4302 info.nativeFormats |= RTAUDIO_SINT16;
4304 else if ( deviceFormat->wBitsPerSample == 24 ) {
4305 info.nativeFormats |= RTAUDIO_SINT24;
4307 else if ( deviceFormat->wBitsPerSample == 32 ) {
4308 info.nativeFormats |= RTAUDIO_SINT32;
4316 // release all references
// PROPVARIANTs are cleared, COM interfaces released, and the formats freed
// with CoTaskMemFree because GetMixFormat allocates via CoTaskMemAlloc.
4317 PropVariantClear( &deviceNameProp );
4318 PropVariantClear( &defaultDeviceNameProp );
4320 SAFE_RELEASE( captureDevices );
4321 SAFE_RELEASE( renderDevices );
4322 SAFE_RELEASE( devicePtr );
4323 SAFE_RELEASE( defaultDevicePtr );
4324 SAFE_RELEASE( audioClient );
4325 SAFE_RELEASE( devicePropStore );
4326 SAFE_RELEASE( defaultDevicePropStore );
4328 CoTaskMemFree( deviceFormat );
4329 CoTaskMemFree( closestMatchFormat );
4331 if ( !errorText_.empty() )
4336 //-----------------------------------------------------------------------------
// Linear search over all devices for the one flagged isDefaultOutput.
// NOTE(review): the return statements (index on match, fallback value) are
// not visible in this extract. Each iteration re-probes a device, so this is
// O(n) full probes -- acceptable for an infrequent query.
4338 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4340 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4341 if ( getDeviceInfo( i ).isDefaultOutput ) {
4349 //-----------------------------------------------------------------------------
// Linear search over all devices for the one flagged isDefaultInput.
// NOTE(review): the return statements are not visible in this extract.
4351 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4353 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4354 if ( getDeviceInfo( i ).isDefaultInput ) {
4362 //-----------------------------------------------------------------------------
// Tears down an open stream: stops it if running, releases all COM interfaces
// and event handles held in the WasapiHandle, frees user/device buffers, and
// marks the stream closed. Warns (not errors) if no stream is open.
4364 void RtApiWasapi::closeStream( void )
4366 if ( stream_.state == STREAM_CLOSED ) {
4367 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4368 error( RtAudioError::WARNING );
// NOTE(review): the stopStream() call body for this branch is not visible in
// this extract (original line-number gap after the condition).
4372 if ( stream_.state != STREAM_STOPPED )
4375 // clean up stream memory
4376 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4377 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4379 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4380 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
// Event handles are Win32 HANDLEs, not COM objects: CloseHandle, not Release.
4382 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4383 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4385 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4386 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4388 delete ( WasapiHandle* ) stream_.apiHandle;
4389 stream_.apiHandle = NULL;
// Free both user buffers (index 0 = OUTPUT, 1 = INPUT).
4391 for ( int i = 0; i < 2; i++ ) {
4392 if ( stream_.userBuffer[i] ) {
4393 free( stream_.userBuffer[i] );
4394 stream_.userBuffer[i] = 0;
4398 if ( stream_.deviceBuffer ) {
4399 free( stream_.deviceBuffer );
4400 stream_.deviceBuffer = 0;
4403 // update stream state
4404 stream_.state = STREAM_CLOSED;
4407 //-----------------------------------------------------------------------------
// Starts the stream by spawning the WASAPI processing thread (created
// suspended so its priority can be set before it runs). Warns if already
// running. The state is flipped to RUNNING before the thread starts; the
// thread itself performs the actual client start-up in wasapiThread().
4409 void RtApiWasapi::startStream( void )
4413 if ( stream_.state == STREAM_RUNNING ) {
4414 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4415 error( RtAudioError::WARNING );
4419 // update stream state
4420 stream_.state = STREAM_RUNNING;
4422 // create WASAPI stream thread
4423 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4425 if ( !stream_.callbackInfo.thread ) {
4426 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4427 error( RtAudioError::THREAD_ERROR );
// Priority was chosen in probeDeviceOpen (15 when RTAUDIO_SCHEDULE_REALTIME).
4430 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4431 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4435 //-----------------------------------------------------------------------------
// Gracefully stops the stream: signals the processing thread via the
// STREAM_STOPPING state, spin-waits until the thread reports STREAM_STOPPED,
// waits one buffer period so the last buffer drains, stops both audio
// clients, and closes the thread handle.
// NOTE(review): the spin-wait loop body (presumably a Sleep) is not visible
// in this extract (original line-number gap).
4437 void RtApiWasapi::stopStream( void )
4441 if ( stream_.state == STREAM_STOPPED ) {
4442 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4443 error( RtAudioError::WARNING );
4447 // inform stream thread by setting stream state to STREAM_STOPPING
4448 stream_.state = STREAM_STOPPING;
4450 // wait until stream thread is stopped
4451 while( stream_.state != STREAM_STOPPED ) {
4455 // Wait for the last buffer to play before stopping.
// Buffer duration in ms = 1000 * frames / sampleRate.
4456 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4458 // stop capture client if applicable
4459 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4460 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4461 if ( FAILED( hr ) ) {
4462 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
4463 error( RtAudioError::DRIVER_ERROR );
4468 // stop render client if applicable
4469 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4470 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4471 if ( FAILED( hr ) ) {
4472 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
4473 error( RtAudioError::DRIVER_ERROR );
4478 // close thread handle
4479 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4480 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4481 error( RtAudioError::THREAD_ERROR );
4485 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4488 //-----------------------------------------------------------------------------
// Immediately stops the stream. Identical to stopStream() except it does NOT
// wait for the final buffer to drain before stopping the audio clients.
// NOTE(review): the spin-wait loop body is not visible in this extract.
4490 void RtApiWasapi::abortStream( void )
4494 if ( stream_.state == STREAM_STOPPED ) {
4495 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4496 error( RtAudioError::WARNING );
4500 // inform stream thread by setting stream state to STREAM_STOPPING
4501 stream_.state = STREAM_STOPPING;
4503 // wait until stream thread is stopped
4504 while ( stream_.state != STREAM_STOPPED ) {
4508 // stop capture client if applicable
4509 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4510 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4511 if ( FAILED( hr ) ) {
4512 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
4513 error( RtAudioError::DRIVER_ERROR );
4518 // stop render client if applicable
4519 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4520 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4521 if ( FAILED( hr ) ) {
4522 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
4523 error( RtAudioError::DRIVER_ERROR );
4528 // close thread handle
4529 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4530 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4531 error( RtAudioError::THREAD_ERROR );
4535 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4538 //-----------------------------------------------------------------------------
// Opens one direction (INPUT or OUTPUT) of a WASAPI stream: validates the
// device index, direction, and sample rate (WASAPI shared mode accepts only
// the device's mix-format rate), activates the IAudioClient for the chosen
// endpoint, and fills in the stream_ bookkeeping (channels, format,
// conversion flags, user buffer). Returns SUCCESS/FAILURE.
// NOTE(review): goto/Exit error-path lines, the Exit label, and some closing
// braces are not visible in this extract (original line-number gaps).
4540 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4541 unsigned int firstChannel, unsigned int sampleRate,
4542 RtAudioFormat format, unsigned int* bufferSize,
4543 RtAudio::StreamOptions* options )
4545 bool methodResult = FAILURE;
4546 unsigned int captureDeviceCount = 0;
4547 unsigned int renderDeviceCount = 0;
4549 IMMDeviceCollection* captureDevices = NULL;
4550 IMMDeviceCollection* renderDevices = NULL;
4551 IMMDevice* devicePtr = NULL;
4552 WAVEFORMATEX* deviceFormat = NULL;
4553 unsigned int bufferBytes;
4554 stream_.state = STREAM_STOPPED;
4555 RtAudio::DeviceInfo deviceInfo;
4557 // create API Handle if not already created
// One WasapiHandle is shared by both directions of a duplex stream, so it
// may already exist from the first probeDeviceOpen call.
4558 if ( !stream_.apiHandle )
4559 stream_.apiHandle = ( void* ) new WasapiHandle();
4561 // Count capture devices
4563 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4564 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4565 if ( FAILED( hr ) ) {
4566 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4570 hr = captureDevices->GetCount( &captureDeviceCount );
4571 if ( FAILED( hr ) ) {
4572 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4576 // Count render devices
4577 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4578 if ( FAILED( hr ) ) {
4579 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4583 hr = renderDevices->GetCount( &renderDeviceCount );
4584 if ( FAILED( hr ) ) {
4585 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4589 // validate device index
4590 if ( device >= captureDeviceCount + renderDeviceCount ) {
4591 errorType = RtAudioError::INVALID_USE;
4592 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4596 deviceInfo = getDeviceInfo( device );
4598 // validate sample rate
// Shared-mode WASAPI runs at the device mix rate only; any other request is
// rejected up front with an explanatory message.
4599 if ( sampleRate != deviceInfo.preferredSampleRate )
4601 errorType = RtAudioError::INVALID_USE;
4602 std::stringstream ss;
4603 ss << "RtApiWasapi::probeDeviceOpen: " << sampleRate
4604 << "Hz sample rate not supported. This device only supports "
4605 << deviceInfo.preferredSampleRate << "Hz.";
4606 errorText_ = ss.str();
4610 // determine whether index falls within capture or render devices
// Same indexing convention as getDeviceInfo: render devices first, capture
// devices offset by renderDeviceCount.
4611 if ( device >= renderDeviceCount ) {
4612 if ( mode != INPUT ) {
4613 errorType = RtAudioError::INVALID_USE;
4614 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4618 // retrieve captureAudioClient from devicePtr
4619 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4621 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4622 if ( FAILED( hr ) ) {
4623 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4627 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4628 NULL, ( void** ) &captureAudioClient );
4629 if ( FAILED( hr ) ) {
4630 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4634 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4635 if ( FAILED( hr ) ) {
4636 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4640 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4641 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4644 if ( mode != OUTPUT ) {
4645 errorType = RtAudioError::INVALID_USE;
4646 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
4650 // retrieve renderAudioClient from devicePtr
4651 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4653 hr = renderDevices->Item( device, &devicePtr );
4654 if ( FAILED( hr ) ) {
4655 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4659 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4660 NULL, ( void** ) &renderAudioClient );
4661 if ( FAILED( hr ) ) {
4662 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4666 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4667 if ( FAILED( hr ) ) {
4668 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4672 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4673 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
// A second probeDeviceOpen in the opposite direction upgrades to DUPLEX.
4677 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4678 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4679 stream_.mode = DUPLEX;
4682 stream_.mode = mode;
4685 stream_.device[mode] = device;
4686 stream_.doByteSwap[mode] = false;
4687 stream_.sampleRate = sampleRate;
4688 stream_.bufferSize = *bufferSize;
4689 stream_.nBuffers = 1;
4690 stream_.nUserChannels[mode] = channels;
4691 stream_.channelOffset[mode] = firstChannel;
4692 stream_.userFormat = format;
4693 stream_.deviceFormat[mode] = deviceInfo.nativeFormats;
4695 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4696 stream_.userInterleaved = false;
4698 stream_.userInterleaved = true;
4699 stream_.deviceInterleaved[mode] = true;
4701 // Set flags for buffer conversion.
// Conversion is needed when user and device differ in format, channel count,
// or (for multi-channel) interleaving.
4702 stream_.doConvertBuffer[mode] = false;
4703 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4704 stream_.nUserChannels != stream_.nDeviceChannels )
4705 stream_.doConvertBuffer[mode] = true;
4706 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4707 stream_.nUserChannels[mode] > 1 )
4708 stream_.doConvertBuffer[mode] = true;
4710 if ( stream_.doConvertBuffer[mode] )
4711 setConvertInfo( mode, 0 );
4713 // Allocate necessary internal buffers
4714 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4716 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4717 if ( !stream_.userBuffer[mode] ) {
4718 errorType = RtAudioError::MEMORY_ERROR;
4719 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
// Priority 15 corresponds to a time-critical thread priority request; it is
// applied in startStream() via SetThreadPriority.
4723 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4724 stream_.callbackInfo.priority = 15;
4726 stream_.callbackInfo.priority = 0;
4728 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4729 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4731 methodResult = SUCCESS;
// Common exit path: release enumeration COM objects and the CoTaskMemAlloc'd
// mix format, then close the half-open stream on failure.
4735 SAFE_RELEASE( captureDevices );
4736 SAFE_RELEASE( renderDevices );
4737 SAFE_RELEASE( devicePtr );
4738 CoTaskMemFree( deviceFormat );
4740 // if method failed, close the stream
4741 if ( methodResult == FAILURE )
4744 if ( !errorText_.empty() )
4746 return methodResult;
4749 //=============================================================================
// Static Win32 thread entry points: each unwraps the RtApiWasapi instance
// passed as the thread parameter and forwards to the corresponding member
// function. NOTE(review): null checks and return statements are not visible
// in this extract (original line-number gaps).
4751 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4754 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4759 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4762 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4767 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4770 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4775 //-----------------------------------------------------------------------------
4777 void RtApiWasapi::wasapiThread()
4779 // as this is a new thread, we must CoInitialize it
4780 CoInitialize( NULL );
4784 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4785 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4786 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4787 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4788 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4789 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4791 WAVEFORMATEX* captureFormat = NULL;
4792 WAVEFORMATEX* renderFormat = NULL;
4793 WasapiBuffer captureBuffer;
4794 WasapiBuffer renderBuffer;
4796 // declare local stream variables
4797 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4798 BYTE* streamBuffer = NULL;
4799 unsigned long captureFlags = 0;
4800 unsigned int bufferFrameCount = 0;
4801 unsigned int numFramesPadding = 0;
4802 bool callbackPushed = false;
4803 bool callbackPulled = false;
4804 bool callbackStopped = false;
4805 int callbackResult = 0;
4807 unsigned int deviceBuffSize = 0;
4810 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4812 // Attempt to assign "Pro Audio" characteristic to thread
4813 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4815 DWORD taskIndex = 0;
4816 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4817 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4818 FreeLibrary( AvrtDll );
4821 // start capture stream if applicable
4822 if ( captureAudioClient ) {
4823 hr = captureAudioClient->GetMixFormat( &captureFormat );
4824 if ( FAILED( hr ) ) {
4825 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4829 // initialize capture stream according to desired buffer size
4830 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / captureFormat->nSamplesPerSec );
4832 if ( !captureClient ) {
4833 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4834 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4835 desiredBufferPeriod,
4836 desiredBufferPeriod,
4839 if ( FAILED( hr ) ) {
4840 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4844 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4845 ( void** ) &captureClient );
4846 if ( FAILED( hr ) ) {
4847 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
4851 // configure captureEvent to trigger on every available capture buffer
4852 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4853 if ( !captureEvent ) {
4854 errorType = RtAudioError::SYSTEM_ERROR;
4855 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
4859 hr = captureAudioClient->SetEventHandle( captureEvent );
4860 if ( FAILED( hr ) ) {
4861 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
4865 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
4866 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
4869 unsigned int inBufferSize = 0;
4870 hr = captureAudioClient->GetBufferSize( &inBufferSize );
4871 if ( FAILED( hr ) ) {
4872 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
4876 // scale outBufferSize according to stream->user sample rate ratio
4877 unsigned int outBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT];
4878 inBufferSize *= stream_.nDeviceChannels[INPUT];
4880 // set captureBuffer size
4881 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
4883 // reset the capture stream
4884 hr = captureAudioClient->Reset();
4885 if ( FAILED( hr ) ) {
4886 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
4890 // start the capture stream
4891 hr = captureAudioClient->Start();
4892 if ( FAILED( hr ) ) {
4893 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
4898 // start render stream if applicable
4899 if ( renderAudioClient ) {
4900 hr = renderAudioClient->GetMixFormat( &renderFormat );
4901 if ( FAILED( hr ) ) {
4902 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4906 // initialize render stream according to desire buffer size
4907 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / renderFormat->nSamplesPerSec );
4909 if ( !renderClient ) {
4910 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4911 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4912 desiredBufferPeriod,
4913 desiredBufferPeriod,
4916 if ( FAILED( hr ) ) {
4917 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
4921 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
4922 ( void** ) &renderClient );
4923 if ( FAILED( hr ) ) {
4924 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
4928 // configure renderEvent to trigger on every available render buffer
4929 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4930 if ( !renderEvent ) {
4931 errorType = RtAudioError::SYSTEM_ERROR;
4932 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
4936 hr = renderAudioClient->SetEventHandle( renderEvent );
4937 if ( FAILED( hr ) ) {
4938 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
4942 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
4943 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
4946 unsigned int outBufferSize = 0;
4947 hr = renderAudioClient->GetBufferSize( &outBufferSize );
4948 if ( FAILED( hr ) ) {
4949 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
4953 // scale inBufferSize according to user->stream sample rate ratio
4954 unsigned int inBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[OUTPUT];
4955 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
4957 // set renderBuffer size
4958 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
4960 // reset the render stream
4961 hr = renderAudioClient->Reset();
4962 if ( FAILED( hr ) ) {
4963 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
4967 // start the render stream
4968 hr = renderAudioClient->Start();
4969 if ( FAILED( hr ) ) {
4970 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
4975 if ( stream_.mode == INPUT ) {
4976 using namespace std; // for roundf
4977 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4979 else if ( stream_.mode == OUTPUT ) {
4980 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4982 else if ( stream_.mode == DUPLEX ) {
4983 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4984 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
4987 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
4988 if ( !stream_.deviceBuffer ) {
4989 errorType = RtAudioError::MEMORY_ERROR;
4990 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
4994 // stream process loop
4995 while ( stream_.state != STREAM_STOPPING ) {
4996 if ( !callbackPulled ) {
4999 // 1. Pull callback buffer from inputBuffer
5000 // 2. If 1. was successful: Convert callback buffer to user format
5002 if ( captureAudioClient ) {
5003 // Pull callback buffer from inputBuffer
5004 callbackPulled = captureBuffer.pullBuffer( stream_.deviceBuffer,
5005 ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT],
5006 stream_.deviceFormat[INPUT] );
5008 if ( callbackPulled ) {
5009 if ( stream_.doConvertBuffer[INPUT] ) {
5010 // Convert callback buffer to user format
5011 convertBuffer( stream_.userBuffer[INPUT],
5012 stream_.deviceBuffer,
5013 stream_.convertInfo[INPUT] );
5016 // no further conversion, simple copy deviceBuffer to userBuffer
5017 memcpy( stream_.userBuffer[INPUT],
5018 stream_.deviceBuffer,
5019 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5024 // if there is no capture stream, set callbackPulled flag
5025 callbackPulled = true;
5030 // 1. Execute user callback method
5031 // 2. Handle return value from callback
5033 // if callback has not requested the stream to stop
5034 if ( callbackPulled && !callbackStopped ) {
5035 // Execute user callback method
5036 callbackResult = callback( stream_.userBuffer[OUTPUT],
5037 stream_.userBuffer[INPUT],
5040 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5041 stream_.callbackInfo.userData );
5043 // Handle return value from callback
5044 if ( callbackResult == 1 ) {
5045 // instantiate a thread to stop this thread
5046 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5047 if ( !threadHandle ) {
5048 errorType = RtAudioError::THREAD_ERROR;
5049 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5052 else if ( !CloseHandle( threadHandle ) ) {
5053 errorType = RtAudioError::THREAD_ERROR;
5054 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5058 callbackStopped = true;
5060 else if ( callbackResult == 2 ) {
5061 // instantiate a thread to stop this thread
5062 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5063 if ( !threadHandle ) {
5064 errorType = RtAudioError::THREAD_ERROR;
5065 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5068 else if ( !CloseHandle( threadHandle ) ) {
5069 errorType = RtAudioError::THREAD_ERROR;
5070 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5074 callbackStopped = true;
5081 // 1. Convert callback buffer to stream format
5082 // 2. Push callback buffer into outputBuffer
5084 if ( renderAudioClient && callbackPulled ) {
5085 if ( stream_.doConvertBuffer[OUTPUT] ) {
5086 // Convert callback buffer to stream format
5087 convertBuffer( stream_.deviceBuffer,
5088 stream_.userBuffer[OUTPUT],
5089 stream_.convertInfo[OUTPUT] );
5093 // Push callback buffer into outputBuffer
5094 callbackPushed = renderBuffer.pushBuffer( stream_.deviceBuffer,
5095 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT],
5096 stream_.deviceFormat[OUTPUT] );
5099 // if there is no render stream, set callbackPushed flag
5100 callbackPushed = true;
5105 // 1. Get capture buffer from stream
5106 // 2. Push capture buffer into inputBuffer
5107 // 3. If 2. was successful: Release capture buffer
5109 if ( captureAudioClient ) {
5110 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5111 if ( !callbackPulled ) {
5112 WaitForSingleObject( captureEvent, INFINITE );
5115 // Get capture buffer from stream
5116 hr = captureClient->GetBuffer( &streamBuffer,
5118 &captureFlags, NULL, NULL );
5119 if ( FAILED( hr ) ) {
5120 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5124 if ( bufferFrameCount != 0 ) {
5125 // Push capture buffer into inputBuffer
5126 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5127 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5128 stream_.deviceFormat[INPUT] ) )
5130 // Release capture buffer
5131 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5132 if ( FAILED( hr ) ) {
5133 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5139 // Inform WASAPI that capture was unsuccessful
5140 hr = captureClient->ReleaseBuffer( 0 );
5141 if ( FAILED( hr ) ) {
5142 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5149 // Inform WASAPI that capture was unsuccessful
5150 hr = captureClient->ReleaseBuffer( 0 );
5151 if ( FAILED( hr ) ) {
5152 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5160 // 1. Get render buffer from stream
5161 // 2. Pull next buffer from outputBuffer
5162 // 3. If 2. was successful: Fill render buffer with next buffer
5163 // Release render buffer
5165 if ( renderAudioClient ) {
5166 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5167 if ( callbackPulled && !callbackPushed ) {
5168 WaitForSingleObject( renderEvent, INFINITE );
5171 // Get render buffer from stream
5172 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5173 if ( FAILED( hr ) ) {
5174 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5178 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5179 if ( FAILED( hr ) ) {
5180 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
5184 bufferFrameCount -= numFramesPadding;
5186 if ( bufferFrameCount != 0 ) {
5187 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5188 if ( FAILED( hr ) ) {
5189 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5193 // Pull next buffer from outputBuffer
5194 // Fill render buffer with next buffer
5195 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5196 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5197 stream_.deviceFormat[OUTPUT] ) )
5199 // Release render buffer
5200 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5201 if ( FAILED( hr ) ) {
5202 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5208 // Inform WASAPI that render was unsuccessful
5209 hr = renderClient->ReleaseBuffer( 0, 0 );
5210 if ( FAILED( hr ) ) {
5211 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5218 // Inform WASAPI that render was unsuccessful
5219 hr = renderClient->ReleaseBuffer( 0, 0 );
5220 if ( FAILED( hr ) ) {
5221 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5227 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5228 if ( callbackPushed ) {
5229 callbackPulled = false;
5231 RtApi::tickStreamTime();
5238 CoTaskMemFree( captureFormat );
5239 CoTaskMemFree( renderFormat );
5243 // update stream state
5244 stream_.state = STREAM_STOPPED;
5246 if ( errorText_.empty() )
5252 //******************** End of __WINDOWS_WASAPI__ *********************//
5256 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5258 // Modified by Robin Davies, October 2005
5259 // - Improvements to DirectX pointer chasing.
5260 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5261 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5262 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5263 // Changed device query structure for RtAudio 4.0.7, January 2010
5265 #include <windows.h>
5266 #include <process.h>
5267 #include <mmsystem.h>
5271 #include <algorithm>
5273 #if defined(__MINGW32__)
5274 // missing from latest mingw winapi
5275 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5276 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5277 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5278 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5281 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5283 #ifdef _MSC_VER // if Microsoft Visual C++
5284 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5287 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5289 if ( pointer > bufferSize ) pointer -= bufferSize;
5290 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5291 if ( pointer < earlierPointer ) pointer += bufferSize;
5292 return pointer >= earlierPointer && pointer < laterPointer;
5295 // A structure to hold various information related to the DirectSound
5296 // API implementation.
5298 unsigned int drainCounter; // Tracks callback counts when draining
5299 bool internalDrain; // Indicates if stop is initiated from callback or not.
5303 UINT bufferPointer[2];
5304 DWORD dsBufferSize[2];
5305 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
5309 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5312 // Declarations for utility functions, callbacks, and structures
5313 // specific to the DirectSound implementation.
5314 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5315 LPCTSTR description,
5319 static const char* getErrorString( int code );
5321 static unsigned __stdcall callbackHandler( void *ptr );
5330 : found(false) { validId[0] = false; validId[1] = false; }
5333 struct DsProbeData {
5335 std::vector<struct DsDevice>* dsDevices;
5338 RtApiDs :: RtApiDs()
5340 // Dsound will run both-threaded. If CoInitialize fails, then just
5341 // accept whatever the mainline chose for a threading model.
5342 coInitialized_ = false;
5343 HRESULT hr = CoInitialize( NULL );
5344 if ( !FAILED( hr ) ) coInitialized_ = true;
5347 RtApiDs :: ~RtApiDs()
5349 if ( stream_.state != STREAM_CLOSED ) closeStream();
5350 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5353 // The DirectSound default output is always the first device.
5354 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5359 // The DirectSound default input is always the first input device,
5360 // which is the first capture device enumerated.
5361 unsigned int RtApiDs :: getDefaultInputDevice( void )
5366 unsigned int RtApiDs :: getDeviceCount( void )
5368 // Set query flag for previously found devices to false, so that we
5369 // can check for any devices that have disappeared.
5370 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5371 dsDevices[i].found = false;
5373 // Query DirectSound devices.
5374 struct DsProbeData probeInfo;
5375 probeInfo.isInput = false;
5376 probeInfo.dsDevices = &dsDevices;
5377 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5378 if ( FAILED( result ) ) {
5379 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5380 errorText_ = errorStream_.str();
5381 error( RtAudioError::WARNING );
5384 // Query DirectSoundCapture devices.
5385 probeInfo.isInput = true;
5386 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5387 if ( FAILED( result ) ) {
5388 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5389 errorText_ = errorStream_.str();
5390 error( RtAudioError::WARNING );
5393 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5394 for ( unsigned int i=0; i<dsDevices.size(); ) {
5395 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5399 return static_cast<unsigned int>(dsDevices.size());
5402 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5404 RtAudio::DeviceInfo info;
5405 info.probed = false;
5407 if ( dsDevices.size() == 0 ) {
5408 // Force a query of all devices
5410 if ( dsDevices.size() == 0 ) {
5411 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5412 error( RtAudioError::INVALID_USE );
5417 if ( device >= dsDevices.size() ) {
5418 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5419 error( RtAudioError::INVALID_USE );
5424 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5426 LPDIRECTSOUND output;
5428 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5429 if ( FAILED( result ) ) {
5430 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5431 errorText_ = errorStream_.str();
5432 error( RtAudioError::WARNING );
5436 outCaps.dwSize = sizeof( outCaps );
5437 result = output->GetCaps( &outCaps );
5438 if ( FAILED( result ) ) {
5440 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5441 errorText_ = errorStream_.str();
5442 error( RtAudioError::WARNING );
5446 // Get output channel information.
5447 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5449 // Get sample rate information.
5450 info.sampleRates.clear();
5451 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5452 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5453 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5454 info.sampleRates.push_back( SAMPLE_RATES[k] );
5456 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5457 info.preferredSampleRate = SAMPLE_RATES[k];
5461 // Get format information.
5462 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5463 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5467 if ( getDefaultOutputDevice() == device )
5468 info.isDefaultOutput = true;
5470 if ( dsDevices[ device ].validId[1] == false ) {
5471 info.name = dsDevices[ device ].name;
5478 LPDIRECTSOUNDCAPTURE input;
5479 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5480 if ( FAILED( result ) ) {
5481 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5482 errorText_ = errorStream_.str();
5483 error( RtAudioError::WARNING );
5488 inCaps.dwSize = sizeof( inCaps );
5489 result = input->GetCaps( &inCaps );
5490 if ( FAILED( result ) ) {
5492 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5493 errorText_ = errorStream_.str();
5494 error( RtAudioError::WARNING );
5498 // Get input channel information.
5499 info.inputChannels = inCaps.dwChannels;
5501 // Get sample rate and format information.
5502 std::vector<unsigned int> rates;
5503 if ( inCaps.dwChannels >= 2 ) {
5504 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5505 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5506 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5507 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5508 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5509 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5510 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5511 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5513 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5514 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5515 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5516 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5517 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5519 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5520 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5521 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5522 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5523 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5526 else if ( inCaps.dwChannels == 1 ) {
5527 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5528 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5529 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5530 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5531 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5532 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5533 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5534 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5536 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5537 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5538 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5539 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5540 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5542 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5543 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5544 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5545 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5546 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5549 else info.inputChannels = 0; // technically, this would be an error
5553 if ( info.inputChannels == 0 ) return info;
5555 // Copy the supported rates to the info structure but avoid duplication.
5557 for ( unsigned int i=0; i<rates.size(); i++ ) {
5559 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5560 if ( rates[i] == info.sampleRates[j] ) {
5565 if ( found == false ) info.sampleRates.push_back( rates[i] );
5567 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5569 // If device opens for both playback and capture, we determine the channels.
5570 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5571 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
5573 if ( device == 0 ) info.isDefaultInput = true;
5575 // Copy name and return.
5576 info.name = dsDevices[ device ].name;
5581 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5582 unsigned int firstChannel, unsigned int sampleRate,
5583 RtAudioFormat format, unsigned int *bufferSize,
5584 RtAudio::StreamOptions *options )
5586 if ( channels + firstChannel > 2 ) {
5587 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5591 size_t nDevices = dsDevices.size();
5592 if ( nDevices == 0 ) {
5593 // This should not happen because a check is made before this function is called.
5594 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5598 if ( device >= nDevices ) {
5599 // This should not happen because a check is made before this function is called.
5600 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5604 if ( mode == OUTPUT ) {
5605 if ( dsDevices[ device ].validId[0] == false ) {
5606 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5607 errorText_ = errorStream_.str();
5611 else { // mode == INPUT
5612 if ( dsDevices[ device ].validId[1] == false ) {
5613 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5614 errorText_ = errorStream_.str();
5619 // According to a note in PortAudio, using GetDesktopWindow()
5620 // instead of GetForegroundWindow() is supposed to avoid problems
5621 // that occur when the application's window is not the foreground
5622 // window. Also, if the application window closes before the
5623 // DirectSound buffer, DirectSound can crash. In the past, I had
5624 // problems when using GetDesktopWindow() but it seems fine now
5625 // (January 2010). I'll leave it commented here.
5626 // HWND hWnd = GetForegroundWindow();
5627 HWND hWnd = GetDesktopWindow();
5629 // Check the numberOfBuffers parameter and limit the lowest value to
5630 // two. This is a judgement call and a value of two is probably too
5631 // low for capture, but it should work for playback.
5633 if ( options ) nBuffers = options->numberOfBuffers;
5634 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5635 if ( nBuffers < 2 ) nBuffers = 3;
5637 // Check the lower range of the user-specified buffer size and set
5638 // (arbitrarily) to a lower bound of 32.
5639 if ( *bufferSize < 32 ) *bufferSize = 32;
5641 // Create the wave format structure. The data format setting will
5642 // be determined later.
5643 WAVEFORMATEX waveFormat;
5644 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5645 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5646 waveFormat.nChannels = channels + firstChannel;
5647 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5649 // Determine the device buffer size. By default, we'll use the value
5650 // defined above (32K), but we will grow it to make allowances for
5651 // very large software buffer sizes.
5652 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5653 DWORD dsPointerLeadTime = 0;
5655 void *ohandle = 0, *bhandle = 0;
5657 if ( mode == OUTPUT ) {
5659 LPDIRECTSOUND output;
5660 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5661 if ( FAILED( result ) ) {
5662 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5663 errorText_ = errorStream_.str();
5668 outCaps.dwSize = sizeof( outCaps );
5669 result = output->GetCaps( &outCaps );
5670 if ( FAILED( result ) ) {
5672 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5673 errorText_ = errorStream_.str();
5677 // Check channel information.
5678 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5679 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5680 errorText_ = errorStream_.str();
5684 // Check format information. Use 16-bit format unless not
5685 // supported or user requests 8-bit.
5686 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5687 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5688 waveFormat.wBitsPerSample = 16;
5689 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5692 waveFormat.wBitsPerSample = 8;
5693 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5695 stream_.userFormat = format;
5697 // Update wave format structure and buffer information.
5698 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5699 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5700 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5702 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5703 while ( dsPointerLeadTime * 2U > dsBufferSize )
5706 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5707 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5708 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5709 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5710 if ( FAILED( result ) ) {
5712 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5713 errorText_ = errorStream_.str();
5717 // Even though we will write to the secondary buffer, we need to
5718 // access the primary buffer to set the correct output format
5719 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5720 // buffer description.
5721 DSBUFFERDESC bufferDescription;
5722 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5723 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5724 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5726 // Obtain the primary buffer
5727 LPDIRECTSOUNDBUFFER buffer;
5728 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5729 if ( FAILED( result ) ) {
5731 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5732 errorText_ = errorStream_.str();
5736 // Set the primary DS buffer sound format.
5737 result = buffer->SetFormat( &waveFormat );
5738 if ( FAILED( result ) ) {
5740 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5741 errorText_ = errorStream_.str();
5745 // Setup the secondary DS buffer description.
5746 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5747 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5748 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5749 DSBCAPS_GLOBALFOCUS |
5750 DSBCAPS_GETCURRENTPOSITION2 |
5751 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5752 bufferDescription.dwBufferBytes = dsBufferSize;
5753 bufferDescription.lpwfxFormat = &waveFormat;
5755 // Try to create the secondary DS buffer. If that doesn't work,
5756 // try to use software mixing. Otherwise, there's a problem.
5757 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5758 if ( FAILED( result ) ) {
5759 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5760 DSBCAPS_GLOBALFOCUS |
5761 DSBCAPS_GETCURRENTPOSITION2 |
5762 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5763 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5764 if ( FAILED( result ) ) {
5766 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5767 errorText_ = errorStream_.str();
5772 // Get the buffer size ... might be different from what we specified.
5774 dsbcaps.dwSize = sizeof( DSBCAPS );
5775 result = buffer->GetCaps( &dsbcaps );
5776 if ( FAILED( result ) ) {
5779 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5780 errorText_ = errorStream_.str();
5784 dsBufferSize = dsbcaps.dwBufferBytes;
5786 // Lock the DS buffer
5789 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5790 if ( FAILED( result ) ) {
5793 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
5794 errorText_ = errorStream_.str();
5798 // Zero the DS buffer
5799 ZeroMemory( audioPtr, dataLen );
5801 // Unlock the DS buffer
5802 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5803 if ( FAILED( result ) ) {
5806 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
5807 errorText_ = errorStream_.str();
5811 ohandle = (void *) output;
5812 bhandle = (void *) buffer;
5815 if ( mode == INPUT ) {
5817 LPDIRECTSOUNDCAPTURE input;
5818 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5819 if ( FAILED( result ) ) {
5820 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5821 errorText_ = errorStream_.str();
5826 inCaps.dwSize = sizeof( inCaps );
5827 result = input->GetCaps( &inCaps );
5828 if ( FAILED( result ) ) {
5830 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
5831 errorText_ = errorStream_.str();
5835 // Check channel information.
5836 if ( inCaps.dwChannels < channels + firstChannel ) {
5837 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
5841 // Check format information. Use 16-bit format unless user
5843 DWORD deviceFormats;
5844 if ( channels + firstChannel == 2 ) {
5845 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
5846 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5847 waveFormat.wBitsPerSample = 8;
5848 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5850 else { // assume 16-bit is supported
5851 waveFormat.wBitsPerSample = 16;
5852 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5855 else { // channel == 1
5856 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
5857 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5858 waveFormat.wBitsPerSample = 8;
5859 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5861 else { // assume 16-bit is supported
5862 waveFormat.wBitsPerSample = 16;
5863 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5866 stream_.userFormat = format;
5868 // Update wave format structure and buffer information.
5869 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5870 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5871 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5873 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5874 while ( dsPointerLeadTime * 2U > dsBufferSize )
5877 // Setup the secondary DS buffer description.
5878 DSCBUFFERDESC bufferDescription;
5879 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
5880 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
5881 bufferDescription.dwFlags = 0;
5882 bufferDescription.dwReserved = 0;
5883 bufferDescription.dwBufferBytes = dsBufferSize;
5884 bufferDescription.lpwfxFormat = &waveFormat;
5886 // Create the capture buffer.
5887 LPDIRECTSOUNDCAPTUREBUFFER buffer;
5888 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
5889 if ( FAILED( result ) ) {
5891 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
5892 errorText_ = errorStream_.str();
5896 // Get the buffer size ... might be different from what we specified.
5898 dscbcaps.dwSize = sizeof( DSCBCAPS );
5899 result = buffer->GetCaps( &dscbcaps );
5900 if ( FAILED( result ) ) {
5903 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5904 errorText_ = errorStream_.str();
5908 dsBufferSize = dscbcaps.dwBufferBytes;
5910 // NOTE: We could have a problem here if this is a duplex stream
5911 // and the play and capture hardware buffer sizes are different
5912 // (I'm actually not sure if that is a problem or not).
5913 // Currently, we are not verifying that.
5915 // Lock the capture buffer
5918 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5919 if ( FAILED( result ) ) {
5922 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
5923 errorText_ = errorStream_.str();
5928 ZeroMemory( audioPtr, dataLen );
5930 // Unlock the buffer
5931 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5932 if ( FAILED( result ) ) {
5935 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
5936 errorText_ = errorStream_.str();
5940 ohandle = (void *) input;
5941 bhandle = (void *) buffer;
5944 // Set various stream parameters
5945 DsHandle *handle = 0;
5946 stream_.nDeviceChannels[mode] = channels + firstChannel;
5947 stream_.nUserChannels[mode] = channels;
5948 stream_.bufferSize = *bufferSize;
5949 stream_.channelOffset[mode] = firstChannel;
5950 stream_.deviceInterleaved[mode] = true;
5951 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
5952 else stream_.userInterleaved = true;
5954 // Set flag for buffer conversion
5955 stream_.doConvertBuffer[mode] = false;
5956 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
5957 stream_.doConvertBuffer[mode] = true;
5958 if (stream_.userFormat != stream_.deviceFormat[mode])
5959 stream_.doConvertBuffer[mode] = true;
5960 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
5961 stream_.nUserChannels[mode] > 1 )
5962 stream_.doConvertBuffer[mode] = true;
5964 // Allocate necessary internal buffers
5965 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
5966 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
5967 if ( stream_.userBuffer[mode] == NULL ) {
5968 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
5972 if ( stream_.doConvertBuffer[mode] ) {
5974 bool makeBuffer = true;
5975 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
5976 if ( mode == INPUT ) {
5977 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
5978 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
5979 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
5984 bufferBytes *= *bufferSize;
5985 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
5986 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
5987 if ( stream_.deviceBuffer == NULL ) {
5988 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
5994 // Allocate our DsHandle structures for the stream.
5995 if ( stream_.apiHandle == 0 ) {
5997 handle = new DsHandle;
5999 catch ( std::bad_alloc& ) {
6000 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6004 // Create a manual-reset event.
6005 handle->condition = CreateEvent( NULL, // no security
6006 TRUE, // manual-reset
6007 FALSE, // non-signaled initially
6009 stream_.apiHandle = (void *) handle;
6012 handle = (DsHandle *) stream_.apiHandle;
6013 handle->id[mode] = ohandle;
6014 handle->buffer[mode] = bhandle;
6015 handle->dsBufferSize[mode] = dsBufferSize;
6016 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6018 stream_.device[mode] = device;
6019 stream_.state = STREAM_STOPPED;
6020 if ( stream_.mode == OUTPUT && mode == INPUT )
6021 // We had already set up an output stream.
6022 stream_.mode = DUPLEX;
6024 stream_.mode = mode;
6025 stream_.nBuffers = nBuffers;
6026 stream_.sampleRate = sampleRate;
6028 // Setup the buffer conversion information structure.
6029 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6031 // Setup the callback thread.
6032 if ( stream_.callbackInfo.isRunning == false ) {
6034 stream_.callbackInfo.isRunning = true;
6035 stream_.callbackInfo.object = (void *) this;
6036 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6037 &stream_.callbackInfo, 0, &threadId );
6038 if ( stream_.callbackInfo.thread == 0 ) {
6039 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6043 // Boost DS thread priority
6044 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
6050 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6051 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6052 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6053 if ( buffer ) buffer->Release();
6056 if ( handle->buffer[1] ) {
6057 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6058 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6059 if ( buffer ) buffer->Release();
6062 CloseHandle( handle->condition );
6064 stream_.apiHandle = 0;
6067 for ( int i=0; i<2; i++ ) {
6068 if ( stream_.userBuffer[i] ) {
6069 free( stream_.userBuffer[i] );
6070 stream_.userBuffer[i] = 0;
6074 if ( stream_.deviceBuffer ) {
6075 free( stream_.deviceBuffer );
6076 stream_.deviceBuffer = 0;
6079 stream_.state = STREAM_CLOSED;
// Tear down an open DirectSound stream: stop the callback thread,
// release the playback/capture DirectSound objects, free internal
// buffers, and mark the stream closed. Warns (does not throw) if no
// stream is open.
6083 void RtApiDs :: closeStream()
6085 if ( stream_.state == STREAM_CLOSED ) {
6086 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6087 error( RtAudioError::WARNING );
// Signal the callback thread to exit its loop, then wait for it to
// finish before releasing any resources it might still be using.
6091 // Stop the callback thread.
6092 stream_.callbackInfo.isRunning = false;
6093 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6094 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6096 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Release the output (index 0) buffer/device pair, if one was opened.
6098 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6099 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6100 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// Release the input (index 1) capture buffer/device pair, if any.
6107 if ( handle->buffer[1] ) {
6108 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6109 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
// Destroy the event used to signal stream drain, then drop the handle.
6116 CloseHandle( handle->condition );
6118 stream_.apiHandle = 0;
// Free the per-mode user buffers (0 = output, 1 = input).
6121 for ( int i=0; i<2; i++ ) {
6122 if ( stream_.userBuffer[i] ) {
6123 free( stream_.userBuffer[i] );
6124 stream_.userBuffer[i] = 0;
// Free the shared device-format conversion buffer, if allocated.
6128 if ( stream_.deviceBuffer ) {
6129 free( stream_.deviceBuffer );
6130 stream_.deviceBuffer = 0;
// Reset stream bookkeeping so a new stream can be opened.
6133 stream_.mode = UNINITIALIZED;
6134 stream_.state = STREAM_CLOSED;
// Start a previously opened stream: raise the system timer resolution,
// set up duplex pre-roll, start the DirectSound playback and/or capture
// buffers in looping mode, and mark the stream running. Warns and
// returns early if the stream is already running.
6137 void RtApiDs :: startStream()
6140 if ( stream_.state == STREAM_RUNNING ) {
6141 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6142 error( RtAudioError::WARNING );
6146 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6148 // Increase scheduler frequency on lesser windows (a side-effect of
6149 // increasing timer accuracy). On greater windows (Win2K or later),
6150 // this is already in effect.
// Paired with timeEndPeriod( 1 ) in stopStream().
6151 timeBeginPeriod( 1 );
6153 buffersRolling = false;
6154 duplexPrerollBytes = 0;
6156 if ( stream_.mode == DUPLEX ) {
6157 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
// Computed in *input* device bytes (deviceFormat[1] / nDeviceChannels[1]);
// consumed in callbackEvent() while the read/write pointers sync up.
6158 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Start the output buffer playing in a continuous loop.
6162 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6164 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6165 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6166 if ( FAILED( result ) ) {
6167 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6168 errorText_ = errorStream_.str();
// Start the capture buffer recording in a continuous loop.
6173 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6175 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6176 result = buffer->Start( DSCBSTART_LOOPING );
6177 if ( FAILED( result ) ) {
6178 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6179 errorText_ = errorStream_.str();
// Reset drain state and the condition event used by stopStream().
6184 handle->drainCounter = 0;
6185 handle->internalDrain = false;
6186 ResetEvent( handle->condition );
6187 stream_.state = STREAM_RUNNING;
// Any failure above falls through to here and is reported once.
6190 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop a running stream. For output/duplex, first lets the callback
// drain the pending buffers (blocks on handle->condition), then stops
// each DirectSound buffer and zeros its contents so a restart does not
// replay stale audio. Warns and returns early if already stopped.
6193 void RtApiDs :: stopStream()
6196 if ( stream_.state == STREAM_STOPPED ) {
6197 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6198 error( RtAudioError::WARNING );
6205 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6206 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain in progress: request one (value 2)
// and wait for callbackEvent() to SetEvent() when the drain completes.
6207 if ( handle->drainCounter == 0 ) {
6208 handle->drainCounter = 2;
6209 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6212 stream_.state = STREAM_STOPPED;
6214 MUTEX_LOCK( &stream_.mutex );
6216 // Stop the buffer and clear memory
6217 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6218 result = buffer->Stop();
6219 if ( FAILED( result ) ) {
6220 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6221 errorText_ = errorStream_.str();
6225 // Lock the buffer and clear it so that if we start to play again,
6226 // we won't have old data playing.
6227 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6228 if ( FAILED( result ) ) {
6229 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6230 errorText_ = errorStream_.str();
6234 // Zero the DS buffer
6235 ZeroMemory( audioPtr, dataLen );
6237 // Unlock the DS buffer
6238 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6239 if ( FAILED( result ) ) {
6240 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6241 errorText_ = errorStream_.str();
6245 // If we start playing again, we must begin at beginning of buffer.
6246 handle->bufferPointer[0] = 0;
// Same stop/zero procedure for the capture side.
6249 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6250 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6254 stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken in the output branch.
6256 if ( stream_.mode != DUPLEX )
6257 MUTEX_LOCK( &stream_.mutex );
6259 result = buffer->Stop();
6260 if ( FAILED( result ) ) {
6261 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6262 errorText_ = errorStream_.str();
6266 // Lock the buffer and clear it so that if we start to play again,
6267 // we won't have old data playing.
6268 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6269 if ( FAILED( result ) ) {
6270 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6271 errorText_ = errorStream_.str();
6275 // Zero the DS buffer
6276 ZeroMemory( audioPtr, dataLen );
6278 // Unlock the DS buffer
6279 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6280 if ( FAILED( result ) ) {
6281 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6282 errorText_ = errorStream_.str();
6286 // If we start recording again, we must begin at beginning of buffer.
6287 handle->bufferPointer[1] = 0;
6291 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6292 MUTEX_UNLOCK( &stream_.mutex );
// Report the last recorded failure, if any, after cleanup completes.
6294 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Abort a running stream without draining pending audio. Setting
// drainCounter to 2 makes callbackEvent() write zeros to the output and
// count down to a stop (see the drainCounter handling there); the actual
// stop presumably happens via stopStream() — not fully visible here.
6297 void RtApiDs :: abortStream()
6300 if ( stream_.state == STREAM_STOPPED ) {
6301 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6302 error( RtAudioError::WARNING );
6306 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6307 handle->drainCounter = 2;
// Core per-buffer engine, run repeatedly by the callback thread:
// 1. Handle stopped/draining states and fire the user callback.
// 2. On first pass, wait for the DS buffers to start "rolling" and
//    initialize the read/write pointers (DUPLEX needs both in sync).
// 3. OUTPUT/DUPLEX: convert the user buffer if needed, wait for the
//    safe-write window, Lock/CopyMemory/Unlock into the DS buffer.
// 4. INPUT/DUPLEX: wait (INPUT) or back off (DUPLEX) until data is
//    available, Lock/CopyMemory/Unlock out of the capture buffer,
//    honoring the 0.5 s duplex pre-roll of zeros.
//
// Fixes applied in review:
// - "&current..." had been mangled to "¤t..." (the HTML entity
//   "&curren;") in the four GetCurrentPosition calls; restored.
// - Both Unlock calls discarded their HRESULT, so the following
//   FAILED( result ) re-tested the earlier (successful) Lock result and
//   Unlock failures were silently ignored; the result is now assigned.
6312 void RtApiDs :: callbackEvent()
6314 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6315 Sleep( 50 ); // sleep 50 milliseconds
6319 if ( stream_.state == STREAM_CLOSED ) {
6320 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6321 error( RtAudioError::WARNING );
6325 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6326 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6328 // Check if we were draining the stream and signal is finished.
6329 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6331 stream_.state = STREAM_STOPPING;
// An externally requested drain (stopStream) is waiting on this event.
6332 if ( handle->internalDrain == false )
6333 SetEvent( handle->condition );
6339 // Invoke user callback to get fresh output data UNLESS we are
6341 if ( handle->drainCounter == 0 ) {
6342 RtAudioCallback callback = (RtAudioCallback) info->callback;
6343 double streamTime = getStreamTime();
6344 RtAudioStreamStatus status = 0;
// Report and clear any under/overflow detected on the previous pass.
6345 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6346 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6347 handle->xrun[0] = false;
6349 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6350 status |= RTAUDIO_INPUT_OVERFLOW;
6351 handle->xrun[1] = false;
6353 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6354 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 => abort immediately; 1 => drain then stop.
6355 if ( cbReturnValue == 2 ) {
6356 stream_.state = STREAM_STOPPING;
6357 handle->drainCounter = 2;
6361 else if ( cbReturnValue == 1 ) {
6362 handle->drainCounter = 1;
6363 handle->internalDrain = true;
6368 DWORD currentWritePointer, safeWritePointer;
6369 DWORD currentReadPointer, safeReadPointer;
6370 UINT nextWritePointer;
6372 LPVOID buffer1 = NULL;
6373 LPVOID buffer2 = NULL;
6374 DWORD bufferSize1 = 0;
6375 DWORD bufferSize2 = 0;
6380 MUTEX_LOCK( &stream_.mutex );
// The stream may have been stopped while we ran the user callback.
6381 if ( stream_.state == STREAM_STOPPED ) {
6382 MUTEX_UNLOCK( &stream_.mutex );
6386 if ( buffersRolling == false ) {
6387 if ( stream_.mode == DUPLEX ) {
6388 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6390 // It takes a while for the devices to get rolling. As a result,
6391 // there's no guarantee that the capture and write device pointers
6392 // will move in lockstep. Wait here for both devices to start
6393 // rolling, and then set our buffer pointers accordingly.
6394 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6395 // bytes later than the write buffer.
6397 // Stub: a serious risk of having a pre-emptive scheduling round
6398 // take place between the two GetCurrentPosition calls... but I'm
6399 // really not sure how to solve the problem. Temporarily boost to
6400 // Realtime priority, maybe; but I'm not sure what priority the
6401 // DirectSound service threads run at. We *should* be roughly
6402 // within a ms or so of correct.
6404 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6405 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6407 DWORD startSafeWritePointer, startSafeReadPointer;
6409 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6410 if ( FAILED( result ) ) {
6411 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6412 errorText_ = errorStream_.str();
6413 MUTEX_UNLOCK( &stream_.mutex );
6414 error( RtAudioError::SYSTEM_ERROR );
6417 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6418 if ( FAILED( result ) ) {
6419 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6420 errorText_ = errorStream_.str();
6421 MUTEX_UNLOCK( &stream_.mutex );
6422 error( RtAudioError::SYSTEM_ERROR );
6426 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6427 if ( FAILED( result ) ) {
6428 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6429 errorText_ = errorStream_.str();
6430 MUTEX_UNLOCK( &stream_.mutex );
6431 error( RtAudioError::SYSTEM_ERROR );
6434 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6435 if ( FAILED( result ) ) {
6436 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6437 errorText_ = errorStream_.str();
6438 MUTEX_UNLOCK( &stream_.mutex );
6439 error( RtAudioError::SYSTEM_ERROR );
// Both pointers have moved => both devices are now rolling.
6442 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6446 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6448 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6449 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6450 handle->bufferPointer[1] = safeReadPointer;
6452 else if ( stream_.mode == OUTPUT ) {
6454 // Set the proper nextWritePosition after initial startup.
6455 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6456 result = dsWriteBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6457 if ( FAILED( result ) ) {
6458 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6459 errorText_ = errorStream_.str();
6460 MUTEX_UNLOCK( &stream_.mutex );
6461 error( RtAudioError::SYSTEM_ERROR );
6464 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6465 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6468 buffersRolling = true;
6471 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6473 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6475 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6476 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6477 bufferBytes *= formatBytes( stream_.userFormat );
6478 memset( stream_.userBuffer[0], 0, bufferBytes );
6481 // Setup parameters and do buffer conversion if necessary.
6482 if ( stream_.doConvertBuffer[0] ) {
6483 buffer = stream_.deviceBuffer;
6484 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6485 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6486 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6489 buffer = stream_.userBuffer[0];
6490 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6491 bufferBytes *= formatBytes( stream_.userFormat );
6494 // No byte swapping necessary in DirectSound implementation.
6496 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6497 // unsigned. So, we need to convert our signed 8-bit data here to
6499 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6500 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6502 DWORD dsBufferSize = handle->dsBufferSize[0];
6503 nextWritePointer = handle->bufferPointer[0];
6505 DWORD endWrite, leadPointer;
6507 // Find out where the read and "safe write" pointers are.
6508 result = dsBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6509 if ( FAILED( result ) ) {
6510 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6511 errorText_ = errorStream_.str();
6512 MUTEX_UNLOCK( &stream_.mutex );
6513 error( RtAudioError::SYSTEM_ERROR );
6517 // We will copy our output buffer into the region between
6518 // safeWritePointer and leadPointer. If leadPointer is not
6519 // beyond the next endWrite position, wait until it is.
6520 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6521 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6522 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6523 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6524 endWrite = nextWritePointer + bufferBytes;
6526 // Check whether the entire write region is behind the play pointer.
6527 if ( leadPointer >= endWrite ) break;
6529 // If we are here, then we must wait until the leadPointer advances
6530 // beyond the end of our next write region. We use the
6531 // Sleep() function to suspend operation until that happens.
6532 double millis = ( endWrite - leadPointer ) * 1000.0;
6533 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6534 if ( millis < 1.0 ) millis = 1.0;
6535 Sleep( (DWORD) millis );
// If our intended write region overlaps the hardware's play/write gap,
// an underflow occurred; resync just ahead of the safe write pointer.
6538 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6539 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6540 // We've strayed into the forbidden zone ... resync the read pointer.
6541 handle->xrun[0] = true;
6542 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6543 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6544 handle->bufferPointer[0] = nextWritePointer;
6545 endWrite = nextWritePointer + bufferBytes;
6548 // Lock free space in the buffer
6549 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6550 &bufferSize1, &buffer2, &bufferSize2, 0 );
6551 if ( FAILED( result ) ) {
6552 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6553 errorText_ = errorStream_.str();
6554 MUTEX_UNLOCK( &stream_.mutex );
6555 error( RtAudioError::SYSTEM_ERROR );
6559 // Copy our buffer into the DS buffer
// Lock may return two regions when the request wraps around the end
// of the circular buffer; buffer2 covers the wrapped portion.
6560 CopyMemory( buffer1, buffer, bufferSize1 );
6561 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6563 // Update our buffer offset and unlock sound buffer
// Capture the HRESULT so the FAILED() check below actually tests
// Unlock (previously it re-tested the earlier Lock result).
6564 result = dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6565 if ( FAILED( result ) ) {
6566 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6567 errorText_ = errorStream_.str();
6568 MUTEX_UNLOCK( &stream_.mutex );
6569 error( RtAudioError::SYSTEM_ERROR );
6572 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6573 handle->bufferPointer[0] = nextWritePointer;
6576 // Don't bother draining input
6577 if ( handle->drainCounter ) {
6578 handle->drainCounter++;
6582 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6584 // Setup parameters.
6585 if ( stream_.doConvertBuffer[1] ) {
6586 buffer = stream_.deviceBuffer;
6587 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6588 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6591 buffer = stream_.userBuffer[1];
6592 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6593 bufferBytes *= formatBytes( stream_.userFormat );
6596 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6597 long nextReadPointer = handle->bufferPointer[1];
6598 DWORD dsBufferSize = handle->dsBufferSize[1];
6600 // Find out where the write and "safe read" pointers are.
6601 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6602 if ( FAILED( result ) ) {
6603 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6604 errorText_ = errorStream_.str();
6605 MUTEX_UNLOCK( &stream_.mutex );
6606 error( RtAudioError::SYSTEM_ERROR );
6610 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6611 DWORD endRead = nextReadPointer + bufferBytes;
6613 // Handling depends on whether we are INPUT or DUPLEX.
6614 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6615 // then a wait here will drag the write pointers into the forbidden zone.
6617 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6618 // it's in a safe position. This causes dropouts, but it seems to be the only
6619 // practical way to sync up the read and write pointers reliably, given the
6620 // the very complex relationship between phase and increment of the read and write
6623 // In order to minimize audible dropouts in DUPLEX mode, we will
6624 // provide a pre-roll period of 0.5 seconds in which we return
6625 // zeros from the read buffer while the pointers sync up.
6627 if ( stream_.mode == DUPLEX ) {
6628 if ( safeReadPointer < endRead ) {
6629 if ( duplexPrerollBytes <= 0 ) {
6630 // Pre-roll time over. Be more aggressive.
6631 int adjustment = endRead-safeReadPointer;
6633 handle->xrun[1] = true;
6635 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6636 // and perform fine adjustments later.
6637 // - small adjustments: back off by twice as much.
6638 if ( adjustment >= 2*bufferBytes )
6639 nextReadPointer = safeReadPointer-2*bufferBytes;
6641 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6643 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6647 // In pre-roll time. Just do it.
6648 nextReadPointer = safeReadPointer - bufferBytes;
6649 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6651 endRead = nextReadPointer + bufferBytes;
6654 else { // mode == INPUT
6655 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6656 // See comments for playback.
6657 double millis = (endRead - safeReadPointer) * 1000.0;
6658 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6659 if ( millis < 1.0 ) millis = 1.0;
6660 Sleep( (DWORD) millis );
6662 // Wake up and find out where we are now.
6663 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6664 if ( FAILED( result ) ) {
6665 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6666 errorText_ = errorStream_.str();
6667 MUTEX_UNLOCK( &stream_.mutex );
6668 error( RtAudioError::SYSTEM_ERROR );
6672 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6676 // Lock free space in the buffer
6677 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6678 &bufferSize1, &buffer2, &bufferSize2, 0 );
6679 if ( FAILED( result ) ) {
6680 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6681 errorText_ = errorStream_.str();
6682 MUTEX_UNLOCK( &stream_.mutex );
6683 error( RtAudioError::SYSTEM_ERROR );
// During the duplex pre-roll we hand the user zeros instead of the
// (not-yet-synchronized) captured data.
6687 if ( duplexPrerollBytes <= 0 ) {
6688 // Copy our buffer into the DS buffer
6689 CopyMemory( buffer, buffer1, bufferSize1 );
6690 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6693 memset( buffer, 0, bufferSize1 );
6694 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6695 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6698 // Update our buffer offset and unlock sound buffer
6699 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
// Capture the HRESULT so the FAILED() check below actually tests
// Unlock (previously it re-tested the earlier Lock result).
6700 result = dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6701 if ( FAILED( result ) ) {
6702 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6703 errorText_ = errorStream_.str();
6704 MUTEX_UNLOCK( &stream_.mutex );
6705 error( RtAudioError::SYSTEM_ERROR );
6708 handle->bufferPointer[1] = nextReadPointer;
6710 // No byte swapping necessary in DirectSound implementation.
6712 // If necessary, convert 8-bit data from unsigned to signed.
6713 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6714 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6716 // Do buffer conversion if necessary.
6717 if ( stream_.doConvertBuffer[1] )
6718 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6722 MUTEX_UNLOCK( &stream_.mutex );
6723 RtApi::tickStreamTime();
6726 // Definitions for utility functions and callbacks
6727 // specific to the DirectSound implementation.
// Entry point of the stream thread started by probeDeviceOpen() via
// _beginthreadex(). Loops calling the object's callbackEvent() until
// closeStream() clears callbackInfo.isRunning.
6729 static unsigned __stdcall callbackHandler( void *ptr )
6731 CallbackInfo *info = (CallbackInfo *) ptr;
6732 RtApiDs *object = (RtApiDs *) info->object;
// Read the flag through a pointer so writes from closeStream() on
// another thread are observed each iteration.
6733 bool* isRunning = &info->isRunning;
6735 while ( *isRunning == true ) {
6736 object->callbackEvent();
// DirectSound device-enumeration callback (passed to the DS enumerate
// APIs). Probes each enumerated device: it is "valid" only if it can be
// opened and reports usable capabilities. Valid devices are recorded in
// the dsDevices vector supplied through the DsProbeData context —
// either updating an existing entry with the same name (storing the
// GUID in id[1] for input or id[0] for output) or appending a new one.
// Returning TRUE tells DirectSound to continue enumerating.
6743 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6744 LPCTSTR description,
6748 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6749 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6752 bool validDevice = false;
// Input probe: try to open a capture object and check its caps.
6753 if ( probeInfo.isInput == true ) {
6755 LPDIRECTSOUNDCAPTURE object;
6757 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
6758 if ( hr != DS_OK ) return TRUE;
6760 caps.dwSize = sizeof(caps);
6761 hr = object->GetCaps( &caps );
6762 if ( hr == DS_OK ) {
6763 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
// Output probe: try to open a playback object and check its caps.
6770 LPDIRECTSOUND object;
6771 hr = DirectSoundCreate( lpguid, &object, NULL );
6772 if ( hr != DS_OK ) return TRUE;
6774 caps.dwSize = sizeof(caps);
6775 hr = object->GetCaps( &caps );
6776 if ( hr == DS_OK ) {
6777 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
6783 // If good device, then save its name and guid.
6784 std::string name = convertCharPointerToStdString( description );
6785 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
// A NULL GUID denotes the system default device.
6786 if ( lpguid == NULL )
6787 name = "Default Device";
6788 if ( validDevice ) {
// First see if a device with this name was already recorded; if so,
// just update its GUID/valid flag for the direction being probed.
6789 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
6790 if ( dsDevices[i].name == name ) {
6791 dsDevices[i].found = true;
6792 if ( probeInfo.isInput ) {
6793 dsDevices[i].id[1] = lpguid;
6794 dsDevices[i].validId[1] = true;
6797 dsDevices[i].id[0] = lpguid;
6798 dsDevices[i].validId[0] = true;
// Otherwise record a brand new device entry.
6806 device.found = true;
6807 if ( probeInfo.isInput ) {
6808 device.id[1] = lpguid;
6809 device.validId[1] = true;
6812 device.id[0] = lpguid;
6813 device.validId[0] = true;
6815 dsDevices.push_back( device );
// Maps a DirectSound HRESULT error code to a short human-readable string
// used when composing RtApiDs error messages. Falls through to a generic
// "unknown error" string for unrecognized codes.
// NOTE(review): the switch statement opener and several case/return lines
// are missing from this extraction.
6821 static const char* getErrorString( int code )
6825 case DSERR_ALLOCATED:
6826 return "Already allocated";
6828 case DSERR_CONTROLUNAVAIL:
6829 return "Control unavailable";
6831 case DSERR_INVALIDPARAM:
6832 return "Invalid parameter";
6834 case DSERR_INVALIDCALL:
6835 return "Invalid call";
6838 return "Generic error";
6840 case DSERR_PRIOLEVELNEEDED:
6841 return "Priority level needed";
6843 case DSERR_OUTOFMEMORY:
6844 return "Out of memory";
6846 case DSERR_BADFORMAT:
6847 return "The sample rate or the channel format is not supported";
6849 case DSERR_UNSUPPORTED:
6850 return "Not supported";
6852 case DSERR_NODRIVER:
6855 case DSERR_ALREADYINITIALIZED:
6856 return "Already initialized";
6858 case DSERR_NOAGGREGATION:
6859 return "No aggregation";
6861 case DSERR_BUFFERLOST:
6862 return "Buffer lost";
6864 case DSERR_OTHERAPPHASPRIO:
6865 return "Another application already has priority";
6867 case DSERR_UNINITIALIZED:
6868 return "Uninitialized";
6871 return "DirectSound unknown error";
6874 //******************** End of __WINDOWS_DS__ *********************//
6878 #if defined(__LINUX_ALSA__)
6880 #include <alsa/asoundlib.h>
6883 // A structure to hold various information related to the ALSA API
// Fragment of the AlsaHandle structure: per-stream ALSA state kept in
// stream_.apiHandle. handles[0] is the playback pcm, handles[1] the capture
// pcm (same [0]=output/[1]=input convention used throughout this file).
// runnable_cv is signalled to wake the callback thread (see startStream /
// closeStream below). The constructor initializer clears the xrun flags.
6886 snd_pcm_t *handles[2];
6889 pthread_cond_t runnable_cv;
6893 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
// Forward declaration of the ALSA callback-thread entry point passed to
// pthread_create in probeDeviceOpen.
6896 static void *alsaCallbackHandler( void * ptr );
// Constructor: no ALSA resources are acquired until a stream is opened.
6898 RtApiAlsa :: RtApiAlsa()
6900 // Nothing to do here.
// Destructor: make sure any open stream (and its pcm handles, buffers and
// callback thread) is torn down via closeStream().
6903 RtApiAlsa :: ~RtApiAlsa()
6905 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Counts available ALSA pcm devices by walking every card ("hw:N") with the
// control interface and enumerating its pcm subdevices, then additionally
// probing the "default" device. Control-open/next-device failures are
// reported as warnings and the scan continues with the next card.
// NOTE(review): loop braces, the nDevices increments, and the return are
// missing from this extraction.
6908 unsigned int RtApiAlsa :: getDeviceCount( void )
6910 unsigned nDevices = 0;
6911 int result, subdevice, card;
6915 // Count cards and devices
// snd_card_next with card initialized appropriately yields the first card;
// -1 terminates the walk.
6917 snd_card_next( &card );
6918 while ( card >= 0 ) {
6919 sprintf( name, "hw:%d", card );
6920 result = snd_ctl_open( &handle, name, 0 );
6922 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6923 errorText_ = errorStream_.str();
6924 error( RtAudioError::WARNING );
6929 result = snd_ctl_pcm_next_device( handle, &subdevice );
6931 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6932 errorText_ = errorStream_.str();
6933 error( RtAudioError::WARNING );
// subdevice < 0 means no more pcm devices on this card.
6936 if ( subdevice < 0 )
6941 snd_ctl_close( handle );
6942 snd_card_next( &card );
// Also count the "default" pcm if it can be opened.
6945 result = snd_ctl_open( &handle, "default", 0 );
6948 snd_ctl_close( handle );
// Probes one ALSA device (by RtAudio index) and fills an RtAudio::DeviceInfo:
// output/input/duplex channel counts, supported sample rates, native data
// formats, and a display name. The device index is resolved by re-walking
// cards/subdevices in the same order as getDeviceCount(). If a stream is
// already open on this device, the cached result from saveDeviceInfo() is
// returned instead, since an open pcm cannot be re-probed.
// NOTE(review): many interior lines (if-brace pairs, probeParameters label,
// info.probed = true, final return) are missing from this extraction.
6954 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
6956 RtAudio::DeviceInfo info;
6957 info.probed = false;
6959 unsigned nDevices = 0;
6960 int result, subdevice, card;
6964 // Count cards and devices
6967 snd_card_next( &card );
6968 while ( card >= 0 ) {
6969 sprintf( name, "hw:%d", card );
6970 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
6972 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6973 errorText_ = errorStream_.str();
6974 error( RtAudioError::WARNING );
6979 result = snd_ctl_pcm_next_device( chandle, &subdevice );
6981 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6982 errorText_ = errorStream_.str();
6983 error( RtAudioError::WARNING );
6986 if ( subdevice < 0 ) break;
// Found the requested index: remember its "hw:card,subdevice" name.
6987 if ( nDevices == device ) {
6988 sprintf( name, "hw:%d,%d", card, subdevice );
6994 snd_ctl_close( chandle );
6995 snd_card_next( &card );
// The "default" pcm is appended after all hardware devices.
6998 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
6999 if ( result == 0 ) {
7000 if ( nDevices == device ) {
7001 strcpy( name, "default" );
7007 if ( nDevices == 0 ) {
7008 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7009 error( RtAudioError::INVALID_USE );
7013 if ( device >= nDevices ) {
7014 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7015 error( RtAudioError::INVALID_USE );
7021 // If a stream is already open, we cannot probe the stream devices.
7022 // Thus, use the saved results.
7023 if ( stream_.state != STREAM_CLOSED &&
7024 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7025 snd_ctl_close( chandle );
7026 if ( device >= devices_.size() ) {
7027 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7028 error( RtAudioError::WARNING );
7031 return devices_[ device ];
7034 int openMode = SND_PCM_ASYNC;
7035 snd_pcm_stream_t stream;
7036 snd_pcm_info_t *pcminfo;
// alloca-based helpers: pcminfo/params live on this stack frame, no free.
7037 snd_pcm_info_alloca( &pcminfo );
7039 snd_pcm_hw_params_t *params;
// NOTE(review): "¶ms" is mojibake, presumably for "&params" — confirm
// against the upstream RtAudio sources.
7040 snd_pcm_hw_params_alloca( ¶ms );
7042 // First try for playback unless default device (which has subdev -1)
7043 stream = SND_PCM_STREAM_PLAYBACK;
7044 snd_pcm_info_set_stream( pcminfo, stream );
7045 if ( subdevice != -1 ) {
7046 snd_pcm_info_set_device( pcminfo, subdevice );
7047 snd_pcm_info_set_subdevice( pcminfo, 0 );
7049 result = snd_ctl_pcm_info( chandle, pcminfo );
7051 // Device probably doesn't support playback.
// Open non-blocking so a busy device yields an error instead of hanging.
7056 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7058 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7059 errorText_ = errorStream_.str();
7060 error( RtAudioError::WARNING );
7064 // The device is open ... fill the parameter structure.
7065 result = snd_pcm_hw_params_any( phandle, params );
7067 snd_pcm_close( phandle );
7068 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7069 errorText_ = errorStream_.str();
7070 error( RtAudioError::WARNING );
7074 // Get output channel information.
7076 result = snd_pcm_hw_params_get_channels_max( params, &value );
7078 snd_pcm_close( phandle );
7079 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7080 errorText_ = errorStream_.str();
7081 error( RtAudioError::WARNING );
7084 info.outputChannels = value;
7085 snd_pcm_close( phandle );
// Repeat the whole probe for the capture direction.
7088 stream = SND_PCM_STREAM_CAPTURE;
7089 snd_pcm_info_set_stream( pcminfo, stream );
7091 // Now try for capture unless default device (with subdev = -1)
7092 if ( subdevice != -1 ) {
7093 result = snd_ctl_pcm_info( chandle, pcminfo );
7094 snd_ctl_close( chandle );
7096 // Device probably doesn't support capture.
7097 if ( info.outputChannels == 0 ) return info;
// Playback-only device: jump straight to rate/format probing.
7098 goto probeParameters;
7102 snd_ctl_close( chandle );
7104 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7106 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7107 errorText_ = errorStream_.str();
7108 error( RtAudioError::WARNING );
7109 if ( info.outputChannels == 0 ) return info;
7110 goto probeParameters;
7113 // The device is open ... fill the parameter structure.
7114 result = snd_pcm_hw_params_any( phandle, params );
7116 snd_pcm_close( phandle );
7117 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7118 errorText_ = errorStream_.str();
7119 error( RtAudioError::WARNING );
7120 if ( info.outputChannels == 0 ) return info;
7121 goto probeParameters;
7124 result = snd_pcm_hw_params_get_channels_max( params, &value );
7126 snd_pcm_close( phandle );
7127 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7128 errorText_ = errorStream_.str();
7129 error( RtAudioError::WARNING );
7130 if ( info.outputChannels == 0 ) return info;
7131 goto probeParameters;
7133 info.inputChannels = value;
7134 snd_pcm_close( phandle );
7136 // If device opens for both playback and capture, we determine the channels.
7137 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7138 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7140 // ALSA doesn't provide default devices so we'll use the first available one.
7141 if ( device == 0 && info.outputChannels > 0 )
7142 info.isDefaultOutput = true;
7143 if ( device == 0 && info.inputChannels > 0 )
7144 info.isDefaultInput = true;
7147 // At this point, we just need to figure out the supported data
7148 // formats and sample rates. We'll proceed by opening the device in
7149 // the direction with the maximum number of channels, or playback if
7150 // they are equal. This might limit our sample rate options, but so
7153 if ( info.outputChannels >= info.inputChannels )
7154 stream = SND_PCM_STREAM_PLAYBACK;
7156 stream = SND_PCM_STREAM_CAPTURE;
7157 snd_pcm_info_set_stream( pcminfo, stream );
7159 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7161 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7162 errorText_ = errorStream_.str();
7163 error( RtAudioError::WARNING );
7167 // The device is open ... fill the parameter structure.
7168 result = snd_pcm_hw_params_any( phandle, params );
7170 snd_pcm_close( phandle );
7171 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7172 errorText_ = errorStream_.str();
7173 error( RtAudioError::WARNING );
7177 // Test our discrete set of sample rate values.
// Walks the static SAMPLE_RATES table (MAX_SAMPLE_RATES entries, see head
// of file); preferredSampleRate is the highest supported rate <= 48000.
7178 info.sampleRates.clear();
7179 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7180 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7181 info.sampleRates.push_back( SAMPLE_RATES[i] );
7183 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7184 info.preferredSampleRate = SAMPLE_RATES[i];
7187 if ( info.sampleRates.size() == 0 ) {
7188 snd_pcm_close( phandle );
7189 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7190 errorText_ = errorStream_.str();
7191 error( RtAudioError::WARNING );
7195 // Probe the supported data formats ... we don't care about endian-ness just yet
7196 snd_pcm_format_t format;
7197 info.nativeFormats = 0;
7198 format = SND_PCM_FORMAT_S8;
7199 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7200 info.nativeFormats |= RTAUDIO_SINT8;
7201 format = SND_PCM_FORMAT_S16;
7202 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7203 info.nativeFormats |= RTAUDIO_SINT16;
7204 format = SND_PCM_FORMAT_S24;
7205 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7206 info.nativeFormats |= RTAUDIO_SINT24;
7207 format = SND_PCM_FORMAT_S32;
7208 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7209 info.nativeFormats |= RTAUDIO_SINT32;
7210 format = SND_PCM_FORMAT_FLOAT;
7211 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7212 info.nativeFormats |= RTAUDIO_FLOAT32;
7213 format = SND_PCM_FORMAT_FLOAT64;
7214 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7215 info.nativeFormats |= RTAUDIO_FLOAT64;
7217 // Check that we have at least one supported format
7218 if ( info.nativeFormats == 0 ) {
7219 snd_pcm_close( phandle );
7220 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7221 errorText_ = errorStream_.str();
7222 error( RtAudioError::WARNING );
7226 // Get the device name
7228 result = snd_card_get_name( card, &cardname );
7229 if ( result >= 0 ) {
7230 sprintf( name, "hw:%s,%d", cardname, subdevice );
7235 // That's all ... close the device and return
7236 snd_pcm_close( phandle );
// Snapshots DeviceInfo for every device into devices_. Called from
// probeDeviceOpen() before a pcm is opened, because getDeviceInfo() cannot
// probe a device that is already open and instead serves these cached
// results.
7241 void RtApiAlsa :: saveDeviceInfo( void )
7245 unsigned int nDevices = getDeviceCount();
7246 devices_.resize( nDevices );
7247 for ( unsigned int i=0; i<nDevices; i++ )
7248 devices_[i] = getDeviceInfo( i );
// Opens one direction (OUTPUT or INPUT) of an ALSA stream on the given
// device index and configures it: access mode (interleaved or not), sample
// format (with fallback to the best natively supported format), sample
// rate, channel count, period size (*bufferSize, adjusted in place) and
// period count. Allocates user/device conversion buffers, stores the pcm
// handle in an AlsaHandle, links playback+capture pcms for duplex, and
// spawns the callback thread (optionally with SCHED_RR realtime priority).
// Returns via SUCCESS/FAILURE exits; the trailing lines are the shared
// error-cleanup path. NOTE(review): this extraction is missing many
// interior lines (braces, "if ( result < 0 )" guards, goto targets,
// "return SUCCESS/FAILURE" statements), so control flow below is partial.
7251 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7252 unsigned int firstChannel, unsigned int sampleRate,
7253 RtAudioFormat format, unsigned int *bufferSize,
7254 RtAudio::StreamOptions *options )
7257 #if defined(__RTAUDIO_DEBUG__)
7259 snd_output_stdio_attach(&out, stderr, 0);
7262 // I'm not using the "plug" interface ... too much inconsistent behavior.
7264 unsigned nDevices = 0;
7265 int result, subdevice, card;
// RTAUDIO_ALSA_USE_DEFAULT bypasses the card walk and opens "default".
7269 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7270 snprintf(name, sizeof(name), "%s", "default");
7272 // Count cards and devices
// Same index-resolution walk as getDeviceCount()/getDeviceInfo().
7274 snd_card_next( &card );
7275 while ( card >= 0 ) {
7276 sprintf( name, "hw:%d", card );
7277 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7279 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7280 errorText_ = errorStream_.str();
7285 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7286 if ( result < 0 ) break;
7287 if ( subdevice < 0 ) break;
7288 if ( nDevices == device ) {
7289 sprintf( name, "hw:%d,%d", card, subdevice );
7290 snd_ctl_close( chandle );
7295 snd_ctl_close( chandle );
7296 snd_card_next( &card );
7299 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7300 if ( result == 0 ) {
7301 if ( nDevices == device ) {
7302 strcpy( name, "default" );
7308 if ( nDevices == 0 ) {
7309 // This should not happen because a check is made before this function is called.
7310 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7314 if ( device >= nDevices ) {
7315 // This should not happen because a check is made before this function is called.
7316 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7323 // The getDeviceInfo() function will not work for a device that is
7324 // already open. Thus, we'll probe the system before opening a
7325 // stream and save the results for use by getDeviceInfo().
7326 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7327 this->saveDeviceInfo();
7329 snd_pcm_stream_t stream;
7330 if ( mode == OUTPUT )
7331 stream = SND_PCM_STREAM_PLAYBACK;
7333 stream = SND_PCM_STREAM_CAPTURE;
// Note: unlike the probe paths above, the real open is blocking (no
// SND_PCM_NONBLOCK) so reads/writes wait for the device.
7336 int openMode = SND_PCM_ASYNC;
7337 result = snd_pcm_open( &phandle, name, stream, openMode );
7339 if ( mode == OUTPUT )
7340 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7342 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7343 errorText_ = errorStream_.str();
7347 // Fill the parameter structure.
7348 snd_pcm_hw_params_t *hw_params;
7349 snd_pcm_hw_params_alloca( &hw_params );
7350 result = snd_pcm_hw_params_any( phandle, hw_params );
7352 snd_pcm_close( phandle );
7353 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7354 errorText_ = errorStream_.str();
7358 #if defined(__RTAUDIO_DEBUG__)
7359 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7360 snd_pcm_hw_params_dump( hw_params, out );
7363 // Set access ... check user preference.
// Try the user's preferred access mode first, and fall back to the other
// one, recording which the DEVICE actually uses in deviceInterleaved[mode]
// (buffer conversion later reconciles any mismatch with userInterleaved).
7364 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7365 stream_.userInterleaved = false;
7366 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7368 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7369 stream_.deviceInterleaved[mode] = true;
7372 stream_.deviceInterleaved[mode] = false;
7375 stream_.userInterleaved = true;
7376 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7378 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7379 stream_.deviceInterleaved[mode] = false;
7382 stream_.deviceInterleaved[mode] = true;
7386 snd_pcm_close( phandle );
7387 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7388 errorText_ = errorStream_.str();
7392 // Determine how to set the device format.
7393 stream_.userFormat = format;
7394 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7396 if ( format == RTAUDIO_SINT8 )
7397 deviceFormat = SND_PCM_FORMAT_S8;
7398 else if ( format == RTAUDIO_SINT16 )
7399 deviceFormat = SND_PCM_FORMAT_S16;
7400 else if ( format == RTAUDIO_SINT24 )
7401 deviceFormat = SND_PCM_FORMAT_S24;
7402 else if ( format == RTAUDIO_SINT32 )
7403 deviceFormat = SND_PCM_FORMAT_S32;
7404 else if ( format == RTAUDIO_FLOAT32 )
7405 deviceFormat = SND_PCM_FORMAT_FLOAT;
7406 else if ( format == RTAUDIO_FLOAT64 )
7407 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7409 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7410 stream_.deviceFormat[mode] = format;
7414 // The user requested format is not natively supported by the device.
// Fallback chain: try formats from widest (FLOAT64) down to narrowest (S8);
// format conversion between userFormat and deviceFormat happens later.
7415 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7416 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7417 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7421 deviceFormat = SND_PCM_FORMAT_FLOAT;
7422 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7423 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7427 deviceFormat = SND_PCM_FORMAT_S32;
7428 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7429 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7433 deviceFormat = SND_PCM_FORMAT_S24;
7434 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7435 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7439 deviceFormat = SND_PCM_FORMAT_S16;
7440 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7441 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7445 deviceFormat = SND_PCM_FORMAT_S8;
7446 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7447 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7451 // If we get here, no supported format was found.
7452 snd_pcm_close( phandle );
7453 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7454 errorText_ = errorStream_.str();
7458 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7460 snd_pcm_close( phandle );
7461 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7462 errorText_ = errorStream_.str();
7466 // Determine whether byte-swaping is necessary.
// 8-bit samples never need swapping; for wider formats, compare the format
// endianness with the CPU's.
7467 stream_.doByteSwap[mode] = false;
7468 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7469 result = snd_pcm_format_cpu_endian( deviceFormat );
7471 stream_.doByteSwap[mode] = true;
7472 else if (result < 0) {
7473 snd_pcm_close( phandle );
7474 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7475 errorText_ = errorStream_.str();
7480 // Set the sample rate.
7481 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7483 snd_pcm_close( phandle );
7484 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7485 errorText_ = errorStream_.str();
7489 // Determine the number of channels for this device. We support a possible
7490 // minimum device channel number > than the value requested by the user.
7491 stream_.nUserChannels[mode] = channels;
7493 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7494 unsigned int deviceChannels = value;
7495 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7496 snd_pcm_close( phandle );
7497 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7498 errorText_ = errorStream_.str();
7502 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7504 snd_pcm_close( phandle );
7505 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7506 errorText_ = errorStream_.str();
7509 deviceChannels = value;
7510 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7511 stream_.nDeviceChannels[mode] = deviceChannels;
7513 // Set the device channels.
7514 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7516 snd_pcm_close( phandle );
7517 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7518 errorText_ = errorStream_.str();
7522 // Set the buffer (or period) size.
// The device may adjust periodSize; *bufferSize is updated to the value
// actually granted so the caller sees the real period size.
7524 snd_pcm_uframes_t periodSize = *bufferSize;
7525 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7527 snd_pcm_close( phandle );
7528 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7529 errorText_ = errorStream_.str();
7532 *bufferSize = periodSize;
7534 // Set the buffer number, which in ALSA is referred to as the "period".
7535 unsigned int periods = 0;
7536 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7537 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7538 if ( periods < 2 ) periods = 4; // a fairly safe default value
7539 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7541 snd_pcm_close( phandle );
7542 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7543 errorText_ = errorStream_.str();
7547 // If attempting to setup a duplex stream, the bufferSize parameter
7548 // MUST be the same in both directions!
7549 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7550 snd_pcm_close( phandle );
7551 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7552 errorText_ = errorStream_.str();
7556 stream_.bufferSize = *bufferSize;
7558 // Install the hardware configuration
7559 result = snd_pcm_hw_params( phandle, hw_params );
7561 snd_pcm_close( phandle );
7562 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7563 errorText_ = errorStream_.str();
7567 #if defined(__RTAUDIO_DEBUG__)
7568 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7569 snd_pcm_hw_params_dump( hw_params, out );
7572 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7573 snd_pcm_sw_params_t *sw_params = NULL;
7574 snd_pcm_sw_params_alloca( &sw_params );
7575 snd_pcm_sw_params_current( phandle, sw_params );
7576 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
// ULONG_MAX stop threshold: never auto-stop on underrun/overrun.
7577 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7578 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7580 // The following two settings were suggested by Theo Veenker
7581 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7582 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7584 // here are two options for a fix
7585 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7586 snd_pcm_uframes_t val;
// Use the ring-buffer boundary as the silence size so the whole buffer is
// silenced (the commented ULONG_MAX variant above is the alternative fix).
7587 snd_pcm_sw_params_get_boundary( sw_params, &val );
7588 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7590 result = snd_pcm_sw_params( phandle, sw_params );
7592 snd_pcm_close( phandle );
7593 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7594 errorText_ = errorStream_.str();
7598 #if defined(__RTAUDIO_DEBUG__)
7599 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7600 snd_pcm_sw_params_dump( sw_params, out );
7603 // Set flags for buffer conversion
// Conversion is needed when format, channel count, or interleaving differ
// between the user side and the device side.
7604 stream_.doConvertBuffer[mode] = false;
7605 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7606 stream_.doConvertBuffer[mode] = true;
7607 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7608 stream_.doConvertBuffer[mode] = true;
7609 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7610 stream_.nUserChannels[mode] > 1 )
7611 stream_.doConvertBuffer[mode] = true;
7613 // Allocate the ApiHandle if necessary and then save.
7614 AlsaHandle *apiInfo = 0;
7615 if ( stream_.apiHandle == 0 ) {
7617 apiInfo = (AlsaHandle *) new AlsaHandle;
7619 catch ( std::bad_alloc& ) {
7620 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7624 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7625 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7629 stream_.apiHandle = (void *) apiInfo;
7630 apiInfo->handles[0] = 0;
7631 apiInfo->handles[1] = 0;
// Second direction of a duplex open: reuse the existing AlsaHandle.
7634 apiInfo = (AlsaHandle *) stream_.apiHandle;
7636 apiInfo->handles[mode] = phandle;
7639 // Allocate necessary internal buffers.
7640 unsigned long bufferBytes;
7641 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7642 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7643 if ( stream_.userBuffer[mode] == NULL ) {
7644 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7648 if ( stream_.doConvertBuffer[mode] ) {
// The single deviceBuffer is shared by both directions; only reallocate
// if the input side needs more bytes than the output side already has.
7650 bool makeBuffer = true;
7651 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7652 if ( mode == INPUT ) {
7653 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7654 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7655 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7660 bufferBytes *= *bufferSize;
7661 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7662 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7663 if ( stream_.deviceBuffer == NULL ) {
7664 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7670 stream_.sampleRate = sampleRate;
7671 stream_.nBuffers = periods;
7672 stream_.device[mode] = device;
7673 stream_.state = STREAM_STOPPED;
7675 // Setup the buffer conversion information structure.
7676 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7678 // Setup thread if necessary.
7679 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7680 // We had already set up an output stream.
7681 stream_.mode = DUPLEX;
7682 // Link the streams if possible.
// snd_pcm_link makes start/stop of the two pcms atomic; failure is only a
// warning (the stream still works, just unsynchronized).
7683 apiInfo->synchronized = false;
7684 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7685 apiInfo->synchronized = true;
7687 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7688 error( RtAudioError::WARNING );
7692 stream_.mode = mode;
7694 // Setup callback thread.
7695 stream_.callbackInfo.object = (void *) this;
7697 // Set the thread attributes for joinable and realtime scheduling
7698 // priority (optional). The higher priority will only take affect
7699 // if the program is run as root or suid. Note, under Linux
7700 // processes with CAP_SYS_NICE privilege, a user can change
7701 // scheduling policy and priority (thus need not be root). See
7702 // POSIX "capabilities".
7703 pthread_attr_t attr;
7704 pthread_attr_init( &attr );
7705 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7706 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
7707 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7708 stream_.callbackInfo.doRealtime = true;
7709 struct sched_param param;
// Clamp the requested priority into SCHED_RR's valid range.
7710 int priority = options->priority;
7711 int min = sched_get_priority_min( SCHED_RR );
7712 int max = sched_get_priority_max( SCHED_RR );
7713 if ( priority < min ) priority = min;
7714 else if ( priority > max ) priority = max;
7715 param.sched_priority = priority;
7717 // Set the policy BEFORE the priority. Otherwise it fails.
7718 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7719 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7720 // This is definitely required. Otherwise it fails.
7721 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
// NOTE(review): "¶m" is mojibake, presumably for "&param" — confirm
// against the upstream RtAudio sources.
7722 pthread_attr_setschedparam(&attr, ¶m);
7725 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7727 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7730 stream_.callbackInfo.isRunning = true;
7731 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7732 pthread_attr_destroy( &attr );
7734 // Failed. Try instead with default attributes.
7735 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
7737 stream_.callbackInfo.isRunning = false;
7738 errorText_ = "RtApiAlsa::error creating callback thread!";
// Shared error-cleanup path: release condvar, pcm handles, AlsaHandle and
// all buffers, then mark the stream closed.
7748 pthread_cond_destroy( &apiInfo->runnable_cv );
7749 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7750 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7752 stream_.apiHandle = 0;
7755 if ( phandle) snd_pcm_close( phandle );
7757 for ( int i=0; i<2; i++ ) {
7758 if ( stream_.userBuffer[i] ) {
7759 free( stream_.userBuffer[i] );
7760 stream_.userBuffer[i] = 0;
7764 if ( stream_.deviceBuffer ) {
7765 free( stream_.deviceBuffer );
7766 stream_.deviceBuffer = 0;
7769 stream_.state = STREAM_CLOSED;
// Tears down the open stream: wakes and joins the callback thread, drops
// any running pcms, closes the pcm handles, destroys the AlsaHandle, frees
// the user/device buffers, and resets stream_ to UNINITIALIZED/CLOSED.
// NOTE(review): some closing braces and the AlsaHandle delete are missing
// from this extraction.
7773 void RtApiAlsa :: closeStream()
7775 if ( stream_.state == STREAM_CLOSED ) {
7776 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
7777 error( RtAudioError::WARNING );
7781 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// Clear the run flag first so the callback loop exits, then wake it in
// case it is blocked on runnable_cv (stream stopped), then join.
7782 stream_.callbackInfo.isRunning = false;
7783 MUTEX_LOCK( &stream_.mutex );
7784 if ( stream_.state == STREAM_STOPPED ) {
7785 apiInfo->runnable = true;
7786 pthread_cond_signal( &apiInfo->runnable_cv );
7788 MUTEX_UNLOCK( &stream_.mutex );
7789 pthread_join( stream_.callbackInfo.thread, NULL );
// If still running, drop (abort) both directions immediately.
7791 if ( stream_.state == STREAM_RUNNING ) {
7792 stream_.state = STREAM_STOPPED;
7793 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
7794 snd_pcm_drop( apiInfo->handles[0] );
7795 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
7796 snd_pcm_drop( apiInfo->handles[1] );
7800 pthread_cond_destroy( &apiInfo->runnable_cv );
7801 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7802 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7804 stream_.apiHandle = 0;
7807 for ( int i=0; i<2; i++ ) {
7808 if ( stream_.userBuffer[i] ) {
7809 free( stream_.userBuffer[i] );
7810 stream_.userBuffer[i] = 0;
7814 if ( stream_.deviceBuffer ) {
7815 free( stream_.deviceBuffer );
7816 stream_.deviceBuffer = 0;
7819 stream_.mode = UNINITIALIZED;
7820 stream_.state = STREAM_CLOSED;
// Starts a stopped stream: prepares the playback and/or capture pcms (if
// not already SND_PCM_STATE_PREPARED), marks the stream RUNNING, and wakes
// the callback thread via runnable/runnable_cv. Errors raise SYSTEM_ERROR
// after unlocking. NOTE(review): error-branch braces and an unlock label
// are missing from this extraction.
7823 void RtApiAlsa :: startStream()
7825 // This method calls snd_pcm_prepare if the device isn't already in that state.
7828 if ( stream_.state == STREAM_RUNNING ) {
7829 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
7830 error( RtAudioError::WARNING );
7834 MUTEX_LOCK( &stream_.mutex );
7837 snd_pcm_state_t state;
7838 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7839 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7840 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7841 state = snd_pcm_state( handle[0] );
7842 if ( state != SND_PCM_STATE_PREPARED ) {
7843 result = snd_pcm_prepare( handle[0] );
7845 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
7846 errorText_ = errorStream_.str();
// Capture side only needs separate handling when the pcms are not linked
// (a linked pair is prepared/started together through handle[0]).
7852 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7853 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
7854 state = snd_pcm_state( handle[1] );
7855 if ( state != SND_PCM_STATE_PREPARED ) {
7856 result = snd_pcm_prepare( handle[1] );
7858 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
7859 errorText_ = errorStream_.str();
7865 stream_.state = STREAM_RUNNING;
// Wake the callback thread waiting on runnable_cv.
7868 apiInfo->runnable = true;
7869 pthread_cond_signal( &apiInfo->runnable_cv );
7870 MUTEX_UNLOCK( &stream_.mutex );
7872 if ( result >= 0 ) return;
7873 error( RtAudioError::SYSTEM_ERROR );
7876 void RtApiAlsa :: stopStream()
7879 if ( stream_.state == STREAM_STOPPED ) {
7880 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
7881 error( RtAudioError::WARNING );
7885 stream_.state = STREAM_STOPPED;
7886 MUTEX_LOCK( &stream_.mutex );
7889 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7890 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7891 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7892 if ( apiInfo->synchronized )
7893 result = snd_pcm_drop( handle[0] );
7895 result = snd_pcm_drain( handle[0] );
7897 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
7898 errorText_ = errorStream_.str();
7903 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7904 result = snd_pcm_drop( handle[1] );
7906 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
7907 errorText_ = errorStream_.str();
7913 apiInfo->runnable = false; // fixes high CPU usage when stopped
7914 MUTEX_UNLOCK( &stream_.mutex );
7916 if ( result >= 0 ) return;
7917 error( RtAudioError::SYSTEM_ERROR );
7920 void RtApiAlsa :: abortStream()
7923 if ( stream_.state == STREAM_STOPPED ) {
7924 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
7925 error( RtAudioError::WARNING );
7929 stream_.state = STREAM_STOPPED;
7930 MUTEX_LOCK( &stream_.mutex );
7933 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7934 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7935 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7936 result = snd_pcm_drop( handle[0] );
7938 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
7939 errorText_ = errorStream_.str();
7944 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7945 result = snd_pcm_drop( handle[1] );
7947 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
7948 errorText_ = errorStream_.str();
7954 apiInfo->runnable = false; // fixes high CPU usage when stopped
7955 MUTEX_UNLOCK( &stream_.mutex );
7957 if ( result >= 0 ) return;
7958 error( RtAudioError::SYSTEM_ERROR );
7961 void RtApiAlsa :: callbackEvent()
7963 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7964 if ( stream_.state == STREAM_STOPPED ) {
7965 MUTEX_LOCK( &stream_.mutex );
7966 while ( !apiInfo->runnable )
7967 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
7969 if ( stream_.state != STREAM_RUNNING ) {
7970 MUTEX_UNLOCK( &stream_.mutex );
7973 MUTEX_UNLOCK( &stream_.mutex );
7976 if ( stream_.state == STREAM_CLOSED ) {
7977 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
7978 error( RtAudioError::WARNING );
7982 int doStopStream = 0;
7983 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
7984 double streamTime = getStreamTime();
7985 RtAudioStreamStatus status = 0;
7986 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
7987 status |= RTAUDIO_OUTPUT_UNDERFLOW;
7988 apiInfo->xrun[0] = false;
7990 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
7991 status |= RTAUDIO_INPUT_OVERFLOW;
7992 apiInfo->xrun[1] = false;
7994 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
7995 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
7997 if ( doStopStream == 2 ) {
8002 MUTEX_LOCK( &stream_.mutex );
8004 // The state might change while waiting on a mutex.
8005 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8011 snd_pcm_sframes_t frames;
8012 RtAudioFormat format;
8013 handle = (snd_pcm_t **) apiInfo->handles;
8015 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8017 // Setup parameters.
8018 if ( stream_.doConvertBuffer[1] ) {
8019 buffer = stream_.deviceBuffer;
8020 channels = stream_.nDeviceChannels[1];
8021 format = stream_.deviceFormat[1];
8024 buffer = stream_.userBuffer[1];
8025 channels = stream_.nUserChannels[1];
8026 format = stream_.userFormat;
8029 // Read samples from device in interleaved/non-interleaved format.
8030 if ( stream_.deviceInterleaved[1] )
8031 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
8033 void *bufs[channels];
8034 size_t offset = stream_.bufferSize * formatBytes( format );
8035 for ( int i=0; i<channels; i++ )
8036 bufs[i] = (void *) (buffer + (i * offset));
8037 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
8040 if ( result < (int) stream_.bufferSize ) {
8041 // Either an error or overrun occured.
8042 if ( result == -EPIPE ) {
8043 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8044 if ( state == SND_PCM_STATE_XRUN ) {
8045 apiInfo->xrun[1] = true;
8046 result = snd_pcm_prepare( handle[1] );
8048 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8049 errorText_ = errorStream_.str();
8053 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8054 errorText_ = errorStream_.str();
8058 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8059 errorText_ = errorStream_.str();
8061 error( RtAudioError::WARNING );
8065 // Do byte swapping if necessary.
8066 if ( stream_.doByteSwap[1] )
8067 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8069 // Do buffer conversion if necessary.
8070 if ( stream_.doConvertBuffer[1] )
8071 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8073 // Check stream latency
8074 result = snd_pcm_delay( handle[1], &frames );
8075 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
8080 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8082 // Setup parameters and do buffer conversion if necessary.
8083 if ( stream_.doConvertBuffer[0] ) {
8084 buffer = stream_.deviceBuffer;
8085 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8086 channels = stream_.nDeviceChannels[0];
8087 format = stream_.deviceFormat[0];
8090 buffer = stream_.userBuffer[0];
8091 channels = stream_.nUserChannels[0];
8092 format = stream_.userFormat;
8095 // Do byte swapping if necessary.
8096 if ( stream_.doByteSwap[0] )
8097 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8099 // Write samples to device in interleaved/non-interleaved format.
8100 if ( stream_.deviceInterleaved[0] )
8101 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
8103 void *bufs[channels];
8104 size_t offset = stream_.bufferSize * formatBytes( format );
8105 for ( int i=0; i<channels; i++ )
8106 bufs[i] = (void *) (buffer + (i * offset));
8107 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
8110 if ( result < (int) stream_.bufferSize ) {
8111 // Either an error or underrun occured.
8112 if ( result == -EPIPE ) {
8113 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8114 if ( state == SND_PCM_STATE_XRUN ) {
8115 apiInfo->xrun[0] = true;
8116 result = snd_pcm_prepare( handle[0] );
8118 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8119 errorText_ = errorStream_.str();
8122 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8125 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8126 errorText_ = errorStream_.str();
8130 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8131 errorText_ = errorStream_.str();
8133 error( RtAudioError::WARNING );
8137 // Check stream latency
8138 result = snd_pcm_delay( handle[0], &frames );
8139 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8143 MUTEX_UNLOCK( &stream_.mutex );
8145 RtApi::tickStreamTime();
8146 if ( doStopStream == 1 ) this->stopStream();
8149 static void *alsaCallbackHandler( void *ptr )
8151 CallbackInfo *info = (CallbackInfo *) ptr;
8152 RtApiAlsa *object = (RtApiAlsa *) info->object;
8153 bool *isRunning = &info->isRunning;
8155 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8156 if ( info->doRealtime ) {
8157 std::cerr << "RtAudio alsa: " <<
8158 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8159 "running realtime scheduling" << std::endl;
8163 while ( *isRunning == true ) {
8164 pthread_testcancel();
8165 object->callbackEvent();
8168 pthread_exit( NULL );
8171 //******************** End of __LINUX_ALSA__ *********************//
8174 #if defined(__LINUX_PULSE__)
8176 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8177 // and Tristan Matthews.
8179 #include <pulse/error.h>
8180 #include <pulse/simple.h>
// Zero-terminated list of sample rates the PulseAudio backend advertises.
static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
                                                      44100, 48000, 96000, 0};
8186 struct rtaudio_pa_format_mapping_t {
8187 RtAudioFormat rtaudio_format;
8188 pa_sample_format_t pa_format;
8191 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8192 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8193 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8194 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8195 {0, PA_SAMPLE_INVALID}};
8197 struct PulseAudioHandle {
8201 pthread_cond_t runnable_cv;
8203 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
8206 RtApiPulse::~RtApiPulse()
8208 if ( stream_.state != STREAM_CLOSED )
8212 unsigned int RtApiPulse::getDeviceCount( void )
8217 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8219 RtAudio::DeviceInfo info;
8221 info.name = "PulseAudio";
8222 info.outputChannels = 2;
8223 info.inputChannels = 2;
8224 info.duplexChannels = 2;
8225 info.isDefaultOutput = true;
8226 info.isDefaultInput = true;
8228 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8229 info.sampleRates.push_back( *sr );
8231 info.preferredSampleRate = 48000;
8232 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
8237 static void *pulseaudio_callback( void * user )
8239 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8240 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8241 volatile bool *isRunning = &cbi->isRunning;
8243 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8244 if (cbi->doRealtime) {
8245 std::cerr << "RtAudio pulse: " <<
8246 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8247 "running realtime scheduling" << std::endl;
8251 while ( *isRunning ) {
8252 pthread_testcancel();
8253 context->callbackEvent();
8256 pthread_exit( NULL );
8259 void RtApiPulse::closeStream( void )
8261 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8263 stream_.callbackInfo.isRunning = false;
8265 MUTEX_LOCK( &stream_.mutex );
8266 if ( stream_.state == STREAM_STOPPED ) {
8267 pah->runnable = true;
8268 pthread_cond_signal( &pah->runnable_cv );
8270 MUTEX_UNLOCK( &stream_.mutex );
8272 pthread_join( pah->thread, 0 );
8273 if ( pah->s_play ) {
8274 pa_simple_flush( pah->s_play, NULL );
8275 pa_simple_free( pah->s_play );
8278 pa_simple_free( pah->s_rec );
8280 pthread_cond_destroy( &pah->runnable_cv );
8282 stream_.apiHandle = 0;
8285 if ( stream_.userBuffer[0] ) {
8286 free( stream_.userBuffer[0] );
8287 stream_.userBuffer[0] = 0;
8289 if ( stream_.userBuffer[1] ) {
8290 free( stream_.userBuffer[1] );
8291 stream_.userBuffer[1] = 0;
8294 stream_.state = STREAM_CLOSED;
8295 stream_.mode = UNINITIALIZED;
8298 void RtApiPulse::callbackEvent( void )
8300 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8302 if ( stream_.state == STREAM_STOPPED ) {
8303 MUTEX_LOCK( &stream_.mutex );
8304 while ( !pah->runnable )
8305 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8307 if ( stream_.state != STREAM_RUNNING ) {
8308 MUTEX_UNLOCK( &stream_.mutex );
8311 MUTEX_UNLOCK( &stream_.mutex );
8314 if ( stream_.state == STREAM_CLOSED ) {
8315 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8316 "this shouldn't happen!";
8317 error( RtAudioError::WARNING );
8321 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8322 double streamTime = getStreamTime();
8323 RtAudioStreamStatus status = 0;
8324 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8325 stream_.bufferSize, streamTime, status,
8326 stream_.callbackInfo.userData );
8328 if ( doStopStream == 2 ) {
8333 MUTEX_LOCK( &stream_.mutex );
8334 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8335 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8337 if ( stream_.state != STREAM_RUNNING )
8342 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8343 if ( stream_.doConvertBuffer[OUTPUT] ) {
8344 convertBuffer( stream_.deviceBuffer,
8345 stream_.userBuffer[OUTPUT],
8346 stream_.convertInfo[OUTPUT] );
8347 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8348 formatBytes( stream_.deviceFormat[OUTPUT] );
8350 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8351 formatBytes( stream_.userFormat );
8353 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8354 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8355 pa_strerror( pa_error ) << ".";
8356 errorText_ = errorStream_.str();
8357 error( RtAudioError::WARNING );
8361 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8362 if ( stream_.doConvertBuffer[INPUT] )
8363 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8364 formatBytes( stream_.deviceFormat[INPUT] );
8366 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8367 formatBytes( stream_.userFormat );
8369 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8370 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8371 pa_strerror( pa_error ) << ".";
8372 errorText_ = errorStream_.str();
8373 error( RtAudioError::WARNING );
8375 if ( stream_.doConvertBuffer[INPUT] ) {
8376 convertBuffer( stream_.userBuffer[INPUT],
8377 stream_.deviceBuffer,
8378 stream_.convertInfo[INPUT] );
8383 MUTEX_UNLOCK( &stream_.mutex );
8384 RtApi::tickStreamTime();
8386 if ( doStopStream == 1 )
8390 void RtApiPulse::startStream( void )
8392 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8394 if ( stream_.state == STREAM_CLOSED ) {
8395 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8396 error( RtAudioError::INVALID_USE );
8399 if ( stream_.state == STREAM_RUNNING ) {
8400 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8401 error( RtAudioError::WARNING );
8405 MUTEX_LOCK( &stream_.mutex );
8407 stream_.state = STREAM_RUNNING;
8409 pah->runnable = true;
8410 pthread_cond_signal( &pah->runnable_cv );
8411 MUTEX_UNLOCK( &stream_.mutex );
8414 void RtApiPulse::stopStream( void )
8416 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8418 if ( stream_.state == STREAM_CLOSED ) {
8419 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8420 error( RtAudioError::INVALID_USE );
8423 if ( stream_.state == STREAM_STOPPED ) {
8424 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8425 error( RtAudioError::WARNING );
8429 stream_.state = STREAM_STOPPED;
8430 MUTEX_LOCK( &stream_.mutex );
8432 if ( pah && pah->s_play ) {
8434 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8435 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8436 pa_strerror( pa_error ) << ".";
8437 errorText_ = errorStream_.str();
8438 MUTEX_UNLOCK( &stream_.mutex );
8439 error( RtAudioError::SYSTEM_ERROR );
8444 stream_.state = STREAM_STOPPED;
8445 MUTEX_UNLOCK( &stream_.mutex );
8448 void RtApiPulse::abortStream( void )
8450 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8452 if ( stream_.state == STREAM_CLOSED ) {
8453 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8454 error( RtAudioError::INVALID_USE );
8457 if ( stream_.state == STREAM_STOPPED ) {
8458 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8459 error( RtAudioError::WARNING );
8463 stream_.state = STREAM_STOPPED;
8464 MUTEX_LOCK( &stream_.mutex );
8466 if ( pah && pah->s_play ) {
8468 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8469 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8470 pa_strerror( pa_error ) << ".";
8471 errorText_ = errorStream_.str();
8472 MUTEX_UNLOCK( &stream_.mutex );
8473 error( RtAudioError::SYSTEM_ERROR );
8478 stream_.state = STREAM_STOPPED;
8479 MUTEX_UNLOCK( &stream_.mutex );
8482 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8483 unsigned int channels, unsigned int firstChannel,
8484 unsigned int sampleRate, RtAudioFormat format,
8485 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8487 PulseAudioHandle *pah = 0;
8488 unsigned long bufferBytes = 0;
8491 if ( device != 0 ) return false;
8492 if ( mode != INPUT && mode != OUTPUT ) return false;
8493 if ( channels != 1 && channels != 2 ) {
8494 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8497 ss.channels = channels;
8499 if ( firstChannel != 0 ) return false;
8501 bool sr_found = false;
8502 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8503 if ( sampleRate == *sr ) {
8505 stream_.sampleRate = sampleRate;
8506 ss.rate = sampleRate;
8511 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
8516 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8517 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8518 if ( format == sf->rtaudio_format ) {
8520 stream_.userFormat = sf->rtaudio_format;
8521 stream_.deviceFormat[mode] = stream_.userFormat;
8522 ss.format = sf->pa_format;
8526 if ( !sf_found ) { // Use internal data format conversion.
8527 stream_.userFormat = format;
8528 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8529 ss.format = PA_SAMPLE_FLOAT32LE;
8532 // Set other stream parameters.
8533 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8534 else stream_.userInterleaved = true;
8535 stream_.deviceInterleaved[mode] = true;
8536 stream_.nBuffers = 1;
8537 stream_.doByteSwap[mode] = false;
8538 stream_.nUserChannels[mode] = channels;
8539 stream_.nDeviceChannels[mode] = channels + firstChannel;
8540 stream_.channelOffset[mode] = 0;
8541 std::string streamName = "RtAudio";
8543 // Set flags for buffer conversion.
8544 stream_.doConvertBuffer[mode] = false;
8545 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8546 stream_.doConvertBuffer[mode] = true;
8547 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8548 stream_.doConvertBuffer[mode] = true;
8550 // Allocate necessary internal buffers.
8551 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8552 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8553 if ( stream_.userBuffer[mode] == NULL ) {
8554 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8557 stream_.bufferSize = *bufferSize;
8559 if ( stream_.doConvertBuffer[mode] ) {
8561 bool makeBuffer = true;
8562 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8563 if ( mode == INPUT ) {
8564 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8565 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8566 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8571 bufferBytes *= *bufferSize;
8572 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8573 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8574 if ( stream_.deviceBuffer == NULL ) {
8575 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8581 stream_.device[mode] = device;
8583 // Setup the buffer conversion information structure.
8584 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8586 if ( !stream_.apiHandle ) {
8587 PulseAudioHandle *pah = new PulseAudioHandle;
8589 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8593 stream_.apiHandle = pah;
8594 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8595 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8599 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8602 if ( options && !options->streamName.empty() ) streamName = options->streamName;
8605 pa_buffer_attr buffer_attr;
8606 buffer_attr.fragsize = bufferBytes;
8607 buffer_attr.maxlength = -1;
8609 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8610 if ( !pah->s_rec ) {
8611 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8616 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8617 if ( !pah->s_play ) {
8618 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
8626 if ( stream_.mode == UNINITIALIZED )
8627 stream_.mode = mode;
8628 else if ( stream_.mode == mode )
8631 stream_.mode = DUPLEX;
8633 if ( !stream_.callbackInfo.isRunning ) {
8634 stream_.callbackInfo.object = this;
8636 stream_.state = STREAM_STOPPED;
8637 // Set the thread attributes for joinable and realtime scheduling
8638 // priority (optional). The higher priority will only take affect
8639 // if the program is run as root or suid. Note, under Linux
8640 // processes with CAP_SYS_NICE privilege, a user can change
8641 // scheduling policy and priority (thus need not be root). See
8642 // POSIX "capabilities".
8643 pthread_attr_t attr;
8644 pthread_attr_init( &attr );
8645 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8646 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8647 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8648 stream_.callbackInfo.doRealtime = true;
8649 struct sched_param param;
8650 int priority = options->priority;
8651 int min = sched_get_priority_min( SCHED_RR );
8652 int max = sched_get_priority_max( SCHED_RR );
8653 if ( priority < min ) priority = min;
8654 else if ( priority > max ) priority = max;
8655 param.sched_priority = priority;
8657 // Set the policy BEFORE the priority. Otherwise it fails.
8658 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8659 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8660 // This is definitely required. Otherwise it fails.
8661 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8662 pthread_attr_setschedparam(&attr, ¶m);
8665 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8667 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8670 stream_.callbackInfo.isRunning = true;
8671 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8672 pthread_attr_destroy(&attr);
8674 // Failed. Try instead with default attributes.
8675 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8677 stream_.callbackInfo.isRunning = false;
8678 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8687 if ( pah && stream_.callbackInfo.isRunning ) {
8688 pthread_cond_destroy( &pah->runnable_cv );
8690 stream_.apiHandle = 0;
8693 for ( int i=0; i<2; i++ ) {
8694 if ( stream_.userBuffer[i] ) {
8695 free( stream_.userBuffer[i] );
8696 stream_.userBuffer[i] = 0;
8700 if ( stream_.deviceBuffer ) {
8701 free( stream_.deviceBuffer );
8702 stream_.deviceBuffer = 0;
8705 stream_.state = STREAM_CLOSED;
8709 //******************** End of __LINUX_PULSE__ *********************//
8712 #if defined(__LINUX_OSS__)
8715 #include <sys/ioctl.h>
8718 #include <sys/soundcard.h>
8722 static void *ossCallbackHandler(void * ptr);
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
  int id[2];    // device ids
  bool xrun[2];
  bool triggered;
  pthread_cond_t runnable;

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
8736 RtApiOss :: RtApiOss()
8738 // Nothing to do here.
8741 RtApiOss :: ~RtApiOss()
8743 if ( stream_.state != STREAM_CLOSED ) closeStream();
8746 unsigned int RtApiOss :: getDeviceCount( void )
8748 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8749 if ( mixerfd == -1 ) {
8750 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
8751 error( RtAudioError::WARNING );
8755 oss_sysinfo sysinfo;
8756 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
8758 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
8759 error( RtAudioError::WARNING );
8764 return sysinfo.numaudios;
8767 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
8769 RtAudio::DeviceInfo info;
8770 info.probed = false;
8772 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8773 if ( mixerfd == -1 ) {
8774 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
8775 error( RtAudioError::WARNING );
8779 oss_sysinfo sysinfo;
8780 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8781 if ( result == -1 ) {
8783 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
8784 error( RtAudioError::WARNING );
8788 unsigned nDevices = sysinfo.numaudios;
8789 if ( nDevices == 0 ) {
8791 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
8792 error( RtAudioError::INVALID_USE );
8796 if ( device >= nDevices ) {
8798 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
8799 error( RtAudioError::INVALID_USE );
8803 oss_audioinfo ainfo;
8805 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8807 if ( result == -1 ) {
8808 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8809 errorText_ = errorStream_.str();
8810 error( RtAudioError::WARNING );
8815 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
8816 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
8817 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
8818 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
8819 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
8822 // Probe data formats ... do for input
8823 unsigned long mask = ainfo.iformats;
8824 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
8825 info.nativeFormats |= RTAUDIO_SINT16;
8826 if ( mask & AFMT_S8 )
8827 info.nativeFormats |= RTAUDIO_SINT8;
8828 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
8829 info.nativeFormats |= RTAUDIO_SINT32;
8831 if ( mask & AFMT_FLOAT )
8832 info.nativeFormats |= RTAUDIO_FLOAT32;
8834 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
8835 info.nativeFormats |= RTAUDIO_SINT24;
8837 // Check that we have at least one supported format
8838 if ( info.nativeFormats == 0 ) {
8839 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
8840 errorText_ = errorStream_.str();
8841 error( RtAudioError::WARNING );
8845 // Probe the supported sample rates.
8846 info.sampleRates.clear();
8847 if ( ainfo.nrates ) {
8848 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
8849 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8850 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
8851 info.sampleRates.push_back( SAMPLE_RATES[k] );
8853 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8854 info.preferredSampleRate = SAMPLE_RATES[k];
8862 // Check min and max rate values;
8863 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8864 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
8865 info.sampleRates.push_back( SAMPLE_RATES[k] );
8867 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8868 info.preferredSampleRate = SAMPLE_RATES[k];
8873 if ( info.sampleRates.size() == 0 ) {
8874 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
8875 errorText_ = errorStream_.str();
8876 error( RtAudioError::WARNING );
8880 info.name = ainfo.name;
8887 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
8888 unsigned int firstChannel, unsigned int sampleRate,
8889 RtAudioFormat format, unsigned int *bufferSize,
8890 RtAudio::StreamOptions *options )
8892 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8893 if ( mixerfd == -1 ) {
8894 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
8898 oss_sysinfo sysinfo;
8899 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8900 if ( result == -1 ) {
8902 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
8906 unsigned nDevices = sysinfo.numaudios;
8907 if ( nDevices == 0 ) {
8908 // This should not happen because a check is made before this function is called.
8910 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
8914 if ( device >= nDevices ) {
8915 // This should not happen because a check is made before this function is called.
8917 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
8921 oss_audioinfo ainfo;
8923 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8925 if ( result == -1 ) {
8926 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8927 errorText_ = errorStream_.str();
8931 // Check if device supports input or output
8932 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
8933 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
8934 if ( mode == OUTPUT )
8935 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
8937 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
8938 errorText_ = errorStream_.str();
8943 OssHandle *handle = (OssHandle *) stream_.apiHandle;
8944 if ( mode == OUTPUT )
8946 else { // mode == INPUT
8947 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
8948 // We just set the same device for playback ... close and reopen for duplex (OSS only).
8949 close( handle->id[0] );
8951 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
8952 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
8953 errorText_ = errorStream_.str();
8956 // Check that the number previously set channels is the same.
8957 if ( stream_.nUserChannels[0] != channels ) {
8958 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
8959 errorText_ = errorStream_.str();
8968 // Set exclusive access if specified.
8969 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
8971 // Try to open the device.
8973 fd = open( ainfo.devnode, flags, 0 );
8975 if ( errno == EBUSY )
8976 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
8978 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
8979 errorText_ = errorStream_.str();
8983 // For duplex operation, specifically set this mode (this doesn't seem to work).
8985 if ( flags | O_RDWR ) {
8986 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
8987 if ( result == -1) {
8988 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
8989 errorText_ = errorStream_.str();
8995 // Check the device channel support.
8996 stream_.nUserChannels[mode] = channels;
8997 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
8999 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9000 errorText_ = errorStream_.str();
9004 // Set the number of channels.
9005 int deviceChannels = channels + firstChannel;
9006 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9007 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9009 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9010 errorText_ = errorStream_.str();
9013 stream_.nDeviceChannels[mode] = deviceChannels;
9015 // Get the data format mask
9017 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9018 if ( result == -1 ) {
9020 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9021 errorText_ = errorStream_.str();
9025 // Determine how to set the device format.
9026 stream_.userFormat = format;
9027 int deviceFormat = -1;
9028 stream_.doByteSwap[mode] = false;
9029 if ( format == RTAUDIO_SINT8 ) {
9030 if ( mask & AFMT_S8 ) {
9031 deviceFormat = AFMT_S8;
9032 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9035 else if ( format == RTAUDIO_SINT16 ) {
9036 if ( mask & AFMT_S16_NE ) {
9037 deviceFormat = AFMT_S16_NE;
9038 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9040 else if ( mask & AFMT_S16_OE ) {
9041 deviceFormat = AFMT_S16_OE;
9042 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9043 stream_.doByteSwap[mode] = true;
9046 else if ( format == RTAUDIO_SINT24 ) {
9047 if ( mask & AFMT_S24_NE ) {
9048 deviceFormat = AFMT_S24_NE;
9049 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9051 else if ( mask & AFMT_S24_OE ) {
9052 deviceFormat = AFMT_S24_OE;
9053 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9054 stream_.doByteSwap[mode] = true;
9057 else if ( format == RTAUDIO_SINT32 ) {
9058 if ( mask & AFMT_S32_NE ) {
9059 deviceFormat = AFMT_S32_NE;
9060 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9062 else if ( mask & AFMT_S32_OE ) {
9063 deviceFormat = AFMT_S32_OE;
9064 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9065 stream_.doByteSwap[mode] = true;
9069 if ( deviceFormat == -1 ) {
9070 // The user requested format is not natively supported by the device.
9071 if ( mask & AFMT_S16_NE ) {
9072 deviceFormat = AFMT_S16_NE;
9073 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9075 else if ( mask & AFMT_S32_NE ) {
9076 deviceFormat = AFMT_S32_NE;
9077 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9079 else if ( mask & AFMT_S24_NE ) {
9080 deviceFormat = AFMT_S24_NE;
9081 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9083 else if ( mask & AFMT_S16_OE ) {
9084 deviceFormat = AFMT_S16_OE;
9085 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9086 stream_.doByteSwap[mode] = true;
9088 else if ( mask & AFMT_S32_OE ) {
9089 deviceFormat = AFMT_S32_OE;
9090 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9091 stream_.doByteSwap[mode] = true;
9093 else if ( mask & AFMT_S24_OE ) {
9094 deviceFormat = AFMT_S24_OE;
9095 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9096 stream_.doByteSwap[mode] = true;
9098 else if ( mask & AFMT_S8) {
9099 deviceFormat = AFMT_S8;
9100 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9104 if ( stream_.deviceFormat[mode] == 0 ) {
9105 // This really shouldn't happen ...
9107 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9108 errorText_ = errorStream_.str();
9112 // Set the data format.
9113 int temp = deviceFormat;
9114 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9115 if ( result == -1 || deviceFormat != temp ) {
9117 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9118 errorText_ = errorStream_.str();
9122 // Attempt to set the buffer size. According to OSS, the minimum
9123 // number of buffers is two. The supposed minimum buffer size is 16
9124 // bytes, so that will be our lower bound. The argument to this
9125 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9126 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9127 // We'll check the actual value used near the end of the setup
9129 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9130 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9132 if ( options ) buffers = options->numberOfBuffers;
9133 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9134 if ( buffers < 2 ) buffers = 3;
9135 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9136 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9137 if ( result == -1 ) {
9139 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9140 errorText_ = errorStream_.str();
9143 stream_.nBuffers = buffers;
9145 // Save buffer size (in sample frames).
9146 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9147 stream_.bufferSize = *bufferSize;
9149 // Set the sample rate.
9150 int srate = sampleRate;
9151 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9152 if ( result == -1 ) {
9154 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9155 errorText_ = errorStream_.str();
9159 // Verify the sample rate setup worked.
9160 if ( abs( srate - (int)sampleRate ) > 100 ) {
9162 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9163 errorText_ = errorStream_.str();
9166 stream_.sampleRate = sampleRate;
9168 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9169 // We're doing duplex setup here.
9170 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9171 stream_.nDeviceChannels[0] = deviceChannels;
9174 // Set interleaving parameters.
9175 stream_.userInterleaved = true;
9176 stream_.deviceInterleaved[mode] = true;
9177 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9178 stream_.userInterleaved = false;
9180 // Set flags for buffer conversion
9181 stream_.doConvertBuffer[mode] = false;
9182 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9183 stream_.doConvertBuffer[mode] = true;
9184 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9185 stream_.doConvertBuffer[mode] = true;
9186 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9187 stream_.nUserChannels[mode] > 1 )
9188 stream_.doConvertBuffer[mode] = true;
9190 // Allocate the stream handles if necessary and then save.
9191 if ( stream_.apiHandle == 0 ) {
9193 handle = new OssHandle;
9195 catch ( std::bad_alloc& ) {
9196 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9200 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9201 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9205 stream_.apiHandle = (void *) handle;
9208 handle = (OssHandle *) stream_.apiHandle;
9210 handle->id[mode] = fd;
9212 // Allocate necessary internal buffers.
9213 unsigned long bufferBytes;
9214 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9215 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9216 if ( stream_.userBuffer[mode] == NULL ) {
9217 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9221 if ( stream_.doConvertBuffer[mode] ) {
9223 bool makeBuffer = true;
9224 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9225 if ( mode == INPUT ) {
9226 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9227 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9228 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9233 bufferBytes *= *bufferSize;
9234 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9235 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9236 if ( stream_.deviceBuffer == NULL ) {
9237 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9243 stream_.device[mode] = device;
9244 stream_.state = STREAM_STOPPED;
9246 // Setup the buffer conversion information structure.
9247 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9249 // Setup thread if necessary.
9250 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9251 // We had already set up an output stream.
9252 stream_.mode = DUPLEX;
9253 if ( stream_.device[0] == device ) handle->id[0] = fd;
9256 stream_.mode = mode;
9258 // Setup callback thread.
9259 stream_.callbackInfo.object = (void *) this;
9261 // Set the thread attributes for joinable and realtime scheduling
9262 // priority. The higher priority will only take affect if the
9263 // program is run as root or suid.
9264 pthread_attr_t attr;
9265 pthread_attr_init( &attr );
9266 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9267 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9268 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9269 stream_.callbackInfo.doRealtime = true;
9270 struct sched_param param;
9271 int priority = options->priority;
9272 int min = sched_get_priority_min( SCHED_RR );
9273 int max = sched_get_priority_max( SCHED_RR );
9274 if ( priority < min ) priority = min;
9275 else if ( priority > max ) priority = max;
9276 param.sched_priority = priority;
9278 // Set the policy BEFORE the priority. Otherwise it fails.
9279 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9280 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9281 // This is definitely required. Otherwise it fails.
9282 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
9283 pthread_attr_setschedparam(&attr, ¶m);
9286 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9288 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9291 stream_.callbackInfo.isRunning = true;
9292 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9293 pthread_attr_destroy( &attr );
9295 // Failed. Try instead with default attributes.
9296 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9298 stream_.callbackInfo.isRunning = false;
9299 errorText_ = "RtApiOss::error creating callback thread!";
9309 pthread_cond_destroy( &handle->runnable );
9310 if ( handle->id[0] ) close( handle->id[0] );
9311 if ( handle->id[1] ) close( handle->id[1] );
9313 stream_.apiHandle = 0;
9316 for ( int i=0; i<2; i++ ) {
9317 if ( stream_.userBuffer[i] ) {
9318 free( stream_.userBuffer[i] );
9319 stream_.userBuffer[i] = 0;
9323 if ( stream_.deviceBuffer ) {
9324 free( stream_.deviceBuffer );
9325 stream_.deviceBuffer = 0;
9328 stream_.state = STREAM_CLOSED;
9332 void RtApiOss :: closeStream()
9334 if ( stream_.state == STREAM_CLOSED ) {
9335 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9336 error( RtAudioError::WARNING );
9340 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9341 stream_.callbackInfo.isRunning = false;
9342 MUTEX_LOCK( &stream_.mutex );
9343 if ( stream_.state == STREAM_STOPPED )
9344 pthread_cond_signal( &handle->runnable );
9345 MUTEX_UNLOCK( &stream_.mutex );
9346 pthread_join( stream_.callbackInfo.thread, NULL );
9348 if ( stream_.state == STREAM_RUNNING ) {
9349 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9350 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9352 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9353 stream_.state = STREAM_STOPPED;
9357 pthread_cond_destroy( &handle->runnable );
9358 if ( handle->id[0] ) close( handle->id[0] );
9359 if ( handle->id[1] ) close( handle->id[1] );
9361 stream_.apiHandle = 0;
9364 for ( int i=0; i<2; i++ ) {
9365 if ( stream_.userBuffer[i] ) {
9366 free( stream_.userBuffer[i] );
9367 stream_.userBuffer[i] = 0;
9371 if ( stream_.deviceBuffer ) {
9372 free( stream_.deviceBuffer );
9373 stream_.deviceBuffer = 0;
9376 stream_.mode = UNINITIALIZED;
9377 stream_.state = STREAM_CLOSED;
9380 void RtApiOss :: startStream()
9383 if ( stream_.state == STREAM_RUNNING ) {
9384 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9385 error( RtAudioError::WARNING );
9389 MUTEX_LOCK( &stream_.mutex );
9391 stream_.state = STREAM_RUNNING;
9393 // No need to do anything else here ... OSS automatically starts
9394 // when fed samples.
9396 MUTEX_UNLOCK( &stream_.mutex );
9398 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9399 pthread_cond_signal( &handle->runnable );
9402 void RtApiOss :: stopStream()
9405 if ( stream_.state == STREAM_STOPPED ) {
9406 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9407 error( RtAudioError::WARNING );
9411 MUTEX_LOCK( &stream_.mutex );
9413 // The state might change while waiting on a mutex.
9414 if ( stream_.state == STREAM_STOPPED ) {
9415 MUTEX_UNLOCK( &stream_.mutex );
9420 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9421 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9423 // Flush the output with zeros a few times.
9426 RtAudioFormat format;
9428 if ( stream_.doConvertBuffer[0] ) {
9429 buffer = stream_.deviceBuffer;
9430 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9431 format = stream_.deviceFormat[0];
9434 buffer = stream_.userBuffer[0];
9435 samples = stream_.bufferSize * stream_.nUserChannels[0];
9436 format = stream_.userFormat;
9439 memset( buffer, 0, samples * formatBytes(format) );
9440 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9441 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9442 if ( result == -1 ) {
9443 errorText_ = "RtApiOss::stopStream: audio write error.";
9444 error( RtAudioError::WARNING );
9448 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9449 if ( result == -1 ) {
9450 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9451 errorText_ = errorStream_.str();
9454 handle->triggered = false;
9457 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9458 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9459 if ( result == -1 ) {
9460 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9461 errorText_ = errorStream_.str();
9467 stream_.state = STREAM_STOPPED;
9468 MUTEX_UNLOCK( &stream_.mutex );
9470 if ( result != -1 ) return;
9471 error( RtAudioError::SYSTEM_ERROR );
9474 void RtApiOss :: abortStream()
9477 if ( stream_.state == STREAM_STOPPED ) {
9478 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9479 error( RtAudioError::WARNING );
9483 MUTEX_LOCK( &stream_.mutex );
9485 // The state might change while waiting on a mutex.
9486 if ( stream_.state == STREAM_STOPPED ) {
9487 MUTEX_UNLOCK( &stream_.mutex );
9492 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9493 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9494 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9495 if ( result == -1 ) {
9496 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9497 errorText_ = errorStream_.str();
9500 handle->triggered = false;
9503 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9504 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9505 if ( result == -1 ) {
9506 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9507 errorText_ = errorStream_.str();
9513 stream_.state = STREAM_STOPPED;
9514 MUTEX_UNLOCK( &stream_.mutex );
9516 if ( result != -1 ) return;
9517 error( RtAudioError::SYSTEM_ERROR );
9520 void RtApiOss :: callbackEvent()
9522 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9523 if ( stream_.state == STREAM_STOPPED ) {
9524 MUTEX_LOCK( &stream_.mutex );
9525 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9526 if ( stream_.state != STREAM_RUNNING ) {
9527 MUTEX_UNLOCK( &stream_.mutex );
9530 MUTEX_UNLOCK( &stream_.mutex );
9533 if ( stream_.state == STREAM_CLOSED ) {
9534 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9535 error( RtAudioError::WARNING );
9539 // Invoke user callback to get fresh output data.
9540 int doStopStream = 0;
9541 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9542 double streamTime = getStreamTime();
9543 RtAudioStreamStatus status = 0;
9544 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9545 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9546 handle->xrun[0] = false;
9548 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9549 status |= RTAUDIO_INPUT_OVERFLOW;
9550 handle->xrun[1] = false;
9552 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9553 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
9554 if ( doStopStream == 2 ) {
9555 this->abortStream();
9559 MUTEX_LOCK( &stream_.mutex );
9561 // The state might change while waiting on a mutex.
9562 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9567 RtAudioFormat format;
9569 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9571 // Setup parameters and do buffer conversion if necessary.
9572 if ( stream_.doConvertBuffer[0] ) {
9573 buffer = stream_.deviceBuffer;
9574 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9575 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9576 format = stream_.deviceFormat[0];
9579 buffer = stream_.userBuffer[0];
9580 samples = stream_.bufferSize * stream_.nUserChannels[0];
9581 format = stream_.userFormat;
9584 // Do byte swapping if necessary.
9585 if ( stream_.doByteSwap[0] )
9586 byteSwapBuffer( buffer, samples, format );
9588 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9590 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9591 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9592 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9593 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9594 handle->triggered = true;
9597 // Write samples to device.
9598 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9600 if ( result == -1 ) {
9601 // We'll assume this is an underrun, though there isn't a
9602 // specific means for determining that.
9603 handle->xrun[0] = true;
9604 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9605 error( RtAudioError::WARNING );
9606 // Continue on to input section.
9610 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9612 // Setup parameters.
9613 if ( stream_.doConvertBuffer[1] ) {
9614 buffer = stream_.deviceBuffer;
9615 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9616 format = stream_.deviceFormat[1];
9619 buffer = stream_.userBuffer[1];
9620 samples = stream_.bufferSize * stream_.nUserChannels[1];
9621 format = stream_.userFormat;
9624 // Read samples from device.
9625 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9627 if ( result == -1 ) {
9628 // We'll assume this is an overrun, though there isn't a
9629 // specific means for determining that.
9630 handle->xrun[1] = true;
9631 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9632 error( RtAudioError::WARNING );
9636 // Do byte swapping if necessary.
9637 if ( stream_.doByteSwap[1] )
9638 byteSwapBuffer( buffer, samples, format );
9640 // Do buffer conversion if necessary.
9641 if ( stream_.doConvertBuffer[1] )
9642 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9646 MUTEX_UNLOCK( &stream_.mutex );
9648 RtApi::tickStreamTime();
9649 if ( doStopStream == 1 ) this->stopStream();
9652 static void *ossCallbackHandler( void *ptr )
9654 CallbackInfo *info = (CallbackInfo *) ptr;
9655 RtApiOss *object = (RtApiOss *) info->object;
9656 bool *isRunning = &info->isRunning;
9658 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9659 if (info->doRealtime) {
9660 std::cerr << "RtAudio oss: " <<
9661 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9662 "running realtime scheduling" << std::endl;
9666 while ( *isRunning == true ) {
9667 pthread_testcancel();
9668 object->callbackEvent();
9671 pthread_exit( NULL );
9674 //******************** End of __LINUX_OSS__ *********************//
9678 // *************************************************** //
9680 // Protected common (OS-independent) RtAudio methods.
9682 // *************************************************** //
9684 // This method can be modified to control the behavior of error
9685 // message printing.
9686 void RtApi :: error( RtAudioError::Type type )
9688 errorStream_.str(""); // clear the ostringstream
9690 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9691 if ( errorCallback ) {
9692 // abortStream() can generate new error messages. Ignore them. Just keep original one.
9694 if ( firstErrorOccurred_ )
9697 firstErrorOccurred_ = true;
9698 const std::string errorMessage = errorText_;
9700 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9701 stream_.callbackInfo.isRunning = false; // exit from the thread
9705 errorCallback( type, errorMessage );
9706 firstErrorOccurred_ = false;
9710 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9711 std::cerr << '\n' << errorText_ << "\n\n";
9712 else if ( type != RtAudioError::WARNING )
9713 throw( RtAudioError( errorText_, type ) );
9716 void RtApi :: verifyStream()
9718 if ( stream_.state == STREAM_CLOSED ) {
9719 errorText_ = "RtApi:: a stream is not open!";
9720 error( RtAudioError::INVALID_USE );
9724 void RtApi :: clearStreamInfo()
9726 stream_.mode = UNINITIALIZED;
9727 stream_.state = STREAM_CLOSED;
9728 stream_.sampleRate = 0;
9729 stream_.bufferSize = 0;
9730 stream_.nBuffers = 0;
9731 stream_.userFormat = 0;
9732 stream_.userInterleaved = true;
9733 stream_.streamTime = 0.0;
9734 stream_.apiHandle = 0;
9735 stream_.deviceBuffer = 0;
9736 stream_.callbackInfo.callback = 0;
9737 stream_.callbackInfo.userData = 0;
9738 stream_.callbackInfo.isRunning = false;
9739 stream_.callbackInfo.errorCallback = 0;
9740 for ( int i=0; i<2; i++ ) {
9741 stream_.device[i] = 11111;
9742 stream_.doConvertBuffer[i] = false;
9743 stream_.deviceInterleaved[i] = true;
9744 stream_.doByteSwap[i] = false;
9745 stream_.nUserChannels[i] = 0;
9746 stream_.nDeviceChannels[i] = 0;
9747 stream_.channelOffset[i] = 0;
9748 stream_.deviceFormat[i] = 0;
9749 stream_.latency[i] = 0;
9750 stream_.userBuffer[i] = 0;
9751 stream_.convertInfo[i].channels = 0;
9752 stream_.convertInfo[i].inJump = 0;
9753 stream_.convertInfo[i].outJump = 0;
9754 stream_.convertInfo[i].inFormat = 0;
9755 stream_.convertInfo[i].outFormat = 0;
9756 stream_.convertInfo[i].inOffset.clear();
9757 stream_.convertInfo[i].outOffset.clear();
9761 unsigned int RtApi :: formatBytes( RtAudioFormat format )
9763 if ( format == RTAUDIO_SINT16 )
9765 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
9767 else if ( format == RTAUDIO_FLOAT64 )
9769 else if ( format == RTAUDIO_SINT24 )
9771 else if ( format == RTAUDIO_SINT8 )
9774 errorText_ = "RtApi::formatBytes: undefined format.";
9775 error( RtAudioError::WARNING );
9780 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
9782 if ( mode == INPUT ) { // convert device to user buffer
9783 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
9784 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
9785 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
9786 stream_.convertInfo[mode].outFormat = stream_.userFormat;
9788 else { // convert user to device buffer
9789 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
9790 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
9791 stream_.convertInfo[mode].inFormat = stream_.userFormat;
9792 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
9795 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
9796 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
9798 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
9800 // Set up the interleave/deinterleave offsets.
9801 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
9802 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
9803 ( mode == INPUT && stream_.userInterleaved ) ) {
9804 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9805 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9806 stream_.convertInfo[mode].outOffset.push_back( k );
9807 stream_.convertInfo[mode].inJump = 1;
9811 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9812 stream_.convertInfo[mode].inOffset.push_back( k );
9813 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9814 stream_.convertInfo[mode].outJump = 1;
9818 else { // no (de)interleaving
9819 if ( stream_.userInterleaved ) {
9820 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9821 stream_.convertInfo[mode].inOffset.push_back( k );
9822 stream_.convertInfo[mode].outOffset.push_back( k );
9826 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9827 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9828 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9829 stream_.convertInfo[mode].inJump = 1;
9830 stream_.convertInfo[mode].outJump = 1;
9835 // Add channel offset.
9836 if ( firstChannel > 0 ) {
9837 if ( stream_.deviceInterleaved[mode] ) {
9838 if ( mode == OUTPUT ) {
9839 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9840 stream_.convertInfo[mode].outOffset[k] += firstChannel;
9843 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9844 stream_.convertInfo[mode].inOffset[k] += firstChannel;
9848 if ( mode == OUTPUT ) {
9849 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9850 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
9853 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9854 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
9860 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
9862 // This function does format conversion, input/output channel compensation, and
9863 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
9864 // the lower three bytes of a 32-bit integer.
9866 // Clear our device buffer when in/out duplex device channels are different
9867 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
9868 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
9869 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
9872 if (info.outFormat == RTAUDIO_FLOAT64) {
9874 Float64 *out = (Float64 *)outBuffer;
9876 if (info.inFormat == RTAUDIO_SINT8) {
9877 signed char *in = (signed char *)inBuffer;
9878 scale = 1.0 / 127.5;
9879 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9880 for (j=0; j<info.channels; j++) {
9881 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9882 out[info.outOffset[j]] += 0.5;
9883 out[info.outOffset[j]] *= scale;
9886 out += info.outJump;
9889 else if (info.inFormat == RTAUDIO_SINT16) {
9890 Int16 *in = (Int16 *)inBuffer;
9891 scale = 1.0 / 32767.5;
9892 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9893 for (j=0; j<info.channels; j++) {
9894 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9895 out[info.outOffset[j]] += 0.5;
9896 out[info.outOffset[j]] *= scale;
9899 out += info.outJump;
9902 else if (info.inFormat == RTAUDIO_SINT24) {
9903 Int24 *in = (Int24 *)inBuffer;
9904 scale = 1.0 / 8388607.5;
9905 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9906 for (j=0; j<info.channels; j++) {
9907 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
9908 out[info.outOffset[j]] += 0.5;
9909 out[info.outOffset[j]] *= scale;
9912 out += info.outJump;
9915 else if (info.inFormat == RTAUDIO_SINT32) {
9916 Int32 *in = (Int32 *)inBuffer;
9917 scale = 1.0 / 2147483647.5;
9918 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9919 for (j=0; j<info.channels; j++) {
9920 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9921 out[info.outOffset[j]] += 0.5;
9922 out[info.outOffset[j]] *= scale;
9925 out += info.outJump;
9928 else if (info.inFormat == RTAUDIO_FLOAT32) {
9929 Float32 *in = (Float32 *)inBuffer;
9930 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9931 for (j=0; j<info.channels; j++) {
9932 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9935 out += info.outJump;
9938 else if (info.inFormat == RTAUDIO_FLOAT64) {
9939 // Channel compensation and/or (de)interleaving only.
9940 Float64 *in = (Float64 *)inBuffer;
9941 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9942 for (j=0; j<info.channels; j++) {
9943 out[info.outOffset[j]] = in[info.inOffset[j]];
9946 out += info.outJump;
9950 else if (info.outFormat == RTAUDIO_FLOAT32) {
9952 Float32 *out = (Float32 *)outBuffer;
9954 if (info.inFormat == RTAUDIO_SINT8) {
9955 signed char *in = (signed char *)inBuffer;
9956 scale = (Float32) ( 1.0 / 127.5 );
9957 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9958 for (j=0; j<info.channels; j++) {
9959 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9960 out[info.outOffset[j]] += 0.5;
9961 out[info.outOffset[j]] *= scale;
9964 out += info.outJump;
9967 else if (info.inFormat == RTAUDIO_SINT16) {
9968 Int16 *in = (Int16 *)inBuffer;
9969 scale = (Float32) ( 1.0 / 32767.5 );
9970 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9971 for (j=0; j<info.channels; j++) {
9972 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9973 out[info.outOffset[j]] += 0.5;
9974 out[info.outOffset[j]] *= scale;
9977 out += info.outJump;
9980 else if (info.inFormat == RTAUDIO_SINT24) {
9981 Int24 *in = (Int24 *)inBuffer;
9982 scale = (Float32) ( 1.0 / 8388607.5 );
9983 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9984 for (j=0; j<info.channels; j++) {
9985 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
9986 out[info.outOffset[j]] += 0.5;
9987 out[info.outOffset[j]] *= scale;
9990 out += info.outJump;
9993 else if (info.inFormat == RTAUDIO_SINT32) {
9994 Int32 *in = (Int32 *)inBuffer;
9995 scale = (Float32) ( 1.0 / 2147483647.5 );
9996 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9997 for (j=0; j<info.channels; j++) {
9998 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9999 out[info.outOffset[j]] += 0.5;
10000 out[info.outOffset[j]] *= scale;
10003 out += info.outJump;
10006 else if (info.inFormat == RTAUDIO_FLOAT32) {
10007 // Channel compensation and/or (de)interleaving only.
10008 Float32 *in = (Float32 *)inBuffer;
10009 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10010 for (j=0; j<info.channels; j++) {
10011 out[info.outOffset[j]] = in[info.inOffset[j]];
10014 out += info.outJump;
10017 else if (info.inFormat == RTAUDIO_FLOAT64) {
10018 Float64 *in = (Float64 *)inBuffer;
10019 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10020 for (j=0; j<info.channels; j++) {
10021 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10024 out += info.outJump;
10028 else if (info.outFormat == RTAUDIO_SINT32) {
10029 Int32 *out = (Int32 *)outBuffer;
10030 if (info.inFormat == RTAUDIO_SINT8) {
10031 signed char *in = (signed char *)inBuffer;
10032 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10033 for (j=0; j<info.channels; j++) {
10034 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10035 out[info.outOffset[j]] <<= 24;
10038 out += info.outJump;
10041 else if (info.inFormat == RTAUDIO_SINT16) {
10042 Int16 *in = (Int16 *)inBuffer;
10043 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10044 for (j=0; j<info.channels; j++) {
10045 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10046 out[info.outOffset[j]] <<= 16;
10049 out += info.outJump;
10052 else if (info.inFormat == RTAUDIO_SINT24) {
10053 Int24 *in = (Int24 *)inBuffer;
10054 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10055 for (j=0; j<info.channels; j++) {
10056 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10057 out[info.outOffset[j]] <<= 8;
10060 out += info.outJump;
10063 else if (info.inFormat == RTAUDIO_SINT32) {
10064 // Channel compensation and/or (de)interleaving only.
10065 Int32 *in = (Int32 *)inBuffer;
10066 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10067 for (j=0; j<info.channels; j++) {
10068 out[info.outOffset[j]] = in[info.inOffset[j]];
10071 out += info.outJump;
10074 else if (info.inFormat == RTAUDIO_FLOAT32) {
10075 Float32 *in = (Float32 *)inBuffer;
10076 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10077 for (j=0; j<info.channels; j++) {
10078 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10081 out += info.outJump;
10084 else if (info.inFormat == RTAUDIO_FLOAT64) {
10085 Float64 *in = (Float64 *)inBuffer;
10086 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10087 for (j=0; j<info.channels; j++) {
10088 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10091 out += info.outJump;
10095 else if (info.outFormat == RTAUDIO_SINT24) {
10096 Int24 *out = (Int24 *)outBuffer;
10097 if (info.inFormat == RTAUDIO_SINT8) {
10098 signed char *in = (signed char *)inBuffer;
10099 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10100 for (j=0; j<info.channels; j++) {
10101 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10102 //out[info.outOffset[j]] <<= 16;
10105 out += info.outJump;
10108 else if (info.inFormat == RTAUDIO_SINT16) {
10109 Int16 *in = (Int16 *)inBuffer;
10110 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10111 for (j=0; j<info.channels; j++) {
10112 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10113 //out[info.outOffset[j]] <<= 8;
10116 out += info.outJump;
10119 else if (info.inFormat == RTAUDIO_SINT24) {
10120 // Channel compensation and/or (de)interleaving only.
10121 Int24 *in = (Int24 *)inBuffer;
10122 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10123 for (j=0; j<info.channels; j++) {
10124 out[info.outOffset[j]] = in[info.inOffset[j]];
10127 out += info.outJump;
10130 else if (info.inFormat == RTAUDIO_SINT32) {
10131 Int32 *in = (Int32 *)inBuffer;
10132 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10133 for (j=0; j<info.channels; j++) {
10134 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10135 //out[info.outOffset[j]] >>= 8;
10138 out += info.outJump;
10141 else if (info.inFormat == RTAUDIO_FLOAT32) {
10142 Float32 *in = (Float32 *)inBuffer;
10143 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10144 for (j=0; j<info.channels; j++) {
10145 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10148 out += info.outJump;
10151 else if (info.inFormat == RTAUDIO_FLOAT64) {
10152 Float64 *in = (Float64 *)inBuffer;
10153 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10154 for (j=0; j<info.channels; j++) {
10155 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10158 out += info.outJump;
10162 else if (info.outFormat == RTAUDIO_SINT16) {
10163 Int16 *out = (Int16 *)outBuffer;
10164 if (info.inFormat == RTAUDIO_SINT8) {
10165 signed char *in = (signed char *)inBuffer;
10166 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10167 for (j=0; j<info.channels; j++) {
10168 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10169 out[info.outOffset[j]] <<= 8;
10172 out += info.outJump;
10175 else if (info.inFormat == RTAUDIO_SINT16) {
10176 // Channel compensation and/or (de)interleaving only.
10177 Int16 *in = (Int16 *)inBuffer;
10178 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10179 for (j=0; j<info.channels; j++) {
10180 out[info.outOffset[j]] = in[info.inOffset[j]];
10183 out += info.outJump;
10186 else if (info.inFormat == RTAUDIO_SINT24) {
10187 Int24 *in = (Int24 *)inBuffer;
10188 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10189 for (j=0; j<info.channels; j++) {
10190 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10193 out += info.outJump;
10196 else if (info.inFormat == RTAUDIO_SINT32) {
10197 Int32 *in = (Int32 *)inBuffer;
10198 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10199 for (j=0; j<info.channels; j++) {
10200 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10203 out += info.outJump;
10206 else if (info.inFormat == RTAUDIO_FLOAT32) {
10207 Float32 *in = (Float32 *)inBuffer;
10208 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10209 for (j=0; j<info.channels; j++) {
10210 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10213 out += info.outJump;
10216 else if (info.inFormat == RTAUDIO_FLOAT64) {
10217 Float64 *in = (Float64 *)inBuffer;
10218 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10219 for (j=0; j<info.channels; j++) {
10220 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10223 out += info.outJump;
10227 else if (info.outFormat == RTAUDIO_SINT8) {
10228 signed char *out = (signed char *)outBuffer;
10229 if (info.inFormat == RTAUDIO_SINT8) {
10230 // Channel compensation and/or (de)interleaving only.
10231 signed char *in = (signed char *)inBuffer;
10232 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10233 for (j=0; j<info.channels; j++) {
10234 out[info.outOffset[j]] = in[info.inOffset[j]];
10237 out += info.outJump;
10240 if (info.inFormat == RTAUDIO_SINT16) {
10241 Int16 *in = (Int16 *)inBuffer;
10242 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10243 for (j=0; j<info.channels; j++) {
10244 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10247 out += info.outJump;
10250 else if (info.inFormat == RTAUDIO_SINT24) {
10251 Int24 *in = (Int24 *)inBuffer;
10252 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10253 for (j=0; j<info.channels; j++) {
10254 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10257 out += info.outJump;
10260 else if (info.inFormat == RTAUDIO_SINT32) {
10261 Int32 *in = (Int32 *)inBuffer;
10262 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10263 for (j=0; j<info.channels; j++) {
10264 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10267 out += info.outJump;
10270 else if (info.inFormat == RTAUDIO_FLOAT32) {
10271 Float32 *in = (Float32 *)inBuffer;
10272 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10273 for (j=0; j<info.channels; j++) {
10274 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10277 out += info.outJump;
10280 else if (info.inFormat == RTAUDIO_FLOAT64) {
10281 Float64 *in = (Float64 *)inBuffer;
10282 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10283 for (j=0; j<info.channels; j++) {
10284 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10287 out += info.outJump;
10293 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10294 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10295 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10297 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10303 if ( format == RTAUDIO_SINT16 ) {
10304 for ( unsigned int i=0; i<samples; i++ ) {
10305 // Swap 1st and 2nd bytes.
10310 // Increment 2 bytes.
10314 else if ( format == RTAUDIO_SINT32 ||
10315 format == RTAUDIO_FLOAT32 ) {
10316 for ( unsigned int i=0; i<samples; i++ ) {
10317 // Swap 1st and 4th bytes.
10322 // Swap 2nd and 3rd bytes.
10328 // Increment 3 more bytes.
10332 else if ( format == RTAUDIO_SINT24 ) {
10333 for ( unsigned int i=0; i<samples; i++ ) {
10334 // Swap 1st and 3rd bytes.
10339 // Increment 2 more bytes.
10343 else if ( format == RTAUDIO_FLOAT64 ) {
10344 for ( unsigned int i=0; i<samples; i++ ) {
10345 // Swap 1st and 8th bytes
10350 // Swap 2nd and 7th bytes
10356 // Swap 3rd and 6th bytes
10362 // Swap 4th and 5th bytes
10368 // Increment 5 more bytes.
10374 // Indentation settings for Vim and Emacs
10376 // Local Variables:
10377 // c-basic-offset: 2
10378 // indent-tabs-mode: nil
10381 // vim: et sts=2 sw=2