1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 /************************************************************************/
41 // RtAudio: Version 5.0.0
51 // Static variable definitions.
52 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
53 const unsigned int RtApi::SAMPLE_RATES[] = {
54 4000, 5512, 8000, 9600, 11025, 16000, 22050,
55 32000, 44100, 48000, 88200, 96000, 176400, 192000
// Platform-specific mutex wrappers and character-conversion helpers.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)

  // Narrow-character overload: text is assumed already in a narrow
  // encoding, so a direct copy suffices.
  static std::string convertCharPointerToStdString(const char *text)
  {
    return std::string(text);
  }

  // Wide-character overload: convert UTF-16 device strings to UTF-8.
  // The first call computes the required buffer size (including the
  // terminating null, hence length-1 for the std::string size).
  static std::string convertCharPointerToStdString(const wchar_t *text)
  {
    int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
    std::string s( length-1, '\0' );
    WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
    return s;
  }

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
90 // *************************************************** //
92 // RtAudio definitions.
94 // *************************************************** //
96 std::string RtAudio :: getVersion( void )
98 return RTAUDIO_VERSION;
101 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
105 // The order here will control the order of RtAudio's API search in
107 #if defined(__UNIX_JACK__)
108 apis.push_back( UNIX_JACK );
110 #if defined(__LINUX_ALSA__)
111 apis.push_back( LINUX_ALSA );
113 #if defined(__LINUX_PULSE__)
114 apis.push_back( LINUX_PULSE );
116 #if defined(__LINUX_OSS__)
117 apis.push_back( LINUX_OSS );
119 #if defined(__WINDOWS_ASIO__)
120 apis.push_back( WINDOWS_ASIO );
122 #if defined(__WINDOWS_WASAPI__)
123 apis.push_back( WINDOWS_WASAPI );
125 #if defined(__WINDOWS_DS__)
126 apis.push_back( WINDOWS_DS );
128 #if defined(__MACOSX_CORE__)
129 apis.push_back( MACOSX_CORE );
131 #if defined(__RTAUDIO_DUMMY__)
132 apis.push_back( RTAUDIO_DUMMY );
136 void RtAudio :: openRtApi( RtAudio::Api api )
142 #if defined(__UNIX_JACK__)
143 if ( api == UNIX_JACK )
144 rtapi_ = new RtApiJack();
146 #if defined(__LINUX_ALSA__)
147 if ( api == LINUX_ALSA )
148 rtapi_ = new RtApiAlsa();
150 #if defined(__LINUX_PULSE__)
151 if ( api == LINUX_PULSE )
152 rtapi_ = new RtApiPulse();
154 #if defined(__LINUX_OSS__)
155 if ( api == LINUX_OSS )
156 rtapi_ = new RtApiOss();
158 #if defined(__WINDOWS_ASIO__)
159 if ( api == WINDOWS_ASIO )
160 rtapi_ = new RtApiAsio();
162 #if defined(__WINDOWS_WASAPI__)
163 if ( api == WINDOWS_WASAPI )
164 rtapi_ = new RtApiWasapi();
166 #if defined(__WINDOWS_DS__)
167 if ( api == WINDOWS_DS )
168 rtapi_ = new RtApiDs();
170 #if defined(__MACOSX_CORE__)
171 if ( api == MACOSX_CORE )
172 rtapi_ = new RtApiCore();
174 #if defined(__RTAUDIO_DUMMY__)
175 if ( api == RTAUDIO_DUMMY )
176 rtapi_ = new RtApiDummy();
180 RtAudio :: RtAudio( RtAudio::Api api )
184 if ( api != UNSPECIFIED ) {
185 // Attempt to open the specified API.
187 if ( rtapi_ ) return;
189 // No compiled support for specified API value. Issue a debug
190 // warning and continue as if no API was specified.
191 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
194 // Iterate through the compiled APIs and return as soon as we find
195 // one with at least one device or we reach the end of the list.
196 std::vector< RtAudio::Api > apis;
197 getCompiledApi( apis );
198 for ( unsigned int i=0; i<apis.size(); i++ ) {
199 openRtApi( apis[i] );
200 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
203 if ( rtapi_ ) return;
205 // It should not be possible to get here because the preprocessor
206 // definition __RTAUDIO_DUMMY__ is automatically defined if no
207 // API-specific definitions are passed to the compiler. But just in
208 // case something weird happens, we'll thow an error.
209 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
210 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
213 RtAudio :: ~RtAudio()
219 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
220 RtAudio::StreamParameters *inputParameters,
221 RtAudioFormat format, unsigned int sampleRate,
222 unsigned int *bufferFrames,
223 RtAudioCallback callback, void *userData,
224 RtAudio::StreamOptions *options,
225 RtAudioErrorCallback errorCallback )
227 return rtapi_->openStream( outputParameters, inputParameters, format,
228 sampleRate, bufferFrames, callback,
229 userData, options, errorCallback );
232 // *************************************************** //
234 // Public RtApi definitions (see end of file for
235 // private or protected utility functions).
237 // *************************************************** //
241 stream_.state = STREAM_CLOSED;
242 stream_.mode = UNINITIALIZED;
243 stream_.apiHandle = 0;
244 stream_.userBuffer[0] = 0;
245 stream_.userBuffer[1] = 0;
246 MUTEX_INITIALIZE( &stream_.mutex );
247 showWarnings_ = true;
248 firstErrorOccurred_ = false;
253 MUTEX_DESTROY( &stream_.mutex );
256 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
257 RtAudio::StreamParameters *iParams,
258 RtAudioFormat format, unsigned int sampleRate,
259 unsigned int *bufferFrames,
260 RtAudioCallback callback, void *userData,
261 RtAudio::StreamOptions *options,
262 RtAudioErrorCallback errorCallback )
264 if ( stream_.state != STREAM_CLOSED ) {
265 errorText_ = "RtApi::openStream: a stream is already open!";
266 error( RtAudioError::INVALID_USE );
270 // Clear stream information potentially left from a previously open stream.
273 if ( oParams && oParams->nChannels < 1 ) {
274 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
275 error( RtAudioError::INVALID_USE );
279 if ( iParams && iParams->nChannels < 1 ) {
280 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
281 error( RtAudioError::INVALID_USE );
285 if ( oParams == NULL && iParams == NULL ) {
286 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
287 error( RtAudioError::INVALID_USE );
291 if ( formatBytes(format) == 0 ) {
292 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
293 error( RtAudioError::INVALID_USE );
297 unsigned int nDevices = getDeviceCount();
298 unsigned int oChannels = 0;
300 oChannels = oParams->nChannels;
301 if ( oParams->deviceId >= nDevices ) {
302 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
303 error( RtAudioError::INVALID_USE );
308 unsigned int iChannels = 0;
310 iChannels = iParams->nChannels;
311 if ( iParams->deviceId >= nDevices ) {
312 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
313 error( RtAudioError::INVALID_USE );
320 if ( oChannels > 0 ) {
322 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
323 sampleRate, format, bufferFrames, options );
324 if ( result == false ) {
325 error( RtAudioError::SYSTEM_ERROR );
330 if ( iChannels > 0 ) {
332 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
333 sampleRate, format, bufferFrames, options );
334 if ( result == false ) {
335 if ( oChannels > 0 ) closeStream();
336 error( RtAudioError::SYSTEM_ERROR );
341 stream_.callbackInfo.callback = (void *) callback;
342 stream_.callbackInfo.userData = userData;
343 stream_.callbackInfo.errorCallback = (void *) errorCallback;
345 if ( options ) options->numberOfBuffers = stream_.nBuffers;
346 stream_.state = STREAM_STOPPED;
349 unsigned int RtApi :: getDefaultInputDevice( void )
351 // Should be implemented in subclasses if possible.
355 unsigned int RtApi :: getDefaultOutputDevice( void )
357 // Should be implemented in subclasses if possible.
361 void RtApi :: closeStream( void )
363 // MUST be implemented in subclasses!
367 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
368 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
369 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
370 RtAudio::StreamOptions * /*options*/ )
372 // MUST be implemented in subclasses!
376 void RtApi :: tickStreamTime( void )
378 // Subclasses that do not provide their own implementation of
379 // getStreamTime should call this function once per buffer I/O to
380 // provide basic stream time support.
382 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
384 #if defined( HAVE_GETTIMEOFDAY )
385 gettimeofday( &stream_.lastTickTimestamp, NULL );
389 long RtApi :: getStreamLatency( void )
393 long totalLatency = 0;
394 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
395 totalLatency = stream_.latency[0];
396 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
397 totalLatency += stream_.latency[1];
402 double RtApi :: getStreamTime( void )
406 #if defined( HAVE_GETTIMEOFDAY )
407 // Return a very accurate estimate of the stream time by
408 // adding in the elapsed time since the last tick.
412 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
413 return stream_.streamTime;
415 gettimeofday( &now, NULL );
416 then = stream_.lastTickTimestamp;
417 return stream_.streamTime +
418 ((now.tv_sec + 0.000001 * now.tv_usec) -
419 (then.tv_sec + 0.000001 * then.tv_usec));
421 return stream_.streamTime;
425 void RtApi :: setStreamTime( double time )
430 stream_.streamTime = time;
431 #if defined( HAVE_GETTIMEOFDAY )
432 gettimeofday( &stream_.lastTickTimestamp, NULL );
436 unsigned int RtApi :: getStreamSampleRate( void )
440 return stream_.sampleRate;
444 // *************************************************** //
446 // OS/API-specific methods.
448 // *************************************************** //
450 #if defined(__MACOSX_CORE__)
452 // The OS X CoreAudio API is designed to use a separate callback
453 // procedure for each of its audio devices. A single RtAudio duplex
454 // stream using two different devices is supported here, though it
455 // cannot be guaranteed to always behave correctly because we cannot
456 // synchronize these two callbacks.
458 // A property listener is installed for over/underrun information.
459 // However, no functionality is currently provided to allow property
460 // listeners to trigger user handlers because it is unclear what could
461 // be done if a critical stream parameter (buffer size, sample rate,
462 // device disconnect) notification arrived. The listeners entail
463 // quite a bit of extra code and most likely, a user program wouldn't
464 // be prepared for the result anyway. However, we do provide a flag
465 // to the client callback function to inform of an over/underrun.
467 // A structure to hold various information related to the CoreAudio API
470 AudioDeviceID id[2]; // device ids
471 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
472 AudioDeviceIOProcID procId[2];
474 UInt32 iStream[2]; // device stream index (or first if using multiple)
475 UInt32 nStreams[2]; // number of streams to use
478 pthread_cond_t condition;
479 int drainCounter; // Tracks callback counts when draining
480 bool internalDrain; // Indicates if stop is initiated from callback or not.
483 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
486 RtApiCore:: RtApiCore()
488 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
489 // This is a largely undocumented but absolutely necessary
490 // requirement starting with OS-X 10.6. If not called, queries and
491 // updates to various audio device properties are not handled
493 CFRunLoopRef theRunLoop = NULL;
494 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
495 kAudioObjectPropertyScopeGlobal,
496 kAudioObjectPropertyElementMaster };
497 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
498 if ( result != noErr ) {
499 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
500 error( RtAudioError::WARNING );
505 RtApiCore :: ~RtApiCore()
507 // The subclass destructor gets called before the base class
508 // destructor, so close an existing stream before deallocating
509 // apiDeviceId memory.
510 if ( stream_.state != STREAM_CLOSED ) closeStream();
513 unsigned int RtApiCore :: getDeviceCount( void )
515 // Find out how many audio devices there are, if any.
517 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
518 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
519 if ( result != noErr ) {
520 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
521 error( RtAudioError::WARNING );
525 return dataSize / sizeof( AudioDeviceID );
528 unsigned int RtApiCore :: getDefaultInputDevice( void )
530 unsigned int nDevices = getDeviceCount();
531 if ( nDevices <= 1 ) return 0;
534 UInt32 dataSize = sizeof( AudioDeviceID );
535 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
536 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
537 if ( result != noErr ) {
538 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
539 error( RtAudioError::WARNING );
543 dataSize *= nDevices;
544 AudioDeviceID deviceList[ nDevices ];
545 property.mSelector = kAudioHardwarePropertyDevices;
546 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
547 if ( result != noErr ) {
548 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
549 error( RtAudioError::WARNING );
553 for ( unsigned int i=0; i<nDevices; i++ )
554 if ( id == deviceList[i] ) return i;
556 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
557 error( RtAudioError::WARNING );
561 unsigned int RtApiCore :: getDefaultOutputDevice( void )
563 unsigned int nDevices = getDeviceCount();
564 if ( nDevices <= 1 ) return 0;
567 UInt32 dataSize = sizeof( AudioDeviceID );
568 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
569 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
570 if ( result != noErr ) {
571 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
572 error( RtAudioError::WARNING );
576 dataSize = sizeof( AudioDeviceID ) * nDevices;
577 AudioDeviceID deviceList[ nDevices ];
578 property.mSelector = kAudioHardwarePropertyDevices;
579 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
580 if ( result != noErr ) {
581 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
582 error( RtAudioError::WARNING );
586 for ( unsigned int i=0; i<nDevices; i++ )
587 if ( id == deviceList[i] ) return i;
589 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
590 error( RtAudioError::WARNING );
594 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
596 RtAudio::DeviceInfo info;
600 unsigned int nDevices = getDeviceCount();
601 if ( nDevices == 0 ) {
602 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
603 error( RtAudioError::INVALID_USE );
607 if ( device >= nDevices ) {
608 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
609 error( RtAudioError::INVALID_USE );
613 AudioDeviceID deviceList[ nDevices ];
614 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
615 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
616 kAudioObjectPropertyScopeGlobal,
617 kAudioObjectPropertyElementMaster };
618 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
619 0, NULL, &dataSize, (void *) &deviceList );
620 if ( result != noErr ) {
621 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
622 error( RtAudioError::WARNING );
626 AudioDeviceID id = deviceList[ device ];
628 // Get the device name.
631 dataSize = sizeof( CFStringRef );
632 property.mSelector = kAudioObjectPropertyManufacturer;
633 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
634 if ( result != noErr ) {
635 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
636 errorText_ = errorStream_.str();
637 error( RtAudioError::WARNING );
641 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
642 int length = CFStringGetLength(cfname);
643 char *mname = (char *)malloc(length * 3 + 1);
644 #if defined( UNICODE ) || defined( _UNICODE )
645 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
647 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
649 info.name.append( (const char *)mname, strlen(mname) );
650 info.name.append( ": " );
654 property.mSelector = kAudioObjectPropertyName;
655 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
656 if ( result != noErr ) {
657 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
658 errorText_ = errorStream_.str();
659 error( RtAudioError::WARNING );
663 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
664 length = CFStringGetLength(cfname);
665 char *name = (char *)malloc(length * 3 + 1);
666 #if defined( UNICODE ) || defined( _UNICODE )
667 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
669 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
671 info.name.append( (const char *)name, strlen(name) );
675 // Get the output stream "configuration".
676 AudioBufferList *bufferList = nil;
677 property.mSelector = kAudioDevicePropertyStreamConfiguration;
678 property.mScope = kAudioDevicePropertyScopeOutput;
679 // property.mElement = kAudioObjectPropertyElementWildcard;
681 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
682 if ( result != noErr || dataSize == 0 ) {
683 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
684 errorText_ = errorStream_.str();
685 error( RtAudioError::WARNING );
689 // Allocate the AudioBufferList.
690 bufferList = (AudioBufferList *) malloc( dataSize );
691 if ( bufferList == NULL ) {
692 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
693 error( RtAudioError::WARNING );
697 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
698 if ( result != noErr || dataSize == 0 ) {
700 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
701 errorText_ = errorStream_.str();
702 error( RtAudioError::WARNING );
706 // Get output channel information.
707 unsigned int i, nStreams = bufferList->mNumberBuffers;
708 for ( i=0; i<nStreams; i++ )
709 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
712 // Get the input stream "configuration".
713 property.mScope = kAudioDevicePropertyScopeInput;
714 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
715 if ( result != noErr || dataSize == 0 ) {
716 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
717 errorText_ = errorStream_.str();
718 error( RtAudioError::WARNING );
722 // Allocate the AudioBufferList.
723 bufferList = (AudioBufferList *) malloc( dataSize );
724 if ( bufferList == NULL ) {
725 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
726 error( RtAudioError::WARNING );
730 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
731 if (result != noErr || dataSize == 0) {
733 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
734 errorText_ = errorStream_.str();
735 error( RtAudioError::WARNING );
739 // Get input channel information.
740 nStreams = bufferList->mNumberBuffers;
741 for ( i=0; i<nStreams; i++ )
742 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
745 // If device opens for both playback and capture, we determine the channels.
746 if ( info.outputChannels > 0 && info.inputChannels > 0 )
747 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
749 // Probe the device sample rates.
750 bool isInput = false;
751 if ( info.outputChannels == 0 ) isInput = true;
753 // Determine the supported sample rates.
754 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
755 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
756 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
757 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
758 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
759 errorText_ = errorStream_.str();
760 error( RtAudioError::WARNING );
764 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
765 AudioValueRange rangeList[ nRanges ];
766 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
767 if ( result != kAudioHardwareNoError ) {
768 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
769 errorText_ = errorStream_.str();
770 error( RtAudioError::WARNING );
774 // The sample rate reporting mechanism is a bit of a mystery. It
775 // seems that it can either return individual rates or a range of
776 // rates. I assume that if the min / max range values are the same,
777 // then that represents a single supported rate and if the min / max
778 // range values are different, the device supports an arbitrary
779 // range of values (though there might be multiple ranges, so we'll
780 // use the most conservative range).
781 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
782 bool haveValueRange = false;
783 info.sampleRates.clear();
784 for ( UInt32 i=0; i<nRanges; i++ ) {
785 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
786 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
787 info.sampleRates.push_back( tmpSr );
789 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
790 info.preferredSampleRate = tmpSr;
793 haveValueRange = true;
794 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
795 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
799 if ( haveValueRange ) {
800 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
801 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
802 info.sampleRates.push_back( SAMPLE_RATES[k] );
804 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
805 info.preferredSampleRate = SAMPLE_RATES[k];
810 // Sort and remove any redundant values
811 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
812 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
814 if ( info.sampleRates.size() == 0 ) {
815 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
816 errorText_ = errorStream_.str();
817 error( RtAudioError::WARNING );
821 // CoreAudio always uses 32-bit floating point data for PCM streams.
822 // Thus, any other "physical" formats supported by the device are of
823 // no interest to the client.
824 info.nativeFormats = RTAUDIO_FLOAT32;
826 if ( info.outputChannels > 0 )
827 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
828 if ( info.inputChannels > 0 )
829 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
835 static OSStatus callbackHandler( AudioDeviceID inDevice,
836 const AudioTimeStamp* /*inNow*/,
837 const AudioBufferList* inInputData,
838 const AudioTimeStamp* /*inInputTime*/,
839 AudioBufferList* outOutputData,
840 const AudioTimeStamp* /*inOutputTime*/,
843 CallbackInfo *info = (CallbackInfo *) infoPointer;
845 RtApiCore *object = (RtApiCore *) info->object;
846 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
847 return kAudioHardwareUnspecifiedError;
849 return kAudioHardwareNoError;
852 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
854 const AudioObjectPropertyAddress properties[],
855 void* handlePointer )
857 CoreHandle *handle = (CoreHandle *) handlePointer;
858 for ( UInt32 i=0; i<nAddresses; i++ ) {
859 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
860 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
861 handle->xrun[1] = true;
863 handle->xrun[0] = true;
867 return kAudioHardwareNoError;
870 static OSStatus rateListener( AudioObjectID inDevice,
871 UInt32 /*nAddresses*/,
872 const AudioObjectPropertyAddress /*properties*/[],
875 Float64 *rate = (Float64 *) ratePointer;
876 UInt32 dataSize = sizeof( Float64 );
877 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
878 kAudioObjectPropertyScopeGlobal,
879 kAudioObjectPropertyElementMaster };
880 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
881 return kAudioHardwareNoError;
884 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
885 unsigned int firstChannel, unsigned int sampleRate,
886 RtAudioFormat format, unsigned int *bufferSize,
887 RtAudio::StreamOptions *options )
890 unsigned int nDevices = getDeviceCount();
891 if ( nDevices == 0 ) {
892 // This should not happen because a check is made before this function is called.
893 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
897 if ( device >= nDevices ) {
898 // This should not happen because a check is made before this function is called.
899 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
903 AudioDeviceID deviceList[ nDevices ];
904 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
905 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
906 kAudioObjectPropertyScopeGlobal,
907 kAudioObjectPropertyElementMaster };
908 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
909 0, NULL, &dataSize, (void *) &deviceList );
910 if ( result != noErr ) {
911 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
915 AudioDeviceID id = deviceList[ device ];
917 // Setup for stream mode.
918 bool isInput = false;
919 if ( mode == INPUT ) {
921 property.mScope = kAudioDevicePropertyScopeInput;
924 property.mScope = kAudioDevicePropertyScopeOutput;
926 // Get the stream "configuration".
927 AudioBufferList *bufferList = nil;
929 property.mSelector = kAudioDevicePropertyStreamConfiguration;
930 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
931 if ( result != noErr || dataSize == 0 ) {
932 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
933 errorText_ = errorStream_.str();
937 // Allocate the AudioBufferList.
938 bufferList = (AudioBufferList *) malloc( dataSize );
939 if ( bufferList == NULL ) {
940 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
944 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
945 if (result != noErr || dataSize == 0) {
947 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
948 errorText_ = errorStream_.str();
952 // Search for one or more streams that contain the desired number of
953 // channels. CoreAudio devices can have an arbitrary number of
954 // streams and each stream can have an arbitrary number of channels.
955 // For each stream, a single buffer of interleaved samples is
956 // provided. RtAudio prefers the use of one stream of interleaved
957 // data or multiple consecutive single-channel streams. However, we
958 // now support multiple consecutive multi-channel streams of
959 // interleaved data as well.
960 UInt32 iStream, offsetCounter = firstChannel;
961 UInt32 nStreams = bufferList->mNumberBuffers;
962 bool monoMode = false;
963 bool foundStream = false;
965 // First check that the device supports the requested number of
967 UInt32 deviceChannels = 0;
968 for ( iStream=0; iStream<nStreams; iStream++ )
969 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
971 if ( deviceChannels < ( channels + firstChannel ) ) {
973 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
974 errorText_ = errorStream_.str();
978 // Look for a single stream meeting our needs.
979 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
980 for ( iStream=0; iStream<nStreams; iStream++ ) {
981 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
982 if ( streamChannels >= channels + offsetCounter ) {
983 firstStream = iStream;
984 channelOffset = offsetCounter;
988 if ( streamChannels > offsetCounter ) break;
989 offsetCounter -= streamChannels;
992 // If we didn't find a single stream above, then we should be able
993 // to meet the channel specification with multiple streams.
994 if ( foundStream == false ) {
996 offsetCounter = firstChannel;
997 for ( iStream=0; iStream<nStreams; iStream++ ) {
998 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
999 if ( streamChannels > offsetCounter ) break;
1000 offsetCounter -= streamChannels;
1003 firstStream = iStream;
1004 channelOffset = offsetCounter;
1005 Int32 channelCounter = channels + offsetCounter - streamChannels;
1007 if ( streamChannels > 1 ) monoMode = false;
1008 while ( channelCounter > 0 ) {
1009 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1010 if ( streamChannels > 1 ) monoMode = false;
1011 channelCounter -= streamChannels;
1018 // Determine the buffer size.
1019 AudioValueRange bufferRange;
1020 dataSize = sizeof( AudioValueRange );
1021 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1022 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1024 if ( result != noErr ) {
1025 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1026 errorText_ = errorStream_.str();
1030 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1031 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1032 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1034 // Set the buffer size. For multiple streams, I'm assuming we only
1035 // need to make this setting for the master channel.
1036 UInt32 theSize = (UInt32) *bufferSize;
1037 dataSize = sizeof( UInt32 );
1038 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1039 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1041 if ( result != noErr ) {
1042 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1043 errorText_ = errorStream_.str();
1047 // If attempting to setup a duplex stream, the bufferSize parameter
1048 // MUST be the same in both directions!
1049 *bufferSize = theSize;
1050 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1051 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1052 errorText_ = errorStream_.str();
1056 stream_.bufferSize = *bufferSize;
1057 stream_.nBuffers = 1;
1059 // Try to set "hog" mode ... it's not clear to me this is working.
1060 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1062 dataSize = sizeof( hog_pid );
1063 property.mSelector = kAudioDevicePropertyHogMode;
1064 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1065 if ( result != noErr ) {
1066 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1067 errorText_ = errorStream_.str();
1071 if ( hog_pid != getpid() ) {
1073 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1074 if ( result != noErr ) {
1075 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1076 errorText_ = errorStream_.str();
1082 // Check and if necessary, change the sample rate for the device.
1083 Float64 nominalRate;
1084 dataSize = sizeof( Float64 );
1085 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1086 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1087 if ( result != noErr ) {
1088 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1089 errorText_ = errorStream_.str();
1093 // Only change the sample rate if off by more than 1 Hz.
1094 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1096 // Set a property listener for the sample rate change
1097 Float64 reportedRate = 0.0;
1098 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1099 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1100 if ( result != noErr ) {
1101 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1102 errorText_ = errorStream_.str();
1106 nominalRate = (Float64) sampleRate;
1107 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1108 if ( result != noErr ) {
1109 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1110 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1111 errorText_ = errorStream_.str();
1115 // Now wait until the reported nominal rate is what we just set.
1116 UInt32 microCounter = 0;
1117 while ( reportedRate != nominalRate ) {
1118 microCounter += 5000;
1119 if ( microCounter > 5000000 ) break;
1123 // Remove the property listener.
1124 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1126 if ( microCounter > 5000000 ) {
1127 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1128 errorText_ = errorStream_.str();
1133 // Now set the stream format for all streams. Also, check the
1134 // physical format of the device and change that if necessary.
1135 AudioStreamBasicDescription description;
1136 dataSize = sizeof( AudioStreamBasicDescription );
1137 property.mSelector = kAudioStreamPropertyVirtualFormat;
1138 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1139 if ( result != noErr ) {
1140 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1141 errorText_ = errorStream_.str();
1145 // Set the sample rate and data format id. However, only make the
1146 // change if the sample rate is not within 1.0 of the desired
1147 // rate and the format is not linear pcm.
1148 bool updateFormat = false;
1149 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1150 description.mSampleRate = (Float64) sampleRate;
1151 updateFormat = true;
1154 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1155 description.mFormatID = kAudioFormatLinearPCM;
1156 updateFormat = true;
1159 if ( updateFormat ) {
1160 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1161 if ( result != noErr ) {
1162 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1163 errorText_ = errorStream_.str();
1168 // Now check the physical format.
1169 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1170 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1171 if ( result != noErr ) {
1172 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1173 errorText_ = errorStream_.str();
1177 //std::cout << "Current physical stream format:" << std::endl;
1178 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1179 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1180 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1181 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
1183 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1184 description.mFormatID = kAudioFormatLinearPCM;
1185 //description.mSampleRate = (Float64) sampleRate;
1186 AudioStreamBasicDescription testDescription = description;
1189 // We'll try higher bit rates first and then work our way down.
1190 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1191 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1192 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1193 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1194 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1195 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1196 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1197 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1198 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1199 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1200 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1201 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1202 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1204 bool setPhysicalFormat = false;
1205 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1206 testDescription = description;
1207 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1208 testDescription.mFormatFlags = physicalFormats[i].second;
1209 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1210 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1212 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1213 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1214 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1215 if ( result == noErr ) {
1216 setPhysicalFormat = true;
1217 //std::cout << "Updated physical stream format:" << std::endl;
1218 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1219 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1220 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1221 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1226 if ( !setPhysicalFormat ) {
1227 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1228 errorText_ = errorStream_.str();
1231 } // done setting virtual/physical formats.
1233 // Get the stream / device latency.
1235 dataSize = sizeof( UInt32 );
1236 property.mSelector = kAudioDevicePropertyLatency;
1237 if ( AudioObjectHasProperty( id, &property ) == true ) {
1238 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1239 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1241 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1242 errorText_ = errorStream_.str();
1243 error( RtAudioError::WARNING );
1247 // Byte-swapping: According to AudioHardware.h, the stream data will
1248 // always be presented in native-endian format, so we should never
1249 // need to byte swap.
1250 stream_.doByteSwap[mode] = false;
1252 // From the CoreAudio documentation, PCM data must be supplied as
1254 stream_.userFormat = format;
1255 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1257 if ( streamCount == 1 )
1258 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1259 else // multiple streams
1260 stream_.nDeviceChannels[mode] = channels;
1261 stream_.nUserChannels[mode] = channels;
1262 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1263 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1264 else stream_.userInterleaved = true;
1265 stream_.deviceInterleaved[mode] = true;
1266 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1268 // Set flags for buffer conversion.
1269 stream_.doConvertBuffer[mode] = false;
1270 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1271 stream_.doConvertBuffer[mode] = true;
1272 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1273 stream_.doConvertBuffer[mode] = true;
1274 if ( streamCount == 1 ) {
1275 if ( stream_.nUserChannels[mode] > 1 &&
1276 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1277 stream_.doConvertBuffer[mode] = true;
1279 else if ( monoMode && stream_.userInterleaved )
1280 stream_.doConvertBuffer[mode] = true;
1282 // Allocate our CoreHandle structure for the stream.
1283 CoreHandle *handle = 0;
1284 if ( stream_.apiHandle == 0 ) {
1286 handle = new CoreHandle;
1288 catch ( std::bad_alloc& ) {
1289 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1293 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1294 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1297 stream_.apiHandle = (void *) handle;
1300 handle = (CoreHandle *) stream_.apiHandle;
1301 handle->iStream[mode] = firstStream;
1302 handle->nStreams[mode] = streamCount;
1303 handle->id[mode] = id;
1305 // Allocate necessary internal buffers.
1306 unsigned long bufferBytes;
1307 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1308 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1309 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1310 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1311 if ( stream_.userBuffer[mode] == NULL ) {
1312 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1316 // If possible, we will make use of the CoreAudio stream buffers as
1317 // "device buffers". However, we can't do this if using multiple
1319 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1321 bool makeBuffer = true;
1322 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1323 if ( mode == INPUT ) {
1324 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1325 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1326 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1331 bufferBytes *= *bufferSize;
1332 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1333 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1334 if ( stream_.deviceBuffer == NULL ) {
1335 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1341 stream_.sampleRate = sampleRate;
1342 stream_.device[mode] = device;
1343 stream_.state = STREAM_STOPPED;
1344 stream_.callbackInfo.object = (void *) this;
1346 // Setup the buffer conversion information structure.
1347 if ( stream_.doConvertBuffer[mode] ) {
1348 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1349 else setConvertInfo( mode, channelOffset );
1352 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1353 // Only one callback procedure per device.
1354 stream_.mode = DUPLEX;
1356 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1357 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1359 // deprecated in favor of AudioDeviceCreateIOProcID()
1360 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1362 if ( result != noErr ) {
1363 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1364 errorText_ = errorStream_.str();
1367 if ( stream_.mode == OUTPUT && mode == INPUT )
1368 stream_.mode = DUPLEX;
1370 stream_.mode = mode;
1373 // Setup the device property listener for over/underload.
1374 property.mSelector = kAudioDeviceProcessorOverload;
1375 property.mScope = kAudioObjectPropertyScopeGlobal;
1376 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1382 pthread_cond_destroy( &handle->condition );
1384 stream_.apiHandle = 0;
1387 for ( int i=0; i<2; i++ ) {
1388 if ( stream_.userBuffer[i] ) {
1389 free( stream_.userBuffer[i] );
1390 stream_.userBuffer[i] = 0;
1394 if ( stream_.deviceBuffer ) {
1395 free( stream_.deviceBuffer );
1396 stream_.deviceBuffer = 0;
1399 stream_.state = STREAM_CLOSED;
// Close an open stream: detach the xrun (processor-overload) property
// listeners and registered IOProcs from the output device (handle->id[0])
// and, when it is a distinct device, the input device (handle->id[1]);
// stop the devices first if the stream is still running. Then free the
// user/device buffers, destroy the condition variable, and mark the
// stream UNINITIALIZED / STREAM_CLOSED.
// NOTE(review): this listing is non-contiguous (the inline numbering skips
// lines); early returns and closing braces are elided from view — compare
// against canonical RtAudio.cpp before editing.
1403 void RtApiCore :: closeStream( void )
// Guard: warn if there is nothing to close (the elided lines presumably return here).
1405 if ( stream_.state == STREAM_CLOSED ) {
1406 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1407 error( RtAudioError::WARNING );
1411 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Output side (also covers the output half of a duplex stream).
1412 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1414 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1415 kAudioObjectPropertyScopeGlobal,
1416 kAudioObjectPropertyElementMaster };
// Remove the overload (xrun) listener that was installed at open time.
1418 property.mSelector = kAudioDeviceProcessorOverload;
1419 property.mScope = kAudioObjectPropertyScopeGlobal;
1420 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1421 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1422 error( RtAudioError::WARNING );
1425 if ( stream_.state == STREAM_RUNNING )
1426 AudioDeviceStop( handle->id[0], callbackHandler );
// Tear down the IOProc; the ProcID variant exists on OS X 10.5+.
1427 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1428 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1430 // deprecated in favor of AudioDeviceDestroyIOProcID()
1431 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Input side: only needed separately when input uses a different device.
1435 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1437 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1438 kAudioObjectPropertyScopeGlobal,
1439 kAudioObjectPropertyElementMaster };
1441 property.mSelector = kAudioDeviceProcessorOverload;
1442 property.mScope = kAudioObjectPropertyScopeGlobal;
1443 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1444 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1445 error( RtAudioError::WARNING );
1448 if ( stream_.state == STREAM_RUNNING )
1449 AudioDeviceStop( handle->id[1], callbackHandler );
1450 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1451 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1453 // deprecated in favor of AudioDeviceDestroyIOProcID()
1454 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Free the per-mode user buffers (malloc'd in probeDeviceOpen).
1458 for ( int i=0; i<2; i++ ) {
1459 if ( stream_.userBuffer[i] ) {
1460 free( stream_.userBuffer[i] );
1461 stream_.userBuffer[i] = 0;
1465 if ( stream_.deviceBuffer ) {
1466 free( stream_.deviceBuffer );
1467 stream_.deviceBuffer = 0;
1470 // Destroy pthread condition variable.
1471 pthread_cond_destroy( &handle->condition );
// NOTE(review): the deallocation of the CoreHandle itself appears to be on
// an elided line between 1471 and 1473 — verify against the full source.
1473 stream_.apiHandle = 0;
1475 stream_.mode = UNINITIALIZED;
1476 stream_.state = STREAM_CLOSED;
// Start the stream: call AudioDeviceStart() on the output device and/or the
// input device (when input uses a separate device), reset the drain state,
// and mark the stream RUNNING. Any CoreAudio failure falls through to the
// SYSTEM_ERROR raise at the bottom.
// NOTE(review): non-contiguous listing — returns and closing braces are
// elided from view.
1479 void RtApiCore :: startStream( void )
1482 if ( stream_.state == STREAM_RUNNING ) {
1483 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1484 error( RtAudioError::WARNING );
1488 OSStatus result = noErr;
1489 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Output (or duplex) device: handle->id[0].
1490 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1492 result = AudioDeviceStart( handle->id[0], callbackHandler );
1493 if ( result != noErr ) {
1494 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1495 errorText_ = errorStream_.str();
// Input device: handle->id[1]; started separately only when distinct from
// the output device.
1500 if ( stream_.mode == INPUT ||
1501 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1503 result = AudioDeviceStart( handle->id[1], callbackHandler );
1504 if ( result != noErr ) {
1505 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1506 errorText_ = errorStream_.str();
// Fresh stream: clear the drain bookkeeping used by stop/abort.
1511 handle->drainCounter = 0;
1512 handle->internalDrain = false;
1513 stream_.state = STREAM_RUNNING;
1516 if ( result == noErr ) return;
1517 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream gracefully: for output/duplex, request a drain
// (drainCounter = 2) and block on the condition variable until
// callbackEvent() signals that the output has been zero-filled, then
// AudioDeviceStop() each device in use.
// NOTE(review): non-contiguous listing — returns, closing braces and the
// mutex handling around pthread_cond_wait are elided from view.
1520 void RtApiCore :: stopStream( void )
1523 if ( stream_.state == STREAM_STOPPED ) {
1524 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1525 error( RtAudioError::WARNING );
1529 OSStatus result = noErr;
1530 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1531 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Only initiate a drain if one is not already in progress
// (abortStream pre-sets drainCounter to 2 to skip the wait).
1533 if ( handle->drainCounter == 0 ) {
1534 handle->drainCounter = 2;
1535 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1538 result = AudioDeviceStop( handle->id[0], callbackHandler );
1539 if ( result != noErr ) {
1540 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1541 errorText_ = errorStream_.str();
// Stop the input device too when it is separate from the output device.
1546 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1548 result = AudioDeviceStop( handle->id[1], callbackHandler );
1549 if ( result != noErr ) {
1550 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1551 errorText_ = errorStream_.str();
1556 stream_.state = STREAM_STOPPED;
1559 if ( result == noErr ) return;
1560 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream: pre-set drainCounter to 2 so the drain-and-wait branch
// inside stopStream() is skipped, i.e. pending output is not played out.
// NOTE(review): the tail of this function (presumably the stopStream() call
// that performs the actual stop) is elided from this listing.
1563 void RtApiCore :: abortStream( void )
1566 if ( stream_.state == STREAM_STOPPED ) {
1567 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1568 error( RtAudioError::WARNING );
1572 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1573 handle->drainCounter = 2;
1578 // This function will be called by a spawned thread when the user
1579 // callback function signals that the stream should be stopped or
1580 // aborted. It is better to handle it this way because the
1581 // callbackEvent() function probably should return before the AudioDeviceStop()
1582 // function is called.
// Thread entry point (see pthread_create in callbackEvent): unpacks the
// CallbackInfo to recover the owning RtApiCore instance, then stops the
// stream off the realtime audio thread.
1583 static void *coreStopStream( void *ptr )
1585 CallbackInfo *info = (CallbackInfo *) ptr;
1586 RtApiCore *object = (RtApiCore *) info->object;
1588 object->stopStream();
// Terminate this helper thread; its return value is not consumed.
1589 pthread_exit( NULL );
// Per-device CoreAudio render callback body (invoked via callbackHandler for
// the output and/or input device). Fires the user callback, converts or
// copies audio between the user buffer(s) and the CoreAudio
// AudioBufferList(s), and drives the drain/stop handshake with
// stopStream()/coreStopStream(). Returns SUCCESS/FAILURE.
// NOTE(review): non-contiguous listing — mutex lock/unlock, returns and
// closing braces are elided from view throughout this function.
1592 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1593 const AudioBufferList *inBufferList,
1594 const AudioBufferList *outBufferList )
// Nothing to do once the stream is stopped or stopping.
1596 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1597 if ( stream_.state == STREAM_CLOSED ) {
1598 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1599 error( RtAudioError::WARNING );
1603 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1604 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1606 // Check if we were draining the stream and signal is finished.
1607 if ( handle->drainCounter > 3 ) {
1608 ThreadHandle threadId;
1610 stream_.state = STREAM_STOPPING;
// Internal drain (user callback returned 1): stop from a helper thread.
// External stopStream(): it is blocked on the condition variable — wake it.
1611 if ( handle->internalDrain == true )
1612 pthread_create( &threadId, NULL, coreStopStream, info );
1613 else // external call to stopStream()
1614 pthread_cond_signal( &handle->condition );
1618 AudioDeviceID outputDevice = handle->id[0];
1620 // Invoke user callback to get fresh output data UNLESS we are
1621 // draining stream or duplex mode AND the input/output devices are
1622 // different AND this function is called for the input device.
1623 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1624 RtAudioCallback callback = (RtAudioCallback) info->callback;
1625 double streamTime = getStreamTime();
1626 RtAudioStreamStatus status = 0;
// Report any xrun flags raised by the overload listener, then clear them.
1627 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1628 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1629 handle->xrun[0] = false;
1631 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1632 status |= RTAUDIO_INPUT_OVERFLOW;
1633 handle->xrun[1] = false;
1636 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1637 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 => abort immediately (no drain); 1 => stop after
// draining the remaining output.
1638 if ( cbReturnValue == 2 ) {
1639 stream_.state = STREAM_STOPPING;
1640 handle->drainCounter = 2;
1644 else if ( cbReturnValue == 1 ) {
1645 handle->drainCounter = 1;
1646 handle->internalDrain = true;
// ---- Output side: fill the device's AudioBufferList. ----
1650 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1652 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1654 if ( handle->nStreams[0] == 1 ) {
1655 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1657 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1659 else { // fill multiple streams with zeros
1660 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1661 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1663 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1667 else if ( handle->nStreams[0] == 1 ) {
1668 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1669 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1670 stream_.userBuffer[0], stream_.convertInfo[0] );
1672 else { // copy from user buffer
1673 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1674 stream_.userBuffer[0],
1675 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1678 else { // fill multiple streams
1679 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1680 if ( stream_.doConvertBuffer[0] ) {
1681 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1682 inBuffer = (Float32 *) stream_.deviceBuffer;
// Mono mode: one single-channel stream per user channel, copied verbatim.
1685 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1686 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1687 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1688 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1689 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1692 else { // fill multiple multi-channel streams with interleaved data
1693 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1696 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1697 UInt32 inChannels = stream_.nUserChannels[0];
1698 if ( stream_.doConvertBuffer[0] ) {
1699 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1700 inChannels = stream_.nDeviceChannels[0];
// inOffset is the stride between successive samples of one channel in the
// source: 1 when interleaved, bufferSize when planar.
1703 if ( inInterleaved ) inOffset = 1;
1704 else inOffset = stream_.bufferSize;
1706 channelsLeft = inChannels;
1707 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1709 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1710 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1713 // Account for possible channel offset in first stream
1714 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1715 streamChannels -= stream_.channelOffset[0];
1716 outJump = stream_.channelOffset[0];
1720 // Account for possible unfilled channels at end of the last stream
1721 if ( streamChannels > channelsLeft ) {
1722 outJump = streamChannels - channelsLeft;
1723 streamChannels = channelsLeft;
1726 // Determine input buffer offsets and skips
1727 if ( inInterleaved ) {
1728 inJump = inChannels;
1729 in += inChannels - channelsLeft;
1733 in += (inChannels - channelsLeft) * inOffset;
// Frame-by-frame interleaving copy into this stream's buffer.
1736 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1737 for ( unsigned int j=0; j<streamChannels; j++ ) {
1738 *out++ = in[j*inOffset];
1743 channelsLeft -= streamChannels;
// While draining, count callback passes but don't bother reading input.
1749 // Don't bother draining input
1750 if ( handle->drainCounter ) {
1751 handle->drainCounter++;
// ---- Input side: read the device's AudioBufferList into the user buffer. ----
1755 AudioDeviceID inputDevice;
1756 inputDevice = handle->id[1];
1757 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1759 if ( handle->nStreams[1] == 1 ) {
1760 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1761 convertBuffer( stream_.userBuffer[1],
1762 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1763 stream_.convertInfo[1] );
1765 else { // copy to user buffer
1766 memcpy( stream_.userBuffer[1],
1767 inBufferList->mBuffers[handle->iStream[1]].mData,
1768 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1771 else { // read from multiple streams
1772 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1773 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1775 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1776 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1777 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1778 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1779 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1782 else { // read from multiple multi-channel streams
1783 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1786 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1787 UInt32 outChannels = stream_.nUserChannels[1];
1788 if ( stream_.doConvertBuffer[1] ) {
1789 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1790 outChannels = stream_.nDeviceChannels[1];
// outOffset mirrors inOffset on the output side, but for the destination.
1793 if ( outInterleaved ) outOffset = 1;
1794 else outOffset = stream_.bufferSize;
1796 channelsLeft = outChannels;
1797 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1799 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1800 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1803 // Account for possible channel offset in first stream
1804 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1805 streamChannels -= stream_.channelOffset[1];
1806 inJump = stream_.channelOffset[1];
1810 // Account for possible unread channels at end of the last stream
1811 if ( streamChannels > channelsLeft ) {
1812 inJump = streamChannels - channelsLeft;
1813 streamChannels = channelsLeft;
1816 // Determine output buffer offsets and skips
1817 if ( outInterleaved ) {
1818 outJump = outChannels;
1819 out += outChannels - channelsLeft;
1823 out += (outChannels - channelsLeft) * outOffset;
// De-interleave this stream's frames into the destination buffer.
1826 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1827 for ( unsigned int j=0; j<streamChannels; j++ ) {
1828 out[j*outOffset] = *in++;
1833 channelsLeft -= streamChannels;
// If input was staged in the device buffer, convert to the user format now.
1837 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1838 convertBuffer( stream_.userBuffer[1],
1839 stream_.deviceBuffer,
1840 stream_.convertInfo[1] );
1846 //MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream time by one buffer's worth of frames.
1848 RtApi::tickStreamTime();
1852 const char* RtApiCore :: getErrorCode( OSStatus code )
1856 case kAudioHardwareNotRunningError:
1857 return "kAudioHardwareNotRunningError";
1859 case kAudioHardwareUnspecifiedError:
1860 return "kAudioHardwareUnspecifiedError";
1862 case kAudioHardwareUnknownPropertyError:
1863 return "kAudioHardwareUnknownPropertyError";
1865 case kAudioHardwareBadPropertySizeError:
1866 return "kAudioHardwareBadPropertySizeError";
1868 case kAudioHardwareIllegalOperationError:
1869 return "kAudioHardwareIllegalOperationError";
1871 case kAudioHardwareBadObjectError:
1872 return "kAudioHardwareBadObjectError";
1874 case kAudioHardwareBadDeviceError:
1875 return "kAudioHardwareBadDeviceError";
1877 case kAudioHardwareBadStreamError:
1878 return "kAudioHardwareBadStreamError";
1880 case kAudioHardwareUnsupportedOperationError:
1881 return "kAudioHardwareUnsupportedOperationError";
1883 case kAudioDeviceUnsupportedFormatError:
1884 return "kAudioDeviceUnsupportedFormatError";
1886 case kAudioDevicePermissionsError:
1887 return "kAudioDevicePermissionsError";
1890 return "CoreAudio unknown error";
1894 //******************** End of __MACOSX_CORE__ *********************//
1897 #if defined(__UNIX_JACK__)
1899 // JACK is a low-latency audio server, originally written for the
1900 // GNU/Linux operating system and now also ported to OS-X. It can
1901 // connect a number of different applications to an audio device, as
1902 // well as allowing them to share audio between themselves.
1904 // When using JACK with RtAudio, "devices" refer to JACK clients that
1905 // have ports connected to the server. The JACK server is typically
1906 // started in a terminal as follows:
1908 // .jackd -d alsa -d hw:0
1910 // or through an interface program such as qjackctl. Many of the
1911 // parameters normally set for a stream are fixed by the JACK server
1912 // and can be specified when the JACK server is started. In
1915 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1917 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1918 // frames, and number of buffers = 4. Once the server is running, it
1919 // is not possible to override these values. If the values are not
1920 // specified in the command-line, the JACK server uses default values.
1922 // The JACK server does not have to be running when an instance of
1923 // RtApiJack is created, though the function getDeviceCount() will
1924 // report 0 devices found until JACK has been started. When no
1925 // devices are available (i.e., the JACK server is not running), a
1926 // stream cannot be opened.
1928 #include <jack/jack.h>
1932 // A structure to hold various information related to the Jack API
1935 jack_client_t *client;
1936 jack_port_t **ports[2];
1937 std::string deviceName[2];
1939 pthread_cond_t condition;
1940 int drainCounter; // Tracks callback counts when draining
1941 bool internalDrain; // Indicates if stop is initiated from callback or not.
1944 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
#if !defined(__RTAUDIO_DEBUG__)
// No-op handler installed via jack_set_error_function() to silence Jack's
// internal error printing in non-debug builds.
static void jackSilentError( const char * ) {}
#endif
1951 RtApiJack :: RtApiJack()
1952 :shouldAutoconnect_(true) {
1953 // Nothing to do here.
1954 #if !defined(__RTAUDIO_DEBUG__)
1955 // Turn off Jack's internal error reporting.
1956 jack_set_error_function( &jackSilentError );
1960 RtApiJack :: ~RtApiJack()
1962 if ( stream_.state != STREAM_CLOSED ) closeStream();
1965 unsigned int RtApiJack :: getDeviceCount( void )
1967 // See if we can become a jack client.
1968 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
1969 jack_status_t *status = NULL;
1970 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
1971 if ( client == 0 ) return 0;
1974 std::string port, previousPort;
1975 unsigned int nChannels = 0, nDevices = 0;
1976 ports = jack_get_ports( client, NULL, NULL, 0 );
1978 // Parse the port names up to the first colon (:).
1981 port = (char *) ports[ nChannels ];
1982 iColon = port.find(":");
1983 if ( iColon != std::string::npos ) {
1984 port = port.substr( 0, iColon + 1 );
1985 if ( port != previousPort ) {
1987 previousPort = port;
1990 } while ( ports[++nChannels] );
1994 jack_client_close( client );
1998 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2000 RtAudio::DeviceInfo info;
2001 info.probed = false;
2003 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2004 jack_status_t *status = NULL;
2005 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2006 if ( client == 0 ) {
2007 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2008 error( RtAudioError::WARNING );
2013 std::string port, previousPort;
2014 unsigned int nPorts = 0, nDevices = 0;
2015 ports = jack_get_ports( client, NULL, NULL, 0 );
2017 // Parse the port names up to the first colon (:).
2020 port = (char *) ports[ nPorts ];
2021 iColon = port.find(":");
2022 if ( iColon != std::string::npos ) {
2023 port = port.substr( 0, iColon );
2024 if ( port != previousPort ) {
2025 if ( nDevices == device ) info.name = port;
2027 previousPort = port;
2030 } while ( ports[++nPorts] );
2034 if ( device >= nDevices ) {
2035 jack_client_close( client );
2036 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2037 error( RtAudioError::INVALID_USE );
2041 // Get the current jack server sample rate.
2042 info.sampleRates.clear();
2044 info.preferredSampleRate = jack_get_sample_rate( client );
2045 info.sampleRates.push_back( info.preferredSampleRate );
2047 // Count the available ports containing the client name as device
2048 // channels. Jack "input ports" equal RtAudio output channels.
2049 unsigned int nChannels = 0;
2050 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
2052 while ( ports[ nChannels ] ) nChannels++;
2054 info.outputChannels = nChannels;
2057 // Jack "output ports" equal RtAudio input channels.
2059 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
2061 while ( ports[ nChannels ] ) nChannels++;
2063 info.inputChannels = nChannels;
2066 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2067 jack_client_close(client);
2068 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2069 error( RtAudioError::WARNING );
2073 // If device opens for both playback and capture, we determine the channels.
2074 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2075 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2077 // Jack always uses 32-bit floats.
2078 info.nativeFormats = RTAUDIO_FLOAT32;
2080 // Jack doesn't provide default devices so we'll use the first available one.
2081 if ( device == 0 && info.outputChannels > 0 )
2082 info.isDefaultOutput = true;
2083 if ( device == 0 && info.inputChannels > 0 )
2084 info.isDefaultInput = true;
2086 jack_client_close(client);
2091 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2093 CallbackInfo *info = (CallbackInfo *) infoPointer;
2095 RtApiJack *object = (RtApiJack *) info->object;
2096 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2101 // This function will be called by a spawned thread when the Jack
2102 // server signals that it is shutting down. It is necessary to handle
2103 // it this way because the jackShutdown() function must return before
2104 // the jack_deactivate() function (in closeStream()) will return.
2105 static void *jackCloseStream( void *ptr )
2107 CallbackInfo *info = (CallbackInfo *) ptr;
2108 RtApiJack *object = (RtApiJack *) info->object;
2110 object->closeStream();
2112 pthread_exit( NULL );
2114 static void jackShutdown( void *infoPointer )
2116 CallbackInfo *info = (CallbackInfo *) infoPointer;
2117 RtApiJack *object = (RtApiJack *) info->object;
2119 // Check current stream state. If stopped, then we'll assume this
2120 // was called as a result of a call to RtApiJack::stopStream (the
2121 // deactivation of a client handle causes this function to be called).
2122 // If not, we'll assume the Jack server is shutting down or some
2123 // other problem occurred and we should close the stream.
2124 if ( object->isStreamRunning() == false ) return;
2126 ThreadHandle threadId;
2127 pthread_create( &threadId, NULL, jackCloseStream, info );
2128 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2131 static int jackXrun( void *infoPointer )
2133 JackHandle *handle = (JackHandle *) infoPointer;
2135 if ( handle->ports[0] ) handle->xrun[0] = true;
2136 if ( handle->ports[1] ) handle->xrun[1] = true;
2141 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2142 unsigned int firstChannel, unsigned int sampleRate,
2143 RtAudioFormat format, unsigned int *bufferSize,
2144 RtAudio::StreamOptions *options )
2146 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2148 // Look for jack server and try to become a client (only do once per stream).
2149 jack_client_t *client = 0;
2150 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2151 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2152 jack_status_t *status = NULL;
2153 if ( options && !options->streamName.empty() )
2154 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2156 client = jack_client_open( "RtApiJack", jackoptions, status );
2157 if ( client == 0 ) {
2158 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2159 error( RtAudioError::WARNING );
2164 // The handle must have been created on an earlier pass.
2165 client = handle->client;
2169 std::string port, previousPort, deviceName;
2170 unsigned int nPorts = 0, nDevices = 0;
2171 ports = jack_get_ports( client, NULL, NULL, 0 );
2173 // Parse the port names up to the first colon (:).
2176 port = (char *) ports[ nPorts ];
2177 iColon = port.find(":");
2178 if ( iColon != std::string::npos ) {
2179 port = port.substr( 0, iColon );
2180 if ( port != previousPort ) {
2181 if ( nDevices == device ) deviceName = port;
2183 previousPort = port;
2186 } while ( ports[++nPorts] );
2190 if ( device >= nDevices ) {
2191 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
2195 // Count the available ports containing the client name as device
2196 // channels. Jack "input ports" equal RtAudio output channels.
2197 unsigned int nChannels = 0;
2198 unsigned long flag = JackPortIsInput;
2199 if ( mode == INPUT ) flag = JackPortIsOutput;
2200 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2202 while ( ports[ nChannels ] ) nChannels++;
2206 // Compare the jack ports for specified client to the requested number of channels.
2207 if ( nChannels < (channels + firstChannel) ) {
2208 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2209 errorText_ = errorStream_.str();
2213 // Check the jack server sample rate.
2214 unsigned int jackRate = jack_get_sample_rate( client );
2215 if ( sampleRate != jackRate ) {
2216 jack_client_close( client );
2217 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2218 errorText_ = errorStream_.str();
2221 stream_.sampleRate = jackRate;
2223 // Get the latency of the JACK port.
2224 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2225 if ( ports[ firstChannel ] ) {
2227 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2228 // the range (usually the min and max are equal)
2229 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2230 // get the latency range
2231 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2232 // be optimistic, use the min!
2233 stream_.latency[mode] = latrange.min;
2234 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2238 // The jack server always uses 32-bit floating-point data.
2239 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2240 stream_.userFormat = format;
2242 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2243 else stream_.userInterleaved = true;
2245 // Jack always uses non-interleaved buffers.
2246 stream_.deviceInterleaved[mode] = false;
2248 // Jack always provides host byte-ordered data.
2249 stream_.doByteSwap[mode] = false;
2251 // Get the buffer size. The buffer size and number of buffers
2252 // (periods) is set when the jack server is started.
2253 stream_.bufferSize = (int) jack_get_buffer_size( client );
2254 *bufferSize = stream_.bufferSize;
2256 stream_.nDeviceChannels[mode] = channels;
2257 stream_.nUserChannels[mode] = channels;
2259 // Set flags for buffer conversion.
2260 stream_.doConvertBuffer[mode] = false;
2261 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2262 stream_.doConvertBuffer[mode] = true;
2263 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2264 stream_.nUserChannels[mode] > 1 )
2265 stream_.doConvertBuffer[mode] = true;
2267 // Allocate our JackHandle structure for the stream.
2268 if ( handle == 0 ) {
2270 handle = new JackHandle;
2272 catch ( std::bad_alloc& ) {
2273 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2277 if ( pthread_cond_init(&handle->condition, NULL) ) {
2278 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2281 stream_.apiHandle = (void *) handle;
2282 handle->client = client;
2284 handle->deviceName[mode] = deviceName;
2286 // Allocate necessary internal buffers.
2287 unsigned long bufferBytes;
2288 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2289 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2290 if ( stream_.userBuffer[mode] == NULL ) {
2291 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2295 if ( stream_.doConvertBuffer[mode] ) {
2297 bool makeBuffer = true;
2298 if ( mode == OUTPUT )
2299 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2300 else { // mode == INPUT
2301 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2302 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2303 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2304 if ( bufferBytes < bytesOut ) makeBuffer = false;
2309 bufferBytes *= *bufferSize;
2310 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2311 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2312 if ( stream_.deviceBuffer == NULL ) {
2313 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2319 // Allocate memory for the Jack ports (channels) identifiers.
2320 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2321 if ( handle->ports[mode] == NULL ) {
2322 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2326 stream_.device[mode] = device;
2327 stream_.channelOffset[mode] = firstChannel;
2328 stream_.state = STREAM_STOPPED;
2329 stream_.callbackInfo.object = (void *) this;
2331 if ( stream_.mode == OUTPUT && mode == INPUT )
2332 // We had already set up the stream for output.
2333 stream_.mode = DUPLEX;
2335 stream_.mode = mode;
2336 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2337 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
2338 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2341 // Register our ports.
2343 if ( mode == OUTPUT ) {
2344 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2345 snprintf( label, 64, "outport %d", i );
2346 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2347 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2351 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2352 snprintf( label, 64, "inport %d", i );
2353 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2354 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2358 // Setup the buffer conversion information structure. We don't use
2359 // buffers to do channel offsets, so we override that parameter
2361 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2363 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
2369 pthread_cond_destroy( &handle->condition );
2370 jack_client_close( handle->client );
2372 if ( handle->ports[0] ) free( handle->ports[0] );
2373 if ( handle->ports[1] ) free( handle->ports[1] );
2376 stream_.apiHandle = 0;
2379 for ( int i=0; i<2; i++ ) {
2380 if ( stream_.userBuffer[i] ) {
2381 free( stream_.userBuffer[i] );
2382 stream_.userBuffer[i] = 0;
2386 if ( stream_.deviceBuffer ) {
2387 free( stream_.deviceBuffer );
2388 stream_.deviceBuffer = 0;
2394 void RtApiJack :: closeStream( void )
2396 if ( stream_.state == STREAM_CLOSED ) {
2397 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2398 error( RtAudioError::WARNING );
2402 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2405 if ( stream_.state == STREAM_RUNNING )
2406 jack_deactivate( handle->client );
2408 jack_client_close( handle->client );
2412 if ( handle->ports[0] ) free( handle->ports[0] );
2413 if ( handle->ports[1] ) free( handle->ports[1] );
2414 pthread_cond_destroy( &handle->condition );
2416 stream_.apiHandle = 0;
2419 for ( int i=0; i<2; i++ ) {
2420 if ( stream_.userBuffer[i] ) {
2421 free( stream_.userBuffer[i] );
2422 stream_.userBuffer[i] = 0;
2426 if ( stream_.deviceBuffer ) {
2427 free( stream_.deviceBuffer );
2428 stream_.deviceBuffer = 0;
2431 stream_.mode = UNINITIALIZED;
2432 stream_.state = STREAM_CLOSED;
2435 void RtApiJack :: startStream( void )
2438 if ( stream_.state == STREAM_RUNNING ) {
2439 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2440 error( RtAudioError::WARNING );
2444 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2445 int result = jack_activate( handle->client );
2447 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2453 // Get the list of available ports.
2454 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
2456 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
2457 if ( ports == NULL) {
2458 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2462 // Now make the port connections. Since RtAudio wasn't designed to
2463 // allow the user to select particular channels of a device, we'll
2464 // just open the first "nChannels" ports with offset.
2465 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2467 if ( ports[ stream_.channelOffset[0] + i ] )
2468 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2471 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
2478 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
2480 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
2481 if ( ports == NULL) {
2482 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2486 // Now make the port connections. See note above.
2487 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2489 if ( ports[ stream_.channelOffset[1] + i ] )
2490 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2493 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
2500 handle->drainCounter = 0;
2501 handle->internalDrain = false;
2502 stream_.state = STREAM_RUNNING;
2505 if ( result == 0 ) return;
2506 error( RtAudioError::SYSTEM_ERROR );
2509 void RtApiJack :: stopStream( void )
2512 if ( stream_.state == STREAM_STOPPED ) {
2513 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2514 error( RtAudioError::WARNING );
2518 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2519 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2521 if ( handle->drainCounter == 0 ) {
2522 handle->drainCounter = 2;
2523 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2527 jack_deactivate( handle->client );
2528 stream_.state = STREAM_STOPPED;
2531 void RtApiJack :: abortStream( void )
2534 if ( stream_.state == STREAM_STOPPED ) {
2535 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2536 error( RtAudioError::WARNING );
2540 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2541 handle->drainCounter = 2;
2546 // This function will be called by a spawned thread when the user
2547 // callback function signals that the stream should be stopped or
2548 // aborted. It is necessary to handle it this way because the
2549 // callbackEvent() function must return before the jack_deactivate()
2550 // function will return.
2551 static void *jackStopStream( void *ptr )
2553 CallbackInfo *info = (CallbackInfo *) ptr;
2554 RtApiJack *object = (RtApiJack *) info->object;
2556 object->stopStream();
2557 pthread_exit( NULL );
2560 bool RtApiJack :: callbackEvent( unsigned long nframes )
2562 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2563 if ( stream_.state == STREAM_CLOSED ) {
2564 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2565 error( RtAudioError::WARNING );
2568 if ( stream_.bufferSize != nframes ) {
2569 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2570 error( RtAudioError::WARNING );
2574 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2575 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2577 // Check if we were draining the stream and signal is finished.
2578 if ( handle->drainCounter > 3 ) {
2579 ThreadHandle threadId;
2581 stream_.state = STREAM_STOPPING;
2582 if ( handle->internalDrain == true )
2583 pthread_create( &threadId, NULL, jackStopStream, info );
2585 pthread_cond_signal( &handle->condition );
2589 // Invoke user callback first, to get fresh output data.
2590 if ( handle->drainCounter == 0 ) {
2591 RtAudioCallback callback = (RtAudioCallback) info->callback;
2592 double streamTime = getStreamTime();
2593 RtAudioStreamStatus status = 0;
2594 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2595 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2596 handle->xrun[0] = false;
2598 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2599 status |= RTAUDIO_INPUT_OVERFLOW;
2600 handle->xrun[1] = false;
2602 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2603 stream_.bufferSize, streamTime, status, info->userData );
2604 if ( cbReturnValue == 2 ) {
2605 stream_.state = STREAM_STOPPING;
2606 handle->drainCounter = 2;
2608 pthread_create( &id, NULL, jackStopStream, info );
2611 else if ( cbReturnValue == 1 ) {
2612 handle->drainCounter = 1;
2613 handle->internalDrain = true;
2617 jack_default_audio_sample_t *jackbuffer;
2618 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2619 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2621 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2623 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2624 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2625 memset( jackbuffer, 0, bufferBytes );
2629 else if ( stream_.doConvertBuffer[0] ) {
2631 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2633 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2634 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2635 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2638 else { // no buffer conversion
2639 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2640 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2641 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2646 // Don't bother draining input
2647 if ( handle->drainCounter ) {
2648 handle->drainCounter++;
2652 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2654 if ( stream_.doConvertBuffer[1] ) {
2655 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2656 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2657 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2659 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2661 else { // no buffer conversion
2662 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2663 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2664 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2670 RtApi::tickStreamTime();
2673 //******************** End of __UNIX_JACK__ *********************//
2676 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2678 // The ASIO API is designed around a callback scheme, so this
2679 // implementation is similar to that used for OS-X CoreAudio and Linux
2680 // Jack. The primary constraint with ASIO is that it only allows
2681 // access to a single driver at a time. Thus, it is not possible to
2682 // have more than one simultaneous RtAudio stream.
2684 // This implementation also requires a number of external ASIO files
2685 // and a few global variables. The ASIO callback scheme does not
2686 // allow for the passing of user data, so we must create a global
2687 // pointer to our callbackInfo structure.
2689 // On unix systems, we make use of a pthread condition variable.
2690 // Since there is no equivalent in Windows, I hacked something based
2691 // on information found in
2692 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2694 #include "asiosys.h"
2696 #include "iasiothiscallresolver.h"
2697 #include "asiodrivers.h"
2700 static AsioDrivers drivers;
2701 static ASIOCallbacks asioCallbacks;
2702 static ASIODriverInfo driverInfo;
2703 static CallbackInfo *asioCallbackInfo;
2704 static bool asioXRun;
2707 int drainCounter; // Tracks callback counts when draining
2708 bool internalDrain; // Indicates if stop is initiated from callback or not.
2709 ASIOBufferInfo *bufferInfos;
2713 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2716 // Function declarations (definitions at end of section)
2717 static const char* getAsioErrorString( ASIOError result );
2718 static void sampleRateChanged( ASIOSampleRate sRate );
2719 static long asioMessages( long selector, long value, void* message, double* opt );
2721 RtApiAsio :: RtApiAsio()
2723 // ASIO cannot run on a multi-threaded appartment. You can call
2724 // CoInitialize beforehand, but it must be for appartment threading
2725 // (in which case, CoInitilialize will return S_FALSE here).
2726 coInitialized_ = false;
2727 HRESULT hr = CoInitialize( NULL );
2729 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2730 error( RtAudioError::WARNING );
2732 coInitialized_ = true;
2734 drivers.removeCurrentDriver();
2735 driverInfo.asioVersion = 2;
2737 // See note in DirectSound implementation about GetDesktopWindow().
2738 driverInfo.sysRef = GetForegroundWindow();
2741 RtApiAsio :: ~RtApiAsio()
2743 if ( stream_.state != STREAM_CLOSED ) closeStream();
2744 if ( coInitialized_ ) CoUninitialize();
2747 unsigned int RtApiAsio :: getDeviceCount( void )
2749 return (unsigned int) drivers.asioGetNumDev();
2752 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2754 RtAudio::DeviceInfo info;
2755 info.probed = false;
2758 unsigned int nDevices = getDeviceCount();
2759 if ( nDevices == 0 ) {
2760 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2761 error( RtAudioError::INVALID_USE );
2765 if ( device >= nDevices ) {
2766 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2767 error( RtAudioError::INVALID_USE );
2771 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2772 if ( stream_.state != STREAM_CLOSED ) {
2773 if ( device >= devices_.size() ) {
2774 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2775 error( RtAudioError::WARNING );
2778 return devices_[ device ];
2781 char driverName[32];
2782 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2783 if ( result != ASE_OK ) {
2784 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2785 errorText_ = errorStream_.str();
2786 error( RtAudioError::WARNING );
2790 info.name = driverName;
2792 if ( !drivers.loadDriver( driverName ) ) {
2793 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2794 errorText_ = errorStream_.str();
2795 error( RtAudioError::WARNING );
2799 result = ASIOInit( &driverInfo );
2800 if ( result != ASE_OK ) {
2801 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2802 errorText_ = errorStream_.str();
2803 error( RtAudioError::WARNING );
2807 // Determine the device channel information.
2808 long inputChannels, outputChannels;
2809 result = ASIOGetChannels( &inputChannels, &outputChannels );
2810 if ( result != ASE_OK ) {
2811 drivers.removeCurrentDriver();
2812 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2813 errorText_ = errorStream_.str();
2814 error( RtAudioError::WARNING );
2818 info.outputChannels = outputChannels;
2819 info.inputChannels = inputChannels;
2820 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2821 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2823 // Determine the supported sample rates.
2824 info.sampleRates.clear();
2825 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2826 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2827 if ( result == ASE_OK ) {
2828 info.sampleRates.push_back( SAMPLE_RATES[i] );
2830 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2831 info.preferredSampleRate = SAMPLE_RATES[i];
2835 // Determine supported data types ... just check first channel and assume rest are the same.
2836 ASIOChannelInfo channelInfo;
2837 channelInfo.channel = 0;
2838 channelInfo.isInput = true;
2839 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2840 result = ASIOGetChannelInfo( &channelInfo );
2841 if ( result != ASE_OK ) {
2842 drivers.removeCurrentDriver();
2843 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2844 errorText_ = errorStream_.str();
2845 error( RtAudioError::WARNING );
2849 info.nativeFormats = 0;
2850 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2851 info.nativeFormats |= RTAUDIO_SINT16;
2852 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2853 info.nativeFormats |= RTAUDIO_SINT32;
2854 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2855 info.nativeFormats |= RTAUDIO_FLOAT32;
2856 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2857 info.nativeFormats |= RTAUDIO_FLOAT64;
2858 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2859 info.nativeFormats |= RTAUDIO_SINT24;
2861 if ( info.outputChannels > 0 )
2862 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2863 if ( info.inputChannels > 0 )
2864 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
2867 drivers.removeCurrentDriver();
// Static ASIO buffer-switch callback: the driver calls this with the index of
// the half-buffer that is ready; we forward it to the RtApiAsio instance that
// was stashed in the file-scope asioCallbackInfo before callback registration.
// (Opening/closing braces are elided from this extract.)
2871 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2873 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2874 object->callbackEvent( index );
// Probe and cache info for every ASIO device into devices_.  Called before a
// stream is opened because getDeviceInfo() cannot probe while a stream is open
// (ASIO allows only one loaded driver at a time — see probeDeviceOpen below).
2877 void RtApiAsio :: saveDeviceInfo( void )
2881 unsigned int nDevices = getDeviceCount();
2882 devices_.resize( nDevices );
2883 for ( unsigned int i=0; i<nDevices; i++ )
2884 devices_[i] = getDeviceInfo( i );
// Open and configure an ASIO device for one direction of a stream.
// For a duplex stream the OUTPUT side must be opened first; opening the INPUT
// side then reuses the already-loaded driver and the output's buffer size.
// Params: device index, stream mode (OUTPUT/INPUT), channel count and offset,
// sample rate, user sample format, in/out requested buffer size, options.
// Returns SUCCESS/FAILURE (the explicit "return FAILURE;"/"goto error;" lines
// on the error paths appear to be elided from this extract — do not treat the
// gaps after each errorText_ assignment as fall-through).
2887 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2888 unsigned int firstChannel, unsigned int sampleRate,
2889 RtAudioFormat format, unsigned int *bufferSize,
2890 RtAudio::StreamOptions *options )
2891 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// A "duplex input" is an INPUT open requested while an OUTPUT stream exists.
2893 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2895 // For ASIO, a duplex stream MUST use the same driver.
2896 if ( isDuplexInput && stream_.device[0] != device ) {
2897 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2901 char driverName[32];
2902 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2903 if ( result != ASE_OK ) {
2904 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2905 errorText_ = errorStream_.str();
2909 // Only load the driver once for duplex stream.
2910 if ( !isDuplexInput ) {
2911 // The getDeviceInfo() function will not work when a stream is open
2912 // because ASIO does not allow multiple devices to run at the same
2913 // time. Thus, we'll probe the system before opening a stream and
2914 // save the results for use by getDeviceInfo().
2915 this->saveDeviceInfo();
2917 if ( !drivers.loadDriver( driverName ) ) {
2918 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2919 errorText_ = errorStream_.str();
2923 result = ASIOInit( &driverInfo );
2924 if ( result != ASE_OK ) {
2925 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2926 errorText_ = errorStream_.str();
2931 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
2932 bool buffersAllocated = false;
2933 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
2934 unsigned int nChannels;
2937 // Check the device channel count.
2938 long inputChannels, outputChannels;
2939 result = ASIOGetChannels( &inputChannels, &outputChannels );
2940 if ( result != ASE_OK ) {
2941 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2942 errorText_ = errorStream_.str();
// Requested channels + offset must fit within the device's channel count.
2946 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
2947 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
2948 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
2949 errorText_ = errorStream_.str();
2952 stream_.nDeviceChannels[mode] = channels;
2953 stream_.nUserChannels[mode] = channels;
2954 stream_.channelOffset[mode] = firstChannel;
2956 // Verify the sample rate is supported.
2957 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
2958 if ( result != ASE_OK ) {
2959 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
2960 errorText_ = errorStream_.str();
2964 // Get the current sample rate
2965 ASIOSampleRate currentRate;
// NOTE(review): "¤tRate" below is a mojibake of "&currentRate" (an HTML
// entity corruption of "&curren..."); restore "&currentRate" before compiling.
2966 result = ASIOGetSampleRate( ¤tRate );
2967 if ( result != ASE_OK ) {
2968 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
2969 errorText_ = errorStream_.str();
2973 // Set the sample rate only if necessary
2974 if ( currentRate != sampleRate ) {
2975 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
2976 if ( result != ASE_OK ) {
2977 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
2978 errorText_ = errorStream_.str();
2983 // Determine the driver data type.
2984 ASIOChannelInfo channelInfo;
2985 channelInfo.channel = 0;
2986 if ( mode == OUTPUT ) channelInfo.isInput = false;
2987 else channelInfo.isInput = true;
2988 result = ASIOGetChannelInfo( &channelInfo );
2989 if ( result != ASE_OK ) {
2990 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
2991 errorText_ = errorStream_.str();
2995 // Assuming WINDOWS host is always little-endian.
2996 stream_.doByteSwap[mode] = false;
2997 stream_.userFormat = format;
2998 stream_.deviceFormat[mode] = 0;
// Map the ASIO sample type to an RtAudio format; MSB (big-endian) variants
// additionally require a byte swap on this little-endian host.
2999 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3000 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3001 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3003 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3004 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3005 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3007 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3008 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3009 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3011 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3012 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3013 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3015 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3016 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3017 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3020 if ( stream_.deviceFormat[mode] == 0 ) {
3021 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3022 errorText_ = errorStream_.str();
3026 // Set the buffer size. For a duplex stream, this will end up
3027 // setting the buffer size based on the input constraints, which
3029 long minSize, maxSize, preferSize, granularity;
3030 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3031 if ( result != ASE_OK ) {
3032 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3033 errorText_ = errorStream_.str();
3037 if ( isDuplexInput ) {
3038 // When this is the duplex input (output was opened before), then we have to use the same
3039 // buffersize as the output, because it might use the preferred buffer size, which most
3040 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3041 // So instead of throwing an error, make them equal. The caller uses the reference
3042 // to the "bufferSize" param as usual to set up processing buffers.
3044 *bufferSize = stream_.bufferSize;
// Clamp the requested size into [minSize, maxSize]; granularity == -1 means
// the driver accepts only power-of-two sizes, a positive granularity means
// the size must be an even multiple of it.
3047 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3048 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3049 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3050 else if ( granularity == -1 ) {
3051 // Make sure bufferSize is a power of two.
3052 int log2_of_min_size = 0;
3053 int log2_of_max_size = 0;
// Find the highest set bit of minSize and maxSize (their integer log2).
3055 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3056 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3057 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
// Choose the power of two within [min, max] closest to the requested size.
3060 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3061 int min_delta_num = log2_of_min_size;
3063 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3064 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3065 if (current_delta < min_delta) {
3066 min_delta = current_delta;
3071 *bufferSize = ( (unsigned int)1 << min_delta_num );
3072 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3073 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3075 else if ( granularity != 0 ) {
3076 // Set to an even multiple of granularity, rounding up.
3077 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3082 // we don't use it anymore, see above!
3083 // Just left it here for the case...
3084 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3085 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3090 stream_.bufferSize = *bufferSize;
3091 stream_.nBuffers = 2;
3093 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3094 else stream_.userInterleaved = true;
3096 // ASIO always uses non-interleaved buffers.
3097 stream_.deviceInterleaved[mode] = false;
3099 // Allocate, if necessary, our AsioHandle structure for the stream.
3100 if ( handle == 0 ) {
3102 handle = new AsioHandle;
3104 catch ( std::bad_alloc& ) {
3105 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3108 handle->bufferInfos = 0;
3110 // Create a manual-reset event.
3111 handle->condition = CreateEvent( NULL, // no security
3112 TRUE, // manual-reset
3113 FALSE, // non-signaled initially
3115 stream_.apiHandle = (void *) handle;
3118 // Create the ASIO internal buffers. Since RtAudio sets up input
3119 // and output separately, we'll have to dispose of previously
3120 // created output buffers for a duplex stream.
3121 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3122 ASIODisposeBuffers();
3123 if ( handle->bufferInfos ) free( handle->bufferInfos );
3126 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3128 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3129 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3130 if ( handle->bufferInfos == NULL ) {
3131 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3132 errorText_ = errorStream_.str();
// Output channels are listed first in bufferInfos (isInput = ASIOFalse),
// followed by input channels (isInput = ASIOTrue).
3136 ASIOBufferInfo *infos;
3137 infos = handle->bufferInfos;
3138 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3139 infos->isInput = ASIOFalse;
3140 infos->channelNum = i + stream_.channelOffset[0];
3141 infos->buffers[0] = infos->buffers[1] = 0;
3143 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3144 infos->isInput = ASIOTrue;
3145 infos->channelNum = i + stream_.channelOffset[1];
3146 infos->buffers[0] = infos->buffers[1] = 0;
3149 // prepare for callbacks
3150 stream_.sampleRate = sampleRate;
3151 stream_.device[mode] = device;
3152 stream_.mode = isDuplexInput ? DUPLEX : mode;
3154 // store this class instance before registering callbacks, that are going to use it
3155 asioCallbackInfo = &stream_.callbackInfo;
3156 stream_.callbackInfo.object = (void *) this;
3158 // Set up the ASIO callback structure and create the ASIO data buffers.
3159 asioCallbacks.bufferSwitch = &bufferSwitch;
3160 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3161 asioCallbacks.asioMessage = &asioMessages;
3162 asioCallbacks.bufferSwitchTimeInfo = NULL;
3163 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3164 if ( result != ASE_OK ) {
3165 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3166 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
3167 // in that case, let's be naïve and try that instead
3168 *bufferSize = preferSize;
3169 stream_.bufferSize = *bufferSize;
3170 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3173 if ( result != ASE_OK ) {
3174 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3175 errorText_ = errorStream_.str();
3178 buffersAllocated = true;
3179 stream_.state = STREAM_STOPPED;
3181 // Set flags for buffer conversion.
3182 stream_.doConvertBuffer[mode] = false;
3183 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3184 stream_.doConvertBuffer[mode] = true;
3185 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3186 stream_.nUserChannels[mode] > 1 )
3187 stream_.doConvertBuffer[mode] = true;
3189 // Allocate necessary internal buffers
3190 unsigned long bufferBytes;
3191 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3192 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3193 if ( stream_.userBuffer[mode] == NULL ) {
3194 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3198 if ( stream_.doConvertBuffer[mode] ) {
3200 bool makeBuffer = true;
3201 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
// For duplex input, reuse the output-side device buffer when it is already
// large enough for the input side.
3202 if ( isDuplexInput && stream_.deviceBuffer ) {
3203 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3204 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3208 bufferBytes *= *bufferSize;
3209 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3210 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3211 if ( stream_.deviceBuffer == NULL ) {
3212 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3218 // Determine device latencies
3219 long inputLatency, outputLatency;
3220 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3221 if ( result != ASE_OK ) {
3222 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3223 errorText_ = errorStream_.str();
3224 error( RtAudioError::WARNING); // warn but don't fail
3227 stream_.latency[0] = outputLatency;
3228 stream_.latency[1] = inputLatency;
3231 // Setup the buffer conversion information structure. We don't use
3232 // buffers to do channel offsets, so we override that parameter
3234 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
// Error-cleanup path (reached via the elided "goto error" statements above).
3239 if ( !isDuplexInput ) {
3240 // the cleanup for error in the duplex input, is done by RtApi::openStream
3241 // So we clean up for single channel only
3243 if ( buffersAllocated )
3244 ASIODisposeBuffers();
3246 drivers.removeCurrentDriver();
3249 CloseHandle( handle->condition );
3250 if ( handle->bufferInfos )
3251 free( handle->bufferInfos );
3254 stream_.apiHandle = 0;
3258 if ( stream_.userBuffer[mode] ) {
3259 free( stream_.userBuffer[mode] );
3260 stream_.userBuffer[mode] = 0;
3263 if ( stream_.deviceBuffer ) {
3264 free( stream_.deviceBuffer );
3265 stream_.deviceBuffer = 0;
3270 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Close the open stream: stop it if running, dispose the ASIO buffers, unload
// the driver, and free the AsioHandle plus all user/device buffers.
3272 void RtApiAsio :: closeStream()
3274 if ( stream_.state == STREAM_CLOSED ) {
3275 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3276 error( RtAudioError::WARNING );
3280 if ( stream_.state == STREAM_RUNNING ) {
3281 stream_.state = STREAM_STOPPED;
// NOTE(review): a driver stop call on the running path appears to be elided
// from this extract (gap between lines 3281 and 3284) — verify in full file.
3284 ASIODisposeBuffers();
3285 drivers.removeCurrentDriver();
3287 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3289 CloseHandle( handle->condition );
3290 if ( handle->bufferInfos )
3291 free( handle->bufferInfos );
3293 stream_.apiHandle = 0;
// Free both the output ([0]) and input ([1]) user buffers.
3296 for ( int i=0; i<2; i++ ) {
3297 if ( stream_.userBuffer[i] ) {
3298 free( stream_.userBuffer[i] );
3299 stream_.userBuffer[i] = 0;
3303 if ( stream_.deviceBuffer ) {
3304 free( stream_.deviceBuffer );
3305 stream_.deviceBuffer = 0;
3308 stream_.mode = UNINITIALIZED;
3309 stream_.state = STREAM_CLOSED;
// File-scope flag, cleared in startStream(). NOTE(review): presumably set by
// the stop path to suppress redundant stop requests — the setter is not
// visible in this extract; confirm against the full file. Not thread-safe.
3312 bool stopThreadCalled = false;
// Start the ASIO device and mark the stream running.
// On ASIOStart() failure, falls through to error( SYSTEM_ERROR ) at the end.
3314 void RtApiAsio :: startStream()
3317 if ( stream_.state == STREAM_RUNNING ) {
3318 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3319 error( RtAudioError::WARNING );
3323 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3324 ASIOError result = ASIOStart();
3325 if ( result != ASE_OK ) {
3326 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3327 errorText_ = errorStream_.str();
// Reset drain bookkeeping and the stop-signal event before running.
3331 handle->drainCounter = 0;
3332 handle->internalDrain = false;
3333 ResetEvent( handle->condition );
3334 stream_.state = STREAM_RUNNING;
3338 stopThreadCalled = false;
3340 if ( result == ASE_OK ) return;
3341 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream. For output/duplex, first request a drain (drainCounter = 2
// makes callbackEvent() write zeros) and block on the handle's manual-reset
// event until the callback signals the drain is complete, then call ASIOStop().
3344 void RtApiAsio :: stopStream()
3347 if ( stream_.state == STREAM_STOPPED ) {
3348 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3349 error( RtAudioError::WARNING );
3353 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3354 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3355 if ( handle->drainCounter == 0 ) {
3356 handle->drainCounter = 2;
3357 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3361 stream_.state = STREAM_STOPPED;
3363 ASIOError result = ASIOStop();
3364 if ( result != ASE_OK ) {
3365 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3366 errorText_ = errorStream_.str();
3369 if ( result == ASE_OK ) return;
3370 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream. For ASIO this is identical to stopStream() — see the
// retained comment below for why the immediate-abort path was removed.
// (The trailing stopStream() call is elided from this extract.)
3373 void RtApiAsio :: abortStream()
3376 if ( stream_.state == STREAM_STOPPED ) {
3377 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3378 error( RtAudioError::WARNING );
3382 // The following lines were commented-out because some behavior was
3383 // noted where the device buffers need to be zeroed to avoid
3384 // continuing sound, even when the device buffers are completely
3385 // disposed. So now, calling abort is the same as calling stop.
3386 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3387 // handle->drainCounter = 2;
3391 // This function will be called by a spawned thread when the user
3392 // callback function signals that the stream should be stopped or
3393 // aborted. It is necessary to handle it this way because the
3394 // callbackEvent() function must return before the ASIOStop()
3395 // function will return.
// Thread entry point (started via _beginthreadex from callbackEvent): recovers
// the RtApiAsio instance from the CallbackInfo and stops the stream, since
// ASIOStop() cannot be called from inside the driver callback itself.
3396 static unsigned __stdcall asioStopStream( void *ptr )
3398 CallbackInfo *info = (CallbackInfo *) ptr;
3399 RtApiAsio *object = (RtApiAsio *) info->object;
3401 object->stopStream();
// Per-buffer worker invoked from bufferSwitch() with the driver's half-buffer
// index (0 or 1). Handles drain/stop signaling, invokes the user callback,
// and copies/converts audio between user buffers and the ASIO channel buffers.
// Returns SUCCESS (the explicit returns on some paths are elided here).
3406 bool RtApiAsio :: callbackEvent( long bufferIndex )
3408 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3409 if ( stream_.state == STREAM_CLOSED ) {
3410 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3411 error( RtAudioError::WARNING );
3415 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3416 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3418 // Check if we were draining the stream and signal if finished.
3419 if ( handle->drainCounter > 3 ) {
3421 stream_.state = STREAM_STOPPING;
// External drain (stopStream() is blocked waiting): wake it via the event.
// Internal drain (callback returned 1): spawn a thread to call stopStream().
3422 if ( handle->internalDrain == false )
3423 SetEvent( handle->condition );
3424 else { // spawn a thread to stop the stream
3426 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3427 &stream_.callbackInfo, 0, &threadId );
3432 // Invoke user callback to get fresh output data UNLESS we are
3434 if ( handle->drainCounter == 0 ) {
3435 RtAudioCallback callback = (RtAudioCallback) info->callback;
3436 double streamTime = getStreamTime();
3437 RtAudioStreamStatus status = 0;
3438 if ( stream_.mode != INPUT && asioXRun == true ) {
3439 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3442 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3443 status |= RTAUDIO_INPUT_OVERFLOW;
// User callback contract: 0 = continue, 1 = drain output then stop,
// 2 = abort immediately.
3446 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3447 stream_.bufferSize, streamTime, status, info->userData );
3448 if ( cbReturnValue == 2 ) {
3449 stream_.state = STREAM_STOPPING;
3450 handle->drainCounter = 2;
3452 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3453 &stream_.callbackInfo, 0, &threadId );
3456 else if ( cbReturnValue == 1 ) {
3457 handle->drainCounter = 1;
3458 handle->internalDrain = true;
3462 unsigned int nChannels, bufferBytes, i, j;
3463 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3464 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3466 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3468 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3470 for ( i=0, j=0; i<nChannels; i++ ) {
3471 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3472 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
// Format/interleave conversion needed: convert into deviceBuffer, then
// de-interleave channel-by-channel into the ASIO output buffers.
3476 else if ( stream_.doConvertBuffer[0] ) {
3478 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3479 if ( stream_.doByteSwap[0] )
3480 byteSwapBuffer( stream_.deviceBuffer,
3481 stream_.bufferSize * stream_.nDeviceChannels[0],
3482 stream_.deviceFormat[0] );
3484 for ( i=0, j=0; i<nChannels; i++ ) {
3485 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3486 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3487 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
// No conversion needed: copy straight from the user buffer.
3493 if ( stream_.doByteSwap[0] )
3494 byteSwapBuffer( stream_.userBuffer[0],
3495 stream_.bufferSize * stream_.nUserChannels[0],
3496 stream_.userFormat );
3498 for ( i=0, j=0; i<nChannels; i++ ) {
3499 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3500 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3501 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3507 // Don't bother draining input
3508 if ( handle->drainCounter ) {
3509 handle->drainCounter++;
3513 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3515 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3517 if (stream_.doConvertBuffer[1]) {
3519 // Always interleave ASIO input data.
3520 for ( i=0, j=0; i<nChannels; i++ ) {
3521 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3522 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3523 handle->bufferInfos[i].buffers[bufferIndex],
3527 if ( stream_.doByteSwap[1] )
3528 byteSwapBuffer( stream_.deviceBuffer,
3529 stream_.bufferSize * stream_.nDeviceChannels[1],
3530 stream_.deviceFormat[1] );
3531 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// No conversion needed: copy ASIO input channels straight to the user buffer.
3535 for ( i=0, j=0; i<nChannels; i++ ) {
3536 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3537 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3538 handle->bufferInfos[i].buffers[bufferIndex],
3543 if ( stream_.doByteSwap[1] )
3544 byteSwapBuffer( stream_.userBuffer[1],
3545 stream_.bufferSize * stream_.nUserChannels[1],
3546 stream_.userFormat );
3551 // The following call was suggested by Malte Clasen. While the API
3552 // documentation indicates it should not be required, some device
3553 // drivers apparently do not function correctly without it.
// (The call referenced above appears to be elided from this extract.)
3556 RtApi::tickStreamTime();
// ASIO driver notification that the sample rate changed (typically under
// external sync): stops the stream and reports the event on stderr.
3560 static void sampleRateChanged( ASIOSampleRate sRate )
3562 // The ASIO documentation says that this usually only happens during
3563 // external sync. Audio processing is not stopped by the driver,
3564 // actual sample rate might not have even changed, maybe only the
3565 // sample rate status of an AES/EBU or S/PDIF digital input at the
3568 RtApi *object = (RtApi *) asioCallbackInfo->object;
3570 object->stopStream();
3572 catch ( RtAudioError &exception ) {
3573 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3577 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
// Host message handler registered with the driver via asioCallbacks.
// Returns nonzero for selectors this host supports. (The "ret = 1;" /
// "break;" lines for each case are elided from this extract.)
3580 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3584 switch( selector ) {
3585 case kAsioSelectorSupported:
3586 if ( value == kAsioResetRequest
3587 || value == kAsioEngineVersion
3588 || value == kAsioResyncRequest
3589 || value == kAsioLatenciesChanged
3590 // The following three were added for ASIO 2.0, you don't
3591 // necessarily have to support them.
3592 || value == kAsioSupportsTimeInfo
3593 || value == kAsioSupportsTimeCode
3594 || value == kAsioSupportsInputMonitor)
3597 case kAsioResetRequest:
3598 // Defer the task and perform the reset of the driver during the
3599 // next "safe" situation. You cannot reset the driver right now,
3600 // as this code is called from the driver. Resetting the driver
3601 // means destructing it completely: ASIOStop(),
3602 // ASIODisposeBuffers(), destruction. Afterwards you initialize the
3604 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3607 case kAsioResyncRequest:
3608 // This informs the application that the driver encountered some
3609 // non-fatal data loss. It is used for synchronization purposes
3610 // of different media. Added mainly to work around the Win16Mutex
3611 // problems in Windows 95/98 with the Windows Multimedia system,
3612 // which could lose data because the Mutex was held too long by
3613 // another thread. However a driver can issue it in other
3615 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3619 case kAsioLatenciesChanged:
3620 // This will inform the host application that the drivers'
3621 // latencies changed. Beware, this does not mean that the
3622 // buffer sizes have changed! You might need to update internal
3624 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3627 case kAsioEngineVersion:
3628 // Return the supported ASIO version of the host application. If
3629 // a host application does not implement this selector, ASIO 1.0
3630 // is assumed by the driver.
3633 case kAsioSupportsTimeInfo:
3634 // Informs the driver whether the
3635 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3636 // For compatibility with ASIO 1.0 drivers the host application
3637 // should always support the "old" bufferSwitch method, too.
3640 case kAsioSupportsTimeCode:
3641 // Informs the driver whether application is interested in time
3642 // code info. If an application does not need to know about time
3643 // code, the driver has less work to do.
// Map an ASIOError code to a human-readable message via a static lookup
// table; returns "Unknown error." for codes not in the table.
// (The local Messages struct definition is elided from this extract.)
3650 static const char* getAsioErrorString( ASIOError result )
3658 static const Messages m[] =
3660 { ASE_NotPresent, "Hardware input or output is not present or available." },
3661 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3662 { ASE_InvalidParameter, "Invalid input parameter." },
3663 { ASE_InvalidMode, "Invalid mode." },
3664 { ASE_SPNotAdvancing, "Sample position not advancing." },
3665 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3666 { ASE_NoMemory, "Not enough memory to complete the request." }
// Linear scan of the table; size computed with the classic sizeof idiom.
3669 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3670 if ( m[i].value == result ) return m[i].message;
3672 return "Unknown error.";
3675 //******************** End of __WINDOWS_ASIO__ *********************//
3679 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3681 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3682 // - Introduces support for the Windows WASAPI API
3683 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3684 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3685 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3690 #include <audioclient.h>
3692 #include <mmdeviceapi.h>
3693 #include <functiondiscoverykeys_devpkey.h>
3695 //=============================================================================
3697 #define SAFE_RELEASE( objectPtr )\
3700 objectPtr->Release();\
3704 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3706 //-----------------------------------------------------------------------------
3708 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3709 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3710 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3711 // provide intermediate storage for read / write synchronization.
3725 // sets the length of the internal ring buffer
// (Re)size the ring buffer to bufferSize samples of formatBytes bytes each,
// zero-initialized via calloc.  NOTE(review): lines between the visible ones
// (presumably freeing any previous buffer_) are elided from this extract.
3726 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3729 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3731 bufferSize_ = bufferSize;
3736 // attempt to push a buffer into the ring buffer at the current "in" index
// Push bufferSize samples from "buffer" into the ring buffer at the current
// "in" index, wrapping at bufferSize_.  Returns false when the input is
// invalid or when writing would overrun the unread ("out") region.
// NOTE(review): the enclosing switch( format ) header and its first case
// label, plus the break statements, are elided from this extract.
3737 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3739 if ( !buffer || // incoming buffer is NULL
3740 bufferSize == 0 || // incoming buffer has no data
3741 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "out" index so the overlap test below works across the wrap point.
3746 unsigned int relOutIndex = outIndex_;
3747 unsigned int inIndexEnd = inIndex_ + bufferSize;
3748 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3749 relOutIndex += bufferSize_;
3752 // "in" index can end on the "out" index but cannot begin at it
3753 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
3754 return false; // not enough space between "in" index and "out" index
3757 // copy buffer from external to internal
3758 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3759 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3760 int fromInSize = bufferSize - fromZeroSize;
// Per sample format, two memcpy's: the run up to the end of the ring
// (fromInSize samples), then the wrapped remainder (fromZeroSize samples)
// at the start.  Indices address typed arrays, so they count samples.
3765 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3766 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3768 case RTAUDIO_SINT16:
3769 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3770 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3772 case RTAUDIO_SINT24:
3773 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3774 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3776 case RTAUDIO_SINT32:
3777 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3778 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3780 case RTAUDIO_FLOAT32:
3781 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3782 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3784 case RTAUDIO_FLOAT64:
3785 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3786 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3790 // update "in" index
3791 inIndex_ += bufferSize;
3792 inIndex_ %= bufferSize_;
3797 // attempt to pull a buffer from the ring buffer from the current "out" index
// Copies bufferSize samples of the given format out of the ring into `buffer`.
// Returns false (without consuming anything) when the incoming buffer is
// invalid or when the requested region would cross the writer's "in" index,
// i.e. the data has not been produced yet.  On success the "out" index
// advances modulo the ring capacity (bufferSize_).
3798 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3800 if ( !buffer || // incoming buffer is NULL
3801 bufferSize == 0 || // incoming buffer has no data
3802 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "in" index so it can be compared against the (possibly
// wrapping) read region [outIndex_, outIndexEnd).
3807 unsigned int relInIndex = inIndex_;
3808 unsigned int outIndexEnd = outIndex_ + bufferSize;
3809 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3810 relInIndex += bufferSize_;
3813 // "out" index can begin at and end on the "in" index
3814 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
3815 return false; // not enough space between "out" index and "in" index
3818 // copy buffer from internal to external
// fromZeroSize: samples that wrap around to the start of the ring;
// fromOutSize: samples readable contiguously starting at outIndex_.
3819 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3820 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3821 int fromOutSize = bufferSize - fromZeroSize;
// Per-format copy: indices are in samples, scaled to bytes via sizeof of
// the concrete sample type for each RtAudioFormat case.
3826 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3827 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3829 case RTAUDIO_SINT16:
3830 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3831 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3833 case RTAUDIO_SINT24:
3834 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3835 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3837 case RTAUDIO_SINT32:
3838 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3839 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3841 case RTAUDIO_FLOAT32:
3842 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3843 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3845 case RTAUDIO_FLOAT64:
3846 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3847 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3851 // update "out" index
3852 outIndex_ += bufferSize;
3853 outIndex_ %= bufferSize_;
3860 unsigned int bufferSize_; // total ring capacity, in samples of the stored format
3861 unsigned int inIndex_;    // next write position (producer side), in samples
3862 unsigned int outIndex_;   // next read position (consumer side), in samples
3865 //-----------------------------------------------------------------------------
3867 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3868 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
3869 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
3870 // This sample rate converter works best with conversions between one rate and its multiple.
//
// Parameters:
//   outBuffer      - destination, interleaved frames of `channelCount` channels
//   inBuffer       - source, interleaved frames of `channelCount` channels
//   inSampleCount  - number of input frames available in inBuffer
//   outSampleCount - [out] frames written: round( inSampleCount * outRate / inRate )
//   format         - sample format of both buffers (no format conversion here)
3871 void convertBufferWasapi( char* outBuffer,
3872 const char* inBuffer,
3873 const unsigned int& channelCount,
3874 const unsigned int& inSampleRate,
3875 const unsigned int& outSampleRate,
3876 const unsigned int& inSampleCount,
3877 unsigned int& outSampleCount,
3878 const RtAudioFormat& format )
3880 // calculate the new outSampleCount and relative sampleStep
3881 float sampleRatio = ( float ) outSampleRate / inSampleRate;
3882 float sampleRatioInv = ( float ) 1 / sampleRatio;
3883 float sampleStep = 1.0f / sampleRatio; // input frames consumed per output frame
3884 float inSampleFraction = 0.0f;         // running fractional input position
3886 outSampleCount = ( unsigned int ) std::roundf( inSampleCount * sampleRatio );
3888 // if inSampleRate is a multiple of outSampleRate (or vice versa) there's no need to interpolate
3889 if ( floor( sampleRatio ) == sampleRatio || floor( sampleRatioInv ) == sampleRatioInv )
3891 // frame-by-frame, copy each relative input sample into its corresponding output sample
3892 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
// truncate the fractional position to select the preceding input frame
3894 unsigned int inSample = ( unsigned int ) inSampleFraction;
3899 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
3901 case RTAUDIO_SINT16:
3902 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
3904 case RTAUDIO_SINT24:
3905 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
3907 case RTAUDIO_SINT32:
3908 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
3910 case RTAUDIO_FLOAT32:
3911 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
3913 case RTAUDIO_FLOAT64:
3914 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
3918 // jump to next in sample
3919 inSampleFraction += sampleStep;
3922 else // else interpolate
3924 // frame-by-frame, copy each relative input sample into its corresponding output sample
3925 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
3927 unsigned int inSample = ( unsigned int ) inSampleFraction;
3928 float inSampleDec = inSampleFraction - inSample; // fractional weight in [0,1)
3929 unsigned int frameInSample = inSample * channelCount;
3930 unsigned int frameOutSample = outSample * channelCount;
// Linear interpolation between input frame inSample and inSample+1.
// NOTE(review): `toSample` always reads frame inSample+1; when the last
// output sample maps to the final input frame this indexes one frame past
// inSampleCount -- confirm that callers over-allocate inBuffer.
3936 for ( unsigned int channel = 0; channel < channelCount; channel++ )
3938 char fromSample = ( ( char* ) inBuffer )[ frameInSample + channel ];
3939 char toSample = ( ( char* ) inBuffer )[ frameInSample + channelCount + channel ];
3940 char sampleDiff = ( char ) ( ( toSample - fromSample ) * inSampleDec );
3941 ( ( char* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
3945 case RTAUDIO_SINT16:
3947 for ( unsigned int channel = 0; channel < channelCount; channel++ )
3949 short fromSample = ( ( short* ) inBuffer )[ frameInSample + channel ];
3950 short toSample = ( ( short* ) inBuffer )[ frameInSample + channelCount + channel ];
3951 short sampleDiff = ( short ) ( ( toSample - fromSample ) * inSampleDec );
3952 ( ( short* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
3956 case RTAUDIO_SINT24:
3958 for ( unsigned int channel = 0; channel < channelCount; channel++ )
// S24 samples are widened to int for the arithmetic, then narrowed back
3960 int fromSample = ( ( S24* ) inBuffer )[ frameInSample + channel ].asInt();
3961 int toSample = ( ( S24* ) inBuffer )[ frameInSample + channelCount + channel ].asInt();
3962 int sampleDiff = ( int ) ( ( toSample - fromSample ) * inSampleDec );
3963 ( ( S24* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
3967 case RTAUDIO_SINT32:
3969 for ( unsigned int channel = 0; channel < channelCount; channel++ )
3971 int fromSample = ( ( int* ) inBuffer )[ frameInSample + channel ];
3972 int toSample = ( ( int* ) inBuffer )[ frameInSample + channelCount + channel ];
3973 int sampleDiff = ( int ) ( ( toSample - fromSample ) * inSampleDec );
3974 ( ( int* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
3978 case RTAUDIO_FLOAT32:
3980 for ( unsigned int channel = 0; channel < channelCount; channel++ )
3982 float fromSample = ( ( float* ) inBuffer )[ frameInSample + channel ];
3983 float toSample = ( ( float* ) inBuffer )[ frameInSample + channelCount + channel ];
3984 float sampleDiff = ( toSample - fromSample ) * inSampleDec;
3985 ( ( float* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
3989 case RTAUDIO_FLOAT64:
3991 for ( unsigned int channel = 0; channel < channelCount; channel++ )
3993 double fromSample = ( ( double* ) inBuffer )[ frameInSample + channel ];
3994 double toSample = ( ( double* ) inBuffer )[ frameInSample + channelCount + channel ];
3995 double sampleDiff = ( toSample - fromSample ) * inSampleDec;
3996 ( ( double* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
4002 // jump to next in sample
4003 inSampleFraction += sampleStep;
4008 //-----------------------------------------------------------------------------
4010 // A structure to hold various information related to the WASAPI implementation.
// All members start NULL, are populated lazily during stream setup, and are
// released/closed in RtApiWasapi::closeStream().
4013 IAudioClient* captureAudioClient;   // COM interface for the capture endpoint
4014 IAudioClient* renderAudioClient;    // COM interface for the render endpoint
4015 IAudioCaptureClient* captureClient; // capture service from captureAudioClient
4016 IAudioRenderClient* renderClient;   // render service from renderAudioClient
4017 HANDLE captureEvent;                // signaled when a capture buffer is ready
4021 : captureAudioClient( NULL ),
4022 renderAudioClient( NULL ),
4023 captureClient( NULL ),
4024 renderClient( NULL ),
4025 captureEvent( NULL ),
4026 renderEvent( NULL ) {}
4029 //=============================================================================
// Constructor: initializes COM for this thread and creates the MMDevice
// enumerator used by all subsequent device queries.  On enumerator failure a
// DRIVER_ERROR is raised via error().
4031 RtApiWasapi::RtApiWasapi()
4032 : coInitialized_( false ), deviceEnumerator_( NULL )
4034 // WASAPI can run either apartment or multi-threaded
4035 HRESULT hr = CoInitialize( NULL );
// Track ownership of COM initialization so the destructor only calls
// CoUninitialize() when this object's CoInitialize() succeeded.
4036 if ( !FAILED( hr ) )
4037 coInitialized_ = true;
4039 // Instantiate device enumerator
4040 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4041 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4042 ( void** ) &deviceEnumerator_ );
4044 if ( FAILED( hr ) ) {
4045 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
4046 error( RtAudioError::DRIVER_ERROR );
4050 //-----------------------------------------------------------------------------
// Destructor: closes any open stream, releases the device enumerator, and
// balances the constructor's CoInitialize() if it succeeded.
4052 RtApiWasapi::~RtApiWasapi()
4054 if ( stream_.state != STREAM_CLOSED )
4057 SAFE_RELEASE( deviceEnumerator_ );
4059 // If this object previously called CoInitialize()
4060 if ( coInitialized_ )
4064 //=============================================================================
// Returns the total number of active WASAPI endpoints (capture + render).
// On any enumeration failure errorText_ is set and DRIVER_ERROR is raised.
4066 unsigned int RtApiWasapi::getDeviceCount( void )
4068 unsigned int captureDeviceCount = 0;
4069 unsigned int renderDeviceCount = 0;
4071 IMMDeviceCollection* captureDevices = NULL;
4072 IMMDeviceCollection* renderDevices = NULL;
4074 // Count capture devices
4076 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4077 if ( FAILED( hr ) ) {
4078 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4082 hr = captureDevices->GetCount( &captureDeviceCount );
4083 if ( FAILED( hr ) ) {
4084 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4088 // Count render devices
4089 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4090 if ( FAILED( hr ) ) {
4091 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4095 hr = renderDevices->GetCount( &renderDeviceCount );
4096 if ( FAILED( hr ) ) {
4097 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4102 // release all references
4103 SAFE_RELEASE( captureDevices );
4104 SAFE_RELEASE( renderDevices );
// Success is detected by an empty errorText_ rather than a separate flag.
4106 if ( errorText_.empty() )
4107 return captureDeviceCount + renderDeviceCount;
4109 error( RtAudioError::DRIVER_ERROR );
4113 //-----------------------------------------------------------------------------
// Probes a single device and fills an RtAudio::DeviceInfo.
// Device indexing convention: render devices occupy [0, renderDeviceCount),
// capture devices follow at [renderDeviceCount, renderDeviceCount +
// captureDeviceCount) -- see the index test below.  All COM references and
// PROPVARIANTs acquired here are released before returning.
4115 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4117 RtAudio::DeviceInfo info;
4118 unsigned int captureDeviceCount = 0;
4119 unsigned int renderDeviceCount = 0;
4120 std::string defaultDeviceName;
4121 bool isCaptureDevice = false;
4123 PROPVARIANT deviceNameProp;
4124 PROPVARIANT defaultDeviceNameProp;
4126 IMMDeviceCollection* captureDevices = NULL;
4127 IMMDeviceCollection* renderDevices = NULL;
4128 IMMDevice* devicePtr = NULL;
4129 IMMDevice* defaultDevicePtr = NULL;
4130 IAudioClient* audioClient = NULL;
4131 IPropertyStore* devicePropStore = NULL;
4132 IPropertyStore* defaultDevicePropStore = NULL;
4134 WAVEFORMATEX* deviceFormat = NULL;
4135 WAVEFORMATEX* closestMatchFormat = NULL;
// probed stays false until the full query sequence succeeds
4138 info.probed = false;
4140 // Count capture devices
4142 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4143 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4144 if ( FAILED( hr ) ) {
4145 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4149 hr = captureDevices->GetCount( &captureDeviceCount );
4150 if ( FAILED( hr ) ) {
4151 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4155 // Count render devices
4156 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4157 if ( FAILED( hr ) ) {
4158 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4162 hr = renderDevices->GetCount( &renderDeviceCount );
4163 if ( FAILED( hr ) ) {
4164 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4168 // validate device index
4169 if ( device >= captureDeviceCount + renderDeviceCount ) {
4170 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4171 errorType = RtAudioError::INVALID_USE;
4175 // determine whether index falls within capture or render devices
4176 if ( device >= renderDeviceCount ) {
4177 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4178 if ( FAILED( hr ) ) {
4179 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4182 isCaptureDevice = true;
4185 hr = renderDevices->Item( device, &devicePtr );
4186 if ( FAILED( hr ) ) {
4187 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4190 isCaptureDevice = false;
4193 // get default device name
4194 if ( isCaptureDevice ) {
4195 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4196 if ( FAILED( hr ) ) {
4197 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4202 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4203 if ( FAILED( hr ) ) {
4204 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4209 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4210 if ( FAILED( hr ) ) {
4211 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4214 PropVariantInit( &defaultDeviceNameProp );
4216 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4217 if ( FAILED( hr ) ) {
4218 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4222 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4225 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4226 if ( FAILED( hr ) ) {
4227 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4231 PropVariantInit( &deviceNameProp );
4233 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4234 if ( FAILED( hr ) ) {
4235 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4239 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
// Default flags are derived by comparing friendly names, not endpoint IDs.
// NOTE(review): two distinct devices sharing a friendly name would both be
// flagged default -- confirm acceptable.
4242 if ( isCaptureDevice ) {
4243 info.isDefaultInput = info.name == defaultDeviceName;
4244 info.isDefaultOutput = false;
4247 info.isDefaultInput = false;
4248 info.isDefaultOutput = info.name == defaultDeviceName;
4252 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4253 if ( FAILED( hr ) ) {
4254 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4258 hr = audioClient->GetMixFormat( &deviceFormat );
4259 if ( FAILED( hr ) ) {
4260 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
// Channel counts come from the shared-mode mix format; WASAPI endpoints are
// reported as input-only or output-only (duplex always 0).
4264 if ( isCaptureDevice ) {
4265 info.inputChannels = deviceFormat->nChannels;
4266 info.outputChannels = 0;
4267 info.duplexChannels = 0;
4270 info.inputChannels = 0;
4271 info.outputChannels = deviceFormat->nChannels;
4272 info.duplexChannels = 0;
4276 info.sampleRates.clear();
4278 // allow support for all sample rates as we have a built-in sample rate converter
4279 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4280 info.sampleRates.push_back( SAMPLE_RATES[i] );
4282 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4285 info.nativeFormats = 0;
// Map the mix format (plain or WAVEFORMATEXTENSIBLE) onto RtAudio format bits.
4287 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4288 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4289 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4291 if ( deviceFormat->wBitsPerSample == 32 ) {
4292 info.nativeFormats |= RTAUDIO_FLOAT32;
4294 else if ( deviceFormat->wBitsPerSample == 64 ) {
4295 info.nativeFormats |= RTAUDIO_FLOAT64;
4298 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4299 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4300 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4302 if ( deviceFormat->wBitsPerSample == 8 ) {
4303 info.nativeFormats |= RTAUDIO_SINT8;
4305 else if ( deviceFormat->wBitsPerSample == 16 ) {
4306 info.nativeFormats |= RTAUDIO_SINT16;
4308 else if ( deviceFormat->wBitsPerSample == 24 ) {
4309 info.nativeFormats |= RTAUDIO_SINT24;
4311 else if ( deviceFormat->wBitsPerSample == 32 ) {
4312 info.nativeFormats |= RTAUDIO_SINT32;
4320 // release all references
4321 PropVariantClear( &deviceNameProp );
4322 PropVariantClear( &defaultDeviceNameProp );
4324 SAFE_RELEASE( captureDevices );
4325 SAFE_RELEASE( renderDevices );
4326 SAFE_RELEASE( devicePtr );
4327 SAFE_RELEASE( defaultDevicePtr );
4328 SAFE_RELEASE( audioClient );
4329 SAFE_RELEASE( devicePropStore );
4330 SAFE_RELEASE( defaultDevicePropStore );
4332 CoTaskMemFree( deviceFormat );
4333 CoTaskMemFree( closestMatchFormat );
4335 if ( !errorText_.empty() )
4340 //-----------------------------------------------------------------------------
// Linear scan for the device flagged isDefaultOutput by getDeviceInfo().
// Note: getDeviceCount() is re-evaluated on every iteration.
4342 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4344 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4345 if ( getDeviceInfo( i ).isDefaultOutput ) {
4353 //-----------------------------------------------------------------------------
// Linear scan for the device flagged isDefaultInput by getDeviceInfo().
// Note: getDeviceCount() is re-evaluated on every iteration.
4355 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4357 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4358 if ( getDeviceInfo( i ).isDefaultInput ) {
4366 //-----------------------------------------------------------------------------
// Stops the stream if needed, then releases every WASAPI resource held by the
// WasapiHandle (COM interfaces and event handles), frees the user/device
// buffers, and marks the stream CLOSED.  Calling with no open stream only
// raises a WARNING.
4368 void RtApiWasapi::closeStream( void )
4370 if ( stream_.state == STREAM_CLOSED ) {
4371 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4372 error( RtAudioError::WARNING );
4376 if ( stream_.state != STREAM_STOPPED )
4379 // clean up stream memory
4380 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4381 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4383 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4384 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
// Event handles are plain Win32 handles, not COM: close rather than release.
4386 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4387 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4389 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4390 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4392 delete ( WasapiHandle* ) stream_.apiHandle;
4393 stream_.apiHandle = NULL;
// free both user buffers (input and output)
4395 for ( int i = 0; i < 2; i++ ) {
4396 if ( stream_.userBuffer[i] ) {
4397 free( stream_.userBuffer[i] );
4398 stream_.userBuffer[i] = 0;
4402 if ( stream_.deviceBuffer ) {
4403 free( stream_.deviceBuffer );
4404 stream_.deviceBuffer = 0;
4407 // update stream state
4408 stream_.state = STREAM_CLOSED;
4411 //-----------------------------------------------------------------------------
// Spawns the WASAPI I/O thread (wasapiThread via runWasapiThread) and marks
// the stream RUNNING.  Starting an already-running stream only raises a
// WARNING.
4413 void RtApiWasapi::startStream( void )
4417 if ( stream_.state == STREAM_RUNNING ) {
4418 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4419 error( RtAudioError::WARNING );
// State is set before the thread exists so the thread observes RUNNING
// as soon as it is resumed.
4423 // update stream state
4424 stream_.state = STREAM_RUNNING;
4426 // create WASAPI stream thread
// Created suspended so its priority can be applied before it executes.
4427 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4429 if ( !stream_.callbackInfo.thread ) {
4430 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4431 error( RtAudioError::THREAD_ERROR );
4434 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4435 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4439 //-----------------------------------------------------------------------------
// Gracefully stops the stream: signals the I/O thread via STREAM_STOPPING,
// waits for it to acknowledge, lets the final buffer drain, then stops the
// WASAPI clients and closes the thread handle.
4441 void RtApiWasapi::stopStream( void )
4445 if ( stream_.state == STREAM_STOPPED ) {
4446 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4447 error( RtAudioError::WARNING );
4451 // inform stream thread by setting stream state to STREAM_STOPPING
4452 stream_.state = STREAM_STOPPING;
4454 // wait until stream thread is stopped
// Busy-wait: wasapiThread sets STREAM_STOPPED when it has wound down.
4455 while( stream_.state != STREAM_STOPPED ) {
4459 // Wait for the last buffer to play before stopping.
// bufferSize frames at sampleRate frames/sec -> milliseconds for Sleep().
4460 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4462 // stop capture client if applicable
4463 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4464 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4465 if ( FAILED( hr ) ) {
4466 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
4467 error( RtAudioError::DRIVER_ERROR );
4472 // stop render client if applicable
4473 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4474 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4475 if ( FAILED( hr ) ) {
4476 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
4477 error( RtAudioError::DRIVER_ERROR );
4482 // close thread handle
4483 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4484 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4485 error( RtAudioError::THREAD_ERROR );
4489 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4492 //-----------------------------------------------------------------------------
// Immediately stops the stream.  Identical to stopStream() except it does not
// sleep to let the final buffer drain before stopping the WASAPI clients.
4494 void RtApiWasapi::abortStream( void )
4498 if ( stream_.state == STREAM_STOPPED ) {
4499 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4500 error( RtAudioError::WARNING );
4504 // inform stream thread by setting stream state to STREAM_STOPPING
4505 stream_.state = STREAM_STOPPING;
4507 // wait until stream thread is stopped
// Busy-wait: wasapiThread sets STREAM_STOPPED when it has wound down.
4508 while ( stream_.state != STREAM_STOPPED ) {
4512 // stop capture client if applicable
4513 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4514 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4515 if ( FAILED( hr ) ) {
4516 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
4517 error( RtAudioError::DRIVER_ERROR );
4522 // stop render client if applicable
4523 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4524 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4525 if ( FAILED( hr ) ) {
4526 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
4527 error( RtAudioError::DRIVER_ERROR );
4532 // close thread handle
4533 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4534 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4535 error( RtAudioError::THREAD_ERROR );
4539 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4542 //-----------------------------------------------------------------------------
// Opens one direction (INPUT or OUTPUT) of a stream on the given device:
// validates the index against the render-first/capture-second indexing
// convention, activates the device's IAudioClient into the shared
// WasapiHandle, records stream parameters, decides whether buffer conversion
// is required, and allocates the user buffer.  Returns SUCCESS/FAILURE; on
// failure the partially-opened stream is closed before returning.
4544 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4545 unsigned int firstChannel, unsigned int sampleRate,
4546 RtAudioFormat format, unsigned int* bufferSize,
4547 RtAudio::StreamOptions* options )
4549 bool methodResult = FAILURE;
4550 unsigned int captureDeviceCount = 0;
4551 unsigned int renderDeviceCount = 0;
4553 IMMDeviceCollection* captureDevices = NULL;
4554 IMMDeviceCollection* renderDevices = NULL;
4555 IMMDevice* devicePtr = NULL;
4556 WAVEFORMATEX* deviceFormat = NULL;
4557 unsigned int bufferBytes;
4558 stream_.state = STREAM_STOPPED;
4560 // create API Handle if not already created
// The handle is shared across both directions of a duplex stream.
4561 if ( !stream_.apiHandle )
4562 stream_.apiHandle = ( void* ) new WasapiHandle();
4564 // Count capture devices
4566 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4567 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4568 if ( FAILED( hr ) ) {
4569 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4573 hr = captureDevices->GetCount( &captureDeviceCount );
4574 if ( FAILED( hr ) ) {
4575 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4579 // Count render devices
4580 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4581 if ( FAILED( hr ) ) {
4582 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4586 hr = renderDevices->GetCount( &renderDeviceCount );
4587 if ( FAILED( hr ) ) {
4588 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4592 // validate device index
4593 if ( device >= captureDeviceCount + renderDeviceCount ) {
4594 errorType = RtAudioError::INVALID_USE;
4595 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4599 // determine whether index falls within capture or render devices
4600 if ( device >= renderDeviceCount ) {
// capture device: only valid when opening the INPUT direction
4601 if ( mode != INPUT ) {
4602 errorType = RtAudioError::INVALID_USE;
4603 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4607 // retrieve captureAudioClient from devicePtr
4608 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4610 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4611 if ( FAILED( hr ) ) {
4612 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4616 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4617 NULL, ( void** ) &captureAudioClient );
4618 if ( FAILED( hr ) ) {
4619 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4623 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4624 if ( FAILED( hr ) ) {
4625 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4629 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
// latency[mode] is written through a long long* cast; return value ignored
4630 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
// render device: only valid when opening the OUTPUT direction
4633 if ( mode != OUTPUT ) {
4634 errorType = RtAudioError::INVALID_USE;
4635 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
4639 // retrieve renderAudioClient from devicePtr
4640 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4642 hr = renderDevices->Item( device, &devicePtr );
4643 if ( FAILED( hr ) ) {
4644 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4648 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4649 NULL, ( void** ) &renderAudioClient );
4650 if ( FAILED( hr ) ) {
4651 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4655 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4656 if ( FAILED( hr ) ) {
4657 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4661 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4662 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
// A second call with the opposite direction upgrades the stream to DUPLEX.
4666 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4667 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4668 stream_.mode = DUPLEX;
4671 stream_.mode = mode;
4674 stream_.device[mode] = device;
4675 stream_.doByteSwap[mode] = false;
4676 stream_.sampleRate = sampleRate;
4677 stream_.bufferSize = *bufferSize;
4678 stream_.nBuffers = 1;
4679 stream_.nUserChannels[mode] = channels;
4680 stream_.channelOffset[mode] = firstChannel;
4681 stream_.userFormat = format;
4682 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4684 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4685 stream_.userInterleaved = false;
4687 stream_.userInterleaved = true;
4688 stream_.deviceInterleaved[mode] = true;
4690 // Set flags for buffer conversion.
4691 stream_.doConvertBuffer[mode] = false;
// NOTE(review): nUserChannels/nDeviceChannels are compared here without a
// [mode] subscript -- if these are arrays this compares their addresses
// (always unequal), forcing doConvertBuffer on unconditionally.  Likely
// intended: nUserChannels[mode] != nDeviceChannels[mode]; confirm against
// the RtApi::RtApiStream declaration.
4692 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4693 stream_.nUserChannels != stream_.nDeviceChannels )
4694 stream_.doConvertBuffer[mode] = true;
4695 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4696 stream_.nUserChannels[mode] > 1 )
4697 stream_.doConvertBuffer[mode] = true;
4699 if ( stream_.doConvertBuffer[mode] )
4700 setConvertInfo( mode, 0 );
4702 // Allocate necessary internal buffers
4703 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4705 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4706 if ( !stream_.userBuffer[mode] ) {
4707 errorType = RtAudioError::MEMORY_ERROR;
4708 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4712 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4713 stream_.callbackInfo.priority = 15;
4715 stream_.callbackInfo.priority = 0;
4717 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4718 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4720 methodResult = SUCCESS;
4724 SAFE_RELEASE( captureDevices );
4725 SAFE_RELEASE( renderDevices );
4726 SAFE_RELEASE( devicePtr );
4727 CoTaskMemFree( deviceFormat );
4729 // if method failed, close the stream
4730 if ( methodResult == FAILURE )
4733 if ( !errorText_.empty() )
4735 return methodResult;
4738 //=============================================================================
// Win32 thread entry trampoline: forwards to the instance's wasapiThread().
4740 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4743 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
// Win32 thread entry trampoline: forwards to the instance's stopStream().
4748 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4751 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
// Win32 thread entry trampoline: forwards to the instance's abortStream().
4756 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4759 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4764 //-----------------------------------------------------------------------------
// Core WASAPI streaming thread.  On entry it (re)initializes the capture and/or
// render audio clients in shared, event-driven mode, then loops:
//   pull captured frames -> user callback -> push rendered frames
// until stream_.state becomes STREAM_STOPPING.  Sample-rate conversion between
// the device mix rate and the user rate is done via convertBufferWasapi() using
// the intermediate convBuffer / deviceBuffer allocations below.
// NOTE(review): this listing is an excerpt — several error-exit lines (goto
// Exit paths, closing braces) are elided from view; the annotated code below is
// otherwise unchanged.
4766 void RtApiWasapi::wasapiThread()
4768 // as this is a new thread, we must CoInitialize it
4769 CoInitialize( NULL );
4773 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4774 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4775 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4776 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4777 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4778 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4780 WAVEFORMATEX* captureFormat = NULL;
4781 WAVEFORMATEX* renderFormat = NULL;
4782 float captureSrRatio = 0.0f;
4783 float renderSrRatio = 0.0f;
4784 WasapiBuffer captureBuffer;
4785 WasapiBuffer renderBuffer;
4787 // declare local stream variables
4788 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4789 BYTE* streamBuffer = NULL;
4790 unsigned long captureFlags = 0;
4791 unsigned int bufferFrameCount = 0;
4792 unsigned int numFramesPadding = 0;
4793 unsigned int convBufferSize = 0;
4794 bool callbackPushed = false;
4795 bool callbackPulled = false;
4796 bool callbackStopped = false;
4797 int callbackResult = 0;
4799 // convBuffer is used to store converted buffers between WASAPI and the user
4800 char* convBuffer = NULL;
4801 unsigned int convBuffSize = 0;
4802 unsigned int deviceBuffSize = 0;
4805 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4807 // Attempt to assign "Pro Audio" characteristic to thread
// Raises this thread's scheduling priority via the MMCSS "Pro Audio" task
// class; loaded dynamically so the code still runs where AVRT.dll is absent.
// NOTE(review): GetProcAddress result is called without a NULL check here —
// elided lines may guard this; verify against the full source.
4808 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4810 DWORD taskIndex = 0;
4811 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4812 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4813 FreeLibrary( AvrtDll );
4816 // start capture stream if applicable
4817 if ( captureAudioClient ) {
4818 hr = captureAudioClient->GetMixFormat( &captureFormat );
4819 if ( FAILED( hr ) ) {
4820 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
// Ratio of device mix rate to user rate; used to size buffers and drive
// the sample-rate conversion below.
4824 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
4826 // initialize capture stream according to desired buffer size
4827 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
4828 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
4830 if ( !captureClient ) {
4831 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4832 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4833 desiredBufferPeriod,
4834 desiredBufferPeriod,
4837 if ( FAILED( hr ) ) {
4838 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4842 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4843 ( void** ) &captureClient );
4844 if ( FAILED( hr ) ) {
4845 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
4849 // configure captureEvent to trigger on every available capture buffer
4850 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4851 if ( !captureEvent ) {
4852 errorType = RtAudioError::SYSTEM_ERROR;
4853 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
4857 hr = captureAudioClient->SetEventHandle( captureEvent );
4858 if ( FAILED( hr ) ) {
4859 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
// Persist the freshly created client/event in the shared API handle so
// stop/close paths (and re-entry into this thread) can reuse them.
4863 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
4864 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
4867 unsigned int inBufferSize = 0;
4868 hr = captureAudioClient->GetBufferSize( &inBufferSize );
4869 if ( FAILED( hr ) ) {
4870 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
4874 // scale outBufferSize according to stream->user sample rate ratio
4875 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
4876 inBufferSize *= stream_.nDeviceChannels[INPUT];
4878 // set captureBuffer size
4879 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
4881 // reset the capture stream
4882 hr = captureAudioClient->Reset();
4883 if ( FAILED( hr ) ) {
4884 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
4888 // start the capture stream
4889 hr = captureAudioClient->Start();
4890 if ( FAILED( hr ) ) {
4891 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
4896 // start render stream if applicable
// Mirror of the capture-side setup above, for the render (output) direction.
4897 if ( renderAudioClient ) {
4898 hr = renderAudioClient->GetMixFormat( &renderFormat );
4899 if ( FAILED( hr ) ) {
4900 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4904 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
4906 // initialize render stream according to desired buffer size
4907 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
4908 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
4910 if ( !renderClient ) {
4911 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4912 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4913 desiredBufferPeriod,
4914 desiredBufferPeriod,
4917 if ( FAILED( hr ) ) {
4918 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
4922 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
4923 ( void** ) &renderClient );
4924 if ( FAILED( hr ) ) {
4925 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
4929 // configure renderEvent to trigger on every available render buffer
4930 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4931 if ( !renderEvent ) {
4932 errorType = RtAudioError::SYSTEM_ERROR;
4933 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
4937 hr = renderAudioClient->SetEventHandle( renderEvent );
4938 if ( FAILED( hr ) ) {
4939 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
4943 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
4944 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
4947 unsigned int outBufferSize = 0;
4948 hr = renderAudioClient->GetBufferSize( &outBufferSize );
4949 if ( FAILED( hr ) ) {
4950 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
4954 // scale inBufferSize according to user->stream sample rate ratio
4955 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
4956 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
4958 // set renderBuffer size
4959 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
4961 // reset the render stream
4962 hr = renderAudioClient->Reset();
4963 if ( FAILED( hr ) ) {
4964 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
4968 // start the render stream
4969 hr = renderAudioClient->Start();
4970 if ( FAILED( hr ) ) {
4971 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
// Size the intermediate buffers: convBuffSize covers the sample-rate-converted
// device-side frames, deviceBuffSize the user-side frames; DUPLEX takes the
// worst case of the two directions.
4976 if ( stream_.mode == INPUT ) {
4977 using namespace std; // for roundf
4978 convBuffSize = ( size_t ) roundf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4979 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4981 else if ( stream_.mode == OUTPUT ) {
4982 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4983 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4985 else if ( stream_.mode == DUPLEX ) {
4986 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4987 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
4988 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4989 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
4992 convBuffer = ( char* ) malloc( convBuffSize );
4993 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
4994 if ( !convBuffer || !stream_.deviceBuffer ) {
4995 errorType = RtAudioError::MEMORY_ERROR;
4996 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5000 // stream process loop
5001 while ( stream_.state != STREAM_STOPPING ) {
5002 if ( !callbackPulled ) {
5005 // 1. Pull callback buffer from inputBuffer
5006 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5007 //                          Convert callback buffer to user format
5009 if ( captureAudioClient ) {
5010 // Pull callback buffer from inputBuffer
5011 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5012 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
5013 stream_.deviceFormat[INPUT] );
5015 if ( callbackPulled ) {
5016 // Convert callback buffer to user sample rate
5017 convertBufferWasapi( stream_.deviceBuffer,
5019 stream_.nDeviceChannels[INPUT],
5020 captureFormat->nSamplesPerSec,
5022 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
5024 stream_.deviceFormat[INPUT] );
5026 if ( stream_.doConvertBuffer[INPUT] ) {
5027 // Convert callback buffer to user format
5028 convertBuffer( stream_.userBuffer[INPUT],
5029 stream_.deviceBuffer,
5030 stream_.convertInfo[INPUT] );
5033 // no further conversion, simple copy deviceBuffer to userBuffer
5034 memcpy( stream_.userBuffer[INPUT],
5035 stream_.deviceBuffer,
5036 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5041 // if there is no capture stream, set callbackPulled flag
5042 callbackPulled = true;
5047 // 1. Execute user callback method
5048 // 2. Handle return value from callback
5050 // if callback has not requested the stream to stop
5051 if ( callbackPulled && !callbackStopped ) {
5052 // Execute user callback method
5053 callbackResult = callback( stream_.userBuffer[OUTPUT],
5054 stream_.userBuffer[INPUT],
5057 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5058 stream_.callbackInfo.userData );
5060 // Handle return value from callback
// Return code 1 = drain and stop; code 2 = abort immediately.  Stopping must
// happen from a separate thread because stopStream()/abortStream() join this
// one.
5061 if ( callbackResult == 1 ) {
5062 // instantiate a thread to stop this thread
5063 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5064 if ( !threadHandle ) {
5065 errorType = RtAudioError::THREAD_ERROR;
5066 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5069 else if ( !CloseHandle( threadHandle ) ) {
5070 errorType = RtAudioError::THREAD_ERROR;
5071 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5075 callbackStopped = true;
5077 else if ( callbackResult == 2 ) {
5078 // instantiate a thread to stop this thread
5079 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5080 if ( !threadHandle ) {
5081 errorType = RtAudioError::THREAD_ERROR;
5082 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5085 else if ( !CloseHandle( threadHandle ) ) {
5086 errorType = RtAudioError::THREAD_ERROR;
5087 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5091 callbackStopped = true;
5098 // 1. Convert callback buffer to stream format
5099 // 2. Convert callback buffer to stream sample rate and channel count
5100 // 3. Push callback buffer into outputBuffer
5102 if ( renderAudioClient && callbackPulled ) {
5103 if ( stream_.doConvertBuffer[OUTPUT] ) {
5104 // Convert callback buffer to stream format
5105 convertBuffer( stream_.deviceBuffer,
5106 stream_.userBuffer[OUTPUT],
5107 stream_.convertInfo[OUTPUT] );
5111 // Convert callback buffer to stream sample rate
5112 convertBufferWasapi( convBuffer,
5113 stream_.deviceBuffer,
5114 stream_.nDeviceChannels[OUTPUT],
5116 renderFormat->nSamplesPerSec,
5119 stream_.deviceFormat[OUTPUT] );
5121 // Push callback buffer into outputBuffer
5122 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5123 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5124 stream_.deviceFormat[OUTPUT] );
5127 // if there is no render stream, set callbackPushed flag
5128 callbackPushed = true;
5133 // 1. Get capture buffer from stream
5134 // 2. Push capture buffer into inputBuffer
5135 // 3. If 2. was successful: Release capture buffer
5137 if ( captureAudioClient ) {
5138 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5139 if ( !callbackPulled ) {
5140 WaitForSingleObject( captureEvent, INFINITE );
5143 // Get capture buffer from stream
5144 hr = captureClient->GetBuffer( &streamBuffer,
5146 &captureFlags, NULL, NULL );
5147 if ( FAILED( hr ) ) {
5148 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5152 if ( bufferFrameCount != 0 ) {
5153 // Push capture buffer into inputBuffer
5154 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5155 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5156 stream_.deviceFormat[INPUT] ) )
5158 // Release capture buffer
5159 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5160 if ( FAILED( hr ) ) {
5161 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5167 // Inform WASAPI that capture was unsuccessful
5168 hr = captureClient->ReleaseBuffer( 0 );
5169 if ( FAILED( hr ) ) {
5170 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5177 // Inform WASAPI that capture was unsuccessful
5178 hr = captureClient->ReleaseBuffer( 0 );
5179 if ( FAILED( hr ) ) {
5180 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5188 // 1. Get render buffer from stream
5189 // 2. Pull next buffer from outputBuffer
5190 // 3. If 2. was successful: Fill render buffer with next buffer
5191 //                          Release render buffer
5193 if ( renderAudioClient ) {
5194 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5195 if ( callbackPulled && !callbackPushed ) {
5196 WaitForSingleObject( renderEvent, INFINITE );
5199 // Get render buffer from stream
5200 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5201 if ( FAILED( hr ) ) {
5202 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5206 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5207 if ( FAILED( hr ) ) {
5208 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
// Writable space = total endpoint buffer minus frames still queued for output.
5212 bufferFrameCount -= numFramesPadding;
5214 if ( bufferFrameCount != 0 ) {
5215 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5216 if ( FAILED( hr ) ) {
5217 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5221 // Pull next buffer from outputBuffer
5222 // Fill render buffer with next buffer
5223 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5224 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5225 stream_.deviceFormat[OUTPUT] ) )
5227 // Release render buffer
5228 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5229 if ( FAILED( hr ) ) {
5230 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5236 // Inform WASAPI that render was unsuccessful
5237 hr = renderClient->ReleaseBuffer( 0, 0 );
5238 if ( FAILED( hr ) ) {
5239 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5246 // Inform WASAPI that render was unsuccessful
5247 hr = renderClient->ReleaseBuffer( 0, 0 );
5248 if ( FAILED( hr ) ) {
5249 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5255 // if the callback buffer was pushed to renderBuffer, reset the callbackPulled flag
5256 if ( callbackPushed ) {
5257 callbackPulled = false;
// Advance the stream time by one user buffer per completed loop iteration.
5259 RtApi::tickStreamTime();
// Exit/cleanup path: release the COM-allocated mix formats and the local
// conversion buffer, then mark the stream stopped.
5266 CoTaskMemFree( captureFormat );
5267 CoTaskMemFree( renderFormat );
5269 free ( convBuffer );
5273 // update stream state
5274 stream_.state = STREAM_STOPPED;
5276 if ( errorText_.empty() )
5282 //******************** End of __WINDOWS_WASAPI__ *********************//
5286 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5288 // Modified by Robin Davies, October 2005
5289 // - Improvements to DirectX pointer chasing.
5290 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5291 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5292 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5293 // Changed device query structure for RtAudio 4.0.7, January 2010
5295 #include <mmsystem.h>
5299 #include <algorithm>
5301 #if defined(__MINGW32__)
5302 // missing from latest mingw winapi
5303 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5304 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5305 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5306 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5309 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5311 #ifdef _MSC_VER // if Microsoft Visual C++
5312 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
// Returns nonzero when 'pointer' lies within the half-open circular-buffer
// interval [earlierPointer, laterPointer), all positions modulo bufferSize.
// Used by the DirectSound implementation to decide whether a buffer position
// has been passed by the hardware cursors.
5315 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5317 if ( pointer > bufferSize ) pointer -= bufferSize;
// Unwrap the interval (and the query point) past the buffer end so a plain
// linear range test works even when the interval straddles the wrap point.
5318 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5319 if ( pointer < earlierPointer ) pointer += bufferSize;
5320 return pointer >= earlierPointer && pointer < laterPointer;
5323 // A structure to hold various information related to the DirectSound
5324 // API implementation.
// NOTE(review): the struct header line is elided in this excerpt; the fields
// below belong to the DirectSound per-stream handle.  The two-element arrays
// presumably index playback ([0]) and capture ([1]) — verify against the full
// source.
5326 unsigned int drainCounter; // Tracks callback counts when draining
5327 bool internalDrain; // Indicates if stop is initiated from callback or not.
5331 UINT bufferPointer[2];
5332 DWORD dsBufferSize[2];
5333 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Default constructor: zero the drain state, ids, buffers, xrun flags and
// buffer pointers.
5337 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5340 // Declarations for utility functions, callbacks, and structures
5341 // specific to the DirectSound implementation.
// Enumeration callback handed to DirectSound(Capture)Enumerate; collects
// device GUIDs/names into a DsProbeData-supplied vector.
5342 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5343 LPCTSTR description,
// Maps a DirectSound HRESULT code to a human-readable message.
5347 static const char* getErrorString( int code );
// Thread entry point that drives the DirectSound callback loop.
5349 static unsigned __stdcall callbackHandler( void *ptr );
// DsDevice default constructor fragment: device starts as not-found with no
// valid output ([0]) or input ([1]) id.
5358 : found(false) { validId[0] = false; validId[1] = false; }
// Bundle passed to deviceQueryCallback: direction flag plus the device list
// being filled in.
5361 struct DsProbeData {
5363 std::vector<struct DsDevice>* dsDevices;
// Constructor: initialize COM for this thread.  The flag records whether we
// own the CoInitialize so the destructor can balance it with CoUninitialize.
5366 RtApiDs :: RtApiDs()
5368 // Dsound will run both-threaded. If CoInitialize fails, then just
5369 // accept whatever the mainline chose for a threading model.
5370 coInitialized_ = false;
5371 HRESULT hr = CoInitialize( NULL );
5372 if ( !FAILED( hr ) ) coInitialized_ = true;
// Destructor: close any open stream, then release COM only if this object's
// constructor successfully initialized it.
5375 RtApiDs :: ~RtApiDs()
5377 if ( stream_.state != STREAM_CLOSED ) closeStream();
5378 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5381 // The DirectSound default output is always the first device.
// NOTE(review): function bodies are elided in this excerpt; per the comments,
// both presumably return index 0 — verify against the full source.
5382 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5387 // The DirectSound default input is always the first input device,
5388 // which is the first capture device enumerated.
5389 unsigned int RtApiDs :: getDefaultInputDevice( void )
// Re-enumerate DirectSound output and capture devices, prune devices that
// have disappeared since the last query, and return the surviving count.
// Enumeration failures are reported as warnings and do not abort the count.
5394 unsigned int RtApiDs :: getDeviceCount( void )
5396 // Set query flag for previously found devices to false, so that we
5397 // can check for any devices that have disappeared.
5398 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5399 dsDevices[i].found = false;
5401 // Query DirectSound devices.
5402 struct DsProbeData probeInfo;
5403 probeInfo.isInput = false;
5404 probeInfo.dsDevices = &dsDevices;
5405 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5406 if ( FAILED( result ) ) {
5407 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5408 errorText_ = errorStream_.str();
5409 error( RtAudioError::WARNING );
5412 // Query DirectSoundCapture devices.
5413 probeInfo.isInput = true;
5414 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5415 if ( FAILED( result ) ) {
5416 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5417 errorText_ = errorStream_.str();
5418 error( RtAudioError::WARNING );
5421 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
// Note: the index is only advanced on the not-erased path (elided line) so
// erasing does not skip the element that shifts into position i.
5422 for ( unsigned int i=0; i<dsDevices.size(); ) {
5423 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5427 return static_cast<unsigned int>(dsDevices.size());
// Probe a single DirectSound device: open its output and/or capture interface,
// query channel counts, supported sample rates and native formats, and fill in
// an RtAudio::DeviceInfo.  Probe failures are reported as warnings and leave
// info.probed false.  NOTE(review): this excerpt elides the early returns,
// interface Release() calls and the probeInput label's surrounding lines.
5430 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5432 RtAudio::DeviceInfo info;
5433 info.probed = false;
5435 if ( dsDevices.size() == 0 ) {
5436 // Force a query of all devices
5438 if ( dsDevices.size() == 0 ) {
5439 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5440 error( RtAudioError::INVALID_USE );
5445 if ( device >= dsDevices.size() ) {
5446 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5447 error( RtAudioError::INVALID_USE );
// Skip straight to the capture probe when the device has no output id.
5452 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5454 LPDIRECTSOUND output;
5456 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5457 if ( FAILED( result ) ) {
5458 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5459 errorText_ = errorStream_.str();
5460 error( RtAudioError::WARNING );
5464 outCaps.dwSize = sizeof( outCaps );
5465 result = output->GetCaps( &outCaps );
5466 if ( FAILED( result ) ) {
5468 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5469 errorText_ = errorStream_.str();
5470 error( RtAudioError::WARNING );
5474 // Get output channel information.
5475 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5477 // Get sample rate information.
// Accept any of the table's standard rates that fall inside the device's
// secondary-buffer rate range; prefer the highest rate not exceeding 48 kHz.
5478 info.sampleRates.clear();
5479 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5480 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5481 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5482 info.sampleRates.push_back( SAMPLE_RATES[k] );
5484 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5485 info.preferredSampleRate = SAMPLE_RATES[k];
5489 // Get format information.
5490 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5491 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5495 if ( getDefaultOutputDevice() == device )
5496 info.isDefaultOutput = true;
// No capture side: fill in the name and finish (return elided).
5498 if ( dsDevices[ device ].validId[1] == false ) {
5499 info.name = dsDevices[ device ].name;
// Capture-side probe (probeInput label elided above).
5506 LPDIRECTSOUNDCAPTURE input;
5507 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5508 if ( FAILED( result ) ) {
5509 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5510 errorText_ = errorStream_.str();
5511 error( RtAudioError::WARNING );
5516 inCaps.dwSize = sizeof( inCaps );
5517 result = input->GetCaps( &inCaps );
5518 if ( FAILED( result ) ) {
5520 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5521 errorText_ = errorStream_.str();
5522 error( RtAudioError::WARNING );
5526 // Get input channel information.
5527 info.inputChannels = inCaps.dwChannels;
5529 // Get sample rate and format information.
// Capture capabilities are reported as WAVE_FORMAT_* bitmask combinations of
// rate (11/22/44.1/96 kHz), channels (mono/stereo) and width (8/16-bit), so
// each combination must be tested individually.
5530 std::vector<unsigned int> rates;
5531 if ( inCaps.dwChannels >= 2 ) {
5532 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5533 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5534 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5535 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5536 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5537 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5538 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5539 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5541 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5542 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5543 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5544 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5545 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5547 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5548 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5549 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5550 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5551 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5554 else if ( inCaps.dwChannels == 1 ) {
5555 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5556 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5557 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5558 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5559 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5560 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5561 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5562 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5564 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5565 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5566 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5567 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5568 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5570 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5571 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5572 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5573 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5574 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5577 else info.inputChannels = 0; // technically, this would be an error
5581 if ( info.inputChannels == 0 ) return info;
5583 // Copy the supported rates to the info structure but avoid duplication.
5585 for ( unsigned int i=0; i<rates.size(); i++ ) {
5587 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5588 if ( rates[i] == info.sampleRates[j] ) {
5593 if ( found == false ) info.sampleRates.push_back( rates[i] );
5595 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5597 // If device opens for both playback and capture, we determine the channels.
5598 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5599 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// Device 0 is the DirectSound default (see getDefaultInputDevice above).
5601 if ( device == 0 ) info.isDefaultInput = true;
5603 // Copy name and return.
5604 info.name = dsDevices[ device ].name;
5609 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5610 unsigned int firstChannel, unsigned int sampleRate,
5611 RtAudioFormat format, unsigned int *bufferSize,
5612 RtAudio::StreamOptions *options )
5614 if ( channels + firstChannel > 2 ) {
5615 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5619 size_t nDevices = dsDevices.size();
5620 if ( nDevices == 0 ) {
5621 // This should not happen because a check is made before this function is called.
5622 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5626 if ( device >= nDevices ) {
5627 // This should not happen because a check is made before this function is called.
5628 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5632 if ( mode == OUTPUT ) {
5633 if ( dsDevices[ device ].validId[0] == false ) {
5634 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5635 errorText_ = errorStream_.str();
5639 else { // mode == INPUT
5640 if ( dsDevices[ device ].validId[1] == false ) {
5641 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5642 errorText_ = errorStream_.str();
5647 // According to a note in PortAudio, using GetDesktopWindow()
5648 // instead of GetForegroundWindow() is supposed to avoid problems
5649 // that occur when the application's window is not the foreground
5650 // window. Also, if the application window closes before the
5651 // DirectSound buffer, DirectSound can crash. In the past, I had
5652 // problems when using GetDesktopWindow() but it seems fine now
5653 // (January 2010). I'll leave it commented here.
5654 // HWND hWnd = GetForegroundWindow();
5655 HWND hWnd = GetDesktopWindow();
5657 // Check the numberOfBuffers parameter and limit the lowest value to
5658 // two. This is a judgement call and a value of two is probably too
5659 // low for capture, but it should work for playback.
5661 if ( options ) nBuffers = options->numberOfBuffers;
5662 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5663 if ( nBuffers < 2 ) nBuffers = 3;
5665 // Check the lower range of the user-specified buffer size and set
5666 // (arbitrarily) to a lower bound of 32.
5667 if ( *bufferSize < 32 ) *bufferSize = 32;
5669 // Create the wave format structure. The data format setting will
5670 // be determined later.
5671 WAVEFORMATEX waveFormat;
5672 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5673 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5674 waveFormat.nChannels = channels + firstChannel;
5675 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5677 // Determine the device buffer size. By default, we'll use the value
5678 // defined above (32K), but we will grow it to make allowances for
5679 // very large software buffer sizes.
5680 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5681 DWORD dsPointerLeadTime = 0;
5683 void *ohandle = 0, *bhandle = 0;
5685 if ( mode == OUTPUT ) {
5687 LPDIRECTSOUND output;
5688 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5689 if ( FAILED( result ) ) {
5690 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5691 errorText_ = errorStream_.str();
5696 outCaps.dwSize = sizeof( outCaps );
5697 result = output->GetCaps( &outCaps );
5698 if ( FAILED( result ) ) {
5700 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5701 errorText_ = errorStream_.str();
5705 // Check channel information.
5706 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5707 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5708 errorText_ = errorStream_.str();
5712 // Check format information. Use 16-bit format unless not
5713 // supported or user requests 8-bit.
5714 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5715 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5716 waveFormat.wBitsPerSample = 16;
5717 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5720 waveFormat.wBitsPerSample = 8;
5721 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5723 stream_.userFormat = format;
5725 // Update wave format structure and buffer information.
5726 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5727 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5728 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5730 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5731 while ( dsPointerLeadTime * 2U > dsBufferSize )
5734 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5735 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5736 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5737 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5738 if ( FAILED( result ) ) {
5740 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5741 errorText_ = errorStream_.str();
5745 // Even though we will write to the secondary buffer, we need to
5746 // access the primary buffer to set the correct output format
5747 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5748 // buffer description.
5749 DSBUFFERDESC bufferDescription;
5750 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5751 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5752 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5754 // Obtain the primary buffer
5755 LPDIRECTSOUNDBUFFER buffer;
5756 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5757 if ( FAILED( result ) ) {
5759 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5760 errorText_ = errorStream_.str();
5764 // Set the primary DS buffer sound format.
5765 result = buffer->SetFormat( &waveFormat );
5766 if ( FAILED( result ) ) {
5768 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5769 errorText_ = errorStream_.str();
5773 // Setup the secondary DS buffer description.
5774 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5775 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5776 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5777 DSBCAPS_GLOBALFOCUS |
5778 DSBCAPS_GETCURRENTPOSITION2 |
5779 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5780 bufferDescription.dwBufferBytes = dsBufferSize;
5781 bufferDescription.lpwfxFormat = &waveFormat;
5783 // Try to create the secondary DS buffer. If that doesn't work,
5784 // try to use software mixing. Otherwise, there's a problem.
5785 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5786 if ( FAILED( result ) ) {
5787 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5788 DSBCAPS_GLOBALFOCUS |
5789 DSBCAPS_GETCURRENTPOSITION2 |
5790 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5791 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5792 if ( FAILED( result ) ) {
5794 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5795 errorText_ = errorStream_.str();
5800 // Get the buffer size ... might be different from what we specified.
5802 dsbcaps.dwSize = sizeof( DSBCAPS );
5803 result = buffer->GetCaps( &dsbcaps );
5804 if ( FAILED( result ) ) {
5807 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5808 errorText_ = errorStream_.str();
5812 dsBufferSize = dsbcaps.dwBufferBytes;
5814 // Lock the DS buffer
5817 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5818 if ( FAILED( result ) ) {
5821 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
5822 errorText_ = errorStream_.str();
5826 // Zero the DS buffer
5827 ZeroMemory( audioPtr, dataLen );
5829 // Unlock the DS buffer
5830 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5831 if ( FAILED( result ) ) {
5834 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
5835 errorText_ = errorStream_.str();
5839 ohandle = (void *) output;
5840 bhandle = (void *) buffer;
5843 if ( mode == INPUT ) {
5845 LPDIRECTSOUNDCAPTURE input;
5846 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5847 if ( FAILED( result ) ) {
5848 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5849 errorText_ = errorStream_.str();
5854 inCaps.dwSize = sizeof( inCaps );
5855 result = input->GetCaps( &inCaps );
5856 if ( FAILED( result ) ) {
5858 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
5859 errorText_ = errorStream_.str();
5863 // Check channel information.
5864 if ( inCaps.dwChannels < channels + firstChannel ) {
5865 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
5869 // Check format information. Use 16-bit format unless user
5871 DWORD deviceFormats;
5872 if ( channels + firstChannel == 2 ) {
5873 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
5874 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5875 waveFormat.wBitsPerSample = 8;
5876 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5878 else { // assume 16-bit is supported
5879 waveFormat.wBitsPerSample = 16;
5880 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5883 else { // channel == 1
5884 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
5885 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5886 waveFormat.wBitsPerSample = 8;
5887 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5889 else { // assume 16-bit is supported
5890 waveFormat.wBitsPerSample = 16;
5891 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5894 stream_.userFormat = format;
5896 // Update wave format structure and buffer information.
5897 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5898 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5899 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5901 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5902 while ( dsPointerLeadTime * 2U > dsBufferSize )
5905 // Setup the secondary DS buffer description.
5906 DSCBUFFERDESC bufferDescription;
5907 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
5908 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
5909 bufferDescription.dwFlags = 0;
5910 bufferDescription.dwReserved = 0;
5911 bufferDescription.dwBufferBytes = dsBufferSize;
5912 bufferDescription.lpwfxFormat = &waveFormat;
5914 // Create the capture buffer.
5915 LPDIRECTSOUNDCAPTUREBUFFER buffer;
5916 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
5917 if ( FAILED( result ) ) {
5919 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
5920 errorText_ = errorStream_.str();
5924 // Get the buffer size ... might be different from what we specified.
5926 dscbcaps.dwSize = sizeof( DSCBCAPS );
5927 result = buffer->GetCaps( &dscbcaps );
5928 if ( FAILED( result ) ) {
5931 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5932 errorText_ = errorStream_.str();
5936 dsBufferSize = dscbcaps.dwBufferBytes;
5938 // NOTE: We could have a problem here if this is a duplex stream
5939 // and the play and capture hardware buffer sizes are different
5940 // (I'm actually not sure if that is a problem or not).
5941 // Currently, we are not verifying that.
5943 // Lock the capture buffer
5946 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5947 if ( FAILED( result ) ) {
5950 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
5951 errorText_ = errorStream_.str();
5956 ZeroMemory( audioPtr, dataLen );
5958 // Unlock the buffer
5959 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5960 if ( FAILED( result ) ) {
5963 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
5964 errorText_ = errorStream_.str();
5968 ohandle = (void *) input;
5969 bhandle = (void *) buffer;
5972 // Set various stream parameters
5973 DsHandle *handle = 0;
5974 stream_.nDeviceChannels[mode] = channels + firstChannel;
5975 stream_.nUserChannels[mode] = channels;
5976 stream_.bufferSize = *bufferSize;
5977 stream_.channelOffset[mode] = firstChannel;
5978 stream_.deviceInterleaved[mode] = true;
5979 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
5980 else stream_.userInterleaved = true;
5982 // Set flag for buffer conversion
5983 stream_.doConvertBuffer[mode] = false;
5984 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
5985 stream_.doConvertBuffer[mode] = true;
5986 if (stream_.userFormat != stream_.deviceFormat[mode])
5987 stream_.doConvertBuffer[mode] = true;
5988 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
5989 stream_.nUserChannels[mode] > 1 )
5990 stream_.doConvertBuffer[mode] = true;
5992 // Allocate necessary internal buffers
5993 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
5994 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
5995 if ( stream_.userBuffer[mode] == NULL ) {
5996 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6000 if ( stream_.doConvertBuffer[mode] ) {
6002 bool makeBuffer = true;
6003 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6004 if ( mode == INPUT ) {
6005 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6006 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6007 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6012 bufferBytes *= *bufferSize;
6013 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6014 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6015 if ( stream_.deviceBuffer == NULL ) {
6016 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6022 // Allocate our DsHandle structures for the stream.
6023 if ( stream_.apiHandle == 0 ) {
6025 handle = new DsHandle;
6027 catch ( std::bad_alloc& ) {
6028 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6032 // Create a manual-reset event.
6033 handle->condition = CreateEvent( NULL, // no security
6034 TRUE, // manual-reset
6035 FALSE, // non-signaled initially
6037 stream_.apiHandle = (void *) handle;
6040 handle = (DsHandle *) stream_.apiHandle;
6041 handle->id[mode] = ohandle;
6042 handle->buffer[mode] = bhandle;
6043 handle->dsBufferSize[mode] = dsBufferSize;
6044 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6046 stream_.device[mode] = device;
6047 stream_.state = STREAM_STOPPED;
6048 if ( stream_.mode == OUTPUT && mode == INPUT )
6049 // We had already set up an output stream.
6050 stream_.mode = DUPLEX;
6052 stream_.mode = mode;
6053 stream_.nBuffers = nBuffers;
6054 stream_.sampleRate = sampleRate;
6056 // Setup the buffer conversion information structure.
6057 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6059 // Setup the callback thread.
6060 if ( stream_.callbackInfo.isRunning == false ) {
6062 stream_.callbackInfo.isRunning = true;
6063 stream_.callbackInfo.object = (void *) this;
6064 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6065 &stream_.callbackInfo, 0, &threadId );
6066 if ( stream_.callbackInfo.thread == 0 ) {
6067 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6071 // Boost DS thread priority
6072 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
6078 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6079 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6080 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6081 if ( buffer ) buffer->Release();
6084 if ( handle->buffer[1] ) {
6085 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6086 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6087 if ( buffer ) buffer->Release();
6090 CloseHandle( handle->condition );
6092 stream_.apiHandle = 0;
6095 for ( int i=0; i<2; i++ ) {
6096 if ( stream_.userBuffer[i] ) {
6097 free( stream_.userBuffer[i] );
6098 stream_.userBuffer[i] = 0;
6102 if ( stream_.deviceBuffer ) {
6103 free( stream_.deviceBuffer );
6104 stream_.deviceBuffer = 0;
6107 stream_.state = STREAM_CLOSED;
// Close an open DirectSound stream: shut down the callback thread,
// release the playback/capture buffer objects referenced by the
// DsHandle, free the user/device conversion buffers, and mark the
// stream CLOSED. Calling this with no open stream is a WARNING, not
// a fatal error.
6111 void RtApiDs :: closeStream()
6113 if ( stream_.state == STREAM_CLOSED ) {
6114 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6115 error( RtAudioError::WARNING );
// Stop the callback thread: clear the running flag, then join and
// close the thread handle created by _beginthreadex() in
// probeDeviceOpen(). The thread polls isRunning, so this blocks
// until it exits on its own.
6119 // Stop the callback thread.
6120 stream_.callbackInfo.isRunning = false;
6121 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6122 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6124 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Tear down the output (index 0) DirectSound objects, if present.
6126 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6127 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6128 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// Tear down the capture (index 1) DirectSound objects, if present.
6135 if ( handle->buffer[1] ) {
6136 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6137 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
// Destroy the manual-reset event used to signal stream drain.
6144 CloseHandle( handle->condition );
6146 stream_.apiHandle = 0;
// Free the per-mode user buffers (0 = output, 1 = input).
6149 for ( int i=0; i<2; i++ ) {
6150 if ( stream_.userBuffer[i] ) {
6151 free( stream_.userBuffer[i] );
6152 stream_.userBuffer[i] = 0;
// Free the shared device-format conversion buffer.
6156 if ( stream_.deviceBuffer ) {
6157 free( stream_.deviceBuffer );
6158 stream_.deviceBuffer = 0;
// Reset stream bookkeeping so a new stream can be opened.
6161 stream_.mode = UNINITIALIZED;
6162 stream_.state = STREAM_CLOSED;
// Start a stopped DirectSound stream: raise timer resolution, reset
// the duplex synchronization state, start the playback and/or capture
// buffers looping, and mark the stream RUNNING. Starting an
// already-running stream is a WARNING, not a fatal error.
6165 void RtApiDs :: startStream()
6168 if ( stream_.state == STREAM_RUNNING ) {
6169 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6170 error( RtAudioError::WARNING );
6174 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6176 // Increase scheduler frequency on lesser windows (a side-effect of
6177 // increasing timer accuracy). On greater windows (Win2K or later),
6178 // this is already in effect.
6179 timeBeginPeriod( 1 );
// The callback re-synchronizes the read/write pointers on the first
// pass after these are reset (see callbackEvent's buffersRolling logic).
6181 buffersRolling = false;
6182 duplexPrerollBytes = 0;
6184 if ( stream_.mode == DUPLEX ) {
6185 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6186 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Kick off looping playback on the output buffer (handle index 0).
6190 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6192 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6193 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6194 if ( FAILED( result ) ) {
6195 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6196 errorText_ = errorStream_.str();
// Kick off looping capture on the input buffer (handle index 1).
6201 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6203 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6204 result = buffer->Start( DSCBSTART_LOOPING );
6205 if ( FAILED( result ) ) {
6206 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6207 errorText_ = errorStream_.str();
// Clear drain state and the drain-complete event, then go RUNNING.
6212 handle->drainCounter = 0;
6213 handle->internalDrain = false;
6214 ResetEvent( handle->condition );
6215 stream_.state = STREAM_RUNNING;
// Any DS failure above falls through to here and is reported once.
6218 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop a running DirectSound stream: let any pending output drain,
// stop the playback/capture buffers, zero their contents so a restart
// does not replay stale audio, rewind the internal buffer pointers,
// and restore normal timer resolution. Stopping an already-stopped
// stream is a WARNING, not a fatal error.
6221 void RtApiDs :: stopStream()
6224 if ( stream_.state == STREAM_STOPPED ) {
6225 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6226 error( RtAudioError::WARNING );
6233 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6234 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Request a drain (write silence for a couple of buffers) and block on
// the manual-reset event until the callback signals it is finished.
6235 if ( handle->drainCounter == 0 ) {
6236 handle->drainCounter = 2;
6237 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6240 stream_.state = STREAM_STOPPED;
6242 MUTEX_LOCK( &stream_.mutex );
6244 // Stop the buffer and clear memory
6245 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6246 result = buffer->Stop();
6247 if ( FAILED( result ) ) {
6248 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6249 errorText_ = errorStream_.str();
6253 // Lock the buffer and clear it so that if we start to play again,
6254 // we won't have old data playing.
6255 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6256 if ( FAILED( result ) ) {
6257 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6258 errorText_ = errorStream_.str();
6262 // Zero the DS buffer
6263 ZeroMemory( audioPtr, dataLen );
6265 // Unlock the DS buffer
6266 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6267 if ( FAILED( result ) ) {
6268 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6269 errorText_ = errorStream_.str();
6273 // If we start playing again, we must begin at beginning of buffer.
6274 handle->bufferPointer[0] = 0;
// Same shutdown sequence for the capture side (handle index 1).
6277 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6278 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6282 stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken in the output branch
// above, so only lock it here for a pure INPUT stream.
6284 if ( stream_.mode != DUPLEX )
6285 MUTEX_LOCK( &stream_.mutex );
6287 result = buffer->Stop();
6288 if ( FAILED( result ) ) {
6289 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6290 errorText_ = errorStream_.str();
6294 // Lock the buffer and clear it so that if we start to play again,
6295 // we won't have old data playing.
6296 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6297 if ( FAILED( result ) ) {
6298 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6299 errorText_ = errorStream_.str();
6303 // Zero the DS buffer
6304 ZeroMemory( audioPtr, dataLen );
6306 // Unlock the DS buffer
6307 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6308 if ( FAILED( result ) ) {
6309 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6310 errorText_ = errorStream_.str();
6314 // If we start recording again, we must begin at beginning of buffer.
6315 handle->bufferPointer[1] = 0;
// Undo the timeBeginPeriod(1) issued in startStream() and release the
// stream mutex before reporting any accumulated DS failure.
6319 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6320 MUTEX_UNLOCK( &stream_.mutex );
6322 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Abort a running stream without waiting for queued output to play:
// setting drainCounter to 2 directly makes the callback treat the
// stream as already draining (it writes zeros when drainCounter > 1;
// see callbackEvent), so playback is cut short rather than drained.
// Aborting an already-stopped stream is a WARNING, not a fatal error.
6325 void RtApiDs :: abortStream()
6328 if ( stream_.state == STREAM_STOPPED ) {
6329 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6330 error( RtAudioError::WARNING );
6334 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6335 handle->drainCounter = 2;
6340 void RtApiDs :: callbackEvent()
6342 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6343 Sleep( 50 ); // sleep 50 milliseconds
6347 if ( stream_.state == STREAM_CLOSED ) {
6348 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6349 error( RtAudioError::WARNING );
6353 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6354 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6356 // Check if we were draining the stream and signal is finished.
6357 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6359 stream_.state = STREAM_STOPPING;
6360 if ( handle->internalDrain == false )
6361 SetEvent( handle->condition );
6367 // Invoke user callback to get fresh output data UNLESS we are
6369 if ( handle->drainCounter == 0 ) {
6370 RtAudioCallback callback = (RtAudioCallback) info->callback;
6371 double streamTime = getStreamTime();
6372 RtAudioStreamStatus status = 0;
6373 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6374 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6375 handle->xrun[0] = false;
6377 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6378 status |= RTAUDIO_INPUT_OVERFLOW;
6379 handle->xrun[1] = false;
6381 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6382 stream_.bufferSize, streamTime, status, info->userData );
6383 if ( cbReturnValue == 2 ) {
6384 stream_.state = STREAM_STOPPING;
6385 handle->drainCounter = 2;
6389 else if ( cbReturnValue == 1 ) {
6390 handle->drainCounter = 1;
6391 handle->internalDrain = true;
6396 DWORD currentWritePointer, safeWritePointer;
6397 DWORD currentReadPointer, safeReadPointer;
6398 UINT nextWritePointer;
6400 LPVOID buffer1 = NULL;
6401 LPVOID buffer2 = NULL;
6402 DWORD bufferSize1 = 0;
6403 DWORD bufferSize2 = 0;
6408 MUTEX_LOCK( &stream_.mutex );
6409 if ( stream_.state == STREAM_STOPPED ) {
6410 MUTEX_UNLOCK( &stream_.mutex );
6414 if ( buffersRolling == false ) {
6415 if ( stream_.mode == DUPLEX ) {
6416 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6418 // It takes a while for the devices to get rolling. As a result,
6419 // there's no guarantee that the capture and write device pointers
6420 // will move in lockstep. Wait here for both devices to start
6421 // rolling, and then set our buffer pointers accordingly.
6422 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6423 // bytes later than the write buffer.
6425 // Stub: a serious risk of having a pre-emptive scheduling round
6426 // take place between the two GetCurrentPosition calls... but I'm
6427 // really not sure how to solve the problem. Temporarily boost to
6428 // Realtime priority, maybe; but I'm not sure what priority the
6429 // DirectSound service threads run at. We *should* be roughly
6430 // within a ms or so of correct.
6432 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6433 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6435 DWORD startSafeWritePointer, startSafeReadPointer;
6437 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6438 if ( FAILED( result ) ) {
6439 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6440 errorText_ = errorStream_.str();
6441 MUTEX_UNLOCK( &stream_.mutex );
6442 error( RtAudioError::SYSTEM_ERROR );
6445 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6446 if ( FAILED( result ) ) {
6447 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6448 errorText_ = errorStream_.str();
6449 MUTEX_UNLOCK( &stream_.mutex );
6450 error( RtAudioError::SYSTEM_ERROR );
6454 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6455 if ( FAILED( result ) ) {
6456 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6457 errorText_ = errorStream_.str();
6458 MUTEX_UNLOCK( &stream_.mutex );
6459 error( RtAudioError::SYSTEM_ERROR );
6462 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6463 if ( FAILED( result ) ) {
6464 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6465 errorText_ = errorStream_.str();
6466 MUTEX_UNLOCK( &stream_.mutex );
6467 error( RtAudioError::SYSTEM_ERROR );
6470 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6474 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6476 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6477 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6478 handle->bufferPointer[1] = safeReadPointer;
6480 else if ( stream_.mode == OUTPUT ) {
6482 // Set the proper nextWritePosition after initial startup.
6483 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6484 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6485 if ( FAILED( result ) ) {
6486 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6487 errorText_ = errorStream_.str();
6488 MUTEX_UNLOCK( &stream_.mutex );
6489 error( RtAudioError::SYSTEM_ERROR );
6492 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6493 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6496 buffersRolling = true;
6499 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6501 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6503 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6504 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6505 bufferBytes *= formatBytes( stream_.userFormat );
6506 memset( stream_.userBuffer[0], 0, bufferBytes );
6509 // Setup parameters and do buffer conversion if necessary.
6510 if ( stream_.doConvertBuffer[0] ) {
6511 buffer = stream_.deviceBuffer;
6512 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6513 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6514 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6517 buffer = stream_.userBuffer[0];
6518 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6519 bufferBytes *= formatBytes( stream_.userFormat );
6522 // No byte swapping necessary in DirectSound implementation.
6524 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6525 // unsigned. So, we need to convert our signed 8-bit data here to
6527 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6528 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6530 DWORD dsBufferSize = handle->dsBufferSize[0];
6531 nextWritePointer = handle->bufferPointer[0];
6533 DWORD endWrite, leadPointer;
6535 // Find out where the read and "safe write" pointers are.
6536 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6537 if ( FAILED( result ) ) {
6538 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6539 errorText_ = errorStream_.str();
6540 MUTEX_UNLOCK( &stream_.mutex );
6541 error( RtAudioError::SYSTEM_ERROR );
6545 // We will copy our output buffer into the region between
6546 // safeWritePointer and leadPointer. If leadPointer is not
6547 // beyond the next endWrite position, wait until it is.
6548 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6549 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6550 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6551 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6552 endWrite = nextWritePointer + bufferBytes;
6554 // Check whether the entire write region is behind the play pointer.
6555 if ( leadPointer >= endWrite ) break;
6557 // If we are here, then we must wait until the leadPointer advances
6558 // beyond the end of our next write region. We use the
6559 // Sleep() function to suspend operation until that happens.
6560 double millis = ( endWrite - leadPointer ) * 1000.0;
6561 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6562 if ( millis < 1.0 ) millis = 1.0;
6563 Sleep( (DWORD) millis );
6566 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6567 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6568 // We've strayed into the forbidden zone ... resync the read pointer.
6569 handle->xrun[0] = true;
6570 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6571 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6572 handle->bufferPointer[0] = nextWritePointer;
6573 endWrite = nextWritePointer + bufferBytes;
6576 // Lock free space in the buffer
6577 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6578 &bufferSize1, &buffer2, &bufferSize2, 0 );
6579 if ( FAILED( result ) ) {
6580 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6581 errorText_ = errorStream_.str();
6582 MUTEX_UNLOCK( &stream_.mutex );
6583 error( RtAudioError::SYSTEM_ERROR );
6587 // Copy our buffer into the DS buffer
6588 CopyMemory( buffer1, buffer, bufferSize1 );
6589 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6591 // Update our buffer offset and unlock sound buffer
6592 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6593 if ( FAILED( result ) ) {
6594 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6595 errorText_ = errorStream_.str();
6596 MUTEX_UNLOCK( &stream_.mutex );
6597 error( RtAudioError::SYSTEM_ERROR );
6600 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6601 handle->bufferPointer[0] = nextWritePointer;
6604 // Don't bother draining input
6605 if ( handle->drainCounter ) {
6606 handle->drainCounter++;
6610 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6612 // Setup parameters.
6613 if ( stream_.doConvertBuffer[1] ) {
6614 buffer = stream_.deviceBuffer;
6615 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6616 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6619 buffer = stream_.userBuffer[1];
6620 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6621 bufferBytes *= formatBytes( stream_.userFormat );
6624 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6625 long nextReadPointer = handle->bufferPointer[1];
6626 DWORD dsBufferSize = handle->dsBufferSize[1];
6628 // Find out where the write and "safe read" pointers are.
6629 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6630 if ( FAILED( result ) ) {
6631 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6632 errorText_ = errorStream_.str();
6633 MUTEX_UNLOCK( &stream_.mutex );
6634 error( RtAudioError::SYSTEM_ERROR );
6638 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6639 DWORD endRead = nextReadPointer + bufferBytes;
6641 // Handling depends on whether we are INPUT or DUPLEX.
6642 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6643 // then a wait here will drag the write pointers into the forbidden zone.
6645 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6646 // it's in a safe position. This causes dropouts, but it seems to be the only
6647 // practical way to sync up the read and write pointers reliably, given the
6648 // the very complex relationship between phase and increment of the read and write
6651 // In order to minimize audible dropouts in DUPLEX mode, we will
6652 // provide a pre-roll period of 0.5 seconds in which we return
6653 // zeros from the read buffer while the pointers sync up.
6655 if ( stream_.mode == DUPLEX ) {
6656 if ( safeReadPointer < endRead ) {
6657 if ( duplexPrerollBytes <= 0 ) {
6658 // Pre-roll time over. Be more agressive.
6659 int adjustment = endRead-safeReadPointer;
6661 handle->xrun[1] = true;
6663 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6664 // and perform fine adjustments later.
6665 // - small adjustments: back off by twice as much.
6666 if ( adjustment >= 2*bufferBytes )
6667 nextReadPointer = safeReadPointer-2*bufferBytes;
6669 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6671 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6675 // In pre=roll time. Just do it.
6676 nextReadPointer = safeReadPointer - bufferBytes;
6677 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6679 endRead = nextReadPointer + bufferBytes;
6682 else { // mode == INPUT
6683 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6684 // See comments for playback.
6685 double millis = (endRead - safeReadPointer) * 1000.0;
6686 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6687 if ( millis < 1.0 ) millis = 1.0;
6688 Sleep( (DWORD) millis );
6690 // Wake up and find out where we are now.
6691 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6692 if ( FAILED( result ) ) {
6693 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6694 errorText_ = errorStream_.str();
6695 MUTEX_UNLOCK( &stream_.mutex );
6696 error( RtAudioError::SYSTEM_ERROR );
6700 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6704 // Lock free space in the buffer
6705 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6706 &bufferSize1, &buffer2, &bufferSize2, 0 );
6707 if ( FAILED( result ) ) {
6708 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6709 errorText_ = errorStream_.str();
6710 MUTEX_UNLOCK( &stream_.mutex );
6711 error( RtAudioError::SYSTEM_ERROR );
6715 if ( duplexPrerollBytes <= 0 ) {
6716 // Copy our buffer into the DS buffer
6717 CopyMemory( buffer, buffer1, bufferSize1 );
6718 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6721 memset( buffer, 0, bufferSize1 );
6722 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6723 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6726 // Update our buffer offset and unlock sound buffer
6727 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6728 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6729 if ( FAILED( result ) ) {
6730 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6731 errorText_ = errorStream_.str();
6732 MUTEX_UNLOCK( &stream_.mutex );
6733 error( RtAudioError::SYSTEM_ERROR );
6736 handle->bufferPointer[1] = nextReadPointer;
6738 // No byte swapping necessary in DirectSound implementation.
6740 // If necessary, convert 8-bit data from unsigned to signed.
6741 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6742 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6744 // Do buffer conversion if necessary.
6745 if ( stream_.doConvertBuffer[1] )
6746 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6750 MUTEX_UNLOCK( &stream_.mutex );
6751 RtApi::tickStreamTime();
6754 // Definitions for utility functions and callbacks
6755 // specific to the DirectSound implementation.
6757 static unsigned __stdcall callbackHandler( void *ptr )
6759 CallbackInfo *info = (CallbackInfo *) ptr;
6760 RtApiDs *object = (RtApiDs *) info->object;
6761 bool* isRunning = &info->isRunning;
6763 while ( *isRunning == true ) {
6764 object->callbackEvent();
6771 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6772 LPCTSTR description,
6776 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6777 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6780 bool validDevice = false;
6781 if ( probeInfo.isInput == true ) {
6783 LPDIRECTSOUNDCAPTURE object;
6785 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
6786 if ( hr != DS_OK ) return TRUE;
6788 caps.dwSize = sizeof(caps);
6789 hr = object->GetCaps( &caps );
6790 if ( hr == DS_OK ) {
6791 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
6798 LPDIRECTSOUND object;
6799 hr = DirectSoundCreate( lpguid, &object, NULL );
6800 if ( hr != DS_OK ) return TRUE;
6802 caps.dwSize = sizeof(caps);
6803 hr = object->GetCaps( &caps );
6804 if ( hr == DS_OK ) {
6805 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
6811 // If good device, then save its name and guid.
6812 std::string name = convertCharPointerToStdString( description );
6813 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
6814 if ( lpguid == NULL )
6815 name = "Default Device";
6816 if ( validDevice ) {
6817 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
6818 if ( dsDevices[i].name == name ) {
6819 dsDevices[i].found = true;
6820 if ( probeInfo.isInput ) {
6821 dsDevices[i].id[1] = lpguid;
6822 dsDevices[i].validId[1] = true;
6825 dsDevices[i].id[0] = lpguid;
6826 dsDevices[i].validId[0] = true;
6834 device.found = true;
6835 if ( probeInfo.isInput ) {
6836 device.id[1] = lpguid;
6837 device.validId[1] = true;
6840 device.id[0] = lpguid;
6841 device.validId[0] = true;
6843 dsDevices.push_back( device );
6849 static const char* getErrorString( int code )
6853 case DSERR_ALLOCATED:
6854 return "Already allocated";
6856 case DSERR_CONTROLUNAVAIL:
6857 return "Control unavailable";
6859 case DSERR_INVALIDPARAM:
6860 return "Invalid parameter";
6862 case DSERR_INVALIDCALL:
6863 return "Invalid call";
6866 return "Generic error";
6868 case DSERR_PRIOLEVELNEEDED:
6869 return "Priority level needed";
6871 case DSERR_OUTOFMEMORY:
6872 return "Out of memory";
6874 case DSERR_BADFORMAT:
6875 return "The sample rate or the channel format is not supported";
6877 case DSERR_UNSUPPORTED:
6878 return "Not supported";
6880 case DSERR_NODRIVER:
6883 case DSERR_ALREADYINITIALIZED:
6884 return "Already initialized";
6886 case DSERR_NOAGGREGATION:
6887 return "No aggregation";
6889 case DSERR_BUFFERLOST:
6890 return "Buffer lost";
6892 case DSERR_OTHERAPPHASPRIO:
6893 return "Another application already has priority";
6895 case DSERR_UNINITIALIZED:
6896 return "Uninitialized";
6899 return "DirectSound unknown error";
6902 //******************** End of __WINDOWS_DS__ *********************//
6906 #if defined(__LINUX_ALSA__)
6908 #include <alsa/asoundlib.h>
6911 // A structure to hold various information related to the ALSA API
6914 snd_pcm_t *handles[2];
6917 pthread_cond_t runnable_cv;
6921 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
6924 static void *alsaCallbackHandler( void * ptr );
6926 RtApiAlsa :: RtApiAlsa()
6928 // Nothing to do here.
6931 RtApiAlsa :: ~RtApiAlsa()
6933 if ( stream_.state != STREAM_CLOSED ) closeStream();
6936 unsigned int RtApiAlsa :: getDeviceCount( void )
6938 unsigned nDevices = 0;
6939 int result, subdevice, card;
6943 // Count cards and devices
6945 snd_card_next( &card );
6946 while ( card >= 0 ) {
6947 sprintf( name, "hw:%d", card );
6948 result = snd_ctl_open( &handle, name, 0 );
6950 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6951 errorText_ = errorStream_.str();
6952 error( RtAudioError::WARNING );
6957 result = snd_ctl_pcm_next_device( handle, &subdevice );
6959 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6960 errorText_ = errorStream_.str();
6961 error( RtAudioError::WARNING );
6964 if ( subdevice < 0 )
6969 snd_ctl_close( handle );
6970 snd_card_next( &card );
6973 result = snd_ctl_open( &handle, "default", 0 );
6976 snd_ctl_close( handle );
6982 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
6984 RtAudio::DeviceInfo info;
6985 info.probed = false;
6987 unsigned nDevices = 0;
6988 int result, subdevice, card;
6992 // Count cards and devices
6995 snd_card_next( &card );
6996 while ( card >= 0 ) {
6997 sprintf( name, "hw:%d", card );
6998 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7000 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7001 errorText_ = errorStream_.str();
7002 error( RtAudioError::WARNING );
7007 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7009 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7010 errorText_ = errorStream_.str();
7011 error( RtAudioError::WARNING );
7014 if ( subdevice < 0 ) break;
7015 if ( nDevices == device ) {
7016 sprintf( name, "hw:%d,%d", card, subdevice );
7022 snd_ctl_close( chandle );
7023 snd_card_next( &card );
7026 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7027 if ( result == 0 ) {
7028 if ( nDevices == device ) {
7029 strcpy( name, "default" );
7035 if ( nDevices == 0 ) {
7036 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7037 error( RtAudioError::INVALID_USE );
7041 if ( device >= nDevices ) {
7042 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7043 error( RtAudioError::INVALID_USE );
7049 // If a stream is already open, we cannot probe the stream devices.
7050 // Thus, use the saved results.
7051 if ( stream_.state != STREAM_CLOSED &&
7052 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7053 snd_ctl_close( chandle );
7054 if ( device >= devices_.size() ) {
7055 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7056 error( RtAudioError::WARNING );
7059 return devices_[ device ];
7062 int openMode = SND_PCM_ASYNC;
7063 snd_pcm_stream_t stream;
7064 snd_pcm_info_t *pcminfo;
7065 snd_pcm_info_alloca( &pcminfo );
7067 snd_pcm_hw_params_t *params;
7068 snd_pcm_hw_params_alloca( ¶ms );
7070 // First try for playback unless default device (which has subdev -1)
7071 stream = SND_PCM_STREAM_PLAYBACK;
7072 snd_pcm_info_set_stream( pcminfo, stream );
7073 if ( subdevice != -1 ) {
7074 snd_pcm_info_set_device( pcminfo, subdevice );
7075 snd_pcm_info_set_subdevice( pcminfo, 0 );
7077 result = snd_ctl_pcm_info( chandle, pcminfo );
7079 // Device probably doesn't support playback.
7084 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7086 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7087 errorText_ = errorStream_.str();
7088 error( RtAudioError::WARNING );
7092 // The device is open ... fill the parameter structure.
7093 result = snd_pcm_hw_params_any( phandle, params );
7095 snd_pcm_close( phandle );
7096 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7097 errorText_ = errorStream_.str();
7098 error( RtAudioError::WARNING );
7102 // Get output channel information.
7104 result = snd_pcm_hw_params_get_channels_max( params, &value );
7106 snd_pcm_close( phandle );
7107 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7108 errorText_ = errorStream_.str();
7109 error( RtAudioError::WARNING );
7112 info.outputChannels = value;
7113 snd_pcm_close( phandle );
7116 stream = SND_PCM_STREAM_CAPTURE;
7117 snd_pcm_info_set_stream( pcminfo, stream );
7119 // Now try for capture unless default device (with subdev = -1)
7120 if ( subdevice != -1 ) {
7121 result = snd_ctl_pcm_info( chandle, pcminfo );
7122 snd_ctl_close( chandle );
7124 // Device probably doesn't support capture.
7125 if ( info.outputChannels == 0 ) return info;
7126 goto probeParameters;
7130 snd_ctl_close( chandle );
7132 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7134 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7135 errorText_ = errorStream_.str();
7136 error( RtAudioError::WARNING );
7137 if ( info.outputChannels == 0 ) return info;
7138 goto probeParameters;
7141 // The device is open ... fill the parameter structure.
7142 result = snd_pcm_hw_params_any( phandle, params );
7144 snd_pcm_close( phandle );
7145 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7146 errorText_ = errorStream_.str();
7147 error( RtAudioError::WARNING );
7148 if ( info.outputChannels == 0 ) return info;
7149 goto probeParameters;
7152 result = snd_pcm_hw_params_get_channels_max( params, &value );
7154 snd_pcm_close( phandle );
7155 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7156 errorText_ = errorStream_.str();
7157 error( RtAudioError::WARNING );
7158 if ( info.outputChannels == 0 ) return info;
7159 goto probeParameters;
7161 info.inputChannels = value;
7162 snd_pcm_close( phandle );
7164 // If device opens for both playback and capture, we determine the channels.
7165 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7166 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7168 // ALSA doesn't provide default devices so we'll use the first available one.
7169 if ( device == 0 && info.outputChannels > 0 )
7170 info.isDefaultOutput = true;
7171 if ( device == 0 && info.inputChannels > 0 )
7172 info.isDefaultInput = true;
7175 // At this point, we just need to figure out the supported data
7176 // formats and sample rates. We'll proceed by opening the device in
7177 // the direction with the maximum number of channels, or playback if
7178 // they are equal. This might limit our sample rate options, but so
7181 if ( info.outputChannels >= info.inputChannels )
7182 stream = SND_PCM_STREAM_PLAYBACK;
7184 stream = SND_PCM_STREAM_CAPTURE;
7185 snd_pcm_info_set_stream( pcminfo, stream );
7187 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7189 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7190 errorText_ = errorStream_.str();
7191 error( RtAudioError::WARNING );
7195 // The device is open ... fill the parameter structure.
7196 result = snd_pcm_hw_params_any( phandle, params );
7198 snd_pcm_close( phandle );
7199 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7200 errorText_ = errorStream_.str();
7201 error( RtAudioError::WARNING );
7205 // Test our discrete set of sample rate values.
7206 info.sampleRates.clear();
7207 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7208 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7209 info.sampleRates.push_back( SAMPLE_RATES[i] );
7211 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7212 info.preferredSampleRate = SAMPLE_RATES[i];
7215 if ( info.sampleRates.size() == 0 ) {
7216 snd_pcm_close( phandle );
7217 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7218 errorText_ = errorStream_.str();
7219 error( RtAudioError::WARNING );
7223 // Probe the supported data formats ... we don't care about endian-ness just yet
7224 snd_pcm_format_t format;
7225 info.nativeFormats = 0;
7226 format = SND_PCM_FORMAT_S8;
7227 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7228 info.nativeFormats |= RTAUDIO_SINT8;
7229 format = SND_PCM_FORMAT_S16;
7230 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7231 info.nativeFormats |= RTAUDIO_SINT16;
7232 format = SND_PCM_FORMAT_S24;
7233 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7234 info.nativeFormats |= RTAUDIO_SINT24;
7235 format = SND_PCM_FORMAT_S32;
7236 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7237 info.nativeFormats |= RTAUDIO_SINT32;
7238 format = SND_PCM_FORMAT_FLOAT;
7239 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7240 info.nativeFormats |= RTAUDIO_FLOAT32;
7241 format = SND_PCM_FORMAT_FLOAT64;
7242 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7243 info.nativeFormats |= RTAUDIO_FLOAT64;
7245 // Check that we have at least one supported format
7246 if ( info.nativeFormats == 0 ) {
7247 snd_pcm_close( phandle );
7248 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7249 errorText_ = errorStream_.str();
7250 error( RtAudioError::WARNING );
7254 // Get the device name
7256 result = snd_card_get_name( card, &cardname );
7257 if ( result >= 0 ) {
7258 sprintf( name, "hw:%s,%d", cardname, subdevice );
7263 // That's all ... close the device and return
7264 snd_pcm_close( phandle );
7269 void RtApiAlsa :: saveDeviceInfo( void )
7273 unsigned int nDevices = getDeviceCount();
7274 devices_.resize( nDevices );
7275 for ( unsigned int i=0; i<nDevices; i++ )
7276 devices_[i] = getDeviceInfo( i );
7279 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7280 unsigned int firstChannel, unsigned int sampleRate,
7281 RtAudioFormat format, unsigned int *bufferSize,
7282 RtAudio::StreamOptions *options )
7285 #if defined(__RTAUDIO_DEBUG__)
7287 snd_output_stdio_attach(&out, stderr, 0);
7290 // I'm not using the "plug" interface ... too much inconsistent behavior.
7292 unsigned nDevices = 0;
7293 int result, subdevice, card;
7297 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7298 snprintf(name, sizeof(name), "%s", "default");
7300 // Count cards and devices
7302 snd_card_next( &card );
7303 while ( card >= 0 ) {
7304 sprintf( name, "hw:%d", card );
7305 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7307 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7308 errorText_ = errorStream_.str();
7313 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7314 if ( result < 0 ) break;
7315 if ( subdevice < 0 ) break;
7316 if ( nDevices == device ) {
7317 sprintf( name, "hw:%d,%d", card, subdevice );
7318 snd_ctl_close( chandle );
7323 snd_ctl_close( chandle );
7324 snd_card_next( &card );
7327 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7328 if ( result == 0 ) {
7329 if ( nDevices == device ) {
7330 strcpy( name, "default" );
7336 if ( nDevices == 0 ) {
7337 // This should not happen because a check is made before this function is called.
7338 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7342 if ( device >= nDevices ) {
7343 // This should not happen because a check is made before this function is called.
7344 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7351 // The getDeviceInfo() function will not work for a device that is
7352 // already open. Thus, we'll probe the system before opening a
7353 // stream and save the results for use by getDeviceInfo().
7354 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7355 this->saveDeviceInfo();
7357 snd_pcm_stream_t stream;
7358 if ( mode == OUTPUT )
7359 stream = SND_PCM_STREAM_PLAYBACK;
7361 stream = SND_PCM_STREAM_CAPTURE;
7364 int openMode = SND_PCM_ASYNC;
7365 result = snd_pcm_open( &phandle, name, stream, openMode );
7367 if ( mode == OUTPUT )
7368 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7370 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7371 errorText_ = errorStream_.str();
7375 // Fill the parameter structure.
7376 snd_pcm_hw_params_t *hw_params;
7377 snd_pcm_hw_params_alloca( &hw_params );
7378 result = snd_pcm_hw_params_any( phandle, hw_params );
7380 snd_pcm_close( phandle );
7381 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7382 errorText_ = errorStream_.str();
7386 #if defined(__RTAUDIO_DEBUG__)
7387 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7388 snd_pcm_hw_params_dump( hw_params, out );
7391 // Set access ... check user preference.
7392 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7393 stream_.userInterleaved = false;
7394 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7396 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7397 stream_.deviceInterleaved[mode] = true;
7400 stream_.deviceInterleaved[mode] = false;
7403 stream_.userInterleaved = true;
7404 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7406 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7407 stream_.deviceInterleaved[mode] = false;
7410 stream_.deviceInterleaved[mode] = true;
7414 snd_pcm_close( phandle );
7415 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7416 errorText_ = errorStream_.str();
7420 // Determine how to set the device format.
7421 stream_.userFormat = format;
7422 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7424 if ( format == RTAUDIO_SINT8 )
7425 deviceFormat = SND_PCM_FORMAT_S8;
7426 else if ( format == RTAUDIO_SINT16 )
7427 deviceFormat = SND_PCM_FORMAT_S16;
7428 else if ( format == RTAUDIO_SINT24 )
7429 deviceFormat = SND_PCM_FORMAT_S24;
7430 else if ( format == RTAUDIO_SINT32 )
7431 deviceFormat = SND_PCM_FORMAT_S32;
7432 else if ( format == RTAUDIO_FLOAT32 )
7433 deviceFormat = SND_PCM_FORMAT_FLOAT;
7434 else if ( format == RTAUDIO_FLOAT64 )
7435 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7437 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7438 stream_.deviceFormat[mode] = format;
7442 // The user requested format is not natively supported by the device.
7443 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7444 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7445 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7449 deviceFormat = SND_PCM_FORMAT_FLOAT;
7450 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7451 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7455 deviceFormat = SND_PCM_FORMAT_S32;
7456 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7457 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7461 deviceFormat = SND_PCM_FORMAT_S24;
7462 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7463 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7467 deviceFormat = SND_PCM_FORMAT_S16;
7468 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7469 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7473 deviceFormat = SND_PCM_FORMAT_S8;
7474 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7475 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7479 // If we get here, no supported format was found.
7480 snd_pcm_close( phandle );
7481 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7482 errorText_ = errorStream_.str();
7486 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7488 snd_pcm_close( phandle );
7489 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7490 errorText_ = errorStream_.str();
7494 // Determine whether byte-swaping is necessary.
7495 stream_.doByteSwap[mode] = false;
7496 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7497 result = snd_pcm_format_cpu_endian( deviceFormat );
7499 stream_.doByteSwap[mode] = true;
7500 else if (result < 0) {
7501 snd_pcm_close( phandle );
7502 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7503 errorText_ = errorStream_.str();
7508 // Set the sample rate.
7509 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7511 snd_pcm_close( phandle );
7512 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7513 errorText_ = errorStream_.str();
7517 // Determine the number of channels for this device. We support a possible
7518 // minimum device channel number > than the value requested by the user.
7519 stream_.nUserChannels[mode] = channels;
7521 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7522 unsigned int deviceChannels = value;
7523 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7524 snd_pcm_close( phandle );
7525 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7526 errorText_ = errorStream_.str();
7530 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7532 snd_pcm_close( phandle );
7533 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7534 errorText_ = errorStream_.str();
7537 deviceChannels = value;
7538 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7539 stream_.nDeviceChannels[mode] = deviceChannels;
7541 // Set the device channels.
7542 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7544 snd_pcm_close( phandle );
7545 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7546 errorText_ = errorStream_.str();
7550 // Set the buffer (or period) size.
7552 snd_pcm_uframes_t periodSize = *bufferSize;
7553 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7555 snd_pcm_close( phandle );
7556 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7557 errorText_ = errorStream_.str();
7560 *bufferSize = periodSize;
7562 // Set the buffer number, which in ALSA is referred to as the "period".
7563 unsigned int periods = 0;
7564 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7565 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7566 if ( periods < 2 ) periods = 4; // a fairly safe default value
7567 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7569 snd_pcm_close( phandle );
7570 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7571 errorText_ = errorStream_.str();
7575 // If attempting to setup a duplex stream, the bufferSize parameter
7576 // MUST be the same in both directions!
7577 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7578 snd_pcm_close( phandle );
7579 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7580 errorText_ = errorStream_.str();
7584 stream_.bufferSize = *bufferSize;
7586 // Install the hardware configuration
7587 result = snd_pcm_hw_params( phandle, hw_params );
7589 snd_pcm_close( phandle );
7590 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7591 errorText_ = errorStream_.str();
7595 #if defined(__RTAUDIO_DEBUG__)
7596 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7597 snd_pcm_hw_params_dump( hw_params, out );
7600 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7601 snd_pcm_sw_params_t *sw_params = NULL;
7602 snd_pcm_sw_params_alloca( &sw_params );
7603 snd_pcm_sw_params_current( phandle, sw_params );
7604 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7605 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7606 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7608 // The following two settings were suggested by Theo Veenker
7609 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7610 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7612 // here are two options for a fix
7613 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7614 snd_pcm_uframes_t val;
7615 snd_pcm_sw_params_get_boundary( sw_params, &val );
7616 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7618 result = snd_pcm_sw_params( phandle, sw_params );
7620 snd_pcm_close( phandle );
7621 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7622 errorText_ = errorStream_.str();
7626 #if defined(__RTAUDIO_DEBUG__)
7627 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7628 snd_pcm_sw_params_dump( sw_params, out );
7631 // Set flags for buffer conversion
7632 stream_.doConvertBuffer[mode] = false;
7633 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7634 stream_.doConvertBuffer[mode] = true;
7635 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7636 stream_.doConvertBuffer[mode] = true;
7637 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7638 stream_.nUserChannels[mode] > 1 )
7639 stream_.doConvertBuffer[mode] = true;
7641 // Allocate the ApiHandle if necessary and then save.
7642 AlsaHandle *apiInfo = 0;
7643 if ( stream_.apiHandle == 0 ) {
7645 apiInfo = (AlsaHandle *) new AlsaHandle;
7647 catch ( std::bad_alloc& ) {
7648 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7652 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7653 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7657 stream_.apiHandle = (void *) apiInfo;
7658 apiInfo->handles[0] = 0;
7659 apiInfo->handles[1] = 0;
7662 apiInfo = (AlsaHandle *) stream_.apiHandle;
7664 apiInfo->handles[mode] = phandle;
7667 // Allocate necessary internal buffers.
7668 unsigned long bufferBytes;
7669 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7670 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7671 if ( stream_.userBuffer[mode] == NULL ) {
7672 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7676 if ( stream_.doConvertBuffer[mode] ) {
7678 bool makeBuffer = true;
7679 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7680 if ( mode == INPUT ) {
7681 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7682 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7683 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7688 bufferBytes *= *bufferSize;
7689 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7690 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7691 if ( stream_.deviceBuffer == NULL ) {
7692 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7698 stream_.sampleRate = sampleRate;
7699 stream_.nBuffers = periods;
7700 stream_.device[mode] = device;
7701 stream_.state = STREAM_STOPPED;
7703 // Setup the buffer conversion information structure.
7704 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7706 // Setup thread if necessary.
7707 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7708 // We had already set up an output stream.
7709 stream_.mode = DUPLEX;
7710 // Link the streams if possible.
7711 apiInfo->synchronized = false;
7712 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7713 apiInfo->synchronized = true;
7715 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7716 error( RtAudioError::WARNING );
7720 stream_.mode = mode;
7722 // Setup callback thread.
7723 stream_.callbackInfo.object = (void *) this;
7725 // Set the thread attributes for joinable and realtime scheduling
7726 // priority (optional). The higher priority will only take affect
7727 // if the program is run as root or suid. Note, under Linux
7728 // processes with CAP_SYS_NICE privilege, a user can change
7729 // scheduling policy and priority (thus need not be root). See
7730 // POSIX "capabilities".
7731 pthread_attr_t attr;
7732 pthread_attr_init( &attr );
7733 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7735 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
7736 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7737 // We previously attempted to increase the audio callback priority
7738 // to SCHED_RR here via the attributes. However, while no errors
7739 // were reported in doing so, it did not work. So, now this is
7740 // done in the alsaCallbackHandler function.
7741 stream_.callbackInfo.doRealtime = true;
7742 int priority = options->priority;
7743 int min = sched_get_priority_min( SCHED_RR );
7744 int max = sched_get_priority_max( SCHED_RR );
7745 if ( priority < min ) priority = min;
7746 else if ( priority > max ) priority = max;
7747 stream_.callbackInfo.priority = priority;
7751 stream_.callbackInfo.isRunning = true;
7752 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7753 pthread_attr_destroy( &attr );
7755 stream_.callbackInfo.isRunning = false;
7756 errorText_ = "RtApiAlsa::error creating callback thread!";
7765 pthread_cond_destroy( &apiInfo->runnable_cv );
7766 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7767 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7769 stream_.apiHandle = 0;
7772 if ( phandle) snd_pcm_close( phandle );
7774 for ( int i=0; i<2; i++ ) {
7775 if ( stream_.userBuffer[i] ) {
7776 free( stream_.userBuffer[i] );
7777 stream_.userBuffer[i] = 0;
7781 if ( stream_.deviceBuffer ) {
7782 free( stream_.deviceBuffer );
7783 stream_.deviceBuffer = 0;
7786 stream_.state = STREAM_CLOSED;
// Shut down an open ALSA stream: wake and join the callback thread, drop any
// queued PCM data, close the PCM handles, and free all per-stream buffers.
// NOTE(review): this excerpt elides some lines (braces/returns) vs. upstream.
7790 void RtApiAlsa :: closeStream()
7792 if ( stream_.state == STREAM_CLOSED ) {
7793 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
7794 error( RtAudioError::WARNING );
7798 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// Tell the callback thread to exit its run loop, then wake it in case it is
// blocked on the runnable condition variable (stream stopped).
7799 stream_.callbackInfo.isRunning = false;
7800 MUTEX_LOCK( &stream_.mutex );
7801 if ( stream_.state == STREAM_STOPPED ) {
7802 apiInfo->runnable = true;
7803 pthread_cond_signal( &apiInfo->runnable_cv );
7805 MUTEX_UNLOCK( &stream_.mutex );
// Wait for the callback thread to terminate before tearing down handles.
7806 pthread_join( stream_.callbackInfo.thread, NULL );
// If still running, drop (discard) pending frames on the open directions.
7808 if ( stream_.state == STREAM_RUNNING ) {
7809 stream_.state = STREAM_STOPPED;
7810 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
7811 snd_pcm_drop( apiInfo->handles[0] );
7812 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
7813 snd_pcm_drop( apiInfo->handles[1] );
// Release the condition variable and both PCM handles (index 0 = playback,
// index 1 = capture), then clear the API handle pointer.
7817 pthread_cond_destroy( &apiInfo->runnable_cv );
7818 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7819 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7821 stream_.apiHandle = 0;
// Free user-side and device-side conversion buffers.
7824 for ( int i=0; i<2; i++ ) {
7825 if ( stream_.userBuffer[i] ) {
7826 free( stream_.userBuffer[i] );
7827 stream_.userBuffer[i] = 0;
7831 if ( stream_.deviceBuffer ) {
7832 free( stream_.deviceBuffer );
7833 stream_.deviceBuffer = 0;
// Reset stream bookkeeping to the closed state.
7836 stream_.mode = UNINITIALIZED;
7837 stream_.state = STREAM_CLOSED;
// Start a stopped ALSA stream: prepare the PCM devices if needed, mark the
// stream running, and wake the callback thread waiting on runnable_cv.
7840 void RtApiAlsa :: startStream()
7842 // This method calls snd_pcm_prepare if the device isn't already in that state.
7845 if ( stream_.state == STREAM_RUNNING ) {
7846 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
7847 error( RtAudioError::WARNING );
7851 MUTEX_LOCK( &stream_.mutex );
7854 snd_pcm_state_t state;
7855 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7856 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Prepare the playback device (handle[0]) unless already prepared.
7857 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7858 state = snd_pcm_state( handle[0] );
7859 if ( state != SND_PCM_STATE_PREPARED ) {
7860 result = snd_pcm_prepare( handle[0] );
7862 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
7863 errorText_ = errorStream_.str();
// Prepare the capture device (handle[1]) unless the two devices are linked
// (synchronized), in which case preparing the output covers both.
7869 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7870 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
7871 state = snd_pcm_state( handle[1] );
7872 if ( state != SND_PCM_STATE_PREPARED ) {
7873 result = snd_pcm_prepare( handle[1] );
7875 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
7876 errorText_ = errorStream_.str();
7882 stream_.state = STREAM_RUNNING;
// Release the callback thread blocked in callbackEvent().
7885 apiInfo->runnable = true;
7886 pthread_cond_signal( &apiInfo->runnable_cv );
7887 MUTEX_UNLOCK( &stream_.mutex );
// result < 0 indicates an ALSA error captured above; report it as fatal.
7889 if ( result >= 0 ) return;
7890 error( RtAudioError::SYSTEM_ERROR );
// Stop a running ALSA stream gracefully: drain (play out) remaining output
// frames when possible, drop input frames, and park the callback thread.
7893 void RtApiAlsa :: stopStream()
7896 if ( stream_.state == STREAM_STOPPED ) {
7897 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
7898 error( RtAudioError::WARNING );
7902 stream_.state = STREAM_STOPPED;
7903 MUTEX_LOCK( &stream_.mutex );
7906 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7907 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7908 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Linked (synchronized) streams are dropped rather than drained; draining a
// linked pair is presumably unsafe here — elided lines hide the else branch.
7909 if ( apiInfo->synchronized )
7910 result = snd_pcm_drop( handle[0] );
7912 result = snd_pcm_drain( handle[0] );
7914 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
7915 errorText_ = errorStream_.str();
// Input has nothing to play out, so pending capture data is simply dropped.
7920 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7921 result = snd_pcm_drop( handle[1] );
7923 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
7924 errorText_ = errorStream_.str();
7930 apiInfo->runnable = false; // fixes high CPU usage when stopped
7931 MUTEX_UNLOCK( &stream_.mutex );
7933 if ( result >= 0 ) return;
7934 error( RtAudioError::SYSTEM_ERROR );
// Abort a running ALSA stream immediately: unlike stopStream(), output is
// dropped (discarded) rather than drained, so no pending audio is played out.
7937 void RtApiAlsa :: abortStream()
7940 if ( stream_.state == STREAM_STOPPED ) {
7941 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
7942 error( RtAudioError::WARNING );
7946 stream_.state = STREAM_STOPPED;
7947 MUTEX_LOCK( &stream_.mutex );
7950 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7951 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7952 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7953 result = snd_pcm_drop( handle[0] );
7955 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
7956 errorText_ = errorStream_.str();
// Drop the capture side too, unless the devices are linked (synchronized).
7961 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7962 result = snd_pcm_drop( handle[1] );
7964 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
7965 errorText_ = errorStream_.str();
7971 apiInfo->runnable = false; // fixes high CPU usage when stopped
7972 MUTEX_UNLOCK( &stream_.mutex );
7974 if ( result >= 0 ) return;
7975 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the ALSA callback loop: wait while stopped, invoke the
// user callback with fresh status flags, then perform one read (capture) and
// one write (playback) of stream_.bufferSize frames, handling xruns (-EPIPE).
// NOTE(review): this excerpt elides some lines (braces/else/returns).
7978 void RtApiAlsa :: callbackEvent()
7980 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// When stopped, block on runnable_cv until startStream()/closeStream() wakes us.
7981 if ( stream_.state == STREAM_STOPPED ) {
7982 MUTEX_LOCK( &stream_.mutex );
7983 while ( !apiInfo->runnable )
7984 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
7986 if ( stream_.state != STREAM_RUNNING ) {
7987 MUTEX_UNLOCK( &stream_.mutex );
7990 MUTEX_UNLOCK( &stream_.mutex );
7993 if ( stream_.state == STREAM_CLOSED ) {
7994 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
7995 error( RtAudioError::WARNING );
// Report any xrun detected since the last tick via the status flags, then
// clear the per-direction xrun latch (index 0 = output, 1 = input).
7999 int doStopStream = 0;
8000 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8001 double streamTime = getStreamTime();
8002 RtAudioStreamStatus status = 0;
8003 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8004 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8005 apiInfo->xrun[0] = false;
8007 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8008 status |= RTAUDIO_INPUT_OVERFLOW;
8009 apiInfo->xrun[1] = false;
// Invoke the user callback; return value 1 requests drain-stop, 2 abort.
8011 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8012 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
8014 if ( doStopStream == 2 ) {
8019 MUTEX_LOCK( &stream_.mutex );
8021 // The state might change while waiting on a mutex.
8022 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8028 snd_pcm_sframes_t frames;
8029 RtAudioFormat format;
8030 handle = (snd_pcm_t **) apiInfo->handles;
// ---- Capture side ----
8032 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8034 // Setup parameters.
// Read into the device buffer when format/channel conversion is needed,
// otherwise directly into the user buffer.
8035 if ( stream_.doConvertBuffer[1] ) {
8036 buffer = stream_.deviceBuffer;
8037 channels = stream_.nDeviceChannels[1];
8038 format = stream_.deviceFormat[1];
8041 buffer = stream_.userBuffer[1];
8042 channels = stream_.nUserChannels[1];
8043 format = stream_.userFormat;
8046 // Read samples from device in interleaved/non-interleaved format.
8047 if ( stream_.deviceInterleaved[1] )
8048 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved: build one pointer per channel into the planar buffer.
8050 void *bufs[channels];
8051 size_t offset = stream_.bufferSize * formatBytes( format );
8052 for ( int i=0; i<channels; i++ )
8053 bufs[i] = (void *) (buffer + (i * offset));
8054 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
// Short read: -EPIPE signals an overrun — latch it and re-prepare the device.
8057 if ( result < (int) stream_.bufferSize ) {
8058 // Either an error or overrun occured.
8059 if ( result == -EPIPE ) {
8060 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8061 if ( state == SND_PCM_STATE_XRUN ) {
8062 apiInfo->xrun[1] = true;
8063 result = snd_pcm_prepare( handle[1] );
8065 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8066 errorText_ = errorStream_.str();
8070 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8071 errorText_ = errorStream_.str();
8075 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8076 errorText_ = errorStream_.str();
8078 error( RtAudioError::WARNING );
8082 // Do byte swapping if necessary.
8083 if ( stream_.doByteSwap[1] )
8084 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8086 // Do buffer conversion if necessary.
8087 if ( stream_.doConvertBuffer[1] )
8088 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8090 // Check stream latency
8091 result = snd_pcm_delay( handle[1], &frames );
8092 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---- Playback side ----
8097 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8099 // Setup parameters and do buffer conversion if necessary.
8100 if ( stream_.doConvertBuffer[0] ) {
8101 buffer = stream_.deviceBuffer;
8102 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8103 channels = stream_.nDeviceChannels[0];
8104 format = stream_.deviceFormat[0];
8107 buffer = stream_.userBuffer[0];
8108 channels = stream_.nUserChannels[0];
8109 format = stream_.userFormat;
8112 // Do byte swapping if necessary.
8113 if ( stream_.doByteSwap[0] )
8114 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8116 // Write samples to device in interleaved/non-interleaved format.
8117 if ( stream_.deviceInterleaved[0] )
8118 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
8120 void *bufs[channels];
8121 size_t offset = stream_.bufferSize * formatBytes( format );
8122 for ( int i=0; i<channels; i++ )
8123 bufs[i] = (void *) (buffer + (i * offset));
8124 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
// Short write: -EPIPE signals an underrun — latch it and re-prepare.
8127 if ( result < (int) stream_.bufferSize ) {
8128 // Either an error or underrun occured.
8129 if ( result == -EPIPE ) {
8130 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8131 if ( state == SND_PCM_STATE_XRUN ) {
8132 apiInfo->xrun[0] = true;
8133 result = snd_pcm_prepare( handle[0] );
8135 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8136 errorText_ = errorStream_.str();
8139 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8142 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8143 errorText_ = errorStream_.str();
8147 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8148 errorText_ = errorStream_.str();
8150 error( RtAudioError::WARNING );
8154 // Check stream latency
8155 result = snd_pcm_delay( handle[0], &frames );
8156 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8160 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; honor a callback-requested drain-stop (1).
8162 RtApi::tickStreamTime();
8163 if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the ALSA callback thread. Optionally raises its own
// scheduling policy to SCHED_RR (see probeDeviceOpen comment: setting it via
// pthread attributes did not work, so it is done here from inside the thread),
// then loops calling callbackEvent() until isRunning is cleared.
8166 static void *alsaCallbackHandler( void *ptr )
8168 CallbackInfo *info = (CallbackInfo *) ptr;
8169 RtApiAlsa *object = (RtApiAlsa *) info->object;
// Pointer (not copy) so closeStream()'s write to isRunning ends the loop.
8170 bool *isRunning = &info->isRunning;
8172 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8173 if ( info->doRealtime ) {
8174 pthread_t tID = pthread_self(); // ID of this thread
8175 sched_param prio = { info->priority }; // scheduling priority of thread
8176 pthread_setschedparam( tID, SCHED_RR, &prio );
// Main loop: each iteration processes one buffer via callbackEvent().
8180 while ( *isRunning == true ) {
8181 pthread_testcancel();
8182 object->callbackEvent();
8185 pthread_exit( NULL );
8188 //******************** End of __LINUX_ALSA__ *********************//
8191 #if defined(__LINUX_PULSE__)
8193 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8194 // and Tristan Matthews.
8196 #include <pulse/error.h>
8197 #include <pulse/simple.h>
// Sample rates the PulseAudio backend advertises; zero-terminated so callers
// can iterate with `for ( const unsigned int *sr = ...; *sr; ++sr )`.
8200 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8201 44100, 48000, 96000, 0};
// Pairs an RtAudio sample format with its PulseAudio equivalent; used to
// translate the requested format into a pa_sample_spec.
8203 struct rtaudio_pa_format_mapping_t {
8204 RtAudioFormat rtaudio_format;
8205 pa_sample_format_t pa_format;
// Format translation table, terminated by a {0, PA_SAMPLE_INVALID} sentinel.
// Formats not listed here fall back to FLOAT32 with internal conversion
// (see RtApiPulse::probeDeviceOpen).
8208 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8209 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8210 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8211 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8212 {0, PA_SAMPLE_INVALID}};
// Per-stream PulseAudio state: simple-API playback/record connections
// (s_play/s_rec, initialized to 0), the callback thread, and the runnable
// flag + condition variable used to park the thread while stopped.
// NOTE(review): member declarations (s_play, s_rec, thread, runnable) are
// elided from this excerpt; the constructor references them.
8214 struct PulseAudioHandle {
8218 pthread_cond_t runnable_cv;
8220 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: ensure the stream is closed (presumably via closeStream() on
// the elided next line) before the object is destroyed.
8223 RtApiPulse::~RtApiPulse()
8225 if ( stream_.state != STREAM_CLOSED )
// The PulseAudio backend exposes a single logical device (the server);
// body elided in this excerpt — presumably `return 1;` (TODO confirm).
8229 unsigned int RtApiPulse::getDeviceCount( void )
// Return a fixed description of the single PulseAudio "device": stereo in and
// out, default for both directions, with the rates from SUPPORTED_SAMPLERATES
// and the three natively supported formats. The device index is ignored.
8234 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8236 RtAudio::DeviceInfo info;
8238 info.name = "PulseAudio";
8239 info.outputChannels = 2;
8240 info.inputChannels = 2;
8241 info.duplexChannels = 2;
8242 info.isDefaultOutput = true;
8243 info.isDefaultInput = true;
8245 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8246 info.sampleRates.push_back( *sr );
8248 info.preferredSampleRate = 48000;
8249 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
// Thread entry point for the PulseAudio callback thread: repeatedly invoke
// callbackEvent() until closeStream() clears isRunning.
8254 static void *pulseaudio_callback( void * user )
8256 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8257 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
// volatile pointer so the flag written by closeStream() is re-read each loop.
8258 volatile bool *isRunning = &cbi->isRunning;
8260 while ( *isRunning ) {
8261 pthread_testcancel();
8262 context->callbackEvent();
8265 pthread_exit( NULL );
// Close the PulseAudio stream: wake and join the callback thread, flush and
// free the simple-API connections, and release per-stream buffers and state.
8268 void RtApiPulse::closeStream( void )
8270 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// Signal the callback thread to exit; wake it if parked in the stopped state.
8272 stream_.callbackInfo.isRunning = false;
8274 MUTEX_LOCK( &stream_.mutex );
8275 if ( stream_.state == STREAM_STOPPED ) {
8276 pah->runnable = true;
8277 pthread_cond_signal( &pah->runnable_cv );
8279 MUTEX_UNLOCK( &stream_.mutex );
8281 pthread_join( pah->thread, 0 );
// Discard unplayed output before freeing the playback connection.
8282 if ( pah->s_play ) {
8283 pa_simple_flush( pah->s_play, NULL );
8284 pa_simple_free( pah->s_play );
8287 pa_simple_free( pah->s_rec );
8289 pthread_cond_destroy( &pah->runnable_cv );
8291 stream_.apiHandle = 0;
// Free user buffers for both directions (0 = output, 1 = input).
8294 if ( stream_.userBuffer[0] ) {
8295 free( stream_.userBuffer[0] );
8296 stream_.userBuffer[0] = 0;
8298 if ( stream_.userBuffer[1] ) {
8299 free( stream_.userBuffer[1] );
8300 stream_.userBuffer[1] = 0;
8303 stream_.state = STREAM_CLOSED;
8304 stream_.mode = UNINITIALIZED;
// One iteration of the PulseAudio callback loop: wait while stopped, run the
// user callback, then do one blocking pa_simple_write (playback) and/or
// pa_simple_read (capture) of stream_.bufferSize frames, converting formats
// through stream_.deviceBuffer when needed.
8307 void RtApiPulse::callbackEvent( void )
8309 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// Park on runnable_cv while the stream is stopped.
8311 if ( stream_.state == STREAM_STOPPED ) {
8312 MUTEX_LOCK( &stream_.mutex );
8313 while ( !pah->runnable )
8314 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8316 if ( stream_.state != STREAM_RUNNING ) {
8317 MUTEX_UNLOCK( &stream_.mutex );
8320 MUTEX_UNLOCK( &stream_.mutex );
8323 if ( stream_.state == STREAM_CLOSED ) {
8324 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8325 "this shouldn't happen!";
8326 error( RtAudioError::WARNING );
// Invoke the user callback; return value 1 requests stop, 2 abort.
8330 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8331 double streamTime = getStreamTime();
8332 RtAudioStreamStatus status = 0;
8333 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8334 stream_.bufferSize, streamTime, status,
8335 stream_.callbackInfo.userData );
8337 if ( doStopStream == 2 ) {
8342 MUTEX_LOCK( &stream_.mutex );
// Use the conversion (device) buffer when a format/channel conversion is
// configured for a direction, otherwise transfer the user buffer directly.
8343 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8344 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8346 if ( stream_.state != STREAM_RUNNING )
// ---- Playback: convert (if needed), compute the byte count, then write ----
8351 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8352 if ( stream_.doConvertBuffer[OUTPUT] ) {
8353 convertBuffer( stream_.deviceBuffer,
8354 stream_.userBuffer[OUTPUT],
8355 stream_.convertInfo[OUTPUT] );
8356 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8357 formatBytes( stream_.deviceFormat[OUTPUT] );
8359 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8360 formatBytes( stream_.userFormat );
8362 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8363 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8364 pa_strerror( pa_error ) << ".";
8365 errorText_ = errorStream_.str();
8366 error( RtAudioError::WARNING );
// ---- Capture: read, then convert into the user buffer if needed ----
8370 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8371 if ( stream_.doConvertBuffer[INPUT] )
8372 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8373 formatBytes( stream_.deviceFormat[INPUT] );
8375 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8376 formatBytes( stream_.userFormat );
8378 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8379 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8380 pa_strerror( pa_error ) << ".";
8381 errorText_ = errorStream_.str();
8382 error( RtAudioError::WARNING );
8384 if ( stream_.doConvertBuffer[INPUT] ) {
8385 convertBuffer( stream_.userBuffer[INPUT],
8386 stream_.deviceBuffer,
8387 stream_.convertInfo[INPUT] );
8392 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; honor a callback-requested stop (1).
8393 RtApi::tickStreamTime();
8395 if ( doStopStream == 1 )
// Start a stopped PulseAudio stream: mark it running and wake the callback
// thread parked on runnable_cv in callbackEvent().
8399 void RtApiPulse::startStream( void )
8401 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8403 if ( stream_.state == STREAM_CLOSED ) {
8404 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8405 error( RtAudioError::INVALID_USE );
8408 if ( stream_.state == STREAM_RUNNING ) {
8409 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8410 error( RtAudioError::WARNING );
8414 MUTEX_LOCK( &stream_.mutex );
8416 stream_.state = STREAM_RUNNING;
// Release the parked callback thread.
8418 pah->runnable = true;
8419 pthread_cond_signal( &pah->runnable_cv );
8420 MUTEX_UNLOCK( &stream_.mutex );
// Stop a running PulseAudio stream gracefully: mark it stopped (parking the
// callback thread on its next iteration) and drain pending playback audio so
// everything already written is heard.
8423 void RtApiPulse::stopStream( void )
8425 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8427 if ( stream_.state == STREAM_CLOSED ) {
8428 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8429 error( RtAudioError::INVALID_USE );
8432 if ( stream_.state == STREAM_STOPPED ) {
8433 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8434 error( RtAudioError::WARNING );
8438 stream_.state = STREAM_STOPPED;
8439 MUTEX_LOCK( &stream_.mutex );
// Drain (play out) whatever the server still buffers for playback.
8441 if ( pah && pah->s_play ) {
8443 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8444 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8445 pa_strerror( pa_error ) << ".";
8446 errorText_ = errorStream_.str();
// Unlock before raising so the mutex is not held through the error path.
8447 MUTEX_UNLOCK( &stream_.mutex );
8448 error( RtAudioError::SYSTEM_ERROR );
8453 stream_.state = STREAM_STOPPED;
8454 MUTEX_UNLOCK( &stream_.mutex );
// Abort a running PulseAudio stream immediately: same shape as stopStream(),
// but flushes (discards) buffered playback audio instead of draining it.
8457 void RtApiPulse::abortStream( void )
8459 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8461 if ( stream_.state == STREAM_CLOSED ) {
8462 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8463 error( RtAudioError::INVALID_USE );
8466 if ( stream_.state == STREAM_STOPPED ) {
8467 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8468 error( RtAudioError::WARNING );
8472 stream_.state = STREAM_STOPPED;
8473 MUTEX_LOCK( &stream_.mutex );
// Flush discards pending output rather than waiting for it to play.
8475 if ( pah && pah->s_play ) {
8477 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8478 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8479 pa_strerror( pa_error ) << ".";
8480 errorText_ = errorStream_.str();
8481 MUTEX_UNLOCK( &stream_.mutex );
8482 error( RtAudioError::SYSTEM_ERROR );
8487 stream_.state = STREAM_STOPPED;
8488 MUTEX_UNLOCK( &stream_.mutex );
// Open one direction (INPUT or OUTPUT) of the PulseAudio stream: validate
// the request, build a pa_sample_spec, allocate user/device buffers, connect
// via the simple API, and (first call only) spawn the callback thread.
// Returns true on success; FAILURE below is presumably a cleanup label whose
// `goto`s are elided from this excerpt.
8491 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8492 unsigned int channels, unsigned int firstChannel,
8493 unsigned int sampleRate, RtAudioFormat format,
8494 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8496 PulseAudioHandle *pah = 0;
8497 unsigned long bufferBytes = 0;
// Only device 0 exists; only mono/stereo with no channel offset is supported.
8500 if ( device != 0 ) return false;
8501 if ( mode != INPUT && mode != OUTPUT ) return false;
8502 if ( channels != 1 && channels != 2 ) {
8503 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8506 ss.channels = channels;
8508 if ( firstChannel != 0 ) return false;
// The requested rate must appear in the zero-terminated supported-rate list.
8510 bool sr_found = false;
8511 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8512 if ( sampleRate == *sr ) {
8514 stream_.sampleRate = sampleRate;
8515 ss.rate = sampleRate;
8520 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
// Map the RtAudio format to a native PulseAudio format when possible ...
8525 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8526 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8527 if ( format == sf->rtaudio_format ) {
8529 stream_.userFormat = sf->rtaudio_format;
8530 stream_.deviceFormat[mode] = stream_.userFormat;
8531 ss.format = sf->pa_format;
// ... otherwise run the device at FLOAT32 and convert internally.
8535 if ( !sf_found ) { // Use internal data format conversion.
8536 stream_.userFormat = format;
8537 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8538 ss.format = PA_SAMPLE_FLOAT32LE;
8541 // Set other stream parameters.
8542 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8543 else stream_.userInterleaved = true;
8544 stream_.deviceInterleaved[mode] = true;
8545 stream_.nBuffers = 1;
8546 stream_.doByteSwap[mode] = false;
8547 stream_.nUserChannels[mode] = channels;
8548 stream_.nDeviceChannels[mode] = channels + firstChannel;
8549 stream_.channelOffset[mode] = 0;
8550 std::string streamName = "RtAudio";
8552 // Set flags for buffer conversion.
8553 stream_.doConvertBuffer[mode] = false;
8554 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8555 stream_.doConvertBuffer[mode] = true;
8556 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8557 stream_.doConvertBuffer[mode] = true;
8559 // Allocate necessary internal buffers.
8560 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8561 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8562 if ( stream_.userBuffer[mode] == NULL ) {
8563 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8566 stream_.bufferSize = *bufferSize;
// Allocate (or reuse) a shared device-side conversion buffer large enough
// for this direction; an existing larger output buffer is reused for input.
8568 if ( stream_.doConvertBuffer[mode] ) {
8570 bool makeBuffer = true;
8571 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8572 if ( mode == INPUT ) {
8573 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8574 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8575 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8580 bufferBytes *= *bufferSize;
8581 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8582 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8583 if ( stream_.deviceBuffer == NULL ) {
8584 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8590 stream_.device[mode] = device;
8592 // Setup the buffer conversion information structure.
8593 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
// Create the API handle on the first open; reuse it on the second (duplex).
8595 if ( !stream_.apiHandle ) {
8596 PulseAudioHandle *pah = new PulseAudioHandle;
8598 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8602 stream_.apiHandle = pah;
8603 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8604 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8608 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8611 if ( options && !options->streamName.empty() ) streamName = options->streamName;
// Connect the record or playback direction via the PulseAudio simple API.
8614 pa_buffer_attr buffer_attr;
8615 buffer_attr.fragsize = bufferBytes;
8616 buffer_attr.maxlength = -1;
8618 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8619 if ( !pah->s_rec ) {
8620 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8625 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8626 if ( !pah->s_play ) {
8627 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
// Track the overall mode: first direction sets it, second makes it DUPLEX.
8635 if ( stream_.mode == UNINITIALIZED )
8636 stream_.mode = mode;
8637 else if ( stream_.mode == mode )
8640 stream_.mode = DUPLEX;
// Spawn the callback thread once, on the first successful open.
8642 if ( !stream_.callbackInfo.isRunning ) {
8643 stream_.callbackInfo.object = this;
8644 stream_.callbackInfo.isRunning = true;
8645 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
8646 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8651 stream_.state = STREAM_STOPPED;
// Error cleanup: destroy the handle and free any buffers allocated above.
8655 if ( pah && stream_.callbackInfo.isRunning ) {
8656 pthread_cond_destroy( &pah->runnable_cv );
8658 stream_.apiHandle = 0;
8661 for ( int i=0; i<2; i++ ) {
8662 if ( stream_.userBuffer[i] ) {
8663 free( stream_.userBuffer[i] );
8664 stream_.userBuffer[i] = 0;
8668 if ( stream_.deviceBuffer ) {
8669 free( stream_.deviceBuffer );
8670 stream_.deviceBuffer = 0;
8676 //******************** End of __LINUX_PULSE__ *********************//
8679 #if defined(__LINUX_OSS__)
8682 #include <sys/ioctl.h>
8685 #include <sys/soundcard.h>
8689 static void *ossCallbackHandler(void * ptr);
8691 // A structure to hold various information related to the OSS API
8694 int id[2]; // device ids
8697 pthread_cond_t runnable;
8700 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Default constructor — all setup happens later in probeDeviceOpen().
8703 RtApiOss :: RtApiOss()
8705 // Nothing to do here.
// Destructor: close any stream still open so OSS descriptors are released.
8708 RtApiOss :: ~RtApiOss()
8710 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count OSS audio devices by querying SNDCTL_SYSINFO on /dev/mixer.
// Requires OSS >= 4.0; failures are reported as warnings (returning 0 on the
// elided error-return lines, presumably).
8713 unsigned int RtApiOss :: getDeviceCount( void )
8715 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8716 if ( mixerfd == -1 ) {
8717 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
8718 error( RtAudioError::WARNING );
8722 oss_sysinfo sysinfo;
8723 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
8725 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
8726 error( RtAudioError::WARNING );
8731 return sysinfo.numaudios;
// Probe one OSS device via SNDCTL_AUDIOINFO: channel capabilities, supported
// data formats, and sample rates. info.probed starts false and is presumably
// set true on an elided line after a successful probe.
8734 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
8736 RtAudio::DeviceInfo info;
8737 info.probed = false;
8739 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8740 if ( mixerfd == -1 ) {
8741 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
8742 error( RtAudioError::WARNING );
8746 oss_sysinfo sysinfo;
8747 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8748 if ( result == -1 ) {
8750 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
8751 error( RtAudioError::WARNING );
8755 unsigned nDevices = sysinfo.numaudios;
8756 if ( nDevices == 0 ) {
8758 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
8759 error( RtAudioError::INVALID_USE );
8763 if ( device >= nDevices ) {
8765 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
8766 error( RtAudioError::INVALID_USE );
8770 oss_audioinfo ainfo;
8772 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8774 if ( result == -1 ) {
8775 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8776 errorText_ = errorStream_.str();
8777 error( RtAudioError::WARNING );
// Channel capabilities come from the device caps bitmask.
8782 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
8783 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
8784 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
8785 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
8786 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
8789 // Probe data formats ... do for input
// Translate the OSS input-format bitmask into RtAudio format flags.
8790 unsigned long mask = ainfo.iformats;
8791 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
8792 info.nativeFormats |= RTAUDIO_SINT16;
8793 if ( mask & AFMT_S8 )
8794 info.nativeFormats |= RTAUDIO_SINT8;
8795 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
8796 info.nativeFormats |= RTAUDIO_SINT32;
8798 if ( mask & AFMT_FLOAT )
8799 info.nativeFormats |= RTAUDIO_FLOAT32;
8801 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
8802 info.nativeFormats |= RTAUDIO_SINT24;
8804 // Check that we have at least one supported format
8805 if ( info.nativeFormats == 0 ) {
8806 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
8807 errorText_ = errorStream_.str();
8808 error( RtAudioError::WARNING );
8812 // Probe the supported sample rates.
8813 info.sampleRates.clear();
// Either the device reports an explicit rate list (nrates > 0) ...
8814 if ( ainfo.nrates ) {
8815 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
8816 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8817 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
8818 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Prefer the highest supported rate not exceeding 48 kHz.
8820 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8821 info.preferredSampleRate = SAMPLE_RATES[k];
8829 // Check min and max rate values;
// ... or only a min/max range, tested against the standard rate table.
8830 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8831 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
8832 info.sampleRates.push_back( SAMPLE_RATES[k] );
8834 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8835 info.preferredSampleRate = SAMPLE_RATES[k];
8840 if ( info.sampleRates.size() == 0 ) {
8841 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
8842 errorText_ = errorStream_.str();
8843 error( RtAudioError::WARNING );
8847 info.name = ainfo.name;
// Open and configure an OSS (v4+) device for one direction of a stream.
// Queries /dev/mixer for device info, opens the dsp node, negotiates
// channels / sample format / fragment size / sample rate, allocates the
// user (and, if conversion is needed, device) buffers, and finally spawns
// the callback thread.  On any failure it falls through to the cleanup
// code at the bottom (presumably returning FAILURE — the sampled view
// drops the return statements; TODO confirm against the full source).
8854 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
8855 unsigned int firstChannel, unsigned int sampleRate,
8856 RtAudioFormat format, unsigned int *bufferSize,
8857 RtAudio::StreamOptions *options )
// The mixer device is only needed for the SNDCTL_SYSINFO/AUDIOINFO queries.
8859 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8860 if ( mixerfd == -1 ) {
8861 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
8865 oss_sysinfo sysinfo;
8866 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8867 if ( result == -1 ) {
8869 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
8873 unsigned nDevices = sysinfo.numaudios;
8874 if ( nDevices == 0 ) {
8875 // This should not happen because a check is made before this function is called.
8877 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
8881 if ( device >= nDevices ) {
8882 // This should not happen because a check is made before this function is called.
8884 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
8888 oss_audioinfo ainfo;
8890 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8892 if ( result == -1 ) {
// NOTE(review): message prefix says "getDeviceInfo" but we are in
// probeDeviceOpen — looks like a copy/paste slip; confirm before fixing.
8893 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8894 errorText_ = errorStream_.str();
8898 // Check if device supports input or output
8899 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
8900 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
8901 if ( mode == OUTPUT )
8902 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
8904 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
8905 errorText_ = errorStream_.str();
// Choose open flags.  For INPUT on the device already opened for OUTPUT,
// OSS requires closing and reopening the node in read/write (duplex) mode.
8910 OssHandle *handle = (OssHandle *) stream_.apiHandle;
8911 if ( mode == OUTPUT )
8913 else { // mode == INPUT
8914 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
8915 // We just set the same device for playback ... close and reopen for duplex (OSS only).
8916 close( handle->id[0] );
8918 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
8919 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
8920 errorText_ = errorStream_.str();
8923 // Check that the number previously set channels is the same.
8924 if ( stream_.nUserChannels[0] != channels ) {
8925 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
8926 errorText_ = errorStream_.str();
8935 // Set exclusive access if specified.
8936 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
8938 // Try to open the device.
8940 fd = open( ainfo.devnode, flags, 0 );
8942 if ( errno == EBUSY )
8943 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
8945 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
8946 errorText_ = errorStream_.str();
8950 // For duplex operation, specifically set this mode (this doesn't seem to work).
// NOTE(review): 'flags | O_RDWR' is always non-zero, so SETDUPLEX is
// attempted unconditionally — almost certainly meant '(flags & O_ACCMODE)
// == O_RDWR' or 'flags & O_RDWR'.  Confirm intent before changing.
8952 if ( flags | O_RDWR ) {
8953 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
8954 if ( result == -1) {
8955 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
8956 errorText_ = errorStream_.str();
8962 // Check the device channel support.
8963 stream_.nUserChannels[mode] = channels;
8964 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
8966 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
8967 errorText_ = errorStream_.str();
8971 // Set the number of channels.
8972 int deviceChannels = channels + firstChannel;
8973 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
// SNDCTL_DSP_CHANNELS rewrites deviceChannels with what the driver granted;
// fewer channels than requested is treated as failure.
8974 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
8976 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
8977 errorText_ = errorStream_.str();
8980 stream_.nDeviceChannels[mode] = deviceChannels;
8982 // Get the data format mask
8984 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
8985 if ( result == -1 ) {
8987 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
8988 errorText_ = errorStream_.str();
8992 // Determine how to set the device format.
// Prefer the native-endian variant of the user's format; fall back to the
// opposite-endian variant with software byte swapping enabled.
8993 stream_.userFormat = format;
8994 int deviceFormat = -1;
8995 stream_.doByteSwap[mode] = false;
8996 if ( format == RTAUDIO_SINT8 ) {
8997 if ( mask & AFMT_S8 ) {
8998 deviceFormat = AFMT_S8;
8999 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9002 else if ( format == RTAUDIO_SINT16 ) {
9003 if ( mask & AFMT_S16_NE ) {
9004 deviceFormat = AFMT_S16_NE;
9005 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9007 else if ( mask & AFMT_S16_OE ) {
9008 deviceFormat = AFMT_S16_OE;
9009 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9010 stream_.doByteSwap[mode] = true;
9013 else if ( format == RTAUDIO_SINT24 ) {
9014 if ( mask & AFMT_S24_NE ) {
9015 deviceFormat = AFMT_S24_NE;
9016 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9018 else if ( mask & AFMT_S24_OE ) {
9019 deviceFormat = AFMT_S24_OE;
9020 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9021 stream_.doByteSwap[mode] = true;
9024 else if ( format == RTAUDIO_SINT32 ) {
9025 if ( mask & AFMT_S32_NE ) {
9026 deviceFormat = AFMT_S32_NE;
9027 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9029 else if ( mask & AFMT_S32_OE ) {
9030 deviceFormat = AFMT_S32_OE;
9031 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9032 stream_.doByteSwap[mode] = true;
9036 if ( deviceFormat == -1 ) {
9037 // The user requested format is not natively supported by the device.
// Pick the best available device format; convertBuffer() will translate
// to/from the user format later (doConvertBuffer is set further down).
9038 if ( mask & AFMT_S16_NE ) {
9039 deviceFormat = AFMT_S16_NE;
9040 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9042 else if ( mask & AFMT_S32_NE ) {
9043 deviceFormat = AFMT_S32_NE;
9044 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9046 else if ( mask & AFMT_S24_NE ) {
9047 deviceFormat = AFMT_S24_NE;
9048 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9050 else if ( mask & AFMT_S16_OE ) {
9051 deviceFormat = AFMT_S16_OE;
9052 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9053 stream_.doByteSwap[mode] = true;
9055 else if ( mask & AFMT_S32_OE ) {
9056 deviceFormat = AFMT_S32_OE;
9057 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9058 stream_.doByteSwap[mode] = true;
9060 else if ( mask & AFMT_S24_OE ) {
9061 deviceFormat = AFMT_S24_OE;
9062 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9063 stream_.doByteSwap[mode] = true;
9065 else if ( mask & AFMT_S8) {
9066 deviceFormat = AFMT_S8;
9067 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9071 if ( stream_.deviceFormat[mode] == 0 ) {
9072 // This really shouldn't happen ...
9074 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9075 errorText_ = errorStream_.str();
9079 // Set the data format.
// The ioctl rewrites its argument; a silent substitution by the driver is
// treated the same as outright failure.
9080 int temp = deviceFormat;
9081 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9082 if ( result == -1 || deviceFormat != temp ) {
9084 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9085 errorText_ = errorStream_.str();
9089 // Attempt to set the buffer size. According to OSS, the minimum
9090 // number of buffers is two. The supposed minimum buffer size is 16
9091 // bytes, so that will be our lower bound. The argument to this
9092 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9093 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9094 // We'll check the actual value used near the end of the setup
9096 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9097 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9099 if ( options ) buffers = options->numberOfBuffers;
9100 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9101 if ( buffers < 2 ) buffers = 3;
// log10(x)/log10(2) == log2(x): extract the SSSS (size) exponent.
9102 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9103 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9104 if ( result == -1 ) {
9106 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9107 errorText_ = errorStream_.str();
9110 stream_.nBuffers = buffers;
9112 // Save buffer size (in sample frames).
9113 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9114 stream_.bufferSize = *bufferSize;
9116 // Set the sample rate.
9117 int srate = sampleRate;
9118 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9119 if ( result == -1 ) {
9121 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9122 errorText_ = errorStream_.str();
9126 // Verify the sample rate setup worked.
// Tolerate a small (<= 100 Hz) deviation from the requested rate.
9127 if ( abs( srate - (int)sampleRate ) > 100 ) {
9129 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9130 errorText_ = errorStream_.str();
9133 stream_.sampleRate = sampleRate;
9135 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9136 // We're doing duplex setup here.
// The single reopened duplex fd dictates format/channels for both ends.
9137 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9138 stream_.nDeviceChannels[0] = deviceChannels;
9141 // Set interleaving parameters.
9142 stream_.userInterleaved = true;
9143 stream_.deviceInterleaved[mode] = true;
9144 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9145 stream_.userInterleaved = false;
9147 // Set flags for buffer conversion
9148 stream_.doConvertBuffer[mode] = false;
9149 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9150 stream_.doConvertBuffer[mode] = true;
9151 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9152 stream_.doConvertBuffer[mode] = true;
9153 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9154 stream_.nUserChannels[mode] > 1 )
9155 stream_.doConvertBuffer[mode] = true;
9157 // Allocate the stream handles if necessary and then save.
9158 if ( stream_.apiHandle == 0 ) {
9160 handle = new OssHandle;
9162 catch ( std::bad_alloc& ) {
9163 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9167 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9168 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9172 stream_.apiHandle = (void *) handle;
9175 handle = (OssHandle *) stream_.apiHandle;
9177 handle->id[mode] = fd;
9179 // Allocate necessary internal buffers.
9180 unsigned long bufferBytes;
9181 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9182 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9183 if ( stream_.userBuffer[mode] == NULL ) {
9184 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9188 if ( stream_.doConvertBuffer[mode] ) {
9190 bool makeBuffer = true;
9191 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9192 if ( mode == INPUT ) {
// Reuse the existing (output) device buffer when it is already big enough.
9193 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9194 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9195 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9200 bufferBytes *= *bufferSize;
9201 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9202 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9203 if ( stream_.deviceBuffer == NULL ) {
9204 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9210 stream_.device[mode] = device;
9211 stream_.state = STREAM_STOPPED;
9213 // Setup the buffer conversion information structure.
9214 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9216 // Setup thread if necessary.
9217 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9218 // We had already set up an output stream.
9219 stream_.mode = DUPLEX;
9220 if ( stream_.device[0] == device ) handle->id[0] = fd;
9223 stream_.mode = mode;
9225 // Setup callback thread.
9226 stream_.callbackInfo.object = (void *) this;
9228 // Set the thread attributes for joinable and realtime scheduling
9229 // priority. The higher priority will only take affect if the
9230 // program is run as root or suid.
9231 pthread_attr_t attr;
9232 pthread_attr_init( &attr );
9233 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9234 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9235 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9236 struct sched_param param;
9237 int priority = options->priority;
// Clamp the requested priority into the SCHED_RR valid range.
9238 int min = sched_get_priority_min( SCHED_RR );
9239 int max = sched_get_priority_max( SCHED_RR );
9240 if ( priority < min ) priority = min;
9241 else if ( priority > max ) priority = max;
9242 param.sched_priority = priority;
// NOTE(review): '¶m' looks like a mojibake of '&param' (an HTML-entity
// round-trip of "&para"); this line will not compile as-is — restore '&param'.
9243 pthread_attr_setschedparam( &attr, ¶m );
9244 pthread_attr_setschedpolicy( &attr, SCHED_RR );
9247 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9249 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9252 stream_.callbackInfo.isRunning = true;
9253 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9254 pthread_attr_destroy( &attr );
9256 stream_.callbackInfo.isRunning = false;
9257 errorText_ = "RtApiOss::error creating callback thread!";
// Error cleanup: tear down the condition variable, close any open device
// fds, and free every buffer allocated above before reporting failure.
9266 pthread_cond_destroy( &handle->runnable );
9267 if ( handle->id[0] ) close( handle->id[0] );
9268 if ( handle->id[1] ) close( handle->id[1] );
9270 stream_.apiHandle = 0;
9273 for ( int i=0; i<2; i++ ) {
9274 if ( stream_.userBuffer[i] ) {
9275 free( stream_.userBuffer[i] );
9276 stream_.userBuffer[i] = 0;
9280 if ( stream_.deviceBuffer ) {
9281 free( stream_.deviceBuffer );
9282 stream_.deviceBuffer = 0;
// Close the open stream: stop the callback thread, halt any running I/O,
// close the device file descriptors, and free all stream resources.
// Warns (non-fatal) if no stream is open.
9288 void RtApiOss :: closeStream()
9290 if ( stream_.state == STREAM_CLOSED ) {
9291 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9292 error( RtAudioError::WARNING );
9296 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Clear the run flag first, then wake the callback thread if it is parked
// on the condition variable, so pthread_join() cannot block forever.
9297 stream_.callbackInfo.isRunning = false;
9298 MUTEX_LOCK( &stream_.mutex );
9299 if ( stream_.state == STREAM_STOPPED )
9300 pthread_cond_signal( &handle->runnable );
9301 MUTEX_UNLOCK( &stream_.mutex );
9302 pthread_join( stream_.callbackInfo.thread, NULL );
9304 if ( stream_.state == STREAM_RUNNING ) {
// Halt in-flight DMA on whichever direction(s) were active.
9305 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9306 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9308 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9309 stream_.state = STREAM_STOPPED;
// Release the handle, device fds, and all heap buffers.
9313 pthread_cond_destroy( &handle->runnable );
9314 if ( handle->id[0] ) close( handle->id[0] );
9315 if ( handle->id[1] ) close( handle->id[1] );
9317 stream_.apiHandle = 0;
9320 for ( int i=0; i<2; i++ ) {
9321 if ( stream_.userBuffer[i] ) {
9322 free( stream_.userBuffer[i] );
9323 stream_.userBuffer[i] = 0;
9327 if ( stream_.deviceBuffer ) {
9328 free( stream_.deviceBuffer );
9329 stream_.deviceBuffer = 0;
9332 stream_.mode = UNINITIALIZED;
9333 stream_.state = STREAM_CLOSED;
// Mark the stream running and wake the callback thread.  OSS devices need
// no explicit start ioctl — the hardware begins once samples are written.
// Warns (non-fatal) if the stream is already running.
9336 void RtApiOss :: startStream()
9339 if ( stream_.state == STREAM_RUNNING ) {
9340 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9341 error( RtAudioError::WARNING );
9345 MUTEX_LOCK( &stream_.mutex );
9347 stream_.state = STREAM_RUNNING;
9349 // No need to do anything else here ... OSS automatically starts
9350 // when fed samples.
9352 MUTEX_UNLOCK( &stream_.mutex );
// The callback thread blocks on this condition while STOPPED; release it.
9354 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9355 pthread_cond_signal( &handle->runnable );
// Stop the stream gracefully: drain the output by writing a few buffers of
// silence (so queued audio plays out without a click), then halt the
// device(s) and mark the stream stopped.  Warns if already stopped.
9358 void RtApiOss :: stopStream()
9361 if ( stream_.state == STREAM_STOPPED ) {
9362 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9363 error( RtAudioError::WARNING );
9367 MUTEX_LOCK( &stream_.mutex );
9369 // The state might change while waiting on a mutex.
9370 if ( stream_.state == STREAM_STOPPED ) {
9371 MUTEX_UNLOCK( &stream_.mutex );
9376 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9377 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9379 // Flush the output with zeros a few times.
// Pick whichever buffer actually feeds the device (converted or user).
9382 RtAudioFormat format;
9384 if ( stream_.doConvertBuffer[0] ) {
9385 buffer = stream_.deviceBuffer;
9386 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9387 format = stream_.deviceFormat[0];
9390 buffer = stream_.userBuffer[0];
9391 samples = stream_.bufferSize * stream_.nUserChannels[0];
9392 format = stream_.userFormat;
9395 memset( buffer, 0, samples * formatBytes(format) );
// nBuffers+1 writes of silence flush every queued fragment.
9396 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9397 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9398 if ( result == -1 ) {
9399 errorText_ = "RtApiOss::stopStream: audio write error.";
9400 error( RtAudioError::WARNING );
9404 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9405 if ( result == -1 ) {
9406 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9407 errorText_ = errorStream_.str();
9410 handle->triggered = false;
// Halt the input side too, unless duplex shares a single fd (already halted).
9413 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9414 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9415 if ( result == -1 ) {
// NOTE(review): message reports stream_.device[0] though this is the input
// device — presumably should be device[1]; confirm before changing.
9416 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9417 errorText_ = errorStream_.str();
9423 stream_.state = STREAM_STOPPED;
9424 MUTEX_UNLOCK( &stream_.mutex );
// Report a system error only if one of the ioctls above failed.
9426 if ( result != -1 ) return;
9427 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream immediately: halt the device(s) without draining any
// pending output (unlike stopStream, which flushes silence first).
// Warns if already stopped.
9430 void RtApiOss :: abortStream()
9433 if ( stream_.state == STREAM_STOPPED ) {
9434 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9435 error( RtAudioError::WARNING );
9439 MUTEX_LOCK( &stream_.mutex );
9441 // The state might change while waiting on a mutex.
9442 if ( stream_.state == STREAM_STOPPED ) {
9443 MUTEX_UNLOCK( &stream_.mutex );
9448 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9449 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9450 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9451 if ( result == -1 ) {
9452 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9453 errorText_ = errorStream_.str();
9456 handle->triggered = false;
// Halt the input side too, unless duplex shares a single fd (already halted).
9459 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9460 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9461 if ( result == -1 ) {
// NOTE(review): message reports stream_.device[0] though this is the input
// device — presumably should be device[1]; confirm before changing.
9462 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9463 errorText_ = errorStream_.str();
9469 stream_.state = STREAM_STOPPED;
9470 MUTEX_UNLOCK( &stream_.mutex );
// Report a system error only if one of the ioctls above failed.
9472 if ( result != -1 ) return;
9473 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the callback thread: wait while stopped, run the user
// callback to produce/consume one buffer, then write output to and/or read
// input from the OSS device, with format conversion and byte swapping as
// configured by probeDeviceOpen().
9476 void RtApiOss :: callbackEvent()
9478 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Park on the condition variable until startStream()/closeStream() signals.
9479 if ( stream_.state == STREAM_STOPPED ) {
9480 MUTEX_LOCK( &stream_.mutex );
9481 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9482 if ( stream_.state != STREAM_RUNNING ) {
9483 MUTEX_UNLOCK( &stream_.mutex );
9486 MUTEX_UNLOCK( &stream_.mutex );
9489 if ( stream_.state == STREAM_CLOSED ) {
9490 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9491 error( RtAudioError::WARNING );
9495 // Invoke user callback to get fresh output data.
9496 int doStopStream = 0;
9497 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9498 double streamTime = getStreamTime();
// Report any xrun detected on the previous pass, then clear the flag.
9499 RtAudioStreamStatus status = 0;
9500 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9501 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9502 handle->xrun[0] = false;
9504 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9505 status |= RTAUDIO_INPUT_OVERFLOW;
9506 handle->xrun[1] = false;
9508 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9509 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// Callback return value 2 requests an immediate abort (no drain).
9510 if ( doStopStream == 2 ) {
9511 this->abortStream();
9515 MUTEX_LOCK( &stream_.mutex );
9517 // The state might change while waiting on a mutex.
9518 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9523 RtAudioFormat format;
9525 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9527 // Setup parameters and do buffer conversion if necessary.
9528 if ( stream_.doConvertBuffer[0] ) {
9529 buffer = stream_.deviceBuffer;
9530 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9531 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9532 format = stream_.deviceFormat[0];
9535 buffer = stream_.userBuffer[0];
9536 samples = stream_.bufferSize * stream_.nUserChannels[0];
9537 format = stream_.userFormat;
9540 // Do byte swapping if necessary.
9541 if ( stream_.doByteSwap[0] )
9542 byteSwapBuffer( buffer, samples, format );
// First duplex pass: pre-load one buffer with triggers disabled, then
// enable input+output together so both directions start in sync.
9544 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9546 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9547 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9548 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9549 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9550 handle->triggered = true;
9553 // Write samples to device.
9554 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9556 if ( result == -1 ) {
9557 // We'll assume this is an underrun, though there isn't a
9558 // specific means for determining that.
9559 handle->xrun[0] = true;
9560 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9561 error( RtAudioError::WARNING );
9562 // Continue on to input section.
9566 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9568 // Setup parameters.
9569 if ( stream_.doConvertBuffer[1] ) {
9570 buffer = stream_.deviceBuffer;
9571 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9572 format = stream_.deviceFormat[1];
9575 buffer = stream_.userBuffer[1];
9576 samples = stream_.bufferSize * stream_.nUserChannels[1];
9577 format = stream_.userFormat;
9580 // Read samples from device.
9581 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9583 if ( result == -1 ) {
9584 // We'll assume this is an overrun, though there isn't a
9585 // specific means for determining that.
9586 handle->xrun[1] = true;
9587 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9588 error( RtAudioError::WARNING );
9592 // Do byte swapping if necessary.
9593 if ( stream_.doByteSwap[1] )
9594 byteSwapBuffer( buffer, samples, format );
9596 // Do buffer conversion if necessary.
9597 if ( stream_.doConvertBuffer[1] )
9598 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9602 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; callback return value 1 requests a clean stop.
9604 RtApi::tickStreamTime();
9605 if ( doStopStream == 1 ) this->stopStream();
// Entry point for the OSS callback thread created in probeDeviceOpen().
// Loops on callbackEvent() until closeStream() (or an error path) clears
// callbackInfo.isRunning, checking for cancellation each pass.
9608 static void *ossCallbackHandler( void *ptr )
9610 CallbackInfo *info = (CallbackInfo *) ptr;
9611 RtApiOss *object = (RtApiOss *) info->object;
// Read the flag through a pointer so updates from other threads are seen.
9612 bool *isRunning = &info->isRunning;
9614 while ( *isRunning == true ) {
9615 pthread_testcancel();
9616 object->callbackEvent();
9619 pthread_exit( NULL );
9622 //******************** End of __LINUX_OSS__ *********************//
9626 // *************************************************** //
9628 // Protected common (OS-independent) RtAudio methods.
9630 // *************************************************** //
9632 // This method can be modified to control the behavior of error
9633 // message printing.
// Central error reporting: routes errorText_ either to the user-supplied
// error callback (suppressing nested errors triggered while handling the
// first), or — with no callback — prints warnings to stderr and throws
// RtAudioError for anything more severe.
9634 void RtApi :: error( RtAudioError::Type type )
9636 errorStream_.str(""); // clear the ostringstream
9638 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9639 if ( errorCallback ) {
9640 // abortStream() can generate new error messages. Ignore them. Just keep original one.
9642 if ( firstErrorOccurred_ )
9645 firstErrorOccurred_ = true;
// Copy the message before any stream shutdown below can overwrite errorText_.
9646 const std::string errorMessage = errorText_;
9648 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9649 stream_.callbackInfo.isRunning = false; // exit from the thread
9653 errorCallback( type, errorMessage );
9654 firstErrorOccurred_ = false;
9658 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9659 std::cerr << '\n' << errorText_ << "\n\n";
9660 else if ( type != RtAudioError::WARNING )
9661 throw( RtAudioError( errorText_, type ) );
// Guard used by API entry points: raises INVALID_USE (which throws when no
// error callback is installed) if no stream is currently open.
9664 void RtApi :: verifyStream()
9666 if ( stream_.state == STREAM_CLOSED ) {
9667 errorText_ = "RtApi:: a stream is not open!";
9668 error( RtAudioError::INVALID_USE );
// Reset every field of stream_ to its pristine (closed) state.  Called
// before opening a stream / after closing one.  Pointers are nulled, not
// freed — callers are responsible for releasing resources first.
9672 void RtApi :: clearStreamInfo()
9674 stream_.mode = UNINITIALIZED;
9675 stream_.state = STREAM_CLOSED;
9676 stream_.sampleRate = 0;
9677 stream_.bufferSize = 0;
9678 stream_.nBuffers = 0;
9679 stream_.userFormat = 0;
9680 stream_.userInterleaved = true;
9681 stream_.streamTime = 0.0;
9682 stream_.apiHandle = 0;
9683 stream_.deviceBuffer = 0;
9684 stream_.callbackInfo.callback = 0;
9685 stream_.callbackInfo.userData = 0;
9686 stream_.callbackInfo.isRunning = false;
9687 stream_.callbackInfo.errorCallback = 0;
// Per-direction fields: index 0 = output (playback), index 1 = input.
9688 for ( int i=0; i<2; i++ ) {
// 11111 is a sentinel meaning "no device selected".
9689 stream_.device[i] = 11111;
9690 stream_.doConvertBuffer[i] = false;
9691 stream_.deviceInterleaved[i] = true;
9692 stream_.doByteSwap[i] = false;
9693 stream_.nUserChannels[i] = 0;
9694 stream_.nDeviceChannels[i] = 0;
9695 stream_.channelOffset[i] = 0;
9696 stream_.deviceFormat[i] = 0;
9697 stream_.latency[i] = 0;
9698 stream_.userBuffer[i] = 0;
9699 stream_.convertInfo[i].channels = 0;
9700 stream_.convertInfo[i].inJump = 0;
9701 stream_.convertInfo[i].outJump = 0;
9702 stream_.convertInfo[i].inFormat = 0;
9703 stream_.convertInfo[i].outFormat = 0;
9704 stream_.convertInfo[i].inOffset.clear();
9705 stream_.convertInfo[i].outOffset.clear();
// Return the byte width of one sample of the given RtAudioFormat
// (SINT16 -> 2, SINT32/FLOAT32 -> 4, FLOAT64 -> 8, SINT24 -> 3, SINT8 -> 1
// per the branch ordering below; the sampled view drops the return lines).
// Unknown formats produce a WARNING and a fallback value.
9709 unsigned int RtApi :: formatBytes( RtAudioFormat format )
9711 if ( format == RTAUDIO_SINT16 )
9713 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
9715 else if ( format == RTAUDIO_FLOAT64 )
9717 else if ( format == RTAUDIO_SINT24 )
9719 else if ( format == RTAUDIO_SINT8 )
9722 errorText_ = "RtApi::formatBytes: undefined format.";
9723 error( RtAudioError::WARNING );
// Populate stream_.convertInfo[mode] with the jump sizes and per-channel
// offsets that convertBuffer() uses to translate between the user buffer
// and the device buffer (format conversion, channel compensation, and
// interleave/deinterleave), honoring a non-zero firstChannel offset.
9728 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
9730 if ( mode == INPUT ) { // convert device to user buffer
9731 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
9732 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
9733 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
9734 stream_.convertInfo[mode].outFormat = stream_.userFormat;
9736 else { // convert user to device buffer
9737 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
9738 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
9739 stream_.convertInfo[mode].inFormat = stream_.userFormat;
9740 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
// Convert only as many channels as both sides have (the smaller count).
9743 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
9744 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
9746 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
9748 // Set up the interleave/deinterleave offsets.
// Non-interleaved data lays each channel out as a contiguous plane of
// bufferSize frames, hence the k * bufferSize offsets and jump of 1.
9749 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
9750 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
9751 ( mode == INPUT && stream_.userInterleaved ) ) {
9752 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9753 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9754 stream_.convertInfo[mode].outOffset.push_back( k );
9755 stream_.convertInfo[mode].inJump = 1;
9759 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9760 stream_.convertInfo[mode].inOffset.push_back( k );
9761 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9762 stream_.convertInfo[mode].outJump = 1;
9766 else { // no (de)interleaving
9767 if ( stream_.userInterleaved ) {
9768 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9769 stream_.convertInfo[mode].inOffset.push_back( k );
9770 stream_.convertInfo[mode].outOffset.push_back( k );
9774 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9775 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9776 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9777 stream_.convertInfo[mode].inJump = 1;
9778 stream_.convertInfo[mode].outJump = 1;
9783 // Add channel offset.
// Shift the device-side offsets so the stream starts at firstChannel:
// by whole channels when interleaved, by channel planes when not.
9784 if ( firstChannel > 0 ) {
9785 if ( stream_.deviceInterleaved[mode] ) {
9786 if ( mode == OUTPUT ) {
9787 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9788 stream_.convertInfo[mode].outOffset[k] += firstChannel;
9791 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9792 stream_.convertInfo[mode].inOffset[k] += firstChannel;
9796 if ( mode == OUTPUT ) {
9797 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9798 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
9801 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9802 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
9808 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
9810 // This function does format conversion, input/output channel compensation, and
9811 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
9812 // the lower three bytes of a 32-bit integer.
9814 // Clear our device buffer when in/out duplex device channels are different
9815 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
9816 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
9817 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
9820 if (info.outFormat == RTAUDIO_FLOAT64) {
9822 Float64 *out = (Float64 *)outBuffer;
9824 if (info.inFormat == RTAUDIO_SINT8) {
9825 signed char *in = (signed char *)inBuffer;
9826 scale = 1.0 / 127.5;
9827 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9828 for (j=0; j<info.channels; j++) {
9829 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9830 out[info.outOffset[j]] += 0.5;
9831 out[info.outOffset[j]] *= scale;
9834 out += info.outJump;
9837 else if (info.inFormat == RTAUDIO_SINT16) {
9838 Int16 *in = (Int16 *)inBuffer;
9839 scale = 1.0 / 32767.5;
9840 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9841 for (j=0; j<info.channels; j++) {
9842 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9843 out[info.outOffset[j]] += 0.5;
9844 out[info.outOffset[j]] *= scale;
9847 out += info.outJump;
9850 else if (info.inFormat == RTAUDIO_SINT24) {
9851 Int24 *in = (Int24 *)inBuffer;
9852 scale = 1.0 / 8388607.5;
9853 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9854 for (j=0; j<info.channels; j++) {
9855 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
9856 out[info.outOffset[j]] += 0.5;
9857 out[info.outOffset[j]] *= scale;
9860 out += info.outJump;
9863 else if (info.inFormat == RTAUDIO_SINT32) {
9864 Int32 *in = (Int32 *)inBuffer;
9865 scale = 1.0 / 2147483647.5;
9866 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9867 for (j=0; j<info.channels; j++) {
9868 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9869 out[info.outOffset[j]] += 0.5;
9870 out[info.outOffset[j]] *= scale;
9873 out += info.outJump;
9876 else if (info.inFormat == RTAUDIO_FLOAT32) {
9877 Float32 *in = (Float32 *)inBuffer;
9878 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9879 for (j=0; j<info.channels; j++) {
9880 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9883 out += info.outJump;
9886 else if (info.inFormat == RTAUDIO_FLOAT64) {
9887 // Channel compensation and/or (de)interleaving only.
9888 Float64 *in = (Float64 *)inBuffer;
9889 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9890 for (j=0; j<info.channels; j++) {
9891 out[info.outOffset[j]] = in[info.inOffset[j]];
9894 out += info.outJump;
9898 else if (info.outFormat == RTAUDIO_FLOAT32) {
9900 Float32 *out = (Float32 *)outBuffer;
9902 if (info.inFormat == RTAUDIO_SINT8) {
9903 signed char *in = (signed char *)inBuffer;
9904 scale = (Float32) ( 1.0 / 127.5 );
9905 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9906 for (j=0; j<info.channels; j++) {
9907 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9908 out[info.outOffset[j]] += 0.5;
9909 out[info.outOffset[j]] *= scale;
9912 out += info.outJump;
9915 else if (info.inFormat == RTAUDIO_SINT16) {
9916 Int16 *in = (Int16 *)inBuffer;
9917 scale = (Float32) ( 1.0 / 32767.5 );
9918 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9919 for (j=0; j<info.channels; j++) {
9920 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9921 out[info.outOffset[j]] += 0.5;
9922 out[info.outOffset[j]] *= scale;
9925 out += info.outJump;
9928 else if (info.inFormat == RTAUDIO_SINT24) {
9929 Int24 *in = (Int24 *)inBuffer;
9930 scale = (Float32) ( 1.0 / 8388607.5 );
9931 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9932 for (j=0; j<info.channels; j++) {
9933 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
9934 out[info.outOffset[j]] += 0.5;
9935 out[info.outOffset[j]] *= scale;
9938 out += info.outJump;
9941 else if (info.inFormat == RTAUDIO_SINT32) {
9942 Int32 *in = (Int32 *)inBuffer;
9943 scale = (Float32) ( 1.0 / 2147483647.5 );
9944 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9945 for (j=0; j<info.channels; j++) {
9946 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9947 out[info.outOffset[j]] += 0.5;
9948 out[info.outOffset[j]] *= scale;
9951 out += info.outJump;
9954 else if (info.inFormat == RTAUDIO_FLOAT32) {
9955 // Channel compensation and/or (de)interleaving only.
9956 Float32 *in = (Float32 *)inBuffer;
9957 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9958 for (j=0; j<info.channels; j++) {
9959 out[info.outOffset[j]] = in[info.inOffset[j]];
9962 out += info.outJump;
9965 else if (info.inFormat == RTAUDIO_FLOAT64) {
9966 Float64 *in = (Float64 *)inBuffer;
9967 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9968 for (j=0; j<info.channels; j++) {
9969 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9972 out += info.outJump;
9976 else if (info.outFormat == RTAUDIO_SINT32) {
9977 Int32 *out = (Int32 *)outBuffer;
9978 if (info.inFormat == RTAUDIO_SINT8) {
9979 signed char *in = (signed char *)inBuffer;
9980 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9981 for (j=0; j<info.channels; j++) {
9982 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9983 out[info.outOffset[j]] <<= 24;
9986 out += info.outJump;
9989 else if (info.inFormat == RTAUDIO_SINT16) {
9990 Int16 *in = (Int16 *)inBuffer;
9991 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9992 for (j=0; j<info.channels; j++) {
9993 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9994 out[info.outOffset[j]] <<= 16;
9997 out += info.outJump;
10000 else if (info.inFormat == RTAUDIO_SINT24) {
10001 Int24 *in = (Int24 *)inBuffer;
10002 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10003 for (j=0; j<info.channels; j++) {
10004 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10005 out[info.outOffset[j]] <<= 8;
10008 out += info.outJump;
10011 else if (info.inFormat == RTAUDIO_SINT32) {
10012 // Channel compensation and/or (de)interleaving only.
10013 Int32 *in = (Int32 *)inBuffer;
10014 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10015 for (j=0; j<info.channels; j++) {
10016 out[info.outOffset[j]] = in[info.inOffset[j]];
10019 out += info.outJump;
10022 else if (info.inFormat == RTAUDIO_FLOAT32) {
10023 Float32 *in = (Float32 *)inBuffer;
10024 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10025 for (j=0; j<info.channels; j++) {
10026 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10029 out += info.outJump;
10032 else if (info.inFormat == RTAUDIO_FLOAT64) {
10033 Float64 *in = (Float64 *)inBuffer;
10034 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10035 for (j=0; j<info.channels; j++) {
10036 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10039 out += info.outJump;
10043 else if (info.outFormat == RTAUDIO_SINT24) {
10044 Int24 *out = (Int24 *)outBuffer;
10045 if (info.inFormat == RTAUDIO_SINT8) {
10046 signed char *in = (signed char *)inBuffer;
10047 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10048 for (j=0; j<info.channels; j++) {
10049 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10050 //out[info.outOffset[j]] <<= 16;
10053 out += info.outJump;
10056 else if (info.inFormat == RTAUDIO_SINT16) {
10057 Int16 *in = (Int16 *)inBuffer;
10058 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10059 for (j=0; j<info.channels; j++) {
10060 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10061 //out[info.outOffset[j]] <<= 8;
10064 out += info.outJump;
10067 else if (info.inFormat == RTAUDIO_SINT24) {
10068 // Channel compensation and/or (de)interleaving only.
10069 Int24 *in = (Int24 *)inBuffer;
10070 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10071 for (j=0; j<info.channels; j++) {
10072 out[info.outOffset[j]] = in[info.inOffset[j]];
10075 out += info.outJump;
10078 else if (info.inFormat == RTAUDIO_SINT32) {
10079 Int32 *in = (Int32 *)inBuffer;
10080 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10081 for (j=0; j<info.channels; j++) {
10082 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10083 //out[info.outOffset[j]] >>= 8;
10086 out += info.outJump;
10089 else if (info.inFormat == RTAUDIO_FLOAT32) {
10090 Float32 *in = (Float32 *)inBuffer;
10091 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10092 for (j=0; j<info.channels; j++) {
10093 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10096 out += info.outJump;
10099 else if (info.inFormat == RTAUDIO_FLOAT64) {
10100 Float64 *in = (Float64 *)inBuffer;
10101 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10102 for (j=0; j<info.channels; j++) {
10103 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10106 out += info.outJump;
10110 else if (info.outFormat == RTAUDIO_SINT16) {
10111 Int16 *out = (Int16 *)outBuffer;
10112 if (info.inFormat == RTAUDIO_SINT8) {
10113 signed char *in = (signed char *)inBuffer;
10114 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10115 for (j=0; j<info.channels; j++) {
10116 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10117 out[info.outOffset[j]] <<= 8;
10120 out += info.outJump;
10123 else if (info.inFormat == RTAUDIO_SINT16) {
10124 // Channel compensation and/or (de)interleaving only.
10125 Int16 *in = (Int16 *)inBuffer;
10126 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10127 for (j=0; j<info.channels; j++) {
10128 out[info.outOffset[j]] = in[info.inOffset[j]];
10131 out += info.outJump;
10134 else if (info.inFormat == RTAUDIO_SINT24) {
10135 Int24 *in = (Int24 *)inBuffer;
10136 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10137 for (j=0; j<info.channels; j++) {
10138 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10141 out += info.outJump;
10144 else if (info.inFormat == RTAUDIO_SINT32) {
10145 Int32 *in = (Int32 *)inBuffer;
10146 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10147 for (j=0; j<info.channels; j++) {
10148 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10151 out += info.outJump;
10154 else if (info.inFormat == RTAUDIO_FLOAT32) {
10155 Float32 *in = (Float32 *)inBuffer;
10156 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10157 for (j=0; j<info.channels; j++) {
10158 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10161 out += info.outJump;
10164 else if (info.inFormat == RTAUDIO_FLOAT64) {
10165 Float64 *in = (Float64 *)inBuffer;
10166 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10167 for (j=0; j<info.channels; j++) {
10168 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10171 out += info.outJump;
10175 else if (info.outFormat == RTAUDIO_SINT8) {
10176 signed char *out = (signed char *)outBuffer;
10177 if (info.inFormat == RTAUDIO_SINT8) {
10178 // Channel compensation and/or (de)interleaving only.
10179 signed char *in = (signed char *)inBuffer;
10180 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10181 for (j=0; j<info.channels; j++) {
10182 out[info.outOffset[j]] = in[info.inOffset[j]];
10185 out += info.outJump;
10188 if (info.inFormat == RTAUDIO_SINT16) {
10189 Int16 *in = (Int16 *)inBuffer;
10190 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10191 for (j=0; j<info.channels; j++) {
10192 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10195 out += info.outJump;
10198 else if (info.inFormat == RTAUDIO_SINT24) {
10199 Int24 *in = (Int24 *)inBuffer;
10200 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10201 for (j=0; j<info.channels; j++) {
10202 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10205 out += info.outJump;
10208 else if (info.inFormat == RTAUDIO_SINT32) {
10209 Int32 *in = (Int32 *)inBuffer;
10210 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10211 for (j=0; j<info.channels; j++) {
10212 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10215 out += info.outJump;
10218 else if (info.inFormat == RTAUDIO_FLOAT32) {
10219 Float32 *in = (Float32 *)inBuffer;
10220 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10221 for (j=0; j<info.channels; j++) {
10222 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10225 out += info.outJump;
10228 else if (info.inFormat == RTAUDIO_FLOAT64) {
10229 Float64 *in = (Float64 *)inBuffer;
10230 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10231 for (j=0; j<info.channels; j++) {
10232 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10235 out += info.outJump;
10241 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10242 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10243 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10245 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10251 if ( format == RTAUDIO_SINT16 ) {
10252 for ( unsigned int i=0; i<samples; i++ ) {
10253 // Swap 1st and 2nd bytes.
10258 // Increment 2 bytes.
10262 else if ( format == RTAUDIO_SINT32 ||
10263 format == RTAUDIO_FLOAT32 ) {
10264 for ( unsigned int i=0; i<samples; i++ ) {
10265 // Swap 1st and 4th bytes.
10270 // Swap 2nd and 3rd bytes.
10276 // Increment 3 more bytes.
10280 else if ( format == RTAUDIO_SINT24 ) {
10281 for ( unsigned int i=0; i<samples; i++ ) {
10282 // Swap 1st and 3rd bytes.
10287 // Increment 2 more bytes.
10291 else if ( format == RTAUDIO_FLOAT64 ) {
10292 for ( unsigned int i=0; i<samples; i++ ) {
10293 // Swap 1st and 8th bytes
10298 // Swap 2nd and 7th bytes
10304 // Swap 3rd and 6th bytes
10310 // Swap 4th and 5th bytes
10316 // Increment 5 more bytes.
10322 // Indentation settings for Vim and Emacs
10324 // Local Variables:
10325 // c-basic-offset: 2
10326 // indent-tabs-mode: nil
10329 // vim: et sts=2 sw=2