1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 /************************************************************************/
41 // RtAudio: Version 5.0.0
51 // Static variable definitions.
// Candidate sample rates probed when a device reports a continuous rate range.
// MAX_SAMPLE_RATES must equal the number of elements in SAMPLE_RATES (14).
52 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
53 const unsigned int RtApi::SAMPLE_RATES[] = {
54 4000, 5512, 8000, 9600, 11025, 16000, 22050,
55 32000, 44100, 48000, 88200, 96000, 176400, 192000
// Platform mutex abstraction: on Windows backends the MUTEX_* macros map
// onto the Win32 CRITICAL_SECTION API.
58 #if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
59 #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
60 #define MUTEX_DESTROY(A) DeleteCriticalSection(A)
61 #define MUTEX_LOCK(A) EnterCriticalSection(A)
62 #define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
// Identity conversion for narrow C strings: wrap the NUL-terminated input
// in a std::string. Overloaded below for wide (wchar_t) strings.
static std::string convertCharPointerToStdString( const char *text )
{
  std::string converted( text );
  return converted;
}
// Convert a NUL-terminated wide string to a UTF-8 std::string via the
// Win32 WideCharToMultiByte API.
71 static std::string convertCharPointerToStdString(const wchar_t *text)
// First call with a NULL output buffer returns the required byte count,
// including the terminating NUL.
73 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
// Size the string to length-1 so it excludes the terminator ...
74 std::string s( length-1, '\0' );
// ... then convert in place (&s[0] is writable, contiguous storage).
75 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
// NOTE(review): the function's `return s;` line is not visible in this listing.
// On POSIX backends the MUTEX_* macros map onto pthread mutexes.
79 #elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
81 #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
82 #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
83 #define MUTEX_LOCK(A) pthread_mutex_lock(A)
84 #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
// No-op fallbacks for builds with no real audio API (the `#else` directive
// preceding these two lines is not visible in this listing).
86 #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
87 #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
90 // *************************************************** //
92 // RtAudio definitions.
94 // *************************************************** //
// Return the library version string; RTAUDIO_VERSION is defined elsewhere
// (presumably the RtAudio header).
96 std::string RtAudio :: getVersion( void )
98 return RTAUDIO_VERSION;
// Fill `apis` with the API identifiers compiled into this build, one
// push_back per enabled preprocessor symbol.
101 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
105 // The order here will control the order of RtAudio's API search in
// NOTE(review): the matching #endif line for each conditional below is not
// visible in this listing.
107 #if defined(__UNIX_JACK__)
108 apis.push_back( UNIX_JACK );
110 #if defined(__LINUX_PULSE__)
111 apis.push_back( LINUX_PULSE );
113 #if defined(__LINUX_ALSA__)
114 apis.push_back( LINUX_ALSA );
116 #if defined(__LINUX_OSS__)
117 apis.push_back( LINUX_OSS );
119 #if defined(__WINDOWS_ASIO__)
120 apis.push_back( WINDOWS_ASIO );
122 #if defined(__WINDOWS_WASAPI__)
123 apis.push_back( WINDOWS_WASAPI );
125 #if defined(__WINDOWS_DS__)
126 apis.push_back( WINDOWS_DS );
128 #if defined(__MACOSX_CORE__)
129 apis.push_back( MACOSX_CORE );
131 #if defined(__RTAUDIO_DUMMY__)
132 apis.push_back( RTAUDIO_DUMMY );
// Instantiate the concrete RtApi subclass for `api` into rtapi_, guarded by
// the corresponding compile-time symbol. If `api` was not compiled in,
// rtapi_ is left unset.
// NOTE(review): the matching #endif line for each conditional below is not
// visible in this listing.
136 void RtAudio :: openRtApi( RtAudio::Api api )
142 #if defined(__UNIX_JACK__)
143 if ( api == UNIX_JACK )
144 rtapi_ = new RtApiJack();
146 #if defined(__LINUX_ALSA__)
147 if ( api == LINUX_ALSA )
148 rtapi_ = new RtApiAlsa();
150 #if defined(__LINUX_PULSE__)
151 if ( api == LINUX_PULSE )
152 rtapi_ = new RtApiPulse();
154 #if defined(__LINUX_OSS__)
155 if ( api == LINUX_OSS )
156 rtapi_ = new RtApiOss();
158 #if defined(__WINDOWS_ASIO__)
159 if ( api == WINDOWS_ASIO )
160 rtapi_ = new RtApiAsio();
162 #if defined(__WINDOWS_WASAPI__)
163 if ( api == WINDOWS_WASAPI )
164 rtapi_ = new RtApiWasapi();
166 #if defined(__WINDOWS_DS__)
167 if ( api == WINDOWS_DS )
168 rtapi_ = new RtApiDs();
170 #if defined(__MACOSX_CORE__)
171 if ( api == MACOSX_CORE )
172 rtapi_ = new RtApiCore();
174 #if defined(__RTAUDIO_DUMMY__)
175 if ( api == RTAUDIO_DUMMY )
176 rtapi_ = new RtApiDummy();
// Constructor: try the explicitly requested API first; otherwise iterate
// the compiled APIs and keep the first one that reports at least one device.
// Throws RtAudioError if no compiled API support is found at all.
180 RtAudio :: RtAudio( RtAudio::Api api )
184 if ( api != UNSPECIFIED ) {
185 // Attempt to open the specified API.
// (The openRtApi( api ) call itself is not visible in this listing.)
187 if ( rtapi_ ) return;
189 // No compiled support for specified API value. Issue a debug
190 // warning and continue as if no API was specified.
191 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
194 // Iterate through the compiled APIs and return as soon as we find
195 // one with at least one device or we reach the end of the list.
196 std::vector< RtAudio::Api > apis;
197 getCompiledApi( apis );
198 for ( unsigned int i=0; i<apis.size(); i++ ) {
199 openRtApi( apis[i] );
200 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
203 if ( rtapi_ ) return;
205 // It should not be possible to get here because the preprocessor
206 // definition __RTAUDIO_DUMMY__ is automatically defined if no
207 // API-specific definitions are passed to the compiler. But just in
208 // case something weird happens, we'll throw an error.
209 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
210 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
// Destructor (its body lines are not visible in this listing).
213 RtAudio :: ~RtAudio()
// Thin forwarder: delegate stream opening to the concrete API instance
// selected in the constructor. Parameters are passed through unchanged.
219 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
220 RtAudio::StreamParameters *inputParameters,
221 RtAudioFormat format, unsigned int sampleRate,
222 unsigned int *bufferFrames,
223 RtAudioCallback callback, void *userData,
224 RtAudio::StreamOptions *options,
225 RtAudioErrorCallback errorCallback )
227 return rtapi_->openStream( outputParameters, inputParameters, format,
228 sampleRate, bufferFrames, callback,
229 userData, options, errorCallback );
232 // *************************************************** //
234 // Public RtApi definitions (see end of file for
235 // private or protected utility functions).
237 // *************************************************** //
// Body of RtApi :: RtApi() (the constructor's signature line is not visible
// in this listing): initialize stream bookkeeping to a closed, empty state
// and create the stream mutex.
241 stream_.state = STREAM_CLOSED;
242 stream_.mode = UNINITIALIZED;
243 stream_.apiHandle = 0;
// No user conversion buffers allocated yet (output = [0], input = [1]).
244 stream_.userBuffer[0] = 0;
245 stream_.userBuffer[1] = 0;
246 MUTEX_INITIALIZE( &stream_.mutex );
247 showWarnings_ = true;
248 firstErrorOccurred_ = false;
// Body of RtApi :: ~RtApi() (the destructor's signature line is not visible
// in this listing): release the mutex created in the constructor.
253 MUTEX_DESTROY( &stream_.mutex );
// Validate the user's stream request, probe/open the requested output and/or
// input device via the subclass probeDeviceOpen(), and record the callback
// information. On any validation failure, error() is invoked with
// INVALID_USE; on probe failure, with SYSTEM_ERROR.
// NOTE(review): the early `return;` lines after the error() calls are not
// visible in this listing.
256 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
257 RtAudio::StreamParameters *iParams,
258 RtAudioFormat format, unsigned int sampleRate,
259 unsigned int *bufferFrames,
260 RtAudioCallback callback, void *userData,
261 RtAudio::StreamOptions *options,
262 RtAudioErrorCallback errorCallback )
// Only one stream may be open per RtApi instance.
264 if ( stream_.state != STREAM_CLOSED ) {
265 errorText_ = "RtApi::openStream: a stream is already open!";
266 error( RtAudioError::INVALID_USE );
270 // Clear stream information potentially left from a previously open stream.
// Parameter sanity checks: non-NULL parameter structs need >= 1 channel,
// at least one of output/input must be supplied, and the sample format
// must be one with a known byte size.
273 if ( oParams && oParams->nChannels < 1 ) {
274 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
275 error( RtAudioError::INVALID_USE );
279 if ( iParams && iParams->nChannels < 1 ) {
280 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
281 error( RtAudioError::INVALID_USE );
285 if ( oParams == NULL && iParams == NULL ) {
286 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
287 error( RtAudioError::INVALID_USE );
291 if ( formatBytes(format) == 0 ) {
292 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
293 error( RtAudioError::INVALID_USE );
// Device ids must index into the currently enumerated device list.
297 unsigned int nDevices = getDeviceCount();
298 unsigned int oChannels = 0;
300 oChannels = oParams->nChannels;
301 if ( oParams->deviceId >= nDevices ) {
302 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
303 error( RtAudioError::INVALID_USE );
308 unsigned int iChannels = 0;
310 iChannels = iParams->nChannels;
311 if ( iParams->deviceId >= nDevices ) {
312 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
313 error( RtAudioError::INVALID_USE );
// Probe/open output first, then input; a failed input probe closes the
// already-opened output side before reporting the error.
320 if ( oChannels > 0 ) {
322 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
323 sampleRate, format, bufferFrames, options );
324 if ( result == false ) {
325 error( RtAudioError::SYSTEM_ERROR );
330 if ( iChannels > 0 ) {
332 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
333 sampleRate, format, bufferFrames, options );
334 if ( result == false ) {
335 if ( oChannels > 0 ) closeStream();
336 error( RtAudioError::SYSTEM_ERROR );
// Record the user callback/context; callbacks are stored as void* in the
// shared CallbackInfo structure.
341 stream_.callbackInfo.callback = (void *) callback;
342 stream_.callbackInfo.userData = userData;
343 stream_.callbackInfo.errorCallback = (void *) errorCallback;
// Report back the actual number of buffers chosen by the device probe.
345 if ( options ) options->numberOfBuffers = stream_.nBuffers;
346 stream_.state = STREAM_STOPPED;
// Base-class default input device query; overridden by subclasses where the
// native API exposes a default device (the `return 0;` line is not visible
// in this listing).
349 unsigned int RtApi :: getDefaultInputDevice( void )
351 // Should be implemented in subclasses if possible.
// Base-class default output device query; overridden by subclasses where the
// native API exposes a default device (the `return 0;` line is not visible
// in this listing).
355 unsigned int RtApi :: getDefaultOutputDevice( void )
357 // Should be implemented in subclasses if possible.
// Base-class placeholder; every concrete API must override closeStream().
361 void RtApi :: closeStream( void )
363 // MUST be implemented in subclasses!
// Base-class placeholder for device probing/opening; every concrete API must
// override this (the failure return line is not visible in this listing).
367 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
368 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
369 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
370 RtAudio::StreamOptions * /*options*/ )
372 // MUST be implemented in subclasses!
376 void RtApi :: tickStreamTime( void )
378 // Subclasses that do not provide their own implementation of
379 // getStreamTime should call this function once per buffer I/O to
380 // provide basic stream time support.
// Advance the running stream time by one buffer's duration in seconds.
382 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
// Record the wall-clock moment of this tick so getStreamTime() can
// interpolate between ticks (the matching #endif is not visible here).
384 #if defined( HAVE_GETTIMEOFDAY )
385 gettimeofday( &stream_.lastTickTimestamp, NULL );
// Return the total stream latency in frames: output latency ([0]) plus
// input latency ([1]) for duplex streams (the final `return totalLatency;`
// line is not visible in this listing).
389 long RtApi :: getStreamLatency( void )
393 long totalLatency = 0;
394 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
395 totalLatency = stream_.latency[0];
396 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
397 totalLatency += stream_.latency[1];
// Return the stream time in seconds. When gettimeofday() is available, the
// elapsed wall-clock time since the last tick is added for a finer estimate.
402 double RtApi :: getStreamTime( void )
406 #if defined( HAVE_GETTIMEOFDAY )
407 // Return a very accurate estimate of the stream time by
408 // adding in the elapsed time since the last tick.
// A stopped stream (or one that has not ticked yet) returns the raw value.
412 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
413 return stream_.streamTime;
// (`now` and `then` are struct timeval locals; their declaration lines are
// not visible in this listing.)
415 gettimeofday( &now, NULL );
416 then = stream_.lastTickTimestamp;
417 return stream_.streamTime +
418 ((now.tv_sec + 0.000001 * now.tv_usec) -
419 (then.tv_sec + 0.000001 * then.tv_usec));
// Fallback path (the #else directive is not visible in this listing).
421 return stream_.streamTime;
// Set the stream time to a caller-supplied value and reset the tick
// timestamp used for interpolation in getStreamTime().
425 void RtApi :: setStreamTime( double time )
430 stream_.streamTime = time;
431 #if defined( HAVE_GETTIMEOFDAY )
432 gettimeofday( &stream_.lastTickTimestamp, NULL );
// Return the sample rate of the currently open stream.
436 unsigned int RtApi :: getStreamSampleRate( void )
440 return stream_.sampleRate;
444 // *************************************************** //
446 // OS/API-specific methods.
448 // *************************************************** //
450 #if defined(__MACOSX_CORE__)
452 // The OS X CoreAudio API is designed to use a separate callback
453 // procedure for each of its audio devices. A single RtAudio duplex
454 // stream using two different devices is supported here, though it
455 // cannot be guaranteed to always behave correctly because we cannot
456 // synchronize these two callbacks.
458 // A property listener is installed for over/underrun information.
459 // However, no functionality is currently provided to allow property
460 // listeners to trigger user handlers because it is unclear what could
461 // be done if a critical stream parameter (buffer size, sample rate,
462 // device disconnect) notification arrived. The listeners entail
463 // quite a bit of extra code and most likely, a user program wouldn't
464 // be prepared for the result anyway. However, we do provide a flag
465 // to the client callback function to inform of an over/underrun.
467 // A structure to hold various information related to the CoreAudio API
// NOTE(review): the `struct CoreHandle {` line and several members referenced
// by the initializer below (e.g. deviceBuffer, xrun) are not visible in this
// listing.
470 AudioDeviceID id[2]; // device ids
471 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
472 AudioDeviceIOProcID procId[2];
474 UInt32 iStream[2]; // device stream index (or first if using multiple)
475 UInt32 nStreams[2]; // number of streams to use
478 pthread_cond_t condition;
479 int drainCounter; // Tracks callback counts when draining
480 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Constructor initializer: zero/one-initialize per-direction state
// (index 0 = output, index 1 = input).
483 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Constructor: on OS X 10.6+, set the hardware run-loop property to NULL so
// CoreAudio dispatches property notifications without a dedicated run loop.
486 RtApiCore:: RtApiCore()
488 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
489 // This is a largely undocumented but absolutely necessary
490 // requirement starting with OS-X 10.6. If not called, queries and
491 // updates to various audio device properties are not handled
493 CFRunLoopRef theRunLoop = NULL;
494 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
495 kAudioObjectPropertyScopeGlobal,
496 kAudioObjectPropertyElementMaster };
497 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
// A failure here is non-fatal: report it as a warning and continue.
498 if ( result != noErr ) {
499 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
500 error( RtAudioError::WARNING );
505 RtApiCore :: ~RtApiCore()
507 // The subclass destructor gets called before the base class
508 // destructor, so close an existing stream before deallocating
509 // apiDeviceId memory.
510 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Query CoreAudio for the size of the system device list and derive the
// device count from it. Returns 0 on error (the declaration of `dataSize`
// and the error-path `return 0;` are not visible in this listing).
513 unsigned int RtApiCore :: getDeviceCount( void )
515 // Find out how many audio devices there are, if any.
517 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
518 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
519 if ( result != noErr ) {
520 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
521 error( RtAudioError::WARNING );
// The property size is the full device-id array size in bytes.
525 return dataSize / sizeof( AudioDeviceID );
// Map CoreAudio's default input AudioDeviceID to RtAudio's 0-based device
// index by locating it in the full device list. (The declaration of `id`
// and the error-path return lines are not visible in this listing.)
528 unsigned int RtApiCore :: getDefaultInputDevice( void )
530 unsigned int nDevices = getDeviceCount();
// With zero or one device, index 0 is trivially the default.
531 if ( nDevices <= 1 ) return 0;
534 UInt32 dataSize = sizeof( AudioDeviceID );
535 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
536 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
537 if ( result != noErr ) {
538 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
539 error( RtAudioError::WARNING );
// Fetch the complete device-id list to search for the default id.
543 dataSize *= nDevices;
544 AudioDeviceID deviceList[ nDevices ];
545 property.mSelector = kAudioHardwarePropertyDevices;
546 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
547 if ( result != noErr ) {
548 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
549 error( RtAudioError::WARNING );
553 for ( unsigned int i=0; i<nDevices; i++ )
554 if ( id == deviceList[i] ) return i;
// Default id not present in the enumerated list: warn.
556 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
557 error( RtAudioError::WARNING );
// Map CoreAudio's default output AudioDeviceID to RtAudio's 0-based device
// index by locating it in the full device list. (The declaration of `id`
// and the error-path return lines are not visible in this listing.)
561 unsigned int RtApiCore :: getDefaultOutputDevice( void )
563 unsigned int nDevices = getDeviceCount();
// With zero or one device, index 0 is trivially the default.
564 if ( nDevices <= 1 ) return 0;
567 UInt32 dataSize = sizeof( AudioDeviceID );
568 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
569 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
570 if ( result != noErr ) {
571 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
572 error( RtAudioError::WARNING );
// Fetch the complete device-id list to search for the default id.
576 dataSize = sizeof( AudioDeviceID ) * nDevices;
577 AudioDeviceID deviceList[ nDevices ];
578 property.mSelector = kAudioHardwarePropertyDevices;
579 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
580 if ( result != noErr ) {
581 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
582 error( RtAudioError::WARNING );
586 for ( unsigned int i=0; i<nDevices; i++ )
587 if ( id == deviceList[i] ) return i;
// Default id not present in the enumerated list: warn.
589 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
590 error( RtAudioError::WARNING );
// Build a DeviceInfo record for device index `device`: name (manufacturer +
// product), channel counts per direction, supported sample rates, preferred
// rate, native format, and default-device flags. Failures are reported as
// warnings. NOTE(review): several single-line statements (e.g. `return info;`
// exits after warnings, `free(...)` calls for the malloc'd name buffers, and
// some #else/#endif lines) are not visible in this listing.
594 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
596 RtAudio::DeviceInfo info;
600 unsigned int nDevices = getDeviceCount();
601 if ( nDevices == 0 ) {
602 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
603 error( RtAudioError::INVALID_USE );
607 if ( device >= nDevices ) {
608 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
609 error( RtAudioError::INVALID_USE );
// Translate the RtAudio device index into a CoreAudio AudioDeviceID.
613 AudioDeviceID deviceList[ nDevices ];
614 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
615 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
616 kAudioObjectPropertyScopeGlobal,
617 kAudioObjectPropertyElementMaster };
618 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
619 0, NULL, &dataSize, (void *) &deviceList );
620 if ( result != noErr ) {
621 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
622 error( RtAudioError::WARNING );
626 AudioDeviceID id = deviceList[ device ];
628 // Get the device name.
631 dataSize = sizeof( CFStringRef );
632 property.mSelector = kAudioObjectPropertyManufacturer;
633 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
634 if ( result != noErr ) {
635 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
636 errorText_ = errorStream_.str();
637 error( RtAudioError::WARNING );
641 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
// Worst-case UTF-8 expansion assumed: 3 bytes per UTF-16 unit plus NUL.
642 int length = CFStringGetLength(cfname);
643 char *mname = (char *)malloc(length * 3 + 1);
644 #if defined( UNICODE ) || defined( _UNICODE )
645 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
647 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
// Device name is formatted "<manufacturer>: <product>".
649 info.name.append( (const char *)mname, strlen(mname) );
650 info.name.append( ": " );
654 property.mSelector = kAudioObjectPropertyName;
655 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
656 if ( result != noErr ) {
657 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
658 errorText_ = errorStream_.str();
659 error( RtAudioError::WARNING );
663 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
664 length = CFStringGetLength(cfname);
665 char *name = (char *)malloc(length * 3 + 1);
666 #if defined( UNICODE ) || defined( _UNICODE )
667 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
669 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
671 info.name.append( (const char *)name, strlen(name) );
675 // Get the output stream "configuration".
676 AudioBufferList *bufferList = nil;
677 property.mSelector = kAudioDevicePropertyStreamConfiguration;
678 property.mScope = kAudioDevicePropertyScopeOutput;
679 // property.mElement = kAudioObjectPropertyElementWildcard;
681 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
682 if ( result != noErr || dataSize == 0 ) {
683 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
684 errorText_ = errorStream_.str();
685 error( RtAudioError::WARNING );
689 // Allocate the AudioBufferList.
690 bufferList = (AudioBufferList *) malloc( dataSize );
691 if ( bufferList == NULL ) {
692 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
693 error( RtAudioError::WARNING );
697 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
698 if ( result != noErr || dataSize == 0 ) {
700 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
701 errorText_ = errorStream_.str();
702 error( RtAudioError::WARNING );
706 // Get output channel information.
// Sum the channel counts of all output streams of the device.
707 unsigned int i, nStreams = bufferList->mNumberBuffers;
708 for ( i=0; i<nStreams; i++ )
709 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
712 // Get the input stream "configuration".
713 property.mScope = kAudioDevicePropertyScopeInput;
714 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
715 if ( result != noErr || dataSize == 0 ) {
716 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
717 errorText_ = errorStream_.str();
718 error( RtAudioError::WARNING );
722 // Allocate the AudioBufferList.
723 bufferList = (AudioBufferList *) malloc( dataSize );
724 if ( bufferList == NULL ) {
725 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
726 error( RtAudioError::WARNING );
730 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
731 if (result != noErr || dataSize == 0) {
733 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
734 errorText_ = errorStream_.str();
735 error( RtAudioError::WARNING );
739 // Get input channel information.
740 nStreams = bufferList->mNumberBuffers;
741 for ( i=0; i<nStreams; i++ )
742 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
745 // If device opens for both playback and capture, we determine the channels.
746 if ( info.outputChannels > 0 && info.inputChannels > 0 )
747 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
749 // Probe the device sample rates.
750 bool isInput = false;
751 if ( info.outputChannels == 0 ) isInput = true;
753 // Determine the supported sample rates.
754 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
755 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
756 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
757 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
758 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
759 errorText_ = errorStream_.str();
760 error( RtAudioError::WARNING );
764 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
765 AudioValueRange rangeList[ nRanges ];
766 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
767 if ( result != kAudioHardwareNoError ) {
768 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
769 errorText_ = errorStream_.str();
770 error( RtAudioError::WARNING );
774 // The sample rate reporting mechanism is a bit of a mystery. It
775 // seems that it can either return individual rates or a range of
776 // rates. I assume that if the min / max range values are the same,
777 // then that represents a single supported rate and if the min / max
778 // range values are different, the device supports an arbitrary
779 // range of values (though there might be multiple ranges, so we'll
780 // use the most conservative range).
781 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
782 bool haveValueRange = false;
783 info.sampleRates.clear();
784 for ( UInt32 i=0; i<nRanges; i++ ) {
785 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
786 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
787 info.sampleRates.push_back( tmpSr );
// Prefer the highest discrete rate that does not exceed 48 kHz.
789 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
790 info.preferredSampleRate = tmpSr;
793 haveValueRange = true;
794 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
795 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
// For a continuous range, enumerate the static SAMPLE_RATES table that
// falls within the intersected [minimumRate, maximumRate] range.
799 if ( haveValueRange ) {
800 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
801 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
802 info.sampleRates.push_back( SAMPLE_RATES[k] );
804 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
805 info.preferredSampleRate = SAMPLE_RATES[k];
810 // Sort and remove any redundant values
811 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
812 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
814 if ( info.sampleRates.size() == 0 ) {
815 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
816 errorText_ = errorStream_.str();
817 error( RtAudioError::WARNING );
821 // CoreAudio always uses 32-bit floating point data for PCM streams.
822 // Thus, any other "physical" formats supported by the device are of
823 // no interest to the client.
824 info.nativeFormats = RTAUDIO_FLOAT32;
// Flag whether this device is the system default for each direction.
826 if ( info.outputChannels > 0 )
827 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
828 if ( info.inputChannels > 0 )
829 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// CoreAudio IOProc entry point: unpack the CallbackInfo, dispatch to the
// RtApiCore instance, and translate a false return into a HAL error code.
// (The `void *infoPointer` parameter line is not visible in this listing.)
835 static OSStatus callbackHandler( AudioDeviceID inDevice,
836 const AudioTimeStamp* /*inNow*/,
837 const AudioBufferList* inInputData,
838 const AudioTimeStamp* /*inInputTime*/,
839 AudioBufferList* outOutputData,
840 const AudioTimeStamp* /*inOutputTime*/,
843 CallbackInfo *info = (CallbackInfo *) infoPointer;
845 RtApiCore *object = (RtApiCore *) info->object;
846 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
847 return kAudioHardwareUnspecifiedError;
849 return kAudioHardwareNoError;
// Property listener for processor-overload notifications: record an
// over/underrun in the CoreHandle per direction (input = xrun[1],
// otherwise output = xrun[0]). (The `UInt32 nAddresses` parameter line is
// not visible in this listing.)
852 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
854 const AudioObjectPropertyAddress properties[],
855 void* handlePointer )
857 CoreHandle *handle = (CoreHandle *) handlePointer;
858 for ( UInt32 i=0; i<nAddresses; i++ ) {
859 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
860 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
861 handle->xrun[1] = true;
863 handle->xrun[0] = true;
867 return kAudioHardwareNoError;
// Property listener for nominal sample-rate changes: read the device's
// current nominal rate into the Float64 pointed to by the client data.
// (The `void* ratePointer` parameter line is not visible in this listing.)
870 static OSStatus rateListener( AudioObjectID inDevice,
871 UInt32 /*nAddresses*/,
872 const AudioObjectPropertyAddress /*properties*/[],
875 Float64 *rate = (Float64 *) ratePointer;
876 UInt32 dataSize = sizeof( Float64 );
877 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
878 kAudioObjectPropertyScopeGlobal,
879 kAudioObjectPropertyElementMaster };
880 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
881 return kAudioHardwareNoError;
884 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
885 unsigned int firstChannel, unsigned int sampleRate,
886 RtAudioFormat format, unsigned int *bufferSize,
887 RtAudio::StreamOptions *options )
890 unsigned int nDevices = getDeviceCount();
891 if ( nDevices == 0 ) {
892 // This should not happen because a check is made before this function is called.
893 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
897 if ( device >= nDevices ) {
898 // This should not happen because a check is made before this function is called.
899 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
903 AudioDeviceID deviceList[ nDevices ];
904 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
905 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
906 kAudioObjectPropertyScopeGlobal,
907 kAudioObjectPropertyElementMaster };
908 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
909 0, NULL, &dataSize, (void *) &deviceList );
910 if ( result != noErr ) {
911 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
915 AudioDeviceID id = deviceList[ device ];
917 // Setup for stream mode.
918 bool isInput = false;
919 if ( mode == INPUT ) {
921 property.mScope = kAudioDevicePropertyScopeInput;
924 property.mScope = kAudioDevicePropertyScopeOutput;
926 // Get the stream "configuration".
927 AudioBufferList *bufferList = nil;
929 property.mSelector = kAudioDevicePropertyStreamConfiguration;
930 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
931 if ( result != noErr || dataSize == 0 ) {
932 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
933 errorText_ = errorStream_.str();
937 // Allocate the AudioBufferList.
938 bufferList = (AudioBufferList *) malloc( dataSize );
939 if ( bufferList == NULL ) {
940 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
944 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
945 if (result != noErr || dataSize == 0) {
947 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
948 errorText_ = errorStream_.str();
952 // Search for one or more streams that contain the desired number of
953 // channels. CoreAudio devices can have an arbitrary number of
954 // streams and each stream can have an arbitrary number of channels.
955 // For each stream, a single buffer of interleaved samples is
956 // provided. RtAudio prefers the use of one stream of interleaved
957 // data or multiple consecutive single-channel streams. However, we
958 // now support multiple consecutive multi-channel streams of
959 // interleaved data as well.
960 UInt32 iStream, offsetCounter = firstChannel;
961 UInt32 nStreams = bufferList->mNumberBuffers;
962 bool monoMode = false;
963 bool foundStream = false;
965 // First check that the device supports the requested number of
967 UInt32 deviceChannels = 0;
968 for ( iStream=0; iStream<nStreams; iStream++ )
969 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
971 if ( deviceChannels < ( channels + firstChannel ) ) {
973 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
974 errorText_ = errorStream_.str();
978 // Look for a single stream meeting our needs.
979 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
980 for ( iStream=0; iStream<nStreams; iStream++ ) {
981 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
982 if ( streamChannels >= channels + offsetCounter ) {
983 firstStream = iStream;
984 channelOffset = offsetCounter;
988 if ( streamChannels > offsetCounter ) break;
989 offsetCounter -= streamChannels;
992 // If we didn't find a single stream above, then we should be able
993 // to meet the channel specification with multiple streams.
994 if ( foundStream == false ) {
996 offsetCounter = firstChannel;
997 for ( iStream=0; iStream<nStreams; iStream++ ) {
998 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
999 if ( streamChannels > offsetCounter ) break;
1000 offsetCounter -= streamChannels;
1003 firstStream = iStream;
1004 channelOffset = offsetCounter;
1005 Int32 channelCounter = channels + offsetCounter - streamChannels;
1007 if ( streamChannels > 1 ) monoMode = false;
1008 while ( channelCounter > 0 ) {
1009 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1010 if ( streamChannels > 1 ) monoMode = false;
1011 channelCounter -= streamChannels;
1018 // Determine the buffer size.
1019 AudioValueRange bufferRange;
1020 dataSize = sizeof( AudioValueRange );
1021 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1022 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1024 if ( result != noErr ) {
1025 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1026 errorText_ = errorStream_.str();
1030 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1031 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1032 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1034 // Set the buffer size. For multiple streams, I'm assuming we only
1035 // need to make this setting for the master channel.
1036 UInt32 theSize = (UInt32) *bufferSize;
1037 dataSize = sizeof( UInt32 );
1038 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1039 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1041 if ( result != noErr ) {
1042 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1043 errorText_ = errorStream_.str();
1047 // If attempting to setup a duplex stream, the bufferSize parameter
1048 // MUST be the same in both directions!
1049 *bufferSize = theSize;
1050 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1051 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1052 errorText_ = errorStream_.str();
1056 stream_.bufferSize = *bufferSize;
1057 stream_.nBuffers = 1;
1059 // Try to set "hog" mode ... it's not clear to me this is working.
1060 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1062 dataSize = sizeof( hog_pid );
1063 property.mSelector = kAudioDevicePropertyHogMode;
1064 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1065 if ( result != noErr ) {
1066 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1067 errorText_ = errorStream_.str();
1071 if ( hog_pid != getpid() ) {
1073 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1074 if ( result != noErr ) {
1075 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1076 errorText_ = errorStream_.str();
1082 // Check and if necessary, change the sample rate for the device.
1083 Float64 nominalRate;
1084 dataSize = sizeof( Float64 );
1085 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1086 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1087 if ( result != noErr ) {
1088 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1089 errorText_ = errorStream_.str();
1093 // Only change the sample rate if off by more than 1 Hz.
1094 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1096 // Set a property listener for the sample rate change
1097 Float64 reportedRate = 0.0;
1098 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1099 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1100 if ( result != noErr ) {
1101 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1102 errorText_ = errorStream_.str();
1106 nominalRate = (Float64) sampleRate;
1107 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1108 if ( result != noErr ) {
1109 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1110 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1111 errorText_ = errorStream_.str();
1115 // Now wait until the reported nominal rate is what we just set.
1116 UInt32 microCounter = 0;
1117 while ( reportedRate != nominalRate ) {
1118 microCounter += 5000;
1119 if ( microCounter > 5000000 ) break;
1123 // Remove the property listener.
1124 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1126 if ( microCounter > 5000000 ) {
1127 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1128 errorText_ = errorStream_.str();
1133 // Now set the stream format for all streams. Also, check the
1134 // physical format of the device and change that if necessary.
1135 AudioStreamBasicDescription description;
1136 dataSize = sizeof( AudioStreamBasicDescription );
1137 property.mSelector = kAudioStreamPropertyVirtualFormat;
1138 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1139 if ( result != noErr ) {
1140 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1141 errorText_ = errorStream_.str();
1145 // Set the sample rate and data format id. However, only make the
1146 // change if the sample rate is not within 1.0 of the desired
1147 // rate and the format is not linear pcm.
1148 bool updateFormat = false;
1149 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1150 description.mSampleRate = (Float64) sampleRate;
1151 updateFormat = true;
1154 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1155 description.mFormatID = kAudioFormatLinearPCM;
1156 updateFormat = true;
1159 if ( updateFormat ) {
1160 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1161 if ( result != noErr ) {
1162 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1163 errorText_ = errorStream_.str();
1168 // Now check the physical format.
1169 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1170 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1171 if ( result != noErr ) {
1172 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1173 errorText_ = errorStream_.str();
1177 //std::cout << "Current physical stream format:" << std::endl;
1178 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1179 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1180 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1181 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
1183 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1184 description.mFormatID = kAudioFormatLinearPCM;
1185 //description.mSampleRate = (Float64) sampleRate;
1186 AudioStreamBasicDescription testDescription = description;
1189 // We'll try higher bit rates first and then work our way down.
1190 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1191 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1192 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1193 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1194 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1195 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1196 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1197 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1198 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1199 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1200 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1201 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1202 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1204 bool setPhysicalFormat = false;
1205 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1206 testDescription = description;
1207 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1208 testDescription.mFormatFlags = physicalFormats[i].second;
1209 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1210 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1212 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1213 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1214 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1215 if ( result == noErr ) {
1216 setPhysicalFormat = true;
1217 //std::cout << "Updated physical stream format:" << std::endl;
1218 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1219 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1220 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1221 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1226 if ( !setPhysicalFormat ) {
1227 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1228 errorText_ = errorStream_.str();
1231 } // done setting virtual/physical formats.
1233 // Get the stream / device latency.
1235 dataSize = sizeof( UInt32 );
1236 property.mSelector = kAudioDevicePropertyLatency;
1237 if ( AudioObjectHasProperty( id, &property ) == true ) {
1238 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1239 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1241 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1242 errorText_ = errorStream_.str();
1243 error( RtAudioError::WARNING );
1247 // Byte-swapping: According to AudioHardware.h, the stream data will
1248 // always be presented in native-endian format, so we should never
1249 // need to byte swap.
1250 stream_.doByteSwap[mode] = false;
1252 // From the CoreAudio documentation, PCM data must be supplied as
1254 stream_.userFormat = format;
1255 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1257 if ( streamCount == 1 )
1258 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1259 else // multiple streams
1260 stream_.nDeviceChannels[mode] = channels;
1261 stream_.nUserChannels[mode] = channels;
1262 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1263 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1264 else stream_.userInterleaved = true;
1265 stream_.deviceInterleaved[mode] = true;
1266 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1268 // Set flags for buffer conversion.
1269 stream_.doConvertBuffer[mode] = false;
1270 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1271 stream_.doConvertBuffer[mode] = true;
1272 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1273 stream_.doConvertBuffer[mode] = true;
1274 if ( streamCount == 1 ) {
1275 if ( stream_.nUserChannels[mode] > 1 &&
1276 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1277 stream_.doConvertBuffer[mode] = true;
1279 else if ( monoMode && stream_.userInterleaved )
1280 stream_.doConvertBuffer[mode] = true;
1282 // Allocate our CoreHandle structure for the stream.
1283 CoreHandle *handle = 0;
1284 if ( stream_.apiHandle == 0 ) {
1286 handle = new CoreHandle;
1288 catch ( std::bad_alloc& ) {
1289 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1293 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1294 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1297 stream_.apiHandle = (void *) handle;
1300 handle = (CoreHandle *) stream_.apiHandle;
1301 handle->iStream[mode] = firstStream;
1302 handle->nStreams[mode] = streamCount;
1303 handle->id[mode] = id;
1305 // Allocate necessary internal buffers.
1306 unsigned long bufferBytes;
1307 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1308 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1309 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1310 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1311 if ( stream_.userBuffer[mode] == NULL ) {
1312 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1316 // If possible, we will make use of the CoreAudio stream buffers as
1317 // "device buffers". However, we can't do this if using multiple
1319 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1321 bool makeBuffer = true;
1322 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1323 if ( mode == INPUT ) {
1324 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1325 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1326 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1331 bufferBytes *= *bufferSize;
1332 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1333 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1334 if ( stream_.deviceBuffer == NULL ) {
1335 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1341 stream_.sampleRate = sampleRate;
1342 stream_.device[mode] = device;
1343 stream_.state = STREAM_STOPPED;
1344 stream_.callbackInfo.object = (void *) this;
1346 // Setup the buffer conversion information structure.
1347 if ( stream_.doConvertBuffer[mode] ) {
1348 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1349 else setConvertInfo( mode, channelOffset );
1352 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1353 // Only one callback procedure per device.
1354 stream_.mode = DUPLEX;
1356 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1357 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1359 // deprecated in favor of AudioDeviceCreateIOProcID()
1360 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1362 if ( result != noErr ) {
1363 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1364 errorText_ = errorStream_.str();
1367 if ( stream_.mode == OUTPUT && mode == INPUT )
1368 stream_.mode = DUPLEX;
1370 stream_.mode = mode;
1373 // Setup the device property listener for over/underload.
1374 property.mSelector = kAudioDeviceProcessorOverload;
1375 property.mScope = kAudioObjectPropertyScopeGlobal;
1376 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1382 pthread_cond_destroy( &handle->condition );
1384 stream_.apiHandle = 0;
1387 for ( int i=0; i<2; i++ ) {
1388 if ( stream_.userBuffer[i] ) {
1389 free( stream_.userBuffer[i] );
1390 stream_.userBuffer[i] = 0;
1394 if ( stream_.deviceBuffer ) {
1395 free( stream_.deviceBuffer );
1396 stream_.deviceBuffer = 0;
1399 stream_.state = STREAM_CLOSED;
1403 void RtApiCore :: closeStream( void )
// Close an open stream: unregister xrun listeners, stop the device(s)
// if running, release the IOProc(s), free all internal buffers, and
// reset the stream structure to the CLOSED state.
// NOTE(review): several lines (braces, mutex handling) appear elided
// in this extraction; documentation below covers the visible logic.
1405 if ( stream_.state == STREAM_CLOSED ) {
1406 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1407 error( RtAudioError::WARNING );
1411 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Tear down the output side (handle->id[0]): remove the processor
// overload (xrun) listener, stop the device, and destroy its IOProc.
1412 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1414 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1415 kAudioObjectPropertyScopeGlobal,
1416 kAudioObjectPropertyElementMaster };
1418 property.mSelector = kAudioDeviceProcessorOverload;
1419 property.mScope = kAudioObjectPropertyScopeGlobal;
1420 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1421 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
// Listener removal failure is non-fatal; report a warning and continue teardown.
1422 error( RtAudioError::WARNING );
1425 if ( stream_.state == STREAM_RUNNING )
1426 AudioDeviceStop( handle->id[0], callbackHandler );
1427 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1428 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1430 // deprecated in favor of AudioDeviceDestroyIOProcID()
1431 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Tear down the input side (handle->id[1]) the same way — but only
// when input uses a distinct device; a duplex stream on a single
// device shares one IOProc that was already released above.
1435 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1437 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1438 kAudioObjectPropertyScopeGlobal,
1439 kAudioObjectPropertyElementMaster };
1441 property.mSelector = kAudioDeviceProcessorOverload;
1442 property.mScope = kAudioObjectPropertyScopeGlobal;
1443 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1444 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1445 error( RtAudioError::WARNING );
1448 if ( stream_.state == STREAM_RUNNING )
1449 AudioDeviceStop( handle->id[1], callbackHandler );
1450 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1451 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1453 // deprecated in favor of AudioDeviceDestroyIOProcID()
1454 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Free the per-direction user buffers (index 0 = output, 1 = input)
// and the shared device-format conversion buffer, if allocated.
1458 for ( int i=0; i<2; i++ ) {
1459 if ( stream_.userBuffer[i] ) {
1460 free( stream_.userBuffer[i] );
1461 stream_.userBuffer[i] = 0;
1465 if ( stream_.deviceBuffer ) {
1466 free( stream_.deviceBuffer );
1467 stream_.deviceBuffer = 0;
1470 // Destroy pthread condition variable.
1471 pthread_cond_destroy( &handle->condition );
// Reset stream bookkeeping so a subsequent openStream() starts clean.
1473 stream_.apiHandle = 0;
1475 stream_.mode = UNINITIALIZED;
1476 stream_.state = STREAM_CLOSED;
1479 void RtApiCore :: startStream( void )
// Start the callback procedure(s) for an opened stream via
// AudioDeviceStart(), reset the drain state, and mark the stream
// RUNNING.  On any CoreAudio failure a SYSTEM_ERROR is raised.
1482 if ( stream_.state == STREAM_RUNNING ) {
1483 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1484 error( RtAudioError::WARNING );
1488 OSStatus result = noErr;
1489 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Start the output device (handle->id[0]) for OUTPUT and DUPLEX modes.
1490 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1492 result = AudioDeviceStart( handle->id[0], callbackHandler );
1493 if ( result != noErr ) {
1494 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1495 errorText_ = errorStream_.str();
// Start the input device (handle->id[1]) for INPUT mode, or for a
// DUPLEX stream whose input runs on a different device than output
// (same-device duplex shares a single callback, started above).
1500 if ( stream_.mode == INPUT ||
1501 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1503 result = AudioDeviceStart( handle->id[1], callbackHandler );
1504 if ( result != noErr ) {
1505 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1506 errorText_ = errorStream_.str();
// Reset the drain handshake used by stopStream()/abortStream().
1511 handle->drainCounter = 0;
1512 handle->internalDrain = false;
1513 stream_.state = STREAM_RUNNING;
1516 if ( result == noErr ) return;
1517 error( RtAudioError::SYSTEM_ERROR );
1520 void RtApiCore :: stopStream( void )
// Stop a running stream gracefully.  For output, first let the
// callback drain pending output: set drainCounter and wait on the
// condition variable until callbackEvent() signals that draining is
// complete, then call AudioDeviceStop() on each active device.
1523 if ( stream_.state == STREAM_STOPPED ) {
1524 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1525 error( RtAudioError::WARNING );
1529 OSStatus result = noErr;
1530 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1531 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain in progress yet; start one and
// block until the audio callback signals the condition variable.
// (A nonzero value means abortStream()/the callback already set it.)
1533 if ( handle->drainCounter == 0 ) {
1534 handle->drainCounter = 2;
1535 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1538 result = AudioDeviceStop( handle->id[0], callbackHandler );
1539 if ( result != noErr ) {
1540 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1541 errorText_ = errorStream_.str();
// Stop the input device too when it is distinct from the output device.
1546 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1548 result = AudioDeviceStop( handle->id[1], callbackHandler );
1549 if ( result != noErr ) {
1550 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1551 errorText_ = errorStream_.str();
1556 stream_.state = STREAM_STOPPED;
1559 if ( result == noErr ) return;
1560 error( RtAudioError::SYSTEM_ERROR );
1563 void RtApiCore :: abortStream( void )
// Stop the stream immediately, without waiting for pending output to
// drain: setting drainCounter to 2 tells callbackEvent() to output
// silence and begin the stop sequence on its next invocation.
1566 if ( stream_.state == STREAM_STOPPED ) {
1567 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1568 error( RtAudioError::WARNING );
1572 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1573 handle->drainCounter = 2;
1578 // This function will be called by a spawned thread when the user
1579 // callback function signals that the stream should be stopped or
1580 // aborted. It is better to handle it this way because the
1581 // callbackEvent() function probably should return before the AudioDeviceStop()
1582 // function is called.
1583 static void *coreStopStream( void *ptr )
// Thread entry point: recover the RtApiCore instance from the
// CallbackInfo passed as the thread argument and stop its stream.
1585 CallbackInfo *info = (CallbackInfo *) ptr;
1586 RtApiCore *object = (RtApiCore *) info->object;
1588 object->stopStream();
1589 pthread_exit( NULL );
// Per-buffer CoreAudio IOProc handler.  Invokes the user callback to
// produce/consume one buffer of audio, copies or converts data between
// the user buffers and the CoreAudio AudioBufferList(s), and manages
// the drain/stop handshake with stopStream()/abortStream().
// For a duplex stream on two devices this function is called once per
// device; the user callback fires only on the output device's call.
// NOTE(review): some lines (braces, mutex lock, FAILURE returns)
// appear elided in this extraction; comments cover the visible logic.
1592 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1593 const AudioBufferList *inBufferList,
1594 const AudioBufferList *outBufferList )
1596 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1597 if ( stream_.state == STREAM_CLOSED ) {
1598 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1599 error( RtAudioError::WARNING );
1603 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1604 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1606 // Check if we were draining the stream and signal is finished.
// drainCounter > 3 means the zero-filled drain buffers have been
// played out: either spawn a thread to call stopStream() (internal
// drain requested by the user callback's return value) or wake the
// thread blocked in stopStream() on the condition variable.
1607 if ( handle->drainCounter > 3 ) {
1608 ThreadHandle threadId;
1610 stream_.state = STREAM_STOPPING;
1611 if ( handle->internalDrain == true )
1612 pthread_create( &threadId, NULL, coreStopStream, info );
1613 else // external call to stopStream()
1614 pthread_cond_signal( &handle->condition );
1618 AudioDeviceID outputDevice = handle->id[0];
1620 // Invoke user callback to get fresh output data UNLESS we are
1621 // draining stream or duplex mode AND the input/output devices are
1622 // different AND this function is called for the input device.
1623 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1624 RtAudioCallback callback = (RtAudioCallback) info->callback;
1625 double streamTime = getStreamTime();
1626 RtAudioStreamStatus status = 0;
// Report and clear any over/underflow flags set by xrunListener.
1627 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1628 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1629 handle->xrun[0] = false;
1631 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1632 status |= RTAUDIO_INPUT_OVERFLOW;
1633 handle->xrun[1] = false;
1636 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1637 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort (stop without draining); 1 = stop after
// draining the output already queued (internal drain).
1638 if ( cbReturnValue == 2 ) {
1639 stream_.state = STREAM_STOPPING;
1640 handle->drainCounter = 2;
1644 else if ( cbReturnValue == 1 ) {
1645 handle->drainCounter = 1;
1646 handle->internalDrain = true;
// ---- Output side: fill the CoreAudio output buffer(s). ----
1650 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1652 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1654 if ( handle->nStreams[0] == 1 ) {
1655 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1657 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1659 else { // fill multiple streams with zeros
1660 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1661 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1663 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
// Single output stream: convert or memcpy straight into the
// CoreAudio stream buffer.
1667 else if ( handle->nStreams[0] == 1 ) {
1668 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1669 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1670 stream_.userBuffer[0], stream_.convertInfo[0] );
1672 else { // copy from user buffer
1673 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1674 stream_.userBuffer[0],
1675 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1678 else { // fill multiple streams
// If a format conversion is needed, convert into the internal
// device buffer first and distribute from there.
1679 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1680 if ( stream_.doConvertBuffer[0] ) {
1681 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1682 inBuffer = (Float32 *) stream_.deviceBuffer;
1685 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
// One single-channel stream per user channel: copy each channel's
// contiguous block into its own CoreAudio stream buffer.
1686 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1687 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1688 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1689 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1692 else { // fill multiple multi-channel streams with interleaved data
1693 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1696 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1697 UInt32 inChannels = stream_.nUserChannels[0];
1698 if ( stream_.doConvertBuffer[0] ) {
1699 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1700 inChannels = stream_.nDeviceChannels[0];
// inOffset is the stride between a frame's consecutive channels in
// the source buffer: 1 when interleaved, bufferSize when planar.
1703 if ( inInterleaved ) inOffset = 1;
1704 else inOffset = stream_.bufferSize;
1706 channelsLeft = inChannels;
1707 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1709 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1710 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1713 // Account for possible channel offset in first stream
1714 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1715 streamChannels -= stream_.channelOffset[0];
1716 outJump = stream_.channelOffset[0];
1720 // Account for possible unfilled channels at end of the last stream
1721 if ( streamChannels > channelsLeft ) {
1722 outJump = streamChannels - channelsLeft;
1723 streamChannels = channelsLeft;
1726 // Determine input buffer offsets and skips
1727 if ( inInterleaved ) {
1728 inJump = inChannels;
1729 in += inChannels - channelsLeft;
1733 in += (inChannels - channelsLeft) * inOffset;
// Frame-by-frame interleave copy into this stream's buffer.
1736 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1737 for ( unsigned int j=0; j<streamChannels; j++ ) {
1738 *out++ = in[j*inOffset];
1743 channelsLeft -= streamChannels;
1749 // Don't bother draining input
// While draining, just advance the counter so the drain completes.
1750 if ( handle->drainCounter ) {
1751 handle->drainCounter++;
// ---- Input side: read the CoreAudio input buffer(s). ----
1755 AudioDeviceID inputDevice;
1756 inputDevice = handle->id[1];
1757 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1759 if ( handle->nStreams[1] == 1 ) {
1760 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1761 convertBuffer( stream_.userBuffer[1],
1762 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1763 stream_.convertInfo[1] );
1765 else { // copy to user buffer
1766 memcpy( stream_.userBuffer[1],
1767 inBufferList->mBuffers[handle->iStream[1]].mData,
1768 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1771 else { // read from multiple streams
1772 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1773 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1775 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
// One single-channel stream per user channel: gather each stream's
// buffer into the corresponding planar block of the destination.
1776 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1777 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1778 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1779 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1782 else { // read from multiple multi-channel streams
// Mirror of the output distribution loop above, with roles swapped.
1783 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1786 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1787 UInt32 outChannels = stream_.nUserChannels[1];
1788 if ( stream_.doConvertBuffer[1] ) {
1789 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1790 outChannels = stream_.nDeviceChannels[1];
1793 if ( outInterleaved ) outOffset = 1;
1794 else outOffset = stream_.bufferSize;
1796 channelsLeft = outChannels;
1797 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1799 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1800 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1803 // Account for possible channel offset in first stream
1804 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1805 streamChannels -= stream_.channelOffset[1];
1806 inJump = stream_.channelOffset[1];
1810 // Account for possible unread channels at end of the last stream
1811 if ( streamChannels > channelsLeft ) {
1812 inJump = streamChannels - channelsLeft;
1813 streamChannels = channelsLeft;
1816 // Determine output buffer offsets and skips
1817 if ( outInterleaved ) {
1818 outJump = outChannels;
1819 out += outChannels - channelsLeft;
1823 out += (outChannels - channelsLeft) * outOffset;
// Frame-by-frame de-interleave copy out of this stream's buffer.
1826 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1827 for ( unsigned int j=0; j<streamChannels; j++ ) {
1828 out[j*outOffset] = *in++;
1833 channelsLeft -= streamChannels;
1837 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1838 convertBuffer( stream_.userBuffer[1],
1839 stream_.deviceBuffer,
1840 stream_.convertInfo[1] );
1846 //MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream time by one buffer's worth of frames.
1848 RtApi::tickStreamTime();
// Translate a CoreAudio OSStatus error code into the name of the
// corresponding kAudioHardware*/kAudioDevice* constant, for use in
// diagnostic messages. Unrecognized codes yield a generic string.
1852 const char* RtApiCore :: getErrorCode( OSStatus code )
1856   case kAudioHardwareNotRunningError:
1857     return "kAudioHardwareNotRunningError";
1859   case kAudioHardwareUnspecifiedError:
1860     return "kAudioHardwareUnspecifiedError";
1862   case kAudioHardwareUnknownPropertyError:
1863     return "kAudioHardwareUnknownPropertyError";
1865   case kAudioHardwareBadPropertySizeError:
1866     return "kAudioHardwareBadPropertySizeError";
1868   case kAudioHardwareIllegalOperationError:
1869     return "kAudioHardwareIllegalOperationError";
1871   case kAudioHardwareBadObjectError:
1872     return "kAudioHardwareBadObjectError";
1874   case kAudioHardwareBadDeviceError:
1875     return "kAudioHardwareBadDeviceError";
1877   case kAudioHardwareBadStreamError:
1878     return "kAudioHardwareBadStreamError";
1880   case kAudioHardwareUnsupportedOperationError:
1881     return "kAudioHardwareUnsupportedOperationError";
1883   case kAudioDeviceUnsupportedFormatError:
1884     return "kAudioDeviceUnsupportedFormatError";
1886   case kAudioDevicePermissionsError:
1887     return "kAudioDevicePermissionsError";
// Fallback for any code not listed above.
1890     return "CoreAudio unknown error";
1894 //******************** End of __MACOSX_CORE__ *********************//
1897 #if defined(__UNIX_JACK__)
1899 // JACK is a low-latency audio server, originally written for the
1900 // GNU/Linux operating system and now also ported to OS-X. It can
1901 // connect a number of different applications to an audio device, as
1902 // well as allowing them to share audio between themselves.
1904 // When using JACK with RtAudio, "devices" refer to JACK clients that
1905 // have ports connected to the server. The JACK server is typically
1906 // started in a terminal as follows:
1908 // .jackd -d alsa -d hw:0
1910 // or through an interface program such as qjackctl. Many of the
1911 // parameters normally set for a stream are fixed by the JACK server
1912 // and can be specified when the JACK server is started. In
1915 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1917 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1918 // frames, and number of buffers = 4. Once the server is running, it
1919 // is not possible to override these values. If the values are not
1920 // specified in the command-line, the JACK server uses default values.
1922 // The JACK server does not have to be running when an instance of
1923 // RtApiJack is created, though the function getDeviceCount() will
1924 // report 0 devices found until JACK has been started. When no
1925 // devices are available (i.e., the JACK server is not running), a
1926 // stream cannot be opened.
1928 #include <jack/jack.h>
// A structure to hold various information related to the Jack API
// implementation. One instance is created per stream and stored in
// stream_.apiHandle. Index convention throughout this section:
// [0] = playback/output, [1] = capture/input.
1935   jack_client_t *client;
// Arrays of registered JACK ports, one entry per channel, per direction.
1936   jack_port_t **ports[2];
1937   std::string deviceName[2];
// Condition variable used to block stopStream() until draining finishes.
1939   pthread_cond_t condition;
1940   int drainCounter; // Tracks callback counts when draining
1941   bool internalDrain; // Indicates if stop is initiated from callback or not.
1944   :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
1947 #if !defined(__RTAUDIO_DEBUG__)
// No-op error handler installed via jack_set_error_function() below to
// suppress JACK's internal error printing in non-debug builds.
1948 static void jackSilentError( const char * ) {};
// Constructor: autoconnect defaults to true; optionally silences JACK's
// own error output so RtAudio's error reporting is the single channel.
1951 RtApiJack :: RtApiJack()
1952   :shouldAutoconnect_(true) {
1953   // Nothing to do here.
1954 #if !defined(__RTAUDIO_DEBUG__)
1955   // Turn off Jack's internal error reporting.
1956   jack_set_error_function( &jackSilentError );
// Destructor: make sure any open stream is torn down before the object dies.
1960 RtApiJack :: ~RtApiJack()
1962   if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count JACK "devices" by enumerating all audio ports and counting the
// distinct client-name prefixes (text up to and including the first ':').
// Returns 0 when the JACK server is not running (we refuse to start it
// ourselves via JackNoStartServer).
1965 unsigned int RtApiJack :: getDeviceCount( void )
1967   // See if we can become a jack client.
1968   jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
1969   jack_status_t *status = NULL;
1970   jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
1971   if ( client == 0 ) return 0;
1974   std::string port, previousPort;
1975   unsigned int nChannels = 0, nDevices = 0;
1976   ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
1978     // Parse the port names up to the first colon (:).
1981       port = (char *) ports[ nChannels ];
1982       iColon = port.find(":");
1983       if ( iColon != std::string::npos ) {
// A new prefix means a new client, i.e. a new RtAudio "device".
1984         port = port.substr( 0, iColon + 1 );
1985         if ( port != previousPort ) {
1987           previousPort = port;
1990     } while ( ports[++nChannels] );
// Temporary counting client is no longer needed.
1994   jack_client_close( client );
// Probe one JACK "device" (client) by index: name, channel counts, the
// server's (single) sample rate, and native format. info.probed stays
// false on failure paths. Channel direction is inverted relative to
// JACK: JACK *input* ports are RtAudio *output* channels and vice versa.
1998 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2000   RtAudio::DeviceInfo info;
2001   info.probed = false;
2003   jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2004   jack_status_t *status = NULL;
2005   jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2006   if ( client == 0 ) {
2007     errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2008     error( RtAudioError::WARNING );
2013   std::string port, previousPort;
2014   unsigned int nPorts = 0, nDevices = 0;
2015   ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2017     // Parse the port names up to the first colon (:).
2020       port = (char *) ports[ nPorts ];
2021       iColon = port.find(":");
2022       if ( iColon != std::string::npos ) {
2023         port = port.substr( 0, iColon );
2024         if ( port != previousPort ) {
// The device-th distinct client prefix is the one we were asked about.
2025           if ( nDevices == device ) info.name = port;
2027           previousPort = port;
2030     } while ( ports[++nPorts] );
2034   if ( device >= nDevices ) {
2035     jack_client_close( client );
2036     errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2037     error( RtAudioError::INVALID_USE );
2041   // Get the current jack server sample rate.
2042   info.sampleRates.clear();
// JACK runs at exactly one rate, fixed when the server was started.
2044   info.preferredSampleRate = jack_get_sample_rate( client );
2045   info.sampleRates.push_back( info.preferredSampleRate );
2047   // Count the available ports containing the client name as device
2048   // channels. Jack "input ports" equal RtAudio output channels.
2049   unsigned int nChannels = 0;
2050   ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
2052     while ( ports[ nChannels ] ) nChannels++;
2054     info.outputChannels = nChannels;
2057   // Jack "output ports" equal RtAudio input channels.
2059   ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2061     while ( ports[ nChannels ] ) nChannels++;
2063     info.inputChannels = nChannels;
2066   if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2067     jack_client_close(client);
2068     errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2069     error( RtAudioError::WARNING );
2073   // If device opens for both playback and capture, we determine the channels.
2074   if ( info.outputChannels > 0 && info.inputChannels > 0 )
2075     info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2077   // Jack always uses 32-bit floats.
2078   info.nativeFormats = RTAUDIO_FLOAT32;
2080   // Jack doesn't provide default devices so we'll use the first available one.
2081   if ( device == 0 && info.outputChannels > 0 )
2082     info.isDefaultOutput = true;
2083   if ( device == 0 && info.inputChannels > 0 )
2084     info.isDefaultInput = true;
2086   jack_client_close(client);
// JACK process callback trampoline: recovers the RtApiJack instance from
// the CallbackInfo pointer and forwards to callbackEvent(). A nonzero
// return tells JACK to remove this client from the process graph.
2091 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2093   CallbackInfo *info = (CallbackInfo *) infoPointer;
2095   RtApiJack *object = (RtApiJack *) info->object;
2096   if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2101 // This function will be called by a spawned thread when the Jack
2102 // server signals that it is shutting down. It is necessary to handle
2103 // it this way because the jackShutdown() function must return before
2104 // the jack_deactivate() function (in closeStream()) will return.
// Thread entry point used by jackShutdown(): closes the stream from a
// separate thread so jack_deactivate() (called inside closeStream())
// can return — see the comment block above.
2105 static void *jackCloseStream( void *ptr )
2107   CallbackInfo *info = (CallbackInfo *) ptr;
2108   RtApiJack *object = (RtApiJack *) info->object;
2110   object->closeStream();
2112   pthread_exit( NULL );
// Invoked by JACK when the server shuts this client down (or when the
// client is deactivated). Spawns jackCloseStream() on its own thread to
// close the stream without deadlocking on jack_deactivate().
2114 static void jackShutdown( void *infoPointer )
2116   CallbackInfo *info = (CallbackInfo *) infoPointer;
2117   RtApiJack *object = (RtApiJack *) info->object;
2119   // Check current stream state. If stopped, then we'll assume this
2120   // was called as a result of a call to RtApiJack::stopStream (the
2121   // deactivation of a client handle causes this function to be called).
2122   // If not, we'll assume the Jack server is shutting down or some
2123   // other problem occurred and we should close the stream.
2124   if ( object->isStreamRunning() == false ) return;
2126   ThreadHandle threadId;
2127   pthread_create( &threadId, NULL, jackCloseStream, info );
2128   std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
// JACK xrun callback: flags an under/overrun on whichever directions
// have registered ports; callbackEvent() reports and clears the flags.
2131 static int jackXrun( void *infoPointer )
2133   JackHandle *handle = *((JackHandle **) infoPointer);
2135   if ( handle->ports[0] ) handle->xrun[0] = true;
2136   if ( handle->ports[1] ) handle->xrun[1] = true;
// Open (or extend to duplex) a stream on the given JACK device.
// Creates/reuses the JACK client, validates channel counts and the
// server sample rate, fills in the stream_ structure, allocates user
// and (optionally) conversion buffers, allocates the JackHandle, wires
// up the process/xrun/shutdown callbacks, and registers one JACK port
// per user channel. Returns FAILURE (via the cleanup code at the end)
// on any error; SUCCESS otherwise. Called once for OUTPUT and possibly
// again for INPUT to form a DUPLEX stream.
2141 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2142                                    unsigned int firstChannel, unsigned int sampleRate,
2143                                    RtAudioFormat format, unsigned int *bufferSize,
2144                                    RtAudio::StreamOptions *options )
2146   JackHandle *handle = (JackHandle *) stream_.apiHandle;
2148   // Look for jack server and try to become a client (only do once per stream).
2149   jack_client_t *client = 0;
2150   if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2151     jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2152     jack_status_t *status = NULL;
2153     if ( options && !options->streamName.empty() )
2154       client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2156       client = jack_client_open( "RtApiJack", jackoptions, status );
2157     if ( client == 0 ) {
2158       errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2159       error( RtAudioError::WARNING );
2164     // The handle must have been created on an earlier pass.
2165     client = handle->client;
2169   std::string port, previousPort, deviceName;
2170   unsigned int nPorts = 0, nDevices = 0;
2171   ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2173     // Parse the port names up to the first colon (:).
2176       port = (char *) ports[ nPorts ];
2177       iColon = port.find(":");
2178       if ( iColon != std::string::npos ) {
2179         port = port.substr( 0, iColon );
2180         if ( port != previousPort ) {
2181           if ( nDevices == device ) deviceName = port;
2183           previousPort = port;
2186     } while ( ports[++nPorts] );
2190   if ( device >= nDevices ) {
2191     errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
// Direction flag is from JACK's point of view (inverted vs. RtAudio).
2195   unsigned long flag = JackPortIsInput;
2196   if ( mode == INPUT ) flag = JackPortIsOutput;
2198   if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
2199     // Count the available ports containing the client name as device
2200     // channels. Jack "input ports" equal RtAudio output channels.
2201     unsigned int nChannels = 0;
2202     ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2204       while ( ports[ nChannels ] ) nChannels++;
2207     // Compare the jack ports for specified client to the requested number of channels.
2208     if ( nChannels < (channels + firstChannel) ) {
2209       errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2210       errorText_ = errorStream_.str();
2215   // Check the jack server sample rate.
// JACK's rate is fixed at server start; we cannot adapt, only reject.
2216   unsigned int jackRate = jack_get_sample_rate( client );
2217   if ( sampleRate != jackRate ) {
2218     jack_client_close( client );
2219     errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2220     errorText_ = errorStream_.str();
2223   stream_.sampleRate = jackRate;
2225   // Get the latency of the JACK port.
2226   ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2227   if ( ports[ firstChannel ] ) {
2229     jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2230     // the range (usually the min and max are equal)
2231     jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2232     // get the latency range
2233     jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2234     // be optimistic, use the min!
2235     stream_.latency[mode] = latrange.min;
2236     //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2240   // The jack server always uses 32-bit floating-point data.
2241   stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2242   stream_.userFormat = format;
2244   if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2245   else stream_.userInterleaved = true;
2247   // Jack always uses non-interleaved buffers.
2248   stream_.deviceInterleaved[mode] = false;
2250   // Jack always provides host byte-ordered data.
2251   stream_.doByteSwap[mode] = false;
2253   // Get the buffer size. The buffer size and number of buffers
2254   // (periods) is set when the jack server is started.
2255   stream_.bufferSize = (int) jack_get_buffer_size( client );
2256   *bufferSize = stream_.bufferSize;
2258   stream_.nDeviceChannels[mode] = channels;
2259   stream_.nUserChannels[mode] = channels;
2261   // Set flags for buffer conversion.
// Conversion is needed when the user format differs from float32 or the
// user wants interleaved data (>1 channel) on JACK's planar buffers.
2262   stream_.doConvertBuffer[mode] = false;
2263   if ( stream_.userFormat != stream_.deviceFormat[mode] )
2264     stream_.doConvertBuffer[mode] = true;
2265   if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2266        stream_.nUserChannels[mode] > 1 )
2267     stream_.doConvertBuffer[mode] = true;
2269   // Allocate our JackHandle structure for the stream.
2270   if ( handle == 0 ) {
2272       handle = new JackHandle;
2274     catch ( std::bad_alloc& ) {
2275       errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2279     if ( pthread_cond_init(&handle->condition, NULL) ) {
2280       errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2283     stream_.apiHandle = (void *) handle;
2284     handle->client = client;
2286   handle->deviceName[mode] = deviceName;
2288   // Allocate necessary internal buffers.
2289   unsigned long bufferBytes;
2290   bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2291   stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2292   if ( stream_.userBuffer[mode] == NULL ) {
2293     errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2297   if ( stream_.doConvertBuffer[mode] ) {
// For duplex streams the device buffer is shared between directions;
// only (re)allocate it if the new direction needs a bigger one.
2299     bool makeBuffer = true;
2300     if ( mode == OUTPUT )
2301       bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2302     else { // mode == INPUT
2303       bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2304       if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2305         unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2306         if ( bufferBytes < bytesOut ) makeBuffer = false;
2311       bufferBytes *= *bufferSize;
2312       if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2313       stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2314       if ( stream_.deviceBuffer == NULL ) {
2315         errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2321   // Allocate memory for the Jack ports (channels) identifiers.
2322   handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2323   if ( handle->ports[mode] == NULL )  {
2324     errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2328   stream_.device[mode] = device;
2329   stream_.channelOffset[mode] = firstChannel;
2330   stream_.state = STREAM_STOPPED;
2331   stream_.callbackInfo.object = (void *) this;
2333   if ( stream_.mode == OUTPUT && mode == INPUT )
2334     // We had already set up the stream for output.
2335     stream_.mode = DUPLEX;
2337     stream_.mode = mode;
2338     jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2339     jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2340     jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2343   // Register our ports.
2345   if ( mode == OUTPUT ) {
2346     for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2347       snprintf( label, 64, "outport %d", i );
2348       handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2349                                                 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2353     for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2354       snprintf( label, 64, "inport %d", i );
2355       handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2356                                                 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2360   // Setup the buffer conversion information structure.  We don't use
2361   // buffers to do channel offsets, so we override that parameter
2363   if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2365   if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
// Error-exit cleanup: release the condition variable, client, port
// arrays, handle, and any allocated buffers, then report FAILURE.
2371       pthread_cond_destroy( &handle->condition );
2372     jack_client_close( handle->client );
2374     if ( handle->ports[0] ) free( handle->ports[0] );
2375     if ( handle->ports[1] ) free( handle->ports[1] );
2378     stream_.apiHandle = 0;
2381   for ( int i=0; i<2; i++ ) {
2382     if ( stream_.userBuffer[i] ) {
2383       free( stream_.userBuffer[i] );
2384       stream_.userBuffer[i] = 0;
2388   if ( stream_.deviceBuffer ) {
2389     free( stream_.deviceBuffer );
2390     stream_.deviceBuffer = 0;
// Close the open stream: deactivate and close the JACK client, free the
// JackHandle (port arrays + condition variable) and all stream buffers,
// then reset the stream to UNINITIALIZED/CLOSED. Warns if no stream is open.
2396 void RtApiJack :: closeStream( void )
2398   if ( stream_.state == STREAM_CLOSED ) {
2399     errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2400     error( RtAudioError::WARNING );
2404   JackHandle *handle = (JackHandle *) stream_.apiHandle;
2407     if ( stream_.state == STREAM_RUNNING )
2408       jack_deactivate( handle->client );
2410     jack_client_close( handle->client );
2414     if ( handle->ports[0] ) free( handle->ports[0] );
2415     if ( handle->ports[1] ) free( handle->ports[1] );
2416     pthread_cond_destroy( &handle->condition );
2418     stream_.apiHandle = 0;
2421   for ( int i=0; i<2; i++ ) {
2422     if ( stream_.userBuffer[i] ) {
2423       free( stream_.userBuffer[i] );
2424       stream_.userBuffer[i] = 0;
2428   if ( stream_.deviceBuffer ) {
2429     free( stream_.deviceBuffer );
2430     stream_.deviceBuffer = 0;
2433   stream_.mode = UNINITIALIZED;
2434   stream_.state = STREAM_CLOSED;
// Activate the JACK client and, unless autoconnect was disabled, connect
// our registered ports to the target device's ports (our outputs to the
// device's inputs, and the device's outputs to our inputs), honoring the
// channel offset chosen at open time. Reports SYSTEM_ERROR on failure.
2437 void RtApiJack :: startStream( void )
2440   if ( stream_.state == STREAM_RUNNING ) {
2441     errorText_ = "RtApiJack::startStream(): the stream is already running!";
2442     error( RtAudioError::WARNING );
2446   JackHandle *handle = (JackHandle *) stream_.apiHandle;
2447   int result = jack_activate( handle->client );
2449     errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2455   // Get the list of available ports.
2456   if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
2458     ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
2459     if ( ports == NULL) {
2460       errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2464     // Now make the port connections.  Since RtAudio wasn't designed to
2465     // allow the user to select particular channels of a device, we'll
2466     // just open the first "nChannels" ports with offset.
2467     for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2469       if ( ports[ stream_.channelOffset[0] + i ] )
2470         result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2473         errorText_ = "RtApiJack::startStream(): error connecting output ports!";
2480   if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
2482     ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2483     if ( ports == NULL) {
2484       errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2488     // Now make the port connections.  See note above.
2489     for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2491       if ( ports[ stream_.channelOffset[1] + i ] )
2492         result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2495         errorText_ = "RtApiJack::startStream(): error connecting input ports!";
// Reset drain bookkeeping for the new run before marking RUNNING.
2502   handle->drainCounter = 0;
2503   handle->internalDrain = false;
2504   stream_.state = STREAM_RUNNING;
2507   if ( result == 0 ) return;
2508   error( RtAudioError::SYSTEM_ERROR );
// Stop the stream gracefully. For output/duplex streams, start a drain
// (drainCounter = 2) and block on the handle's condition variable until
// callbackEvent() signals that the output has been flushed, then
// deactivate the JACK client.
2511 void RtApiJack :: stopStream( void )
2514   if ( stream_.state == STREAM_STOPPED ) {
2515     errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2516     error( RtAudioError::WARNING );
2520   JackHandle *handle = (JackHandle *) stream_.apiHandle;
2521   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain is in progress yet; start one and wait.
2523     if ( handle->drainCounter == 0 ) {
2524       handle->drainCounter = 2;
2525       pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2529   jack_deactivate( handle->client );
2530   stream_.state = STREAM_STOPPED;
// Stop the stream immediately: setting drainCounter = 2 makes the next
// callback write zeros and begin shutdown without waiting for the user
// callback to drain pending output.
2533 void RtApiJack :: abortStream( void )
2536   if ( stream_.state == STREAM_STOPPED ) {
2537     errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2538     error( RtAudioError::WARNING );
2542   JackHandle *handle = (JackHandle *) stream_.apiHandle;
2543   handle->drainCounter = 2;
2548 // This function will be called by a spawned thread when the user
2549 // callback function signals that the stream should be stopped or
2550 // aborted. It is necessary to handle it this way because the
2551 // callbackEvent() function must return before the jack_deactivate()
2552 // function will return.
// Thread entry point used by callbackEvent(): stops the stream from a
// separate thread so the callback can return before jack_deactivate()
// runs — see the comment block above.
2553 static void *jackStopStream( void *ptr )
2555   CallbackInfo *info = (CallbackInfo *) ptr;
2556   RtApiJack *object = (RtApiJack *) info->object;
2558   object->stopStream();
2559   pthread_exit( NULL );
// Per-cycle JACK processing. Invokes the user callback (reporting any
// xrun flags set by jackXrun()), honors its stop/abort return codes via
// the drainCounter state machine, then moves audio between the user
// buffer(s) and JACK's per-port planar float buffers, converting format
// and interleaving as configured at open time. Returns SUCCESS to keep
// the client in JACK's process graph.
// Fix: the two error messages below previously said "RtApiCore::" —
// a copy-paste slip from the CoreAudio section; corrected to RtApiJack.
2562 bool RtApiJack :: callbackEvent( unsigned long nframes )
2564   if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2565   if ( stream_.state == STREAM_CLOSED ) {
2566     errorText_ = "RtApiJack::callbackEvent(): the stream is closed ... this shouldn't happen!";
2567     error( RtAudioError::WARNING );
// JACK fixes the period size at server start; a mismatch is unrecoverable.
2570   if ( stream_.bufferSize != nframes ) {
2571     errorText_ = "RtApiJack::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2572     error( RtAudioError::WARNING );
2576   CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2577   JackHandle *handle = (JackHandle *) stream_.apiHandle;
2579   // Check if we were draining the stream and signal is finished.
2580   if ( handle->drainCounter > 3 ) {
2581     ThreadHandle threadId;
2583     stream_.state = STREAM_STOPPING;
2584     if ( handle->internalDrain == true )
// Callback-initiated stop: stopStream() must run on another thread.
2585       pthread_create( &threadId, NULL, jackStopStream, info );
// User-initiated stop: wake the stopStream() call blocked on the condition.
2587       pthread_cond_signal( &handle->condition );
2591   // Invoke user callback first, to get fresh output data.
2592   if ( handle->drainCounter == 0 ) {
2593     RtAudioCallback callback = (RtAudioCallback) info->callback;
2594     double streamTime = getStreamTime();
2595     RtAudioStreamStatus status = 0;
2596     if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2597       status |= RTAUDIO_OUTPUT_UNDERFLOW;
2598       handle->xrun[0] = false;
2600     if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2601       status |= RTAUDIO_INPUT_OVERFLOW;
2602       handle->xrun[1] = false;
2604     int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2605                                   stream_.bufferSize, streamTime, status, info->userData );
// Return code 2 = abort immediately; 1 = stop after draining output.
2606     if ( cbReturnValue == 2 ) {
2607       stream_.state = STREAM_STOPPING;
2608       handle->drainCounter = 2;
2610       pthread_create( &id, NULL, jackStopStream, info );
2613     else if ( cbReturnValue == 1 ) {
2614       handle->drainCounter = 1;
2615       handle->internalDrain = true;
2619   jack_default_audio_sample_t *jackbuffer;
2620   unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2621   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2623     if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2625       for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2626         jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2627         memset( jackbuffer, 0, bufferBytes );
2631     else if ( stream_.doConvertBuffer[0] ) {
2633       convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2635       for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2636         jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2637         memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2640     else { // no buffer conversion
2641       for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2642         jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2643         memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2648   // Don't bother draining input
2649   if ( handle->drainCounter ) {
2650     handle->drainCounter++;
2654   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2656     if ( stream_.doConvertBuffer[1] ) {
2657       for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2658         jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2659         memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2661       convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2663     else { // no buffer conversion
2664       for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2665         jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2666         memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2672   RtApi::tickStreamTime();
2675 //******************** End of __UNIX_JACK__ *********************//
2678 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2680 // The ASIO API is designed around a callback scheme, so this
2681 // implementation is similar to that used for OS-X CoreAudio and Linux
2682 // Jack. The primary constraint with ASIO is that it only allows
2683 // access to a single driver at a time. Thus, it is not possible to
2684 // have more than one simultaneous RtAudio stream.
2686 // This implementation also requires a number of external ASIO files
2687 // and a few global variables. The ASIO callback scheme does not
2688 // allow for the passing of user data, so we must create a global
2689 // pointer to our callbackInfo structure.
2691 // On unix systems, we make use of a pthread condition variable.
2692 // Since there is no equivalent in Windows, I hacked something based
2693 // on information found in
2694 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2696 #include "asiosys.h"
2698 #include "iasiothiscallresolver.h"
2699 #include "asiodrivers.h"
// File-scope ASIO state: the ASIO callback scheme carries no user-data
// pointer, so the CallbackInfo must be reachable through a global.
2702 static AsioDrivers drivers;
2703 static ASIOCallbacks asioCallbacks;
2704 static ASIODriverInfo driverInfo;
2705 static CallbackInfo *asioCallbackInfo;
// Set by the ASIO message handler on overload; reported to the user callback.
2706 static bool asioXRun;
// Per-stream ASIO handle fields (stored in stream_.apiHandle).
2709   int drainCounter;       // Tracks callback counts when draining
2710   bool internalDrain;     // Indicates if stop is initiated from callback or not.
2711   ASIOBufferInfo *bufferInfos;
2715     :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2718 // Function declarations (definitions at end of section)
2719 static const char* getAsioErrorString( ASIOError result );
2720 static void sampleRateChanged( ASIOSampleRate sRate );
2721 static long asioMessages( long selector, long value, void* message, double* opt );
// Constructor: initialize COM for this thread and reset the driver list.
2723 RtApiAsio :: RtApiAsio()
// ASIO cannot run in a multi-threaded apartment. CoInitialize may be
// called beforehand, but it must request apartment threading (in which
// case CoInitialize returns S_FALSE here).
2728   coInitialized_ = false;
2729   HRESULT hr = CoInitialize( NULL );
2731     errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2732     error( RtAudioError::WARNING );
2734   coInitialized_ = true;
2736   drivers.removeCurrentDriver();
2737   driverInfo.asioVersion = 2;
2739   // See note in DirectSound implementation about GetDesktopWindow().
2740   driverInfo.sysRef = GetForegroundWindow();
// Destructor: close any open stream, then undo the CoInitialize done in
// the constructor (only if it actually succeeded there).
2743 RtApiAsio :: ~RtApiAsio()
2745   if ( stream_.state != STREAM_CLOSED ) closeStream();
2746   if ( coInitialized_ ) CoUninitialize();
// Number of installed ASIO drivers, as reported by the driver list.
2749 unsigned int RtApiAsio :: getDeviceCount( void )
2751   return (unsigned int) drivers.asioGetNumDev();
// Probe an ASIO driver by index: load and initialize it, query channel
// counts, supported sample rates, and the native data format (from the
// first channel, assumed representative). Because ASIO allows only one
// loaded driver at a time, results saved at stream-open time (devices_)
// are returned while a stream is open. info.probed stays false on failure.
2754 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2756   RtAudio::DeviceInfo info;
2757   info.probed = false;
2760   unsigned int nDevices = getDeviceCount();
2761   if ( nDevices == 0 ) {
2762     errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2763     error( RtAudioError::INVALID_USE );
2767   if ( device >= nDevices ) {
2768     errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2769     error( RtAudioError::INVALID_USE );
2773   // If a stream is already open, we cannot probe other devices.  Thus, use the saved results.
2774   if ( stream_.state != STREAM_CLOSED ) {
2775     if ( device >= devices_.size() ) {
2776       errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2777       error( RtAudioError::WARNING );
2780     return devices_[ device ];
2783   char driverName[32];
2784   ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2785   if ( result != ASE_OK ) {
2786     errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2787     errorText_ = errorStream_.str();
2788     error( RtAudioError::WARNING );
2792   info.name = driverName;
2794   if ( !drivers.loadDriver( driverName ) ) {
2795     errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2796     errorText_ = errorStream_.str();
2797     error( RtAudioError::WARNING );
2801   result = ASIOInit( &driverInfo );
2802   if ( result != ASE_OK ) {
2803     errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2804     errorText_ = errorStream_.str();
2805     error( RtAudioError::WARNING );
2809   // Determine the device channel information.
2810   long inputChannels, outputChannels;
2811   result = ASIOGetChannels( &inputChannels, &outputChannels );
2812   if ( result != ASE_OK ) {
2813     drivers.removeCurrentDriver();
2814     errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2815     errorText_ = errorStream_.str();
2816     error( RtAudioError::WARNING );
2820   info.outputChannels = outputChannels;
2821   info.inputChannels = inputChannels;
2822   if ( info.outputChannels > 0 && info.inputChannels > 0 )
2823     info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2825   // Determine the supported sample rates.
2826   info.sampleRates.clear();
2827   for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2828     result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2829     if ( result == ASE_OK ) {
2830       info.sampleRates.push_back( SAMPLE_RATES[i] );
// Prefer the highest supported rate that does not exceed 48 kHz.
2832       if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2833         info.preferredSampleRate = SAMPLE_RATES[i];
2837   // Determine supported data types ... just check first channel and assume rest are the same.
2838   ASIOChannelInfo channelInfo;
2839   channelInfo.channel = 0;
2840   channelInfo.isInput = true;
2841   if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2842   result = ASIOGetChannelInfo( &channelInfo );
2843   if ( result != ASE_OK ) {
2844     drivers.removeCurrentDriver();
2845     errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2846     errorText_ = errorStream_.str();
2847     error( RtAudioError::WARNING );
2851   info.nativeFormats = 0;
// Map the ASIO sample type to the corresponding RtAudio format flag;
// byte order is handled elsewhere, so MSB/LSB variants map identically.
2852   if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2853     info.nativeFormats |= RTAUDIO_SINT16;
2854   else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2855     info.nativeFormats |= RTAUDIO_SINT32;
2856   else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2857     info.nativeFormats |= RTAUDIO_FLOAT32;
2858   else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2859     info.nativeFormats |= RTAUDIO_FLOAT64;
2860   else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2861     info.nativeFormats |= RTAUDIO_SINT24;
2863   if ( info.outputChannels > 0 )
2864     if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2865   if ( info.inputChannels > 0 )
2866     if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// Unload the driver so subsequent probes/opens can load a different one.
2869   drivers.removeCurrentDriver();
2873 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2875 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2876 object->callbackEvent( index );
2879 void RtApiAsio :: saveDeviceInfo( void )
2883 unsigned int nDevices = getDeviceCount();
2884 devices_.resize( nDevices );
2885 for ( unsigned int i=0; i<nDevices; i++ )
2886 devices_[i] = getDeviceInfo( i );
2889 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2890 unsigned int firstChannel, unsigned int sampleRate,
2891 RtAudioFormat format, unsigned int *bufferSize,
2892 RtAudio::StreamOptions *options )
2893 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2895 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2897 // For ASIO, a duplex stream MUST use the same driver.
2898 if ( isDuplexInput && stream_.device[0] != device ) {
2899 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2903 char driverName[32];
2904 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2905 if ( result != ASE_OK ) {
2906 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2907 errorText_ = errorStream_.str();
2911 // Only load the driver once for duplex stream.
2912 if ( !isDuplexInput ) {
2913 // The getDeviceInfo() function will not work when a stream is open
2914 // because ASIO does not allow multiple devices to run at the same
2915 // time. Thus, we'll probe the system before opening a stream and
2916 // save the results for use by getDeviceInfo().
2917 this->saveDeviceInfo();
2919 if ( !drivers.loadDriver( driverName ) ) {
2920 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2921 errorText_ = errorStream_.str();
2925 result = ASIOInit( &driverInfo );
2926 if ( result != ASE_OK ) {
2927 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2928 errorText_ = errorStream_.str();
2933 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
2934 bool buffersAllocated = false;
2935 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
2936 unsigned int nChannels;
2939 // Check the device channel count.
2940 long inputChannels, outputChannels;
2941 result = ASIOGetChannels( &inputChannels, &outputChannels );
2942 if ( result != ASE_OK ) {
2943 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2944 errorText_ = errorStream_.str();
2948 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
2949 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
2950 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
2951 errorText_ = errorStream_.str();
2954 stream_.nDeviceChannels[mode] = channels;
2955 stream_.nUserChannels[mode] = channels;
2956 stream_.channelOffset[mode] = firstChannel;
2958 // Verify the sample rate is supported.
2959 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
2960 if ( result != ASE_OK ) {
2961 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
2962 errorText_ = errorStream_.str();
2966 // Get the current sample rate
2967 ASIOSampleRate currentRate;
2968 result = ASIOGetSampleRate( ¤tRate );
2969 if ( result != ASE_OK ) {
2970 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
2971 errorText_ = errorStream_.str();
2975 // Set the sample rate only if necessary
2976 if ( currentRate != sampleRate ) {
2977 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
2978 if ( result != ASE_OK ) {
2979 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
2980 errorText_ = errorStream_.str();
2985 // Determine the driver data type.
2986 ASIOChannelInfo channelInfo;
2987 channelInfo.channel = 0;
2988 if ( mode == OUTPUT ) channelInfo.isInput = false;
2989 else channelInfo.isInput = true;
2990 result = ASIOGetChannelInfo( &channelInfo );
2991 if ( result != ASE_OK ) {
2992 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
2993 errorText_ = errorStream_.str();
2997 // Assuming WINDOWS host is always little-endian.
2998 stream_.doByteSwap[mode] = false;
2999 stream_.userFormat = format;
3000 stream_.deviceFormat[mode] = 0;
3001 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3002 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3003 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3005 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3006 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3007 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3009 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3010 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3011 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3013 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3014 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3015 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3017 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3018 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3019 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3022 if ( stream_.deviceFormat[mode] == 0 ) {
3023 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3024 errorText_ = errorStream_.str();
3028 // Set the buffer size. For a duplex stream, this will end up
3029 // setting the buffer size based on the input constraints, which
3031 long minSize, maxSize, preferSize, granularity;
3032 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3033 if ( result != ASE_OK ) {
3034 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3035 errorText_ = errorStream_.str();
3039 if ( isDuplexInput ) {
3040 // When this is the duplex input (output was opened before), then we have to use the same
3041 // buffersize as the output, because it might use the preferred buffer size, which most
3042 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3043 // So instead of throwing an error, make them equal. The caller uses the reference
3044 // to the "bufferSize" param as usual to set up processing buffers.
3046 *bufferSize = stream_.bufferSize;
3049 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3050 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3051 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3052 else if ( granularity == -1 ) {
3053 // Make sure bufferSize is a power of two.
3054 int log2_of_min_size = 0;
3055 int log2_of_max_size = 0;
3057 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3058 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3059 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3062 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3063 int min_delta_num = log2_of_min_size;
3065 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3066 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3067 if (current_delta < min_delta) {
3068 min_delta = current_delta;
3073 *bufferSize = ( (unsigned int)1 << min_delta_num );
3074 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3075 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3077 else if ( granularity != 0 ) {
3078 // Set to an even multiple of granularity, rounding up.
3079 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3084 // we don't use it anymore, see above!
3085 // Just left it here for the case...
3086 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3087 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3092 stream_.bufferSize = *bufferSize;
3093 stream_.nBuffers = 2;
3095 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3096 else stream_.userInterleaved = true;
3098 // ASIO always uses non-interleaved buffers.
3099 stream_.deviceInterleaved[mode] = false;
3101 // Allocate, if necessary, our AsioHandle structure for the stream.
3102 if ( handle == 0 ) {
3104 handle = new AsioHandle;
3106 catch ( std::bad_alloc& ) {
3107 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3110 handle->bufferInfos = 0;
3112 // Create a manual-reset event.
3113 handle->condition = CreateEvent( NULL, // no security
3114 TRUE, // manual-reset
3115 FALSE, // non-signaled initially
3117 stream_.apiHandle = (void *) handle;
3120 // Create the ASIO internal buffers. Since RtAudio sets up input
3121 // and output separately, we'll have to dispose of previously
3122 // created output buffers for a duplex stream.
3123 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3124 ASIODisposeBuffers();
3125 if ( handle->bufferInfos ) free( handle->bufferInfos );
3128 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3130 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3131 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3132 if ( handle->bufferInfos == NULL ) {
3133 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3134 errorText_ = errorStream_.str();
3138 ASIOBufferInfo *infos;
3139 infos = handle->bufferInfos;
3140 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3141 infos->isInput = ASIOFalse;
3142 infos->channelNum = i + stream_.channelOffset[0];
3143 infos->buffers[0] = infos->buffers[1] = 0;
3145 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3146 infos->isInput = ASIOTrue;
3147 infos->channelNum = i + stream_.channelOffset[1];
3148 infos->buffers[0] = infos->buffers[1] = 0;
3151 // prepare for callbacks
3152 stream_.sampleRate = sampleRate;
3153 stream_.device[mode] = device;
3154 stream_.mode = isDuplexInput ? DUPLEX : mode;
3156 // store this class instance before registering callbacks, that are going to use it
3157 asioCallbackInfo = &stream_.callbackInfo;
3158 stream_.callbackInfo.object = (void *) this;
3160 // Set up the ASIO callback structure and create the ASIO data buffers.
3161 asioCallbacks.bufferSwitch = &bufferSwitch;
3162 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3163 asioCallbacks.asioMessage = &asioMessages;
3164 asioCallbacks.bufferSwitchTimeInfo = NULL;
3165 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3166 if ( result != ASE_OK ) {
3167 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3168 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
3169 // in that case, let's be naïve and try that instead
3170 *bufferSize = preferSize;
3171 stream_.bufferSize = *bufferSize;
3172 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3175 if ( result != ASE_OK ) {
3176 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3177 errorText_ = errorStream_.str();
3180 buffersAllocated = true;
3181 stream_.state = STREAM_STOPPED;
3183 // Set flags for buffer conversion.
3184 stream_.doConvertBuffer[mode] = false;
3185 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3186 stream_.doConvertBuffer[mode] = true;
3187 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3188 stream_.nUserChannels[mode] > 1 )
3189 stream_.doConvertBuffer[mode] = true;
3191 // Allocate necessary internal buffers
3192 unsigned long bufferBytes;
3193 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3194 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3195 if ( stream_.userBuffer[mode] == NULL ) {
3196 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3200 if ( stream_.doConvertBuffer[mode] ) {
3202 bool makeBuffer = true;
3203 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3204 if ( isDuplexInput && stream_.deviceBuffer ) {
3205 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3206 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3210 bufferBytes *= *bufferSize;
3211 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3212 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3213 if ( stream_.deviceBuffer == NULL ) {
3214 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3220 // Determine device latencies
3221 long inputLatency, outputLatency;
3222 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3223 if ( result != ASE_OK ) {
3224 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3225 errorText_ = errorStream_.str();
3226 error( RtAudioError::WARNING); // warn but don't fail
3229 stream_.latency[0] = outputLatency;
3230 stream_.latency[1] = inputLatency;
3233 // Setup the buffer conversion information structure. We don't use
3234 // buffers to do channel offsets, so we override that parameter
3236 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3241 if ( !isDuplexInput ) {
3242 // the cleanup for error in the duplex input, is done by RtApi::openStream
3243 // So we clean up for single channel only
3245 if ( buffersAllocated )
3246 ASIODisposeBuffers();
3248 drivers.removeCurrentDriver();
3251 CloseHandle( handle->condition );
3252 if ( handle->bufferInfos )
3253 free( handle->bufferInfos );
3256 stream_.apiHandle = 0;
3260 if ( stream_.userBuffer[mode] ) {
3261 free( stream_.userBuffer[mode] );
3262 stream_.userBuffer[mode] = 0;
3265 if ( stream_.deviceBuffer ) {
3266 free( stream_.deviceBuffer );
3267 stream_.deviceBuffer = 0;
3272 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3274 void RtApiAsio :: closeStream()
3276 if ( stream_.state == STREAM_CLOSED ) {
3277 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3278 error( RtAudioError::WARNING );
3282 if ( stream_.state == STREAM_RUNNING ) {
3283 stream_.state = STREAM_STOPPED;
3286 ASIODisposeBuffers();
3287 drivers.removeCurrentDriver();
3289 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3291 CloseHandle( handle->condition );
3292 if ( handle->bufferInfos )
3293 free( handle->bufferInfos );
3295 stream_.apiHandle = 0;
3298 for ( int i=0; i<2; i++ ) {
3299 if ( stream_.userBuffer[i] ) {
3300 free( stream_.userBuffer[i] );
3301 stream_.userBuffer[i] = 0;
3305 if ( stream_.deviceBuffer ) {
3306 free( stream_.deviceBuffer );
3307 stream_.deviceBuffer = 0;
3310 stream_.mode = UNINITIALIZED;
3311 stream_.state = STREAM_CLOSED;
// Coordination flag for stream shutdown; cleared by startStream().
// NOTE(review): the code that sets it to true is not visible in this
// chunk — presumably the stop path when invoked from the callback
// thread; confirm against the full file.
bool stopThreadCalled = false;
3316 void RtApiAsio :: startStream()
3319 if ( stream_.state == STREAM_RUNNING ) {
3320 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3321 error( RtAudioError::WARNING );
3325 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3326 ASIOError result = ASIOStart();
3327 if ( result != ASE_OK ) {
3328 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3329 errorText_ = errorStream_.str();
3333 handle->drainCounter = 0;
3334 handle->internalDrain = false;
3335 ResetEvent( handle->condition );
3336 stream_.state = STREAM_RUNNING;
3340 stopThreadCalled = false;
3342 if ( result == ASE_OK ) return;
3343 error( RtAudioError::SYSTEM_ERROR );
3346 void RtApiAsio :: stopStream()
3349 if ( stream_.state == STREAM_STOPPED ) {
3350 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3351 error( RtAudioError::WARNING );
3355 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3356 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3357 if ( handle->drainCounter == 0 ) {
3358 handle->drainCounter = 2;
3359 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3363 stream_.state = STREAM_STOPPED;
3365 ASIOError result = ASIOStop();
3366 if ( result != ASE_OK ) {
3367 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3368 errorText_ = errorStream_.str();
3371 if ( result == ASE_OK ) return;
3372 error( RtAudioError::SYSTEM_ERROR );
3375 void RtApiAsio :: abortStream()
3378 if ( stream_.state == STREAM_STOPPED ) {
3379 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3380 error( RtAudioError::WARNING );
3384 // The following lines were commented-out because some behavior was
3385 // noted where the device buffers need to be zeroed to avoid
3386 // continuing sound, even when the device buffers are completely
3387 // disposed. So now, calling abort is the same as calling stop.
3388 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3389 // handle->drainCounter = 2;
3393 // This function will be called by a spawned thread when the user
3394 // callback function signals that the stream should be stopped or
3395 // aborted. It is necessary to handle it this way because the
3396 // callbackEvent() function must return before the ASIOStop()
3397 // function will return.
3398 static unsigned __stdcall asioStopStream( void *ptr )
3400 CallbackInfo *info = (CallbackInfo *) ptr;
3401 RtApiAsio *object = (RtApiAsio *) info->object;
3403 object->stopStream();
3408 bool RtApiAsio :: callbackEvent( long bufferIndex )
3410 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3411 if ( stream_.state == STREAM_CLOSED ) {
3412 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3413 error( RtAudioError::WARNING );
3417 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3418 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3420 // Check if we were draining the stream and signal if finished.
3421 if ( handle->drainCounter > 3 ) {
3423 stream_.state = STREAM_STOPPING;
3424 if ( handle->internalDrain == false )
3425 SetEvent( handle->condition );
3426 else { // spawn a thread to stop the stream
3428 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3429 &stream_.callbackInfo, 0, &threadId );
3434 // Invoke user callback to get fresh output data UNLESS we are
3436 if ( handle->drainCounter == 0 ) {
3437 RtAudioCallback callback = (RtAudioCallback) info->callback;
3438 double streamTime = getStreamTime();
3439 RtAudioStreamStatus status = 0;
3440 if ( stream_.mode != INPUT && asioXRun == true ) {
3441 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3444 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3445 status |= RTAUDIO_INPUT_OVERFLOW;
3448 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3449 stream_.bufferSize, streamTime, status, info->userData );
3450 if ( cbReturnValue == 2 ) {
3451 stream_.state = STREAM_STOPPING;
3452 handle->drainCounter = 2;
3454 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3455 &stream_.callbackInfo, 0, &threadId );
3458 else if ( cbReturnValue == 1 ) {
3459 handle->drainCounter = 1;
3460 handle->internalDrain = true;
3464 unsigned int nChannels, bufferBytes, i, j;
3465 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3466 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3468 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3470 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3472 for ( i=0, j=0; i<nChannels; i++ ) {
3473 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3474 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
3478 else if ( stream_.doConvertBuffer[0] ) {
3480 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3481 if ( stream_.doByteSwap[0] )
3482 byteSwapBuffer( stream_.deviceBuffer,
3483 stream_.bufferSize * stream_.nDeviceChannels[0],
3484 stream_.deviceFormat[0] );
3486 for ( i=0, j=0; i<nChannels; i++ ) {
3487 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3488 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3489 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
3495 if ( stream_.doByteSwap[0] )
3496 byteSwapBuffer( stream_.userBuffer[0],
3497 stream_.bufferSize * stream_.nUserChannels[0],
3498 stream_.userFormat );
3500 for ( i=0, j=0; i<nChannels; i++ ) {
3501 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3502 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3503 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3509 // Don't bother draining input
3510 if ( handle->drainCounter ) {
3511 handle->drainCounter++;
3515 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3517 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3519 if (stream_.doConvertBuffer[1]) {
3521 // Always interleave ASIO input data.
3522 for ( i=0, j=0; i<nChannels; i++ ) {
3523 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3524 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3525 handle->bufferInfos[i].buffers[bufferIndex],
3529 if ( stream_.doByteSwap[1] )
3530 byteSwapBuffer( stream_.deviceBuffer,
3531 stream_.bufferSize * stream_.nDeviceChannels[1],
3532 stream_.deviceFormat[1] );
3533 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
3537 for ( i=0, j=0; i<nChannels; i++ ) {
3538 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3539 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3540 handle->bufferInfos[i].buffers[bufferIndex],
3545 if ( stream_.doByteSwap[1] )
3546 byteSwapBuffer( stream_.userBuffer[1],
3547 stream_.bufferSize * stream_.nUserChannels[1],
3548 stream_.userFormat );
3553 // The following call was suggested by Malte Clasen. While the API
3554 // documentation indicates it should not be required, some device
3555 // drivers apparently do not function correctly without it.
3558 RtApi::tickStreamTime();
3562 static void sampleRateChanged( ASIOSampleRate sRate )
3564 // The ASIO documentation says that this usually only happens during
3565 // external sync. Audio processing is not stopped by the driver,
3566 // actual sample rate might not have even changed, maybe only the
3567 // sample rate status of an AES/EBU or S/PDIF digital input at the
3570 RtApi *object = (RtApi *) asioCallbackInfo->object;
3572 object->stopStream();
3574 catch ( RtAudioError &exception ) {
3575 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3579 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
3582 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3586 switch( selector ) {
3587 case kAsioSelectorSupported:
3588 if ( value == kAsioResetRequest
3589 || value == kAsioEngineVersion
3590 || value == kAsioResyncRequest
3591 || value == kAsioLatenciesChanged
3592 // The following three were added for ASIO 2.0, you don't
3593 // necessarily have to support them.
3594 || value == kAsioSupportsTimeInfo
3595 || value == kAsioSupportsTimeCode
3596 || value == kAsioSupportsInputMonitor)
3599 case kAsioResetRequest:
3600 // Defer the task and perform the reset of the driver during the
3601 // next "safe" situation. You cannot reset the driver right now,
3602 // as this code is called from the driver. Reset the driver is
3603 // done by completely destruct is. I.e. ASIOStop(),
3604 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3606 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3609 case kAsioResyncRequest:
3610 // This informs the application that the driver encountered some
3611 // non-fatal data loss. It is used for synchronization purposes
3612 // of different media. Added mainly to work around the Win16Mutex
3613 // problems in Windows 95/98 with the Windows Multimedia system,
3614 // which could lose data because the Mutex was held too long by
3615 // another thread. However a driver can issue it in other
3617 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3621 case kAsioLatenciesChanged:
3622 // This will inform the host application that the drivers were
3623 // latencies changed. Beware, it this does not mean that the
3624 // buffer sizes have changed! You might need to update internal
3626 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3629 case kAsioEngineVersion:
3630 // Return the supported ASIO version of the host application. If
3631 // a host application does not implement this selector, ASIO 1.0
3632 // is assumed by the driver.
3635 case kAsioSupportsTimeInfo:
3636 // Informs the driver whether the
3637 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3638 // For compatibility with ASIO 1.0 drivers the host application
3639 // should always support the "old" bufferSwitch method, too.
3642 case kAsioSupportsTimeCode:
3643 // Informs the driver whether application is interested in time
3644 // code info. If an application does not need to know about time
3645 // code, the driver has less work to do.
3652 static const char* getAsioErrorString( ASIOError result )
3660 static const Messages m[] =
3662 { ASE_NotPresent, "Hardware input or output is not present or available." },
3663 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3664 { ASE_InvalidParameter, "Invalid input parameter." },
3665 { ASE_InvalidMode, "Invalid mode." },
3666 { ASE_SPNotAdvancing, "Sample position not advancing." },
3667 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3668 { ASE_NoMemory, "Not enough memory to complete the request." }
3671 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3672 if ( m[i].value == result ) return m[i].message;
3674 return "Unknown error.";
3677 //******************** End of __WINDOWS_ASIO__ *********************//
3681 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3683 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3684 // - Introduces support for the Windows WASAPI API
3685 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3686 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3687 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3692 #include <audioclient.h>
3694 #include <mmdeviceapi.h>
3695 #include <functiondiscoverykeys_devpkey.h>
3697 //=============================================================================
3699 #define SAFE_RELEASE( objectPtr )\
3702 objectPtr->Release();\
3706 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3708 //-----------------------------------------------------------------------------
3710 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3711 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3712 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3713 // provide intermediate storage for read / write synchronization.
3727 // sets the length of the internal ring buffer
3728 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3731 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3733 bufferSize_ = bufferSize;
3738 // attempt to push a buffer into the ring buffer at the current "in" index
3739 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
// Attempt to push a buffer of samples into the ring buffer at the current
// "in" index. Returns false (copying nothing) when the incoming buffer is
// invalid or there is not enough free space between the "in" and "out"
// indices; returns true once the samples are committed.
// NOTE(review): bufferSize and bufferSize_ are counted in samples of
// `format` (not bytes) -- presumably guaranteed by the caller; confirm.
3741 if ( !buffer || // incoming buffer is NULL
3742 bufferSize == 0 || // incoming buffer has no data
3743 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "out" index so the span [inIndex_, inIndexEnd) can be compared
// linearly against it even when that span wraps around the end of the ring.
3748 unsigned int relOutIndex = outIndex_;
3749 unsigned int inIndexEnd = inIndex_ + bufferSize;
3750 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3751 relOutIndex += bufferSize_;
3754 // "in" index can end on the "out" index but cannot begin at it
3755 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
3756 return false; // not enough space between "in" index and "out" index
3759 // copy buffer from external to internal
// fromZeroSize: samples that wrap past the ring's end and are written at
// offset zero. fromInSize: the remainder, written contiguously at inIndex_.
3760 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3761 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3762 int fromInSize = bufferSize - fromZeroSize;
// Per-format copy: first memcpy fills the contiguous tail at inIndex_,
// second memcpy writes the wrapped head at offset zero (no-op when
// fromZeroSize == 0). Offsets are scaled by the sample type's size.
3767 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3768 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3770 case RTAUDIO_SINT16:
3771 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3772 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3774 case RTAUDIO_SINT24:
3775 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3776 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3778 case RTAUDIO_SINT32:
3779 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3780 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3782 case RTAUDIO_FLOAT32:
3783 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3784 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3786 case RTAUDIO_FLOAT64:
3787 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3788 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3792 // update "in" index
// Advance and wrap the producer index only after the data is committed.
3793 inIndex_ += bufferSize;
3794 inIndex_ %= bufferSize_;
3799 // attempt to pull a buffer from the ring buffer from the current "out" index
3800 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
// Returns false (copying nothing) when the request is invalid or fewer than
// bufferSize samples are available between "out" and "in"; returns true
// after copying bufferSize samples of `format` into the caller's buffer.
3802 if ( !buffer || // incoming buffer is NULL
3803 bufferSize == 0 || // incoming buffer has no data
3804 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "in" index so the span [outIndex_, outIndexEnd) can be compared
// linearly against it even when that span wraps around the end of the ring.
3809 unsigned int relInIndex = inIndex_;
3810 unsigned int outIndexEnd = outIndex_ + bufferSize;
3811 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3812 relInIndex += bufferSize_;
3815 // "out" index can begin at and end on the "in" index
3816 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
3817 return false; // not enough space between "out" index and "in" index
3820 // copy buffer from internal to external
// fromZeroSize: samples that wrap past the ring's end and are read from
// offset zero. fromOutSize: the remainder, read contiguously at outIndex_.
3821 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3822 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3823 int fromOutSize = bufferSize - fromZeroSize;
// Per-format copy: contiguous tail from outIndex_ first, then the wrapped
// head from offset zero (no-op when fromZeroSize == 0).
3828 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3829 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3831 case RTAUDIO_SINT16:
3832 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3833 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3835 case RTAUDIO_SINT24:
3836 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3837 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3839 case RTAUDIO_SINT32:
3840 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3841 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3843 case RTAUDIO_FLOAT32:
3844 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3845 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3847 case RTAUDIO_FLOAT64:
3848 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3849 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3853 // update "out" index
// Advance and wrap the consumer index only after the data is copied out.
3854 outIndex_ += bufferSize;
3855 outIndex_ %= bufferSize_;
// Ring state: total capacity in samples, plus the producer ("in") and
// consumer ("out") sample indices into buffer_.
3862 unsigned int bufferSize_;
3863 unsigned int inIndex_;
3864 unsigned int outIndex_;
3867 //-----------------------------------------------------------------------------
3869 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3870 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
3871 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
3872 // This sample rate converter works best with conversions between one rate and its multiple.
3873 void convertBufferWasapi( char* outBuffer,
3874 const char* inBuffer,
3875 const unsigned int& channelCount,
3876 const unsigned int& inSampleRate,
3877 const unsigned int& outSampleRate,
3878 const unsigned int& inSampleCount,
3879 unsigned int& outSampleCount,
3880 const RtAudioFormat& format )
// Converts `inSampleCount` interleaved frames from inSampleRate to
// outSampleRate, writing the result to outBuffer and reporting the produced
// frame count through the out-parameter `outSampleCount`.  When one rate is
// an integer multiple of the other, frames are copied nearest-neighbor;
// otherwise adjacent frames are linearly interpolated.
3882 // calculate the new outSampleCount and relative sampleStep
3883 float sampleRatio = ( float ) outSampleRate / inSampleRate;
3884 float sampleRatioInv = ( float ) 1 / sampleRatio;
3885 float sampleStep = 1.0f / sampleRatio;
3886 float inSampleFraction = 0.0f;
3888 // for cmath functions
3889 using namespace std;
3891 outSampleCount = ( unsigned int ) roundf( inSampleCount * sampleRatio );
3893 // if inSampleRate is a multiple of outSampleRate (or vice versa) there's no need to interpolate
3894 if ( floor( sampleRatio ) == sampleRatio || floor( sampleRatioInv ) == sampleRatioInv )
3896 // frame-by-frame, copy each relative input sample into its corresponding output sample
3897 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
// Nearest-neighbor: truncate the fractional input position to a frame index.
3899 unsigned int inSample = ( unsigned int ) inSampleFraction;
3904 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
3906 case RTAUDIO_SINT16:
3907 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
3909 case RTAUDIO_SINT24:
3910 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
3912 case RTAUDIO_SINT32:
3913 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
3915 case RTAUDIO_FLOAT32:
3916 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
3918 case RTAUDIO_FLOAT64:
3919 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
3923 // jump to next in sample
3924 inSampleFraction += sampleStep;
3927 else // else interpolate
3929 // frame-by-frame, copy each relative input sample into its corresponding output sample
3930 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
// Split the fractional input position into a base frame and a weight.
3932 unsigned int inSample = ( unsigned int ) inSampleFraction;
3933 float inSampleDec = inSampleFraction - inSample;
3934 unsigned int frameInSample = inSample * channelCount;
3935 unsigned int frameOutSample = outSample * channelCount;
// NOTE(review): each branch below also reads the NEXT input frame
// (frameInSample + channelCount + channel).  For the final output frame
// this can index one frame past inBuffer's inSampleCount frames --
// presumably the caller over-allocates; confirm before relying on it.
// NOTE(review): the SINT8 branch interpolates with plain `char`, whose
// signedness is implementation-defined; signed 8-bit math seems intended.
3941 for ( unsigned int channel = 0; channel < channelCount; channel++ )
3943 char fromSample = ( ( char* ) inBuffer )[ frameInSample + channel ];
3944 char toSample = ( ( char* ) inBuffer )[ frameInSample + channelCount + channel ];
3945 char sampleDiff = ( char ) ( ( toSample - fromSample ) * inSampleDec );
3946 ( ( char* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
3950 case RTAUDIO_SINT16:
3952 for ( unsigned int channel = 0; channel < channelCount; channel++ )
3954 short fromSample = ( ( short* ) inBuffer )[ frameInSample + channel ];
3955 short toSample = ( ( short* ) inBuffer )[ frameInSample + channelCount + channel ];
3956 short sampleDiff = ( short ) ( ( toSample - fromSample ) * inSampleDec );
3957 ( ( short* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
3961 case RTAUDIO_SINT24:
3963 for ( unsigned int channel = 0; channel < channelCount; channel++ )
3965 int fromSample = ( ( S24* ) inBuffer )[ frameInSample + channel ].asInt();
3966 int toSample = ( ( S24* ) inBuffer )[ frameInSample + channelCount + channel ].asInt();
3967 int sampleDiff = ( int ) ( ( toSample - fromSample ) * inSampleDec );
3968 ( ( S24* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
3972 case RTAUDIO_SINT32:
3974 for ( unsigned int channel = 0; channel < channelCount; channel++ )
3976 int fromSample = ( ( int* ) inBuffer )[ frameInSample + channel ];
3977 int toSample = ( ( int* ) inBuffer )[ frameInSample + channelCount + channel ];
3978 int sampleDiff = ( int ) ( ( toSample - fromSample ) * inSampleDec );
3979 ( ( int* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
3983 case RTAUDIO_FLOAT32:
3985 for ( unsigned int channel = 0; channel < channelCount; channel++ )
3987 float fromSample = ( ( float* ) inBuffer )[ frameInSample + channel ];
3988 float toSample = ( ( float* ) inBuffer )[ frameInSample + channelCount + channel ];
3989 float sampleDiff = ( toSample - fromSample ) * inSampleDec;
3990 ( ( float* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
3994 case RTAUDIO_FLOAT64:
3996 for ( unsigned int channel = 0; channel < channelCount; channel++ )
3998 double fromSample = ( ( double* ) inBuffer )[ frameInSample + channel ];
3999 double toSample = ( ( double* ) inBuffer )[ frameInSample + channelCount + channel ];
4000 double sampleDiff = ( toSample - fromSample ) * inSampleDec;
4001 ( ( double* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
4007 // jump to next in sample
4008 inSampleFraction += sampleStep;
4013 //-----------------------------------------------------------------------------
4015 // A structure to hold various information related to the WASAPI implementation.
// Owns (via SAFE_RELEASE / CloseHandle elsewhere) the per-stream COM clients
// and the Win32 events that signal buffer availability.
4018 IAudioClient* captureAudioClient;
4019 IAudioClient* renderAudioClient;
4020 IAudioCaptureClient* captureClient;
4021 IAudioRenderClient* renderClient;
4022 HANDLE captureEvent;
// Default-construct with every interface pointer and event handle NULL so
// cleanup code can safely test each member before releasing it.
4026 : captureAudioClient( NULL ),
4027 renderAudioClient( NULL ),
4028 captureClient( NULL ),
4029 renderClient( NULL ),
4030 captureEvent( NULL ),
4031 renderEvent( NULL ) {}
4034 //=============================================================================
4036 RtApiWasapi::RtApiWasapi()
4037 : coInitialized_( false ), deviceEnumerator_( NULL )
// Constructor: initializes COM for this thread (remembering whether we own
// the CoInitialize so the destructor only CoUninitializes what we started)
// and creates the MMDevice enumerator used by all device queries.
4039 // WASAPI can run either apartment or multi-threaded
4040 HRESULT hr = CoInitialize( NULL );
// !FAILED(hr) covers both S_OK and S_FALSE (COM already initialized on this
// thread in a compatible mode).
4041 if ( !FAILED( hr ) )
4042 coInitialized_ = true;
4044 // Instantiate device enumerator
4045 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4046 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4047 ( void** ) &deviceEnumerator_ );
4049 if ( FAILED( hr ) ) {
4050 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
4051 error( RtAudioError::DRIVER_ERROR );
4055 //-----------------------------------------------------------------------------
4057 RtApiWasapi::~RtApiWasapi()
// Destructor: closes any open stream, releases the device enumerator, and
// balances the constructor's CoInitialize only if it succeeded there.
4059 if ( stream_.state != STREAM_CLOSED )
4062 SAFE_RELEASE( deviceEnumerator_ );
4064 // If this object previously called CoInitialize()
4065 if ( coInitialized_ )
4069 //=============================================================================
4071 unsigned int RtApiWasapi::getDeviceCount( void )
// Returns the combined number of active capture and render endpoints.
// On any enumeration failure, errorText_ is set and DRIVER_ERROR is raised.
4073 unsigned int captureDeviceCount = 0;
4074 unsigned int renderDeviceCount = 0;
4076 IMMDeviceCollection* captureDevices = NULL;
4077 IMMDeviceCollection* renderDevices = NULL;
4079 // Count capture devices
4081 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4082 if ( FAILED( hr ) ) {
4083 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4087 hr = captureDevices->GetCount( &captureDeviceCount );
4088 if ( FAILED( hr ) ) {
4089 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4093 // Count render devices
4094 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4095 if ( FAILED( hr ) ) {
4096 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4100 hr = renderDevices->GetCount( &renderDeviceCount );
4101 if ( FAILED( hr ) ) {
4102 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4107 // release all references
4108 SAFE_RELEASE( captureDevices );
4109 SAFE_RELEASE( renderDevices );
// An empty errorText_ doubles as the success flag for the calls above.
4111 if ( errorText_.empty() )
4112 return captureDeviceCount + renderDeviceCount;
4114 error( RtAudioError::DRIVER_ERROR );
4118 //-----------------------------------------------------------------------------
4120 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
// Probes one endpoint of the combined device index space: indices
// [0, renderDeviceCount) are render devices, the remainder are capture
// devices (see the Item() calls below).  Fills name, default flags, channel
// counts, supported sample rates, and native formats from the mix format.
4122 RtAudio::DeviceInfo info;
4123 unsigned int captureDeviceCount = 0;
4124 unsigned int renderDeviceCount = 0;
4125 std::string defaultDeviceName;
4126 bool isCaptureDevice = false;
4128 PROPVARIANT deviceNameProp;
4129 PROPVARIANT defaultDeviceNameProp;
4131 IMMDeviceCollection* captureDevices = NULL;
4132 IMMDeviceCollection* renderDevices = NULL;
4133 IMMDevice* devicePtr = NULL;
4134 IMMDevice* defaultDevicePtr = NULL;
4135 IAudioClient* audioClient = NULL;
4136 IPropertyStore* devicePropStore = NULL;
4137 IPropertyStore* defaultDevicePropStore = NULL;
4139 WAVEFORMATEX* deviceFormat = NULL;
4140 WAVEFORMATEX* closestMatchFormat = NULL;
// probed stays false until the probe completes successfully.
4143 info.probed = false;
4145 // Count capture devices
4147 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4148 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4149 if ( FAILED( hr ) ) {
4150 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4154 hr = captureDevices->GetCount( &captureDeviceCount );
4155 if ( FAILED( hr ) ) {
4156 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4160 // Count render devices
4161 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4162 if ( FAILED( hr ) ) {
4163 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4167 hr = renderDevices->GetCount( &renderDeviceCount );
4168 if ( FAILED( hr ) ) {
4169 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4173 // validate device index
4174 if ( device >= captureDeviceCount + renderDeviceCount ) {
4175 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4176 errorType = RtAudioError::INVALID_USE;
4180 // determine whether index falls within capture or render devices
4181 if ( device >= renderDeviceCount ) {
4182 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4183 if ( FAILED( hr ) ) {
4184 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4187 isCaptureDevice = true;
4190 hr = renderDevices->Item( device, &devicePtr );
4191 if ( FAILED( hr ) ) {
4192 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4195 isCaptureDevice = false;
4198 // get default device name
// The default flags below are computed by comparing friendly names, so the
// default endpoint's name is fetched first.
4199 if ( isCaptureDevice ) {
4200 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4201 if ( FAILED( hr ) ) {
4202 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4207 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4208 if ( FAILED( hr ) ) {
4209 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4214 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4215 if ( FAILED( hr ) ) {
4216 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4219 PropVariantInit( &defaultDeviceNameProp );
4221 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4222 if ( FAILED( hr ) ) {
4223 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4227 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4230 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4231 if ( FAILED( hr ) ) {
4232 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4236 PropVariantInit( &deviceNameProp );
4238 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4239 if ( FAILED( hr ) ) {
4240 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4244 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
// A device is "default" iff its friendly name matches the default
// endpoint's friendly name for the matching direction.
4247 if ( isCaptureDevice ) {
4248 info.isDefaultInput = info.name == defaultDeviceName;
4249 info.isDefaultOutput = false;
4252 info.isDefaultInput = false;
4253 info.isDefaultOutput = info.name == defaultDeviceName;
4257 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4258 if ( FAILED( hr ) ) {
4259 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4263 hr = audioClient->GetMixFormat( &deviceFormat );
4264 if ( FAILED( hr ) ) {
4265 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4269 if ( isCaptureDevice ) {
4270 info.inputChannels = deviceFormat->nChannels;
4271 info.outputChannels = 0;
4272 info.duplexChannels = 0;
4275 info.inputChannels = 0;
4276 info.outputChannels = deviceFormat->nChannels;
4277 info.duplexChannels = 0;
4281 info.sampleRates.clear();
4283 // allow support for all sample rates as we have a built-in sample rate converter
4284 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4285 info.sampleRates.push_back( SAMPLE_RATES[i] );
4287 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
// Map the shared-mode mix format (WAVEFORMATEX or EXTENSIBLE) onto the
// RtAudio native-format bitmask.
4290 info.nativeFormats = 0;
4292 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4293 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4294 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4296 if ( deviceFormat->wBitsPerSample == 32 ) {
4297 info.nativeFormats |= RTAUDIO_FLOAT32;
4299 else if ( deviceFormat->wBitsPerSample == 64 ) {
4300 info.nativeFormats |= RTAUDIO_FLOAT64;
4303 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4304 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4305 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4307 if ( deviceFormat->wBitsPerSample == 8 ) {
4308 info.nativeFormats |= RTAUDIO_SINT8;
4310 else if ( deviceFormat->wBitsPerSample == 16 ) {
4311 info.nativeFormats |= RTAUDIO_SINT16;
4313 else if ( deviceFormat->wBitsPerSample == 24 ) {
4314 info.nativeFormats |= RTAUDIO_SINT24;
4316 else if ( deviceFormat->wBitsPerSample == 32 ) {
4317 info.nativeFormats |= RTAUDIO_SINT32;
4325 // release all references
// NOTE(review): if an early failure reaches this cleanup before the
// PropVariantInit calls above have run (the intervening jumps are not
// visible here), PropVariantClear would operate on uninitialized
// PROPVARIANTs -- confirm the error-exit structure initializes them first.
4326 PropVariantClear( &deviceNameProp );
4327 PropVariantClear( &defaultDeviceNameProp );
4329 SAFE_RELEASE( captureDevices );
4330 SAFE_RELEASE( renderDevices );
4331 SAFE_RELEASE( devicePtr );
4332 SAFE_RELEASE( defaultDevicePtr );
4333 SAFE_RELEASE( audioClient );
4334 SAFE_RELEASE( devicePropStore );
4335 SAFE_RELEASE( defaultDevicePropStore );
4337 CoTaskMemFree( deviceFormat );
4338 CoTaskMemFree( closestMatchFormat );
4340 if ( !errorText_.empty() )
4345 //-----------------------------------------------------------------------------
4347 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
// Linear scan for the device whose probe reports isDefaultOutput.
// NOTE(review): getDeviceCount() is re-evaluated every iteration and each
// getDeviceInfo() re-enumerates all endpoints -- O(n^2) COM probing; cheap
// for typical device counts but worth knowing about.
4349 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4350 if ( getDeviceInfo( i ).isDefaultOutput ) {
4358 //-----------------------------------------------------------------------------
4360 unsigned int RtApiWasapi::getDefaultInputDevice( void )
// Linear scan for the device whose probe reports isDefaultInput; same
// O(n^2) re-enumeration caveat as getDefaultOutputDevice.
4362 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4363 if ( getDeviceInfo( i ).isDefaultInput ) {
4371 //-----------------------------------------------------------------------------
4373 void RtApiWasapi::closeStream( void )
// Stops the stream if needed, then releases all WASAPI COM interfaces and
// Win32 events held in the WasapiHandle, frees the per-mode user buffers
// and the shared device buffer, and marks the stream CLOSED.
4375 if ( stream_.state == STREAM_CLOSED ) {
4376 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4377 error( RtAudioError::WARNING );
4381 if ( stream_.state != STREAM_STOPPED )
4384 // clean up stream memory
4385 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4386 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4388 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4389 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4391 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4392 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4394 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4395 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4397 delete ( WasapiHandle* ) stream_.apiHandle;
4398 stream_.apiHandle = NULL;
// userBuffer[0]/[1] are the OUTPUT/INPUT staging buffers (calloc'd in
// probeDeviceOpen), so plain free() + null is the matching cleanup.
4400 for ( int i = 0; i < 2; i++ ) {
4401 if ( stream_.userBuffer[i] ) {
4402 free( stream_.userBuffer[i] );
4403 stream_.userBuffer[i] = 0;
4407 if ( stream_.deviceBuffer ) {
4408 free( stream_.deviceBuffer );
4409 stream_.deviceBuffer = 0;
4412 // update stream state
4413 stream_.state = STREAM_CLOSED;
4416 //-----------------------------------------------------------------------------
4418 void RtApiWasapi::startStream( void )
// Marks the stream RUNNING and spawns the dedicated WASAPI callback thread
// (created suspended so its priority can be set before it first runs).
4422 if ( stream_.state == STREAM_RUNNING ) {
4423 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4424 error( RtAudioError::WARNING );
4428 // update stream state
// State is flipped before thread creation; wasapiThread reads it to decide
// when to keep looping.
4429 stream_.state = STREAM_RUNNING;
4431 // create WASAPI stream thread
4432 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4434 if ( !stream_.callbackInfo.thread ) {
4435 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4436 error( RtAudioError::THREAD_ERROR );
4439 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4440 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4444 //-----------------------------------------------------------------------------
4446 void RtApiWasapi::stopStream( void )
// Graceful stop: signals the callback thread via STREAM_STOPPING, waits for
// it to report STREAM_STOPPED, lets the final buffer drain, then stops the
// WASAPI clients and closes the thread handle.  Compare abortStream, which
// skips the drain sleep.
4450 if ( stream_.state == STREAM_STOPPED ) {
4451 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4452 error( RtAudioError::WARNING );
4456 // inform stream thread by setting stream state to STREAM_STOPPING
4457 stream_.state = STREAM_STOPPING;
4459 // wait until stream thread is stopped
// The callback thread sets stream_.state to STREAM_STOPPED on exit.
4460 while( stream_.state != STREAM_STOPPED ) {
4464 // Wait for the last buffer to play before stopping.
// bufferSize frames at sampleRate Hz, converted to milliseconds.
4465 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4467 // stop capture client if applicable
4468 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4469 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4470 if ( FAILED( hr ) ) {
4471 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
4472 error( RtAudioError::DRIVER_ERROR );
4477 // stop render client if applicable
4478 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4479 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4480 if ( FAILED( hr ) ) {
4481 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
4482 error( RtAudioError::DRIVER_ERROR );
4487 // close thread handle
4488 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4489 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4490 error( RtAudioError::THREAD_ERROR );
4494 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4497 //-----------------------------------------------------------------------------
4499 void RtApiWasapi::abortStream( void )
// Immediate stop: identical to stopStream except it does not sleep to let
// the last buffer finish playing.
4503 if ( stream_.state == STREAM_STOPPED ) {
4504 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4505 error( RtAudioError::WARNING );
4509 // inform stream thread by setting stream state to STREAM_STOPPING
4510 stream_.state = STREAM_STOPPING;
4512 // wait until stream thread is stopped
// The callback thread sets stream_.state to STREAM_STOPPED on exit.
4513 while ( stream_.state != STREAM_STOPPED ) {
4517 // stop capture client if applicable
4518 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4519 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4520 if ( FAILED( hr ) ) {
4521 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
4522 error( RtAudioError::DRIVER_ERROR );
4527 // stop render client if applicable
4528 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4529 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4530 if ( FAILED( hr ) ) {
4531 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
4532 error( RtAudioError::DRIVER_ERROR );
4537 // close thread handle
4538 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4539 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4540 error( RtAudioError::THREAD_ERROR );
4544 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4547 //-----------------------------------------------------------------------------
4549 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4550 unsigned int firstChannel, unsigned int sampleRate,
4551 RtAudio::StreamOptions* options )
4552 RtAudio::StreamOptions* options )
4554 bool methodResult = FAILURE;
4743 //=============================================================================
4745 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
// Static thread entry point: trampoline from CreateThread's void* context
// into the instance's wasapiThread() main loop.
4748 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4753 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
// Static thread entry point: invokes stopStream() on the instance passed
// as the thread context.
4756 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4761 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
// Static thread entry point: invokes abortStream() on the instance passed
// as the thread context.
4764 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4769 //-----------------------------------------------------------------------------
// RtApiWasapi::wasapiThread: the dedicated audio streaming thread.
// Initializes the WASAPI capture and/or render clients (shared mode,
// event-driven), sizes the intermediate ring buffers and conversion buffers,
// then loops until stream_.state becomes STREAM_STOPPING:
//   capture: pull device frames -> sample-rate/format convert -> user buffer
//   callback: invoke the user callback; return 1/2 spawns a stop/abort thread
//   render:  convert user buffer -> push frames to the device
// On exit it frees COM-allocated formats and scratch memory and marks the
// stream STREAM_STOPPED.
// NOTE(review): this numbered listing elides lines (gaps in the embedded
// numbering) -- missing `goto Exit;` jumps, closing braces and the `Exit:`
// cleanup label are presumed on elided lines; verify against full source.
4771 void RtApiWasapi::wasapiThread()
4773 // as this is a new thread, we must CoInitialize it
4774 CoInitialize( NULL );
4778 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4779 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4780 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4781 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4782 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4783 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4785 WAVEFORMATEX* captureFormat = NULL;
4786 WAVEFORMATEX* renderFormat = NULL;
4787 float captureSrRatio = 0.0f;
4788 float renderSrRatio = 0.0f;
4789 WasapiBuffer captureBuffer;
4790 WasapiBuffer renderBuffer;
4792 // declare local stream variables
4793 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4794 BYTE* streamBuffer = NULL;
4795 unsigned long captureFlags = 0;
4796 unsigned int bufferFrameCount = 0;
4797 unsigned int numFramesPadding = 0;
4798 unsigned int convBufferSize = 0;
4799 bool callbackPushed = false;
4800 bool callbackPulled = false;
4801 bool callbackStopped = false;
4802 int callbackResult = 0;
4804 // convBuffer is used to store converted buffers between WASAPI and the user
4805 char* convBuffer = NULL;
4806 unsigned int convBuffSize = 0;
4807 unsigned int deviceBuffSize = 0;
4810 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4812 // Attempt to assign "Pro Audio" characteristic to thread
// NOTE(review): a non-NULL guard on AvrtDll appears to fall on an elided
// line (gap 4813 -> 4815); confirm before assuming unconditional use.
4813 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4815 DWORD taskIndex = 0;
4816 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4817 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4818 FreeLibrary( AvrtDll );
4821 // start capture stream if applicable
4822 if ( captureAudioClient ) {
4823 hr = captureAudioClient->GetMixFormat( &captureFormat );
4824 if ( FAILED( hr ) ) {
4825 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
// Ratio of device mix rate to user stream rate; used to size/resample below.
4829 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
4831 // initialize capture stream according to desired buffer size
4832 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
4833 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
4835 if ( !captureClient ) {
4836 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4837 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4838 desiredBufferPeriod,
4839 desiredBufferPeriod,
4842 if ( FAILED( hr ) ) {
4843 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4847 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4848 ( void** ) &captureClient );
4849 if ( FAILED( hr ) ) {
4850 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
4854 // configure captureEvent to trigger on every available capture buffer
4855 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4856 if ( !captureEvent ) {
4857 errorType = RtAudioError::SYSTEM_ERROR;
4858 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
4862 hr = captureAudioClient->SetEventHandle( captureEvent );
4863 if ( FAILED( hr ) ) {
4864 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
// Persist the newly created client/event back into the shared WasapiHandle.
4868 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
4869 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
4872 unsigned int inBufferSize = 0;
4873 hr = captureAudioClient->GetBufferSize( &inBufferSize );
4874 if ( FAILED( hr ) ) {
4875 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
4879 // scale outBufferSize according to stream->user sample rate ratio
4880 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
4881 inBufferSize *= stream_.nDeviceChannels[INPUT];
4883 // set captureBuffer size
4884 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
4886 // reset the capture stream
4887 hr = captureAudioClient->Reset();
4888 if ( FAILED( hr ) ) {
4889 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
4893 // start the capture stream
4894 hr = captureAudioClient->Start();
4895 if ( FAILED( hr ) ) {
4896 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
4901 // start render stream if applicable
4902 if ( renderAudioClient ) {
4903 hr = renderAudioClient->GetMixFormat( &renderFormat );
4904 if ( FAILED( hr ) ) {
4905 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4909 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
4911 // initialize render stream according to desired buffer size
4912 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
4913 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
4915 if ( !renderClient ) {
4916 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4917 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4918 desiredBufferPeriod,
4919 desiredBufferPeriod,
4922 if ( FAILED( hr ) ) {
4923 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
4927 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
4928 ( void** ) &renderClient );
4929 if ( FAILED( hr ) ) {
4930 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
4934 // configure renderEvent to trigger on every available render buffer
4935 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4936 if ( !renderEvent ) {
4937 errorType = RtAudioError::SYSTEM_ERROR;
4938 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
4942 hr = renderAudioClient->SetEventHandle( renderEvent );
4943 if ( FAILED( hr ) ) {
4944 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
// Persist the newly created client/event back into the shared WasapiHandle.
4948 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
4949 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
4952 unsigned int outBufferSize = 0;
4953 hr = renderAudioClient->GetBufferSize( &outBufferSize );
4954 if ( FAILED( hr ) ) {
4955 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
4959 // scale inBufferSize according to user->stream sample rate ratio
4960 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
4961 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
4963 // set renderBuffer size
4964 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
4966 // reset the render stream
4967 hr = renderAudioClient->Reset();
4968 if ( FAILED( hr ) ) {
4969 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
4973 // start the render stream
4974 hr = renderAudioClient->Start();
4975 if ( FAILED( hr ) ) {
4976 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
// Size the conversion and device buffers according to the stream mode
// (INPUT / OUTPUT / DUPLEX takes the max of the two directions).
4981 if ( stream_.mode == INPUT ) {
4982 using namespace std; // for roundf
4983 convBuffSize = ( size_t ) roundf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4984 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4986 else if ( stream_.mode == OUTPUT ) {
4987 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4988 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4990 else if ( stream_.mode == DUPLEX ) {
4991 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4992 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
4993 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4994 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
4997 convBuffer = ( char* ) malloc( convBuffSize );
4998 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
4999 if ( !convBuffer || !stream_.deviceBuffer ) {
5000 errorType = RtAudioError::MEMORY_ERROR;
5001 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5005 // stream process loop
5006 while ( stream_.state != STREAM_STOPPING ) {
5007 if ( !callbackPulled ) {
5010 // 1. Pull callback buffer from inputBuffer
5011 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5012 //                          Convert callback buffer to user format
5014 if ( captureAudioClient ) {
5015 // Pull callback buffer from inputBuffer
5016 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5017 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
5018 stream_.deviceFormat[INPUT] );
5020 if ( callbackPulled ) {
5021 // Convert callback buffer to user sample rate
5022 convertBufferWasapi( stream_.deviceBuffer,
5024 stream_.nDeviceChannels[INPUT],
5025 captureFormat->nSamplesPerSec,
5027 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
5029 stream_.deviceFormat[INPUT] );
5031 if ( stream_.doConvertBuffer[INPUT] ) {
5032 // Convert callback buffer to user format
5033 convertBuffer( stream_.userBuffer[INPUT],
5034 stream_.deviceBuffer,
5035 stream_.convertInfo[INPUT] );
5038 // no further conversion, simple copy deviceBuffer to userBuffer
5039 memcpy( stream_.userBuffer[INPUT],
5040 stream_.deviceBuffer,
5041 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5046 // if there is no capture stream, set callbackPulled flag
5047 callbackPulled = true;
5052 // 1. Execute user callback method
5053 // 2. Handle return value from callback
5055 // if callback has not requested the stream to stop
5056 if ( callbackPulled && !callbackStopped ) {
5057 // Execute user callback method
5058 callbackResult = callback( stream_.userBuffer[OUTPUT],
5059 stream_.userBuffer[INPUT],
5062 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5063 stream_.callbackInfo.userData );
5065 // Handle return value from callback
// Return value 1 = stop (drain), 2 = abort.  Either way a helper thread is
// spawned because the audio thread cannot stop/join itself.
5066 if ( callbackResult == 1 ) {
5067 // instantiate a thread to stop this thread
5068 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5069 if ( !threadHandle ) {
5070 errorType = RtAudioError::THREAD_ERROR;
5071 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5074 else if ( !CloseHandle( threadHandle ) ) {
5075 errorType = RtAudioError::THREAD_ERROR;
5076 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5080 callbackStopped = true;
5082 else if ( callbackResult == 2 ) {
5083 // instantiate a thread to stop this thread
5084 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5085 if ( !threadHandle ) {
5086 errorType = RtAudioError::THREAD_ERROR;
5087 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5090 else if ( !CloseHandle( threadHandle ) ) {
5091 errorType = RtAudioError::THREAD_ERROR;
5092 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5096 callbackStopped = true;
5103 // 1. Convert callback buffer to stream format
5104 // 2. Convert callback buffer to stream sample rate and channel count
5105 // 3. Push callback buffer into outputBuffer
5107 if ( renderAudioClient && callbackPulled ) {
5108 if ( stream_.doConvertBuffer[OUTPUT] ) {
5109 // Convert callback buffer to stream format
5110 convertBuffer( stream_.deviceBuffer,
5111 stream_.userBuffer[OUTPUT],
5112 stream_.convertInfo[OUTPUT] );
5116 // Convert callback buffer to stream sample rate
5117 convertBufferWasapi( convBuffer,
5118 stream_.deviceBuffer,
5119 stream_.nDeviceChannels[OUTPUT],
5121 renderFormat->nSamplesPerSec,
5124 stream_.deviceFormat[OUTPUT] );
5126 // Push callback buffer into outputBuffer
5127 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5128 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5129 stream_.deviceFormat[OUTPUT] );
5132 // if there is no render stream, set callbackPushed flag
5133 callbackPushed = true;
5138 // 1. Get capture buffer from stream
5139 // 2. Push capture buffer into inputBuffer
5140 // 3. If 2. was successful: Release capture buffer
5142 if ( captureAudioClient ) {
5143 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5144 if ( !callbackPulled ) {
5145 WaitForSingleObject( captureEvent, INFINITE );
5148 // Get capture buffer from stream
5149 hr = captureClient->GetBuffer( &streamBuffer,
5151 &captureFlags, NULL, NULL );
5152 if ( FAILED( hr ) ) {
5153 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5157 if ( bufferFrameCount != 0 ) {
5158 // Push capture buffer into inputBuffer
5159 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5160 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5161 stream_.deviceFormat[INPUT] ) )
5163 // Release capture buffer
5164 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5165 if ( FAILED( hr ) ) {
5166 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5172 // Inform WASAPI that capture was unsuccessful
5173 hr = captureClient->ReleaseBuffer( 0 );
5174 if ( FAILED( hr ) ) {
5175 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5182 // Inform WASAPI that capture was unsuccessful
5183 hr = captureClient->ReleaseBuffer( 0 );
5184 if ( FAILED( hr ) ) {
5185 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5193 // 1. Get render buffer from stream
5194 // 2. Pull next buffer from outputBuffer
5195 // 3. If 2. was successful: Fill render buffer with next buffer
5196 //                          Release render buffer
5198 if ( renderAudioClient ) {
5199 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5200 if ( callbackPulled && !callbackPushed ) {
5201 WaitForSingleObject( renderEvent, INFINITE );
5204 // Get render buffer from stream
5205 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5206 if ( FAILED( hr ) ) {
5207 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5211 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5212 if ( FAILED( hr ) ) {
5213 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
// Only the un-padded portion of the device buffer may be written this pass.
5217 bufferFrameCount -= numFramesPadding;
5219 if ( bufferFrameCount != 0 ) {
5220 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5221 if ( FAILED( hr ) ) {
5222 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5226 // Pull next buffer from outputBuffer
5227 // Fill render buffer with next buffer
5228 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5229 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5230 stream_.deviceFormat[OUTPUT] ) )
5232 // Release render buffer
5233 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5234 if ( FAILED( hr ) ) {
5235 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5241 // Inform WASAPI that render was unsuccessful
5242 hr = renderClient->ReleaseBuffer( 0, 0 );
5243 if ( FAILED( hr ) ) {
5244 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5251 // Inform WASAPI that render was unsuccessful
5252 hr = renderClient->ReleaseBuffer( 0, 0 );
5253 if ( FAILED( hr ) ) {
5254 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5260 // if the callback buffer was pushed to renderBuffer, reset callbackPulled flag
5261 if ( callbackPushed ) {
5262 callbackPulled = false;
// Advance the stream time by one callback period.
5264 RtApi::tickStreamTime();
// Exit-path cleanup: release COM-allocated mix formats and scratch memory.
5271 CoTaskMemFree( captureFormat );
5272 CoTaskMemFree( renderFormat );
5274 free ( convBuffer );
5278 // update stream state
5279 stream_.state = STREAM_STOPPED;
// NOTE(review): the branch body/else of this condition (presumably the
// deferred error(...) report) falls on elided lines.
5281 if ( errorText_.empty() )
5287 //******************** End of __WINDOWS_WASAPI__ *********************//
5291 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5293 // Modified by Robin Davies, October 2005
5294 // - Improvements to DirectX pointer chasing.
5295 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5296 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5297 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5298 // Changed device query structure for RtAudio 4.0.7, January 2010
5300 #include <windows.h>
5301 #include <process.h>
5302 #include <mmsystem.h>
5306 #include <algorithm>
5308 #if defined(__MINGW32__)
5309 // missing from latest mingw winapi
5310 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5311 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5312 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5313 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5316 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5318 #ifdef _MSC_VER // if Microsoft Visual C++
5319 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5322 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5324 if ( pointer > bufferSize ) pointer -= bufferSize;
5325 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5326 if ( pointer < earlierPointer ) pointer += bufferSize;
5327 return pointer >= earlierPointer && pointer < laterPointer;
5330 // A structure to hold various information related to the DirectSound
5331 // API implementation.
// Fragment of the DsHandle struct (the struct header and several members --
// e.g. the id[], buffer[] and xrun[] referenced by the constructor below --
// fall on elided lines of this numbered listing).
5333 unsigned int drainCounter; // Tracks callback counts when draining
5334 bool internalDrain;        // Indicates if stop is initiated from callback or not.
// Per-direction bookkeeping, indexed by stream direction (playback / capture):
5338 UINT bufferPointer[2];
5339 DWORD dsBufferSize[2];
5340 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Default constructor: zeroes counters, ids, buffers, pointers and xrun flags.
5344 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5347 // Declarations for utility functions, callbacks, and structures
5348 // specific to the DirectSound implementation.
// DirectSound(Capture)Enumerate callback used to collect device GUIDs and
// descriptions (its remaining parameters fall on elided lines).
5349 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5350 LPCTSTR description,
// Maps a DirectSound HRESULT error code to a human-readable string.
5354 static const char* getErrorString( int code );
// _beginthreadex-compatible entry point for the DirectSound callback thread.
5356 static unsigned __stdcall callbackHandler( void *ptr );
// DsDevice default-constructor fragment: marks the device as not (re)found
// and both direction ids (output / input) as invalid.
5365 : found(false) { validId[0] = false; validId[1] = false; }
// Probe context handed to deviceQueryCallback during device enumeration
// (the isInput member used below falls on an elided line of this listing).
5368 struct DsProbeData {
5370 std::vector<struct DsDevice>* dsDevices;
// RtApiDs constructor: COM-initialize this thread for DirectSound use and
// remember whether the call succeeded so the destructor can balance it.
5373 RtApiDs :: RtApiDs()
5375 // Dsound will run both-threaded. If CoInitialize fails, then just
5376 // accept whatever the mainline chose for a threading model.
5377 coInitialized_ = false;
5378 HRESULT hr = CoInitialize( NULL );
5379 if ( !FAILED( hr ) ) coInitialized_ = true;
// RtApiDs destructor: close any open stream, then undo the constructor's
// CoInitialize if (and only if) it succeeded there.
5382 RtApiDs :: ~RtApiDs()
5384 if ( stream_.state != STREAM_CLOSED ) closeStream();
5385 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5388 // The DirectSound default output is always the first device.
// NOTE(review): the body (presumably `return 0;`) falls on elided lines.
5389 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5394 // The DirectSound default input is always the first input device,
5395 // which is the first capture device enumerated.
// NOTE(review): the body (presumably `return 0;`) falls on elided lines.
5396 unsigned int RtApiDs :: getDefaultInputDevice( void )
// RtApiDs::getDeviceCount: re-enumerate DirectSound output and capture
// devices into the dsDevices cache, drop entries that have disappeared, and
// return the resulting device count.  Enumeration failures are reported as
// warnings (not fatal); the surviving cache is still counted.
5401 unsigned int RtApiDs :: getDeviceCount( void )
5403 // Set query flag for previously found devices to false, so that we
5404 // can check for any devices that have disappeared.
5405 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5406 dsDevices[i].found = false;
5408 // Query DirectSound devices.
5409 struct DsProbeData probeInfo;
5410 probeInfo.isInput = false;
5411 probeInfo.dsDevices = &dsDevices;
5412 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5413 if ( FAILED( result ) ) {
5414 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5415 errorText_ = errorStream_.str();
5416 error( RtAudioError::WARNING );
5419 // Query DirectSoundCapture devices.
5420 probeInfo.isInput = true;
5421 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5422 if ( FAILED( result ) ) {
5423 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5424 errorText_ = errorStream_.str();
5425 error( RtAudioError::WARNING );
5428 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
// NOTE(review): the loop increment for the retained-element case falls on an
// elided line; erase() without increment is the usual erase-while-iterating idiom.
5429 for ( unsigned int i=0; i<dsDevices.size(); ) {
5430 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5434 return static_cast<unsigned int>(dsDevices.size());
// RtApiDs::getDeviceInfo: probe one cached DirectSound device and fill an
// RtAudio::DeviceInfo with its output capabilities (via DirectSoundCreate /
// GetCaps) and input capabilities (via DirectSoundCaptureCreate / GetCaps):
// channel counts, supported/preferred sample rates, native formats and
// default-device flags.  Probe failures are reported as warnings and return
// the partially-filled info.  NOTE(review): this listing elides lines --
// the `probeInput:` label targeted below, DSCAPS/DSCCAPS declarations,
// early `return info;` statements and closing braces are not visible.
5437 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5439 RtAudio::DeviceInfo info;
5440 info.probed = false;
5442 if ( dsDevices.size() == 0 ) {
5443 // Force a query of all devices
5445 if ( dsDevices.size() == 0 ) {
5446 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5447 error( RtAudioError::INVALID_USE );
5452 if ( device >= dsDevices.size() ) {
5453 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5454 error( RtAudioError::INVALID_USE );
// Skip the output probe when the device has no valid output (id slot 0).
5459 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5461 LPDIRECTSOUND output;
5463 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5464 if ( FAILED( result ) ) {
5465 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5466 errorText_ = errorStream_.str();
5467 error( RtAudioError::WARNING );
5471 outCaps.dwSize = sizeof( outCaps );
5472 result = output->GetCaps( &outCaps );
5473 if ( FAILED( result ) ) {
5475 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5476 errorText_ = errorStream_.str();
5477 error( RtAudioError::WARNING );
5481 // Get output channel information.
5482 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5484 // Get sample rate information.
5485 info.sampleRates.clear();
5486 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5487 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5488 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5489 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Prefer the highest supported rate that does not exceed 48 kHz.
5491 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5492 info.preferredSampleRate = SAMPLE_RATES[k];
5496 // Get format information.
5497 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5498 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5502 if ( getDefaultOutputDevice() == device )
5503 info.isDefaultOutput = true;
// No capture id for this device: output-only, record the name and bail
// (the early return is on an elided line).
5505 if ( dsDevices[ device ].validId[1] == false ) {
5506 info.name = dsDevices[ device ].name;
5513 LPDIRECTSOUNDCAPTURE input;
5514 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5515 if ( FAILED( result ) ) {
5516 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5517 errorText_ = errorStream_.str();
5518 error( RtAudioError::WARNING );
5523 inCaps.dwSize = sizeof( inCaps );
5524 result = input->GetCaps( &inCaps );
5525 if ( FAILED( result ) ) {
5527 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5528 errorText_ = errorStream_.str();
5529 error( RtAudioError::WARNING );
5533 // Get input channel information.
5534 info.inputChannels = inCaps.dwChannels;
5536 // Get sample rate and format information.
// The WAVE_FORMAT_* capability bits encode rate/channel/width combinations;
// stereo devices are inspected first, then mono.
5537 std::vector<unsigned int> rates;
5538 if ( inCaps.dwChannels >= 2 ) {
5539 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5540 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5541 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5542 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5543 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5544 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5545 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5546 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5548 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5549 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5550 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5551 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5552 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5554 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5555 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5556 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5557 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5558 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5561 else if ( inCaps.dwChannels == 1 ) {
5562 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5563 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5564 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5565 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5566 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5567 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5568 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5569 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5571 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5572 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5573 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5574 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5575 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5577 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5578 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5579 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5580 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5581 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5584 else info.inputChannels = 0; // technically, this would be an error
5588 if ( info.inputChannels == 0 ) return info;
5590 // Copy the supported rates to the info structure but avoid duplication.
// NOTE(review): the `bool found` declaration and the `found = true; break;`
// body of the inner match fall on elided lines.
5592 for ( unsigned int i=0; i<rates.size(); i++ ) {
5594 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5595 if ( rates[i] == info.sampleRates[j] ) {
5600 if ( found == false ) info.sampleRates.push_back( rates[i] );
5602 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5604 // If device opens for both playback and capture, we determine the channels.
5605 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5606 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
5608 if ( device == 0 ) info.isDefaultInput = true;
5610 // Copy name and return.
5611 info.name = dsDevices[ device ].name;
5616 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5617 unsigned int firstChannel, unsigned int sampleRate,
5618 RtAudioFormat format, unsigned int *bufferSize,
5619 RtAudio::StreamOptions *options )
5621 if ( channels + firstChannel > 2 ) {
5622 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5626 size_t nDevices = dsDevices.size();
5627 if ( nDevices == 0 ) {
5628 // This should not happen because a check is made before this function is called.
5629 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5633 if ( device >= nDevices ) {
5634 // This should not happen because a check is made before this function is called.
5635 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5639 if ( mode == OUTPUT ) {
5640 if ( dsDevices[ device ].validId[0] == false ) {
5641 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5642 errorText_ = errorStream_.str();
5646 else { // mode == INPUT
5647 if ( dsDevices[ device ].validId[1] == false ) {
5648 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5649 errorText_ = errorStream_.str();
5654 // According to a note in PortAudio, using GetDesktopWindow()
5655 // instead of GetForegroundWindow() is supposed to avoid problems
5656 // that occur when the application's window is not the foreground
5657 // window. Also, if the application window closes before the
5658 // DirectSound buffer, DirectSound can crash. In the past, I had
5659 // problems when using GetDesktopWindow() but it seems fine now
5660 // (January 2010). I'll leave it commented here.
5661 // HWND hWnd = GetForegroundWindow();
5662 HWND hWnd = GetDesktopWindow();
5664 // Check the numberOfBuffers parameter and limit the lowest value to
5665 // two. This is a judgement call and a value of two is probably too
5666 // low for capture, but it should work for playback.
5668 if ( options ) nBuffers = options->numberOfBuffers;
5669 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5670 if ( nBuffers < 2 ) nBuffers = 3;
5672 // Check the lower range of the user-specified buffer size and set
5673 // (arbitrarily) to a lower bound of 32.
5674 if ( *bufferSize < 32 ) *bufferSize = 32;
5676 // Create the wave format structure. The data format setting will
5677 // be determined later.
5678 WAVEFORMATEX waveFormat;
5679 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5680 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5681 waveFormat.nChannels = channels + firstChannel;
5682 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5684 // Determine the device buffer size. By default, we'll use the value
5685 // defined above (32K), but we will grow it to make allowances for
5686 // very large software buffer sizes.
5687 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5688 DWORD dsPointerLeadTime = 0;
5690 void *ohandle = 0, *bhandle = 0;
5692 if ( mode == OUTPUT ) {
5694 LPDIRECTSOUND output;
5695 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5696 if ( FAILED( result ) ) {
5697 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5698 errorText_ = errorStream_.str();
5703 outCaps.dwSize = sizeof( outCaps );
5704 result = output->GetCaps( &outCaps );
5705 if ( FAILED( result ) ) {
5707 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5708 errorText_ = errorStream_.str();
5712 // Check channel information.
5713 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5714 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5715 errorText_ = errorStream_.str();
5719 // Check format information. Use 16-bit format unless not
5720 // supported or user requests 8-bit.
5721 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5722 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5723 waveFormat.wBitsPerSample = 16;
5724 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5727 waveFormat.wBitsPerSample = 8;
5728 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5730 stream_.userFormat = format;
5732 // Update wave format structure and buffer information.
5733 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5734 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5735 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5737 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5738 while ( dsPointerLeadTime * 2U > dsBufferSize )
5741 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5742 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5743 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5744 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5745 if ( FAILED( result ) ) {
5747 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5748 errorText_ = errorStream_.str();
5752 // Even though we will write to the secondary buffer, we need to
5753 // access the primary buffer to set the correct output format
5754 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5755 // buffer description.
5756 DSBUFFERDESC bufferDescription;
5757 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5758 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5759 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5761 // Obtain the primary buffer
5762 LPDIRECTSOUNDBUFFER buffer;
5763 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5764 if ( FAILED( result ) ) {
5766 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5767 errorText_ = errorStream_.str();
5771 // Set the primary DS buffer sound format.
5772 result = buffer->SetFormat( &waveFormat );
5773 if ( FAILED( result ) ) {
5775 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5776 errorText_ = errorStream_.str();
5780 // Setup the secondary DS buffer description.
5781 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5782 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5783 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5784 DSBCAPS_GLOBALFOCUS |
5785 DSBCAPS_GETCURRENTPOSITION2 |
5786 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5787 bufferDescription.dwBufferBytes = dsBufferSize;
5788 bufferDescription.lpwfxFormat = &waveFormat;
5790 // Try to create the secondary DS buffer. If that doesn't work,
5791 // try to use software mixing. Otherwise, there's a problem.
5792 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5793 if ( FAILED( result ) ) {
5794 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5795 DSBCAPS_GLOBALFOCUS |
5796 DSBCAPS_GETCURRENTPOSITION2 |
5797 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5798 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5799 if ( FAILED( result ) ) {
5801 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5802 errorText_ = errorStream_.str();
5807 // Get the buffer size ... might be different from what we specified.
5809 dsbcaps.dwSize = sizeof( DSBCAPS );
5810 result = buffer->GetCaps( &dsbcaps );
5811 if ( FAILED( result ) ) {
5814 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5815 errorText_ = errorStream_.str();
5819 dsBufferSize = dsbcaps.dwBufferBytes;
5821 // Lock the DS buffer
5824 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5825 if ( FAILED( result ) ) {
5828 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
5829 errorText_ = errorStream_.str();
5833 // Zero the DS buffer
5834 ZeroMemory( audioPtr, dataLen );
5836 // Unlock the DS buffer
5837 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5838 if ( FAILED( result ) ) {
5841 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
5842 errorText_ = errorStream_.str();
5846 ohandle = (void *) output;
5847 bhandle = (void *) buffer;
5850 if ( mode == INPUT ) {
5852 LPDIRECTSOUNDCAPTURE input;
5853 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5854 if ( FAILED( result ) ) {
5855 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5856 errorText_ = errorStream_.str();
5861 inCaps.dwSize = sizeof( inCaps );
5862 result = input->GetCaps( &inCaps );
5863 if ( FAILED( result ) ) {
5865 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
5866 errorText_ = errorStream_.str();
5870 // Check channel information.
5871 if ( inCaps.dwChannels < channels + firstChannel ) {
5872 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
5876 // Check format information. Use 16-bit format unless user
5878 DWORD deviceFormats;
5879 if ( channels + firstChannel == 2 ) {
5880 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
5881 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5882 waveFormat.wBitsPerSample = 8;
5883 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5885 else { // assume 16-bit is supported
5886 waveFormat.wBitsPerSample = 16;
5887 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5890 else { // channel == 1
5891 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
5892 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5893 waveFormat.wBitsPerSample = 8;
5894 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5896 else { // assume 16-bit is supported
5897 waveFormat.wBitsPerSample = 16;
5898 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5901 stream_.userFormat = format;
5903 // Update wave format structure and buffer information.
5904 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5905 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5906 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5908 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5909 while ( dsPointerLeadTime * 2U > dsBufferSize )
5912 // Setup the secondary DS buffer description.
5913 DSCBUFFERDESC bufferDescription;
5914 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
5915 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
5916 bufferDescription.dwFlags = 0;
5917 bufferDescription.dwReserved = 0;
5918 bufferDescription.dwBufferBytes = dsBufferSize;
5919 bufferDescription.lpwfxFormat = &waveFormat;
5921 // Create the capture buffer.
5922 LPDIRECTSOUNDCAPTUREBUFFER buffer;
5923 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
5924 if ( FAILED( result ) ) {
5926 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
5927 errorText_ = errorStream_.str();
5931 // Get the buffer size ... might be different from what we specified.
5933 dscbcaps.dwSize = sizeof( DSCBCAPS );
5934 result = buffer->GetCaps( &dscbcaps );
5935 if ( FAILED( result ) ) {
5938 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5939 errorText_ = errorStream_.str();
5943 dsBufferSize = dscbcaps.dwBufferBytes;
5945 // NOTE: We could have a problem here if this is a duplex stream
5946 // and the play and capture hardware buffer sizes are different
5947 // (I'm actually not sure if that is a problem or not).
5948 // Currently, we are not verifying that.
5950 // Lock the capture buffer
5953 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5954 if ( FAILED( result ) ) {
5957 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
5958 errorText_ = errorStream_.str();
5963 ZeroMemory( audioPtr, dataLen );
5965 // Unlock the buffer
5966 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5967 if ( FAILED( result ) ) {
5970 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
5971 errorText_ = errorStream_.str();
5975 ohandle = (void *) input;
5976 bhandle = (void *) buffer;
5979 // Set various stream parameters
5980 DsHandle *handle = 0;
5981 stream_.nDeviceChannels[mode] = channels + firstChannel;
5982 stream_.nUserChannels[mode] = channels;
5983 stream_.bufferSize = *bufferSize;
5984 stream_.channelOffset[mode] = firstChannel;
5985 stream_.deviceInterleaved[mode] = true;
5986 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
5987 else stream_.userInterleaved = true;
5989 // Set flag for buffer conversion
5990 stream_.doConvertBuffer[mode] = false;
5991 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
5992 stream_.doConvertBuffer[mode] = true;
5993 if (stream_.userFormat != stream_.deviceFormat[mode])
5994 stream_.doConvertBuffer[mode] = true;
5995 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
5996 stream_.nUserChannels[mode] > 1 )
5997 stream_.doConvertBuffer[mode] = true;
5999 // Allocate necessary internal buffers
6000 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6001 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6002 if ( stream_.userBuffer[mode] == NULL ) {
6003 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6007 if ( stream_.doConvertBuffer[mode] ) {
6009 bool makeBuffer = true;
6010 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6011 if ( mode == INPUT ) {
6012 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6013 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6014 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6019 bufferBytes *= *bufferSize;
6020 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6021 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6022 if ( stream_.deviceBuffer == NULL ) {
6023 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6029 // Allocate our DsHandle structures for the stream.
6030 if ( stream_.apiHandle == 0 ) {
6032 handle = new DsHandle;
6034 catch ( std::bad_alloc& ) {
6035 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6039 // Create a manual-reset event.
6040 handle->condition = CreateEvent( NULL, // no security
6041 TRUE, // manual-reset
6042 FALSE, // non-signaled initially
6044 stream_.apiHandle = (void *) handle;
6047 handle = (DsHandle *) stream_.apiHandle;
6048 handle->id[mode] = ohandle;
6049 handle->buffer[mode] = bhandle;
6050 handle->dsBufferSize[mode] = dsBufferSize;
6051 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6053 stream_.device[mode] = device;
6054 stream_.state = STREAM_STOPPED;
6055 if ( stream_.mode == OUTPUT && mode == INPUT )
6056 // We had already set up an output stream.
6057 stream_.mode = DUPLEX;
6059 stream_.mode = mode;
6060 stream_.nBuffers = nBuffers;
6061 stream_.sampleRate = sampleRate;
6063 // Setup the buffer conversion information structure.
6064 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6066 // Setup the callback thread.
6067 if ( stream_.callbackInfo.isRunning == false ) {
6069 stream_.callbackInfo.isRunning = true;
6070 stream_.callbackInfo.object = (void *) this;
6071 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6072 &stream_.callbackInfo, 0, &threadId );
6073 if ( stream_.callbackInfo.thread == 0 ) {
6074 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6078 // Boost DS thread priority
6079 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
6085 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6086 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6087 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6088 if ( buffer ) buffer->Release();
6091 if ( handle->buffer[1] ) {
6092 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6093 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6094 if ( buffer ) buffer->Release();
6097 CloseHandle( handle->condition );
6099 stream_.apiHandle = 0;
6102 for ( int i=0; i<2; i++ ) {
6103 if ( stream_.userBuffer[i] ) {
6104 free( stream_.userBuffer[i] );
6105 stream_.userBuffer[i] = 0;
6109 if ( stream_.deviceBuffer ) {
6110 free( stream_.deviceBuffer );
6111 stream_.deviceBuffer = 0;
6114 stream_.state = STREAM_CLOSED;
6118 void RtApiDs :: closeStream()
6120 if ( stream_.state == STREAM_CLOSED ) {
6121 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6122 error( RtAudioError::WARNING );
6126 // Stop the callback thread.
6127 stream_.callbackInfo.isRunning = false;
6128 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6129 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6131 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6133 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6134 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6135 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6142 if ( handle->buffer[1] ) {
6143 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6144 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6151 CloseHandle( handle->condition );
6153 stream_.apiHandle = 0;
6156 for ( int i=0; i<2; i++ ) {
6157 if ( stream_.userBuffer[i] ) {
6158 free( stream_.userBuffer[i] );
6159 stream_.userBuffer[i] = 0;
6163 if ( stream_.deviceBuffer ) {
6164 free( stream_.deviceBuffer );
6165 stream_.deviceBuffer = 0;
6168 stream_.mode = UNINITIALIZED;
6169 stream_.state = STREAM_CLOSED;
6172 void RtApiDs :: startStream()
6175 if ( stream_.state == STREAM_RUNNING ) {
6176 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6177 error( RtAudioError::WARNING );
6181 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6183 // Increase scheduler frequency on lesser windows (a side-effect of
6184 // increasing timer accuracy). On greater windows (Win2K or later),
6185 // this is already in effect.
6186 timeBeginPeriod( 1 );
6188 buffersRolling = false;
6189 duplexPrerollBytes = 0;
6191 if ( stream_.mode == DUPLEX ) {
6192 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6193 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
6197 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6199 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6200 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6201 if ( FAILED( result ) ) {
6202 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6203 errorText_ = errorStream_.str();
6208 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6210 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6211 result = buffer->Start( DSCBSTART_LOOPING );
6212 if ( FAILED( result ) ) {
6213 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6214 errorText_ = errorStream_.str();
6219 handle->drainCounter = 0;
6220 handle->internalDrain = false;
6221 ResetEvent( handle->condition );
6222 stream_.state = STREAM_RUNNING;
6225 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
6228 void RtApiDs :: stopStream()
6231 if ( stream_.state == STREAM_STOPPED ) {
6232 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6233 error( RtAudioError::WARNING );
6240 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6241 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6242 if ( handle->drainCounter == 0 ) {
6243 handle->drainCounter = 2;
6244 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6247 stream_.state = STREAM_STOPPED;
6249 MUTEX_LOCK( &stream_.mutex );
6251 // Stop the buffer and clear memory
6252 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6253 result = buffer->Stop();
6254 if ( FAILED( result ) ) {
6255 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6256 errorText_ = errorStream_.str();
6260 // Lock the buffer and clear it so that if we start to play again,
6261 // we won't have old data playing.
6262 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6263 if ( FAILED( result ) ) {
6264 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6265 errorText_ = errorStream_.str();
6269 // Zero the DS buffer
6270 ZeroMemory( audioPtr, dataLen );
6272 // Unlock the DS buffer
6273 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6274 if ( FAILED( result ) ) {
6275 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6276 errorText_ = errorStream_.str();
6280 // If we start playing again, we must begin at beginning of buffer.
6281 handle->bufferPointer[0] = 0;
6284 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6285 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6289 stream_.state = STREAM_STOPPED;
6291 if ( stream_.mode != DUPLEX )
6292 MUTEX_LOCK( &stream_.mutex );
6294 result = buffer->Stop();
6295 if ( FAILED( result ) ) {
6296 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6297 errorText_ = errorStream_.str();
6301 // Lock the buffer and clear it so that if we start to play again,
6302 // we won't have old data playing.
6303 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6304 if ( FAILED( result ) ) {
6305 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6306 errorText_ = errorStream_.str();
6310 // Zero the DS buffer
6311 ZeroMemory( audioPtr, dataLen );
6313 // Unlock the DS buffer
6314 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6315 if ( FAILED( result ) ) {
6316 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6317 errorText_ = errorStream_.str();
6321 // If we start recording again, we must begin at beginning of buffer.
6322 handle->bufferPointer[1] = 0;
6326 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6327 MUTEX_UNLOCK( &stream_.mutex );
6329 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
6332 void RtApiDs :: abortStream()
6335 if ( stream_.state == STREAM_STOPPED ) {
6336 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6337 error( RtAudioError::WARNING );
6341 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6342 handle->drainCounter = 2;
6347 void RtApiDs :: callbackEvent()
6349 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6350 Sleep( 50 ); // sleep 50 milliseconds
6354 if ( stream_.state == STREAM_CLOSED ) {
6355 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6356 error( RtAudioError::WARNING );
6360 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6361 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6363 // Check if we were draining the stream and signal is finished.
6364 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6366 stream_.state = STREAM_STOPPING;
6367 if ( handle->internalDrain == false )
6368 SetEvent( handle->condition );
6374 // Invoke user callback to get fresh output data UNLESS we are
6376 if ( handle->drainCounter == 0 ) {
6377 RtAudioCallback callback = (RtAudioCallback) info->callback;
6378 double streamTime = getStreamTime();
6379 RtAudioStreamStatus status = 0;
6380 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6381 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6382 handle->xrun[0] = false;
6384 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6385 status |= RTAUDIO_INPUT_OVERFLOW;
6386 handle->xrun[1] = false;
6388 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6389 stream_.bufferSize, streamTime, status, info->userData );
6390 if ( cbReturnValue == 2 ) {
6391 stream_.state = STREAM_STOPPING;
6392 handle->drainCounter = 2;
6396 else if ( cbReturnValue == 1 ) {
6397 handle->drainCounter = 1;
6398 handle->internalDrain = true;
6403 DWORD currentWritePointer, safeWritePointer;
6404 DWORD currentReadPointer, safeReadPointer;
6405 UINT nextWritePointer;
6407 LPVOID buffer1 = NULL;
6408 LPVOID buffer2 = NULL;
6409 DWORD bufferSize1 = 0;
6410 DWORD bufferSize2 = 0;
6415 MUTEX_LOCK( &stream_.mutex );
6416 if ( stream_.state == STREAM_STOPPED ) {
6417 MUTEX_UNLOCK( &stream_.mutex );
6421 if ( buffersRolling == false ) {
6422 if ( stream_.mode == DUPLEX ) {
6423 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6425 // It takes a while for the devices to get rolling. As a result,
6426 // there's no guarantee that the capture and write device pointers
6427 // will move in lockstep. Wait here for both devices to start
6428 // rolling, and then set our buffer pointers accordingly.
6429 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6430 // bytes later than the write buffer.
6432 // Stub: a serious risk of having a pre-emptive scheduling round
6433 // take place between the two GetCurrentPosition calls... but I'm
6434 // really not sure how to solve the problem. Temporarily boost to
6435 // Realtime priority, maybe; but I'm not sure what priority the
6436 // DirectSound service threads run at. We *should* be roughly
6437 // within a ms or so of correct.
6439 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6440 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6442 DWORD startSafeWritePointer, startSafeReadPointer;
6444 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6445 if ( FAILED( result ) ) {
6446 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6447 errorText_ = errorStream_.str();
6448 MUTEX_UNLOCK( &stream_.mutex );
6449 error( RtAudioError::SYSTEM_ERROR );
6452 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6453 if ( FAILED( result ) ) {
6454 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6455 errorText_ = errorStream_.str();
6456 MUTEX_UNLOCK( &stream_.mutex );
6457 error( RtAudioError::SYSTEM_ERROR );
6461 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6462 if ( FAILED( result ) ) {
6463 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6464 errorText_ = errorStream_.str();
6465 MUTEX_UNLOCK( &stream_.mutex );
6466 error( RtAudioError::SYSTEM_ERROR );
6469 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6470 if ( FAILED( result ) ) {
6471 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6472 errorText_ = errorStream_.str();
6473 MUTEX_UNLOCK( &stream_.mutex );
6474 error( RtAudioError::SYSTEM_ERROR );
6477 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6481 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6483 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6484 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6485 handle->bufferPointer[1] = safeReadPointer;
6487 else if ( stream_.mode == OUTPUT ) {
6489 // Set the proper nextWritePosition after initial startup.
6490 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6491 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6492 if ( FAILED( result ) ) {
6493 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6494 errorText_ = errorStream_.str();
6495 MUTEX_UNLOCK( &stream_.mutex );
6496 error( RtAudioError::SYSTEM_ERROR );
6499 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6500 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6503 buffersRolling = true;
6506 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6508 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6510 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6511 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6512 bufferBytes *= formatBytes( stream_.userFormat );
6513 memset( stream_.userBuffer[0], 0, bufferBytes );
6516 // Setup parameters and do buffer conversion if necessary.
6517 if ( stream_.doConvertBuffer[0] ) {
6518 buffer = stream_.deviceBuffer;
6519 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6520 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6521 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6524 buffer = stream_.userBuffer[0];
6525 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6526 bufferBytes *= formatBytes( stream_.userFormat );
6529 // No byte swapping necessary in DirectSound implementation.
6531 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6532 // unsigned. So, we need to convert our signed 8-bit data here to
6534 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6535 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6537 DWORD dsBufferSize = handle->dsBufferSize[0];
6538 nextWritePointer = handle->bufferPointer[0];
6540 DWORD endWrite, leadPointer;
6542 // Find out where the read and "safe write" pointers are.
6543 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6544 if ( FAILED( result ) ) {
6545 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6546 errorText_ = errorStream_.str();
6547 MUTEX_UNLOCK( &stream_.mutex );
6548 error( RtAudioError::SYSTEM_ERROR );
6552 // We will copy our output buffer into the region between
6553 // safeWritePointer and leadPointer. If leadPointer is not
6554 // beyond the next endWrite position, wait until it is.
6555 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6556 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6557 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6558 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6559 endWrite = nextWritePointer + bufferBytes;
6561 // Check whether the entire write region is behind the play pointer.
6562 if ( leadPointer >= endWrite ) break;
6564 // If we are here, then we must wait until the leadPointer advances
6565 // beyond the end of our next write region. We use the
6566 // Sleep() function to suspend operation until that happens.
6567 double millis = ( endWrite - leadPointer ) * 1000.0;
6568 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6569 if ( millis < 1.0 ) millis = 1.0;
6570 Sleep( (DWORD) millis );
6573 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6574 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6575 // We've strayed into the forbidden zone ... resync the read pointer.
6576 handle->xrun[0] = true;
6577 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6578 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6579 handle->bufferPointer[0] = nextWritePointer;
6580 endWrite = nextWritePointer + bufferBytes;
6583 // Lock free space in the buffer
6584 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6585 &bufferSize1, &buffer2, &bufferSize2, 0 );
6586 if ( FAILED( result ) ) {
6587 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6588 errorText_ = errorStream_.str();
6589 MUTEX_UNLOCK( &stream_.mutex );
6590 error( RtAudioError::SYSTEM_ERROR );
6594 // Copy our buffer into the DS buffer
6595 CopyMemory( buffer1, buffer, bufferSize1 );
6596 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6598 // Update our buffer offset and unlock sound buffer
6599 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6600 if ( FAILED( result ) ) {
6601 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6602 errorText_ = errorStream_.str();
6603 MUTEX_UNLOCK( &stream_.mutex );
6604 error( RtAudioError::SYSTEM_ERROR );
6607 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6608 handle->bufferPointer[0] = nextWritePointer;
6611 // Don't bother draining input
6612 if ( handle->drainCounter ) {
6613 handle->drainCounter++;
6617 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6619 // Setup parameters.
6620 if ( stream_.doConvertBuffer[1] ) {
6621 buffer = stream_.deviceBuffer;
6622 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6623 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6626 buffer = stream_.userBuffer[1];
6627 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6628 bufferBytes *= formatBytes( stream_.userFormat );
6631 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6632 long nextReadPointer = handle->bufferPointer[1];
6633 DWORD dsBufferSize = handle->dsBufferSize[1];
6635 // Find out where the write and "safe read" pointers are.
6636 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6637 if ( FAILED( result ) ) {
6638 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6639 errorText_ = errorStream_.str();
6640 MUTEX_UNLOCK( &stream_.mutex );
6641 error( RtAudioError::SYSTEM_ERROR );
6645 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6646 DWORD endRead = nextReadPointer + bufferBytes;
6648 // Handling depends on whether we are INPUT or DUPLEX.
6649 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6650 // then a wait here will drag the write pointers into the forbidden zone.
6652 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6653 // it's in a safe position. This causes dropouts, but it seems to be the only
6654 // practical way to sync up the read and write pointers reliably, given the
6655 // the very complex relationship between phase and increment of the read and write
6658 // In order to minimize audible dropouts in DUPLEX mode, we will
6659 // provide a pre-roll period of 0.5 seconds in which we return
6660 // zeros from the read buffer while the pointers sync up.
6662 if ( stream_.mode == DUPLEX ) {
6663 if ( safeReadPointer < endRead ) {
6664 if ( duplexPrerollBytes <= 0 ) {
6665 // Pre-roll time over. Be more agressive.
6666 int adjustment = endRead-safeReadPointer;
6668 handle->xrun[1] = true;
6670 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6671 // and perform fine adjustments later.
6672 // - small adjustments: back off by twice as much.
6673 if ( adjustment >= 2*bufferBytes )
6674 nextReadPointer = safeReadPointer-2*bufferBytes;
6676 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6678 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6682 // In pre=roll time. Just do it.
6683 nextReadPointer = safeReadPointer - bufferBytes;
6684 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6686 endRead = nextReadPointer + bufferBytes;
6689 else { // mode == INPUT
6690 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6691 // See comments for playback.
6692 double millis = (endRead - safeReadPointer) * 1000.0;
6693 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6694 if ( millis < 1.0 ) millis = 1.0;
6695 Sleep( (DWORD) millis );
6697 // Wake up and find out where we are now.
6698 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6699 if ( FAILED( result ) ) {
6700 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6701 errorText_ = errorStream_.str();
6702 MUTEX_UNLOCK( &stream_.mutex );
6703 error( RtAudioError::SYSTEM_ERROR );
6707 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6711 // Lock free space in the buffer
6712 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6713 &bufferSize1, &buffer2, &bufferSize2, 0 );
6714 if ( FAILED( result ) ) {
6715 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6716 errorText_ = errorStream_.str();
6717 MUTEX_UNLOCK( &stream_.mutex );
6718 error( RtAudioError::SYSTEM_ERROR );
6722 if ( duplexPrerollBytes <= 0 ) {
6723 // Copy our buffer into the DS buffer
6724 CopyMemory( buffer, buffer1, bufferSize1 );
6725 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6728 memset( buffer, 0, bufferSize1 );
6729 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6730 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6733 // Update our buffer offset and unlock sound buffer
6734 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6735 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6736 if ( FAILED( result ) ) {
6737 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6738 errorText_ = errorStream_.str();
6739 MUTEX_UNLOCK( &stream_.mutex );
6740 error( RtAudioError::SYSTEM_ERROR );
6743 handle->bufferPointer[1] = nextReadPointer;
6745 // No byte swapping necessary in DirectSound implementation.
6747 // If necessary, convert 8-bit data from unsigned to signed.
6748 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6749 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6751 // Do buffer conversion if necessary.
6752 if ( stream_.doConvertBuffer[1] )
6753 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6757 MUTEX_UNLOCK( &stream_.mutex );
6758 RtApi::tickStreamTime();
6761 // Definitions for utility functions and callbacks
6762 // specific to the DirectSound implementation.
6764 static unsigned __stdcall callbackHandler( void *ptr )
6766 CallbackInfo *info = (CallbackInfo *) ptr;
6767 RtApiDs *object = (RtApiDs *) info->object;
6768 bool* isRunning = &info->isRunning;
6770 while ( *isRunning == true ) {
6771 object->callbackEvent();
6778 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6779 LPCTSTR description,
6783 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6784 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6787 bool validDevice = false;
6788 if ( probeInfo.isInput == true ) {
6790 LPDIRECTSOUNDCAPTURE object;
6792 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
6793 if ( hr != DS_OK ) return TRUE;
6795 caps.dwSize = sizeof(caps);
6796 hr = object->GetCaps( &caps );
6797 if ( hr == DS_OK ) {
6798 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
6805 LPDIRECTSOUND object;
6806 hr = DirectSoundCreate( lpguid, &object, NULL );
6807 if ( hr != DS_OK ) return TRUE;
6809 caps.dwSize = sizeof(caps);
6810 hr = object->GetCaps( &caps );
6811 if ( hr == DS_OK ) {
6812 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
6818 // If good device, then save its name and guid.
6819 std::string name = convertCharPointerToStdString( description );
6820 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
6821 if ( lpguid == NULL )
6822 name = "Default Device";
6823 if ( validDevice ) {
6824 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
6825 if ( dsDevices[i].name == name ) {
6826 dsDevices[i].found = true;
6827 if ( probeInfo.isInput ) {
6828 dsDevices[i].id[1] = lpguid;
6829 dsDevices[i].validId[1] = true;
6832 dsDevices[i].id[0] = lpguid;
6833 dsDevices[i].validId[0] = true;
6841 device.found = true;
6842 if ( probeInfo.isInput ) {
6843 device.id[1] = lpguid;
6844 device.validId[1] = true;
6847 device.id[0] = lpguid;
6848 device.validId[0] = true;
6850 dsDevices.push_back( device );
6856 static const char* getErrorString( int code )
6860 case DSERR_ALLOCATED:
6861 return "Already allocated";
6863 case DSERR_CONTROLUNAVAIL:
6864 return "Control unavailable";
6866 case DSERR_INVALIDPARAM:
6867 return "Invalid parameter";
6869 case DSERR_INVALIDCALL:
6870 return "Invalid call";
6873 return "Generic error";
6875 case DSERR_PRIOLEVELNEEDED:
6876 return "Priority level needed";
6878 case DSERR_OUTOFMEMORY:
6879 return "Out of memory";
6881 case DSERR_BADFORMAT:
6882 return "The sample rate or the channel format is not supported";
6884 case DSERR_UNSUPPORTED:
6885 return "Not supported";
6887 case DSERR_NODRIVER:
6890 case DSERR_ALREADYINITIALIZED:
6891 return "Already initialized";
6893 case DSERR_NOAGGREGATION:
6894 return "No aggregation";
6896 case DSERR_BUFFERLOST:
6897 return "Buffer lost";
6899 case DSERR_OTHERAPPHASPRIO:
6900 return "Another application already has priority";
6902 case DSERR_UNINITIALIZED:
6903 return "Uninitialized";
6906 return "DirectSound unknown error";
6909 //******************** End of __WINDOWS_DS__ *********************//
6913 #if defined(__LINUX_ALSA__)
6915 #include <alsa/asoundlib.h>
6918 // A structure to hold various information related to the ALSA API
6921 snd_pcm_t *handles[2];
6924 pthread_cond_t runnable_cv;
6928 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
6931 static void *alsaCallbackHandler( void * ptr );
6933 RtApiAlsa :: RtApiAlsa()
6935 // Nothing to do here.
6938 RtApiAlsa :: ~RtApiAlsa()
6940 if ( stream_.state != STREAM_CLOSED ) closeStream();
6943 unsigned int RtApiAlsa :: getDeviceCount( void )
6945 unsigned nDevices = 0;
6946 int result, subdevice, card;
6950 // Count cards and devices
6952 snd_card_next( &card );
6953 while ( card >= 0 ) {
6954 sprintf( name, "hw:%d", card );
6955 result = snd_ctl_open( &handle, name, 0 );
6957 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6958 errorText_ = errorStream_.str();
6959 error( RtAudioError::WARNING );
6964 result = snd_ctl_pcm_next_device( handle, &subdevice );
6966 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6967 errorText_ = errorStream_.str();
6968 error( RtAudioError::WARNING );
6971 if ( subdevice < 0 )
6976 snd_ctl_close( handle );
6977 snd_card_next( &card );
6980 result = snd_ctl_open( &handle, "default", 0 );
6983 snd_ctl_close( handle );
6989 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
6991 RtAudio::DeviceInfo info;
6992 info.probed = false;
6994 unsigned nDevices = 0;
6995 int result, subdevice, card;
6999 // Count cards and devices
7002 snd_card_next( &card );
7003 while ( card >= 0 ) {
7004 sprintf( name, "hw:%d", card );
7005 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7007 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7008 errorText_ = errorStream_.str();
7009 error( RtAudioError::WARNING );
7014 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7016 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7017 errorText_ = errorStream_.str();
7018 error( RtAudioError::WARNING );
7021 if ( subdevice < 0 ) break;
7022 if ( nDevices == device ) {
7023 sprintf( name, "hw:%d,%d", card, subdevice );
7029 snd_ctl_close( chandle );
7030 snd_card_next( &card );
7033 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7034 if ( result == 0 ) {
7035 if ( nDevices == device ) {
7036 strcpy( name, "default" );
7042 if ( nDevices == 0 ) {
7043 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7044 error( RtAudioError::INVALID_USE );
7048 if ( device >= nDevices ) {
7049 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7050 error( RtAudioError::INVALID_USE );
7056 // If a stream is already open, we cannot probe the stream devices.
7057 // Thus, use the saved results.
7058 if ( stream_.state != STREAM_CLOSED &&
7059 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7060 snd_ctl_close( chandle );
7061 if ( device >= devices_.size() ) {
7062 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7063 error( RtAudioError::WARNING );
7066 return devices_[ device ];
7069 int openMode = SND_PCM_ASYNC;
7070 snd_pcm_stream_t stream;
7071 snd_pcm_info_t *pcminfo;
7072 snd_pcm_info_alloca( &pcminfo );
7074 snd_pcm_hw_params_t *params;
7075 snd_pcm_hw_params_alloca( ¶ms );
7077 // First try for playback unless default device (which has subdev -1)
7078 stream = SND_PCM_STREAM_PLAYBACK;
7079 snd_pcm_info_set_stream( pcminfo, stream );
7080 if ( subdevice != -1 ) {
7081 snd_pcm_info_set_device( pcminfo, subdevice );
7082 snd_pcm_info_set_subdevice( pcminfo, 0 );
7084 result = snd_ctl_pcm_info( chandle, pcminfo );
7086 // Device probably doesn't support playback.
7091 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7093 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7094 errorText_ = errorStream_.str();
7095 error( RtAudioError::WARNING );
7099 // The device is open ... fill the parameter structure.
7100 result = snd_pcm_hw_params_any( phandle, params );
7102 snd_pcm_close( phandle );
7103 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7104 errorText_ = errorStream_.str();
7105 error( RtAudioError::WARNING );
7109 // Get output channel information.
7111 result = snd_pcm_hw_params_get_channels_max( params, &value );
7113 snd_pcm_close( phandle );
7114 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7115 errorText_ = errorStream_.str();
7116 error( RtAudioError::WARNING );
7119 info.outputChannels = value;
7120 snd_pcm_close( phandle );
7123 stream = SND_PCM_STREAM_CAPTURE;
7124 snd_pcm_info_set_stream( pcminfo, stream );
7126 // Now try for capture unless default device (with subdev = -1)
7127 if ( subdevice != -1 ) {
7128 result = snd_ctl_pcm_info( chandle, pcminfo );
7129 snd_ctl_close( chandle );
7131 // Device probably doesn't support capture.
7132 if ( info.outputChannels == 0 ) return info;
7133 goto probeParameters;
7137 snd_ctl_close( chandle );
7139 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7141 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7142 errorText_ = errorStream_.str();
7143 error( RtAudioError::WARNING );
7144 if ( info.outputChannels == 0 ) return info;
7145 goto probeParameters;
7148 // The device is open ... fill the parameter structure.
7149 result = snd_pcm_hw_params_any( phandle, params );
7151 snd_pcm_close( phandle );
7152 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7153 errorText_ = errorStream_.str();
7154 error( RtAudioError::WARNING );
7155 if ( info.outputChannels == 0 ) return info;
7156 goto probeParameters;
7159 result = snd_pcm_hw_params_get_channels_max( params, &value );
7161 snd_pcm_close( phandle );
7162 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7163 errorText_ = errorStream_.str();
7164 error( RtAudioError::WARNING );
7165 if ( info.outputChannels == 0 ) return info;
7166 goto probeParameters;
7168 info.inputChannels = value;
7169 snd_pcm_close( phandle );
7171 // If device opens for both playback and capture, we determine the channels.
7172 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7173 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7175 // ALSA doesn't provide default devices so we'll use the first available one.
7176 if ( device == 0 && info.outputChannels > 0 )
7177 info.isDefaultOutput = true;
7178 if ( device == 0 && info.inputChannels > 0 )
7179 info.isDefaultInput = true;
7182 // At this point, we just need to figure out the supported data
7183 // formats and sample rates. We'll proceed by opening the device in
7184 // the direction with the maximum number of channels, or playback if
7185 // they are equal. This might limit our sample rate options, but so
7188 if ( info.outputChannels >= info.inputChannels )
7189 stream = SND_PCM_STREAM_PLAYBACK;
7191 stream = SND_PCM_STREAM_CAPTURE;
7192 snd_pcm_info_set_stream( pcminfo, stream );
7194 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7196 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7197 errorText_ = errorStream_.str();
7198 error( RtAudioError::WARNING );
7202 // The device is open ... fill the parameter structure.
7203 result = snd_pcm_hw_params_any( phandle, params );
7205 snd_pcm_close( phandle );
7206 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7207 errorText_ = errorStream_.str();
7208 error( RtAudioError::WARNING );
7212 // Test our discrete set of sample rate values.
7213 info.sampleRates.clear();
7214 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7215 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7216 info.sampleRates.push_back( SAMPLE_RATES[i] );
7218 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7219 info.preferredSampleRate = SAMPLE_RATES[i];
7222 if ( info.sampleRates.size() == 0 ) {
7223 snd_pcm_close( phandle );
7224 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7225 errorText_ = errorStream_.str();
7226 error( RtAudioError::WARNING );
7230 // Probe the supported data formats ... we don't care about endian-ness just yet
7231 snd_pcm_format_t format;
7232 info.nativeFormats = 0;
7233 format = SND_PCM_FORMAT_S8;
7234 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7235 info.nativeFormats |= RTAUDIO_SINT8;
7236 format = SND_PCM_FORMAT_S16;
7237 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7238 info.nativeFormats |= RTAUDIO_SINT16;
7239 format = SND_PCM_FORMAT_S24;
7240 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7241 info.nativeFormats |= RTAUDIO_SINT24;
7242 format = SND_PCM_FORMAT_S32;
7243 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7244 info.nativeFormats |= RTAUDIO_SINT32;
7245 format = SND_PCM_FORMAT_FLOAT;
7246 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7247 info.nativeFormats |= RTAUDIO_FLOAT32;
7248 format = SND_PCM_FORMAT_FLOAT64;
7249 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7250 info.nativeFormats |= RTAUDIO_FLOAT64;
7252 // Check that we have at least one supported format
7253 if ( info.nativeFormats == 0 ) {
7254 snd_pcm_close( phandle );
7255 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7256 errorText_ = errorStream_.str();
7257 error( RtAudioError::WARNING );
7261 // Get the device name
7263 result = snd_card_get_name( card, &cardname );
7264 if ( result >= 0 ) {
7265 sprintf( name, "hw:%s,%d", cardname, subdevice );
7270 // That's all ... close the device and return
7271 snd_pcm_close( phandle );
7276 void RtApiAlsa :: saveDeviceInfo( void )
7280 unsigned int nDevices = getDeviceCount();
7281 devices_.resize( nDevices );
7282 for ( unsigned int i=0; i<nDevices; i++ )
7283 devices_[i] = getDeviceInfo( i );
7286 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7287 unsigned int firstChannel, unsigned int sampleRate,
7288 RtAudioFormat format, unsigned int *bufferSize,
7289 RtAudio::StreamOptions *options )
7292 #if defined(__RTAUDIO_DEBUG__)
7294 snd_output_stdio_attach(&out, stderr, 0);
7297 // I'm not using the "plug" interface ... too much inconsistent behavior.
7299 unsigned nDevices = 0;
7300 int result, subdevice, card;
7304 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7305 snprintf(name, sizeof(name), "%s", "default");
7307 // Count cards and devices
7309 snd_card_next( &card );
7310 while ( card >= 0 ) {
7311 sprintf( name, "hw:%d", card );
7312 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7314 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7315 errorText_ = errorStream_.str();
7320 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7321 if ( result < 0 ) break;
7322 if ( subdevice < 0 ) break;
7323 if ( nDevices == device ) {
7324 sprintf( name, "hw:%d,%d", card, subdevice );
7325 snd_ctl_close( chandle );
7330 snd_ctl_close( chandle );
7331 snd_card_next( &card );
7334 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7335 if ( result == 0 ) {
7336 if ( nDevices == device ) {
7337 strcpy( name, "default" );
7343 if ( nDevices == 0 ) {
7344 // This should not happen because a check is made before this function is called.
7345 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7349 if ( device >= nDevices ) {
7350 // This should not happen because a check is made before this function is called.
7351 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7358 // The getDeviceInfo() function will not work for a device that is
7359 // already open. Thus, we'll probe the system before opening a
7360 // stream and save the results for use by getDeviceInfo().
7361 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7362 this->saveDeviceInfo();
7364 snd_pcm_stream_t stream;
7365 if ( mode == OUTPUT )
7366 stream = SND_PCM_STREAM_PLAYBACK;
7368 stream = SND_PCM_STREAM_CAPTURE;
7371 int openMode = SND_PCM_ASYNC;
7372 result = snd_pcm_open( &phandle, name, stream, openMode );
7374 if ( mode == OUTPUT )
7375 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7377 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7378 errorText_ = errorStream_.str();
7382 // Fill the parameter structure.
7383 snd_pcm_hw_params_t *hw_params;
7384 snd_pcm_hw_params_alloca( &hw_params );
7385 result = snd_pcm_hw_params_any( phandle, hw_params );
7387 snd_pcm_close( phandle );
7388 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7389 errorText_ = errorStream_.str();
7393 #if defined(__RTAUDIO_DEBUG__)
7394 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7395 snd_pcm_hw_params_dump( hw_params, out );
7398 // Set access ... check user preference.
7399 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7400 stream_.userInterleaved = false;
7401 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7403 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7404 stream_.deviceInterleaved[mode] = true;
7407 stream_.deviceInterleaved[mode] = false;
7410 stream_.userInterleaved = true;
7411 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7413 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7414 stream_.deviceInterleaved[mode] = false;
7417 stream_.deviceInterleaved[mode] = true;
7421 snd_pcm_close( phandle );
7422 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7423 errorText_ = errorStream_.str();
7427 // Determine how to set the device format.
7428 stream_.userFormat = format;
7429 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7431 if ( format == RTAUDIO_SINT8 )
7432 deviceFormat = SND_PCM_FORMAT_S8;
7433 else if ( format == RTAUDIO_SINT16 )
7434 deviceFormat = SND_PCM_FORMAT_S16;
7435 else if ( format == RTAUDIO_SINT24 )
7436 deviceFormat = SND_PCM_FORMAT_S24;
7437 else if ( format == RTAUDIO_SINT32 )
7438 deviceFormat = SND_PCM_FORMAT_S32;
7439 else if ( format == RTAUDIO_FLOAT32 )
7440 deviceFormat = SND_PCM_FORMAT_FLOAT;
7441 else if ( format == RTAUDIO_FLOAT64 )
7442 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7444 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7445 stream_.deviceFormat[mode] = format;
7449 // The user requested format is not natively supported by the device.
7450 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7451 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7452 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7456 deviceFormat = SND_PCM_FORMAT_FLOAT;
7457 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7458 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7462 deviceFormat = SND_PCM_FORMAT_S32;
7463 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7464 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7468 deviceFormat = SND_PCM_FORMAT_S24;
7469 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7470 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7474 deviceFormat = SND_PCM_FORMAT_S16;
7475 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7476 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7480 deviceFormat = SND_PCM_FORMAT_S8;
7481 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7482 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7486 // If we get here, no supported format was found.
7487 snd_pcm_close( phandle );
7488 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7489 errorText_ = errorStream_.str();
7493 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7495 snd_pcm_close( phandle );
7496 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7497 errorText_ = errorStream_.str();
7501 // Determine whether byte-swaping is necessary.
7502 stream_.doByteSwap[mode] = false;
7503 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7504 result = snd_pcm_format_cpu_endian( deviceFormat );
7506 stream_.doByteSwap[mode] = true;
7507 else if (result < 0) {
7508 snd_pcm_close( phandle );
7509 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7510 errorText_ = errorStream_.str();
7515 // Set the sample rate.
7516 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7518 snd_pcm_close( phandle );
7519 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7520 errorText_ = errorStream_.str();
7524 // Determine the number of channels for this device. We support a possible
7525 // minimum device channel number > than the value requested by the user.
7526 stream_.nUserChannels[mode] = channels;
7528 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7529 unsigned int deviceChannels = value;
7530 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7531 snd_pcm_close( phandle );
7532 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7533 errorText_ = errorStream_.str();
7537 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7539 snd_pcm_close( phandle );
7540 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7541 errorText_ = errorStream_.str();
7544 deviceChannels = value;
7545 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7546 stream_.nDeviceChannels[mode] = deviceChannels;
7548 // Set the device channels.
7549 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7551 snd_pcm_close( phandle );
7552 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7553 errorText_ = errorStream_.str();
7557 // Set the buffer (or period) size.
7559 snd_pcm_uframes_t periodSize = *bufferSize;
7560 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7562 snd_pcm_close( phandle );
7563 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7564 errorText_ = errorStream_.str();
7567 *bufferSize = periodSize;
7569 // Set the buffer number, which in ALSA is referred to as the "period".
7570 unsigned int periods = 0;
7571 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7572 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7573 if ( periods < 2 ) periods = 4; // a fairly safe default value
7574 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7576 snd_pcm_close( phandle );
7577 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7578 errorText_ = errorStream_.str();
7582 // If attempting to setup a duplex stream, the bufferSize parameter
7583 // MUST be the same in both directions!
7584 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7585 snd_pcm_close( phandle );
7586 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7587 errorText_ = errorStream_.str();
7591 stream_.bufferSize = *bufferSize;
7593 // Install the hardware configuration
7594 result = snd_pcm_hw_params( phandle, hw_params );
7596 snd_pcm_close( phandle );
7597 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7598 errorText_ = errorStream_.str();
7602 #if defined(__RTAUDIO_DEBUG__)
7603 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7604 snd_pcm_hw_params_dump( hw_params, out );
7607 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7608 snd_pcm_sw_params_t *sw_params = NULL;
7609 snd_pcm_sw_params_alloca( &sw_params );
7610 snd_pcm_sw_params_current( phandle, sw_params );
7611 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7612 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7613 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7615 // The following two settings were suggested by Theo Veenker
7616 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7617 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7619 // here are two options for a fix
7620 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7621 snd_pcm_uframes_t val;
7622 snd_pcm_sw_params_get_boundary( sw_params, &val );
7623 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7625 result = snd_pcm_sw_params( phandle, sw_params );
7627 snd_pcm_close( phandle );
7628 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7629 errorText_ = errorStream_.str();
7633 #if defined(__RTAUDIO_DEBUG__)
7634 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7635 snd_pcm_sw_params_dump( sw_params, out );
7638 // Set flags for buffer conversion
7639 stream_.doConvertBuffer[mode] = false;
7640 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7641 stream_.doConvertBuffer[mode] = true;
7642 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7643 stream_.doConvertBuffer[mode] = true;
7644 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7645 stream_.nUserChannels[mode] > 1 )
7646 stream_.doConvertBuffer[mode] = true;
7648 // Allocate the ApiHandle if necessary and then save.
7649 AlsaHandle *apiInfo = 0;
7650 if ( stream_.apiHandle == 0 ) {
7652 apiInfo = (AlsaHandle *) new AlsaHandle;
7654 catch ( std::bad_alloc& ) {
7655 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7659 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7660 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7664 stream_.apiHandle = (void *) apiInfo;
7665 apiInfo->handles[0] = 0;
7666 apiInfo->handles[1] = 0;
7669 apiInfo = (AlsaHandle *) stream_.apiHandle;
7671 apiInfo->handles[mode] = phandle;
7674 // Allocate necessary internal buffers.
7675 unsigned long bufferBytes;
7676 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7677 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7678 if ( stream_.userBuffer[mode] == NULL ) {
7679 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7683 if ( stream_.doConvertBuffer[mode] ) {
7685 bool makeBuffer = true;
7686 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7687 if ( mode == INPUT ) {
7688 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7689 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7690 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7695 bufferBytes *= *bufferSize;
7696 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7697 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7698 if ( stream_.deviceBuffer == NULL ) {
7699 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7705 stream_.sampleRate = sampleRate;
7706 stream_.nBuffers = periods;
7707 stream_.device[mode] = device;
7708 stream_.state = STREAM_STOPPED;
7710 // Setup the buffer conversion information structure.
7711 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7713 // Setup thread if necessary.
7714 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7715 // We had already set up an output stream.
7716 stream_.mode = DUPLEX;
7717 // Link the streams if possible.
7718 apiInfo->synchronized = false;
7719 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7720 apiInfo->synchronized = true;
7722 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7723 error( RtAudioError::WARNING );
7727 stream_.mode = mode;
7729 // Setup callback thread.
7730 stream_.callbackInfo.object = (void *) this;
7732 // Set the thread attributes for joinable and realtime scheduling
7733 // priority (optional). The higher priority will only take affect
7734 // if the program is run as root or suid. Note, under Linux
7735 // processes with CAP_SYS_NICE privilege, a user can change
7736 // scheduling policy and priority (thus need not be root). See
7737 // POSIX "capabilities".
7738 pthread_attr_t attr;
7739 pthread_attr_init( &attr );
7740 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7741 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
7742 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7743 stream_.callbackInfo.doRealtime = true;
7744 struct sched_param param;
7745 int priority = options->priority;
7746 int min = sched_get_priority_min( SCHED_RR );
7747 int max = sched_get_priority_max( SCHED_RR );
7748 if ( priority < min ) priority = min;
7749 else if ( priority > max ) priority = max;
7750 param.sched_priority = priority;
7752 // Set the policy BEFORE the priority. Otherwise it fails.
7753 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7754 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7755 // This is definitely required. Otherwise it fails.
7756 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
7757 pthread_attr_setschedparam(&attr, ¶m);
7760 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7762 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7765 stream_.callbackInfo.isRunning = true;
7766 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7767 pthread_attr_destroy( &attr );
7769 // Failed. Try instead with default attributes.
7770 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
7772 stream_.callbackInfo.isRunning = false;
7773 errorText_ = "RtApiAlsa::error creating callback thread!";
7783 pthread_cond_destroy( &apiInfo->runnable_cv );
7784 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7785 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7787 stream_.apiHandle = 0;
7790 if ( phandle) snd_pcm_close( phandle );
7792 for ( int i=0; i<2; i++ ) {
7793 if ( stream_.userBuffer[i] ) {
7794 free( stream_.userBuffer[i] );
7795 stream_.userBuffer[i] = 0;
7799 if ( stream_.deviceBuffer ) {
7800 free( stream_.deviceBuffer );
7801 stream_.deviceBuffer = 0;
7804 stream_.state = STREAM_CLOSED;
// Close an open ALSA stream: wake the (possibly waiting) callback thread and
// join it, drop any in-flight PCM data, close both PCM handles, free the
// API handle and internal buffers, and mark the stream CLOSED.
// NOTE(review): the embedded original line numbers are non-contiguous in this
// excerpt (e.g. 7812 -> 7816), so some statements (returns, closing braces)
// are not visible here; the visible code is left byte-identical.
7808 void RtApiAlsa :: closeStream()
7810 if ( stream_.state == STREAM_CLOSED ) {
7811 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
7812 error( RtAudioError::WARNING );
7816 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// Tell the callback loop to exit, then wake it in case it is blocked on
// the runnable condition variable (stream stopped).
7817 stream_.callbackInfo.isRunning = false;
7818 MUTEX_LOCK( &stream_.mutex );
7819 if ( stream_.state == STREAM_STOPPED ) {
7820 apiInfo->runnable = true;
7821 pthread_cond_signal( &apiInfo->runnable_cv );
7823 MUTEX_UNLOCK( &stream_.mutex );
// Wait for the callback thread to terminate before tearing anything down.
7824 pthread_join( stream_.callbackInfo.thread, NULL );
7826 if ( stream_.state == STREAM_RUNNING ) {
7827 stream_.state = STREAM_STOPPED;
// snd_pcm_drop() discards pending frames immediately (no drain).
7828 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
7829 snd_pcm_drop( apiInfo->handles[0] );
7830 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
7831 snd_pcm_drop( apiInfo->handles[1] );
// Release the condition variable and PCM handles (index 0 = output,
// index 1 = input, per the usage throughout this backend).
7835 pthread_cond_destroy( &apiInfo->runnable_cv );
7836 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7837 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7839 stream_.apiHandle = 0;
// Free user and device conversion buffers allocated in probeDeviceOpen().
7842 for ( int i=0; i<2; i++ ) {
7843 if ( stream_.userBuffer[i] ) {
7844 free( stream_.userBuffer[i] );
7845 stream_.userBuffer[i] = 0;
7849 if ( stream_.deviceBuffer ) {
7850 free( stream_.deviceBuffer );
7851 stream_.deviceBuffer = 0;
7854 stream_.mode = UNINITIALIZED;
7855 stream_.state = STREAM_CLOSED;
// Start a stopped ALSA stream: prepare the output and/or input PCM devices
// if needed, flag the stream RUNNING, and wake the callback thread.
// NOTE(review): non-contiguous original line numbers — some error-return
// paths and braces are not visible in this excerpt.
7858 void RtApiAlsa :: startStream()
7860 // This method calls snd_pcm_prepare if the device isn't already in that state.
7863 if ( stream_.state == STREAM_RUNNING ) {
7864 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
7865 error( RtAudioError::WARNING );
7869 MUTEX_LOCK( &stream_.mutex );
7872 snd_pcm_state_t state;
7873 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7874 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Output device: prepare only if not already in the PREPARED state.
7875 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7876 state = snd_pcm_state( handle[0] );
7877 if ( state != SND_PCM_STATE_PREPARED ) {
7878 result = snd_pcm_prepare( handle[0] );
7880 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
7881 errorText_ = errorStream_.str();
// Input device: skipped when linked (synchronized) to the output device,
// since the link makes the prepare/start follow the output handle.
7887 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7888 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
7889 state = snd_pcm_state( handle[1] );
7890 if ( state != SND_PCM_STATE_PREPARED ) {
7891 result = snd_pcm_prepare( handle[1] );
7893 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
7894 errorText_ = errorStream_.str();
7900 stream_.state = STREAM_RUNNING;
// Release the callback thread blocked on runnable_cv.
7903 apiInfo->runnable = true;
7904 pthread_cond_signal( &apiInfo->runnable_cv );
7905 MUTEX_UNLOCK( &stream_.mutex );
7907 if ( result >= 0 ) return;
7908 error( RtAudioError::SYSTEM_ERROR );
// Stop a running ALSA stream gracefully: drain queued output samples
// (or drop them when the devices are hardware-linked), drop input, and
// park the callback thread.
// NOTE(review): non-contiguous original line numbers — error-return paths
// are not fully visible in this excerpt.
7911 void RtApiAlsa :: stopStream()
7914 if ( stream_.state == STREAM_STOPPED ) {
7915 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
7916 error( RtAudioError::WARNING );
7920 stream_.state = STREAM_STOPPED;
7921 MUTEX_LOCK( &stream_.mutex );
7924 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7925 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7926 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Linked (synchronized) streams: drop immediately; otherwise drain so
// already-queued output is played out before stopping.
7927 if ( apiInfo->synchronized )
7928 result = snd_pcm_drop( handle[0] );
7930 result = snd_pcm_drain( handle[0] );
7932 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
7933 errorText_ = errorStream_.str();
// Input has no useful pending data — always drop.
7938 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7939 result = snd_pcm_drop( handle[1] );
7941 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
7942 errorText_ = errorStream_.str();
7948 apiInfo->runnable = false; // fixes high CPU usage when stopped
7949 MUTEX_UNLOCK( &stream_.mutex );
7951 if ( result >= 0 ) return;
7952 error( RtAudioError::SYSTEM_ERROR );
// Abort a running ALSA stream: like stopStream() but always uses
// snd_pcm_drop() on both devices, discarding any pending samples instead
// of draining them.
// NOTE(review): non-contiguous original line numbers — error-return paths
// are not fully visible in this excerpt.
7955 void RtApiAlsa :: abortStream()
7958 if ( stream_.state == STREAM_STOPPED ) {
7959 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
7960 error( RtAudioError::WARNING );
7964 stream_.state = STREAM_STOPPED;
7965 MUTEX_LOCK( &stream_.mutex );
7968 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7969 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7970 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7971 result = snd_pcm_drop( handle[0] );
7973 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
7974 errorText_ = errorStream_.str();
7979 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7980 result = snd_pcm_drop( handle[1] );
7982 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
7983 errorText_ = errorStream_.str();
7989 apiInfo->runnable = false; // fixes high CPU usage when stopped
7990 MUTEX_UNLOCK( &stream_.mutex );
7992 if ( result >= 0 ) return;
7993 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the ALSA callback loop (run from alsaCallbackHandler):
// waits while the stream is stopped, invokes the user callback, then reads
// input from and/or writes output to the PCM device(s), handling xruns,
// byte swapping, and format/channel conversion.
// NOTE(review): non-contiguous original line numbers — some braces, gotos
// and the "unlock:" label are not visible in this excerpt; code is left
// byte-identical.
7996 void RtApiAlsa :: callbackEvent()
7998 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// Block here while the stream is stopped; startStream()/closeStream()
// set runnable and signal the condition variable to release us.
7999 if ( stream_.state == STREAM_STOPPED ) {
8000 MUTEX_LOCK( &stream_.mutex );
8001 while ( !apiInfo->runnable )
8002 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
8004 if ( stream_.state != STREAM_RUNNING ) {
8005 MUTEX_UNLOCK( &stream_.mutex );
8008 MUTEX_UNLOCK( &stream_.mutex );
8011 if ( stream_.state == STREAM_CLOSED ) {
8012 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8013 error( RtAudioError::WARNING );
// Report any xrun (underflow/overflow) flags recorded since the last
// callback, then invoke the user callback. Return value 1 = stop,
// 2 = abort (handled below / in the missing lines).
8017 int doStopStream = 0;
8018 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8019 double streamTime = getStreamTime();
8020 RtAudioStreamStatus status = 0;
8021 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8022 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8023 apiInfo->xrun[0] = false;
8025 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8026 status |= RTAUDIO_INPUT_OVERFLOW;
8027 apiInfo->xrun[1] = false;
8029 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8030 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
8032 if ( doStopStream == 2 ) {
8037 MUTEX_LOCK( &stream_.mutex );
8039 // The state might change while waiting on a mutex.
8040 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8046 snd_pcm_sframes_t frames;
8047 RtAudioFormat format;
8048 handle = (snd_pcm_t **) apiInfo->handles;
// ----- INPUT side: capture into device buffer (if converting) or
// directly into the user buffer.
8050 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8052 // Setup parameters.
8053 if ( stream_.doConvertBuffer[1] ) {
8054 buffer = stream_.deviceBuffer;
8055 channels = stream_.nDeviceChannels[1];
8056 format = stream_.deviceFormat[1];
8059 buffer = stream_.userBuffer[1];
8060 channels = stream_.nUserChannels[1];
8061 format = stream_.userFormat;
8064 // Read samples from device in interleaved/non-interleaved format.
8065 if ( stream_.deviceInterleaved[1] )
8066 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved: build per-channel pointer array (VLA, gcc extension)
// with one contiguous plane per channel.
8068 void *bufs[channels];
8069 size_t offset = stream_.bufferSize * formatBytes( format );
8070 for ( int i=0; i<channels; i++ )
8071 bufs[i] = (void *) (buffer + (i * offset));
8072 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
// Short read: -EPIPE signals an overrun — recover with snd_pcm_prepare
// and record the xrun for the next callback's status flags.
8075 if ( result < (int) stream_.bufferSize ) {
8076 // Either an error or overrun occured.
8077 if ( result == -EPIPE ) {
8078 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8079 if ( state == SND_PCM_STATE_XRUN ) {
8080 apiInfo->xrun[1] = true;
8081 result = snd_pcm_prepare( handle[1] );
8083 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8084 errorText_ = errorStream_.str();
8088 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8089 errorText_ = errorStream_.str();
8093 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8094 errorText_ = errorStream_.str();
8096 error( RtAudioError::WARNING );
8100 // Do byte swapping if necessary.
8101 if ( stream_.doByteSwap[1] )
8102 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8104 // Do buffer conversion if necessary.
8105 if ( stream_.doConvertBuffer[1] )
8106 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8108 // Check stream latency
8109 result = snd_pcm_delay( handle[1], &frames );
8110 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ----- OUTPUT side: convert/byte-swap the user data, then write it to
// the playback device.
8115 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8117 // Setup parameters and do buffer conversion if necessary.
8118 if ( stream_.doConvertBuffer[0] ) {
8119 buffer = stream_.deviceBuffer;
8120 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8121 channels = stream_.nDeviceChannels[0];
8122 format = stream_.deviceFormat[0];
8125 buffer = stream_.userBuffer[0];
8126 channels = stream_.nUserChannels[0];
8127 format = stream_.userFormat;
8130 // Do byte swapping if necessary.
8131 if ( stream_.doByteSwap[0] )
8132 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8134 // Write samples to device in interleaved/non-interleaved format.
8135 if ( stream_.deviceInterleaved[0] )
8136 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
8138 void *bufs[channels];
8139 size_t offset = stream_.bufferSize * formatBytes( format );
8140 for ( int i=0; i<channels; i++ )
8141 bufs[i] = (void *) (buffer + (i * offset));
8142 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
// Short write: -EPIPE signals an underrun — same recovery pattern as
// the input side above.
8145 if ( result < (int) stream_.bufferSize ) {
8146 // Either an error or underrun occured.
8147 if ( result == -EPIPE ) {
8148 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8149 if ( state == SND_PCM_STATE_XRUN ) {
8150 apiInfo->xrun[0] = true;
8151 result = snd_pcm_prepare( handle[0] );
8153 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8154 errorText_ = errorStream_.str();
8157 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8160 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8161 errorText_ = errorStream_.str();
8165 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8166 errorText_ = errorStream_.str();
8168 error( RtAudioError::WARNING );
8172 // Check stream latency
8173 result = snd_pcm_delay( handle[0], &frames );
8174 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8178 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock, then honor a deferred stop request (callback
// returned 1).
8180 RtApi::tickStreamTime();
8181 if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the ALSA callback thread (created in
// probeDeviceOpen). Loops calling RtApiAlsa::callbackEvent() until
// CallbackInfo::isRunning is cleared by closeStream().
// NOTE(review): the matching "#endif" and loop braces fall in lines not
// visible in this excerpt.
8184 static void *alsaCallbackHandler( void *ptr )
8186 CallbackInfo *info = (CallbackInfo *) ptr;
8187 RtApiAlsa *object = (RtApiAlsa *) info->object;
8188 bool *isRunning = &info->isRunning;
8190 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: report whether the SCHED_RR request actually took
// effect for this thread (it requires privileges).
8191 if ( info->doRealtime ) {
8192 std::cerr << "RtAudio alsa: " <<
8193 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8194 "running realtime scheduling" << std::endl;
// pthread_testcancel() provides a cancellation point on each iteration.
8198 while ( *isRunning == true ) {
8199 pthread_testcancel();
8200 object->callbackEvent();
8203 pthread_exit( NULL );
8206 //******************** End of __LINUX_ALSA__ *********************//
8209 #if defined(__LINUX_PULSE__)
8211 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8212 // and Tristan Matthews.
8214 #include <pulse/error.h>
8215 #include <pulse/simple.h>
// Sample rates the PulseAudio backend advertises; the list is
// zero-terminated so callers can iterate with "for (...; *sr; ++sr)".
8218 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8219 44100, 48000, 96000, 0};
// Maps an RtAudio sample format to the equivalent PulseAudio format.
8221 struct rtaudio_pa_format_mapping_t {
8222 RtAudioFormat rtaudio_format;
8223 pa_sample_format_t pa_format;
// Table of natively supported format pairs, terminated by the
// {0, PA_SAMPLE_INVALID} sentinel; formats not listed here are handled by
// RtAudio's internal conversion to FLOAT32 (see probeDeviceOpen).
8226 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8227 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8228 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8229 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8230 {0, PA_SAMPLE_INVALID}};
// Per-stream state for the PulseAudio backend: the pa_simple playback and
// record connections (s_play/s_rec, initialized below), the callback
// thread, and the runnable flag + condition variable used to park the
// callback thread while the stream is stopped.
// NOTE(review): member declarations between lines 8232 and 8236 (s_play,
// s_rec, thread, runnable) are not visible in this excerpt.
8232 struct PulseAudioHandle {
8236 pthread_cond_t runnable_cv;
8238 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: ensure an open stream is shut down (the call on the missing
// following line is presumably closeStream() — the statement body is not
// visible in this excerpt).
8241 RtApiPulse::~RtApiPulse()
8243 if ( stream_.state != STREAM_CLOSED )
8247 unsigned int RtApiPulse::getDeviceCount( void )
// Describe the single PulseAudio "device": fixed stereo in/out, the
// zero-terminated SUPPORTED_SAMPLERATES list, and the natively supported
// formats (matching the supported_sampleformats table above).
8252 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8254 RtAudio::DeviceInfo info;
8256 info.name = "PulseAudio";
8257 info.outputChannels = 2;
8258 info.inputChannels = 2;
8259 info.duplexChannels = 2;
8260 info.isDefaultOutput = true;
8261 info.isDefaultInput = true;
8263 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8264 info.sampleRates.push_back( *sr );
8266 info.preferredSampleRate = 48000;
8267 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
// Thread entry point for the PulseAudio callback thread; mirrors
// alsaCallbackHandler: loop on callbackEvent() until isRunning is cleared.
// NOTE(review): the matching "#endif" and loop braces fall in lines not
// visible in this excerpt.
8272 static void *pulseaudio_callback( void * user )
8274 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8275 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8276 volatile bool *isRunning = &cbi->isRunning;
8278 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: report whether realtime (SCHED_RR) scheduling took
// effect for this thread.
8279 if (cbi->doRealtime) {
8280 std::cerr << "RtAudio pulse: " <<
8281 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8282 "running realtime scheduling" << std::endl;
8286 while ( *isRunning ) {
8287 pthread_testcancel();
8288 context->callbackEvent();
8291 pthread_exit( NULL );
// Close the PulseAudio stream: wake and join the callback thread, flush
// and free the pa_simple connections, destroy the handle, and free the
// user buffers.
// NOTE(review): non-contiguous original line numbers — some braces and the
// deviceBuffer cleanup are not visible in this excerpt.
8294 void RtApiPulse::closeStream( void )
8296 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// Tell the callback loop to exit, and wake it if it is parked on the
// runnable condition variable.
8298 stream_.callbackInfo.isRunning = false;
8300 MUTEX_LOCK( &stream_.mutex );
8301 if ( stream_.state == STREAM_STOPPED ) {
8302 pah->runnable = true;
8303 pthread_cond_signal( &pah->runnable_cv );
8305 MUTEX_UNLOCK( &stream_.mutex );
8307 pthread_join( pah->thread, 0 );
// Discard any queued playback data before freeing the connection.
8308 if ( pah->s_play ) {
8309 pa_simple_flush( pah->s_play, NULL );
8310 pa_simple_free( pah->s_play );
8313 pa_simple_free( pah->s_rec );
8315 pthread_cond_destroy( &pah->runnable_cv );
8317 stream_.apiHandle = 0;
8320 if ( stream_.userBuffer[0] ) {
8321 free( stream_.userBuffer[0] );
8322 stream_.userBuffer[0] = 0;
8324 if ( stream_.userBuffer[1] ) {
8325 free( stream_.userBuffer[1] );
8326 stream_.userBuffer[1] = 0;
8329 stream_.state = STREAM_CLOSED;
8330 stream_.mode = UNINITIALIZED;
// One iteration of the PulseAudio callback loop: wait while stopped,
// invoke the user callback, then write output via pa_simple_write and/or
// read input via pa_simple_read, converting formats as configured.
// NOTE(review): non-contiguous original line numbers — declarations of
// "bytes"/"pa_error" and some braces are not visible in this excerpt.
8333 void RtApiPulse::callbackEvent( void )
8335 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// Park here while the stream is stopped; startStream()/closeStream()
// signal runnable_cv to release us.
8337 if ( stream_.state == STREAM_STOPPED ) {
8338 MUTEX_LOCK( &stream_.mutex );
8339 while ( !pah->runnable )
8340 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8342 if ( stream_.state != STREAM_RUNNING ) {
8343 MUTEX_UNLOCK( &stream_.mutex );
8346 MUTEX_UNLOCK( &stream_.mutex );
8349 if ( stream_.state == STREAM_CLOSED ) {
8350 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8351 "this shouldn't happen!";
8352 error( RtAudioError::WARNING );
// Run the user callback; return value 1 = stop, 2 = abort.
8356 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8357 double streamTime = getStreamTime();
8358 RtAudioStreamStatus status = 0;
8359 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8360 stream_.bufferSize, streamTime, status,
8361 stream_.callbackInfo.userData );
8363 if ( doStopStream == 2 ) {
8368 MUTEX_LOCK( &stream_.mutex );
// When converting, the shared deviceBuffer is the staging area for both
// directions (they are used sequentially below, never concurrently).
8369 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8370 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8372 if ( stream_.state != STREAM_RUNNING )
// ----- OUTPUT: convert user data if needed, then blocking-write to the
// playback connection.
8377 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8378 if ( stream_.doConvertBuffer[OUTPUT] ) {
8379 convertBuffer( stream_.deviceBuffer,
8380 stream_.userBuffer[OUTPUT],
8381 stream_.convertInfo[OUTPUT] );
8382 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8383 formatBytes( stream_.deviceFormat[OUTPUT] );
8385 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8386 formatBytes( stream_.userFormat );
8388 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8389 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8390 pa_strerror( pa_error ) << ".";
8391 errorText_ = errorStream_.str();
8392 error( RtAudioError::WARNING );
// ----- INPUT: blocking-read from the record connection, then convert to
// the user format if needed.
8396 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8397 if ( stream_.doConvertBuffer[INPUT] )
8398 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8399 formatBytes( stream_.deviceFormat[INPUT] );
8401 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8402 formatBytes( stream_.userFormat );
8404 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8405 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8406 pa_strerror( pa_error ) << ".";
8407 errorText_ = errorStream_.str();
8408 error( RtAudioError::WARNING );
8410 if ( stream_.doConvertBuffer[INPUT] ) {
8411 convertBuffer( stream_.userBuffer[INPUT],
8412 stream_.deviceBuffer,
8413 stream_.convertInfo[INPUT] );
8418 MUTEX_UNLOCK( &stream_.mutex );
8419 RtApi::tickStreamTime();
// Honor a deferred stop request (callback returned 1).
8421 if ( doStopStream == 1 )
// Start a stopped PulseAudio stream: flag it RUNNING and wake the parked
// callback thread via runnable_cv. pa_simple streams need no explicit
// start call — reads/writes in callbackEvent() drive the server.
8425 void RtApiPulse::startStream( void )
8427 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8429 if ( stream_.state == STREAM_CLOSED ) {
8430 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8431 error( RtAudioError::INVALID_USE );
8434 if ( stream_.state == STREAM_RUNNING ) {
8435 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8436 error( RtAudioError::WARNING );
8440 MUTEX_LOCK( &stream_.mutex );
8442 stream_.state = STREAM_RUNNING;
8444 pah->runnable = true;
8445 pthread_cond_signal( &pah->runnable_cv );
8446 MUTEX_UNLOCK( &stream_.mutex );
// Stop a running PulseAudio stream gracefully: mark it STOPPED (so the
// callback thread parks) and drain any queued playback data so it is
// heard before the stream goes quiet.
8449 void RtApiPulse::stopStream( void )
8451 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8453 if ( stream_.state == STREAM_CLOSED ) {
8454 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8455 error( RtAudioError::INVALID_USE );
8458 if ( stream_.state == STREAM_STOPPED ) {
8459 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8460 error( RtAudioError::WARNING );
8464 stream_.state = STREAM_STOPPED;
8465 MUTEX_LOCK( &stream_.mutex );
8467 if ( pah && pah->s_play ) {
// pa_simple_drain blocks until all queued samples have been played.
8469 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8470 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8471 pa_strerror( pa_error ) << ".";
8472 errorText_ = errorStream_.str();
// Unlock before error() since SYSTEM_ERROR throws.
8473 MUTEX_UNLOCK( &stream_.mutex );
8474 error( RtAudioError::SYSTEM_ERROR );
8479 stream_.state = STREAM_STOPPED;
8480 MUTEX_UNLOCK( &stream_.mutex );
// Abort a running PulseAudio stream: like stopStream() but uses
// pa_simple_flush to discard queued playback data instead of draining it.
8483 void RtApiPulse::abortStream( void )
8485 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8487 if ( stream_.state == STREAM_CLOSED ) {
8488 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8489 error( RtAudioError::INVALID_USE );
8492 if ( stream_.state == STREAM_STOPPED ) {
8493 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8494 error( RtAudioError::WARNING );
8498 stream_.state = STREAM_STOPPED;
8499 MUTEX_LOCK( &stream_.mutex );
8501 if ( pah && pah->s_play ) {
8503 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8504 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8505 pa_strerror( pa_error ) << ".";
8506 errorText_ = errorStream_.str();
// Unlock before error() since SYSTEM_ERROR throws.
8507 MUTEX_UNLOCK( &stream_.mutex );
8508 error( RtAudioError::SYSTEM_ERROR );
8513 stream_.state = STREAM_STOPPED;
8514 MUTEX_UNLOCK( &stream_.mutex );
// Open (or extend to duplex) the PulseAudio stream: validate parameters
// against the backend's fixed capabilities, pick a native or converted
// sample format, allocate user/device buffers, connect pa_simple record
// and/or playback streams, and spawn the callback thread.
// Returns true on success; error paths (returns/FAILURE) fall partly on
// lines not visible in this excerpt.
// NOTE(review): non-contiguous original line numbers — declarations such
// as "pa_sample_spec ss", "sf_found", "error", and several braces/returns
// are not visible here; code is left byte-identical.
8517 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8518 unsigned int channels, unsigned int firstChannel,
8519 unsigned int sampleRate, RtAudioFormat format,
8520 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8522 PulseAudioHandle *pah = 0;
8523 unsigned long bufferBytes = 0;
// This backend exposes exactly one device (id 0), mono/stereo only, and
// no channel offsets.
8526 if ( device != 0 ) return false;
8527 if ( mode != INPUT && mode != OUTPUT ) return false;
8528 if ( channels != 1 && channels != 2 ) {
8529 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8532 ss.channels = channels;
8534 if ( firstChannel != 0 ) return false;
// Sample rate must be one of the zero-terminated SUPPORTED_SAMPLERATES.
8536 bool sr_found = false;
8537 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8538 if ( sampleRate == *sr ) {
8540 stream_.sampleRate = sampleRate;
8541 ss.rate = sampleRate;
8546 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
// Look for a native format mapping; otherwise fall back to FLOAT32 on the
// device side and let RtAudio convert from the user format.
8551 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8552 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8553 if ( format == sf->rtaudio_format ) {
8555 stream_.userFormat = sf->rtaudio_format;
8556 stream_.deviceFormat[mode] = stream_.userFormat;
8557 ss.format = sf->pa_format;
8561 if ( !sf_found ) { // Use internal data format conversion.
8562 stream_.userFormat = format;
8563 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8564 ss.format = PA_SAMPLE_FLOAT32LE;
8567 // Set other stream parameters.
8568 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8569 else stream_.userInterleaved = true;
8570 stream_.deviceInterleaved[mode] = true;
8571 stream_.nBuffers = 1;
8572 stream_.doByteSwap[mode] = false;
8573 stream_.nUserChannels[mode] = channels;
8574 stream_.nDeviceChannels[mode] = channels + firstChannel;
8575 stream_.channelOffset[mode] = 0;
8576 std::string streamName = "RtAudio";
8578 // Set flags for buffer conversion.
8579 stream_.doConvertBuffer[mode] = false;
8580 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8581 stream_.doConvertBuffer[mode] = true;
8582 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8583 stream_.doConvertBuffer[mode] = true;
8585 // Allocate necessary internal buffers.
8586 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8587 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8588 if ( stream_.userBuffer[mode] == NULL ) {
8589 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8592 stream_.bufferSize = *bufferSize;
8594 if ( stream_.doConvertBuffer[mode] ) {
// Reuse an existing deviceBuffer when it is already large enough (the
// output side allocated it first in duplex setups).
8596 bool makeBuffer = true;
8597 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8598 if ( mode == INPUT ) {
8599 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8600 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8601 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8606 bufferBytes *= *bufferSize;
8607 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8608 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8609 if ( stream_.deviceBuffer == NULL ) {
8610 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8616 stream_.device[mode] = device;
8618 // Setup the buffer conversion information structure.
8619 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
// Create the API handle on first open (output or input, whichever comes
// first); the second open of a duplex pair reuses it.
8621 if ( !stream_.apiHandle ) {
8622 PulseAudioHandle *pah = new PulseAudioHandle;
8624 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8628 stream_.apiHandle = pah;
8629 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8630 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8634 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8637 if ( options && !options->streamName.empty() ) streamName = options->streamName;
// Record stream: constrain the fragment size to one callback buffer so
// reads return promptly; playback uses the server defaults (NULL attr).
8640 pa_buffer_attr buffer_attr;
8641 buffer_attr.fragsize = bufferBytes;
8642 buffer_attr.maxlength = -1;
8644 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8645 if ( !pah->s_rec ) {
8646 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8651 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8652 if ( !pah->s_play ) {
8653 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
// Track mode transitions: first open sets OUTPUT or INPUT; opening the
// other direction afterwards promotes the stream to DUPLEX.
8661 if ( stream_.mode == UNINITIALIZED )
8662 stream_.mode = mode;
8663 else if ( stream_.mode == mode )
8666 stream_.mode = DUPLEX;
8668 if ( !stream_.callbackInfo.isRunning ) {
8669 stream_.callbackInfo.object = this;
8671 stream_.state = STREAM_STOPPED;
8672 // Set the thread attributes for joinable and realtime scheduling
8673 // priority (optional). The higher priority will only take affect
8674 // if the program is run as root or suid. Note, under Linux
8675 // processes with CAP_SYS_NICE privilege, a user can change
8676 // scheduling policy and priority (thus need not be root). See
8677 // POSIX "capabilities".
8678 pthread_attr_t attr;
8679 pthread_attr_init( &attr );
8680 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8681 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8682 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8683 stream_.callbackInfo.doRealtime = true;
8684 struct sched_param param;
8685 int priority = options->priority;
// Clamp the requested priority into the valid SCHED_RR range.
8686 int min = sched_get_priority_min( SCHED_RR );
8687 int max = sched_get_priority_max( SCHED_RR );
8688 if ( priority < min ) priority = min;
8689 else if ( priority > max ) priority = max;
8690 param.sched_priority = priority;
8692 // Set the policy BEFORE the priority. Otherwise it fails.
8693 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8694 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8695 // This is definitely required. Otherwise it fails.
8696 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
// NOTE(review): "¶m" below looks like a mis-encoded "&param"
// (HTML "&para;" corruption in this extraction) — confirm against the
// upstream RtAudio source and restore before compiling.
8697 pthread_attr_setschedparam(&attr, ¶m);
8700 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8702 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8705 stream_.callbackInfo.isRunning = true;
8706 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8707 pthread_attr_destroy(&attr);
8709 // Failed. Try instead with default attributes.
8710 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8712 stream_.callbackInfo.isRunning = false;
8713 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
// Error cleanup (reached via paths not fully visible here): tear down the
// handle and free all buffers, leaving the stream CLOSED.
8722 if ( pah && stream_.callbackInfo.isRunning ) {
8723 pthread_cond_destroy( &pah->runnable_cv );
8725 stream_.apiHandle = 0;
8728 for ( int i=0; i<2; i++ ) {
8729 if ( stream_.userBuffer[i] ) {
8730 free( stream_.userBuffer[i] );
8731 stream_.userBuffer[i] = 0;
8735 if ( stream_.deviceBuffer ) {
8736 free( stream_.deviceBuffer );
8737 stream_.deviceBuffer = 0;
8740 stream_.state = STREAM_CLOSED;
8744 //******************** End of __LINUX_PULSE__ *********************//
8747 #if defined(__LINUX_OSS__)
8750 #include <sys/ioctl.h>
8753 #include <sys/soundcard.h>
// Forward declaration of the OSS callback thread entry point.
8757 static void *ossCallbackHandler(void * ptr);
8759 // A structure to hold various information related to the OSS API
// NOTE(review): the struct's opening line ("struct OssHandle {") and some
// member declarations (xrun flags, triggered) fall on lines not visible in
// this excerpt; the initializer list below references them.
8762 int id[2]; // device ids
8765 pthread_cond_t runnable;
8768 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Default constructor — no OSS-specific initialization is required.
8771 RtApiOss :: RtApiOss()
8773 // Nothing to do here.
// Destructor: close any stream left open by the client.
8776 RtApiOss :: ~RtApiOss()
8778 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Query the number of OSS audio devices via the mixer's SNDCTL_SYSINFO
// ioctl (requires OSS >= 4.0). On failure a WARNING is raised; the
// fallback return (presumably 0) falls on lines not visible here.
8781 unsigned int RtApiOss :: getDeviceCount( void )
8783 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8784 if ( mixerfd == -1 ) {
8785 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
8786 error( RtAudioError::WARNING );
8790 oss_sysinfo sysinfo;
8791 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
8793 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
8794 error( RtAudioError::WARNING );
8799 return sysinfo.numaudios;
// Probe one OSS device via the mixer's SNDCTL_SYSINFO/SNDCTL_AUDIOINFO
// ioctls: channel capabilities, supported data formats, and sample rates.
// info.probed stays false on any failure.
// NOTE(review): non-contiguous original line numbers — close(mixerfd)
// calls, some returns/braces, and the final probed=true/return are not
// visible in this excerpt.
8802 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
8804 RtAudio::DeviceInfo info;
8805 info.probed = false;
8807 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8808 if ( mixerfd == -1 ) {
8809 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
8810 error( RtAudioError::WARNING );
8814 oss_sysinfo sysinfo;
8815 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8816 if ( result == -1 ) {
8818 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
8819 error( RtAudioError::WARNING );
8823 unsigned nDevices = sysinfo.numaudios;
8824 if ( nDevices == 0 ) {
8826 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
8827 error( RtAudioError::INVALID_USE );
8831 if ( device >= nDevices ) {
8833 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
8834 error( RtAudioError::INVALID_USE );
// Query per-device capabilities (the ainfo.dev = device assignment falls
// on a line not visible here).
8838 oss_audioinfo ainfo;
8840 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8842 if ( result == -1 ) {
8843 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8844 errorText_ = errorStream_.str();
8845 error( RtAudioError::WARNING );
// Channel capabilities from the PCM_CAP_* bits.
8850 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
8851 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
8852 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
8853 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
8854 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
8857 // Probe data formats ... do for input
8858 unsigned long mask = ainfo.iformats;
8859 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
8860 info.nativeFormats |= RTAUDIO_SINT16;
8861 if ( mask & AFMT_S8 )
8862 info.nativeFormats |= RTAUDIO_SINT8;
8863 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
8864 info.nativeFormats |= RTAUDIO_SINT32;
8866 if ( mask & AFMT_FLOAT )
8867 info.nativeFormats |= RTAUDIO_FLOAT32;
8869 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
8870 info.nativeFormats |= RTAUDIO_SINT24;
8872 // Check that we have at least one supported format
8873 if ( info.nativeFormats == 0 ) {
8874 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
8875 errorText_ = errorStream_.str();
8876 error( RtAudioError::WARNING );
8880 // Probe the supported sample rates.
8881 info.sampleRates.clear();
// If the driver enumerates discrete rates, intersect them with RtAudio's
// SAMPLE_RATES table; otherwise (else branch below) use the driver's
// min/max range. Preferred rate: the highest supported rate <= 48000.
8882 if ( ainfo.nrates ) {
8883 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
8884 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8885 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
8886 info.sampleRates.push_back( SAMPLE_RATES[k] );
8888 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8889 info.preferredSampleRate = SAMPLE_RATES[k];
8897 // Check min and max rate values;
8898 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8899 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
8900 info.sampleRates.push_back( SAMPLE_RATES[k] );
8902 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8903 info.preferredSampleRate = SAMPLE_RATES[k];
8908 if ( info.sampleRates.size() == 0 ) {
8909 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
8910 errorText_ = errorStream_.str();
8911 error( RtAudioError::WARNING );
8915 info.name = ainfo.name;
// Open and configure an OSS device for one direction of a stream: validate
// the device, open its /dev/dsp node, negotiate channels / data format /
// fragment size / sample rate, allocate user and (if needed) conversion
// buffers, and spawn the callback thread on the first (or only) open.
// The second call of a duplex setup (mode == INPUT after OUTPUT) reuses or
// reopens the same descriptor in O_RDWR mode.  Returns false on failure
// after releasing anything allocated (cleanup at the 'error' tail below).
//
// Fixes vs. previous revision:
//  * the SNDCTL_AUDIOINFO failure message wrongly said "getDeviceInfo";
//  * `flags | O_RDWR` was always true (O_RDWR is nonzero), so the duplex
//    SNDCTL_DSP_SETDUPLEX ioctl ran for every open; the comment's intent
//    ("for duplex operation") requires the bitwise AND test.
8922 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
8923 unsigned int firstChannel, unsigned int sampleRate,
8924 RtAudioFormat format, unsigned int *bufferSize,
8925 RtAudio::StreamOptions *options )
8927 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8928 if ( mixerfd == -1 ) {
8929 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
// OSS >= 4.0 is required for the SNDCTL_SYSINFO / SNDCTL_AUDIOINFO queries.
8933 oss_sysinfo sysinfo;
8934 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8935 if ( result == -1 ) {
8937 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
8941 unsigned nDevices = sysinfo.numaudios;
8942 if ( nDevices == 0 ) {
8943 // This should not happen because a check is made before this function is called.
8945 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
8949 if ( device >= nDevices ) {
8950 // This should not happen because a check is made before this function is called.
8952 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
8956 oss_audioinfo ainfo;
8958 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8960 if ( result == -1 ) {
8961 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") info.";
8962 errorText_ = errorStream_.str();
8966 // Check if device supports input or output
8967 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
8968 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
8969 if ( mode == OUTPUT )
8970 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
8972 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
8973 errorText_ = errorStream_.str();
// If this is the input half of a duplex open on the same device, OSS
// requires closing the playback-only descriptor and reopening read/write.
8978 OssHandle *handle = (OssHandle *) stream_.apiHandle;
8979 if ( mode == OUTPUT )
8981 else { // mode == INPUT
8982 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
8983 // We just set the same device for playback ... close and reopen for duplex (OSS only).
8984 close( handle->id[0] );
8986 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
8987 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
8988 errorText_ = errorStream_.str();
8991 // Check that the number previously set channels is the same.
8992 if ( stream_.nUserChannels[0] != channels ) {
8993 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
8994 errorText_ = errorStream_.str();
9003 // Set exclusive access if specified.
9004 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9006 // Try to open the device.
9008 fd = open( ainfo.devnode, flags, 0 );
9010 if ( errno == EBUSY )
9011 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9013 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9014 errorText_ = errorStream_.str();
9018 // For duplex operation, specifically set this mode (this doesn't seem to work).
9020 if ( flags & O_RDWR ) {
9021 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9022 if ( result == -1) {
9023 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9024 errorText_ = errorStream_.str();
9030 // Check the device channel support.
9031 stream_.nUserChannels[mode] = channels;
9032 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9034 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9035 errorText_ = errorStream_.str();
9039 // Set the number of channels.
9040 int deviceChannels = channels + firstChannel;
9041 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9042 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9044 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9045 errorText_ = errorStream_.str();
9048 stream_.nDeviceChannels[mode] = deviceChannels;
9050 // Get the data format mask
9052 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9053 if ( result == -1 ) {
9055 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9056 errorText_ = errorStream_.str();
9060 // Determine how to set the device format.
// Prefer the native-endian variant of the requested format; accept the
// opposite-endian variant with a byte-swap flag as second choice.
9061 stream_.userFormat = format;
9062 int deviceFormat = -1;
9063 stream_.doByteSwap[mode] = false;
9064 if ( format == RTAUDIO_SINT8 ) {
9065 if ( mask & AFMT_S8 ) {
9066 deviceFormat = AFMT_S8;
9067 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9070 else if ( format == RTAUDIO_SINT16 ) {
9071 if ( mask & AFMT_S16_NE ) {
9072 deviceFormat = AFMT_S16_NE;
9073 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9075 else if ( mask & AFMT_S16_OE ) {
9076 deviceFormat = AFMT_S16_OE;
9077 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9078 stream_.doByteSwap[mode] = true;
9081 else if ( format == RTAUDIO_SINT24 ) {
9082 if ( mask & AFMT_S24_NE ) {
9083 deviceFormat = AFMT_S24_NE;
9084 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9086 else if ( mask & AFMT_S24_OE ) {
9087 deviceFormat = AFMT_S24_OE;
9088 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9089 stream_.doByteSwap[mode] = true;
9092 else if ( format == RTAUDIO_SINT32 ) {
9093 if ( mask & AFMT_S32_NE ) {
9094 deviceFormat = AFMT_S32_NE;
9095 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9097 else if ( mask & AFMT_S32_OE ) {
9098 deviceFormat = AFMT_S32_OE;
9099 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9100 stream_.doByteSwap[mode] = true;
9104 if ( deviceFormat == -1 ) {
9105 // The user requested format is not natively supported by the device.
// Fall back through the widest practical formats (16 -> 32 -> 24 -> 8),
// native endian first; format conversion is flagged later via userFormat.
9106 if ( mask & AFMT_S16_NE ) {
9107 deviceFormat = AFMT_S16_NE;
9108 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9110 else if ( mask & AFMT_S32_NE ) {
9111 deviceFormat = AFMT_S32_NE;
9112 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9114 else if ( mask & AFMT_S24_NE ) {
9115 deviceFormat = AFMT_S24_NE;
9116 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9118 else if ( mask & AFMT_S16_OE ) {
9119 deviceFormat = AFMT_S16_OE;
9120 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9121 stream_.doByteSwap[mode] = true;
9123 else if ( mask & AFMT_S32_OE ) {
9124 deviceFormat = AFMT_S32_OE;
9125 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9126 stream_.doByteSwap[mode] = true;
9128 else if ( mask & AFMT_S24_OE ) {
9129 deviceFormat = AFMT_S24_OE;
9130 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9131 stream_.doByteSwap[mode] = true;
9133 else if ( mask & AFMT_S8) {
9134 deviceFormat = AFMT_S8;
9135 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9139 if ( stream_.deviceFormat[mode] == 0 ) {
9140 // This really shouldn't happen ...
9142 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9143 errorText_ = errorStream_.str();
9147 // Set the data format.
9148 int temp = deviceFormat;
9149 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9150 if ( result == -1 || deviceFormat != temp ) {
9152 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9153 errorText_ = errorStream_.str();
9157 // Attempt to set the buffer size. According to OSS, the minimum
9158 // number of buffers is two. The supposed minimum buffer size is 16
9159 // bytes, so that will be our lower bound. The argument to this
9160 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9161 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9162 // We'll check the actual value used near the end of the setup
9164 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9165 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9167 if ( options ) buffers = options->numberOfBuffers;
9168 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9169 if ( buffers < 2 ) buffers = 3;
9170 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9171 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9172 if ( result == -1 ) {
9174 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9175 errorText_ = errorStream_.str();
9178 stream_.nBuffers = buffers;
9180 // Save buffer size (in sample frames).
9181 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9182 stream_.bufferSize = *bufferSize;
9184 // Set the sample rate.
9185 int srate = sampleRate;
9186 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9187 if ( result == -1 ) {
9189 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9190 errorText_ = errorStream_.str();
9194 // Verify the sample rate setup worked.
// Allow a 100 Hz tolerance: OSS may report a slightly different actual rate.
9195 if ( abs( srate - (int)sampleRate ) > 100 ) {
9197 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9198 errorText_ = errorStream_.str();
9201 stream_.sampleRate = sampleRate;
9203 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9204 // We're doing duplex setup here.
9205 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9206 stream_.nDeviceChannels[0] = deviceChannels;
9209 // Set interleaving parameters.
9210 stream_.userInterleaved = true;
9211 stream_.deviceInterleaved[mode] = true;
9212 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9213 stream_.userInterleaved = false;
9215 // Set flags for buffer conversion
9216 stream_.doConvertBuffer[mode] = false;
9217 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9218 stream_.doConvertBuffer[mode] = true;
9219 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9220 stream_.doConvertBuffer[mode] = true;
9221 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9222 stream_.nUserChannels[mode] > 1 )
9223 stream_.doConvertBuffer[mode] = true;
9225 // Allocate the stream handles if necessary and then save.
9226 if ( stream_.apiHandle == 0 ) {
9228 handle = new OssHandle;
9230 catch ( std::bad_alloc& ) {
9231 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9235 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9236 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9240 stream_.apiHandle = (void *) handle;
9243 handle = (OssHandle *) stream_.apiHandle;
9245 handle->id[mode] = fd;
9247 // Allocate necessary internal buffers.
9248 unsigned long bufferBytes;
9249 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9250 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9251 if ( stream_.userBuffer[mode] == NULL ) {
9252 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9256 if ( stream_.doConvertBuffer[mode] ) {
// A single device buffer is shared by both directions; only (re)allocate
// when this direction needs more bytes than an existing output buffer.
9258 bool makeBuffer = true;
9259 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9260 if ( mode == INPUT ) {
9261 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9262 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9263 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9268 bufferBytes *= *bufferSize;
9269 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9270 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9271 if ( stream_.deviceBuffer == NULL ) {
9272 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9278 stream_.device[mode] = device;
9279 stream_.state = STREAM_STOPPED;
9281 // Setup the buffer conversion information structure.
9282 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9284 // Setup thread if necessary.
9285 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9286 // We had already set up an output stream.
9287 stream_.mode = DUPLEX;
9288 if ( stream_.device[0] == device ) handle->id[0] = fd;
9291 stream_.mode = mode;
9293 // Setup callback thread.
9294 stream_.callbackInfo.object = (void *) this;
9296 // Set the thread attributes for joinable and realtime scheduling
9297 // priority. The higher priority will only take affect if the
9298 // program is run as root or suid.
9299 pthread_attr_t attr;
9300 pthread_attr_init( &attr );
9301 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9302 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9303 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9304 stream_.callbackInfo.doRealtime = true;
9305 struct sched_param param;
9306 int priority = options->priority;
9307 int min = sched_get_priority_min( SCHED_RR );
9308 int max = sched_get_priority_max( SCHED_RR );
9309 if ( priority < min ) priority = min;
9310 else if ( priority > max ) priority = max;
9311 param.sched_priority = priority;
9313 // Set the policy BEFORE the priority. Otherwise it fails.
9314 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9315 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9316 // This is definitely required. Otherwise it fails.
9317 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
9318 pthread_attr_setschedparam(&attr, &param);
9321 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9323 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9326 stream_.callbackInfo.isRunning = true;
9327 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9328 pthread_attr_destroy( &attr );
9330 // Failed. Try instead with default attributes.
9331 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9333 stream_.callbackInfo.isRunning = false;
9334 errorText_ = "RtApiOss::error creating callback thread!";
// Error tail: release the handle, close any open descriptors, and free
// all buffers before marking the stream closed.
9344 pthread_cond_destroy( &handle->runnable );
9345 if ( handle->id[0] ) close( handle->id[0] );
9346 if ( handle->id[1] ) close( handle->id[1] );
9348 stream_.apiHandle = 0;
9351 for ( int i=0; i<2; i++ ) {
9352 if ( stream_.userBuffer[i] ) {
9353 free( stream_.userBuffer[i] );
9354 stream_.userBuffer[i] = 0;
9358 if ( stream_.deviceBuffer ) {
9359 free( stream_.deviceBuffer );
9360 stream_.deviceBuffer = 0;
9363 stream_.state = STREAM_CLOSED;
// Close the open stream: wake and join the callback thread, halt any
// running device I/O, then release the OssHandle, file descriptors, and
// all user/device buffers.  A WARNING is issued if no stream is open.
9367 void RtApiOss :: closeStream()
9369 if ( stream_.state == STREAM_CLOSED ) {
9370 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9371 error( RtAudioError::WARNING );
9375 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Clear the running flag first so the callback loop exits, then signal the
// condition variable in case the thread is blocked waiting in STOPPED state.
9376 stream_.callbackInfo.isRunning = false;
9377 MUTEX_LOCK( &stream_.mutex );
9378 if ( stream_.state == STREAM_STOPPED )
9379 pthread_cond_signal( &handle->runnable );
9380 MUTEX_UNLOCK( &stream_.mutex );
9381 pthread_join( stream_.callbackInfo.thread, NULL );
// If still running, halt the OSS devices (output and/or input) directly.
9383 if ( stream_.state == STREAM_RUNNING ) {
9384 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9385 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9387 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9388 stream_.state = STREAM_STOPPED;
// Tear down the per-stream handle and close both descriptors if open.
9392 pthread_cond_destroy( &handle->runnable );
9393 if ( handle->id[0] ) close( handle->id[0] );
9394 if ( handle->id[1] ) close( handle->id[1] );
9396 stream_.apiHandle = 0;
9399 for ( int i=0; i<2; i++ ) {
9400 if ( stream_.userBuffer[i] ) {
9401 free( stream_.userBuffer[i] );
9402 stream_.userBuffer[i] = 0;
9406 if ( stream_.deviceBuffer ) {
9407 free( stream_.deviceBuffer );
9408 stream_.deviceBuffer = 0;
9411 stream_.mode = UNINITIALIZED;
9412 stream_.state = STREAM_CLOSED;
// Start the stream.  OSS begins I/O as soon as samples are read/written,
// so this only flips the state to RUNNING and wakes the callback thread
// (which blocks on handle->runnable while the stream is stopped).
9415 void RtApiOss :: startStream()
9418 if ( stream_.state == STREAM_RUNNING ) {
9419 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9420 error( RtAudioError::WARNING );
9424 MUTEX_LOCK( &stream_.mutex );
9426 stream_.state = STREAM_RUNNING;
9428 // No need to do anything else here ... OSS automatically starts
9429 // when fed samples.
9431 MUTEX_UNLOCK( &stream_.mutex );
9433 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9434 pthread_cond_signal( &handle->runnable );
// Stop the stream gracefully: flush the output with zero-filled buffers
// (so queued audio drains to silence) before halting the device(s) with
// SNDCTL_DSP_HALT.  Raises SYSTEM_ERROR if any ioctl/write failed.
9437 void RtApiOss :: stopStream()
9440 if ( stream_.state == STREAM_STOPPED ) {
9441 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9442 error( RtAudioError::WARNING );
9446 MUTEX_LOCK( &stream_.mutex );
9448 // The state might change while waiting on a mutex.
9449 if ( stream_.state == STREAM_STOPPED ) {
9450 MUTEX_UNLOCK( &stream_.mutex );
9455 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9456 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9458 // Flush the output with zeros a few times.
// Choose the buffer actually fed to the device: the conversion buffer when
// format/channel conversion is active, otherwise the user buffer.
9461 RtAudioFormat format;
9463 if ( stream_.doConvertBuffer[0] ) {
9464 buffer = stream_.deviceBuffer;
9465 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9466 format = stream_.deviceFormat[0];
9469 buffer = stream_.userBuffer[0];
9470 samples = stream_.bufferSize * stream_.nUserChannels[0];
9471 format = stream_.userFormat;
// Write nBuffers+1 buffers of silence to drain the fragment queue.
9474 memset( buffer, 0, samples * formatBytes(format) );
9475 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9476 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9477 if ( result == -1 ) {
9478 errorText_ = "RtApiOss::stopStream: audio write error.";
9479 error( RtAudioError::WARNING );
9483 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9484 if ( result == -1 ) {
9485 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9486 errorText_ = errorStream_.str();
9489 handle->triggered = false;
// Halt the input side too, unless duplex shares a single descriptor.
9492 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9493 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9494 if ( result == -1 ) {
9495 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9496 errorText_ = errorStream_.str();
9502 stream_.state = STREAM_STOPPED;
9503 MUTEX_UNLOCK( &stream_.mutex );
9505 if ( result != -1 ) return;
9506 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream immediately: halt the device(s) with SNDCTL_DSP_HALT
// without draining pending output (contrast with stopStream(), which
// flushes zeros first).  Raises SYSTEM_ERROR if an ioctl failed.
9509 void RtApiOss :: abortStream()
9512 if ( stream_.state == STREAM_STOPPED ) {
9513 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9514 error( RtAudioError::WARNING );
9518 MUTEX_LOCK( &stream_.mutex );
9520 // The state might change while waiting on a mutex.
9521 if ( stream_.state == STREAM_STOPPED ) {
9522 MUTEX_UNLOCK( &stream_.mutex );
9527 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9528 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9529 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9530 if ( result == -1 ) {
9531 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9532 errorText_ = errorStream_.str();
9535 handle->triggered = false;
// Halt the input side too, unless duplex shares a single descriptor.
9538 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9539 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9540 if ( result == -1 ) {
9541 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9542 errorText_ = errorStream_.str();
9548 stream_.state = STREAM_STOPPED;
9549 MUTEX_UNLOCK( &stream_.mutex );
9551 if ( result != -1 ) return;
9552 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the callback thread loop: wait while stopped, invoke
// the user callback with xrun status, then write output samples to and/or
// read input samples from the device, performing byte-swap and format
// conversion as configured in probeDeviceOpen().
9555 void RtApiOss :: callbackEvent()
9557 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Block on the condition variable until startStream()/closeStream()
// signals; bail out if the wake-up wasn't a transition to RUNNING.
9558 if ( stream_.state == STREAM_STOPPED ) {
9559 MUTEX_LOCK( &stream_.mutex );
9560 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9561 if ( stream_.state != STREAM_RUNNING ) {
9562 MUTEX_UNLOCK( &stream_.mutex );
9565 MUTEX_UNLOCK( &stream_.mutex );
9568 if ( stream_.state == STREAM_CLOSED ) {
9569 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9570 error( RtAudioError::WARNING );
9574 // Invoke user callback to get fresh output data.
9575 int doStopStream = 0;
9576 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9577 double streamTime = getStreamTime();
// Report and clear any under/overflow flags set by earlier I/O errors.
9578 RtAudioStreamStatus status = 0;
9579 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9580 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9581 handle->xrun[0] = false;
9583 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9584 status |= RTAUDIO_INPUT_OVERFLOW;
9585 handle->xrun[1] = false;
9587 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9588 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// Callback return 2 requests an immediate abort (no drain).
9589 if ( doStopStream == 2 ) {
9590 this->abortStream();
9594 MUTEX_LOCK( &stream_.mutex );
9596 // The state might change while waiting on a mutex.
9597 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9602 RtAudioFormat format;
9604 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9606 // Setup parameters and do buffer conversion if necessary.
9607 if ( stream_.doConvertBuffer[0] ) {
9608 buffer = stream_.deviceBuffer;
9609 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9610 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9611 format = stream_.deviceFormat[0];
9614 buffer = stream_.userBuffer[0];
9615 samples = stream_.bufferSize * stream_.nUserChannels[0];
9616 format = stream_.userFormat;
9619 // Do byte swapping if necessary.
9620 if ( stream_.doByteSwap[0] )
9621 byteSwapBuffer( buffer, samples, format );
// First duplex buffer: prime the output while triggering is disabled,
// then enable input+output simultaneously via SNDCTL_DSP_SETTRIGGER so
// both directions start in sync.
9623 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9625 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9626 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9627 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9628 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9629 handle->triggered = true;
9632 // Write samples to device.
9633 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9635 if ( result == -1 ) {
9636 // We'll assume this is an underrun, though there isn't a
9637 // specific means for determining that.
9638 handle->xrun[0] = true;
9639 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9640 error( RtAudioError::WARNING );
9641 // Continue on to input section.
9645 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9647 // Setup parameters.
9648 if ( stream_.doConvertBuffer[1] ) {
9649 buffer = stream_.deviceBuffer;
9650 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9651 format = stream_.deviceFormat[1];
9654 buffer = stream_.userBuffer[1];
9655 samples = stream_.bufferSize * stream_.nUserChannels[1];
9656 format = stream_.userFormat;
9659 // Read samples from device.
9660 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9662 if ( result == -1 ) {
9663 // We'll assume this is an overrun, though there isn't a
9664 // specific means for determining that.
9665 handle->xrun[1] = true;
9666 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9667 error( RtAudioError::WARNING );
9671 // Do byte swapping if necessary.
9672 if ( stream_.doByteSwap[1] )
9673 byteSwapBuffer( buffer, samples, format );
9675 // Do buffer conversion if necessary.
9676 if ( stream_.doConvertBuffer[1] )
9677 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9681 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; callback return 1 requests a drained stop.
9683 RtApi::tickStreamTime();
9684 if ( doStopStream == 1 ) this->stopStream();
// Entry point of the per-stream callback thread created in
// probeDeviceOpen().  Repeatedly drives RtApiOss::callbackEvent() until
// callbackInfo.isRunning is cleared (e.g. by closeStream() or error()).
9687 static void *ossCallbackHandler( void *ptr )
9689 CallbackInfo *info = (CallbackInfo *) ptr;
9690 RtApiOss *object = (RtApiOss *) info->object;
9691 bool *isRunning = &info->isRunning;
9693 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
// Diagnostic: report whether the requested SCHED_RR policy actually took
// effect (it only does when the process has sufficient privileges).
9694 if (info->doRealtime) {
9695 std::cerr << "RtAudio oss: " <<
9696 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9697 "running realtime scheduling" << std::endl;
9701 while ( *isRunning == true ) {
9702 pthread_testcancel();
9703 object->callbackEvent();
9706 pthread_exit( NULL );
9709 //******************** End of __LINUX_OSS__ *********************//
9713 // *************************************************** //
9715 // Protected common (OS-independent) RtAudio methods.
9717 // *************************************************** //
9719 // This method can be modified to control the behavior of error
9720 // message printing.
// Central error dispatch for all back ends.  If the client registered an
// error callback, it is invoked with errorText_ (only the first error of a
// cascade is reported); otherwise WARNINGs go to std::cerr when enabled and
// anything stronger is thrown as an RtAudioError.
9719 // This method can be modified to control the behavior of error
9720 // message printing.
9721 void RtApi :: error( RtAudioError::Type type )
9723 errorStream_.str(""); // clear the ostringstream
9725 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9726 if ( errorCallback ) {
9727 // abortStream() can generate new error messages. Ignore them. Just keep original one.
9729 if ( firstErrorOccurred_ )
9732 firstErrorOccurred_ = true;
// Copy the message before any stream teardown can overwrite errorText_.
9733 const std::string errorMessage = errorText_;
9735 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9736 stream_.callbackInfo.isRunning = false; // exit from the thread
9740 errorCallback( type, errorMessage );
9741 firstErrorOccurred_ = false;
9745 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9746 std::cerr << '\n' << errorText_ << "\n\n";
9747 else if ( type != RtAudioError::WARNING )
9748 throw( RtAudioError( errorText_, type ) );
// Guard used by public stream methods: raise INVALID_USE (which throws via
// error()) when no stream is currently open.
9751 void RtApi :: verifyStream()
9753 if ( stream_.state == STREAM_CLOSED ) {
9754 errorText_ = "RtApi:: a stream is not open!";
9755 error( RtAudioError::INVALID_USE );
// Reset every field of the shared stream_ structure to its default so a
// subsequent openStream() starts from a clean slate.
9759 void RtApi :: clearStreamInfo()
9761 stream_.mode = UNINITIALIZED;
9762 stream_.state = STREAM_CLOSED;
9763 stream_.sampleRate = 0;
9764 stream_.bufferSize = 0;
9765 stream_.nBuffers = 0;
9766 stream_.userFormat = 0;
9767 stream_.userInterleaved = true;
9768 stream_.streamTime = 0.0;
9769 stream_.apiHandle = 0;
9770 stream_.deviceBuffer = 0;
9771 stream_.callbackInfo.callback = 0;
9772 stream_.callbackInfo.userData = 0;
9773 stream_.callbackInfo.isRunning = false;
9774 stream_.callbackInfo.errorCallback = 0;
// Per-direction fields: index 0 = output, index 1 = input.
9775 for ( int i=0; i<2; i++ ) {
// 11111 is a sentinel "no device selected" value.
9776 stream_.device[i] = 11111;
9777 stream_.doConvertBuffer[i] = false;
9778 stream_.deviceInterleaved[i] = true;
9779 stream_.doByteSwap[i] = false;
9780 stream_.nUserChannels[i] = 0;
9781 stream_.nDeviceChannels[i] = 0;
9782 stream_.channelOffset[i] = 0;
9783 stream_.deviceFormat[i] = 0;
9784 stream_.latency[i] = 0;
9785 stream_.userBuffer[i] = 0;
9786 stream_.convertInfo[i].channels = 0;
9787 stream_.convertInfo[i].inJump = 0;
9788 stream_.convertInfo[i].outJump = 0;
9789 stream_.convertInfo[i].inFormat = 0;
9790 stream_.convertInfo[i].outFormat = 0;
9791 stream_.convertInfo[i].inOffset.clear();
9792 stream_.convertInfo[i].outOffset.clear();
// Return the size in bytes of one sample of the given RtAudioFormat
// (e.g. 2 for SINT16, 4 for SINT32/FLOAT32).  An unknown format raises
// a WARNING.  Note: RTAUDIO_SINT24 occupies the lower 3 bytes of a 4-byte
// container in RtAudio's Int24 type (see convertBuffer()).
9796 unsigned int RtApi :: formatBytes( RtAudioFormat format )
9798 if ( format == RTAUDIO_SINT16 )
9800 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
9802 else if ( format == RTAUDIO_FLOAT64 )
9804 else if ( format == RTAUDIO_SINT24 )
9806 else if ( format == RTAUDIO_SINT8 )
9809 errorText_ = "RtApi::formatBytes: undefined format.";
9810 error( RtAudioError::WARNING );
// Populate stream_.convertInfo[mode] for convertBuffer(): per-frame jumps,
// in/out sample formats, and per-channel offsets implementing
// interleave <-> deinterleave mapping plus an optional firstChannel shift.
// For INPUT the conversion is device->user; for OUTPUT it is user->device.
9815 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
9817 if ( mode == INPUT ) { // convert device to user buffer
9818 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
9819 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
9820 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
9821 stream_.convertInfo[mode].outFormat = stream_.userFormat;
9823 else { // convert user to device buffer
9824 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
9825 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
9826 stream_.convertInfo[mode].inFormat = stream_.userFormat;
9827 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
// Convert only as many channels as both sides have in common.
9830 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
9831 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
9833 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
9835 // Set up the interleave/deinterleave offsets.
// Non-interleaved data lays channels out as contiguous planes of
// bufferSize samples, hence the k * bufferSize offsets and jump of 1.
9836 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
9837 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
9838 ( mode == INPUT && stream_.userInterleaved ) ) {
9839 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9840 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9841 stream_.convertInfo[mode].outOffset.push_back( k );
9842 stream_.convertInfo[mode].inJump = 1;
9846 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9847 stream_.convertInfo[mode].inOffset.push_back( k );
9848 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9849 stream_.convertInfo[mode].outJump = 1;
9853 else { // no (de)interleaving
9854 if ( stream_.userInterleaved ) {
9855 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9856 stream_.convertInfo[mode].inOffset.push_back( k );
9857 stream_.convertInfo[mode].outOffset.push_back( k );
9861 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9862 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9863 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9864 stream_.convertInfo[mode].inJump = 1;
9865 stream_.convertInfo[mode].outJump = 1;
9870 // Add channel offset.
// firstChannel shifts the device-side offsets: the out side for OUTPUT,
// the in side for INPUT; plane-sized steps when non-interleaved.
9871 if ( firstChannel > 0 ) {
9872 if ( stream_.deviceInterleaved[mode] ) {
9873 if ( mode == OUTPUT ) {
9874 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9875 stream_.convertInfo[mode].outOffset[k] += firstChannel;
9878 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9879 stream_.convertInfo[mode].inOffset[k] += firstChannel;
9883 if ( mode == OUTPUT ) {
9884 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9885 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
9888 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9889 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
9895 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
9897 // This function does format conversion, input/output channel compensation, and
9898 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
9899 // the lower three bytes of a 32-bit integer.
9901 // Clear our device buffer when in/out duplex device channels are different
9902 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
9903 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
9904 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
9907 if (info.outFormat == RTAUDIO_FLOAT64) {
9909 Float64 *out = (Float64 *)outBuffer;
9911 if (info.inFormat == RTAUDIO_SINT8) {
9912 signed char *in = (signed char *)inBuffer;
9913 scale = 1.0 / 127.5;
9914 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9915 for (j=0; j<info.channels; j++) {
9916 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9917 out[info.outOffset[j]] += 0.5;
9918 out[info.outOffset[j]] *= scale;
9921 out += info.outJump;
9924 else if (info.inFormat == RTAUDIO_SINT16) {
9925 Int16 *in = (Int16 *)inBuffer;
9926 scale = 1.0 / 32767.5;
9927 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9928 for (j=0; j<info.channels; j++) {
9929 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9930 out[info.outOffset[j]] += 0.5;
9931 out[info.outOffset[j]] *= scale;
9934 out += info.outJump;
9937 else if (info.inFormat == RTAUDIO_SINT24) {
9938 Int24 *in = (Int24 *)inBuffer;
9939 scale = 1.0 / 8388607.5;
9940 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9941 for (j=0; j<info.channels; j++) {
9942 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
9943 out[info.outOffset[j]] += 0.5;
9944 out[info.outOffset[j]] *= scale;
9947 out += info.outJump;
9950 else if (info.inFormat == RTAUDIO_SINT32) {
9951 Int32 *in = (Int32 *)inBuffer;
9952 scale = 1.0 / 2147483647.5;
9953 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9954 for (j=0; j<info.channels; j++) {
9955 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9956 out[info.outOffset[j]] += 0.5;
9957 out[info.outOffset[j]] *= scale;
9960 out += info.outJump;
9963 else if (info.inFormat == RTAUDIO_FLOAT32) {
9964 Float32 *in = (Float32 *)inBuffer;
9965 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9966 for (j=0; j<info.channels; j++) {
9967 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9970 out += info.outJump;
9973 else if (info.inFormat == RTAUDIO_FLOAT64) {
9974 // Channel compensation and/or (de)interleaving only.
9975 Float64 *in = (Float64 *)inBuffer;
9976 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9977 for (j=0; j<info.channels; j++) {
9978 out[info.outOffset[j]] = in[info.inOffset[j]];
9981 out += info.outJump;
9985 else if (info.outFormat == RTAUDIO_FLOAT32) {
9987 Float32 *out = (Float32 *)outBuffer;
9989 if (info.inFormat == RTAUDIO_SINT8) {
9990 signed char *in = (signed char *)inBuffer;
9991 scale = (Float32) ( 1.0 / 127.5 );
9992 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9993 for (j=0; j<info.channels; j++) {
9994 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9995 out[info.outOffset[j]] += 0.5;
9996 out[info.outOffset[j]] *= scale;
9999 out += info.outJump;
10002 else if (info.inFormat == RTAUDIO_SINT16) {
10003 Int16 *in = (Int16 *)inBuffer;
10004 scale = (Float32) ( 1.0 / 32767.5 );
10005 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10006 for (j=0; j<info.channels; j++) {
10007 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10008 out[info.outOffset[j]] += 0.5;
10009 out[info.outOffset[j]] *= scale;
10012 out += info.outJump;
10015 else if (info.inFormat == RTAUDIO_SINT24) {
10016 Int24 *in = (Int24 *)inBuffer;
10017 scale = (Float32) ( 1.0 / 8388607.5 );
10018 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10019 for (j=0; j<info.channels; j++) {
10020 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
10021 out[info.outOffset[j]] += 0.5;
10022 out[info.outOffset[j]] *= scale;
10025 out += info.outJump;
10028 else if (info.inFormat == RTAUDIO_SINT32) {
10029 Int32 *in = (Int32 *)inBuffer;
10030 scale = (Float32) ( 1.0 / 2147483647.5 );
10031 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10032 for (j=0; j<info.channels; j++) {
10033 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10034 out[info.outOffset[j]] += 0.5;
10035 out[info.outOffset[j]] *= scale;
10038 out += info.outJump;
10041 else if (info.inFormat == RTAUDIO_FLOAT32) {
10042 // Channel compensation and/or (de)interleaving only.
10043 Float32 *in = (Float32 *)inBuffer;
10044 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10045 for (j=0; j<info.channels; j++) {
10046 out[info.outOffset[j]] = in[info.inOffset[j]];
10049 out += info.outJump;
10052 else if (info.inFormat == RTAUDIO_FLOAT64) {
10053 Float64 *in = (Float64 *)inBuffer;
10054 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10055 for (j=0; j<info.channels; j++) {
10056 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10059 out += info.outJump;
10063 else if (info.outFormat == RTAUDIO_SINT32) {
10064 Int32 *out = (Int32 *)outBuffer;
10065 if (info.inFormat == RTAUDIO_SINT8) {
10066 signed char *in = (signed char *)inBuffer;
10067 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10068 for (j=0; j<info.channels; j++) {
10069 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10070 out[info.outOffset[j]] <<= 24;
10073 out += info.outJump;
10076 else if (info.inFormat == RTAUDIO_SINT16) {
10077 Int16 *in = (Int16 *)inBuffer;
10078 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10079 for (j=0; j<info.channels; j++) {
10080 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10081 out[info.outOffset[j]] <<= 16;
10084 out += info.outJump;
10087 else if (info.inFormat == RTAUDIO_SINT24) {
10088 Int24 *in = (Int24 *)inBuffer;
10089 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10090 for (j=0; j<info.channels; j++) {
10091 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10092 out[info.outOffset[j]] <<= 8;
10095 out += info.outJump;
10098 else if (info.inFormat == RTAUDIO_SINT32) {
10099 // Channel compensation and/or (de)interleaving only.
10100 Int32 *in = (Int32 *)inBuffer;
10101 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10102 for (j=0; j<info.channels; j++) {
10103 out[info.outOffset[j]] = in[info.inOffset[j]];
10106 out += info.outJump;
10109 else if (info.inFormat == RTAUDIO_FLOAT32) {
10110 Float32 *in = (Float32 *)inBuffer;
10111 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10112 for (j=0; j<info.channels; j++) {
10113 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10116 out += info.outJump;
10119 else if (info.inFormat == RTAUDIO_FLOAT64) {
10120 Float64 *in = (Float64 *)inBuffer;
10121 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10122 for (j=0; j<info.channels; j++) {
10123 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10126 out += info.outJump;
10130 else if (info.outFormat == RTAUDIO_SINT24) {
10131 Int24 *out = (Int24 *)outBuffer;
10132 if (info.inFormat == RTAUDIO_SINT8) {
10133 signed char *in = (signed char *)inBuffer;
10134 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10135 for (j=0; j<info.channels; j++) {
10136 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10137 //out[info.outOffset[j]] <<= 16;
10140 out += info.outJump;
10143 else if (info.inFormat == RTAUDIO_SINT16) {
10144 Int16 *in = (Int16 *)inBuffer;
10145 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10146 for (j=0; j<info.channels; j++) {
10147 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10148 //out[info.outOffset[j]] <<= 8;
10151 out += info.outJump;
10154 else if (info.inFormat == RTAUDIO_SINT24) {
10155 // Channel compensation and/or (de)interleaving only.
10156 Int24 *in = (Int24 *)inBuffer;
10157 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10158 for (j=0; j<info.channels; j++) {
10159 out[info.outOffset[j]] = in[info.inOffset[j]];
10162 out += info.outJump;
10165 else if (info.inFormat == RTAUDIO_SINT32) {
10166 Int32 *in = (Int32 *)inBuffer;
10167 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10168 for (j=0; j<info.channels; j++) {
10169 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10170 //out[info.outOffset[j]] >>= 8;
10173 out += info.outJump;
10176 else if (info.inFormat == RTAUDIO_FLOAT32) {
10177 Float32 *in = (Float32 *)inBuffer;
10178 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10179 for (j=0; j<info.channels; j++) {
10180 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10183 out += info.outJump;
10186 else if (info.inFormat == RTAUDIO_FLOAT64) {
10187 Float64 *in = (Float64 *)inBuffer;
10188 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10189 for (j=0; j<info.channels; j++) {
10190 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10193 out += info.outJump;
10197 else if (info.outFormat == RTAUDIO_SINT16) {
10198 Int16 *out = (Int16 *)outBuffer;
10199 if (info.inFormat == RTAUDIO_SINT8) {
10200 signed char *in = (signed char *)inBuffer;
10201 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10202 for (j=0; j<info.channels; j++) {
10203 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10204 out[info.outOffset[j]] <<= 8;
10207 out += info.outJump;
10210 else if (info.inFormat == RTAUDIO_SINT16) {
10211 // Channel compensation and/or (de)interleaving only.
10212 Int16 *in = (Int16 *)inBuffer;
10213 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10214 for (j=0; j<info.channels; j++) {
10215 out[info.outOffset[j]] = in[info.inOffset[j]];
10218 out += info.outJump;
10221 else if (info.inFormat == RTAUDIO_SINT24) {
10222 Int24 *in = (Int24 *)inBuffer;
10223 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10224 for (j=0; j<info.channels; j++) {
10225 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10228 out += info.outJump;
10231 else if (info.inFormat == RTAUDIO_SINT32) {
10232 Int32 *in = (Int32 *)inBuffer;
10233 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10234 for (j=0; j<info.channels; j++) {
10235 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10238 out += info.outJump;
10241 else if (info.inFormat == RTAUDIO_FLOAT32) {
10242 Float32 *in = (Float32 *)inBuffer;
10243 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10244 for (j=0; j<info.channels; j++) {
10245 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10248 out += info.outJump;
10251 else if (info.inFormat == RTAUDIO_FLOAT64) {
10252 Float64 *in = (Float64 *)inBuffer;
10253 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10254 for (j=0; j<info.channels; j++) {
10255 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10258 out += info.outJump;
10262 else if (info.outFormat == RTAUDIO_SINT8) {
10263 signed char *out = (signed char *)outBuffer;
10264 if (info.inFormat == RTAUDIO_SINT8) {
10265 // Channel compensation and/or (de)interleaving only.
10266 signed char *in = (signed char *)inBuffer;
10267 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10268 for (j=0; j<info.channels; j++) {
10269 out[info.outOffset[j]] = in[info.inOffset[j]];
10272 out += info.outJump;
10275 if (info.inFormat == RTAUDIO_SINT16) {
10276 Int16 *in = (Int16 *)inBuffer;
10277 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10278 for (j=0; j<info.channels; j++) {
10279 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10282 out += info.outJump;
10285 else if (info.inFormat == RTAUDIO_SINT24) {
10286 Int24 *in = (Int24 *)inBuffer;
10287 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10288 for (j=0; j<info.channels; j++) {
10289 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10292 out += info.outJump;
10295 else if (info.inFormat == RTAUDIO_SINT32) {
10296 Int32 *in = (Int32 *)inBuffer;
10297 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10298 for (j=0; j<info.channels; j++) {
10299 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10302 out += info.outJump;
10305 else if (info.inFormat == RTAUDIO_FLOAT32) {
10306 Float32 *in = (Float32 *)inBuffer;
10307 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10308 for (j=0; j<info.channels; j++) {
10309 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10312 out += info.outJump;
10315 else if (info.inFormat == RTAUDIO_FLOAT64) {
10316 Float64 *in = (Float64 *)inBuffer;
10317 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10318 for (j=0; j<info.channels; j++) {
10319 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10322 out += info.outJump;
//static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
//static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
//static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10332 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10338 if ( format == RTAUDIO_SINT16 ) {
10339 for ( unsigned int i=0; i<samples; i++ ) {
10340 // Swap 1st and 2nd bytes.
10345 // Increment 2 bytes.
10349 else if ( format == RTAUDIO_SINT32 ||
10350 format == RTAUDIO_FLOAT32 ) {
10351 for ( unsigned int i=0; i<samples; i++ ) {
10352 // Swap 1st and 4th bytes.
10357 // Swap 2nd and 3rd bytes.
10363 // Increment 3 more bytes.
10367 else if ( format == RTAUDIO_SINT24 ) {
10368 for ( unsigned int i=0; i<samples; i++ ) {
10369 // Swap 1st and 3rd bytes.
10374 // Increment 2 more bytes.
10378 else if ( format == RTAUDIO_FLOAT64 ) {
10379 for ( unsigned int i=0; i<samples; i++ ) {
10380 // Swap 1st and 8th bytes
10385 // Swap 2nd and 7th bytes
10391 // Swap 3rd and 6th bytes
10397 // Swap 4th and 5th bytes
10403 // Increment 5 more bytes.
// Indentation settings for Vim and Emacs
//
// Local Variables:
// c-basic-offset: 2
// indent-tabs-mode: nil
// End:
//
// vim: et sts=2 sw=2